From 8f788c64db587e74079d812dbcea40c2bd4b91ff Mon Sep 17 00:00:00 2001
From: Joseph Eagar
Date: Thu, 22 Oct 2009 23:22:05 +0000
Subject: merge with trunk/2.5 at r23876

[[Split portion of a mixed commit.]]
---
 CMake/macros.cmake | 38 +- CMakeLists.txt | 37 +- SConstruct | 72 +- config/darwin-config.py | 1 - config/irix6-config.py | 1 - config/linux2-config.py | 2 - config/linuxcross-config.py | 1 - config/openbsd3-config.py | 1 - config/sunos5-config.py | 1 - config/win32-mingw-config.py | 4 +- config/win32-vc-config.py | 2 +- config/win64-vc-config.py | 4 +- extern/CMakeLists.txt | 9 +- extern/Eigen2/Eigen/Array | 39 + extern/Eigen2/Eigen/Cholesky | 65 + extern/Eigen2/Eigen/Core | 154 + extern/Eigen2/Eigen/Dense | 8 + extern/Eigen2/Eigen/Eigen | 2 + extern/Eigen2/Eigen/Geometry | 51 + extern/Eigen2/Eigen/LU | 29 + extern/Eigen2/Eigen/LeastSquares | 27 + extern/Eigen2/Eigen/NewStdVector | 168 + extern/Eigen2/Eigen/QR | 73 + extern/Eigen2/Eigen/QtAlignedMalloc | 29 + extern/Eigen2/Eigen/SVD | 29 + extern/Eigen2/Eigen/Sparse | 132 + extern/Eigen2/Eigen/StdVector | 147 + extern/Eigen2/Eigen/src/Array/BooleanRedux.h | 145 + extern/Eigen2/Eigen/src/Array/CwiseOperators.h | 453 + extern/Eigen2/Eigen/src/Array/Functors.h | 305 + extern/Eigen2/Eigen/src/Array/Norms.h | 80 + extern/Eigen2/Eigen/src/Array/PartialRedux.h | 342 + extern/Eigen2/Eigen/src/Array/Random.h | 156 + extern/Eigen2/Eigen/src/Array/Select.h | 159 + .../Eigen/src/Cholesky/CholeskyInstantiations.cpp | 35 + extern/Eigen2/Eigen/src/Cholesky/LDLT.h | 198 + extern/Eigen2/Eigen/src/Cholesky/LLT.h | 219 + extern/Eigen2/Eigen/src/Core/Assign.h | 445 + extern/Eigen2/Eigen/src/Core/Block.h | 752 ++ .../Eigen2/Eigen/src/Core/CacheFriendlyProduct.h | 753 ++ extern/Eigen2/Eigen/src/Core/Coeffs.h | 384 + extern/Eigen2/Eigen/src/Core/CommaInitializer.h | 149 + .../Eigen2/Eigen/src/Core/CoreInstantiations.cpp | 47 + extern/Eigen2/Eigen/src/Core/Cwise.h | 211 + extern/Eigen2/Eigen/src/Core/CwiseBinaryOp.h | 304 + extern/Eigen2/Eigen/src/Core/CwiseNullaryOp.h | 763 ++ extern/Eigen2/Eigen/src/Core/CwiseUnaryOp.h | 229 + extern/Eigen2/Eigen/src/Core/DiagonalCoeffs.h | 124 + extern/Eigen2/Eigen/src/Core/DiagonalMatrix.h | 144 + extern/Eigen2/Eigen/src/Core/DiagonalProduct.h | 130 + extern/Eigen2/Eigen/src/Core/Dot.h | 361 + extern/Eigen2/Eigen/src/Core/Flagged.h | 146 + extern/Eigen2/Eigen/src/Core/Functors.h | 368 + extern/Eigen2/Eigen/src/Core/Fuzzy.h | 234 + extern/Eigen2/Eigen/src/Core/GenericPacketMath.h | 150 + extern/Eigen2/Eigen/src/Core/IO.h | 184 + extern/Eigen2/Eigen/src/Core/Map.h | 111 + extern/Eigen2/Eigen/src/Core/MapBase.h | 202 + extern/Eigen2/Eigen/src/Core/MathFunctions.h | 295 + extern/Eigen2/Eigen/src/Core/Matrix.h | 637 + extern/Eigen2/Eigen/src/Core/MatrixBase.h | 632 + extern/Eigen2/Eigen/src/Core/MatrixStorage.h | 249 + extern/Eigen2/Eigen/src/Core/Minor.h | 122 + extern/Eigen2/Eigen/src/Core/NestByValue.h | 114 + extern/Eigen2/Eigen/src/Core/NumTraits.h | 142 + extern/Eigen2/Eigen/src/Core/Part.h | 375 + extern/Eigen2/Eigen/src/Core/Product.h | 769 ++ extern/Eigen2/Eigen/src/Core/Redux.h | 117 + extern/Eigen2/Eigen/src/Core/SolveTriangular.h | 297 + extern/Eigen2/Eigen/src/Core/Sum.h | 271 + extern/Eigen2/Eigen/src/Core/Swap.h | 142 + extern/Eigen2/Eigen/src/Core/Transpose.h | 228 + extern/Eigen2/Eigen/src/Core/Visitor.h | 228 + .../Eigen/src/Core/arch/AltiVec/PacketMath.h | 354 + extern/Eigen2/Eigen/src/Core/arch/SSE/PacketMath.h | 321 + extern/Eigen2/Eigen/src/Core/util/Constants.h | 254 +
.../Eigen/src/Core/util/DisableMSVCWarnings.h | 5 + .../Eigen/src/Core/util/EnableMSVCWarnings.h | 4 + .../Eigen/src/Core/util/ForwardDeclarations.h | 125 + extern/Eigen2/Eigen/src/Core/util/Macros.h | 273 + extern/Eigen2/Eigen/src/Core/util/Memory.h | 368 + extern/Eigen2/Eigen/src/Core/util/Meta.h | 183 + extern/Eigen2/Eigen/src/Core/util/StaticAssert.h | 148 + extern/Eigen2/Eigen/src/Core/util/XprHelper.h | 219 + extern/Eigen2/Eigen/src/Geometry/AlignedBox.h | 173 + extern/Eigen2/Eigen/src/Geometry/AngleAxis.h | 228 + extern/Eigen2/Eigen/src/Geometry/EulerAngles.h | 96 + extern/Eigen2/Eigen/src/Geometry/Hyperplane.h | 268 + extern/Eigen2/Eigen/src/Geometry/OrthoMethods.h | 119 + .../Eigen2/Eigen/src/Geometry/ParametrizedLine.h | 155 + extern/Eigen2/Eigen/src/Geometry/Quaternion.h | 521 + extern/Eigen2/Eigen/src/Geometry/Rotation2D.h | 159 + extern/Eigen2/Eigen/src/Geometry/RotationBase.h | 137 + extern/Eigen2/Eigen/src/Geometry/Scaling.h | 181 + extern/Eigen2/Eigen/src/Geometry/Transform.h | 785 ++ extern/Eigen2/Eigen/src/Geometry/Translation.h | 198 + extern/Eigen2/Eigen/src/LU/Determinant.h | 122 + extern/Eigen2/Eigen/src/LU/Inverse.h | 258 + extern/Eigen2/Eigen/src/LU/LU.h | 541 + .../Eigen2/Eigen/src/LeastSquares/LeastSquares.h | 182 + extern/Eigen2/Eigen/src/QR/EigenSolver.h | 722 ++ .../Eigen2/Eigen/src/QR/HessenbergDecomposition.h | 250 + extern/Eigen2/Eigen/src/QR/QR.h | 334 + extern/Eigen2/Eigen/src/QR/QrInstantiations.cpp | 43 + .../Eigen2/Eigen/src/QR/SelfAdjointEigenSolver.h | 402 + extern/Eigen2/Eigen/src/QR/Tridiagonalization.h | 431 + extern/Eigen2/Eigen/src/SVD/SVD.h | 645 + extern/Eigen2/Eigen/src/Sparse/AmbiVector.h | 371 + extern/Eigen2/Eigen/src/Sparse/CholmodSupport.h | 236 + extern/Eigen2/Eigen/src/Sparse/CompressedStorage.h | 230 + extern/Eigen2/Eigen/src/Sparse/CoreIterators.h | 68 + .../Eigen2/Eigen/src/Sparse/DynamicSparseMatrix.h | 297 + .../Eigen2/Eigen/src/Sparse/MappedSparseMatrix.h | 175 + extern/Eigen2/Eigen/src/Sparse/RandomSetter.h | 330 + extern/Eigen2/Eigen/src/Sparse/SparseAssign.h | 0 extern/Eigen2/Eigen/src/Sparse/SparseBlock.h | 449 + extern/Eigen2/Eigen/src/Sparse/SparseCwise.h | 175 + .../Eigen2/Eigen/src/Sparse/SparseCwiseBinaryOp.h | 442 + .../Eigen2/Eigen/src/Sparse/SparseCwiseUnaryOp.h | 183 + .../Eigen/src/Sparse/SparseDiagonalProduct.h | 157 + extern/Eigen2/Eigen/src/Sparse/SparseDot.h | 97 + extern/Eigen2/Eigen/src/Sparse/SparseFlagged.h | 97 + extern/Eigen2/Eigen/src/Sparse/SparseFuzzy.h | 41 + extern/Eigen2/Eigen/src/Sparse/SparseLDLT.h | 346 + extern/Eigen2/Eigen/src/Sparse/SparseLLT.h | 205 + extern/Eigen2/Eigen/src/Sparse/SparseLU.h | 148 + extern/Eigen2/Eigen/src/Sparse/SparseMatrix.h | 447 + extern/Eigen2/Eigen/src/Sparse/SparseMatrixBase.h | 626 + extern/Eigen2/Eigen/src/Sparse/SparseProduct.h | 415 + extern/Eigen2/Eigen/src/Sparse/SparseRedux.h | 40 + extern/Eigen2/Eigen/src/Sparse/SparseTranspose.h | 85 + extern/Eigen2/Eigen/src/Sparse/SparseUtil.h | 148 + extern/Eigen2/Eigen/src/Sparse/SparseVector.h | 365 + extern/Eigen2/Eigen/src/Sparse/SuperLUSupport.h | 565 + extern/Eigen2/Eigen/src/Sparse/TaucsSupport.h | 210 + extern/Eigen2/Eigen/src/Sparse/TriangularSolver.h | 178 + extern/Eigen2/Eigen/src/Sparse/UmfPackSupport.h | 289 + extern/Eigen2/eigen-update.sh | 28 + extern/Makefile | 6 +- extern/SConscript | 7 +- extern/bullet2/CMakeLists.txt | 6 +- extern/bullet2/src/SConscript | 4 +- extern/glew/make/msvc_9_0/glew.vcproj | 1 + intern/CMakeLists.txt | 1 + intern/Makefile | 2 +- intern/SConscript | 1 + 
intern/audaspace/OpenAL/AUD_OpenALDevice.cpp | 389 +- intern/audaspace/intern/AUD_C-API.cpp | 38 +- intern/audaspace/intern/AUD_C-API.h | 23 +- intern/audaspace/intern/AUD_SoftwareDevice.cpp | 194 +- intern/audaspace/make/msvc_9_0/audaspace.vcproj | 1 + intern/boolop/make/msvc_9_0/boolop.vcproj | 1 + intern/bsp/make/msvc_9_0/bsplib.vcproj | 1 + intern/container/make/msvc_9_0/container.vcproj | 1 + intern/decimation/make/msvc_9_0/decimation.vcproj | 1 + intern/elbeem/make/msvc_9_0/elbeem.vcproj | 1 + intern/ghost/CMakeLists.txt | 17 +- intern/ghost/GHOST_C-api.h | 12 +- intern/ghost/GHOST_IWindow.h | 15 +- intern/ghost/GHOST_Types.h | 16 +- intern/ghost/intern/GHOST_C-api.cpp | 11 +- intern/ghost/intern/GHOST_DisplayManager.cpp | 2 - intern/ghost/intern/GHOST_ISystem.cpp | 12 +- intern/ghost/intern/GHOST_System.cpp | 2 +- intern/ghost/intern/GHOST_SystemCarbon.cpp | 10 +- intern/ghost/intern/GHOST_SystemWin32.cpp | 34 +- intern/ghost/intern/GHOST_SystemX11.cpp | 61 +- intern/ghost/intern/GHOST_Window.cpp | 26 +- intern/ghost/intern/GHOST_Window.h | 69 +- intern/ghost/intern/GHOST_WindowCarbon.cpp | 2 +- intern/ghost/intern/GHOST_WindowManager.cpp | 19 +- intern/ghost/intern/GHOST_WindowManager.h | 10 +- intern/ghost/intern/GHOST_WindowWin32.cpp | 10 +- intern/ghost/intern/GHOST_WindowX11.cpp | 173 +- intern/ghost/intern/GHOST_WindowX11.h | 3 +- intern/ghost/make/msvc_9_0/ghost.vcproj | 1 + intern/guardedalloc/MEM_guardedalloc.h | 2 - intern/guardedalloc/intern/mallocn.c | 24 +- intern/iksolver/make/msvc_9_0/iksolver.vcproj | 1 + intern/itasc/Armature.cpp | 775 ++ intern/itasc/Armature.hpp | 137 + intern/itasc/CMakeLists.txt | 32 + intern/itasc/Cache.cpp | 620 + intern/itasc/Cache.hpp | 227 + intern/itasc/ConstraintSet.cpp | 170 + intern/itasc/ConstraintSet.hpp | 115 + intern/itasc/ControlledObject.cpp | 61 + intern/itasc/ControlledObject.hpp | 70 + intern/itasc/CopyPose.cpp | 480 + intern/itasc/CopyPose.hpp | 99 + intern/itasc/Distance.cpp | 321 + intern/itasc/Distance.hpp | 62 + intern/itasc/FixedObject.cpp | 70 + intern/itasc/FixedObject.hpp | 45 + intern/itasc/Makefile | 53 + intern/itasc/MovingFrame.cpp | 156 + intern/itasc/MovingFrame.hpp | 48 + intern/itasc/Object.hpp | 48 + intern/itasc/SConscript | 11 + intern/itasc/Scene.cpp | 543 + intern/itasc/Scene.hpp | 104 + intern/itasc/Solver.hpp | 33 + intern/itasc/UncontrolledObject.cpp | 43 + intern/itasc/UncontrolledObject.hpp | 37 + intern/itasc/WDLSSolver.cpp | 101 + intern/itasc/WDLSSolver.hpp | 48 + intern/itasc/WSDLSSolver.cpp | 138 + intern/itasc/WSDLSSolver.hpp | 43 + intern/itasc/WorldObject.cpp | 26 + intern/itasc/WorldObject.hpp | 30 + intern/itasc/eigen_types.cpp | 12 + intern/itasc/eigen_types.hpp | 84 + intern/itasc/kdl/Makefile | 43 + intern/itasc/kdl/chain.cpp | 75 + intern/itasc/kdl/chain.hpp | 95 + intern/itasc/kdl/chainfksolver.hpp | 107 + intern/itasc/kdl/chainfksolverpos_recursive.cpp | 61 + intern/itasc/kdl/chainfksolverpos_recursive.hpp | 50 + intern/itasc/kdl/chainjnttojacsolver.cpp | 80 + intern/itasc/kdl/chainjnttojacsolver.hpp | 65 + intern/itasc/kdl/frameacc.cpp | 26 + intern/itasc/kdl/frameacc.hpp | 259 + intern/itasc/kdl/frameacc.inl | 598 + intern/itasc/kdl/frames.cpp | 389 + intern/itasc/kdl/frames.hpp | 1097 ++ intern/itasc/kdl/frames.inl | 1390 +++ intern/itasc/kdl/frames_io.cpp | 310 + intern/itasc/kdl/frames_io.hpp | 114 + intern/itasc/kdl/framevel.cpp | 27 + intern/itasc/kdl/framevel.hpp | 382 + intern/itasc/kdl/framevel.inl | 534 + intern/itasc/kdl/inertia.cpp | 48 + intern/itasc/kdl/inertia.hpp | 70 
+ intern/itasc/kdl/jacobian.cpp | 129 + intern/itasc/kdl/jacobian.hpp | 68 + intern/itasc/kdl/jntarray.cpp | 152 + intern/itasc/kdl/jntarray.hpp | 217 + intern/itasc/kdl/jntarrayacc.cpp | 170 + intern/itasc/kdl/jntarrayacc.hpp | 66 + intern/itasc/kdl/jntarrayvel.cpp | 111 + intern/itasc/kdl/jntarrayvel.hpp | 59 + intern/itasc/kdl/joint.cpp | 153 + intern/itasc/kdl/joint.hpp | 138 + intern/itasc/kdl/kinfam_io.cpp | 101 + intern/itasc/kdl/kinfam_io.hpp | 70 + intern/itasc/kdl/segment.cpp | 68 + intern/itasc/kdl/segment.hpp | 149 + intern/itasc/kdl/tree.cpp | 117 + intern/itasc/kdl/tree.hpp | 167 + intern/itasc/kdl/treefksolver.hpp | 110 + intern/itasc/kdl/treefksolverpos_recursive.cpp | 70 + intern/itasc/kdl/treefksolverpos_recursive.hpp | 53 + intern/itasc/kdl/treejnttojacsolver.cpp | 78 + intern/itasc/kdl/treejnttojacsolver.hpp | 38 + intern/itasc/kdl/utilities/Makefile | 40 + intern/itasc/kdl/utilities/error.h | 245 + intern/itasc/kdl/utilities/error_stack.cpp | 59 + intern/itasc/kdl/utilities/error_stack.h | 70 + intern/itasc/kdl/utilities/kdl-config.h | 33 + intern/itasc/kdl/utilities/rall1d.h | 478 + intern/itasc/kdl/utilities/rall2d.h | 538 + intern/itasc/kdl/utilities/svd_eigen_HH.hpp | 309 + intern/itasc/kdl/utilities/traits.h | 111 + intern/itasc/kdl/utilities/utility.cpp | 21 + intern/itasc/kdl/utilities/utility.h | 299 + intern/itasc/kdl/utilities/utility_io.cpp | 208 + intern/itasc/kdl/utilities/utility_io.h | 79 + intern/itasc/make/msvc_9_0/itasc.vcproj | 539 + intern/itasc/ublas_types.hpp | 82 + intern/memutil/make/msvc_9_0/memutil.vcproj | 1 + intern/moto/make/msvc_9_0/moto.vcproj | 1 + intern/smoke/extern/smoke_API.h | 4 + intern/smoke/intern/FLUID_3D.cpp | 30 +- intern/smoke/intern/FLUID_3D_STATIC.cpp | 40 +- intern/smoke/intern/WTURBULENCE.cpp | 53 +- intern/smoke/intern/smoke_API.cpp | 15 + intern/smoke/make/msvc_9_0/smoke.vcproj | 1 + intern/string/make/msvc_9_0/string.vcproj | 1 + .../BLO_readblenfile/BLO_readblenfile.vcproj | 2 + .../blender/BPY_python/BPY_python.vcproj | 1 + projectfiles_vc9/blender/avi/BL_avi.vcproj | 1 + projectfiles_vc9/blender/blender.sln | 79 +- projectfiles_vc9/blender/blender.vcproj | 1 + .../blender/blenfont/BLF_blenfont.vcproj | 1 + .../blender/blenkernel/BKE_blenkernel.vcproj | 13 +- .../blender/blenlib/BLI_blenlib.vcproj | 9 + .../blenpluginapi/blenpluginapi.vcproj | 14 +- projectfiles_vc9/blender/editors/ED_editors.vcproj | 57 +- projectfiles_vc9/blender/gpu/BL_gpu.vcproj | 9 + .../blender/ikplugin/BIK_ikplugin.vcproj | 214 + projectfiles_vc9/blender/imbuf/BL_imbuf.vcproj | 1 + projectfiles_vc9/blender/loader/BLO_loader.vcproj | 1 + .../blender/makesdna/DNA_makesdna.vcproj | 1 + .../blender/makesrna/RNA_makesrna.vcproj | 55 +- projectfiles_vc9/blender/makesrna/RNA_rna.vcproj | 4 +- projectfiles_vc9/blender/nodes/nodes.vcproj | 9 +- .../blender/render/BRE_raytrace.vcproj | 230 + projectfiles_vc9/blender/render/BRE_render.vcproj | 30 +- .../blender/windowmanager/windowmanager.vcproj | 4 +- .../gameengine/blenderhook/KX_blenderhook.vcproj | 5 +- .../gameengine/converter/KX_converter.vcproj | 39 +- .../gameengine/expression/EXP_expressions.vcproj | 13 +- .../gameengine/gamelogic/SCA_GameLogic.vcproj | 9 + .../gameengine/gameplayer/axctl/GP_axctl.vcproj | 1 + .../gameengine/gameplayer/common/GP_common.vcproj | 1 + .../gameengine/gameplayer/ghost/GP_ghost.vcproj | 1 + .../gameengine/ketsji/KX_ketsji.vcproj | 9 + .../gameengine/ketsji/network/KX_network.vcproj | 1 + .../loopbacknetwork/NG_loopbacknetwork.vcproj | 2 + 
.../gameengine/network/network/NG_network.vcproj | 2 + .../PHY_Physics/PHY_Bullet/PHY_Bullet.vcproj | 1 + .../physics/PHY_Physics/PHY_Dummy/PHY_Dummy.vcproj | 2 + .../physics/PHY_Physics/PHY_Ode/PHY_Ode.vcproj | 2 + .../physics/PHY_Physics/PHY_Physics.vcproj | 1 + .../gameengine/rasterizer/RAS_rasterizer.vcproj | 1 + .../openglrasterizer/RAS_openglrasterizer.vcproj | 1 + .../gameengine/scenegraph/SG_SceneGraph.vcproj | 1 + .../gameengine/videotexture/TEX_Video.vcproj | 1 + .../kernel/gen_messaging/gen_messaging.vcproj | 2 + projectfiles_vc9/kernel/system/SYS_system.vcproj | 2 + release/Makefile | 3 - release/datafiles/blenderbuttons | Bin 182512 -> 195438 bytes release/io/engine_render_pov.py | 928 -- release/io/export_ply.py | 286 - release/io/netrender/__init__.py | 17 - release/io/netrender/client.py | 204 - release/io/netrender/master.py | 635 - release/io/netrender/model.py | 162 - release/io/netrender/operators.py | 356 - release/io/netrender/slave.py | 180 - release/io/netrender/ui.py | 293 - release/io/netrender/utils.py | 72 - release/scripts/3ds_export.py | 1019 -- release/scripts/3ds_import.py | 1007 -- release/scripts/Axiscopy.py | 125 - release/scripts/DirectX8Exporter.py | 1196 -- release/scripts/DirectX8Importer.py | 238 - release/scripts/IDPropBrowser.py | 523 - release/scripts/ac3d_export.py | 828 -- release/scripts/ac3d_import.py | 783 -- release/scripts/add_mesh_empty.py | 13 - release/scripts/add_mesh_torus.py | 69 - release/scripts/animation_bake_constraints.py | 792 -- release/scripts/animation_clean.py | 192 - release/scripts/animation_trajectory.py | 575 - release/scripts/armature_symmetry.py | 325 - release/scripts/bevel_center.py | 474 - release/scripts/blenderLipSynchro.py | 729 -- release/scripts/bpydata/KUlang.txt | 121 - release/scripts/bpydata/config/readme.txt | 6 - release/scripts/bpydata/readme.txt | 9 - release/scripts/bpymodules/BPyAddMesh.py | 159 - release/scripts/bpymodules/BPyArmature.py | 152 - release/scripts/bpymodules/BPyBlender.py | 36 - release/scripts/bpymodules/BPyCurve.py | 79 - release/scripts/bpymodules/BPyImage.py | 318 - release/scripts/bpymodules/BPyMathutils.py | 228 - release/scripts/bpymodules/BPyMesh.py | 1326 --- release/scripts/bpymodules/BPyMesh_redux.py | 652 - release/scripts/bpymodules/BPyMessages.py | 61 - release/scripts/bpymodules/BPyNMesh.py | 48 - release/scripts/bpymodules/BPyObject.py | 108 - release/scripts/bpymodules/BPyRegistry.py | 267 - release/scripts/bpymodules/BPyRender.py | 633 - release/scripts/bpymodules/BPySys.py | 74 - release/scripts/bpymodules/BPyTextPlugin.py | 814 -- release/scripts/bpymodules/BPyWindow.py | 206 - release/scripts/bpymodules/blend2renderinfo.py | 95 - release/scripts/bpymodules/defaultdoodads.py | 941 -- release/scripts/bpymodules/dxfColorMap.py | 282 - release/scripts/bpymodules/dxfImportObjects.py | 1326 --- release/scripts/bpymodules/dxfLibrary.py | 880 -- release/scripts/bpymodules/dxfReader.py | 381 - release/scripts/bpymodules/mesh_gradient.py | 229 - release/scripts/bpymodules/meshtools.py | 355 - release/scripts/bpymodules/paths_ai2obj.py | 506 - release/scripts/bpymodules/paths_eps2obj.py | 452 - release/scripts/bpymodules/paths_gimp2obj.py | 363 - release/scripts/bpymodules/paths_svg2obj.py | 1651 --- release/scripts/bvh_import.py | 757 -- release/scripts/c3d_import.py | 1244 -- release/scripts/camera_changer.py | 121 - release/scripts/config.py | 801 -- release/scripts/console.py | 861 -- release/scripts/discombobulator.py | 1526 --- release/scripts/envelope_symmetry.py | 174 
- release/scripts/export-iv-0.1.py | 304 - release/scripts/export_dxf.py | 3041 ----- release/scripts/export_fbx.py | 3084 ----- release/scripts/export_lightwave_motion.py | 157 - release/scripts/export_m3g.py | 3074 ----- release/scripts/export_map.py | 454 - release/scripts/export_mdd.py | 168 - release/scripts/export_obj.py | 933 -- release/scripts/faceselect_same_weights.py | 111 - release/scripts/flt_defaultp.py | 1 - release/scripts/flt_dofedit.py | 835 -- release/scripts/flt_export.py | 1697 --- release/scripts/flt_filewalker.py | 286 - release/scripts/flt_import.py | 2534 ---- release/scripts/flt_lodedit.py | 502 - release/scripts/flt_palettemanager.py | 505 - release/scripts/flt_properties.py | 630 - release/scripts/flt_toolbar.py | 809 -- release/scripts/help_bpy_api.py | 47 - release/scripts/help_browser.py | 814 -- release/scripts/hotkeys.py | 944 -- release/scripts/image_2d_cutout.py | 559 - release/scripts/image_auto_layout.py | 455 - release/scripts/image_billboard.py | 269 - release/scripts/image_edit.py | 158 - release/scripts/import_dxf.py | 6225 ---------- release/scripts/import_edl.py | 961 -- release/scripts/import_lightwave_motion.py | 244 - release/scripts/import_mdd.py | 158 - release/scripts/import_obj.py | 1234 -- release/scripts/import_web3d.py | 2594 ---- release/scripts/io/engine_render_pov.py | 912 ++ release/scripts/io/export_3ds.py | 1130 ++ release/scripts/io/export_fbx.py | 3453 ++++++ release/scripts/io/export_obj.py | 996 ++ release/scripts/io/export_ply.py | 279 + release/scripts/io/export_x3d.py | 1240 ++ release/scripts/io/import_3ds.py | 1167 ++ release/scripts/io/import_obj.py | 1638 +++ release/scripts/io/netrender/__init__.py | 19 + release/scripts/io/netrender/balancing.py | 94 + release/scripts/io/netrender/client.py | 203 + release/scripts/io/netrender/master.py | 752 ++ release/scripts/io/netrender/master_html.py | 142 + release/scripts/io/netrender/model.py | 198 + release/scripts/io/netrender/operators.py | 423 + release/scripts/io/netrender/slave.py | 207 + release/scripts/io/netrender/ui.py | 321 + release/scripts/io/netrender/utils.py | 86 + release/scripts/lightwave_export.py | 707 -- release/scripts/lightwave_import.py | 1705 --- release/scripts/md2_export.py | 1271 -- release/scripts/md2_import.py | 600 - release/scripts/mesh_boneweight_copy.py | 287 - release/scripts/mesh_cleanup.py | 456 - release/scripts/mesh_edges2curves.py | 166 - release/scripts/mesh_mirror_tool.py | 352 - release/scripts/mesh_poly_reduce.py | 143 - release/scripts/mesh_poly_reduce_grid.py | 351 - release/scripts/mesh_skin.py | 639 - release/scripts/mesh_solidify.py | 345 - release/scripts/mesh_unfolder.py | 1582 --- release/scripts/mesh_wire.py | 290 - release/scripts/modules/autocomplete.py | 211 + release/scripts/modules/bpy_ops.py | 141 + release/scripts/modules/bpy_sys.py | 12 + release/scripts/ms3d_import.py | 487 - release/scripts/ms3d_import_ascii.py | 479 - release/scripts/obdatacopier.py | 215 - release/scripts/object_active_to_other.py | 58 - release/scripts/object_apply_def.py | 178 - release/scripts/object_batch_name_edit.py | 274 - release/scripts/object_cookie_cutter.py | 667 -- release/scripts/object_drop.py | 253 - release/scripts/object_find.py | 222 - release/scripts/object_random_loc_sz_rot.py | 129 - release/scripts/object_sel2dupgroup.py | 84 - release/scripts/object_timeofs_follow_act.py | 107 - release/scripts/off_export.py | 106 - release/scripts/off_import.py | 177 - release/scripts/paths_import.py | 96 - release/scripts/ply_import.py | 
354 - release/scripts/raw_export.py | 100 - release/scripts/raw_import.py | 120 - release/scripts/renameobjectbyblock.py | 178 - release/scripts/render_save_layers.py | 120 - release/scripts/rvk1_torvk2.py | 341 - release/scripts/save_theme.py | 143 - release/scripts/scripttemplate_background_job.py | 124 - release/scripts/scripttemplate_camera_object.py | 104 - release/scripts/scripttemplate_gamelogic.py | 97 - release/scripts/scripttemplate_gamelogic_basic.py | 33 - release/scripts/scripttemplate_gamelogic_module.py | 45 - release/scripts/scripttemplate_ipo_gen.py | 92 - release/scripts/scripttemplate_mesh_edit.py | 98 - release/scripts/scripttemplate_metaball_create.py | 76 - release/scripts/scripttemplate_object_edit.py | 81 - release/scripts/scripttemplate_pyconstraint.py | 114 - release/scripts/scripttemplate_text_plugin.py | 69 - release/scripts/slp_import.py | 116 - release/scripts/sysinfo.py | 287 - release/scripts/textplugin_convert_ge.py | 863 -- release/scripts/textplugin_functiondocs.py | 64 - release/scripts/textplugin_imports.py | 91 - release/scripts/textplugin_membersuggest.py | 90 - release/scripts/textplugin_outliner.py | 142 - release/scripts/textplugin_suggest.py | 94 - release/scripts/textplugin_templates.py | 123 - release/scripts/ui/buttons_data_armature.py | 186 + release/scripts/ui/buttons_data_bone.py | 349 + release/scripts/ui/buttons_data_camera.py | 98 + release/scripts/ui/buttons_data_curve.py | 227 + release/scripts/ui/buttons_data_empty.py | 23 + release/scripts/ui/buttons_data_lamp.py | 310 + release/scripts/ui/buttons_data_lattice.py | 56 + release/scripts/ui/buttons_data_mesh.py | 203 + release/scripts/ui/buttons_data_metaball.py | 106 + release/scripts/ui/buttons_data_modifier.py | 449 + release/scripts/ui/buttons_data_text.py | 179 + release/scripts/ui/buttons_game.py | 416 + release/scripts/ui/buttons_material.py | 727 ++ release/scripts/ui/buttons_object.py | 223 + release/scripts/ui/buttons_object_constraint.py | 561 + release/scripts/ui/buttons_particle.py | 909 ++ release/scripts/ui/buttons_physics_cloth.py | 186 + release/scripts/ui/buttons_physics_common.py | 153 + release/scripts/ui/buttons_physics_field.py | 187 + release/scripts/ui/buttons_physics_fluid.py | 257 + release/scripts/ui/buttons_physics_smoke.py | 179 + release/scripts/ui/buttons_physics_softbody.py | 244 + release/scripts/ui/buttons_scene.py | 482 + release/scripts/ui/buttons_texture.py | 760 ++ release/scripts/ui/buttons_world.py | 181 + release/scripts/ui/space_buttons.py | 36 + release/scripts/ui/space_console.py | 242 + release/scripts/ui/space_filebrowser.py | 42 + release/scripts/ui/space_image.py | 503 + release/scripts/ui/space_info.py | 289 + release/scripts/ui/space_logic.py | 29 + release/scripts/ui/space_node.py | 118 + release/scripts/ui/space_outliner.py | 52 + release/scripts/ui/space_sequencer.py | 600 + release/scripts/ui/space_text.py | 245 + release/scripts/ui/space_time.py | 151 + release/scripts/ui/space_userpref.py | 405 + release/scripts/ui/space_view3d.py | 1383 +++ release/scripts/ui/space_view3d_toolbar.py | 746 ++ release/scripts/unweld.py | 247 - release/scripts/uv_export.py | 498 - release/scripts/uv_seams_from_islands.py | 101 - release/scripts/uvcalc_follow_active_coords.py | 254 - release/scripts/uvcalc_lightmap.py | 599 - release/scripts/uvcalc_quad_clickproj.py | 271 - release/scripts/uvcalc_smart_project.py | 1132 -- release/scripts/uvcopy.py | 112 - release/scripts/vertexpaint_from_material.py | 99 - release/scripts/vertexpaint_gradient.py | 46 
- release/scripts/vertexpaint_selfshadow_ao.py | 186 - release/scripts/vrml97_export.py | 1300 -- release/scripts/weightpaint_average.py | 121 - release/scripts/weightpaint_clean.py | 121 - release/scripts/weightpaint_copy.py | 101 - release/scripts/weightpaint_envelope_assign.py | 240 - release/scripts/weightpaint_gradient.py | 59 - release/scripts/weightpaint_grow_shrink.py | 131 - release/scripts/weightpaint_invert.py | 95 - release/scripts/weightpaint_normalize.py | 170 - release/scripts/widgetwizard.py | 917 -- release/scripts/wizard_bolt_factory.py | 2811 ----- release/scripts/wizard_curve2tree.py | 4048 ------- release/scripts/wizard_landscape_ant.py | 2148 ---- release/scripts/x3d_export.py | 1051 -- release/scripts/xsi_export.py | 1227 -- release/ui/bpy_ops.py | 141 - release/ui/buttons_data_armature.py | 177 - release/ui/buttons_data_bone.py | 278 - release/ui/buttons_data_camera.py | 97 - release/ui/buttons_data_curve.py | 228 - release/ui/buttons_data_empty.py | 23 - release/ui/buttons_data_lamp.py | 313 - release/ui/buttons_data_lattice.py | 56 - release/ui/buttons_data_mesh.py | 203 - release/ui/buttons_data_metaball.py | 112 - release/ui/buttons_data_modifier.py | 449 - release/ui/buttons_data_text.py | 181 - release/ui/buttons_game.py | 407 - release/ui/buttons_material.py | 713 -- release/ui/buttons_object.py | 185 - release/ui/buttons_object_constraint.py | 536 - release/ui/buttons_particle.py | 960 -- release/ui/buttons_physics_cloth.py | 172 - release/ui/buttons_physics_field.py | 228 - release/ui/buttons_physics_fluid.py | 260 - release/ui/buttons_physics_smoke.py | 182 - release/ui/buttons_physics_softbody.py | 231 - release/ui/buttons_scene.py | 463 - release/ui/buttons_texture.py | 761 -- release/ui/buttons_world.py | 182 - release/ui/space_buttons.py | 36 - release/ui/space_console.py | 446 - release/ui/space_filebrowser.py | 42 - release/ui/space_image.py | 381 - release/ui/space_info.py | 285 - release/ui/space_logic.py | 29 - release/ui/space_node.py | 118 - release/ui/space_outliner.py | 52 - release/ui/space_sequencer.py | 602 - release/ui/space_text.py | 245 - release/ui/space_time.py | 151 - release/ui/space_userpref.py | 418 - release/ui/space_view3d.py | 1379 --- release/ui/space_view3d_toolbar.py | 730 -- source/Makefile | 7 +- source/blender/CMakeLists.txt | 1 + source/blender/Makefile | 2 +- source/blender/SConscript | 3 +- source/blender/blenfont/CMakeLists.txt | 1 + source/blender/blenfont/Makefile | 4 + source/blender/blenfont/SConscript | 8 +- source/blender/blenfont/intern/blf_font.c | 4 +- source/blender/blenfont/intern/blf_lang.c | 55 +- source/blender/blenkernel/BKE_DerivedMesh.h | 2 + source/blender/blenkernel/BKE_action.h | 53 +- source/blender/blenkernel/BKE_anim.h | 9 +- source/blender/blenkernel/BKE_armature.h | 7 +- source/blender/blenkernel/BKE_blender.h | 2 +- source/blender/blenkernel/BKE_boids.h | 4 +- source/blender/blenkernel/BKE_brush.h | 4 +- source/blender/blenkernel/BKE_bvhutils.h | 4 + source/blender/blenkernel/BKE_collision.h | 11 +- source/blender/blenkernel/BKE_constraint.h | 8 +- source/blender/blenkernel/BKE_depsgraph.h | 3 + source/blender/blenkernel/BKE_effect.h | 110 +- source/blender/blenkernel/BKE_fcurve.h | 7 + source/blender/blenkernel/BKE_image.h | 11 +- source/blender/blenkernel/BKE_material.h | 5 +- source/blender/blenkernel/BKE_mesh.h | 1 + source/blender/blenkernel/BKE_modifier.h | 2 + source/blender/blenkernel/BKE_node.h | 8 +- source/blender/blenkernel/BKE_object.h | 2 +- source/blender/blenkernel/BKE_paint.h 
| 2 + source/blender/blenkernel/BKE_particle.h | 176 +- source/blender/blenkernel/BKE_screen.h | 11 +- source/blender/blenkernel/BKE_sound.h | 2 +- source/blender/blenkernel/BKE_text.h | 8 + source/blender/blenkernel/BKE_texture.h | 33 +- source/blender/blenkernel/BKE_utildefines.h | 8 + source/blender/blenkernel/CMakeLists.txt | 14 +- source/blender/blenkernel/SConscript | 14 +- source/blender/blenkernel/intern/DerivedMesh.c | 213 +- source/blender/blenkernel/intern/Makefile | 19 +- source/blender/blenkernel/intern/action.c | 428 +- source/blender/blenkernel/intern/anim.c | 78 +- source/blender/blenkernel/intern/anim_sys.c | 143 +- source/blender/blenkernel/intern/armature.c | 541 +- source/blender/blenkernel/intern/blender.c | 17 + source/blender/blenkernel/intern/boids.c | 252 +- source/blender/blenkernel/intern/brush.c | 115 +- source/blender/blenkernel/intern/bvhutils.c | 88 + source/blender/blenkernel/intern/cdderivedmesh.c | 1096 +- source/blender/blenkernel/intern/cloth.c | 88 +- source/blender/blenkernel/intern/collision.c | 122 +- source/blender/blenkernel/intern/constraint.c | 30 +- source/blender/blenkernel/intern/context.c | 2 +- source/blender/blenkernel/intern/curve.c | 2 +- source/blender/blenkernel/intern/customdata.c | 4 + source/blender/blenkernel/intern/depsgraph.c | 102 +- .../blender/blenkernel/intern/editderivedbmesh.c | 24 +- source/blender/blenkernel/intern/effect.c | 865 +- source/blender/blenkernel/intern/fcurve.c | 81 + source/blender/blenkernel/intern/image.c | 215 +- source/blender/blenkernel/intern/implicit.c | 26 +- source/blender/blenkernel/intern/ipo.c | 88 +- source/blender/blenkernel/intern/key.c | 2 +- source/blender/blenkernel/intern/lattice.c | 144 +- source/blender/blenkernel/intern/library.c | 47 +- source/blender/blenkernel/intern/material.c | 41 +- source/blender/blenkernel/intern/mesh.c | 186 +- source/blender/blenkernel/intern/modifier.c | 66 +- source/blender/blenkernel/intern/multires.c | 1 + source/blender/blenkernel/intern/nla.c | 3 + source/blender/blenkernel/intern/node.c | 39 +- source/blender/blenkernel/intern/object.c | 143 +- source/blender/blenkernel/intern/packedFile.c | 7 +- source/blender/blenkernel/intern/paint.c | 27 +- source/blender/blenkernel/intern/particle.c | 924 +- source/blender/blenkernel/intern/particle_system.c | 1756 +-- source/blender/blenkernel/intern/pointcache.c | 298 +- source/blender/blenkernel/intern/report.c | 2 +- source/blender/blenkernel/intern/sca.c | 18 +- source/blender/blenkernel/intern/scene.c | 20 +- source/blender/blenkernel/intern/screen.c | 6 - source/blender/blenkernel/intern/sequence.c | 4 +- source/blender/blenkernel/intern/smoke.c | 309 +- source/blender/blenkernel/intern/softbody.c | 67 +- source/blender/blenkernel/intern/sound.c | 8 +- source/blender/blenkernel/intern/text.c | 63 +- source/blender/blenkernel/intern/texture.c | 285 +- source/blender/blenkernel/intern/unit.c | 4 +- source/blender/blenkernel/intern/world.c | 11 +- source/blender/blenkernel/intern/writeffmpeg.c | 13 +- source/blender/blenlib/BLI_arithb.h | 18 +- source/blender/blenlib/BLI_bfile.h | 138 + source/blender/blenlib/BLI_listbase.h | 4 + source/blender/blenlib/BLI_memarena.h | 11 + source/blender/blenlib/BLI_string.h | 23 + source/blender/blenlib/BLI_threads.h | 53 +- source/blender/blenlib/BLI_util.h | 9 +- source/blender/blenlib/BLI_winstuff.h | 21 +- source/blender/blenlib/intern/BLI_bfile.c | 236 + source/blender/blenlib/intern/BLI_kdopbvh.c | 92 +- source/blender/blenlib/intern/BLI_memarena.c | 24 +- 
source/blender/blenlib/intern/arithb.c | 48 +- source/blender/blenlib/intern/dynlib.c | 200 +- source/blender/blenlib/intern/listbase.c | 14 + source/blender/blenlib/intern/string.c | 95 + source/blender/blenlib/intern/threads.c | 94 +- source/blender/blenlib/intern/util.c | 169 +- source/blender/blenloader/BLO_readfile.h | 2 +- source/blender/blenloader/intern/readfile.c | 411 +- source/blender/blenloader/intern/writefile.c | 49 +- source/blender/blenpluginapi/CMakeLists.txt | 4 + source/blender/blenpluginapi/SConscript | 9 + source/blender/bmesh/intern/bmesh_opdefines.c | 17 + .../blender/bmesh/intern/bmesh_operators_private.h | 2 + source/blender/bmesh/operators/removedoubles.c | 56 + source/blender/bmesh/operators/utils.c | 10 +- source/blender/editors/CMakeLists.txt | 1 + source/blender/editors/Makefile | 2 +- source/blender/editors/SConscript | 2 +- .../editors/animation/anim_channels_defines.c | 225 +- .../blender/editors/animation/anim_channels_edit.c | 147 +- source/blender/editors/animation/anim_deps.c | 14 +- source/blender/editors/animation/anim_draw.c | 59 + source/blender/editors/animation/anim_filter.c | 309 +- source/blender/editors/animation/anim_intern.h | 42 + source/blender/editors/animation/anim_ipo_utils.c | 45 +- source/blender/editors/animation/anim_markers.c | 7 +- source/blender/editors/animation/anim_ops.c | 26 +- source/blender/editors/animation/drivers.c | 280 +- source/blender/editors/animation/fmodifier_ui.c | 2 +- source/blender/editors/animation/keyframes_draw.c | 169 +- source/blender/editors/animation/keyframes_edit.c | 166 +- .../blender/editors/animation/keyframes_general.c | 1 + source/blender/editors/animation/keyframing.c | 111 +- source/blender/editors/animation/keyingsets.c | 312 +- source/blender/editors/armature/SConscript | 7 + source/blender/editors/armature/armature_intern.h | 8 + source/blender/editors/armature/armature_ops.c | 38 +- source/blender/editors/armature/editarmature.c | 263 +- .../blender/editors/armature/editarmature_sketch.c | 33 +- source/blender/editors/armature/meshlaplacian.c | 131 +- source/blender/editors/armature/poseSlide.c | 936 ++ source/blender/editors/armature/poselib.c | 120 +- source/blender/editors/armature/poseobject.c | 262 +- source/blender/editors/curve/curve_ops.c | 12 +- source/blender/editors/curve/editcurve.c | 5 +- source/blender/editors/curve/editfont.c | 10 +- source/blender/editors/datafiles/B.blend.c | 5391 ++++----- source/blender/editors/datafiles/blenderbuttons.c | 11816 ++++++++++--------- source/blender/editors/gpencil/gpencil_ops.c | 6 +- source/blender/editors/gpencil/gpencil_paint.c | 2 +- source/blender/editors/include/BIF_glutil.h | 1 + source/blender/editors/include/ED_anim_api.h | 24 +- source/blender/editors/include/ED_armature.h | 4 +- source/blender/editors/include/ED_curve.h | 4 +- source/blender/editors/include/ED_fileselect.h | 32 +- source/blender/editors/include/ED_gpencil.h | 4 +- source/blender/editors/include/ED_image.h | 8 +- source/blender/editors/include/ED_keyframes_draw.h | 7 + source/blender/editors/include/ED_keyframes_edit.h | 1 + source/blender/editors/include/ED_keyframing.h | 46 +- source/blender/editors/include/ED_markers.h | 4 +- source/blender/editors/include/ED_mball.h | 4 +- source/blender/editors/include/ED_mesh.h | 41 +- source/blender/editors/include/ED_node.h | 6 + source/blender/editors/include/ED_object.h | 12 +- source/blender/editors/include/ED_particle.h | 21 +- source/blender/editors/include/ED_physics.h | 9 +- 
source/blender/editors/include/ED_previewrender.h | 79 - source/blender/editors/include/ED_render.h | 84 + source/blender/editors/include/ED_screen.h | 5 +- source/blender/editors/include/ED_sculpt.h | 4 +- source/blender/editors/include/ED_transform.h | 19 +- source/blender/editors/include/ED_uvedit.h | 4 +- source/blender/editors/include/ED_view3d.h | 6 +- source/blender/editors/include/UI_icons.h | 38 +- source/blender/editors/include/UI_interface.h | 46 +- .../blender/editors/include/UI_interface_icons.h | 6 +- source/blender/editors/include/UI_view2d.h | 4 +- source/blender/editors/interface/SConscript | 5 +- source/blender/editors/interface/interface.c | 59 +- source/blender/editors/interface/interface_anim.c | 25 +- .../blender/editors/interface/interface_handlers.c | 269 +- source/blender/editors/interface/interface_icons.c | 467 +- .../blender/editors/interface/interface_intern.h | 10 +- .../blender/editors/interface/interface_layout.c | 138 +- source/blender/editors/interface/interface_panel.c | 13 +- .../blender/editors/interface/interface_regions.c | 39 +- .../editors/interface/interface_templates.c | 772 +- source/blender/editors/interface/interface_utils.c | 1083 +- .../blender/editors/interface/interface_widgets.c | 80 +- source/blender/editors/interface/resources.c | 23 +- source/blender/editors/interface/view2d.c | 57 +- source/blender/editors/interface/view2d_ops.c | 18 +- source/blender/editors/mesh/SConscript | 7 + source/blender/editors/mesh/bmesh_select.c | 116 +- source/blender/editors/mesh/bmesh_tools.c | 74 +- source/blender/editors/mesh/editface.c | 61 +- source/blender/editors/mesh/editmesh.c | 21 +- source/blender/editors/mesh/editmesh_add.c | 240 +- source/blender/editors/mesh/editmesh_mods.c | 27 +- source/blender/editors/mesh/loopcut.c | 65 +- source/blender/editors/mesh/mesh_data.c | 669 ++ source/blender/editors/mesh/mesh_intern.h | 8 +- source/blender/editors/mesh/mesh_layers.c | 445 - source/blender/editors/mesh/mesh_ops.c | 237 +- source/blender/editors/mesh/meshtools.c | 4 +- source/blender/editors/metaball/mball_ops.c | 9 +- source/blender/editors/object/Makefile | 1 + source/blender/editors/object/SConscript | 2 +- source/blender/editors/object/object_add.c | 352 +- source/blender/editors/object/object_constraint.c | 44 +- source/blender/editors/object/object_edit.c | 29 +- source/blender/editors/object/object_group.c | 9 +- source/blender/editors/object/object_hook.c | 1 + source/blender/editors/object/object_intern.h | 9 +- source/blender/editors/object/object_lattice.c | 1 + source/blender/editors/object/object_modifier.c | 33 +- source/blender/editors/object/object_ops.c | 70 +- source/blender/editors/object/object_relations.c | 32 +- source/blender/editors/object/object_select.c | 13 +- source/blender/editors/object/object_transform.c | 98 +- source/blender/editors/object/object_vgroup.c | 12 +- source/blender/editors/physics/SConscript | 7 + source/blender/editors/physics/ed_fluidsim.c | 1207 -- source/blender/editors/physics/ed_pointcache.c | 357 - source/blender/editors/physics/editparticle.c | 3999 ------- source/blender/editors/physics/particle_boids.c | 396 + source/blender/editors/physics/particle_edit.c | 3981 +++++++ source/blender/editors/physics/particle_object.c | 651 + source/blender/editors/physics/physics_boids.c | 430 - source/blender/editors/physics/physics_fluid.c | 1204 ++ source/blender/editors/physics/physics_intern.h | 69 + source/blender/editors/physics/physics_ops.c | 176 + .../blender/editors/physics/physics_pointcache.c 
| 337 + source/blender/editors/preview/Makefile | 56 - source/blender/editors/preview/SConscript | 12 - source/blender/editors/preview/previewrender.c | 1007 -- .../blender/editors/preview/previewrender_intern.h | 37 - source/blender/editors/render/Makefile | 56 + source/blender/editors/render/SConscript | 19 + source/blender/editors/render/render_intern.h | 53 + source/blender/editors/render/render_ops.c | 57 + source/blender/editors/render/render_preview.c | 1163 ++ source/blender/editors/render/render_shading.c | 762 ++ source/blender/editors/screen/SConscript | 7 + source/blender/editors/screen/area.c | 49 +- source/blender/editors/screen/glutil.c | 12 + source/blender/editors/screen/screen_context.c | 2 +- source/blender/editors/screen/screen_edit.c | 2 +- source/blender/editors/screen/screen_ops.c | 197 +- source/blender/editors/screen/screendump.c | 2 +- source/blender/editors/sculpt_paint/SConscript | 7 + source/blender/editors/sculpt_paint/paint_image.c | 131 +- source/blender/editors/sculpt_paint/paint_intern.h | 7 +- source/blender/editors/sculpt_paint/paint_ops.c | 46 + source/blender/editors/sculpt_paint/paint_stroke.c | 2 +- source/blender/editors/sculpt_paint/paint_vertex.c | 36 +- source/blender/editors/sculpt_paint/sculpt.c | 187 +- .../blender/editors/sculpt_paint/sculpt_intern.h | 3 + source/blender/editors/space_action/action_draw.c | 10 + source/blender/editors/space_action/action_edit.c | 51 + .../blender/editors/space_action/action_header.c | 104 +- .../blender/editors/space_action/action_intern.h | 4 +- source/blender/editors/space_action/action_ops.c | 15 +- .../blender/editors/space_action/action_select.c | 36 +- source/blender/editors/space_action/space_action.c | 9 +- source/blender/editors/space_api/spacetypes.c | 51 +- .../editors/space_buttons/buttons_context.c | 41 +- .../blender/editors/space_buttons/buttons_header.c | 10 +- .../blender/editors/space_buttons/buttons_intern.h | 27 - source/blender/editors/space_buttons/buttons_ops.c | 990 +- .../blender/editors/space_buttons/space_buttons.c | 106 +- source/blender/editors/space_console/console_ops.c | 99 +- .../blender/editors/space_console/space_console.c | 19 +- source/blender/editors/space_file/SConscript | 7 + source/blender/editors/space_file/file_draw.c | 84 +- source/blender/editors/space_file/file_intern.h | 2 +- source/blender/editors/space_file/file_ops.c | 110 +- source/blender/editors/space_file/file_panels.c | 12 +- source/blender/editors/space_file/filelist.c | 10 +- source/blender/editors/space_file/filesel.c | 82 +- source/blender/editors/space_file/space_file.c | 44 +- source/blender/editors/space_file/writeimage.c | 14 +- source/blender/editors/space_graph/graph_buttons.c | 147 +- source/blender/editors/space_graph/graph_draw.c | 71 +- source/blender/editors/space_graph/graph_edit.c | 86 +- source/blender/editors/space_graph/graph_header.c | 39 +- source/blender/editors/space_graph/graph_intern.h | 3 +- source/blender/editors/space_graph/graph_ops.c | 18 +- source/blender/editors/space_graph/graph_select.c | 9 +- source/blender/editors/space_graph/graph_utils.c | 28 + source/blender/editors/space_graph/space_graph.c | 17 +- source/blender/editors/space_image/image_buttons.c | 809 +- source/blender/editors/space_image/image_draw.c | 14 +- source/blender/editors/space_image/image_ops.c | 210 +- source/blender/editors/space_image/space_image.c | 141 +- source/blender/editors/space_info/space_info.c | 2 +- source/blender/editors/space_logic/logic_buttons.c | 4 +- 
source/blender/editors/space_logic/logic_window.c | 274 +- source/blender/editors/space_logic/space_logic.c | 12 +- source/blender/editors/space_nla/nla_buttons.c | 83 +- source/blender/editors/space_nla/nla_channels.c | 47 +- source/blender/editors/space_nla/nla_header.c | 37 +- source/blender/editors/space_nla/nla_intern.h | 2 +- source/blender/editors/space_nla/nla_ops.c | 22 +- source/blender/editors/space_nla/space_nla.c | 17 +- source/blender/editors/space_node/SConscript | 7 + source/blender/editors/space_node/drawnode.c | 2411 ++-- source/blender/editors/space_node/node_draw.c | 249 +- source/blender/editors/space_node/node_edit.c | 110 +- source/blender/editors/space_node/node_header.c | 2 +- source/blender/editors/space_node/node_intern.h | 5 +- source/blender/editors/space_node/node_ops.c | 10 +- source/blender/editors/space_node/node_select.c | 19 +- source/blender/editors/space_node/space_node.c | 12 +- source/blender/editors/space_outliner/outliner.c | 64 +- .../editors/space_outliner/outliner_header.c | 2 +- .../editors/space_outliner/outliner_intern.h | 2 +- .../blender/editors/space_outliner/outliner_ops.c | 4 +- .../editors/space_outliner/space_outliner.c | 11 +- .../blender/editors/space_script/script_intern.h | 2 +- source/blender/editors/space_script/script_ops.c | 4 +- source/blender/editors/space_script/space_script.c | 4 +- .../editors/space_sequencer/sequencer_buttons.c | 4 +- .../editors/space_sequencer/sequencer_intern.h | 5 +- .../editors/space_sequencer/sequencer_ops.c | 11 +- .../editors/space_sequencer/space_sequencer.c | 4 +- source/blender/editors/space_sound/space_sound.c | 6 +- source/blender/editors/space_text/space_text.c | 12 +- source/blender/editors/space_text/text_draw.c | 54 - source/blender/editors/space_text/text_ops.c | 71 +- source/blender/editors/space_time/space_time.c | 4 +- source/blender/editors/space_time/time_intern.h | 2 +- source/blender/editors/space_time/time_ops.c | 4 +- .../editors/space_userpref/space_userpref.c | 4 +- source/blender/editors/space_view3d/Makefile | 5 +- source/blender/editors/space_view3d/drawarmature.c | 121 +- source/blender/editors/space_view3d/drawmesh.c | 101 +- source/blender/editors/space_view3d/drawobject.c | 1288 +- source/blender/editors/space_view3d/drawvolume.c | 1 - source/blender/editors/space_view3d/space_view3d.c | 189 +- .../blender/editors/space_view3d/view3d_buttons.c | 139 +- source/blender/editors/space_view3d/view3d_draw.c | 9 +- source/blender/editors/space_view3d/view3d_edit.c | 305 +- .../blender/editors/space_view3d/view3d_header.c | 72 +- .../blender/editors/space_view3d/view3d_intern.h | 20 +- source/blender/editors/space_view3d/view3d_ops.c | 108 +- .../blender/editors/space_view3d/view3d_select.c | 13 +- .../blender/editors/space_view3d/view3d_toolbar.c | 4 +- source/blender/editors/space_view3d/view3d_view.c | 874 +- source/blender/editors/transform/transform.c | 1463 ++- source/blender/editors/transform/transform.h | 79 +- .../editors/transform/transform_constraints.c | 5 +- .../editors/transform/transform_conversions.c | 447 +- .../blender/editors/transform/transform_generics.c | 170 +- source/blender/editors/transform/transform_input.c | 48 +- .../editors/transform/transform_manipulator.c | 19 +- source/blender/editors/transform/transform_ops.c | 191 +- .../editors/transform/transform_orientations.c | 273 +- source/blender/editors/transform/transform_snap.c | 127 +- source/blender/editors/uvedit/uvedit_draw.c | 28 +- source/blender/editors/uvedit/uvedit_ops.c | 12 +- 
source/blender/editors/uvedit/uvedit_unwrap_ops.c | 4 +- source/blender/gpu/intern/gpu_material.c | 107 +- source/blender/gpu/intern/gpu_shader_material.glsl | 42 +- source/blender/ikplugin/BIK_api.h | 93 + source/blender/ikplugin/CMakeLists.txt | 35 + source/blender/ikplugin/Makefile | 31 + source/blender/ikplugin/SConscript | 9 + source/blender/ikplugin/intern/Makefile | 51 + source/blender/ikplugin/intern/ikplugin_api.c | 138 + source/blender/ikplugin/intern/ikplugin_api.h | 60 + source/blender/ikplugin/intern/iksolver_plugin.c | 530 + source/blender/ikplugin/intern/iksolver_plugin.h | 47 + source/blender/ikplugin/intern/itasc_plugin.cpp | 1777 +++ source/blender/ikplugin/intern/itasc_plugin.h | 52 + source/blender/imbuf/intern/divers.c | 14 +- source/blender/makesdna/DNA_action_types.h | 110 +- source/blender/makesdna/DNA_actuator_types.h | 21 +- source/blender/makesdna/DNA_anim_types.h | 15 +- source/blender/makesdna/DNA_armature_types.h | 6 +- source/blender/makesdna/DNA_brush_types.h | 4 +- source/blender/makesdna/DNA_cloth_types.h | 2 + source/blender/makesdna/DNA_constraint_types.h | 57 +- source/blender/makesdna/DNA_curve_types.h | 3 +- source/blender/makesdna/DNA_customdata_types.h | 6 +- source/blender/makesdna/DNA_lamp_types.h | 3 +- source/blender/makesdna/DNA_material_types.h | 58 +- source/blender/makesdna/DNA_mesh_types.h | 7 +- source/blender/makesdna/DNA_meta_types.h | 2 +- source/blender/makesdna/DNA_modifier_types.h | 25 +- source/blender/makesdna/DNA_node_types.h | 4 +- source/blender/makesdna/DNA_object_force.h | 125 +- source/blender/makesdna/DNA_object_types.h | 27 +- source/blender/makesdna/DNA_particle_types.h | 54 +- source/blender/makesdna/DNA_scene_types.h | 72 +- source/blender/makesdna/DNA_sensor_types.h | 17 + source/blender/makesdna/DNA_smoke_types.h | 3 +- source/blender/makesdna/DNA_space_types.h | 29 +- source/blender/makesdna/DNA_texture_types.h | 27 +- source/blender/makesdna/DNA_userdef_types.h | 16 +- source/blender/makesdna/DNA_view3d_types.h | 6 +- source/blender/makesdna/DNA_windowmanager_types.h | 73 +- source/blender/makesdna/intern/SConscript | 2 +- source/blender/makesrna/RNA_access.h | 30 +- source/blender/makesrna/RNA_define.h | 2 + source/blender/makesrna/RNA_enum_types.h | 13 +- source/blender/makesrna/RNA_types.h | 66 +- source/blender/makesrna/SConscript | 10 +- source/blender/makesrna/intern/CMakeLists.txt | 6 +- source/blender/makesrna/intern/Makefile | 1 + source/blender/makesrna/intern/SConscript | 14 +- source/blender/makesrna/intern/makesrna.c | 21 +- source/blender/makesrna/intern/rna_ID.c | 53 +- source/blender/makesrna/intern/rna_access.c | 46 +- source/blender/makesrna/intern/rna_action.c | 2 + source/blender/makesrna/intern/rna_actuator.c | 1 + source/blender/makesrna/intern/rna_animation.c | 95 +- source/blender/makesrna/intern/rna_animation_api.c | 1 + source/blender/makesrna/intern/rna_armature.c | 118 +- source/blender/makesrna/intern/rna_boid.c | 27 +- source/blender/makesrna/intern/rna_brush.c | 141 +- source/blender/makesrna/intern/rna_camera.c | 21 +- source/blender/makesrna/intern/rna_cloth.c | 5 + source/blender/makesrna/intern/rna_color.c | 6 +- source/blender/makesrna/intern/rna_constraint.c | 221 +- source/blender/makesrna/intern/rna_curve.c | 4 +- source/blender/makesrna/intern/rna_define.c | 29 +- source/blender/makesrna/intern/rna_fcurve.c | 67 +- source/blender/makesrna/intern/rna_fluidsim.c | 1 + source/blender/makesrna/intern/rna_image.c | 180 +- source/blender/makesrna/intern/rna_internal.h | 10 +- 
source/blender/makesrna/intern/rna_key.c | 12 +- source/blender/makesrna/intern/rna_lamp.c | 219 +- source/blender/makesrna/intern/rna_main.c | 4 +- source/blender/makesrna/intern/rna_main_api.c | 129 +- source/blender/makesrna/intern/rna_material.c | 697 +- source/blender/makesrna/intern/rna_mesh.c | 78 +- source/blender/makesrna/intern/rna_mesh_api.c | 201 +- source/blender/makesrna/intern/rna_modifier.c | 57 +- source/blender/makesrna/intern/rna_nodetree.c | 490 +- .../blender/makesrna/intern/rna_nodetree_types.h | 4 +- source/blender/makesrna/intern/rna_object.c | 302 +- source/blender/makesrna/intern/rna_object_api.c | 383 +- source/blender/makesrna/intern/rna_object_force.c | 528 +- source/blender/makesrna/intern/rna_particle.c | 442 +- source/blender/makesrna/intern/rna_pose.c | 478 +- source/blender/makesrna/intern/rna_rna.c | 9 +- source/blender/makesrna/intern/rna_scene.c | 429 +- source/blender/makesrna/intern/rna_scene_api.c | 78 +- source/blender/makesrna/intern/rna_screen.c | 31 +- source/blender/makesrna/intern/rna_sculpt_paint.c | 30 +- source/blender/makesrna/intern/rna_sensor.c | 38 + source/blender/makesrna/intern/rna_sequence.c | 45 +- source/blender/makesrna/intern/rna_smoke.c | 11 +- source/blender/makesrna/intern/rna_space.c | 232 +- source/blender/makesrna/intern/rna_text.c | 6 +- source/blender/makesrna/intern/rna_texture.c | 454 +- source/blender/makesrna/intern/rna_ui.c | 27 +- source/blender/makesrna/intern/rna_ui_api.c | 69 +- source/blender/makesrna/intern/rna_userdef.c | 415 +- source/blender/makesrna/intern/rna_wm.c | 391 +- source/blender/makesrna/intern/rna_wm_api.c | 74 +- source/blender/makesrna/intern/rna_world.c | 134 +- source/blender/nodes/CMakeLists.txt | 16 +- source/blender/nodes/SConscript | 14 +- source/blender/nodes/intern/CMP_nodes/CMP_blur.c | 2 +- .../nodes/intern/CMP_nodes/CMP_channelMatte.c | 2 +- .../nodes/intern/CMP_nodes/CMP_chromaMatte.c | 2 +- .../nodes/intern/CMP_nodes/CMP_colorMatte.c | 2 +- .../blender/nodes/intern/CMP_nodes/CMP_composite.c | 11 +- .../blender/nodes/intern/CMP_nodes/CMP_diffMatte.c | 2 +- .../nodes/intern/CMP_nodes/CMP_distanceMatte.c | 2 +- source/blender/nodes/intern/CMP_nodes/CMP_filter.c | 2 +- source/blender/nodes/intern/CMP_nodes/CMP_image.c | 14 +- source/blender/nodes/intern/CMP_nodes/CMP_levels.c | 6 +- .../nodes/intern/CMP_nodes/CMP_lummaMatte.c | 2 +- source/blender/nodes/intern/CMP_nodes/CMP_mixrgb.c | 2 +- .../nodes/intern/CMP_nodes/CMP_outputFile.c | 2 +- .../nodes/intern/CMP_nodes/CMP_splitViewer.c | 2 +- .../blender/nodes/intern/CMP_nodes/CMP_texture.c | 2 +- source/blender/nodes/intern/CMP_nodes/CMP_viewer.c | 4 +- source/blender/nodes/intern/CMP_util.c | 40 +- source/blender/nodes/intern/CMP_util.h | 2 +- source/blender/nodes/intern/TEX_nodes/TEX_at.c | 2 +- source/blender/nodes/intern/TEX_nodes/TEX_bricks.c | 4 +- .../blender/nodes/intern/TEX_nodes/TEX_checker.c | 4 +- .../blender/nodes/intern/TEX_nodes/TEX_compose.c | 2 +- source/blender/nodes/intern/TEX_nodes/TEX_coord.c | 4 +- source/blender/nodes/intern/TEX_nodes/TEX_curves.c | 4 +- .../blender/nodes/intern/TEX_nodes/TEX_decompose.c | 8 +- .../blender/nodes/intern/TEX_nodes/TEX_distance.c | 4 +- .../blender/nodes/intern/TEX_nodes/TEX_hueSatVal.c | 2 +- source/blender/nodes/intern/TEX_nodes/TEX_image.c | 4 +- source/blender/nodes/intern/TEX_nodes/TEX_invert.c | 4 +- source/blender/nodes/intern/TEX_nodes/TEX_math.c | 4 +- source/blender/nodes/intern/TEX_nodes/TEX_mixRgb.c | 4 +- source/blender/nodes/intern/TEX_nodes/TEX_output.c | 21 +- 
source/blender/nodes/intern/TEX_nodes/TEX_proc.c | 5 +- source/blender/nodes/intern/TEX_nodes/TEX_rotate.c | 2 +- source/blender/nodes/intern/TEX_nodes/TEX_scale.c | 2 +- .../blender/nodes/intern/TEX_nodes/TEX_texture.c | 4 +- .../blender/nodes/intern/TEX_nodes/TEX_translate.c | 2 +- .../blender/nodes/intern/TEX_nodes/TEX_valToNor.c | 4 +- .../blender/nodes/intern/TEX_nodes/TEX_valToRgb.c | 6 +- source/blender/nodes/intern/TEX_nodes/TEX_viewer.c | 11 +- source/blender/nodes/intern/TEX_util.c | 93 +- source/blender/nodes/intern/TEX_util.h | 7 +- source/blender/python/CMakeLists.txt | 2 - source/blender/python/generic/Mathutils.c | 2 +- source/blender/python/generic/matrix.c | 2 +- source/blender/python/intern/bpy_interface.c | 136 +- source/blender/python/intern/bpy_operator.c | 12 +- source/blender/python/intern/bpy_operator_wrap.c | 63 +- source/blender/python/intern/bpy_rna.c | 159 +- source/blender/render/CMakeLists.txt | 10 +- source/blender/render/SConscript | 22 +- source/blender/render/extern/include/RE_pipeline.h | 7 +- source/blender/render/extern/include/RE_raytrace.h | 227 +- .../blender/render/extern/include/RE_render_ext.h | 2 +- .../blender/render/extern/include/RE_shader_ext.h | 7 +- source/blender/render/intern/Makefile | 2 +- .../blender/render/intern/include/render_types.h | 39 +- source/blender/render/intern/include/rendercore.h | 1 + .../render/intern/include/volume_precache.h | 4 +- source/blender/render/intern/include/volumetric.h | 3 +- source/blender/render/intern/include/zbuf.h | 7 +- source/blender/render/intern/raytrace/Makefile | 65 + source/blender/render/intern/raytrace/bvh.h | 400 + .../blender/render/intern/raytrace/rayobject.cpp | 580 + .../render/intern/raytrace/rayobject_hint.h | 70 + .../render/intern/raytrace/rayobject_qbvh.cpp | 149 + .../render/intern/raytrace/rayobject_rtbuild.cpp | 496 + .../render/intern/raytrace/rayobject_rtbuild.h | 121 + .../render/intern/raytrace/rayobject_svbvh.cpp | 181 + .../render/intern/raytrace/rayobject_vbvh.cpp | 191 + source/blender/render/intern/raytrace/reorganize.h | 516 + source/blender/render/intern/raytrace/svbvh.h | 249 + source/blender/render/intern/raytrace/vbvh.h | 237 + .../blender/render/intern/source/convertblender.c | 76 +- source/blender/render/intern/source/pipeline.c | 188 +- source/blender/render/intern/source/pointdensity.c | 5 +- source/blender/render/intern/source/rayshade.c | 710 +- source/blender/render/intern/source/raytrace.c | 1442 --- source/blender/render/intern/source/rendercore.c | 43 +- .../blender/render/intern/source/renderdatabase.c | 32 +- source/blender/render/intern/source/shadbuf.c | 488 +- source/blender/render/intern/source/shadeinput.c | 31 +- source/blender/render/intern/source/sss.c | 4 + source/blender/render/intern/source/strand.c | 71 +- source/blender/render/intern/source/texture.c | 134 +- .../blender/render/intern/source/volume_precache.c | 164 +- source/blender/render/intern/source/volumetric.c | 389 +- source/blender/render/intern/source/voxeldata.c | 6 - source/blender/render/intern/source/zbuf.c | 172 +- source/blender/windowmanager/CMakeLists.txt | 4 + source/blender/windowmanager/SConscript | 7 + source/blender/windowmanager/WM_api.h | 65 +- source/blender/windowmanager/WM_types.h | 16 +- source/blender/windowmanager/intern/wm.c | 86 +- source/blender/windowmanager/intern/wm_cursors.c | 17 +- .../blender/windowmanager/intern/wm_event_system.c | 242 +- source/blender/windowmanager/intern/wm_files.c | 7 +- source/blender/windowmanager/intern/wm_init_exit.c | 19 +- 
source/blender/windowmanager/intern/wm_jobs.c | 33 +- source/blender/windowmanager/intern/wm_keymap.c | 327 +- source/blender/windowmanager/intern/wm_operators.c | 254 +- source/blender/windowmanager/intern/wm_subwindow.c | 4 +- source/blender/windowmanager/intern/wm_window.c | 114 +- source/blender/windowmanager/wm.h | 2 +- source/blender/windowmanager/wm_event_system.h | 3 +- source/blender/windowmanager/wm_event_types.h | 22 +- source/blender/windowmanager/wm_subwindow.h | 1 + source/blender/windowmanager/wm_window.h | 1 - source/blenderplayer/bad_level_call_stubs/stubs.c | 18 + source/creator/CMakeLists.txt | 79 +- source/creator/SConscript | 5 + source/creator/creator.c | 21 +- source/darwin/Makefile | 2 - .../BlenderRoutines/BL_KetsjiEmbedStart.cpp | 48 +- source/gameengine/BlenderRoutines/CMakeLists.txt | 8 +- source/gameengine/BlenderRoutines/KX_BlenderGL.cpp | 1 + source/gameengine/BlenderRoutines/Makefile | 1 + source/gameengine/BlenderRoutines/SConscript | 7 +- source/gameengine/CMakeLists.txt | 5 +- source/gameengine/Converter/BL_ActionActuator.cpp | 10 +- source/gameengine/Converter/BL_ActionActuator.h | 5 +- source/gameengine/Converter/BL_ArmatureObject.cpp | 496 +- source/gameengine/Converter/BL_ArmatureObject.h | 57 +- .../Converter/BL_BlenderDataConversion.cpp | 17 +- .../Converter/BL_BlenderDataConversion.h | 1 - .../Converter/BL_ShapeActionActuator.cpp | 4 + .../gameengine/Converter/BL_ShapeActionActuator.h | 6 +- source/gameengine/Converter/CMakeLists.txt | 8 +- .../Converter/KX_BlenderSceneConverter.cpp | 9 +- .../Converter/KX_BlenderSceneConverter.h | 4 +- .../gameengine/Converter/KX_ConvertActuators.cpp | 10 + .../gameengine/Converter/KX_ConvertControllers.cpp | 16 +- .../gameengine/Converter/KX_ConvertControllers.h | 3 +- .../gameengine/Converter/KX_ConvertProperties.cpp | 3 +- source/gameengine/Converter/KX_ConvertSensors.cpp | 27 +- source/gameengine/Converter/Makefile | 1 + source/gameengine/Converter/SConscript | 13 +- source/gameengine/Expressions/BoolValue.cpp | 4 +- source/gameengine/Expressions/BoolValue.h | 2 + source/gameengine/Expressions/CMakeLists.txt | 8 +- source/gameengine/Expressions/FloatValue.cpp | 4 +- source/gameengine/Expressions/FloatValue.h | 2 + source/gameengine/Expressions/IntValue.cpp | 3 +- source/gameengine/Expressions/IntValue.h | 3 + source/gameengine/Expressions/KX_Python.h | 4 +- source/gameengine/Expressions/ListValue.cpp | 526 +- source/gameengine/Expressions/ListValue.h | 3 +- source/gameengine/Expressions/PyObjectPlus.cpp | 475 +- source/gameengine/Expressions/PyObjectPlus.h | 238 +- source/gameengine/Expressions/SConscript | 12 +- source/gameengine/Expressions/StringValue.h | 2 + source/gameengine/Expressions/Value.cpp | 14 +- source/gameengine/Expressions/Value.h | 22 +- source/gameengine/GameLogic/CMakeLists.txt | 7 +- .../gameengine/GameLogic/SCA_2DFilterActuator.cpp | 8 +- source/gameengine/GameLogic/SCA_ANDController.cpp | 4 +- .../GameLogic/SCA_ActuatorEventManager.cpp | 3 +- .../GameLogic/SCA_ActuatorEventManager.h | 2 - source/gameengine/GameLogic/SCA_ActuatorSensor.cpp | 3 + source/gameengine/GameLogic/SCA_ActuatorSensor.h | 6 +- .../GameLogic/SCA_AlwaysEventManager.cpp | 3 +- .../gameengine/GameLogic/SCA_AlwaysEventManager.h | 2 - source/gameengine/GameLogic/SCA_AlwaysSensor.cpp | 4 + source/gameengine/GameLogic/SCA_DelaySensor.cpp | 4 + source/gameengine/GameLogic/SCA_EventManager.cpp | 5 +- source/gameengine/GameLogic/SCA_EventManager.h | 8 +- .../GameLogic/SCA_ExpressionController.cpp | 2 + 
source/gameengine/GameLogic/SCA_IActuator.cpp | 4 +- source/gameengine/GameLogic/SCA_IActuator.h | 32 +- source/gameengine/GameLogic/SCA_IController.cpp | 5 + source/gameengine/GameLogic/SCA_IController.h | 4 +- source/gameengine/GameLogic/SCA_ILogicBrick.cpp | 3 + source/gameengine/GameLogic/SCA_ILogicBrick.h | 4 +- source/gameengine/GameLogic/SCA_IObject.cpp | 33 +- source/gameengine/GameLogic/SCA_IObject.h | 17 +- source/gameengine/GameLogic/SCA_ISensor.cpp | 6 + source/gameengine/GameLogic/SCA_ISensor.h | 2 + .../gameengine/GameLogic/SCA_JoystickManager.cpp | 3 +- source/gameengine/GameLogic/SCA_JoystickManager.h | 2 - source/gameengine/GameLogic/SCA_JoystickSensor.cpp | 4 + source/gameengine/GameLogic/SCA_JoystickSensor.h | 4 + .../gameengine/GameLogic/SCA_KeyboardManager.cpp | 7 +- source/gameengine/GameLogic/SCA_KeyboardManager.h | 2 - source/gameengine/GameLogic/SCA_KeyboardSensor.cpp | 2 + source/gameengine/GameLogic/SCA_KeyboardSensor.h | 2 + source/gameengine/GameLogic/SCA_LogicManager.cpp | 4 + source/gameengine/GameLogic/SCA_MouseManager.cpp | 7 +- source/gameengine/GameLogic/SCA_MouseManager.h | 1 - source/gameengine/GameLogic/SCA_MouseSensor.cpp | 21 +- source/gameengine/GameLogic/SCA_MouseSensor.h | 12 +- source/gameengine/GameLogic/SCA_NANDController.cpp | 4 +- source/gameengine/GameLogic/SCA_NORController.cpp | 4 +- source/gameengine/GameLogic/SCA_ORController.cpp | 4 + .../gameengine/GameLogic/SCA_PropertyActuator.cpp | 5 +- .../GameLogic/SCA_PropertyEventManager.cpp | 3 +- .../GameLogic/SCA_PropertyEventManager.h | 2 - source/gameengine/GameLogic/SCA_PropertySensor.cpp | 29 +- source/gameengine/GameLogic/SCA_PropertySensor.h | 4 + .../gameengine/GameLogic/SCA_PythonController.cpp | 36 +- source/gameengine/GameLogic/SCA_PythonController.h | 14 +- source/gameengine/GameLogic/SCA_RandomActuator.cpp | 8 +- source/gameengine/GameLogic/SCA_RandomActuator.h | 5 + .../GameLogic/SCA_RandomEventManager.cpp | 3 +- .../gameengine/GameLogic/SCA_RandomEventManager.h | 2 - source/gameengine/GameLogic/SCA_RandomSensor.cpp | 4 + source/gameengine/GameLogic/SCA_RandomSensor.h | 4 +- .../gameengine/GameLogic/SCA_TimeEventManager.cpp | 2 +- source/gameengine/GameLogic/SCA_XNORController.cpp | 4 +- source/gameengine/GameLogic/SCA_XORController.cpp | 3 +- source/gameengine/GameLogic/SConscript | 8 +- source/gameengine/GamePlayer/common/SConscript | 10 +- .../GamePlayer/ghost/GPG_Application.cpp | 3 +- source/gameengine/GamePlayer/ghost/SConscript | 8 +- source/gameengine/Ketsji/BL_Shader.cpp | 4 + source/gameengine/Ketsji/BL_Shader.h | 2 + source/gameengine/Ketsji/CMakeLists.txt | 7 +- source/gameengine/Ketsji/KXNetwork/CMakeLists.txt | 7 +- .../Ketsji/KXNetwork/KX_NetworkEventManager.cpp | 2 +- .../Ketsji/KXNetwork/KX_NetworkEventManager.h | 1 - .../Ketsji/KXNetwork/KX_NetworkMessageActuator.cpp | 6 +- .../Ketsji/KXNetwork/KX_NetworkMessageSensor.cpp | 4 + .../Ketsji/KXNetwork/KX_NetworkMessageSensor.h | 4 + source/gameengine/Ketsji/KXNetwork/SConscript | 9 +- source/gameengine/Ketsji/KX_BlenderMaterial.cpp | 25 +- source/gameengine/Ketsji/KX_BlenderMaterial.h | 2 + source/gameengine/Ketsji/KX_Camera.cpp | 3 +- source/gameengine/Ketsji/KX_Camera.h | 4 + source/gameengine/Ketsji/KX_CameraActuator.cpp | 6 +- source/gameengine/Ketsji/KX_CameraActuator.h | 4 + source/gameengine/Ketsji/KX_ConstraintActuator.cpp | 8 +- source/gameengine/Ketsji/KX_ConstraintWrapper.cpp | 6 +- source/gameengine/Ketsji/KX_ConstraintWrapper.h | 2 + source/gameengine/Ketsji/KX_Dome.cpp | 71 +- 
source/gameengine/Ketsji/KX_GameActuator.cpp | 14 +- source/gameengine/Ketsji/KX_GameObject.cpp | 96 +- source/gameengine/Ketsji/KX_GameObject.h | 41 +- source/gameengine/Ketsji/KX_ISceneConverter.h | 1 - source/gameengine/Ketsji/KX_IpoActuator.cpp | 7 +- source/gameengine/Ketsji/KX_KetsjiEngine.cpp | 18 +- source/gameengine/Ketsji/KX_KetsjiEngine.h | 11 +- source/gameengine/Ketsji/KX_Light.cpp | 2 + source/gameengine/Ketsji/KX_Light.h | 6 +- source/gameengine/Ketsji/KX_MeshProxy.cpp | 4 + source/gameengine/Ketsji/KX_MeshProxy.h | 4 + source/gameengine/Ketsji/KX_MouseFocusSensor.cpp | 4 +- source/gameengine/Ketsji/KX_MouseFocusSensor.h | 4 + source/gameengine/Ketsji/KX_NearSensor.cpp | 5 +- source/gameengine/Ketsji/KX_NearSensor.h | 4 + source/gameengine/Ketsji/KX_ObjectActuator.cpp | 6 +- source/gameengine/Ketsji/KX_ObjectActuator.h | 5 +- source/gameengine/Ketsji/KX_ParentActuator.cpp | 6 +- source/gameengine/Ketsji/KX_ParentActuator.h | 4 + .../gameengine/Ketsji/KX_PhysicsObjectWrapper.cpp | 5 +- source/gameengine/Ketsji/KX_PhysicsObjectWrapper.h | 4 + source/gameengine/Ketsji/KX_PolyProxy.cpp | 4 + source/gameengine/Ketsji/KX_PolyProxy.h | 4 + source/gameengine/Ketsji/KX_PolygonMaterial.cpp | 12 + source/gameengine/Ketsji/KX_PolygonMaterial.h | 5 + .../gameengine/Ketsji/KX_PyConstraintBinding.cpp | 4 + source/gameengine/Ketsji/KX_PyConstraintBinding.h | 4 +- source/gameengine/Ketsji/KX_PyMath.cpp | 4 + source/gameengine/Ketsji/KX_PyMath.h | 5 +- source/gameengine/Ketsji/KX_PythonInit.cpp | 104 +- source/gameengine/Ketsji/KX_PythonInit.h | 3 +- source/gameengine/Ketsji/KX_PythonInitTypes.cpp | 67 +- source/gameengine/Ketsji/KX_PythonInitTypes.h | 2 + source/gameengine/Ketsji/KX_PythonSeq.cpp | 39 + source/gameengine/Ketsji/KX_PythonSeq.h | 8 +- source/gameengine/Ketsji/KX_RadarSensor.cpp | 3 +- source/gameengine/Ketsji/KX_RayEventManager.h | 5 +- source/gameengine/Ketsji/KX_RaySensor.cpp | 6 +- source/gameengine/Ketsji/KX_RaySensor.h | 3 + .../gameengine/Ketsji/KX_SCA_AddObjectActuator.cpp | 18 +- .../gameengine/Ketsji/KX_SCA_AddObjectActuator.h | 4 + .../gameengine/Ketsji/KX_SCA_DynamicActuator.cpp | 6 +- .../gameengine/Ketsji/KX_SCA_EndObjectActuator.cpp | 6 +- .../Ketsji/KX_SCA_ReplaceMeshActuator.cpp | 6 +- .../gameengine/Ketsji/KX_SCA_ReplaceMeshActuator.h | 4 + source/gameengine/Ketsji/KX_Scene.cpp | 46 +- source/gameengine/Ketsji/KX_Scene.h | 6 + source/gameengine/Ketsji/KX_SceneActuator.cpp | 6 +- source/gameengine/Ketsji/KX_SceneActuator.h | 4 + source/gameengine/Ketsji/KX_SoundActuator.cpp | 6 +- source/gameengine/Ketsji/KX_SoundActuator.h | 5 + source/gameengine/Ketsji/KX_StateActuator.cpp | 5 +- source/gameengine/Ketsji/KX_TouchEventManager.cpp | 3 +- source/gameengine/Ketsji/KX_TouchEventManager.h | 1 - source/gameengine/Ketsji/KX_TouchSensor.cpp | 2 + source/gameengine/Ketsji/KX_TouchSensor.h | 5 +- source/gameengine/Ketsji/KX_TrackToActuator.cpp | 8 +- source/gameengine/Ketsji/KX_TrackToActuator.h | 4 + source/gameengine/Ketsji/KX_VehicleWrapper.cpp | 4 +- source/gameengine/Ketsji/KX_VehicleWrapper.h | 3 +- source/gameengine/Ketsji/KX_VertexProxy.cpp | 4 + source/gameengine/Ketsji/KX_VertexProxy.h | 4 + source/gameengine/Ketsji/KX_VisibilityActuator.cpp | 6 +- source/gameengine/Ketsji/SConscript | 8 +- .../Physics/Bullet/CcdPhysicsController.cpp | 6 +- source/gameengine/Physics/Bullet/SConscript | 10 +- source/gameengine/PyDoc/GameTypes.py | 334 +- source/gameengine/PyDoc/SConscript | 7 +- .../gameengine/Rasterizer/RAS_2DFilterManager.cpp | 2 + 
source/gameengine/Rasterizer/SConscript | 10 +- source/gameengine/SConscript | 6 +- source/gameengine/SceneGraph/SG_DList.h | 79 + source/gameengine/VideoTexture/CMakeLists.txt | 7 +- source/gameengine/VideoTexture/SConscript | 8 +- source/gameengine/VideoTexture/VideoFFmpeg.h | 2 + source/nan_definitions.mk | 2 + source/nan_warn.mk | 28 +- tools/Blender.py | 146 +- tools/btools.py | 19 +- 1376 files changed, 132036 insertions(+), 148017 deletions(-) create mode 100644 extern/Eigen2/Eigen/Array create mode 100644 extern/Eigen2/Eigen/Cholesky create mode 100644 extern/Eigen2/Eigen/Core create mode 100644 extern/Eigen2/Eigen/Dense create mode 100644 extern/Eigen2/Eigen/Eigen create mode 100644 extern/Eigen2/Eigen/Geometry create mode 100644 extern/Eigen2/Eigen/LU create mode 100644 extern/Eigen2/Eigen/LeastSquares create mode 100644 extern/Eigen2/Eigen/NewStdVector create mode 100644 extern/Eigen2/Eigen/QR create mode 100644 extern/Eigen2/Eigen/QtAlignedMalloc create mode 100644 extern/Eigen2/Eigen/SVD create mode 100644 extern/Eigen2/Eigen/Sparse create mode 100644 extern/Eigen2/Eigen/StdVector create mode 100644 extern/Eigen2/Eigen/src/Array/BooleanRedux.h create mode 100644 extern/Eigen2/Eigen/src/Array/CwiseOperators.h create mode 100644 extern/Eigen2/Eigen/src/Array/Functors.h create mode 100644 extern/Eigen2/Eigen/src/Array/Norms.h create mode 100644 extern/Eigen2/Eigen/src/Array/PartialRedux.h create mode 100644 extern/Eigen2/Eigen/src/Array/Random.h create mode 100644 extern/Eigen2/Eigen/src/Array/Select.h create mode 100644 extern/Eigen2/Eigen/src/Cholesky/CholeskyInstantiations.cpp create mode 100644 extern/Eigen2/Eigen/src/Cholesky/LDLT.h create mode 100644 extern/Eigen2/Eigen/src/Cholesky/LLT.h create mode 100644 extern/Eigen2/Eigen/src/Core/Assign.h create mode 100644 extern/Eigen2/Eigen/src/Core/Block.h create mode 100644 extern/Eigen2/Eigen/src/Core/CacheFriendlyProduct.h create mode 100644 extern/Eigen2/Eigen/src/Core/Coeffs.h create mode 100644 extern/Eigen2/Eigen/src/Core/CommaInitializer.h create mode 100644 extern/Eigen2/Eigen/src/Core/CoreInstantiations.cpp create mode 100644 extern/Eigen2/Eigen/src/Core/Cwise.h create mode 100644 extern/Eigen2/Eigen/src/Core/CwiseBinaryOp.h create mode 100644 extern/Eigen2/Eigen/src/Core/CwiseNullaryOp.h create mode 100644 extern/Eigen2/Eigen/src/Core/CwiseUnaryOp.h create mode 100644 extern/Eigen2/Eigen/src/Core/DiagonalCoeffs.h create mode 100644 extern/Eigen2/Eigen/src/Core/DiagonalMatrix.h create mode 100644 extern/Eigen2/Eigen/src/Core/DiagonalProduct.h create mode 100644 extern/Eigen2/Eigen/src/Core/Dot.h create mode 100644 extern/Eigen2/Eigen/src/Core/Flagged.h create mode 100644 extern/Eigen2/Eigen/src/Core/Functors.h create mode 100644 extern/Eigen2/Eigen/src/Core/Fuzzy.h create mode 100644 extern/Eigen2/Eigen/src/Core/GenericPacketMath.h create mode 100644 extern/Eigen2/Eigen/src/Core/IO.h create mode 100644 extern/Eigen2/Eigen/src/Core/Map.h create mode 100644 extern/Eigen2/Eigen/src/Core/MapBase.h create mode 100644 extern/Eigen2/Eigen/src/Core/MathFunctions.h create mode 100644 extern/Eigen2/Eigen/src/Core/Matrix.h create mode 100644 extern/Eigen2/Eigen/src/Core/MatrixBase.h create mode 100644 extern/Eigen2/Eigen/src/Core/MatrixStorage.h create mode 100644 extern/Eigen2/Eigen/src/Core/Minor.h create mode 100644 extern/Eigen2/Eigen/src/Core/NestByValue.h create mode 100644 extern/Eigen2/Eigen/src/Core/NumTraits.h create mode 100644 extern/Eigen2/Eigen/src/Core/Part.h create mode 100644 extern/Eigen2/Eigen/src/Core/Product.h 
create mode 100644 extern/Eigen2/Eigen/src/Core/Redux.h create mode 100644 extern/Eigen2/Eigen/src/Core/SolveTriangular.h create mode 100644 extern/Eigen2/Eigen/src/Core/Sum.h create mode 100644 extern/Eigen2/Eigen/src/Core/Swap.h create mode 100644 extern/Eigen2/Eigen/src/Core/Transpose.h create mode 100644 extern/Eigen2/Eigen/src/Core/Visitor.h create mode 100644 extern/Eigen2/Eigen/src/Core/arch/AltiVec/PacketMath.h create mode 100644 extern/Eigen2/Eigen/src/Core/arch/SSE/PacketMath.h create mode 100644 extern/Eigen2/Eigen/src/Core/util/Constants.h create mode 100644 extern/Eigen2/Eigen/src/Core/util/DisableMSVCWarnings.h create mode 100644 extern/Eigen2/Eigen/src/Core/util/EnableMSVCWarnings.h create mode 100644 extern/Eigen2/Eigen/src/Core/util/ForwardDeclarations.h create mode 100644 extern/Eigen2/Eigen/src/Core/util/Macros.h create mode 100644 extern/Eigen2/Eigen/src/Core/util/Memory.h create mode 100644 extern/Eigen2/Eigen/src/Core/util/Meta.h create mode 100644 extern/Eigen2/Eigen/src/Core/util/StaticAssert.h create mode 100644 extern/Eigen2/Eigen/src/Core/util/XprHelper.h create mode 100644 extern/Eigen2/Eigen/src/Geometry/AlignedBox.h create mode 100644 extern/Eigen2/Eigen/src/Geometry/AngleAxis.h create mode 100644 extern/Eigen2/Eigen/src/Geometry/EulerAngles.h create mode 100644 extern/Eigen2/Eigen/src/Geometry/Hyperplane.h create mode 100644 extern/Eigen2/Eigen/src/Geometry/OrthoMethods.h create mode 100644 extern/Eigen2/Eigen/src/Geometry/ParametrizedLine.h create mode 100644 extern/Eigen2/Eigen/src/Geometry/Quaternion.h create mode 100644 extern/Eigen2/Eigen/src/Geometry/Rotation2D.h create mode 100644 extern/Eigen2/Eigen/src/Geometry/RotationBase.h create mode 100644 extern/Eigen2/Eigen/src/Geometry/Scaling.h create mode 100644 extern/Eigen2/Eigen/src/Geometry/Transform.h create mode 100644 extern/Eigen2/Eigen/src/Geometry/Translation.h create mode 100644 extern/Eigen2/Eigen/src/LU/Determinant.h create mode 100644 extern/Eigen2/Eigen/src/LU/Inverse.h create mode 100644 extern/Eigen2/Eigen/src/LU/LU.h create mode 100644 extern/Eigen2/Eigen/src/LeastSquares/LeastSquares.h create mode 100644 extern/Eigen2/Eigen/src/QR/EigenSolver.h create mode 100644 extern/Eigen2/Eigen/src/QR/HessenbergDecomposition.h create mode 100644 extern/Eigen2/Eigen/src/QR/QR.h create mode 100644 extern/Eigen2/Eigen/src/QR/QrInstantiations.cpp create mode 100644 extern/Eigen2/Eigen/src/QR/SelfAdjointEigenSolver.h create mode 100644 extern/Eigen2/Eigen/src/QR/Tridiagonalization.h create mode 100644 extern/Eigen2/Eigen/src/SVD/SVD.h create mode 100644 extern/Eigen2/Eigen/src/Sparse/AmbiVector.h create mode 100644 extern/Eigen2/Eigen/src/Sparse/CholmodSupport.h create mode 100644 extern/Eigen2/Eigen/src/Sparse/CompressedStorage.h create mode 100644 extern/Eigen2/Eigen/src/Sparse/CoreIterators.h create mode 100644 extern/Eigen2/Eigen/src/Sparse/DynamicSparseMatrix.h create mode 100644 extern/Eigen2/Eigen/src/Sparse/MappedSparseMatrix.h create mode 100644 extern/Eigen2/Eigen/src/Sparse/RandomSetter.h create mode 100644 extern/Eigen2/Eigen/src/Sparse/SparseAssign.h create mode 100644 extern/Eigen2/Eigen/src/Sparse/SparseBlock.h create mode 100644 extern/Eigen2/Eigen/src/Sparse/SparseCwise.h create mode 100644 extern/Eigen2/Eigen/src/Sparse/SparseCwiseBinaryOp.h create mode 100644 extern/Eigen2/Eigen/src/Sparse/SparseCwiseUnaryOp.h create mode 100644 extern/Eigen2/Eigen/src/Sparse/SparseDiagonalProduct.h create mode 100644 extern/Eigen2/Eigen/src/Sparse/SparseDot.h create mode 100644 
extern/Eigen2/Eigen/src/Sparse/SparseFlagged.h create mode 100644 extern/Eigen2/Eigen/src/Sparse/SparseFuzzy.h create mode 100644 extern/Eigen2/Eigen/src/Sparse/SparseLDLT.h create mode 100644 extern/Eigen2/Eigen/src/Sparse/SparseLLT.h create mode 100644 extern/Eigen2/Eigen/src/Sparse/SparseLU.h create mode 100644 extern/Eigen2/Eigen/src/Sparse/SparseMatrix.h create mode 100644 extern/Eigen2/Eigen/src/Sparse/SparseMatrixBase.h create mode 100644 extern/Eigen2/Eigen/src/Sparse/SparseProduct.h create mode 100644 extern/Eigen2/Eigen/src/Sparse/SparseRedux.h create mode 100644 extern/Eigen2/Eigen/src/Sparse/SparseTranspose.h create mode 100644 extern/Eigen2/Eigen/src/Sparse/SparseUtil.h create mode 100644 extern/Eigen2/Eigen/src/Sparse/SparseVector.h create mode 100644 extern/Eigen2/Eigen/src/Sparse/SuperLUSupport.h create mode 100644 extern/Eigen2/Eigen/src/Sparse/TaucsSupport.h create mode 100644 extern/Eigen2/Eigen/src/Sparse/TriangularSolver.h create mode 100644 extern/Eigen2/Eigen/src/Sparse/UmfPackSupport.h create mode 100755 extern/Eigen2/eigen-update.sh create mode 100644 intern/itasc/Armature.cpp create mode 100644 intern/itasc/Armature.hpp create mode 100644 intern/itasc/CMakeLists.txt create mode 100644 intern/itasc/Cache.cpp create mode 100644 intern/itasc/Cache.hpp create mode 100644 intern/itasc/ConstraintSet.cpp create mode 100644 intern/itasc/ConstraintSet.hpp create mode 100644 intern/itasc/ControlledObject.cpp create mode 100644 intern/itasc/ControlledObject.hpp create mode 100644 intern/itasc/CopyPose.cpp create mode 100644 intern/itasc/CopyPose.hpp create mode 100644 intern/itasc/Distance.cpp create mode 100644 intern/itasc/Distance.hpp create mode 100644 intern/itasc/FixedObject.cpp create mode 100644 intern/itasc/FixedObject.hpp create mode 100644 intern/itasc/Makefile create mode 100644 intern/itasc/MovingFrame.cpp create mode 100644 intern/itasc/MovingFrame.hpp create mode 100644 intern/itasc/Object.hpp create mode 100644 intern/itasc/SConscript create mode 100644 intern/itasc/Scene.cpp create mode 100644 intern/itasc/Scene.hpp create mode 100644 intern/itasc/Solver.hpp create mode 100644 intern/itasc/UncontrolledObject.cpp create mode 100644 intern/itasc/UncontrolledObject.hpp create mode 100644 intern/itasc/WDLSSolver.cpp create mode 100644 intern/itasc/WDLSSolver.hpp create mode 100644 intern/itasc/WSDLSSolver.cpp create mode 100644 intern/itasc/WSDLSSolver.hpp create mode 100644 intern/itasc/WorldObject.cpp create mode 100644 intern/itasc/WorldObject.hpp create mode 100644 intern/itasc/eigen_types.cpp create mode 100644 intern/itasc/eigen_types.hpp create mode 100644 intern/itasc/kdl/Makefile create mode 100644 intern/itasc/kdl/chain.cpp create mode 100644 intern/itasc/kdl/chain.hpp create mode 100644 intern/itasc/kdl/chainfksolver.hpp create mode 100644 intern/itasc/kdl/chainfksolverpos_recursive.cpp create mode 100644 intern/itasc/kdl/chainfksolverpos_recursive.hpp create mode 100644 intern/itasc/kdl/chainjnttojacsolver.cpp create mode 100644 intern/itasc/kdl/chainjnttojacsolver.hpp create mode 100644 intern/itasc/kdl/frameacc.cpp create mode 100644 intern/itasc/kdl/frameacc.hpp create mode 100644 intern/itasc/kdl/frameacc.inl create mode 100644 intern/itasc/kdl/frames.cpp create mode 100644 intern/itasc/kdl/frames.hpp create mode 100644 intern/itasc/kdl/frames.inl create mode 100644 intern/itasc/kdl/frames_io.cpp create mode 100644 intern/itasc/kdl/frames_io.hpp create mode 100644 intern/itasc/kdl/framevel.cpp create mode 100644 intern/itasc/kdl/framevel.hpp 
create mode 100644 intern/itasc/kdl/framevel.inl create mode 100644 intern/itasc/kdl/inertia.cpp create mode 100644 intern/itasc/kdl/inertia.hpp create mode 100644 intern/itasc/kdl/jacobian.cpp create mode 100644 intern/itasc/kdl/jacobian.hpp create mode 100644 intern/itasc/kdl/jntarray.cpp create mode 100644 intern/itasc/kdl/jntarray.hpp create mode 100644 intern/itasc/kdl/jntarrayacc.cpp create mode 100644 intern/itasc/kdl/jntarrayacc.hpp create mode 100644 intern/itasc/kdl/jntarrayvel.cpp create mode 100644 intern/itasc/kdl/jntarrayvel.hpp create mode 100644 intern/itasc/kdl/joint.cpp create mode 100644 intern/itasc/kdl/joint.hpp create mode 100644 intern/itasc/kdl/kinfam_io.cpp create mode 100644 intern/itasc/kdl/kinfam_io.hpp create mode 100644 intern/itasc/kdl/segment.cpp create mode 100644 intern/itasc/kdl/segment.hpp create mode 100644 intern/itasc/kdl/tree.cpp create mode 100644 intern/itasc/kdl/tree.hpp create mode 100644 intern/itasc/kdl/treefksolver.hpp create mode 100644 intern/itasc/kdl/treefksolverpos_recursive.cpp create mode 100644 intern/itasc/kdl/treefksolverpos_recursive.hpp create mode 100644 intern/itasc/kdl/treejnttojacsolver.cpp create mode 100644 intern/itasc/kdl/treejnttojacsolver.hpp create mode 100644 intern/itasc/kdl/utilities/Makefile create mode 100644 intern/itasc/kdl/utilities/error.h create mode 100644 intern/itasc/kdl/utilities/error_stack.cpp create mode 100644 intern/itasc/kdl/utilities/error_stack.h create mode 100644 intern/itasc/kdl/utilities/kdl-config.h create mode 100644 intern/itasc/kdl/utilities/rall1d.h create mode 100644 intern/itasc/kdl/utilities/rall2d.h create mode 100644 intern/itasc/kdl/utilities/svd_eigen_HH.hpp create mode 100644 intern/itasc/kdl/utilities/traits.h create mode 100644 intern/itasc/kdl/utilities/utility.cpp create mode 100644 intern/itasc/kdl/utilities/utility.h create mode 100644 intern/itasc/kdl/utilities/utility_io.cpp create mode 100644 intern/itasc/kdl/utilities/utility_io.h create mode 100644 intern/itasc/make/msvc_9_0/itasc.vcproj create mode 100644 intern/itasc/ublas_types.hpp create mode 100644 projectfiles_vc9/blender/ikplugin/BIK_ikplugin.vcproj create mode 100644 projectfiles_vc9/blender/render/BRE_raytrace.vcproj delete mode 100644 release/io/engine_render_pov.py delete mode 100644 release/io/export_ply.py delete mode 100644 release/io/netrender/__init__.py delete mode 100644 release/io/netrender/client.py delete mode 100644 release/io/netrender/master.py delete mode 100644 release/io/netrender/model.py delete mode 100644 release/io/netrender/operators.py delete mode 100644 release/io/netrender/slave.py delete mode 100644 release/io/netrender/ui.py delete mode 100644 release/io/netrender/utils.py delete mode 100644 release/scripts/3ds_export.py delete mode 100644 release/scripts/3ds_import.py delete mode 100644 release/scripts/Axiscopy.py delete mode 100644 release/scripts/DirectX8Exporter.py delete mode 100644 release/scripts/DirectX8Importer.py delete mode 100644 release/scripts/IDPropBrowser.py delete mode 100644 release/scripts/ac3d_export.py delete mode 100644 release/scripts/ac3d_import.py delete mode 100644 release/scripts/add_mesh_empty.py delete mode 100644 release/scripts/add_mesh_torus.py delete mode 100644 release/scripts/animation_bake_constraints.py delete mode 100644 release/scripts/animation_clean.py delete mode 100644 release/scripts/animation_trajectory.py delete mode 100644 release/scripts/armature_symmetry.py delete mode 100644 release/scripts/bevel_center.py delete mode 100644 
release/scripts/blenderLipSynchro.py delete mode 100644 release/scripts/bpydata/KUlang.txt delete mode 100644 release/scripts/bpydata/config/readme.txt delete mode 100644 release/scripts/bpydata/readme.txt delete mode 100644 release/scripts/bpymodules/BPyAddMesh.py delete mode 100644 release/scripts/bpymodules/BPyArmature.py delete mode 100644 release/scripts/bpymodules/BPyBlender.py delete mode 100644 release/scripts/bpymodules/BPyCurve.py delete mode 100644 release/scripts/bpymodules/BPyImage.py delete mode 100644 release/scripts/bpymodules/BPyMathutils.py delete mode 100644 release/scripts/bpymodules/BPyMesh.py delete mode 100644 release/scripts/bpymodules/BPyMesh_redux.py delete mode 100644 release/scripts/bpymodules/BPyMessages.py delete mode 100644 release/scripts/bpymodules/BPyNMesh.py delete mode 100644 release/scripts/bpymodules/BPyObject.py delete mode 100644 release/scripts/bpymodules/BPyRegistry.py delete mode 100644 release/scripts/bpymodules/BPyRender.py delete mode 100644 release/scripts/bpymodules/BPySys.py delete mode 100644 release/scripts/bpymodules/BPyTextPlugin.py delete mode 100644 release/scripts/bpymodules/BPyWindow.py delete mode 100644 release/scripts/bpymodules/blend2renderinfo.py delete mode 100644 release/scripts/bpymodules/defaultdoodads.py delete mode 100644 release/scripts/bpymodules/dxfColorMap.py delete mode 100644 release/scripts/bpymodules/dxfImportObjects.py delete mode 100644 release/scripts/bpymodules/dxfLibrary.py delete mode 100644 release/scripts/bpymodules/dxfReader.py delete mode 100644 release/scripts/bpymodules/mesh_gradient.py delete mode 100644 release/scripts/bpymodules/meshtools.py delete mode 100644 release/scripts/bpymodules/paths_ai2obj.py delete mode 100644 release/scripts/bpymodules/paths_eps2obj.py delete mode 100644 release/scripts/bpymodules/paths_gimp2obj.py delete mode 100644 release/scripts/bpymodules/paths_svg2obj.py delete mode 100644 release/scripts/bvh_import.py delete mode 100644 release/scripts/c3d_import.py delete mode 100644 release/scripts/camera_changer.py delete mode 100644 release/scripts/config.py delete mode 100644 release/scripts/console.py delete mode 100644 release/scripts/discombobulator.py delete mode 100644 release/scripts/envelope_symmetry.py delete mode 100644 release/scripts/export-iv-0.1.py delete mode 100644 release/scripts/export_dxf.py delete mode 100644 release/scripts/export_fbx.py delete mode 100644 release/scripts/export_lightwave_motion.py delete mode 100644 release/scripts/export_m3g.py delete mode 100644 release/scripts/export_map.py delete mode 100644 release/scripts/export_mdd.py delete mode 100644 release/scripts/export_obj.py delete mode 100644 release/scripts/faceselect_same_weights.py delete mode 100644 release/scripts/flt_defaultp.py delete mode 100644 release/scripts/flt_dofedit.py delete mode 100644 release/scripts/flt_export.py delete mode 100644 release/scripts/flt_filewalker.py delete mode 100644 release/scripts/flt_import.py delete mode 100644 release/scripts/flt_lodedit.py delete mode 100644 release/scripts/flt_palettemanager.py delete mode 100644 release/scripts/flt_properties.py delete mode 100644 release/scripts/flt_toolbar.py delete mode 100644 release/scripts/help_bpy_api.py delete mode 100644 release/scripts/help_browser.py delete mode 100644 release/scripts/hotkeys.py delete mode 100644 release/scripts/image_2d_cutout.py delete mode 100644 release/scripts/image_auto_layout.py delete mode 100644 release/scripts/image_billboard.py delete mode 100644 
release/scripts/image_edit.py delete mode 100644 release/scripts/import_dxf.py delete mode 100644 release/scripts/import_edl.py delete mode 100644 release/scripts/import_lightwave_motion.py delete mode 100644 release/scripts/import_mdd.py delete mode 100644 release/scripts/import_obj.py delete mode 100644 release/scripts/import_web3d.py create mode 100644 release/scripts/io/engine_render_pov.py create mode 100644 release/scripts/io/export_3ds.py create mode 100644 release/scripts/io/export_fbx.py create mode 100644 release/scripts/io/export_obj.py create mode 100644 release/scripts/io/export_ply.py create mode 100644 release/scripts/io/export_x3d.py create mode 100644 release/scripts/io/import_3ds.py create mode 100644 release/scripts/io/import_obj.py create mode 100644 release/scripts/io/netrender/__init__.py create mode 100644 release/scripts/io/netrender/balancing.py create mode 100644 release/scripts/io/netrender/client.py create mode 100644 release/scripts/io/netrender/master.py create mode 100644 release/scripts/io/netrender/master_html.py create mode 100644 release/scripts/io/netrender/model.py create mode 100644 release/scripts/io/netrender/operators.py create mode 100644 release/scripts/io/netrender/slave.py create mode 100644 release/scripts/io/netrender/ui.py create mode 100644 release/scripts/io/netrender/utils.py delete mode 100644 release/scripts/lightwave_export.py delete mode 100644 release/scripts/lightwave_import.py delete mode 100644 release/scripts/md2_export.py delete mode 100644 release/scripts/md2_import.py delete mode 100644 release/scripts/mesh_boneweight_copy.py delete mode 100644 release/scripts/mesh_cleanup.py delete mode 100644 release/scripts/mesh_edges2curves.py delete mode 100644 release/scripts/mesh_mirror_tool.py delete mode 100644 release/scripts/mesh_poly_reduce.py delete mode 100644 release/scripts/mesh_poly_reduce_grid.py delete mode 100644 release/scripts/mesh_skin.py delete mode 100644 release/scripts/mesh_solidify.py delete mode 100644 release/scripts/mesh_unfolder.py delete mode 100644 release/scripts/mesh_wire.py create mode 100644 release/scripts/modules/autocomplete.py create mode 100644 release/scripts/modules/bpy_ops.py create mode 100644 release/scripts/modules/bpy_sys.py delete mode 100644 release/scripts/ms3d_import.py delete mode 100644 release/scripts/ms3d_import_ascii.py delete mode 100644 release/scripts/obdatacopier.py delete mode 100644 release/scripts/object_active_to_other.py delete mode 100644 release/scripts/object_apply_def.py delete mode 100644 release/scripts/object_batch_name_edit.py delete mode 100644 release/scripts/object_cookie_cutter.py delete mode 100644 release/scripts/object_drop.py delete mode 100644 release/scripts/object_find.py delete mode 100644 release/scripts/object_random_loc_sz_rot.py delete mode 100644 release/scripts/object_sel2dupgroup.py delete mode 100644 release/scripts/object_timeofs_follow_act.py delete mode 100644 release/scripts/off_export.py delete mode 100644 release/scripts/off_import.py delete mode 100644 release/scripts/paths_import.py delete mode 100644 release/scripts/ply_import.py delete mode 100644 release/scripts/raw_export.py delete mode 100644 release/scripts/raw_import.py delete mode 100644 release/scripts/renameobjectbyblock.py delete mode 100644 release/scripts/render_save_layers.py delete mode 100644 release/scripts/rvk1_torvk2.py delete mode 100644 release/scripts/save_theme.py delete mode 100644 release/scripts/scripttemplate_background_job.py delete mode 100644 
release/scripts/scripttemplate_camera_object.py delete mode 100644 release/scripts/scripttemplate_gamelogic.py delete mode 100644 release/scripts/scripttemplate_gamelogic_basic.py delete mode 100644 release/scripts/scripttemplate_gamelogic_module.py delete mode 100644 release/scripts/scripttemplate_ipo_gen.py delete mode 100644 release/scripts/scripttemplate_mesh_edit.py delete mode 100644 release/scripts/scripttemplate_metaball_create.py delete mode 100644 release/scripts/scripttemplate_object_edit.py delete mode 100644 release/scripts/scripttemplate_pyconstraint.py delete mode 100644 release/scripts/scripttemplate_text_plugin.py delete mode 100644 release/scripts/slp_import.py delete mode 100644 release/scripts/sysinfo.py delete mode 100644 release/scripts/textplugin_convert_ge.py delete mode 100644 release/scripts/textplugin_functiondocs.py delete mode 100644 release/scripts/textplugin_imports.py delete mode 100644 release/scripts/textplugin_membersuggest.py delete mode 100644 release/scripts/textplugin_outliner.py delete mode 100644 release/scripts/textplugin_suggest.py delete mode 100644 release/scripts/textplugin_templates.py create mode 100644 release/scripts/ui/buttons_data_armature.py create mode 100644 release/scripts/ui/buttons_data_bone.py create mode 100644 release/scripts/ui/buttons_data_camera.py create mode 100644 release/scripts/ui/buttons_data_curve.py create mode 100644 release/scripts/ui/buttons_data_empty.py create mode 100644 release/scripts/ui/buttons_data_lamp.py create mode 100644 release/scripts/ui/buttons_data_lattice.py create mode 100644 release/scripts/ui/buttons_data_mesh.py create mode 100644 release/scripts/ui/buttons_data_metaball.py create mode 100644 release/scripts/ui/buttons_data_modifier.py create mode 100644 release/scripts/ui/buttons_data_text.py create mode 100644 release/scripts/ui/buttons_game.py create mode 100644 release/scripts/ui/buttons_material.py create mode 100644 release/scripts/ui/buttons_object.py create mode 100644 release/scripts/ui/buttons_object_constraint.py create mode 100644 release/scripts/ui/buttons_particle.py create mode 100644 release/scripts/ui/buttons_physics_cloth.py create mode 100644 release/scripts/ui/buttons_physics_common.py create mode 100644 release/scripts/ui/buttons_physics_field.py create mode 100644 release/scripts/ui/buttons_physics_fluid.py create mode 100644 release/scripts/ui/buttons_physics_smoke.py create mode 100644 release/scripts/ui/buttons_physics_softbody.py create mode 100644 release/scripts/ui/buttons_scene.py create mode 100644 release/scripts/ui/buttons_texture.py create mode 100644 release/scripts/ui/buttons_world.py create mode 100644 release/scripts/ui/space_buttons.py create mode 100644 release/scripts/ui/space_console.py create mode 100644 release/scripts/ui/space_filebrowser.py create mode 100644 release/scripts/ui/space_image.py create mode 100644 release/scripts/ui/space_info.py create mode 100644 release/scripts/ui/space_logic.py create mode 100644 release/scripts/ui/space_node.py create mode 100644 release/scripts/ui/space_outliner.py create mode 100644 release/scripts/ui/space_sequencer.py create mode 100644 release/scripts/ui/space_text.py create mode 100644 release/scripts/ui/space_time.py create mode 100644 release/scripts/ui/space_userpref.py create mode 100644 release/scripts/ui/space_view3d.py create mode 100644 release/scripts/ui/space_view3d_toolbar.py delete mode 100644 release/scripts/unweld.py delete mode 100644 release/scripts/uv_export.py delete mode 100644 
release/scripts/uv_seams_from_islands.py delete mode 100644 release/scripts/uvcalc_follow_active_coords.py delete mode 100644 release/scripts/uvcalc_lightmap.py delete mode 100644 release/scripts/uvcalc_quad_clickproj.py delete mode 100644 release/scripts/uvcalc_smart_project.py delete mode 100644 release/scripts/uvcopy.py delete mode 100644 release/scripts/vertexpaint_from_material.py delete mode 100644 release/scripts/vertexpaint_gradient.py delete mode 100644 release/scripts/vertexpaint_selfshadow_ao.py delete mode 100644 release/scripts/vrml97_export.py delete mode 100644 release/scripts/weightpaint_average.py delete mode 100644 release/scripts/weightpaint_clean.py delete mode 100644 release/scripts/weightpaint_copy.py delete mode 100644 release/scripts/weightpaint_envelope_assign.py delete mode 100644 release/scripts/weightpaint_gradient.py delete mode 100644 release/scripts/weightpaint_grow_shrink.py delete mode 100644 release/scripts/weightpaint_invert.py delete mode 100644 release/scripts/weightpaint_normalize.py delete mode 100644 release/scripts/widgetwizard.py delete mode 100644 release/scripts/wizard_bolt_factory.py delete mode 100644 release/scripts/wizard_curve2tree.py delete mode 100644 release/scripts/wizard_landscape_ant.py delete mode 100644 release/scripts/x3d_export.py delete mode 100644 release/scripts/xsi_export.py delete mode 100644 release/ui/bpy_ops.py delete mode 100644 release/ui/buttons_data_armature.py delete mode 100644 release/ui/buttons_data_bone.py delete mode 100644 release/ui/buttons_data_camera.py delete mode 100644 release/ui/buttons_data_curve.py delete mode 100644 release/ui/buttons_data_empty.py delete mode 100644 release/ui/buttons_data_lamp.py delete mode 100644 release/ui/buttons_data_lattice.py delete mode 100644 release/ui/buttons_data_mesh.py delete mode 100644 release/ui/buttons_data_metaball.py delete mode 100644 release/ui/buttons_data_modifier.py delete mode 100644 release/ui/buttons_data_text.py delete mode 100644 release/ui/buttons_game.py delete mode 100644 release/ui/buttons_material.py delete mode 100644 release/ui/buttons_object.py delete mode 100644 release/ui/buttons_object_constraint.py delete mode 100644 release/ui/buttons_particle.py delete mode 100644 release/ui/buttons_physics_cloth.py delete mode 100644 release/ui/buttons_physics_field.py delete mode 100644 release/ui/buttons_physics_fluid.py delete mode 100644 release/ui/buttons_physics_smoke.py delete mode 100644 release/ui/buttons_physics_softbody.py delete mode 100644 release/ui/buttons_scene.py delete mode 100644 release/ui/buttons_texture.py delete mode 100644 release/ui/buttons_world.py delete mode 100644 release/ui/space_buttons.py delete mode 100644 release/ui/space_console.py delete mode 100644 release/ui/space_filebrowser.py delete mode 100644 release/ui/space_image.py delete mode 100644 release/ui/space_info.py delete mode 100644 release/ui/space_logic.py delete mode 100644 release/ui/space_node.py delete mode 100644 release/ui/space_outliner.py delete mode 100644 release/ui/space_sequencer.py delete mode 100644 release/ui/space_text.py delete mode 100644 release/ui/space_time.py delete mode 100644 release/ui/space_userpref.py delete mode 100644 release/ui/space_view3d.py delete mode 100644 release/ui/space_view3d_toolbar.py create mode 100644 source/blender/blenlib/BLI_bfile.h create mode 100644 source/blender/blenlib/intern/BLI_bfile.c create mode 100644 source/blender/editors/armature/poseSlide.c delete mode 100644 
source/blender/editors/include/ED_previewrender.h create mode 100644 source/blender/editors/include/ED_render.h create mode 100644 source/blender/editors/mesh/mesh_data.c delete mode 100644 source/blender/editors/mesh/mesh_layers.c delete mode 100644 source/blender/editors/physics/ed_fluidsim.c delete mode 100644 source/blender/editors/physics/ed_pointcache.c delete mode 100644 source/blender/editors/physics/editparticle.c create mode 100644 source/blender/editors/physics/particle_boids.c create mode 100644 source/blender/editors/physics/particle_edit.c create mode 100644 source/blender/editors/physics/particle_object.c delete mode 100644 source/blender/editors/physics/physics_boids.c create mode 100644 source/blender/editors/physics/physics_fluid.c create mode 100644 source/blender/editors/physics/physics_ops.c create mode 100644 source/blender/editors/physics/physics_pointcache.c delete mode 100644 source/blender/editors/preview/Makefile delete mode 100644 source/blender/editors/preview/SConscript delete mode 100644 source/blender/editors/preview/previewrender.c delete mode 100644 source/blender/editors/preview/previewrender_intern.h create mode 100644 source/blender/editors/render/Makefile create mode 100644 source/blender/editors/render/SConscript create mode 100644 source/blender/editors/render/render_intern.h create mode 100644 source/blender/editors/render/render_ops.c create mode 100644 source/blender/editors/render/render_preview.c create mode 100644 source/blender/editors/render/render_shading.c create mode 100644 source/blender/ikplugin/BIK_api.h create mode 100644 source/blender/ikplugin/CMakeLists.txt create mode 100644 source/blender/ikplugin/Makefile create mode 100644 source/blender/ikplugin/SConscript create mode 100644 source/blender/ikplugin/intern/Makefile create mode 100644 source/blender/ikplugin/intern/ikplugin_api.c create mode 100644 source/blender/ikplugin/intern/ikplugin_api.h create mode 100644 source/blender/ikplugin/intern/iksolver_plugin.c create mode 100644 source/blender/ikplugin/intern/iksolver_plugin.h create mode 100644 source/blender/ikplugin/intern/itasc_plugin.cpp create mode 100644 source/blender/ikplugin/intern/itasc_plugin.h create mode 100644 source/blender/render/intern/raytrace/Makefile create mode 100644 source/blender/render/intern/raytrace/bvh.h create mode 100644 source/blender/render/intern/raytrace/rayobject.cpp create mode 100644 source/blender/render/intern/raytrace/rayobject_hint.h create mode 100644 source/blender/render/intern/raytrace/rayobject_qbvh.cpp create mode 100644 source/blender/render/intern/raytrace/rayobject_rtbuild.cpp create mode 100644 source/blender/render/intern/raytrace/rayobject_rtbuild.h create mode 100644 source/blender/render/intern/raytrace/rayobject_svbvh.cpp create mode 100644 source/blender/render/intern/raytrace/rayobject_vbvh.cpp create mode 100644 source/blender/render/intern/raytrace/reorganize.h create mode 100644 source/blender/render/intern/raytrace/svbvh.h create mode 100644 source/blender/render/intern/raytrace/vbvh.h delete mode 100644 source/blender/render/intern/source/raytrace.c diff --git a/CMake/macros.cmake b/CMake/macros.cmake index 6a337505c00..150bd55bfd7 100644 --- a/CMake/macros.cmake +++ b/CMake/macros.cmake @@ -85,33 +85,32 @@ ENDMACRO(SETUP_LIBDIRS) MACRO(SETUP_LIBLINKS target) SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${PLATFORM_LINKFLAGS} ") - #TARGET_LINK_LIBRARIES(${target} ${OPENGL_gl_LIBRARY} ${OPENGL_glu_LIBRARY} ${PYTHON_LIB} ${PYTHON_LINKFLAGS} ${JPEG_LIB} 
${PNG_LIB} ${ZLIB_LIB} ${SDL_LIBRARY} ${LLIBS}) - TARGET_LINK_LIBRARIES(${target} ${OPENGL_gl_LIBRARY} ${OPENGL_glu_LIBRARY} ${PYTHON_LINKFLAGS} ${JPEG_LIBRARY} ${PNG_LIBRARIES} ${ZLIB_LIBRARIES} ${LLIBS}) + TARGET_LINK_LIBRARIES(${target} ${OPENGL_gl_LIBRARY} ${OPENGL_glu_LIBRARY} ${JPEG_LIBRARY} ${PNG_LIBRARIES} ${ZLIB_LIBRARIES} ${LLIBS}) # since we are using the local libs for python when compiling msvc projects, we need to add _d when compiling debug versions - IF(WIN32) - TARGET_LINK_LIBRARIES(${target} debug ${PYTHON_LIB}_d) - TARGET_LINK_LIBRARIES(${target} optimized ${PYTHON_LIB}) - ELSE(WIN32) - TARGET_LINK_LIBRARIES(${target} ${PYTHON_LIB}) - ENDIF(WIN32) + IF(WITH_PYTHON) + TARGET_LINK_LIBRARIES(${target} ${PYTHON_LINKFLAGS}) - TARGET_LINK_LIBRARIES(${target} ${OPENGL_gl_LIBRARY} ${OPENGL_glu_LIBRARY} ${PYTHON_LINKFLAGS} ${JPEG_LIB} ${PNG_LIB} ${ZLIB_LIB} ${LLIBS}) + IF(WIN32) + TARGET_LINK_LIBRARIES(${target} debug ${PYTHON_LIB}_d) + TARGET_LINK_LIBRARIES(${target} optimized ${PYTHON_LIB}) + ELSE(WIN32) + TARGET_LINK_LIBRARIES(${target} ${PYTHON_LIB}) + ENDIF(WIN32) + ENDIF(WITH_PYTHON) + + TARGET_LINK_LIBRARIES(${target} ${OPENGL_glu_LIBRARY} ${JPEG_LIB} ${PNG_LIB} ${ZLIB_LIB}) TARGET_LINK_LIBRARIES(${target} ${FREETYPE_LIBRARY} ${LIBSAMPLERATE_LIB}) - # since we are using the local libs for python when compiling msvc projects, we need to add _d when compiling debug versions - - IF(WIN32) - TARGET_LINK_LIBRARIES(${target} debug ${PYTHON_LIB}_d) - TARGET_LINK_LIBRARIES(${target} optimized ${PYTHON_LIB}) - ELSE(WIN32) - TARGET_LINK_LIBRARIES(${target} ${PYTHON_LIB}) - ENDIF(WIN32) - IF(WITH_INTERNATIONAL) TARGET_LINK_LIBRARIES(${target} ${GETTEXT_LIB}) + + IF(WIN32) + TARGET_LINK_LIBRARIES(${target} ${ICONV_LIB}) + ENDIF(WIN32) ENDIF(WITH_INTERNATIONAL) + IF(WITH_OPENAL) TARGET_LINK_LIBRARIES(${target} ${OPENAL_LIBRARY}) ENDIF(WITH_OPENAL) @@ -127,9 +126,6 @@ MACRO(SETUP_LIBLINKS IF(WITH_SDL) TARGET_LINK_LIBRARIES(${target} ${SDL_LIBRARY}) ENDIF(WITH_SDL) - IF(WIN32) - TARGET_LINK_LIBRARIES(${target} ${ICONV_LIB}) - ENDIF(WIN32) IF(WITH_QUICKTIME) TARGET_LINK_LIBRARIES(${target} ${QUICKTIME_LIB}) ENDIF(WITH_QUICKTIME) diff --git a/CMakeLists.txt b/CMakeLists.txt index c881dec03db..d53f4ed9966 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -51,6 +51,10 @@ PROJECT(Blender) SET(EXECUTABLE_OUTPUT_PATH ${CMAKE_BINARY_DIR}/bin) SET(LIBRARY_OUTPUT_PATH ${CMAKE_BINARY_DIR}/lib) +# Note! - Could create this from the blender version string +# ...but thats quite involved, make sure this matches the blender version. 
+SET(BLENDER_VERSION 2.5) + #----------------------------------------------------------------------------- # Set default config options OPTION(WITH_PLAYER "Build Player" OFF) @@ -71,10 +75,16 @@ OPTION(WITH_WEBPLUGIN "Enable Web Plugin (Unix only)" OFF) OPTION(WITH_FFTW3 "Enable FFTW3 support" OFF) OPTION(WITH_JACK "Enable Jack Support (http://www.jackaudio.org)" OFF) OPTION(WITH_SNDFILE "Enable libsndfile Support (http://www.mega-nerd.com/libsndfile)" OFF) +OPTION(WITH_LZO "Enable fast LZO compression, used for pointcache" ON) +OPTION(WITH_LZMA "Enable best LZMA compression, used for pointcache" ON) OPTION(WITH_CXX_GUARDEDALLOC "Enable GuardedAlloc for C++ memory allocation" OFF) -# OPTION(WITH_BUILDINFO "Include extra build details" ON) +OPTION(WITH_BUILDINFO "Include extra build details" ON) OPTION(WITH_INSTALL "Install accompanying scripts and language files needed to run blender" ON) +IF (APPLE) +OPTION(WITH_COCOA "Use Cocoa framework instead of deprecated Carbon" OFF) +ENDIF (APPLE) + IF(NOT WITH_GAMEENGINE AND WITH_PLAYER) MESSAGE("WARNING: WITH_PLAYER needs WITH_GAMEENGINE") ENDIF(NOT WITH_GAMEENGINE AND WITH_PLAYER) @@ -396,6 +406,7 @@ IF(APPLE) FIND_PACKAGE(OpenAL) IF(OPENAL_FOUND) SET(WITH_OPENAL ON) + SET(OPENAL_INCLUDE_DIR "${OPENAL_INCLUDE_DIR};${LIBDIR}/openal/include") ELSE(OPENAL_FOUND) SET(WITH_OPENAL OFF) ENDIF(OPENAL_FOUND) @@ -479,9 +490,13 @@ IF(APPLE) SET(LLIBS stdc++ SystemStubs) + IF (WITH_COCOA) + SET(PLATFORM_CFLAGS "-pipe -fPIC -funsigned-char -fno-strict-aliasing -DGHOST_COCOA") + SET(PLATFORM_LINKFLAGS "-fexceptions -framework CoreServices -framework Foundation -framework IOKit -framework AppKit -framework Cocoa -framework Carbon -framework AudioUnit -framework AudioToolbox -framework CoreAudio -framework QuickTime") + ELSE (WITH_COCOA) SET(PLATFORM_CFLAGS "-pipe -fPIC -funsigned-char -fno-strict-aliasing") SET(PLATFORM_LINKFLAGS "-fexceptions -framework CoreServices -framework Foundation -framework IOKit -framework AppKit -framework Carbon -framework AGL -framework AudioUnit -framework AudioToolbox -framework CoreAudio -framework QuickTime") - + ENDIF (WITH_COCOA) IF(WITH_OPENMP) SET(LLIBS "${LLIBS} -lgomp ") SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fopenmp ") @@ -513,15 +528,15 @@ IF(CMAKE_SYSTEM_NAME MATCHES "Linux") ENDIF(CMAKE_SYSTEM_NAME MATCHES "Linux") -# TODO - buildinfo -# IF(UNIX) -# IF(WITH_BUILDINFO) -# EXEC_PROGRAM("date \"+%Y-%m-%d\"" OUTPUT_VARIABLE BUILD_DATE) -# EXEC_PROGRAM("date \"+%H:%M:%S\"" OUTPUT_VARIABLE BUILD_TIME) -# EXEC_PROGRAM("svnversion ${CMAKE_SOURCE_DIR}" OUTPUT_VARIABLE BUILD_REV) -# SET(BUILD_TYPE ${CMAKE_BUILD_TYPE}) -# ENDIF(WITH_BUILDINFO) -# ENDIF(UNIX) +# buildinfo +IF(UNIX) + IF(WITH_BUILDINFO) + EXEC_PROGRAM("date \"+%Y-%m-%d\"" OUTPUT_VARIABLE BUILD_DATE) + EXEC_PROGRAM("date \"+%H:%M:%S\"" OUTPUT_VARIABLE BUILD_TIME) + EXEC_PROGRAM("svnversion ${CMAKE_SOURCE_DIR}" OUTPUT_VARIABLE BUILD_REV) + # BUILD_PLATFORM and BUILD_PLATFORM are taken from CMake + ENDIF(WITH_BUILDINFO) +ENDIF(UNIX) #----------------------------------------------------------------------------- # Common. 
diff --git a/SConstruct b/SConstruct index 19bcee524f4..1385e3e7128 100644 --- a/SConstruct +++ b/SConstruct @@ -186,6 +186,15 @@ if not env['BF_FANCY']: SetOption('num_jobs', int(env['BF_NUMJOBS'])) print "Build with %d parallel jobs" % (GetOption('num_jobs')) +# BLENDERPATH is a unix only option to enable typical style paths this is +# spesifically a data-dir, which is used a lot but cant replace BF_INSTALLDIR +# because the blender binary is installed in $BF_INSTALLDIR/bin/blender + +if env['WITH_BF_FHS']: + BLENDERPATH = os.path.join(env['BF_INSTALLDIR'], 'share', 'blender', env['BF_VERSION']) +else: + BLENDERPATH = env['BF_INSTALLDIR'] + # disable elbeem (fluidsim) compilation? if env['BF_NO_ELBEEM'] == 1: env['CPPFLAGS'].append('-DDISABLE_ELBEEM') @@ -198,7 +207,7 @@ if env['WITH_BF_OPENMP'] == 1: env['CPPFLAGS'].append('/openmp') env['CXXFLAGS'].append('/openmp') else: - if env['CC'][-3:] == 'icc': # to be able to handle CC=/opt/bla/icc case + if env['CC'].endswith('icc'): # to be able to handle CC=/opt/bla/icc case env.Append(LINKFLAGS=['-openmp', '-static-intel']) env['CCFLAGS'].append('-openmp') env['CPPFLAGS'].append('-openmp') @@ -301,7 +310,7 @@ if env['WITH_BF_SDL'] == False and env['OURPLATFORM'] in ('win32-vc', 'win32-min # lastly we check for root_build_dir ( we should not do before, otherwise we might do wrong builddir B.root_build_dir = env['BF_BUILDDIR'] -B.doc_build_dir = env['BF_DOCDIR'] +B.doc_build_dir = os.path.join(BLENDERPATH, 'doc') if not B.root_build_dir[-1]==os.sep: B.root_build_dir += os.sep if not B.doc_build_dir[-1]==os.sep: @@ -426,7 +435,10 @@ if env['OURPLATFORM']=='darwin': source=[dp+os.sep+f for f in df] blenderinstall.append(env.Install(dir=dir,source=source)) else: - blenderinstall = env.Install(dir=env['BF_INSTALLDIR'], source=B.program_list) + if env['WITH_BF_FHS']: dir= os.path.join(env['BF_INSTALLDIR'], 'bin') + else: dir= env['BF_INSTALLDIR'] + + blenderinstall = env.Install(dir=dir, source=B.program_list) #-- .blender #- dont do .blender and scripts for darwin, it is already in the bundle @@ -450,7 +462,13 @@ if env['OURPLATFORM']!='darwin': continue dotblendlist.append(os.path.join(dp, f)) - dottargetlist.append(env['BF_INSTALLDIR']+dp[3:]+os.sep+f) + if env['WITH_BF_FHS']: dir= os.path.join(*([BLENDERPATH] + dp.split(os.sep)[2:])) # skip bin/.blender + else: dir= os.path.join(*([BLENDERPATH] + dp.split(os.sep)[1:])) # skip bin + + # print dir+ os.sep + f + print dir + dottargetlist.append(dir + os.sep + f) + dotblenderinstall = [] for targetdir,srcfile in zip(dottargetlist, dotblendlist): @@ -458,14 +476,18 @@ if env['OURPLATFORM']!='darwin': dotblenderinstall.append(env.Install(dir=td, source=srcfile)) if env['WITH_BF_PYTHON']: - #-- .blender/scripts, .blender/ui, .blender/io - scriptpaths=['release/scripts', 'release/ui', 'release/io'] + #-- .blender/scripts + scriptpaths=['release/scripts'] for scriptpath in scriptpaths: for dp, dn, df in os.walk(scriptpath): if '.svn' in dn: dn.remove('.svn') - dir=env['BF_INSTALLDIR']+'/.blender/'+os.path.basename(scriptpath)+dp[len(scriptpath):] - source=[dp+os.sep+f for f in df] + + if env['WITH_BF_FHS']: dir = BLENDERPATH + else: dir = os.path.join(env['BF_INSTALLDIR'], '.blender') + dir += os.sep + os.path.basename(scriptpath) + dp[len(scriptpath):] + + source=[os.path.join(dp, f) for f in df] scriptinstall.append(env.Install(dir=dir,source=source)) #-- icons @@ -477,8 +499,8 @@ if env['OURPLATFORM']=='linux2': if '.svn' in tn: tn.remove('.svn') for f in tf: - iconlist.append(tp+os.sep+f) - 
icontargetlist.append(env['BF_INSTALLDIR']+tp[19:]+os.sep+f) + iconlist.append(os.path.join(tp, f)) + icontargetlist.append( os.path.join(*([BLENDERPATH] + tp.split(os.sep)[2:] + [f])) ) iconinstall = [] for targetdir,srcfile in zip(icontargetlist, iconlist): @@ -499,24 +521,25 @@ for tp, tn, tf in os.walk('release/plugins'): if '.svn' in tn: tn.remove('.svn') for f in tf: - pluglist.append(tp+os.sep+f) - plugtargetlist.append(env['BF_INSTALLDIR']+tp[7:]+os.sep+f) + pluglist.append(os.path.join(tp, f)) + plugtargetlist.append( os.path.join(*([BLENDERPATH] + tp.split(os.sep)[1:] + [f])) ) + # header files for plugins pluglist.append('source/blender/blenpluginapi/documentation.h') -plugtargetlist.append(env['BF_INSTALLDIR'] + os.sep + 'plugins' + os.sep + 'include' + os.sep +'documentation.h') +plugtargetlist.append(os.path.join(BLENDERPATH, 'plugins', 'include', 'documentation.h')) pluglist.append('source/blender/blenpluginapi/externdef.h') -plugtargetlist.append(env['BF_INSTALLDIR'] + os.sep + 'plugins' + os.sep + 'include' + os.sep +'externdef.h') +plugtargetlist.append(os.path.join(BLENDERPATH, 'plugins', 'include', 'externdef.h')) pluglist.append('source/blender/blenpluginapi/floatpatch.h') -plugtargetlist.append(env['BF_INSTALLDIR'] + os.sep + 'plugins' + os.sep + 'include' + os.sep +'floatpatch.h') +plugtargetlist.append(os.path.join(BLENDERPATH, 'plugins', 'include', 'floatpatch.h')) pluglist.append('source/blender/blenpluginapi/iff.h') -plugtargetlist.append(env['BF_INSTALLDIR'] + os.sep + 'plugins' + os.sep + 'include' + os.sep +'iff.h') +plugtargetlist.append(os.path.join(BLENDERPATH, 'plugins', 'include', 'iff.h')) pluglist.append('source/blender/blenpluginapi/plugin.h') -plugtargetlist.append(env['BF_INSTALLDIR'] + os.sep + 'plugins' + os.sep + 'include' + os.sep +'plugin.h') +plugtargetlist.append(os.path.join(BLENDERPATH, 'plugins', 'include', 'plugin.h')) pluglist.append('source/blender/blenpluginapi/util.h') -plugtargetlist.append(env['BF_INSTALLDIR'] + os.sep + 'plugins' + os.sep + 'include' + os.sep +'util.h') +plugtargetlist.append(os.path.join(BLENDERPATH, 'plugins', 'include', 'util.h')) pluglist.append('source/blender/blenpluginapi/plugin.DEF') -plugtargetlist.append(env['BF_INSTALLDIR'] + os.sep + 'plugins' + os.sep + 'include' + os.sep + 'plugin.def') +plugtargetlist.append(os.path.join(BLENDERPATH, 'plugins', 'include', 'plugin.def')) plugininstall = [] for targetdir,srcfile in zip(plugtargetlist, pluglist): @@ -531,7 +554,7 @@ for tp, tn, tf in os.walk('release/text'): for f in tf: textlist.append(tp+os.sep+f) -textinstall = env.Install(dir=env['BF_INSTALLDIR'], source=textlist) +textinstall = env.Install(dir=BLENDERPATH, source=textlist) if env['OURPLATFORM']=='darwin': allinstall = [blenderinstall, plugininstall, textinstall] @@ -555,14 +578,10 @@ if env['OURPLATFORM'] in ('win32-vc', 'win32-mingw', 'win64-vc'): else: dllsources.append('${BF_SDL_LIBPATH}/SDL.dll') if env['WITH_BF_PYTHON']: - ver = env["BF_PYTHON_VERSION"].replace(".", "") - - dllsources.append('${LCGDIR}/release/python' + ver + '.zip') - dllsources.append('${LCGDIR}/release/zlib.pyd') if env['BF_DEBUG'] and not env["BF_NO_PYDEBUG"]: - dllsources.append('${BF_PYTHON_LIBPATH}/${BF_PYTHON_LIB}_d.dll') + dllsources.append('${BF_PYTHON_LIBPATH}/${BF_PYTHON_DLL}_d.dll') else: - dllsources.append('${BF_PYTHON_LIBPATH}/${BF_PYTHON_LIB}.dll') + dllsources.append('${BF_PYTHON_LIBPATH}/${BF_PYTHON_DLL}.dll') if env['WITH_BF_ICONV']: if env['OURPLATFORM'] == 'win64-vc': pass # we link statically to iconv on 
win64 @@ -620,7 +639,6 @@ if env['WITH_BF_DOCS']: except: epydoc = None if epydoc: - SConscript('source/blender/python/api2_2x/doc/SConscript') SConscript('source/gameengine/PyDoc/SConscript') else: print "No epydoc install detected, Python API and Gameengine API Docs will not be generated " diff --git a/config/darwin-config.py b/config/darwin-config.py index 92f70d716fc..981f321e5bc 100644 --- a/config/darwin-config.py +++ b/config/darwin-config.py @@ -274,4 +274,3 @@ BF_DEBUG_CCFLAGS = ['-g'] BF_BUILDDIR='../build/darwin' BF_INSTALLDIR='../install/darwin' -BF_DOCDIR='../install/doc' diff --git a/config/irix6-config.py b/config/irix6-config.py index d38665f282a..085d1dd1e62 100644 --- a/config/irix6-config.py +++ b/config/irix6-config.py @@ -189,7 +189,6 @@ BF_DEBUG_FLAGS = '-g' BF_BUILDDIR = '../build/irix6' BF_INSTALLDIR='../install/irix6' -BF_DOCDIR='../install/doc' #Link against pthread LDIRS = [] diff --git a/config/linux2-config.py b/config/linux2-config.py index 757b8210e49..026d0a200a5 100644 --- a/config/linux2-config.py +++ b/config/linux2-config.py @@ -189,8 +189,6 @@ BF_DEBUG_CCFLAGS = ['-g'] BF_BUILDDIR = '../build/linux2' BF_INSTALLDIR='../install/linux2' -BF_DOCDIR='../install/doc' - #Link against pthread PLATFORM_LINKFLAGS = ['-pthread'] diff --git a/config/linuxcross-config.py b/config/linuxcross-config.py index a7ce2dc2908..a5c83dc3503 100644 --- a/config/linuxcross-config.py +++ b/config/linuxcross-config.py @@ -139,4 +139,3 @@ BF_PROFILE_LINKFLAGS = ['-pg'] BF_BUILDDIR = '../build/linuxcross' BF_INSTALLDIR='../install/linuxcross' -BF_DOCDIR='../install/doc' diff --git a/config/openbsd3-config.py b/config/openbsd3-config.py index 95649321c07..353d30f50b3 100644 --- a/config/openbsd3-config.py +++ b/config/openbsd3-config.py @@ -151,4 +151,3 @@ BF_DEBUG_CCFLAGS = ['-g'] BF_BUILDDIR='../build/openbsd3' BF_INSTALLDIR='../install/openbsd3' -BF_DOCDIR='../install/doc' diff --git a/config/sunos5-config.py b/config/sunos5-config.py index 8af30e4f4f3..8e4c53b5bc4 100644 --- a/config/sunos5-config.py +++ b/config/sunos5-config.py @@ -165,7 +165,6 @@ BF_DEBUG_CCFLAGS = [] BF_BUILDDIR = '../build/sunos5' BF_INSTALLDIR='../install/sunos5' -BF_DOCDIR='../install/doc' PLATFORM_LINKFLAGS = [] diff --git a/config/win32-mingw-config.py b/config/win32-mingw-config.py index e3834c41a81..6b10b410715 100644 --- a/config/win32-mingw-config.py +++ b/config/win32-mingw-config.py @@ -6,7 +6,8 @@ BF_PYTHON_VERSION = '3.1' WITH_BF_STATICPYTHON = False BF_PYTHON_INC = '${BF_PYTHON}/include/python${BF_PYTHON_VERSION}' BF_PYTHON_BINARY = 'python' -BF_PYTHON_LIB = 'python${BF_PYTHON_VERSION[0]}${BF_PYTHON_VERSION[2]}' +BF_PYTHON_LIB = 'python${BF_PYTHON_VERSION[0]}${BF_PYTHON_VERSION[2]}mw' +BF_PYTHON_DLL = 'python31' BF_PYTHON_LIBPATH = '${BF_PYTHON}/lib' BF_PYTHON_LIB_STATIC = '${BF_PYTHON}/lib/libpython${BF_PYTHON_VERSION[0]}${BF_PYTHON_VERSION[2]}.a' @@ -152,4 +153,3 @@ BF_PROFILE = False BF_BUILDDIR = '..\\build\\win32-mingw' BF_INSTALLDIR='..\\install\\win32-mingw' -BF_DOCDIR = '..\\install\\doc' diff --git a/config/win32-vc-config.py b/config/win32-vc-config.py index d5e8ada060d..bfb8930553d 100644 --- a/config/win32-vc-config.py +++ b/config/win32-vc-config.py @@ -13,6 +13,7 @@ BF_PYTHON_VERSION = '3.1' BF_PYTHON_INC = '${BF_PYTHON}/include/python${BF_PYTHON_VERSION}' BF_PYTHON_BINARY = 'python' BF_PYTHON_LIB = 'python31' +BF_PYTHON_DLL = '${BF_PYTHON_LIB}' BF_PYTHON_LIBPATH = '${BF_PYTHON}/lib' WITH_BF_OPENAL = True @@ -174,4 +175,3 @@ PLATFORM_LINKFLAGS = 
['/SUBSYSTEM:CONSOLE','/MACHINE:IX86','/INCREMENTAL:YES','/ BF_BUILDDIR = '..\\build\\win32-vc' BF_INSTALLDIR='..\\install\\win32-vc' -BF_DOCDIR='..\\install\\doc' diff --git a/config/win64-vc-config.py b/config/win64-vc-config.py index ce2fd8cd405..5f088489b34 100644 --- a/config/win64-vc-config.py +++ b/config/win64-vc-config.py @@ -13,6 +13,7 @@ BF_PYTHON_VERSION = '3.1' BF_PYTHON_INC = '${BF_PYTHON}/include/python${BF_PYTHON_VERSION}' BF_PYTHON_BINARY = 'python' BF_PYTHON_LIB = 'python31' +BF_PYTHON_DLL = '${BF_PYTHON_LIB}' BF_PYTHON_LIBPATH = '${BF_PYTHON}/lib' WITH_BF_OPENAL = False @@ -163,7 +164,7 @@ CXX = 'cl.exe' CFLAGS = [] CCFLAGS = ['/nologo', '/Ob1', '/J', '/W3', '/Gd', '/wd4244', '/wd4305', '/wd4800', '/wd4065', '/wd4267'] CXXFLAGS = ['/EHsc'] -BGE_CXXFLAGS = ['/O2', '/EHsc', '/GR', '/fp:fast', '/arch:SSE2'] +BGE_CXXFLAGS = ['/O2', '/EHsc', '/GR', '/fp:fast'] BF_DEBUG_CCFLAGS = ['/Zi', '/FR${TARGET}.sbr'] @@ -192,7 +193,6 @@ PLATFORM_LINKFLAGS = ['/SUBSYSTEM:CONSOLE','/MACHINE:X64','/INCREMENTAL:NO','/NO BF_BUILDDIR = '..\\build\\blender25-win64-vc' BF_INSTALLDIR='..\\install\\blender25-win64-vc' -BF_DOCDIR='..\\install\\blender25-win64-vc\\doc' diff --git a/extern/CMakeLists.txt b/extern/CMakeLists.txt index 44e47aaf88d..35271d24a2d 100644 --- a/extern/CMakeLists.txt +++ b/extern/CMakeLists.txt @@ -38,5 +38,10 @@ IF(WITH_OPENJPEG) ADD_SUBDIRECTORY(libopenjpeg) ENDIF(WITH_OPENJPEG) -ADD_SUBDIRECTORY(lzo) -ADD_SUBDIRECTORY(lzma) +IF(WITH_LZO) + ADD_SUBDIRECTORY(lzo) +ENDIF(WITH_LZO) + +IF(WITH_LZMA) + ADD_SUBDIRECTORY(lzma) +ENDIF(WITH_LZMA) diff --git a/extern/Eigen2/Eigen/Array b/extern/Eigen2/Eigen/Array new file mode 100644 index 00000000000..c847f9521fe --- /dev/null +++ b/extern/Eigen2/Eigen/Array @@ -0,0 +1,39 @@ +#ifndef EIGEN_ARRAY_MODULE_H +#define EIGEN_ARRAY_MODULE_H + +#include "Core" + +#include "src/Core/util/DisableMSVCWarnings.h" + +namespace Eigen { + +/** \defgroup Array_Module Array module + * This module provides several handy features to manipulate matrices as simple array of values. + * In addition to listed classes, it defines various methods of the Cwise interface + * (accessible from MatrixBase::cwise()), including: + * - matrix-scalar sum, + * - coeff-wise comparison operators, + * - sin, cos, sqrt, pow, exp, log, square, cube, inverse (reciprocal). 
+ * + * This module also provides various MatrixBase methods, including: + * - \ref MatrixBase::all() "all", \ref MatrixBase::any() "any", + * - \ref MatrixBase::Random() "random matrix initialization" + * + * \code + * #include + * \endcode + */ + +#include "src/Array/CwiseOperators.h" +#include "src/Array/Functors.h" +#include "src/Array/BooleanRedux.h" +#include "src/Array/Select.h" +#include "src/Array/PartialRedux.h" +#include "src/Array/Random.h" +#include "src/Array/Norms.h" + +} // namespace Eigen + +#include "src/Core/util/EnableMSVCWarnings.h" + +#endif // EIGEN_ARRAY_MODULE_H diff --git a/extern/Eigen2/Eigen/Cholesky b/extern/Eigen2/Eigen/Cholesky new file mode 100644 index 00000000000..f1806f726c7 --- /dev/null +++ b/extern/Eigen2/Eigen/Cholesky @@ -0,0 +1,65 @@ +#ifndef EIGEN_CHOLESKY_MODULE_H +#define EIGEN_CHOLESKY_MODULE_H + +#include "Core" + +#include "src/Core/util/DisableMSVCWarnings.h" + +// Note that EIGEN_HIDE_HEAVY_CODE has to be defined per module +#if (defined EIGEN_EXTERN_INSTANTIATIONS) && (EIGEN_EXTERN_INSTANTIATIONS>=2) + #ifndef EIGEN_HIDE_HEAVY_CODE + #define EIGEN_HIDE_HEAVY_CODE + #endif +#elif defined EIGEN_HIDE_HEAVY_CODE + #undef EIGEN_HIDE_HEAVY_CODE +#endif + +namespace Eigen { + +/** \defgroup Cholesky_Module Cholesky module + * + * \nonstableyet + * + * This module provides two variants of the Cholesky decomposition for selfadjoint (hermitian) matrices. + * Those decompositions are accessible via the following MatrixBase methods: + * - MatrixBase::llt(), + * - MatrixBase::ldlt() + * + * \code + * #include + * \endcode + */ + +#include "src/Array/CwiseOperators.h" +#include "src/Array/Functors.h" +#include "src/Cholesky/LLT.h" +#include "src/Cholesky/LDLT.h" + +} // namespace Eigen + +#define EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(MATRIXTYPE,PREFIX) \ + PREFIX template class LLT; \ + PREFIX template class LDLT + +#define EIGEN_CHOLESKY_MODULE_INSTANTIATE(PREFIX) \ + EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(Matrix2f,PREFIX); \ + EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(Matrix2d,PREFIX); \ + EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(Matrix3f,PREFIX); \ + EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(Matrix3d,PREFIX); \ + EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(Matrix4f,PREFIX); \ + EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(Matrix4d,PREFIX); \ + EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(MatrixXf,PREFIX); \ + EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(MatrixXd,PREFIX); \ + EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(MatrixXcf,PREFIX); \ + EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(MatrixXcd,PREFIX) + +#ifdef EIGEN_EXTERN_INSTANTIATIONS + +namespace Eigen { + EIGEN_CHOLESKY_MODULE_INSTANTIATE(extern); +} // namespace Eigen +#endif + +#include "src/Core/util/EnableMSVCWarnings.h" + +#endif // EIGEN_CHOLESKY_MODULE_H diff --git a/extern/Eigen2/Eigen/Core b/extern/Eigen2/Eigen/Core new file mode 100644 index 00000000000..f5e315a2c9d --- /dev/null +++ b/extern/Eigen2/Eigen/Core @@ -0,0 +1,154 @@ +#ifndef EIGEN_CORE_H +#define EIGEN_CORE_H + +// first thing Eigen does: prevent MSVC from committing suicide +#include "src/Core/util/DisableMSVCWarnings.h" + +#ifdef _MSC_VER + #include // for _aligned_malloc -- need it regardless of whether vectorization is enabled + #if (_MSC_VER >= 1500) // 2008 or later + // Remember that usage of defined() in a #define is undefined by the standard. + // a user reported that in 64-bit mode, MSVC doesn't care to define _M_IX86_FP. 
+ #if (defined(_M_IX86_FP) && (_M_IX86_FP >= 2)) || defined(_M_X64) + #define EIGEN_SSE2_ON_MSVC_2008_OR_LATER + #endif + #endif +#endif + +#ifdef __GNUC__ + #define EIGEN_GNUC_AT_LEAST(x,y) ((__GNUC__>=x && __GNUC_MINOR__>=y) || __GNUC__>x) +#else + #define EIGEN_GNUC_AT_LEAST(x,y) 0 +#endif + +// Remember that usage of defined() in a #define is undefined by the standard +#if (defined __SSE2__) && ( (!defined __GNUC__) || EIGEN_GNUC_AT_LEAST(4,2) ) + #define EIGEN_SSE2_BUT_NOT_OLD_GCC +#endif + +#ifndef EIGEN_DONT_VECTORIZE + #if defined (EIGEN_SSE2_BUT_NOT_OLD_GCC) || defined(EIGEN_SSE2_ON_MSVC_2008_OR_LATER) + #define EIGEN_VECTORIZE + #define EIGEN_VECTORIZE_SSE + #include + #include + #ifdef __SSE3__ + #include + #endif + #ifdef __SSSE3__ + #include + #endif + #elif defined __ALTIVEC__ + #define EIGEN_VECTORIZE + #define EIGEN_VECTORIZE_ALTIVEC + #include + // We need to #undef all these ugly tokens defined in + // => use __vector instead of vector + #undef bool + #undef vector + #undef pixel + #endif +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if (defined(_CPPUNWIND) || defined(__EXCEPTIONS)) && !defined(EIGEN_NO_EXCEPTIONS) + #define EIGEN_EXCEPTIONS +#endif + +#ifdef EIGEN_EXCEPTIONS + #include +#endif + +// this needs to be done after all possible windows C header includes and before any Eigen source includes +// (system C++ includes are supposed to be able to deal with this already): +// windows.h defines min and max macros which would make Eigen fail to compile. +#if defined(min) || defined(max) +#error The preprocessor symbols 'min' or 'max' are defined. If you are compiling on Windows, do #define NOMINMAX to prevent windows.h from defining these symbols. +#endif + +namespace Eigen { + +/** \defgroup Core_Module Core module + * This is the main module of Eigen providing dense matrix and vector support + * (both fixed and dynamic size) with all the features corresponding to a BLAS library + * and much more... 
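Not part of the patch: a short sketch of the dense Core API whose headers are listed below (comma initializer, blocks, products, transpose), assuming the Eigen 2 conventions used throughout these files.

    #include <Eigen/Core>
    #include <iostream>
    using namespace Eigen;

    int main()
    {
        Matrix3f a;                                  // fixed-size 3x3 of float (Matrix.h)
        a << 1, 2, 3,
             4, 5, 6,                                // comma initializer (CommaInitializer.h)
             7, 8, 9;

        MatrixXf b = MatrixXf::Zero(3, 3);           // dynamic-size, zero-initialized (CwiseNullaryOp.h)
        b.block<2, 2>(0, 0) = a.block<2, 2>(1, 1);   // fixed-size block views (Block.h)

        Vector3f v(1, 2, 3);
        std::cout << a * v << "\n";                  // matrix-vector product (Product.h)
        std::cout << a.transpose() << "\n";          // transpose expression (Transpose.h)
        std::cout << v.dot(v) << "\n";               // dot product (Dot.h)
        return 0;
    }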
+ * + * \code + * #include + * \endcode + */ + +#include "src/Core/util/Macros.h" +#include "src/Core/util/Constants.h" +#include "src/Core/util/ForwardDeclarations.h" +#include "src/Core/util/Meta.h" +#include "src/Core/util/XprHelper.h" +#include "src/Core/util/StaticAssert.h" +#include "src/Core/util/Memory.h" + +#include "src/Core/NumTraits.h" +#include "src/Core/MathFunctions.h" +#include "src/Core/GenericPacketMath.h" + +#if defined EIGEN_VECTORIZE_SSE + #include "src/Core/arch/SSE/PacketMath.h" +#elif defined EIGEN_VECTORIZE_ALTIVEC + #include "src/Core/arch/AltiVec/PacketMath.h" +#endif + +#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD +#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 16 +#endif + +#include "src/Core/Functors.h" +#include "src/Core/MatrixBase.h" +#include "src/Core/Coeffs.h" + +#ifndef EIGEN_PARSED_BY_DOXYGEN // work around Doxygen bug triggered by Assign.h r814874 + // at least confirmed with Doxygen 1.5.5 and 1.5.6 + #include "src/Core/Assign.h" +#endif + +#include "src/Core/MatrixStorage.h" +#include "src/Core/NestByValue.h" +#include "src/Core/Flagged.h" +#include "src/Core/Matrix.h" +#include "src/Core/Cwise.h" +#include "src/Core/CwiseBinaryOp.h" +#include "src/Core/CwiseUnaryOp.h" +#include "src/Core/CwiseNullaryOp.h" +#include "src/Core/Dot.h" +#include "src/Core/Product.h" +#include "src/Core/DiagonalProduct.h" +#include "src/Core/SolveTriangular.h" +#include "src/Core/MapBase.h" +#include "src/Core/Map.h" +#include "src/Core/Block.h" +#include "src/Core/Minor.h" +#include "src/Core/Transpose.h" +#include "src/Core/DiagonalMatrix.h" +#include "src/Core/DiagonalCoeffs.h" +#include "src/Core/Sum.h" +#include "src/Core/Redux.h" +#include "src/Core/Visitor.h" +#include "src/Core/Fuzzy.h" +#include "src/Core/IO.h" +#include "src/Core/Swap.h" +#include "src/Core/CommaInitializer.h" +#include "src/Core/Part.h" +#include "src/Core/CacheFriendlyProduct.h" + +} // namespace Eigen + +#include "src/Core/util/EnableMSVCWarnings.h" + +#endif // EIGEN_CORE_H diff --git a/extern/Eigen2/Eigen/Dense b/extern/Eigen2/Eigen/Dense new file mode 100644 index 00000000000..9655edcd7aa --- /dev/null +++ b/extern/Eigen2/Eigen/Dense @@ -0,0 +1,8 @@ +#include "Core" +#include "Array" +#include "LU" +#include "Cholesky" +#include "QR" +#include "SVD" +#include "Geometry" +#include "LeastSquares" diff --git a/extern/Eigen2/Eigen/Eigen b/extern/Eigen2/Eigen/Eigen new file mode 100644 index 00000000000..654c8dc6380 --- /dev/null +++ b/extern/Eigen2/Eigen/Eigen @@ -0,0 +1,2 @@ +#include "Dense" +#include "Sparse" diff --git a/extern/Eigen2/Eigen/Geometry b/extern/Eigen2/Eigen/Geometry new file mode 100644 index 00000000000..617b25eb6f5 --- /dev/null +++ b/extern/Eigen2/Eigen/Geometry @@ -0,0 +1,51 @@ +#ifndef EIGEN_GEOMETRY_MODULE_H +#define EIGEN_GEOMETRY_MODULE_H + +#include "Core" + +#include "src/Core/util/DisableMSVCWarnings.h" + +#include "Array" +#include + +#ifndef M_PI +#define M_PI 3.14159265358979323846 +#endif + +namespace Eigen { + +/** \defgroup Geometry_Module Geometry module + * + * \nonstableyet + * + * This module provides support for: + * - fixed-size homogeneous transformations + * - translation, scaling, 2D and 3D rotations + * - quaternions + * - \ref MatrixBase::cross() "cross product" + * - \ref MatrixBase::unitOrthogonal() "orthognal vector generation" + * - some linear components: parametrized-lines and hyperplanes + * + * \code + * #include + * \endcode + */ + +#include "src/Geometry/OrthoMethods.h" +#include "src/Geometry/RotationBase.h" +#include 
"src/Geometry/Rotation2D.h" +#include "src/Geometry/Quaternion.h" +#include "src/Geometry/AngleAxis.h" +#include "src/Geometry/EulerAngles.h" +#include "src/Geometry/Transform.h" +#include "src/Geometry/Translation.h" +#include "src/Geometry/Scaling.h" +#include "src/Geometry/Hyperplane.h" +#include "src/Geometry/ParametrizedLine.h" +#include "src/Geometry/AlignedBox.h" + +} // namespace Eigen + +#include "src/Core/util/EnableMSVCWarnings.h" + +#endif // EIGEN_GEOMETRY_MODULE_H diff --git a/extern/Eigen2/Eigen/LU b/extern/Eigen2/Eigen/LU new file mode 100644 index 00000000000..0ce69456598 --- /dev/null +++ b/extern/Eigen2/Eigen/LU @@ -0,0 +1,29 @@ +#ifndef EIGEN_LU_MODULE_H +#define EIGEN_LU_MODULE_H + +#include "Core" + +#include "src/Core/util/DisableMSVCWarnings.h" + +namespace Eigen { + +/** \defgroup LU_Module LU module + * This module includes %LU decomposition and related notions such as matrix inversion and determinant. + * This module defines the following MatrixBase methods: + * - MatrixBase::inverse() + * - MatrixBase::determinant() + * + * \code + * #include + * \endcode + */ + +#include "src/LU/LU.h" +#include "src/LU/Determinant.h" +#include "src/LU/Inverse.h" + +} // namespace Eigen + +#include "src/Core/util/EnableMSVCWarnings.h" + +#endif // EIGEN_LU_MODULE_H diff --git a/extern/Eigen2/Eigen/LeastSquares b/extern/Eigen2/Eigen/LeastSquares new file mode 100644 index 00000000000..573a13cb42f --- /dev/null +++ b/extern/Eigen2/Eigen/LeastSquares @@ -0,0 +1,27 @@ +#ifndef EIGEN_REGRESSION_MODULE_H +#define EIGEN_REGRESSION_MODULE_H + +#include "Core" + +#include "src/Core/util/DisableMSVCWarnings.h" + +#include "QR" +#include "Geometry" + +namespace Eigen { + +/** \defgroup LeastSquares_Module LeastSquares module + * This module provides linear regression and related features. + * + * \code + * #include + * \endcode + */ + +#include "src/LeastSquares/LeastSquares.h" + +} // namespace Eigen + +#include "src/Core/util/EnableMSVCWarnings.h" + +#endif // EIGEN_REGRESSION_MODULE_H diff --git a/extern/Eigen2/Eigen/NewStdVector b/extern/Eigen2/Eigen/NewStdVector new file mode 100644 index 00000000000..f37de5ff673 --- /dev/null +++ b/extern/Eigen2/Eigen/NewStdVector @@ -0,0 +1,168 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2009 Gael Guennebaud +// Copyright (C) 2009 Hauke Heibel +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_STDVECTOR_MODULE_H +#define EIGEN_STDVECTOR_MODULE_H + +#include "Core" +#include + +namespace Eigen { + +// This one is needed to prevent reimplementing the whole std::vector. 
+template +class aligned_allocator_indirection : public aligned_allocator +{ +public: + typedef size_t size_type; + typedef ptrdiff_t difference_type; + typedef T* pointer; + typedef const T* const_pointer; + typedef T& reference; + typedef const T& const_reference; + typedef T value_type; + + template + struct rebind + { + typedef aligned_allocator_indirection other; + }; + + aligned_allocator_indirection() throw() {} + aligned_allocator_indirection(const aligned_allocator_indirection& ) throw() : aligned_allocator() {} + aligned_allocator_indirection(const aligned_allocator& ) throw() {} + template + aligned_allocator_indirection(const aligned_allocator_indirection& ) throw() {} + template + aligned_allocator_indirection(const aligned_allocator& ) throw() {} + ~aligned_allocator_indirection() throw() {} +}; + +#ifdef _MSC_VER + + // sometimes, MSVC detects, at compile time, that the argument x + // in std::vector::resize(size_t s,T x) won't be aligned and generate an error + // even if this function is never called. Whence this little wrapper. + #define EIGEN_WORKAROUND_MSVC_STD_VECTOR(T) Eigen::ei_workaround_msvc_std_vector + template struct ei_workaround_msvc_std_vector : public T + { + inline ei_workaround_msvc_std_vector() : T() {} + inline ei_workaround_msvc_std_vector(const T& other) : T(other) {} + inline operator T& () { return *static_cast(this); } + inline operator const T& () const { return *static_cast(this); } + template + inline T& operator=(const OtherT& other) + { T::operator=(other); return *this; } + inline ei_workaround_msvc_std_vector& operator=(const ei_workaround_msvc_std_vector& other) + { T::operator=(other); return *this; } + }; + +#else + + #define EIGEN_WORKAROUND_MSVC_STD_VECTOR(T) T + +#endif + +} + +namespace std { + +#define EIGEN_STD_VECTOR_SPECIALIZATION_BODY \ + public: \ + typedef T value_type; \ + typedef typename vector_base::allocator_type allocator_type; \ + typedef typename vector_base::size_type size_type; \ + typedef typename vector_base::iterator iterator; \ + typedef typename vector_base::const_iterator const_iterator; \ + explicit vector(const allocator_type& a = allocator_type()) : vector_base(a) {} \ + template \ + vector(InputIterator first, InputIterator last, const allocator_type& a = allocator_type()) \ + : vector_base(first, last, a) {} \ + vector(const vector& c) : vector_base(c) {} \ + explicit vector(size_type num, const value_type& val = value_type()) : vector_base(num, val) {} \ + vector(iterator start, iterator end) : vector_base(start, end) {} \ + vector& operator=(const vector& x) { \ + vector_base::operator=(x); \ + return *this; \ + } + +template +class vector > + : public vector > +{ + typedef vector > vector_base; + EIGEN_STD_VECTOR_SPECIALIZATION_BODY + + void resize(size_type new_size) + { resize(new_size, T()); } + +#if defined(_VECTOR_) + // workaround MSVC std::vector implementation + void resize(size_type new_size, const value_type& x) + { + if (vector_base::size() < new_size) + vector_base::_Insert_n(vector_base::end(), new_size - vector_base::size(), x); + else if (new_size < vector_base::size()) + vector_base::erase(vector_base::begin() + new_size, vector_base::end()); + } + void push_back(const value_type& x) + { vector_base::push_back(x); } + using vector_base::insert; + iterator insert(const_iterator position, const value_type& x) + { return vector_base::insert(position,x); } + void insert(const_iterator position, size_type new_size, const value_type& x) + { vector_base::insert(position, new_size, x); } +#elif 
defined(_GLIBCXX_VECTOR) && EIGEN_GNUC_AT_LEAST(4,2) + // workaround GCC std::vector implementation + void resize(size_type new_size, const value_type& x) + { + if (new_size < vector_base::size()) + vector_base::_M_erase_at_end(this->_M_impl._M_start + new_size); + else + vector_base::insert(vector_base::end(), new_size - vector_base::size(), x); + } +#elif defined(_GLIBCXX_VECTOR) && (!EIGEN_GNUC_AT_LEAST(4,1)) + // Note that before gcc-4.1 we already have: std::vector::resize(size_type,const T&), + // no no need to workaround ! + using vector_base::resize; +#else + // either GCC 4.1 or non-GCC + // default implementation which should always work. + void resize(size_type new_size, const value_type& x) + { + if (new_size < vector_base::size()) + vector_base::erase(vector_base::begin() + new_size, vector_base::end()); + else if (new_size > vector_base::size()) + vector_base::insert(vector_base::end(), new_size - vector_base::size(), x); + } +#endif + +}; + +} + +#endif // EIGEN_STDVECTOR_MODULE_H diff --git a/extern/Eigen2/Eigen/QR b/extern/Eigen2/Eigen/QR new file mode 100644 index 00000000000..97907d1e50f --- /dev/null +++ b/extern/Eigen2/Eigen/QR @@ -0,0 +1,73 @@ +#ifndef EIGEN_QR_MODULE_H +#define EIGEN_QR_MODULE_H + +#include "Core" + +#include "src/Core/util/DisableMSVCWarnings.h" + +#include "Cholesky" + +// Note that EIGEN_HIDE_HEAVY_CODE has to be defined per module +#if (defined EIGEN_EXTERN_INSTANTIATIONS) && (EIGEN_EXTERN_INSTANTIATIONS>=2) + #ifndef EIGEN_HIDE_HEAVY_CODE + #define EIGEN_HIDE_HEAVY_CODE + #endif +#elif defined EIGEN_HIDE_HEAVY_CODE + #undef EIGEN_HIDE_HEAVY_CODE +#endif + +namespace Eigen { + +/** \defgroup QR_Module QR module + * + * \nonstableyet + * + * This module mainly provides QR decomposition and an eigen value solver. 
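Not part of the patch: a sketch of this module's entry points, using MatrixBase::qr() as named in the doc comment above plus the SelfAdjointEigenSolver added below; the accessor names (isFullRank(), matrixQ(), eigenvalues(), eigenvectors()) follow Eigen 2 and should be treated as indicative.

    #include <Eigen/QR>
    #include <iostream>
    using namespace Eigen;

    int main()
    {
        Matrix3d A;
        A << 4, 1, 0,
             1, 3, 1,
             0, 1, 2;                                // symmetric, so the selfadjoint solver applies

        QR<Matrix3d> qr = A.qr();                    // QR decomposition (QR.h)
        std::cout << "full rank: " << qr.isFullRank() << "\n";
        std::cout << "Q:\n" << qr.matrixQ() << "\n";

        SelfAdjointEigenSolver<Matrix3d> es(A);      // SelfAdjointEigenSolver.h
        std::cout << "eigenvalues: " << es.eigenvalues().transpose() << "\n";
        std::cout << "eigenvectors:\n" << es.eigenvectors() << "\n";
        return 0;
    }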
+ * This module also provides some MatrixBase methods, including: + * - MatrixBase::qr(), + * - MatrixBase::eigenvalues(), + * - MatrixBase::operatorNorm() + * + * \code + * #include + * \endcode + */ + +#include "src/QR/QR.h" +#include "src/QR/Tridiagonalization.h" +#include "src/QR/EigenSolver.h" +#include "src/QR/SelfAdjointEigenSolver.h" +#include "src/QR/HessenbergDecomposition.h" + +// declare all classes for a given matrix type +#define EIGEN_QR_MODULE_INSTANTIATE_TYPE(MATRIXTYPE,PREFIX) \ + PREFIX template class QR; \ + PREFIX template class Tridiagonalization; \ + PREFIX template class HessenbergDecomposition; \ + PREFIX template class SelfAdjointEigenSolver + +// removed because it does not support complex yet +// PREFIX template class EigenSolver + +// declare all class for all types +#define EIGEN_QR_MODULE_INSTANTIATE(PREFIX) \ + EIGEN_QR_MODULE_INSTANTIATE_TYPE(Matrix2f,PREFIX); \ + EIGEN_QR_MODULE_INSTANTIATE_TYPE(Matrix2d,PREFIX); \ + EIGEN_QR_MODULE_INSTANTIATE_TYPE(Matrix3f,PREFIX); \ + EIGEN_QR_MODULE_INSTANTIATE_TYPE(Matrix3d,PREFIX); \ + EIGEN_QR_MODULE_INSTANTIATE_TYPE(Matrix4f,PREFIX); \ + EIGEN_QR_MODULE_INSTANTIATE_TYPE(Matrix4d,PREFIX); \ + EIGEN_QR_MODULE_INSTANTIATE_TYPE(MatrixXf,PREFIX); \ + EIGEN_QR_MODULE_INSTANTIATE_TYPE(MatrixXd,PREFIX); \ + EIGEN_QR_MODULE_INSTANTIATE_TYPE(MatrixXcf,PREFIX); \ + EIGEN_QR_MODULE_INSTANTIATE_TYPE(MatrixXcd,PREFIX) + +#ifdef EIGEN_EXTERN_INSTANTIATIONS + EIGEN_QR_MODULE_INSTANTIATE(extern); +#endif // EIGEN_EXTERN_INSTANTIATIONS + +} // namespace Eigen + +#include "src/Core/util/EnableMSVCWarnings.h" + +#endif // EIGEN_QR_MODULE_H diff --git a/extern/Eigen2/Eigen/QtAlignedMalloc b/extern/Eigen2/Eigen/QtAlignedMalloc new file mode 100644 index 00000000000..fde227328fa --- /dev/null +++ b/extern/Eigen2/Eigen/QtAlignedMalloc @@ -0,0 +1,29 @@ + +#ifndef EIGEN_QTMALLOC_MODULE_H +#define EIGEN_QTMALLOC_MODULE_H + +#include "Core" + +#if (!EIGEN_MALLOC_ALREADY_ALIGNED) + +inline void *qMalloc(size_t size) +{ + return Eigen::ei_aligned_malloc(size); +} + +inline void qFree(void *ptr) +{ + Eigen::ei_aligned_free(ptr); +} + +inline void *qRealloc(void *ptr, size_t size) +{ + void* newPtr = Eigen::ei_aligned_malloc(size); + memcpy(newPtr, ptr, size); + Eigen::ei_aligned_free(ptr); + return newPtr; +} + +#endif + +#endif // EIGEN_QTMALLOC_MODULE_H diff --git a/extern/Eigen2/Eigen/SVD b/extern/Eigen2/Eigen/SVD new file mode 100644 index 00000000000..eef05564bde --- /dev/null +++ b/extern/Eigen2/Eigen/SVD @@ -0,0 +1,29 @@ +#ifndef EIGEN_SVD_MODULE_H +#define EIGEN_SVD_MODULE_H + +#include "Core" + +#include "src/Core/util/DisableMSVCWarnings.h" + +namespace Eigen { + +/** \defgroup SVD_Module SVD module + * + * \nonstableyet + * + * This module provides SVD decomposition for (currently) real matrices. 
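Not part of the patch: a sketch of MatrixBase::svd() as named above; matrixU(), singularValues() and matrixV() are the Eigen 2 accessors and are assumed here.

    #include <Eigen/SVD>
    #include <iostream>
    using namespace Eigen;

    int main()
    {
        MatrixXf A(4, 3);
        A << 1, 0, 0,
             0, 2, 0,
             0, 0, 3,
             0, 0, 0;

        SVD<MatrixXf> svd = A.svd();                 // MatrixBase::svd(), as documented above
        std::cout << "singular values: " << svd.singularValues().transpose() << "\n";
        std::cout << "U:\n" << svd.matrixU() << "\n";
        std::cout << "V:\n" << svd.matrixV() << "\n";
        return 0;
    }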
+ * This decomposition is accessible via the following MatrixBase method: + * - MatrixBase::svd() + * + * \code + * #include + * \endcode + */ + +#include "src/SVD/SVD.h" + +} // namespace Eigen + +#include "src/Core/util/EnableMSVCWarnings.h" + +#endif // EIGEN_SVD_MODULE_H diff --git a/extern/Eigen2/Eigen/Sparse b/extern/Eigen2/Eigen/Sparse new file mode 100644 index 00000000000..536c284549b --- /dev/null +++ b/extern/Eigen2/Eigen/Sparse @@ -0,0 +1,132 @@ +#ifndef EIGEN_SPARSE_MODULE_H +#define EIGEN_SPARSE_MODULE_H + +#include "Core" + +#include "src/Core/util/DisableMSVCWarnings.h" + +#include +#include +#include +#include +#include + +#ifdef EIGEN_GOOGLEHASH_SUPPORT + #include +#endif + +#ifdef EIGEN_CHOLMOD_SUPPORT + extern "C" { + #include "cholmod.h" + } +#endif + +#ifdef EIGEN_TAUCS_SUPPORT + // taucs.h declares a lot of mess + #define isnan + #define finite + #define isinf + extern "C" { + #include "taucs.h" + } + #undef isnan + #undef finite + #undef isinf + + #ifdef min + #undef min + #endif + #ifdef max + #undef max + #endif + #ifdef complex + #undef complex + #endif +#endif + +#ifdef EIGEN_SUPERLU_SUPPORT + typedef int int_t; + #include "superlu/slu_Cnames.h" + #include "superlu/supermatrix.h" + #include "superlu/slu_util.h" + + namespace SuperLU_S { + #include "superlu/slu_sdefs.h" + } + namespace SuperLU_D { + #include "superlu/slu_ddefs.h" + } + namespace SuperLU_C { + #include "superlu/slu_cdefs.h" + } + namespace SuperLU_Z { + #include "superlu/slu_zdefs.h" + } + namespace Eigen { struct SluMatrix; } +#endif + +#ifdef EIGEN_UMFPACK_SUPPORT + #include "umfpack.h" +#endif + +namespace Eigen { + +/** \defgroup Sparse_Module Sparse module + * + * \nonstableyet + * + * See the \ref TutorialSparse "Sparse tutorial" + * + * \code + * #include + * \endcode + */ + +#include "src/Sparse/SparseUtil.h" +#include "src/Sparse/SparseMatrixBase.h" +#include "src/Sparse/CompressedStorage.h" +#include "src/Sparse/AmbiVector.h" +#include "src/Sparse/RandomSetter.h" +#include "src/Sparse/SparseBlock.h" +#include "src/Sparse/SparseMatrix.h" +#include "src/Sparse/DynamicSparseMatrix.h" +#include "src/Sparse/MappedSparseMatrix.h" +#include "src/Sparse/SparseVector.h" +#include "src/Sparse/CoreIterators.h" +#include "src/Sparse/SparseTranspose.h" +#include "src/Sparse/SparseCwise.h" +#include "src/Sparse/SparseCwiseUnaryOp.h" +#include "src/Sparse/SparseCwiseBinaryOp.h" +#include "src/Sparse/SparseDot.h" +#include "src/Sparse/SparseAssign.h" +#include "src/Sparse/SparseRedux.h" +#include "src/Sparse/SparseFuzzy.h" +#include "src/Sparse/SparseFlagged.h" +#include "src/Sparse/SparseProduct.h" +#include "src/Sparse/SparseDiagonalProduct.h" +#include "src/Sparse/TriangularSolver.h" +#include "src/Sparse/SparseLLT.h" +#include "src/Sparse/SparseLDLT.h" +#include "src/Sparse/SparseLU.h" + +#ifdef EIGEN_CHOLMOD_SUPPORT +# include "src/Sparse/CholmodSupport.h" +#endif + +#ifdef EIGEN_TAUCS_SUPPORT +# include "src/Sparse/TaucsSupport.h" +#endif + +#ifdef EIGEN_SUPERLU_SUPPORT +# include "src/Sparse/SuperLUSupport.h" +#endif + +#ifdef EIGEN_UMFPACK_SUPPORT +# include "src/Sparse/UmfPackSupport.h" +#endif + +} // namespace Eigen + +#include "src/Core/util/EnableMSVCWarnings.h" + +#endif // EIGEN_SPARSE_MODULE_H diff --git a/extern/Eigen2/Eigen/StdVector b/extern/Eigen2/Eigen/StdVector new file mode 100644 index 00000000000..c0744d6a0f3 --- /dev/null +++ b/extern/Eigen2/Eigen/StdVector @@ -0,0 +1,147 @@ +#ifdef EIGEN_USE_NEW_STDVECTOR +#include "NewStdVector" +#else + +#ifndef EIGEN_STDVECTOR_MODULE_H 
+#define EIGEN_STDVECTOR_MODULE_H + +#if defined(_GLIBCXX_VECTOR) || defined(_VECTOR_) +#error you must include before . Also note that includes , so it must be included after too. +#endif + +#ifndef EIGEN_GNUC_AT_LEAST +#ifdef __GNUC__ + #define EIGEN_GNUC_AT_LEAST(x,y) ((__GNUC__>=x && __GNUC_MINOR__>=y) || __GNUC__>x) +#else + #define EIGEN_GNUC_AT_LEAST(x,y) 0 +#endif +#endif + +#define vector std_vector +#include +#undef vector + +namespace Eigen { + +template class aligned_allocator; + +// meta programming to determine if a class has a given member +struct ei_does_not_have_aligned_operator_new_marker_sizeof {int a[1];}; +struct ei_has_aligned_operator_new_marker_sizeof {int a[2];}; + +template +struct ei_has_aligned_operator_new { + template + static ei_has_aligned_operator_new_marker_sizeof + test(T const *, typename T::ei_operator_new_marker_type const * = 0); + static ei_does_not_have_aligned_operator_new_marker_sizeof + test(...); + + // note that the following indirection is needed for gcc-3.3 + enum {ret = sizeof(test(static_cast(0))) + == sizeof(ei_has_aligned_operator_new_marker_sizeof) }; +}; + +#ifdef _MSC_VER + + // sometimes, MSVC detects, at compile time, that the argument x + // in std::vector::resize(size_t s,T x) won't be aligned and generate an error + // even if this function is never called. Whence this little wrapper. + #define _EIGEN_WORKAROUND_MSVC_STD_VECTOR(T) Eigen::ei_workaround_msvc_std_vector + template struct ei_workaround_msvc_std_vector : public T + { + inline ei_workaround_msvc_std_vector() : T() {} + inline ei_workaround_msvc_std_vector(const T& other) : T(other) {} + inline operator T& () { return *static_cast(this); } + inline operator const T& () const { return *static_cast(this); } + template + inline T& operator=(const OtherT& other) + { T::operator=(other); return *this; } + inline ei_workaround_msvc_std_vector& operator=(const ei_workaround_msvc_std_vector& other) + { T::operator=(other); return *this; } + }; + +#else + + #define _EIGEN_WORKAROUND_MSVC_STD_VECTOR(T) T + +#endif + +} + +namespace std { + +#define EIGEN_STD_VECTOR_SPECIALIZATION_BODY \ + public: \ + typedef T value_type; \ + typedef typename vector_base::allocator_type allocator_type; \ + typedef typename vector_base::size_type size_type; \ + typedef typename vector_base::iterator iterator; \ + explicit vector(const allocator_type& __a = allocator_type()) : vector_base(__a) {} \ + vector(const vector& c) : vector_base(c) {} \ + vector(size_type num, const value_type& val = value_type()) : vector_base(num, val) {} \ + vector(iterator start, iterator end) : vector_base(start, end) {} \ + vector& operator=(const vector& __x) { \ + vector_base::operator=(__x); \ + return *this; \ + } + +template, + bool HasAlignedNew = Eigen::ei_has_aligned_operator_new::ret> +class vector : public std::std_vector +{ + typedef std_vector vector_base; + EIGEN_STD_VECTOR_SPECIALIZATION_BODY +}; + +template +class vector + : public std::std_vector<_EIGEN_WORKAROUND_MSVC_STD_VECTOR(T), + Eigen::aligned_allocator<_EIGEN_WORKAROUND_MSVC_STD_VECTOR(T)> > +{ + typedef std_vector<_EIGEN_WORKAROUND_MSVC_STD_VECTOR(T), + Eigen::aligned_allocator<_EIGEN_WORKAROUND_MSVC_STD_VECTOR(T)> > vector_base; + EIGEN_STD_VECTOR_SPECIALIZATION_BODY + + void resize(size_type __new_size) + { resize(__new_size, T()); } + + #if defined(_VECTOR_) + // workaround MSVC std::vector implementation + void resize(size_type __new_size, const value_type& __x) + { + if (vector_base::size() < __new_size) + 
vector_base::_Insert_n(vector_base::end(), __new_size - vector_base::size(), __x); + else if (__new_size < vector_base::size()) + vector_base::erase(vector_base::begin() + __new_size, vector_base::end()); + } + #elif defined(_GLIBCXX_VECTOR) && EIGEN_GNUC_AT_LEAST(4,2) + // workaround GCC std::vector implementation + void resize(size_type __new_size, const value_type& __x) + { + if (__new_size < vector_base::size()) + vector_base::_M_erase_at_end(this->_M_impl._M_start + __new_size); + else + vector_base::insert(vector_base::end(), __new_size - vector_base::size(), __x); + } + #elif defined(_GLIBCXX_VECTOR) && EIGEN_GNUC_AT_LEAST(4,1) + void resize(size_type __new_size, const value_type& __x) + { + if (__new_size < vector_base::size()) + vector_base::erase(vector_base::begin() + __new_size, vector_base::end()); + else + vector_base::insert(vector_base::end(), __new_size - vector_base::size(), __x); + } + #else + // Before gcc-4.1 we already have: std::vector::resize(size_type,const T&), + // so no need for a workaround ! + using vector_base::resize; + #endif +}; + +} + +#endif // EIGEN_STDVECTOR_MODULE_H + +#endif // EIGEN_USE_NEW_STDVECTOR \ No newline at end of file diff --git a/extern/Eigen2/Eigen/src/Array/BooleanRedux.h b/extern/Eigen2/Eigen/src/Array/BooleanRedux.h new file mode 100644 index 00000000000..4e8218327eb --- /dev/null +++ b/extern/Eigen2/Eigen/src/Array/BooleanRedux.h @@ -0,0 +1,145 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
+ +#ifndef EIGEN_ALLANDANY_H +#define EIGEN_ALLANDANY_H + +template +struct ei_all_unroller +{ + enum { + col = (UnrollCount-1) / Derived::RowsAtCompileTime, + row = (UnrollCount-1) % Derived::RowsAtCompileTime + }; + + inline static bool run(const Derived &mat) + { + return ei_all_unroller::run(mat) && mat.coeff(row, col); + } +}; + +template +struct ei_all_unroller +{ + inline static bool run(const Derived &mat) { return mat.coeff(0, 0); } +}; + +template +struct ei_all_unroller +{ + inline static bool run(const Derived &) { return false; } +}; + +template +struct ei_any_unroller +{ + enum { + col = (UnrollCount-1) / Derived::RowsAtCompileTime, + row = (UnrollCount-1) % Derived::RowsAtCompileTime + }; + + inline static bool run(const Derived &mat) + { + return ei_any_unroller::run(mat) || mat.coeff(row, col); + } +}; + +template +struct ei_any_unroller +{ + inline static bool run(const Derived &mat) { return mat.coeff(0, 0); } +}; + +template +struct ei_any_unroller +{ + inline static bool run(const Derived &) { return false; } +}; + +/** \array_module + * + * \returns true if all coefficients are true + * + * \addexample CwiseAll \label How to check whether a point is inside a box (using operator< and all()) + * + * Example: \include MatrixBase_all.cpp + * Output: \verbinclude MatrixBase_all.out + * + * \sa MatrixBase::any(), Cwise::operator<() + */ +template +inline bool MatrixBase::all() const +{ + const bool unroll = SizeAtCompileTime * (CoeffReadCost + NumTraits::AddCost) + <= EIGEN_UNROLLING_LIMIT; + if(unroll) + return ei_all_unroller::run(derived()); + else + { + for(int j = 0; j < cols(); ++j) + for(int i = 0; i < rows(); ++i) + if (!coeff(i, j)) return false; + return true; + } +} + +/** \array_module + * + * \returns true if at least one coefficient is true + * + * \sa MatrixBase::all() + */ +template +inline bool MatrixBase::any() const +{ + const bool unroll = SizeAtCompileTime * (CoeffReadCost + NumTraits::AddCost) + <= EIGEN_UNROLLING_LIMIT; + if(unroll) + return ei_any_unroller::run(derived()); + else + { + for(int j = 0; j < cols(); ++j) + for(int i = 0; i < rows(); ++i) + if (coeff(i, j)) return true; + return false; + } +} + +/** \array_module + * + * \returns the number of coefficients which evaluate to true + * + * \sa MatrixBase::all(), MatrixBase::any() + */ +template +inline int MatrixBase::count() const +{ + return this->cast().cast().sum(); +} + +#endif // EIGEN_ALLANDANY_H diff --git a/extern/Eigen2/Eigen/src/Array/CwiseOperators.h b/extern/Eigen2/Eigen/src/Array/CwiseOperators.h new file mode 100644 index 00000000000..4b6346daa51 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Array/CwiseOperators.h @@ -0,0 +1,453 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. 
+// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_ARRAY_CWISE_OPERATORS_H +#define EIGEN_ARRAY_CWISE_OPERATORS_H + +// -- unary operators -- + +/** \array_module + * + * \returns an expression of the coefficient-wise square root of *this. + * + * Example: \include Cwise_sqrt.cpp + * Output: \verbinclude Cwise_sqrt.out + * + * \sa pow(), square() + */ +template +inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_sqrt_op) +Cwise::sqrt() const +{ + return _expression(); +} + +/** \array_module + * + * \returns an expression of the coefficient-wise exponential of *this. + * + * Example: \include Cwise_exp.cpp + * Output: \verbinclude Cwise_exp.out + * + * \sa pow(), log(), sin(), cos() + */ +template +inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_exp_op) +Cwise::exp() const +{ + return _expression(); +} + +/** \array_module + * + * \returns an expression of the coefficient-wise logarithm of *this. + * + * Example: \include Cwise_log.cpp + * Output: \verbinclude Cwise_log.out + * + * \sa exp() + */ +template +inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_log_op) +Cwise::log() const +{ + return _expression(); +} + +/** \array_module + * + * \returns an expression of the coefficient-wise cosine of *this. + * + * Example: \include Cwise_cos.cpp + * Output: \verbinclude Cwise_cos.out + * + * \sa sin(), exp() + */ +template +inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_cos_op) +Cwise::cos() const +{ + return _expression(); +} + + +/** \array_module + * + * \returns an expression of the coefficient-wise sine of *this. + * + * Example: \include Cwise_sin.cpp + * Output: \verbinclude Cwise_sin.out + * + * \sa cos(), exp() + */ +template +inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_sin_op) +Cwise::sin() const +{ + return _expression(); +} + + +/** \array_module + * + * \returns an expression of the coefficient-wise power of *this to the given exponent. + * + * Example: \include Cwise_pow.cpp + * Output: \verbinclude Cwise_pow.out + * + * \sa exp(), log() + */ +template +inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_pow_op) +Cwise::pow(const Scalar& exponent) const +{ + return EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_pow_op)(_expression(), ei_scalar_pow_op(exponent)); +} + + +/** \array_module + * + * \returns an expression of the coefficient-wise inverse of *this. + * + * Example: \include Cwise_inverse.cpp + * Output: \verbinclude Cwise_inverse.out + * + * \sa operator/(), operator*() + */ +template +inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_inverse_op) +Cwise::inverse() const +{ + return _expression(); +} + +/** \array_module + * + * \returns an expression of the coefficient-wise square of *this. + * + * Example: \include Cwise_square.cpp + * Output: \verbinclude Cwise_square.out + * + * \sa operator/(), operator*(), abs2() + */ +template +inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_square_op) +Cwise::square() const +{ + return _expression(); +} + +/** \array_module + * + * \returns an expression of the coefficient-wise cube of *this. 
+ * + * Example: \include Cwise_cube.cpp + * Output: \verbinclude Cwise_cube.out + * + * \sa square(), pow() + */ +template +inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_cube_op) +Cwise::cube() const +{ + return _expression(); +} + + +// -- binary operators -- + +/** \array_module + * + * \returns an expression of the coefficient-wise \< operator of *this and \a other + * + * Example: \include Cwise_less.cpp + * Output: \verbinclude Cwise_less.out + * + * \sa MatrixBase::all(), MatrixBase::any(), operator>(), operator<=() + */ +template +template +inline const EIGEN_CWISE_BINOP_RETURN_TYPE(std::less) +Cwise::operator<(const MatrixBase &other) const +{ + return EIGEN_CWISE_BINOP_RETURN_TYPE(std::less)(_expression(), other.derived()); +} + +/** \array_module + * + * \returns an expression of the coefficient-wise \<= operator of *this and \a other + * + * Example: \include Cwise_less_equal.cpp + * Output: \verbinclude Cwise_less_equal.out + * + * \sa MatrixBase::all(), MatrixBase::any(), operator>=(), operator<() + */ +template +template +inline const EIGEN_CWISE_BINOP_RETURN_TYPE(std::less_equal) +Cwise::operator<=(const MatrixBase &other) const +{ + return EIGEN_CWISE_BINOP_RETURN_TYPE(std::less_equal)(_expression(), other.derived()); +} + +/** \array_module + * + * \returns an expression of the coefficient-wise \> operator of *this and \a other + * + * Example: \include Cwise_greater.cpp + * Output: \verbinclude Cwise_greater.out + * + * \sa MatrixBase::all(), MatrixBase::any(), operator>=(), operator<() + */ +template +template +inline const EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater) +Cwise::operator>(const MatrixBase &other) const +{ + return EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater)(_expression(), other.derived()); +} + +/** \array_module + * + * \returns an expression of the coefficient-wise \>= operator of *this and \a other + * + * Example: \include Cwise_greater_equal.cpp + * Output: \verbinclude Cwise_greater_equal.out + * + * \sa MatrixBase::all(), MatrixBase::any(), operator>(), operator<=() + */ +template +template +inline const EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater_equal) +Cwise::operator>=(const MatrixBase &other) const +{ + return EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater_equal)(_expression(), other.derived()); +} + +/** \array_module + * + * \returns an expression of the coefficient-wise == operator of *this and \a other + * + * \warning this performs an exact comparison, which is generally a bad idea with floating-point types. + * In order to check for equality between two vectors or matrices with floating-point coefficients, it is + * generally a far better idea to use a fuzzy comparison as provided by MatrixBase::isApprox() and + * MatrixBase::isMuchSmallerThan(). + * + * Example: \include Cwise_equal_equal.cpp + * Output: \verbinclude Cwise_equal_equal.out + * + * \sa MatrixBase::all(), MatrixBase::any(), MatrixBase::isApprox(), MatrixBase::isMuchSmallerThan() + */ +template +template +inline const EIGEN_CWISE_BINOP_RETURN_TYPE(std::equal_to) +Cwise::operator==(const MatrixBase &other) const +{ + return EIGEN_CWISE_BINOP_RETURN_TYPE(std::equal_to)(_expression(), other.derived()); +} + +/** \array_module + * + * \returns an expression of the coefficient-wise != operator of *this and \a other + * + * \warning this performs an exact comparison, which is generally a bad idea with floating-point types. 
+ * In order to check for equality between two vectors or matrices with floating-point coefficients, it is + * generally a far better idea to use a fuzzy comparison as provided by MatrixBase::isApprox() and + * MatrixBase::isMuchSmallerThan(). + * + * Example: \include Cwise_not_equal.cpp + * Output: \verbinclude Cwise_not_equal.out + * + * \sa MatrixBase::all(), MatrixBase::any(), MatrixBase::isApprox(), MatrixBase::isMuchSmallerThan() + */ +template +template +inline const EIGEN_CWISE_BINOP_RETURN_TYPE(std::not_equal_to) +Cwise::operator!=(const MatrixBase &other) const +{ + return EIGEN_CWISE_BINOP_RETURN_TYPE(std::not_equal_to)(_expression(), other.derived()); +} + +// comparisons to scalar value + +/** \array_module + * + * \returns an expression of the coefficient-wise \< operator of *this and a scalar \a s + * + * \sa operator<(const MatrixBase &) const + */ +template +inline const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::less) +Cwise::operator<(Scalar s) const +{ + return EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::less)(_expression(), + typename ExpressionType::ConstantReturnType(_expression().rows(), _expression().cols(), s)); +} + +/** \array_module + * + * \returns an expression of the coefficient-wise \<= operator of *this and a scalar \a s + * + * \sa operator<=(const MatrixBase &) const + */ +template +inline const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::less_equal) +Cwise::operator<=(Scalar s) const +{ + return EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::less_equal)(_expression(), + typename ExpressionType::ConstantReturnType(_expression().rows(), _expression().cols(), s)); +} + +/** \array_module + * + * \returns an expression of the coefficient-wise \> operator of *this and a scalar \a s + * + * \sa operator>(const MatrixBase &) const + */ +template +inline const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::greater) +Cwise::operator>(Scalar s) const +{ + return EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::greater)(_expression(), + typename ExpressionType::ConstantReturnType(_expression().rows(), _expression().cols(), s)); +} + +/** \array_module + * + * \returns an expression of the coefficient-wise \>= operator of *this and a scalar \a s + * + * \sa operator>=(const MatrixBase &) const + */ +template +inline const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::greater_equal) +Cwise::operator>=(Scalar s) const +{ + return EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::greater_equal)(_expression(), + typename ExpressionType::ConstantReturnType(_expression().rows(), _expression().cols(), s)); +} + +/** \array_module + * + * \returns an expression of the coefficient-wise == operator of *this and a scalar \a s + * + * \warning this performs an exact comparison, which is generally a bad idea with floating-point types. + * In order to check for equality between two vectors or matrices with floating-point coefficients, it is + * generally a far better idea to use a fuzzy comparison as provided by MatrixBase::isApprox() and + * MatrixBase::isMuchSmallerThan(). 
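Not part of the patch: the comparison operators defined above yield boolean-valued expressions, which combine naturally with the all()/any()/count() reductions from BooleanRedux.h earlier in this commit; for floating-point equality the doc comments recommend isApprox() instead. A brief sketch assuming the vendored Eigen 2 API:

    #include <Eigen/Array>
    #include <iostream>
    using namespace Eigen;

    int main()
    {
        Matrix3d m = Matrix3d::Random();                           // coefficients in [-1, 1]

        bool inUnitBox = (m.cwise().abs().cwise() <= 1.0).all();   // comparison + all()
        bool anyLarge  = (m.cwise() > 0.9).any();                  // comparison + any()
        int  positives = (m.cwise() > 0.0).count();                // comparison + count()

        // exact == on floats is discouraged above; prefer a fuzzy comparison
        bool same = m.isApprox(m + Matrix3d::Constant(1e-30));

        std::cout << inUnitBox << " " << anyLarge << " "
                  << positives << " " << same << "\n";
        return 0;
    }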
+ * + * \sa operator==(const MatrixBase &) const + */ +template +inline const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::equal_to) +Cwise::operator==(Scalar s) const +{ + return EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::equal_to)(_expression(), + typename ExpressionType::ConstantReturnType(_expression().rows(), _expression().cols(), s)); +} + +/** \array_module + * + * \returns an expression of the coefficient-wise != operator of *this and a scalar \a s + * + * \warning this performs an exact comparison, which is generally a bad idea with floating-point types. + * In order to check for equality between two vectors or matrices with floating-point coefficients, it is + * generally a far better idea to use a fuzzy comparison as provided by MatrixBase::isApprox() and + * MatrixBase::isMuchSmallerThan(). + * + * \sa operator!=(const MatrixBase &) const + */ +template +inline const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::not_equal_to) +Cwise::operator!=(Scalar s) const +{ + return EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::not_equal_to)(_expression(), + typename ExpressionType::ConstantReturnType(_expression().rows(), _expression().cols(), s)); +} + +// scalar addition + +/** \array_module + * + * \returns an expression of \c *this with each coeff incremented by the constant \a scalar + * + * Example: \include Cwise_plus.cpp + * Output: \verbinclude Cwise_plus.out + * + * \sa operator+=(), operator-() + */ +template +inline const typename Cwise::ScalarAddReturnType +Cwise::operator+(const Scalar& scalar) const +{ + return typename Cwise::ScalarAddReturnType(m_matrix, ei_scalar_add_op(scalar)); +} + +/** \array_module + * + * Adds the given \a scalar to each coeff of this expression. + * + * Example: \include Cwise_plus_equal.cpp + * Output: \verbinclude Cwise_plus_equal.out + * + * \sa operator+(), operator-=() + */ +template +inline ExpressionType& Cwise::operator+=(const Scalar& scalar) +{ + return m_matrix.const_cast_derived() = *this + scalar; +} + +/** \array_module + * + * \returns an expression of \c *this with each coeff decremented by the constant \a scalar + * + * Example: \include Cwise_minus.cpp + * Output: \verbinclude Cwise_minus.out + * + * \sa operator+(), operator-=() + */ +template +inline const typename Cwise::ScalarAddReturnType +Cwise::operator-(const Scalar& scalar) const +{ + return *this + (-scalar); +} + +/** \array_module + * + * Substracts the given \a scalar from each coeff of this expression. + * + * Example: \include Cwise_minus_equal.cpp + * Output: \verbinclude Cwise_minus_equal.out + * + * \sa operator+=(), operator-() + */ + +template +inline ExpressionType& Cwise::operator-=(const Scalar& scalar) +{ + return m_matrix.const_cast_derived() = *this - scalar; +} + +#endif // EIGEN_ARRAY_CWISE_OPERATORS_H diff --git a/extern/Eigen2/Eigen/src/Array/Functors.h b/extern/Eigen2/Eigen/src/Array/Functors.h new file mode 100644 index 00000000000..0aae7fd2c40 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Array/Functors.h @@ -0,0 +1,305 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. 
+// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_ARRAY_FUNCTORS_H +#define EIGEN_ARRAY_FUNCTORS_H + +/** \internal + * \array_module + * + * \brief Template functor to add a scalar to a fixed other one + * + * \sa class CwiseUnaryOp, Array::operator+ + */ +/* If you wonder why doing the ei_pset1() in packetOp() is an optimization check ei_scalar_multiple_op */ +template +struct ei_scalar_add_op { + typedef typename ei_packet_traits::type PacketScalar; + // FIXME default copy constructors seems bugged with std::complex<> + inline ei_scalar_add_op(const ei_scalar_add_op& other) : m_other(other.m_other) { } + inline ei_scalar_add_op(const Scalar& other) : m_other(other) { } + inline Scalar operator() (const Scalar& a) const { return a + m_other; } + inline const PacketScalar packetOp(const PacketScalar& a) const + { return ei_padd(a, ei_pset1(m_other)); } + const Scalar m_other; +}; +template +struct ei_functor_traits > +{ enum { Cost = NumTraits::AddCost, PacketAccess = ei_packet_traits::size>1 }; }; + +/** \internal + * + * \array_module + * + * \brief Template functor to compute the square root of a scalar + * + * \sa class CwiseUnaryOp, Cwise::sqrt() + */ +template struct ei_scalar_sqrt_op EIGEN_EMPTY_STRUCT { + inline const Scalar operator() (const Scalar& a) const { return ei_sqrt(a); } +}; +template +struct ei_functor_traits > +{ enum { Cost = 5 * NumTraits::MulCost, PacketAccess = false }; }; + +/** \internal + * + * \array_module + * + * \brief Template functor to compute the exponential of a scalar + * + * \sa class CwiseUnaryOp, Cwise::exp() + */ +template struct ei_scalar_exp_op EIGEN_EMPTY_STRUCT { + inline const Scalar operator() (const Scalar& a) const { return ei_exp(a); } +}; +template +struct ei_functor_traits > +{ enum { Cost = 5 * NumTraits::MulCost, PacketAccess = false }; }; + +/** \internal + * + * \array_module + * + * \brief Template functor to compute the logarithm of a scalar + * + * \sa class CwiseUnaryOp, Cwise::log() + */ +template struct ei_scalar_log_op EIGEN_EMPTY_STRUCT { + inline const Scalar operator() (const Scalar& a) const { return ei_log(a); } +}; +template +struct ei_functor_traits > +{ enum { Cost = 5 * NumTraits::MulCost, PacketAccess = false }; }; + +/** \internal + * + * \array_module + * + * \brief Template functor to compute the cosine of a scalar + * + * \sa class CwiseUnaryOp, Cwise::cos() + */ +template struct ei_scalar_cos_op EIGEN_EMPTY_STRUCT { + inline const Scalar operator() (const Scalar& a) const { return ei_cos(a); } +}; +template +struct ei_functor_traits > +{ enum { Cost = 5 * NumTraits::MulCost, PacketAccess = false }; }; + +/** \internal + * + * \array_module + * + * \brief Template functor to compute the sine of a scalar + * + * \sa class CwiseUnaryOp, Cwise::sin() + */ +template struct ei_scalar_sin_op EIGEN_EMPTY_STRUCT { + inline const Scalar operator() (const Scalar& a) 
const { return ei_sin(a); } +}; +template +struct ei_functor_traits > +{ enum { Cost = 5 * NumTraits::MulCost, PacketAccess = false }; }; + +/** \internal + * + * \array_module + * + * \brief Template functor to raise a scalar to a power + * + * \sa class CwiseUnaryOp, Cwise::pow + */ +template +struct ei_scalar_pow_op { + // FIXME default copy constructors seems bugged with std::complex<> + inline ei_scalar_pow_op(const ei_scalar_pow_op& other) : m_exponent(other.m_exponent) { } + inline ei_scalar_pow_op(const Scalar& exponent) : m_exponent(exponent) {} + inline Scalar operator() (const Scalar& a) const { return ei_pow(a, m_exponent); } + const Scalar m_exponent; +}; +template +struct ei_functor_traits > +{ enum { Cost = 5 * NumTraits::MulCost, PacketAccess = false }; }; + +/** \internal + * + * \array_module + * + * \brief Template functor to compute the inverse of a scalar + * + * \sa class CwiseUnaryOp, Cwise::inverse() + */ +template +struct ei_scalar_inverse_op { + inline Scalar operator() (const Scalar& a) const { return Scalar(1)/a; } + template + inline const PacketScalar packetOp(const PacketScalar& a) const + { return ei_pdiv(ei_pset1(Scalar(1)),a); } +}; +template +struct ei_functor_traits > +{ enum { Cost = NumTraits::MulCost, PacketAccess = int(ei_packet_traits::size)>1 }; }; + +/** \internal + * + * \array_module + * + * \brief Template functor to compute the square of a scalar + * + * \sa class CwiseUnaryOp, Cwise::square() + */ +template +struct ei_scalar_square_op { + inline Scalar operator() (const Scalar& a) const { return a*a; } + template + inline const PacketScalar packetOp(const PacketScalar& a) const + { return ei_pmul(a,a); } +}; +template +struct ei_functor_traits > +{ enum { Cost = NumTraits::MulCost, PacketAccess = int(ei_packet_traits::size)>1 }; }; + +/** \internal + * + * \array_module + * + * \brief Template functor to compute the cube of a scalar + * + * \sa class CwiseUnaryOp, Cwise::cube() + */ +template +struct ei_scalar_cube_op { + inline Scalar operator() (const Scalar& a) const { return a*a*a; } + template + inline const PacketScalar packetOp(const PacketScalar& a) const + { return ei_pmul(a,ei_pmul(a,a)); } +}; +template +struct ei_functor_traits > +{ enum { Cost = 2*NumTraits::MulCost, PacketAccess = int(ei_packet_traits::size)>1 }; }; + +// default ei_functor_traits for STL functors: + +template +struct ei_functor_traits > +{ enum { Cost = NumTraits::MulCost, PacketAccess = false }; }; + +template +struct ei_functor_traits > +{ enum { Cost = NumTraits::MulCost, PacketAccess = false }; }; + +template +struct ei_functor_traits > +{ enum { Cost = NumTraits::AddCost, PacketAccess = false }; }; + +template +struct ei_functor_traits > +{ enum { Cost = NumTraits::AddCost, PacketAccess = false }; }; + +template +struct ei_functor_traits > +{ enum { Cost = NumTraits::AddCost, PacketAccess = false }; }; + +template +struct ei_functor_traits > +{ enum { Cost = 1, PacketAccess = false }; }; + +template +struct ei_functor_traits > +{ enum { Cost = 1, PacketAccess = false }; }; + +template +struct ei_functor_traits > +{ enum { Cost = 1, PacketAccess = false }; }; + +template +struct ei_functor_traits > +{ enum { Cost = 1, PacketAccess = false }; }; + +template +struct ei_functor_traits > +{ enum { Cost = 1, PacketAccess = false }; }; + +template +struct ei_functor_traits > +{ enum { Cost = 1, PacketAccess = false }; }; + +template +struct ei_functor_traits > +{ enum { Cost = 1, PacketAccess = false }; }; + +template +struct ei_functor_traits > +{ enum { Cost = 
1, PacketAccess = false }; }; + +template +struct ei_functor_traits > +{ enum { Cost = 1, PacketAccess = false }; }; + +template +struct ei_functor_traits > +{ enum { Cost = ei_functor_traits::Cost, PacketAccess = false }; }; + +template +struct ei_functor_traits > +{ enum { Cost = ei_functor_traits::Cost, PacketAccess = false }; }; + +template +struct ei_functor_traits > +{ enum { Cost = 1 + ei_functor_traits::Cost, PacketAccess = false }; }; + +template +struct ei_functor_traits > +{ enum { Cost = 1 + ei_functor_traits::Cost, PacketAccess = false }; }; + +#ifdef EIGEN_STDEXT_SUPPORT + +template +struct ei_functor_traits > +{ enum { Cost = 0, PacketAccess = false }; }; + +template +struct ei_functor_traits > +{ enum { Cost = 0, PacketAccess = false }; }; + +template +struct ei_functor_traits > > +{ enum { Cost = 0, PacketAccess = false }; }; + +template +struct ei_functor_traits > > +{ enum { Cost = 0, PacketAccess = false }; }; + +template +struct ei_functor_traits > +{ enum { Cost = ei_functor_traits::Cost + ei_functor_traits::Cost, PacketAccess = false }; }; + +template +struct ei_functor_traits > +{ enum { Cost = ei_functor_traits::Cost + ei_functor_traits::Cost + ei_functor_traits::Cost, PacketAccess = false }; }; + +#endif // EIGEN_STDEXT_SUPPORT + +#endif // EIGEN_ARRAY_FUNCTORS_H diff --git a/extern/Eigen2/Eigen/src/Array/Norms.h b/extern/Eigen2/Eigen/src/Array/Norms.h new file mode 100644 index 00000000000..6b92e6a099d --- /dev/null +++ b/extern/Eigen2/Eigen/src/Array/Norms.h @@ -0,0 +1,80 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
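Not part of the patch: the Norms.h hunk that follows adds MatrixBase::lpNorm<p>(), with dedicated specializations for p = 1, 2 and Infinity; a usage sketch is given here so it does not interrupt the code below.

    #include <Eigen/Array>
    #include <iostream>
    using namespace Eigen;

    int main()
    {
        Vector4d v(1.0, -2.0, 3.0, -4.0);

        std::cout << v.lpNorm<1>()               << "\n"    // sum of absolute values
                  << v.lpNorm<2>()               << "\n"    // same as v.norm()
                  << v.lpNorm<Eigen::Infinity>() << "\n";   // largest absolute value
        return 0;
    }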
+ +#ifndef EIGEN_ARRAY_NORMS_H +#define EIGEN_ARRAY_NORMS_H + +template +struct ei_lpNorm_selector +{ + typedef typename NumTraits::Scalar>::Real RealScalar; + inline static RealScalar run(const MatrixBase& m) + { + return ei_pow(m.cwise().abs().cwise().pow(p).sum(), RealScalar(1)/p); + } +}; + +template +struct ei_lpNorm_selector +{ + inline static typename NumTraits::Scalar>::Real run(const MatrixBase& m) + { + return m.cwise().abs().sum(); + } +}; + +template +struct ei_lpNorm_selector +{ + inline static typename NumTraits::Scalar>::Real run(const MatrixBase& m) + { + return m.norm(); + } +}; + +template +struct ei_lpNorm_selector +{ + inline static typename NumTraits::Scalar>::Real run(const MatrixBase& m) + { + return m.cwise().abs().maxCoeff(); + } +}; + +/** \array_module + * + * \returns the \f$ \ell^p \f$ norm of *this, that is, returns the p-th root of the sum of the p-th powers of the absolute values + * of the coefficients of *this. If \a p is the special value \a Eigen::Infinity, this function returns the \f$ \ell^p\infty \f$ + * norm, that is the maximum of the absolute values of the coefficients of *this. + * + * \sa norm() + */ +template +template +inline typename NumTraits::Scalar>::Real MatrixBase::lpNorm() const +{ + return ei_lpNorm_selector::run(*this); +} + +#endif // EIGEN_ARRAY_NORMS_H diff --git a/extern/Eigen2/Eigen/src/Array/PartialRedux.h b/extern/Eigen2/Eigen/src/Array/PartialRedux.h new file mode 100644 index 00000000000..b1e8fd4babd --- /dev/null +++ b/extern/Eigen2/Eigen/src/Array/PartialRedux.h @@ -0,0 +1,342 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_PARTIAL_REDUX_H +#define EIGEN_PARTIAL_REDUX_H + +/** \array_module \ingroup Array + * + * \class PartialReduxExpr + * + * \brief Generic expression of a partially reduxed matrix + * + * \param MatrixType the type of the matrix we are applying the redux operation + * \param MemberOp type of the member functor + * \param Direction indicates the direction of the redux (Vertical or Horizontal) + * + * This class represents an expression of a partial redux operator of a matrix. + * It is the return type of PartialRedux functions, + * and most of the time this is the only way it is used. 
+ * + * \sa class PartialRedux + */ + +template< typename MatrixType, typename MemberOp, int Direction> +class PartialReduxExpr; + +template +struct ei_traits > +{ + typedef typename MemberOp::result_type Scalar; + typedef typename MatrixType::Scalar InputScalar; + typedef typename ei_nested::type MatrixTypeNested; + typedef typename ei_cleantype::type _MatrixTypeNested; + enum { + RowsAtCompileTime = Direction==Vertical ? 1 : MatrixType::RowsAtCompileTime, + ColsAtCompileTime = Direction==Horizontal ? 1 : MatrixType::ColsAtCompileTime, + MaxRowsAtCompileTime = Direction==Vertical ? 1 : MatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = Direction==Horizontal ? 1 : MatrixType::MaxColsAtCompileTime, + Flags = (unsigned int)_MatrixTypeNested::Flags & HereditaryBits, + TraversalSize = Direction==Vertical ? RowsAtCompileTime : ColsAtCompileTime + }; + #if EIGEN_GNUC_AT_LEAST(3,4) + typedef typename MemberOp::template Cost CostOpType; + #else + typedef typename MemberOp::template Cost CostOpType; + #endif + enum { + CoeffReadCost = TraversalSize * ei_traits<_MatrixTypeNested>::CoeffReadCost + int(CostOpType::value) + }; +}; + +template< typename MatrixType, typename MemberOp, int Direction> +class PartialReduxExpr : ei_no_assignment_operator, + public MatrixBase > +{ + public: + + EIGEN_GENERIC_PUBLIC_INTERFACE(PartialReduxExpr) + typedef typename ei_traits::MatrixTypeNested MatrixTypeNested; + typedef typename ei_traits::_MatrixTypeNested _MatrixTypeNested; + + PartialReduxExpr(const MatrixType& mat, const MemberOp& func = MemberOp()) + : m_matrix(mat), m_functor(func) {} + + int rows() const { return (Direction==Vertical ? 1 : m_matrix.rows()); } + int cols() const { return (Direction==Horizontal ? 1 : m_matrix.cols()); } + + const Scalar coeff(int i, int j) const + { + if (Direction==Vertical) + return m_functor(m_matrix.col(j)); + else + return m_functor(m_matrix.row(i)); + } + + protected: + const MatrixTypeNested m_matrix; + const MemberOp m_functor; +}; + +#define EIGEN_MEMBER_FUNCTOR(MEMBER,COST) \ + template \ + struct ei_member_##MEMBER EIGEN_EMPTY_STRUCT { \ + typedef ResultType result_type; \ + template struct Cost \ + { enum { value = COST }; }; \ + template \ + inline ResultType operator()(const MatrixBase& mat) const \ + { return mat.MEMBER(); } \ + } + +EIGEN_MEMBER_FUNCTOR(squaredNorm, Size * NumTraits::MulCost + (Size-1)*NumTraits::AddCost); +EIGEN_MEMBER_FUNCTOR(norm, (Size+5) * NumTraits::MulCost + (Size-1)*NumTraits::AddCost); +EIGEN_MEMBER_FUNCTOR(sum, (Size-1)*NumTraits::AddCost); +EIGEN_MEMBER_FUNCTOR(minCoeff, (Size-1)*NumTraits::AddCost); +EIGEN_MEMBER_FUNCTOR(maxCoeff, (Size-1)*NumTraits::AddCost); +EIGEN_MEMBER_FUNCTOR(all, (Size-1)*NumTraits::AddCost); +EIGEN_MEMBER_FUNCTOR(any, (Size-1)*NumTraits::AddCost); +EIGEN_MEMBER_FUNCTOR(count, (Size-1)*NumTraits::AddCost); + +/** \internal */ +template +struct ei_member_redux { + typedef typename ei_result_of< + BinaryOp(Scalar) + >::type result_type; + template struct Cost + { enum { value = (Size-1) * ei_functor_traits::Cost }; }; + ei_member_redux(const BinaryOp func) : m_functor(func) {} + template + inline result_type operator()(const MatrixBase& mat) const + { return mat.redux(m_functor); } + const BinaryOp m_functor; +}; + +/** \array_module \ingroup Array + * + * \class PartialRedux + * + * \brief Pseudo expression providing partial reduction operations + * + * \param ExpressionType the type of the object on which to do partial reductions + * \param Direction indicates the direction of the redux (Vertical or 
Horizontal) + * + * This class represents a pseudo expression with partial reduction features. + * It is the return type of MatrixBase::colwise() and MatrixBase::rowwise() + * and most of the time this is the only way it is used. + * + * Example: \include MatrixBase_colwise.cpp + * Output: \verbinclude MatrixBase_colwise.out + * + * \sa MatrixBase::colwise(), MatrixBase::rowwise(), class PartialReduxExpr + */ +template class PartialRedux +{ + public: + + typedef typename ei_traits::Scalar Scalar; + typedef typename ei_meta_if::ret, + ExpressionType, const ExpressionType&>::ret ExpressionTypeNested; + + template class Functor> struct ReturnType + { + typedef PartialReduxExpr::Scalar>, + Direction + > Type; + }; + + template struct ReduxReturnType + { + typedef PartialReduxExpr::Scalar>, + Direction + > Type; + }; + + typedef typename ExpressionType::PlainMatrixType CrossReturnType; + + inline PartialRedux(const ExpressionType& matrix) : m_matrix(matrix) {} + + /** \internal */ + inline const ExpressionType& _expression() const { return m_matrix; } + + template + const typename ReduxReturnType::Type + redux(const BinaryOp& func = BinaryOp()) const; + + /** \returns a row (or column) vector expression of the smallest coefficient + * of each column (or row) of the referenced expression. + * + * Example: \include PartialRedux_minCoeff.cpp + * Output: \verbinclude PartialRedux_minCoeff.out + * + * \sa MatrixBase::minCoeff() */ + const typename ReturnType::Type minCoeff() const + { return _expression(); } + + /** \returns a row (or column) vector expression of the largest coefficient + * of each column (or row) of the referenced expression. + * + * Example: \include PartialRedux_maxCoeff.cpp + * Output: \verbinclude PartialRedux_maxCoeff.out + * + * \sa MatrixBase::maxCoeff() */ + const typename ReturnType::Type maxCoeff() const + { return _expression(); } + + /** \returns a row (or column) vector expression of the squared norm + * of each column (or row) of the referenced expression. + * + * Example: \include PartialRedux_squaredNorm.cpp + * Output: \verbinclude PartialRedux_squaredNorm.out + * + * \sa MatrixBase::squaredNorm() */ + const typename ReturnType::Type squaredNorm() const + { return _expression(); } + + /** \returns a row (or column) vector expression of the norm + * of each column (or row) of the referenced expression. + * + * Example: \include PartialRedux_norm.cpp + * Output: \verbinclude PartialRedux_norm.out + * + * \sa MatrixBase::norm() */ + const typename ReturnType::Type norm() const + { return _expression(); } + + /** \returns a row (or column) vector expression of the sum + * of each column (or row) of the referenced expression. + * + * Example: \include PartialRedux_sum.cpp + * Output: \verbinclude PartialRedux_sum.out + * + * \sa MatrixBase::sum() */ + const typename ReturnType::Type sum() const + { return _expression(); } + + /** \returns a row (or column) vector expression representing + * whether \b all coefficients of each respective column (or row) are \c true. + * + * \sa MatrixBase::all() */ + const typename ReturnType::Type all() const + { return _expression(); } + + /** \returns a row (or column) vector expression representing + * whether \b at \b least one coefficient of each respective column (or row) is \c true. + * + * \sa MatrixBase::any() */ + const typename ReturnType::Type any() const + { return _expression(); } + + /** \returns a row (or column) vector expression representing + * the number of \c true coefficients of each respective column (or row). 
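 * For instance, a minimal sketch (matrix contents are illustrative):
 * \code
 *   Matrix2d m;
 *   m << 1, -2,
 *       -3,  4;
 *   RowVector2i positives = (m.cwise() > Matrix2d::Zero()).colwise().count();
 * \endcode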
+ * + * Example: \include PartialRedux_count.cpp + * Output: \verbinclude PartialRedux_count.out + * + * \sa MatrixBase::count() */ + const PartialReduxExpr, Direction> count() const + { return _expression(); } + + /** \returns a 3x3 matrix expression of the cross product + * of each column or row of the referenced expression with the \a other vector. + * + * \geometry_module + * + * \sa MatrixBase::cross() */ + template + const CrossReturnType cross(const MatrixBase& other) const + { + EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(CrossReturnType,3,3) + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,3) + EIGEN_STATIC_ASSERT((ei_is_same_type::ret), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + + if(Direction==Vertical) + return (CrossReturnType() + << _expression().col(0).cross(other), + _expression().col(1).cross(other), + _expression().col(2).cross(other)).finished(); + else + return (CrossReturnType() + << _expression().row(0).cross(other), + _expression().row(1).cross(other), + _expression().row(2).cross(other)).finished(); + } + + protected: + ExpressionTypeNested m_matrix; +}; + +/** \array_module + * + * \returns a PartialRedux wrapper of *this providing additional partial reduction operations + * + * Example: \include MatrixBase_colwise.cpp + * Output: \verbinclude MatrixBase_colwise.out + * + * \sa rowwise(), class PartialRedux + */ +template +inline const PartialRedux +MatrixBase::colwise() const +{ + return derived(); +} + +/** \array_module + * + * \returns a PartialRedux wrapper of *this providing additional partial reduction operations + * + * Example: \include MatrixBase_rowwise.cpp + * Output: \verbinclude MatrixBase_rowwise.out + * + * \sa colwise(), class PartialRedux + */ +template +inline const PartialRedux +MatrixBase::rowwise() const +{ + return derived(); +} + +/** \returns a row or column vector expression of \c *this reduxed by \a func + * + * The template parameter \a BinaryOp is the type of the functor + * of the custom redux operator. Note that func must be an associative operator. + * + * \sa class PartialRedux, MatrixBase::colwise(), MatrixBase::rowwise() + */ +template +template +const typename PartialRedux::template ReduxReturnType::Type +PartialRedux::redux(const BinaryOp& func) const +{ + return typename ReduxReturnType::Type(_expression(), func); +} + +#endif // EIGEN_PARTIAL_REDUX_H diff --git a/extern/Eigen2/Eigen/src/Array/Random.h b/extern/Eigen2/Eigen/src/Array/Random.h new file mode 100644 index 00000000000..9185fe4a7d3 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Array/Random.h @@ -0,0 +1,156 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_RANDOM_H +#define EIGEN_RANDOM_H + +template struct ei_scalar_random_op EIGEN_EMPTY_STRUCT { + inline ei_scalar_random_op(void) {} + inline const Scalar operator() (int, int) const { return ei_random(); } +}; +template +struct ei_functor_traits > +{ enum { Cost = 5 * NumTraits::MulCost, PacketAccess = false, IsRepeatable = false }; }; + +/** \array_module + * + * \returns a random matrix (not an expression, the matrix is immediately evaluated). + * + * The parameters \a rows and \a cols are the number of rows and of columns of + * the returned matrix. Must be compatible with this MatrixBase type. + * + * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, + * it is redundant to pass \a rows and \a cols as arguments, so ei_random() should be used + * instead. + * + * \addexample RandomExample \label How to create a matrix with random coefficients + * + * Example: \include MatrixBase_random_int_int.cpp + * Output: \verbinclude MatrixBase_random_int_int.out + * + * \sa MatrixBase::setRandom(), MatrixBase::Random(int), MatrixBase::Random() + */ +template +inline const CwiseNullaryOp::Scalar>, Derived> +MatrixBase::Random(int rows, int cols) +{ + return NullaryExpr(rows, cols, ei_scalar_random_op()); +} + +/** \array_module + * + * \returns a random vector (not an expression, the vector is immediately evaluated). + * + * The parameter \a size is the size of the returned vector. + * Must be compatible with this MatrixBase type. + * + * \only_for_vectors + * + * This variant is meant to be used for dynamic-size vector types. For fixed-size types, + * it is redundant to pass \a size as argument, so ei_random() should be used + * instead. + * + * Example: \include MatrixBase_random_int.cpp + * Output: \verbinclude MatrixBase_random_int.out + * + * \sa MatrixBase::setRandom(), MatrixBase::Random(int,int), MatrixBase::Random() + */ +template +inline const CwiseNullaryOp::Scalar>, Derived> +MatrixBase::Random(int size) +{ + return NullaryExpr(size, ei_scalar_random_op()); +} + +/** \array_module + * + * \returns a fixed-size random matrix or vector + * (not an expression, the matrix is immediately evaluated). + * + * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you + * need to use the variants taking size arguments. + * + * Example: \include MatrixBase_random.cpp + * Output: \verbinclude MatrixBase_random.out + * + * \sa MatrixBase::setRandom(), MatrixBase::Random(int,int), MatrixBase::Random(int) + */ +template +inline const CwiseNullaryOp::Scalar>, Derived> +MatrixBase::Random() +{ + return NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, ei_scalar_random_op()); +} + +/** \array_module + * + * Sets all coefficients in this expression to random values. + * + * Example: \include MatrixBase_setRandom.cpp + * Output: \verbinclude MatrixBase_setRandom.out + * + * \sa class CwiseNullaryOp, setRandom(int), setRandom(int,int) + */ +template +inline Derived& MatrixBase::setRandom() +{ + return *this = Random(rows(), cols()); +} + +/** Resizes to the given \a size, and sets all coefficients in this expression to random values. 
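 * For instance, a minimal sketch:
 * \code
 *   VectorXd v;
 *   v.setRandom(5); // v now has size 5, filled with random coefficients
 * \endcode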
+ * + * \only_for_vectors + * + * Example: \include Matrix_setRandom_int.cpp + * Output: \verbinclude Matrix_setRandom_int.out + * + * \sa MatrixBase::setRandom(), setRandom(int,int), class CwiseNullaryOp, MatrixBase::Random() + */ +template +EIGEN_STRONG_INLINE Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& +Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::setRandom(int size) +{ + resize(size); + return setRandom(); +} + +/** Resizes to the given size, and sets all coefficients in this expression to random values. + * + * \param rows the new number of rows + * \param cols the new number of columns + * + * Example: \include Matrix_setRandom_int_int.cpp + * Output: \verbinclude Matrix_setRandom_int_int.out + * + * \sa MatrixBase::setRandom(), setRandom(int), class CwiseNullaryOp, MatrixBase::Random() + */ +template +EIGEN_STRONG_INLINE Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& +Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::setRandom(int rows, int cols) +{ + resize(rows, cols); + return setRandom(); +} + +#endif // EIGEN_RANDOM_H diff --git a/extern/Eigen2/Eigen/src/Array/Select.h b/extern/Eigen2/Eigen/src/Array/Select.h new file mode 100644 index 00000000000..9dc3fb1b27a --- /dev/null +++ b/extern/Eigen2/Eigen/src/Array/Select.h @@ -0,0 +1,159 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_SELECT_H +#define EIGEN_SELECT_H + +/** \array_module \ingroup Array + * + * \class Select + * + * \brief Expression of a coefficient wise version of the C++ ternary operator ?: + * + * \param ConditionMatrixType the type of the \em condition expression which must be a boolean matrix + * \param ThenMatrixType the type of the \em then expression + * \param ElseMatrixType the type of the \em else expression + * + * This class represents an expression of a coefficient wise version of the C++ ternary operator ?:. + * It is the return type of MatrixBase::select() and most of the time this is the only way it is used. 
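 * For instance, a minimal sketch (matrix names are illustrative):
 * \code
 *   Matrix3d m = Matrix3d::Random(), n = Matrix3d::Zero();
 *   Matrix3d positivePart = (m.cwise() > n).select(m, n); // keeps positive entries, zeroes the rest
 * \endcode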
+ * + * \sa MatrixBase::select(const MatrixBase&, const MatrixBase&) const + */ + +template +struct ei_traits > +{ + typedef typename ei_traits::Scalar Scalar; + typedef typename ConditionMatrixType::Nested ConditionMatrixNested; + typedef typename ThenMatrixType::Nested ThenMatrixNested; + typedef typename ElseMatrixType::Nested ElseMatrixNested; + enum { + RowsAtCompileTime = ConditionMatrixType::RowsAtCompileTime, + ColsAtCompileTime = ConditionMatrixType::ColsAtCompileTime, + MaxRowsAtCompileTime = ConditionMatrixType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = ConditionMatrixType::MaxColsAtCompileTime, + Flags = (unsigned int)ThenMatrixType::Flags & ElseMatrixType::Flags & HereditaryBits, + CoeffReadCost = ei_traits::type>::CoeffReadCost + + EIGEN_ENUM_MAX(ei_traits::type>::CoeffReadCost, + ei_traits::type>::CoeffReadCost) + }; +}; + +template +class Select : ei_no_assignment_operator, + public MatrixBase > +{ + public: + + EIGEN_GENERIC_PUBLIC_INTERFACE(Select) + + Select(const ConditionMatrixType& conditionMatrix, + const ThenMatrixType& thenMatrix, + const ElseMatrixType& elseMatrix) + : m_condition(conditionMatrix), m_then(thenMatrix), m_else(elseMatrix) + { + ei_assert(m_condition.rows() == m_then.rows() && m_condition.rows() == m_else.rows()); + ei_assert(m_condition.cols() == m_then.cols() && m_condition.cols() == m_else.cols()); + } + + int rows() const { return m_condition.rows(); } + int cols() const { return m_condition.cols(); } + + const Scalar coeff(int i, int j) const + { + if (m_condition.coeff(i,j)) + return m_then.coeff(i,j); + else + return m_else.coeff(i,j); + } + + const Scalar coeff(int i) const + { + if (m_condition.coeff(i)) + return m_then.coeff(i); + else + return m_else.coeff(i); + } + + protected: + const typename ConditionMatrixType::Nested m_condition; + const typename ThenMatrixType::Nested m_then; + const typename ElseMatrixType::Nested m_else; +}; + + +/** \array_module + * + * \returns a matrix where each coefficient (i,j) is equal to \a thenMatrix(i,j) + * if \c *this(i,j), and \a elseMatrix(i,j) otherwise. + * + * Example: \include MatrixBase_select.cpp + * Output: \verbinclude MatrixBase_select.out + * + * \sa class Select + */ +template +template +inline const Select +MatrixBase::select(const MatrixBase& thenMatrix, + const MatrixBase& elseMatrix) const +{ + return Select(derived(), thenMatrix.derived(), elseMatrix.derived()); +} + +/** \array_module + * + * Version of MatrixBase::select(const MatrixBase&, const MatrixBase&) with + * the \em else expression being a scalar value. + * + * \sa MatrixBase::select(const MatrixBase&, const MatrixBase&) const, class Select + */ +template +template +inline const Select > +MatrixBase::select(const MatrixBase& thenMatrix, + typename ThenDerived::Scalar elseScalar) const +{ + return Select >( + derived(), thenMatrix.derived(), ThenDerived::Constant(rows(),cols(),elseScalar)); +} + +/** \array_module + * + * Version of MatrixBase::select(const MatrixBase&, const MatrixBase&) with + * the \em then expression being a scalar value. 
+ * + * \sa MatrixBase::select(const MatrixBase&, const MatrixBase&) const, class Select + */ +template +template +inline const Select, ElseDerived > +MatrixBase::select(typename ElseDerived::Scalar thenScalar, + const MatrixBase& elseMatrix) const +{ + return Select,ElseDerived>( + derived(), ElseDerived::Constant(rows(),cols(),thenScalar), elseMatrix.derived()); +} + +#endif // EIGEN_SELECT_H diff --git a/extern/Eigen2/Eigen/src/Cholesky/CholeskyInstantiations.cpp b/extern/Eigen2/Eigen/src/Cholesky/CholeskyInstantiations.cpp new file mode 100644 index 00000000000..e7f40a2ce9c --- /dev/null +++ b/extern/Eigen2/Eigen/src/Cholesky/CholeskyInstantiations.cpp @@ -0,0 +1,35 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_EXTERN_INSTANTIATIONS +#define EIGEN_EXTERN_INSTANTIATIONS +#endif +#include "../../Core" +#undef EIGEN_EXTERN_INSTANTIATIONS + +#include "../../Cholesky" + +namespace Eigen { + EIGEN_CHOLESKY_MODULE_INSTANTIATE(); +} diff --git a/extern/Eigen2/Eigen/src/Cholesky/LDLT.h b/extern/Eigen2/Eigen/src/Cholesky/LDLT.h new file mode 100644 index 00000000000..205b78a6ded --- /dev/null +++ b/extern/Eigen2/Eigen/src/Cholesky/LDLT.h @@ -0,0 +1,198 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
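// For orientation, a minimal usage sketch of the LDLT class defined below.
// The matrix and vector names are illustrative only:
//
//   MatrixXd M = MatrixXd::Random(4,4);
//   MatrixXd A = M * M.transpose() + MatrixXd::Identity(4,4); // symmetric positive definite
//   VectorXd b = VectorXd::Random(4);
//   VectorXd x;
//   LDLT<MatrixXd> cholesky(A);
//   bool ok = cholesky.solve(b, &x);  // x now approximates the solution of A x = b
//   // equivalently: A.ldlt().solve(b, &x);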
+ +#ifndef EIGEN_LDLT_H +#define EIGEN_LDLT_H + +/** \ingroup cholesky_Module + * + * \class LDLT + * + * \brief Robust Cholesky decomposition of a matrix and associated features + * + * \param MatrixType the type of the matrix of which we are computing the LDL^T Cholesky decomposition + * + * This class performs a Cholesky decomposition without square root of a symmetric, positive definite + * matrix A such that A = L D L^* = U^* D U, where L is lower triangular with a unit diagonal + * and D is a diagonal matrix. + * + * Compared to a standard Cholesky decomposition, avoiding the square roots allows for faster and more + * stable computation. + * + * Note that during the decomposition, only the upper triangular part of A is considered. Therefore, + * the strict lower part does not have to store correct values. + * + * \sa MatrixBase::ldlt(), class LLT + */ +template class LDLT +{ + public: + + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef Matrix VectorType; + + LDLT(const MatrixType& matrix) + : m_matrix(matrix.rows(), matrix.cols()) + { + compute(matrix); + } + + /** \returns the lower triangular matrix L */ + inline Part matrixL(void) const { return m_matrix; } + + /** \returns the coefficients of the diagonal matrix D */ + inline DiagonalCoeffs vectorD(void) const { return m_matrix.diagonal(); } + + /** \returns true if the matrix is positive definite */ + inline bool isPositiveDefinite(void) const { return m_isPositiveDefinite; } + + template + bool solve(const MatrixBase &b, ResultType *result) const; + + template + bool solveInPlace(MatrixBase &bAndX) const; + + void compute(const MatrixType& matrix); + + protected: + /** \internal + * Used to compute and store the cholesky decomposition A = L D L^* = U^* D U. + * The strict upper part is used during the decomposition, the strict lower + * part correspond to the coefficients of L (its diagonal is equal to 1 and + * is not stored), and the diagonal entries correspond to D. + */ + MatrixType m_matrix; + + bool m_isPositiveDefinite; +}; + +/** Compute / recompute the LLT decomposition A = L D L^* = U^* D U of \a matrix + */ +template +void LDLT::compute(const MatrixType& a) +{ + assert(a.rows()==a.cols()); + const int size = a.rows(); + m_matrix.resize(size, size); + m_isPositiveDefinite = true; + const RealScalar eps = ei_sqrt(precision()); + + if (size<=1) + { + m_matrix = a; + return; + } + + // Let's preallocate a temporay vector to evaluate the matrix-vector product into it. + // Unlike the standard LLT decomposition, here we cannot evaluate it to the destination + // matrix because it a sub-row which is not compatible suitable for efficient packet evaluation. + // (at least if we assume the matrix is col-major) + Matrix _temporary(size); + + // Note that, in this algorithm the rows of the strict upper part of m_matrix is used to store + // column vector, thus the strange .conjugate() and .transpose()... 
+ + m_matrix.row(0) = a.row(0).conjugate(); + m_matrix.col(0).end(size-1) = m_matrix.row(0).end(size-1) / m_matrix.coeff(0,0); + for (int j = 1; j < size; ++j) + { + RealScalar tmp = ei_real(a.coeff(j,j) - (m_matrix.row(j).start(j) * m_matrix.col(j).start(j).conjugate()).coeff(0,0)); + m_matrix.coeffRef(j,j) = tmp; + + if (tmp < eps) + { + m_isPositiveDefinite = false; + return; + } + + int endSize = size-j-1; + if (endSize>0) + { + _temporary.end(endSize) = ( m_matrix.block(j+1,0, endSize, j) + * m_matrix.col(j).start(j).conjugate() ).lazy(); + + m_matrix.row(j).end(endSize) = a.row(j).end(endSize).conjugate() + - _temporary.end(endSize).transpose(); + + m_matrix.col(j).end(endSize) = m_matrix.row(j).end(endSize) / tmp; + } + } +} + +/** Computes the solution x of \f$ A x = b \f$ using the current decomposition of A. + * The result is stored in \a result + * + * \returns true in case of success, false otherwise. + * + * In other words, it computes \f$ b = A^{-1} b \f$ with + * \f$ {L^{*}}^{-1} D^{-1} L^{-1} b \f$ from right to left. + * + * \sa LDLT::solveInPlace(), MatrixBase::ldlt() + */ +template +template +bool LDLT +::solve(const MatrixBase &b, ResultType *result) const +{ + const int size = m_matrix.rows(); + ei_assert(size==b.rows() && "LLT::solve(): invalid number of rows of the right hand side matrix b"); + *result = b; + return solveInPlace(*result); +} + +/** This is the \em in-place version of solve(). + * + * \param bAndX represents both the right-hand side matrix b and result x. + * + * This version avoids a copy when the right hand side matrix b is not + * needed anymore. + * + * \sa LDLT::solve(), MatrixBase::ldlt() + */ +template +template +bool LDLT::solveInPlace(MatrixBase &bAndX) const +{ + const int size = m_matrix.rows(); + ei_assert(size==bAndX.rows()); + if (!m_isPositiveDefinite) + return false; + matrixL().solveTriangularInPlace(bAndX); + bAndX = (m_matrix.cwise().inverse().template part() * bAndX).lazy(); + m_matrix.adjoint().template part().solveTriangularInPlace(bAndX); + return true; +} + +/** \cholesky_module + * \returns the Cholesky decomposition without square root of \c *this + */ +template +inline const LDLT::PlainMatrixType> +MatrixBase::ldlt() const +{ + return derived(); +} + +#endif // EIGEN_LDLT_H diff --git a/extern/Eigen2/Eigen/src/Cholesky/LLT.h b/extern/Eigen2/Eigen/src/Cholesky/LLT.h new file mode 100644 index 00000000000..42c959f83a2 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Cholesky/LLT.h @@ -0,0 +1,219 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_LLT_H +#define EIGEN_LLT_H + +/** \ingroup cholesky_Module + * + * \class LLT + * + * \brief Standard Cholesky decomposition (LL^T) of a matrix and associated features + * + * \param MatrixType the type of the matrix of which we are computing the LL^T Cholesky decomposition + * + * This class performs a LL^T Cholesky decomposition of a symmetric, positive definite + * matrix A such that A = LL^* = U^*U, where L is lower triangular. + * + * While the Cholesky decomposition is particularly useful to solve selfadjoint problems like D^*D x = b, + * for that purpose, we recommend the Cholesky decomposition without square root which is more stable + * and even faster. Nevertheless, this standard Cholesky decomposition remains useful in many other + * situations like generalised eigen problems with hermitian matrices. + * + * Remember that Cholesky decompositions are not rank-revealing. This LLT decomposition is only stable on positive definite matrices, + * use LDLT instead for the semidefinite case. Also, do not use a Cholesky decomposition to determine whether a system of equations + * has a solution. + * + * \sa MatrixBase::llt(), class LDLT + */ + /* HEY THIS DOX IS DISABLED BECAUSE THERE's A BUG EITHER HERE OR IN LDLT ABOUT THAT (OR BOTH) + * Note that during the decomposition, only the upper triangular part of A is considered. Therefore, + * the strict lower part does not have to store correct values. + */ +template class LLT +{ + private: + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef Matrix VectorType; + + enum { + PacketSize = ei_packet_traits::size, + AlignmentMask = int(PacketSize)-1 + }; + + public: + + /** + * \brief Default Constructor. + * + * The default constructor is useful in cases in which the user intends to + * perform decompositions via LLT::compute(const MatrixType&). + */ + LLT() : m_matrix(), m_isInitialized(false) {} + + LLT(const MatrixType& matrix) + : m_matrix(matrix.rows(), matrix.cols()), + m_isInitialized(false) + { + compute(matrix); + } + + /** \returns the lower triangular matrix L */ + inline Part matrixL(void) const + { + ei_assert(m_isInitialized && "LLT is not initialized."); + return m_matrix; + } + + /** \deprecated */ + inline bool isPositiveDefinite(void) const { return m_isInitialized && m_isPositiveDefinite; } + + template + bool solve(const MatrixBase &b, ResultType *result) const; + + template + bool solveInPlace(MatrixBase &bAndX) const; + + void compute(const MatrixType& matrix); + + protected: + /** \internal + * Used to compute and store L + * The strict upper part is not used and even not initialized. + */ + MatrixType m_matrix; + bool m_isInitialized; + bool m_isPositiveDefinite; +}; + +/** Computes / recomputes the Cholesky decomposition A = LL^* = U^*U of \a matrix + */ +template +void LLT::compute(const MatrixType& a) +{ + assert(a.rows()==a.cols()); + m_isPositiveDefinite = true; + const int size = a.rows(); + m_matrix.resize(size, size); + // The biggest overall is the point of reference to which further diagonals + // are compared; if any diagonal is negligible compared + // to the largest overall, the algorithm bails. This cutoff is suggested + // in "Analysis of the Cholesky Decomposition of a Semi-definite Matrix" by + // Nicholas J. Higham. 
Also see "Accuracy and Stability of Numerical + // Algorithms" page 217, also by Higham. + const RealScalar cutoff = machine_epsilon() * size * a.diagonal().cwise().abs().maxCoeff(); + RealScalar x; + x = ei_real(a.coeff(0,0)); + m_matrix.coeffRef(0,0) = ei_sqrt(x); + if(size==1) + { + m_isInitialized = true; + return; + } + m_matrix.col(0).end(size-1) = a.row(0).end(size-1).adjoint() / ei_real(m_matrix.coeff(0,0)); + for (int j = 1; j < size; ++j) + { + x = ei_real(a.coeff(j,j)) - m_matrix.row(j).start(j).squaredNorm(); + if (x < cutoff) + { + m_isPositiveDefinite = false; + continue; + } + + m_matrix.coeffRef(j,j) = x = ei_sqrt(x); + + int endSize = size-j-1; + if (endSize>0) { + // Note that when all matrix columns have good alignment, then the following + // product is guaranteed to be optimal with respect to alignment. + m_matrix.col(j).end(endSize) = + (m_matrix.block(j+1, 0, endSize, j) * m_matrix.row(j).start(j).adjoint()).lazy(); + + // FIXME could use a.col instead of a.row + m_matrix.col(j).end(endSize) = (a.row(j).end(endSize).adjoint() + - m_matrix.col(j).end(endSize) ) / x; + } + } + + m_isInitialized = true; +} + +/** Computes the solution x of \f$ A x = b \f$ using the current decomposition of A. + * The result is stored in \a result + * + * \returns true always! If you need to check for existence of solutions, use another decomposition like LU, QR, or SVD. + * + * In other words, it computes \f$ b = A^{-1} b \f$ with + * \f$ {L^{*}}^{-1} L^{-1} b \f$ from right to left. + * + * Example: \include LLT_solve.cpp + * Output: \verbinclude LLT_solve.out + * + * \sa LLT::solveInPlace(), MatrixBase::llt() + */ +template +template +bool LLT::solve(const MatrixBase &b, ResultType *result) const +{ + ei_assert(m_isInitialized && "LLT is not initialized."); + const int size = m_matrix.rows(); + ei_assert(size==b.rows() && "LLT::solve(): invalid number of rows of the right hand side matrix b"); + return solveInPlace((*result) = b); +} + +/** This is the \em in-place version of solve(). + * + * \param bAndX represents both the right-hand side matrix b and result x. + * + * \returns true always! If you need to check for existence of solutions, use another decomposition like LU, QR, or SVD. + * + * This version avoids a copy when the right hand side matrix b is not + * needed anymore. + * + * \sa LLT::solve(), MatrixBase::llt() + */ +template +template +bool LLT::solveInPlace(MatrixBase &bAndX) const +{ + ei_assert(m_isInitialized && "LLT is not initialized."); + const int size = m_matrix.rows(); + ei_assert(size==bAndX.rows()); + matrixL().solveTriangularInPlace(bAndX); + m_matrix.adjoint().template part().solveTriangularInPlace(bAndX); + return true; +} + +/** \cholesky_module + * \returns the LLT decomposition of \c *this + */ +template +inline const LLT::PlainMatrixType> +MatrixBase::llt() const +{ + return LLT(derived()); +} + +#endif // EIGEN_LLT_H diff --git a/extern/Eigen2/Eigen/src/Core/Assign.h b/extern/Eigen2/Eigen/src/Core/Assign.h new file mode 100644 index 00000000000..57205075596 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/Assign.h @@ -0,0 +1,445 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. 
+// +// Copyright (C) 2007 Michael Olbrich +// Copyright (C) 2006-2008 Benoit Jacob +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_ASSIGN_H +#define EIGEN_ASSIGN_H + +/*************************************************************************** +* Part 1 : the logic deciding a strategy for vectorization and unrolling +***************************************************************************/ + +template +struct ei_assign_traits +{ +public: + enum { + DstIsAligned = Derived::Flags & AlignedBit, + SrcIsAligned = OtherDerived::Flags & AlignedBit, + SrcAlignment = DstIsAligned && SrcIsAligned ? Aligned : Unaligned + }; + +private: + enum { + InnerSize = int(Derived::Flags)&RowMajorBit + ? Derived::ColsAtCompileTime + : Derived::RowsAtCompileTime, + InnerMaxSize = int(Derived::Flags)&RowMajorBit + ? Derived::MaxColsAtCompileTime + : Derived::MaxRowsAtCompileTime, + PacketSize = ei_packet_traits::size + }; + + enum { + MightVectorize = (int(Derived::Flags) & int(OtherDerived::Flags) & ActualPacketAccessBit) + && ((int(Derived::Flags)&RowMajorBit)==(int(OtherDerived::Flags)&RowMajorBit)), + MayInnerVectorize = MightVectorize && int(InnerSize)!=Dynamic && int(InnerSize)%int(PacketSize)==0 + && int(DstIsAligned) && int(SrcIsAligned), + MayLinearVectorize = MightVectorize && (int(Derived::Flags) & int(OtherDerived::Flags) & LinearAccessBit), + MaySliceVectorize = MightVectorize && int(InnerMaxSize)>=3*PacketSize /* slice vectorization can be slow, so we only + want it if the slices are big, which is indicated by InnerMaxSize rather than InnerSize, think of the case + of a dynamic block in a fixed-size matrix */ + }; + +public: + enum { + Vectorization = int(MayInnerVectorize) ? int(InnerVectorization) + : int(MayLinearVectorize) ? int(LinearVectorization) + : int(MaySliceVectorize) ? int(SliceVectorization) + : int(NoVectorization) + }; + +private: + enum { + UnrollingLimit = EIGEN_UNROLLING_LIMIT * (int(Vectorization) == int(NoVectorization) ? 1 : int(PacketSize)), + MayUnrollCompletely = int(Derived::SizeAtCompileTime) * int(OtherDerived::CoeffReadCost) <= int(UnrollingLimit), + MayUnrollInner = int(InnerSize * OtherDerived::CoeffReadCost) <= int(UnrollingLimit) + }; + +public: + enum { + Unrolling = (int(Vectorization) == int(InnerVectorization) || int(Vectorization) == int(NoVectorization)) + ? ( + int(MayUnrollCompletely) ? int(CompleteUnrolling) + : int(MayUnrollInner) ? int(InnerUnrolling) + : int(NoUnrolling) + ) + : int(Vectorization) == int(LinearVectorization) + ? ( int(MayUnrollCompletely) && int(DstIsAligned) ? 
int(CompleteUnrolling) : int(NoUnrolling) ) + : int(NoUnrolling) + }; +}; + +/*************************************************************************** +* Part 2 : meta-unrollers +***************************************************************************/ + +/*********************** +*** No vectorization *** +***********************/ + +template +struct ei_assign_novec_CompleteUnrolling +{ + enum { + row = int(Derived1::Flags)&RowMajorBit + ? Index / int(Derived1::ColsAtCompileTime) + : Index % Derived1::RowsAtCompileTime, + col = int(Derived1::Flags)&RowMajorBit + ? Index % int(Derived1::ColsAtCompileTime) + : Index / Derived1::RowsAtCompileTime + }; + + EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) + { + dst.copyCoeff(row, col, src); + ei_assign_novec_CompleteUnrolling::run(dst, src); + } +}; + +template +struct ei_assign_novec_CompleteUnrolling +{ + EIGEN_STRONG_INLINE static void run(Derived1 &, const Derived2 &) {} +}; + +template +struct ei_assign_novec_InnerUnrolling +{ + EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src, int row_or_col) + { + const bool rowMajor = int(Derived1::Flags)&RowMajorBit; + const int row = rowMajor ? row_or_col : Index; + const int col = rowMajor ? Index : row_or_col; + dst.copyCoeff(row, col, src); + ei_assign_novec_InnerUnrolling::run(dst, src, row_or_col); + } +}; + +template +struct ei_assign_novec_InnerUnrolling +{ + EIGEN_STRONG_INLINE static void run(Derived1 &, const Derived2 &, int) {} +}; + +/************************** +*** Inner vectorization *** +**************************/ + +template +struct ei_assign_innervec_CompleteUnrolling +{ + enum { + row = int(Derived1::Flags)&RowMajorBit + ? Index / int(Derived1::ColsAtCompileTime) + : Index % Derived1::RowsAtCompileTime, + col = int(Derived1::Flags)&RowMajorBit + ? Index % int(Derived1::ColsAtCompileTime) + : Index / Derived1::RowsAtCompileTime, + SrcAlignment = ei_assign_traits::SrcAlignment + }; + + EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) + { + dst.template copyPacket(row, col, src); + ei_assign_innervec_CompleteUnrolling::size, Stop>::run(dst, src); + } +}; + +template +struct ei_assign_innervec_CompleteUnrolling +{ + EIGEN_STRONG_INLINE static void run(Derived1 &, const Derived2 &) {} +}; + +template +struct ei_assign_innervec_InnerUnrolling +{ + EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src, int row_or_col) + { + const int row = int(Derived1::Flags)&RowMajorBit ? row_or_col : Index; + const int col = int(Derived1::Flags)&RowMajorBit ? 
Index : row_or_col; + dst.template copyPacket(row, col, src); + ei_assign_innervec_InnerUnrolling::size, Stop>::run(dst, src, row_or_col); + } +}; + +template +struct ei_assign_innervec_InnerUnrolling +{ + EIGEN_STRONG_INLINE static void run(Derived1 &, const Derived2 &, int) {} +}; + +/*************************************************************************** +* Part 3 : implementation of all cases +***************************************************************************/ + +template::Vectorization, + int Unrolling = ei_assign_traits::Unrolling> +struct ei_assign_impl; + +/*********************** +*** No vectorization *** +***********************/ + +template +struct ei_assign_impl +{ + inline static void run(Derived1 &dst, const Derived2 &src) + { + const int innerSize = dst.innerSize(); + const int outerSize = dst.outerSize(); + for(int j = 0; j < outerSize; ++j) + for(int i = 0; i < innerSize; ++i) + { + if(int(Derived1::Flags)&RowMajorBit) + dst.copyCoeff(j, i, src); + else + dst.copyCoeff(i, j, src); + } + } +}; + +template +struct ei_assign_impl +{ + EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) + { + ei_assign_novec_CompleteUnrolling + ::run(dst, src); + } +}; + +template +struct ei_assign_impl +{ + EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) + { + const bool rowMajor = int(Derived1::Flags)&RowMajorBit; + const int innerSize = rowMajor ? Derived1::ColsAtCompileTime : Derived1::RowsAtCompileTime; + const int outerSize = dst.outerSize(); + for(int j = 0; j < outerSize; ++j) + ei_assign_novec_InnerUnrolling + ::run(dst, src, j); + } +}; + +/************************** +*** Inner vectorization *** +**************************/ + +template +struct ei_assign_impl +{ + inline static void run(Derived1 &dst, const Derived2 &src) + { + const int innerSize = dst.innerSize(); + const int outerSize = dst.outerSize(); + const int packetSize = ei_packet_traits::size; + for(int j = 0; j < outerSize; ++j) + for(int i = 0; i < innerSize; i+=packetSize) + { + if(int(Derived1::Flags)&RowMajorBit) + dst.template copyPacket(j, i, src); + else + dst.template copyPacket(i, j, src); + } + } +}; + +template +struct ei_assign_impl +{ + EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) + { + ei_assign_innervec_CompleteUnrolling + ::run(dst, src); + } +}; + +template +struct ei_assign_impl +{ + EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) + { + const bool rowMajor = int(Derived1::Flags)&RowMajorBit; + const int innerSize = rowMajor ? Derived1::ColsAtCompileTime : Derived1::RowsAtCompileTime; + const int outerSize = dst.outerSize(); + for(int j = 0; j < outerSize; ++j) + ei_assign_innervec_InnerUnrolling + ::run(dst, src, j); + } +}; + +/*************************** +*** Linear vectorization *** +***************************/ + +template +struct ei_assign_impl +{ + inline static void run(Derived1 &dst, const Derived2 &src) + { + const int size = dst.size(); + const int packetSize = ei_packet_traits::size; + const int alignedStart = ei_assign_traits::DstIsAligned ? 
0 + : ei_alignmentOffset(&dst.coeffRef(0), size); + const int alignedEnd = alignedStart + ((size-alignedStart)/packetSize)*packetSize; + + for(int index = 0; index < alignedStart; ++index) + dst.copyCoeff(index, src); + + for(int index = alignedStart; index < alignedEnd; index += packetSize) + { + dst.template copyPacket::SrcAlignment>(index, src); + } + + for(int index = alignedEnd; index < size; ++index) + dst.copyCoeff(index, src); + } +}; + +template +struct ei_assign_impl +{ + EIGEN_STRONG_INLINE static void run(Derived1 &dst, const Derived2 &src) + { + const int size = Derived1::SizeAtCompileTime; + const int packetSize = ei_packet_traits::size; + const int alignedSize = (size/packetSize)*packetSize; + + ei_assign_innervec_CompleteUnrolling::run(dst, src); + ei_assign_novec_CompleteUnrolling::run(dst, src); + } +}; + +/************************** +*** Slice vectorization *** +***************************/ + +template +struct ei_assign_impl +{ + inline static void run(Derived1 &dst, const Derived2 &src) + { + const int packetSize = ei_packet_traits::size; + const int packetAlignedMask = packetSize - 1; + const int innerSize = dst.innerSize(); + const int outerSize = dst.outerSize(); + const int alignedStep = (packetSize - dst.stride() % packetSize) & packetAlignedMask; + int alignedStart = ei_assign_traits::DstIsAligned ? 0 + : ei_alignmentOffset(&dst.coeffRef(0,0), innerSize); + + for(int i = 0; i < outerSize; ++i) + { + const int alignedEnd = alignedStart + ((innerSize-alignedStart) & ~packetAlignedMask); + + // do the non-vectorizable part of the assignment + for (int index = 0; index(i, index, src); + else + dst.template copyPacket(index, i, src); + } + + // do the non-vectorizable part of the assignment + for (int index = alignedEnd; index((alignedStart+alignedStep)%packetSize, innerSize); + } + } +}; + +/*************************************************************************** +* Part 4 : implementation of MatrixBase methods +***************************************************************************/ + +template +template +EIGEN_STRONG_INLINE Derived& MatrixBase + ::lazyAssign(const MatrixBase& other) +{ + EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived,OtherDerived) + EIGEN_STATIC_ASSERT((ei_is_same_type::ret), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + ei_assert(rows() == other.rows() && cols() == other.cols()); + ei_assign_impl::run(derived(),other.derived()); + return derived(); +} + +template +struct ei_assign_selector; + +template +struct ei_assign_selector { + EIGEN_STRONG_INLINE static Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.derived()); } +}; +template +struct ei_assign_selector { + EIGEN_STRONG_INLINE static Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.eval()); } +}; +template +struct ei_assign_selector { + EIGEN_STRONG_INLINE static Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.transpose()); } +}; +template +struct ei_assign_selector { + EIGEN_STRONG_INLINE static Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.transpose().eval()); } +}; + +template +template +EIGEN_STRONG_INLINE Derived& MatrixBase + ::operator=(const MatrixBase& other) +{ + return ei_assign_selector::run(derived(), other.derived()); +} + +#endif // EIGEN_ASSIGN_H diff --git a/extern/Eigen2/Eigen/src/Core/Block.h b/extern/Eigen2/Eigen/src/Core/Block.h new file mode 100644 
index 00000000000..7f422aa5c07 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/Block.h @@ -0,0 +1,752 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_BLOCK_H +#define EIGEN_BLOCK_H + +/** \class Block + * + * \brief Expression of a fixed-size or dynamic-size block + * + * \param MatrixType the type of the object in which we are taking a block + * \param BlockRows the number of rows of the block we are taking at compile time (optional) + * \param BlockCols the number of columns of the block we are taking at compile time (optional) + * \param _PacketAccess allows to enforce aligned loads and stores if set to ForceAligned. + * The default is AsRequested. This parameter is internaly used by Eigen + * in expressions such as \code mat.block() += other; \endcode and most of + * the time this is the only way it is used. + * \param _DirectAccessStatus \internal used for partial specialization + * + * This class represents an expression of either a fixed-size or dynamic-size block. It is the return + * type of MatrixBase::block(int,int,int,int) and MatrixBase::block(int,int) and + * most of the time this is the only way it is used. + * + * However, if you want to directly maniputate block expressions, + * for instance if you want to write a function returning such an expression, you + * will need to use this class. + * + * Here is an example illustrating the dynamic case: + * \include class_Block.cpp + * Output: \verbinclude class_Block.out + * + * \note Even though this expression has dynamic size, in the case where \a MatrixType + * has fixed size, this expression inherits a fixed maximal size which means that evaluating + * it does not cause a dynamic memory allocation. + * + * Here is an example illustrating the fixed-size case: + * \include class_FixedBlock.cpp + * Output: \verbinclude class_FixedBlock.out + * + * \sa MatrixBase::block(int,int,int,int), MatrixBase::block(int,int), class VectorBlock + */ + +template +struct ei_traits > +{ + typedef typename ei_traits::Scalar Scalar; + typedef typename ei_nested::type MatrixTypeNested; + typedef typename ei_unref::type _MatrixTypeNested; + enum{ + RowsAtCompileTime = ei_traits::RowsAtCompileTime == 1 ? 1 : BlockRows, + ColsAtCompileTime = ei_traits::ColsAtCompileTime == 1 ? 1 : BlockCols, + MaxRowsAtCompileTime = RowsAtCompileTime == 1 ? 1 + : (BlockRows==Dynamic ? 
int(ei_traits::MaxRowsAtCompileTime) : BlockRows), + MaxColsAtCompileTime = ColsAtCompileTime == 1 ? 1 + : (BlockCols==Dynamic ? int(ei_traits::MaxColsAtCompileTime) : BlockCols), + RowMajor = int(ei_traits::Flags)&RowMajorBit, + InnerSize = RowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime), + InnerMaxSize = RowMajor ? int(MaxColsAtCompileTime) : int(MaxRowsAtCompileTime), + MaskPacketAccessBit = (InnerMaxSize == Dynamic || (InnerSize >= ei_packet_traits::size)) + ? PacketAccessBit : 0, + FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1) ? LinearAccessBit : 0, + Flags = (ei_traits::Flags & (HereditaryBits | MaskPacketAccessBit | DirectAccessBit)) | FlagsLinearAccessBit, + CoeffReadCost = ei_traits::CoeffReadCost, + PacketAccess = _PacketAccess + }; + typedef typename ei_meta_if&, + Block >::ret AlignedDerivedType; +}; + +template class Block + : public MatrixBase > +{ + public: + + EIGEN_GENERIC_PUBLIC_INTERFACE(Block) + + class InnerIterator; + + /** Column or Row constructor + */ + inline Block(const MatrixType& matrix, int i) + : m_matrix(matrix), + // It is a row if and only if BlockRows==1 and BlockCols==MatrixType::ColsAtCompileTime, + // and it is a column if and only if BlockRows==MatrixType::RowsAtCompileTime and BlockCols==1, + // all other cases are invalid. + // The case a 1x1 matrix seems ambiguous, but the result is the same anyway. + m_startRow( (BlockRows==1) && (BlockCols==MatrixType::ColsAtCompileTime) ? i : 0), + m_startCol( (BlockRows==MatrixType::RowsAtCompileTime) && (BlockCols==1) ? i : 0), + m_blockRows(matrix.rows()), // if it is a row, then m_blockRows has a fixed-size of 1, so no pb to try to overwrite it + m_blockCols(matrix.cols()) // same for m_blockCols + { + ei_assert( (i>=0) && ( + ((BlockRows==1) && (BlockCols==MatrixType::ColsAtCompileTime) && i= 0 && BlockRows >= 1 && startRow + BlockRows <= matrix.rows() + && startCol >= 0 && BlockCols >= 1 && startCol + BlockCols <= matrix.cols()); + } + + /** Dynamic-size constructor + */ + inline Block(const MatrixType& matrix, + int startRow, int startCol, + int blockRows, int blockCols) + : m_matrix(matrix), m_startRow(startRow), m_startCol(startCol), + m_blockRows(blockRows), m_blockCols(blockCols) + { + ei_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows) + && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols)); + ei_assert(startRow >= 0 && blockRows >= 1 && startRow + blockRows <= matrix.rows() + && startCol >= 0 && blockCols >= 1 && startCol + blockCols <= matrix.cols()); + } + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block) + + inline int rows() const { return m_blockRows.value(); } + inline int cols() const { return m_blockCols.value(); } + + inline Scalar& coeffRef(int row, int col) + { + return m_matrix.const_cast_derived() + .coeffRef(row + m_startRow.value(), col + m_startCol.value()); + } + + inline const Scalar coeff(int row, int col) const + { + return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value()); + } + + inline Scalar& coeffRef(int index) + { + return m_matrix.const_cast_derived() + .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), + m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); + } + + inline const Scalar coeff(int index) const + { + return m_matrix + .coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), + m_startCol.value() + (RowsAtCompileTime == 1 ? 
index : 0)); + } + + template + inline PacketScalar packet(int row, int col) const + { + return m_matrix.template packet + (row + m_startRow.value(), col + m_startCol.value()); + } + + template + inline void writePacket(int row, int col, const PacketScalar& x) + { + m_matrix.const_cast_derived().template writePacket + (row + m_startRow.value(), col + m_startCol.value(), x); + } + + template + inline PacketScalar packet(int index) const + { + return m_matrix.template packet + (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), + m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); + } + + template + inline void writePacket(int index, const PacketScalar& x) + { + m_matrix.const_cast_derived().template writePacket + (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), + m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0), x); + } + + protected: + + const typename MatrixType::Nested m_matrix; + const ei_int_if_dynamic m_startRow; + const ei_int_if_dynamic m_startCol; + const ei_int_if_dynamic m_blockRows; + const ei_int_if_dynamic m_blockCols; +}; + +/** \internal */ +template +class Block + : public MapBase > +{ + public: + + _EIGEN_GENERIC_PUBLIC_INTERFACE(Block, MapBase) + + class InnerIterator; + typedef typename ei_traits::AlignedDerivedType AlignedDerivedType; + friend class Block; + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block) + + AlignedDerivedType _convertToForceAligned() + { + return Block + (m_matrix, Base::m_data, Base::m_rows.value(), Base::m_cols.value()); + } + + /** Column or Row constructor + */ + inline Block(const MatrixType& matrix, int i) + : Base(&matrix.const_cast_derived().coeffRef( + (BlockRows==1) && (BlockCols==MatrixType::ColsAtCompileTime) ? i : 0, + (BlockRows==MatrixType::RowsAtCompileTime) && (BlockCols==1) ? i : 0), + BlockRows==1 ? 1 : matrix.rows(), + BlockCols==1 ? 1 : matrix.cols()), + m_matrix(matrix) + { + ei_assert( (i>=0) && ( + ((BlockRows==1) && (BlockCols==MatrixType::ColsAtCompileTime) && i= 0 && BlockRows >= 1 && startRow + BlockRows <= matrix.rows() + && startCol >= 0 && BlockCols >= 1 && startCol + BlockCols <= matrix.cols()); + } + + /** Dynamic-size constructor + */ + inline Block(const MatrixType& matrix, + int startRow, int startCol, + int blockRows, int blockCols) + : Base(&matrix.const_cast_derived().coeffRef(startRow,startCol), blockRows, blockCols), + m_matrix(matrix) + { + ei_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows) + && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols)); + ei_assert(startRow >= 0 && blockRows >= 1 && startRow + blockRows <= matrix.rows() + && startCol >= 0 && blockCols >= 1 && startCol + blockCols <= matrix.cols()); + } + + inline int stride(void) const { return m_matrix.stride(); } + + protected: + + /** \internal used by allowAligned() */ + inline Block(const MatrixType& matrix, const Scalar* data, int blockRows, int blockCols) + : Base(data, blockRows, blockCols), m_matrix(matrix) + {} + + const typename MatrixType::Nested m_matrix; +}; + +/** \returns a dynamic-size expression of a block in *this. 
+ * + * \param startRow the first row in the block + * \param startCol the first column in the block + * \param blockRows the number of rows in the block + * \param blockCols the number of columns in the block + * + * \addexample BlockIntIntIntInt \label How to reference a sub-matrix (dynamic-size) + * + * Example: \include MatrixBase_block_int_int_int_int.cpp + * Output: \verbinclude MatrixBase_block_int_int_int_int.out + * + * \note Even though the returned expression has dynamic size, in the case + * when it is applied to a fixed-size matrix, it inherits a fixed maximal size, + * which means that evaluating it does not cause a dynamic memory allocation. + * + * \sa class Block, block(int,int) + */ +template +inline typename BlockReturnType::Type MatrixBase + ::block(int startRow, int startCol, int blockRows, int blockCols) +{ + return typename BlockReturnType::Type(derived(), startRow, startCol, blockRows, blockCols); +} + +/** This is the const version of block(int,int,int,int). */ +template +inline const typename BlockReturnType::Type MatrixBase + ::block(int startRow, int startCol, int blockRows, int blockCols) const +{ + return typename BlockReturnType::Type(derived(), startRow, startCol, blockRows, blockCols); +} + +/** \returns a dynamic-size expression of a segment (i.e. a vector block) in *this. + * + * \only_for_vectors + * + * \addexample SegmentIntInt \label How to reference a sub-vector (dynamic size) + * + * \param start the first coefficient in the segment + * \param size the number of coefficients in the segment + * + * Example: \include MatrixBase_segment_int_int.cpp + * Output: \verbinclude MatrixBase_segment_int_int.out + * + * \note Even though the returned expression has dynamic size, in the case + * when it is applied to a fixed-size vector, it inherits a fixed maximal size, + * which means that evaluating it does not cause a dynamic memory allocation. + * + * \sa class Block, segment(int) + */ +template +inline typename BlockReturnType::SubVectorType MatrixBase + ::segment(int start, int size) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return typename BlockReturnType::SubVectorType(derived(), RowsAtCompileTime == 1 ? 0 : start, + ColsAtCompileTime == 1 ? 0 : start, + RowsAtCompileTime == 1 ? 1 : size, + ColsAtCompileTime == 1 ? 1 : size); +} + +/** This is the const version of segment(int,int).*/ +template +inline const typename BlockReturnType::SubVectorType +MatrixBase::segment(int start, int size) const +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return typename BlockReturnType::SubVectorType(derived(), RowsAtCompileTime == 1 ? 0 : start, + ColsAtCompileTime == 1 ? 0 : start, + RowsAtCompileTime == 1 ? 1 : size, + ColsAtCompileTime == 1 ? 1 : size); +} + +/** \returns a dynamic-size expression of the first coefficients of *this. + * + * \only_for_vectors + * + * \param size the number of coefficients in the block + * + * \addexample BlockInt \label How to reference a sub-vector (fixed-size) + * + * Example: \include MatrixBase_start_int.cpp + * Output: \verbinclude MatrixBase_start_int.out + * + * \note Even though the returned expression has dynamic size, in the case + * when it is applied to a fixed-size vector, it inherits a fixed maximal size, + * which means that evaluating it does not cause a dynamic memory allocation. 
+ * + * \sa class Block, block(int,int) + */ +template +inline typename BlockReturnType::SubVectorType +MatrixBase::start(int size) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return Block + (derived(), 0, 0, + RowsAtCompileTime == 1 ? 1 : size, + ColsAtCompileTime == 1 ? 1 : size); +} + +/** This is the const version of start(int).*/ +template +inline const typename BlockReturnType::SubVectorType +MatrixBase::start(int size) const +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return Block + (derived(), 0, 0, + RowsAtCompileTime == 1 ? 1 : size, + ColsAtCompileTime == 1 ? 1 : size); +} + +/** \returns a dynamic-size expression of the last coefficients of *this. + * + * \only_for_vectors + * + * \param size the number of coefficients in the block + * + * \addexample BlockEnd \label How to reference the end of a vector (fixed-size) + * + * Example: \include MatrixBase_end_int.cpp + * Output: \verbinclude MatrixBase_end_int.out + * + * \note Even though the returned expression has dynamic size, in the case + * when it is applied to a fixed-size vector, it inherits a fixed maximal size, + * which means that evaluating it does not cause a dynamic memory allocation. + * + * \sa class Block, block(int,int) + */ +template +inline typename BlockReturnType::SubVectorType +MatrixBase::end(int size) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return Block + (derived(), + RowsAtCompileTime == 1 ? 0 : rows() - size, + ColsAtCompileTime == 1 ? 0 : cols() - size, + RowsAtCompileTime == 1 ? 1 : size, + ColsAtCompileTime == 1 ? 1 : size); +} + +/** This is the const version of end(int).*/ +template +inline const typename BlockReturnType::SubVectorType +MatrixBase::end(int size) const +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return Block + (derived(), + RowsAtCompileTime == 1 ? 0 : rows() - size, + ColsAtCompileTime == 1 ? 0 : cols() - size, + RowsAtCompileTime == 1 ? 1 : size, + ColsAtCompileTime == 1 ? 1 : size); +} + +/** \returns a fixed-size expression of a segment (i.e. a vector block) in \c *this + * + * \only_for_vectors + * + * The template parameter \a Size is the number of coefficients in the block + * + * \param start the index of the first element of the sub-vector + * + * Example: \include MatrixBase_template_int_segment.cpp + * Output: \verbinclude MatrixBase_template_int_segment.out + * + * \sa class Block + */ +template +template +inline typename BlockReturnType::SubVectorType +MatrixBase::segment(int start) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return Block + (derived(), RowsAtCompileTime == 1 ? 0 : start, + ColsAtCompileTime == 1 ? 0 : start); +} + +/** This is the const version of segment(int).*/ +template +template +inline const typename BlockReturnType::SubVectorType +MatrixBase::segment(int start) const +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return Block + (derived(), RowsAtCompileTime == 1 ? 0 : start, + ColsAtCompileTime == 1 ? 0 : start); +} + +/** \returns a fixed-size expression of the first coefficients of *this. 
+ * + * \only_for_vectors + * + * The template parameter \a Size is the number of coefficients in the block + * + * \addexample BlockStart \label How to reference the start of a vector (fixed-size) + * + * Example: \include MatrixBase_template_int_start.cpp + * Output: \verbinclude MatrixBase_template_int_start.out + * + * \sa class Block + */ +template +template +inline typename BlockReturnType::SubVectorType +MatrixBase::start() +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return Block(derived(), 0, 0); +} + +/** This is the const version of start().*/ +template +template +inline const typename BlockReturnType::SubVectorType +MatrixBase::start() const +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return Block(derived(), 0, 0); +} + +/** \returns a fixed-size expression of the last coefficients of *this. + * + * \only_for_vectors + * + * The template parameter \a Size is the number of coefficients in the block + * + * Example: \include MatrixBase_template_int_end.cpp + * Output: \verbinclude MatrixBase_template_int_end.out + * + * \sa class Block + */ +template +template +inline typename BlockReturnType::SubVectorType +MatrixBase::end() +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return Block + (derived(), + RowsAtCompileTime == 1 ? 0 : rows() - Size, + ColsAtCompileTime == 1 ? 0 : cols() - Size); +} + +/** This is the const version of end.*/ +template +template +inline const typename BlockReturnType::SubVectorType +MatrixBase::end() const +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return Block + (derived(), + RowsAtCompileTime == 1 ? 0 : rows() - Size, + ColsAtCompileTime == 1 ? 0 : cols() - Size); +} + +/** \returns a dynamic-size expression of a corner of *this. + * + * \param type the type of corner. Can be \a Eigen::TopLeft, \a Eigen::TopRight, + * \a Eigen::BottomLeft, \a Eigen::BottomRight. + * \param cRows the number of rows in the corner + * \param cCols the number of columns in the corner + * + * \addexample BlockCornerDynamicSize \label How to reference a sub-corner of a matrix + * + * Example: \include MatrixBase_corner_enum_int_int.cpp + * Output: \verbinclude MatrixBase_corner_enum_int_int.out + * + * \note Even though the returned expression has dynamic size, in the case + * when it is applied to a fixed-size matrix, it inherits a fixed maximal size, + * which means that evaluating it does not cause a dynamic memory allocation. 
+ * + * \sa class Block, block(int,int,int,int) + */ +template +inline typename BlockReturnType::Type MatrixBase + ::corner(CornerType type, int cRows, int cCols) +{ + switch(type) + { + default: + ei_assert(false && "Bad corner type."); + case TopLeft: + return typename BlockReturnType::Type(derived(), 0, 0, cRows, cCols); + case TopRight: + return typename BlockReturnType::Type(derived(), 0, cols() - cCols, cRows, cCols); + case BottomLeft: + return typename BlockReturnType::Type(derived(), rows() - cRows, 0, cRows, cCols); + case BottomRight: + return typename BlockReturnType::Type(derived(), rows() - cRows, cols() - cCols, cRows, cCols); + } +} + +/** This is the const version of corner(CornerType, int, int).*/ +template +inline const typename BlockReturnType::Type +MatrixBase::corner(CornerType type, int cRows, int cCols) const +{ + switch(type) + { + default: + ei_assert(false && "Bad corner type."); + case TopLeft: + return typename BlockReturnType::Type(derived(), 0, 0, cRows, cCols); + case TopRight: + return typename BlockReturnType::Type(derived(), 0, cols() - cCols, cRows, cCols); + case BottomLeft: + return typename BlockReturnType::Type(derived(), rows() - cRows, 0, cRows, cCols); + case BottomRight: + return typename BlockReturnType::Type(derived(), rows() - cRows, cols() - cCols, cRows, cCols); + } +} + +/** \returns a fixed-size expression of a corner of *this. + * + * \param type the type of corner. Can be \a Eigen::TopLeft, \a Eigen::TopRight, + * \a Eigen::BottomLeft, \a Eigen::BottomRight. + * + * The template parameters CRows and CCols arethe number of rows and columns in the corner. + * + * Example: \include MatrixBase_template_int_int_corner_enum.cpp + * Output: \verbinclude MatrixBase_template_int_int_corner_enum.out + * + * \sa class Block, block(int,int,int,int) + */ +template +template +inline typename BlockReturnType::Type +MatrixBase::corner(CornerType type) +{ + switch(type) + { + default: + ei_assert(false && "Bad corner type."); + case TopLeft: + return Block(derived(), 0, 0); + case TopRight: + return Block(derived(), 0, cols() - CCols); + case BottomLeft: + return Block(derived(), rows() - CRows, 0); + case BottomRight: + return Block(derived(), rows() - CRows, cols() - CCols); + } +} + +/** This is the const version of corner(CornerType).*/ +template +template +inline const typename BlockReturnType::Type +MatrixBase::corner(CornerType type) const +{ + switch(type) + { + default: + ei_assert(false && "Bad corner type."); + case TopLeft: + return Block(derived(), 0, 0); + case TopRight: + return Block(derived(), 0, cols() - CCols); + case BottomLeft: + return Block(derived(), rows() - CRows, 0); + case BottomRight: + return Block(derived(), rows() - CRows, cols() - CCols); + } +} + +/** \returns a fixed-size expression of a block in *this. + * + * The template parameters \a BlockRows and \a BlockCols are the number of + * rows and columns in the block. 
+ * + * \param startRow the first row in the block + * \param startCol the first column in the block + * + * \addexample BlockSubMatrixFixedSize \label How to reference a sub-matrix (fixed-size) + * + * Example: \include MatrixBase_block_int_int.cpp + * Output: \verbinclude MatrixBase_block_int_int.out + * + * \note since block is a templated member, the keyword template has to be used + * if the matrix type is also a template parameter: \code m.template block<3,3>(1,1); \endcode + * + * \sa class Block, block(int,int,int,int) + */ +template +template +inline typename BlockReturnType::Type +MatrixBase::block(int startRow, int startCol) +{ + return Block(derived(), startRow, startCol); +} + +/** This is the const version of block<>(int, int). */ +template +template +inline const typename BlockReturnType::Type +MatrixBase::block(int startRow, int startCol) const +{ + return Block(derived(), startRow, startCol); +} + +/** \returns an expression of the \a i-th column of *this. Note that the numbering starts at 0. + * + * \addexample BlockColumn \label How to reference a single column of a matrix + * + * Example: \include MatrixBase_col.cpp + * Output: \verbinclude MatrixBase_col.out + * + * \sa row(), class Block */ +template +inline typename MatrixBase::ColXpr +MatrixBase::col(int i) +{ + return ColXpr(derived(), i); +} + +/** This is the const version of col(). */ +template +inline const typename MatrixBase::ColXpr +MatrixBase::col(int i) const +{ + return ColXpr(derived(), i); +} + +/** \returns an expression of the \a i-th row of *this. Note that the numbering starts at 0. + * + * \addexample BlockRow \label How to reference a single row of a matrix + * + * Example: \include MatrixBase_row.cpp + * Output: \verbinclude MatrixBase_row.out + * + * \sa col(), class Block */ +template +inline typename MatrixBase::RowXpr +MatrixBase::row(int i) +{ + return RowXpr(derived(), i); +} + +/** This is the const version of row(). */ +template +inline const typename MatrixBase::RowXpr +MatrixBase::row(int i) const +{ + return RowXpr(derived(), i); +} + +#endif // EIGEN_BLOCK_H diff --git a/extern/Eigen2/Eigen/src/Core/CacheFriendlyProduct.h b/extern/Eigen2/Eigen/src/Core/CacheFriendlyProduct.h new file mode 100644 index 00000000000..b1362b0a80c --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/CacheFriendlyProduct.h @@ -0,0 +1,753 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
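
The kernel defined below, ei_cache_friendly_product(), computes a general matrix product with L2-cache blocking and explicit SIMD packets, accumulating the result into res. As a reading aid, here is a minimal scalar reference of the same contract; this sketch is illustrative only (it is not the code the patch adds), it assumes column-major operands addressed through their strides, and the function name is hypothetical.

// Reference sketch only: what the blocked/vectorized kernel below computes.
// Assumes column-major lhs (rows x depth), rhs (depth x cols), res (rows x cols),
// each addressed through its stride; res is accumulated into, not overwritten.
template<typename Scalar>
static void naive_product_reference(int rows, int cols, int depth,
                                    const Scalar* lhs, int lhsStride,
                                    const Scalar* rhs, int rhsStride,
                                    Scalar* res, int resStride)
{
  for (int j = 0; j < cols; ++j)
    for (int k = 0; k < depth; ++k)
      for (int i = 0; i < rows; ++i)
        res[i + j*resStride] += lhs[i + k*lhsStride] * rhs[k + j*rhsStride];
}

The real kernel below additionally swaps the operands when res is row-major and copies panels of the left-hand side into an aligned scratch block so that the inner loops run over contiguous, packet-aligned memory.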
+ +#ifndef EIGEN_CACHE_FRIENDLY_PRODUCT_H +#define EIGEN_CACHE_FRIENDLY_PRODUCT_H + +template +struct ei_L2_block_traits { + enum {width = 8 * ei_meta_sqrt::ret }; +}; + +#ifndef EIGEN_EXTERN_INSTANTIATIONS + +template +static void ei_cache_friendly_product( + int _rows, int _cols, int depth, + bool _lhsRowMajor, const Scalar* _lhs, int _lhsStride, + bool _rhsRowMajor, const Scalar* _rhs, int _rhsStride, + bool resRowMajor, Scalar* res, int resStride) +{ + const Scalar* EIGEN_RESTRICT lhs; + const Scalar* EIGEN_RESTRICT rhs; + int lhsStride, rhsStride, rows, cols; + bool lhsRowMajor; + + if (resRowMajor) + { + lhs = _rhs; + rhs = _lhs; + lhsStride = _rhsStride; + rhsStride = _lhsStride; + cols = _rows; + rows = _cols; + lhsRowMajor = !_rhsRowMajor; + ei_assert(_lhsRowMajor); + } + else + { + lhs = _lhs; + rhs = _rhs; + lhsStride = _lhsStride; + rhsStride = _rhsStride; + rows = _rows; + cols = _cols; + lhsRowMajor = _lhsRowMajor; + ei_assert(!_rhsRowMajor); + } + + typedef typename ei_packet_traits::type PacketType; + + enum { + PacketSize = sizeof(PacketType)/sizeof(Scalar), + #if (defined __i386__) + // i386 architecture provides only 8 xmm registers, + // so let's reduce the max number of rows processed at once. + MaxBlockRows = 4, + MaxBlockRows_ClampingMask = 0xFFFFFC, + #else + MaxBlockRows = 8, + MaxBlockRows_ClampingMask = 0xFFFFF8, + #endif + // maximal size of the blocks fitted in L2 cache + MaxL2BlockSize = ei_L2_block_traits::width + }; + + const bool resIsAligned = (PacketSize==1) || (((resStride%PacketSize) == 0) && (size_t(res)%16==0)); + + const int remainingSize = depth % PacketSize; + const int size = depth - remainingSize; // third dimension of the product clamped to packet boundaries + const int l2BlockRows = MaxL2BlockSize > rows ? rows : MaxL2BlockSize; + const int l2BlockCols = MaxL2BlockSize > cols ? cols : MaxL2BlockSize; + const int l2BlockSize = MaxL2BlockSize > size ? 
size : MaxL2BlockSize; + const int l2BlockSizeAligned = (1 + std::max(l2BlockSize,l2BlockCols)/PacketSize)*PacketSize; + const bool needRhsCopy = (PacketSize>1) && ((rhsStride%PacketSize!=0) || (size_t(rhs)%16!=0)); + Scalar* EIGEN_RESTRICT block = 0; + const int allocBlockSize = l2BlockRows*size; + block = ei_aligned_stack_new(Scalar, allocBlockSize); + Scalar* EIGEN_RESTRICT rhsCopy + = ei_aligned_stack_new(Scalar, l2BlockSizeAligned*l2BlockSizeAligned); + + // loops on each L2 cache friendly blocks of the result + for(int l2i=0; l2i0) + { + for (int k=l2k; k1 && resIsAligned) + { + // the result is aligned: let's do packet reduction + ei_pstore(&(localRes[0]), ei_padd(ei_pload(&(localRes[0])), ei_preduxp(&dst[0]))); + if (PacketSize==2) + ei_pstore(&(localRes[2]), ei_padd(ei_pload(&(localRes[2])), ei_preduxp(&(dst[2])))); + if (MaxBlockRows==8) + { + ei_pstore(&(localRes[4]), ei_padd(ei_pload(&(localRes[4])), ei_preduxp(&(dst[4])))); + if (PacketSize==2) + ei_pstore(&(localRes[6]), ei_padd(ei_pload(&(localRes[6])), ei_preduxp(&(dst[6])))); + } + } + else + { + // not aligned => per coeff packet reduction + localRes[0] += ei_predux(dst[0]); + localRes[1] += ei_predux(dst[1]); + localRes[2] += ei_predux(dst[2]); + localRes[3] += ei_predux(dst[3]); + if (MaxBlockRows==8) + { + localRes[4] += ei_predux(dst[4]); + localRes[5] += ei_predux(dst[5]); + localRes[6] += ei_predux(dst[6]); + localRes[7] += ei_predux(dst[7]); + } + } + } + } + if (l2blockRemainingRows>0) + { + int offsetblock = l2k * (l2blockRowEnd-l2i) + (l2blockRowEndBW-l2i)*(l2blockSizeEnd-l2k) - l2k*l2blockRemainingRows; + const Scalar* localB = &block[offsetblock]; + + for(int l1j=l2j; l1j=2) dst[1] = ei_pmadd(tmp, ei_pload(&(localB[k*l2blockRemainingRows+ PacketSize])), dst[1]); + if (l2blockRemainingRows>=3) dst[2] = ei_pmadd(tmp, ei_pload(&(localB[k*l2blockRemainingRows+2*PacketSize])), dst[2]); + if (l2blockRemainingRows>=4) dst[3] = ei_pmadd(tmp, ei_pload(&(localB[k*l2blockRemainingRows+3*PacketSize])), dst[3]); + if (MaxBlockRows==8) + { + if (l2blockRemainingRows>=5) dst[4] = ei_pmadd(tmp, ei_pload(&(localB[k*l2blockRemainingRows+4*PacketSize])), dst[4]); + if (l2blockRemainingRows>=6) dst[5] = ei_pmadd(tmp, ei_pload(&(localB[k*l2blockRemainingRows+5*PacketSize])), dst[5]); + if (l2blockRemainingRows>=7) dst[6] = ei_pmadd(tmp, ei_pload(&(localB[k*l2blockRemainingRows+6*PacketSize])), dst[6]); + if (l2blockRemainingRows>=8) dst[7] = ei_pmadd(tmp, ei_pload(&(localB[k*l2blockRemainingRows+7*PacketSize])), dst[7]); + } + } + + Scalar* EIGEN_RESTRICT localRes = &(res[l2blockRowEndBW + l1j*resStride]); + + // process the remaining rows once at a time + localRes[0] += ei_predux(dst[0]); + if (l2blockRemainingRows>=2) localRes[1] += ei_predux(dst[1]); + if (l2blockRemainingRows>=3) localRes[2] += ei_predux(dst[2]); + if (l2blockRemainingRows>=4) localRes[3] += ei_predux(dst[3]); + if (MaxBlockRows==8) + { + if (l2blockRemainingRows>=5) localRes[4] += ei_predux(dst[4]); + if (l2blockRemainingRows>=6) localRes[5] += ei_predux(dst[5]); + if (l2blockRemainingRows>=7) localRes[6] += ei_predux(dst[6]); + if (l2blockRemainingRows>=8) localRes[7] += ei_predux(dst[7]); + } + + } + } + } + } + } + if (PacketSize>1 && remainingSize) + { + if (lhsRowMajor) + { + for (int j=0; j +static EIGEN_DONT_INLINE void ei_cache_friendly_product_colmajor_times_vector( + int size, + const Scalar* lhs, int lhsStride, + const RhsType& rhs, + Scalar* res) +{ + #ifdef _EIGEN_ACCUMULATE_PACKETS + #error _EIGEN_ACCUMULATE_PACKETS has already been defined + 
#endif + #define _EIGEN_ACCUMULATE_PACKETS(A0,A13,A2) \ + ei_pstore(&res[j], \ + ei_padd(ei_pload(&res[j]), \ + ei_padd( \ + ei_padd(ei_pmul(ptmp0,EIGEN_CAT(ei_ploa , A0)(&lhs0[j])), \ + ei_pmul(ptmp1,EIGEN_CAT(ei_ploa , A13)(&lhs1[j]))), \ + ei_padd(ei_pmul(ptmp2,EIGEN_CAT(ei_ploa , A2)(&lhs2[j])), \ + ei_pmul(ptmp3,EIGEN_CAT(ei_ploa , A13)(&lhs3[j]))) ))) + + typedef typename ei_packet_traits::type Packet; + const int PacketSize = sizeof(Packet)/sizeof(Scalar); + + enum { AllAligned = 0, EvenAligned, FirstAligned, NoneAligned }; + const int columnsAtOnce = 4; + const int peels = 2; + const int PacketAlignedMask = PacketSize-1; + const int PeelAlignedMask = PacketSize*peels-1; + + // How many coeffs of the result do we have to skip to be aligned. + // Here we assume data are at least aligned on the base scalar type that is mandatory anyway. + const int alignedStart = ei_alignmentOffset(res,size); + const int alignedSize = PacketSize>1 ? alignedStart + ((size-alignedStart) & ~PacketAlignedMask) : 0; + const int peeledSize = peels>1 ? alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : alignedStart; + + const int alignmentStep = PacketSize>1 ? (PacketSize - lhsStride % PacketSize) & PacketAlignedMask : 0; + int alignmentPattern = alignmentStep==0 ? AllAligned + : alignmentStep==(PacketSize/2) ? EvenAligned + : FirstAligned; + + // we cannot assume the first element is aligned because of sub-matrices + const int lhsAlignmentOffset = ei_alignmentOffset(lhs,size); + + // find how many columns do we have to skip to be aligned with the result (if possible) + int skipColumns = 0; + if (PacketSize>1) + { + ei_internal_assert(size_t(lhs+lhsAlignmentOffset)%sizeof(Packet)==0 || size1) + { + /* explicit vectorization */ + // process initial unaligned coeffs + for (int j=0; jalignedStart) + { + switch(alignmentPattern) + { + case AllAligned: + for (int j = alignedStart; j1) + { + Packet A00, A01, A02, A03, A10, A11, A12, A13; + + A01 = ei_pload(&lhs1[alignedStart-1]); + A02 = ei_pload(&lhs2[alignedStart-2]); + A03 = ei_pload(&lhs3[alignedStart-3]); + + for (int j = alignedStart; j(A01,A11); + A12 = ei_pload(&lhs2[j-2+PacketSize]); ei_palign<2>(A02,A12); + A13 = ei_pload(&lhs3[j-3+PacketSize]); ei_palign<3>(A03,A13); + + A00 = ei_pload (&lhs0[j]); + A10 = ei_pload (&lhs0[j+PacketSize]); + A00 = ei_pmadd(ptmp0, A00, ei_pload(&res[j])); + A10 = ei_pmadd(ptmp0, A10, ei_pload(&res[j+PacketSize])); + + A00 = ei_pmadd(ptmp1, A01, A00); + A01 = ei_pload(&lhs1[j-1+2*PacketSize]); ei_palign<1>(A11,A01); + A00 = ei_pmadd(ptmp2, A02, A00); + A02 = ei_pload(&lhs2[j-2+2*PacketSize]); ei_palign<2>(A12,A02); + A00 = ei_pmadd(ptmp3, A03, A00); + ei_pstore(&res[j],A00); + A03 = ei_pload(&lhs3[j-3+2*PacketSize]); ei_palign<3>(A13,A03); + A10 = ei_pmadd(ptmp1, A11, A10); + A10 = ei_pmadd(ptmp2, A12, A10); + A10 = ei_pmadd(ptmp3, A13, A10); + ei_pstore(&res[j+PacketSize],A10); + } + } + for (int j = peeledSize; j1) + { + /* explicit vectorization */ + // process first unaligned result's coeffs + for (int j=0; j1); + #undef _EIGEN_ACCUMULATE_PACKETS +} + +// TODO add peeling to mask unaligned load/stores +template +static EIGEN_DONT_INLINE void ei_cache_friendly_product_rowmajor_times_vector( + const Scalar* lhs, int lhsStride, + const Scalar* rhs, int rhsSize, + ResType& res) +{ + #ifdef _EIGEN_ACCUMULATE_PACKETS + #error _EIGEN_ACCUMULATE_PACKETS has already been defined + #endif + + #define _EIGEN_ACCUMULATE_PACKETS(A0,A13,A2) {\ + Packet b = ei_pload(&rhs[j]); \ + ptmp0 = ei_pmadd(b, EIGEN_CAT(ei_ploa,A0) 
(&lhs0[j]), ptmp0); \ + ptmp1 = ei_pmadd(b, EIGEN_CAT(ei_ploa,A13)(&lhs1[j]), ptmp1); \ + ptmp2 = ei_pmadd(b, EIGEN_CAT(ei_ploa,A2) (&lhs2[j]), ptmp2); \ + ptmp3 = ei_pmadd(b, EIGEN_CAT(ei_ploa,A13)(&lhs3[j]), ptmp3); } + + typedef typename ei_packet_traits::type Packet; + const int PacketSize = sizeof(Packet)/sizeof(Scalar); + + enum { AllAligned=0, EvenAligned=1, FirstAligned=2, NoneAligned=3 }; + const int rowsAtOnce = 4; + const int peels = 2; + const int PacketAlignedMask = PacketSize-1; + const int PeelAlignedMask = PacketSize*peels-1; + const int size = rhsSize; + + // How many coeffs of the result do we have to skip to be aligned. + // Here we assume data are at least aligned on the base scalar type that is mandatory anyway. + const int alignedStart = ei_alignmentOffset(rhs, size); + const int alignedSize = PacketSize>1 ? alignedStart + ((size-alignedStart) & ~PacketAlignedMask) : 0; + const int peeledSize = peels>1 ? alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : alignedStart; + + const int alignmentStep = PacketSize>1 ? (PacketSize - lhsStride % PacketSize) & PacketAlignedMask : 0; + int alignmentPattern = alignmentStep==0 ? AllAligned + : alignmentStep==(PacketSize/2) ? EvenAligned + : FirstAligned; + + // we cannot assume the first element is aligned because of sub-matrices + const int lhsAlignmentOffset = ei_alignmentOffset(lhs,size); + + // find how many rows do we have to skip to be aligned with rhs (if possible) + int skipRows = 0; + if (PacketSize>1) + { + ei_internal_assert(size_t(lhs+lhsAlignmentOffset)%sizeof(Packet)==0 || size1) + { + /* explicit vectorization */ + Packet ptmp0 = ei_pset1(Scalar(0)), ptmp1 = ei_pset1(Scalar(0)), ptmp2 = ei_pset1(Scalar(0)), ptmp3 = ei_pset1(Scalar(0)); + + // process initial unaligned coeffs + // FIXME this loop get vectorized by the compiler ! + for (int j=0; jalignedStart) + { + switch(alignmentPattern) + { + case AllAligned: + for (int j = alignedStart; j1) + { + /* Here we proccess 4 rows with with two peeled iterations to hide + * tghe overhead of unaligned loads. Moreover unaligned loads are handled + * using special shift/move operations between the two aligned packets + * overlaping the desired unaligned packet. This is *much* more efficient + * than basic unaligned loads. 
+ */ + Packet A01, A02, A03, b, A11, A12, A13; + A01 = ei_pload(&lhs1[alignedStart-1]); + A02 = ei_pload(&lhs2[alignedStart-2]); + A03 = ei_pload(&lhs3[alignedStart-3]); + + for (int j = alignedStart; j(A01,A11); + A12 = ei_pload(&lhs2[j-2+PacketSize]); ei_palign<2>(A02,A12); + A13 = ei_pload(&lhs3[j-3+PacketSize]); ei_palign<3>(A03,A13); + + ptmp0 = ei_pmadd(b, ei_pload (&lhs0[j]), ptmp0); + ptmp1 = ei_pmadd(b, A01, ptmp1); + A01 = ei_pload(&lhs1[j-1+2*PacketSize]); ei_palign<1>(A11,A01); + ptmp2 = ei_pmadd(b, A02, ptmp2); + A02 = ei_pload(&lhs2[j-2+2*PacketSize]); ei_palign<2>(A12,A02); + ptmp3 = ei_pmadd(b, A03, ptmp3); + A03 = ei_pload(&lhs3[j-3+2*PacketSize]); ei_palign<3>(A13,A03); + + b = ei_pload(&rhs[j+PacketSize]); + ptmp0 = ei_pmadd(b, ei_pload (&lhs0[j+PacketSize]), ptmp0); + ptmp1 = ei_pmadd(b, A11, ptmp1); + ptmp2 = ei_pmadd(b, A12, ptmp2); + ptmp3 = ei_pmadd(b, A13, ptmp3); + } + } + for (int j = peeledSize; jalignedStart) + { + // process aligned rhs coeffs + if ((size_t(lhs0+alignedStart)%sizeof(Packet))==0) + for (int j = alignedStart;j1); + + #undef _EIGEN_ACCUMULATE_PACKETS +} + +#endif // EIGEN_CACHE_FRIENDLY_PRODUCT_H diff --git a/extern/Eigen2/Eigen/src/Core/Coeffs.h b/extern/Eigen2/Eigen/src/Core/Coeffs.h new file mode 100644 index 00000000000..23a84228b24 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/Coeffs.h @@ -0,0 +1,384 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2006-2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_COEFFS_H +#define EIGEN_COEFFS_H + +/** Short version: don't use this function, use + * \link operator()(int,int) const \endlink instead. + * + * Long version: this function is similar to + * \link operator()(int,int) const \endlink, but without the assertion. + * Use this for limiting the performance cost of debugging code when doing + * repeated coefficient access. Only use this when it is guaranteed that the + * parameters \a row and \a col are in range. + * + * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this + * function equivalent to \link operator()(int,int) const \endlink. + * + * \sa operator()(int,int) const, coeffRef(int,int), coeff(int) const + */ +template +EIGEN_STRONG_INLINE const typename ei_traits::Scalar MatrixBase + ::coeff(int row, int col) const +{ + ei_internal_assert(row >= 0 && row < rows() + && col >= 0 && col < cols()); + return derived().coeff(row, col); +} + +/** \returns the coefficient at given the given row and column. 
+ * + * \sa operator()(int,int), operator[](int) const + */ +template +EIGEN_STRONG_INLINE const typename ei_traits::Scalar MatrixBase + ::operator()(int row, int col) const +{ + ei_assert(row >= 0 && row < rows() + && col >= 0 && col < cols()); + return derived().coeff(row, col); +} + +/** Short version: don't use this function, use + * \link operator()(int,int) \endlink instead. + * + * Long version: this function is similar to + * \link operator()(int,int) \endlink, but without the assertion. + * Use this for limiting the performance cost of debugging code when doing + * repeated coefficient access. Only use this when it is guaranteed that the + * parameters \a row and \a col are in range. + * + * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this + * function equivalent to \link operator()(int,int) \endlink. + * + * \sa operator()(int,int), coeff(int, int) const, coeffRef(int) + */ +template +EIGEN_STRONG_INLINE typename ei_traits::Scalar& MatrixBase + ::coeffRef(int row, int col) +{ + ei_internal_assert(row >= 0 && row < rows() + && col >= 0 && col < cols()); + return derived().coeffRef(row, col); +} + +/** \returns a reference to the coefficient at given the given row and column. + * + * \sa operator()(int,int) const, operator[](int) + */ +template +EIGEN_STRONG_INLINE typename ei_traits::Scalar& MatrixBase + ::operator()(int row, int col) +{ + ei_assert(row >= 0 && row < rows() + && col >= 0 && col < cols()); + return derived().coeffRef(row, col); +} + +/** Short version: don't use this function, use + * \link operator[](int) const \endlink instead. + * + * Long version: this function is similar to + * \link operator[](int) const \endlink, but without the assertion. + * Use this for limiting the performance cost of debugging code when doing + * repeated coefficient access. Only use this when it is guaranteed that the + * parameter \a index is in range. + * + * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this + * function equivalent to \link operator[](int) const \endlink. + * + * \sa operator[](int) const, coeffRef(int), coeff(int,int) const + */ +template +EIGEN_STRONG_INLINE const typename ei_traits::Scalar MatrixBase + ::coeff(int index) const +{ + ei_internal_assert(index >= 0 && index < size()); + return derived().coeff(index); +} + +/** \returns the coefficient at given index. + * + * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. + * + * \sa operator[](int), operator()(int,int) const, x() const, y() const, + * z() const, w() const + */ +template +EIGEN_STRONG_INLINE const typename ei_traits::Scalar MatrixBase + ::operator[](int index) const +{ + ei_assert(index >= 0 && index < size()); + return derived().coeff(index); +} + +/** \returns the coefficient at given index. + * + * This is synonymous to operator[](int) const. + * + * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. + * + * \sa operator[](int), operator()(int,int) const, x() const, y() const, + * z() const, w() const + */ +template +EIGEN_STRONG_INLINE const typename ei_traits::Scalar MatrixBase + ::operator()(int index) const +{ + ei_assert(index >= 0 && index < size()); + return derived().coeff(index); +} + +/** Short version: don't use this function, use + * \link operator[](int) \endlink instead. + * + * Long version: this function is similar to + * \link operator[](int) \endlink, but without the assertion. 
+ * Use this for limiting the performance cost of debugging code when doing + * repeated coefficient access. Only use this when it is guaranteed that the + * parameters \a row and \a col are in range. + * + * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this + * function equivalent to \link operator[](int) \endlink. + * + * \sa operator[](int), coeff(int) const, coeffRef(int,int) + */ +template +EIGEN_STRONG_INLINE typename ei_traits::Scalar& MatrixBase + ::coeffRef(int index) +{ + ei_internal_assert(index >= 0 && index < size()); + return derived().coeffRef(index); +} + +/** \returns a reference to the coefficient at given index. + * + * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. + * + * \sa operator[](int) const, operator()(int,int), x(), y(), z(), w() + */ +template +EIGEN_STRONG_INLINE typename ei_traits::Scalar& MatrixBase + ::operator[](int index) +{ + ei_assert(index >= 0 && index < size()); + return derived().coeffRef(index); +} + +/** \returns a reference to the coefficient at given index. + * + * This is synonymous to operator[](int). + * + * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit. + * + * \sa operator[](int) const, operator()(int,int), x(), y(), z(), w() + */ +template +EIGEN_STRONG_INLINE typename ei_traits::Scalar& MatrixBase + ::operator()(int index) +{ + ei_assert(index >= 0 && index < size()); + return derived().coeffRef(index); +} + +/** equivalent to operator[](0). */ +template +EIGEN_STRONG_INLINE const typename ei_traits::Scalar MatrixBase + ::x() const { return (*this)[0]; } + +/** equivalent to operator[](1). */ +template +EIGEN_STRONG_INLINE const typename ei_traits::Scalar MatrixBase + ::y() const { return (*this)[1]; } + +/** equivalent to operator[](2). */ +template +EIGEN_STRONG_INLINE const typename ei_traits::Scalar MatrixBase + ::z() const { return (*this)[2]; } + +/** equivalent to operator[](3). */ +template +EIGEN_STRONG_INLINE const typename ei_traits::Scalar MatrixBase + ::w() const { return (*this)[3]; } + +/** equivalent to operator[](0). */ +template +EIGEN_STRONG_INLINE typename ei_traits::Scalar& MatrixBase + ::x() { return (*this)[0]; } + +/** equivalent to operator[](1). */ +template +EIGEN_STRONG_INLINE typename ei_traits::Scalar& MatrixBase + ::y() { return (*this)[1]; } + +/** equivalent to operator[](2). */ +template +EIGEN_STRONG_INLINE typename ei_traits::Scalar& MatrixBase + ::z() { return (*this)[2]; } + +/** equivalent to operator[](3). */ +template +EIGEN_STRONG_INLINE typename ei_traits::Scalar& MatrixBase + ::w() { return (*this)[3]; } + +/** \returns the packet of coefficients starting at the given row and column. It is your responsibility + * to ensure that a packet really starts there. This method is only available on expressions having the + * PacketAccessBit. + * + * The \a LoadMode parameter may have the value \a Aligned or \a Unaligned. Its effect is to select + * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets + * starting at an address which is a multiple of the packet size. 
+ */ +template +template +EIGEN_STRONG_INLINE typename ei_packet_traits::Scalar>::type +MatrixBase::packet(int row, int col) const +{ + ei_internal_assert(row >= 0 && row < rows() + && col >= 0 && col < cols()); + return derived().template packet(row,col); +} + +/** Stores the given packet of coefficients, at the given row and column of this expression. It is your responsibility + * to ensure that a packet really starts there. This method is only available on expressions having the + * PacketAccessBit. + * + * The \a LoadMode parameter may have the value \a Aligned or \a Unaligned. Its effect is to select + * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets + * starting at an address which is a multiple of the packet size. + */ +template +template +EIGEN_STRONG_INLINE void MatrixBase::writePacket +(int row, int col, const typename ei_packet_traits::Scalar>::type& x) +{ + ei_internal_assert(row >= 0 && row < rows() + && col >= 0 && col < cols()); + derived().template writePacket(row,col,x); +} + +/** \returns the packet of coefficients starting at the given index. It is your responsibility + * to ensure that a packet really starts there. This method is only available on expressions having the + * PacketAccessBit and the LinearAccessBit. + * + * The \a LoadMode parameter may have the value \a Aligned or \a Unaligned. Its effect is to select + * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets + * starting at an address which is a multiple of the packet size. + */ +template +template +EIGEN_STRONG_INLINE typename ei_packet_traits::Scalar>::type +MatrixBase::packet(int index) const +{ + ei_internal_assert(index >= 0 && index < size()); + return derived().template packet(index); +} + +/** Stores the given packet of coefficients, at the given index in this expression. It is your responsibility + * to ensure that a packet really starts there. This method is only available on expressions having the + * PacketAccessBit and the LinearAccessBit. + * + * The \a LoadMode parameter may have the value \a Aligned or \a Unaligned. Its effect is to select + * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets + * starting at an address which is a multiple of the packet size. + */ +template +template +EIGEN_STRONG_INLINE void MatrixBase::writePacket +(int index, const typename ei_packet_traits::Scalar>::type& x) +{ + ei_internal_assert(index >= 0 && index < size()); + derived().template writePacket(index,x); +} + +#ifndef EIGEN_PARSED_BY_DOXYGEN + +/** \internal Copies the coefficient at position (row,col) of other into *this. + * + * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code + * with usual assignments. + * + * Outside of this internal usage, this method has probably no usefulness. It is hidden in the public API dox. + */ +template +template +EIGEN_STRONG_INLINE void MatrixBase::copyCoeff(int row, int col, const MatrixBase& other) +{ + ei_internal_assert(row >= 0 && row < rows() + && col >= 0 && col < cols()); + derived().coeffRef(row, col) = other.derived().coeff(row, col); +} + +/** \internal Copies the coefficient at the given index of other into *this. + * + * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code + * with usual assignments. + * + * Outside of this internal usage, this method has probably no usefulness. It is hidden in the public API dox. 
+ */ +template +template +EIGEN_STRONG_INLINE void MatrixBase::copyCoeff(int index, const MatrixBase& other) +{ + ei_internal_assert(index >= 0 && index < size()); + derived().coeffRef(index) = other.derived().coeff(index); +} + +/** \internal Copies the packet at position (row,col) of other into *this. + * + * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code + * with usual assignments. + * + * Outside of this internal usage, this method has probably no usefulness. It is hidden in the public API dox. + */ +template +template +EIGEN_STRONG_INLINE void MatrixBase::copyPacket(int row, int col, const MatrixBase& other) +{ + ei_internal_assert(row >= 0 && row < rows() + && col >= 0 && col < cols()); + derived().template writePacket(row, col, + other.derived().template packet(row, col)); +} + +/** \internal Copies the packet at the given index of other into *this. + * + * This method is overridden in SwapWrapper, allowing swap() assignments to share 99% of their code + * with usual assignments. + * + * Outside of this internal usage, this method has probably no usefulness. It is hidden in the public API dox. + */ +template +template +EIGEN_STRONG_INLINE void MatrixBase::copyPacket(int index, const MatrixBase& other) +{ + ei_internal_assert(index >= 0 && index < size()); + derived().template writePacket(index, + other.derived().template packet(index)); +} + +#endif + +#endif // EIGEN_COEFFS_H diff --git a/extern/Eigen2/Eigen/src/Core/CommaInitializer.h b/extern/Eigen2/Eigen/src/Core/CommaInitializer.h new file mode 100644 index 00000000000..ed28e0ca371 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/CommaInitializer.h @@ -0,0 +1,149 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_COMMAINITIALIZER_H +#define EIGEN_COMMAINITIALIZER_H + +/** \class CommaInitializer + * + * \brief Helper class used by the comma initializer operator + * + * This class is internally used to implement the comma initializer feature. It is + * the return type of MatrixBase::operator<<, and most of the time this is the only + * way it is used. 
+ * + * \sa \ref MatrixBaseCommaInitRef "MatrixBase::operator<<", CommaInitializer::finished() + */ +template +struct CommaInitializer +{ + typedef typename ei_traits::Scalar Scalar; + inline CommaInitializer(MatrixType& mat, const Scalar& s) + : m_matrix(mat), m_row(0), m_col(1), m_currentBlockRows(1) + { + m_matrix.coeffRef(0,0) = s; + } + + template + inline CommaInitializer(MatrixType& mat, const MatrixBase& other) + : m_matrix(mat), m_row(0), m_col(other.cols()), m_currentBlockRows(other.rows()) + { + m_matrix.block(0, 0, other.rows(), other.cols()) = other; + } + + /* inserts a scalar value in the target matrix */ + CommaInitializer& operator,(const Scalar& s) + { + if (m_col==m_matrix.cols()) + { + m_row+=m_currentBlockRows; + m_col = 0; + m_currentBlockRows = 1; + ei_assert(m_row + CommaInitializer& operator,(const MatrixBase& other) + { + if (m_col==m_matrix.cols()) + { + m_row+=m_currentBlockRows; + m_col = 0; + m_currentBlockRows = other.rows(); + ei_assert(m_row+m_currentBlockRows<=m_matrix.rows() + && "Too many rows passed to comma initializer (operator<<)"); + } + ei_assert(m_col + (m_row, m_col) = other; + else + m_matrix.block(m_row, m_col, other.rows(), other.cols()) = other; + m_col += other.cols(); + return *this; + } + + inline ~CommaInitializer() + { + ei_assert((m_row+m_currentBlockRows) == m_matrix.rows() + && m_col == m_matrix.cols() + && "Too few coefficients passed to comma initializer (operator<<)"); + } + + /** \returns the built matrix once all its coefficients have been set. + * Calling finished is 100% optional. Its purpose is to write expressions + * like this: + * \code + * quaternion.fromRotationMatrix((Matrix3f() << axis0, axis1, axis2).finished()); + * \endcode + */ + inline MatrixType& finished() { return m_matrix; } + + MatrixType& m_matrix; // target matrix + int m_row; // current row id + int m_col; // current col id + int m_currentBlockRows; // current block height +}; + +/** \anchor MatrixBaseCommaInitRef + * Convenient operator to set the coefficients of a matrix. + * + * The coefficients must be provided in a row major order and exactly match + * the size of the matrix. Otherwise an assertion is raised. + * + * \addexample CommaInit \label How to easily set all the coefficients of a matrix + * + * Example: \include MatrixBase_set.cpp + * Output: \verbinclude MatrixBase_set.out + * + * \sa CommaInitializer::finished(), class CommaInitializer + */ +template +inline CommaInitializer MatrixBase::operator<< (const Scalar& s) +{ + return CommaInitializer(*static_cast(this), s); +} + +/** \sa operator<<(const Scalar&) */ +template +template +inline CommaInitializer +MatrixBase::operator<<(const MatrixBase& other) +{ + return CommaInitializer(*static_cast(this), other); +} + +#endif // EIGEN_COMMAINITIALIZER_H diff --git a/extern/Eigen2/Eigen/src/Core/CoreInstantiations.cpp b/extern/Eigen2/Eigen/src/Core/CoreInstantiations.cpp new file mode 100644 index 00000000000..56a9448917a --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/CoreInstantiations.cpp @@ -0,0 +1,47 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. 
+// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifdef EIGEN_EXTERN_INSTANTIATIONS +#undef EIGEN_EXTERN_INSTANTIATIONS +#endif + +#include "../../Core" + +namespace Eigen +{ + +#define EIGEN_INSTANTIATE_PRODUCT(TYPE) \ +template static void ei_cache_friendly_product( \ + int _rows, int _cols, int depth, \ + bool _lhsRowMajor, const TYPE* _lhs, int _lhsStride, \ + bool _rhsRowMajor, const TYPE* _rhs, int _rhsStride, \ + bool resRowMajor, TYPE* res, int resStride) + +EIGEN_INSTANTIATE_PRODUCT(float); +EIGEN_INSTANTIATE_PRODUCT(double); +EIGEN_INSTANTIATE_PRODUCT(int); +EIGEN_INSTANTIATE_PRODUCT(std::complex); +EIGEN_INSTANTIATE_PRODUCT(std::complex); + +} diff --git a/extern/Eigen2/Eigen/src/Core/Cwise.h b/extern/Eigen2/Eigen/src/Core/Cwise.h new file mode 100644 index 00000000000..0e92dce4e12 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/Cwise.h @@ -0,0 +1,211 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
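
Cwise, defined below, is the pseudo-expression returned by MatrixBase::cwise() that exposes coefficient-wise operations. A short usage sketch, assuming the usual <Eigen/Core> include path for this bundled Eigen2; the variable names are illustrative only:

#include <Eigen/Core>
using namespace Eigen;

void cwise_usage_sketch()
{
  Vector3f a(1.0f, 2.0f, 3.0f);
  Vector3f b(4.0f, 5.0f, 6.0f);
  Vector3f p = a.cwise() * b;      // coefficient-wise (Schur) product
  Vector3f q = a.cwise() / b;      // coefficient-wise quotient
  Vector3f lo = a.cwise().min(b);  // coefficient-wise minimum
  Vector3f hi = a.cwise().max(b);  // coefficient-wise maximum
}

These four operators are the ones defined in Core (see CwiseBinaryOp.h later in this patch); the remaining Cwise methods declared below (sqrt, exp, comparisons, ...) are implemented in the Array module.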
+ +#ifndef EIGEN_CWISE_H +#define EIGEN_CWISE_H + +/** \internal + * convenient macro to defined the return type of a cwise binary operation */ +#define EIGEN_CWISE_BINOP_RETURN_TYPE(OP) \ + CwiseBinaryOp::Scalar>, ExpressionType, OtherDerived> + +#define EIGEN_CWISE_PRODUCT_RETURN_TYPE \ + CwiseBinaryOp< \ + ei_scalar_product_op< \ + typename ei_scalar_product_traits< \ + typename ei_traits::Scalar, \ + typename ei_traits::Scalar \ + >::ReturnType \ + >, \ + ExpressionType, \ + OtherDerived \ + > + +/** \internal + * convenient macro to defined the return type of a cwise unary operation */ +#define EIGEN_CWISE_UNOP_RETURN_TYPE(OP) \ + CwiseUnaryOp::Scalar>, ExpressionType> + +/** \internal + * convenient macro to defined the return type of a cwise comparison to a scalar */ +#define EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(OP) \ + CwiseBinaryOp::Scalar>, ExpressionType, \ + NestByValue > + +/** \class Cwise + * + * \brief Pseudo expression providing additional coefficient-wise operations + * + * \param ExpressionType the type of the object on which to do coefficient-wise operations + * + * This class represents an expression with additional coefficient-wise features. + * It is the return type of MatrixBase::cwise() + * and most of the time this is the only way it is used. + * + * Note that some methods are defined in the \ref Array module. + * + * Example: \include MatrixBase_cwise_const.cpp + * Output: \verbinclude MatrixBase_cwise_const.out + * + * \sa MatrixBase::cwise() const, MatrixBase::cwise() + */ +template class Cwise +{ + public: + + typedef typename ei_traits::Scalar Scalar; + typedef typename ei_meta_if::ret, + ExpressionType, const ExpressionType&>::ret ExpressionTypeNested; + typedef CwiseUnaryOp, ExpressionType> ScalarAddReturnType; + + inline Cwise(const ExpressionType& matrix) : m_matrix(matrix) {} + + /** \internal */ + inline const ExpressionType& _expression() const { return m_matrix; } + + template + const EIGEN_CWISE_PRODUCT_RETURN_TYPE + operator*(const MatrixBase &other) const; + + template + const EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op) + operator/(const MatrixBase &other) const; + + template + const EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_min_op) + min(const MatrixBase &other) const; + + template + const EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_max_op) + max(const MatrixBase &other) const; + + const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_abs_op) abs() const; + const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_abs2_op) abs2() const; + const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_square_op) square() const; + const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_cube_op) cube() const; + const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_inverse_op) inverse() const; + const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_sqrt_op) sqrt() const; + const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_exp_op) exp() const; + const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_log_op) log() const; + const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_cos_op) cos() const; + const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_sin_op) sin() const; + const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_pow_op) pow(const Scalar& exponent) const; + + const ScalarAddReturnType + operator+(const Scalar& scalar) const; + + /** \relates Cwise */ + friend const ScalarAddReturnType + operator+(const Scalar& scalar, const Cwise& mat) + { return mat + scalar; } + + ExpressionType& operator+=(const Scalar& scalar); + + const ScalarAddReturnType + operator-(const Scalar& scalar) const; + + ExpressionType& operator-=(const Scalar& scalar); + + 
template + inline ExpressionType& operator*=(const MatrixBase &other); + + template + inline ExpressionType& operator/=(const MatrixBase &other); + + template const EIGEN_CWISE_BINOP_RETURN_TYPE(std::less) + operator<(const MatrixBase& other) const; + + template const EIGEN_CWISE_BINOP_RETURN_TYPE(std::less_equal) + operator<=(const MatrixBase& other) const; + + template const EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater) + operator>(const MatrixBase& other) const; + + template const EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater_equal) + operator>=(const MatrixBase& other) const; + + template const EIGEN_CWISE_BINOP_RETURN_TYPE(std::equal_to) + operator==(const MatrixBase& other) const; + + template const EIGEN_CWISE_BINOP_RETURN_TYPE(std::not_equal_to) + operator!=(const MatrixBase& other) const; + + // comparisons to a scalar value + const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::less) + operator<(Scalar s) const; + + const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::less_equal) + operator<=(Scalar s) const; + + const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::greater) + operator>(Scalar s) const; + + const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::greater_equal) + operator>=(Scalar s) const; + + const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::equal_to) + operator==(Scalar s) const; + + const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::not_equal_to) + operator!=(Scalar s) const; + + // allow to extend Cwise outside Eigen + #ifdef EIGEN_CWISE_PLUGIN + #include EIGEN_CWISE_PLUGIN + #endif + + protected: + ExpressionTypeNested m_matrix; +}; + +/** \returns a Cwise wrapper of *this providing additional coefficient-wise operations + * + * Example: \include MatrixBase_cwise_const.cpp + * Output: \verbinclude MatrixBase_cwise_const.out + * + * \sa class Cwise, cwise() + */ +template +inline const Cwise +MatrixBase::cwise() const +{ + return derived(); +} + +/** \returns a Cwise wrapper of *this providing additional coefficient-wise operations + * + * Example: \include MatrixBase_cwise.cpp + * Output: \verbinclude MatrixBase_cwise.out + * + * \sa class Cwise, cwise() const + */ +template +inline Cwise +MatrixBase::cwise() +{ + return derived(); +} + +#endif // EIGEN_CWISE_H diff --git a/extern/Eigen2/Eigen/src/Core/CwiseBinaryOp.h b/extern/Eigen2/Eigen/src/Core/CwiseBinaryOp.h new file mode 100644 index 00000000000..c4223e2204e --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/CwiseBinaryOp.h @@ -0,0 +1,304 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_CWISE_BINARY_OP_H +#define EIGEN_CWISE_BINARY_OP_H + +/** \class CwiseBinaryOp + * + * \brief Generic expression of a coefficient-wise operator between two matrices or vectors + * + * \param BinaryOp template functor implementing the operator + * \param Lhs the type of the left-hand side + * \param Rhs the type of the right-hand side + * + * This class represents an expression of a generic binary operator of two matrices or vectors. + * It is the return type of the operator+, operator-, and the Cwise methods, and most + * of the time this is the only way it is used. + * + * However, if you want to write a function returning such an expression, you + * will need to use this class. + * + * \sa MatrixBase::binaryExpr(const MatrixBase &,const CustomBinaryOp &) const, class CwiseUnaryOp, class CwiseNullaryOp + */ +template +struct ei_traits > +{ + // even though we require Lhs and Rhs to have the same scalar type (see CwiseBinaryOp constructor), + // we still want to handle the case when the result type is different. + typedef typename ei_result_of< + BinaryOp( + typename Lhs::Scalar, + typename Rhs::Scalar + ) + >::type Scalar; + typedef typename Lhs::Nested LhsNested; + typedef typename Rhs::Nested RhsNested; + typedef typename ei_unref::type _LhsNested; + typedef typename ei_unref::type _RhsNested; + enum { + LhsCoeffReadCost = _LhsNested::CoeffReadCost, + RhsCoeffReadCost = _RhsNested::CoeffReadCost, + LhsFlags = _LhsNested::Flags, + RhsFlags = _RhsNested::Flags, + RowsAtCompileTime = Lhs::RowsAtCompileTime, + ColsAtCompileTime = Lhs::ColsAtCompileTime, + MaxRowsAtCompileTime = Lhs::MaxRowsAtCompileTime, + MaxColsAtCompileTime = Lhs::MaxColsAtCompileTime, + Flags = (int(LhsFlags) | int(RhsFlags)) & ( + HereditaryBits + | (int(LhsFlags) & int(RhsFlags) & (LinearAccessBit | AlignedBit)) + | (ei_functor_traits::PacketAccess && ((int(LhsFlags) & RowMajorBit)==(int(RhsFlags) & RowMajorBit)) + ? (int(LhsFlags) & int(RhsFlags) & PacketAccessBit) : 0)), + CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + ei_functor_traits::Cost + }; +}; + +template +class CwiseBinaryOp : ei_no_assignment_operator, + public MatrixBase > +{ + public: + + EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseBinaryOp) + typedef typename ei_traits::LhsNested LhsNested; + typedef typename ei_traits::RhsNested RhsNested; + + EIGEN_STRONG_INLINE CwiseBinaryOp(const Lhs& lhs, const Rhs& rhs, const BinaryOp& func = BinaryOp()) + : m_lhs(lhs), m_rhs(rhs), m_functor(func) + { + // we require Lhs and Rhs to have the same scalar type. Currently there is no example of a binary functor + // that would take two operands of different types. If there were such an example, then this check should be + // moved to the BinaryOp functors, on a per-case basis. This would however require a change in the BinaryOp functors, as + // currently they take only one typename Scalar template parameter. + // It is tempting to always allow mixing different types but remember that this is often impossible in the vectorized paths. + // So allowing mixing different types gives very unexpected errors when enabling vectorization, when the user tries to + // add together a float matrix and a double matrix. + EIGEN_STATIC_ASSERT((ei_functor_allows_mixing_real_and_complex::ret + ? 
int(ei_is_same_type::ret) + : int(ei_is_same_type::ret)), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + // require the sizes to match + EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Lhs, Rhs) + ei_assert(lhs.rows() == rhs.rows() && lhs.cols() == rhs.cols()); + } + + EIGEN_STRONG_INLINE int rows() const { return m_lhs.rows(); } + EIGEN_STRONG_INLINE int cols() const { return m_lhs.cols(); } + + EIGEN_STRONG_INLINE const Scalar coeff(int row, int col) const + { + return m_functor(m_lhs.coeff(row, col), m_rhs.coeff(row, col)); + } + + template + EIGEN_STRONG_INLINE PacketScalar packet(int row, int col) const + { + return m_functor.packetOp(m_lhs.template packet(row, col), m_rhs.template packet(row, col)); + } + + EIGEN_STRONG_INLINE const Scalar coeff(int index) const + { + return m_functor(m_lhs.coeff(index), m_rhs.coeff(index)); + } + + template + EIGEN_STRONG_INLINE PacketScalar packet(int index) const + { + return m_functor.packetOp(m_lhs.template packet(index), m_rhs.template packet(index)); + } + + protected: + const LhsNested m_lhs; + const RhsNested m_rhs; + const BinaryOp m_functor; +}; + +/**\returns an expression of the difference of \c *this and \a other + * + * \note If you want to substract a given scalar from all coefficients, see Cwise::operator-(). + * + * \sa class CwiseBinaryOp, MatrixBase::operator-=(), Cwise::operator-() + */ +template +template +EIGEN_STRONG_INLINE const CwiseBinaryOp::Scalar>, + Derived, OtherDerived> +MatrixBase::operator-(const MatrixBase &other) const +{ + return CwiseBinaryOp, + Derived, OtherDerived>(derived(), other.derived()); +} + +/** replaces \c *this by \c *this - \a other. + * + * \returns a reference to \c *this + */ +template +template +EIGEN_STRONG_INLINE Derived & +MatrixBase::operator-=(const MatrixBase &other) +{ + return *this = *this - other; +} + +/** \relates MatrixBase + * + * \returns an expression of the sum of \c *this and \a other + * + * \note If you want to add a given scalar to all coefficients, see Cwise::operator+(). + * + * \sa class CwiseBinaryOp, MatrixBase::operator+=(), Cwise::operator+() + */ +template +template +EIGEN_STRONG_INLINE const CwiseBinaryOp::Scalar>, Derived, OtherDerived> +MatrixBase::operator+(const MatrixBase &other) const +{ + return CwiseBinaryOp, Derived, OtherDerived>(derived(), other.derived()); +} + +/** replaces \c *this by \c *this + \a other. 
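// A minimal usage sketch (illustrative only): operator+ and operator- build
// CwiseBinaryOp expressions, and the static assertion above rejects operands
// with different scalar types; an explicit cast<>() is the way around it.
#include <Eigen/Core>
using namespace Eigen;

MatrixXd sum_demo(const MatrixXd& a, const MatrixXd& b, const MatrixXf& f)
{
  MatrixXd c = a + b - a;             // same scalar type on both sides: fine
  // MatrixXd d = a + f;              // would trigger YOU_MIXED_DIFFERENT_NUMERIC_TYPES...
  MatrixXd d = a + f.cast<double>();  // cast the float matrix to double first
  return c + d;
}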
+ * + * \returns a reference to \c *this + */ +template +template +EIGEN_STRONG_INLINE Derived & +MatrixBase::operator+=(const MatrixBase& other) +{ + return *this = *this + other; +} + +/** \returns an expression of the Schur product (coefficient wise product) of *this and \a other + * + * Example: \include Cwise_product.cpp + * Output: \verbinclude Cwise_product.out + * + * \sa class CwiseBinaryOp, operator/(), square() + */ +template +template +EIGEN_STRONG_INLINE const EIGEN_CWISE_PRODUCT_RETURN_TYPE +Cwise::operator*(const MatrixBase &other) const +{ + return EIGEN_CWISE_PRODUCT_RETURN_TYPE(_expression(), other.derived()); +} + +/** \returns an expression of the coefficient-wise quotient of *this and \a other + * + * Example: \include Cwise_quotient.cpp + * Output: \verbinclude Cwise_quotient.out + * + * \sa class CwiseBinaryOp, operator*(), inverse() + */ +template +template +EIGEN_STRONG_INLINE const EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op) +Cwise::operator/(const MatrixBase &other) const +{ + return EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op)(_expression(), other.derived()); +} + +/** Replaces this expression by its coefficient-wise product with \a other. + * + * Example: \include Cwise_times_equal.cpp + * Output: \verbinclude Cwise_times_equal.out + * + * \sa operator*(), operator/=() + */ +template +template +inline ExpressionType& Cwise::operator*=(const MatrixBase &other) +{ + return m_matrix.const_cast_derived() = *this * other; +} + +/** Replaces this expression by its coefficient-wise quotient by \a other. + * + * Example: \include Cwise_slash_equal.cpp + * Output: \verbinclude Cwise_slash_equal.out + * + * \sa operator/(), operator*=() + */ +template +template +inline ExpressionType& Cwise::operator/=(const MatrixBase &other) +{ + return m_matrix.const_cast_derived() = *this / other; +} + +/** \returns an expression of the coefficient-wise min of *this and \a other + * + * Example: \include Cwise_min.cpp + * Output: \verbinclude Cwise_min.out + * + * \sa class CwiseBinaryOp + */ +template +template +EIGEN_STRONG_INLINE const EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_min_op) +Cwise::min(const MatrixBase &other) const +{ + return EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_min_op)(_expression(), other.derived()); +} + +/** \returns an expression of the coefficient-wise max of *this and \a other + * + * Example: \include Cwise_max.cpp + * Output: \verbinclude Cwise_max.out + * + * \sa class CwiseBinaryOp + */ +template +template +EIGEN_STRONG_INLINE const EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_max_op) +Cwise::max(const MatrixBase &other) const +{ + return EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_max_op)(_expression(), other.derived()); +} + +/** \returns an expression of a custom coefficient-wise operator \a func of *this and \a other + * + * The template parameter \a CustomBinaryOp is the type of the functor + * of the custom operator (see class CwiseBinaryOp for an example) + * + * \addexample CustomCwiseBinaryFunctors \label How to use custom coeff wise binary functors + * + * Here is an example illustrating the use of custom functors: + * \include class_CwiseBinaryOp.cpp + * Output: \verbinclude class_CwiseBinaryOp.out + * + * \sa class CwiseBinaryOp, MatrixBase::operator+, MatrixBase::operator-, Cwise::operator*, Cwise::operator/ + */ +template +template +EIGEN_STRONG_INLINE const CwiseBinaryOp +MatrixBase::binaryExpr(const MatrixBase &other, const CustomBinaryOp& func) const +{ + return CwiseBinaryOp(derived(), other.derived(), func); +} + +#endif // 
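// A minimal usage sketch (illustrative only): the coefficient-wise product,
// quotient, min/max, and a custom functor passed to binaryExpr(). The
// result_type typedef lets Eigen deduce the scalar type of the resulting
// expression (the ClampedDiff functor here is a made-up example, not part of Eigen).
#include <Eigen/Core>
using namespace Eigen;

struct ClampedDiff
{
  typedef double result_type;
  double operator()(double a, double b) const
  {
    double d = a - b;
    return d < 0.0 ? 0.0 : d;  // difference, clamped at zero
  }
};

MatrixXd cwise_binary_demo(const MatrixXd& a, const MatrixXd& b)
{
  MatrixXd prod = a.cwise() * b;                   // Schur (coefficient-wise) product
  MatrixXd quot = a.cwise() / b;                   // coefficient-wise quotient
  MatrixXd lo   = a.cwise().min(b);                // coefficient-wise min
  MatrixXd hi   = a.cwise().max(b);                // coefficient-wise max
  MatrixXd cd   = a.binaryExpr(b, ClampedDiff());  // custom coefficient-wise functor
  return prod + quot + lo + hi + cd;
}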
EIGEN_CWISE_BINARY_OP_H diff --git a/extern/Eigen2/Eigen/src/Core/CwiseNullaryOp.h b/extern/Eigen2/Eigen/src/Core/CwiseNullaryOp.h new file mode 100644 index 00000000000..4ee5b58afec --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/CwiseNullaryOp.h @@ -0,0 +1,763 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_CWISE_NULLARY_OP_H +#define EIGEN_CWISE_NULLARY_OP_H + +/** \class CwiseNullaryOp + * + * \brief Generic expression of a matrix where all coefficients are defined by a functor + * + * \param NullaryOp template functor implementing the operator + * + * This class represents an expression of a generic nullary operator. + * It is the return type of the Ones(), Zero(), Constant(), Identity() and Random() functions, + * and most of the time this is the only way it is used. + * + * However, if you want to write a function returning such an expression, you + * will need to use this class. + * + * \sa class CwiseUnaryOp, class CwiseBinaryOp, MatrixBase::NullaryExpr() + */ +template +struct ei_traits > : ei_traits +{ + enum { + Flags = (ei_traits::Flags + & ( HereditaryBits + | (ei_functor_has_linear_access::ret ? LinearAccessBit : 0) + | (ei_functor_traits::PacketAccess ? PacketAccessBit : 0))) + | (ei_functor_traits::IsRepeatable ? 
0 : EvalBeforeNestingBit), + CoeffReadCost = ei_functor_traits::Cost + }; +}; + +template +class CwiseNullaryOp : ei_no_assignment_operator, + public MatrixBase > +{ + public: + + EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseNullaryOp) + + CwiseNullaryOp(int rows, int cols, const NullaryOp& func = NullaryOp()) + : m_rows(rows), m_cols(cols), m_functor(func) + { + ei_assert(rows > 0 + && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows) + && cols > 0 + && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols)); + } + + EIGEN_STRONG_INLINE int rows() const { return m_rows.value(); } + EIGEN_STRONG_INLINE int cols() const { return m_cols.value(); } + + EIGEN_STRONG_INLINE const Scalar coeff(int rows, int cols) const + { + return m_functor(rows, cols); + } + + template + EIGEN_STRONG_INLINE PacketScalar packet(int, int) const + { + return m_functor.packetOp(); + } + + EIGEN_STRONG_INLINE const Scalar coeff(int index) const + { + if(RowsAtCompileTime == 1) + return m_functor(0, index); + else + return m_functor(index, 0); + } + + template + EIGEN_STRONG_INLINE PacketScalar packet(int) const + { + return m_functor.packetOp(); + } + + protected: + const ei_int_if_dynamic m_rows; + const ei_int_if_dynamic m_cols; + const NullaryOp m_functor; +}; + + +/** \returns an expression of a matrix defined by a custom functor \a func + * + * The parameters \a rows and \a cols are the number of rows and of columns of + * the returned matrix. Must be compatible with this MatrixBase type. + * + * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, + * it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used + * instead. + * + * The template parameter \a CustomNullaryOp is the type of the functor. + * + * \sa class CwiseNullaryOp + */ +template +template +EIGEN_STRONG_INLINE const CwiseNullaryOp +MatrixBase::NullaryExpr(int rows, int cols, const CustomNullaryOp& func) +{ + return CwiseNullaryOp(rows, cols, func); +} + +/** \returns an expression of a matrix defined by a custom functor \a func + * + * The parameter \a size is the size of the returned vector. + * Must be compatible with this MatrixBase type. + * + * \only_for_vectors + * + * This variant is meant to be used for dynamic-size vector types. For fixed-size types, + * it is redundant to pass \a size as argument, so Zero() should be used + * instead. + * + * The template parameter \a CustomNullaryOp is the type of the functor. + * + * \sa class CwiseNullaryOp + */ +template +template +EIGEN_STRONG_INLINE const CwiseNullaryOp +MatrixBase::NullaryExpr(int size, const CustomNullaryOp& func) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + ei_assert(IsVectorAtCompileTime); + if(RowsAtCompileTime == 1) return CwiseNullaryOp(1, size, func); + else return CwiseNullaryOp(size, 1, func); +} + +/** \returns an expression of a matrix defined by a custom functor \a func + * + * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you + * need to use the variants taking size arguments. + * + * The template parameter \a CustomNullaryOp is the type of the functor. + * + * \sa class CwiseNullaryOp + */ +template +template +EIGEN_STRONG_INLINE const CwiseNullaryOp +MatrixBase::NullaryExpr(const CustomNullaryOp& func) +{ + return CwiseNullaryOp(RowsAtCompileTime, ColsAtCompileTime, func); +} + +/** \returns an expression of a constant matrix of value \a value + * + * The parameters \a rows and \a cols are the number of rows and of columns of + * the returned matrix. 
Must be compatible with this MatrixBase type. + * + * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, + * it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used + * instead. + * + * The template parameter \a CustomNullaryOp is the type of the functor. + * + * \sa class CwiseNullaryOp + */ +template +EIGEN_STRONG_INLINE const typename MatrixBase::ConstantReturnType +MatrixBase::Constant(int rows, int cols, const Scalar& value) +{ + return NullaryExpr(rows, cols, ei_scalar_constant_op(value)); +} + +/** \returns an expression of a constant matrix of value \a value + * + * The parameter \a size is the size of the returned vector. + * Must be compatible with this MatrixBase type. + * + * \only_for_vectors + * + * This variant is meant to be used for dynamic-size vector types. For fixed-size types, + * it is redundant to pass \a size as argument, so Zero() should be used + * instead. + * + * The template parameter \a CustomNullaryOp is the type of the functor. + * + * \sa class CwiseNullaryOp + */ +template +EIGEN_STRONG_INLINE const typename MatrixBase::ConstantReturnType +MatrixBase::Constant(int size, const Scalar& value) +{ + return NullaryExpr(size, ei_scalar_constant_op(value)); +} + +/** \returns an expression of a constant matrix of value \a value + * + * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you + * need to use the variants taking size arguments. + * + * The template parameter \a CustomNullaryOp is the type of the functor. + * + * \sa class CwiseNullaryOp + */ +template +EIGEN_STRONG_INLINE const typename MatrixBase::ConstantReturnType +MatrixBase::Constant(const Scalar& value) +{ + EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) + return NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, ei_scalar_constant_op(value)); +} + +/** \returns true if all coefficients in this matrix are approximately equal to \a value, to within precision \a prec */ +template +bool MatrixBase::isApproxToConstant +(const Scalar& value, RealScalar prec) const +{ + for(int j = 0; j < cols(); ++j) + for(int i = 0; i < rows(); ++i) + if(!ei_isApprox(coeff(i, j), value, prec)) + return false; + return true; +} + +/** This is just an alias for isApproxToConstant(). + * + * \returns true if all coefficients in this matrix are approximately equal to \a value, to within precision \a prec */ +template +bool MatrixBase::isConstant +(const Scalar& value, RealScalar prec) const +{ + return isApproxToConstant(value, prec); +} + +/** Alias for setConstant(): sets all coefficients in this expression to \a value. + * + * \sa setConstant(), Constant(), class CwiseNullaryOp + */ +template +EIGEN_STRONG_INLINE void MatrixBase::fill(const Scalar& value) +{ + setConstant(value); +} + +/** Sets all coefficients in this expression to \a value. + * + * \sa fill(), setConstant(int,const Scalar&), setConstant(int,int,const Scalar&), setZero(), setOnes(), Constant(), class CwiseNullaryOp, setZero(), setOnes() + */ +template +EIGEN_STRONG_INLINE Derived& MatrixBase::setConstant(const Scalar& value) +{ + return derived() = Constant(rows(), cols(), value); +} + +/** Resizes to the given \a size, and sets all coefficients in this expression to the given \a value. 
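// A minimal usage sketch (illustrative only): the three Constant() variants and
// the setConstant()/fill() aliases documented above.
#include <Eigen/Core>
using namespace Eigen;

void constant_demo()
{
  MatrixXd m = MatrixXd::Constant(2, 3, 1.5);  // dynamic-size matrix: pass rows and cols
  VectorXd v = VectorXd::Constant(4, 1.5);     // dynamic-size vector: pass the size
  Matrix3d f = Matrix3d::Constant(1.5);        // fixed-size: no size arguments

  m.setConstant(2.5);                          // same effect as m.fill(2.5)
  bool ok = m.isApproxToConstant(2.5) && v.isConstant(1.5) && f.isConstant(1.5);
  (void)ok;
}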
+ * + * \only_for_vectors + * + * Example: \include Matrix_set_int.cpp + * Output: \verbinclude Matrix_setConstant_int.out + * + * \sa MatrixBase::setConstant(const Scalar&), setConstant(int,int,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&) + */ +template +EIGEN_STRONG_INLINE Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& +Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::setConstant(int size, const Scalar& value) +{ + resize(size); + return setConstant(value); +} + +/** Resizes to the given size, and sets all coefficients in this expression to the given \a value. + * + * \param rows the new number of rows + * \param cols the new number of columns + * + * Example: \include Matrix_setConstant_int_int.cpp + * Output: \verbinclude Matrix_setConstant_int_int.out + * + * \sa MatrixBase::setConstant(const Scalar&), setConstant(int,const Scalar&), class CwiseNullaryOp, MatrixBase::Constant(const Scalar&) + */ +template +EIGEN_STRONG_INLINE Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& +Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::setConstant(int rows, int cols, const Scalar& value) +{ + resize(rows, cols); + return setConstant(value); +} + + +// zero: + +/** \returns an expression of a zero matrix. + * + * The parameters \a rows and \a cols are the number of rows and of columns of + * the returned matrix. Must be compatible with this MatrixBase type. + * + * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, + * it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used + * instead. + * + * \addexample Zero \label How to take get a zero matrix + * + * Example: \include MatrixBase_zero_int_int.cpp + * Output: \verbinclude MatrixBase_zero_int_int.out + * + * \sa Zero(), Zero(int) + */ +template +EIGEN_STRONG_INLINE const typename MatrixBase::ConstantReturnType +MatrixBase::Zero(int rows, int cols) +{ + return Constant(rows, cols, Scalar(0)); +} + +/** \returns an expression of a zero vector. + * + * The parameter \a size is the size of the returned vector. + * Must be compatible with this MatrixBase type. + * + * \only_for_vectors + * + * This variant is meant to be used for dynamic-size vector types. For fixed-size types, + * it is redundant to pass \a size as argument, so Zero() should be used + * instead. + * + * Example: \include MatrixBase_zero_int.cpp + * Output: \verbinclude MatrixBase_zero_int.out + * + * \sa Zero(), Zero(int,int) + */ +template +EIGEN_STRONG_INLINE const typename MatrixBase::ConstantReturnType +MatrixBase::Zero(int size) +{ + return Constant(size, Scalar(0)); +} + +/** \returns an expression of a fixed-size zero matrix or vector. + * + * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you + * need to use the variants taking size arguments. + * + * Example: \include MatrixBase_zero.cpp + * Output: \verbinclude MatrixBase_zero.out + * + * \sa Zero(int), Zero(int,int) + */ +template +EIGEN_STRONG_INLINE const typename MatrixBase::ConstantReturnType +MatrixBase::Zero() +{ + return Constant(Scalar(0)); +} + +/** \returns true if *this is approximately equal to the zero matrix, + * within the precision given by \a prec. 
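// A minimal usage sketch (illustrative only): the three Zero() variants
// documented above.
#include <Eigen/Core>
using namespace Eigen;

void zero_demo()
{
  MatrixXd m = MatrixXd::Zero(2, 3);  // dynamic-size matrix: rows and cols required
  VectorXd v = VectorXd::Zero(5);     // dynamic-size vector: size required
  Matrix3d f = Matrix3d::Zero();      // fixed-size: no arguments
  m(0, 0) = v(0) + f(0, 0);
}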
+ * + * Example: \include MatrixBase_isZero.cpp + * Output: \verbinclude MatrixBase_isZero.out + * + * \sa class CwiseNullaryOp, Zero() + */ +template +bool MatrixBase::isZero(RealScalar prec) const +{ + for(int j = 0; j < cols(); ++j) + for(int i = 0; i < rows(); ++i) + if(!ei_isMuchSmallerThan(coeff(i, j), static_cast(1), prec)) + return false; + return true; +} + +/** Sets all coefficients in this expression to zero. + * + * Example: \include MatrixBase_setZero.cpp + * Output: \verbinclude MatrixBase_setZero.out + * + * \sa class CwiseNullaryOp, Zero() + */ +template +EIGEN_STRONG_INLINE Derived& MatrixBase::setZero() +{ + return setConstant(Scalar(0)); +} + +/** Resizes to the given \a size, and sets all coefficients in this expression to zero. + * + * \only_for_vectors + * + * Example: \include Matrix_setZero_int.cpp + * Output: \verbinclude Matrix_setZero_int.out + * + * \sa MatrixBase::setZero(), setZero(int,int), class CwiseNullaryOp, MatrixBase::Zero() + */ +template +EIGEN_STRONG_INLINE Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& +Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::setZero(int size) +{ + resize(size); + return setConstant(Scalar(0)); +} + +/** Resizes to the given size, and sets all coefficients in this expression to zero. + * + * \param rows the new number of rows + * \param cols the new number of columns + * + * Example: \include Matrix_setZero_int_int.cpp + * Output: \verbinclude Matrix_setZero_int_int.out + * + * \sa MatrixBase::setZero(), setZero(int), class CwiseNullaryOp, MatrixBase::Zero() + */ +template +EIGEN_STRONG_INLINE Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& +Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::setZero(int rows, int cols) +{ + resize(rows, cols); + return setConstant(Scalar(0)); +} + +// ones: + +/** \returns an expression of a matrix where all coefficients equal one. + * + * The parameters \a rows and \a cols are the number of rows and of columns of + * the returned matrix. Must be compatible with this MatrixBase type. + * + * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, + * it is redundant to pass \a rows and \a cols as arguments, so Ones() should be used + * instead. + * + * \addexample One \label How to get a matrix with all coefficients equal one + * + * Example: \include MatrixBase_ones_int_int.cpp + * Output: \verbinclude MatrixBase_ones_int_int.out + * + * \sa Ones(), Ones(int), isOnes(), class Ones + */ +template +EIGEN_STRONG_INLINE const typename MatrixBase::ConstantReturnType +MatrixBase::Ones(int rows, int cols) +{ + return Constant(rows, cols, Scalar(1)); +} + +/** \returns an expression of a vector where all coefficients equal one. + * + * The parameter \a size is the size of the returned vector. + * Must be compatible with this MatrixBase type. + * + * \only_for_vectors + * + * This variant is meant to be used for dynamic-size vector types. For fixed-size types, + * it is redundant to pass \a size as argument, so Ones() should be used + * instead. + * + * Example: \include MatrixBase_ones_int.cpp + * Output: \verbinclude MatrixBase_ones_int.out + * + * \sa Ones(), Ones(int,int), isOnes(), class Ones + */ +template +EIGEN_STRONG_INLINE const typename MatrixBase::ConstantReturnType +MatrixBase::Ones(int size) +{ + return Constant(size, Scalar(1)); +} + +/** \returns an expression of a fixed-size matrix or vector where all coefficients equal one. + * + * This variant is only for fixed-size MatrixBase types. 
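// A minimal usage sketch (illustrative only): setZero() keeps the current size,
// while the setZero(int) / setZero(int,int) overloads on Matrix resize first.
#include <Eigen/Core>
using namespace Eigen;

void set_zero_demo()
{
  MatrixXd m(2, 2);
  m.setZero();      // keep the 2x2 size, set every coefficient to 0
  m.setZero(4, 5);  // resize to 4x5, then set to 0

  VectorXd v;
  v.setZero(10);    // resize the vector to 10, then set to 0

  bool ok = m.isZero() && v.isZero();
  (void)ok;
}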
For dynamic-size types, you + * need to use the variants taking size arguments. + * + * Example: \include MatrixBase_ones.cpp + * Output: \verbinclude MatrixBase_ones.out + * + * \sa Ones(int), Ones(int,int), isOnes(), class Ones + */ +template +EIGEN_STRONG_INLINE const typename MatrixBase::ConstantReturnType +MatrixBase::Ones() +{ + return Constant(Scalar(1)); +} + +/** \returns true if *this is approximately equal to the matrix where all coefficients + * are equal to 1, within the precision given by \a prec. + * + * Example: \include MatrixBase_isOnes.cpp + * Output: \verbinclude MatrixBase_isOnes.out + * + * \sa class CwiseNullaryOp, Ones() + */ +template +bool MatrixBase::isOnes +(RealScalar prec) const +{ + return isApproxToConstant(Scalar(1), prec); +} + +/** Sets all coefficients in this expression to one. + * + * Example: \include MatrixBase_setOnes.cpp + * Output: \verbinclude MatrixBase_setOnes.out + * + * \sa class CwiseNullaryOp, Ones() + */ +template +EIGEN_STRONG_INLINE Derived& MatrixBase::setOnes() +{ + return setConstant(Scalar(1)); +} + +/** Resizes to the given \a size, and sets all coefficients in this expression to one. + * + * \only_for_vectors + * + * Example: \include Matrix_setOnes_int.cpp + * Output: \verbinclude Matrix_setOnes_int.out + * + * \sa MatrixBase::setOnes(), setOnes(int,int), class CwiseNullaryOp, MatrixBase::Ones() + */ +template +EIGEN_STRONG_INLINE Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& +Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::setOnes(int size) +{ + resize(size); + return setConstant(Scalar(1)); +} + +/** Resizes to the given size, and sets all coefficients in this expression to one. + * + * \param rows the new number of rows + * \param cols the new number of columns + * + * Example: \include Matrix_setOnes_int_int.cpp + * Output: \verbinclude Matrix_setOnes_int_int.out + * + * \sa MatrixBase::setOnes(), setOnes(int), class CwiseNullaryOp, MatrixBase::Ones() + */ +template +EIGEN_STRONG_INLINE Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& +Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::setOnes(int rows, int cols) +{ + resize(rows, cols); + return setConstant(Scalar(1)); +} + +// Identity: + +/** \returns an expression of the identity matrix (not necessarily square). + * + * The parameters \a rows and \a cols are the number of rows and of columns of + * the returned matrix. Must be compatible with this MatrixBase type. + * + * This variant is meant to be used for dynamic-size matrix types. For fixed-size types, + * it is redundant to pass \a rows and \a cols as arguments, so Identity() should be used + * instead. + * + * \addexample Identity \label How to get an identity matrix + * + * Example: \include MatrixBase_identity_int_int.cpp + * Output: \verbinclude MatrixBase_identity_int_int.out + * + * \sa Identity(), setIdentity(), isIdentity() + */ +template +EIGEN_STRONG_INLINE const typename MatrixBase::IdentityReturnType +MatrixBase::Identity(int rows, int cols) +{ + return NullaryExpr(rows, cols, ei_scalar_identity_op()); +} + +/** \returns an expression of the identity matrix (not necessarily square). + * + * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you + * need to use the variant taking size arguments. 
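// A minimal usage sketch (illustrative only): Ones() and the rectangular
// Identity(rows, cols) variant documented above.
#include <Eigen/Core>
using namespace Eigen;

void ones_identity_demo()
{
  MatrixXd o = MatrixXd::Ones(3, 4);      // dynamic-size matrix of ones
  VectorXd u = VectorXd::Ones(6);         // dynamic-size vector of ones
  MatrixXd i = MatrixXd::Identity(3, 4);  // the identity need not be square

  o.setOnes();
  bool ok = o.isOnes() && (u.sum() == 6.0) && (i(0, 0) == 1.0) && (i(0, 1) == 0.0);
  (void)ok;
}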
+ * + * Example: \include MatrixBase_identity.cpp + * Output: \verbinclude MatrixBase_identity.out + * + * \sa Identity(int,int), setIdentity(), isIdentity() + */ +template +EIGEN_STRONG_INLINE const typename MatrixBase::IdentityReturnType +MatrixBase::Identity() +{ + EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) + return NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, ei_scalar_identity_op()); +} + +/** \returns true if *this is approximately equal to the identity matrix + * (not necessarily square), + * within the precision given by \a prec. + * + * Example: \include MatrixBase_isIdentity.cpp + * Output: \verbinclude MatrixBase_isIdentity.out + * + * \sa class CwiseNullaryOp, Identity(), Identity(int,int), setIdentity() + */ +template +bool MatrixBase::isIdentity +(RealScalar prec) const +{ + for(int j = 0; j < cols(); ++j) + { + for(int i = 0; i < rows(); ++i) + { + if(i == j) + { + if(!ei_isApprox(coeff(i, j), static_cast(1), prec)) + return false; + } + else + { + if(!ei_isMuchSmallerThan(coeff(i, j), static_cast(1), prec)) + return false; + } + } + } + return true; +} + +template=16)> +struct ei_setIdentity_impl +{ + static EIGEN_STRONG_INLINE Derived& run(Derived& m) + { + return m = Derived::Identity(m.rows(), m.cols()); + } +}; + +template +struct ei_setIdentity_impl +{ + static EIGEN_STRONG_INLINE Derived& run(Derived& m) + { + m.setZero(); + const int size = std::min(m.rows(), m.cols()); + for(int i = 0; i < size; ++i) m.coeffRef(i,i) = typename Derived::Scalar(1); + return m; + } +}; + +/** Writes the identity expression (not necessarily square) into *this. + * + * Example: \include MatrixBase_setIdentity.cpp + * Output: \verbinclude MatrixBase_setIdentity.out + * + * \sa class CwiseNullaryOp, Identity(), Identity(int,int), isIdentity() + */ +template +EIGEN_STRONG_INLINE Derived& MatrixBase::setIdentity() +{ + return ei_setIdentity_impl::run(derived()); +} + +/** Resizes to the given size, and writes the identity expression (not necessarily square) into *this. + * + * \param rows the new number of rows + * \param cols the new number of columns + * + * Example: \include Matrix_setIdentity_int_int.cpp + * Output: \verbinclude Matrix_setIdentity_int_int.out + * + * \sa MatrixBase::setIdentity(), class CwiseNullaryOp, MatrixBase::Identity() + */ +template +EIGEN_STRONG_INLINE Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& +Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::setIdentity(int rows, int cols) +{ + resize(rows, cols); + return setIdentity(); +} + +/** \returns an expression of the i-th unit (basis) vector. + * + * \only_for_vectors + * + * \sa MatrixBase::Unit(int), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() + */ +template +EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::Unit(int size, int i) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return BasisReturnType(SquareMatrixType::Identity(size,size), i); +} + +/** \returns an expression of the i-th unit (basis) vector. + * + * \only_for_vectors + * + * This variant is for fixed-size vector only. 
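// A minimal usage sketch (illustrative only): setIdentity(), isIdentity() and
// the dynamic-size basis vector Unit(size, i) documented above.
#include <Eigen/Core>
using namespace Eigen;

void identity_basis_demo()
{
  MatrixXd m(4, 4);
  m.setIdentity();                     // writes the identity into m

  VectorXd e2 = VectorXd::Unit(5, 2);  // 5-vector with a 1 at index 2, zeros elsewhere

  bool ok = m.isIdentity() && (e2(2) == 1.0);
  (void)ok;
}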
+ * + * \sa MatrixBase::Unit(int,int), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() + */ +template +EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::Unit(int i) +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return BasisReturnType(SquareMatrixType::Identity(),i); +} + +/** \returns an expression of the X axis unit vector (1{,0}^*) + * + * \only_for_vectors + * + * \sa MatrixBase::Unit(int,int), MatrixBase::Unit(int), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() + */ +template +EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::UnitX() +{ return Derived::Unit(0); } + +/** \returns an expression of the Y axis unit vector (0,1{,0}^*) + * + * \only_for_vectors + * + * \sa MatrixBase::Unit(int,int), MatrixBase::Unit(int), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() + */ +template +EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::UnitY() +{ return Derived::Unit(1); } + +/** \returns an expression of the Z axis unit vector (0,0,1{,0}^*) + * + * \only_for_vectors + * + * \sa MatrixBase::Unit(int,int), MatrixBase::Unit(int), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() + */ +template +EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::UnitZ() +{ return Derived::Unit(2); } + +/** \returns an expression of the W axis unit vector (0,0,0,1) + * + * \only_for_vectors + * + * \sa MatrixBase::Unit(int,int), MatrixBase::Unit(int), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW() + */ +template +EIGEN_STRONG_INLINE const typename MatrixBase::BasisReturnType MatrixBase::UnitW() +{ return Derived::Unit(3); } + +#endif // EIGEN_CWISE_NULLARY_OP_H diff --git a/extern/Eigen2/Eigen/src/Core/CwiseUnaryOp.h b/extern/Eigen2/Eigen/src/Core/CwiseUnaryOp.h new file mode 100644 index 00000000000..076d568e023 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/CwiseUnaryOp.h @@ -0,0 +1,229 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
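// A minimal usage sketch (illustrative only): the fixed-size basis-vector
// helpers defined above.
#include <Eigen/Core>
using namespace Eigen;

Vector3f axis_demo()
{
  Vector4d e1 = Vector4d::Unit(1);  // fixed-size variant of Unit(): (0, 1, 0, 0)
  Vector4d w  = Vector4d::UnitW();  // (0, 0, 0, 1)
  (void)e1; (void)w;
  return Vector3f::UnitX() + Vector3f::UnitY() + Vector3f::UnitZ();  // (1, 1, 1)
}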
+ +#ifndef EIGEN_CWISE_UNARY_OP_H +#define EIGEN_CWISE_UNARY_OP_H + +/** \class CwiseUnaryOp + * + * \brief Generic expression of a coefficient-wise unary operator of a matrix or a vector + * + * \param UnaryOp template functor implementing the operator + * \param MatrixType the type of the matrix we are applying the unary operator + * + * This class represents an expression of a generic unary operator of a matrix or a vector. + * It is the return type of the unary operator-, of a matrix or a vector, and most + * of the time this is the only way it is used. + * + * \sa MatrixBase::unaryExpr(const CustomUnaryOp &) const, class CwiseBinaryOp, class CwiseNullaryOp + */ +template +struct ei_traits > + : ei_traits +{ + typedef typename ei_result_of< + UnaryOp(typename MatrixType::Scalar) + >::type Scalar; + typedef typename MatrixType::Nested MatrixTypeNested; + typedef typename ei_unref::type _MatrixTypeNested; + enum { + Flags = (_MatrixTypeNested::Flags & ( + HereditaryBits | LinearAccessBit | AlignedBit + | (ei_functor_traits::PacketAccess ? PacketAccessBit : 0))), + CoeffReadCost = _MatrixTypeNested::CoeffReadCost + ei_functor_traits::Cost + }; +}; + +template +class CwiseUnaryOp : ei_no_assignment_operator, + public MatrixBase > +{ + public: + + EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryOp) + + inline CwiseUnaryOp(const MatrixType& mat, const UnaryOp& func = UnaryOp()) + : m_matrix(mat), m_functor(func) {} + + EIGEN_STRONG_INLINE int rows() const { return m_matrix.rows(); } + EIGEN_STRONG_INLINE int cols() const { return m_matrix.cols(); } + + EIGEN_STRONG_INLINE const Scalar coeff(int row, int col) const + { + return m_functor(m_matrix.coeff(row, col)); + } + + template + EIGEN_STRONG_INLINE PacketScalar packet(int row, int col) const + { + return m_functor.packetOp(m_matrix.template packet(row, col)); + } + + EIGEN_STRONG_INLINE const Scalar coeff(int index) const + { + return m_functor(m_matrix.coeff(index)); + } + + template + EIGEN_STRONG_INLINE PacketScalar packet(int index) const + { + return m_functor.packetOp(m_matrix.template packet(index)); + } + + protected: + const typename MatrixType::Nested m_matrix; + const UnaryOp m_functor; +}; + +/** \returns an expression of a custom coefficient-wise unary operator \a func of *this + * + * The template parameter \a CustomUnaryOp is the type of the functor + * of the custom unary operator. 
+ * + * \addexample CustomCwiseUnaryFunctors \label How to use custom coeff wise unary functors + * + * Example: + * \include class_CwiseUnaryOp.cpp + * Output: \verbinclude class_CwiseUnaryOp.out + * + * \sa class CwiseUnaryOp, class CwiseBinarOp, MatrixBase::operator-, Cwise::abs + */ +template +template +EIGEN_STRONG_INLINE const CwiseUnaryOp +MatrixBase::unaryExpr(const CustomUnaryOp& func) const +{ + return CwiseUnaryOp(derived(), func); +} + +/** \returns an expression of the opposite of \c *this + */ +template +EIGEN_STRONG_INLINE const CwiseUnaryOp::Scalar>,Derived> +MatrixBase::operator-() const +{ + return derived(); +} + +/** \returns an expression of the coefficient-wise absolute value of \c *this + * + * Example: \include Cwise_abs.cpp + * Output: \verbinclude Cwise_abs.out + * + * \sa abs2() + */ +template +EIGEN_STRONG_INLINE const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_abs_op) +Cwise::abs() const +{ + return _expression(); +} + +/** \returns an expression of the coefficient-wise squared absolute value of \c *this + * + * Example: \include Cwise_abs2.cpp + * Output: \verbinclude Cwise_abs2.out + * + * \sa abs(), square() + */ +template +EIGEN_STRONG_INLINE const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_abs2_op) +Cwise::abs2() const +{ + return _expression(); +} + +/** \returns an expression of the complex conjugate of \c *this. + * + * \sa adjoint() */ +template +EIGEN_STRONG_INLINE typename MatrixBase::ConjugateReturnType +MatrixBase::conjugate() const +{ + return ConjugateReturnType(derived()); +} + +/** \returns an expression of the real part of \c *this. + * + * \sa imag() */ +template +EIGEN_STRONG_INLINE const typename MatrixBase::RealReturnType +MatrixBase::real() const { return derived(); } + +/** \returns an expression of the imaginary part of \c *this. + * + * \sa real() */ +template +EIGEN_STRONG_INLINE const typename MatrixBase::ImagReturnType +MatrixBase::imag() const { return derived(); } + +/** \returns an expression of *this with the \a Scalar type casted to + * \a NewScalar. + * + * The template parameter \a NewScalar is the type we are casting the scalars to. + * + * \sa class CwiseUnaryOp + */ +template +template +EIGEN_STRONG_INLINE const CwiseUnaryOp::Scalar, NewType>, Derived> +MatrixBase::cast() const +{ + return derived(); +} + +/** \relates MatrixBase */ +template +EIGEN_STRONG_INLINE const typename MatrixBase::ScalarMultipleReturnType +MatrixBase::operator*(const Scalar& scalar) const +{ + return CwiseUnaryOp, Derived> + (derived(), ei_scalar_multiple_op(scalar)); +} + +/** \relates MatrixBase */ +template +EIGEN_STRONG_INLINE const CwiseUnaryOp::Scalar>, Derived> +MatrixBase::operator/(const Scalar& scalar) const +{ + return CwiseUnaryOp, Derived> + (derived(), ei_scalar_quotient1_op(scalar)); +} + +template +EIGEN_STRONG_INLINE Derived& +MatrixBase::operator*=(const Scalar& other) +{ + return *this = *this * other; +} + +template +EIGEN_STRONG_INLINE Derived& +MatrixBase::operator/=(const Scalar& other) +{ + return *this = *this / other; +} + +#endif // EIGEN_CWISE_UNARY_OP_H diff --git a/extern/Eigen2/Eigen/src/Core/DiagonalCoeffs.h b/extern/Eigen2/Eigen/src/Core/DiagonalCoeffs.h new file mode 100644 index 00000000000..767fe5fb7c0 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/DiagonalCoeffs.h @@ -0,0 +1,124 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. 
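// A minimal usage sketch (illustrative only): the coefficient-wise unary
// expressions defined above, including a custom functor for unaryExpr()
// (the Halve functor here is a made-up example, not part of Eigen).
#include <Eigen/Core>
using namespace Eigen;

struct Halve
{
  typedef double result_type;  // lets Eigen deduce the scalar type of the result
  double operator()(double x) const { return 0.5 * x; }
};

MatrixXd unary_demo(const MatrixXd& a)
{
  MatrixXd neg  = -a;                    // opposite
  MatrixXd mag  = a.cwise().abs();       // coefficient-wise absolute value
  MatrixXd mag2 = a.cwise().abs2();      // coefficient-wise squared absolute value
  MatrixXf f    = a.cast<float>();       // change the scalar type
  MatrixXd half = a.unaryExpr(Halve());  // custom coefficient-wise functor
  return neg + mag + mag2 + f.cast<double>() + half + a * 2.0 / 3.0;
}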
+// +// Copyright (C) 2006-2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_DIAGONALCOEFFS_H +#define EIGEN_DIAGONALCOEFFS_H + +/** \class DiagonalCoeffs + * + * \brief Expression of the main diagonal of a matrix + * + * \param MatrixType the type of the object in which we are taking the main diagonal + * + * The matrix is not required to be square. + * + * This class represents an expression of the main diagonal of a square matrix. + * It is the return type of MatrixBase::diagonal() and most of the time this is + * the only way it is used. + * + * \sa MatrixBase::diagonal() + */ +template +struct ei_traits > +{ + typedef typename MatrixType::Scalar Scalar; + typedef typename ei_nested::type MatrixTypeNested; + typedef typename ei_unref::type _MatrixTypeNested; + enum { + RowsAtCompileTime = int(MatrixType::SizeAtCompileTime) == Dynamic ? Dynamic + : EIGEN_ENUM_MIN(MatrixType::RowsAtCompileTime, + MatrixType::ColsAtCompileTime), + ColsAtCompileTime = 1, + MaxRowsAtCompileTime = int(MatrixType::MaxSizeAtCompileTime) == Dynamic ? Dynamic + : EIGEN_ENUM_MIN(MatrixType::MaxRowsAtCompileTime, + MatrixType::MaxColsAtCompileTime), + MaxColsAtCompileTime = 1, + Flags = (unsigned int)_MatrixTypeNested::Flags & (HereditaryBits | LinearAccessBit), + CoeffReadCost = _MatrixTypeNested::CoeffReadCost + }; +}; + +template class DiagonalCoeffs + : public MatrixBase > +{ + public: + + EIGEN_GENERIC_PUBLIC_INTERFACE(DiagonalCoeffs) + + inline DiagonalCoeffs(const MatrixType& matrix) : m_matrix(matrix) {} + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(DiagonalCoeffs) + + inline int rows() const { return std::min(m_matrix.rows(), m_matrix.cols()); } + inline int cols() const { return 1; } + + inline Scalar& coeffRef(int row, int) + { + return m_matrix.const_cast_derived().coeffRef(row, row); + } + + inline const Scalar coeff(int row, int) const + { + return m_matrix.coeff(row, row); + } + + inline Scalar& coeffRef(int index) + { + return m_matrix.const_cast_derived().coeffRef(index, index); + } + + inline const Scalar coeff(int index) const + { + return m_matrix.coeff(index, index); + } + + protected: + + const typename MatrixType::Nested m_matrix; +}; + +/** \returns an expression of the main diagonal of the matrix \c *this + * + * \c *this is not required to be square. + * + * Example: \include MatrixBase_diagonal.cpp + * Output: \verbinclude MatrixBase_diagonal.out + * + * \sa class DiagonalCoeffs */ +template +inline DiagonalCoeffs +MatrixBase::diagonal() +{ + return DiagonalCoeffs(derived()); +} + +/** This is the const version of diagonal(). 
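// A minimal usage sketch (illustrative only): diagonal() returns a read-write
// view of the main diagonal, and the matrix does not have to be square.
#include <Eigen/Core>
using namespace Eigen;

double diagonal_demo(MatrixXd& m)
{
  m.diagonal().setConstant(1.0);  // write through the DiagonalCoeffs expression
  return m.diagonal().sum();      // read it back; its length is min(rows, cols)
}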
*/ +template +inline const DiagonalCoeffs +MatrixBase::diagonal() const +{ + return DiagonalCoeffs(derived()); +} + +#endif // EIGEN_DIAGONALCOEFFS_H diff --git a/extern/Eigen2/Eigen/src/Core/DiagonalMatrix.h b/extern/Eigen2/Eigen/src/Core/DiagonalMatrix.h new file mode 100644 index 00000000000..01f01fdf259 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/DiagonalMatrix.h @@ -0,0 +1,144 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2006-2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_DIAGONALMATRIX_H +#define EIGEN_DIAGONALMATRIX_H + +/** \class DiagonalMatrix + * \nonstableyet + * + * \brief Expression of a diagonal matrix + * + * \param CoeffsVectorType the type of the vector of diagonal coefficients + * + * This class is an expression of a diagonal matrix with given vector of diagonal + * coefficients. It is the return + * type of MatrixBase::diagonal(const OtherDerived&) and most of the time this is + * the only way it is used. + * + * \sa MatrixBase::diagonal(const OtherDerived&) + */ +template +struct ei_traits > +{ + typedef typename CoeffsVectorType::Scalar Scalar; + typedef typename ei_nested::type CoeffsVectorTypeNested; + typedef typename ei_unref::type _CoeffsVectorTypeNested; + enum { + RowsAtCompileTime = CoeffsVectorType::SizeAtCompileTime, + ColsAtCompileTime = CoeffsVectorType::SizeAtCompileTime, + MaxRowsAtCompileTime = CoeffsVectorType::MaxSizeAtCompileTime, + MaxColsAtCompileTime = CoeffsVectorType::MaxSizeAtCompileTime, + Flags = (_CoeffsVectorTypeNested::Flags & HereditaryBits) | Diagonal, + CoeffReadCost = _CoeffsVectorTypeNested::CoeffReadCost + }; +}; + +template +class DiagonalMatrix : ei_no_assignment_operator, + public MatrixBase > +{ + public: + + EIGEN_GENERIC_PUBLIC_INTERFACE(DiagonalMatrix) + typedef CoeffsVectorType _CoeffsVectorType; + + // needed to evaluate a DiagonalMatrix to a DiagonalMatrix > + template + inline DiagonalMatrix(const DiagonalMatrix& other) : m_coeffs(other.diagonal()) + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(CoeffsVectorType); + EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherCoeffsVectorType); + ei_assert(m_coeffs.size() > 0); + } + + inline DiagonalMatrix(const CoeffsVectorType& coeffs) : m_coeffs(coeffs) + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(CoeffsVectorType); + ei_assert(coeffs.size() > 0); + } + + inline int rows() const { return m_coeffs.size(); } + inline int cols() const { return m_coeffs.size(); } + + inline const Scalar coeff(int row, int col) const + { + return row == col ? 
m_coeffs.coeff(row) : static_cast(0); + } + + inline const CoeffsVectorType& diagonal() const { return m_coeffs; } + + protected: + const typename CoeffsVectorType::Nested m_coeffs; +}; + +/** \nonstableyet + * \returns an expression of a diagonal matrix with *this as vector of diagonal coefficients + * + * \only_for_vectors + * + * \addexample AsDiagonalExample \label How to build a diagonal matrix from a vector + * + * Example: \include MatrixBase_asDiagonal.cpp + * Output: \verbinclude MatrixBase_asDiagonal.out + * + * \sa class DiagonalMatrix, isDiagonal() + **/ +template +inline const DiagonalMatrix +MatrixBase::asDiagonal() const +{ + return derived(); +} + +/** \nonstableyet + * \returns true if *this is approximately equal to a diagonal matrix, + * within the precision given by \a prec. + * + * Example: \include MatrixBase_isDiagonal.cpp + * Output: \verbinclude MatrixBase_isDiagonal.out + * + * \sa asDiagonal() + */ +template +bool MatrixBase::isDiagonal +(RealScalar prec) const +{ + if(cols() != rows()) return false; + RealScalar maxAbsOnDiagonal = static_cast(-1); + for(int j = 0; j < cols(); ++j) + { + RealScalar absOnDiagonal = ei_abs(coeff(j,j)); + if(absOnDiagonal > maxAbsOnDiagonal) maxAbsOnDiagonal = absOnDiagonal; + } + for(int j = 0; j < cols(); ++j) + for(int i = 0; i < j; ++i) + { + if(!ei_isMuchSmallerThan(coeff(i, j), maxAbsOnDiagonal, prec)) return false; + if(!ei_isMuchSmallerThan(coeff(j, i), maxAbsOnDiagonal, prec)) return false; + } + return true; +} + +#endif // EIGEN_DIAGONALMATRIX_H diff --git a/extern/Eigen2/Eigen/src/Core/DiagonalProduct.h b/extern/Eigen2/Eigen/src/Core/DiagonalProduct.h new file mode 100644 index 00000000000..f33a26f98b0 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/DiagonalProduct.h @@ -0,0 +1,130 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_DIAGONALPRODUCT_H +#define EIGEN_DIAGONALPRODUCT_H + +/** \internal Specialization of ei_nested for DiagonalMatrix. + * Unlike ei_nested, if the argument is a DiagonalMatrix and if it must be evaluated, + * then it evaluated to a DiagonalMatrix having its own argument evaluated. 
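// A minimal usage sketch (illustrative only): building a diagonal matrix
// expression from a vector with asDiagonal(), and testing a dense matrix for
// approximate diagonality with isDiagonal().
#include <Eigen/Core>
using namespace Eigen;

bool as_diagonal_demo(const Vector3d& v)
{
  Matrix3d d = v.asDiagonal();  // dense evaluation of the DiagonalMatrix expression
  return d.isDiagonal();        // true: all off-diagonal entries are exactly zero
}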
+ */ +template struct ei_nested_diagonal : ei_nested {}; +template struct ei_nested_diagonal,N > + : ei_nested, N, DiagonalMatrix::type> > > +{}; + +// specialization of ProductReturnType +template +struct ProductReturnType +{ + typedef typename ei_nested_diagonal::type LhsNested; + typedef typename ei_nested_diagonal::type RhsNested; + + typedef Product Type; +}; + +template +struct ei_traits > +{ + // clean the nested types: + typedef typename ei_cleantype::type _LhsNested; + typedef typename ei_cleantype::type _RhsNested; + typedef typename _LhsNested::Scalar Scalar; + + enum { + LhsFlags = _LhsNested::Flags, + RhsFlags = _RhsNested::Flags, + RowsAtCompileTime = _LhsNested::RowsAtCompileTime, + ColsAtCompileTime = _RhsNested::ColsAtCompileTime, + MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime, + MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime, + + LhsIsDiagonal = (_LhsNested::Flags&Diagonal)==Diagonal, + RhsIsDiagonal = (_RhsNested::Flags&Diagonal)==Diagonal, + + CanVectorizeRhs = (!RhsIsDiagonal) && (RhsFlags & RowMajorBit) && (RhsFlags & PacketAccessBit) + && (ColsAtCompileTime % ei_packet_traits::size == 0), + + CanVectorizeLhs = (!LhsIsDiagonal) && (!(LhsFlags & RowMajorBit)) && (LhsFlags & PacketAccessBit) + && (RowsAtCompileTime % ei_packet_traits::size == 0), + + RemovedBits = ~((RhsFlags & RowMajorBit) && (!CanVectorizeLhs) ? 0 : RowMajorBit), + + Flags = ((unsigned int)(LhsFlags | RhsFlags) & HereditaryBits & RemovedBits) + | (((CanVectorizeLhs&&RhsIsDiagonal) || (CanVectorizeRhs&&LhsIsDiagonal)) ? PacketAccessBit : 0), + + CoeffReadCost = NumTraits::MulCost + _LhsNested::CoeffReadCost + _RhsNested::CoeffReadCost + }; +}; + +template class Product : ei_no_assignment_operator, + public MatrixBase > +{ + typedef typename ei_traits::_LhsNested _LhsNested; + typedef typename ei_traits::_RhsNested _RhsNested; + + enum { + RhsIsDiagonal = (_RhsNested::Flags&Diagonal)==Diagonal + }; + + public: + + EIGEN_GENERIC_PUBLIC_INTERFACE(Product) + + template + inline Product(const Lhs& lhs, const Rhs& rhs) + : m_lhs(lhs), m_rhs(rhs) + { + ei_assert(lhs.cols() == rhs.rows()); + } + + inline int rows() const { return m_lhs.rows(); } + inline int cols() const { return m_rhs.cols(); } + + const Scalar coeff(int row, int col) const + { + const int unique = RhsIsDiagonal ? col : row; + return m_lhs.coeff(row, unique) * m_rhs.coeff(unique, col); + } + + template + const PacketScalar packet(int row, int col) const + { + if (RhsIsDiagonal) + { + return ei_pmul(m_lhs.template packet(row, col), ei_pset1(m_rhs.coeff(col, col))); + } + else + { + return ei_pmul(ei_pset1(m_lhs.coeff(row, row)), m_rhs.template packet(row, col)); + } + } + + protected: + const LhsNested m_lhs; + const RhsNested m_rhs; +}; + +#endif // EIGEN_DIAGONALPRODUCT_H diff --git a/extern/Eigen2/Eigen/src/Core/Dot.h b/extern/Eigen2/Eigen/src/Core/Dot.h new file mode 100644 index 00000000000..5838af70d4a --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/Dot.h @@ -0,0 +1,361 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2006-2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. 
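// A minimal usage sketch (illustrative only): the specialized diagonal product
// above lets a diagonal expression multiply a dense matrix without first
// forming the dense diagonal matrix.
#include <Eigen/Core>
using namespace Eigen;

MatrixXd scale_rows_and_cols(const VectorXd& rowScale,
                             const MatrixXd& m,
                             const VectorXd& colScale)
{
  MatrixXd scaledRows = rowScale.asDiagonal() * m;  // scales row i by rowScale(i)
  return scaledRows * colScale.asDiagonal();        // scales column j by colScale(j)
}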
+// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_DOT_H +#define EIGEN_DOT_H + +/*************************************************************************** +* Part 1 : the logic deciding a strategy for vectorization and unrolling +***************************************************************************/ + +template +struct ei_dot_traits +{ +public: + enum { + Vectorization = (int(Derived1::Flags)&int(Derived2::Flags)&ActualPacketAccessBit) + && (int(Derived1::Flags)&int(Derived2::Flags)&LinearAccessBit) + ? LinearVectorization + : NoVectorization + }; + +private: + typedef typename Derived1::Scalar Scalar; + enum { + PacketSize = ei_packet_traits::size, + Cost = Derived1::SizeAtCompileTime * (Derived1::CoeffReadCost + Derived2::CoeffReadCost + NumTraits::MulCost) + + (Derived1::SizeAtCompileTime-1) * NumTraits::AddCost, + UnrollingLimit = EIGEN_UNROLLING_LIMIT * (int(Vectorization) == int(NoVectorization) ? 1 : int(PacketSize)) + }; + +public: + enum { + Unrolling = Cost <= UnrollingLimit + ? CompleteUnrolling + : NoUnrolling + }; +}; + +/*************************************************************************** +* Part 2 : unrollers +***************************************************************************/ + +/*** no vectorization ***/ + +template +struct ei_dot_novec_unroller +{ + enum { + HalfLength = Length/2 + }; + + typedef typename Derived1::Scalar Scalar; + + inline static Scalar run(const Derived1& v1, const Derived2& v2) + { + return ei_dot_novec_unroller::run(v1, v2) + + ei_dot_novec_unroller::run(v1, v2); + } +}; + +template +struct ei_dot_novec_unroller +{ + typedef typename Derived1::Scalar Scalar; + + inline static Scalar run(const Derived1& v1, const Derived2& v2) + { + return v1.coeff(Start) * ei_conj(v2.coeff(Start)); + } +}; + +/*** vectorization ***/ + +template::size)> +struct ei_dot_vec_unroller +{ + typedef typename Derived1::Scalar Scalar; + typedef typename ei_packet_traits::type PacketScalar; + + enum { + row1 = Derived1::RowsAtCompileTime == 1 ? 0 : Index, + col1 = Derived1::RowsAtCompileTime == 1 ? Index : 0, + row2 = Derived2::RowsAtCompileTime == 1 ? 0 : Index, + col2 = Derived2::RowsAtCompileTime == 1 ? Index : 0 + }; + + inline static PacketScalar run(const Derived1& v1, const Derived2& v2) + { + return ei_pmadd( + v1.template packet(row1, col1), + v2.template packet(row2, col2), + ei_dot_vec_unroller::size, Stop>::run(v1, v2) + ); + } +}; + +template +struct ei_dot_vec_unroller +{ + enum { + row1 = Derived1::RowsAtCompileTime == 1 ? 0 : Index, + col1 = Derived1::RowsAtCompileTime == 1 ? Index : 0, + row2 = Derived2::RowsAtCompileTime == 1 ? 0 : Index, + col2 = Derived2::RowsAtCompileTime == 1 ? Index : 0, + alignment1 = (Derived1::Flags & AlignedBit) ? Aligned : Unaligned, + alignment2 = (Derived2::Flags & AlignedBit) ? 
Aligned : Unaligned + }; + + typedef typename Derived1::Scalar Scalar; + typedef typename ei_packet_traits::type PacketScalar; + + inline static PacketScalar run(const Derived1& v1, const Derived2& v2) + { + return ei_pmul(v1.template packet(row1, col1), v2.template packet(row2, col2)); + } +}; + +/*************************************************************************** +* Part 3 : implementation of all cases +***************************************************************************/ + +template::Vectorization, + int Unrolling = ei_dot_traits::Unrolling +> +struct ei_dot_impl; + +template +struct ei_dot_impl +{ + typedef typename Derived1::Scalar Scalar; + static Scalar run(const Derived1& v1, const Derived2& v2) + { + ei_assert(v1.size()>0 && "you are using a non initialized vector"); + Scalar res; + res = v1.coeff(0) * ei_conj(v2.coeff(0)); + for(int i = 1; i < v1.size(); ++i) + res += v1.coeff(i) * ei_conj(v2.coeff(i)); + return res; + } +}; + +template +struct ei_dot_impl + : public ei_dot_novec_unroller +{}; + +template +struct ei_dot_impl +{ + typedef typename Derived1::Scalar Scalar; + typedef typename ei_packet_traits::type PacketScalar; + + static Scalar run(const Derived1& v1, const Derived2& v2) + { + const int size = v1.size(); + const int packetSize = ei_packet_traits::size; + const int alignedSize = (size/packetSize)*packetSize; + enum { + alignment1 = (Derived1::Flags & AlignedBit) ? Aligned : Unaligned, + alignment2 = (Derived2::Flags & AlignedBit) ? Aligned : Unaligned + }; + Scalar res; + + // do the vectorizable part of the sum + if(size >= packetSize) + { + PacketScalar packet_res = ei_pmul( + v1.template packet(0), + v2.template packet(0) + ); + for(int index = packetSize; index(index), + v2.template packet(index), + packet_res + ); + } + res = ei_predux(packet_res); + + // now we must do the rest without vectorization. + if(alignedSize == size) return res; + } + else // too small to vectorize anything. + // since this is dynamic-size hence inefficient anyway for such small sizes, don't try to optimize. + { + res = Scalar(0); + } + + // do the remainder of the vector + for(int index = alignedSize; index < size; ++index) + { + res += v1.coeff(index) * v2.coeff(index); + } + + return res; + } +}; + +template +struct ei_dot_impl +{ + typedef typename Derived1::Scalar Scalar; + typedef typename ei_packet_traits::type PacketScalar; + enum { + PacketSize = ei_packet_traits::size, + Size = Derived1::SizeAtCompileTime, + VectorizationSize = (Size / PacketSize) * PacketSize + }; + static Scalar run(const Derived1& v1, const Derived2& v2) + { + Scalar res = ei_predux(ei_dot_vec_unroller::run(v1, v2)); + if (VectorizationSize != Size) + res += ei_dot_novec_unroller::run(v1, v2); + return res; + } +}; + +/*************************************************************************** +* Part 4 : implementation of MatrixBase methods +***************************************************************************/ + +/** \returns the dot product of *this with other. + * + * \only_for_vectors + * + * \note If the scalar type is complex numbers, then this function returns the hermitian + * (sesquilinear) dot product, linear in the first variable and conjugate-linear in the + * second variable. 
+ * + * \sa squaredNorm(), norm() + */ +template +template +typename ei_traits::Scalar +MatrixBase::dot(const MatrixBase& other) const +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) + EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived) + EIGEN_STATIC_ASSERT((ei_is_same_type::ret), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + + ei_assert(size() == other.size()); + + return ei_dot_impl::run(derived(), other.derived()); +} + +/** \returns the squared \em l2 norm of *this, i.e., for vectors, the dot product of *this with itself. + * + * \sa dot(), norm() + */ +template +inline typename NumTraits::Scalar>::Real MatrixBase::squaredNorm() const +{ + return ei_real((*this).cwise().abs2().sum()); +} + +/** \returns the \em l2 norm of *this, i.e., for vectors, the square root of the dot product of *this with itself. + * + * \sa dot(), squaredNorm() + */ +template +inline typename NumTraits::Scalar>::Real MatrixBase::norm() const +{ + return ei_sqrt(squaredNorm()); +} + +/** \returns an expression of the quotient of *this by its own norm. + * + * \only_for_vectors + * + * \sa norm(), normalize() + */ +template +inline const typename MatrixBase::PlainMatrixType +MatrixBase::normalized() const +{ + typedef typename ei_nested::type Nested; + typedef typename ei_unref::type _Nested; + _Nested n(derived()); + return n / n.norm(); +} + +/** Normalizes the vector, i.e. divides it by its own norm. + * + * \only_for_vectors + * + * \sa norm(), normalized() + */ +template +inline void MatrixBase::normalize() +{ + *this /= norm(); +} + +/** \returns true if *this is approximately orthogonal to \a other, + * within the precision given by \a prec. + * + * Example: \include MatrixBase_isOrthogonal.cpp + * Output: \verbinclude MatrixBase_isOrthogonal.out + */ +template +template +bool MatrixBase::isOrthogonal +(const MatrixBase& other, RealScalar prec) const +{ + typename ei_nested::type nested(derived()); + typename ei_nested::type otherNested(other.derived()); + return ei_abs2(nested.dot(otherNested)) <= prec * prec * nested.squaredNorm() * otherNested.squaredNorm(); +} + +/** \returns true if *this is approximately an unitary matrix, + * within the precision given by \a prec. In the case where the \a Scalar + * type is real numbers, a unitary matrix is an orthogonal matrix, whence the name. + * + * \note This can be used to check whether a family of vectors forms an orthonormal basis. + * Indeed, \c m.isUnitary() returns true if and only if the columns (equivalently, the rows) of m form an + * orthonormal basis. + * + * Example: \include MatrixBase_isUnitary.cpp + * Output: \verbinclude MatrixBase_isUnitary.out + */ +template +bool MatrixBase::isUnitary(RealScalar prec) const +{ + typename Derived::Nested nested(derived()); + for(int i = 0; i < cols(); ++i) + { + if(!ei_isApprox(nested.col(i).squaredNorm(), static_cast(1), prec)) + return false; + for(int j = 0; j < i; ++j) + if(!ei_isMuchSmallerThan(nested.col(i).dot(nested.col(j)), static_cast(1), prec)) + return false; + } + return true; +} +#endif // EIGEN_DOT_H diff --git a/extern/Eigen2/Eigen/src/Core/Flagged.h b/extern/Eigen2/Eigen/src/Core/Flagged.h new file mode 100644 index 00000000000..ce50246cb67 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/Flagged.h @@ -0,0 +1,146 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. 
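For orientation, a short usage sketch of the MatrixBase methods defined in Dot.h above. Vector3d, Matrix3d and the default precision arguments come from other Eigen headers; the snippet is illustrative only and is not part of the patched file.

#include <Eigen/Core>
using namespace Eigen;

void dot_usage_sketch()
{
  Vector3d v(1.0, 2.0, 3.0);
  Vector3d w(4.0, 5.0, 6.0);

  double d  = v.dot(w);           // 32: sum of products; the second argument is
                                  // conjugated when the scalar type is complex
  double n2 = v.squaredNorm();    // 14: equals v.dot(v) for real vectors
  double n  = v.norm();           // sqrt(14)

  Vector3d u = v.normalized();    // unit-length copy, v itself is left unchanged
  v.normalize();                  // in-place variant

  bool ortho = v.isOrthogonal(w);                // fuzzy test: |v.dot(w)|^2 <= prec^2 * |v|^2 * |w|^2
  bool unit  = Matrix3d::Identity().isUnitary(); // columns form an orthonormal basis
}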
+// +// Copyright (C) 2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_FLAGGED_H +#define EIGEN_FLAGGED_H + +/** \class Flagged + * + * \brief Expression with modified flags + * + * \param ExpressionType the type of the object of which we are modifying the flags + * \param Added the flags added to the expression + * \param Removed the flags removed from the expression (has priority over Added). + * + * This class represents an expression whose flags have been modified. + * It is the return type of MatrixBase::flagged() + * and most of the time this is the only way it is used. + * + * \sa MatrixBase::flagged() + */ +template +struct ei_traits > : ei_traits +{ + enum { Flags = (ExpressionType::Flags | Added) & ~Removed }; +}; + +template class Flagged + : public MatrixBase > +{ + public: + + EIGEN_GENERIC_PUBLIC_INTERFACE(Flagged) + typedef typename ei_meta_if::ret, + ExpressionType, const ExpressionType&>::ret ExpressionTypeNested; + typedef typename ExpressionType::InnerIterator InnerIterator; + + inline Flagged(const ExpressionType& matrix) : m_matrix(matrix) {} + + inline int rows() const { return m_matrix.rows(); } + inline int cols() const { return m_matrix.cols(); } + inline int stride() const { return m_matrix.stride(); } + + inline const Scalar coeff(int row, int col) const + { + return m_matrix.coeff(row, col); + } + + inline Scalar& coeffRef(int row, int col) + { + return m_matrix.const_cast_derived().coeffRef(row, col); + } + + inline const Scalar coeff(int index) const + { + return m_matrix.coeff(index); + } + + inline Scalar& coeffRef(int index) + { + return m_matrix.const_cast_derived().coeffRef(index); + } + + template + inline const PacketScalar packet(int row, int col) const + { + return m_matrix.template packet(row, col); + } + + template + inline void writePacket(int row, int col, const PacketScalar& x) + { + m_matrix.const_cast_derived().template writePacket(row, col, x); + } + + template + inline const PacketScalar packet(int index) const + { + return m_matrix.template packet(index); + } + + template + inline void writePacket(int index, const PacketScalar& x) + { + m_matrix.const_cast_derived().template writePacket(index, x); + } + + const ExpressionType& _expression() const { return m_matrix; } + + protected: + ExpressionTypeNested m_matrix; +}; + +/** \returns an expression of *this with added flags + * + * \addexample MarkExample \label How to mark a triangular matrix as triangular + * + * Example: \include MatrixBase_marked.cpp + * Output: \verbinclude MatrixBase_marked.out + * + * \sa class Flagged, extract(), part() + */ +template 
+template +inline const Flagged +MatrixBase::marked() const +{ + return derived(); +} + +/** \returns an expression of *this with the following flags removed: + * EvalBeforeNestingBit and EvalBeforeAssigningBit. + * + * Example: \include MatrixBase_lazy.cpp + * Output: \verbinclude MatrixBase_lazy.out + * + * \sa class Flagged, marked() + */ +template +inline const Flagged +MatrixBase::lazy() const +{ + return derived(); +} + +#endif // EIGEN_FLAGGED_H diff --git a/extern/Eigen2/Eigen/src/Core/Functors.h b/extern/Eigen2/Eigen/src/Core/Functors.h new file mode 100644 index 00000000000..c8ca3dac1cf --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/Functors.h @@ -0,0 +1,368 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
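A sketch of how the Flagged expression returned by marked() and lazy() is typically used. UpperTriangular and solveTriangular() are assumed from other Eigen2 headers (Constants.h, SolveTriangular.h), so treat this as an illustration rather than as part of this file.

#include <Eigen/Core>
using namespace Eigen;

void flagged_usage_sketch(const Matrix3d& m, const Matrix3d& rhs)
{
  // Promise that only the upper triangle of m is referenced, so the
  // triangular solver can run directly on the full matrix storage.
  Matrix3d x = m.marked<UpperTriangular>().solveTriangular(rhs);

  // Strip EvalBeforeNestingBit/EvalBeforeAssigningBit from a product so it is
  // not evaluated into a temporary first; only safe when the destination does
  // not alias the operands.
  Matrix3d c = (m * rhs).lazy();
}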
+ +#ifndef EIGEN_FUNCTORS_H +#define EIGEN_FUNCTORS_H + +// associative functors: + +/** \internal + * \brief Template functor to compute the sum of two scalars + * + * \sa class CwiseBinaryOp, MatrixBase::operator+, class PartialRedux, MatrixBase::sum() + */ +template struct ei_scalar_sum_op EIGEN_EMPTY_STRUCT { + EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a + b; } + template + EIGEN_STRONG_INLINE const PacketScalar packetOp(const PacketScalar& a, const PacketScalar& b) const + { return ei_padd(a,b); } +}; +template +struct ei_functor_traits > { + enum { + Cost = NumTraits::AddCost, + PacketAccess = ei_packet_traits::size>1 + }; +}; + +/** \internal + * \brief Template functor to compute the product of two scalars + * + * \sa class CwiseBinaryOp, Cwise::operator*(), class PartialRedux, MatrixBase::redux() + */ +template struct ei_scalar_product_op EIGEN_EMPTY_STRUCT { + EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a * b; } + template + EIGEN_STRONG_INLINE const PacketScalar packetOp(const PacketScalar& a, const PacketScalar& b) const + { return ei_pmul(a,b); } +}; +template +struct ei_functor_traits > { + enum { + Cost = NumTraits::MulCost, + PacketAccess = ei_packet_traits::size>1 + }; +}; + +/** \internal + * \brief Template functor to compute the min of two scalars + * + * \sa class CwiseBinaryOp, MatrixBase::cwiseMin, class PartialRedux, MatrixBase::minCoeff() + */ +template struct ei_scalar_min_op EIGEN_EMPTY_STRUCT { + EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return std::min(a, b); } + template + EIGEN_STRONG_INLINE const PacketScalar packetOp(const PacketScalar& a, const PacketScalar& b) const + { return ei_pmin(a,b); } +}; +template +struct ei_functor_traits > { + enum { + Cost = NumTraits::AddCost, + PacketAccess = ei_packet_traits::size>1 + }; +}; + +/** \internal + * \brief Template functor to compute the max of two scalars + * + * \sa class CwiseBinaryOp, MatrixBase::cwiseMax, class PartialRedux, MatrixBase::maxCoeff() + */ +template struct ei_scalar_max_op EIGEN_EMPTY_STRUCT { + EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return std::max(a, b); } + template + EIGEN_STRONG_INLINE const PacketScalar packetOp(const PacketScalar& a, const PacketScalar& b) const + { return ei_pmax(a,b); } +}; +template +struct ei_functor_traits > { + enum { + Cost = NumTraits::AddCost, + PacketAccess = ei_packet_traits::size>1 + }; +}; + + +// other binary functors: + +/** \internal + * \brief Template functor to compute the difference of two scalars + * + * \sa class CwiseBinaryOp, MatrixBase::operator- + */ +template struct ei_scalar_difference_op EIGEN_EMPTY_STRUCT { + EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a - b; } + template + EIGEN_STRONG_INLINE const PacketScalar packetOp(const PacketScalar& a, const PacketScalar& b) const + { return ei_psub(a,b); } +}; +template +struct ei_functor_traits > { + enum { + Cost = NumTraits::AddCost, + PacketAccess = ei_packet_traits::size>1 + }; +}; + +/** \internal + * \brief Template functor to compute the quotient of two scalars + * + * \sa class CwiseBinaryOp, Cwise::operator/() + */ +template struct ei_scalar_quotient_op EIGEN_EMPTY_STRUCT { + EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a, const Scalar& b) const { return a / b; } + template + EIGEN_STRONG_INLINE const PacketScalar packetOp(const 
PacketScalar& a, const PacketScalar& b) const + { return ei_pdiv(a,b); } +}; +template +struct ei_functor_traits > { + enum { + Cost = 2 * NumTraits::MulCost, + PacketAccess = ei_packet_traits::size>1 + #if (defined EIGEN_VECTORIZE_SSE) + && NumTraits::HasFloatingPoint + #endif + }; +}; + +// unary functors: + +/** \internal + * \brief Template functor to compute the opposite of a scalar + * + * \sa class CwiseUnaryOp, MatrixBase::operator- + */ +template struct ei_scalar_opposite_op EIGEN_EMPTY_STRUCT { + EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return -a; } +}; +template +struct ei_functor_traits > +{ enum { Cost = NumTraits::AddCost, PacketAccess = false }; }; + +/** \internal + * \brief Template functor to compute the absolute value of a scalar + * + * \sa class CwiseUnaryOp, Cwise::abs + */ +template struct ei_scalar_abs_op EIGEN_EMPTY_STRUCT { + typedef typename NumTraits::Real result_type; + EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return ei_abs(a); } +}; +template +struct ei_functor_traits > +{ + enum { + Cost = NumTraits::AddCost, + PacketAccess = false // this could actually be vectorized with SSSE3. + }; +}; + +/** \internal + * \brief Template functor to compute the squared absolute value of a scalar + * + * \sa class CwiseUnaryOp, Cwise::abs2 + */ +template struct ei_scalar_abs2_op EIGEN_EMPTY_STRUCT { + typedef typename NumTraits::Real result_type; + EIGEN_STRONG_INLINE const result_type operator() (const Scalar& a) const { return ei_abs2(a); } + template + EIGEN_STRONG_INLINE const PacketScalar packetOp(const PacketScalar& a) const + { return ei_pmul(a,a); } +}; +template +struct ei_functor_traits > +{ enum { Cost = NumTraits::MulCost, PacketAccess = int(ei_packet_traits::size)>1 }; }; + +/** \internal + * \brief Template functor to compute the conjugate of a complex value + * + * \sa class CwiseUnaryOp, MatrixBase::conjugate() + */ +template struct ei_scalar_conjugate_op EIGEN_EMPTY_STRUCT { + EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return ei_conj(a); } + template + EIGEN_STRONG_INLINE const PacketScalar packetOp(const PacketScalar& a) const { return a; } +}; +template +struct ei_functor_traits > +{ + enum { + Cost = NumTraits::IsComplex ? NumTraits::AddCost : 0, + PacketAccess = int(ei_packet_traits::size)>1 + }; +}; + +/** \internal + * \brief Template functor to cast a scalar to another type + * + * \sa class CwiseUnaryOp, MatrixBase::cast() + */ +template +struct ei_scalar_cast_op EIGEN_EMPTY_STRUCT { + typedef NewType result_type; + EIGEN_STRONG_INLINE const NewType operator() (const Scalar& a) const { return static_cast(a); } +}; +template +struct ei_functor_traits > +{ enum { Cost = ei_is_same_type::ret ? 
0 : NumTraits::AddCost, PacketAccess = false }; }; + +/** \internal + * \brief Template functor to extract the real part of a complex + * + * \sa class CwiseUnaryOp, MatrixBase::real() + */ +template +struct ei_scalar_real_op EIGEN_EMPTY_STRUCT { + typedef typename NumTraits::Real result_type; + EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return ei_real(a); } +}; +template +struct ei_functor_traits > +{ enum { Cost = 0, PacketAccess = false }; }; + +/** \internal + * \brief Template functor to extract the imaginary part of a complex + * + * \sa class CwiseUnaryOp, MatrixBase::imag() + */ +template +struct ei_scalar_imag_op EIGEN_EMPTY_STRUCT { + typedef typename NumTraits::Real result_type; + EIGEN_STRONG_INLINE result_type operator() (const Scalar& a) const { return ei_imag(a); } +}; +template +struct ei_functor_traits > +{ enum { Cost = 0, PacketAccess = false }; }; + +/** \internal + * \brief Template functor to multiply a scalar by a fixed other one + * + * \sa class CwiseUnaryOp, MatrixBase::operator*, MatrixBase::operator/ + */ +/* NOTE why doing the ei_pset1() in packetOp *is* an optimization ? + * indeed it seems better to declare m_other as a PacketScalar and do the ei_pset1() once + * in the constructor. However, in practice: + * - GCC does not like m_other as a PacketScalar and generate a load every time it needs it + * - one the other hand GCC is able to moves the ei_pset1() away the loop :) + * - simpler code ;) + * (ICC and gcc 4.4 seems to perform well in both cases, the issue is visible with y = a*x + b*y) + */ +template +struct ei_scalar_multiple_op { + typedef typename ei_packet_traits::type PacketScalar; + // FIXME default copy constructors seems bugged with std::complex<> + EIGEN_STRONG_INLINE ei_scalar_multiple_op(const ei_scalar_multiple_op& other) : m_other(other.m_other) { } + EIGEN_STRONG_INLINE ei_scalar_multiple_op(const Scalar& other) : m_other(other) { } + EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a * m_other; } + EIGEN_STRONG_INLINE const PacketScalar packetOp(const PacketScalar& a) const + { return ei_pmul(a, ei_pset1(m_other)); } + const Scalar m_other; +}; +template +struct ei_functor_traits > +{ enum { Cost = NumTraits::MulCost, PacketAccess = ei_packet_traits::size>1 }; }; + +template +struct ei_scalar_quotient1_impl { + typedef typename ei_packet_traits::type PacketScalar; + // FIXME default copy constructors seems bugged with std::complex<> + EIGEN_STRONG_INLINE ei_scalar_quotient1_impl(const ei_scalar_quotient1_impl& other) : m_other(other.m_other) { } + EIGEN_STRONG_INLINE ei_scalar_quotient1_impl(const Scalar& other) : m_other(static_cast(1) / other) {} + EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a * m_other; } + EIGEN_STRONG_INLINE const PacketScalar packetOp(const PacketScalar& a) const + { return ei_pmul(a, ei_pset1(m_other)); } + const Scalar m_other; +}; +template +struct ei_functor_traits > +{ enum { Cost = NumTraits::MulCost, PacketAccess = ei_packet_traits::size>1 }; }; + +template +struct ei_scalar_quotient1_impl { + // FIXME default copy constructors seems bugged with std::complex<> + EIGEN_STRONG_INLINE ei_scalar_quotient1_impl(const ei_scalar_quotient1_impl& other) : m_other(other.m_other) { } + EIGEN_STRONG_INLINE ei_scalar_quotient1_impl(const Scalar& other) : m_other(other) {} + EIGEN_STRONG_INLINE Scalar operator() (const Scalar& a) const { return a / m_other; } + const Scalar m_other; +}; +template +struct ei_functor_traits > +{ enum { Cost = 2 * 
NumTraits::MulCost, PacketAccess = false }; }; + +/** \internal + * \brief Template functor to divide a scalar by a fixed other one + * + * This functor is used to implement the quotient of a matrix by + * a scalar where the scalar type is not necessarily a floating point type. + * + * \sa class CwiseUnaryOp, MatrixBase::operator/ + */ +template +struct ei_scalar_quotient1_op : ei_scalar_quotient1_impl::HasFloatingPoint > { + EIGEN_STRONG_INLINE ei_scalar_quotient1_op(const Scalar& other) + : ei_scalar_quotient1_impl::HasFloatingPoint >(other) {} +}; + +// nullary functors + +template +struct ei_scalar_constant_op { + typedef typename ei_packet_traits::type PacketScalar; + EIGEN_STRONG_INLINE ei_scalar_constant_op(const ei_scalar_constant_op& other) : m_other(other.m_other) { } + EIGEN_STRONG_INLINE ei_scalar_constant_op(const Scalar& other) : m_other(other) { } + EIGEN_STRONG_INLINE const Scalar operator() (int, int = 0) const { return m_other; } + EIGEN_STRONG_INLINE const PacketScalar packetOp() const { return ei_pset1(m_other); } + const Scalar m_other; +}; +template +struct ei_functor_traits > +{ enum { Cost = 1, PacketAccess = ei_packet_traits::size>1, IsRepeatable = true }; }; + +template struct ei_scalar_identity_op EIGEN_EMPTY_STRUCT { + EIGEN_STRONG_INLINE ei_scalar_identity_op(void) {} + EIGEN_STRONG_INLINE const Scalar operator() (int row, int col) const { return row==col ? Scalar(1) : Scalar(0); } +}; +template +struct ei_functor_traits > +{ enum { Cost = NumTraits::AddCost, PacketAccess = false, IsRepeatable = true }; }; + +// allow to add new functors and specializations of ei_functor_traits from outside Eigen. +// this macro is really needed because ei_functor_traits must be specialized after it is declared but before it is used... +#ifdef EIGEN_FUNCTORS_PLUGIN +#include EIGEN_FUNCTORS_PLUGIN +#endif + +// all functors allow linear access, except ei_scalar_identity_op. So we fix here a quick meta +// to indicate whether a functor allows linear access, just always answering 'yes' except for +// ei_scalar_identity_op. +template struct ei_functor_has_linear_access { enum { ret = 1 }; }; +template struct ei_functor_has_linear_access > { enum { ret = 0 }; }; + +// in CwiseBinaryOp, we require the Lhs and Rhs to have the same scalar type, except for multiplication +// where we only require them to have the same _real_ scalar type so one may multiply, say, float by complex. +template struct ei_functor_allows_mixing_real_and_complex { enum { ret = 0 }; }; +template struct ei_functor_allows_mixing_real_and_complex > { enum { ret = 1 }; }; + +#endif // EIGEN_FUNCTORS_H diff --git a/extern/Eigen2/Eigen/src/Core/Fuzzy.h b/extern/Eigen2/Eigen/src/Core/Fuzzy.h new file mode 100644 index 00000000000..1285542966c --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/Fuzzy.h @@ -0,0 +1,234 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2006-2008 Benoit Jacob +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. 
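The EIGEN_FUNCTORS_PLUGIN hook above is the supported way to add functors of this shape from outside Eigen. As an illustration only (the functor name below is invented), a user-defined coefficient-wise square functor would mirror the built-in ei_scalar_abs2_op pattern:

// Contents of a hypothetical header included via EIGEN_FUNCTORS_PLUGIN.
template<typename Scalar> struct my_scalar_square_op EIGEN_EMPTY_STRUCT {
  EIGEN_STRONG_INLINE const Scalar operator() (const Scalar& a) const { return a * a; }
  template<typename PacketScalar>
  EIGEN_STRONG_INLINE const PacketScalar packetOp(const PacketScalar& a) const
  { return ei_pmul(a, a); }
};
// The matching ei_functor_traits specialization declares the cost and whether
// the functor can be vectorized, exactly like the specializations above.
template<typename Scalar>
struct ei_functor_traits<my_scalar_square_op<Scalar> > {
  enum {
    Cost = NumTraits<Scalar>::MulCost,
    PacketAccess = ei_packet_traits<Scalar>::size > 1
  };
};

Such a functor is then typically applied through an expression like m.unaryExpr(my_scalar_square_op<double>()).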
+// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_FUZZY_H +#define EIGEN_FUZZY_H + +#ifndef EIGEN_LEGACY_COMPARES + +/** \returns \c true if \c *this is approximately equal to \a other, within the precision + * determined by \a prec. + * + * \note The fuzzy compares are done multiplicatively. Two vectors \f$ v \f$ and \f$ w \f$ + * are considered to be approximately equal within precision \f$ p \f$ if + * \f[ \Vert v - w \Vert \leqslant p\,\min(\Vert v\Vert, \Vert w\Vert). \f] + * For matrices, the comparison is done using the Hilbert-Schmidt norm (aka Frobenius norm + * L2 norm). + * + * \note Because of the multiplicativeness of this comparison, one can't use this function + * to check whether \c *this is approximately equal to the zero matrix or vector. + * Indeed, \c isApprox(zero) returns false unless \c *this itself is exactly the zero matrix + * or vector. If you want to test whether \c *this is zero, use ei_isMuchSmallerThan(const + * RealScalar&, RealScalar) instead. + * + * \sa ei_isMuchSmallerThan(const RealScalar&, RealScalar) const + */ +template +template +bool MatrixBase::isApprox( + const MatrixBase& other, + typename NumTraits::Real prec +) const +{ + const typename ei_nested::type nested(derived()); + const typename ei_nested::type otherNested(other.derived()); + return (nested - otherNested).cwise().abs2().sum() <= prec * prec * std::min(nested.cwise().abs2().sum(), otherNested.cwise().abs2().sum()); +} + +/** \returns \c true if the norm of \c *this is much smaller than \a other, + * within the precision determined by \a prec. + * + * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is + * considered to be much smaller than \f$ x \f$ within precision \f$ p \f$ if + * \f[ \Vert v \Vert \leqslant p\,\vert x\vert. \f] + * + * For matrices, the comparison is done using the Hilbert-Schmidt norm. For this reason, + * the value of the reference scalar \a other should come from the Hilbert-Schmidt norm + * of a reference matrix of same dimensions. + * + * \sa isApprox(), isMuchSmallerThan(const MatrixBase&, RealScalar) const + */ +template +bool MatrixBase::isMuchSmallerThan( + const typename NumTraits::Real& other, + typename NumTraits::Real prec +) const +{ + return cwise().abs2().sum() <= prec * prec * other * other; +} + +/** \returns \c true if the norm of \c *this is much smaller than the norm of \a other, + * within the precision determined by \a prec. + * + * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is + * considered to be much smaller than a vector \f$ w \f$ within precision \f$ p \f$ if + * \f[ \Vert v \Vert \leqslant p\,\Vert w\Vert. \f] + * For matrices, the comparison is done using the Hilbert-Schmidt norm. 
+ * + * \sa isApprox(), isMuchSmallerThan(const RealScalar&, RealScalar) const + */ +template +template +bool MatrixBase::isMuchSmallerThan( + const MatrixBase& other, + typename NumTraits::Real prec +) const +{ + return this->cwise().abs2().sum() <= prec * prec * other.cwise().abs2().sum(); +} + +#else + +template +struct ei_fuzzy_selector; + +/** \returns \c true if \c *this is approximately equal to \a other, within the precision + * determined by \a prec. + * + * \note The fuzzy compares are done multiplicatively. Two vectors \f$ v \f$ and \f$ w \f$ + * are considered to be approximately equal within precision \f$ p \f$ if + * \f[ \Vert v - w \Vert \leqslant p\,\min(\Vert v\Vert, \Vert w\Vert). \f] + * For matrices, the comparison is done on all columns. + * + * \note Because of the multiplicativeness of this comparison, one can't use this function + * to check whether \c *this is approximately equal to the zero matrix or vector. + * Indeed, \c isApprox(zero) returns false unless \c *this itself is exactly the zero matrix + * or vector. If you want to test whether \c *this is zero, use ei_isMuchSmallerThan(const + * RealScalar&, RealScalar) instead. + * + * \sa ei_isMuchSmallerThan(const RealScalar&, RealScalar) const + */ +template +template +bool MatrixBase::isApprox( + const MatrixBase& other, + typename NumTraits::Real prec +) const +{ + return ei_fuzzy_selector::isApprox(derived(), other.derived(), prec); +} + +/** \returns \c true if the norm of \c *this is much smaller than \a other, + * within the precision determined by \a prec. + * + * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is + * considered to be much smaller than \f$ x \f$ within precision \f$ p \f$ if + * \f[ \Vert v \Vert \leqslant p\,\vert x\vert. \f] + * For matrices, the comparison is done on all columns. + * + * \sa isApprox(), isMuchSmallerThan(const MatrixBase&, RealScalar) const + */ +template +bool MatrixBase::isMuchSmallerThan( + const typename NumTraits::Real& other, + typename NumTraits::Real prec +) const +{ + return ei_fuzzy_selector::isMuchSmallerThan(derived(), other, prec); +} + +/** \returns \c true if the norm of \c *this is much smaller than the norm of \a other, + * within the precision determined by \a prec. + * + * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is + * considered to be much smaller than a vector \f$ w \f$ within precision \f$ p \f$ if + * \f[ \Vert v \Vert \leqslant p\,\Vert w\Vert. \f] + * For matrices, the comparison is done on all columns. 
+ * + * \sa isApprox(), isMuchSmallerThan(const RealScalar&, RealScalar) const + */ +template +template +bool MatrixBase::isMuchSmallerThan( + const MatrixBase& other, + typename NumTraits::Real prec +) const +{ + return ei_fuzzy_selector::isMuchSmallerThan(derived(), other.derived(), prec); +} + + +template +struct ei_fuzzy_selector +{ + typedef typename Derived::RealScalar RealScalar; + static bool isApprox(const Derived& self, const OtherDerived& other, RealScalar prec) + { + EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived) + ei_assert(self.size() == other.size()); + return((self - other).squaredNorm() <= std::min(self.squaredNorm(), other.squaredNorm()) * prec * prec); + } + static bool isMuchSmallerThan(const Derived& self, const RealScalar& other, RealScalar prec) + { + return(self.squaredNorm() <= ei_abs2(other * prec)); + } + static bool isMuchSmallerThan(const Derived& self, const OtherDerived& other, RealScalar prec) + { + EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived) + ei_assert(self.size() == other.size()); + return(self.squaredNorm() <= other.squaredNorm() * prec * prec); + } +}; + +template +struct ei_fuzzy_selector +{ + typedef typename Derived::RealScalar RealScalar; + static bool isApprox(const Derived& self, const OtherDerived& other, RealScalar prec) + { + EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived,OtherDerived) + ei_assert(self.rows() == other.rows() && self.cols() == other.cols()); + typename Derived::Nested nested(self); + typename OtherDerived::Nested otherNested(other); + for(int i = 0; i < self.cols(); ++i) + if((nested.col(i) - otherNested.col(i)).squaredNorm() + > std::min(nested.col(i).squaredNorm(), otherNested.col(i).squaredNorm()) * prec * prec) + return false; + return true; + } + static bool isMuchSmallerThan(const Derived& self, const RealScalar& other, RealScalar prec) + { + typename Derived::Nested nested(self); + for(int i = 0; i < self.cols(); ++i) + if(nested.col(i).squaredNorm() > ei_abs2(other * prec)) + return false; + return true; + } + static bool isMuchSmallerThan(const Derived& self, const OtherDerived& other, RealScalar prec) + { + EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived,OtherDerived) + ei_assert(self.rows() == other.rows() && self.cols() == other.cols()); + typename Derived::Nested nested(self); + typename OtherDerived::Nested otherNested(other); + for(int i = 0; i < self.cols(); ++i) + if(nested.col(i).squaredNorm() > otherNested.col(i).squaredNorm() * prec * prec) + return false; + return true; + } +}; + +#endif + +#endif // EIGEN_FUZZY_H diff --git a/extern/Eigen2/Eigen/src/Core/GenericPacketMath.h b/extern/Eigen2/Eigen/src/Core/GenericPacketMath.h new file mode 100644 index 00000000000..b0eee29f70f --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/GenericPacketMath.h @@ -0,0 +1,150 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. 
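To make the multiplicative semantics of these fuzzy compares concrete, here is a small illustrative sketch (Vector3d and its static constructors come from other Eigen headers):

#include <Eigen/Core>
using namespace Eigen;

void fuzzy_usage_sketch()
{
  Vector3d v(1.0, 2.0, 3.0);
  Vector3d w = v + Vector3d::Constant(1e-14);
  bool nearlyEqual = v.isApprox(w);              // true: ||v-w|| <= prec * min(||v||, ||w||)

  Vector3d tiny = 1e-13 * Vector3d::Ones();
  bool misuse = tiny.isApprox(Vector3d::Zero()); // false: a multiplicative test against
                                                 // an exact zero can never succeed
  bool better = tiny.isMuchSmallerThan(1.0);     // compare against a reference scale instead
}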
+// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_GENERIC_PACKET_MATH_H +#define EIGEN_GENERIC_PACKET_MATH_H + +/** \internal + * \file GenericPacketMath.h + * + * Default implementation for types not supported by the vectorization. + * In practice these functions are provided to make easier the writing + * of generic vectorized code. + */ + +/** \internal \returns a + b (coeff-wise) */ +template inline Packet +ei_padd(const Packet& a, + const Packet& b) { return a+b; } + +/** \internal \returns a - b (coeff-wise) */ +template inline Packet +ei_psub(const Packet& a, + const Packet& b) { return a-b; } + +/** \internal \returns a * b (coeff-wise) */ +template inline Packet +ei_pmul(const Packet& a, + const Packet& b) { return a*b; } + +/** \internal \returns a / b (coeff-wise) */ +template inline Packet +ei_pdiv(const Packet& a, + const Packet& b) { return a/b; } + +/** \internal \returns the min of \a a and \a b (coeff-wise) */ +template inline Packet +ei_pmin(const Packet& a, + const Packet& b) { return std::min(a, b); } + +/** \internal \returns the max of \a a and \a b (coeff-wise) */ +template inline Packet +ei_pmax(const Packet& a, + const Packet& b) { return std::max(a, b); } + +/** \internal \returns a packet version of \a *from, from must be 16 bytes aligned */ +template inline typename ei_packet_traits::type +ei_pload(const Scalar* from) { return *from; } + +/** \internal \returns a packet version of \a *from, (un-aligned load) */ +template inline typename ei_packet_traits::type +ei_ploadu(const Scalar* from) { return *from; } + +/** \internal \returns a packet with constant coefficients \a a, e.g.: (a,a,a,a) */ +template inline typename ei_packet_traits::type +ei_pset1(const Scalar& a) { return a; } + +/** \internal copy the packet \a from to \a *to, \a to must be 16 bytes aligned */ +template inline void ei_pstore(Scalar* to, const Packet& from) +{ (*to) = from; } + +/** \internal copy the packet \a from to \a *to, (un-aligned store) */ +template inline void ei_pstoreu(Scalar* to, const Packet& from) +{ (*to) = from; } + +/** \internal \returns the first element of a packet */ +template inline typename ei_unpacket_traits::type ei_pfirst(const Packet& a) +{ return a; } + +/** \internal \returns a packet where the element i contains the sum of the packet of \a vec[i] */ +template inline Packet +ei_preduxp(const Packet* vecs) { return vecs[0]; } + +/** \internal \returns the sum of the elements of \a a*/ +template inline typename ei_unpacket_traits::type ei_predux(const Packet& a) +{ return a; } + + +/*************************************************************************** +* The following functions might not have to be overwritten for vectorized types +***************************************************************************/ + +/** \internal \returns a * b + c (coeff-wise) */ +template inline Packet +ei_pmadd(const Packet& a, + const Packet& b, + const Packet& c) +{ return ei_padd(ei_pmul(a, b),c); } + +/** \internal \returns a packet version of \a *from. 
+ * \If LoadMode equals Aligned, \a from must be 16 bytes aligned */ +template +inline typename ei_packet_traits::type ei_ploadt(const Scalar* from) +{ + if(LoadMode == Aligned) + return ei_pload(from); + else + return ei_ploadu(from); +} + +/** \internal copy the packet \a from to \a *to. + * If StoreMode equals Aligned, \a to must be 16 bytes aligned */ +template +inline void ei_pstoret(Scalar* to, const Packet& from) +{ + if(LoadMode == Aligned) + ei_pstore(to, from); + else + ei_pstoreu(to, from); +} + +/** \internal default implementation of ei_palign() allowing partial specialization */ +template +struct ei_palign_impl +{ + // by default data are aligned, so there is nothing to be done :) + inline static void run(PacketType&, const PacketType&) {} +}; + +/** \internal update \a first using the concatenation of the \a Offset last elements + * of \a first and packet_size minus \a Offset first elements of \a second */ +template +inline void ei_palign(PacketType& first, const PacketType& second) +{ + ei_palign_impl::run(first,second); +} + +#endif // EIGEN_GENERIC_PACKET_MATH_H + diff --git a/extern/Eigen2/Eigen/src/Core/IO.h b/extern/Eigen2/Eigen/src/Core/IO.h new file mode 100644 index 00000000000..2b00d5bc509 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/IO.h @@ -0,0 +1,184 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2006-2008 Benoit Jacob +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
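The point of these generic fallbacks is that the same kernel can be written once against the packet API and compiled either with real SIMD packets (ei_packet_traits<float>::type being an SSE or AltiVec register type) or with plain scalars. A minimal sketch, assuming both pointers are 16-byte aligned and the length is a multiple of the packet size; this kernel is not part of Eigen:

// Illustrative kernel: to[i] += factor * from[i].
template<typename Scalar>
void scaled_add_sketch(Scalar* to, const Scalar* from, Scalar factor, int size)
{
  typedef typename ei_packet_traits<Scalar>::type Packet;
  const int PacketSize = ei_packet_traits<Scalar>::size;
  const Packet pfactor = ei_pset1(factor);            // broadcast the scalar
  for(int i = 0; i < size; i += PacketSize)           // size assumed to be a multiple
    ei_pstore(to + i,                                 // aligned store
              ei_pmadd(pfactor, ei_pload(from + i),   // factor*from[i..] + to[i..]
                       ei_pload(to + i)));
}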
+ +#ifndef EIGEN_IO_H +#define EIGEN_IO_H + +enum { Raw, AlignCols }; + +/** \class IOFormat + * + * \brief Stores a set of parameters controlling the way matrices are printed + * + * List of available parameters: + * - \b precision number of digits for floating point values + * - \b flags can be either Raw (default) or AlignCols which aligns all the columns + * - \b coeffSeparator string printed between two coefficients of the same row + * - \b rowSeparator string printed between two rows + * - \b rowPrefix string printed at the beginning of each row + * - \b rowSuffix string printed at the end of each row + * - \b matPrefix string printed at the beginning of the matrix + * - \b matSuffix string printed at the end of the matrix + * + * Example: \include IOFormat.cpp + * Output: \verbinclude IOFormat.out + * + * \sa MatrixBase::format(), class WithFormat + */ +struct IOFormat +{ + /** Default contructor, see class IOFormat for the meaning of the parameters */ + IOFormat(int _precision=4, int _flags=Raw, + const std::string& _coeffSeparator = " ", + const std::string& _rowSeparator = "\n", const std::string& _rowPrefix="", const std::string& _rowSuffix="", + const std::string& _matPrefix="", const std::string& _matSuffix="") + : matPrefix(_matPrefix), matSuffix(_matSuffix), rowPrefix(_rowPrefix), rowSuffix(_rowSuffix), rowSeparator(_rowSeparator), + coeffSeparator(_coeffSeparator), precision(_precision), flags(_flags) + { + rowSpacer = ""; + int i = int(matSuffix.length())-1; + while (i>=0 && matSuffix[i]!='\n') + { + rowSpacer += ' '; + i--; + } + } + std::string matPrefix, matSuffix; + std::string rowPrefix, rowSuffix, rowSeparator, rowSpacer; + std::string coeffSeparator; + int precision; + int flags; +}; + +/** \class WithFormat + * + * \brief Pseudo expression providing matrix output with given format + * + * \param ExpressionType the type of the object on which IO stream operations are performed + * + * This class represents an expression with stream operators controlled by a given IOFormat. + * It is the return type of MatrixBase::format() + * and most of the time this is the only way it is used. + * + * See class IOFormat for some examples. + * + * \sa MatrixBase::format(), class IOFormat + */ +template +class WithFormat +{ + public: + + WithFormat(const ExpressionType& matrix, const IOFormat& format) + : m_matrix(matrix), m_format(format) + {} + + friend std::ostream & operator << (std::ostream & s, const WithFormat& wf) + { + return ei_print_matrix(s, wf.m_matrix.eval(), wf.m_format); + } + + protected: + const typename ExpressionType::Nested m_matrix; + IOFormat m_format; +}; + +/** \returns a WithFormat proxy object allowing to print a matrix the with given + * format \a fmt. + * + * See class IOFormat for some examples. 
+ * + * \sa class IOFormat, class WithFormat + */ +template +inline const WithFormat +MatrixBase::format(const IOFormat& fmt) const +{ + return WithFormat(derived(), fmt); +} + +/** \internal + * print the matrix \a _m to the output stream \a s using the output format \a fmt */ +template +std::ostream & ei_print_matrix(std::ostream & s, const Derived& _m, const IOFormat& fmt) +{ + const typename Derived::Nested m = _m; + + int width = 0; + if (fmt.flags & AlignCols) + { + // compute the largest width + for(int j = 1; j < m.cols(); ++j) + for(int i = 0; i < m.rows(); ++i) + { + std::stringstream sstr; + sstr.precision(fmt.precision); + sstr << m.coeff(i,j); + width = std::max(width, int(sstr.str().length())); + } + } + s.precision(fmt.precision); + s << fmt.matPrefix; + for(int i = 0; i < m.rows(); ++i) + { + if (i) + s << fmt.rowSpacer; + s << fmt.rowPrefix; + if(width) s.width(width); + s << m.coeff(i, 0); + for(int j = 1; j < m.cols(); ++j) + { + s << fmt.coeffSeparator; + if (width) s.width(width); + s << m.coeff(i, j); + } + s << fmt.rowSuffix; + if( i < m.rows() - 1) + s << fmt.rowSeparator; + } + s << fmt.matSuffix; + return s; +} + +/** \relates MatrixBase + * + * Outputs the matrix, to the given stream. + * + * If you wish to print the matrix with a format different than the default, use MatrixBase::format(). + * + * It is also possible to change the default format by defining EIGEN_DEFAULT_IO_FORMAT before including Eigen headers. + * If not defined, this will automatically be defined to Eigen::IOFormat(), that is the Eigen::IOFormat with default parameters. + * + * \sa MatrixBase::format() + */ +template +std::ostream & operator << +(std::ostream & s, + const MatrixBase & m) +{ + return ei_print_matrix(s, m.eval(), EIGEN_DEFAULT_IO_FORMAT); +} + +#endif // EIGEN_IO_H diff --git a/extern/Eigen2/Eigen/src/Core/Map.h b/extern/Eigen2/Eigen/src/Core/Map.h new file mode 100644 index 00000000000..5f44a87e685 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/Map.h @@ -0,0 +1,111 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2006-2008 Benoit Jacob +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_MAP_H +#define EIGEN_MAP_H + +/** \class Map + * + * \brief A matrix or vector expression mapping an existing array of data. + * + * \param MatrixType the equivalent matrix type of the mapped data + * \param _PacketAccess allows to enforce aligned loads and stores if set to ForceAligned. + * The default is AsRequested. 
This parameter is internaly used by Eigen + * in expressions such as \code Map<...>(...) += other; \endcode and most + * of the time this is the only way it is used. + * + * This class represents a matrix or vector expression mapping an existing array of data. + * It can be used to let Eigen interface without any overhead with non-Eigen data structures, + * such as plain C arrays or structures from other libraries. + * + * This class is the return type of Matrix::Map() but can also be used directly. + * + * \sa Matrix::Map() + */ +template +struct ei_traits > : public ei_traits +{ + enum { + PacketAccess = _PacketAccess, + Flags = ei_traits::Flags & ~AlignedBit + }; + typedef typename ei_meta_if&, + Map >::ret AlignedDerivedType; +}; + +template class Map + : public MapBase > +{ + public: + + _EIGEN_GENERIC_PUBLIC_INTERFACE(Map, MapBase) + typedef typename ei_traits::AlignedDerivedType AlignedDerivedType; + + inline int stride() const { return this->innerSize(); } + + AlignedDerivedType _convertToForceAligned() + { + return Map(Base::m_data, Base::m_rows.value(), Base::m_cols.value()); + } + + inline Map(const Scalar* data) : Base(data) {} + + inline Map(const Scalar* data, int size) : Base(data, size) {} + + inline Map(const Scalar* data, int rows, int cols) : Base(data, rows, cols) {} + + inline void resize(int rows, int cols) + { + EIGEN_ONLY_USED_FOR_DEBUG(rows); + EIGEN_ONLY_USED_FOR_DEBUG(cols); + ei_assert(rows == this->rows()); + ei_assert(cols == this->cols()); + } + + inline void resize(int size) + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(MatrixType) + EIGEN_ONLY_USED_FOR_DEBUG(size); + ei_assert(size == this->size()); + } + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map) +}; + +/** Constructor copying an existing array of data. + * Only for fixed-size matrices and vectors. + * \param data The array of data to copy + * + * \sa Matrix::Map(const Scalar *) + */ +template +inline Matrix<_Scalar, _Rows, _Cols, _StorageOrder, _MaxRows, _MaxCols> + ::Matrix(const Scalar *data) +{ + _set_noalias(Eigen::Map(data)); +} + +#endif // EIGEN_MAP_H diff --git a/extern/Eigen2/Eigen/src/Core/MapBase.h b/extern/Eigen2/Eigen/src/Core/MapBase.h new file mode 100644 index 00000000000..c923bc34034 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/MapBase.h @@ -0,0 +1,202 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2006-2008 Benoit Jacob +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
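A usage sketch for Map, the user-facing entry point of this header (MatrixXf and VectorXf are the usual dynamic-size Eigen typedefs; illustrative only):

#include <Eigen/Core>
using namespace Eigen;

void map_usage_sketch()
{
  float data[6] = { 1, 2, 3, 4, 5, 6 };

  Map<MatrixXf> m(data, 2, 3);     // 2x3 matrix view over data, no copy is made
  Map<VectorXf> v(data, 6);        // vector view over the same memory
  m(0, 0) = 10.0f;                 // writes through to data[0]

  VectorXf owned = Map<VectorXf>(data, 6);  // explicit copy into an owning vector
}

Passing ForceAligned as the second template argument additionally promises that the pointer is 16-byte aligned, so aligned packet loads and stores can be used.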
+ +#ifndef EIGEN_MAPBASE_H +#define EIGEN_MAPBASE_H + +/** \class MapBase + * + * \brief Base class for Map and Block expression with direct access + * + * Expression classes inheriting MapBase must define the constant \c PacketAccess, + * and type \c AlignedDerivedType in their respective ei_traits<> specialization structure. + * The value of \c PacketAccess can be either: + * - \b ForceAligned which enforces both aligned loads and stores + * - \b AsRequested which is the default behavior + * The type \c AlignedDerivedType should correspond to the equivalent expression type + * with \c PacketAccess being \c ForceAligned. + * + * \sa class Map, class Block + */ +template class MapBase + : public MatrixBase +{ + public: + + typedef MatrixBase Base; + enum { + IsRowMajor = (int(ei_traits::Flags) & RowMajorBit) ? 1 : 0, + PacketAccess = ei_traits::PacketAccess, + RowsAtCompileTime = ei_traits::RowsAtCompileTime, + ColsAtCompileTime = ei_traits::ColsAtCompileTime, + SizeAtCompileTime = Base::SizeAtCompileTime + }; + + typedef typename ei_traits::AlignedDerivedType AlignedDerivedType; + typedef typename ei_traits::Scalar Scalar; + typedef typename Base::PacketScalar PacketScalar; + using Base::derived; + + inline int rows() const { return m_rows.value(); } + inline int cols() const { return m_cols.value(); } + + inline int stride() const { return derived().stride(); } + inline const Scalar* data() const { return m_data; } + + template struct force_aligned_impl { + AlignedDerivedType static run(MapBase& a) { return a.derived(); } + }; + + template struct force_aligned_impl { + AlignedDerivedType static run(MapBase& a) { return a.derived()._convertToForceAligned(); } + }; + + /** \returns an expression equivalent to \c *this but having the \c PacketAccess constant + * set to \c ForceAligned. Must be reimplemented by the derived class. */ + AlignedDerivedType forceAligned() + { + return force_aligned_impl::run(*this); + } + + inline const Scalar& coeff(int row, int col) const + { + if(IsRowMajor) + return m_data[col + row * stride()]; + else // column-major + return m_data[row + col * stride()]; + } + + inline Scalar& coeffRef(int row, int col) + { + if(IsRowMajor) + return const_cast(m_data)[col + row * stride()]; + else // column-major + return const_cast(m_data)[row + col * stride()]; + } + + inline const Scalar coeff(int index) const + { + ei_assert(Derived::IsVectorAtCompileTime || (ei_traits::Flags & LinearAccessBit)); + if ( ((RowsAtCompileTime == 1) == IsRowMajor) ) + return m_data[index]; + else + return m_data[index*stride()]; + } + + inline Scalar& coeffRef(int index) + { + ei_assert(Derived::IsVectorAtCompileTime || (ei_traits::Flags & LinearAccessBit)); + if ( ((RowsAtCompileTime == 1) == IsRowMajor) ) + return const_cast(m_data)[index]; + else + return const_cast(m_data)[index*stride()]; + } + + template + inline PacketScalar packet(int row, int col) const + { + return ei_ploadt + (m_data + (IsRowMajor ? col + row * stride() + : row + col * stride())); + } + + template + inline PacketScalar packet(int index) const + { + return ei_ploadt(m_data + index); + } + + template + inline void writePacket(int row, int col, const PacketScalar& x) + { + ei_pstoret + (const_cast(m_data) + (IsRowMajor ? 
col + row * stride() + : row + col * stride()), x); + } + + template + inline void writePacket(int index, const PacketScalar& x) + { + ei_pstoret + (const_cast(m_data) + index, x); + } + + inline MapBase(const Scalar* data) : m_data(data), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime) + { + EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived) + } + + inline MapBase(const Scalar* data, int size) + : m_data(data), + m_rows(RowsAtCompileTime == Dynamic ? size : RowsAtCompileTime), + m_cols(ColsAtCompileTime == Dynamic ? size : ColsAtCompileTime) + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + ei_assert(size > 0 || data == 0); + ei_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == size); + } + + inline MapBase(const Scalar* data, int rows, int cols) + : m_data(data), m_rows(rows), m_cols(cols) + { + ei_assert( (data == 0) + || ( rows > 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows) + && cols > 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols))); + } + + Derived& operator=(const MapBase& other) + { + return Base::operator=(other); + } + + template + Derived& operator=(const MatrixBase& other) + { + return Base::operator=(other); + } + + using Base::operator*=; + + template + Derived& operator+=(const MatrixBase& other) + { return derived() = forceAligned() + other; } + + template + Derived& operator-=(const MatrixBase& other) + { return derived() = forceAligned() - other; } + + Derived& operator*=(const Scalar& other) + { return derived() = forceAligned() * other; } + + Derived& operator/=(const Scalar& other) + { return derived() = forceAligned() / other; } + + protected: + const Scalar* EIGEN_RESTRICT m_data; + const ei_int_if_dynamic m_rows; + const ei_int_if_dynamic m_cols; +}; + +#endif // EIGEN_MAPBASE_H diff --git a/extern/Eigen2/Eigen/src/Core/MathFunctions.h b/extern/Eigen2/Eigen/src/Core/MathFunctions.h new file mode 100644 index 00000000000..1ee64af02c6 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/MathFunctions.h @@ -0,0 +1,295 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2006-2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
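The m_rows/m_cols members rely on the ei_int_if_dynamic utility so that fixed-size maps store no size at all. The following is a rough re-implementation sketch of that idiom (the name stored_int_sketch is made up; Eigen's own version lives in its utility headers):

template<int Value> struct stored_int_sketch      // compile-time size: stores nothing
{
  explicit stored_int_sketch(int) {}
  int value() const { return Value; }
};
template<> struct stored_int_sketch<Dynamic>      // runtime size: stores the int
{
  explicit stored_int_sketch(int v) : m_value(v) {}
  int value() const { return m_value; }
  int m_value;
};
// Consequence: a fixed-size Map such as Map<Vector4f> is essentially just the
// data pointer, while Map<VectorXf> additionally carries its runtime size.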
+
+#ifndef EIGEN_MATHFUNCTIONS_H
+#define EIGEN_MATHFUNCTIONS_H
+
+template<typename T> inline typename NumTraits<T>::Real precision();
+template<typename T> inline typename NumTraits<T>::Real machine_epsilon();
+template<typename T> inline T ei_random(T a, T b);
+template<typename T> inline T ei_random();
+template<typename T> inline T ei_random_amplitude()
+{
+  if(NumTraits<T>::HasFloatingPoint) return static_cast<T>(1);
+  else return static_cast<T>(10);
+}
+
+template<typename T> inline T ei_hypot(T x, T y)
+{
+  T _x = ei_abs(x);
+  T _y = ei_abs(y);
+  T p = std::max(_x, _y);
+  T q = std::min(_x, _y);
+  T qp = q/p;
+  return p * ei_sqrt(T(1) + qp*qp);
+}
+
+/**************
+***   int   ***
+**************/
+
+template<> inline int precision<int>() { return 0; }
+template<> inline int machine_epsilon<int>() { return 0; }
+inline int ei_real(int x)  { return x; }
+inline int ei_imag(int)    { return 0; }
+inline int ei_conj(int x)  { return x; }
+inline int ei_abs(int x)   { return abs(x); }
+inline int ei_abs2(int x)  { return x*x; }
+inline int ei_sqrt(int)  { ei_assert(false); return 0; }
+inline int ei_exp(int)   { ei_assert(false); return 0; }
+inline int ei_log(int)   { ei_assert(false); return 0; }
+inline int ei_sin(int)   { ei_assert(false); return 0; }
+inline int ei_cos(int)   { ei_assert(false); return 0; }
+inline int ei_atan2(int, int)  { ei_assert(false); return 0; }
+inline int ei_pow(int x, int y) { return int(std::pow(double(x), y)); }
+
+template<> inline int ei_random(int a, int b)
+{
+  // We can't just do rand()%n as only the high-order bits are really random
+  return a + static_cast<int>((b-a+1) * (rand() / (RAND_MAX + 1.0)));
+}
+template<> inline int ei_random()
+{
+  return ei_random<int>(-ei_random_amplitude<int>(), ei_random_amplitude<int>());
+}
+inline bool ei_isMuchSmallerThan(int a, int, int = precision<int>())
+{
+  return a == 0;
+}
+inline bool ei_isApprox(int a, int b, int = precision<int>())
+{
+  return a == b;
+}
+inline bool ei_isApproxOrLessThan(int a, int b, int = precision<int>())
+{
+  return a <= b;
+}
+
+/**************
+***  float  ***
+**************/
+
+template<> inline float precision<float>() { return 1e-5f; }
+template<> inline float machine_epsilon<float>() { return 1.192e-07f; }
+inline float ei_real(float x)  { return x; }
+inline float ei_imag(float)    { return 0.f; }
+inline float ei_conj(float x)  { return x; }
+inline float ei_abs(float x)   { return std::abs(x); }
+inline float ei_abs2(float x)  { return x*x; }
+inline float ei_sqrt(float x)  { return std::sqrt(x); }
+inline float ei_exp(float x)   { return std::exp(x); }
+inline float ei_log(float x)   { return std::log(x); }
+inline float ei_sin(float x)   { return std::sin(x); }
+inline float ei_cos(float x)   { return std::cos(x); }
+inline float ei_atan2(float y, float x) { return std::atan2(y,x); }
+inline float ei_pow(float x, float y)   { return std::pow(x, y); }
+
+template<> inline float ei_random(float a, float b)
+{
+#ifdef EIGEN_NICE_RANDOM
+  int i;
+  do { i = ei_random<int>(256*int(a),256*int(b));
+  } while(i==0);
+  return float(i)/256.f;
+#else
+  return a + (b-a) * float(std::rand()) / float(RAND_MAX);
+#endif
+}
+template<> inline float ei_random()
+{
+  return ei_random<float>(-ei_random_amplitude<float>(), ei_random_amplitude<float>());
+}
+inline bool ei_isMuchSmallerThan(float a, float b, float prec = precision<float>())
+{
+  return ei_abs(a) <= ei_abs(b) * prec;
+}
+inline bool ei_isApprox(float a, float b, float prec = precision<float>())
+{
+  return ei_abs(a - b) <= std::min(ei_abs(a), ei_abs(b)) * prec;
+}
+inline bool ei_isApproxOrLessThan(float a, float b, float prec = precision<float>())
+{
+  return a <= b || ei_isApprox(a, b, prec);
+}
+
+/**************
+*** double  ***
+**************/
+
+template<> inline double precision() { return 1e-11; } +template<> inline double machine_epsilon() { return 2.220e-16; } + +inline double ei_real(double x) { return x; } +inline double ei_imag(double) { return 0.; } +inline double ei_conj(double x) { return x; } +inline double ei_abs(double x) { return std::abs(x); } +inline double ei_abs2(double x) { return x*x; } +inline double ei_sqrt(double x) { return std::sqrt(x); } +inline double ei_exp(double x) { return std::exp(x); } +inline double ei_log(double x) { return std::log(x); } +inline double ei_sin(double x) { return std::sin(x); } +inline double ei_cos(double x) { return std::cos(x); } +inline double ei_atan2(double y, double x) { return std::atan2(y,x); } +inline double ei_pow(double x, double y) { return std::pow(x, y); } + +template<> inline double ei_random(double a, double b) +{ +#ifdef EIGEN_NICE_RANDOM + int i; + do { i= ei_random(256*int(a),256*int(b)); + } while(i==0); + return i/256.; +#else + return a + (b-a) * std::rand() / RAND_MAX; +#endif +} +template<> inline double ei_random() +{ + return ei_random(-ei_random_amplitude(), ei_random_amplitude()); +} +inline bool ei_isMuchSmallerThan(double a, double b, double prec = precision()) +{ + return ei_abs(a) <= ei_abs(b) * prec; +} +inline bool ei_isApprox(double a, double b, double prec = precision()) +{ + return ei_abs(a - b) <= std::min(ei_abs(a), ei_abs(b)) * prec; +} +inline bool ei_isApproxOrLessThan(double a, double b, double prec = precision()) +{ + return a <= b || ei_isApprox(a, b, prec); +} + +/********************* +*** complex *** +*********************/ + +template<> inline float precision >() { return precision(); } +template<> inline float machine_epsilon >() { return machine_epsilon(); } +inline float ei_real(const std::complex& x) { return std::real(x); } +inline float ei_imag(const std::complex& x) { return std::imag(x); } +inline std::complex ei_conj(const std::complex& x) { return std::conj(x); } +inline float ei_abs(const std::complex& x) { return std::abs(x); } +inline float ei_abs2(const std::complex& x) { return std::norm(x); } +inline std::complex ei_exp(std::complex x) { return std::exp(x); } +inline std::complex ei_sin(std::complex x) { return std::sin(x); } +inline std::complex ei_cos(std::complex x) { return std::cos(x); } +inline std::complex ei_atan2(std::complex, std::complex ) { ei_assert(false); return 0; } + +template<> inline std::complex ei_random() +{ + return std::complex(ei_random(), ei_random()); +} +inline bool ei_isMuchSmallerThan(const std::complex& a, const std::complex& b, float prec = precision()) +{ + return ei_abs2(a) <= ei_abs2(b) * prec * prec; +} +inline bool ei_isMuchSmallerThan(const std::complex& a, float b, float prec = precision()) +{ + return ei_abs2(a) <= ei_abs2(b) * prec * prec; +} +inline bool ei_isApprox(const std::complex& a, const std::complex& b, float prec = precision()) +{ + return ei_isApprox(ei_real(a), ei_real(b), prec) + && ei_isApprox(ei_imag(a), ei_imag(b), prec); +} +// ei_isApproxOrLessThan wouldn't make sense for complex numbers + +/********************** +*** complex *** +**********************/ + +template<> inline double precision >() { return precision(); } +template<> inline double machine_epsilon >() { return machine_epsilon(); } +inline double ei_real(const std::complex& x) { return std::real(x); } +inline double ei_imag(const std::complex& x) { return std::imag(x); } +inline std::complex ei_conj(const std::complex& x) { return std::conj(x); } +inline double ei_abs(const std::complex& x) { 
return std::abs(x); } +inline double ei_abs2(const std::complex& x) { return std::norm(x); } +inline std::complex ei_exp(std::complex x) { return std::exp(x); } +inline std::complex ei_sin(std::complex x) { return std::sin(x); } +inline std::complex ei_cos(std::complex x) { return std::cos(x); } +inline std::complex ei_atan2(std::complex, std::complex) { ei_assert(false); return 0; } + +template<> inline std::complex ei_random() +{ + return std::complex(ei_random(), ei_random()); +} +inline bool ei_isMuchSmallerThan(const std::complex& a, const std::complex& b, double prec = precision()) +{ + return ei_abs2(a) <= ei_abs2(b) * prec * prec; +} +inline bool ei_isMuchSmallerThan(const std::complex& a, double b, double prec = precision()) +{ + return ei_abs2(a) <= ei_abs2(b) * prec * prec; +} +inline bool ei_isApprox(const std::complex& a, const std::complex& b, double prec = precision()) +{ + return ei_isApprox(ei_real(a), ei_real(b), prec) + && ei_isApprox(ei_imag(a), ei_imag(b), prec); +} +// ei_isApproxOrLessThan wouldn't make sense for complex numbers + + +/****************** +*** long double *** +******************/ + +template<> inline long double precision() { return precision(); } +template<> inline long double machine_epsilon() { return 1.084e-19l; } +inline long double ei_real(long double x) { return x; } +inline long double ei_imag(long double) { return 0.; } +inline long double ei_conj(long double x) { return x; } +inline long double ei_abs(long double x) { return std::abs(x); } +inline long double ei_abs2(long double x) { return x*x; } +inline long double ei_sqrt(long double x) { return std::sqrt(x); } +inline long double ei_exp(long double x) { return std::exp(x); } +inline long double ei_log(long double x) { return std::log(x); } +inline long double ei_sin(long double x) { return std::sin(x); } +inline long double ei_cos(long double x) { return std::cos(x); } +inline long double ei_atan2(long double y, long double x) { return std::atan2(y,x); } +inline long double ei_pow(long double x, long double y) { return std::pow(x, y); } + +template<> inline long double ei_random(long double a, long double b) +{ + return ei_random(static_cast(a),static_cast(b)); +} +template<> inline long double ei_random() +{ + return ei_random(-ei_random_amplitude(), ei_random_amplitude()); +} +inline bool ei_isMuchSmallerThan(long double a, long double b, long double prec = precision()) +{ + return ei_abs(a) <= ei_abs(b) * prec; +} +inline bool ei_isApprox(long double a, long double b, long double prec = precision()) +{ + return ei_abs(a - b) <= std::min(ei_abs(a), ei_abs(b)) * prec; +} +inline bool ei_isApproxOrLessThan(long double a, long double b, long double prec = precision()) +{ + return a <= b || ei_isApprox(a, b, prec); +} + +#endif // EIGEN_MATHFUNCTIONS_H diff --git a/extern/Eigen2/Eigen/src/Core/Matrix.h b/extern/Eigen2/Eigen/src/Core/Matrix.h new file mode 100644 index 00000000000..ffd16d37606 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/Matrix.h @@ -0,0 +1,637 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2006-2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. 
+// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_MATRIX_H +#define EIGEN_MATRIX_H + + +/** \class Matrix + * + * \brief The matrix class, also used for vectors and row-vectors + * + * The %Matrix class is the work-horse for all \em dense (\ref dense "note") matrices and vectors within Eigen. + * Vectors are matrices with one column, and row-vectors are matrices with one row. + * + * The %Matrix class encompasses \em both fixed-size and dynamic-size objects (\ref fixedsize "note"). + * + * The first three template parameters are required: + * \param _Scalar Numeric type, i.e. float, double, int + * \param _Rows Number of rows, or \b Dynamic + * \param _Cols Number of columns, or \b Dynamic + * + * The remaining template parameters are optional -- in most cases you don't have to worry about them. + * \param _Options A combination of either \b RowMajor or \b ColMajor, and of either + * \b AutoAlign or \b DontAlign. + * The former controls storage order, and defaults to column-major. The latter controls alignment, which is required + * for vectorization. It defaults to aligning matrices except for fixed sizes that aren't a multiple of the packet size. + * \param _MaxRows Maximum number of rows. Defaults to \a _Rows (\ref maxrows "note"). + * \param _MaxCols Maximum number of columns. Defaults to \a _Cols (\ref maxrows "note"). + * + * Eigen provides a number of typedefs covering the usual cases. Here are some examples: + * + * \li \c Matrix2d is a 2x2 square matrix of doubles (\c Matrix) + * \li \c Vector4f is a vector of 4 floats (\c Matrix) + * \li \c RowVector3i is a row-vector of 3 ints (\c Matrix) + * + * \li \c MatrixXf is a dynamic-size matrix of floats (\c Matrix) + * \li \c VectorXf is a dynamic-size vector of floats (\c Matrix) + * + * See \link matrixtypedefs this page \endlink for a complete list of predefined \em %Matrix and \em Vector typedefs. + * + * You can access elements of vectors and matrices using normal subscripting: + * + * \code + * Eigen::VectorXd v(10); + * v[0] = 0.1; + * v[1] = 0.2; + * v(0) = 0.3; + * v(1) = 0.4; + * + * Eigen::MatrixXi m(10, 10); + * m(0, 1) = 1; + * m(0, 2) = 2; + * m(0, 3) = 3; + * \endcode + * + * Some notes: + * + *
+ *
+ * <dl>
+ *
+ * <dt><b>\anchor dense Dense versus sparse:</b></dt>
+ * <dd>This %Matrix class handles dense, not sparse matrices and vectors. For sparse matrices and vectors, see the Sparse module.
+ *
+ * Dense matrices and vectors are plain usual arrays of coefficients. All the coefficients are stored in an ordinary contiguous array.
+ * This is unlike Sparse matrices and vectors where the coefficients are stored as a list of nonzero coefficients.</dd>
+ *
+ * <dt><b>\anchor fixedsize Fixed-size versus dynamic-size:</b></dt>
+ * <dd>Fixed-size means that the numbers of rows and columns are known at compile-time. In this case, Eigen allocates the array
+ * of coefficients as a fixed-size array, as a class member. This makes sense for very small matrices, typically up to 4x4, sometimes up
+ * to 16x16. Larger matrices should be declared as dynamic-size even if one happens to know their size at compile-time.
+ *
+ * Dynamic-size means that the numbers of rows or columns are not necessarily known at compile-time. In this case they are runtime
+ * variables, and the array of coefficients is allocated dynamically on the heap.
+ *
+ * Note that \em dense matrices, be they fixed-size or dynamic-size, do not expand dynamically in the sense of a std::map.
+ * If you want this behavior, see the Sparse module.</dd>
+ *
+ * <dt><b>\anchor maxrows _MaxRows and _MaxCols:</b></dt>
+ * <dd>In most cases, one just leaves these parameters to the default values.
+ * These parameters mean the maximum size of rows and columns that the matrix may have. They are useful in cases
+ * when the exact numbers of rows and columns are not known at compile-time, but it is known at compile-time that they cannot
+ * exceed a certain value. This happens when taking dynamic-size blocks inside fixed-size matrices: in this case _MaxRows and _MaxCols
+ * are the dimensions of the original matrix, while _Rows and _Cols are Dynamic.</dd>
+ *
+ * </dl>
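+ *
+ * A short sketch of the distinction in practice (using only typedefs and methods
+ * documented in this file):
+ *
+ * \code
+ * Eigen::Matrix3f a;        // fixed-size: the 3x3 array lives inside the object
+ * a.setZero();
+ *
+ * Eigen::MatrixXf b(2, 5);  // dynamic-size: the 2x5 array is allocated on the heap
+ * b.setOnes();
+ * b.resize(4, 3);           // 12 coefficients instead of 10: reallocates, previous values are lost
+ * \endcode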
+ * + * \see MatrixBase for the majority of the API methods for matrices + */ +template +struct ei_traits > +{ + typedef _Scalar Scalar; + enum { + RowsAtCompileTime = _Rows, + ColsAtCompileTime = _Cols, + MaxRowsAtCompileTime = _MaxRows, + MaxColsAtCompileTime = _MaxCols, + Flags = ei_compute_matrix_flags<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::ret, + CoeffReadCost = NumTraits::ReadCost + }; +}; + +template +class Matrix + : public MatrixBase > +{ + public: + EIGEN_GENERIC_PUBLIC_INTERFACE(Matrix) + enum { Options = _Options }; + friend class Eigen::Map; + typedef class Eigen::Map UnalignedMapType; + friend class Eigen::Map; + typedef class Eigen::Map AlignedMapType; + + protected: + ei_matrix_storage m_storage; + + public: + enum { NeedsToAlign = (Options&AutoAlign) == AutoAlign + && SizeAtCompileTime!=Dynamic && ((sizeof(Scalar)*SizeAtCompileTime)%16)==0 }; + EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) + + Base& base() { return *static_cast(this); } + const Base& base() const { return *static_cast(this); } + + EIGEN_STRONG_INLINE int rows() const { return m_storage.rows(); } + EIGEN_STRONG_INLINE int cols() const { return m_storage.cols(); } + + EIGEN_STRONG_INLINE int stride(void) const + { + if(Flags & RowMajorBit) + return m_storage.cols(); + else + return m_storage.rows(); + } + + EIGEN_STRONG_INLINE const Scalar& coeff(int row, int col) const + { + if(Flags & RowMajorBit) + return m_storage.data()[col + row * m_storage.cols()]; + else // column-major + return m_storage.data()[row + col * m_storage.rows()]; + } + + EIGEN_STRONG_INLINE const Scalar& coeff(int index) const + { + return m_storage.data()[index]; + } + + EIGEN_STRONG_INLINE Scalar& coeffRef(int row, int col) + { + if(Flags & RowMajorBit) + return m_storage.data()[col + row * m_storage.cols()]; + else // column-major + return m_storage.data()[row + col * m_storage.rows()]; + } + + EIGEN_STRONG_INLINE Scalar& coeffRef(int index) + { + return m_storage.data()[index]; + } + + template + EIGEN_STRONG_INLINE PacketScalar packet(int row, int col) const + { + return ei_ploadt + (m_storage.data() + (Flags & RowMajorBit + ? col + row * m_storage.cols() + : row + col * m_storage.rows())); + } + + template + EIGEN_STRONG_INLINE PacketScalar packet(int index) const + { + return ei_ploadt(m_storage.data() + index); + } + + template + EIGEN_STRONG_INLINE void writePacket(int row, int col, const PacketScalar& x) + { + ei_pstoret + (m_storage.data() + (Flags & RowMajorBit + ? col + row * m_storage.cols() + : row + col * m_storage.rows()), x); + } + + template + EIGEN_STRONG_INLINE void writePacket(int index, const PacketScalar& x) + { + ei_pstoret(m_storage.data() + index, x); + } + + /** \returns a const pointer to the data array of this matrix */ + EIGEN_STRONG_INLINE const Scalar *data() const + { return m_storage.data(); } + + /** \returns a pointer to the data array of this matrix */ + EIGEN_STRONG_INLINE Scalar *data() + { return m_storage.data(); } + + /** Resizes \c *this to a \a rows x \a cols matrix. + * + * Makes sense for dynamic-size matrices only. + * + * If the current number of coefficients of \c *this exactly matches the + * product \a rows * \a cols, then no memory allocation is performed and + * the current values are left unchanged. In all other cases, including + * shrinking, the data is reallocated and all previous values are lost. + * + * \sa resize(int) for vectors. 
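+ *
+ * A small usage sketch (dynamic-size matrix assumed, as required above):
+ *
+ * \code
+ * Eigen::MatrixXd m(2, 6);
+ * m.resize(3, 4);  // still 12 coefficients: no reallocation, values kept
+ * m.resize(5, 5);  // 25 coefficients: reallocated, previous values are lost
+ * \endcode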
+ */ + inline void resize(int rows, int cols) + { + ei_assert((MaxRowsAtCompileTime == Dynamic || MaxRowsAtCompileTime >= rows) + && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows) + && (MaxColsAtCompileTime == Dynamic || MaxColsAtCompileTime >= cols) + && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols)); + m_storage.resize(rows * cols, rows, cols); + } + + /** Resizes \c *this to a vector of length \a size + * + * \sa resize(int,int) for the details. + */ + inline void resize(int size) + { + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Matrix) + if(RowsAtCompileTime == 1) + m_storage.resize(size, 1, size); + else + m_storage.resize(size, size, 1); + } + + /** Copies the value of the expression \a other into \c *this with automatic resizing. + * + * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized), + * it will be initialized. + * + * Note that copying a row-vector into a vector (and conversely) is allowed. + * The resizing, if any, is then done in the appropriate way so that row-vectors + * remain row-vectors and vectors remain vectors. + */ + template + EIGEN_STRONG_INLINE Matrix& operator=(const MatrixBase& other) + { + return _set(other); + } + + /** This is a special case of the templated operator=. Its purpose is to + * prevent a default operator= from hiding the templated operator=. + */ + EIGEN_STRONG_INLINE Matrix& operator=(const Matrix& other) + { + return _set(other); + } + + EIGEN_INHERIT_ASSIGNMENT_OPERATOR(Matrix, +=) + EIGEN_INHERIT_ASSIGNMENT_OPERATOR(Matrix, -=) + EIGEN_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Matrix, *=) + EIGEN_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Matrix, /=) + + /** Default constructor. + * + * For fixed-size matrices, does nothing. + * + * For dynamic-size matrices, creates an empty matrix of size 0. Does not allocate any array. Such a matrix + * is called a null matrix. This constructor is the unique way to create null matrices: resizing + * a matrix to 0 is not supported. + * + * \sa resize(int,int) + */ + EIGEN_STRONG_INLINE explicit Matrix() : m_storage() + { + _check_template_params(); + } + +#ifndef EIGEN_PARSED_BY_DOXYGEN + /** \internal */ + Matrix(ei_constructor_without_unaligned_array_assert) + : m_storage(ei_constructor_without_unaligned_array_assert()) + {} +#endif + + /** Constructs a vector or row-vector with given dimension. \only_for_vectors + * + * Note that this is only useful for dynamic-size vectors. For fixed-size vectors, + * it is redundant to pass the dimension here, so it makes more sense to use the default + * constructor Matrix() instead. + */ + EIGEN_STRONG_INLINE explicit Matrix(int dim) + : m_storage(dim, RowsAtCompileTime == 1 ? 1 : dim, ColsAtCompileTime == 1 ? 1 : dim) + { + _check_template_params(); + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Matrix) + ei_assert(dim > 0); + ei_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == dim); + } + + /** This constructor has two very different behaviors, depending on the type of *this. + * + * \li When Matrix is a fixed-size vector type of size 2, this constructor constructs + * an initialized vector. The parameters \a x, \a y are copied into the first and second + * coords of the vector respectively. + * \li Otherwise, this constructor constructs an uninitialized matrix with \a x rows and + * \a y columns. This is useful for dynamic-size matrices. For fixed-size matrices, + * it is redundant to pass these parameters, so one should use the default constructor + * Matrix() instead. 
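+ *
+ * A brief sketch of the two behaviors described above:
+ *
+ * \code
+ * Eigen::Vector2i v(6, 7);  // fixed-size vector of size 2: the coefficients are 6 and 7
+ * Eigen::MatrixXi m(6, 7);  // dynamic-size matrix: 6 rows and 7 columns, coefficients uninitialized
+ * \endcode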
+ */ + EIGEN_STRONG_INLINE Matrix(int x, int y) : m_storage(x*y, x, y) + { + _check_template_params(); + if((RowsAtCompileTime == 1 && ColsAtCompileTime == 2) + || (RowsAtCompileTime == 2 && ColsAtCompileTime == 1)) + { + m_storage.data()[0] = Scalar(x); + m_storage.data()[1] = Scalar(y); + } + else + { + ei_assert(x > 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == x) + && y > 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == y)); + } + } + /** constructs an initialized 2D vector with given coefficients */ + EIGEN_STRONG_INLINE Matrix(const float& x, const float& y) + { + _check_template_params(); + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 2) + m_storage.data()[0] = x; + m_storage.data()[1] = y; + } + /** constructs an initialized 2D vector with given coefficients */ + EIGEN_STRONG_INLINE Matrix(const double& x, const double& y) + { + _check_template_params(); + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 2) + m_storage.data()[0] = x; + m_storage.data()[1] = y; + } + /** constructs an initialized 3D vector with given coefficients */ + EIGEN_STRONG_INLINE Matrix(const Scalar& x, const Scalar& y, const Scalar& z) + { + _check_template_params(); + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 3) + m_storage.data()[0] = x; + m_storage.data()[1] = y; + m_storage.data()[2] = z; + } + /** constructs an initialized 4D vector with given coefficients */ + EIGEN_STRONG_INLINE Matrix(const Scalar& x, const Scalar& y, const Scalar& z, const Scalar& w) + { + _check_template_params(); + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 4) + m_storage.data()[0] = x; + m_storage.data()[1] = y; + m_storage.data()[2] = z; + m_storage.data()[3] = w; + } + + explicit Matrix(const Scalar *data); + + /** Constructor copying the value of the expression \a other */ + template + EIGEN_STRONG_INLINE Matrix(const MatrixBase& other) + : m_storage(other.rows() * other.cols(), other.rows(), other.cols()) + { + _check_template_params(); + _set_noalias(other); + } + /** Copy constructor */ + EIGEN_STRONG_INLINE Matrix(const Matrix& other) + : Base(), m_storage(other.rows() * other.cols(), other.rows(), other.cols()) + { + _check_template_params(); + _set_noalias(other); + } + /** Destructor */ + inline ~Matrix() {} + + /** Override MatrixBase::swap() since for dynamic-sized matrices of same type it is enough to swap the + * data pointers. + */ + template + void swap(const MatrixBase& other); + + /** \name Map + * These are convenience functions returning Map objects. The Map() static functions return unaligned Map objects, + * while the AlignedMap() functions return aligned Map objects and thus should be called only with 16-byte-aligned + * \a data pointers. 
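+ *
+ * A minimal sketch of wrapping existing, not necessarily aligned, data:
+ *
+ * \code
+ * float raw[6] = { 1, 2, 3, 4, 5, 6 };
+ * Eigen::MatrixXf m = Eigen::MatrixXf::Map(raw, 2, 3);  // view raw as a 2x3 matrix; copied on assignment
+ * \endcode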
+ * + * \see class Map + */ + //@{ + inline static const UnalignedMapType Map(const Scalar* data) + { return UnalignedMapType(data); } + inline static UnalignedMapType Map(Scalar* data) + { return UnalignedMapType(data); } + inline static const UnalignedMapType Map(const Scalar* data, int size) + { return UnalignedMapType(data, size); } + inline static UnalignedMapType Map(Scalar* data, int size) + { return UnalignedMapType(data, size); } + inline static const UnalignedMapType Map(const Scalar* data, int rows, int cols) + { return UnalignedMapType(data, rows, cols); } + inline static UnalignedMapType Map(Scalar* data, int rows, int cols) + { return UnalignedMapType(data, rows, cols); } + + inline static const AlignedMapType MapAligned(const Scalar* data) + { return AlignedMapType(data); } + inline static AlignedMapType MapAligned(Scalar* data) + { return AlignedMapType(data); } + inline static const AlignedMapType MapAligned(const Scalar* data, int size) + { return AlignedMapType(data, size); } + inline static AlignedMapType MapAligned(Scalar* data, int size) + { return AlignedMapType(data, size); } + inline static const AlignedMapType MapAligned(const Scalar* data, int rows, int cols) + { return AlignedMapType(data, rows, cols); } + inline static AlignedMapType MapAligned(Scalar* data, int rows, int cols) + { return AlignedMapType(data, rows, cols); } + //@} + + using Base::setConstant; + Matrix& setConstant(int size, const Scalar& value); + Matrix& setConstant(int rows, int cols, const Scalar& value); + + using Base::setZero; + Matrix& setZero(int size); + Matrix& setZero(int rows, int cols); + + using Base::setOnes; + Matrix& setOnes(int size); + Matrix& setOnes(int rows, int cols); + + using Base::setRandom; + Matrix& setRandom(int size); + Matrix& setRandom(int rows, int cols); + + using Base::setIdentity; + Matrix& setIdentity(int rows, int cols); + +/////////// Geometry module /////////// + + template + explicit Matrix(const RotationBase& r); + template + Matrix& operator=(const RotationBase& r); + + // allow to extend Matrix outside Eigen + #ifdef EIGEN_MATRIX_PLUGIN + #include EIGEN_MATRIX_PLUGIN + #endif + + private: + /** \internal Resizes *this in preparation for assigning \a other to it. + * Takes care of doing all the checking that's needed. + * + * Note that copying a row-vector into a vector (and conversely) is allowed. + * The resizing, if any, is then done in the appropriate way so that row-vectors + * remain row-vectors and vectors remain vectors. + */ + template + EIGEN_STRONG_INLINE void _resize_to_match(const MatrixBase& other) + { + if(RowsAtCompileTime == 1) + { + ei_assert(other.isVector()); + resize(1, other.size()); + } + else if(ColsAtCompileTime == 1) + { + ei_assert(other.isVector()); + resize(other.size(), 1); + } + else resize(other.rows(), other.cols()); + } + + /** \internal Copies the value of the expression \a other into \c *this with automatic resizing. + * + * *this might be resized to match the dimensions of \a other. If *this was a null matrix (not already initialized), + * it will be initialized. + * + * Note that copying a row-vector into a vector (and conversely) is allowed. + * The resizing, if any, is then done in the appropriate way so that row-vectors + * remain row-vectors and vectors remain vectors. 
+ * + * \sa operator=(const MatrixBase&), _set_noalias() + */ + template + EIGEN_STRONG_INLINE Matrix& _set(const MatrixBase& other) + { + _set_selector(other.derived(), typename ei_meta_if::ret()); + return *this; + } + + template + EIGEN_STRONG_INLINE void _set_selector(const OtherDerived& other, const ei_meta_true&) { _set_noalias(other.eval()); } + + template + EIGEN_STRONG_INLINE void _set_selector(const OtherDerived& other, const ei_meta_false&) { _set_noalias(other); } + + /** \internal Like _set() but additionally makes the assumption that no aliasing effect can happen (which + * is the case when creating a new matrix) so one can enforce lazy evaluation. + * + * \sa operator=(const MatrixBase&), _set() + */ + template + EIGEN_STRONG_INLINE Matrix& _set_noalias(const MatrixBase& other) + { + _resize_to_match(other); + // the 'false' below means to enforce lazy evaluation. We don't use lazyAssign() because + // it wouldn't allow to copy a row-vector into a column-vector. + return ei_assign_selector::run(*this, other.derived()); + } + + static EIGEN_STRONG_INLINE void _check_template_params() + { + EIGEN_STATIC_ASSERT((_Rows > 0 + && _Cols > 0 + && _MaxRows <= _Rows + && _MaxCols <= _Cols + && (_Options & (AutoAlign|RowMajor)) == _Options), + INVALID_MATRIX_TEMPLATE_PARAMETERS) + } + + template + friend struct ei_matrix_swap_impl; +}; + +template::ret, + bool IsDynamicSize = MatrixType::SizeAtCompileTime==Dynamic> +struct ei_matrix_swap_impl +{ + static inline void run(MatrixType& matrix, MatrixBase& other) + { + matrix.base().swap(other); + } +}; + +template +struct ei_matrix_swap_impl +{ + static inline void run(MatrixType& matrix, MatrixBase& other) + { + matrix.m_storage.swap(other.derived().m_storage); + } +}; + +template +template +inline void Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::swap(const MatrixBase& other) +{ + ei_matrix_swap_impl::run(*this, *const_cast*>(&other)); +} + + +/** \defgroup matrixtypedefs Global matrix typedefs + * + * \ingroup Core_Module + * + * Eigen defines several typedef shortcuts for most common matrix and vector types. + * + * The general patterns are the following: + * + * \c MatrixSizeType where \c Size can be \c 2,\c 3,\c 4 for fixed size square matrices or \c X for dynamic size, + * and where \c Type can be \c i for integer, \c f for float, \c d for double, \c cf for complex float, \c cd + * for complex double. + * + * For example, \c Matrix3d is a fixed-size 3x3 matrix type of doubles, and \c MatrixXf is a dynamic-size matrix of floats. + * + * There are also \c VectorSizeType and \c RowVectorSizeType which are self-explanatory. For example, \c Vector4cf is + * a fixed-size vector of 4 complex floats. 
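+ *
+ * A few concrete instances of the naming pattern, for illustration:
+ *
+ * \code
+ * Eigen::Matrix2i    mi;   // fixed-size 2x2 matrix of int
+ * Eigen::Vector3d    vd;   // fixed-size vector of 3 doubles
+ * Eigen::RowVectorXf rf;   // dynamic-size row-vector of floats
+ * Eigen::MatrixXcd   mcd;  // dynamic-size matrix of std::complex<double>
+ * \endcode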
+ * + * \sa class Matrix + */ + +#define EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix) \ +/** \ingroup matrixtypedefs */ \ +typedef Matrix Matrix##SizeSuffix##TypeSuffix; \ +/** \ingroup matrixtypedefs */ \ +typedef Matrix Vector##SizeSuffix##TypeSuffix; \ +/** \ingroup matrixtypedefs */ \ +typedef Matrix RowVector##SizeSuffix##TypeSuffix; + +#define EIGEN_MAKE_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \ +EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 2, 2) \ +EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 3, 3) \ +EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 4, 4) \ +EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Dynamic, X) + +EIGEN_MAKE_TYPEDEFS_ALL_SIZES(int, i) +EIGEN_MAKE_TYPEDEFS_ALL_SIZES(float, f) +EIGEN_MAKE_TYPEDEFS_ALL_SIZES(double, d) +EIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex, cf) +EIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex, cd) + +#undef EIGEN_MAKE_TYPEDEFS_ALL_SIZES +#undef EIGEN_MAKE_TYPEDEFS + +#undef EIGEN_MAKE_TYPEDEFS_LARGE + +#define EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, SizeSuffix) \ +using Eigen::Matrix##SizeSuffix##TypeSuffix; \ +using Eigen::Vector##SizeSuffix##TypeSuffix; \ +using Eigen::RowVector##SizeSuffix##TypeSuffix; + +#define EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(TypeSuffix) \ +EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 2) \ +EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 3) \ +EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 4) \ +EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, X) \ + +#define EIGEN_USING_MATRIX_TYPEDEFS \ +EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(i) \ +EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(f) \ +EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(d) \ +EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(cf) \ +EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(cd) + +#endif // EIGEN_MATRIX_H diff --git a/extern/Eigen2/Eigen/src/Core/MatrixBase.h b/extern/Eigen2/Eigen/src/Core/MatrixBase.h new file mode 100644 index 00000000000..7935a7554ea --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/MatrixBase.h @@ -0,0 +1,632 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2006-2008 Benoit Jacob +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_MATRIXBASE_H +#define EIGEN_MATRIXBASE_H + +/** \class MatrixBase + * + * \brief Base class for all matrices, vectors, and expressions + * + * This class is the base that is inherited by all matrix, vector, and expression + * types. Most of the Eigen API is contained in this class. Other important classes for + * the Eigen API are Matrix, Cwise, and PartialRedux. 
+ * + * Note that some methods are defined in the \ref Array module. + * + * \param Derived is the derived type, e.g. a matrix type, or an expression, etc. + * + * When writing a function taking Eigen objects as argument, if you want your function + * to take as argument any matrix, vector, or expression, just let it take a + * MatrixBase argument. As an example, here is a function printFirstRow which, given + * a matrix, vector, or expression \a x, prints the first row of \a x. + * + * \code + template + void printFirstRow(const Eigen::MatrixBase& x) + { + cout << x.row(0) << endl; + } + * \endcode + * + */ +template class MatrixBase +{ + public: + +#ifndef EIGEN_PARSED_BY_DOXYGEN + class InnerIterator; + + typedef typename ei_traits::Scalar Scalar; + typedef typename ei_packet_traits::type PacketScalar; +#endif // not EIGEN_PARSED_BY_DOXYGEN + + enum { + + RowsAtCompileTime = ei_traits::RowsAtCompileTime, + /**< The number of rows at compile-time. This is just a copy of the value provided + * by the \a Derived type. If a value is not known at compile-time, + * it is set to the \a Dynamic constant. + * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */ + + ColsAtCompileTime = ei_traits::ColsAtCompileTime, + /**< The number of columns at compile-time. This is just a copy of the value provided + * by the \a Derived type. If a value is not known at compile-time, + * it is set to the \a Dynamic constant. + * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */ + + + SizeAtCompileTime = (ei_size_at_compile_time::RowsAtCompileTime, + ei_traits::ColsAtCompileTime>::ret), + /**< This is equal to the number of coefficients, i.e. the number of + * rows times the number of columns, or to \a Dynamic if this is not + * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */ + + MaxRowsAtCompileTime = ei_traits::MaxRowsAtCompileTime, + /**< This value is equal to the maximum possible number of rows that this expression + * might have. If this expression might have an arbitrarily high number of rows, + * this value is set to \a Dynamic. + * + * This value is useful to know when evaluating an expression, in order to determine + * whether it is possible to avoid doing a dynamic memory allocation. + * + * \sa RowsAtCompileTime, MaxColsAtCompileTime, MaxSizeAtCompileTime + */ + + MaxColsAtCompileTime = ei_traits::MaxColsAtCompileTime, + /**< This value is equal to the maximum possible number of columns that this expression + * might have. If this expression might have an arbitrarily high number of columns, + * this value is set to \a Dynamic. + * + * This value is useful to know when evaluating an expression, in order to determine + * whether it is possible to avoid doing a dynamic memory allocation. + * + * \sa ColsAtCompileTime, MaxRowsAtCompileTime, MaxSizeAtCompileTime + */ + + MaxSizeAtCompileTime = (ei_size_at_compile_time::MaxRowsAtCompileTime, + ei_traits::MaxColsAtCompileTime>::ret), + /**< This value is equal to the maximum possible number of coefficients that this expression + * might have. If this expression might have an arbitrarily high number of coefficients, + * this value is set to \a Dynamic. + * + * This value is useful to know when evaluating an expression, in order to determine + * whether it is possible to avoid doing a dynamic memory allocation. 
+ * + * \sa SizeAtCompileTime, MaxRowsAtCompileTime, MaxColsAtCompileTime + */ + + IsVectorAtCompileTime = ei_traits::RowsAtCompileTime == 1 + || ei_traits::ColsAtCompileTime == 1, + /**< This is set to true if either the number of rows or the number of + * columns is known at compile-time to be equal to 1. Indeed, in that case, + * we are dealing with a column-vector (if there is only one column) or with + * a row-vector (if there is only one row). */ + + Flags = ei_traits::Flags, + /**< This stores expression \ref flags flags which may or may not be inherited by new expressions + * constructed from this one. See the \ref flags "list of flags". + */ + + CoeffReadCost = ei_traits::CoeffReadCost + /**< This is a rough measure of how expensive it is to read one coefficient from + * this expression. + */ + }; + + /** Default constructor. Just checks at compile-time for self-consistency of the flags. */ + MatrixBase() + { + ei_assert(ei_are_flags_consistent::ret); + } + +#ifndef EIGEN_PARSED_BY_DOXYGEN + /** This is the "real scalar" type; if the \a Scalar type is already real numbers + * (e.g. int, float or double) then \a RealScalar is just the same as \a Scalar. If + * \a Scalar is \a std::complex then RealScalar is \a T. + * + * \sa class NumTraits + */ + typedef typename NumTraits::Real RealScalar; + + /** type of the equivalent square matrix */ + typedef Matrix SquareMatrixType; +#endif // not EIGEN_PARSED_BY_DOXYGEN + + /** \returns the number of rows. \sa cols(), RowsAtCompileTime */ + inline int rows() const { return derived().rows(); } + /** \returns the number of columns. \sa rows(), ColsAtCompileTime*/ + inline int cols() const { return derived().cols(); } + /** \returns the number of coefficients, which is \a rows()*cols(). + * \sa rows(), cols(), SizeAtCompileTime. */ + inline int size() const { return rows() * cols(); } + /** \returns the number of nonzero coefficients which is in practice the number + * of stored coefficients. */ + inline int nonZeros() const { return derived.nonZeros(); } + /** \returns true if either the number of rows or the number of columns is equal to 1. + * In other words, this function returns + * \code rows()==1 || cols()==1 \endcode + * \sa rows(), cols(), IsVectorAtCompileTime. */ + inline bool isVector() const { return rows()==1 || cols()==1; } + /** \returns the size of the storage major dimension, + * i.e., the number of columns for a columns major matrix, and the number of rows otherwise */ + int outerSize() const { return (int(Flags)&RowMajorBit) ? this->rows() : this->cols(); } + /** \returns the size of the inner dimension according to the storage order, + * i.e., the number of rows for a columns major matrix, and the number of cols otherwise */ + int innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); } + +#ifndef EIGEN_PARSED_BY_DOXYGEN + /** \internal the plain matrix type corresponding to this expression. Note that is not necessarily + * exactly the return type of eval(): in the case of plain matrices, the return type of eval() is a const + * reference to a matrix, not a matrix! It guaranteed however, that the return type of eval() is either + * PlainMatrixType or const PlainMatrixType&. + */ + typedef typename ei_plain_matrix_type::type PlainMatrixType; + /** \internal the column-major plain matrix type corresponding to this expression. 
Note that is not necessarily + * exactly the return type of eval(): in the case of plain matrices, the return type of eval() is a const + * reference to a matrix, not a matrix! + * The only difference from PlainMatrixType is that PlainMatrixType_ColMajor is guaranteed to be column-major. + */ + typedef typename ei_plain_matrix_type::type PlainMatrixType_ColMajor; + + /** \internal Represents a matrix with all coefficients equal to one another*/ + typedef CwiseNullaryOp,Derived> ConstantReturnType; + /** \internal Represents a scalar multiple of a matrix */ + typedef CwiseUnaryOp, Derived> ScalarMultipleReturnType; + /** \internal Represents a quotient of a matrix by a scalar*/ + typedef CwiseUnaryOp, Derived> ScalarQuotient1ReturnType; + /** \internal the return type of MatrixBase::conjugate() */ + typedef typename ei_meta_if::IsComplex, + const CwiseUnaryOp, Derived>, + const Derived& + >::ret ConjugateReturnType; + /** \internal the return type of MatrixBase::real() */ + typedef CwiseUnaryOp, Derived> RealReturnType; + /** \internal the return type of MatrixBase::imag() */ + typedef CwiseUnaryOp, Derived> ImagReturnType; + /** \internal the return type of MatrixBase::adjoint() */ + typedef Eigen::Transpose::type> > + AdjointReturnType; + /** \internal the return type of MatrixBase::eigenvalues() */ + typedef Matrix::Scalar>::Real, ei_traits::ColsAtCompileTime, 1> EigenvaluesReturnType; + /** \internal expression tyepe of a column */ + typedef Block::RowsAtCompileTime, 1> ColXpr; + /** \internal expression tyepe of a column */ + typedef Block::ColsAtCompileTime> RowXpr; + /** \internal the return type of identity */ + typedef CwiseNullaryOp,Derived> IdentityReturnType; + /** \internal the return type of unit vectors */ + typedef Block, SquareMatrixType>, + ei_traits::RowsAtCompileTime, + ei_traits::ColsAtCompileTime> BasisReturnType; +#endif // not EIGEN_PARSED_BY_DOXYGEN + + + /** Copies \a other into *this. \returns a reference to *this. */ + template + Derived& operator=(const MatrixBase& other); + + /** Special case of the template operator=, in order to prevent the compiler + * from generating a default operator= (issue hit with g++ 4.1) + */ + inline Derived& operator=(const MatrixBase& other) + { + return this->operator=(other); + } + +#ifndef EIGEN_PARSED_BY_DOXYGEN + /** Copies \a other into *this without evaluating other. \returns a reference to *this. 
*/ + template + Derived& lazyAssign(const MatrixBase& other); + + /** Overloaded for cache friendly product evaluation */ + template + Derived& lazyAssign(const Product& product); + + /** Overloaded for cache friendly product evaluation */ + template + Derived& lazyAssign(const Flagged& other) + { return lazyAssign(other._expression()); } +#endif // not EIGEN_PARSED_BY_DOXYGEN + + CommaInitializer operator<< (const Scalar& s); + + template + CommaInitializer operator<< (const MatrixBase& other); + + const Scalar coeff(int row, int col) const; + const Scalar operator()(int row, int col) const; + + Scalar& coeffRef(int row, int col); + Scalar& operator()(int row, int col); + + const Scalar coeff(int index) const; + const Scalar operator[](int index) const; + const Scalar operator()(int index) const; + + Scalar& coeffRef(int index); + Scalar& operator[](int index); + Scalar& operator()(int index); + +#ifndef EIGEN_PARSED_BY_DOXYGEN + template + void copyCoeff(int row, int col, const MatrixBase& other); + template + void copyCoeff(int index, const MatrixBase& other); + template + void copyPacket(int row, int col, const MatrixBase& other); + template + void copyPacket(int index, const MatrixBase& other); +#endif // not EIGEN_PARSED_BY_DOXYGEN + + template + PacketScalar packet(int row, int col) const; + template + void writePacket(int row, int col, const PacketScalar& x); + + template + PacketScalar packet(int index) const; + template + void writePacket(int index, const PacketScalar& x); + + const Scalar x() const; + const Scalar y() const; + const Scalar z() const; + const Scalar w() const; + Scalar& x(); + Scalar& y(); + Scalar& z(); + Scalar& w(); + + + const CwiseUnaryOp::Scalar>,Derived> operator-() const; + + template + const CwiseBinaryOp::Scalar>, Derived, OtherDerived> + operator+(const MatrixBase &other) const; + + template + const CwiseBinaryOp::Scalar>, Derived, OtherDerived> + operator-(const MatrixBase &other) const; + + template + Derived& operator+=(const MatrixBase& other); + template + Derived& operator-=(const MatrixBase& other); + + template + Derived& operator+=(const Flagged, 0, EvalBeforeNestingBit | EvalBeforeAssigningBit>& other); + + Derived& operator*=(const Scalar& other); + Derived& operator/=(const Scalar& other); + + const ScalarMultipleReturnType operator*(const Scalar& scalar) const; + const CwiseUnaryOp::Scalar>, Derived> + operator/(const Scalar& scalar) const; + + inline friend const CwiseUnaryOp::Scalar>, Derived> + operator*(const Scalar& scalar, const MatrixBase& matrix) + { return matrix*scalar; } + + + template + const typename ProductReturnType::Type + operator*(const MatrixBase &other) const; + + template + Derived& operator*=(const MatrixBase& other); + + template + typename ei_plain_matrix_type_column_major::type + solveTriangular(const MatrixBase& other) const; + + template + void solveTriangularInPlace(const MatrixBase& other) const; + + + template + Scalar dot(const MatrixBase& other) const; + RealScalar squaredNorm() const; + RealScalar norm() const; + const PlainMatrixType normalized() const; + void normalize(); + + Eigen::Transpose transpose(); + const Eigen::Transpose transpose() const; + void transposeInPlace(); + const AdjointReturnType adjoint() const; + + + RowXpr row(int i); + const RowXpr row(int i) const; + + ColXpr col(int i); + const ColXpr col(int i) const; + + Minor minor(int row, int col); + const Minor minor(int row, int col) const; + + typename BlockReturnType::Type block(int startRow, int startCol, int blockRows, int blockCols); 
+ const typename BlockReturnType::Type + block(int startRow, int startCol, int blockRows, int blockCols) const; + + typename BlockReturnType::SubVectorType segment(int start, int size); + const typename BlockReturnType::SubVectorType segment(int start, int size) const; + + typename BlockReturnType::SubVectorType start(int size); + const typename BlockReturnType::SubVectorType start(int size) const; + + typename BlockReturnType::SubVectorType end(int size); + const typename BlockReturnType::SubVectorType end(int size) const; + + typename BlockReturnType::Type corner(CornerType type, int cRows, int cCols); + const typename BlockReturnType::Type corner(CornerType type, int cRows, int cCols) const; + + template + typename BlockReturnType::Type block(int startRow, int startCol); + template + const typename BlockReturnType::Type block(int startRow, int startCol) const; + + template + typename BlockReturnType::Type corner(CornerType type); + template + const typename BlockReturnType::Type corner(CornerType type) const; + + template typename BlockReturnType::SubVectorType start(void); + template const typename BlockReturnType::SubVectorType start() const; + + template typename BlockReturnType::SubVectorType end(); + template const typename BlockReturnType::SubVectorType end() const; + + template typename BlockReturnType::SubVectorType segment(int start); + template const typename BlockReturnType::SubVectorType segment(int start) const; + + DiagonalCoeffs diagonal(); + const DiagonalCoeffs diagonal() const; + + template Part part(); + template const Part part() const; + + + static const ConstantReturnType + Constant(int rows, int cols, const Scalar& value); + static const ConstantReturnType + Constant(int size, const Scalar& value); + static const ConstantReturnType + Constant(const Scalar& value); + + template + static const CwiseNullaryOp + NullaryExpr(int rows, int cols, const CustomNullaryOp& func); + template + static const CwiseNullaryOp + NullaryExpr(int size, const CustomNullaryOp& func); + template + static const CwiseNullaryOp + NullaryExpr(const CustomNullaryOp& func); + + static const ConstantReturnType Zero(int rows, int cols); + static const ConstantReturnType Zero(int size); + static const ConstantReturnType Zero(); + static const ConstantReturnType Ones(int rows, int cols); + static const ConstantReturnType Ones(int size); + static const ConstantReturnType Ones(); + static const IdentityReturnType Identity(); + static const IdentityReturnType Identity(int rows, int cols); + static const BasisReturnType Unit(int size, int i); + static const BasisReturnType Unit(int i); + static const BasisReturnType UnitX(); + static const BasisReturnType UnitY(); + static const BasisReturnType UnitZ(); + static const BasisReturnType UnitW(); + + const DiagonalMatrix asDiagonal() const; + + void fill(const Scalar& value); + Derived& setConstant(const Scalar& value); + Derived& setZero(); + Derived& setOnes(); + Derived& setRandom(); + Derived& setIdentity(); + + + template + bool isApprox(const MatrixBase& other, + RealScalar prec = precision()) const; + bool isMuchSmallerThan(const RealScalar& other, + RealScalar prec = precision()) const; + template + bool isMuchSmallerThan(const MatrixBase& other, + RealScalar prec = precision()) const; + + bool isApproxToConstant(const Scalar& value, RealScalar prec = precision()) const; + bool isConstant(const Scalar& value, RealScalar prec = precision()) const; + bool isZero(RealScalar prec = precision()) const; + bool isOnes(RealScalar prec = precision()) 
const; + bool isIdentity(RealScalar prec = precision()) const; + bool isDiagonal(RealScalar prec = precision()) const; + + bool isUpperTriangular(RealScalar prec = precision()) const; + bool isLowerTriangular(RealScalar prec = precision()) const; + + template + bool isOrthogonal(const MatrixBase& other, + RealScalar prec = precision()) const; + bool isUnitary(RealScalar prec = precision()) const; + + template + inline bool operator==(const MatrixBase& other) const + { return (cwise() == other).all(); } + + template + inline bool operator!=(const MatrixBase& other) const + { return (cwise() != other).any(); } + + + template + const CwiseUnaryOp::Scalar, NewType>, Derived> cast() const; + + /** \returns the matrix or vector obtained by evaluating this expression. + * + * Notice that in the case of a plain matrix or vector (not an expression) this function just returns + * a const reference, in order to avoid a useless copy. + */ + EIGEN_STRONG_INLINE const typename ei_eval::type eval() const + { return typename ei_eval::type(derived()); } + + template + void swap(const MatrixBase& other); + + template + const Flagged marked() const; + const Flagged lazy() const; + + /** \returns number of elements to skip to pass from one row (resp. column) to another + * for a row-major (resp. column-major) matrix. + * Combined with coeffRef() and the \ref flags flags, it allows a direct access to the data + * of the underlying matrix. + */ + inline int stride(void) const { return derived().stride(); } + + inline const NestByValue nestByValue() const; + + + ConjugateReturnType conjugate() const; + const RealReturnType real() const; + const ImagReturnType imag() const; + + template + const CwiseUnaryOp unaryExpr(const CustomUnaryOp& func = CustomUnaryOp()) const; + + template + const CwiseBinaryOp + binaryExpr(const MatrixBase &other, const CustomBinaryOp& func = CustomBinaryOp()) const; + + + Scalar sum() const; + Scalar trace() const; + + typename ei_traits::Scalar minCoeff() const; + typename ei_traits::Scalar maxCoeff() const; + + typename ei_traits::Scalar minCoeff(int* row, int* col) const; + typename ei_traits::Scalar maxCoeff(int* row, int* col) const; + + typename ei_traits::Scalar minCoeff(int* index) const; + typename ei_traits::Scalar maxCoeff(int* index) const; + + template + typename ei_result_of::Scalar)>::type + redux(const BinaryOp& func) const; + + template + void visit(Visitor& func) const; + +#ifndef EIGEN_PARSED_BY_DOXYGEN + inline const Derived& derived() const { return *static_cast(this); } + inline Derived& derived() { return *static_cast(this); } + inline Derived& const_cast_derived() const + { return *static_cast(const_cast(this)); } +#endif // not EIGEN_PARSED_BY_DOXYGEN + + const Cwise cwise() const; + Cwise cwise(); + + inline const WithFormat format(const IOFormat& fmt) const; + +/////////// Array module /////////// + + bool all(void) const; + bool any(void) const; + int count() const; + + const PartialRedux rowwise() const; + const PartialRedux colwise() const; + + static const CwiseNullaryOp,Derived> Random(int rows, int cols); + static const CwiseNullaryOp,Derived> Random(int size); + static const CwiseNullaryOp,Derived> Random(); + + template + const Select + select(const MatrixBase& thenMatrix, + const MatrixBase& elseMatrix) const; + + template + inline const Select > + select(const MatrixBase& thenMatrix, typename ThenDerived::Scalar elseScalar) const; + + template + inline const Select, ElseDerived > + select(typename ElseDerived::Scalar thenScalar, const MatrixBase& 
elseMatrix) const; + + template RealScalar lpNorm() const; + +/////////// LU module /////////// + + const LU lu() const; + const PlainMatrixType inverse() const; + void computeInverse(PlainMatrixType *result) const; + Scalar determinant() const; + +/////////// Cholesky module /////////// + + const LLT llt() const; + const LDLT ldlt() const; + +/////////// QR module /////////// + + const QR qr() const; + + EigenvaluesReturnType eigenvalues() const; + RealScalar operatorNorm() const; + +/////////// SVD module /////////// + + SVD svd() const; + +/////////// Geometry module /////////// + + template + PlainMatrixType cross(const MatrixBase& other) const; + PlainMatrixType unitOrthogonal(void) const; + Matrix eulerAngles(int a0, int a1, int a2) const; + +/////////// Sparse module /////////// + + // dense = spasre * dense + template + Derived& lazyAssign(const SparseProduct& product); + // dense = dense * spasre + template + Derived& lazyAssign(const SparseProduct& product); + + #ifdef EIGEN_MATRIXBASE_PLUGIN + #include EIGEN_MATRIXBASE_PLUGIN + #endif +}; + +#endif // EIGEN_MATRIXBASE_H diff --git a/extern/Eigen2/Eigen/src/Core/MatrixStorage.h b/extern/Eigen2/Eigen/src/Core/MatrixStorage.h new file mode 100644 index 00000000000..ba2355b8e60 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/MatrixStorage.h @@ -0,0 +1,249 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2009 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_MATRIXSTORAGE_H +#define EIGEN_MATRIXSTORAGE_H + +struct ei_constructor_without_unaligned_array_assert {}; + +/** \internal + * Static array automatically aligned if the total byte size is a multiple of 16 and the matrix options require auto alignment + */ +template struct ei_matrix_array +{ + EIGEN_ALIGN_128 T array[Size]; + + ei_matrix_array() + { + #ifndef EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT + ei_assert((reinterpret_cast(array) & 0xf) == 0 + && "this assertion is explained here: http://eigen.tuxfamily.org/dox/UnalignedArrayAssert.html **** READ THIS WEB PAGE !!! 
****"); + #endif + } + + ei_matrix_array(ei_constructor_without_unaligned_array_assert) {} +}; + +template struct ei_matrix_array +{ + T array[Size]; + ei_matrix_array() {} + ei_matrix_array(ei_constructor_without_unaligned_array_assert) {} +}; + +/** \internal + * + * \class ei_matrix_storage + * + * \brief Stores the data of a matrix + * + * This class stores the data of fixed-size, dynamic-size or mixed matrices + * in a way as compact as possible. + * + * \sa Matrix + */ +template class ei_matrix_storage; + +// purely fixed-size matrix +template class ei_matrix_storage +{ + ei_matrix_array m_data; + public: + inline explicit ei_matrix_storage() {} + inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) + : m_data(ei_constructor_without_unaligned_array_assert()) {} + inline ei_matrix_storage(int,int,int) {} + inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); } + inline static int rows(void) {return _Rows;} + inline static int cols(void) {return _Cols;} + inline void resize(int,int,int) {} + inline const T *data() const { return m_data.array; } + inline T *data() { return m_data.array; } +}; + +// dynamic-size matrix with fixed-size storage +template class ei_matrix_storage +{ + ei_matrix_array m_data; + int m_rows; + int m_cols; + public: + inline explicit ei_matrix_storage() : m_rows(0), m_cols(0) {} + inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) + : m_data(ei_constructor_without_unaligned_array_assert()), m_rows(0), m_cols(0) {} + inline ei_matrix_storage(int, int rows, int cols) : m_rows(rows), m_cols(cols) {} + inline ~ei_matrix_storage() {} + inline void swap(ei_matrix_storage& other) + { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); } + inline int rows(void) const {return m_rows;} + inline int cols(void) const {return m_cols;} + inline void resize(int, int rows, int cols) + { + m_rows = rows; + m_cols = cols; + } + inline const T *data() const { return m_data.array; } + inline T *data() { return m_data.array; } +}; + +// dynamic-size matrix with fixed-size storage and fixed width +template class ei_matrix_storage +{ + ei_matrix_array m_data; + int m_rows; + public: + inline explicit ei_matrix_storage() : m_rows(0) {} + inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) + : m_data(ei_constructor_without_unaligned_array_assert()), m_rows(0) {} + inline ei_matrix_storage(int, int rows, int) : m_rows(rows) {} + inline ~ei_matrix_storage() {} + inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); } + inline int rows(void) const {return m_rows;} + inline int cols(void) const {return _Cols;} + inline void resize(int /*size*/, int rows, int) + { + m_rows = rows; + } + inline const T *data() const { return m_data.array; } + inline T *data() { return m_data.array; } +}; + +// dynamic-size matrix with fixed-size storage and fixed height +template class ei_matrix_storage +{ + ei_matrix_array m_data; + int m_cols; + public: + inline explicit ei_matrix_storage() : m_cols(0) {} + inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) + : m_data(ei_constructor_without_unaligned_array_assert()), m_cols(0) {} + inline ei_matrix_storage(int, int, int cols) : m_cols(cols) {} + inline ~ei_matrix_storage() {} + inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); } + inline int rows(void) const {return _Rows;} + inline int cols(void) 
const {return m_cols;} + inline void resize(int, int, int cols) + { + m_cols = cols; + } + inline const T *data() const { return m_data.array; } + inline T *data() { return m_data.array; } +}; + +// purely dynamic matrix. +template class ei_matrix_storage +{ + T *m_data; + int m_rows; + int m_cols; + public: + inline explicit ei_matrix_storage() : m_data(0), m_rows(0), m_cols(0) {} + inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) + : m_data(0), m_rows(0), m_cols(0) {} + inline ei_matrix_storage(int size, int rows, int cols) + : m_data(ei_aligned_new(size)), m_rows(rows), m_cols(cols) {} + inline ~ei_matrix_storage() { ei_aligned_delete(m_data, m_rows*m_cols); } + inline void swap(ei_matrix_storage& other) + { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); } + inline int rows(void) const {return m_rows;} + inline int cols(void) const {return m_cols;} + void resize(int size, int rows, int cols) + { + if(size != m_rows*m_cols) + { + ei_aligned_delete(m_data, m_rows*m_cols); + if (size) + m_data = ei_aligned_new(size); + else + m_data = 0; + } + m_rows = rows; + m_cols = cols; + } + inline const T *data() const { return m_data; } + inline T *data() { return m_data; } +}; + +// matrix with dynamic width and fixed height (so that matrix has dynamic size). +template class ei_matrix_storage +{ + T *m_data; + int m_cols; + public: + inline explicit ei_matrix_storage() : m_data(0), m_cols(0) {} + inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) : m_data(0), m_cols(0) {} + inline ei_matrix_storage(int size, int, int cols) : m_data(ei_aligned_new(size)), m_cols(cols) {} + inline ~ei_matrix_storage() { ei_aligned_delete(m_data, _Rows*m_cols); } + inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); } + inline static int rows(void) {return _Rows;} + inline int cols(void) const {return m_cols;} + void resize(int size, int, int cols) + { + if(size != _Rows*m_cols) + { + ei_aligned_delete(m_data, _Rows*m_cols); + if (size) + m_data = ei_aligned_new(size); + else + m_data = 0; + } + m_cols = cols; + } + inline const T *data() const { return m_data; } + inline T *data() { return m_data; } +}; + +// matrix with dynamic height and fixed width (so that matrix has dynamic size). 
+template class ei_matrix_storage +{ + T *m_data; + int m_rows; + public: + inline explicit ei_matrix_storage() : m_data(0), m_rows(0) {} + inline ei_matrix_storage(ei_constructor_without_unaligned_array_assert) : m_data(0), m_rows(0) {} + inline ei_matrix_storage(int size, int rows, int) : m_data(ei_aligned_new(size)), m_rows(rows) {} + inline ~ei_matrix_storage() { ei_aligned_delete(m_data, _Cols*m_rows); } + inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); } + inline int rows(void) const {return m_rows;} + inline static int cols(void) {return _Cols;} + void resize(int size, int rows, int) + { + if(size != m_rows*_Cols) + { + ei_aligned_delete(m_data, _Cols*m_rows); + if (size) + m_data = ei_aligned_new(size); + else + m_data = 0; + } + m_rows = rows; + } + inline const T *data() const { return m_data; } + inline T *data() { return m_data; } +}; + +#endif // EIGEN_MATRIX_H diff --git a/extern/Eigen2/Eigen/src/Core/Minor.h b/extern/Eigen2/Eigen/src/Core/Minor.h new file mode 100644 index 00000000000..e2d47da79c2 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/Minor.h @@ -0,0 +1,122 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2006-2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_MINOR_H +#define EIGEN_MINOR_H + +/** \nonstableyet + * \class Minor + * + * \brief Expression of a minor + * + * \param MatrixType the type of the object in which we are taking a minor + * + * This class represents an expression of a minor. It is the return + * type of MatrixBase::minor() and most of the time this is the only way it + * is used. + * + * \sa MatrixBase::minor() + */ +template +struct ei_traits > +{ + typedef typename MatrixType::Scalar Scalar; + typedef typename ei_nested::type MatrixTypeNested; + typedef typename ei_unref::type _MatrixTypeNested; + enum { + RowsAtCompileTime = (MatrixType::RowsAtCompileTime != Dynamic) ? + int(MatrixType::RowsAtCompileTime) - 1 : Dynamic, + ColsAtCompileTime = (MatrixType::ColsAtCompileTime != Dynamic) ? + int(MatrixType::ColsAtCompileTime) - 1 : Dynamic, + MaxRowsAtCompileTime = (MatrixType::MaxRowsAtCompileTime != Dynamic) ? + int(MatrixType::MaxRowsAtCompileTime) - 1 : Dynamic, + MaxColsAtCompileTime = (MatrixType::MaxColsAtCompileTime != Dynamic) ? 
+ int(MatrixType::MaxColsAtCompileTime) - 1 : Dynamic, + Flags = _MatrixTypeNested::Flags & HereditaryBits, + CoeffReadCost = _MatrixTypeNested::CoeffReadCost + }; +}; + +template class Minor + : public MatrixBase > +{ + public: + + EIGEN_GENERIC_PUBLIC_INTERFACE(Minor) + + inline Minor(const MatrixType& matrix, + int row, int col) + : m_matrix(matrix), m_row(row), m_col(col) + { + ei_assert(row >= 0 && row < matrix.rows() + && col >= 0 && col < matrix.cols()); + } + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Minor) + + inline int rows() const { return m_matrix.rows() - 1; } + inline int cols() const { return m_matrix.cols() - 1; } + + inline Scalar& coeffRef(int row, int col) + { + return m_matrix.const_cast_derived().coeffRef(row + (row >= m_row), col + (col >= m_col)); + } + + inline const Scalar coeff(int row, int col) const + { + return m_matrix.coeff(row + (row >= m_row), col + (col >= m_col)); + } + + protected: + const typename MatrixType::Nested m_matrix; + const int m_row, m_col; +}; + +/** \nonstableyet + * \return an expression of the (\a row, \a col)-minor of *this, + * i.e. an expression constructed from *this by removing the specified + * row and column. + * + * Example: \include MatrixBase_minor.cpp + * Output: \verbinclude MatrixBase_minor.out + * + * \sa class Minor + */ +template +inline Minor +MatrixBase::minor(int row, int col) +{ + return Minor(derived(), row, col); +} + +/** \nonstableyet + * This is the const version of minor(). */ +template +inline const Minor +MatrixBase::minor(int row, int col) const +{ + return Minor(derived(), row, col); +} + +#endif // EIGEN_MINOR_H diff --git a/extern/Eigen2/Eigen/src/Core/NestByValue.h b/extern/Eigen2/Eigen/src/Core/NestByValue.h new file mode 100644 index 00000000000..da79315bffe --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/NestByValue.h @@ -0,0 +1,114 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_NESTBYVALUE_H +#define EIGEN_NESTBYVALUE_H + +/** \class NestByValue + * + * \brief Expression which must be nested by value + * + * \param ExpressionType the type of the object of which we are requiring nesting-by-value + * + * This class is the return type of MatrixBase::nestByValue() + * and most of the time this is the only way it is used. 
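+ *
+ * A minimal usage sketch (assuming a dynamic-size double matrix):
+ * \code
+ * MatrixXd m = MatrixXd::Random(3,3);
+ * NestByValue<MatrixXd> n = m.nestByValue(); // n stores its own copy of m
+ * \endcode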
+ * + * \sa MatrixBase::nestByValue() + */ +template +struct ei_traits > : public ei_traits +{}; + +template class NestByValue + : public MatrixBase > +{ + public: + + EIGEN_GENERIC_PUBLIC_INTERFACE(NestByValue) + + inline NestByValue(const ExpressionType& matrix) : m_expression(matrix) {} + + inline int rows() const { return m_expression.rows(); } + inline int cols() const { return m_expression.cols(); } + inline int stride() const { return m_expression.stride(); } + + inline const Scalar coeff(int row, int col) const + { + return m_expression.coeff(row, col); + } + + inline Scalar& coeffRef(int row, int col) + { + return m_expression.const_cast_derived().coeffRef(row, col); + } + + inline const Scalar coeff(int index) const + { + return m_expression.coeff(index); + } + + inline Scalar& coeffRef(int index) + { + return m_expression.const_cast_derived().coeffRef(index); + } + + template + inline const PacketScalar packet(int row, int col) const + { + return m_expression.template packet(row, col); + } + + template + inline void writePacket(int row, int col, const PacketScalar& x) + { + m_expression.const_cast_derived().template writePacket(row, col, x); + } + + template + inline const PacketScalar packet(int index) const + { + return m_expression.template packet(index); + } + + template + inline void writePacket(int index, const PacketScalar& x) + { + m_expression.const_cast_derived().template writePacket(index, x); + } + + protected: + const ExpressionType m_expression; +}; + +/** \returns an expression of the temporary version of *this. + */ +template +inline const NestByValue +MatrixBase::nestByValue() const +{ + return NestByValue(derived()); +} + +#endif // EIGEN_NESTBYVALUE_H diff --git a/extern/Eigen2/Eigen/src/Core/NumTraits.h b/extern/Eigen2/Eigen/src/Core/NumTraits.h new file mode 100644 index 00000000000..b27284a78bc --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/NumTraits.h @@ -0,0 +1,142 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2006-2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_NUMTRAITS_H +#define EIGEN_NUMTRAITS_H + +/** \class NumTraits + * + * \brief Holds some data about the various numeric (i.e. scalar) types allowed by Eigen. + * + * \param T the numeric type about which this class provides data. Recall that Eigen allows + * only the following types for \a T: \c int, \c float, \c double, + * \c std::complex, \c std::complex, and \c long \c double (especially + * useful to enforce x87 arithmetics when SSE is the default). 
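+ *
+ * A small sketch of how these traits are typically queried (the \c float and
+ * \c std::complex<double> choices here are just examples):
+ * \code
+ * typedef NumTraits<float>::Real RealType;                     // float
+ * enum { Cplx = NumTraits<std::complex<double> >::IsComplex }; // 1
+ * \endcode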
+ * + * The provided data consists of: + * \li A typedef \a Real, giving the "real part" type of \a T. If \a T is already real, + * then \a Real is just a typedef to \a T. If \a T is \c std::complex then \a Real + * is a typedef to \a U. + * \li A typedef \a FloatingPoint, giving the "floating-point type" of \a T. If \a T is + * \c int, then \a FloatingPoint is a typedef to \c double. Otherwise, \a FloatingPoint + * is a typedef to \a T. + * \li An enum value \a IsComplex. It is equal to 1 if \a T is a \c std::complex + * type, and to 0 otherwise. + * \li An enum \a HasFloatingPoint. It is equal to \c 0 if \a T is \c int, + * and to \c 1 otherwise. + */ +template struct NumTraits; + +template<> struct NumTraits +{ + typedef int Real; + typedef double FloatingPoint; + enum { + IsComplex = 0, + HasFloatingPoint = 0, + ReadCost = 1, + AddCost = 1, + MulCost = 1 + }; +}; + +template<> struct NumTraits +{ + typedef float Real; + typedef float FloatingPoint; + enum { + IsComplex = 0, + HasFloatingPoint = 1, + ReadCost = 1, + AddCost = 1, + MulCost = 1 + }; +}; + +template<> struct NumTraits +{ + typedef double Real; + typedef double FloatingPoint; + enum { + IsComplex = 0, + HasFloatingPoint = 1, + ReadCost = 1, + AddCost = 1, + MulCost = 1 + }; +}; + +template struct NumTraits > +{ + typedef _Real Real; + typedef std::complex<_Real> FloatingPoint; + enum { + IsComplex = 1, + HasFloatingPoint = NumTraits::HasFloatingPoint, + ReadCost = 2, + AddCost = 2 * NumTraits::AddCost, + MulCost = 4 * NumTraits::MulCost + 2 * NumTraits::AddCost + }; +}; + +template<> struct NumTraits +{ + typedef long long int Real; + typedef long double FloatingPoint; + enum { + IsComplex = 0, + HasFloatingPoint = 0, + ReadCost = 1, + AddCost = 1, + MulCost = 1 + }; +}; + +template<> struct NumTraits +{ + typedef long double Real; + typedef long double FloatingPoint; + enum { + IsComplex = 0, + HasFloatingPoint = 1, + ReadCost = 1, + AddCost = 1, + MulCost = 1 + }; +}; + +template<> struct NumTraits +{ + typedef bool Real; + typedef float FloatingPoint; + enum { + IsComplex = 0, + HasFloatingPoint = 0, + ReadCost = 1, + AddCost = 1, + MulCost = 1 + }; +}; + +#endif // EIGEN_NUMTRAITS_H diff --git a/extern/Eigen2/Eigen/src/Core/Part.h b/extern/Eigen2/Eigen/src/Core/Part.h new file mode 100644 index 00000000000..9c273f249ec --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/Part.h @@ -0,0 +1,375 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Benoit Jacob +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_PART_H +#define EIGEN_PART_H + +/** \nonstableyet + * \class Part + * + * \brief Expression of a triangular matrix extracted from a given matrix + * + * \param MatrixType the type of the object in which we are taking the triangular part + * \param Mode the kind of triangular matrix expression to construct. Can be UpperTriangular, StrictlyUpperTriangular, + * UnitUpperTriangular, LowerTriangular, StrictlyLowerTriangular, UnitLowerTriangular. This is in fact a bit field; it must have either + * UpperTriangularBit or LowerTriangularBit, and additionnaly it may have either ZeroDiagBit or + * UnitDiagBit. + * + * This class represents an expression of the upper or lower triangular part of + * a square matrix, possibly with a further assumption on the diagonal. It is the return type + * of MatrixBase::part() and most of the time this is the only way it is used. + * + * \sa MatrixBase::part() + */ +template +struct ei_traits > : ei_traits +{ + typedef typename ei_nested::type MatrixTypeNested; + typedef typename ei_unref::type _MatrixTypeNested; + enum { + Flags = (_MatrixTypeNested::Flags & (HereditaryBits) & (~(PacketAccessBit | DirectAccessBit | LinearAccessBit))) | Mode, + CoeffReadCost = _MatrixTypeNested::CoeffReadCost + }; +}; + +template class Part + : public MatrixBase > +{ + public: + + EIGEN_GENERIC_PUBLIC_INTERFACE(Part) + + inline Part(const MatrixType& matrix) : m_matrix(matrix) + { ei_assert(ei_are_flags_consistent::ret); } + + /** \sa MatrixBase::operator+=() */ + template Part& operator+=(const Other& other); + /** \sa MatrixBase::operator-=() */ + template Part& operator-=(const Other& other); + /** \sa MatrixBase::operator*=() */ + Part& operator*=(const typename ei_traits::Scalar& other); + /** \sa MatrixBase::operator/=() */ + Part& operator/=(const typename ei_traits::Scalar& other); + + /** \sa operator=(), MatrixBase::lazyAssign() */ + template void lazyAssign(const Other& other); + /** \sa MatrixBase::operator=() */ + template Part& operator=(const Other& other); + + inline int rows() const { return m_matrix.rows(); } + inline int cols() const { return m_matrix.cols(); } + inline int stride() const { return m_matrix.stride(); } + + inline Scalar coeff(int row, int col) const + { + // SelfAdjointBit doesn't play any role here: just because a matrix is selfadjoint doesn't say anything about + // each individual coefficient, except for the not-very-useful-here fact that diagonal coefficients are real. + if( ((Flags & LowerTriangularBit) && (col>row)) || ((Flags & UpperTriangularBit) && (row>col)) ) + return (Scalar)0; + if(Flags & UnitDiagBit) + return col==row ? (Scalar)1 : m_matrix.coeff(row, col); + else if(Flags & ZeroDiagBit) + return col==row ? 
(Scalar)0 : m_matrix.coeff(row, col); + else + return m_matrix.coeff(row, col); + } + + inline Scalar& coeffRef(int row, int col) + { + EIGEN_STATIC_ASSERT(!(Flags & UnitDiagBit), WRITING_TO_TRIANGULAR_PART_WITH_UNIT_DIAGONAL_IS_NOT_SUPPORTED) + EIGEN_STATIC_ASSERT(!(Flags & SelfAdjointBit), COEFFICIENT_WRITE_ACCESS_TO_SELFADJOINT_NOT_SUPPORTED) + ei_assert( (Mode==UpperTriangular && col>=row) + || (Mode==LowerTriangular && col<=row) + || (Mode==StrictlyUpperTriangular && col>row) + || (Mode==StrictlyLowerTriangular && col row(int i) { return Base::row(i); } + const Block row(int i) const { return Base::row(i); } + /** discard any writes to a column */ + const Block col(int i) { return Base::col(i); } + const Block col(int i) const { return Base::col(i); } + + template + void swap(const MatrixBase& other) + { + Part,Mode>(const_cast(m_matrix)).lazyAssign(other.derived()); + } + + protected: + + const typename MatrixType::Nested m_matrix; +}; + +/** \nonstableyet + * \returns an expression of a triangular matrix extracted from the current matrix + * + * The parameter \a Mode can have the following values: \c UpperTriangular, \c StrictlyUpperTriangular, \c UnitUpperTriangular, + * \c LowerTriangular, \c StrictlyLowerTriangular, \c UnitLowerTriangular. + * + * \addexample PartExample \label How to extract a triangular part of an arbitrary matrix + * + * Example: \include MatrixBase_extract.cpp + * Output: \verbinclude MatrixBase_extract.out + * + * \sa class Part, part(), marked() + */ +template +template +const Part MatrixBase::part() const +{ + return derived(); +} + +template +template +inline Part& Part::operator=(const Other& other) +{ + if(Other::Flags & EvalBeforeAssigningBit) + { + typename MatrixBase::PlainMatrixType other_evaluated(other.rows(), other.cols()); + other_evaluated.template part().lazyAssign(other); + lazyAssign(other_evaluated); + } + else + lazyAssign(other.derived()); + return *this; +} + +template +struct ei_part_assignment_impl +{ + enum { + col = (UnrollCount-1) / Derived1::RowsAtCompileTime, + row = (UnrollCount-1) % Derived1::RowsAtCompileTime + }; + + inline static void run(Derived1 &dst, const Derived2 &src) + { + ei_part_assignment_impl::run(dst, src); + + if(Mode == SelfAdjoint) + { + if(row == col) + dst.coeffRef(row, col) = ei_real(src.coeff(row, col)); + else if(row < col) + dst.coeffRef(col, row) = ei_conj(dst.coeffRef(row, col) = src.coeff(row, col)); + } + else + { + ei_assert(Mode == UpperTriangular || Mode == LowerTriangular || Mode == StrictlyUpperTriangular || Mode == StrictlyLowerTriangular); + if((Mode == UpperTriangular && row <= col) + || (Mode == LowerTriangular && row >= col) + || (Mode == StrictlyUpperTriangular && row < col) + || (Mode == StrictlyLowerTriangular && row > col)) + dst.copyCoeff(row, col, src); + } + } +}; + +template +struct ei_part_assignment_impl +{ + inline static void run(Derived1 &dst, const Derived2 &src) + { + if(!(Mode & ZeroDiagBit)) + dst.copyCoeff(0, 0, src); + } +}; + +// prevent buggy user code from causing an infinite recursion +template +struct ei_part_assignment_impl +{ + inline static void run(Derived1 &, const Derived2 &) {} +}; + +template +struct ei_part_assignment_impl +{ + inline static void run(Derived1 &dst, const Derived2 &src) + { + for(int j = 0; j < dst.cols(); ++j) + for(int i = 0; i <= j; ++i) + dst.copyCoeff(i, j, src); + } +}; + +template +struct ei_part_assignment_impl +{ + inline static void run(Derived1 &dst, const Derived2 &src) + { + for(int j = 0; j < dst.cols(); ++j) + for(int i = j; i < 
dst.rows(); ++i) + dst.copyCoeff(i, j, src); + } +}; + +template +struct ei_part_assignment_impl +{ + inline static void run(Derived1 &dst, const Derived2 &src) + { + for(int j = 0; j < dst.cols(); ++j) + for(int i = 0; i < j; ++i) + dst.copyCoeff(i, j, src); + } +}; +template +struct ei_part_assignment_impl +{ + inline static void run(Derived1 &dst, const Derived2 &src) + { + for(int j = 0; j < dst.cols(); ++j) + for(int i = j+1; i < dst.rows(); ++i) + dst.copyCoeff(i, j, src); + } +}; +template +struct ei_part_assignment_impl +{ + inline static void run(Derived1 &dst, const Derived2 &src) + { + for(int j = 0; j < dst.cols(); ++j) + { + for(int i = 0; i < j; ++i) + dst.coeffRef(j, i) = ei_conj(dst.coeffRef(i, j) = src.coeff(i, j)); + dst.coeffRef(j, j) = ei_real(src.coeff(j, j)); + } + } +}; + +template +template +void Part::lazyAssign(const Other& other) +{ + const bool unroll = MatrixType::SizeAtCompileTime * Other::CoeffReadCost / 2 <= EIGEN_UNROLLING_LIMIT; + ei_assert(m_matrix.rows() == other.rows() && m_matrix.cols() == other.cols()); + + ei_part_assignment_impl + ::run(m_matrix.const_cast_derived(), other.derived()); +} + +/** \nonstableyet + * \returns a lvalue pseudo-expression allowing to perform special operations on \c *this. + * + * The \a Mode parameter can have the following values: \c UpperTriangular, \c StrictlyUpperTriangular, \c LowerTriangular, + * \c StrictlyLowerTriangular, \c SelfAdjoint. + * + * \addexample PartExample \label How to write to a triangular part of a matrix + * + * Example: \include MatrixBase_part.cpp + * Output: \verbinclude MatrixBase_part.out + * + * \sa class Part, MatrixBase::extract(), MatrixBase::marked() + */ +template +template +inline Part MatrixBase::part() +{ + return Part(derived()); +} + +/** \returns true if *this is approximately equal to an upper triangular matrix, + * within the precision given by \a prec. + * + * \sa isLowerTriangular(), extract(), part(), marked() + */ +template +bool MatrixBase::isUpperTriangular(RealScalar prec) const +{ + if(cols() != rows()) return false; + RealScalar maxAbsOnUpperTriangularPart = static_cast(-1); + for(int j = 0; j < cols(); ++j) + for(int i = 0; i <= j; ++i) + { + RealScalar absValue = ei_abs(coeff(i,j)); + if(absValue > maxAbsOnUpperTriangularPart) maxAbsOnUpperTriangularPart = absValue; + } + for(int j = 0; j < cols()-1; ++j) + for(int i = j+1; i < rows(); ++i) + if(!ei_isMuchSmallerThan(coeff(i, j), maxAbsOnUpperTriangularPart, prec)) return false; + return true; +} + +/** \returns true if *this is approximately equal to a lower triangular matrix, + * within the precision given by \a prec. 
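+ *
+ * A minimal sketch (assuming a 3x3 double matrix):
+ * \code
+ * Matrix3d m = Matrix3d::Random();
+ * Matrix3d l = m.part<Eigen::LowerTriangular>(); // lower triangle of m, zeros above
+ * bool a = l.isLowerTriangular();                // true
+ * bool b = m.isLowerTriangular();                // almost certainly false
+ * \endcode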
+ * + * \sa isUpperTriangular(), extract(), part(), marked() + */ +template +bool MatrixBase::isLowerTriangular(RealScalar prec) const +{ + if(cols() != rows()) return false; + RealScalar maxAbsOnLowerTriangularPart = static_cast(-1); + for(int j = 0; j < cols(); ++j) + for(int i = j; i < rows(); ++i) + { + RealScalar absValue = ei_abs(coeff(i,j)); + if(absValue > maxAbsOnLowerTriangularPart) maxAbsOnLowerTriangularPart = absValue; + } + for(int j = 1; j < cols(); ++j) + for(int i = 0; i < j; ++i) + if(!ei_isMuchSmallerThan(coeff(i, j), maxAbsOnLowerTriangularPart, prec)) return false; + return true; +} + +template +template +inline Part& Part::operator+=(const Other& other) +{ + return *this = m_matrix + other; +} + +template +template +inline Part& Part::operator-=(const Other& other) +{ + return *this = m_matrix - other; +} + +template +inline Part& Part::operator*= +(const typename ei_traits::Scalar& other) +{ + return *this = m_matrix * other; +} + +template +inline Part& Part::operator/= +(const typename ei_traits::Scalar& other) +{ + return *this = m_matrix / other; +} + +#endif // EIGEN_PART_H diff --git a/extern/Eigen2/Eigen/src/Core/Product.h b/extern/Eigen2/Eigen/src/Core/Product.h new file mode 100644 index 00000000000..1151b21641c --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/Product.h @@ -0,0 +1,769 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2006-2008 Benoit Jacob +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_PRODUCT_H +#define EIGEN_PRODUCT_H + +/*************************** +*** Forward declarations *** +***************************/ + +template +struct ei_product_coeff_impl; + +template +struct ei_product_packet_impl; + +/** \class ProductReturnType + * + * \brief Helper class to get the correct and optimized returned type of operator* + * + * \param Lhs the type of the left-hand side + * \param Rhs the type of the right-hand side + * \param ProductMode the type of the product (determined automatically by ei_product_mode) + * + * This class defines the typename Type representing the optimized product expression + * between two matrix expressions. In practice, using ProductReturnType::Type + * is the recommended way to define the result type of a function returning an expression + * which involve a matrix product. The class Product or DiagonalProduct should never be + * used directly. 
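+ *
+ * A minimal sketch of that recommended pattern (the function name is purely
+ * illustrative):
+ * \code
+ * template<typename Lhs, typename Rhs>
+ * const typename ProductReturnType<Lhs,Rhs>::Type
+ * multiply(const MatrixBase<Lhs>& a, const MatrixBase<Rhs>& b)
+ * { return a * b; }
+ * \endcode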
+ * + * \sa class Product, class DiagonalProduct, MatrixBase::operator*(const MatrixBase&) + */ +template +struct ProductReturnType +{ + typedef typename ei_nested::type LhsNested; + typedef typename ei_nested::type RhsNested; + + typedef Product Type; +}; + +// cache friendly specialization +// note that there is a DiagonalProduct specialization in DiagonalProduct.h +template +struct ProductReturnType +{ + typedef typename ei_nested::type LhsNested; + + typedef typename ei_nested::type + >::type RhsNested; + + typedef Product Type; +}; + +/* Helper class to determine the type of the product, can be either: + * - NormalProduct + * - CacheFriendlyProduct + * - DiagonalProduct + */ +template struct ei_product_mode +{ + enum{ + + value = ((Rhs::Flags&Diagonal)==Diagonal) || ((Lhs::Flags&Diagonal)==Diagonal) + ? DiagonalProduct + : Lhs::MaxColsAtCompileTime == Dynamic + && ( Lhs::MaxRowsAtCompileTime == Dynamic + || Rhs::MaxColsAtCompileTime == Dynamic ) + && (!(Rhs::IsVectorAtCompileTime && (Lhs::Flags&RowMajorBit) && (!(Lhs::Flags&DirectAccessBit)))) + && (!(Lhs::IsVectorAtCompileTime && (!(Rhs::Flags&RowMajorBit)) && (!(Rhs::Flags&DirectAccessBit)))) + && (ei_is_same_type::ret) + ? CacheFriendlyProduct + : NormalProduct }; +}; + +/** \class Product + * + * \brief Expression of the product of two matrices + * + * \param LhsNested the type used to store the left-hand side + * \param RhsNested the type used to store the right-hand side + * \param ProductMode the type of the product + * + * This class represents an expression of the product of two matrices. + * It is the return type of the operator* between matrices. Its template + * arguments are determined automatically by ProductReturnType. Therefore, + * Product should never be used direclty. To determine the result type of a + * function which involves a matrix product, use ProductReturnType::Type. + * + * \sa ProductReturnType, MatrixBase::operator*(const MatrixBase&) + */ +template +struct ei_traits > +{ + // clean the nested types: + typedef typename ei_cleantype::type _LhsNested; + typedef typename ei_cleantype::type _RhsNested; + typedef typename ei_scalar_product_traits::ReturnType Scalar; + + enum { + LhsCoeffReadCost = _LhsNested::CoeffReadCost, + RhsCoeffReadCost = _RhsNested::CoeffReadCost, + LhsFlags = _LhsNested::Flags, + RhsFlags = _RhsNested::Flags, + + RowsAtCompileTime = _LhsNested::RowsAtCompileTime, + ColsAtCompileTime = _RhsNested::ColsAtCompileTime, + InnerSize = EIGEN_ENUM_MIN(_LhsNested::ColsAtCompileTime, _RhsNested::RowsAtCompileTime), + + MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime, + MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime, + + LhsRowMajor = LhsFlags & RowMajorBit, + RhsRowMajor = RhsFlags & RowMajorBit, + + CanVectorizeRhs = RhsRowMajor && (RhsFlags & PacketAccessBit) + && (ColsAtCompileTime % ei_packet_traits::size == 0), + + CanVectorizeLhs = (!LhsRowMajor) && (LhsFlags & PacketAccessBit) + && (RowsAtCompileTime % ei_packet_traits::size == 0), + + EvalToRowMajor = RhsRowMajor && (ProductMode==(int)CacheFriendlyProduct ? LhsRowMajor : (!CanVectorizeLhs)), + + RemovedBits = ~(EvalToRowMajor ? 0 : RowMajorBit), + + Flags = ((unsigned int)(LhsFlags | RhsFlags) & HereditaryBits & RemovedBits) + | EvalBeforeAssigningBit + | EvalBeforeNestingBit + | (CanVectorizeLhs || CanVectorizeRhs ? PacketAccessBit : 0) + | (LhsFlags & RhsFlags & AlignedBit), + + CoeffReadCost = InnerSize == Dynamic ? 
Dynamic + : InnerSize * (NumTraits::MulCost + LhsCoeffReadCost + RhsCoeffReadCost) + + (InnerSize - 1) * NumTraits::AddCost, + + /* CanVectorizeInner deserves special explanation. It does not affect the product flags. It is not used outside + * of Product. If the Product itself is not a packet-access expression, there is still a chance that the inner + * loop of the product might be vectorized. This is the meaning of CanVectorizeInner. Since it doesn't affect + * the Flags, it is safe to make this value depend on ActualPacketAccessBit, that doesn't affect the ABI. + */ + CanVectorizeInner = LhsRowMajor && (!RhsRowMajor) && (LhsFlags & RhsFlags & ActualPacketAccessBit) + && (InnerSize % ei_packet_traits::size == 0) + }; +}; + +template class Product : ei_no_assignment_operator, + public MatrixBase > +{ + public: + + EIGEN_GENERIC_PUBLIC_INTERFACE(Product) + + private: + + typedef typename ei_traits::_LhsNested _LhsNested; + typedef typename ei_traits::_RhsNested _RhsNested; + + enum { + PacketSize = ei_packet_traits::size, + InnerSize = ei_traits::InnerSize, + Unroll = CoeffReadCost <= EIGEN_UNROLLING_LIMIT, + CanVectorizeInner = ei_traits::CanVectorizeInner + }; + + typedef ei_product_coeff_impl ScalarCoeffImpl; + + public: + + template + inline Product(const Lhs& lhs, const Rhs& rhs) + : m_lhs(lhs), m_rhs(rhs) + { + // we don't allow taking products of matrices of different real types, as that wouldn't be vectorizable. + // We still allow to mix T and complex. + EIGEN_STATIC_ASSERT((ei_is_same_type::ret), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + ei_assert(lhs.cols() == rhs.rows() + && "invalid matrix product" + && "if you wanted a coeff-wise or a dot product use the respective explicit functions"); + } + + /** \internal + * compute \a res += \c *this using the cache friendly product. + */ + template + void _cacheFriendlyEvalAndAdd(DestDerived& res) const; + + /** \internal + * \returns whether it is worth it to use the cache friendly product. + */ + EIGEN_STRONG_INLINE bool _useCacheFriendlyProduct() const + { + return m_lhs.cols()>=EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD + && ( rows()>=EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD + || cols()>=EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD); + } + + EIGEN_STRONG_INLINE int rows() const { return m_lhs.rows(); } + EIGEN_STRONG_INLINE int cols() const { return m_rhs.cols(); } + + EIGEN_STRONG_INLINE const Scalar coeff(int row, int col) const + { + Scalar res; + ScalarCoeffImpl::run(row, col, m_lhs, m_rhs, res); + return res; + } + + /* Allow index-based non-packet access. It is impossible though to allow index-based packed access, + * which is why we don't set the LinearAccessBit. + */ + EIGEN_STRONG_INLINE const Scalar coeff(int index) const + { + Scalar res; + const int row = RowsAtCompileTime == 1 ? 0 : index; + const int col = RowsAtCompileTime == 1 ? index : 0; + ScalarCoeffImpl::run(row, col, m_lhs, m_rhs, res); + return res; + } + + template + EIGEN_STRONG_INLINE const PacketScalar packet(int row, int col) const + { + PacketScalar res; + ei_product_packet_impl + ::run(row, col, m_lhs, m_rhs, res); + return res; + } + + EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; } + EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; } + + protected: + const LhsNested m_lhs; + const RhsNested m_rhs; +}; + +/** \returns the matrix product of \c *this and \a other. 
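+ *
+ * A short sketch (assuming dynamic-size double matrices):
+ * \code
+ * MatrixXd a(2,3), b(3,4);
+ * a.setOnes(); b.setOnes();
+ * MatrixXd c = a * b;         // 2x4 matrix product, every entry equals 3
+ * MatrixXd d = a.cwise() * a; // coefficient-wise square of a, by contrast
+ * \endcode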
+ * + * \note If instead of the matrix product you want the coefficient-wise product, see Cwise::operator*(). + * + * \sa lazy(), operator*=(const MatrixBase&), Cwise::operator*() + */ +template +template +inline const typename ProductReturnType::Type +MatrixBase::operator*(const MatrixBase &other) const +{ + enum { + ProductIsValid = Derived::ColsAtCompileTime==Dynamic + || OtherDerived::RowsAtCompileTime==Dynamic + || int(Derived::ColsAtCompileTime)==int(OtherDerived::RowsAtCompileTime), + AreVectors = Derived::IsVectorAtCompileTime && OtherDerived::IsVectorAtCompileTime, + SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(Derived,OtherDerived) + }; + // note to the lost user: + // * for a dot product use: v1.dot(v2) + // * for a coeff-wise product use: v1.cwise()*v2 + EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes), + INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS) + EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors), + INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION) + EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT) + return typename ProductReturnType::Type(derived(), other.derived()); +} + +/** replaces \c *this by \c *this * \a other. + * + * \returns a reference to \c *this + */ +template +template +inline Derived & +MatrixBase::operator*=(const MatrixBase &other) +{ + return derived() = derived() * other.derived(); +} + +/*************************************************************************** +* Normal product .coeff() implementation (with meta-unrolling) +***************************************************************************/ + +/************************************** +*** Scalar path - no vectorization *** +**************************************/ + +template +struct ei_product_coeff_impl +{ + EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, RetScalar &res) + { + ei_product_coeff_impl::run(row, col, lhs, rhs, res); + res += lhs.coeff(row, Index) * rhs.coeff(Index, col); + } +}; + +template +struct ei_product_coeff_impl +{ + EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, RetScalar &res) + { + res = lhs.coeff(row, 0) * rhs.coeff(0, col); + } +}; + +template +struct ei_product_coeff_impl +{ + EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, RetScalar& res) + { + ei_assert(lhs.cols()>0 && "you are using a non initialized matrix"); + res = lhs.coeff(row, 0) * rhs.coeff(0, col); + for(int i = 1; i < lhs.cols(); ++i) + res += lhs.coeff(row, i) * rhs.coeff(i, col); + } +}; + +// prevent buggy user code from causing an infinite recursion +template +struct ei_product_coeff_impl +{ + EIGEN_STRONG_INLINE static void run(int, int, const Lhs&, const Rhs&, RetScalar&) {} +}; + +/******************************************* +*** Scalar path with inner vectorization *** +*******************************************/ + +template +struct ei_product_coeff_vectorized_unroller +{ + enum { PacketSize = ei_packet_traits::size }; + EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::PacketScalar &pres) + { + ei_product_coeff_vectorized_unroller::run(row, col, lhs, rhs, pres); + pres = ei_padd(pres, ei_pmul( lhs.template packet(row, Index) , rhs.template packet(Index, col) )); + } +}; + +template +struct ei_product_coeff_vectorized_unroller<0, Lhs, Rhs, PacketScalar> +{ + 
EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::PacketScalar &pres) + { + pres = ei_pmul(lhs.template packet(row, 0) , rhs.template packet(0, col)); + } +}; + +template +struct ei_product_coeff_impl +{ + typedef typename Lhs::PacketScalar PacketScalar; + enum { PacketSize = ei_packet_traits::size }; + EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, RetScalar &res) + { + PacketScalar pres; + ei_product_coeff_vectorized_unroller::run(row, col, lhs, rhs, pres); + ei_product_coeff_impl::run(row, col, lhs, rhs, res); + res = ei_predux(pres); + } +}; + +template +struct ei_product_coeff_vectorized_dyn_selector +{ + EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) + { + res = ei_dot_impl< + Block::ColsAtCompileTime>, + Block::RowsAtCompileTime, 1>, + LinearVectorization, NoUnrolling>::run(lhs.row(row), rhs.col(col)); + } +}; + +// NOTE the 3 following specializations are because taking .col(0) on a vector is a bit slower +// NOTE maybe they are now useless since we have a specialization for Block +template +struct ei_product_coeff_vectorized_dyn_selector +{ + EIGEN_STRONG_INLINE static void run(int /*row*/, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) + { + res = ei_dot_impl< + Lhs, + Block::RowsAtCompileTime, 1>, + LinearVectorization, NoUnrolling>::run(lhs, rhs.col(col)); + } +}; + +template +struct ei_product_coeff_vectorized_dyn_selector +{ + EIGEN_STRONG_INLINE static void run(int row, int /*col*/, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) + { + res = ei_dot_impl< + Block::ColsAtCompileTime>, + Rhs, + LinearVectorization, NoUnrolling>::run(lhs.row(row), rhs); + } +}; + +template +struct ei_product_coeff_vectorized_dyn_selector +{ + EIGEN_STRONG_INLINE static void run(int /*row*/, int /*col*/, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) + { + res = ei_dot_impl< + Lhs, + Rhs, + LinearVectorization, NoUnrolling>::run(lhs, rhs); + } +}; + +template +struct ei_product_coeff_impl +{ + EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res) + { + ei_product_coeff_vectorized_dyn_selector::run(row, col, lhs, rhs, res); + } +}; + +/******************* +*** Packet path *** +*******************/ + +template +struct ei_product_packet_impl +{ + EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res) + { + ei_product_packet_impl::run(row, col, lhs, rhs, res); + res = ei_pmadd(ei_pset1(lhs.coeff(row, Index)), rhs.template packet(Index, col), res); + } +}; + +template +struct ei_product_packet_impl +{ + EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res) + { + ei_product_packet_impl::run(row, col, lhs, rhs, res); + res = ei_pmadd(lhs.template packet(row, Index), ei_pset1(rhs.coeff(Index, col)), res); + } +}; + +template +struct ei_product_packet_impl +{ + EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res) + { + res = ei_pmul(ei_pset1(lhs.coeff(row, 0)),rhs.template packet(0, col)); + } +}; + +template +struct ei_product_packet_impl +{ + EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res) + { + res = ei_pmul(lhs.template packet(row, 0), ei_pset1(rhs.coeff(0, col))); + } +}; + +template +struct ei_product_packet_impl +{ + EIGEN_STRONG_INLINE 
static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar& res) + { + ei_assert(lhs.cols()>0 && "you are using a non initialized matrix"); + res = ei_pmul(ei_pset1(lhs.coeff(row, 0)),rhs.template packet(0, col)); + for(int i = 1; i < lhs.cols(); ++i) + res = ei_pmadd(ei_pset1(lhs.coeff(row, i)), rhs.template packet(i, col), res); + } +}; + +template +struct ei_product_packet_impl +{ + EIGEN_STRONG_INLINE static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar& res) + { + ei_assert(lhs.cols()>0 && "you are using a non initialized matrix"); + res = ei_pmul(lhs.template packet(row, 0), ei_pset1(rhs.coeff(0, col))); + for(int i = 1; i < lhs.cols(); ++i) + res = ei_pmadd(lhs.template packet(row, i), ei_pset1(rhs.coeff(i, col)), res); + } +}; + +/*************************************************************************** +* Cache friendly product callers and specific nested evaluation strategies +***************************************************************************/ + +template +static void ei_cache_friendly_product_colmajor_times_vector( + int size, const Scalar* lhs, int lhsStride, const RhsType& rhs, Scalar* res); + +template +static void ei_cache_friendly_product_rowmajor_times_vector( + const Scalar* lhs, int lhsStride, const Scalar* rhs, int rhsSize, ResType& res); + +template::RowsAtCompileTime, + int LhsOrder = int(ei_traits::LhsFlags)&RowMajorBit ? RowMajor : ColMajor, + int LhsHasDirectAccess = int(ei_traits::LhsFlags)&DirectAccessBit? HasDirectAccess : NoDirectAccess, + int RhsCols = ei_traits::ColsAtCompileTime, + int RhsOrder = int(ei_traits::RhsFlags)&RowMajorBit ? RowMajor : ColMajor, + int RhsHasDirectAccess = int(ei_traits::RhsFlags)&DirectAccessBit? HasDirectAccess : NoDirectAccess> +struct ei_cache_friendly_product_selector +{ + template + inline static void run(DestDerived& res, const ProductType& product) + { + product._cacheFriendlyEvalAndAdd(res); + } +}; + +// optimized colmajor * vector path +template +struct ei_cache_friendly_product_selector +{ + template + inline static void run(DestDerived& res, const ProductType& product) + { + const int size = product.rhs().rows(); + for (int k=0; k +struct ei_cache_friendly_product_selector +{ + typedef typename ProductType::Scalar Scalar; + + template + inline static void run(DestDerived& res, const ProductType& product) + { + enum { + EvalToRes = (ei_packet_traits::size==1) + ||((DestDerived::Flags&ActualPacketAccessBit) && (!(DestDerived::Flags & RowMajorBit))) }; + Scalar* EIGEN_RESTRICT _res; + if (EvalToRes) + _res = &res.coeffRef(0); + else + { + _res = ei_aligned_stack_new(Scalar,res.size()); + Map >(_res, res.size()) = res; + } + ei_cache_friendly_product_colmajor_times_vector(res.size(), + &product.lhs().const_cast_derived().coeffRef(0,0), product.lhs().stride(), + product.rhs(), _res); + + if (!EvalToRes) + { + res = Map >(_res, res.size()); + ei_aligned_stack_delete(Scalar, _res, res.size()); + } + } +}; + +// optimized vector * rowmajor path +template +struct ei_cache_friendly_product_selector +{ + template + inline static void run(DestDerived& res, const ProductType& product) + { + const int cols = product.lhs().cols(); + for (int j=0; j +struct ei_cache_friendly_product_selector +{ + typedef typename ProductType::Scalar Scalar; + + template + inline static void run(DestDerived& res, const ProductType& product) + { + enum { + EvalToRes = (ei_packet_traits::size==1) + ||((DestDerived::Flags & ActualPacketAccessBit) && (DestDerived::Flags & RowMajorBit)) }; + Scalar* 
EIGEN_RESTRICT _res; + if (EvalToRes) + _res = &res.coeffRef(0); + else + { + _res = ei_aligned_stack_new(Scalar, res.size()); + Map >(_res, res.size()) = res; + } + ei_cache_friendly_product_colmajor_times_vector(res.size(), + &product.rhs().const_cast_derived().coeffRef(0,0), product.rhs().stride(), + product.lhs().transpose(), _res); + + if (!EvalToRes) + { + res = Map >(_res, res.size()); + ei_aligned_stack_delete(Scalar, _res, res.size()); + } + } +}; + +// optimized rowmajor - vector product +template +struct ei_cache_friendly_product_selector +{ + typedef typename ProductType::Scalar Scalar; + typedef typename ei_traits::_RhsNested Rhs; + enum { + UseRhsDirectly = ((ei_packet_traits::size==1) || (Rhs::Flags&ActualPacketAccessBit)) + && (!(Rhs::Flags & RowMajorBit)) }; + + template + inline static void run(DestDerived& res, const ProductType& product) + { + Scalar* EIGEN_RESTRICT _rhs; + if (UseRhsDirectly) + _rhs = &product.rhs().const_cast_derived().coeffRef(0); + else + { + _rhs = ei_aligned_stack_new(Scalar, product.rhs().size()); + Map >(_rhs, product.rhs().size()) = product.rhs(); + } + ei_cache_friendly_product_rowmajor_times_vector(&product.lhs().const_cast_derived().coeffRef(0,0), product.lhs().stride(), + _rhs, product.rhs().size(), res); + + if (!UseRhsDirectly) ei_aligned_stack_delete(Scalar, _rhs, product.rhs().size()); + } +}; + +// optimized vector - colmajor product +template +struct ei_cache_friendly_product_selector +{ + typedef typename ProductType::Scalar Scalar; + typedef typename ei_traits::_LhsNested Lhs; + enum { + UseLhsDirectly = ((ei_packet_traits::size==1) || (Lhs::Flags&ActualPacketAccessBit)) + && (Lhs::Flags & RowMajorBit) }; + + template + inline static void run(DestDerived& res, const ProductType& product) + { + Scalar* EIGEN_RESTRICT _lhs; + if (UseLhsDirectly) + _lhs = &product.lhs().const_cast_derived().coeffRef(0); + else + { + _lhs = ei_aligned_stack_new(Scalar, product.lhs().size()); + Map >(_lhs, product.lhs().size()) = product.lhs(); + } + ei_cache_friendly_product_rowmajor_times_vector(&product.rhs().const_cast_derived().coeffRef(0,0), product.rhs().stride(), + _lhs, product.lhs().size(), res); + + if(!UseLhsDirectly) ei_aligned_stack_delete(Scalar, _lhs, product.lhs().size()); + } +}; + +// discard this case which has to be handled by the default path +// (we keep it to be sure to hit a compilation error if this is not the case) +template +struct ei_cache_friendly_product_selector +{}; + +// discard this case which has to be handled by the default path +// (we keep it to be sure to hit a compilation error if this is not the case) +template +struct ei_cache_friendly_product_selector +{}; + + +/** \internal */ +template +template +inline Derived& +MatrixBase::operator+=(const Flagged, 0, EvalBeforeNestingBit | EvalBeforeAssigningBit>& other) +{ + if (other._expression()._useCacheFriendlyProduct()) + ei_cache_friendly_product_selector >::run(const_cast_derived(), other._expression()); + else + lazyAssign(derived() + other._expression()); + return derived(); +} + +template +template +inline Derived& MatrixBase::lazyAssign(const Product& product) +{ + if (product._useCacheFriendlyProduct()) + { + setZero(); + ei_cache_friendly_product_selector >::run(const_cast_derived(), product); + } + else + { + lazyAssign >(product); + } + return derived(); +} + +template struct ei_product_copy_rhs +{ + typedef typename ei_meta_if< + (ei_traits::Flags & RowMajorBit) + || (!(ei_traits::Flags & DirectAccessBit)), + typename 
ei_plain_matrix_type_column_major::type, + const T& + >::ret type; +}; + +template struct ei_product_copy_lhs +{ + typedef typename ei_meta_if< + (!(int(ei_traits::Flags) & DirectAccessBit)), + typename ei_plain_matrix_type::type, + const T& + >::ret type; +}; + +template +template +inline void Product::_cacheFriendlyEvalAndAdd(DestDerived& res) const +{ + typedef typename ei_product_copy_lhs<_LhsNested>::type LhsCopy; + typedef typename ei_unref::type _LhsCopy; + typedef typename ei_product_copy_rhs<_RhsNested>::type RhsCopy; + typedef typename ei_unref::type _RhsCopy; + LhsCopy lhs(m_lhs); + RhsCopy rhs(m_rhs); + ei_cache_friendly_product( + rows(), cols(), lhs.cols(), + _LhsCopy::Flags&RowMajorBit, (const Scalar*)&(lhs.const_cast_derived().coeffRef(0,0)), lhs.stride(), + _RhsCopy::Flags&RowMajorBit, (const Scalar*)&(rhs.const_cast_derived().coeffRef(0,0)), rhs.stride(), + DestDerived::Flags&RowMajorBit, (Scalar*)&(res.coeffRef(0,0)), res.stride() + ); +} + +#endif // EIGEN_PRODUCT_H diff --git a/extern/Eigen2/Eigen/src/Core/Redux.h b/extern/Eigen2/Eigen/src/Core/Redux.h new file mode 100644 index 00000000000..734ef1929a4 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/Redux.h @@ -0,0 +1,117 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
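+
+// A brief usage sketch of the redux facilities defined below (assuming a
+// dynamic-size float matrix; the redux() documentation notes that plain STL
+// functors such as std::plus are accepted):
+//
+//   MatrixXf m = MatrixXf::Random(4,4);
+//   float total    = m.redux(std::plus<float>()); // essentially m.sum()
+//   float smallest = m.minCoeff();
+//   float largest  = m.maxCoeff();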
+ +#ifndef EIGEN_REDUX_H +#define EIGEN_REDUX_H + +template +struct ei_redux_impl +{ + enum { + HalfLength = Length/2 + }; + + typedef typename ei_result_of::type Scalar; + + static Scalar run(const Derived &mat, const BinaryOp& func) + { + return func( + ei_redux_impl::run(mat, func), + ei_redux_impl::run(mat, func)); + } +}; + +template +struct ei_redux_impl +{ + enum { + col = Start / Derived::RowsAtCompileTime, + row = Start % Derived::RowsAtCompileTime + }; + + typedef typename ei_result_of::type Scalar; + + static Scalar run(const Derived &mat, const BinaryOp &) + { + return mat.coeff(row, col); + } +}; + +template +struct ei_redux_impl +{ + typedef typename ei_result_of::type Scalar; + static Scalar run(const Derived& mat, const BinaryOp& func) + { + ei_assert(mat.rows()>0 && mat.cols()>0 && "you are using a non initialized matrix"); + Scalar res; + res = mat.coeff(0,0); + for(int i = 1; i < mat.rows(); ++i) + res = func(res, mat.coeff(i, 0)); + for(int j = 1; j < mat.cols(); ++j) + for(int i = 0; i < mat.rows(); ++i) + res = func(res, mat.coeff(i, j)); + return res; + } +}; + +/** \returns the result of a full redux operation on the whole matrix or vector using \a func + * + * The template parameter \a BinaryOp is the type of the functor \a func which must be + * an assiociative operator. Both current STL and TR1 functor styles are handled. + * + * \sa MatrixBase::sum(), MatrixBase::minCoeff(), MatrixBase::maxCoeff(), MatrixBase::colwise(), MatrixBase::rowwise() + */ +template +template +typename ei_result_of::Scalar)>::type +MatrixBase::redux(const BinaryOp& func) const +{ + const bool unroll = SizeAtCompileTime * CoeffReadCost + + (SizeAtCompileTime-1) * ei_functor_traits::Cost + <= EIGEN_UNROLLING_LIMIT; + return ei_redux_impl + ::run(derived(), func); +} + +/** \returns the minimum of all coefficients of *this + */ +template +inline typename ei_traits::Scalar +MatrixBase::minCoeff() const +{ + return this->redux(Eigen::ei_scalar_min_op()); +} + +/** \returns the maximum of all coefficients of *this + */ +template +inline typename ei_traits::Scalar +MatrixBase::maxCoeff() const +{ + return this->redux(Eigen::ei_scalar_max_op()); +} + +#endif // EIGEN_REDUX_H diff --git a/extern/Eigen2/Eigen/src/Core/SolveTriangular.h b/extern/Eigen2/Eigen/src/Core/SolveTriangular.h new file mode 100644 index 00000000000..12fb0e1d159 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/SolveTriangular.h @@ -0,0 +1,297 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_SOLVETRIANGULAR_H +#define EIGEN_SOLVETRIANGULAR_H + +template struct ei_is_part { enum {value=false}; }; +template struct ei_is_part > { enum {value=true}; }; + +template::value ? -1 // this is to solve ambiguous specializations + : int(Lhs::Flags) & (RowMajorBit|SparseBit) + > +struct ei_solve_triangular_selector; + +// transform a Part xpr to a Flagged xpr +template +struct ei_solve_triangular_selector,Rhs,UpLo,StorageOrder> +{ + static void run(const Part& lhs, Rhs& other) + { + ei_solve_triangular_selector,Rhs>::run(lhs._expression(), other); + } +}; + +// forward substitution, row-major +template +struct ei_solve_triangular_selector +{ + typedef typename Rhs::Scalar Scalar; + static void run(const Lhs& lhs, Rhs& other) + { + const bool IsLowerTriangular = (UpLo==LowerTriangular); + const int size = lhs.cols(); + /* We perform the inverse product per block of 4 rows such that we perfectly match + * our optimized matrix * vector product. blockyStart represents the number of rows + * we have process first using the non-block version. + */ + int blockyStart = (std::max(size-5,0)/4)*4; + if (IsLowerTriangular) + blockyStart = size - blockyStart; + else + blockyStart -= 1; + for(int c=0 ; cblockyStart; i += (IsLowerTriangular ? 1 : -1) ) + { + Scalar tmp = other.coeff(i,c) + - (IsLowerTriangular ? ((lhs.row(i).start(i)) * other.col(c).start(i)).coeff(0,0) + : ((lhs.row(i).end(size-i-1)) * other.col(c).end(size-i-1)).coeff(0,0)); + if (Lhs::Flags & UnitDiagBit) + other.coeffRef(i,c) = tmp; + else + other.coeffRef(i,c) = tmp/lhs.coeff(i,i); + } + + // now let's process the remaining rows 4 at once + for(int i=blockyStart; IsLowerTriangular ? i0; ) + { + int startBlock = i; + int endBlock = startBlock + (IsLowerTriangular ? 4 : -4); + + /* Process the i cols times 4 rows block, and keep the result in a temporary vector */ + // FIXME use fixed size block but take care to small fixed size matrices... + Matrix btmp(4); + if (IsLowerTriangular) + btmp = lhs.block(startBlock,0,4,i) * other.col(c).start(i); + else + btmp = lhs.block(i-3,i+1,4,size-1-i) * other.col(c).end(size-1-i); + + /* Let's process the 4x4 sub-matrix as usual. + * btmp stores the diagonal coefficients used to update the remaining part of the result. + */ + { + Scalar tmp = other.coeff(startBlock,c)-btmp.coeff(IsLowerTriangular?0:3); + if (Lhs::Flags & UnitDiagBit) + other.coeffRef(i,c) = tmp; + else + other.coeffRef(i,c) = tmp/lhs.coeff(i,i); + } + + i += IsLowerTriangular ? 1 : -1; + for (;IsLowerTriangular ? iendBlock; i += IsLowerTriangular ? 1 : -1) + { + int remainingSize = IsLowerTriangular ? i-startBlock : startBlock-i; + Scalar tmp = other.coeff(i,c) + - btmp.coeff(IsLowerTriangular ? remainingSize : 3-remainingSize) + - ( lhs.row(i).segment(IsLowerTriangular ? startBlock : i+1, remainingSize) + * other.col(c).segment(IsLowerTriangular ? 
startBlock : i+1, remainingSize)).coeff(0,0); + + if (Lhs::Flags & UnitDiagBit) + other.coeffRef(i,c) = tmp; + else + other.coeffRef(i,c) = tmp/lhs.coeff(i,i); + } + } + } + } +}; + +// Implements the following configurations: +// - inv(LowerTriangular, ColMajor) * Column vector +// - inv(LowerTriangular,UnitDiag,ColMajor) * Column vector +// - inv(UpperTriangular, ColMajor) * Column vector +// - inv(UpperTriangular,UnitDiag,ColMajor) * Column vector +template +struct ei_solve_triangular_selector +{ + typedef typename Rhs::Scalar Scalar; + typedef typename ei_packet_traits::type Packet; + enum { PacketSize = ei_packet_traits::size }; + + static void run(const Lhs& lhs, Rhs& other) + { + static const bool IsLowerTriangular = (UpLo==LowerTriangular); + const int size = lhs.cols(); + for(int c=0 ; cblockyEnd;) + { + /* Let's process the 4x4 sub-matrix as usual. + * btmp stores the diagonal coefficients used to update the remaining part of the result. + */ + int startBlock = i; + int endBlock = startBlock + (IsLowerTriangular ? 4 : -4); + Matrix btmp; + for (;IsLowerTriangular ? iendBlock; + i += IsLowerTriangular ? 1 : -1) + { + if(!(Lhs::Flags & UnitDiagBit)) + other.coeffRef(i,c) /= lhs.coeff(i,i); + int remainingSize = IsLowerTriangular ? endBlock-i-1 : i-endBlock-1; + if (remainingSize>0) + other.col(c).segment((IsLowerTriangular ? i : endBlock) + 1, remainingSize) -= + other.coeffRef(i,c) + * Block(lhs, (IsLowerTriangular ? i : endBlock) + 1, i, remainingSize, 1); + btmp.coeffRef(IsLowerTriangular ? i-startBlock : remainingSize) = -other.coeffRef(i,c); + } + + /* Now we can efficiently update the remaining part of the result as a matrix * vector product. + * NOTE in order to reduce both compilation time and binary size, let's directly call + * the fast product implementation. It is equivalent to the following code: + * other.col(c).end(size-endBlock) += (lhs.block(endBlock, startBlock, size-endBlock, endBlock-startBlock) + * * other.col(c).block(startBlock,endBlock-startBlock)).lazy(); + */ + // FIXME this is cool but what about conjugate/adjoint expressions ? do we want to evaluate them ? + // this is a more general problem though. + ei_cache_friendly_product_colmajor_times_vector( + IsLowerTriangular ? size-endBlock : endBlock+1, + &(lhs.const_cast_derived().coeffRef(IsLowerTriangular ? endBlock : 0, IsLowerTriangular ? startBlock : endBlock+1)), + lhs.stride(), + btmp, &(other.coeffRef(IsLowerTriangular ? endBlock : 0, c))); +// if (IsLowerTriangular) +// other.col(c).end(size-endBlock) += (lhs.block(endBlock, startBlock, size-endBlock, endBlock-startBlock) +// * other.col(c).block(startBlock,endBlock-startBlock)).lazy(); +// else +// other.col(c).end(size-endBlock) += (lhs.block(endBlock, startBlock, size-endBlock, endBlock-startBlock) +// * other.col(c).block(startBlock,endBlock-startBlock)).lazy(); + } + + /* Now we have to process the remaining part as usual */ + int i; + for(i=blockyEnd; IsLowerTriangular ? i0; i += (IsLowerTriangular ? 
1 : -1) ) + { + if(!(Lhs::Flags & UnitDiagBit)) + other.coeffRef(i,c) /= lhs.coeff(i,i); + + /* NOTE we cannot use lhs.col(i).end(size-i-1) because Part::coeffRef gets called by .col() to + * get the address of the start of the row + */ + if(IsLowerTriangular) + other.col(c).end(size-i-1) -= other.coeffRef(i,c) * Block(lhs, i+1,i, size-i-1,1); + else + other.col(c).start(i) -= other.coeffRef(i,c) * Block(lhs, 0,i, i, 1); + } + if(!(Lhs::Flags & UnitDiagBit)) + other.coeffRef(i,c) /= lhs.coeff(i,i); + } + } +}; + +/** "in-place" version of MatrixBase::solveTriangular() where the result is written in \a other + * + * \nonstableyet + * + * The parameter is only marked 'const' to make the C++ compiler accept a temporary expression here. + * This function will const_cast it, so constness isn't honored here. + * + * See MatrixBase:solveTriangular() for the details. + */ +template +template +void MatrixBase::solveTriangularInPlace(const MatrixBase& _other) const +{ + MatrixBase& other = _other.const_cast_derived(); + ei_assert(derived().cols() == derived().rows()); + ei_assert(derived().cols() == other.rows()); + ei_assert(!(Flags & ZeroDiagBit)); + ei_assert(Flags & (UpperTriangularBit|LowerTriangularBit)); + + enum { copy = ei_traits::Flags & RowMajorBit }; + + typedef typename ei_meta_if::type, OtherDerived&>::ret OtherCopy; + OtherCopy otherCopy(other.derived()); + + ei_solve_triangular_selector::type>::run(derived(), otherCopy); + + if (copy) + other = otherCopy; +} + +/** \returns the product of the inverse of \c *this with \a other, \a *this being triangular. + * + * \nonstableyet + * + * This function computes the inverse-matrix matrix product inverse(\c *this) * \a other. + * The matrix \c *this must be triangular and invertible (i.e., all the coefficients of the + * diagonal must be non zero). It works as a forward (resp. backward) substitution if \c *this + * is an upper (resp. lower) triangular matrix. + * + * It is required that \c *this be marked as either an upper or a lower triangular matrix, which + * can be done by marked(), and that is automatically the case with expressions such as those returned + * by extract(). + * + * \addexample SolveTriangular \label How to solve a triangular system (aka. how to multiply the inverse of a triangular matrix by another one) + * + * Example: \include MatrixBase_marked.cpp + * Output: \verbinclude MatrixBase_marked.out + * + * This function is essentially a wrapper to the faster solveTriangularInPlace() function creating + * a temporary copy of \a other, calling solveTriangularInPlace() on the copy and returning it. + * Therefore, if \a other is not needed anymore, it is quite faster to call solveTriangularInPlace() + * instead of solveTriangular(). + * + * For users coming from BLAS, this function (and more specifically solveTriangularInPlace()) offer + * all the operations supported by the \c *TRSV and \c *TRSM BLAS routines. 
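+ *
+ * As a rough usage sketch (illustrative only, not taken from the Eigen examples; it assumes the
+ * diagonal of \c m is non zero, which marked() does not check):
+ * \code
+ * Matrix3f m = Matrix3f::Random(), rhs = Matrix3f::Random();
+ * Matrix3f x = m.marked<LowerTriangular>().solveTriangular(rhs);   // x = inverse(lower(m)) * rhs
+ * m.marked<LowerTriangular>().solveTriangularInPlace(rhs);         // same result, written into rhs
+ * \endcode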
+ * + * \b Tips: to perform a \em "right-inverse-multiply" you can simply transpose the operation, e.g.: + * \code + * M * T^1 <=> T.transpose().solveTriangularInPlace(M.transpose()); + * \endcode + * + * \sa solveTriangularInPlace(), marked(), extract() + */ +template +template +typename ei_plain_matrix_type_column_major::type +MatrixBase::solveTriangular(const MatrixBase& other) const +{ + typename ei_plain_matrix_type_column_major::type res(other); + solveTriangularInPlace(res); + return res; +} + +#endif // EIGEN_SOLVETRIANGULAR_H diff --git a/extern/Eigen2/Eigen/src/Core/Sum.h b/extern/Eigen2/Eigen/src/Core/Sum.h new file mode 100644 index 00000000000..6d7e9959fa5 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/Sum.h @@ -0,0 +1,271 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_SUM_H +#define EIGEN_SUM_H + +/*************************************************************************** +* Part 1 : the logic deciding a strategy for vectorization and unrolling +***************************************************************************/ + +template +struct ei_sum_traits +{ +private: + enum { + PacketSize = ei_packet_traits::size + }; + +public: + enum { + Vectorization = (int(Derived::Flags)&ActualPacketAccessBit) + && (int(Derived::Flags)&LinearAccessBit) + ? LinearVectorization + : NoVectorization + }; + +private: + enum { + Cost = Derived::SizeAtCompileTime * Derived::CoeffReadCost + + (Derived::SizeAtCompileTime-1) * NumTraits::AddCost, + UnrollingLimit = EIGEN_UNROLLING_LIMIT * (int(Vectorization) == int(NoVectorization) ? 1 : int(PacketSize)) + }; + +public: + enum { + Unrolling = Cost <= UnrollingLimit + ? 
CompleteUnrolling + : NoUnrolling + }; +}; + +/*************************************************************************** +* Part 2 : unrollers +***************************************************************************/ + +/*** no vectorization ***/ + +template +struct ei_sum_novec_unroller +{ + enum { + HalfLength = Length/2 + }; + + typedef typename Derived::Scalar Scalar; + + inline static Scalar run(const Derived &mat) + { + return ei_sum_novec_unroller::run(mat) + + ei_sum_novec_unroller::run(mat); + } +}; + +template +struct ei_sum_novec_unroller +{ + enum { + col = Start / Derived::RowsAtCompileTime, + row = Start % Derived::RowsAtCompileTime + }; + + typedef typename Derived::Scalar Scalar; + + inline static Scalar run(const Derived &mat) + { + return mat.coeff(row, col); + } +}; + +/*** vectorization ***/ + +template +struct ei_sum_vec_unroller +{ + enum { + PacketSize = ei_packet_traits::size, + HalfLength = Length/2 + }; + + typedef typename Derived::Scalar Scalar; + typedef typename ei_packet_traits::type PacketScalar; + + inline static PacketScalar run(const Derived &mat) + { + return ei_padd( + ei_sum_vec_unroller::run(mat), + ei_sum_vec_unroller::run(mat) ); + } +}; + +template +struct ei_sum_vec_unroller +{ + enum { + index = Start * ei_packet_traits::size, + row = int(Derived::Flags)&RowMajorBit + ? index / int(Derived::ColsAtCompileTime) + : index % Derived::RowsAtCompileTime, + col = int(Derived::Flags)&RowMajorBit + ? index % int(Derived::ColsAtCompileTime) + : index / Derived::RowsAtCompileTime, + alignment = (Derived::Flags & AlignedBit) ? Aligned : Unaligned + }; + + typedef typename Derived::Scalar Scalar; + typedef typename ei_packet_traits::type PacketScalar; + + inline static PacketScalar run(const Derived &mat) + { + return mat.template packet(row, col); + } +}; + +/*************************************************************************** +* Part 3 : implementation of all cases +***************************************************************************/ + +template::Vectorization, + int Unrolling = ei_sum_traits::Unrolling +> +struct ei_sum_impl; + +template +struct ei_sum_impl +{ + typedef typename Derived::Scalar Scalar; + static Scalar run(const Derived& mat) + { + ei_assert(mat.rows()>0 && mat.cols()>0 && "you are using a non initialized matrix"); + Scalar res; + res = mat.coeff(0, 0); + for(int i = 1; i < mat.rows(); ++i) + res += mat.coeff(i, 0); + for(int j = 1; j < mat.cols(); ++j) + for(int i = 0; i < mat.rows(); ++i) + res += mat.coeff(i, j); + return res; + } +}; + +template +struct ei_sum_impl + : public ei_sum_novec_unroller +{}; + +template +struct ei_sum_impl +{ + typedef typename Derived::Scalar Scalar; + typedef typename ei_packet_traits::type PacketScalar; + + static Scalar run(const Derived& mat) + { + const int size = mat.size(); + const int packetSize = ei_packet_traits::size; + const int alignedStart = (Derived::Flags & AlignedBit) + || !(Derived::Flags & DirectAccessBit) + ? 0 + : ei_alignmentOffset(&mat.const_cast_derived().coeffRef(0), size); + enum { + alignment = (Derived::Flags & DirectAccessBit) || (Derived::Flags & AlignedBit) + ? 
Aligned : Unaligned + }; + const int alignedSize = ((size-alignedStart)/packetSize)*packetSize; + const int alignedEnd = alignedStart + alignedSize; + Scalar res; + + if(alignedSize) + { + PacketScalar packet_res = mat.template packet(alignedStart); + for(int index = alignedStart + packetSize; index < alignedEnd; index += packetSize) + packet_res = ei_padd(packet_res, mat.template packet(index)); + res = ei_predux(packet_res); + } + else // too small to vectorize anything. + // since this is dynamic-size hence inefficient anyway for such small sizes, don't try to optimize. + { + res = Scalar(0); + } + + for(int index = 0; index < alignedStart; ++index) + res += mat.coeff(index); + + for(int index = alignedEnd; index < size; ++index) + res += mat.coeff(index); + + return res; + } +}; + +template +struct ei_sum_impl +{ + typedef typename Derived::Scalar Scalar; + typedef typename ei_packet_traits::type PacketScalar; + enum { + PacketSize = ei_packet_traits::size, + Size = Derived::SizeAtCompileTime, + VectorizationSize = (Size / PacketSize) * PacketSize + }; + static Scalar run(const Derived& mat) + { + Scalar res = ei_predux(ei_sum_vec_unroller::run(mat)); + if (VectorizationSize != Size) + res += ei_sum_novec_unroller::run(mat); + return res; + } +}; + +/*************************************************************************** +* Part 4 : implementation of MatrixBase methods +***************************************************************************/ + +/** \returns the sum of all coefficients of *this + * + * \sa trace() + */ +template +inline typename ei_traits::Scalar +MatrixBase::sum() const +{ + return ei_sum_impl::run(derived()); +} + +/** \returns the trace of \c *this, i.e. the sum of the coefficients on the main diagonal. + * + * \c *this can be any matrix, not necessarily square. + * + * \sa diagonal(), sum() + */ +template +inline typename ei_traits::Scalar +MatrixBase::trace() const +{ + return diagonal().sum(); +} + +#endif // EIGEN_SUM_H diff --git a/extern/Eigen2/Eigen/src/Core/Swap.h b/extern/Eigen2/Eigen/src/Core/Swap.h new file mode 100644 index 00000000000..77d562cd3ac --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/Swap.h @@ -0,0 +1,142 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2006-2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
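+
+/* A short usage sketch for the sum()/trace() reductions implemented in Sum.h above
+ * (illustrative only; MatrixXf is the dynamic-size float matrix of this library):
+ * \code
+ * MatrixXf m(2,2);
+ * m << 1, 2,
+ *      3, 4;
+ * float s = m.sum();    // 1+2+3+4 == 10
+ * float t = m.trace();  // 1+4 == 5, i.e. m.diagonal().sum()
+ * \endcode
+ */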
+ +#ifndef EIGEN_SWAP_H +#define EIGEN_SWAP_H + +/** \class SwapWrapper + * + * \internal + * + * \brief Internal helper class for swapping two expressions + */ +template +struct ei_traits > +{ + typedef typename ExpressionType::Scalar Scalar; + enum { + RowsAtCompileTime = ExpressionType::RowsAtCompileTime, + ColsAtCompileTime = ExpressionType::ColsAtCompileTime, + MaxRowsAtCompileTime = ExpressionType::MaxRowsAtCompileTime, + MaxColsAtCompileTime = ExpressionType::MaxColsAtCompileTime, + Flags = ExpressionType::Flags, + CoeffReadCost = ExpressionType::CoeffReadCost + }; +}; + +template class SwapWrapper + : public MatrixBase > +{ + public: + + EIGEN_GENERIC_PUBLIC_INTERFACE(SwapWrapper) + typedef typename ei_packet_traits::type Packet; + + inline SwapWrapper(ExpressionType& xpr) : m_expression(xpr) {} + + inline int rows() const { return m_expression.rows(); } + inline int cols() const { return m_expression.cols(); } + inline int stride() const { return m_expression.stride(); } + + inline Scalar& coeffRef(int row, int col) + { + return m_expression.const_cast_derived().coeffRef(row, col); + } + + inline Scalar& coeffRef(int index) + { + return m_expression.const_cast_derived().coeffRef(index); + } + + template + void copyCoeff(int row, int col, const MatrixBase& other) + { + OtherDerived& _other = other.const_cast_derived(); + ei_internal_assert(row >= 0 && row < rows() + && col >= 0 && col < cols()); + Scalar tmp = m_expression.coeff(row, col); + m_expression.coeffRef(row, col) = _other.coeff(row, col); + _other.coeffRef(row, col) = tmp; + } + + template + void copyCoeff(int index, const MatrixBase& other) + { + OtherDerived& _other = other.const_cast_derived(); + ei_internal_assert(index >= 0 && index < m_expression.size()); + Scalar tmp = m_expression.coeff(index); + m_expression.coeffRef(index) = _other.coeff(index); + _other.coeffRef(index) = tmp; + } + + template + void copyPacket(int row, int col, const MatrixBase& other) + { + OtherDerived& _other = other.const_cast_derived(); + ei_internal_assert(row >= 0 && row < rows() + && col >= 0 && col < cols()); + Packet tmp = m_expression.template packet(row, col); + m_expression.template writePacket(row, col, + _other.template packet(row, col) + ); + _other.template writePacket(row, col, tmp); + } + + template + void copyPacket(int index, const MatrixBase& other) + { + OtherDerived& _other = other.const_cast_derived(); + ei_internal_assert(index >= 0 && index < m_expression.size()); + Packet tmp = m_expression.template packet(index); + m_expression.template writePacket(index, + _other.template packet(index) + ); + _other.template writePacket(index, tmp); + } + + protected: + ExpressionType& m_expression; +}; + +/** swaps *this with the expression \a other. + * + * \note \a other is only marked for internal reasons, but of course + * it gets const-casted. One reason is that one will often call swap + * on temporary objects (hence non-const references are forbidden). + * Another reason is that lazyAssign takes a const argument anyway. + */ +template +template +void MatrixBase::swap(const MatrixBase& other) +{ + (SwapWrapper(derived())).lazyAssign(other); +} + +#endif // EIGEN_SWAP_H + + + + + + diff --git a/extern/Eigen2/Eigen/src/Core/Transpose.h b/extern/Eigen2/Eigen/src/Core/Transpose.h new file mode 100644 index 00000000000..870edfe320b --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/Transpose.h @@ -0,0 +1,228 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. 
Eigen itself is part of the KDE project. +// +// Copyright (C) 2006-2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_TRANSPOSE_H +#define EIGEN_TRANSPOSE_H + +/** \class Transpose + * + * \brief Expression of the transpose of a matrix + * + * \param MatrixType the type of the object of which we are taking the transpose + * + * This class represents an expression of the transpose of a matrix. + * It is the return type of MatrixBase::transpose() and MatrixBase::adjoint() + * and most of the time this is the only way it is used. + * + * \sa MatrixBase::transpose(), MatrixBase::adjoint() + */ +template +struct ei_traits > +{ + typedef typename MatrixType::Scalar Scalar; + typedef typename ei_nested::type MatrixTypeNested; + typedef typename ei_unref::type _MatrixTypeNested; + enum { + RowsAtCompileTime = MatrixType::ColsAtCompileTime, + ColsAtCompileTime = MatrixType::RowsAtCompileTime, + MaxRowsAtCompileTime = MatrixType::MaxColsAtCompileTime, + MaxColsAtCompileTime = MatrixType::MaxRowsAtCompileTime, + Flags = ((int(_MatrixTypeNested::Flags) ^ RowMajorBit) + & ~(LowerTriangularBit | UpperTriangularBit)) + | (int(_MatrixTypeNested::Flags)&UpperTriangularBit ? LowerTriangularBit : 0) + | (int(_MatrixTypeNested::Flags)&LowerTriangularBit ? 
UpperTriangularBit : 0), + CoeffReadCost = _MatrixTypeNested::CoeffReadCost + }; +}; + +template class Transpose + : public MatrixBase > +{ + public: + + EIGEN_GENERIC_PUBLIC_INTERFACE(Transpose) + + inline Transpose(const MatrixType& matrix) : m_matrix(matrix) {} + + EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Transpose) + + inline int rows() const { return m_matrix.cols(); } + inline int cols() const { return m_matrix.rows(); } + inline int nonZeros() const { return m_matrix.nonZeros(); } + inline int stride(void) const { return m_matrix.stride(); } + + inline Scalar& coeffRef(int row, int col) + { + return m_matrix.const_cast_derived().coeffRef(col, row); + } + + inline const Scalar coeff(int row, int col) const + { + return m_matrix.coeff(col, row); + } + + inline const Scalar coeff(int index) const + { + return m_matrix.coeff(index); + } + + inline Scalar& coeffRef(int index) + { + return m_matrix.const_cast_derived().coeffRef(index); + } + + template + inline const PacketScalar packet(int row, int col) const + { + return m_matrix.template packet(col, row); + } + + template + inline void writePacket(int row, int col, const PacketScalar& x) + { + m_matrix.const_cast_derived().template writePacket(col, row, x); + } + + template + inline const PacketScalar packet(int index) const + { + return m_matrix.template packet(index); + } + + template + inline void writePacket(int index, const PacketScalar& x) + { + m_matrix.const_cast_derived().template writePacket(index, x); + } + + protected: + const typename MatrixType::Nested m_matrix; +}; + +/** \returns an expression of the transpose of *this. + * + * Example: \include MatrixBase_transpose.cpp + * Output: \verbinclude MatrixBase_transpose.out + * + * \warning If you want to replace a matrix by its own transpose, do \b NOT do this: + * \code + * m = m.transpose(); // bug!!! caused by aliasing effect + * \endcode + * Instead, use the transposeInPlace() method: + * \code + * m.transposeInPlace(); + * \endcode + * which gives Eigen good opportunities for optimization, or alternatively you can also do: + * \code + * m = m.transpose().eval(); + * \endcode + * + * \sa transposeInPlace(), adjoint() */ +template +inline Transpose +MatrixBase::transpose() +{ + return derived(); +} + +/** This is the const version of transpose(). + * + * Make sure you read the warning for transpose() ! + * + * \sa transposeInPlace(), adjoint() */ +template +inline const Transpose +MatrixBase::transpose() const +{ + return derived(); +} + +/** \returns an expression of the adjoint (i.e. conjugate transpose) of *this. + * + * Example: \include MatrixBase_adjoint.cpp + * Output: \verbinclude MatrixBase_adjoint.out + * + * \warning If you want to replace a matrix by its own adjoint, do \b NOT do this: + * \code + * m = m.adjoint(); // bug!!! 
caused by aliasing effect + * \endcode + * Instead, do: + * \code + * m = m.adjoint().eval(); + * \endcode + * + * \sa transpose(), conjugate(), class Transpose, class ei_scalar_conjugate_op */ +template +inline const typename MatrixBase::AdjointReturnType +MatrixBase::adjoint() const +{ + return conjugate().nestByValue(); +} + +/*************************************************************************** +* "in place" transpose implementation +***************************************************************************/ + +template +struct ei_inplace_transpose_selector; + +template +struct ei_inplace_transpose_selector { // square matrix + static void run(MatrixType& m) { + m.template part().swap(m.transpose()); + } +}; + +template +struct ei_inplace_transpose_selector { // non square matrix + static void run(MatrixType& m) { + if (m.rows()==m.cols()) + m.template part().swap(m.transpose()); + else + m = m.transpose().eval(); + } +}; + +/** This is the "in place" version of transpose: it transposes \c *this. + * + * In most cases it is probably better to simply use the transposed expression + * of a matrix. However, when transposing the matrix data itself is really needed, + * then this "in-place" version is probably the right choice because it provides + * the following additional features: + * - less error prone: doing the same operation with .transpose() requires special care: + * \code m = m.transpose().eval(); \endcode + * - no temporary object is created (currently only for squared matrices) + * - it allows future optimizations (cache friendliness, etc.) + * + * \note if the matrix is not square, then \c *this must be a resizable matrix. + * + * \sa transpose(), adjoint() */ +template +inline void MatrixBase::transposeInPlace() +{ + ei_inplace_transpose_selector::run(derived()); +} + +#endif // EIGEN_TRANSPOSE_H diff --git a/extern/Eigen2/Eigen/src/Core/Visitor.h b/extern/Eigen2/Eigen/src/Core/Visitor.h new file mode 100644 index 00000000000..7569114e90d --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/Visitor.h @@ -0,0 +1,228 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
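+
+/* A sketch of the aliasing rules discussed in Transpose.h above (illustrative only):
+ * \code
+ * MatrixXf m = MatrixXf::Random(3,3);
+ * m.transposeInPlace();        // safe: dedicated in-place path, no temporary for square matrices
+ * m = m.transpose().eval();    // safe: the explicit temporary breaks the aliasing
+ * // m = m.transpose();        // wrong: reads and writes m at the same time
+ * \endcode
+ */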
+ +#ifndef EIGEN_VISITOR_H +#define EIGEN_VISITOR_H + +template +struct ei_visitor_impl +{ + enum { + col = (UnrollCount-1) / Derived::RowsAtCompileTime, + row = (UnrollCount-1) % Derived::RowsAtCompileTime + }; + + inline static void run(const Derived &mat, Visitor& visitor) + { + ei_visitor_impl::run(mat, visitor); + visitor(mat.coeff(row, col), row, col); + } +}; + +template +struct ei_visitor_impl +{ + inline static void run(const Derived &mat, Visitor& visitor) + { + return visitor.init(mat.coeff(0, 0), 0, 0); + } +}; + +template +struct ei_visitor_impl +{ + inline static void run(const Derived& mat, Visitor& visitor) + { + visitor.init(mat.coeff(0,0), 0, 0); + for(int i = 1; i < mat.rows(); ++i) + visitor(mat.coeff(i, 0), i, 0); + for(int j = 1; j < mat.cols(); ++j) + for(int i = 0; i < mat.rows(); ++i) + visitor(mat.coeff(i, j), i, j); + } +}; + + +/** Applies the visitor \a visitor to the whole coefficients of the matrix or vector. + * + * The template parameter \a Visitor is the type of the visitor and provides the following interface: + * \code + * struct MyVisitor { + * // called for the first coefficient + * void init(const Scalar& value, int i, int j); + * // called for all other coefficients + * void operator() (const Scalar& value, int i, int j); + * }; + * \endcode + * + * \note compared to one or two \em for \em loops, visitors offer automatic + * unrolling for small fixed size matrix. + * + * \sa minCoeff(int*,int*), maxCoeff(int*,int*), MatrixBase::redux() + */ +template +template +void MatrixBase::visit(Visitor& visitor) const +{ + const bool unroll = SizeAtCompileTime * CoeffReadCost + + (SizeAtCompileTime-1) * ei_functor_traits::Cost + <= EIGEN_UNROLLING_LIMIT; + return ei_visitor_impl::run(derived(), visitor); +} + +/** \internal + * \brief Base class to implement min and max visitors + */ +template +struct ei_coeff_visitor +{ + int row, col; + Scalar res; + inline void init(const Scalar& value, int i, int j) + { + res = value; + row = i; + col = j; + } +}; + +/** \internal + * \brief Visitor computing the min coefficient with its value and coordinates + * + * \sa MatrixBase::minCoeff(int*, int*) + */ +template +struct ei_min_coeff_visitor : ei_coeff_visitor +{ + void operator() (const Scalar& value, int i, int j) + { + if(value < this->res) + { + this->res = value; + this->row = i; + this->col = j; + } + } +}; + +template +struct ei_functor_traits > { + enum { + Cost = NumTraits::AddCost + }; +}; + +/** \internal + * \brief Visitor computing the max coefficient with its value and coordinates + * + * \sa MatrixBase::maxCoeff(int*, int*) + */ +template +struct ei_max_coeff_visitor : ei_coeff_visitor +{ + void operator() (const Scalar& value, int i, int j) + { + if(value > this->res) + { + this->res = value; + this->row = i; + this->col = j; + } + } +}; + +template +struct ei_functor_traits > { + enum { + Cost = NumTraits::AddCost + }; +}; + +/** \returns the minimum of all coefficients of *this + * and puts in *row and *col its location. + * + * \sa MatrixBase::minCoeff(int*), MatrixBase::maxCoeff(int*,int*), MatrixBase::visitor(), MatrixBase::minCoeff() + */ +template +typename ei_traits::Scalar +MatrixBase::minCoeff(int* row, int* col) const +{ + ei_min_coeff_visitor minVisitor; + this->visit(minVisitor); + *row = minVisitor.row; + if (col) *col = minVisitor.col; + return minVisitor.res; +} + +/** \returns the minimum of all coefficients of *this + * and puts in *index its location. 
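+ *
+ * For instance (a rough sketch, values chosen arbitrarily):
+ * \code
+ * Vector3f v(3, -1, 2);
+ * int i;
+ * float m = v.minCoeff(&i);   // m == -1, i == 1
+ * \endcode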
+ * + * \sa MatrixBase::minCoeff(int*,int*), MatrixBase::maxCoeff(int*,int*), MatrixBase::visitor(), MatrixBase::minCoeff() + */ +template +typename ei_traits::Scalar +MatrixBase::minCoeff(int* index) const +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + ei_min_coeff_visitor minVisitor; + this->visit(minVisitor); + *index = (RowsAtCompileTime==1) ? minVisitor.col : minVisitor.row; + return minVisitor.res; +} + +/** \returns the maximum of all coefficients of *this + * and puts in *row and *col its location. + * + * \sa MatrixBase::minCoeff(int*,int*), MatrixBase::visitor(), MatrixBase::maxCoeff() + */ +template +typename ei_traits::Scalar +MatrixBase::maxCoeff(int* row, int* col) const +{ + ei_max_coeff_visitor maxVisitor; + this->visit(maxVisitor); + *row = maxVisitor.row; + if (col) *col = maxVisitor.col; + return maxVisitor.res; +} + +/** \returns the maximum of all coefficients of *this + * and puts in *index its location. + * + * \sa MatrixBase::maxCoeff(int*,int*), MatrixBase::minCoeff(int*,int*), MatrixBase::visitor(), MatrixBase::maxCoeff() + */ +template +typename ei_traits::Scalar +MatrixBase::maxCoeff(int* index) const +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + ei_max_coeff_visitor maxVisitor; + this->visit(maxVisitor); + *index = (RowsAtCompileTime==1) ? maxVisitor.col : maxVisitor.row; + return maxVisitor.res; +} + +#endif // EIGEN_VISITOR_H diff --git a/extern/Eigen2/Eigen/src/Core/arch/AltiVec/PacketMath.h b/extern/Eigen2/Eigen/src/Core/arch/AltiVec/PacketMath.h new file mode 100644 index 00000000000..4de3b5e2e0b --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/arch/AltiVec/PacketMath.h @@ -0,0 +1,354 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Konstantinos Margaritis +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
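+
+/* A sketch of a user-defined visitor matching the interface documented in Visitor.h above
+ * (the names AbsSumVisitor/absSum are illustrative, not part of the library):
+ * \code
+ * struct AbsSumVisitor {
+ *   float absSum;
+ *   void init(const float& value, int, int)       { absSum = ei_abs(value); }
+ *   void operator()(const float& value, int, int) { absSum += ei_abs(value); }
+ * };
+ *
+ * Matrix2f m;
+ * m << 1, -2,
+ *     -3,  4;
+ * AbsSumVisitor vis;
+ * m.visit(vis);   // vis.absSum == 10
+ * \endcode
+ */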
+ +#ifndef EIGEN_PACKET_MATH_ALTIVEC_H +#define EIGEN_PACKET_MATH_ALTIVEC_H + +#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD +#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 4 +#endif + +typedef __vector float v4f; +typedef __vector int v4i; +typedef __vector unsigned int v4ui; +typedef __vector __bool int v4bi; + +// We don't want to write the same code all the time, but we need to reuse the constants +// and it doesn't really work to declare them global, so we define macros instead + +#define USE_CONST_v0i const v4i v0i = vec_splat_s32(0) +#define USE_CONST_v1i const v4i v1i = vec_splat_s32(1) +#define USE_CONST_v16i_ const v4i v16i_ = vec_splat_s32(-16) +#define USE_CONST_v0f USE_CONST_v0i; const v4f v0f = (v4f) v0i +#define USE_CONST_v1f USE_CONST_v1i; const v4f v1f = vec_ctf(v1i, 0) +#define USE_CONST_v1i_ const v4ui v1i_ = vec_splat_u32(-1) +#define USE_CONST_v0f_ USE_CONST_v1i_; const v4f v0f_ = (v4f) vec_sl(v1i_, v1i_) + +template<> struct ei_packet_traits { typedef v4f type; enum {size=4}; }; +template<> struct ei_packet_traits { typedef v4i type; enum {size=4}; }; + +template<> struct ei_unpacket_traits { typedef float type; enum {size=4}; }; +template<> struct ei_unpacket_traits { typedef int type; enum {size=4}; }; + +inline std::ostream & operator <<(std::ostream & s, const v4f & v) +{ + union { + v4f v; + float n[4]; + } vt; + vt.v = v; + s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3]; + return s; +} + +inline std::ostream & operator <<(std::ostream & s, const v4i & v) +{ + union { + v4i v; + int n[4]; + } vt; + vt.v = v; + s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3]; + return s; +} + +inline std::ostream & operator <<(std::ostream & s, const v4ui & v) +{ + union { + v4ui v; + unsigned int n[4]; + } vt; + vt.v = v; + s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3]; + return s; +} + +inline std::ostream & operator <<(std::ostream & s, const v4bi & v) +{ + union { + __vector __bool int v; + unsigned int n[4]; + } vt; + vt.v = v; + s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3]; + return s; +} + +template<> inline v4f ei_padd(const v4f& a, const v4f& b) { return vec_add(a,b); } +template<> inline v4i ei_padd(const v4i& a, const v4i& b) { return vec_add(a,b); } + +template<> inline v4f ei_psub(const v4f& a, const v4f& b) { return vec_sub(a,b); } +template<> inline v4i ei_psub(const v4i& a, const v4i& b) { return vec_sub(a,b); } + +template<> inline v4f ei_pmul(const v4f& a, const v4f& b) { USE_CONST_v0f; return vec_madd(a,b, v0f); } +template<> inline v4i ei_pmul(const v4i& a, const v4i& b) +{ + // Detailed in: http://freevec.org/content/32bit_signed_integer_multiplication_altivec + //Set up constants, variables + v4i a1, b1, bswap, low_prod, high_prod, prod, prod_, v1sel; + USE_CONST_v0i; + USE_CONST_v1i; + USE_CONST_v16i_; + + // Get the absolute values + a1 = vec_abs(a); + b1 = vec_abs(b); + + // Get the signs using xor + v4bi sgn = (v4bi) vec_cmplt(vec_xor(a, b), v0i); + + // Do the multiplication for the asbolute values. 
+ bswap = (v4i) vec_rl((v4ui) b1, (v4ui) v16i_ ); + low_prod = vec_mulo((__vector short)a1, (__vector short)b1); + high_prod = vec_msum((__vector short)a1, (__vector short)bswap, v0i); + high_prod = (v4i) vec_sl((v4ui) high_prod, (v4ui) v16i_); + prod = vec_add( low_prod, high_prod ); + + // NOR the product and select only the negative elements according to the sign mask + prod_ = vec_nor(prod, prod); + prod_ = vec_sel(v0i, prod_, sgn); + + // Add 1 to the result to get the negative numbers + v1sel = vec_sel(v0i, v1i, sgn); + prod_ = vec_add(prod_, v1sel); + + // Merge the results back to the final vector. + prod = vec_sel(prod, prod_, sgn); + + return prod; +} + +template<> inline v4f ei_pdiv(const v4f& a, const v4f& b) { + v4f t, y_0, y_1, res; + USE_CONST_v0f; + USE_CONST_v1f; + + // Altivec does not offer a divide instruction, we have to do a reciprocal approximation + y_0 = vec_re(b); + + // Do one Newton-Raphson iteration to get the needed accuracy + t = vec_nmsub(y_0, b, v1f); + y_1 = vec_madd(y_0, t, y_0); + + res = vec_madd(a, y_1, v0f); + return res; +} + +template<> inline v4f ei_pmadd(const v4f& a, const v4f& b, const v4f& c) { return vec_madd(a, b, c); } + +template<> inline v4f ei_pmin(const v4f& a, const v4f& b) { return vec_min(a,b); } +template<> inline v4i ei_pmin(const v4i& a, const v4i& b) { return vec_min(a,b); } + +template<> inline v4f ei_pmax(const v4f& a, const v4f& b) { return vec_max(a,b); } +template<> inline v4i ei_pmax(const v4i& a, const v4i& b) { return vec_max(a,b); } + +template<> inline v4f ei_pload(const float* from) { return vec_ld(0, from); } +template<> inline v4i ei_pload(const int* from) { return vec_ld(0, from); } + +template<> inline v4f ei_ploadu(const float* from) +{ + // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html + __vector unsigned char MSQ, LSQ; + __vector unsigned char mask; + MSQ = vec_ld(0, (unsigned char *)from); // most significant quadword + LSQ = vec_ld(15, (unsigned char *)from); // least significant quadword + mask = vec_lvsl(0, from); // create the permute mask + return (v4f) vec_perm(MSQ, LSQ, mask); // align the data +} + +template<> inline v4i ei_ploadu(const int* from) +{ + // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html + __vector unsigned char MSQ, LSQ; + __vector unsigned char mask; + MSQ = vec_ld(0, (unsigned char *)from); // most significant quadword + LSQ = vec_ld(15, (unsigned char *)from); // least significant quadword + mask = vec_lvsl(0, from); // create the permute mask + return (v4i) vec_perm(MSQ, LSQ, mask); // align the data +} + +template<> inline v4f ei_pset1(const float& from) +{ + // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html + float __attribute__(aligned(16)) af[4]; + af[0] = from; + v4f vc = vec_ld(0, af); + vc = vec_splat(vc, 0); + return vc; +} + +template<> inline v4i ei_pset1(const int& from) +{ + int __attribute__(aligned(16)) ai[4]; + ai[0] = from; + v4i vc = vec_ld(0, ai); + vc = vec_splat(vc, 0); + return vc; +} + +template<> inline void ei_pstore(float* to, const v4f& from) { vec_st(from, 0, to); } +template<> inline void ei_pstore(int* to, const v4i& from) { vec_st(from, 0, to); } + +template<> inline void ei_pstoreu(float* to, const v4f& from) +{ + // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html + // Warning: not thread safe! 
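+  // The store straddles two aligned quadwords: load both, splice the unaligned
+  // data into them with permutes, and write them back (hence not thread safe).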
+ __vector unsigned char MSQ, LSQ, edges; + __vector unsigned char edgeAlign, align; + + MSQ = vec_ld(0, (unsigned char *)to); // most significant quadword + LSQ = vec_ld(15, (unsigned char *)to); // least significant quadword + edgeAlign = vec_lvsl(0, to); // permute map to extract edges + edges=vec_perm(LSQ,MSQ,edgeAlign); // extract the edges + align = vec_lvsr( 0, to ); // permute map to misalign data + MSQ = vec_perm(edges,(__vector unsigned char)from,align); // misalign the data (MSQ) + LSQ = vec_perm((__vector unsigned char)from,edges,align); // misalign the data (LSQ) + vec_st( LSQ, 15, (unsigned char *)to ); // Store the LSQ part first + vec_st( MSQ, 0, (unsigned char *)to ); // Store the MSQ part +} + +template<> inline void ei_pstoreu(int* to , const v4i& from ) +{ + // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html + // Warning: not thread safe! + __vector unsigned char MSQ, LSQ, edges; + __vector unsigned char edgeAlign, align; + + MSQ = vec_ld(0, (unsigned char *)to); // most significant quadword + LSQ = vec_ld(15, (unsigned char *)to); // least significant quadword + edgeAlign = vec_lvsl(0, to); // permute map to extract edges + edges=vec_perm(LSQ,MSQ,edgeAlign); // extract the edges + align = vec_lvsr( 0, to ); // permute map to misalign data + MSQ = vec_perm(edges,(__vector unsigned char)from,align); // misalign the data (MSQ) + LSQ = vec_perm((__vector unsigned char)from,edges,align); // misalign the data (LSQ) + vec_st( LSQ, 15, (unsigned char *)to ); // Store the LSQ part first + vec_st( MSQ, 0, (unsigned char *)to ); // Store the MSQ part +} + +template<> inline float ei_pfirst(const v4f& a) +{ + float __attribute__(aligned(16)) af[4]; + vec_st(a, 0, af); + return af[0]; +} + +template<> inline int ei_pfirst(const v4i& a) +{ + int __attribute__(aligned(16)) ai[4]; + vec_st(a, 0, ai); + return ai[0]; +} + +inline v4f ei_preduxp(const v4f* vecs) +{ + v4f v[4], sum[4]; + + // It's easier and faster to transpose then add as columns + // Check: http://www.freevec.org/function/matrix_4x4_transpose_floats for explanation + // Do the transpose, first set of moves + v[0] = vec_mergeh(vecs[0], vecs[2]); + v[1] = vec_mergel(vecs[0], vecs[2]); + v[2] = vec_mergeh(vecs[1], vecs[3]); + v[3] = vec_mergel(vecs[1], vecs[3]); + // Get the resulting vectors + sum[0] = vec_mergeh(v[0], v[2]); + sum[1] = vec_mergel(v[0], v[2]); + sum[2] = vec_mergeh(v[1], v[3]); + sum[3] = vec_mergel(v[1], v[3]); + + // Now do the summation: + // Lines 0+1 + sum[0] = vec_add(sum[0], sum[1]); + // Lines 2+3 + sum[1] = vec_add(sum[2], sum[3]); + // Add the results + sum[0] = vec_add(sum[0], sum[1]); + return sum[0]; +} + +inline float ei_predux(const v4f& a) +{ + v4f b, sum; + b = (v4f)vec_sld(a, a, 8); + sum = vec_add(a, b); + b = (v4f)vec_sld(sum, sum, 4); + sum = vec_add(sum, b); + return ei_pfirst(sum); +} + +inline v4i ei_preduxp(const v4i* vecs) +{ + v4i v[4], sum[4]; + + // It's easier and faster to transpose then add as columns + // Check: http://www.freevec.org/function/matrix_4x4_transpose_floats for explanation + // Do the transpose, first set of moves + v[0] = vec_mergeh(vecs[0], vecs[2]); + v[1] = vec_mergel(vecs[0], vecs[2]); + v[2] = vec_mergeh(vecs[1], vecs[3]); + v[3] = vec_mergel(vecs[1], vecs[3]); + // Get the resulting vectors + sum[0] = vec_mergeh(v[0], v[2]); + sum[1] = vec_mergel(v[0], v[2]); + sum[2] = vec_mergeh(v[1], v[3]); + sum[3] = vec_mergel(v[1], v[3]); + + // Now do the summation: + // Lines 0+1 + sum[0] = vec_add(sum[0], sum[1]); + // Lines 2+3 + 
sum[1] = vec_add(sum[2], sum[3]); + // Add the results + sum[0] = vec_add(sum[0], sum[1]); + return sum[0]; +} + +inline int ei_predux(const v4i& a) +{ + USE_CONST_v0i; + v4i sum; + sum = vec_sums(a, v0i); + sum = vec_sld(sum, v0i, 12); + return ei_pfirst(sum); +} + +template +struct ei_palign_impl +{ + inline static void run(v4f& first, const v4f& second) + { + first = vec_sld(first, second, Offset*4); + } +}; + +template +struct ei_palign_impl +{ + inline static void run(v4i& first, const v4i& second) + { + first = vec_sld(first, second, Offset*4); + } +}; + +#endif // EIGEN_PACKET_MATH_ALTIVEC_H diff --git a/extern/Eigen2/Eigen/src/Core/arch/SSE/PacketMath.h b/extern/Eigen2/Eigen/src/Core/arch/SSE/PacketMath.h new file mode 100644 index 00000000000..9ca65b9be5b --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/arch/SSE/PacketMath.h @@ -0,0 +1,321 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
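+
+/* A sketch of how the ei_p* kernels from these PacketMath headers are typically used
+ * (illustrative only; it assumes a 4-wide float packet path such as the AltiVec or SSE one):
+ * \code
+ * typedef ei_packet_traits<float>::type Packet;
+ * float a[4] = {1, 2, 3, 4}, b[4] = {5, 6, 7, 8}, c[4];
+ * Packet pa = ei_ploadu(a);               // unaligned load, no alignment assumption
+ * Packet pb = ei_ploadu(b);
+ * ei_pstoreu(c, ei_padd(pa, pb));         // c == {6, 8, 10, 12}
+ * float dot = ei_predux(ei_pmul(pa, pb)); // 5 + 12 + 21 + 32 == 70
+ * \endcode
+ */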
+ +#ifndef EIGEN_PACKET_MATH_SSE_H +#define EIGEN_PACKET_MATH_SSE_H + +#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD +#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 16 +#endif + +template<> struct ei_packet_traits { typedef __m128 type; enum {size=4}; }; +template<> struct ei_packet_traits { typedef __m128d type; enum {size=2}; }; +template<> struct ei_packet_traits { typedef __m128i type; enum {size=4}; }; + +template<> struct ei_unpacket_traits<__m128> { typedef float type; enum {size=4}; }; +template<> struct ei_unpacket_traits<__m128d> { typedef double type; enum {size=2}; }; +template<> struct ei_unpacket_traits<__m128i> { typedef int type; enum {size=4}; }; + +template<> EIGEN_STRONG_INLINE __m128 ei_pset1(const float& from) { return _mm_set1_ps(from); } +template<> EIGEN_STRONG_INLINE __m128d ei_pset1(const double& from) { return _mm_set1_pd(from); } +template<> EIGEN_STRONG_INLINE __m128i ei_pset1(const int& from) { return _mm_set1_epi32(from); } + +template<> EIGEN_STRONG_INLINE __m128 ei_padd<__m128>(const __m128& a, const __m128& b) { return _mm_add_ps(a,b); } +template<> EIGEN_STRONG_INLINE __m128d ei_padd<__m128d>(const __m128d& a, const __m128d& b) { return _mm_add_pd(a,b); } +template<> EIGEN_STRONG_INLINE __m128i ei_padd<__m128i>(const __m128i& a, const __m128i& b) { return _mm_add_epi32(a,b); } + +template<> EIGEN_STRONG_INLINE __m128 ei_psub<__m128>(const __m128& a, const __m128& b) { return _mm_sub_ps(a,b); } +template<> EIGEN_STRONG_INLINE __m128d ei_psub<__m128d>(const __m128d& a, const __m128d& b) { return _mm_sub_pd(a,b); } +template<> EIGEN_STRONG_INLINE __m128i ei_psub<__m128i>(const __m128i& a, const __m128i& b) { return _mm_sub_epi32(a,b); } + +template<> EIGEN_STRONG_INLINE __m128 ei_pmul<__m128>(const __m128& a, const __m128& b) { return _mm_mul_ps(a,b); } +template<> EIGEN_STRONG_INLINE __m128d ei_pmul<__m128d>(const __m128d& a, const __m128d& b) { return _mm_mul_pd(a,b); } +template<> EIGEN_STRONG_INLINE __m128i ei_pmul<__m128i>(const __m128i& a, const __m128i& b) +{ + return _mm_or_si128( + _mm_and_si128( + _mm_mul_epu32(a,b), + _mm_setr_epi32(0xffffffff,0,0xffffffff,0)), + _mm_slli_si128( + _mm_and_si128( + _mm_mul_epu32(_mm_srli_si128(a,4),_mm_srli_si128(b,4)), + _mm_setr_epi32(0xffffffff,0,0xffffffff,0)), 4)); +} + +template<> EIGEN_STRONG_INLINE __m128 ei_pdiv<__m128>(const __m128& a, const __m128& b) { return _mm_div_ps(a,b); } +template<> EIGEN_STRONG_INLINE __m128d ei_pdiv<__m128d>(const __m128d& a, const __m128d& b) { return _mm_div_pd(a,b); } +template<> EIGEN_STRONG_INLINE __m128i ei_pdiv<__m128i>(const __m128i& /*a*/, const __m128i& /*b*/) +{ ei_assert(false && "packet integer division are not supported by SSE"); + __m128i dummy = ei_pset1(0); + return dummy; +} + +// for some weird raisons, it has to be overloaded for packet integer +template<> EIGEN_STRONG_INLINE __m128i ei_pmadd(const __m128i& a, const __m128i& b, const __m128i& c) { return ei_padd(ei_pmul(a,b), c); } + +template<> EIGEN_STRONG_INLINE __m128 ei_pmin<__m128>(const __m128& a, const __m128& b) { return _mm_min_ps(a,b); } +template<> EIGEN_STRONG_INLINE __m128d ei_pmin<__m128d>(const __m128d& a, const __m128d& b) { return _mm_min_pd(a,b); } +// FIXME this vectorized min operator is likely to be slower than the standard one +template<> EIGEN_STRONG_INLINE __m128i ei_pmin<__m128i>(const __m128i& a, const __m128i& b) +{ + __m128i mask = _mm_cmplt_epi32(a,b); + return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b)); +} + +template<> EIGEN_STRONG_INLINE __m128 
ei_pmax<__m128>(const __m128& a, const __m128& b) { return _mm_max_ps(a,b); } +template<> EIGEN_STRONG_INLINE __m128d ei_pmax<__m128d>(const __m128d& a, const __m128d& b) { return _mm_max_pd(a,b); } +// FIXME this vectorized max operator is likely to be slower than the standard one +template<> EIGEN_STRONG_INLINE __m128i ei_pmax<__m128i>(const __m128i& a, const __m128i& b) +{ + __m128i mask = _mm_cmpgt_epi32(a,b); + return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b)); +} + +template<> EIGEN_STRONG_INLINE __m128 ei_pload(const float* from) { return _mm_load_ps(from); } +template<> EIGEN_STRONG_INLINE __m128d ei_pload(const double* from) { return _mm_load_pd(from); } +template<> EIGEN_STRONG_INLINE __m128i ei_pload(const int* from) { return _mm_load_si128(reinterpret_cast(from)); } + +template<> EIGEN_STRONG_INLINE __m128 ei_ploadu(const float* from) { return _mm_loadu_ps(from); } +// template<> EIGEN_STRONG_INLINE __m128 ei_ploadu(const float* from) { +// if (size_t(from)&0xF) +// return _mm_loadu_ps(from); +// else +// return _mm_loadu_ps(from); +// } +template<> EIGEN_STRONG_INLINE __m128d ei_ploadu(const double* from) { return _mm_loadu_pd(from); } +template<> EIGEN_STRONG_INLINE __m128i ei_ploadu(const int* from) { return _mm_loadu_si128(reinterpret_cast(from)); } + +template<> EIGEN_STRONG_INLINE void ei_pstore(float* to, const __m128& from) { _mm_store_ps(to, from); } +template<> EIGEN_STRONG_INLINE void ei_pstore(double* to, const __m128d& from) { _mm_store_pd(to, from); } +template<> EIGEN_STRONG_INLINE void ei_pstore(int* to, const __m128i& from) { _mm_store_si128(reinterpret_cast<__m128i*>(to), from); } + +template<> EIGEN_STRONG_INLINE void ei_pstoreu(float* to, const __m128& from) { _mm_storeu_ps(to, from); } +template<> EIGEN_STRONG_INLINE void ei_pstoreu(double* to, const __m128d& from) { _mm_storeu_pd(to, from); } +template<> EIGEN_STRONG_INLINE void ei_pstoreu(int* to, const __m128i& from) { _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from); } + +#ifdef _MSC_VER +// this fix internal compilation error +template<> EIGEN_STRONG_INLINE float ei_pfirst<__m128>(const __m128& a) { float x = _mm_cvtss_f32(a); return x; } +template<> EIGEN_STRONG_INLINE double ei_pfirst<__m128d>(const __m128d& a) { double x = _mm_cvtsd_f64(a); return x; } +template<> EIGEN_STRONG_INLINE int ei_pfirst<__m128i>(const __m128i& a) { int x = _mm_cvtsi128_si32(a); return x; } +#else +template<> EIGEN_STRONG_INLINE float ei_pfirst<__m128>(const __m128& a) { return _mm_cvtss_f32(a); } +template<> EIGEN_STRONG_INLINE double ei_pfirst<__m128d>(const __m128d& a) { return _mm_cvtsd_f64(a); } +template<> EIGEN_STRONG_INLINE int ei_pfirst<__m128i>(const __m128i& a) { return _mm_cvtsi128_si32(a); } +#endif + +#ifdef __SSE3__ +// TODO implement SSE2 versions as well as integer versions +template<> EIGEN_STRONG_INLINE __m128 ei_preduxp<__m128>(const __m128* vecs) +{ + return _mm_hadd_ps(_mm_hadd_ps(vecs[0], vecs[1]),_mm_hadd_ps(vecs[2], vecs[3])); +} +template<> EIGEN_STRONG_INLINE __m128d ei_preduxp<__m128d>(const __m128d* vecs) +{ + return _mm_hadd_pd(vecs[0], vecs[1]); +} +// SSSE3 version: +// EIGEN_STRONG_INLINE __m128i ei_preduxp(const __m128i* vecs) +// { +// return _mm_hadd_epi32(_mm_hadd_epi32(vecs[0], vecs[1]),_mm_hadd_epi32(vecs[2], vecs[3])); +// } + +template<> EIGEN_STRONG_INLINE float ei_predux<__m128>(const __m128& a) +{ + __m128 tmp0 = _mm_hadd_ps(a,a); + return ei_pfirst(_mm_hadd_ps(tmp0, tmp0)); +} + +template<> EIGEN_STRONG_INLINE double ei_predux<__m128d>(const __m128d& a) { 
return ei_pfirst(_mm_hadd_pd(a, a)); } + +// SSSE3 version: +// EIGEN_STRONG_INLINE float ei_predux(const __m128i& a) +// { +// __m128i tmp0 = _mm_hadd_epi32(a,a); +// return ei_pfirst(_mm_hadd_epi32(tmp0, tmp0)); +// } +#else +// SSE2 versions +template<> EIGEN_STRONG_INLINE float ei_predux<__m128>(const __m128& a) +{ + __m128 tmp = _mm_add_ps(a, _mm_movehl_ps(a,a)); + return ei_pfirst(_mm_add_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1))); +} +template<> EIGEN_STRONG_INLINE double ei_predux<__m128d>(const __m128d& a) +{ + return ei_pfirst(_mm_add_sd(a, _mm_unpackhi_pd(a,a))); +} + +template<> EIGEN_STRONG_INLINE __m128 ei_preduxp<__m128>(const __m128* vecs) +{ + __m128 tmp0, tmp1, tmp2; + tmp0 = _mm_unpacklo_ps(vecs[0], vecs[1]); + tmp1 = _mm_unpackhi_ps(vecs[0], vecs[1]); + tmp2 = _mm_unpackhi_ps(vecs[2], vecs[3]); + tmp0 = _mm_add_ps(tmp0, tmp1); + tmp1 = _mm_unpacklo_ps(vecs[2], vecs[3]); + tmp1 = _mm_add_ps(tmp1, tmp2); + tmp2 = _mm_movehl_ps(tmp1, tmp0); + tmp0 = _mm_movelh_ps(tmp0, tmp1); + return _mm_add_ps(tmp0, tmp2); +} + +template<> EIGEN_STRONG_INLINE __m128d ei_preduxp<__m128d>(const __m128d* vecs) +{ + return _mm_add_pd(_mm_unpacklo_pd(vecs[0], vecs[1]), _mm_unpackhi_pd(vecs[0], vecs[1])); +} +#endif // SSE3 + +template<> EIGEN_STRONG_INLINE int ei_predux<__m128i>(const __m128i& a) +{ + __m128i tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a,a)); + return ei_pfirst(tmp) + ei_pfirst(_mm_shuffle_epi32(tmp, 1)); +} + +template<> EIGEN_STRONG_INLINE __m128i ei_preduxp<__m128i>(const __m128i* vecs) +{ + __m128i tmp0, tmp1, tmp2; + tmp0 = _mm_unpacklo_epi32(vecs[0], vecs[1]); + tmp1 = _mm_unpackhi_epi32(vecs[0], vecs[1]); + tmp2 = _mm_unpackhi_epi32(vecs[2], vecs[3]); + tmp0 = _mm_add_epi32(tmp0, tmp1); + tmp1 = _mm_unpacklo_epi32(vecs[2], vecs[3]); + tmp1 = _mm_add_epi32(tmp1, tmp2); + tmp2 = _mm_unpacklo_epi64(tmp0, tmp1); + tmp0 = _mm_unpackhi_epi64(tmp0, tmp1); + return _mm_add_epi32(tmp0, tmp2); +} + +#if (defined __GNUC__) +// template <> EIGEN_STRONG_INLINE __m128 ei_pmadd(const __m128& a, const __m128& b, const __m128& c) +// { +// __m128 res = b; +// asm("mulps %[a], %[b] \n\taddps %[c], %[b]" : [b] "+x" (res) : [a] "x" (a), [c] "x" (c)); +// return res; +// } +// EIGEN_STRONG_INLINE __m128i _mm_alignr_epi8(const __m128i& a, const __m128i& b, const int i) +// { +// __m128i res = a; +// asm("palignr %[i], %[a], %[b] " : [b] "+x" (res) : [a] "x" (a), [i] "i" (i)); +// return res; +// } +#endif + +#ifdef __SSSE3__ +// SSSE3 versions +template +struct ei_palign_impl +{ + EIGEN_STRONG_INLINE static void run(__m128& first, const __m128& second) + { + if (Offset!=0) + first = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(second), _mm_castps_si128(first), Offset*4)); + } +}; + +template +struct ei_palign_impl +{ + EIGEN_STRONG_INLINE static void run(__m128i& first, const __m128i& second) + { + if (Offset!=0) + first = _mm_alignr_epi8(second,first, Offset*4); + } +}; + +template +struct ei_palign_impl +{ + EIGEN_STRONG_INLINE static void run(__m128d& first, const __m128d& second) + { + if (Offset==1) + first = _mm_castsi128_pd(_mm_alignr_epi8(_mm_castpd_si128(second), _mm_castpd_si128(first), 8)); + } +}; +#else +// SSE2 versions +template +struct ei_palign_impl +{ + EIGEN_STRONG_INLINE static void run(__m128& first, const __m128& second) + { + if (Offset==1) + { + first = _mm_move_ss(first,second); + first = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(first),0x39)); + } + else if (Offset==2) + { + first = _mm_movehl_ps(first,first); + first = _mm_movelh_ps(first,second); + } + 
else if (Offset==3) + { + first = _mm_move_ss(first,second); + first = _mm_shuffle_ps(first,second,0x93); + } + } +}; + +template +struct ei_palign_impl +{ + EIGEN_STRONG_INLINE static void run(__m128i& first, const __m128i& second) + { + if (Offset==1) + { + first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second))); + first = _mm_shuffle_epi32(first,0x39); + } + else if (Offset==2) + { + first = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(first))); + first = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second))); + } + else if (Offset==3) + { + first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second))); + first = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second),0x93)); + } + } +}; + +template +struct ei_palign_impl +{ + EIGEN_STRONG_INLINE static void run(__m128d& first, const __m128d& second) + { + if (Offset==1) + { + first = _mm_castps_pd(_mm_movehl_ps(_mm_castpd_ps(first),_mm_castpd_ps(first))); + first = _mm_castps_pd(_mm_movelh_ps(_mm_castpd_ps(first),_mm_castpd_ps(second))); + } + } +}; +#endif + +#define ei_vec4f_swizzle1(v,p,q,r,s) \ + (_mm_castsi128_ps(_mm_shuffle_epi32( _mm_castps_si128(v), ((s)<<6|(r)<<4|(q)<<2|(p))))) + +#endif // EIGEN_PACKET_MATH_SSE_H diff --git a/extern/Eigen2/Eigen/src/Core/util/Constants.h b/extern/Eigen2/Eigen/src/Core/util/Constants.h new file mode 100644 index 00000000000..296c3caa5f6 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/util/Constants.h @@ -0,0 +1,254 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_CONSTANTS_H +#define EIGEN_CONSTANTS_H + +/** This value means that a quantity is not known at compile-time, and that instead the value is + * stored in some runtime variable. + * + * Explanation for the choice of this value: + * - It should be positive and larger than any reasonable compile-time-fixed number of rows or columns. + * This allows to simplify many compile-time conditions throughout Eigen. + * - It should be smaller than the sqrt of INT_MAX. Indeed, we often multiply a number of rows with a number + * of columns in order to compute a number of coefficients. Even if we guard that with an "if" checking whether + * the values are Dynamic, we still get a compiler warning "integer overflow". So the only way to get around + * it would be a meta-selector. 
Doing this everywhere would reduce code readability and lenghten compilation times. + * Also, disabling compiler warnings for integer overflow, sounds like a bad idea. + * + * If you wish to port Eigen to a platform where sizeof(int)==2, it is perfectly possible to set Dynamic to, say, 100. + */ +const int Dynamic = 10000; + +/** This value means +Infinity; it is currently used only as the p parameter to MatrixBase::lpNorm(). + * The value Infinity there means the L-infinity norm. + */ +const int Infinity = -1; + +/** \defgroup flags flags + * \ingroup Core_Module + * + * These are the possible bits which can be OR'ed to constitute the flags of a matrix or + * expression. + * + * It is important to note that these flags are a purely compile-time notion. They are a compile-time property of + * an expression type, implemented as enum's. They are not stored in memory at runtime, and they do not incur any + * runtime overhead. + * + * \sa MatrixBase::Flags + */ + +/** \ingroup flags + * + * for a matrix, this means that the storage order is row-major. + * If this bit is not set, the storage order is column-major. + * For an expression, this determines the storage order of + * the matrix created by evaluation of that expression. */ +const unsigned int RowMajorBit = 0x1; + +/** \ingroup flags + * + * means the expression should be evaluated by the calling expression */ +const unsigned int EvalBeforeNestingBit = 0x2; + +/** \ingroup flags + * + * means the expression should be evaluated before any assignement */ +const unsigned int EvalBeforeAssigningBit = 0x4; + +/** \ingroup flags + * + * Short version: means the expression might be vectorized + * + * Long version: means that the coefficients can be handled by packets + * and start at a memory location whose alignment meets the requirements + * of the present CPU architecture for optimized packet access. In the fixed-size + * case, there is the additional condition that the total size of the coefficients + * array is a multiple of the packet size, so that it is possible to access all the + * coefficients by packets. In the dynamic-size case, there is no such condition + * on the total size, so it might not be possible to access the few last coeffs + * by packets. + * + * \note This bit can be set regardless of whether vectorization is actually enabled. + * To check for actual vectorizability, see \a ActualPacketAccessBit. + */ +const unsigned int PacketAccessBit = 0x8; + +#ifdef EIGEN_VECTORIZE +/** \ingroup flags + * + * If vectorization is enabled (EIGEN_VECTORIZE is defined) this constant + * is set to the value \a PacketAccessBit. + * + * If vectorization is not enabled (EIGEN_VECTORIZE is not defined) this constant + * is set to the value 0. + */ +const unsigned int ActualPacketAccessBit = PacketAccessBit; +#else +const unsigned int ActualPacketAccessBit = 0x0; +#endif + +/** \ingroup flags + * + * Short version: means the expression can be seen as 1D vector. + * + * Long version: means that one can access the coefficients + * of this expression by coeff(int), and coeffRef(int) in the case of a lvalue expression. These + * index-based access methods are guaranteed + * to not have to do any runtime computation of a (row, col)-pair from the index, so that it + * is guaranteed that whenever it is available, index-based access is at least as fast as + * (row,col)-based access. Expressions for which that isn't possible don't have the LinearAccessBit. 
+ * + * If both PacketAccessBit and LinearAccessBit are set, then the + * packets of this expression can be accessed by packet(int), and writePacket(int) in the case of a + * lvalue expression. + * + * Typically, all vector expressions have the LinearAccessBit, but there is one exception: + * Product expressions don't have it, because it would be troublesome for vectorization, even when the + * Product is a vector expression. Thus, vector Product expressions allow index-based coefficient access but + * not index-based packet access, so they don't have the LinearAccessBit. + */ +const unsigned int LinearAccessBit = 0x10; + +/** \ingroup flags + * + * Means that the underlying array of coefficients can be directly accessed. This means two things. + * First, references to the coefficients must be available through coeffRef(int, int). This rules out read-only + * expressions whose coefficients are computed on demand by coeff(int, int). Second, the memory layout of the + * array of coefficients must be exactly the natural one suggested by rows(), cols(), stride(), and the RowMajorBit. + * This rules out expressions such as DiagonalCoeffs, whose coefficients, though referencable, do not have + * such a regular memory layout. + */ +const unsigned int DirectAccessBit = 0x20; + +/** \ingroup flags + * + * means the first coefficient packet is guaranteed to be aligned */ +const unsigned int AlignedBit = 0x40; + +/** \ingroup flags + * + * means all diagonal coefficients are equal to 0 */ +const unsigned int ZeroDiagBit = 0x80; + +/** \ingroup flags + * + * means all diagonal coefficients are equal to 1 */ +const unsigned int UnitDiagBit = 0x100; + +/** \ingroup flags + * + * means the matrix is selfadjoint (M=M*). */ +const unsigned int SelfAdjointBit = 0x200; + +/** \ingroup flags + * + * means the strictly lower triangular part is 0 */ +const unsigned int UpperTriangularBit = 0x400; + +/** \ingroup flags + * + * means the strictly upper triangular part is 0 */ +const unsigned int LowerTriangularBit = 0x800; + +/** \ingroup flags + * + * means the expression includes sparse matrices and the sparse path has to be taken. 
*/ +const unsigned int SparseBit = 0x1000; + +// list of flags that are inherited by default +const unsigned int HereditaryBits = RowMajorBit + | EvalBeforeNestingBit + | EvalBeforeAssigningBit + | SparseBit; + +// Possible values for the Mode parameter of part() and of extract() +const unsigned int UpperTriangular = UpperTriangularBit; +const unsigned int StrictlyUpperTriangular = UpperTriangularBit | ZeroDiagBit; +const unsigned int LowerTriangular = LowerTriangularBit; +const unsigned int StrictlyLowerTriangular = LowerTriangularBit | ZeroDiagBit; +const unsigned int SelfAdjoint = SelfAdjointBit; + +// additional possible values for the Mode parameter of extract() +const unsigned int UnitUpperTriangular = UpperTriangularBit | UnitDiagBit; +const unsigned int UnitLowerTriangular = LowerTriangularBit | UnitDiagBit; +const unsigned int Diagonal = UpperTriangular | LowerTriangular; + +enum { Aligned, Unaligned }; +enum { ForceAligned, AsRequested }; +enum { ConditionalJumpCost = 5 }; +enum CornerType { TopLeft, TopRight, BottomLeft, BottomRight }; +enum DirectionType { Vertical, Horizontal }; +enum ProductEvaluationMode { NormalProduct, CacheFriendlyProduct, DiagonalProduct, SparseTimeSparseProduct, SparseTimeDenseProduct, DenseTimeSparseProduct }; + +enum { + /** \internal Equivalent to a slice vectorization for fixed-size matrices having good alignment + * and good size */ + InnerVectorization, + /** \internal Vectorization path using a single loop plus scalar loops for the + * unaligned boundaries */ + LinearVectorization, + /** \internal Generic vectorization path using one vectorized loop per row/column with some + * scalar loops to handle the unaligned boundaries */ + SliceVectorization, + NoVectorization +}; + +enum { + NoUnrolling, + InnerUnrolling, + CompleteUnrolling +}; + +enum { + ColMajor = 0, + RowMajor = 0x1, // it is only a coincidence that this is equal to RowMajorBit -- don't rely on that + /** \internal Don't require alignment for the matrix itself (the array of coefficients, if dynamically allocated, may still be + requested to be aligned) */ + DontAlign = 0, + /** \internal Align the matrix itself if it is vectorizable fixed-size */ + AutoAlign = 0x2 +}; + +enum { + IsDense = 0, + IsSparse = SparseBit, + NoDirectAccess = 0, + HasDirectAccess = DirectAccessBit +}; + +const int EiArch_Generic = 0x0; +const int EiArch_SSE = 0x1; +const int EiArch_AltiVec = 0x2; + +#if defined EIGEN_VECTORIZE_SSE + const int EiArch = EiArch_SSE; +#elif defined EIGEN_VECTORIZE_ALTIVEC + const int EiArch = EiArch_AltiVec; +#else + const int EiArch = EiArch_Generic; +#endif + +#endif // EIGEN_CONSTANTS_H diff --git a/extern/Eigen2/Eigen/src/Core/util/DisableMSVCWarnings.h b/extern/Eigen2/Eigen/src/Core/util/DisableMSVCWarnings.h new file mode 100644 index 00000000000..765ddecc53c --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/util/DisableMSVCWarnings.h @@ -0,0 +1,5 @@ + +#ifdef _MSC_VER + #pragma warning( push ) + #pragma warning( disable : 4181 4244 4127 4211 4717 ) +#endif diff --git a/extern/Eigen2/Eigen/src/Core/util/EnableMSVCWarnings.h b/extern/Eigen2/Eigen/src/Core/util/EnableMSVCWarnings.h new file mode 100644 index 00000000000..8bd61601ebb --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/util/EnableMSVCWarnings.h @@ -0,0 +1,4 @@ + +#ifdef _MSC_VER + #pragma warning( pop ) +#endif diff --git a/extern/Eigen2/Eigen/src/Core/util/ForwardDeclarations.h b/extern/Eigen2/Eigen/src/Core/util/ForwardDeclarations.h new file mode 100644 index 00000000000..a72a40b1bfc --- /dev/null +++ 
b/extern/Eigen2/Eigen/src/Core/util/ForwardDeclarations.h @@ -0,0 +1,125 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2006-2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_FORWARDDECLARATIONS_H +#define EIGEN_FORWARDDECLARATIONS_H + +template struct ei_traits; +template struct NumTraits; + +template class Matrix; + +template class Flagged; +template class NestByValue; +template class SwapWrapper; +template class Minor; +template::Flags&DirectAccessBit ? DirectAccessBit + : ei_traits::Flags&SparseBit> class Block; +template class Transpose; +template class Conjugate; +template class CwiseNullaryOp; +template class CwiseUnaryOp; +template class CwiseBinaryOp; +template class Product; +template class DiagonalMatrix; +template class DiagonalCoeffs; +template class Map; +template class Part; +template class Extract; +template class Cwise; +template class WithFormat; +template struct CommaInitializer; + + +template struct ei_product_mode; +template::value> struct ProductReturnType; + +template struct ei_scalar_sum_op; +template struct ei_scalar_difference_op; +template struct ei_scalar_product_op; +template struct ei_scalar_quotient_op; +template struct ei_scalar_opposite_op; +template struct ei_scalar_conjugate_op; +template struct ei_scalar_real_op; +template struct ei_scalar_imag_op; +template struct ei_scalar_abs_op; +template struct ei_scalar_abs2_op; +template struct ei_scalar_sqrt_op; +template struct ei_scalar_exp_op; +template struct ei_scalar_log_op; +template struct ei_scalar_cos_op; +template struct ei_scalar_sin_op; +template struct ei_scalar_pow_op; +template struct ei_scalar_inverse_op; +template struct ei_scalar_square_op; +template struct ei_scalar_cube_op; +template struct ei_scalar_cast_op; +template struct ei_scalar_multiple_op; +template struct ei_scalar_quotient1_op; +template struct ei_scalar_min_op; +template struct ei_scalar_max_op; +template struct ei_scalar_random_op; +template struct ei_scalar_add_op; +template struct ei_scalar_constant_op; +template struct ei_scalar_identity_op; + +struct IOFormat; + +template +void ei_cache_friendly_product( + int _rows, int _cols, int depth, + bool _lhsRowMajor, const Scalar* _lhs, int _lhsStride, + bool _rhsRowMajor, const Scalar* _rhs, int _rhsStride, + bool resRowMajor, Scalar* res, int resStride); + +// Array module +template class Select; +template class PartialReduxExpr; +template class PartialRedux; + +template class LU; +template class QR; +template class SVD; +template class LLT; 
+template class LDLT; + +// Geometry module: +template class RotationBase; +template class Cross; +template class Quaternion; +template class Rotation2D; +template class AngleAxis; +template class Transform; +template class ParametrizedLine; +template class Hyperplane; +template class Translation; +template class Scaling; + +// Sparse module: +template class SparseProduct; + +#endif // EIGEN_FORWARDDECLARATIONS_H diff --git a/extern/Eigen2/Eigen/src/Core/util/Macros.h b/extern/Eigen2/Eigen/src/Core/util/Macros.h new file mode 100644 index 00000000000..6be6f096055 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/util/Macros.h @@ -0,0 +1,273 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_MACROS_H +#define EIGEN_MACROS_H + +#undef minor + +#define EIGEN_WORLD_VERSION 2 +#define EIGEN_MAJOR_VERSION 0 +#define EIGEN_MINOR_VERSION 6 + +#define EIGEN_VERSION_AT_LEAST(x,y,z) (EIGEN_WORLD_VERSION>x || (EIGEN_WORLD_VERSION>=x && \ + (EIGEN_MAJOR_VERSION>y || (EIGEN_MAJOR_VERSION>=y && \ + EIGEN_MINOR_VERSION>=z)))) + +// 16 byte alignment is only useful for vectorization. Since it affects the ABI, we need to enable 16 byte alignment on all +// platforms where vectorization might be enabled. In theory we could always enable alignment, but it can be a cause of problems +// on some platforms, so we just disable it in certain common platform (compiler+architecture combinations) to avoid these problems. +#if defined(__GNUC__) && !(defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || defined(__ia64__)) +#define EIGEN_GCC_AND_ARCH_DOESNT_WANT_ALIGNMENT 1 +#else +#define EIGEN_GCC_AND_ARCH_DOESNT_WANT_ALIGNMENT 0 +#endif + +#if defined(__GNUC__) && (__GNUC__ <= 3) +#define EIGEN_GCC3_OR_OLDER 1 +#else +#define EIGEN_GCC3_OR_OLDER 0 +#endif + +// FIXME vectorization + alignment is completely disabled with sun studio +#if !EIGEN_GCC_AND_ARCH_DOESNT_WANT_ALIGNMENT && !EIGEN_GCC3_OR_OLDER && !defined(__SUNPRO_CC) + #define EIGEN_ARCH_WANTS_ALIGNMENT 1 +#else + #define EIGEN_ARCH_WANTS_ALIGNMENT 0 +#endif + +// EIGEN_ALIGN is the true test whether we want to align or not. It takes into account both the user choice to explicitly disable +// alignment (EIGEN_DONT_ALIGN) and the architecture config (EIGEN_ARCH_WANTS_ALIGNMENT). Henceforth, only EIGEN_ALIGN should be used. 
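// Illustrative sketch (not part of the original Eigen sources): as a minimal
// example of how the alignment switch described above is meant to be used, a
// client that wants to opt out of 16-byte alignment defines EIGEN_DONT_ALIGN
// before its first Eigen include, which forces EIGEN_ALIGN to 0 in the check
// that follows (and, per the #error below, this is only valid when
// vectorization is not enabled):
//
//   #define EIGEN_DONT_ALIGN   // must appear before any Eigen header
//   #include <Eigen/Core>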
+#if EIGEN_ARCH_WANTS_ALIGNMENT && !defined(EIGEN_DONT_ALIGN) + #define EIGEN_ALIGN 1 +#else + #define EIGEN_ALIGN 0 + #ifdef EIGEN_VECTORIZE + #error "Vectorization enabled, but our platform checks say that we don't do 16 byte alignment on this platform. If you added vectorization for another architecture, you also need to edit this platform check." + #endif + #ifndef EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT + #define EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT + #endif +#endif + +#ifdef EIGEN_DEFAULT_TO_ROW_MAJOR +#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION RowMajor +#else +#define EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ColMajor +#endif + +/** \internal Defines the maximal loop size to enable meta unrolling of loops. + * Note that the value here is expressed in Eigen's own notion of "number of FLOPS", + * it does not correspond to the number of iterations or the number of instructions + */ +#ifndef EIGEN_UNROLLING_LIMIT +#define EIGEN_UNROLLING_LIMIT 100 +#endif + +/** \internal Define the maximal size in Bytes of blocks fitting in CPU cache. + * The current value is set to generate blocks of 256x256 for float + * + * Typically for a single-threaded application you would set that to 25% of the size of your CPU caches in bytes + */ +#ifndef EIGEN_TUNE_FOR_CPU_CACHE_SIZE +#define EIGEN_TUNE_FOR_CPU_CACHE_SIZE (sizeof(float)*256*256) +#endif + +// FIXME this should go away quickly +#ifdef EIGEN_TUNE_FOR_L2_CACHE_SIZE +#error EIGEN_TUNE_FOR_L2_CACHE_SIZE is now called EIGEN_TUNE_FOR_CPU_CACHE_SIZE. +#endif + +#define USING_PART_OF_NAMESPACE_EIGEN \ +EIGEN_USING_MATRIX_TYPEDEFS \ +using Eigen::Matrix; \ +using Eigen::MatrixBase; \ +using Eigen::ei_random; \ +using Eigen::ei_real; \ +using Eigen::ei_imag; \ +using Eigen::ei_conj; \ +using Eigen::ei_abs; \ +using Eigen::ei_abs2; \ +using Eigen::ei_sqrt; \ +using Eigen::ei_exp; \ +using Eigen::ei_log; \ +using Eigen::ei_sin; \ +using Eigen::ei_cos; + +#ifdef NDEBUG +# ifndef EIGEN_NO_DEBUG +# define EIGEN_NO_DEBUG +# endif +#endif + +#ifndef ei_assert +#ifdef EIGEN_NO_DEBUG +#define ei_assert(x) +#else +#define ei_assert(x) assert(x) +#endif +#endif + +#ifdef EIGEN_INTERNAL_DEBUGGING +#define ei_internal_assert(x) ei_assert(x) +#else +#define ei_internal_assert(x) +#endif + +#ifdef EIGEN_NO_DEBUG +#define EIGEN_ONLY_USED_FOR_DEBUG(x) (void)x +#else +#define EIGEN_ONLY_USED_FOR_DEBUG(x) +#endif + +// EIGEN_ALWAYS_INLINE_ATTRIB should be use in the declaration of function +// which should be inlined even in debug mode. 
+// FIXME with the always_inline attribute, +// gcc 3.4.x reports the following compilation error: +// Eval.h:91: sorry, unimplemented: inlining failed in call to 'const Eigen::Eval Eigen::MatrixBase::eval() const' +// : function body not available +#if EIGEN_GNUC_AT_LEAST(4,0) +#define EIGEN_ALWAYS_INLINE_ATTRIB __attribute__((always_inline)) +#else +#define EIGEN_ALWAYS_INLINE_ATTRIB +#endif + +// EIGEN_FORCE_INLINE means "inline as much as possible" +#if (defined _MSC_VER) +#define EIGEN_STRONG_INLINE __forceinline +#else +#define EIGEN_STRONG_INLINE inline +#endif + +#if (defined __GNUC__) +#define EIGEN_DONT_INLINE __attribute__((noinline)) +#elif (defined _MSC_VER) +#define EIGEN_DONT_INLINE __declspec(noinline) +#else +#define EIGEN_DONT_INLINE +#endif + +#if (defined __GNUC__) +#define EIGEN_DEPRECATED __attribute__((deprecated)) +#elif (defined _MSC_VER) +#define EIGEN_DEPRECATED __declspec(deprecated) +#else +#define EIGEN_DEPRECATED +#endif + +/* EIGEN_ALIGN_128 forces data to be 16-byte aligned, EVEN if vectorization (EIGEN_VECTORIZE) is disabled, + * so that vectorization doesn't affect binary compatibility. + * + * If we made alignment depend on whether or not EIGEN_VECTORIZE is defined, it would be impossible to link + * vectorized and non-vectorized code. + */ +#if !EIGEN_ALIGN +#define EIGEN_ALIGN_128 +#elif (defined __GNUC__) +#define EIGEN_ALIGN_128 __attribute__((aligned(16))) +#elif (defined _MSC_VER) +#define EIGEN_ALIGN_128 __declspec(align(16)) +#else +#error Please tell me what is the equivalent of __attribute__((aligned(16))) for your compiler +#endif + +#ifdef EIGEN_DONT_USE_RESTRICT_KEYWORD + #define EIGEN_RESTRICT +#endif +#ifndef EIGEN_RESTRICT + #define EIGEN_RESTRICT __restrict +#endif + +#ifndef EIGEN_STACK_ALLOCATION_LIMIT +#define EIGEN_STACK_ALLOCATION_LIMIT 1000000 +#endif + +#ifndef EIGEN_DEFAULT_IO_FORMAT +#define EIGEN_DEFAULT_IO_FORMAT Eigen::IOFormat() +#endif + +// format used in Eigen's documentation +// needed to define it here as escaping characters in CMake add_definition's argument seems very problematic. 
+#define EIGEN_DOCS_IO_FORMAT IOFormat(3, AlignCols, " ", "\n", "", "") + +#define EIGEN_INHERIT_ASSIGNMENT_OPERATOR(Derived, Op) \ +template \ +EIGEN_STRONG_INLINE Derived& operator Op(const Eigen::MatrixBase& other) \ +{ \ + return Base::operator Op(other.derived()); \ +} \ +EIGEN_STRONG_INLINE Derived& operator Op(const Derived& other) \ +{ \ + return Base::operator Op(other); \ +} + +#define EIGEN_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, Op) \ +template \ +EIGEN_STRONG_INLINE Derived& operator Op(const Other& scalar) \ +{ \ + return Base::operator Op(scalar); \ +} + +#define EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) \ +EIGEN_INHERIT_ASSIGNMENT_OPERATOR(Derived, =) \ +EIGEN_INHERIT_ASSIGNMENT_OPERATOR(Derived, +=) \ +EIGEN_INHERIT_ASSIGNMENT_OPERATOR(Derived, -=) \ +EIGEN_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, *=) \ +EIGEN_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=) + +#define _EIGEN_GENERIC_PUBLIC_INTERFACE(Derived, BaseClass) \ +typedef BaseClass Base; \ +typedef typename Eigen::ei_traits::Scalar Scalar; \ +typedef typename Eigen::NumTraits::Real RealScalar; \ +typedef typename Base::PacketScalar PacketScalar; \ +typedef typename Eigen::ei_nested::type Nested; \ +enum { RowsAtCompileTime = Eigen::ei_traits::RowsAtCompileTime, \ + ColsAtCompileTime = Eigen::ei_traits::ColsAtCompileTime, \ + MaxRowsAtCompileTime = Eigen::ei_traits::MaxRowsAtCompileTime, \ + MaxColsAtCompileTime = Eigen::ei_traits::MaxColsAtCompileTime, \ + Flags = Eigen::ei_traits::Flags, \ + CoeffReadCost = Eigen::ei_traits::CoeffReadCost, \ + SizeAtCompileTime = Base::SizeAtCompileTime, \ + MaxSizeAtCompileTime = Base::MaxSizeAtCompileTime, \ + IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; + +#define EIGEN_GENERIC_PUBLIC_INTERFACE(Derived) \ +_EIGEN_GENERIC_PUBLIC_INTERFACE(Derived, Eigen::MatrixBase) + +#define EIGEN_ENUM_MIN(a,b) (((int)a <= (int)b) ? (int)a : (int)b) +#define EIGEN_ENUM_MAX(a,b) (((int)a >= (int)b) ? (int)a : (int)b) + +// just an empty macro ! +#define EIGEN_EMPTY + +// concatenate two tokens +#define EIGEN_CAT2(a,b) a ## b +#define EIGEN_CAT(a,b) EIGEN_CAT2(a,b) + +// convert a token to a string +#define EIGEN_MAKESTRING2(a) #a +#define EIGEN_MAKESTRING(a) EIGEN_MAKESTRING2(a) + +#endif // EIGEN_MACROS_H diff --git a/extern/Eigen2/Eigen/src/Core/util/Memory.h b/extern/Eigen2/Eigen/src/Core/util/Memory.h new file mode 100644 index 00000000000..09ad39d5be9 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/util/Memory.h @@ -0,0 +1,368 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2008-2009 Benoit Jacob +// Copyright (C) 2009 Kenneth Riddile +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_MEMORY_H +#define EIGEN_MEMORY_H + +#if defined(__APPLE__) || defined(_WIN64) + #define EIGEN_MALLOC_ALREADY_ALIGNED 1 +#else + #define EIGEN_MALLOC_ALREADY_ALIGNED 0 +#endif + +#if ((defined _GNU_SOURCE) || ((defined _XOPEN_SOURCE) && (_XOPEN_SOURCE >= 600))) && (defined _POSIX_ADVISORY_INFO) && (_POSIX_ADVISORY_INFO > 0) + #define EIGEN_HAS_POSIX_MEMALIGN 1 +#else + #define EIGEN_HAS_POSIX_MEMALIGN 0 +#endif + +#ifdef EIGEN_VECTORIZE_SSE + #define EIGEN_HAS_MM_MALLOC 1 +#else + #define EIGEN_HAS_MM_MALLOC 0 +#endif + +/** \internal like malloc, but the returned pointer is guaranteed to be 16-byte aligned. + * Fast, but wastes 16 additional bytes of memory. + * Does not throw any exception. + */ +inline void* ei_handmade_aligned_malloc(size_t size) +{ + void *original = malloc(size+16); + void *aligned = reinterpret_cast((reinterpret_cast(original) & ~(size_t(15))) + 16); + *(reinterpret_cast(aligned) - 1) = original; + return aligned; +} + +/** \internal frees memory allocated with ei_handmade_aligned_malloc */ +inline void ei_handmade_aligned_free(void *ptr) +{ + if(ptr) + free(*(reinterpret_cast(ptr) - 1)); +} + +/** \internal allocates \a size bytes. The returned pointer is guaranteed to have 16 bytes alignment. + * On allocation error, the returned pointer is undefined, but if exceptions are enabled then a std::bad_alloc is thrown. + */ +inline void* ei_aligned_malloc(size_t size) +{ + #ifdef EIGEN_NO_MALLOC + ei_assert(false && "heap allocation is forbidden (EIGEN_NO_MALLOC is defined)"); + #endif + + void *result; + #if !EIGEN_ALIGN + result = malloc(size); + #elif EIGEN_MALLOC_ALREADY_ALIGNED + result = malloc(size); + #elif EIGEN_HAS_POSIX_MEMALIGN + if(posix_memalign(&result, 16, size)) result = 0; + #elif EIGEN_HAS_MM_MALLOC + result = _mm_malloc(size, 16); + #elif (defined _MSC_VER) + result = _aligned_malloc(size, 16); + #else + result = ei_handmade_aligned_malloc(size); + #endif + + #ifdef EIGEN_EXCEPTIONS + if(result == 0) + throw std::bad_alloc(); + #endif + return result; +} + +/** allocates \a size bytes. If Align is true, then the returned ptr is 16-byte-aligned. + * On allocation error, the returned pointer is undefined, but if exceptions are enabled then a std::bad_alloc is thrown. + */ +template inline void* ei_conditional_aligned_malloc(size_t size) +{ + return ei_aligned_malloc(size); +} + +template<> inline void* ei_conditional_aligned_malloc(size_t size) +{ + #ifdef EIGEN_NO_MALLOC + ei_assert(false && "heap allocation is forbidden (EIGEN_NO_MALLOC is defined)"); + #endif + + void *result = malloc(size); + #ifdef EIGEN_EXCEPTIONS + if(!result) throw std::bad_alloc(); + #endif + return result; +} + +/** allocates \a size objects of type T. The returned pointer is guaranteed to have 16 bytes alignment. + * On allocation error, the returned pointer is undefined, but if exceptions are enabled then a std::bad_alloc is thrown. + * The default constructor of T is called. 
+ */ +template inline T* ei_aligned_new(size_t size) +{ + void *void_result = ei_aligned_malloc(sizeof(T)*size); + return ::new(void_result) T[size]; +} + +template inline T* ei_conditional_aligned_new(size_t size) +{ + void *void_result = ei_conditional_aligned_malloc(sizeof(T)*size); + return ::new(void_result) T[size]; +} + +/** \internal free memory allocated with ei_aligned_malloc + */ +inline void ei_aligned_free(void *ptr) +{ + #if !EIGEN_ALIGN + free(ptr); + #elif EIGEN_MALLOC_ALREADY_ALIGNED + free(ptr); + #elif EIGEN_HAS_POSIX_MEMALIGN + free(ptr); + #elif EIGEN_HAS_MM_MALLOC + _mm_free(ptr); + #elif defined(_MSC_VER) + _aligned_free(ptr); + #else + ei_handmade_aligned_free(ptr); + #endif +} + +/** \internal free memory allocated with ei_conditional_aligned_malloc + */ +template inline void ei_conditional_aligned_free(void *ptr) +{ + ei_aligned_free(ptr); +} + +template<> inline void ei_conditional_aligned_free(void *ptr) +{ + free(ptr); +} + +/** \internal delete the elements of an array. + * The \a size parameters tells on how many objects to call the destructor of T. + */ +template inline void ei_delete_elements_of_array(T *ptr, size_t size) +{ + // always destruct an array starting from the end. + while(size) ptr[--size].~T(); +} + +/** \internal delete objects constructed with ei_aligned_new + * The \a size parameters tells on how many objects to call the destructor of T. + */ +template inline void ei_aligned_delete(T *ptr, size_t size) +{ + ei_delete_elements_of_array(ptr, size); + ei_aligned_free(ptr); +} + +/** \internal delete objects constructed with ei_conditional_aligned_new + * The \a size parameters tells on how many objects to call the destructor of T. + */ +template inline void ei_conditional_aligned_delete(T *ptr, size_t size) +{ + ei_delete_elements_of_array(ptr, size); + ei_conditional_aligned_free(ptr); +} + +/** \internal \returns the number of elements which have to be skipped such that data are 16 bytes aligned */ +template +inline static int ei_alignmentOffset(const Scalar* ptr, int maxOffset) +{ + typedef typename ei_packet_traits::type Packet; + const int PacketSize = ei_packet_traits::size; + const int PacketAlignedMask = PacketSize-1; + const bool Vectorized = PacketSize>1; + return Vectorized + ? std::min( (PacketSize - (int((size_t(ptr)/sizeof(Scalar))) & PacketAlignedMask)) + & PacketAlignedMask, maxOffset) + : 0; +} + +/** \internal + * ei_aligned_stack_alloc(SIZE) allocates an aligned buffer of SIZE bytes + * on the stack if SIZE is smaller than EIGEN_STACK_ALLOCATION_LIMIT. + * Otherwise the memory is allocated on the heap. + * Data allocated with ei_aligned_stack_alloc \b must be freed by calling ei_aligned_stack_free(PTR,SIZE). + * \code + * float * data = ei_aligned_stack_alloc(float,array.size()); + * // ... + * ei_aligned_stack_free(data,float,array.size()); + * \endcode + */ +#ifdef __linux__ + #define ei_aligned_stack_alloc(SIZE) (SIZE<=EIGEN_STACK_ALLOCATION_LIMIT) \ + ? 
alloca(SIZE) \ + : ei_aligned_malloc(SIZE) + #define ei_aligned_stack_free(PTR,SIZE) if(SIZE>EIGEN_STACK_ALLOCATION_LIMIT) ei_aligned_free(PTR) +#else + #define ei_aligned_stack_alloc(SIZE) ei_aligned_malloc(SIZE) + #define ei_aligned_stack_free(PTR,SIZE) ei_aligned_free(PTR) +#endif + +#define ei_aligned_stack_new(TYPE,SIZE) ::new(ei_aligned_stack_alloc(sizeof(TYPE)*SIZE)) TYPE[SIZE] +#define ei_aligned_stack_delete(TYPE,PTR,SIZE) do {ei_delete_elements_of_array(PTR, SIZE); \ + ei_aligned_stack_free(PTR,sizeof(TYPE)*SIZE);} while(0) + + +#if EIGEN_ALIGN + #ifdef EIGEN_EXCEPTIONS + #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \ + void* operator new(size_t size, const std::nothrow_t&) throw() { \ + try { return Eigen::ei_conditional_aligned_malloc(size); } \ + catch (...) { return 0; } \ + return 0; \ + } + #else + #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \ + void* operator new(size_t size, const std::nothrow_t&) throw() { \ + return Eigen::ei_conditional_aligned_malloc(size); \ + } + #endif + + #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) \ + void *operator new(size_t size) { \ + return Eigen::ei_conditional_aligned_malloc(size); \ + } \ + void *operator new[](size_t size) { \ + return Eigen::ei_conditional_aligned_malloc(size); \ + } \ + void operator delete(void * ptr) throw() { Eigen::ei_conditional_aligned_free(ptr); } \ + void operator delete[](void * ptr) throw() { Eigen::ei_conditional_aligned_free(ptr); } \ + /* in-place new and delete. since (at least afaik) there is no actual */ \ + /* memory allocated we can safely let the default implementation handle */ \ + /* this particular case. */ \ + static void *operator new(size_t size, void *ptr) { return ::operator new(size,ptr); } \ + void operator delete(void * memory, void *ptr) throw() { return ::operator delete(memory,ptr); } \ + /* nothrow-new (returns zero instead of std::bad_alloc) */ \ + EIGEN_MAKE_ALIGNED_OPERATOR_NEW_NOTHROW(NeedsToAlign) \ + void operator delete(void *ptr, const std::nothrow_t&) throw() { \ + Eigen::ei_conditional_aligned_free(ptr); \ + } \ + typedef void ei_operator_new_marker_type; +#else + #define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) +#endif + +#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(true) +#define EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(Scalar,Size) \ + EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(((Size)!=Eigen::Dynamic) && ((sizeof(Scalar)*(Size))%16==0)) + + +/** \class aligned_allocator +* +* \brief stl compatible allocator to use with with 16 byte aligned types +* +* Example: +* \code +* // Matrix4f requires 16 bytes alignment: +* std::map< int, Matrix4f, std::less, aligned_allocator > my_map_mat4; +* // Vector3f does not require 16 bytes alignment, no need to use Eigen's allocator: +* std::map< int, Vector3f > my_map_vec3; +* \endcode +* +*/ +template +class aligned_allocator +{ +public: + typedef size_t size_type; + typedef ptrdiff_t difference_type; + typedef T* pointer; + typedef const T* const_pointer; + typedef T& reference; + typedef const T& const_reference; + typedef T value_type; + + template + struct rebind + { + typedef aligned_allocator other; + }; + + pointer address( reference value ) const + { + return &value; + } + + const_pointer address( const_reference value ) const + { + return &value; + } + + aligned_allocator() throw() + { + } + + aligned_allocator( const aligned_allocator& ) throw() + { + } + + template + aligned_allocator( const aligned_allocator& ) throw() + { + } + + 
~aligned_allocator() throw() + { + } + + size_type max_size() const throw() + { + return std::numeric_limits::max(); + } + + pointer allocate( size_type num, const_pointer* hint = 0 ) + { + static_cast( hint ); // suppress unused variable warning + return static_cast( ei_aligned_malloc( num * sizeof(T) ) ); + } + + void construct( pointer p, const T& value ) + { + ::new( p ) T( value ); + } + + void destroy( pointer p ) + { + p->~T(); + } + + void deallocate( pointer p, size_type /*num*/ ) + { + ei_aligned_free( p ); + } + + bool operator!=(const aligned_allocator& other) const + { return false; } + + bool operator==(const aligned_allocator& other) const + { return true; } +}; + +#endif // EIGEN_MEMORY_H diff --git a/extern/Eigen2/Eigen/src/Core/util/Meta.h b/extern/Eigen2/Eigen/src/Core/util/Meta.h new file mode 100644 index 00000000000..c65c52ef42f --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/util/Meta.h @@ -0,0 +1,183 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_META_H +#define EIGEN_META_H + +/** \internal + * \file Meta.h + * This file contains generic metaprogramming classes which are not specifically related to Eigen. + * \note In case you wonder, yes we're aware that Boost already provides all these features, + * we however don't want to add a dependency to Boost. 
+ */ + +struct ei_meta_true { enum { ret = 1 }; }; +struct ei_meta_false { enum { ret = 0 }; }; + +template +struct ei_meta_if { typedef Then ret; }; + +template +struct ei_meta_if { typedef Else ret; }; + +template struct ei_is_same_type { enum { ret = 0 }; }; +template struct ei_is_same_type { enum { ret = 1 }; }; + +template struct ei_unref { typedef T type; }; +template struct ei_unref { typedef T type; }; + +template struct ei_unpointer { typedef T type; }; +template struct ei_unpointer { typedef T type; }; +template struct ei_unpointer { typedef T type; }; + +template struct ei_unconst { typedef T type; }; +template struct ei_unconst { typedef T type; }; +template struct ei_unconst { typedef T & type; }; +template struct ei_unconst { typedef T * type; }; + +template struct ei_cleantype { typedef T type; }; +template struct ei_cleantype { typedef typename ei_cleantype::type type; }; +template struct ei_cleantype { typedef typename ei_cleantype::type type; }; +template struct ei_cleantype { typedef typename ei_cleantype::type type; }; +template struct ei_cleantype { typedef typename ei_cleantype::type type; }; +template struct ei_cleantype { typedef typename ei_cleantype::type type; }; + +/** \internal + * Convenient struct to get the result type of a unary or binary functor. + * + * It supports both the current STL mechanism (using the result_type member) as well as + * upcoming next STL generation (using a templated result member). + * If none of these members is provided, then the type of the first argument is returned. FIXME, that behavior is a pretty bad hack. + */ +template struct ei_result_of {}; + +struct ei_has_none {int a[1];}; +struct ei_has_std_result_type {int a[2];}; +struct ei_has_tr1_result {int a[3];}; + +template +struct ei_unary_result_of_select {typedef ArgType type;}; + +template +struct ei_unary_result_of_select {typedef typename Func::result_type type;}; + +template +struct ei_unary_result_of_select {typedef typename Func::template result::type type;}; + +template +struct ei_result_of { + template + static ei_has_std_result_type testFunctor(T const *, typename T::result_type const * = 0); + template + static ei_has_tr1_result testFunctor(T const *, typename T::template result::type const * = 0); + static ei_has_none testFunctor(...); + + // note that the following indirection is needed for gcc-3.3 + enum {FunctorType = sizeof(testFunctor(static_cast(0)))}; + typedef typename ei_unary_result_of_select::type type; +}; + +template +struct ei_binary_result_of_select {typedef ArgType0 type;}; + +template +struct ei_binary_result_of_select +{typedef typename Func::result_type type;}; + +template +struct ei_binary_result_of_select +{typedef typename Func::template result::type type;}; + +template +struct ei_result_of { + template + static ei_has_std_result_type testFunctor(T const *, typename T::result_type const * = 0); + template + static ei_has_tr1_result testFunctor(T const *, typename T::template result::type const * = 0); + static ei_has_none testFunctor(...); + + // note that the following indirection is needed for gcc-3.3 + enum {FunctorType = sizeof(testFunctor(static_cast(0)))}; + typedef typename ei_binary_result_of_select::type type; +}; + +/** \internal In short, it computes int(sqrt(\a Y)) with \a Y an integer. + * Usage example: \code ei_meta_sqrt<1023>::ret \endcode + */ +template Y))) > + // use ?: instead of || just to shut up a stupid gcc 4.3 warning +class ei_meta_sqrt +{ + enum { + MidX = (InfX+SupX)/2, + TakeInf = MidX*MidX > Y ? 
1 : 0, + NewInf = int(TakeInf) ? InfX : int(MidX), + NewSup = int(TakeInf) ? int(MidX) : SupX + }; + public: + enum { ret = ei_meta_sqrt::ret }; +}; + +template +class ei_meta_sqrt { public: enum { ret = (SupX*SupX <= Y) ? SupX : InfX }; }; + +/** \internal determines whether the product of two numeric types is allowed and what the return type is */ +template struct ei_scalar_product_traits +{ + // dummy general case where T and U aren't compatible -- not allowed anyway but we catch it elsewhere + //enum { Cost = NumTraits::MulCost }; + typedef T ReturnType; +}; + +template struct ei_scalar_product_traits +{ + //enum { Cost = NumTraits::MulCost }; + typedef T ReturnType; +}; + +template struct ei_scalar_product_traits > +{ + //enum { Cost = 2*NumTraits::MulCost }; + typedef std::complex ReturnType; +}; + +template struct ei_scalar_product_traits, T> +{ + //enum { Cost = 2*NumTraits::MulCost }; + typedef std::complex ReturnType; +}; + +// FIXME quick workaround around current limitation of ei_result_of +template +struct ei_result_of(ArgType0,ArgType1)> { +typedef typename ei_scalar_product_traits::type, typename ei_cleantype::type>::ReturnType type; +}; + + + +#endif // EIGEN_META_H diff --git a/extern/Eigen2/Eigen/src/Core/util/StaticAssert.h b/extern/Eigen2/Eigen/src/Core/util/StaticAssert.h new file mode 100644 index 00000000000..2c13098a20f --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/util/StaticAssert.h @@ -0,0 +1,148 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
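// Illustrative sketch (not part of the original Eigen sources): the helpers
// defined in Meta.h above are ordinary compile-time utilities. Assuming the
// declarations shown there, they can be exercised like this:
//
//   typedef ei_meta_if<true, float, double>::ret T;   // T is float
//   enum { Same = ei_is_same_type<int, int>::ret };   // Same == 1
//   enum { Root = ei_meta_sqrt<1023>::ret };          // Root == 31, since 31*31 <= 1023 < 32*32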
+ +#ifndef EIGEN_STATIC_ASSERT_H +#define EIGEN_STATIC_ASSERT_H + +/* Some notes on Eigen's static assertion mechanism: + * + * - in EIGEN_STATIC_ASSERT(CONDITION,MSG) the parameter CONDITION must be a compile time boolean + * expression, and MSG an enum listed in struct ei_static_assert + * + * - define EIGEN_NO_STATIC_ASSERT to disable them (and save compilation time) + * in that case, the static assertion is converted to the following runtime assert: + * ei_assert(CONDITION && "MSG") + * + * - currently EIGEN_STATIC_ASSERT can only be used in function scope + * + */ + +#ifndef EIGEN_NO_STATIC_ASSERT + + #ifdef __GXX_EXPERIMENTAL_CXX0X__ + + // if native static_assert is enabled, let's use it + #define EIGEN_STATIC_ASSERT(X,MSG) static_assert(X,#MSG); + + #else // CXX0X + + template + struct ei_static_assert {}; + + template<> + struct ei_static_assert + { + enum { + YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX, + YOU_MIXED_VECTORS_OF_DIFFERENT_SIZES, + YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES, + THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE, + THIS_METHOD_IS_ONLY_FOR_MATRICES_OF_A_SPECIFIC_SIZE, + YOU_MADE_A_PROGRAMMING_MISTAKE, + YOU_CALLED_A_FIXED_SIZE_METHOD_ON_A_DYNAMIC_SIZE_MATRIX_OR_VECTOR, + UNALIGNED_LOAD_AND_STORE_OPERATIONS_UNIMPLEMENTED_ON_ALTIVEC, + NUMERIC_TYPE_MUST_BE_FLOATING_POINT, + COEFFICIENT_WRITE_ACCESS_TO_SELFADJOINT_NOT_SUPPORTED, + WRITING_TO_TRIANGULAR_PART_WITH_UNIT_DIAGONAL_IS_NOT_SUPPORTED, + THIS_METHOD_IS_ONLY_FOR_FIXED_SIZE, + INVALID_MATRIX_PRODUCT, + INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS, + INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION, + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY, + THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES, + THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES, + INVALID_MATRIX_TEMPLATE_PARAMETERS, + BOTH_MATRICES_MUST_HAVE_THE_SAME_STORAGE_ORDER, + THIS_METHOD_IS_ONLY_FOR_DIAGONAL_MATRIX + }; + }; + + // Specialized implementation for MSVC to avoid "conditional + // expression is constant" warnings. This implementation doesn't + // appear to work under GCC, hence the multiple implementations. 
+ #ifdef _MSC_VER + + #define EIGEN_STATIC_ASSERT(CONDITION,MSG) \ + {Eigen::ei_static_assert::MSG;} + + #else + + #define EIGEN_STATIC_ASSERT(CONDITION,MSG) \ + if (Eigen::ei_static_assert::MSG) {} + + #endif + + #endif // not CXX0X + +#else // EIGEN_NO_STATIC_ASSERT + + #define EIGEN_STATIC_ASSERT(CONDITION,MSG) ei_assert((CONDITION) && #MSG); + +#endif // EIGEN_NO_STATIC_ASSERT + + +// static assertion failing if the type \a TYPE is not a vector type +#define EIGEN_STATIC_ASSERT_VECTOR_ONLY(TYPE) \ + EIGEN_STATIC_ASSERT(TYPE::IsVectorAtCompileTime, \ + YOU_TRIED_CALLING_A_VECTOR_METHOD_ON_A_MATRIX) + +// static assertion failing if the type \a TYPE is not fixed-size +#define EIGEN_STATIC_ASSERT_FIXED_SIZE(TYPE) \ + EIGEN_STATIC_ASSERT(TYPE::SizeAtCompileTime!=Eigen::Dynamic, \ + YOU_CALLED_A_FIXED_SIZE_METHOD_ON_A_DYNAMIC_SIZE_MATRIX_OR_VECTOR) + +// static assertion failing if the type \a TYPE is not a vector type of the given size +#define EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(TYPE, SIZE) \ + EIGEN_STATIC_ASSERT(TYPE::IsVectorAtCompileTime && TYPE::SizeAtCompileTime==SIZE, \ + THIS_METHOD_IS_ONLY_FOR_VECTORS_OF_A_SPECIFIC_SIZE) + +// static assertion failing if the type \a TYPE is not a vector type of the given size +#define EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(TYPE, ROWS, COLS) \ + EIGEN_STATIC_ASSERT(TYPE::RowsAtCompileTime==ROWS && TYPE::ColsAtCompileTime==COLS, \ + THIS_METHOD_IS_ONLY_FOR_MATRICES_OF_A_SPECIFIC_SIZE) + +// static assertion failing if the two vector expression types are not compatible (same fixed-size or dynamic size) +#define EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(TYPE0,TYPE1) \ + EIGEN_STATIC_ASSERT( \ + (int(TYPE0::SizeAtCompileTime)==Eigen::Dynamic \ + || int(TYPE1::SizeAtCompileTime)==Eigen::Dynamic \ + || int(TYPE0::SizeAtCompileTime)==int(TYPE1::SizeAtCompileTime)),\ + YOU_MIXED_VECTORS_OF_DIFFERENT_SIZES) + +#define EIGEN_PREDICATE_SAME_MATRIX_SIZE(TYPE0,TYPE1) \ + ((int(TYPE0::RowsAtCompileTime)==Eigen::Dynamic \ + || int(TYPE1::RowsAtCompileTime)==Eigen::Dynamic \ + || int(TYPE0::RowsAtCompileTime)==int(TYPE1::RowsAtCompileTime)) \ + && (int(TYPE0::ColsAtCompileTime)==Eigen::Dynamic \ + || int(TYPE1::ColsAtCompileTime)==Eigen::Dynamic \ + || int(TYPE0::ColsAtCompileTime)==int(TYPE1::ColsAtCompileTime))) + +// static assertion failing if it is guaranteed at compile-time that the two matrix expression types have different sizes +#define EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(TYPE0,TYPE1) \ + EIGEN_STATIC_ASSERT( \ + EIGEN_PREDICATE_SAME_MATRIX_SIZE(TYPE0,TYPE1),\ + YOU_MIXED_MATRICES_OF_DIFFERENT_SIZES) + +#endif // EIGEN_STATIC_ASSERT_H diff --git a/extern/Eigen2/Eigen/src/Core/util/XprHelper.h b/extern/Eigen2/Eigen/src/Core/util/XprHelper.h new file mode 100644 index 00000000000..12d6f9a3a3e --- /dev/null +++ b/extern/Eigen2/Eigen/src/Core/util/XprHelper.h @@ -0,0 +1,219 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. 
+// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_XPRHELPER_H +#define EIGEN_XPRHELPER_H + +// just a workaround because GCC seems to not really like empty structs +#ifdef __GNUG__ + struct ei_empty_struct{char _ei_dummy_;}; + #define EIGEN_EMPTY_STRUCT : Eigen::ei_empty_struct +#else + #define EIGEN_EMPTY_STRUCT +#endif + +//classes inheriting ei_no_assignment_operator don't generate a default operator=. +class ei_no_assignment_operator +{ + private: + ei_no_assignment_operator& operator=(const ei_no_assignment_operator&); +}; + +/** \internal If the template parameter Value is Dynamic, this class is just a wrapper around an int variable that + * can be accessed using value() and setValue(). + * Otherwise, this class is an empty structure and value() just returns the template parameter Value. + */ +template class ei_int_if_dynamic EIGEN_EMPTY_STRUCT +{ + public: + ei_int_if_dynamic() {} + explicit ei_int_if_dynamic(int) {} + static int value() { return Value; } + void setValue(int) {} +}; + +template<> class ei_int_if_dynamic +{ + int m_value; + ei_int_if_dynamic() {} + public: + explicit ei_int_if_dynamic(int value) : m_value(value) {} + int value() const { return m_value; } + void setValue(int value) { m_value = value; } +}; + +template struct ei_functor_traits +{ + enum + { + Cost = 10, + PacketAccess = false + }; +}; + +template struct ei_packet_traits +{ + typedef T type; + enum {size=1}; +}; + +template struct ei_unpacket_traits +{ + typedef T type; + enum {size=1}; +}; + +template +class ei_compute_matrix_flags +{ + enum { + row_major_bit = Options&RowMajor ? RowMajorBit : 0, + inner_max_size = row_major_bit ? MaxCols : MaxRows, + is_big = inner_max_size == Dynamic, + is_packet_size_multiple = (Cols*Rows) % ei_packet_traits::size == 0, + aligned_bit = ((Options&AutoAlign) && (is_big || is_packet_size_multiple)) ? AlignedBit : 0, + packet_access_bit = ei_packet_traits::size > 1 && aligned_bit ? PacketAccessBit : 0 + }; + + public: + enum { ret = LinearAccessBit | DirectAccessBit | packet_access_bit | row_major_bit | aligned_bit }; +}; + +template struct ei_size_at_compile_time +{ + enum { ret = (_Rows==Dynamic || _Cols==Dynamic) ? Dynamic : _Rows * _Cols }; +}; + +/* ei_eval : the return type of eval(). For matrices, this is just a const reference + * in order to avoid a useless copy + */ + +template::Flags&SparseBit> class ei_eval; + +template struct ei_eval +{ + typedef Matrix::Scalar, + ei_traits::RowsAtCompileTime, + ei_traits::ColsAtCompileTime, + AutoAlign | (ei_traits::Flags&RowMajorBit ? 
RowMajor : ColMajor), + ei_traits::MaxRowsAtCompileTime, + ei_traits::MaxColsAtCompileTime + > type; +}; + +// for matrices, no need to evaluate, just use a const reference to avoid a useless copy +template +struct ei_eval, IsDense> +{ + typedef const Matrix<_Scalar, _Rows, _Cols, _StorageOrder, _MaxRows, _MaxCols>& type; +}; + +/* ei_plain_matrix_type : the difference from ei_eval is that ei_plain_matrix_type is always a plain matrix type, + * whereas ei_eval is a const reference in the case of a matrix + */ +template struct ei_plain_matrix_type +{ + typedef Matrix::Scalar, + ei_traits::RowsAtCompileTime, + ei_traits::ColsAtCompileTime, + AutoAlign | (ei_traits::Flags&RowMajorBit ? RowMajor : ColMajor), + ei_traits::MaxRowsAtCompileTime, + ei_traits::MaxColsAtCompileTime + > type; +}; + +/* ei_plain_matrix_type_column_major : same as ei_plain_matrix_type but guaranteed to be column-major + */ +template struct ei_plain_matrix_type_column_major +{ + typedef Matrix::Scalar, + ei_traits::RowsAtCompileTime, + ei_traits::ColsAtCompileTime, + AutoAlign | ColMajor, + ei_traits::MaxRowsAtCompileTime, + ei_traits::MaxColsAtCompileTime + > type; +}; + +template struct ei_must_nest_by_value { enum { ret = false }; }; +template struct ei_must_nest_by_value > { enum { ret = true }; }; + +/** \internal Determines how a given expression should be nested into another one. + * For example, when you do a * (b+c), Eigen will determine how the expression b+c should be + * nested into the bigger product expression. The choice is between nesting the expression b+c as-is, or + * evaluating that expression b+c into a temporary variable d, and nest d so that the resulting expression is + * a*d. Evaluating can be beneficial for example if every coefficient access in the resulting expression causes + * many coefficient accesses in the nested expressions -- as is the case with matrix product for example. + * + * \param T the type of the expression being nested + * \param n the number of coefficient accesses in the nested expression for each coefficient access in the bigger expression. + * + * Example. Suppose that a, b, and c are of type Matrix3d. The user forms the expression a*(b+c). + * b+c is an expression "sum of matrices", which we will denote by S. In order to determine how to nest it, + * the Product expression uses: ei_nested::ret, which turns out to be Matrix3d because the internal logic of + * ei_nested determined that in this case it was better to evaluate the expression b+c into a temporary. On the other hand, + * since a is of type Matrix3d, the Product expression nests it as ei_nested::ret, which turns out to be + * const Matrix3d&, because the internal logic of ei_nested determined that since a was already a matrix, there was no point + * in copying it into another matrix. + */ +template::type> struct ei_nested +{ + enum { + CostEval = (n+1) * int(NumTraits::Scalar>::ReadCost), + CostNoEval = (n-1) * int(ei_traits::CoeffReadCost) + }; + typedef typename ei_meta_if< + ei_must_nest_by_value::ret, + T, + typename ei_meta_if< + (int(ei_traits::Flags) & EvalBeforeNestingBit) + || ( int(CostEval) <= int(CostNoEval) ), + PlainMatrixType, + const T& + >::ret + >::ret type; +}; + +template struct ei_are_flags_consistent +{ + enum { ret = !( (Flags&UnitDiagBit && Flags&ZeroDiagBit) ) + }; +}; + +/** \internal Gives the type of a sub-matrix or sub-vector of a matrix of type \a ExpressionType and size \a Size + * TODO: could be a good idea to define a big ReturnType struct ?? 
+ */ +template struct BlockReturnType { + typedef Block::RowsAtCompileTime == 1 ? 1 : RowsOrSize), + (ei_traits::ColsAtCompileTime == 1 ? 1 : RowsOrSize)> SubVectorType; + typedef Block Type; +}; + +template struct ei_cast_return_type +{ + typedef typename ei_meta_if::ret,const CurrentType&,NewType>::ret type; +}; + +#endif // EIGEN_XPRHELPER_H diff --git a/extern/Eigen2/Eigen/src/Geometry/AlignedBox.h b/extern/Eigen2/Eigen/src/Geometry/AlignedBox.h new file mode 100644 index 00000000000..14ec9261e3a --- /dev/null +++ b/extern/Eigen2/Eigen/src/Geometry/AlignedBox.h @@ -0,0 +1,173 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_ALIGNEDBOX_H +#define EIGEN_ALIGNEDBOX_H + +/** \geometry_module \ingroup Geometry_Module + * \nonstableyet + * + * \class AlignedBox + * + * \brief An axis aligned box + * + * \param _Scalar the type of the scalar coefficients + * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic. + * + * This class represents an axis aligned box as a pair of the minimal and maximal corners. + */ +template +class AlignedBox +{ +public: +EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==Dynamic ? Dynamic : _AmbientDim+1) + enum { AmbientDimAtCompileTime = _AmbientDim }; + typedef _Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef Matrix VectorType; + + /** Default constructor initializing a null box. */ + inline explicit AlignedBox() + { if (AmbientDimAtCompileTime!=Dynamic) setNull(); } + + /** Constructs a null box with \a _dim the dimension of the ambient space. */ + inline explicit AlignedBox(int _dim) : m_min(_dim), m_max(_dim) + { setNull(); } + + /** Constructs a box with extremities \a _min and \a _max. */ + inline AlignedBox(const VectorType& _min, const VectorType& _max) : m_min(_min), m_max(_max) {} + + /** Constructs a box containing a single point \a p. */ + inline explicit AlignedBox(const VectorType& p) : m_min(p), m_max(p) {} + + ~AlignedBox() {} + + /** \returns the dimension in which the box holds */ + inline int dim() const { return AmbientDimAtCompileTime==Dynamic ? m_min.size()-1 : AmbientDimAtCompileTime; } + + /** \returns true if the box is null, i.e, empty. */ + inline bool isNull() const { return (m_min.cwise() > m_max).any(); } + + /** Makes \c *this a null/empty box. 
*/ + inline void setNull() + { + m_min.setConstant( std::numeric_limits::max()); + m_max.setConstant(-std::numeric_limits::max()); + } + + /** \returns the minimal corner */ + inline const VectorType& min() const { return m_min; } + /** \returns a non const reference to the minimal corner */ + inline VectorType& min() { return m_min; } + /** \returns the maximal corner */ + inline const VectorType& max() const { return m_max; } + /** \returns a non const reference to the maximal corner */ + inline VectorType& max() { return m_max; } + + /** \returns true if the point \a p is inside the box \c *this. */ + inline bool contains(const VectorType& p) const + { return (m_min.cwise()<=p).all() && (p.cwise()<=m_max).all(); } + + /** \returns true if the box \a b is entirely inside the box \c *this. */ + inline bool contains(const AlignedBox& b) const + { return (m_min.cwise()<=b.min()).all() && (b.max().cwise()<=m_max).all(); } + + /** Extends \c *this such that it contains the point \a p and returns a reference to \c *this. */ + inline AlignedBox& extend(const VectorType& p) + { m_min = m_min.cwise().min(p); m_max = m_max.cwise().max(p); return *this; } + + /** Extends \c *this such that it contains the box \a b and returns a reference to \c *this. */ + inline AlignedBox& extend(const AlignedBox& b) + { m_min = m_min.cwise().min(b.m_min); m_max = m_max.cwise().max(b.m_max); return *this; } + + /** Clamps \c *this by the box \a b and returns a reference to \c *this. */ + inline AlignedBox& clamp(const AlignedBox& b) + { m_min = m_min.cwise().max(b.m_min); m_max = m_max.cwise().min(b.m_max); return *this; } + + /** Translate \c *this by the vector \a t and returns a reference to \c *this. */ + inline AlignedBox& translate(const VectorType& t) + { m_min += t; m_max += t; return *this; } + + /** \returns the squared distance between the point \a p and the box \c *this, + * and zero if \a p is inside the box. + * \sa exteriorDistance() + */ + inline Scalar squaredExteriorDistance(const VectorType& p) const; + + /** \returns the distance between the point \a p and the box \c *this, + * and zero if \a p is inside the box. + * \sa squaredExteriorDistance() + */ + inline Scalar exteriorDistance(const VectorType& p) const + { return ei_sqrt(squaredExteriorDistance(p)); } + + /** \returns \c *this with scalar type casted to \a NewScalarType + * + * Note that if \a NewScalarType is equal to the current scalar type of \c *this + * then this function smartly returns a const reference to \c *this. + */ + template + inline typename ei_cast_return_type >::type cast() const + { + return typename ei_cast_return_type >::type(*this); + } + + /** Copy constructor with scalar type conversion */ + template + inline explicit AlignedBox(const AlignedBox& other) + { + m_min = other.min().template cast(); + m_max = other.max().template cast(); + } + + /** \returns \c true if \c *this is approximately equal to \a other, within the precision + * determined by \a prec. 
+ * + * \sa MatrixBase::isApprox() */ + bool isApprox(const AlignedBox& other, typename NumTraits::Real prec = precision()) const + { return m_min.isApprox(other.m_min, prec) && m_max.isApprox(other.m_max, prec); } + +protected: + + VectorType m_min, m_max; +}; + +template +inline Scalar AlignedBox::squaredExteriorDistance(const VectorType& p) const +{ + Scalar dist2 = 0.; + Scalar aux; + for (int k=0; k +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_ANGLEAXIS_H +#define EIGEN_ANGLEAXIS_H + +/** \geometry_module \ingroup Geometry_Module + * + * \class AngleAxis + * + * \brief Represents a 3D rotation as a rotation angle around an arbitrary 3D axis + * + * \param _Scalar the scalar type, i.e., the type of the coefficients. + * + * The following two typedefs are provided for convenience: + * \li \c AngleAxisf for \c float + * \li \c AngleAxisd for \c double + * + * \addexample AngleAxisForEuler \label How to define a rotation from Euler-angles + * + * Combined with MatrixBase::Unit{X,Y,Z}, AngleAxis can be used to easily + * mimic Euler-angles. Here is an example: + * \include AngleAxis_mimic_euler.cpp + * Output: \verbinclude AngleAxis_mimic_euler.out + * + * \note This class is not aimed to be used to store a rotation transformation, + * but rather to make easier the creation of other rotation (Quaternion, rotation Matrix) + * and transformation objects. + * + * \sa class Quaternion, class Transform, MatrixBase::UnitX() + */ + +template struct ei_traits > +{ + typedef _Scalar Scalar; +}; + +template +class AngleAxis : public RotationBase,3> +{ + typedef RotationBase,3> Base; + +public: + + using Base::operator*; + + enum { Dim = 3 }; + /** the scalar type of the coefficients */ + typedef _Scalar Scalar; + typedef Matrix Matrix3; + typedef Matrix Vector3; + typedef Quaternion QuaternionType; + +protected: + + Vector3 m_axis; + Scalar m_angle; + +public: + + /** Default constructor without initialization. */ + AngleAxis() {} + /** Constructs and initialize the angle-axis rotation from an \a angle in radian + * and an \a axis which must be normalized. */ + template + inline AngleAxis(Scalar angle, const MatrixBase& axis) : m_axis(axis), m_angle(angle) {} + /** Constructs and initialize the angle-axis rotation from a quaternion \a q. */ + inline AngleAxis(const QuaternionType& q) { *this = q; } + /** Constructs and initialize the angle-axis rotation from a 3x3 rotation matrix. 
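+   *
+   * A hypothetical usage sketch (Matrix3f, Vector3f and AngleAxisf are the usual Eigen
+   * typedefs, the angle value is arbitrary):
+   * \code
+   * Matrix3f R;
+   * R = AngleAxisf(0.25f*3.14159265f, Vector3f::UnitZ());  // build a rotation matrix
+   * AngleAxisf aa(R);             // recovers angle ~ pi/4 and axis ~ (0,0,1) from R
+   * \endcode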
*/ + template + inline explicit AngleAxis(const MatrixBase& m) { *this = m; } + + Scalar angle() const { return m_angle; } + Scalar& angle() { return m_angle; } + + const Vector3& axis() const { return m_axis; } + Vector3& axis() { return m_axis; } + + /** Concatenates two rotations */ + inline QuaternionType operator* (const AngleAxis& other) const + { return QuaternionType(*this) * QuaternionType(other); } + + /** Concatenates two rotations */ + inline QuaternionType operator* (const QuaternionType& other) const + { return QuaternionType(*this) * other; } + + /** Concatenates two rotations */ + friend inline QuaternionType operator* (const QuaternionType& a, const AngleAxis& b) + { return a * QuaternionType(b); } + + /** Concatenates two rotations */ + inline Matrix3 operator* (const Matrix3& other) const + { return toRotationMatrix() * other; } + + /** Concatenates two rotations */ + inline friend Matrix3 operator* (const Matrix3& a, const AngleAxis& b) + { return a * b.toRotationMatrix(); } + + /** Applies rotation to vector */ + inline Vector3 operator* (const Vector3& other) const + { return toRotationMatrix() * other; } + + /** \returns the inverse rotation, i.e., an angle-axis with opposite rotation angle */ + AngleAxis inverse() const + { return AngleAxis(-m_angle, m_axis); } + + AngleAxis& operator=(const QuaternionType& q); + template + AngleAxis& operator=(const MatrixBase& m); + + template + AngleAxis& fromRotationMatrix(const MatrixBase& m); + Matrix3 toRotationMatrix(void) const; + + /** \returns \c *this with scalar type casted to \a NewScalarType + * + * Note that if \a NewScalarType is equal to the current scalar type of \c *this + * then this function smartly returns a const reference to \c *this. + */ + template + inline typename ei_cast_return_type >::type cast() const + { return typename ei_cast_return_type >::type(*this); } + + /** Copy constructor with scalar type conversion */ + template + inline explicit AngleAxis(const AngleAxis& other) + { + m_axis = other.axis().template cast(); + m_angle = Scalar(other.angle()); + } + + /** \returns \c true if \c *this is approximately equal to \a other, within the precision + * determined by \a prec. + * + * \sa MatrixBase::isApprox() */ + bool isApprox(const AngleAxis& other, typename NumTraits::Real prec = precision()) const + { return m_axis.isApprox(other.m_axis, prec) && ei_isApprox(m_angle,other.m_angle, prec); } +}; + +/** \ingroup Geometry_Module + * single precision angle-axis type */ +typedef AngleAxis AngleAxisf; +/** \ingroup Geometry_Module + * double precision angle-axis type */ +typedef AngleAxis AngleAxisd; + +/** Set \c *this from a quaternion. + * The axis is normalized. + */ +template +AngleAxis& AngleAxis::operator=(const QuaternionType& q) +{ + Scalar n2 = q.vec().squaredNorm(); + if (n2 < precision()*precision()) + { + m_angle = 0; + m_axis << 1, 0, 0; + } + else + { + m_angle = 2*std::acos(q.w()); + m_axis = q.vec() / ei_sqrt(n2); + } + return *this; +} + +/** Set \c *this from a 3x3 rotation matrix \a mat. + */ +template +template +AngleAxis& AngleAxis::operator=(const MatrixBase& mat) +{ + // Since a direct conversion would not be really faster, + // let's use the robust Quaternion implementation: + return *this = QuaternionType(mat); +} + +/** Constructs and \returns an equivalent 3x3 rotation matrix. 
+ */ +template +typename AngleAxis::Matrix3 +AngleAxis::toRotationMatrix(void) const +{ + Matrix3 res; + Vector3 sin_axis = ei_sin(m_angle) * m_axis; + Scalar c = ei_cos(m_angle); + Vector3 cos1_axis = (Scalar(1)-c) * m_axis; + + Scalar tmp; + tmp = cos1_axis.x() * m_axis.y(); + res.coeffRef(0,1) = tmp - sin_axis.z(); + res.coeffRef(1,0) = tmp + sin_axis.z(); + + tmp = cos1_axis.x() * m_axis.z(); + res.coeffRef(0,2) = tmp + sin_axis.y(); + res.coeffRef(2,0) = tmp - sin_axis.y(); + + tmp = cos1_axis.y() * m_axis.z(); + res.coeffRef(1,2) = tmp - sin_axis.x(); + res.coeffRef(2,1) = tmp + sin_axis.x(); + + res.diagonal() = (cos1_axis.cwise() * m_axis).cwise() + c; + + return res; +} + +#endif // EIGEN_ANGLEAXIS_H diff --git a/extern/Eigen2/Eigen/src/Geometry/EulerAngles.h b/extern/Eigen2/Eigen/src/Geometry/EulerAngles.h new file mode 100644 index 00000000000..204118ac94d --- /dev/null +++ b/extern/Eigen2/Eigen/src/Geometry/EulerAngles.h @@ -0,0 +1,96 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_EULERANGLES_H +#define EIGEN_EULERANGLES_H + +/** \geometry_module \ingroup Geometry_Module + * \nonstableyet + * + * \returns the Euler-angles of the rotation matrix \c *this using the convention defined by the triplet (\a a0,\a a1,\a a2) + * + * Each of the three parameters \a a0,\a a1,\a a2 represents the respective rotation axis as an integer in {0,1,2}. + * For instance, in: + * \code Vector3f ea = mat.eulerAngles(2, 0, 2); \endcode + * "2" represents the z axis and "0" the x axis, etc. The returned angles are such that + * we have the following equality: + * \code + * mat == AngleAxisf(ea[0], Vector3f::UnitZ()) + * * AngleAxisf(ea[1], Vector3f::UnitX()) + * * AngleAxisf(ea[2], Vector3f::UnitZ()); \endcode + * This corresponds to the right-multiply conventions (with right hand side frames). + */ +template +inline Matrix::Scalar,3,1> +MatrixBase::eulerAngles(int a0, int a1, int a2) const +{ + /* Implemented from Graphics Gems IV */ + EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Derived,3,3) + + Matrix res; + typedef Matrix Vector2; + const Scalar epsilon = precision(); + + const int odd = ((a0+1)%3 == a1) ? 
0 : 1; + const int i = a0; + const int j = (a0 + 1 + odd)%3; + const int k = (a0 + 2 - odd)%3; + + if (a0==a2) + { + Scalar s = Vector2(coeff(j,i) , coeff(k,i)).norm(); + res[1] = ei_atan2(s, coeff(i,i)); + if (s > epsilon) + { + res[0] = ei_atan2(coeff(j,i), coeff(k,i)); + res[2] = ei_atan2(coeff(i,j),-coeff(i,k)); + } + else + { + res[0] = Scalar(0); + res[2] = (coeff(i,i)>0?1:-1)*ei_atan2(-coeff(k,j), coeff(j,j)); + } + } + else + { + Scalar c = Vector2(coeff(i,i) , coeff(i,j)).norm(); + res[1] = ei_atan2(-coeff(i,k), c); + if (c > epsilon) + { + res[0] = ei_atan2(coeff(j,k), coeff(k,k)); + res[2] = ei_atan2(coeff(i,j), coeff(i,i)); + } + else + { + res[0] = Scalar(0); + res[2] = (coeff(i,k)>0?1:-1)*ei_atan2(-coeff(k,j), coeff(j,j)); + } + } + if (!odd) + res = -res; + return res; +} + + +#endif // EIGEN_EULERANGLES_H diff --git a/extern/Eigen2/Eigen/src/Geometry/Hyperplane.h b/extern/Eigen2/Eigen/src/Geometry/Hyperplane.h new file mode 100644 index 00000000000..22c530d4be0 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Geometry/Hyperplane.h @@ -0,0 +1,268 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_HYPERPLANE_H +#define EIGEN_HYPERPLANE_H + +/** \geometry_module \ingroup Geometry_Module + * + * \class Hyperplane + * + * \brief A hyperplane + * + * A hyperplane is an affine subspace of dimension n-1 in a space of dimension n. + * For example, a hyperplane in a plane is a line; a hyperplane in 3-space is a plane. + * + * \param _Scalar the scalar type, i.e., the type of the coefficients + * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic. + * Notice that the dimension of the hyperplane is _AmbientDim-1. + * + * This class represents an hyperplane as the zero set of the implicit equation + * \f$ n \cdot x + d = 0 \f$ where \f$ n \f$ is a unit normal vector of the plane (linear part) + * and \f$ d \f$ is the distance (offset) to the origin. + */ +template +class Hyperplane +{ +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim==Dynamic ? 
Dynamic : _AmbientDim+1) + enum { AmbientDimAtCompileTime = _AmbientDim }; + typedef _Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef Matrix VectorType; + typedef Matrix Coefficients; + typedef Block NormalReturnType; + + /** Default constructor without initialization */ + inline explicit Hyperplane() {} + + /** Constructs a dynamic-size hyperplane with \a _dim the dimension + * of the ambient space */ + inline explicit Hyperplane(int _dim) : m_coeffs(_dim+1) {} + + /** Construct a plane from its normal \a n and a point \a e onto the plane. + * \warning the vector normal is assumed to be normalized. + */ + inline Hyperplane(const VectorType& n, const VectorType& e) + : m_coeffs(n.size()+1) + { + normal() = n; + offset() = -e.dot(n); + } + + /** Constructs a plane from its normal \a n and distance to the origin \a d + * such that the algebraic equation of the plane is \f$ n \cdot x + d = 0 \f$. + * \warning the vector normal is assumed to be normalized. + */ + inline Hyperplane(const VectorType& n, Scalar d) + : m_coeffs(n.size()+1) + { + normal() = n; + offset() = d; + } + + /** Constructs a hyperplane passing through the two points. If the dimension of the ambient space + * is greater than 2, then there isn't uniqueness, so an arbitrary choice is made. + */ + static inline Hyperplane Through(const VectorType& p0, const VectorType& p1) + { + Hyperplane result(p0.size()); + result.normal() = (p1 - p0).unitOrthogonal(); + result.offset() = -result.normal().dot(p0); + return result; + } + + /** Constructs a hyperplane passing through the three points. The dimension of the ambient space + * is required to be exactly 3. + */ + static inline Hyperplane Through(const VectorType& p0, const VectorType& p1, const VectorType& p2) + { + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 3) + Hyperplane result(p0.size()); + result.normal() = (p2 - p0).cross(p1 - p0).normalized(); + result.offset() = -result.normal().dot(p0); + return result; + } + + /** Constructs a hyperplane passing through the parametrized line \a parametrized. + * If the dimension of the ambient space is greater than 2, then there isn't uniqueness, + * so an arbitrary choice is made. + */ + // FIXME to be consitent with the rest this could be implemented as a static Through function ?? + explicit Hyperplane(const ParametrizedLine& parametrized) + { + normal() = parametrized.direction().unitOrthogonal(); + offset() = -normal().dot(parametrized.origin()); + } + + ~Hyperplane() {} + + /** \returns the dimension in which the plane holds */ + inline int dim() const { return AmbientDimAtCompileTime==Dynamic ? m_coeffs.size()-1 : AmbientDimAtCompileTime; } + + /** normalizes \c *this */ + void normalize(void) + { + m_coeffs /= normal().norm(); + } + + /** \returns the signed distance between the plane \c *this and a point \a p. + * \sa absDistance() + */ + inline Scalar signedDistance(const VectorType& p) const { return p.dot(normal()) + offset(); } + + /** \returns the absolute distance between the plane \c *this and a point \a p. + * \sa signedDistance() + */ + inline Scalar absDistance(const VectorType& p) const { return ei_abs(signedDistance(p)); } + + /** \returns the projection of a point \a p onto the plane \c *this. + */ + inline VectorType projection(const VectorType& p) const { return p - signedDistance(p) * normal(); } + + /** \returns a constant reference to the unit normal vector of the plane, which corresponds + * to the linear part of the implicit equation. 
+ */ + inline const NormalReturnType normal() const { return NormalReturnType(m_coeffs,0,0,dim(),1); } + + /** \returns a non-constant reference to the unit normal vector of the plane, which corresponds + * to the linear part of the implicit equation. + */ + inline NormalReturnType normal() { return NormalReturnType(m_coeffs,0,0,dim(),1); } + + /** \returns the distance to the origin, which is also the "constant term" of the implicit equation + * \warning the vector normal is assumed to be normalized. + */ + inline const Scalar& offset() const { return m_coeffs.coeff(dim()); } + + /** \returns a non-constant reference to the distance to the origin, which is also the constant part + * of the implicit equation */ + inline Scalar& offset() { return m_coeffs(dim()); } + + /** \returns a constant reference to the coefficients c_i of the plane equation: + * \f$ c_0*x_0 + ... + c_{d-1}*x_{d-1} + c_d = 0 \f$ + */ + inline const Coefficients& coeffs() const { return m_coeffs; } + + /** \returns a non-constant reference to the coefficients c_i of the plane equation: + * \f$ c_0*x_0 + ... + c_{d-1}*x_{d-1} + c_d = 0 \f$ + */ + inline Coefficients& coeffs() { return m_coeffs; } + + /** \returns the intersection of *this with \a other. + * + * \warning The ambient space must be a plane, i.e. have dimension 2, so that \c *this and \a other are lines. + * + * \note If \a other is approximately parallel to *this, this method will return any point on *this. + */ + VectorType intersection(const Hyperplane& other) + { + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2) + Scalar det = coeffs().coeff(0) * other.coeffs().coeff(1) - coeffs().coeff(1) * other.coeffs().coeff(0); + // since the line equations ax+by=c are normalized with a^2+b^2=1, the following tests + // whether the two lines are approximately parallel. + if(ei_isMuchSmallerThan(det, Scalar(1))) + { // special case where the two lines are approximately parallel. Pick any point on the first line. + if(ei_abs(coeffs().coeff(1))>ei_abs(coeffs().coeff(0))) + return VectorType(coeffs().coeff(1), -coeffs().coeff(2)/coeffs().coeff(1)-coeffs().coeff(0)); + else + return VectorType(-coeffs().coeff(2)/coeffs().coeff(0)-coeffs().coeff(1), coeffs().coeff(0)); + } + else + { // general case + Scalar invdet = Scalar(1) / det; + return VectorType(invdet*(coeffs().coeff(1)*other.coeffs().coeff(2)-other.coeffs().coeff(1)*coeffs().coeff(2)), + invdet*(other.coeffs().coeff(0)*coeffs().coeff(2)-coeffs().coeff(0)*other.coeffs().coeff(2))); + } + } + + /** Applies the transformation matrix \a mat to \c *this and returns a reference to \c *this. + * + * \param mat the Dim x Dim transformation matrix + * \param traits specifies whether the matrix \a mat represents an Isometry + * or a more generic Affine transformation. The default is Affine. + */ + template + inline Hyperplane& transform(const MatrixBase& mat, TransformTraits traits = Affine) + { + if (traits==Affine) + normal() = mat.inverse().transpose() * normal(); + else if (traits==Isometry) + normal() = mat * normal(); + else + { + ei_assert("invalid traits value in Hyperplane::transform()"); + } + return *this; + } + + /** Applies the transformation \a t to \c *this and returns a reference to \c *this. + * + * \param t the transformation of dimension Dim + * \param traits specifies whether the transformation \a t represents an Isometry + * or a more generic Affine transformation. The default is Affine. + * Other kind of transformations are not supported. 
+ */ + inline Hyperplane& transform(const Transform& t, + TransformTraits traits = Affine) + { + transform(t.linear(), traits); + offset() -= t.translation().dot(normal()); + return *this; + } + + /** \returns \c *this with scalar type casted to \a NewScalarType + * + * Note that if \a NewScalarType is equal to the current scalar type of \c *this + * then this function smartly returns a const reference to \c *this. + */ + template + inline typename ei_cast_return_type >::type cast() const + { + return typename ei_cast_return_type >::type(*this); + } + + /** Copy constructor with scalar type conversion */ + template + inline explicit Hyperplane(const Hyperplane& other) + { m_coeffs = other.coeffs().template cast(); } + + /** \returns \c true if \c *this is approximately equal to \a other, within the precision + * determined by \a prec. + * + * \sa MatrixBase::isApprox() */ + bool isApprox(const Hyperplane& other, typename NumTraits::Real prec = precision()) const + { return m_coeffs.isApprox(other.m_coeffs, prec); } + +protected: + + Coefficients m_coeffs; +}; + +#endif // EIGEN_HYPERPLANE_H diff --git a/extern/Eigen2/Eigen/src/Geometry/OrthoMethods.h b/extern/Eigen2/Eigen/src/Geometry/OrthoMethods.h new file mode 100644 index 00000000000..047152d0b99 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Geometry/OrthoMethods.h @@ -0,0 +1,119 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2006-2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
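+
+// The cross() and unitOrthogonal() methods defined in this file are what the Hyperplane
+// constructions of the previous file rely on: Hyperplane::Through(p0,p1) takes the unit
+// orthogonal of p1-p0 as its normal. An illustrative sketch of the resulting API, assuming
+// Eigen's Vector2f typedef (values arbitrary):
+//   Hyperplane<float,2> h1 = Hyperplane<float,2>::Through(Vector2f(0,0), Vector2f(1,0));
+//   Hyperplane<float,2> h2 = Hyperplane<float,2>::Through(Vector2f(0,0), Vector2f(0,1));
+//   float    d = h1.signedDistance(Vector2f(0.5f, 2.f));   // +2: distance to the line y = 0
+//   Vector2f x = h1.intersection(h2);                       // the two lines meet at (0,0)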
+ +#ifndef EIGEN_ORTHOMETHODS_H +#define EIGEN_ORTHOMETHODS_H + +/** \geometry_module + * + * \returns the cross product of \c *this and \a other + * + * Here is a very good explanation of cross-product: http://xkcd.com/199/ + */ +template +template +inline typename MatrixBase::PlainMatrixType +MatrixBase::cross(const MatrixBase& other) const +{ + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived,3) + + // Note that there is no need for an expression here since the compiler + // optimize such a small temporary very well (even within a complex expression) + const typename ei_nested::type lhs(derived()); + const typename ei_nested::type rhs(other.derived()); + return typename ei_plain_matrix_type::type( + lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1), + lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2), + lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0) + ); +} + +template +struct ei_unitOrthogonal_selector +{ + typedef typename ei_plain_matrix_type::type VectorType; + typedef typename ei_traits::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + inline static VectorType run(const Derived& src) + { + VectorType perp(src.size()); + /* Let us compute the crossed product of *this with a vector + * that is not too close to being colinear to *this. + */ + + /* unless the x and y coords are both close to zero, we can + * simply take ( -y, x, 0 ) and normalize it. + */ + if((!ei_isMuchSmallerThan(src.x(), src.z())) + || (!ei_isMuchSmallerThan(src.y(), src.z()))) + { + RealScalar invnm = RealScalar(1)/src.template start<2>().norm(); + perp.coeffRef(0) = -ei_conj(src.y())*invnm; + perp.coeffRef(1) = ei_conj(src.x())*invnm; + perp.coeffRef(2) = 0; + } + /* if both x and y are close to zero, then the vector is close + * to the z-axis, so it's far from colinear to the x-axis for instance. + * So we take the crossed product with (1,0,0) and normalize it. + */ + else + { + RealScalar invnm = RealScalar(1)/src.template end<2>().norm(); + perp.coeffRef(0) = 0; + perp.coeffRef(1) = -ei_conj(src.z())*invnm; + perp.coeffRef(2) = ei_conj(src.y())*invnm; + } + if( (Derived::SizeAtCompileTime!=Dynamic && Derived::SizeAtCompileTime>3) + || (Derived::SizeAtCompileTime==Dynamic && src.size()>3) ) + perp.end(src.size()-3).setZero(); + + return perp; + } +}; + +template +struct ei_unitOrthogonal_selector +{ + typedef typename ei_plain_matrix_type::type VectorType; + inline static VectorType run(const Derived& src) + { return VectorType(-ei_conj(src.y()), ei_conj(src.x())).normalized(); } +}; + +/** \returns a unit vector which is orthogonal to \c *this + * + * The size of \c *this must be at least 2. If the size is exactly 2, + * then the returned vector is a counter clock wise rotation of \c *this, i.e., (-y,x).normalized(). + * + * \sa cross() + */ +template +typename MatrixBase::PlainMatrixType +MatrixBase::unitOrthogonal() const +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + return ei_unitOrthogonal_selector::run(derived()); +} + +#endif // EIGEN_ORTHOMETHODS_H diff --git a/extern/Eigen2/Eigen/src/Geometry/ParametrizedLine.h b/extern/Eigen2/Eigen/src/Geometry/ParametrizedLine.h new file mode 100644 index 00000000000..2b990d084f0 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Geometry/ParametrizedLine.h @@ -0,0 +1,155 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. 
+// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_PARAMETRIZEDLINE_H +#define EIGEN_PARAMETRIZEDLINE_H + +/** \geometry_module \ingroup Geometry_Module + * + * \class ParametrizedLine + * + * \brief A parametrized line + * + * A parametrized line is defined by an origin point \f$ \mathbf{o} \f$ and a unit + * direction vector \f$ \mathbf{d} \f$ such that the line corresponds to + * the set \f$ l(t) = \mathbf{o} + t \mathbf{d} \f$, \f$ l \in \mathbf{R} \f$. + * + * \param _Scalar the scalar type, i.e., the type of the coefficients + * \param _AmbientDim the dimension of the ambient space, can be a compile time value or Dynamic. + */ +template +class ParametrizedLine +{ +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_AmbientDim) + enum { AmbientDimAtCompileTime = _AmbientDim }; + typedef _Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef Matrix VectorType; + + /** Default constructor without initialization */ + inline explicit ParametrizedLine() {} + + /** Constructs a dynamic-size line with \a _dim the dimension + * of the ambient space */ + inline explicit ParametrizedLine(int _dim) : m_origin(_dim), m_direction(_dim) {} + + /** Initializes a parametrized line of direction \a direction and origin \a origin. + * \warning the vector direction is assumed to be normalized. + */ + ParametrizedLine(const VectorType& origin, const VectorType& direction) + : m_origin(origin), m_direction(direction) {} + + explicit ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim>& hyperplane); + + /** Constructs a parametrized line going from \a p0 to \a p1. */ + static inline ParametrizedLine Through(const VectorType& p0, const VectorType& p1) + { return ParametrizedLine(p0, (p1-p0).normalized()); } + + ~ParametrizedLine() {} + + /** \returns the dimension in which the line holds */ + inline int dim() const { return m_direction.size(); } + + const VectorType& origin() const { return m_origin; } + VectorType& origin() { return m_origin; } + + const VectorType& direction() const { return m_direction; } + VectorType& direction() { return m_direction; } + + /** \returns the squared distance of a point \a p to its projection onto the line \c *this. + * \sa distance() + */ + RealScalar squaredDistance(const VectorType& p) const + { + VectorType diff = p-origin(); + return (diff - diff.dot(direction())* direction()).squaredNorm(); + } + /** \returns the distance of a point \a p to its projection onto the line \c *this. 
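+   *
+   * For instance (an illustrative sketch, using Eigen's Vector2f typedef):
+   * \code
+   * ParametrizedLine<float,2> l
+   *     = ParametrizedLine<float,2>::Through(Vector2f(0,0), Vector2f(1,0));
+   * float d = l.distance(Vector2f(0.5f, 2.f));   // == 2, the point lies 2 above the x axis
+   * \endcode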
+ * \sa squaredDistance() + */ + RealScalar distance(const VectorType& p) const { return ei_sqrt(squaredDistance(p)); } + + /** \returns the projection of a point \a p onto the line \c *this. */ + VectorType projection(const VectorType& p) const + { return origin() + (p-origin()).dot(direction()) * direction(); } + + Scalar intersection(const Hyperplane<_Scalar, _AmbientDim>& hyperplane); + + /** \returns \c *this with scalar type casted to \a NewScalarType + * + * Note that if \a NewScalarType is equal to the current scalar type of \c *this + * then this function smartly returns a const reference to \c *this. + */ + template + inline typename ei_cast_return_type >::type cast() const + { + return typename ei_cast_return_type >::type(*this); + } + + /** Copy constructor with scalar type conversion */ + template + inline explicit ParametrizedLine(const ParametrizedLine& other) + { + m_origin = other.origin().template cast(); + m_direction = other.direction().template cast(); + } + + /** \returns \c true if \c *this is approximately equal to \a other, within the precision + * determined by \a prec. + * + * \sa MatrixBase::isApprox() */ + bool isApprox(const ParametrizedLine& other, typename NumTraits::Real prec = precision()) const + { return m_origin.isApprox(other.m_origin, prec) && m_direction.isApprox(other.m_direction, prec); } + +protected: + + VectorType m_origin, m_direction; +}; + +/** Constructs a parametrized line from a 2D hyperplane + * + * \warning the ambient space must have dimension 2 such that the hyperplane actually describes a line + */ +template +inline ParametrizedLine<_Scalar, _AmbientDim>::ParametrizedLine(const Hyperplane<_Scalar, _AmbientDim>& hyperplane) +{ + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(VectorType, 2) + direction() = hyperplane.normal().unitOrthogonal(); + origin() = -hyperplane.normal()*hyperplane.offset(); +} + +/** \returns the parameter value of the intersection between \c *this and the given hyperplane + */ +template +inline _Scalar ParametrizedLine<_Scalar, _AmbientDim>::intersection(const Hyperplane<_Scalar, _AmbientDim>& hyperplane) +{ + return -(hyperplane.offset()+origin().dot(hyperplane.normal())) + /(direction().dot(hyperplane.normal())); +} + +#endif // EIGEN_PARAMETRIZEDLINE_H diff --git a/extern/Eigen2/Eigen/src/Geometry/Quaternion.h b/extern/Eigen2/Eigen/src/Geometry/Quaternion.h new file mode 100644 index 00000000000..3fcbff4e71d --- /dev/null +++ b/extern/Eigen2/Eigen/src/Geometry/Quaternion.h @@ -0,0 +1,521 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_QUATERNION_H +#define EIGEN_QUATERNION_H + +template +struct ei_quaternion_assign_impl; + +/** \geometry_module \ingroup Geometry_Module + * + * \class Quaternion + * + * \brief The quaternion class used to represent 3D orientations and rotations + * + * \param _Scalar the scalar type, i.e., the type of the coefficients + * + * This class represents a quaternion \f$ w+xi+yj+zk \f$ that is a convenient representation of + * orientations and rotations of objects in three dimensions. Compared to other representations + * like Euler angles or 3x3 matrices, quatertions offer the following advantages: + * \li \b compact storage (4 scalars) + * \li \b efficient to compose (28 flops), + * \li \b stable spherical interpolation + * + * The following two typedefs are provided for convenience: + * \li \c Quaternionf for \c float + * \li \c Quaterniond for \c double + * + * \sa class AngleAxis, class Transform + */ + +template struct ei_traits > +{ + typedef _Scalar Scalar; +}; + +template +class Quaternion : public RotationBase,3> +{ + typedef RotationBase,3> Base; + +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,4) + + using Base::operator*; + + /** the scalar type of the coefficients */ + typedef _Scalar Scalar; + + /** the type of the Coefficients 4-vector */ + typedef Matrix Coefficients; + /** the type of a 3D vector */ + typedef Matrix Vector3; + /** the equivalent rotation matrix type */ + typedef Matrix Matrix3; + /** the equivalent angle-axis type */ + typedef AngleAxis AngleAxisType; + + /** \returns the \c x coefficient */ + inline Scalar x() const { return m_coeffs.coeff(0); } + /** \returns the \c y coefficient */ + inline Scalar y() const { return m_coeffs.coeff(1); } + /** \returns the \c z coefficient */ + inline Scalar z() const { return m_coeffs.coeff(2); } + /** \returns the \c w coefficient */ + inline Scalar w() const { return m_coeffs.coeff(3); } + + /** \returns a reference to the \c x coefficient */ + inline Scalar& x() { return m_coeffs.coeffRef(0); } + /** \returns a reference to the \c y coefficient */ + inline Scalar& y() { return m_coeffs.coeffRef(1); } + /** \returns a reference to the \c z coefficient */ + inline Scalar& z() { return m_coeffs.coeffRef(2); } + /** \returns a reference to the \c w coefficient */ + inline Scalar& w() { return m_coeffs.coeffRef(3); } + + /** \returns a read-only vector expression of the imaginary part (x,y,z) */ + inline const Block vec() const { return m_coeffs.template start<3>(); } + + /** \returns a vector expression of the imaginary part (x,y,z) */ + inline Block vec() { return m_coeffs.template start<3>(); } + + /** \returns a read-only vector expression of the coefficients (x,y,z,w) */ + inline const Coefficients& coeffs() const { return m_coeffs; } + + /** \returns a vector expression of the coefficients (x,y,z,w) */ + inline Coefficients& coeffs() { return m_coeffs; } + + /** Default constructor leaving the quaternion uninitialized. */ + inline Quaternion() {} + + /** Constructs and initializes the quaternion \f$ w+xi+yj+zk \f$ from + * its four coefficients \a w, \a x, \a y and \a z. 
+ * + * \warning Note the order of the arguments: the real \a w coefficient first, + * while internally the coefficients are stored in the following order: + * [\c x, \c y, \c z, \c w] + */ + inline Quaternion(Scalar w, Scalar x, Scalar y, Scalar z) + { m_coeffs << x, y, z, w; } + + /** Copy constructor */ + inline Quaternion(const Quaternion& other) { m_coeffs = other.m_coeffs; } + + /** Constructs and initializes a quaternion from the angle-axis \a aa */ + explicit inline Quaternion(const AngleAxisType& aa) { *this = aa; } + + /** Constructs and initializes a quaternion from either: + * - a rotation matrix expression, + * - a 4D vector expression representing quaternion coefficients. + * \sa operator=(MatrixBase) + */ + template + explicit inline Quaternion(const MatrixBase& other) { *this = other; } + + Quaternion& operator=(const Quaternion& other); + Quaternion& operator=(const AngleAxisType& aa); + template + Quaternion& operator=(const MatrixBase& m); + + /** \returns a quaternion representing an identity rotation + * \sa MatrixBase::Identity() + */ + inline static Quaternion Identity() { return Quaternion(1, 0, 0, 0); } + + /** \sa Quaternion::Identity(), MatrixBase::setIdentity() + */ + inline Quaternion& setIdentity() { m_coeffs << 0, 0, 0, 1; return *this; } + + /** \returns the squared norm of the quaternion's coefficients + * \sa Quaternion::norm(), MatrixBase::squaredNorm() + */ + inline Scalar squaredNorm() const { return m_coeffs.squaredNorm(); } + + /** \returns the norm of the quaternion's coefficients + * \sa Quaternion::squaredNorm(), MatrixBase::norm() + */ + inline Scalar norm() const { return m_coeffs.norm(); } + + /** Normalizes the quaternion \c *this + * \sa normalized(), MatrixBase::normalize() */ + inline void normalize() { m_coeffs.normalize(); } + /** \returns a normalized version of \c *this + * \sa normalize(), MatrixBase::normalized() */ + inline Quaternion normalized() const { return Quaternion(m_coeffs.normalized()); } + + /** \returns the dot product of \c *this and \a other + * Geometrically speaking, the dot product of two unit quaternions + * corresponds to the cosine of half the angle between the two rotations. + * \sa angularDistance() + */ + inline Scalar dot(const Quaternion& other) const { return m_coeffs.dot(other.m_coeffs); } + + inline Scalar angularDistance(const Quaternion& other) const; + + Matrix3 toRotationMatrix(void) const; + + template + Quaternion& setFromTwoVectors(const MatrixBase& a, const MatrixBase& b); + + inline Quaternion operator* (const Quaternion& q) const; + inline Quaternion& operator*= (const Quaternion& q); + + Quaternion inverse(void) const; + Quaternion conjugate(void) const; + + Quaternion slerp(Scalar t, const Quaternion& other) const; + + template + Vector3 operator* (const MatrixBase& vec) const; + + /** \returns \c *this with scalar type casted to \a NewScalarType + * + * Note that if \a NewScalarType is equal to the current scalar type of \c *this + * then this function smartly returns a const reference to \c *this. + */ + template + inline typename ei_cast_return_type >::type cast() const + { return typename ei_cast_return_type >::type(*this); } + + /** Copy constructor with scalar type conversion */ + template + inline explicit Quaternion(const Quaternion& other) + { m_coeffs = other.coeffs().template cast(); } + + /** \returns \c true if \c *this is approximately equal to \a other, within the precision + * determined by \a prec. 
+ * + * \sa MatrixBase::isApprox() */ + bool isApprox(const Quaternion& other, typename NumTraits::Real prec = precision()) const + { return m_coeffs.isApprox(other.m_coeffs, prec); } + +protected: + Coefficients m_coeffs; +}; + +/** \ingroup Geometry_Module + * single precision quaternion type */ +typedef Quaternion Quaternionf; +/** \ingroup Geometry_Module + * double precision quaternion type */ +typedef Quaternion Quaterniond; + +// Generic Quaternion * Quaternion product +template inline Quaternion +ei_quaternion_product(const Quaternion& a, const Quaternion& b) +{ + return Quaternion + ( + a.w() * b.w() - a.x() * b.x() - a.y() * b.y() - a.z() * b.z(), + a.w() * b.x() + a.x() * b.w() + a.y() * b.z() - a.z() * b.y(), + a.w() * b.y() + a.y() * b.w() + a.z() * b.x() - a.x() * b.z(), + a.w() * b.z() + a.z() * b.w() + a.x() * b.y() - a.y() * b.x() + ); +} + +#ifdef EIGEN_VECTORIZE_SSE +template<> inline Quaternion +ei_quaternion_product(const Quaternion& _a, const Quaternion& _b) +{ + const __m128 mask = _mm_castsi128_ps(_mm_setr_epi32(0,0,0,0x80000000)); + Quaternion res; + __m128 a = _a.coeffs().packet(0); + __m128 b = _b.coeffs().packet(0); + __m128 flip1 = _mm_xor_ps(_mm_mul_ps(ei_vec4f_swizzle1(a,1,2,0,2), + ei_vec4f_swizzle1(b,2,0,1,2)),mask); + __m128 flip2 = _mm_xor_ps(_mm_mul_ps(ei_vec4f_swizzle1(a,3,3,3,1), + ei_vec4f_swizzle1(b,0,1,2,1)),mask); + ei_pstore(&res.x(), + _mm_add_ps(_mm_sub_ps(_mm_mul_ps(a,ei_vec4f_swizzle1(b,3,3,3,3)), + _mm_mul_ps(ei_vec4f_swizzle1(a,2,0,1,0), + ei_vec4f_swizzle1(b,1,2,0,0))), + _mm_add_ps(flip1,flip2))); + return res; +} +#endif + +/** \returns the concatenation of two rotations as a quaternion-quaternion product */ +template +inline Quaternion Quaternion::operator* (const Quaternion& other) const +{ + return ei_quaternion_product(*this,other); +} + +/** \sa operator*(Quaternion) */ +template +inline Quaternion& Quaternion::operator*= (const Quaternion& other) +{ + return (*this = *this * other); +} + +/** Rotation of a vector by a quaternion. + * \remarks If the quaternion is used to rotate several points (>1) + * then it is much more efficient to first convert it to a 3x3 Matrix. + * Comparison of the operation cost for n transformations: + * - Quaternion: 30n + * - Via a Matrix3: 24 + 15n + */ +template +template +inline typename Quaternion::Vector3 +Quaternion::operator* (const MatrixBase& v) const +{ + // Note that this algorithm comes from the optimization by hand + // of the conversion to a Matrix followed by a Matrix/Vector product. + // It appears to be much faster than the common algorithm found + // in the litterature (30 versus 39 flops). It also requires two + // Vector3 as temporaries. 
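+  // In formula form, for q = (w, u) with u = (x,y,z), the code below evaluates
+  // v' = v + 2*w*(u x v) + 2*u x (u x v), which equals q*v*q^-1 for a unit quaternion q.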
+ Vector3 uv; + uv = 2 * this->vec().cross(v); + return v + this->w() * uv + this->vec().cross(uv); +} + +template +inline Quaternion& Quaternion::operator=(const Quaternion& other) +{ + m_coeffs = other.m_coeffs; + return *this; +} + +/** Set \c *this from an angle-axis \a aa and returns a reference to \c *this + */ +template +inline Quaternion& Quaternion::operator=(const AngleAxisType& aa) +{ + Scalar ha = Scalar(0.5)*aa.angle(); // Scalar(0.5) to suppress precision loss warnings + this->w() = ei_cos(ha); + this->vec() = ei_sin(ha) * aa.axis(); + return *this; +} + +/** Set \c *this from the expression \a xpr: + * - if \a xpr is a 4x1 vector, then \a xpr is assumed to be a quaternion + * - if \a xpr is a 3x3 matrix, then \a xpr is assumed to be rotation matrix + * and \a xpr is converted to a quaternion + */ +template +template +inline Quaternion& Quaternion::operator=(const MatrixBase& xpr) +{ + ei_quaternion_assign_impl::run(*this, xpr.derived()); + return *this; +} + +/** Convert the quaternion to a 3x3 rotation matrix */ +template +inline typename Quaternion::Matrix3 +Quaternion::toRotationMatrix(void) const +{ + // NOTE if inlined, then gcc 4.2 and 4.4 get rid of the temporary (not gcc 4.3 !!) + // if not inlined then the cost of the return by value is huge ~ +35%, + // however, not inlining this function is an order of magnitude slower, so + // it has to be inlined, and so the return by value is not an issue + Matrix3 res; + + const Scalar tx = 2*this->x(); + const Scalar ty = 2*this->y(); + const Scalar tz = 2*this->z(); + const Scalar twx = tx*this->w(); + const Scalar twy = ty*this->w(); + const Scalar twz = tz*this->w(); + const Scalar txx = tx*this->x(); + const Scalar txy = ty*this->x(); + const Scalar txz = tz*this->x(); + const Scalar tyy = ty*this->y(); + const Scalar tyz = tz*this->y(); + const Scalar tzz = tz*this->z(); + + res.coeffRef(0,0) = 1-(tyy+tzz); + res.coeffRef(0,1) = txy-twz; + res.coeffRef(0,2) = txz+twy; + res.coeffRef(1,0) = txy+twz; + res.coeffRef(1,1) = 1-(txx+tzz); + res.coeffRef(1,2) = tyz-twx; + res.coeffRef(2,0) = txz-twy; + res.coeffRef(2,1) = tyz+twx; + res.coeffRef(2,2) = 1-(txx+tyy); + + return res; +} + +/** Sets *this to be a quaternion representing a rotation sending the vector \a a to the vector \a b. + * + * \returns a reference to *this. + * + * Note that the two input vectors do \b not have to be normalized. + */ +template +template +inline Quaternion& Quaternion::setFromTwoVectors(const MatrixBase& a, const MatrixBase& b) +{ + Vector3 v0 = a.normalized(); + Vector3 v1 = b.normalized(); + Scalar c = v0.dot(v1); + + // if dot == 1, vectors are the same + if (ei_isApprox(c,Scalar(1))) + { + // set to identity + this->w() = 1; this->vec().setZero(); + return *this; + } + // if dot == -1, vectors are opposites + if (ei_isApprox(c,Scalar(-1))) + { + this->vec() = v0.unitOrthogonal(); + this->w() = 0; + return *this; + } + + Vector3 axis = v0.cross(v1); + Scalar s = ei_sqrt((Scalar(1)+c)*Scalar(2)); + Scalar invs = Scalar(1)/s; + this->vec() = axis * invs; + this->w() = s * Scalar(0.5); + + return *this; +} + +/** \returns the multiplicative inverse of \c *this + * Note that in most cases, i.e., if you simply want the opposite rotation, + * and/or the quaternion is normalized, then it is enough to use the conjugate. + * + * \sa Quaternion::conjugate() + */ +template +inline Quaternion Quaternion::inverse() const +{ + // FIXME should this function be called multiplicativeInverse and conjugate() be called inverse() or opposite() ?? 
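+  // Mathematically q^-1 = conjugate(q) / squaredNorm(q); for an already normalized
+  // quaternion this reduces to the plain conjugate, as the class documentation notes.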
+ Scalar n2 = this->squaredNorm(); + if (n2 > 0) + return Quaternion(conjugate().coeffs() / n2); + else + { + // return an invalid result to flag the error + return Quaternion(Coefficients::Zero()); + } +} + +/** \returns the conjugate of the \c *this which is equal to the multiplicative inverse + * if the quaternion is normalized. + * The conjugate of a quaternion represents the opposite rotation. + * + * \sa Quaternion::inverse() + */ +template +inline Quaternion Quaternion::conjugate() const +{ + return Quaternion(this->w(),-this->x(),-this->y(),-this->z()); +} + +/** \returns the angle (in radian) between two rotations + * \sa dot() + */ +template +inline Scalar Quaternion::angularDistance(const Quaternion& other) const +{ + double d = ei_abs(this->dot(other)); + if (d>=1.0) + return 0; + return Scalar(2) * std::acos(d); +} + +/** \returns the spherical linear interpolation between the two quaternions + * \c *this and \a other at the parameter \a t + */ +template +Quaternion Quaternion::slerp(Scalar t, const Quaternion& other) const +{ + static const Scalar one = Scalar(1) - precision(); + Scalar d = this->dot(other); + Scalar absD = ei_abs(d); + if (absD>=one) + return *this; + + // theta is the angle between the 2 quaternions + Scalar theta = std::acos(absD); + Scalar sinTheta = ei_sin(theta); + + Scalar scale0 = ei_sin( ( Scalar(1) - t ) * theta) / sinTheta; + Scalar scale1 = ei_sin( ( t * theta) ) / sinTheta; + if (d<0) + scale1 = -scale1; + + return Quaternion(scale0 * m_coeffs + scale1 * other.m_coeffs); +} + +// set from a rotation matrix +template +struct ei_quaternion_assign_impl +{ + typedef typename Other::Scalar Scalar; + inline static void run(Quaternion& q, const Other& mat) + { + // This algorithm comes from "Quaternion Calculus and Fast Animation", + // Ken Shoemake, 1987 SIGGRAPH course notes + Scalar t = mat.trace(); + if (t > 0) + { + t = ei_sqrt(t + Scalar(1.0)); + q.w() = Scalar(0.5)*t; + t = Scalar(0.5)/t; + q.x() = (mat.coeff(2,1) - mat.coeff(1,2)) * t; + q.y() = (mat.coeff(0,2) - mat.coeff(2,0)) * t; + q.z() = (mat.coeff(1,0) - mat.coeff(0,1)) * t; + } + else + { + int i = 0; + if (mat.coeff(1,1) > mat.coeff(0,0)) + i = 1; + if (mat.coeff(2,2) > mat.coeff(i,i)) + i = 2; + int j = (i+1)%3; + int k = (j+1)%3; + + t = ei_sqrt(mat.coeff(i,i)-mat.coeff(j,j)-mat.coeff(k,k) + Scalar(1.0)); + q.coeffs().coeffRef(i) = Scalar(0.5) * t; + t = Scalar(0.5)/t; + q.w() = (mat.coeff(k,j)-mat.coeff(j,k))*t; + q.coeffs().coeffRef(j) = (mat.coeff(j,i)+mat.coeff(i,j))*t; + q.coeffs().coeffRef(k) = (mat.coeff(k,i)+mat.coeff(i,k))*t; + } + } +}; + +// set from a vector of coefficients assumed to be a quaternion +template +struct ei_quaternion_assign_impl +{ + typedef typename Other::Scalar Scalar; + inline static void run(Quaternion& q, const Other& vec) + { + q.coeffs() = vec; + } +}; + +#endif // EIGEN_QUATERNION_H diff --git a/extern/Eigen2/Eigen/src/Geometry/Rotation2D.h b/extern/Eigen2/Eigen/src/Geometry/Rotation2D.h new file mode 100644 index 00000000000..dca7f06bf5d --- /dev/null +++ b/extern/Eigen2/Eigen/src/Geometry/Rotation2D.h @@ -0,0 +1,159 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. 
+// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_ROTATION2D_H +#define EIGEN_ROTATION2D_H + +/** \geometry_module \ingroup Geometry_Module + * + * \class Rotation2D + * + * \brief Represents a rotation/orientation in a 2 dimensional space. + * + * \param _Scalar the scalar type, i.e., the type of the coefficients + * + * This class is equivalent to a single scalar representing a counter clock wise rotation + * as a single angle in radian. It provides some additional features such as the automatic + * conversion from/to a 2x2 rotation matrix. Moreover this class aims to provide a similar + * interface to Quaternion in order to facilitate the writing of generic algorithms + * dealing with rotations. + * + * \sa class Quaternion, class Transform + */ +template struct ei_traits > +{ + typedef _Scalar Scalar; +}; + +template +class Rotation2D : public RotationBase,2> +{ + typedef RotationBase,2> Base; + +public: + + using Base::operator*; + + enum { Dim = 2 }; + /** the scalar type of the coefficients */ + typedef _Scalar Scalar; + typedef Matrix Vector2; + typedef Matrix Matrix2; + +protected: + + Scalar m_angle; + +public: + + /** Construct a 2D counter clock wise rotation from the angle \a a in radian. */ + inline Rotation2D(Scalar a) : m_angle(a) {} + + /** \returns the rotation angle */ + inline Scalar angle() const { return m_angle; } + + /** \returns a read-write reference to the rotation angle */ + inline Scalar& angle() { return m_angle; } + + /** \returns the inverse rotation */ + inline Rotation2D inverse() const { return -m_angle; } + + /** Concatenates two rotations */ + inline Rotation2D operator*(const Rotation2D& other) const + { return m_angle + other.m_angle; } + + /** Concatenates two rotations */ + inline Rotation2D& operator*=(const Rotation2D& other) + { return m_angle += other.m_angle; return *this; } + + /** Applies the rotation to a 2D vector */ + Vector2 operator* (const Vector2& vec) const + { return toRotationMatrix() * vec; } + + template + Rotation2D& fromRotationMatrix(const MatrixBase& m); + Matrix2 toRotationMatrix(void) const; + + /** \returns the spherical interpolation between \c *this and \a other using + * parameter \a t. It is in fact equivalent to a linear interpolation. + */ + inline Rotation2D slerp(Scalar t, const Rotation2D& other) const + { return m_angle * (1-t) + other.angle() * t; } + + /** \returns \c *this with scalar type casted to \a NewScalarType + * + * Note that if \a NewScalarType is equal to the current scalar type of \c *this + * then this function smartly returns a const reference to \c *this. 
+ */ + template + inline typename ei_cast_return_type >::type cast() const + { return typename ei_cast_return_type >::type(*this); } + + /** Copy constructor with scalar type conversion */ + template + inline explicit Rotation2D(const Rotation2D& other) + { + m_angle = Scalar(other.angle()); + } + + /** \returns \c true if \c *this is approximately equal to \a other, within the precision + * determined by \a prec. + * + * \sa MatrixBase::isApprox() */ + bool isApprox(const Rotation2D& other, typename NumTraits::Real prec = precision()) const + { return ei_isApprox(m_angle,other.m_angle, prec); } +}; + +/** \ingroup Geometry_Module + * single precision 2D rotation type */ +typedef Rotation2D Rotation2Df; +/** \ingroup Geometry_Module + * double precision 2D rotation type */ +typedef Rotation2D Rotation2Dd; + +/** Set \c *this from a 2x2 rotation matrix \a mat. + * In other words, this function extract the rotation angle + * from the rotation matrix. + */ +template +template +Rotation2D& Rotation2D::fromRotationMatrix(const MatrixBase& mat) +{ + EIGEN_STATIC_ASSERT(Derived::RowsAtCompileTime==2 && Derived::ColsAtCompileTime==2,YOU_MADE_A_PROGRAMMING_MISTAKE) + m_angle = ei_atan2(mat.coeff(1,0), mat.coeff(0,0)); + return *this; +} + +/** Constructs and \returns an equivalent 2x2 rotation matrix. + */ +template +typename Rotation2D::Matrix2 +Rotation2D::toRotationMatrix(void) const +{ + Scalar sinA = ei_sin(m_angle); + Scalar cosA = ei_cos(m_angle); + return (Matrix2() << cosA, -sinA, sinA, cosA).finished(); +} + +#endif // EIGEN_ROTATION2D_H diff --git a/extern/Eigen2/Eigen/src/Geometry/RotationBase.h b/extern/Eigen2/Eigen/src/Geometry/RotationBase.h new file mode 100644 index 00000000000..5fec0f18d72 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Geometry/RotationBase.h @@ -0,0 +1,137 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_ROTATIONBASE_H +#define EIGEN_ROTATIONBASE_H + +// this file aims to contains the various representations of rotation/orientation +// in 2D and 3D space excepted Matrix and Quaternion. 
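+//
+// The rotation types of this module share the RotationBase interface; an illustrative
+// sketch with the 2D case (Matrix2f and the Rotation2Df typedef defined in the previous
+// file, angle values arbitrary):
+//   Rotation2Df r1(0.25f * 3.14159265f);    // 45 degree counter-clockwise rotation
+//   Rotation2Df r2 = r1 * r1;               // composing 2D rotations just adds the angles
+//   Matrix2f    m  = r2.toRotationMatrix();
+//   Rotation2Df r3(0);
+//   r3.fromRotationMatrix(m);               // recovers the pi/2 angle via atan2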
+ +/** \class RotationBase + * + * \brief Common base class for compact rotation representations + * + * \param Derived is the derived type, i.e., a rotation type + * \param _Dim the dimension of the space + */ +template +class RotationBase +{ + public: + enum { Dim = _Dim }; + /** the scalar type of the coefficients */ + typedef typename ei_traits::Scalar Scalar; + + /** corresponding linear transformation matrix type */ + typedef Matrix RotationMatrixType; + + inline const Derived& derived() const { return *static_cast(this); } + inline Derived& derived() { return *static_cast(this); } + + /** \returns an equivalent rotation matrix */ + inline RotationMatrixType toRotationMatrix() const { return derived().toRotationMatrix(); } + + /** \returns the inverse rotation */ + inline Derived inverse() const { return derived().inverse(); } + + /** \returns the concatenation of the rotation \c *this with a translation \a t */ + inline Transform operator*(const Translation& t) const + { return toRotationMatrix() * t; } + + /** \returns the concatenation of the rotation \c *this with a scaling \a s */ + inline RotationMatrixType operator*(const Scaling& s) const + { return toRotationMatrix() * s; } + + /** \returns the concatenation of the rotation \c *this with an affine transformation \a t */ + inline Transform operator*(const Transform& t) const + { return toRotationMatrix() * t; } +}; + +/** \geometry_module + * + * Constructs a Dim x Dim rotation matrix from the rotation \a r + */ +template +template +Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols> +::Matrix(const RotationBase& r) +{ + EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix,int(OtherDerived::Dim),int(OtherDerived::Dim)) + *this = r.toRotationMatrix(); +} + +/** \geometry_module + * + * Set a Dim x Dim rotation matrix from the rotation \a r + */ +template +template +Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols>& +Matrix<_Scalar, _Rows, _Cols, _Storage, _MaxRows, _MaxCols> +::operator=(const RotationBase& r) +{ + EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix,int(OtherDerived::Dim),int(OtherDerived::Dim)) + return *this = r.toRotationMatrix(); +} + +/** \internal + * + * Helper function to return an arbitrary rotation object to a rotation matrix. + * + * \param Scalar the numeric type of the matrix coefficients + * \param Dim the dimension of the current space + * + * It returns a Dim x Dim fixed size matrix. + * + * Default specializations are provided for: + * - any scalar type (2D), + * - any matrix expression, + * - any type based on RotationBase (e.g., Quaternion, AngleAxis, Rotation2D) + * + * Currently ei_toRotationMatrix is only used by Transform. 
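+ *
+ * Editorial sketch (illustration only): thanks to this helper, the same
+ * Transform::rotate() call accepts several rotation representations, e.g.
+ * \code
+ * Transform2f t2;  t2.setIdentity();
+ * t2.rotate(0.5f);                                 // a plain angle in radian (2D case)
+ * Transform3f t3;  t3.setIdentity();
+ * t3.rotate(AngleAxisf(0.5f, Vector3f::UnitZ()));  // any RotationBase derived object
+ * t3.rotate(Matrix3f::Identity());                 // or a Dim x Dim matrix expression
+ * \endcode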
+ * + * \sa class Transform, class Rotation2D, class Quaternion, class AngleAxis + */ +template +inline static Matrix ei_toRotationMatrix(const Scalar& s) +{ + EIGEN_STATIC_ASSERT(Dim==2,YOU_MADE_A_PROGRAMMING_MISTAKE) + return Rotation2D(s).toRotationMatrix(); +} + +template +inline static Matrix ei_toRotationMatrix(const RotationBase& r) +{ + return r.toRotationMatrix(); +} + +template +inline static const MatrixBase& ei_toRotationMatrix(const MatrixBase& mat) +{ + EIGEN_STATIC_ASSERT(OtherDerived::RowsAtCompileTime==Dim && OtherDerived::ColsAtCompileTime==Dim, + YOU_MADE_A_PROGRAMMING_MISTAKE) + return mat; +} + +#endif // EIGEN_ROTATIONBASE_H diff --git a/extern/Eigen2/Eigen/src/Geometry/Scaling.h b/extern/Eigen2/Eigen/src/Geometry/Scaling.h new file mode 100644 index 00000000000..5daf0a49961 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Geometry/Scaling.h @@ -0,0 +1,181 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_SCALING_H +#define EIGEN_SCALING_H + +/** \geometry_module \ingroup Geometry_Module + * + * \class Scaling + * + * \brief Represents a possibly non uniform scaling transformation + * + * \param _Scalar the scalar type, i.e., the type of the coefficients. + * \param _Dim the dimension of the space, can be a compile time value or Dynamic + * + * \note This class is not aimed to be used to store a scaling transformation, + * but rather to make easier the constructions and updates of Transform objects. + * + * \sa class Translation, class Transform + */ +template +class Scaling +{ +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim) + /** dimension of the space */ + enum { Dim = _Dim }; + /** the scalar type of the coefficients */ + typedef _Scalar Scalar; + /** corresponding vector type */ + typedef Matrix VectorType; + /** corresponding linear transformation matrix type */ + typedef Matrix LinearMatrixType; + /** corresponding translation type */ + typedef Translation TranslationType; + /** corresponding affine transformation type */ + typedef Transform TransformType; + +protected: + + VectorType m_coeffs; + +public: + + /** Default constructor without initialization. 
*/ + Scaling() {} + /** Constructs and initialize a uniform scaling transformation */ + explicit inline Scaling(const Scalar& s) { m_coeffs.setConstant(s); } + /** 2D only */ + inline Scaling(const Scalar& sx, const Scalar& sy) + { + ei_assert(Dim==2); + m_coeffs.x() = sx; + m_coeffs.y() = sy; + } + /** 3D only */ + inline Scaling(const Scalar& sx, const Scalar& sy, const Scalar& sz) + { + ei_assert(Dim==3); + m_coeffs.x() = sx; + m_coeffs.y() = sy; + m_coeffs.z() = sz; + } + /** Constructs and initialize the scaling transformation from a vector of scaling coefficients */ + explicit inline Scaling(const VectorType& coeffs) : m_coeffs(coeffs) {} + + const VectorType& coeffs() const { return m_coeffs; } + VectorType& coeffs() { return m_coeffs; } + + /** Concatenates two scaling */ + inline Scaling operator* (const Scaling& other) const + { return Scaling(coeffs().cwise() * other.coeffs()); } + + /** Concatenates a scaling and a translation */ + inline TransformType operator* (const TranslationType& t) const; + + /** Concatenates a scaling and an affine transformation */ + inline TransformType operator* (const TransformType& t) const; + + /** Concatenates a scaling and a linear transformation matrix */ + // TODO returns an expression + inline LinearMatrixType operator* (const LinearMatrixType& other) const + { return coeffs().asDiagonal() * other; } + + /** Concatenates a linear transformation matrix and a scaling */ + // TODO returns an expression + friend inline LinearMatrixType operator* (const LinearMatrixType& other, const Scaling& s) + { return other * s.coeffs().asDiagonal(); } + + template + inline LinearMatrixType operator*(const RotationBase& r) const + { return *this * r.toRotationMatrix(); } + + /** Applies scaling to vector */ + inline VectorType operator* (const VectorType& other) const + { return coeffs().asDiagonal() * other; } + + /** \returns the inverse scaling */ + inline Scaling inverse() const + { return Scaling(coeffs().cwise().inverse()); } + + inline Scaling& operator=(const Scaling& other) + { + m_coeffs = other.m_coeffs; + return *this; + } + + /** \returns \c *this with scalar type casted to \a NewScalarType + * + * Note that if \a NewScalarType is equal to the current scalar type of \c *this + * then this function smartly returns a const reference to \c *this. + */ + template + inline typename ei_cast_return_type >::type cast() const + { return typename ei_cast_return_type >::type(*this); } + + /** Copy constructor with scalar type conversion */ + template + inline explicit Scaling(const Scaling& other) + { m_coeffs = other.coeffs().template cast(); } + + /** \returns \c true if \c *this is approximately equal to \a other, within the precision + * determined by \a prec. 
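+ *
+ * A short editorial example (illustration only); the tolerance below is an
+ * arbitrary choice:
+ * \code
+ * Scaling3f s1(1.f, 2.f, 3.f);
+ * Scaling3f s2(1.f, 2.f, 3.000001f);
+ * bool close = s1.isApprox(s2, 1e-4f);   // compares the coefficient vectors
+ * \endcode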
+ * + * \sa MatrixBase::isApprox() */ + bool isApprox(const Scaling& other, typename NumTraits::Real prec = precision()) const + { return m_coeffs.isApprox(other.m_coeffs, prec); } + +}; + +/** \addtogroup Geometry_Module */ +//@{ +typedef Scaling Scaling2f; +typedef Scaling Scaling2d; +typedef Scaling Scaling3f; +typedef Scaling Scaling3d; +//@} + +template +inline typename Scaling::TransformType +Scaling::operator* (const TranslationType& t) const +{ + TransformType res; + res.matrix().setZero(); + res.linear().diagonal() = coeffs(); + res.translation() = m_coeffs.cwise() * t.vector(); + res(Dim,Dim) = Scalar(1); + return res; +} + +template +inline typename Scaling::TransformType +Scaling::operator* (const TransformType& t) const +{ + TransformType res = t; + res.prescale(m_coeffs); + return res; +} + +#endif // EIGEN_SCALING_H diff --git a/extern/Eigen2/Eigen/src/Geometry/Transform.h b/extern/Eigen2/Eigen/src/Geometry/Transform.h new file mode 100644 index 00000000000..8425a1cd963 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Geometry/Transform.h @@ -0,0 +1,785 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2009 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_TRANSFORM_H +#define EIGEN_TRANSFORM_H + +/** Represents some traits of a transformation */ +enum TransformTraits { + Isometry, ///< the transformation is a concatenation of translations and rotations + Affine, ///< the transformation is affine (linear transformation + translation) + Projective ///< the transformation might not be affine +}; + +// Note that we have to pass Dim and HDim because it is not allowed to use a template +// parameter to define a template specialization. To be more precise, in the following +// specializations, it is not allowed to use Dim+1 instead of HDim. +template< typename Other, + int Dim, + int HDim, + int OtherRows=Other::RowsAtCompileTime, + int OtherCols=Other::ColsAtCompileTime> +struct ei_transform_product_impl; + +/** \geometry_module \ingroup Geometry_Module + * + * \class Transform + * + * \brief Represents an homogeneous transformation in a N dimensional space + * + * \param _Scalar the scalar type, i.e., the type of the coefficients + * \param _Dim the dimension of the space + * + * The homography is internally represented and stored as a (Dim+1)^2 matrix which + * is available through the matrix() method. 
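+ *
+ * Editorial usage sketch (illustration only, not upstream documentation); the
+ * concrete values and the float scalar type are arbitrary:
+ * \code
+ * Transform3f t;
+ * t.setIdentity();
+ * t.translate(Vector3f(1.f, 2.f, 3.f));
+ * t.rotate(AngleAxisf(0.5f, Vector3f::UnitZ()));
+ * t.scale(2.f);
+ * Vector3f p = t * Vector3f::UnitX();   // apply the whole transformation to a point
+ * \endcode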
+ * + * Conversion methods from/to Qt's QMatrix and QTransform are available if the + * preprocessor token EIGEN_QT_SUPPORT is defined. + * + * \sa class Matrix, class Quaternion + */ +template +class Transform +{ +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim==Dynamic ? Dynamic : (_Dim+1)*(_Dim+1)) + enum { + Dim = _Dim, ///< space dimension in which the transformation holds + HDim = _Dim+1 ///< size of a respective homogeneous vector + }; + /** the scalar type of the coefficients */ + typedef _Scalar Scalar; + /** type of the matrix used to represent the transformation */ + typedef Matrix MatrixType; + /** type of the matrix used to represent the linear part of the transformation */ + typedef Matrix LinearMatrixType; + /** type of read/write reference to the linear part of the transformation */ + typedef Block LinearPart; + /** type of a vector */ + typedef Matrix VectorType; + /** type of a read/write reference to the translation part of the rotation */ + typedef Block TranslationPart; + /** corresponding translation type */ + typedef Translation TranslationType; + /** corresponding scaling transformation type */ + typedef Scaling ScalingType; + +protected: + + MatrixType m_matrix; + +public: + + /** Default constructor without initialization of the coefficients. */ + inline Transform() { } + + inline Transform(const Transform& other) + { + m_matrix = other.m_matrix; + } + + inline explicit Transform(const TranslationType& t) { *this = t; } + inline explicit Transform(const ScalingType& s) { *this = s; } + template + inline explicit Transform(const RotationBase& r) { *this = r; } + + inline Transform& operator=(const Transform& other) + { m_matrix = other.m_matrix; return *this; } + + template // MSVC 2005 will commit suicide if BigMatrix has a default value + struct construct_from_matrix + { + static inline void run(Transform *transform, const MatrixBase& other) + { + transform->matrix() = other; + } + }; + + template struct construct_from_matrix + { + static inline void run(Transform *transform, const MatrixBase& other) + { + transform->linear() = other; + transform->translation().setZero(); + transform->matrix()(Dim,Dim) = Scalar(1); + transform->matrix().template block<1,Dim>(Dim,0).setZero(); + } + }; + + /** Constructs and initializes a transformation from a Dim^2 or a (Dim+1)^2 matrix. */ + template + inline explicit Transform(const MatrixBase& other) + { + construct_from_matrix::run(this, other); + } + + /** Set \c *this from a (Dim+1)^2 matrix. 
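+ *
+ * An editorial example (illustration only), assuming the assigned matrix
+ * really is a valid homogeneous transformation:
+ * \code
+ * Matrix4f m = Matrix4f::Identity();
+ * Transform3f t;
+ * t = m;           // copies m into t.matrix()
+ * \endcode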
*/ + template + inline Transform& operator=(const MatrixBase& other) + { m_matrix = other; return *this; } + + #ifdef EIGEN_QT_SUPPORT + inline Transform(const QMatrix& other); + inline Transform& operator=(const QMatrix& other); + inline QMatrix toQMatrix(void) const; + inline Transform(const QTransform& other); + inline Transform& operator=(const QTransform& other); + inline QTransform toQTransform(void) const; + #endif + + /** shortcut for m_matrix(row,col); + * \sa MatrixBase::operaror(int,int) const */ + inline Scalar operator() (int row, int col) const { return m_matrix(row,col); } + /** shortcut for m_matrix(row,col); + * \sa MatrixBase::operaror(int,int) */ + inline Scalar& operator() (int row, int col) { return m_matrix(row,col); } + + /** \returns a read-only expression of the transformation matrix */ + inline const MatrixType& matrix() const { return m_matrix; } + /** \returns a writable expression of the transformation matrix */ + inline MatrixType& matrix() { return m_matrix; } + + /** \returns a read-only expression of the linear (linear) part of the transformation */ + inline const LinearPart linear() const { return m_matrix.template block(0,0); } + /** \returns a writable expression of the linear (linear) part of the transformation */ + inline LinearPart linear() { return m_matrix.template block(0,0); } + + /** \returns a read-only expression of the translation vector of the transformation */ + inline const TranslationPart translation() const { return m_matrix.template block(0,Dim); } + /** \returns a writable expression of the translation vector of the transformation */ + inline TranslationPart translation() { return m_matrix.template block(0,Dim); } + + /** \returns an expression of the product between the transform \c *this and a matrix expression \a other + * + * The right hand side \a other might be either: + * \li a vector of size Dim, + * \li an homogeneous vector of size Dim+1, + * \li a transformation matrix of size Dim+1 x Dim+1. + */ + // note: this function is defined here because some compilers cannot find the respective declaration + template + inline const typename ei_transform_product_impl::ResultType + operator * (const MatrixBase &other) const + { return ei_transform_product_impl::run(*this,other.derived()); } + + /** \returns the product expression of a transformation matrix \a a times a transform \a b + * The transformation matrix \a a must have a Dim+1 x Dim+1 sizes. 
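+ *
+ * Editorial sketch (illustration only):
+ * \code
+ * Transform3f t;  t.setIdentity();
+ * Matrix4f m = Matrix4f::Identity();
+ * Matrix4f r = m * t;    // plain (Dim+1)x(Dim+1) matrix times transform
+ * \endcode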
*/ + template + friend inline const typename ProductReturnType::Type + operator * (const MatrixBase &a, const Transform &b) + { return a.derived() * b.matrix(); } + + /** Contatenates two transformations */ + inline const Transform + operator * (const Transform& other) const + { return Transform(m_matrix * other.matrix()); } + + /** \sa MatrixBase::setIdentity() */ + void setIdentity() { m_matrix.setIdentity(); } + static const typename MatrixType::IdentityReturnType Identity() + { + return MatrixType::Identity(); + } + + template + inline Transform& scale(const MatrixBase &other); + + template + inline Transform& prescale(const MatrixBase &other); + + inline Transform& scale(Scalar s); + inline Transform& prescale(Scalar s); + + template + inline Transform& translate(const MatrixBase &other); + + template + inline Transform& pretranslate(const MatrixBase &other); + + template + inline Transform& rotate(const RotationType& rotation); + + template + inline Transform& prerotate(const RotationType& rotation); + + Transform& shear(Scalar sx, Scalar sy); + Transform& preshear(Scalar sx, Scalar sy); + + inline Transform& operator=(const TranslationType& t); + inline Transform& operator*=(const TranslationType& t) { return translate(t.vector()); } + inline Transform operator*(const TranslationType& t) const; + + inline Transform& operator=(const ScalingType& t); + inline Transform& operator*=(const ScalingType& s) { return scale(s.coeffs()); } + inline Transform operator*(const ScalingType& s) const; + friend inline Transform operator*(const LinearMatrixType& mat, const Transform& t) + { + Transform res = t; + res.matrix().row(Dim) = t.matrix().row(Dim); + res.matrix().template block(0,0) = (mat * t.matrix().template block(0,0)).lazy(); + return res; + } + + template + inline Transform& operator=(const RotationBase& r); + template + inline Transform& operator*=(const RotationBase& r) { return rotate(r.toRotationMatrix()); } + template + inline Transform operator*(const RotationBase& r) const; + + LinearMatrixType rotation() const; + template + void computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const; + template + void computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const; + + template + Transform& fromPositionOrientationScale(const MatrixBase &position, + const OrientationType& orientation, const MatrixBase &scale); + + inline const MatrixType inverse(TransformTraits traits = Affine) const; + + /** \returns a const pointer to the column major internal matrix */ + const Scalar* data() const { return m_matrix.data(); } + /** \returns a non-const pointer to the column major internal matrix */ + Scalar* data() { return m_matrix.data(); } + + /** \returns \c *this with scalar type casted to \a NewScalarType + * + * Note that if \a NewScalarType is equal to the current scalar type of \c *this + * then this function smartly returns a const reference to \c *this. + */ + template + inline typename ei_cast_return_type >::type cast() const + { return typename ei_cast_return_type >::type(*this); } + + /** Copy constructor with scalar type conversion */ + template + inline explicit Transform(const Transform& other) + { m_matrix = other.matrix().template cast(); } + + /** \returns \c true if \c *this is approximately equal to \a other, within the precision + * determined by \a prec. 
+ * + * \sa MatrixBase::isApprox() */ + bool isApprox(const Transform& other, typename NumTraits::Real prec = precision()) const + { return m_matrix.isApprox(other.m_matrix, prec); } + + #ifdef EIGEN_TRANSFORM_PLUGIN + #include EIGEN_TRANSFORM_PLUGIN + #endif + +protected: + +}; + +/** \ingroup Geometry_Module */ +typedef Transform Transform2f; +/** \ingroup Geometry_Module */ +typedef Transform Transform3f; +/** \ingroup Geometry_Module */ +typedef Transform Transform2d; +/** \ingroup Geometry_Module */ +typedef Transform Transform3d; + +/************************** +*** Optional QT support *** +**************************/ + +#ifdef EIGEN_QT_SUPPORT +/** Initialises \c *this from a QMatrix assuming the dimension is 2. + * + * This function is available only if the token EIGEN_QT_SUPPORT is defined. + */ +template +Transform::Transform(const QMatrix& other) +{ + *this = other; +} + +/** Set \c *this from a QMatrix assuming the dimension is 2. + * + * This function is available only if the token EIGEN_QT_SUPPORT is defined. + */ +template +Transform& Transform::operator=(const QMatrix& other) +{ + EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE) + m_matrix << other.m11(), other.m21(), other.dx(), + other.m12(), other.m22(), other.dy(), + 0, 0, 1; + return *this; +} + +/** \returns a QMatrix from \c *this assuming the dimension is 2. + * + * \warning this convertion might loss data if \c *this is not affine + * + * This function is available only if the token EIGEN_QT_SUPPORT is defined. + */ +template +QMatrix Transform::toQMatrix(void) const +{ + EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE) + return QMatrix(m_matrix.coeff(0,0), m_matrix.coeff(1,0), + m_matrix.coeff(0,1), m_matrix.coeff(1,1), + m_matrix.coeff(0,2), m_matrix.coeff(1,2)); +} + +/** Initialises \c *this from a QTransform assuming the dimension is 2. + * + * This function is available only if the token EIGEN_QT_SUPPORT is defined. + */ +template +Transform::Transform(const QTransform& other) +{ + *this = other; +} + +/** Set \c *this from a QTransform assuming the dimension is 2. + * + * This function is available only if the token EIGEN_QT_SUPPORT is defined. + */ +template +Transform& Transform::operator=(const QTransform& other) +{ + EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE) + m_matrix << other.m11(), other.m21(), other.dx(), + other.m12(), other.m22(), other.dy(), + other.m13(), other.m23(), other.m33(); + return *this; +} + +/** \returns a QTransform from \c *this assuming the dimension is 2. + * + * This function is available only if the token EIGEN_QT_SUPPORT is defined. + */ +template +QTransform Transform::toQTransform(void) const +{ + EIGEN_STATIC_ASSERT(Dim==2, YOU_MADE_A_PROGRAMMING_MISTAKE) + return QTransform(m_matrix.coeff(0,0), m_matrix.coeff(1,0), m_matrix.coeff(2,0), + m_matrix.coeff(0,1), m_matrix.coeff(1,1), m_matrix.coeff(2,1), + m_matrix.coeff(0,2), m_matrix.coeff(1,2), m_matrix.coeff(2,2)); +} +#endif + +/********************* +*** Procedural API *** +*********************/ + +/** Applies on the right the non uniform scale transformation represented + * by the vector \a other to \c *this and returns a reference to \c *this. 
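+ *
+ * Editorial sketch (illustration only) of a non uniform scale applied on the
+ * right:
+ * \code
+ * Transform3f t;  t.setIdentity();
+ * t.scale(Vector3f(1.f, 2.f, 4.f));   // scale x by 1, y by 2 and z by 4
+ * \endcode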
+ * \sa prescale() + */ +template +template +Transform& +Transform::scale(const MatrixBase &other) +{ + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim)) + linear() = (linear() * other.asDiagonal()).lazy(); + return *this; +} + +/** Applies on the right a uniform scale of a factor \a c to \c *this + * and returns a reference to \c *this. + * \sa prescale(Scalar) + */ +template +inline Transform& Transform::scale(Scalar s) +{ + linear() *= s; + return *this; +} + +/** Applies on the left the non uniform scale transformation represented + * by the vector \a other to \c *this and returns a reference to \c *this. + * \sa scale() + */ +template +template +Transform& +Transform::prescale(const MatrixBase &other) +{ + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim)) + m_matrix.template block(0,0) = (other.asDiagonal() * m_matrix.template block(0,0)).lazy(); + return *this; +} + +/** Applies on the left a uniform scale of a factor \a c to \c *this + * and returns a reference to \c *this. + * \sa scale(Scalar) + */ +template +inline Transform& Transform::prescale(Scalar s) +{ + m_matrix.template corner(TopLeft) *= s; + return *this; +} + +/** Applies on the right the translation matrix represented by the vector \a other + * to \c *this and returns a reference to \c *this. + * \sa pretranslate() + */ +template +template +Transform& +Transform::translate(const MatrixBase &other) +{ + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim)) + translation() += linear() * other; + return *this; +} + +/** Applies on the left the translation matrix represented by the vector \a other + * to \c *this and returns a reference to \c *this. + * \sa translate() + */ +template +template +Transform& +Transform::pretranslate(const MatrixBase &other) +{ + EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim)) + translation() += other; + return *this; +} + +/** Applies on the right the rotation represented by the rotation \a rotation + * to \c *this and returns a reference to \c *this. + * + * The template parameter \a RotationType is the type of the rotation which + * must be known by ei_toRotationMatrix<>. + * + * Natively supported types includes: + * - any scalar (2D), + * - a Dim x Dim matrix expression, + * - a Quaternion (3D), + * - a AngleAxis (3D) + * + * This mechanism is easily extendable to support user types such as Euler angles, + * or a pair of Quaternion for 4D rotations. + * + * \sa rotate(Scalar), class Quaternion, class AngleAxis, prerotate(RotationType) + */ +template +template +Transform& +Transform::rotate(const RotationType& rotation) +{ + linear() *= ei_toRotationMatrix(rotation); + return *this; +} + +/** Applies on the left the rotation represented by the rotation \a rotation + * to \c *this and returns a reference to \c *this. + * + * See rotate() for further details. + * + * \sa rotate() + */ +template +template +Transform& +Transform::prerotate(const RotationType& rotation) +{ + m_matrix.template block(0,0) = ei_toRotationMatrix(rotation) + * m_matrix.template block(0,0); + return *this; +} + +/** Applies on the right the shear transformation represented + * by the vector \a other to \c *this and returns a reference to \c *this. + * \warning 2D only. 
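+ *
+ * Editorial sketch (illustration only); shear() is only available on 2D
+ * transforms:
+ * \code
+ * Transform2f t;  t.setIdentity();
+ * t.shear(0.5f, 0.f);   // apply a shear with coefficients sx = 0.5 and sy = 0
+ * \endcode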
+ * \sa preshear() + */ +template +Transform& +Transform::shear(Scalar sx, Scalar sy) +{ + EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE) + VectorType tmp = linear().col(0)*sy + linear().col(1); + linear() << linear().col(0) + linear().col(1)*sx, tmp; + return *this; +} + +/** Applies on the left the shear transformation represented + * by the vector \a other to \c *this and returns a reference to \c *this. + * \warning 2D only. + * \sa shear() + */ +template +Transform& +Transform::preshear(Scalar sx, Scalar sy) +{ + EIGEN_STATIC_ASSERT(int(Dim)==2, YOU_MADE_A_PROGRAMMING_MISTAKE) + m_matrix.template block(0,0) = LinearMatrixType(1, sx, sy, 1) * m_matrix.template block(0,0); + return *this; +} + +/****************************************************** +*** Scaling, Translation and Rotation compatibility *** +******************************************************/ + +template +inline Transform& Transform::operator=(const TranslationType& t) +{ + linear().setIdentity(); + translation() = t.vector(); + m_matrix.template block<1,Dim>(Dim,0).setZero(); + m_matrix(Dim,Dim) = Scalar(1); + return *this; +} + +template +inline Transform Transform::operator*(const TranslationType& t) const +{ + Transform res = *this; + res.translate(t.vector()); + return res; +} + +template +inline Transform& Transform::operator=(const ScalingType& s) +{ + m_matrix.setZero(); + linear().diagonal() = s.coeffs(); + m_matrix.coeffRef(Dim,Dim) = Scalar(1); + return *this; +} + +template +inline Transform Transform::operator*(const ScalingType& s) const +{ + Transform res = *this; + res.scale(s.coeffs()); + return res; +} + +template +template +inline Transform& Transform::operator=(const RotationBase& r) +{ + linear() = ei_toRotationMatrix(r); + translation().setZero(); + m_matrix.template block<1,Dim>(Dim,0).setZero(); + m_matrix.coeffRef(Dim,Dim) = Scalar(1); + return *this; +} + +template +template +inline Transform Transform::operator*(const RotationBase& r) const +{ + Transform res = *this; + res.rotate(r.derived()); + return res; +} + +/************************ +*** Special functions *** +************************/ + +/** \returns the rotation part of the transformation + * \nonstableyet + * + * \svd_module + * + * \sa computeRotationScaling(), computeScalingRotation(), class SVD + */ +template +typename Transform::LinearMatrixType +Transform::rotation() const +{ + LinearMatrixType result; + computeRotationScaling(&result, (LinearMatrixType*)0); + return result; +} + + +/** decomposes the linear part of the transformation as a product rotation x scaling, the scaling being + * not necessarily positive. + * + * If either pointer is zero, the corresponding computation is skipped. + * + * \nonstableyet + * + * \svd_module + * + * \sa computeScalingRotation(), rotation(), class SVD + */ +template +template +void Transform::computeRotationScaling(RotationMatrixType *rotation, ScalingMatrixType *scaling) const +{ + linear().svd().computeRotationScaling(rotation, scaling); +} + +/** decomposes the linear part of the transformation as a product rotation x scaling, the scaling being + * not necessarily positive. + * + * If either pointer is zero, the corresponding computation is skipped. 
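+ *
+ * Editorial sketch (illustration only); note that the decomposition relies on
+ * the SVD module being available:
+ * \code
+ * Transform3f t;  t.setIdentity();     // some affine transform
+ * Matrix3f rot, scal;
+ * t.computeScalingRotation(&scal, &rot);   // factor the linear part
+ * \endcode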
+ * + * \nonstableyet + * + * \svd_module + * + * \sa computeRotationScaling(), rotation(), class SVD + */ +template +template +void Transform::computeScalingRotation(ScalingMatrixType *scaling, RotationMatrixType *rotation) const +{ + linear().svd().computeScalingRotation(scaling, rotation); +} + +/** Convenient method to set \c *this from a position, orientation and scale + * of a 3D object. + */ +template +template +Transform& +Transform::fromPositionOrientationScale(const MatrixBase &position, + const OrientationType& orientation, const MatrixBase &scale) +{ + linear() = ei_toRotationMatrix(orientation); + linear() *= scale.asDiagonal(); + translation() = position; + m_matrix.template block<1,Dim>(Dim,0).setZero(); + m_matrix(Dim,Dim) = Scalar(1); + return *this; +} + +/** \nonstableyet + * + * \returns the inverse transformation matrix according to some given knowledge + * on \c *this. + * + * \param traits allows to optimize the inversion process when the transformion + * is known to be not a general transformation. The possible values are: + * - Projective if the transformation is not necessarily affine, i.e., if the + * last row is not guaranteed to be [0 ... 0 1] + * - Affine is the default, the last row is assumed to be [0 ... 0 1] + * - Isometry if the transformation is only a concatenations of translations + * and rotations. + * + * \warning unless \a traits is always set to NoShear or NoScaling, this function + * requires the generic inverse method of MatrixBase defined in the LU module. If + * you forget to include this module, then you will get hard to debug linking errors. + * + * \sa MatrixBase::inverse() + */ +template +inline const typename Transform::MatrixType +Transform::inverse(TransformTraits traits) const +{ + if (traits == Projective) + { + return m_matrix.inverse(); + } + else + { + MatrixType res; + if (traits == Affine) + { + res.template corner(TopLeft) = linear().inverse(); + } + else if (traits == Isometry) + { + res.template corner(TopLeft) = linear().transpose(); + } + else + { + ei_assert("invalid traits value in Transform::inverse()"); + } + // translation and remaining parts + res.template corner(TopRight) = - res.template corner(TopLeft) * translation(); + res.template corner<1,Dim>(BottomLeft).setZero(); + res.coeffRef(Dim,Dim) = Scalar(1); + return res; + } +} + +/***************************************************** +*** Specializations of operator* with a MatrixBase *** +*****************************************************/ + +template +struct ei_transform_product_impl +{ + typedef Transform TransformType; + typedef typename TransformType::MatrixType MatrixType; + typedef typename ProductReturnType::Type ResultType; + static ResultType run(const TransformType& tr, const Other& other) + { return tr.matrix() * other; } +}; + +template +struct ei_transform_product_impl +{ + typedef Transform TransformType; + typedef typename TransformType::MatrixType MatrixType; + typedef TransformType ResultType; + static ResultType run(const TransformType& tr, const Other& other) + { + TransformType res; + res.translation() = tr.translation(); + res.matrix().row(Dim) = tr.matrix().row(Dim); + res.linear() = (tr.linear() * other).lazy(); + return res; + } +}; + +template +struct ei_transform_product_impl +{ + typedef Transform TransformType; + typedef typename TransformType::MatrixType MatrixType; + typedef typename ProductReturnType::Type ResultType; + static ResultType run(const TransformType& tr, const Other& other) + { return tr.matrix() * other; } +}; + 
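+// Editorial note (illustration only, not part of the upstream sources): these
+// specializations dispatch Transform * MatrixBase products on the right hand
+// side's compile time size, matching the cases documented in Transform::operator*:
+// a (Dim+1)x(Dim+1) operand gives a plain matrix product, a Dim x Dim operand
+// only updates the linear part, a (Dim+1) x 1 operand is treated as an
+// homogeneous vector, and the Dim x 1 case below applies the full affine map:
+//
+//   Transform3f t;  t.setIdentity();
+//   Vector3f a = t * Vector3f::UnitX();      // Dim x 1 vector
+//   Vector4f b = t * Vector4f::UnitW();      // homogeneous Dim+1 vector
+//   Matrix4f c = t * Matrix4f::Identity();   // full matrix product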
+template +struct ei_transform_product_impl +{ + typedef typename Other::Scalar Scalar; + typedef Transform TransformType; + typedef typename TransformType::LinearPart MatrixType; + typedef const CwiseUnaryOp< + ei_scalar_multiple_op, + NestByValue, + NestByValue,Other>::Type >, + NestByValue > > + > ResultType; + // FIXME should we offer an optimized version when the last row is known to be 0,0...,0,1 ? + static ResultType run(const TransformType& tr, const Other& other) + { return ((tr.linear().nestByValue() * other).nestByValue() + tr.translation().nestByValue()).nestByValue() + * (Scalar(1) / ( (tr.matrix().template block<1,Dim>(Dim,0) * other).coeff(0) + tr.matrix().coeff(Dim,Dim))); } +}; + +#endif // EIGEN_TRANSFORM_H diff --git a/extern/Eigen2/Eigen/src/Geometry/Translation.h b/extern/Eigen2/Eigen/src/Geometry/Translation.h new file mode 100644 index 00000000000..4b2fc7a56fc --- /dev/null +++ b/extern/Eigen2/Eigen/src/Geometry/Translation.h @@ -0,0 +1,198 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_TRANSLATION_H +#define EIGEN_TRANSLATION_H + +/** \geometry_module \ingroup Geometry_Module + * + * \class Translation + * + * \brief Represents a translation transformation + * + * \param _Scalar the scalar type, i.e., the type of the coefficients. + * \param _Dim the dimension of the space, can be a compile time value or Dynamic + * + * \note This class is not aimed to be used to store a translation transformation, + * but rather to make easier the constructions and updates of Transform objects. + * + * \sa class Scaling, class Transform + */ +template +class Translation +{ +public: + EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF_VECTORIZABLE_FIXED_SIZE(_Scalar,_Dim) + /** dimension of the space */ + enum { Dim = _Dim }; + /** the scalar type of the coefficients */ + typedef _Scalar Scalar; + /** corresponding vector type */ + typedef Matrix VectorType; + /** corresponding linear transformation matrix type */ + typedef Matrix LinearMatrixType; + /** corresponding scaling transformation type */ + typedef Scaling ScalingType; + /** corresponding affine transformation type */ + typedef Transform TransformType; + +protected: + + VectorType m_coeffs; + +public: + + /** Default constructor without initialization. 
*/ + Translation() {} + /** */ + inline Translation(const Scalar& sx, const Scalar& sy) + { + ei_assert(Dim==2); + m_coeffs.x() = sx; + m_coeffs.y() = sy; + } + /** */ + inline Translation(const Scalar& sx, const Scalar& sy, const Scalar& sz) + { + ei_assert(Dim==3); + m_coeffs.x() = sx; + m_coeffs.y() = sy; + m_coeffs.z() = sz; + } + /** Constructs and initialize the scaling transformation from a vector of scaling coefficients */ + explicit inline Translation(const VectorType& vector) : m_coeffs(vector) {} + + const VectorType& vector() const { return m_coeffs; } + VectorType& vector() { return m_coeffs; } + + /** Concatenates two translation */ + inline Translation operator* (const Translation& other) const + { return Translation(m_coeffs + other.m_coeffs); } + + /** Concatenates a translation and a scaling */ + inline TransformType operator* (const ScalingType& other) const; + + /** Concatenates a translation and a linear transformation */ + inline TransformType operator* (const LinearMatrixType& linear) const; + + template + inline TransformType operator*(const RotationBase& r) const + { return *this * r.toRotationMatrix(); } + + /** Concatenates a linear transformation and a translation */ + // its a nightmare to define a templated friend function outside its declaration + friend inline TransformType operator* (const LinearMatrixType& linear, const Translation& t) + { + TransformType res; + res.matrix().setZero(); + res.linear() = linear; + res.translation() = linear * t.m_coeffs; + res.matrix().row(Dim).setZero(); + res(Dim,Dim) = Scalar(1); + return res; + } + + /** Concatenates a translation and an affine transformation */ + inline TransformType operator* (const TransformType& t) const; + + /** Applies translation to vector */ + inline VectorType operator* (const VectorType& other) const + { return m_coeffs + other; } + + /** \returns the inverse translation (opposite) */ + Translation inverse() const { return Translation(-m_coeffs); } + + Translation& operator=(const Translation& other) + { + m_coeffs = other.m_coeffs; + return *this; + } + + /** \returns \c *this with scalar type casted to \a NewScalarType + * + * Note that if \a NewScalarType is equal to the current scalar type of \c *this + * then this function smartly returns a const reference to \c *this. + */ + template + inline typename ei_cast_return_type >::type cast() const + { return typename ei_cast_return_type >::type(*this); } + + /** Copy constructor with scalar type conversion */ + template + inline explicit Translation(const Translation& other) + { m_coeffs = other.vector().template cast(); } + + /** \returns \c true if \c *this is approximately equal to \a other, within the precision + * determined by \a prec. 
+ * + * \sa MatrixBase::isApprox() */ + bool isApprox(const Translation& other, typename NumTraits::Real prec = precision()) const + { return m_coeffs.isApprox(other.m_coeffs, prec); } + +}; + +/** \addtogroup Geometry_Module */ +//@{ +typedef Translation Translation2f; +typedef Translation Translation2d; +typedef Translation Translation3f; +typedef Translation Translation3d; +//@} + + +template +inline typename Translation::TransformType +Translation::operator* (const ScalingType& other) const +{ + TransformType res; + res.matrix().setZero(); + res.linear().diagonal() = other.coeffs(); + res.translation() = m_coeffs; + res(Dim,Dim) = Scalar(1); + return res; +} + +template +inline typename Translation::TransformType +Translation::operator* (const LinearMatrixType& linear) const +{ + TransformType res; + res.matrix().setZero(); + res.linear() = linear; + res.translation() = m_coeffs; + res.matrix().row(Dim).setZero(); + res(Dim,Dim) = Scalar(1); + return res; +} + +template +inline typename Translation::TransformType +Translation::operator* (const TransformType& t) const +{ + TransformType res = t; + res.pretranslate(m_coeffs); + return res; +} + +#endif // EIGEN_TRANSLATION_H diff --git a/extern/Eigen2/Eigen/src/LU/Determinant.h b/extern/Eigen2/Eigen/src/LU/Determinant.h new file mode 100644 index 00000000000..4f435054ac6 --- /dev/null +++ b/extern/Eigen2/Eigen/src/LU/Determinant.h @@ -0,0 +1,122 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
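+// Editorial note (illustration only, not part of the upstream sources): this
+// header picks a determinant implementation from the size known at compile
+// time: direct cofactor formulas for 1x1 up to 4x4 matrices and an LU
+// decomposition for everything else, as the specializations below show.
+// Typical use is simply:
+//
+//   Matrix3f m = Matrix3f::Random();
+//   float d = m.determinant();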
+ +#ifndef EIGEN_DETERMINANT_H +#define EIGEN_DETERMINANT_H + +template +inline const typename Derived::Scalar ei_bruteforce_det3_helper +(const MatrixBase& matrix, int a, int b, int c) +{ + return matrix.coeff(0,a) + * (matrix.coeff(1,b) * matrix.coeff(2,c) - matrix.coeff(1,c) * matrix.coeff(2,b)); +} + +template +const typename Derived::Scalar ei_bruteforce_det4_helper +(const MatrixBase& matrix, int j, int k, int m, int n) +{ + return (matrix.coeff(j,0) * matrix.coeff(k,1) - matrix.coeff(k,0) * matrix.coeff(j,1)) + * (matrix.coeff(m,2) * matrix.coeff(n,3) - matrix.coeff(n,2) * matrix.coeff(m,3)); +} + +const int TriangularDeterminant = 0; + +template struct ei_determinant_impl +{ + static inline typename ei_traits::Scalar run(const Derived& m) + { + return m.lu().determinant(); + } +}; + +template struct ei_determinant_impl +{ + static inline typename ei_traits::Scalar run(const Derived& m) + { + if (Derived::Flags & UnitDiagBit) + return 1; + else if (Derived::Flags & ZeroDiagBit) + return 0; + else + return m.diagonal().redux(ei_scalar_product_op::Scalar>()); + } +}; + +template struct ei_determinant_impl +{ + static inline typename ei_traits::Scalar run(const Derived& m) + { + return m.coeff(0,0); + } +}; + +template struct ei_determinant_impl +{ + static inline typename ei_traits::Scalar run(const Derived& m) + { + return m.coeff(0,0) * m.coeff(1,1) - m.coeff(1,0) * m.coeff(0,1); + } +}; + +template struct ei_determinant_impl +{ + static typename ei_traits::Scalar run(const Derived& m) + { + return ei_bruteforce_det3_helper(m,0,1,2) + - ei_bruteforce_det3_helper(m,1,0,2) + + ei_bruteforce_det3_helper(m,2,0,1); + } +}; + +template struct ei_determinant_impl +{ + static typename ei_traits::Scalar run(const Derived& m) + { + // trick by Martin Costabel to compute 4x4 det with only 30 muls + return ei_bruteforce_det4_helper(m,0,1,2,3) + - ei_bruteforce_det4_helper(m,0,2,1,3) + + ei_bruteforce_det4_helper(m,0,3,1,2) + + ei_bruteforce_det4_helper(m,1,2,0,3) + - ei_bruteforce_det4_helper(m,1,3,0,2) + + ei_bruteforce_det4_helper(m,2,3,0,1); + } +}; + +/** \lu_module + * + * \returns the determinant of this matrix + */ +template +inline typename ei_traits::Scalar MatrixBase::determinant() const +{ + assert(rows() == cols()); + return ei_determinant_impl::run(derived()); +} + +#endif // EIGEN_DETERMINANT_H diff --git a/extern/Eigen2/Eigen/src/LU/Inverse.h b/extern/Eigen2/Eigen/src/LU/Inverse.h new file mode 100644 index 00000000000..3d4d6348949 --- /dev/null +++ b/extern/Eigen2/Eigen/src/LU/Inverse.h @@ -0,0 +1,258 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_INVERSE_H +#define EIGEN_INVERSE_H + +/******************************************************************** +*** Part 1 : optimized implementations for fixed-size 2,3,4 cases *** +********************************************************************/ + +template +void ei_compute_inverse_in_size2_case(const MatrixType& matrix, MatrixType* result) +{ + typedef typename MatrixType::Scalar Scalar; + const Scalar invdet = Scalar(1) / matrix.determinant(); + result->coeffRef(0,0) = matrix.coeff(1,1) * invdet; + result->coeffRef(1,0) = -matrix.coeff(1,0) * invdet; + result->coeffRef(0,1) = -matrix.coeff(0,1) * invdet; + result->coeffRef(1,1) = matrix.coeff(0,0) * invdet; +} + +template +bool ei_compute_inverse_in_size2_case_with_check(const XprType& matrix, MatrixType* result) +{ + typedef typename MatrixType::Scalar Scalar; + const Scalar det = matrix.determinant(); + if(ei_isMuchSmallerThan(det, matrix.cwise().abs().maxCoeff())) return false; + const Scalar invdet = Scalar(1) / det; + result->coeffRef(0,0) = matrix.coeff(1,1) * invdet; + result->coeffRef(1,0) = -matrix.coeff(1,0) * invdet; + result->coeffRef(0,1) = -matrix.coeff(0,1) * invdet; + result->coeffRef(1,1) = matrix.coeff(0,0) * invdet; + return true; +} + +template +void ei_compute_inverse_in_size3_case(const MatrixType& matrix, MatrixType* result) +{ + typedef typename MatrixType::Scalar Scalar; + const Scalar det_minor00 = matrix.minor(0,0).determinant(); + const Scalar det_minor10 = matrix.minor(1,0).determinant(); + const Scalar det_minor20 = matrix.minor(2,0).determinant(); + const Scalar invdet = Scalar(1) / ( det_minor00 * matrix.coeff(0,0) + - det_minor10 * matrix.coeff(1,0) + + det_minor20 * matrix.coeff(2,0) ); + result->coeffRef(0, 0) = det_minor00 * invdet; + result->coeffRef(0, 1) = -det_minor10 * invdet; + result->coeffRef(0, 2) = det_minor20 * invdet; + result->coeffRef(1, 0) = -matrix.minor(0,1).determinant() * invdet; + result->coeffRef(1, 1) = matrix.minor(1,1).determinant() * invdet; + result->coeffRef(1, 2) = -matrix.minor(2,1).determinant() * invdet; + result->coeffRef(2, 0) = matrix.minor(0,2).determinant() * invdet; + result->coeffRef(2, 1) = -matrix.minor(1,2).determinant() * invdet; + result->coeffRef(2, 2) = matrix.minor(2,2).determinant() * invdet; +} + +template +bool ei_compute_inverse_in_size4_case_helper(const MatrixType& matrix, MatrixType* result) +{ + /* Let's split M into four 2x2 blocks: + * (P Q) + * (R S) + * If P is invertible, with inverse denoted by P_inverse, and if + * (S - R*P_inverse*Q) is also invertible, then the inverse of M is + * (P' Q') + * (R' S') + * where + * S' = (S - R*P_inverse*Q)^(-1) + * P' = P1 + (P1*Q) * S' *(R*P_inverse) + * Q' = -(P_inverse*Q) * S' + * R' = -S' * (R*P_inverse) + */ + typedef Block XprBlock22; + typedef typename MatrixBase::PlainMatrixType Block22; + Block22 P_inverse; + if(ei_compute_inverse_in_size2_case_with_check(matrix.template block<2,2>(0,0), &P_inverse)) + { + const Block22 Q = matrix.template block<2,2>(0,2); + const Block22 P_inverse_times_Q = P_inverse * Q; + const XprBlock22 R = matrix.template block<2,2>(2,0); + const Block22 R_times_P_inverse = R * P_inverse; + const Block22 R_times_P_inverse_times_Q = R_times_P_inverse * Q; + const XprBlock22 S = matrix.template block<2,2>(2,2); + const Block22 X = S - R_times_P_inverse_times_Q; + Block22 Y; + 
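+    // X is the 2x2 Schur complement S - R*P_inverse*Q described in the block
+    // formula above; its inverse Y is the S' block, from which the remaining
+    // three blocks of the result are rebuilt below.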
ei_compute_inverse_in_size2_case(X, &Y); + result->template block<2,2>(2,2) = Y; + result->template block<2,2>(2,0) = - Y * R_times_P_inverse; + const Block22 Z = P_inverse_times_Q * Y; + result->template block<2,2>(0,2) = - Z; + result->template block<2,2>(0,0) = P_inverse + Z * R_times_P_inverse; + return true; + } + else + { + return false; + } +} + +template +void ei_compute_inverse_in_size4_case(const MatrixType& matrix, MatrixType* result) +{ + if(ei_compute_inverse_in_size4_case_helper(matrix, result)) + { + // good ! The topleft 2x2 block was invertible, so the 2x2 blocks approach is successful. + return; + } + else + { + // rare case: the topleft 2x2 block is not invertible (but the matrix itself is assumed to be). + // since this is a rare case, we don't need to optimize it. We just want to handle it with little + // additional code. + MatrixType m(matrix); + m.row(0).swap(m.row(2)); + m.row(1).swap(m.row(3)); + if(ei_compute_inverse_in_size4_case_helper(m, result)) + { + // good, the topleft 2x2 block of m is invertible. Since m is different from matrix in that some + // rows were permuted, the actual inverse of matrix is derived from the inverse of m by permuting + // the corresponding columns. + result->col(0).swap(result->col(2)); + result->col(1).swap(result->col(3)); + } + else + { + // last possible case. Since matrix is assumed to be invertible, this last case has to work. + // first, undo the swaps previously made + m.row(0).swap(m.row(2)); + m.row(1).swap(m.row(3)); + // swap row 0 with the the row among 0 and 1 that has the biggest 2 first coeffs + int swap0with = ei_abs(m.coeff(0,0))+ei_abs(m.coeff(0,1))>ei_abs(m.coeff(1,0))+ei_abs(m.coeff(1,1)) ? 0 : 1; + m.row(0).swap(m.row(swap0with)); + // swap row 1 with the the row among 2 and 3 that has the biggest 2 first coeffs + int swap1with = ei_abs(m.coeff(2,0))+ei_abs(m.coeff(2,1))>ei_abs(m.coeff(3,0))+ei_abs(m.coeff(3,1)) ? 2 : 3; + m.row(1).swap(m.row(swap1with)); + ei_compute_inverse_in_size4_case_helper(m, result); + result->col(1).swap(result->col(swap1with)); + result->col(0).swap(result->col(swap0with)); + } + } +} + +/*********************************************** +*** Part 2 : selector and MatrixBase methods *** +***********************************************/ + +template +struct ei_compute_inverse +{ + static inline void run(const MatrixType& matrix, MatrixType* result) + { + LU lu(matrix); + lu.computeInverse(result); + } +}; + +template +struct ei_compute_inverse +{ + static inline void run(const MatrixType& matrix, MatrixType* result) + { + typedef typename MatrixType::Scalar Scalar; + result->coeffRef(0,0) = Scalar(1) / matrix.coeff(0,0); + } +}; + +template +struct ei_compute_inverse +{ + static inline void run(const MatrixType& matrix, MatrixType* result) + { + ei_compute_inverse_in_size2_case(matrix, result); + } +}; + +template +struct ei_compute_inverse +{ + static inline void run(const MatrixType& matrix, MatrixType* result) + { + ei_compute_inverse_in_size3_case(matrix, result); + } +}; + +template +struct ei_compute_inverse +{ + static inline void run(const MatrixType& matrix, MatrixType* result) + { + ei_compute_inverse_in_size4_case(matrix, result); + } +}; + +/** \lu_module + * + * Computes the matrix inverse of this matrix. + * + * \note This matrix must be invertible, otherwise the result is undefined. + * + * \param result Pointer to the matrix in which to store the result. 
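+ *
+ * A short inline editorial sketch (illustration only):
+ * \code
+ * Matrix3f m = Matrix3f::Random();   // assumed invertible here
+ * Matrix3f inv;
+ * m.computeInverse(&inv);            // inv now holds the inverse of m
+ * \endcode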
+ * + * Example: \include MatrixBase_computeInverse.cpp + * Output: \verbinclude MatrixBase_computeInverse.out + * + * \sa inverse() + */ +template +inline void MatrixBase::computeInverse(PlainMatrixType *result) const +{ + ei_assert(rows() == cols()); + EIGEN_STATIC_ASSERT(NumTraits::HasFloatingPoint,NUMERIC_TYPE_MUST_BE_FLOATING_POINT) + ei_compute_inverse::run(eval(), result); +} + +/** \lu_module + * + * \returns the matrix inverse of this matrix. + * + * \note This matrix must be invertible, otherwise the result is undefined. + * + * \note This method returns a matrix by value, which can be inefficient. To avoid that overhead, + * use computeInverse() instead. + * + * Example: \include MatrixBase_inverse.cpp + * Output: \verbinclude MatrixBase_inverse.out + * + * \sa computeInverse() + */ +template +inline const typename MatrixBase::PlainMatrixType MatrixBase::inverse() const +{ + PlainMatrixType result(rows(), cols()); + computeInverse(&result); + return result; +} + +#endif // EIGEN_INVERSE_H diff --git a/extern/Eigen2/Eigen/src/LU/LU.h b/extern/Eigen2/Eigen/src/LU/LU.h new file mode 100644 index 00000000000..176e76a91a3 --- /dev/null +++ b/extern/Eigen2/Eigen/src/LU/LU.h @@ -0,0 +1,541 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2006-2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_LU_H +#define EIGEN_LU_H + +/** \ingroup LU_Module + * + * \class LU + * + * \brief LU decomposition of a matrix with complete pivoting, and related features + * + * \param MatrixType the type of the matrix of which we are computing the LU decomposition + * + * This class represents a LU decomposition of any matrix, with complete pivoting: the matrix A + * is decomposed as A = PLUQ where L is unit-lower-triangular, U is upper-triangular, and P and Q + * are permutation matrices. This is a rank-revealing LU decomposition. The eigenvalues (diagonal + * coefficients) of U are sorted in such a way that any zeros are at the end, so that the rank + * of A is the index of the first zero on the diagonal of U (with indices starting at 0) if any. + * + * This decomposition provides the generic approach to solving systems of linear equations, computing + * the rank, invertibility, inverse, kernel, and determinant. + * + * This LU decomposition is very stable and well tested with large matrices. Even exact rank computation + * works at sizes larger than 1000x1000. 
However there are use cases where the SVD decomposition is inherently + * more stable when dealing with numerically damaged input. For example, computing the kernel is more stable with + * SVD because the SVD can determine which singular values are negligible while LU has to work at the level of matrix + * coefficients that are less meaningful in this respect. + * + * The data of the LU decomposition can be directly accessed through the methods matrixLU(), + * permutationP(), permutationQ(). + * + * As an exemple, here is how the original matrix can be retrieved: + * \include class_LU.cpp + * Output: \verbinclude class_LU.out + * + * \sa MatrixBase::lu(), MatrixBase::determinant(), MatrixBase::inverse(), MatrixBase::computeInverse() + */ +template class LU +{ + public: + + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef Matrix IntRowVectorType; + typedef Matrix IntColVectorType; + typedef Matrix RowVectorType; + typedef Matrix ColVectorType; + + enum { MaxSmallDimAtCompileTime = EIGEN_ENUM_MIN( + MatrixType::MaxColsAtCompileTime, + MatrixType::MaxRowsAtCompileTime) + }; + + typedef Matrix KernelResultType; + + typedef Matrix ImageResultType; + + /** Constructor. + * + * \param matrix the matrix of which to compute the LU decomposition. + */ + LU(const MatrixType& matrix); + + /** \returns the LU decomposition matrix: the upper-triangular part is U, the + * unit-lower-triangular part is L (at least for square matrices; in the non-square + * case, special care is needed, see the documentation of class LU). + * + * \sa matrixL(), matrixU() + */ + inline const MatrixType& matrixLU() const + { + return m_lu; + } + + /** \returns a vector of integers, whose size is the number of rows of the matrix being decomposed, + * representing the P permutation i.e. the permutation of the rows. For its precise meaning, + * see the examples given in the documentation of class LU. + * + * \sa permutationQ() + */ + inline const IntColVectorType& permutationP() const + { + return m_p; + } + + /** \returns a vector of integers, whose size is the number of columns of the matrix being + * decomposed, representing the Q permutation i.e. the permutation of the columns. + * For its precise meaning, see the examples given in the documentation of class LU. + * + * \sa permutationP() + */ + inline const IntRowVectorType& permutationQ() const + { + return m_q; + } + + /** Computes a basis of the kernel of the matrix, also called the null-space of the matrix. + * + * \note This method is only allowed on non-invertible matrices, as determined by + * isInvertible(). Calling it on an invertible matrix will make an assertion fail. + * + * \param result a pointer to the matrix in which to store the kernel. The columns of this + * matrix will be set to form a basis of the kernel (it will be resized + * if necessary). + * + * Example: \include LU_computeKernel.cpp + * Output: \verbinclude LU_computeKernel.out + * + * \sa kernel(), computeImage(), image() + */ + template + void computeKernel(KernelMatrixType *result) const; + + /** Computes a basis of the image of the matrix, also called the column-space or range of he matrix. + * + * \note Calling this method on the zero matrix will make an assertion fail. + * + * \param result a pointer to the matrix in which to store the image. The columns of this + * matrix will be set to form a basis of the image (it will be resized + * if necessary). 
+ * + * Example: \include LU_computeImage.cpp + * Output: \verbinclude LU_computeImage.out + * + * \sa image(), computeKernel(), kernel() + */ + template + void computeImage(ImageMatrixType *result) const; + + /** \returns the kernel of the matrix, also called its null-space. The columns of the returned matrix + * will form a basis of the kernel. + * + * \note: this method is only allowed on non-invertible matrices, as determined by + * isInvertible(). Calling it on an invertible matrix will make an assertion fail. + * + * \note: this method returns a matrix by value, which induces some inefficiency. + * If you prefer to avoid this overhead, use computeKernel() instead. + * + * Example: \include LU_kernel.cpp + * Output: \verbinclude LU_kernel.out + * + * \sa computeKernel(), image() + */ + const KernelResultType kernel() const; + + /** \returns the image of the matrix, also called its column-space. The columns of the returned matrix + * will form a basis of the kernel. + * + * \note: Calling this method on the zero matrix will make an assertion fail. + * + * \note: this method returns a matrix by value, which induces some inefficiency. + * If you prefer to avoid this overhead, use computeImage() instead. + * + * Example: \include LU_image.cpp + * Output: \verbinclude LU_image.out + * + * \sa computeImage(), kernel() + */ + const ImageResultType image() const; + + /** This method finds a solution x to the equation Ax=b, where A is the matrix of which + * *this is the LU decomposition, if any exists. + * + * \param b the right-hand-side of the equation to solve. Can be a vector or a matrix, + * the only requirement in order for the equation to make sense is that + * b.rows()==A.rows(), where A is the matrix of which *this is the LU decomposition. + * \param result a pointer to the vector or matrix in which to store the solution, if any exists. + * Resized if necessary, so that result->rows()==A.cols() and result->cols()==b.cols(). + * If no solution exists, *result is left with undefined coefficients. + * + * \returns true if any solution exists, false if no solution exists. + * + * \note If there exist more than one solution, this method will arbitrarily choose one. + * If you need a complete analysis of the space of solutions, take the one solution obtained + * by this method and add to it elements of the kernel, as determined by kernel(). + * + * Example: \include LU_solve.cpp + * Output: \verbinclude LU_solve.out + * + * \sa MatrixBase::solveTriangular(), kernel(), computeKernel(), inverse(), computeInverse() + */ + template + bool solve(const MatrixBase& b, ResultType *result) const; + + /** \returns the determinant of the matrix of which + * *this is the LU decomposition. It has only linear complexity + * (that is, O(n) where n is the dimension of the square matrix) + * as the LU decomposition has already been computed. + * + * \note This is only for square matrices. + * + * \note For fixed-size matrices of size up to 4, MatrixBase::determinant() offers + * optimized paths. + * + * \warning a determinant can be very big or small, so for matrices + * of large enough dimension, there is a risk of overflow/underflow. + * + * \sa MatrixBase::determinant() + */ + typename ei_traits::Scalar determinant() const; + + /** \returns the rank of the matrix of which *this is the LU decomposition. + * + * \note This is computed at the time of the construction of the LU decomposition. This + * method does not perform any further computation. 
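The solve() contract above (an exact solve with an existence test, not a least-squares fit) can be exercised as below. A sketch under the same include-path assumption as the earlier example:

    #include <Eigen/Core>
    #include <Eigen/LU>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A(3, 3);
      A << 2, -1,  0,
          -1,  2, -1,
           0, -1,  2;                        // invertible, so an exact solution exists
      Eigen::VectorXd b(3);
      b << 1, 0, 1;

      Eigen::LU<Eigen::MatrixXd> lu(A);
      Eigen::VectorXd x;
      if (lu.solve(b, &x))                   // returns false when b is not in the image of A
        std::cout << "residual = " << (A * x - b).norm() << "\n";   // expect ~0
      else
        std::cout << "no exact solution\n";
      return 0;
    }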
+ */ + inline int rank() const + { + return m_rank; + } + + /** \returns the dimension of the kernel of the matrix of which *this is the LU decomposition. + * + * \note Since the rank is computed at the time of the construction of the LU decomposition, this + * method almost does not perform any further computation. + */ + inline int dimensionOfKernel() const + { + return m_lu.cols() - m_rank; + } + + /** \returns true if the matrix of which *this is the LU decomposition represents an injective + * linear map, i.e. has trivial kernel; false otherwise. + * + * \note Since the rank is computed at the time of the construction of the LU decomposition, this + * method almost does not perform any further computation. + */ + inline bool isInjective() const + { + return m_rank == m_lu.cols(); + } + + /** \returns true if the matrix of which *this is the LU decomposition represents a surjective + * linear map; false otherwise. + * + * \note Since the rank is computed at the time of the construction of the LU decomposition, this + * method almost does not perform any further computation. + */ + inline bool isSurjective() const + { + return m_rank == m_lu.rows(); + } + + /** \returns true if the matrix of which *this is the LU decomposition is invertible. + * + * \note Since the rank is computed at the time of the construction of the LU decomposition, this + * method almost does not perform any further computation. + */ + inline bool isInvertible() const + { + return isInjective() && isSurjective(); + } + + /** Computes the inverse of the matrix of which *this is the LU decomposition. + * + * \param result a pointer to the matrix into which to store the inverse. Resized if needed. + * + * \note If this matrix is not invertible, *result is left with undefined coefficients. + * Use isInvertible() to first determine whether this matrix is invertible. + * + * \sa MatrixBase::computeInverse(), inverse() + */ + inline void computeInverse(MatrixType *result) const + { + solve(MatrixType::Identity(m_lu.rows(), m_lu.cols()), result); + } + + /** \returns the inverse of the matrix of which *this is the LU decomposition. + * + * \note If this matrix is not invertible, the returned matrix has undefined coefficients. + * Use isInvertible() to first determine whether this matrix is invertible. + * + * \sa computeInverse(), MatrixBase::inverse() + */ + inline MatrixType inverse() const + { + MatrixType result; + computeInverse(&result); + return result; + } + + protected: + const MatrixType& m_originalMatrix; + MatrixType m_lu; + IntColVectorType m_p; + IntRowVectorType m_q; + int m_det_pq; + int m_rank; + RealScalar m_precision; +}; + +template +LU::LU(const MatrixType& matrix) + : m_originalMatrix(matrix), + m_lu(matrix), + m_p(matrix.rows()), + m_q(matrix.cols()) +{ + const int size = matrix.diagonal().size(); + const int rows = matrix.rows(); + const int cols = matrix.cols(); + + // this formula comes from experimenting (see "LU precision tuning" thread on the list) + // and turns out to be identical to Higham's formula used already in LDLt. 
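Since the rank is fixed at construction time, the queries above are essentially free. A sketch (not part of the patch) showing them on a deliberately rank-deficient matrix:

    #include <Eigen/Core>
    #include <Eigen/LU>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A(3, 3);
      A << 1, 2, 3,
           2, 4, 6,                          // second row is twice the first, so rank(A) == 2
           1, 0, 1;

      Eigen::LU<Eigen::MatrixXd> lu(A);
      std::cout << "rank = " << lu.rank()
                << ", dim(ker) = " << lu.dimensionOfKernel()
                << ", invertible = " << lu.isInvertible() << "\n";
      if (!lu.isInvertible())
      {
        Eigen::MatrixXd K = lu.kernel();     // columns form a basis of the null-space
        std::cout << "|A*K| = " << (A * K).norm() << "\n";   // expect ~0
      }
      return 0;
    }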
+ m_precision = machine_epsilon() * size; + + IntColVectorType rows_transpositions(matrix.rows()); + IntRowVectorType cols_transpositions(matrix.cols()); + int number_of_transpositions = 0; + + RealScalar biggest = RealScalar(0); + m_rank = size; + for(int k = 0; k < size; ++k) + { + int row_of_biggest_in_corner, col_of_biggest_in_corner; + RealScalar biggest_in_corner; + + biggest_in_corner = m_lu.corner(Eigen::BottomRight, rows-k, cols-k) + .cwise().abs() + .maxCoeff(&row_of_biggest_in_corner, &col_of_biggest_in_corner); + row_of_biggest_in_corner += k; + col_of_biggest_in_corner += k; + if(k==0) biggest = biggest_in_corner; + + // if the corner is negligible, then we have less than full rank, and we can finish early + if(ei_isMuchSmallerThan(biggest_in_corner, biggest, m_precision)) + { + m_rank = k; + for(int i = k; i < size; i++) + { + rows_transpositions.coeffRef(i) = i; + cols_transpositions.coeffRef(i) = i; + } + break; + } + + rows_transpositions.coeffRef(k) = row_of_biggest_in_corner; + cols_transpositions.coeffRef(k) = col_of_biggest_in_corner; + if(k != row_of_biggest_in_corner) { + m_lu.row(k).swap(m_lu.row(row_of_biggest_in_corner)); + ++number_of_transpositions; + } + if(k != col_of_biggest_in_corner) { + m_lu.col(k).swap(m_lu.col(col_of_biggest_in_corner)); + ++number_of_transpositions; + } + if(k= 0; --k) + std::swap(m_p.coeffRef(k), m_p.coeffRef(rows_transpositions.coeff(k))); + + for(int k = 0; k < matrix.cols(); ++k) m_q.coeffRef(k) = k; + for(int k = 0; k < size; ++k) + std::swap(m_q.coeffRef(k), m_q.coeffRef(cols_transpositions.coeff(k))); + + m_det_pq = (number_of_transpositions%2) ? -1 : 1; +} + +template +typename ei_traits::Scalar LU::determinant() const +{ + return Scalar(m_det_pq) * m_lu.diagonal().redux(ei_scalar_product_op()); +} + +template +template +void LU::computeKernel(KernelMatrixType *result) const +{ + ei_assert(!isInvertible()); + const int dimker = dimensionOfKernel(), cols = m_lu.cols(); + result->resize(cols, dimker); + + /* Let us use the following lemma: + * + * Lemma: If the matrix A has the LU decomposition PAQ = LU, + * then Ker A = Q(Ker U). + * + * Proof: trivial: just keep in mind that P, Q, L are invertible. + */ + + /* Thus, all we need to do is to compute Ker U, and then apply Q. + * + * U is upper triangular, with eigenvalues sorted so that any zeros appear at the end. + * Thus, the diagonal of U ends with exactly + * m_dimKer zero's. Let us use that to construct m_dimKer linearly + * independent vectors in Ker U. 
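Because L is unit-lower-triangular and the permutations only contribute a sign, the determinant computed above is just m_det_pq times the product of U's diagonal. A small cross-check against MatrixBase::determinant() (a sketch, not part of the patch):

    #include <Eigen/Core>
    #include <Eigen/LU>
    #include <iostream>

    int main()
    {
      Eigen::Matrix3d A;
      A << 3, 1, 2,
           0, 4, 1,
           5, 2, 8;
      Eigen::LU<Eigen::Matrix3d> lu(A);
      // Both values should agree up to rounding.
      std::cout << lu.determinant() << " vs " << A.determinant() << "\n";
      return 0;
    }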
+ */ + + Matrix + y(-m_lu.corner(TopRight, m_rank, dimker)); + + m_lu.corner(TopLeft, m_rank, m_rank) + .template marked() + .solveTriangularInPlace(y); + + for(int i = 0; i < m_rank; ++i) result->row(m_q.coeff(i)) = y.row(i); + for(int i = m_rank; i < cols; ++i) result->row(m_q.coeff(i)).setZero(); + for(int k = 0; k < dimker; ++k) result->coeffRef(m_q.coeff(m_rank+k), k) = Scalar(1); +} + +template +const typename LU::KernelResultType +LU::kernel() const +{ + KernelResultType result(m_lu.cols(), dimensionOfKernel()); + computeKernel(&result); + return result; +} + +template +template +void LU::computeImage(ImageMatrixType *result) const +{ + ei_assert(m_rank > 0); + result->resize(m_originalMatrix.rows(), m_rank); + for(int i = 0; i < m_rank; ++i) + result->col(i) = m_originalMatrix.col(m_q.coeff(i)); +} + +template +const typename LU::ImageResultType +LU::image() const +{ + ImageResultType result(m_originalMatrix.rows(), m_rank); + computeImage(&result); + return result; +} + +template +template +bool LU::solve( + const MatrixBase& b, + ResultType *result +) const +{ + /* The decomposition PAQ = LU can be rewritten as A = P^{-1} L U Q^{-1}. + * So we proceed as follows: + * Step 1: compute c = Pb. + * Step 2: replace c by the solution x to Lx = c. Exists because L is invertible. + * Step 3: replace c by the solution x to Ux = c. Check if a solution really exists. + * Step 4: result = Qc; + */ + + const int rows = m_lu.rows(), cols = m_lu.cols(); + ei_assert(b.rows() == rows); + const int smalldim = std::min(rows, cols); + + typename OtherDerived::PlainMatrixType c(b.rows(), b.cols()); + + // Step 1 + for(int i = 0; i < rows; ++i) c.row(m_p.coeff(i)) = b.row(i); + + // Step 2 + m_lu.corner(Eigen::TopLeft,smalldim,smalldim).template marked() + .solveTriangularInPlace( + c.corner(Eigen::TopLeft, smalldim, c.cols())); + if(rows>cols) + { + c.corner(Eigen::BottomLeft, rows-cols, c.cols()) + -= m_lu.corner(Eigen::BottomLeft, rows-cols, cols) * c.corner(Eigen::TopLeft, cols, c.cols()); + } + + // Step 3 + if(!isSurjective()) + { + // is c is in the image of U ? + RealScalar biggest_in_c = m_rank>0 ? c.corner(TopLeft, m_rank, c.cols()).cwise().abs().maxCoeff() : 0; + for(int col = 0; col < c.cols(); ++col) + for(int row = m_rank; row < c.rows(); ++row) + if(!ei_isMuchSmallerThan(c.coeff(row,col), biggest_in_c, m_precision)) + return false; + } + m_lu.corner(TopLeft, m_rank, m_rank) + .template marked() + .solveTriangularInPlace(c.corner(TopLeft, m_rank, c.cols())); + + // Step 4 + result->resize(m_lu.cols(), b.cols()); + for(int i = 0; i < m_rank; ++i) result->row(m_q.coeff(i)) = c.row(i); + for(int i = m_rank; i < m_lu.cols(); ++i) result->row(m_q.coeff(i)).setZero(); + return true; +} + +/** \lu_module + * + * \return the LU decomposition of \c *this. + * + * \sa class LU + */ +template +inline const LU::PlainMatrixType> +MatrixBase::lu() const +{ + return LU(eval()); +} + +#endif // EIGEN_LU_H diff --git a/extern/Eigen2/Eigen/src/LeastSquares/LeastSquares.h b/extern/Eigen2/Eigen/src/LeastSquares/LeastSquares.h new file mode 100644 index 00000000000..b2595ede1fe --- /dev/null +++ b/extern/Eigen2/Eigen/src/LeastSquares/LeastSquares.h @@ -0,0 +1,182 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. 
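Before continuing with the least-squares helpers, here is how the lu() convenience entry point defined just above and the column-space accessor combine in practice (a sketch, not part of the patch, same includes as before):

    #include <Eigen/Core>
    #include <Eigen/LU>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A(3, 4);
      A << 1, 0, 1, 2,
           0, 1, 1, 3,
           1, 1, 2, 5;                       // third row = first + second, so rank(A) == 2
      Eigen::LU<Eigen::MatrixXd> lu(A);      // A.lu() builds an equivalent object
      Eigen::MatrixXd C = lu.image();        // rank() columns of A spanning its column space
      std::cout << "rank = " << lu.rank()
                << ", image is " << C.rows() << "x" << C.cols() << "\n";
      return 0;
    }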
+// +// Copyright (C) 2006-2009 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_LEASTSQUARES_H +#define EIGEN_LEASTSQUARES_H + +/** \ingroup LeastSquares_Module + * + * \leastsquares_module + * + * For a set of points, this function tries to express + * one of the coords as a linear (affine) function of the other coords. + * + * This is best explained by an example. This function works in full + * generality, for points in a space of arbitrary dimension, and also over + * the complex numbers, but for this example we will work in dimension 3 + * over the real numbers (doubles). + * + * So let us work with the following set of 5 points given by their + * \f$(x,y,z)\f$ coordinates: + * @code + Vector3d points[5]; + points[0] = Vector3d( 3.02, 6.89, -4.32 ); + points[1] = Vector3d( 2.01, 5.39, -3.79 ); + points[2] = Vector3d( 2.41, 6.01, -4.01 ); + points[3] = Vector3d( 2.09, 5.55, -3.86 ); + points[4] = Vector3d( 2.58, 6.32, -4.10 ); + * @endcode + * Suppose that we want to express the second coordinate (\f$y\f$) as a linear + * expression in \f$x\f$ and \f$z\f$, that is, + * \f[ y=ax+bz+c \f] + * for some constants \f$a,b,c\f$. Thus, we want to find the best possible + * constants \f$a,b,c\f$ so that the plane of equation \f$y=ax+bz+c\f$ fits + * best the five above points. To do that, call this function as follows: + * @code + Vector3d coeffs; // will store the coefficients a, b, c + linearRegression( + 5, + &points, + &coeffs, + 1 // the coord to express as a function of + // the other ones. 0 means x, 1 means y, 2 means z. + ); + * @endcode + * Now the vector \a coeffs is approximately + * \f$( 0.495 , -1.927 , -2.906 )\f$. + * Thus, we get \f$a=0.495, b = -1.927, c = -2.906\f$. Let us check for + * instance how near points[0] is from the plane of equation \f$y=ax+bz+c\f$. + * Looking at the coords of points[0], we see that: + * \f[ax+bz+c = 0.495 * 3.02 + (-1.927) * (-4.32) + (-2.906) = 6.91.\f] + * On the other hand, we have \f$y=6.89\f$. We see that the values + * \f$6.91\f$ and \f$6.89\f$ + * are near, so points[0] is very near the plane of equation \f$y=ax+bz+c\f$. + * + * Let's now describe precisely the parameters: + * @param numPoints the number of points + * @param points the array of pointers to the points on which to perform the linear regression + * @param result pointer to the vector in which to store the result. + This vector must be of the same type and size as the + data points. The meaning of its coords is as follows. + For brevity, let \f$n=Size\f$, + \f$r_i=result[i]\f$, + and \f$f=funcOfOthers\f$. 
Denote by + \f$x_0,\ldots,x_{n-1}\f$ + the n coordinates in the n-dimensional space. + Then the resulting equation is: + \f[ x_f = r_0 x_0 + \cdots + r_{f-1}x_{f-1} + + r_{f+1}x_{f+1} + \cdots + r_{n-1}x_{n-1} + r_n. \f] + * @param funcOfOthers Determines which coord to express as a function of the + others. Coords are numbered starting from 0, so that a + value of 0 means \f$x\f$, 1 means \f$y\f$, + 2 means \f$z\f$, ... + * + * \sa fitHyperplane() + */ +template +void linearRegression(int numPoints, + VectorType **points, + VectorType *result, + int funcOfOthers ) +{ + typedef typename VectorType::Scalar Scalar; + typedef Hyperplane HyperplaneType; + const int size = points[0]->size(); + result->resize(size); + HyperplaneType h(size); + fitHyperplane(numPoints, points, &h); + for(int i = 0; i < funcOfOthers; i++) + result->coeffRef(i) = - h.coeffs()[i] / h.coeffs()[funcOfOthers]; + for(int i = funcOfOthers; i < size; i++) + result->coeffRef(i) = - h.coeffs()[i+1] / h.coeffs()[funcOfOthers]; +} + +/** \ingroup LeastSquares_Module + * + * \leastsquares_module + * + * This function is quite similar to linearRegression(), so we refer to the + * documentation of this function and only list here the differences. + * + * The main difference from linearRegression() is that this function doesn't + * take a \a funcOfOthers argument. Instead, it finds a general equation + * of the form + * \f[ r_0 x_0 + \cdots + r_{n-1}x_{n-1} + r_n = 0, \f] + * where \f$n=Size\f$, \f$r_i=retCoefficients[i]\f$, and we denote by + * \f$x_0,\ldots,x_{n-1}\f$ the n coordinates in the n-dimensional space. + * + * Thus, the vector \a retCoefficients has size \f$n+1\f$, which is another + * difference from linearRegression(). + * + * In practice, this function performs an hyper-plane fit in a total least square sense + * via the following steps: + * 1 - center the data to the mean + * 2 - compute the covariance matrix + * 3 - pick the eigenvector corresponding to the smallest eigenvalue of the covariance matrix + * The ratio of the smallest eigenvalue and the second one gives us a hint about the relevance + * of the solution. This value is optionally returned in \a soundness. 
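One calling detail worth spelling out: linearRegression() takes an array of pointers to the sample vectors (VectorType**), so in practice an explicit pointer array is needed. A hedged sketch reusing the five sample points from the documentation above; the module header names are assumptions based on this tree's layout:

    #include <Eigen/Core>
    #include <Eigen/Geometry>        // Hyperplane, used internally
    #include <Eigen/LeastSquares>    // assumed module header exposing linearRegression()
    #include <iostream>

    int main()
    {
      Eigen::Vector3d pts[5];
      pts[0] = Eigen::Vector3d(3.02, 6.89, -4.32);
      pts[1] = Eigen::Vector3d(2.01, 5.39, -3.79);
      pts[2] = Eigen::Vector3d(2.41, 6.01, -4.01);
      pts[3] = Eigen::Vector3d(2.09, 5.55, -3.86);
      pts[4] = Eigen::Vector3d(2.58, 6.32, -4.10);
      Eigen::Vector3d* ptrs[5] = { &pts[0], &pts[1], &pts[2], &pts[3], &pts[4] };

      Eigen::Vector3d coeffs;                          // receives a, b, c of y = a*x + b*z + c
      Eigen::linearRegression(5, ptrs, &coeffs, 1);    // 1 selects the y coordinate
      std::cout << coeffs.transpose() << "\n";         // roughly (0.495, -1.927, -2.906)
      return 0;
    }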
+ * + * \sa linearRegression() + */ +template +void fitHyperplane(int numPoints, + VectorType **points, + HyperplaneType *result, + typename NumTraits::Real* soundness = 0) +{ + typedef typename VectorType::Scalar Scalar; + typedef Matrix CovMatrixType; + EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorType) + ei_assert(numPoints >= 1); + int size = points[0]->size(); + ei_assert(size+1 == result->coeffs().size()); + + // compute the mean of the data + VectorType mean = VectorType::Zero(size); + for(int i = 0; i < numPoints; ++i) + mean += *(points[i]); + mean /= numPoints; + + // compute the covariance matrix + CovMatrixType covMat = CovMatrixType::Zero(size, size); + VectorType remean = VectorType::Zero(size); + for(int i = 0; i < numPoints; ++i) + { + VectorType diff = (*(points[i]) - mean).conjugate(); + covMat += diff * diff.adjoint(); + } + + // now we just have to pick the eigen vector with smallest eigen value + SelfAdjointEigenSolver eig(covMat); + result->normal() = eig.eigenvectors().col(0); + if (soundness) + *soundness = eig.eigenvalues().coeff(0)/eig.eigenvalues().coeff(1); + + // let's compute the constant coefficient such that the + // plane pass trough the mean point: + result->offset() = - (result->normal().cwise()* mean).sum(); +} + + +#endif // EIGEN_LEASTSQUARES_H diff --git a/extern/Eigen2/Eigen/src/QR/EigenSolver.h b/extern/Eigen2/Eigen/src/QR/EigenSolver.h new file mode 100644 index 00000000000..70f21cebcdb --- /dev/null +++ b/extern/Eigen2/Eigen/src/QR/EigenSolver.h @@ -0,0 +1,722 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_EIGENSOLVER_H +#define EIGEN_EIGENSOLVER_H + +/** \ingroup QR_Module + * \nonstableyet + * + * \class EigenSolver + * + * \brief Eigen values/vectors solver for non selfadjoint matrices + * + * \param MatrixType the type of the matrix of which we are computing the eigen decomposition + * + * Currently it only support real matrices. + * + * \note this code was adapted from JAMA (public domain) + * + * \sa MatrixBase::eigenvalues(), SelfAdjointEigenSolver + */ +template class EigenSolver +{ + public: + + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef std::complex Complex; + typedef Matrix EigenvalueType; + typedef Matrix EigenvectorType; + typedef Matrix RealVectorType; + typedef Matrix RealVectorTypeX; + + /** + * \brief Default Constructor. 
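Before moving on to the eigenvalue solvers, fitHyperplane() defined just above can also be called directly when the plane itself, rather than a regression, is wanted. A sketch with assumed include names and made-up sample points:

    #include <Eigen/Core>
    #include <Eigen/Geometry>        // Hyperplane
    #include <Eigen/LeastSquares>    // assumed module header exposing fitHyperplane()
    #include <iostream>

    int main()
    {
      // Four points close to the plane x + y + z = 1.
      Eigen::Vector3d pts[4];
      pts[0] = Eigen::Vector3d(0.0, 0.0, 1.00);
      pts[1] = Eigen::Vector3d(1.0, 0.0, 0.01);
      pts[2] = Eigen::Vector3d(0.0, 1.0, -0.02);
      pts[3] = Eigen::Vector3d(0.5, 0.5, 0.03);
      Eigen::Vector3d* ptrs[4] = { &pts[0], &pts[1], &pts[2], &pts[3] };

      Eigen::Hyperplane<double, 3> plane(3);           // ambient dimension 3
      double soundness = 0.0;
      Eigen::fitHyperplane(4, ptrs, &plane, &soundness);
      std::cout << "normal = " << plane.normal().transpose()
                << ", offset = " << plane.offset()
                << ", soundness = " << soundness << "\n";
      return 0;
    }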
+ * + * The default constructor is useful in cases in which the user intends to + * perform decompositions via EigenSolver::compute(const MatrixType&). + */ + EigenSolver() : m_eivec(), m_eivalues(), m_isInitialized(false) {} + + EigenSolver(const MatrixType& matrix) + : m_eivec(matrix.rows(), matrix.cols()), + m_eivalues(matrix.cols()), + m_isInitialized(false) + { + compute(matrix); + } + + + EigenvectorType eigenvectors(void) const; + + /** \returns a real matrix V of pseudo eigenvectors. + * + * Let D be the block diagonal matrix with the real eigenvalues in 1x1 blocks, + * and any complex values u+iv in 2x2 blocks [u v ; -v u]. Then, the matrices D + * and V satisfy A*V = V*D. + * + * More precisely, if the diagonal matrix of the eigen values is:\n + * \f$ + * \left[ \begin{array}{cccccc} + * u+iv & & & & & \\ + * & u-iv & & & & \\ + * & & a+ib & & & \\ + * & & & a-ib & & \\ + * & & & & x & \\ + * & & & & & y \\ + * \end{array} \right] + * \f$ \n + * then, we have:\n + * \f$ + * D =\left[ \begin{array}{cccccc} + * u & v & & & & \\ + * -v & u & & & & \\ + * & & a & b & & \\ + * & & -b & a & & \\ + * & & & & x & \\ + * & & & & & y \\ + * \end{array} \right] + * \f$ + * + * \sa pseudoEigenvalueMatrix() + */ + const MatrixType& pseudoEigenvectors() const + { + ei_assert(m_isInitialized && "EigenSolver is not initialized."); + return m_eivec; + } + + MatrixType pseudoEigenvalueMatrix() const; + + /** \returns the eigenvalues as a column vector */ + EigenvalueType eigenvalues() const + { + ei_assert(m_isInitialized && "EigenSolver is not initialized."); + return m_eivalues; + } + + void compute(const MatrixType& matrix); + + private: + + void orthes(MatrixType& matH, RealVectorType& ort); + void hqr2(MatrixType& matH); + + protected: + MatrixType m_eivec; + EigenvalueType m_eivalues; + bool m_isInitialized; +}; + +/** \returns the real block diagonal matrix D of the eigenvalues. + * + * See pseudoEigenvectors() for the details. + */ +template +MatrixType EigenSolver::pseudoEigenvalueMatrix() const +{ + ei_assert(m_isInitialized && "EigenSolver is not initialized."); + int n = m_eivec.cols(); + MatrixType matD = MatrixType::Zero(n,n); + for (int i=0; i(i,i) << ei_real(m_eivalues.coeff(i)), ei_imag(m_eivalues.coeff(i)), + -ei_imag(m_eivalues.coeff(i)), ei_real(m_eivalues.coeff(i)); + ++i; + } + } + return matD; +} + +/** \returns the normalized complex eigenvectors as a matrix of column vectors. + * + * \sa eigenvalues(), pseudoEigenvectors() + */ +template +typename EigenSolver::EigenvectorType EigenSolver::eigenvectors(void) const +{ + ei_assert(m_isInitialized && "EigenSolver is not initialized."); + int n = m_eivec.cols(); + EigenvectorType matV(n,n); + for (int j=0; j(); + } + else + { + // we have a pair of complex eigen values + for (int i=0; i +void EigenSolver::compute(const MatrixType& matrix) +{ + assert(matrix.cols() == matrix.rows()); + int n = matrix.cols(); + m_eivalues.resize(n,1); + + MatrixType matH = matrix; + RealVectorType ort(n); + + // Reduce to Hessenberg form. + orthes(matH, ort); + + // Reduce Hessenberg to real Schur form. + hqr2(matH); + + m_isInitialized = true; +} + +// Nonsymmetric reduction to Hessenberg form. +template +void EigenSolver::orthes(MatrixType& matH, RealVectorType& ort) +{ + // This is derived from the Algol procedures orthes and ortran, + // by Martin and Wilkinson, Handbook for Auto. Comp., + // Vol.ii-Linear Algebra, and the corresponding + // Fortran subroutines in EISPACK. 
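A usage sketch for the interface above, not part of the patch. It assumes the vendored headers; in this tree EigenSolver ships with the QR module.

    #include <Eigen/Core>
    #include <Eigen/QR>              // EigenSolver lives in the QR module of Eigen 2
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A(3, 3);
      A << 0, -1, 0,
           1,  0, 0,
           0,  0, 2;                           // 2x2 rotation block gives a complex pair, plus a real 2
      Eigen::EigenSolver<Eigen::MatrixXd> es(A);

      std::cout << "eigenvalues:\n" << es.eigenvalues() << "\n";   // i, -i and 2, in some order

      // The pseudo decomposition stays real and satisfies A*V = V*D.
      Eigen::MatrixXd V = es.pseudoEigenvectors();
      Eigen::MatrixXd D = es.pseudoEigenvalueMatrix();
      std::cout << "|A*V - V*D| = " << (A * V - V * D).norm() << "\n";   // expect ~0
      return 0;
    }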
+ + int n = m_eivec.cols(); + int low = 0; + int high = n-1; + + for (int m = low+1; m <= high-1; ++m) + { + // Scale column. + RealScalar scale = matH.block(m, m-1, high-m+1, 1).cwise().abs().sum(); + if (scale != 0.0) + { + // Compute Householder transformation. + RealScalar h = 0.0; + // FIXME could be rewritten, but this one looks better wrt cache + for (int i = high; i >= m; i--) + { + ort.coeffRef(i) = matH.coeff(i,m-1)/scale; + h += ort.coeff(i) * ort.coeff(i); + } + RealScalar g = ei_sqrt(h); + if (ort.coeff(m) > 0) + g = -g; + h = h - ort.coeff(m) * g; + ort.coeffRef(m) = ort.coeff(m) - g; + + // Apply Householder similarity transformation + // H = (I-u*u'/h)*H*(I-u*u')/h) + int bSize = high-m+1; + matH.block(m, m, bSize, n-m) -= ((ort.segment(m, bSize)/h) + * (ort.segment(m, bSize).transpose() * matH.block(m, m, bSize, n-m)).lazy()).lazy(); + + matH.block(0, m, high+1, bSize) -= ((matH.block(0, m, high+1, bSize) * ort.segment(m, bSize)).lazy() + * (ort.segment(m, bSize)/h).transpose()).lazy(); + + ort.coeffRef(m) = scale*ort.coeff(m); + matH.coeffRef(m,m-1) = scale*g; + } + } + + // Accumulate transformations (Algol's ortran). + m_eivec.setIdentity(); + + for (int m = high-1; m >= low+1; m--) + { + if (matH.coeff(m,m-1) != 0.0) + { + ort.segment(m+1, high-m) = matH.col(m-1).segment(m+1, high-m); + + int bSize = high-m+1; + m_eivec.block(m, m, bSize, bSize) += ( (ort.segment(m, bSize) / (matH.coeff(m,m-1) * ort.coeff(m) ) ) + * (ort.segment(m, bSize).transpose() * m_eivec.block(m, m, bSize, bSize)).lazy()); + } + } +} + +// Complex scalar division. +template +std::complex cdiv(Scalar xr, Scalar xi, Scalar yr, Scalar yi) +{ + Scalar r,d; + if (ei_abs(yr) > ei_abs(yi)) + { + r = yi/yr; + d = yr + r*yi; + return std::complex((xr + r*xi)/d, (xi - r*xr)/d); + } + else + { + r = yr/yi; + d = yi + r*yr; + return std::complex((r*xr + xi)/d, (r*xi - xr)/d); + } +} + + +// Nonsymmetric reduction from Hessenberg to real Schur form. +template +void EigenSolver::hqr2(MatrixType& matH) +{ + // This is derived from the Algol procedure hqr2, + // by Martin and Wilkinson, Handbook for Auto. Comp., + // Vol.ii-Linear Algebra, and the corresponding + // Fortran subroutine in EISPACK. + + // Initialize + int nn = m_eivec.cols(); + int n = nn-1; + int low = 0; + int high = nn-1; + Scalar eps = ei_pow(Scalar(2),ei_is_same_type::ret ? 
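The cdiv() helper above is the classic scaled (Smith-style) complex division: dividing by the larger of |yr| and |yi| first keeps the intermediate products finite where the textbook formula would overflow. A self-contained illustration of the difference, independent of Eigen:

    #include <cmath>
    #include <complex>
    #include <iostream>

    // Textbook formula: divides by yr*yr + yi*yi, which overflows long before
    // the true quotient does.
    static std::complex<double> naive_div(double xr, double xi, double yr, double yi)
    {
      const double d = yr * yr + yi * yi;
      return std::complex<double>((xr * yr + xi * yi) / d, (xi * yr - xr * yi) / d);
    }

    // Same scaling as cdiv() above.
    static std::complex<double> scaled_div(double xr, double xi, double yr, double yi)
    {
      if (std::abs(yr) > std::abs(yi)) {
        const double r = yi / yr, d = yr + r * yi;
        return std::complex<double>((xr + r * xi) / d, (xi - r * xr) / d);
      }
      const double r = yr / yi, d = yi + r * yr;
      return std::complex<double>((r * xr + xi) / d, (r * xi - xr) / d);
    }

    int main()
    {
      const double big = 1e200;                         // big*big overflows a double
      // (big + big*i) / (big + big*i) is exactly 1.
      std::cout << "naive:  " << naive_div(big, big, big, big) << "\n";    // NaNs
      std::cout << "scaled: " << scaled_div(big, big, big, big) << "\n";   // (1,0)
      return 0;
    }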
Scalar(-23) : Scalar(-52)); + Scalar exshift = 0.0; + Scalar p=0,q=0,r=0,s=0,z=0,t,w,x,y; + + // Store roots isolated by balanc and compute matrix norm + // FIXME to be efficient the following would requires a triangular reduxion code + // Scalar norm = matH.upper().cwise().abs().sum() + matH.corner(BottomLeft,n,n).diagonal().cwise().abs().sum(); + Scalar norm = 0.0; + for (int j = 0; j < nn; ++j) + { + // FIXME what's the purpose of the following since the condition is always false + if ((j < low) || (j > high)) + { + m_eivalues.coeffRef(j) = Complex(matH.coeff(j,j), 0.0); + } + norm += matH.row(j).segment(std::max(j-1,0), nn-std::max(j-1,0)).cwise().abs().sum(); + } + + // Outer loop over eigenvalue index + int iter = 0; + while (n >= low) + { + // Look for single small sub-diagonal element + int l = n; + while (l > low) + { + s = ei_abs(matH.coeff(l-1,l-1)) + ei_abs(matH.coeff(l,l)); + if (s == 0.0) + s = norm; + if (ei_abs(matH.coeff(l,l-1)) < eps * s) + break; + l--; + } + + // Check for convergence + // One root found + if (l == n) + { + matH.coeffRef(n,n) = matH.coeff(n,n) + exshift; + m_eivalues.coeffRef(n) = Complex(matH.coeff(n,n), 0.0); + n--; + iter = 0; + } + else if (l == n-1) // Two roots found + { + w = matH.coeff(n,n-1) * matH.coeff(n-1,n); + p = (matH.coeff(n-1,n-1) - matH.coeff(n,n)) * Scalar(0.5); + q = p * p + w; + z = ei_sqrt(ei_abs(q)); + matH.coeffRef(n,n) = matH.coeff(n,n) + exshift; + matH.coeffRef(n-1,n-1) = matH.coeff(n-1,n-1) + exshift; + x = matH.coeff(n,n); + + // Scalar pair + if (q >= 0) + { + if (p >= 0) + z = p + z; + else + z = p - z; + + m_eivalues.coeffRef(n-1) = Complex(x + z, 0.0); + m_eivalues.coeffRef(n) = Complex(z!=0.0 ? x - w / z : m_eivalues.coeff(n-1).real(), 0.0); + + x = matH.coeff(n,n-1); + s = ei_abs(x) + ei_abs(z); + p = x / s; + q = z / s; + r = ei_sqrt(p * p+q * q); + p = p / r; + q = q / r; + + // Row modification + for (int j = n-1; j < nn; ++j) + { + z = matH.coeff(n-1,j); + matH.coeffRef(n-1,j) = q * z + p * matH.coeff(n,j); + matH.coeffRef(n,j) = q * matH.coeff(n,j) - p * z; + } + + // Column modification + for (int i = 0; i <= n; ++i) + { + z = matH.coeff(i,n-1); + matH.coeffRef(i,n-1) = q * z + p * matH.coeff(i,n); + matH.coeffRef(i,n) = q * matH.coeff(i,n) - p * z; + } + + // Accumulate transformations + for (int i = low; i <= high; ++i) + { + z = m_eivec.coeff(i,n-1); + m_eivec.coeffRef(i,n-1) = q * z + p * m_eivec.coeff(i,n); + m_eivec.coeffRef(i,n) = q * m_eivec.coeff(i,n) - p * z; + } + } + else // Complex pair + { + m_eivalues.coeffRef(n-1) = Complex(x + p, z); + m_eivalues.coeffRef(n) = Complex(x + p, -z); + } + n = n - 2; + iter = 0; + } + else // No convergence yet + { + // Form shift + x = matH.coeff(n,n); + y = 0.0; + w = 0.0; + if (l < n) + { + y = matH.coeff(n-1,n-1); + w = matH.coeff(n,n-1) * matH.coeff(n-1,n); + } + + // Wilkinson's original ad hoc shift + if (iter == 10) + { + exshift += x; + for (int i = low; i <= n; ++i) + matH.coeffRef(i,i) -= x; + s = ei_abs(matH.coeff(n,n-1)) + ei_abs(matH.coeff(n-1,n-2)); + x = y = Scalar(0.75) * s; + w = Scalar(-0.4375) * s * s; + } + + // MATLAB's new ad hoc shift + if (iter == 30) + { + s = Scalar((y - x) / 2.0); + s = s * s + w; + if (s > 0) + { + s = ei_sqrt(s); + if (y < x) + s = -s; + s = Scalar(x - w / ((y - x) / 2.0 + s)); + for (int i = low; i <= n; ++i) + matH.coeffRef(i,i) -= s; + exshift += s; + x = y = w = Scalar(0.964); + } + } + + iter = iter + 1; // (Could check iteration count here.) 
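The constants 2^-23 and 2^-52 chosen for eps above are simply the machine epsilons of float and double. A quick check, independent of Eigen:

    #include <cmath>
    #include <iostream>
    #include <limits>

    int main()
    {
      std::cout << std::ldexp(1.0, -23) << " vs " << std::numeric_limits<float>::epsilon()  << "\n";
      std::cout << std::ldexp(1.0, -52) << " vs " << std::numeric_limits<double>::epsilon() << "\n";
      return 0;
    }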
+ + // Look for two consecutive small sub-diagonal elements + int m = n-2; + while (m >= l) + { + z = matH.coeff(m,m); + r = x - z; + s = y - z; + p = (r * s - w) / matH.coeff(m+1,m) + matH.coeff(m,m+1); + q = matH.coeff(m+1,m+1) - z - r - s; + r = matH.coeff(m+2,m+1); + s = ei_abs(p) + ei_abs(q) + ei_abs(r); + p = p / s; + q = q / s; + r = r / s; + if (m == l) { + break; + } + if (ei_abs(matH.coeff(m,m-1)) * (ei_abs(q) + ei_abs(r)) < + eps * (ei_abs(p) * (ei_abs(matH.coeff(m-1,m-1)) + ei_abs(z) + + ei_abs(matH.coeff(m+1,m+1))))) + { + break; + } + m--; + } + + for (int i = m+2; i <= n; ++i) + { + matH.coeffRef(i,i-2) = 0.0; + if (i > m+2) + matH.coeffRef(i,i-3) = 0.0; + } + + // Double QR step involving rows l:n and columns m:n + for (int k = m; k <= n-1; ++k) + { + int notlast = (k != n-1); + if (k != m) { + p = matH.coeff(k,k-1); + q = matH.coeff(k+1,k-1); + r = notlast ? matH.coeff(k+2,k-1) : Scalar(0); + x = ei_abs(p) + ei_abs(q) + ei_abs(r); + if (x != 0.0) + { + p = p / x; + q = q / x; + r = r / x; + } + } + + if (x == 0.0) + break; + + s = ei_sqrt(p * p + q * q + r * r); + + if (p < 0) + s = -s; + + if (s != 0) + { + if (k != m) + matH.coeffRef(k,k-1) = -s * x; + else if (l != m) + matH.coeffRef(k,k-1) = -matH.coeff(k,k-1); + + p = p + s; + x = p / s; + y = q / s; + z = r / s; + q = q / p; + r = r / p; + + // Row modification + for (int j = k; j < nn; ++j) + { + p = matH.coeff(k,j) + q * matH.coeff(k+1,j); + if (notlast) + { + p = p + r * matH.coeff(k+2,j); + matH.coeffRef(k+2,j) = matH.coeff(k+2,j) - p * z; + } + matH.coeffRef(k,j) = matH.coeff(k,j) - p * x; + matH.coeffRef(k+1,j) = matH.coeff(k+1,j) - p * y; + } + + // Column modification + for (int i = 0; i <= std::min(n,k+3); ++i) + { + p = x * matH.coeff(i,k) + y * matH.coeff(i,k+1); + if (notlast) + { + p = p + z * matH.coeff(i,k+2); + matH.coeffRef(i,k+2) = matH.coeff(i,k+2) - p * r; + } + matH.coeffRef(i,k) = matH.coeff(i,k) - p; + matH.coeffRef(i,k+1) = matH.coeff(i,k+1) - p * q; + } + + // Accumulate transformations + for (int i = low; i <= high; ++i) + { + p = x * m_eivec.coeff(i,k) + y * m_eivec.coeff(i,k+1); + if (notlast) + { + p = p + z * m_eivec.coeff(i,k+2); + m_eivec.coeffRef(i,k+2) = m_eivec.coeff(i,k+2) - p * r; + } + m_eivec.coeffRef(i,k) = m_eivec.coeff(i,k) - p; + m_eivec.coeffRef(i,k+1) = m_eivec.coeff(i,k+1) - p * q; + } + } // (s != 0) + } // k loop + } // check convergence + } // while (n >= low) + + // Backsubstitute to find vectors of upper triangular form + if (norm == 0.0) + { + return; + } + + for (n = nn-1; n >= 0; n--) + { + p = m_eivalues.coeff(n).real(); + q = m_eivalues.coeff(n).imag(); + + // Scalar vector + if (q == 0) + { + int l = n; + matH.coeffRef(n,n) = 1.0; + for (int i = n-1; i >= 0; i--) + { + w = matH.coeff(i,i) - p; + r = (matH.row(i).segment(l,n-l+1) * matH.col(n).segment(l, n-l+1))(0,0); + + if (m_eivalues.coeff(i).imag() < 0.0) + { + z = w; + s = r; + } + else + { + l = i; + if (m_eivalues.coeff(i).imag() == 0.0) + { + if (w != 0.0) + matH.coeffRef(i,n) = -r / w; + else + matH.coeffRef(i,n) = -r / (eps * norm); + } + else // Solve real equations + { + x = matH.coeff(i,i+1); + y = matH.coeff(i+1,i); + q = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag(); + t = (x * s - z * r) / q; + matH.coeffRef(i,n) = t; + if (ei_abs(x) > ei_abs(z)) + matH.coeffRef(i+1,n) = (-r - w * t) / x; + else + matH.coeffRef(i+1,n) = (-s - y * t) / z; + } + + // Overflow control + t = ei_abs(matH.coeff(i,n)); + if ((eps * t) * t > 
1) + matH.col(n).end(nn-i) /= t; + } + } + } + else if (q < 0) // Complex vector + { + std::complex cc; + int l = n-1; + + // Last vector component imaginary so matrix is triangular + if (ei_abs(matH.coeff(n,n-1)) > ei_abs(matH.coeff(n-1,n))) + { + matH.coeffRef(n-1,n-1) = q / matH.coeff(n,n-1); + matH.coeffRef(n-1,n) = -(matH.coeff(n,n) - p) / matH.coeff(n,n-1); + } + else + { + cc = cdiv(0.0,-matH.coeff(n-1,n),matH.coeff(n-1,n-1)-p,q); + matH.coeffRef(n-1,n-1) = ei_real(cc); + matH.coeffRef(n-1,n) = ei_imag(cc); + } + matH.coeffRef(n,n-1) = 0.0; + matH.coeffRef(n,n) = 1.0; + for (int i = n-2; i >= 0; i--) + { + Scalar ra,sa,vr,vi; + ra = (matH.block(i,l, 1, n-l+1) * matH.block(l,n-1, n-l+1, 1)).lazy()(0,0); + sa = (matH.block(i,l, 1, n-l+1) * matH.block(l,n, n-l+1, 1)).lazy()(0,0); + w = matH.coeff(i,i) - p; + + if (m_eivalues.coeff(i).imag() < 0.0) + { + z = w; + r = ra; + s = sa; + } + else + { + l = i; + if (m_eivalues.coeff(i).imag() == 0) + { + cc = cdiv(-ra,-sa,w,q); + matH.coeffRef(i,n-1) = ei_real(cc); + matH.coeffRef(i,n) = ei_imag(cc); + } + else + { + // Solve complex equations + x = matH.coeff(i,i+1); + y = matH.coeff(i+1,i); + vr = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag() - q * q; + vi = (m_eivalues.coeff(i).real() - p) * Scalar(2) * q; + if ((vr == 0.0) && (vi == 0.0)) + vr = eps * norm * (ei_abs(w) + ei_abs(q) + ei_abs(x) + ei_abs(y) + ei_abs(z)); + + cc= cdiv(x*r-z*ra+q*sa,x*s-z*sa-q*ra,vr,vi); + matH.coeffRef(i,n-1) = ei_real(cc); + matH.coeffRef(i,n) = ei_imag(cc); + if (ei_abs(x) > (ei_abs(z) + ei_abs(q))) + { + matH.coeffRef(i+1,n-1) = (-ra - w * matH.coeff(i,n-1) + q * matH.coeff(i,n)) / x; + matH.coeffRef(i+1,n) = (-sa - w * matH.coeff(i,n) - q * matH.coeff(i,n-1)) / x; + } + else + { + cc = cdiv(-r-y*matH.coeff(i,n-1),-s-y*matH.coeff(i,n),z,q); + matH.coeffRef(i+1,n-1) = ei_real(cc); + matH.coeffRef(i+1,n) = ei_imag(cc); + } + } + + // Overflow control + t = std::max(ei_abs(matH.coeff(i,n-1)),ei_abs(matH.coeff(i,n))); + if ((eps * t) * t > 1) + matH.block(i, n-1, nn-i, 2) /= t; + + } + } + } + } + + // Vectors of isolated roots + for (int i = 0; i < nn; ++i) + { + // FIXME again what's the purpose of this test ? + // in this algo low==0 and high==nn-1 !! + if (i < low || i > high) + { + m_eivec.row(i).end(nn-i) = matH.row(i).end(nn-i); + } + } + + // Back transformation to get eigenvectors of original matrix + int bRows = high-low+1; + for (int j = nn-1; j >= low; j--) + { + int bSize = std::min(j,high)-low+1; + m_eivec.col(j).segment(low, bRows) = (m_eivec.block(low, low, bRows, bSize) * matH.col(j).segment(low, bSize)); + } +} + +#endif // EIGEN_EIGENSOLVER_H diff --git a/extern/Eigen2/Eigen/src/QR/HessenbergDecomposition.h b/extern/Eigen2/Eigen/src/QR/HessenbergDecomposition.h new file mode 100644 index 00000000000..6d0ff794ec2 --- /dev/null +++ b/extern/Eigen2/Eigen/src/QR/HessenbergDecomposition.h @@ -0,0 +1,250 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. 
+// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_HESSENBERGDECOMPOSITION_H +#define EIGEN_HESSENBERGDECOMPOSITION_H + +/** \ingroup QR_Module + * \nonstableyet + * + * \class HessenbergDecomposition + * + * \brief Reduces a squared matrix to an Hessemberg form + * + * \param MatrixType the type of the matrix of which we are computing the Hessenberg decomposition + * + * This class performs an Hessenberg decomposition of a matrix \f$ A \f$ such that: + * \f$ A = Q H Q^* \f$ where \f$ Q \f$ is unitary and \f$ H \f$ a Hessenberg matrix. + * + * \sa class Tridiagonalization, class Qr + */ +template class HessenbergDecomposition +{ + public: + + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + + enum { + Size = MatrixType::RowsAtCompileTime, + SizeMinusOne = MatrixType::RowsAtCompileTime==Dynamic + ? Dynamic + : MatrixType::RowsAtCompileTime-1 + }; + + typedef Matrix CoeffVectorType; + typedef Matrix DiagonalType; + typedef Matrix SubDiagonalType; + + typedef typename NestByValue >::RealReturnType DiagonalReturnType; + + typedef typename NestByValue > > >::RealReturnType SubDiagonalReturnType; + + /** This constructor initializes a HessenbergDecomposition object for + * further use with HessenbergDecomposition::compute() + */ + HessenbergDecomposition(int size = Size==Dynamic ? 2 : Size) + : m_matrix(size,size), m_hCoeffs(size-1) + {} + + HessenbergDecomposition(const MatrixType& matrix) + : m_matrix(matrix), + m_hCoeffs(matrix.cols()-1) + { + _compute(m_matrix, m_hCoeffs); + } + + /** Computes or re-compute the Hessenberg decomposition for the matrix \a matrix. + * + * This method allows to re-use the allocated data. + */ + void compute(const MatrixType& matrix) + { + m_matrix = matrix; + m_hCoeffs.resize(matrix.rows()-1,1); + _compute(m_matrix, m_hCoeffs); + } + + /** \returns the householder coefficients allowing to + * reconstruct the matrix Q from the packed data. + * + * \sa packedMatrix() + */ + CoeffVectorType householderCoefficients(void) const { return m_hCoeffs; } + + /** \returns the internal result of the decomposition. + * + * The returned matrix contains the following information: + * - the upper part and lower sub-diagonal represent the Hessenberg matrix H + * - the rest of the lower part contains the Householder vectors that, combined with + * Householder coefficients returned by householderCoefficients(), + * allows to reconstruct the matrix Q as follow: + * Q = H_{N-1} ... H_1 H_0 + * where the matrices H are the Householder transformation: + * H_i = (I - h_i * v_i * v_i') + * where h_i == householderCoefficients()[i] and v_i is a Householder vector: + * v_i = [ 0, ..., 0, 1, M(i+2,i), ..., M(N-1,i) ] + * + * See LAPACK for further details on this packed storage. 
+ */ + const MatrixType& packedMatrix(void) const { return m_matrix; } + + MatrixType matrixQ(void) const; + MatrixType matrixH(void) const; + + private: + + static void _compute(MatrixType& matA, CoeffVectorType& hCoeffs); + + protected: + MatrixType m_matrix; + CoeffVectorType m_hCoeffs; +}; + +#ifndef EIGEN_HIDE_HEAVY_CODE + +/** \internal + * Performs a tridiagonal decomposition of \a matA in place. + * + * \param matA the input selfadjoint matrix + * \param hCoeffs returned Householder coefficients + * + * The result is written in the lower triangular part of \a matA. + * + * Implemented from Golub's "Matrix Computations", algorithm 8.3.1. + * + * \sa packedMatrix() + */ +template +void HessenbergDecomposition::_compute(MatrixType& matA, CoeffVectorType& hCoeffs) +{ + assert(matA.rows()==matA.cols()); + int n = matA.rows(); + for (int i = 0; i(1))) + { + hCoeffs.coeffRef(i) = 0.; + } + else + { + Scalar v0 = matA.col(i).coeff(i+1); + RealScalar beta = ei_sqrt(ei_abs2(v0)+v1norm2); + if (ei_real(v0)>=0.) + beta = -beta; + matA.col(i).end(n-(i+2)) *= (Scalar(1)/(v0-beta)); + matA.col(i).coeffRef(i+1) = beta; + Scalar h = (beta - v0) / beta; + // end of the householder transformation + + // Apply similarity transformation to remaining columns, + // i.e., A = H' A H where H = I - h v v' and v = matA.col(i).end(n-i-1) + matA.col(i).coeffRef(i+1) = 1; + + // first let's do A = H A + matA.corner(BottomRight,n-i-1,n-i-1) -= ((ei_conj(h) * matA.col(i).end(n-i-1)) * + (matA.col(i).end(n-i-1).adjoint() * matA.corner(BottomRight,n-i-1,n-i-1))).lazy(); + + // now let's do A = A H + matA.corner(BottomRight,n,n-i-1) -= ((matA.corner(BottomRight,n,n-i-1) * matA.col(i).end(n-i-1)) + * (h * matA.col(i).end(n-i-1).adjoint())).lazy(); + + matA.col(i).coeffRef(i+1) = beta; + hCoeffs.coeffRef(i) = h; + } + } + if (NumTraits::IsComplex) + { + // Householder transformation on the remaining single scalar + int i = n-2; + Scalar v0 = matA.coeff(i+1,i); + + RealScalar beta = ei_sqrt(ei_abs2(v0)); + if (ei_real(v0)>=0.) + beta = -beta; + Scalar h = (beta - v0) / beta; + hCoeffs.coeffRef(i) = h; + + // A = H* A + matA.corner(BottomRight,n-i-1,n-i) -= ei_conj(h) * matA.corner(BottomRight,n-i-1,n-i); + + // A = A H + matA.col(n-1) -= h * matA.col(n-1); + } + else + { + hCoeffs.coeffRef(n-2) = 0; + } +} + +/** reconstructs and returns the matrix Q */ +template +typename HessenbergDecomposition::MatrixType +HessenbergDecomposition::matrixQ(void) const +{ + int n = m_matrix.rows(); + MatrixType matQ = MatrixType::Identity(n,n); + for (int i = n-2; i>=0; i--) + { + Scalar tmp = m_matrix.coeff(i+1,i); + m_matrix.const_cast_derived().coeffRef(i+1,i) = 1; + + matQ.corner(BottomRight,n-i-1,n-i-1) -= + ((m_hCoeffs.coeff(i) * m_matrix.col(i).end(n-i-1)) * + (m_matrix.col(i).end(n-i-1).adjoint() * matQ.corner(BottomRight,n-i-1,n-i-1)).lazy()).lazy(); + + m_matrix.const_cast_derived().coeffRef(i+1,i) = tmp; + } + return matQ; +} + +#endif // EIGEN_HIDE_HEAVY_CODE + +/** constructs and returns the matrix H. + * Note that the matrix H is equivalent to the upper part of the packed matrix + * (including the lower sub-diagonal). Therefore, it might be often sufficient + * to directly use the packed matrix instead of creating a new one. 
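A usage sketch for the accessors above (not part of the patch; assumes the vendored headers, where HessenbergDecomposition is pulled in by the QR module): reconstructing the input from Q and H should give back the original matrix up to rounding.

    #include <Eigen/Core>
    #include <Eigen/QR>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A(4, 4);
      A << 1, 2, 3, 4,
           5, 6, 7, 8,
           9, 1, 2, 3,
           4, 5, 6, 7;
      Eigen::HessenbergDecomposition<Eigen::MatrixXd> hd(A);
      Eigen::MatrixXd H = hd.matrixH();      // upper Hessenberg: zero below the first sub-diagonal
      Eigen::MatrixXd Q = hd.matrixQ();
      std::cout << "|A - Q*H*Q^*| = " << (A - Q * H * Q.adjoint()).norm() << "\n";   // expect ~0
      return 0;
    }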
+ */ +template +typename HessenbergDecomposition::MatrixType +HessenbergDecomposition::matrixH(void) const +{ + // FIXME should this function (and other similar) rather take a matrix as argument + // and fill it (to avoid temporaries) + int n = m_matrix.rows(); + MatrixType matH = m_matrix; + if (n>2) + matH.corner(BottomLeft,n-2, n-2).template part().setZero(); + return matH; +} + +#endif // EIGEN_HESSENBERGDECOMPOSITION_H diff --git a/extern/Eigen2/Eigen/src/QR/QR.h b/extern/Eigen2/Eigen/src/QR/QR.h new file mode 100644 index 00000000000..90751dd428d --- /dev/null +++ b/extern/Eigen2/Eigen/src/QR/QR.h @@ -0,0 +1,334 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_QR_H +#define EIGEN_QR_H + +/** \ingroup QR_Module + * \nonstableyet + * + * \class QR + * + * \brief QR decomposition of a matrix + * + * \param MatrixType the type of the matrix of which we are computing the QR decomposition + * + * This class performs a QR decomposition using Householder transformations. The result is + * stored in a compact way compatible with LAPACK. + * + * \sa MatrixBase::qr() + */ +template class QR +{ + public: + + typedef typename MatrixType::Scalar Scalar; + typedef typename MatrixType::RealScalar RealScalar; + typedef Block MatrixRBlockType; + typedef Matrix MatrixTypeR; + typedef Matrix VectorType; + + /** + * \brief Default Constructor. + * + * The default constructor is useful in cases in which the user intends to + * perform decompositions via QR::compute(const MatrixType&). + */ + QR() : m_qr(), m_hCoeffs(), m_isInitialized(false) {} + + QR(const MatrixType& matrix) + : m_qr(matrix.rows(), matrix.cols()), + m_hCoeffs(matrix.cols()), + m_isInitialized(false) + { + compute(matrix); + } + + /** \deprecated use isInjective() + * \returns whether or not the matrix is of full rank + * + * \note Since the rank is computed only once, i.e. the first time it is needed, this + * method almost does not perform any further computation. + */ + EIGEN_DEPRECATED bool isFullRank() const + { + ei_assert(m_isInitialized && "QR is not initialized."); + return rank() == m_qr.cols(); + } + + /** \returns the rank of the matrix of which *this is the QR decomposition. + * + * \note Since the rank is computed only once, i.e. the first time it is needed, this + * method almost does not perform any further computation. + */ + int rank() const; + + /** \returns the dimension of the kernel of the matrix of which *this is the QR decomposition. 
+ * + * \note Since the rank is computed only once, i.e. the first time it is needed, this + * method almost does not perform any further computation. + */ + inline int dimensionOfKernel() const + { + ei_assert(m_isInitialized && "QR is not initialized."); + return m_qr.cols() - rank(); + } + + /** \returns true if the matrix of which *this is the QR decomposition represents an injective + * linear map, i.e. has trivial kernel; false otherwise. + * + * \note Since the rank is computed only once, i.e. the first time it is needed, this + * method almost does not perform any further computation. + */ + inline bool isInjective() const + { + ei_assert(m_isInitialized && "QR is not initialized."); + return rank() == m_qr.cols(); + } + + /** \returns true if the matrix of which *this is the QR decomposition represents a surjective + * linear map; false otherwise. + * + * \note Since the rank is computed only once, i.e. the first time it is needed, this + * method almost does not perform any further computation. + */ + inline bool isSurjective() const + { + ei_assert(m_isInitialized && "QR is not initialized."); + return rank() == m_qr.rows(); + } + + /** \returns true if the matrix of which *this is the QR decomposition is invertible. + * + * \note Since the rank is computed only once, i.e. the first time it is needed, this + * method almost does not perform any further computation. + */ + inline bool isInvertible() const + { + ei_assert(m_isInitialized && "QR is not initialized."); + return isInjective() && isSurjective(); + } + + /** \returns a read-only expression of the matrix R of the actual the QR decomposition */ + const Part, UpperTriangular> + matrixR(void) const + { + ei_assert(m_isInitialized && "QR is not initialized."); + int cols = m_qr.cols(); + return MatrixRBlockType(m_qr, 0, 0, cols, cols).nestByValue().template part(); + } + + /** This method finds a solution x to the equation Ax=b, where A is the matrix of which + * *this is the QR decomposition, if any exists. + * + * \param b the right-hand-side of the equation to solve. + * + * \param result a pointer to the vector/matrix in which to store the solution, if any exists. + * Resized if necessary, so that result->rows()==A.cols() and result->cols()==b.cols(). + * If no solution exists, *result is left with undefined coefficients. + * + * \returns true if any solution exists, false if no solution exists. + * + * \note If there exist more than one solution, this method will arbitrarily choose one. + * If you need a complete analysis of the space of solutions, take the one solution obtained + * by this method and add to it elements of the kernel, as determined by kernel(). + * + * \note The case where b is a matrix is not yet implemented. Also, this + * code is space inefficient. + * + * Example: \include QR_solve.cpp + * Output: \verbinclude QR_solve.out + * + * \sa MatrixBase::solveTriangular(), kernel(), computeKernel(), inverse(), computeInverse() + */ + template + bool solve(const MatrixBase& b, ResultType *result) const; + + MatrixType matrixQ(void) const; + + void compute(const MatrixType& matrix); + + protected: + MatrixType m_qr; + VectorType m_hCoeffs; + mutable int m_rank; + mutable bool m_rankIsUptodate; + bool m_isInitialized; +}; + +/** \returns the rank of the matrix of which *this is the QR decomposition. 
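A usage sketch of the QR interface above (not part of the patch, same include-path assumption as earlier): decompose once, then solve against a right-hand side.

    #include <Eigen/Core>
    #include <Eigen/QR>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A(3, 3);
      A << 1, 2, 0,
           0, 1, 1,
           1, 0, 1;                          // invertible
      Eigen::VectorXd b(3);
      b << 3, 2, 2;

      Eigen::QR<Eigen::MatrixXd> qr(A);
      Eigen::VectorXd x;
      if (qr.solve(b, &x))                   // false if b is not in the image of A
        std::cout << "x = " << x.transpose()
                  << ", residual = " << (A * x - b).norm() << "\n";
      return 0;
    }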
*/ +template +int QR::rank() const +{ + ei_assert(m_isInitialized && "QR is not initialized."); + if (!m_rankIsUptodate) + { + RealScalar maxCoeff = m_qr.diagonal().cwise().abs().maxCoeff(); + int n = m_qr.cols(); + m_rank = 0; + while(m_rank +void QR::compute(const MatrixType& matrix) +{ + m_rankIsUptodate = false; + m_qr = matrix; + m_hCoeffs.resize(matrix.cols()); + + int rows = matrix.rows(); + int cols = matrix.cols(); + RealScalar eps2 = precision()*precision(); + + for (int k = 0; k < cols; ++k) + { + int remainingSize = rows-k; + + RealScalar beta; + Scalar v0 = m_qr.col(k).coeff(k); + + if (remainingSize==1) + { + if (NumTraits::IsComplex) + { + // Householder transformation on the remaining single scalar + beta = ei_abs(v0); + if (ei_real(v0)>0) + beta = -beta; + m_qr.coeffRef(k,k) = beta; + m_hCoeffs.coeffRef(k) = (beta - v0) / beta; + } + else + { + m_hCoeffs.coeffRef(k) = 0; + } + } + else if ((beta=m_qr.col(k).end(remainingSize-1).squaredNorm())>eps2) + // FIXME what about ei_imag(v0) ?? + { + // form k-th Householder vector + beta = ei_sqrt(ei_abs2(v0)+beta); + if (ei_real(v0)>=0.) + beta = -beta; + m_qr.col(k).end(remainingSize-1) /= v0-beta; + m_qr.coeffRef(k,k) = beta; + Scalar h = m_hCoeffs.coeffRef(k) = (beta - v0) / beta; + + // apply the Householder transformation (I - h v v') to remaining columns, i.e., + // R <- (I - h v v') * R where v = [1,m_qr(k+1,k), m_qr(k+2,k), ...] + int remainingCols = cols - k -1; + if (remainingCols>0) + { + m_qr.coeffRef(k,k) = Scalar(1); + m_qr.corner(BottomRight, remainingSize, remainingCols) -= ei_conj(h) * m_qr.col(k).end(remainingSize) + * (m_qr.col(k).end(remainingSize).adjoint() * m_qr.corner(BottomRight, remainingSize, remainingCols)); + m_qr.coeffRef(k,k) = beta; + } + } + else + { + m_hCoeffs.coeffRef(k) = 0; + } + } + m_isInitialized = true; +} + +template +template +bool QR::solve( + const MatrixBase& b, + ResultType *result +) const +{ + ei_assert(m_isInitialized && "QR is not initialized."); + const int rows = m_qr.rows(); + ei_assert(b.rows() == rows); + result->resize(rows, b.cols()); + + // TODO(keir): There is almost certainly a faster way to multiply by + // Q^T without explicitly forming matrixQ(). Investigate. + *result = matrixQ().transpose()*b; + + if(!isSurjective()) + { + // is result is in the image of R ? + RealScalar biggest_in_res = result->corner(TopLeft, m_rank, result->cols()).cwise().abs().maxCoeff(); + for(int col = 0; col < result->cols(); ++col) + for(int row = m_rank; row < result->rows(); ++row) + if(!ei_isMuchSmallerThan(result->coeff(row,col), biggest_in_res)) + return false; + } + m_qr.corner(TopLeft, m_rank, m_rank) + .template marked() + .solveTriangularInPlace(result->corner(TopLeft, m_rank, result->cols())); + + return true; +} + +/** \returns the matrix Q */ +template +MatrixType QR::matrixQ() const +{ + ei_assert(m_isInitialized && "QR is not initialized."); + // compute the product Q_0 Q_1 ... Q_n-1, + // where Q_k is the k-th Householder transformation I - h_k v_k v_k' + // and v_k is the k-th Householder vector [1,m_qr(k+1,k), m_qr(k+2,k), ...] + int rows = m_qr.rows(); + int cols = m_qr.cols(); + MatrixType res = MatrixType::Identity(rows, cols); + for (int k = cols-1; k >= 0; k--) + { + // to make easier the computation of the transformation, let's temporarily + // overwrite m_qr(k,k) such that the end of m_qr.col(k) is exactly our Householder vector. 
+ Scalar beta = m_qr.coeff(k,k); + m_qr.const_cast_derived().coeffRef(k,k) = 1; + int endLength = rows-k; + res.corner(BottomRight,endLength, cols-k) -= ((m_hCoeffs.coeff(k) * m_qr.col(k).end(endLength)) + * (m_qr.col(k).end(endLength).adjoint() * res.corner(BottomRight,endLength, cols-k)).lazy()).lazy(); + m_qr.const_cast_derived().coeffRef(k,k) = beta; + } + return res; +} + +#endif // EIGEN_HIDE_HEAVY_CODE + +/** \return the QR decomposition of \c *this. + * + * \sa class QR + */ +template +const QR::PlainMatrixType> +MatrixBase::qr() const +{ + return QR(eval()); +} + + +#endif // EIGEN_QR_H diff --git a/extern/Eigen2/Eigen/src/QR/QrInstantiations.cpp b/extern/Eigen2/Eigen/src/QR/QrInstantiations.cpp new file mode 100644 index 00000000000..dacb05d3d1f --- /dev/null +++ b/extern/Eigen2/Eigen/src/QR/QrInstantiations.cpp @@ -0,0 +1,43 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_EXTERN_INSTANTIATIONS +#define EIGEN_EXTERN_INSTANTIATIONS +#endif +#include "../../Core" +#undef EIGEN_EXTERN_INSTANTIATIONS + +#include "../../QR" + +namespace Eigen +{ + +template static void ei_tridiagonal_qr_step(float* , float* , int, int, float* , int); +template static void ei_tridiagonal_qr_step(double* , double* , int, int, double* , int); +template static void ei_tridiagonal_qr_step(float* , float* , int, int, std::complex* , int); +template static void ei_tridiagonal_qr_step(double* , double* , int, int, std::complex* , int); + +EIGEN_QR_MODULE_INSTANTIATE(); + +} diff --git a/extern/Eigen2/Eigen/src/QR/SelfAdjointEigenSolver.h b/extern/Eigen2/Eigen/src/QR/SelfAdjointEigenSolver.h new file mode 100644 index 00000000000..70984efab9d --- /dev/null +++ b/extern/Eigen2/Eigen/src/QR/SelfAdjointEigenSolver.h @@ -0,0 +1,402 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. 
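Back to QR.h for a moment: a sketch (not part of the patch) that reconstructs the input from the factors returned by matrixQ()/matrixR() above. matrixR() returns a triangular view, so it is copied into a pre-zeroed plain matrix first.

    #include <Eigen/Core>
    #include <Eigen/QR>
    #include <iostream>

    int main()
    {
      Eigen::MatrixXd A(3, 3);
      A << 2, 1, 1,
           1, 3, 2,
           1, 0, 0;
      Eigen::QR<Eigen::MatrixXd> qr(A);          // A.qr() builds the same decomposition
      Eigen::MatrixXd Q = qr.matrixQ();
      Eigen::MatrixXd R = Eigen::MatrixXd::Zero(3, 3);
      R = qr.matrixR();                          // fills the upper triangle of R
      std::cout << "|A - Q*R| = " << (A - Q * R).norm() << "\n";   // expect ~0
      return 0;
    }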
+// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_SELFADJOINTEIGENSOLVER_H +#define EIGEN_SELFADJOINTEIGENSOLVER_H + +/** \qr_module \ingroup QR_Module + * \nonstableyet + * + * \class SelfAdjointEigenSolver + * + * \brief Eigen values/vectors solver for selfadjoint matrix + * + * \param MatrixType the type of the matrix of which we are computing the eigen decomposition + * + * \note MatrixType must be an actual Matrix type, it can't be an expression type. + * + * \sa MatrixBase::eigenvalues(), class EigenSolver + */ +template class SelfAdjointEigenSolver +{ + public: + + enum {Size = _MatrixType::RowsAtCompileTime }; + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef std::complex Complex; + typedef Matrix RealVectorType; + typedef Matrix RealVectorTypeX; + typedef Tridiagonalization TridiagonalizationType; + + SelfAdjointEigenSolver() + : m_eivec(int(Size), int(Size)), + m_eivalues(int(Size)) + { + ei_assert(Size!=Dynamic); + } + + SelfAdjointEigenSolver(int size) + : m_eivec(size, size), + m_eivalues(size) + {} + + /** Constructors computing the eigenvalues of the selfadjoint matrix \a matrix, + * as well as the eigenvectors if \a computeEigenvectors is true. + * + * \sa compute(MatrixType,bool), SelfAdjointEigenSolver(MatrixType,MatrixType,bool) + */ + SelfAdjointEigenSolver(const MatrixType& matrix, bool computeEigenvectors = true) + : m_eivec(matrix.rows(), matrix.cols()), + m_eivalues(matrix.cols()) + { + compute(matrix, computeEigenvectors); + } + + /** Constructors computing the eigenvalues of the generalized eigen problem + * \f$ Ax = lambda B x \f$ with \a matA the selfadjoint matrix \f$ A \f$ + * and \a matB the positive definite matrix \f$ B \f$ . The eigenvectors + * are computed if \a computeEigenvectors is true. + * + * \sa compute(MatrixType,MatrixType,bool), SelfAdjointEigenSolver(MatrixType,bool) + */ + SelfAdjointEigenSolver(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors = true) + : m_eivec(matA.rows(), matA.cols()), + m_eivalues(matA.cols()) + { + compute(matA, matB, computeEigenvectors); + } + + void compute(const MatrixType& matrix, bool computeEigenvectors = true); + + void compute(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors = true); + + /** \returns the computed eigen vectors as a matrix of column vectors */ + MatrixType eigenvectors(void) const + { + #ifndef NDEBUG + ei_assert(m_eigenvectorsOk); + #endif + return m_eivec; + } + + /** \returns the computed eigen values */ + RealVectorType eigenvalues(void) const { return m_eivalues; } + + /** \returns the positive square root of the matrix + * + * \note the matrix itself must be positive in order for this to make sense. + */ + MatrixType operatorSqrt() const + { + return m_eivec * m_eivalues.cwise().sqrt().asDiagonal() * m_eivec.adjoint(); + } + + /** \returns the positive inverse square root of the matrix + * + * \note the matrix itself must be positive definite in order for this to make sense. 
+ */ + MatrixType operatorInverseSqrt() const + { + return m_eivec * m_eivalues.cwise().inverse().cwise().sqrt().asDiagonal() * m_eivec.adjoint(); + } + + + protected: + MatrixType m_eivec; + RealVectorType m_eivalues; + #ifndef NDEBUG + bool m_eigenvectorsOk; + #endif +}; + +#ifndef EIGEN_HIDE_HEAVY_CODE + +// from Golub's "Matrix Computations", algorithm 5.1.3 +template +static void ei_givens_rotation(Scalar a, Scalar b, Scalar& c, Scalar& s) +{ + if (b==0) + { + c = 1; s = 0; + } + else if (ei_abs(b)>ei_abs(a)) + { + Scalar t = -a/b; + s = Scalar(1)/ei_sqrt(1+t*t); + c = s * t; + } + else + { + Scalar t = -b/a; + c = Scalar(1)/ei_sqrt(1+t*t); + s = c * t; + } +} + +/** \internal + * + * \qr_module + * + * Performs a QR step on a tridiagonal symmetric matrix represented as a + * pair of two vectors \a diag and \a subdiag. + * + * \param matA the input selfadjoint matrix + * \param hCoeffs returned Householder coefficients + * + * For compilation efficiency reasons, this procedure does not use eigen expression + * for its arguments. + * + * Implemented from Golub's "Matrix Computations", algorithm 8.3.2: + * "implicit symmetric QR step with Wilkinson shift" + */ +template +static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, int start, int end, Scalar* matrixQ, int n); + +/** Computes the eigenvalues of the selfadjoint matrix \a matrix, + * as well as the eigenvectors if \a computeEigenvectors is true. + * + * \sa SelfAdjointEigenSolver(MatrixType,bool), compute(MatrixType,MatrixType,bool) + */ +template +void SelfAdjointEigenSolver::compute(const MatrixType& matrix, bool computeEigenvectors) +{ + #ifndef NDEBUG + m_eigenvectorsOk = computeEigenvectors; + #endif + assert(matrix.cols() == matrix.rows()); + int n = matrix.cols(); + m_eivalues.resize(n,1); + + if(n==1) + { + m_eivalues.coeffRef(0,0) = ei_real(matrix.coeff(0,0)); + m_eivec.setOnes(); + return; + } + + m_eivec = matrix; + + // FIXME, should tridiag be a local variable of this function or an attribute of SelfAdjointEigenSolver ? + // the latter avoids multiple memory allocation when the same SelfAdjointEigenSolver is used multiple times... + // (same for diag and subdiag) + RealVectorType& diag = m_eivalues; + typename TridiagonalizationType::SubDiagonalType subdiag(n-1); + TridiagonalizationType::decomposeInPlace(m_eivec, diag, subdiag, computeEigenvectors); + + int end = n-1; + int start = 0; + while (end>0) + { + for (int i = start; i0 && subdiag[end-1]==0) + end--; + if (end<=0) + break; + start = end - 1; + while (start>0 && subdiag[start-1]!=0) + start--; + + ei_tridiagonal_qr_step(diag.data(), subdiag.data(), start, end, computeEigenvectors ? m_eivec.data() : (Scalar*)0, n); + } + + // Sort eigenvalues and corresponding vectors. + // TODO make the sort optional ? + // TODO use a better sort algorithm !! + for (int i = 0; i < n-1; ++i) + { + int k; + m_eivalues.segment(i,n-i).minCoeff(&k); + if (k > 0) + { + std::swap(m_eivalues[i], m_eivalues[k+i]); + m_eivec.col(i).swap(m_eivec.col(k+i)); + } + } +} + +/** Computes the eigenvalues of the generalized eigen problem + * \f$ Ax = lambda B x \f$ with \a matA the selfadjoint matrix \f$ A \f$ + * and \a matB the positive definite matrix \f$ B \f$ . The eigenvectors + * are computed if \a computeEigenvectors is true. 
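// ---------------------------------------------------------------------------
// Usage sketch (editorial illustration, not part of the vendored Eigen2
// sources or of this patch): the SelfAdjointEigenSolver declared above, for
// the standard problem A x = lambda x and the generalized problem
// A x = lambda B x. Assumes this header is reached through <Eigen/QR>, as in
// the Eigen2 module layout; inputs are caller-provided.
#include <Eigen/Core>
#include <Eigen/QR>

void selfadjoint_eigen_sketch(const Eigen::MatrixXd& A,   // selfadjoint
                              const Eigen::MatrixXd& B)   // selfadjoint, positive definite
{
  Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(A);   // eigenvectors computed by default
  Eigen::VectorXd lambda = es.eigenvalues();              // sorted in increasing order
  Eigen::MatrixXd V      = es.eigenvectors();             // one eigenvector per column
  Eigen::MatrixXd sqrtA  = es.operatorSqrt();             // needs A positive (semi)definite

  Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> ges(A, B);  // generalized problem
  Eigen::VectorXd mu = ges.eigenvalues();

  (void)lambda; (void)V; (void)sqrtA; (void)mu;
}
// ---------------------------------------------------------------------------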
+ * + * \sa SelfAdjointEigenSolver(MatrixType,MatrixType,bool), compute(MatrixType,bool) + */ +template +void SelfAdjointEigenSolver:: +compute(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors) +{ + ei_assert(matA.cols()==matA.rows() && matB.rows()==matA.rows() && matB.cols()==matB.rows()); + + // Compute the cholesky decomposition of matB = L L' + LLT cholB(matB); + + // compute C = inv(L) A inv(L') + MatrixType matC = matA; + cholB.matrixL().solveTriangularInPlace(matC); + // FIXME since we currently do not support A * inv(L'), let's do (inv(L) A')' : + matC = matC.adjoint().eval(); + cholB.matrixL().template marked().solveTriangularInPlace(matC); + matC = matC.adjoint().eval(); + // this version works too: +// matC = matC.transpose(); +// cholB.matrixL().conjugate().template marked().solveTriangularInPlace(matC); +// matC = matC.transpose(); + // FIXME: this should work: (currently it only does for small matrices) +// Transpose trMatC(matC); +// cholB.matrixL().conjugate().eval().template marked().solveTriangularInPlace(trMatC); + + compute(matC, computeEigenvectors); + + if (computeEigenvectors) + { + // transform back the eigen vectors: evecs = inv(U) * evecs + cholB.matrixL().adjoint().template marked().solveTriangularInPlace(m_eivec); + for (int i=0; i +inline Matrix::Scalar>::Real, ei_traits::ColsAtCompileTime, 1> +MatrixBase::eigenvalues() const +{ + ei_assert(Flags&SelfAdjointBit); + return SelfAdjointEigenSolver(eval(),false).eigenvalues(); +} + +template +struct ei_operatorNorm_selector +{ + static inline typename NumTraits::Scalar>::Real + operatorNorm(const MatrixBase& m) + { + // FIXME if it is really guaranteed that the eigenvalues are already sorted, + // then we don't need to compute a maxCoeff() here, comparing the 1st and last ones is enough. + return m.eigenvalues().cwise().abs().maxCoeff(); + } +}; + +template struct ei_operatorNorm_selector +{ + static inline typename NumTraits::Scalar>::Real + operatorNorm(const MatrixBase& m) + { + typename Derived::PlainMatrixType m_eval(m); + // FIXME if it is really guaranteed that the eigenvalues are already sorted, + // then we don't need to compute a maxCoeff() here, comparing the 1st and last ones is enough. + return ei_sqrt( + (m_eval*m_eval.adjoint()) + .template marked() + .eigenvalues() + .maxCoeff() + ); + } +}; + +/** \qr_module + * + * \returns the matrix norm of this matrix. + */ +template +inline typename NumTraits::Scalar>::Real +MatrixBase::operatorNorm() const +{ + return ei_operatorNorm_selector + ::operatorNorm(derived()); +} + +#ifndef EIGEN_EXTERN_INSTANTIATIONS +template +static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, int start, int end, Scalar* matrixQ, int n) +{ + RealScalar td = (diag[end-1] - diag[end])*RealScalar(0.5); + RealScalar e2 = ei_abs2(subdiag[end-1]); + RealScalar mu = diag[end] - e2 / (td + (td>0 ? 
1 : -1) * ei_sqrt(td*td + e2)); + RealScalar x = diag[start] - mu; + RealScalar z = subdiag[start]; + + for (int k = start; k < end; ++k) + { + RealScalar c, s; + ei_givens_rotation(x, z, c, s); + + // do T = G' T G + RealScalar sdk = s * diag[k] + c * subdiag[k]; + RealScalar dkp1 = s * subdiag[k] + c * diag[k+1]; + + diag[k] = c * (c * diag[k] - s * subdiag[k]) - s * (c * subdiag[k] - s * diag[k+1]); + diag[k+1] = s * sdk + c * dkp1; + subdiag[k] = c * sdk - s * dkp1; + + if (k > start) + subdiag[k - 1] = c * subdiag[k-1] - s * z; + + x = subdiag[k]; + + if (k < end - 1) + { + z = -s * subdiag[k+1]; + subdiag[k + 1] = c * subdiag[k+1]; + } + + // apply the givens rotation to the unit matrix Q = Q * G + // G only modifies the two columns k and k+1 + if (matrixQ) + { + #ifdef EIGEN_DEFAULT_TO_ROW_MAJOR + #else + int kn = k*n; + int kn1 = (k+1)*n; + #endif + // let's do the product manually to avoid the need of temporaries... + for (int i=0; i +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_TRIDIAGONALIZATION_H +#define EIGEN_TRIDIAGONALIZATION_H + +/** \ingroup QR_Module + * \nonstableyet + * + * \class Tridiagonalization + * + * \brief Trigiagonal decomposition of a selfadjoint matrix + * + * \param MatrixType the type of the matrix of which we are performing the tridiagonalization + * + * This class performs a tridiagonal decomposition of a selfadjoint matrix \f$ A \f$ such that: + * \f$ A = Q T Q^* \f$ where \f$ Q \f$ is unitary and \f$ T \f$ a real symmetric tridiagonal matrix. + * + * \sa MatrixBase::tridiagonalize() + */ +template class Tridiagonalization +{ + public: + + typedef _MatrixType MatrixType; + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef typename ei_packet_traits::type Packet; + + enum { + Size = MatrixType::RowsAtCompileTime, + SizeMinusOne = MatrixType::RowsAtCompileTime==Dynamic + ? Dynamic + : MatrixType::RowsAtCompileTime-1, + PacketSize = ei_packet_traits::size + }; + + typedef Matrix CoeffVectorType; + typedef Matrix DiagonalType; + typedef Matrix SubDiagonalType; + + typedef typename NestByValue >::RealReturnType DiagonalReturnType; + + typedef typename NestByValue > > >::RealReturnType SubDiagonalReturnType; + + /** This constructor initializes a Tridiagonalization object for + * further use with Tridiagonalization::compute() + */ + Tridiagonalization(int size = Size==Dynamic ? 
2 : Size) + : m_matrix(size,size), m_hCoeffs(size-1) + {} + + Tridiagonalization(const MatrixType& matrix) + : m_matrix(matrix), + m_hCoeffs(matrix.cols()-1) + { + _compute(m_matrix, m_hCoeffs); + } + + /** Computes or re-compute the tridiagonalization for the matrix \a matrix. + * + * This method allows to re-use the allocated data. + */ + void compute(const MatrixType& matrix) + { + m_matrix = matrix; + m_hCoeffs.resize(matrix.rows()-1, 1); + _compute(m_matrix, m_hCoeffs); + } + + /** \returns the householder coefficients allowing to + * reconstruct the matrix Q from the packed data. + * + * \sa packedMatrix() + */ + inline CoeffVectorType householderCoefficients(void) const { return m_hCoeffs; } + + /** \returns the internal result of the decomposition. + * + * The returned matrix contains the following information: + * - the strict upper part is equal to the input matrix A + * - the diagonal and lower sub-diagonal represent the tridiagonal symmetric matrix (real). + * - the rest of the lower part contains the Householder vectors that, combined with + * Householder coefficients returned by householderCoefficients(), + * allows to reconstruct the matrix Q as follow: + * Q = H_{N-1} ... H_1 H_0 + * where the matrices H are the Householder transformations: + * H_i = (I - h_i * v_i * v_i') + * where h_i == householderCoefficients()[i] and v_i is a Householder vector: + * v_i = [ 0, ..., 0, 1, M(i+2,i), ..., M(N-1,i) ] + * + * See LAPACK for further details on this packed storage. + */ + inline const MatrixType& packedMatrix(void) const { return m_matrix; } + + MatrixType matrixQ(void) const; + MatrixType matrixT(void) const; + const DiagonalReturnType diagonal(void) const; + const SubDiagonalReturnType subDiagonal(void) const; + + static void decomposeInPlace(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ = true); + + private: + + static void _compute(MatrixType& matA, CoeffVectorType& hCoeffs); + + static void _decomposeInPlace3x3(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ = true); + + protected: + MatrixType m_matrix; + CoeffVectorType m_hCoeffs; +}; + +/** \returns an expression of the diagonal vector */ +template +const typename Tridiagonalization::DiagonalReturnType +Tridiagonalization::diagonal(void) const +{ + return m_matrix.diagonal().nestByValue().real(); +} + +/** \returns an expression of the sub-diagonal vector */ +template +const typename Tridiagonalization::SubDiagonalReturnType +Tridiagonalization::subDiagonal(void) const +{ + int n = m_matrix.rows(); + return Block(m_matrix, 1, 0, n-1,n-1) + .nestByValue().diagonal().nestByValue().real(); +} + +/** constructs and returns the tridiagonal matrix T. + * Note that the matrix T is equivalent to the diagonal and sub-diagonal of the packed matrix. + * Therefore, it might be often sufficient to directly use the packed matrix, or the vector + * expressions returned by diagonal() and subDiagonal() instead of creating a new matrix. + */ +template +typename Tridiagonalization::MatrixType +Tridiagonalization::matrixT(void) const +{ + // FIXME should this function (and other similar ones) rather take a matrix as argument + // and fill it ? 
(to avoid temporaries) + int n = m_matrix.rows(); + MatrixType matT = m_matrix; + matT.corner(TopRight,n-1, n-1).diagonal() = subDiagonal().template cast().conjugate(); + if (n>2) + { + matT.corner(TopRight,n-2, n-2).template part().setZero(); + matT.corner(BottomLeft,n-2, n-2).template part().setZero(); + } + return matT; +} + +#ifndef EIGEN_HIDE_HEAVY_CODE + +/** \internal + * Performs a tridiagonal decomposition of \a matA in place. + * + * \param matA the input selfadjoint matrix + * \param hCoeffs returned Householder coefficients + * + * The result is written in the lower triangular part of \a matA. + * + * Implemented from Golub's "Matrix Computations", algorithm 8.3.1. + * + * \sa packedMatrix() + */ +template +void Tridiagonalization::_compute(MatrixType& matA, CoeffVectorType& hCoeffs) +{ + assert(matA.rows()==matA.cols()); + int n = matA.rows(); +// std::cerr << matA << "\n\n"; + for (int i = 0; i(1))) + { + hCoeffs.coeffRef(i) = 0.; + } + else + { + Scalar v0 = matA.col(i).coeff(i+1); + RealScalar beta = ei_sqrt(ei_abs2(v0)+v1norm2); + if (ei_real(v0)>=0.) + beta = -beta; + matA.col(i).end(n-(i+2)) *= (Scalar(1)/(v0-beta)); + matA.col(i).coeffRef(i+1) = beta; + Scalar h = (beta - v0) / beta; + // end of the householder transformation + + // Apply similarity transformation to remaining columns, + // i.e., A = H' A H where H = I - h v v' and v = matA.col(i).end(n-i-1) + + matA.col(i).coeffRef(i+1) = 1; + + /* This is the initial algorithm which minimize operation counts and maximize + * the use of Eigen's expression. Unfortunately, the first matrix-vector product + * using Part is very very slow */ + #ifdef EIGEN_NEVER_DEFINED + // matrix - vector product + hCoeffs.end(n-i-1) = (matA.corner(BottomRight,n-i-1,n-i-1).template part() + * (h * matA.col(i).end(n-i-1))).lazy(); + // simple axpy + hCoeffs.end(n-i-1) += (h * Scalar(-0.5) * matA.col(i).end(n-i-1).dot(hCoeffs.end(n-i-1))) + * matA.col(i).end(n-i-1); + // rank-2 update + //Block B(matA,i+1,i,n-i-1,1); + matA.corner(BottomRight,n-i-1,n-i-1).template part() -= + (matA.col(i).end(n-i-1) * hCoeffs.end(n-i-1).adjoint()).lazy() + + (hCoeffs.end(n-i-1) * matA.col(i).end(n-i-1).adjoint()).lazy(); + #endif + /* end initial algorithm */ + + /* If we still want to minimize operation count (i.e., perform operation on the lower part only) + * then we could provide the following algorithm for selfadjoint - vector product. However, a full + * matrix-vector product is still faster (at least for dynamic size, and not too small, did not check + * small matrices). The algo performs block matrix-vector and transposed matrix vector products. */ + #ifdef EIGEN_NEVER_DEFINED + int n4 = (std::max(0,n-4)/4)*4; + hCoeffs.end(n-i-1).setZero(); + for (int b=i+1; b(matA,b+4,b,n-b-4,4) * matA.template block<4,1>(b,i); + // the respective transposed part: + Block(hCoeffs, b, 0, 4,1) += + Block(matA,b+4,b,n-b-4,4).adjoint() * Block(matA,b+4,i,n-b-4,1); + // the 4x4 block diagonal: + Block(hCoeffs, b, 0, 4,1) += + (Block(matA,b,b,4,4).template part() + * (h * Block(matA,b,i,4,1))).lazy(); + } + #endif + // todo: handle the remaining part + /* end optimized selfadjoint - vector product */ + + /* Another interesting note: the above rank-2 update is much slower than the following hand written loop. + * After an analyze of the ASM, it seems GCC (4.2) generate poor code because of the Block. Moreover, + * if we remove the specialization of Block for Matrix then it is even worse, much worse ! 
*/ + #ifdef EIGEN_NEVER_DEFINED + for (int j1=i+1; j11) + { + int alignedStart = (starti) + ei_alignmentOffset(&matA.coeffRef(starti,j1), n-starti); + alignedEnd = alignedStart + ((n-alignedStart)/PacketSize)*PacketSize; + + for (int i1=starti; i1::IsComplex) + { + // Householder transformation on the remaining single scalar + int i = n-2; + Scalar v0 = matA.col(i).coeff(i+1); + RealScalar beta = ei_abs(v0); + if (ei_real(v0)>=0.) + beta = -beta; + matA.col(i).coeffRef(i+1) = beta; + if(ei_isMuchSmallerThan(beta, Scalar(1))) hCoeffs.coeffRef(i) = Scalar(0); + else hCoeffs.coeffRef(i) = (beta - v0) / beta; + } + else + { + hCoeffs.coeffRef(n-2) = 0; + } +} + +/** reconstructs and returns the matrix Q */ +template +typename Tridiagonalization::MatrixType +Tridiagonalization::matrixQ(void) const +{ + int n = m_matrix.rows(); + MatrixType matQ = MatrixType::Identity(n,n); + for (int i = n-2; i>=0; i--) + { + Scalar tmp = m_matrix.coeff(i+1,i); + m_matrix.const_cast_derived().coeffRef(i+1,i) = 1; + + matQ.corner(BottomRight,n-i-1,n-i-1) -= + ((m_hCoeffs.coeff(i) * m_matrix.col(i).end(n-i-1)) * + (m_matrix.col(i).end(n-i-1).adjoint() * matQ.corner(BottomRight,n-i-1,n-i-1)).lazy()).lazy(); + + m_matrix.const_cast_derived().coeffRef(i+1,i) = tmp; + } + return matQ; +} + +/** Performs a full decomposition in place */ +template +void Tridiagonalization::decomposeInPlace(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ) +{ + int n = mat.rows(); + ei_assert(mat.cols()==n && diag.size()==n && subdiag.size()==n-1); + if (n==3 && (!NumTraits::IsComplex) ) + { + _decomposeInPlace3x3(mat, diag, subdiag, extractQ); + } + else + { + Tridiagonalization tridiag(mat); + diag = tridiag.diagonal(); + subdiag = tridiag.subDiagonal(); + if (extractQ) + mat = tridiag.matrixQ(); + } +} + +/** \internal + * Optimized path for 3x3 matrices. + * Especially useful for plane fitting. + */ +template +void Tridiagonalization::_decomposeInPlace3x3(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ) +{ + diag[0] = ei_real(mat(0,0)); + RealScalar v1norm2 = ei_abs2(mat(0,2)); + if (ei_isMuchSmallerThan(v1norm2, RealScalar(1))) + { + diag[1] = ei_real(mat(1,1)); + diag[2] = ei_real(mat(2,2)); + subdiag[0] = ei_real(mat(0,1)); + subdiag[1] = ei_real(mat(1,2)); + if (extractQ) + mat.setIdentity(); + } + else + { + RealScalar beta = ei_sqrt(ei_abs2(mat(0,1))+v1norm2); + RealScalar invBeta = RealScalar(1)/beta; + Scalar m01 = mat(0,1) * invBeta; + Scalar m02 = mat(0,2) * invBeta; + Scalar q = RealScalar(2)*m01*mat(1,2) + m02*(mat(2,2) - mat(1,1)); + diag[1] = ei_real(mat(1,1) + m02*q); + diag[2] = ei_real(mat(2,2) - m02*q); + subdiag[0] = beta; + subdiag[1] = ei_real(mat(1,2) - m01 * q); + if (extractQ) + { + mat(0,0) = 1; + mat(0,1) = 0; + mat(0,2) = 0; + mat(1,0) = 0; + mat(1,1) = m01; + mat(1,2) = m02; + mat(2,0) = 0; + mat(2,1) = m02; + mat(2,2) = -m01; + } + } +} + +#endif // EIGEN_HIDE_HEAVY_CODE + +#endif // EIGEN_TRIDIAGONALIZATION_H diff --git a/extern/Eigen2/Eigen/src/SVD/SVD.h b/extern/Eigen2/Eigen/src/SVD/SVD.h new file mode 100644 index 00000000000..0a52acf3d5b --- /dev/null +++ b/extern/Eigen2/Eigen/src/SVD/SVD.h @@ -0,0 +1,645 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. 
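// ---------------------------------------------------------------------------
// Usage sketch (editorial illustration, not part of the vendored Eigen2
// sources or of this patch): the tridiagonal decomposition A = Q T Q*
// implemented above, plus the static in-place variant that
// SelfAdjointEigenSolver::compute() relies on. Assumes this header is reached
// through <Eigen/QR>; the input is caller-provided and selfadjoint.
#include <Eigen/Core>
#include <Eigen/QR>

void tridiagonalization_sketch(const Eigen::MatrixXd& A)  // selfadjoint
{
  Eigen::Tridiagonalization<Eigen::MatrixXd> tri(A);
  Eigen::MatrixXd T  = tri.matrixT();      // real symmetric tridiagonal factor
  Eigen::MatrixXd Q  = tri.matrixQ();      // accumulated Householder reflectors
  Eigen::VectorXd d  = tri.diagonal();     // main diagonal of T
  Eigen::VectorXd sd = tri.subDiagonal();  // sub-diagonal of T

  // In-place variant writing directly into user-supplied vectors:
  Eigen::MatrixXd M = A;
  Eigen::VectorXd diag(A.rows()), subdiag(A.rows() - 1);
  Eigen::Tridiagonalization<Eigen::MatrixXd>::decomposeInPlace(M, diag, subdiag,
                                                               true /*extractQ*/);
  (void)T; (void)Q; (void)d; (void)sd;
}
// ---------------------------------------------------------------------------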
+// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_SVD_H +#define EIGEN_SVD_H + +/** \ingroup SVD_Module + * \nonstableyet + * + * \class SVD + * + * \brief Standard SVD decomposition of a matrix and associated features + * + * \param MatrixType the type of the matrix of which we are computing the SVD decomposition + * + * This class performs a standard SVD decomposition of a real matrix A of size \c M x \c N + * with \c M \>= \c N. + * + * + * \sa MatrixBase::SVD() + */ +template class SVD +{ + private: + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + + enum { + PacketSize = ei_packet_traits::size, + AlignmentMask = int(PacketSize)-1, + MinSize = EIGEN_ENUM_MIN(MatrixType::RowsAtCompileTime, MatrixType::ColsAtCompileTime) + }; + + typedef Matrix ColVector; + typedef Matrix RowVector; + + typedef Matrix MatrixUType; + typedef Matrix MatrixVType; + typedef Matrix SingularValuesType; + + public: + + SVD(const MatrixType& matrix) + : m_matU(matrix.rows(), std::min(matrix.rows(), matrix.cols())), + m_matV(matrix.cols(),matrix.cols()), + m_sigma(std::min(matrix.rows(),matrix.cols())) + { + compute(matrix); + } + + template + bool solve(const MatrixBase &b, ResultType* result) const; + + const MatrixUType& matrixU() const { return m_matU; } + const SingularValuesType& singularValues() const { return m_sigma; } + const MatrixVType& matrixV() const { return m_matV; } + + void compute(const MatrixType& matrix); + SVD& sort(); + + template + void computeUnitaryPositive(UnitaryType *unitary, PositiveType *positive) const; + template + void computePositiveUnitary(PositiveType *positive, UnitaryType *unitary) const; + template + void computeRotationScaling(RotationType *unitary, ScalingType *positive) const; + template + void computeScalingRotation(ScalingType *positive, RotationType *unitary) const; + + protected: + /** \internal */ + MatrixUType m_matU; + /** \internal */ + MatrixVType m_matV; + /** \internal */ + SingularValuesType m_sigma; +}; + +/** Computes / recomputes the SVD decomposition A = U S V^* of \a matrix + * + * \note this code has been adapted from JAMA (public domain) + */ +template +void SVD::compute(const MatrixType& matrix) +{ + const int m = matrix.rows(); + const int n = matrix.cols(); + const int nu = std::min(m,n); + + m_matU.resize(m, nu); + m_matU.setZero(); + m_sigma.resize(std::min(m,n)); + m_matV.resize(n,n); + + RowVector e(n); + ColVector work(m); + MatrixType matA(matrix); + const bool wantu = true; + const bool wantv = true; + int i=0, j=0, k=0; + + // Reduce A to bidiagonal 
form, storing the diagonal elements + // in s and the super-diagonal elements in e. + int nct = std::min(m-1,n); + int nrt = std::max(0,std::min(n-2,m)); + for (k = 0; k < std::max(nct,nrt); ++k) + { + if (k < nct) + { + // Compute the transformation for the k-th column and + // place the k-th diagonal in m_sigma[k]. + m_sigma[k] = matA.col(k).end(m-k).norm(); + if (m_sigma[k] != 0.0) // FIXME + { + if (matA(k,k) < 0.0) + m_sigma[k] = -m_sigma[k]; + matA.col(k).end(m-k) /= m_sigma[k]; + matA(k,k) += 1.0; + } + m_sigma[k] = -m_sigma[k]; + } + + for (j = k+1; j < n; ++j) + { + if ((k < nct) && (m_sigma[k] != 0.0)) + { + // Apply the transformation. + Scalar t = matA.col(k).end(m-k).dot(matA.col(j).end(m-k)); // FIXME dot product or cwise prod + .sum() ?? + t = -t/matA(k,k); + matA.col(j).end(m-k) += t * matA.col(k).end(m-k); + } + + // Place the k-th row of A into e for the + // subsequent calculation of the row transformation. + e[j] = matA(k,j); + } + + // Place the transformation in U for subsequent back multiplication. + if (wantu & (k < nct)) + m_matU.col(k).end(m-k) = matA.col(k).end(m-k); + + if (k < nrt) + { + // Compute the k-th row transformation and place the + // k-th super-diagonal in e[k]. + e[k] = e.end(n-k-1).norm(); + if (e[k] != 0.0) + { + if (e[k+1] < 0.0) + e[k] = -e[k]; + e.end(n-k-1) /= e[k]; + e[k+1] += 1.0; + } + e[k] = -e[k]; + if ((k+1 < m) & (e[k] != 0.0)) + { + // Apply the transformation. + work.end(m-k-1) = matA.corner(BottomRight,m-k-1,n-k-1) * e.end(n-k-1); + for (j = k+1; j < n; ++j) + matA.col(j).end(m-k-1) += (-e[j]/e[k+1]) * work.end(m-k-1); + } + + // Place the transformation in V for subsequent back multiplication. + if (wantv) + m_matV.col(k).end(n-k-1) = e.end(n-k-1); + } + } + + + // Set up the final bidiagonal matrix or order p. + int p = std::min(n,m+1); + if (nct < n) + m_sigma[nct] = matA(nct,nct); + if (m < p) + m_sigma[p-1] = 0.0; + if (nrt+1 < p) + e[nrt] = matA(nrt,p-1); + e[p-1] = 0.0; + + // If required, generate U. + if (wantu) + { + for (j = nct; j < nu; ++j) + { + m_matU.col(j).setZero(); + m_matU(j,j) = 1.0; + } + for (k = nct-1; k >= 0; k--) + { + if (m_sigma[k] != 0.0) + { + for (j = k+1; j < nu; ++j) + { + Scalar t = m_matU.col(k).end(m-k).dot(m_matU.col(j).end(m-k)); // FIXME is it really a dot product we want ? + t = -t/m_matU(k,k); + m_matU.col(j).end(m-k) += t * m_matU.col(k).end(m-k); + } + m_matU.col(k).end(m-k) = - m_matU.col(k).end(m-k); + m_matU(k,k) = Scalar(1) + m_matU(k,k); + if (k-1>0) + m_matU.col(k).start(k-1).setZero(); + } + else + { + m_matU.col(k).setZero(); + m_matU(k,k) = 1.0; + } + } + } + + // If required, generate V. + if (wantv) + { + for (k = n-1; k >= 0; k--) + { + if ((k < nrt) & (e[k] != 0.0)) + { + for (j = k+1; j < nu; ++j) + { + Scalar t = m_matV.col(k).end(n-k-1).dot(m_matV.col(j).end(n-k-1)); // FIXME is it really a dot product we want ? + t = -t/m_matV(k+1,k); + m_matV.col(j).end(n-k-1) += t * m_matV.col(k).end(n-k-1); + } + } + m_matV.col(k).setZero(); + m_matV(k,k) = 1.0; + } + } + + // Main iteration loop for the singular values. + int pp = p-1; + int iter = 0; + Scalar eps = ei_pow(Scalar(2),ei_is_same_type::ret ? Scalar(-23) : Scalar(-52)); + while (p > 0) + { + int k=0; + int kase=0; + + // Here is where a test for too many iterations would go. + + // This section of the program inspects for + // negligible elements in the s and e arrays. On + // completion the variables kase and k are set as follows. + + // kase = 1 if s(p) and e[k-1] are negligible and k
<p + // kase = 2 if s(k) is negligible and k<p + // kase = 3 if e[k-1] is negligible, k<p, and + // s(k), ..., s(p) are not negligible (qr step). + // kase = 4 if e(p-1) is negligible (convergence). + + for (k = p-2; k >
= -1; --k) + { + if (k == -1) + break; + if (ei_abs(e[k]) <= eps*(ei_abs(m_sigma[k]) + ei_abs(m_sigma[k+1]))) + { + e[k] = 0.0; + break; + } + } + if (k == p-2) + { + kase = 4; + } + else + { + int ks; + for (ks = p-1; ks >= k; --ks) + { + if (ks == k) + break; + Scalar t = (ks != p ? ei_abs(e[ks]) : Scalar(0)) + (ks != k+1 ? ei_abs(e[ks-1]) : Scalar(0)); + if (ei_abs(m_sigma[ks]) <= eps*t) + { + m_sigma[ks] = 0.0; + break; + } + } + if (ks == k) + { + kase = 3; + } + else if (ks == p-1) + { + kase = 1; + } + else + { + kase = 2; + k = ks; + } + } + ++k; + + // Perform the task indicated by kase. + switch (kase) + { + + // Deflate negligible s(p). + case 1: + { + Scalar f(e[p-2]); + e[p-2] = 0.0; + for (j = p-2; j >= k; --j) + { + Scalar t(ei_hypot(m_sigma[j],f)); + Scalar cs(m_sigma[j]/t); + Scalar sn(f/t); + m_sigma[j] = t; + if (j != k) + { + f = -sn*e[j-1]; + e[j-1] = cs*e[j-1]; + } + if (wantv) + { + for (i = 0; i < n; ++i) + { + t = cs*m_matV(i,j) + sn*m_matV(i,p-1); + m_matV(i,p-1) = -sn*m_matV(i,j) + cs*m_matV(i,p-1); + m_matV(i,j) = t; + } + } + } + } + break; + + // Split at negligible s(k). + case 2: + { + Scalar f(e[k-1]); + e[k-1] = 0.0; + for (j = k; j < p; ++j) + { + Scalar t(ei_hypot(m_sigma[j],f)); + Scalar cs( m_sigma[j]/t); + Scalar sn(f/t); + m_sigma[j] = t; + f = -sn*e[j]; + e[j] = cs*e[j]; + if (wantu) + { + for (i = 0; i < m; ++i) + { + t = cs*m_matU(i,j) + sn*m_matU(i,k-1); + m_matU(i,k-1) = -sn*m_matU(i,j) + cs*m_matU(i,k-1); + m_matU(i,j) = t; + } + } + } + } + break; + + // Perform one qr step. + case 3: + { + // Calculate the shift. + Scalar scale = std::max(std::max(std::max(std::max( + ei_abs(m_sigma[p-1]),ei_abs(m_sigma[p-2])),ei_abs(e[p-2])), + ei_abs(m_sigma[k])),ei_abs(e[k])); + Scalar sp = m_sigma[p-1]/scale; + Scalar spm1 = m_sigma[p-2]/scale; + Scalar epm1 = e[p-2]/scale; + Scalar sk = m_sigma[k]/scale; + Scalar ek = e[k]/scale; + Scalar b = ((spm1 + sp)*(spm1 - sp) + epm1*epm1)/Scalar(2); + Scalar c = (sp*epm1)*(sp*epm1); + Scalar shift = 0.0; + if ((b != 0.0) || (c != 0.0)) + { + shift = ei_sqrt(b*b + c); + if (b < 0.0) + shift = -shift; + shift = c/(b + shift); + } + Scalar f = (sk + sp)*(sk - sp) + shift; + Scalar g = sk*ek; + + // Chase zeros. + + for (j = k; j < p-1; ++j) + { + Scalar t = ei_hypot(f,g); + Scalar cs = f/t; + Scalar sn = g/t; + if (j != k) + e[j-1] = t; + f = cs*m_sigma[j] + sn*e[j]; + e[j] = cs*e[j] - sn*m_sigma[j]; + g = sn*m_sigma[j+1]; + m_sigma[j+1] = cs*m_sigma[j+1]; + if (wantv) + { + for (i = 0; i < n; ++i) + { + t = cs*m_matV(i,j) + sn*m_matV(i,j+1); + m_matV(i,j+1) = -sn*m_matV(i,j) + cs*m_matV(i,j+1); + m_matV(i,j) = t; + } + } + t = ei_hypot(f,g); + cs = f/t; + sn = g/t; + m_sigma[j] = t; + f = cs*e[j] + sn*m_sigma[j+1]; + m_sigma[j+1] = -sn*e[j] + cs*m_sigma[j+1]; + g = sn*e[j+1]; + e[j+1] = cs*e[j+1]; + if (wantu && (j < m-1)) + { + for (i = 0; i < m; ++i) + { + t = cs*m_matU(i,j) + sn*m_matU(i,j+1); + m_matU(i,j+1) = -sn*m_matU(i,j) + cs*m_matU(i,j+1); + m_matU(i,j) = t; + } + } + } + e[p-2] = f; + iter = iter + 1; + } + break; + + // Convergence. + case 4: + { + // Make the singular values positive. + if (m_sigma[k] <= 0.0) + { + m_sigma[k] = m_sigma[k] < Scalar(0) ? -m_sigma[k] : Scalar(0); + if (wantv) + m_matV.col(k).start(pp+1) = -m_matV.col(k).start(pp+1); + } + + // Order the singular values. 
+ while (k < pp) + { + if (m_sigma[k] >= m_sigma[k+1]) + break; + Scalar t = m_sigma[k]; + m_sigma[k] = m_sigma[k+1]; + m_sigma[k+1] = t; + if (wantv && (k < n-1)) + m_matV.col(k).swap(m_matV.col(k+1)); + if (wantu && (k < m-1)) + m_matU.col(k).swap(m_matU.col(k+1)); + ++k; + } + iter = 0; + p--; + } + break; + } // end big switch + } // end iterations +} + +template +SVD& SVD::sort() +{ + int mu = m_matU.rows(); + int mv = m_matV.rows(); + int n = m_matU.cols(); + + for (int i=0; i p) + { + k = j; + p = m_sigma.coeff(j); + } + } + if (k != i) + { + m_sigma.coeffRef(k) = m_sigma.coeff(i); // i.e. + m_sigma.coeffRef(i) = p; // swaps the i-th and the k-th elements + + int j = mu; + for(int s=0; j!=0; ++s, --j) + std::swap(m_matU.coeffRef(s,i), m_matU.coeffRef(s,k)); + + j = mv; + for (int s=0; j!=0; ++s, --j) + std::swap(m_matV.coeffRef(s,i), m_matV.coeffRef(s,k)); + } + } + return *this; +} + +/** \returns the solution of \f$ A x = b \f$ using the current SVD decomposition of A. + * The parts of the solution corresponding to zero singular values are ignored. + * + * \sa MatrixBase::svd(), LU::solve(), LLT::solve() + */ +template +template +bool SVD::solve(const MatrixBase &b, ResultType* result) const +{ + const int rows = m_matU.rows(); + ei_assert(b.rows() == rows); + + Scalar maxVal = m_sigma.cwise().abs().maxCoeff(); + for (int j=0; j aux = m_matU.transpose() * b.col(j); + + for (int i = 0; i col(j) = m_matV * aux; + } + return true; +} + +/** Computes the polar decomposition of the matrix, as a product unitary x positive. + * + * If either pointer is zero, the corresponding computation is skipped. + * + * Only for square matrices. + * + * \sa computePositiveUnitary(), computeRotationScaling() + */ +template +template +void SVD::computeUnitaryPositive(UnitaryType *unitary, + PositiveType *positive) const +{ + ei_assert(m_matU.cols() == m_matV.cols() && "Polar decomposition is only for square matrices"); + if(unitary) *unitary = m_matU * m_matV.adjoint(); + if(positive) *positive = m_matV * m_sigma.asDiagonal() * m_matV.adjoint(); +} + +/** Computes the polar decomposition of the matrix, as a product positive x unitary. + * + * If either pointer is zero, the corresponding computation is skipped. + * + * Only for square matrices. + * + * \sa computeUnitaryPositive(), computeRotationScaling() + */ +template +template +void SVD::computePositiveUnitary(UnitaryType *positive, + PositiveType *unitary) const +{ + ei_assert(m_matU.rows() == m_matV.rows() && "Polar decomposition is only for square matrices"); + if(unitary) *unitary = m_matU * m_matV.adjoint(); + if(positive) *positive = m_matU * m_sigma.asDiagonal() * m_matU.adjoint(); +} + +/** decomposes the matrix as a product rotation x scaling, the scaling being + * not necessarily positive. + * + * If either pointer is zero, the corresponding computation is skipped. + * + * This method requires the Geometry module. 
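// ---------------------------------------------------------------------------
// Usage sketch (editorial illustration, not part of the vendored Eigen2
// sources or of this patch): the SVD class defined above, including the
// solve() that skips zero singular values and the polar decomposition helper.
// Assumes the <Eigen/SVD> module header added by this commit; inputs are
// caller-provided, with A having at least as many rows as columns, as the
// class documentation requires.
#include <Eigen/Core>
#include <Eigen/SVD>

void svd_sketch(const Eigen::MatrixXd& A,   // M x N, M >= N
                const Eigen::VectorXd& b)   // size M
{
  Eigen::SVD<Eigen::MatrixXd> svd(A);       // A = U * diag(sigma) * V^T
  Eigen::MatrixXd U     = svd.matrixU();
  Eigen::VectorXd sigma = svd.singularValues();
  Eigen::MatrixXd V     = svd.matrixV();

  Eigen::VectorXd x(A.cols());
  svd.solve(b, &x);                          // zero singular values are ignored

  if (A.rows() == A.cols())                  // polar decomposition: square only
  {
    Eigen::MatrixXd unitary, positive;
    svd.computeUnitaryPositive(&unitary, &positive);   // A = unitary * positive
  }
  (void)U; (void)sigma; (void)V;
}
// ---------------------------------------------------------------------------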
+ * + * \sa computeScalingRotation(), computeUnitaryPositive() + */ +template +template +void SVD::computeRotationScaling(RotationType *rotation, ScalingType *scaling) const +{ + ei_assert(m_matU.rows() == m_matV.rows() && "Polar decomposition is only for square matrices"); + Scalar x = (m_matU * m_matV.adjoint()).determinant(); // so x has absolute value 1 + Matrix sv(m_sigma); + sv.coeffRef(0) *= x; + if(scaling) scaling->lazyAssign(m_matV * sv.asDiagonal() * m_matV.adjoint()); + if(rotation) + { + MatrixType m(m_matU); + m.col(0) /= x; + rotation->lazyAssign(m * m_matV.adjoint()); + } +} + +/** decomposes the matrix as a product scaling x rotation, the scaling being + * not necessarily positive. + * + * If either pointer is zero, the corresponding computation is skipped. + * + * This method requires the Geometry module. + * + * \sa computeRotationScaling(), computeUnitaryPositive() + */ +template +template +void SVD::computeScalingRotation(ScalingType *scaling, RotationType *rotation) const +{ + ei_assert(m_matU.rows() == m_matV.rows() && "Polar decomposition is only for square matrices"); + Scalar x = (m_matU * m_matV.adjoint()).determinant(); // so x has absolute value 1 + Matrix sv(m_sigma); + sv.coeffRef(0) *= x; + if(scaling) scaling->lazyAssign(m_matU * sv.asDiagonal() * m_matU.adjoint()); + if(rotation) + { + MatrixType m(m_matU); + m.col(0) /= x; + rotation->lazyAssign(m * m_matV.adjoint()); + } +} + + +/** \svd_module + * \returns the SVD decomposition of \c *this + */ +template +inline SVD::PlainMatrixType> +MatrixBase::svd() const +{ + return SVD(derived()); +} + +#endif // EIGEN_SVD_H diff --git a/extern/Eigen2/Eigen/src/Sparse/AmbiVector.h b/extern/Eigen2/Eigen/src/Sparse/AmbiVector.h new file mode 100644 index 00000000000..75001a2fa25 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Sparse/AmbiVector.h @@ -0,0 +1,371 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_AMBIVECTOR_H +#define EIGEN_AMBIVECTOR_H + +/** \internal + * Hybrid sparse/dense vector class designed for intensive read-write operations. + * + * See BasicSparseLLT and SparseProduct for usage examples. 
+ */ +template class AmbiVector +{ + public: + typedef _Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + AmbiVector(int size) + : m_buffer(0), m_size(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1) + { + resize(size); + } + + void init(RealScalar estimatedDensity); + void init(int mode); + + void nonZeros() const; + + /** Specifies a sub-vector to work on */ + void setBounds(int start, int end) { m_start = start; m_end = end; } + + void setZero(); + + void restart(); + Scalar& coeffRef(int i); + Scalar coeff(int i); + + class Iterator; + + ~AmbiVector() { delete[] m_buffer; } + + void resize(int size) + { + if (m_allocatedSize < size) + reallocate(size); + m_size = size; + } + + int size() const { return m_size; } + + protected: + + void reallocate(int size) + { + // if the size of the matrix is not too large, let's allocate a bit more than needed such + // that we can handle dense vector even in sparse mode. + delete[] m_buffer; + if (size<1000) + { + int allocSize = (size * sizeof(ListEl))/sizeof(Scalar); + m_allocatedElements = (allocSize*sizeof(Scalar))/sizeof(ListEl); + m_buffer = new Scalar[allocSize]; + } + else + { + m_allocatedElements = (size*sizeof(Scalar))/sizeof(ListEl); + m_buffer = new Scalar[size]; + } + m_size = size; + m_start = 0; + m_end = m_size; + } + + void reallocateSparse() + { + int copyElements = m_allocatedElements; + m_allocatedElements = std::min(int(m_allocatedElements*1.5),m_size); + int allocSize = m_allocatedElements * sizeof(ListEl); + allocSize = allocSize/sizeof(Scalar) + (allocSize%sizeof(Scalar)>0?1:0); + Scalar* newBuffer = new Scalar[allocSize]; + memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl)); + } + + protected: + // element type of the linked list + struct ListEl + { + int next; + int index; + Scalar value; + }; + + // used to store data in both mode + Scalar* m_buffer; + int m_size; + int m_start; + int m_end; + int m_allocatedSize; + int m_allocatedElements; + int m_mode; + + // linked list mode + int m_llStart; + int m_llCurrent; + int m_llSize; + + private: + AmbiVector(const AmbiVector&); + +}; + +/** \returns the number of non zeros in the current sub vector */ +template +void AmbiVector::nonZeros() const +{ + if (m_mode==IsSparse) + return m_llSize; + else + return m_end - m_start; +} + +template +void AmbiVector::init(RealScalar estimatedDensity) +{ + if (estimatedDensity>0.1) + init(IsDense); + else + init(IsSparse); +} + +template +void AmbiVector::init(int mode) +{ + m_mode = mode; + if (m_mode==IsSparse) + { + m_llSize = 0; + m_llStart = -1; + } +} + +/** Must be called whenever we might perform a write access + * with an index smaller than the previous one. + * + * Don't worry, this function is extremely cheap. 
+ */ +template +void AmbiVector::restart() +{ + m_llCurrent = m_llStart; +} + +/** Set all coefficients of current subvector to zero */ +template +void AmbiVector::setZero() +{ + if (m_mode==IsDense) + { + for (int i=m_start; i +Scalar& AmbiVector::coeffRef(int i) +{ + if (m_mode==IsDense) + return m_buffer[i]; + else + { + ListEl* EIGEN_RESTRICT llElements = reinterpret_cast(m_buffer); + // TODO factorize the following code to reduce code generation + ei_assert(m_mode==IsSparse); + if (m_llSize==0) + { + // this is the first element + m_llStart = 0; + m_llCurrent = 0; + ++m_llSize; + llElements[0].value = Scalar(0); + llElements[0].index = i; + llElements[0].next = -1; + return llElements[0].value; + } + else if (i=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index"); + while (nextel >= 0 && llElements[nextel].index<=i) + { + m_llCurrent = nextel; + nextel = llElements[nextel].next; + } + + if (llElements[m_llCurrent].index==i) + { + // the coefficient already exists and we found it ! + return llElements[m_llCurrent].value; + } + else + { + if (m_llSize>=m_allocatedElements) + reallocateSparse(); + ei_internal_assert(m_llSize +Scalar AmbiVector::coeff(int i) +{ + if (m_mode==IsDense) + return m_buffer[i]; + else + { + ListEl* EIGEN_RESTRICT llElements = reinterpret_cast(m_buffer); + ei_assert(m_mode==IsSparse); + if ((m_llSize==0) || (i= 0 && llElements[elid].index +class AmbiVector<_Scalar>::Iterator +{ + public: + typedef _Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + + /** Default constructor + * \param vec the vector on which we iterate + * \param epsilon the minimal value used to prune zero coefficients. + * In practice, all coefficients having a magnitude smaller than \a epsilon + * are skipped. + */ + Iterator(const AmbiVector& vec, RealScalar epsilon = RealScalar(0.1)*precision()) + : m_vector(vec) + { + m_epsilon = epsilon; + m_isDense = m_vector.m_mode==IsDense; + if (m_isDense) + { + m_cachedIndex = m_vector.m_start-1; + ++(*this); + } + else + { + ListEl* EIGEN_RESTRICT llElements = reinterpret_cast(m_vector.m_buffer); + m_currentEl = m_vector.m_llStart; + while (m_currentEl>=0 && ei_abs(llElements[m_currentEl].value)=0; } + + Iterator& operator++() + { + if (m_isDense) + { + do { + ++m_cachedIndex; + } while (m_cachedIndex(m_vector.m_buffer); + do { + m_currentEl = llElements[m_currentEl].next; + } while (m_currentEl>=0 && ei_abs(llElements[m_currentEl].value) +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
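// ---------------------------------------------------------------------------
// Usage sketch (editorial illustration, not part of the vendored Eigen2
// sources or of this patch): the read/write protocol of the internal
// AmbiVector helper defined above. It is an implementation detail of the
// sparse module; this only makes the init()/restart()/Iterator contract
// concrete. The iterator's value() accessor is assumed to match the other
// sparse iterators in this module.
#include <Eigen/Sparse>

void ambivector_sketch()
{
  Eigen::AmbiVector<double> v(16);
  v.setBounds(0, 16);          // operate on the whole vector
  v.init(0.05);                // low estimated density -> sparse (linked-list) mode
  v.setZero();

  v.coeffRef(3) = 1.5;         // writes must use non-decreasing indices...
  v.coeffRef(9) = -2.0;
  v.restart();                 // ...unless restart() is called first
  v.coeffRef(1) = 0.5;

  double sum = 0;
  for (Eigen::AmbiVector<double>::Iterator it(v); it; ++it)
    sum += it.value();         // skips coefficients below the pruning epsilon
  (void)sum;
}
// ---------------------------------------------------------------------------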
+ +#ifndef EIGEN_CHOLMODSUPPORT_H +#define EIGEN_CHOLMODSUPPORT_H + +template +void ei_cholmod_configure_matrix(CholmodType& mat) +{ + if (ei_is_same_type::ret) + { + mat.xtype = CHOLMOD_REAL; + mat.dtype = 1; + } + else if (ei_is_same_type::ret) + { + mat.xtype = CHOLMOD_REAL; + mat.dtype = 0; + } + else if (ei_is_same_type >::ret) + { + mat.xtype = CHOLMOD_COMPLEX; + mat.dtype = 1; + } + else if (ei_is_same_type >::ret) + { + mat.xtype = CHOLMOD_COMPLEX; + mat.dtype = 0; + } + else + { + ei_assert(false && "Scalar type not supported by CHOLMOD"); + } +} + +template +cholmod_sparse SparseMatrixBase::asCholmodMatrix() +{ + typedef typename Derived::Scalar Scalar; + cholmod_sparse res; + res.nzmax = nonZeros(); + res.nrow = rows();; + res.ncol = cols(); + res.p = derived()._outerIndexPtr(); + res.i = derived()._innerIndexPtr(); + res.x = derived()._valuePtr(); + res.xtype = CHOLMOD_REAL; + res.itype = CHOLMOD_INT; + res.sorted = 1; + res.packed = 1; + res.dtype = 0; + res.stype = -1; + + ei_cholmod_configure_matrix(res); + + if (Derived::Flags & SelfAdjoint) + { + if (Derived::Flags & UpperTriangular) + res.stype = 1; + else if (Derived::Flags & LowerTriangular) + res.stype = -1; + else + res.stype = 0; + } + else + res.stype = 0; + + return res; +} + +template +cholmod_dense ei_cholmod_map_eigen_to_dense(MatrixBase& mat) +{ + EIGEN_STATIC_ASSERT((ei_traits::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); + typedef typename Derived::Scalar Scalar; + + cholmod_dense res; + res.nrow = mat.rows(); + res.ncol = mat.cols(); + res.nzmax = res.nrow * res.ncol; + res.d = mat.derived().stride(); + res.x = mat.derived().data(); + res.z = 0; + + ei_cholmod_configure_matrix(res); + + return res; +} + +template +MappedSparseMatrix::MappedSparseMatrix(cholmod_sparse& cm) +{ + m_innerSize = cm.nrow; + m_outerSize = cm.ncol; + m_outerIndex = reinterpret_cast(cm.p); + m_innerIndices = reinterpret_cast(cm.i); + m_values = reinterpret_cast(cm.x); + m_nnz = m_outerIndex[cm.ncol]; +} + +template +class SparseLLT : public SparseLLT +{ + protected: + typedef SparseLLT Base; + typedef typename Base::Scalar Scalar; + typedef typename Base::RealScalar RealScalar; + using Base::MatrixLIsDirty; + using Base::SupernodalFactorIsDirty; + using Base::m_flags; + using Base::m_matrix; + using Base::m_status; + + public: + + SparseLLT(int flags = 0) + : Base(flags), m_cholmodFactor(0) + { + cholmod_start(&m_cholmod); + } + + SparseLLT(const MatrixType& matrix, int flags = 0) + : Base(flags), m_cholmodFactor(0) + { + cholmod_start(&m_cholmod); + compute(matrix); + } + + ~SparseLLT() + { + if (m_cholmodFactor) + cholmod_free_factor(&m_cholmodFactor, &m_cholmod); + cholmod_finish(&m_cholmod); + } + + inline const typename Base::CholMatrixType& matrixL(void) const; + + template + void solveInPlace(MatrixBase &b) const; + + void compute(const MatrixType& matrix); + + protected: + mutable cholmod_common m_cholmod; + cholmod_factor* m_cholmodFactor; +}; + +template +void SparseLLT::compute(const MatrixType& a) +{ + if (m_cholmodFactor) + { + cholmod_free_factor(&m_cholmodFactor, &m_cholmod); + m_cholmodFactor = 0; + } + + cholmod_sparse A = const_cast(a).asCholmodMatrix(); + m_cholmod.supernodal = CHOLMOD_AUTO; + // TODO + if (m_flags&IncompleteFactorization) + { + m_cholmod.nmethods = 1; + m_cholmod.method[0].ordering = CHOLMOD_NATURAL; + m_cholmod.postorder = 0; + } + else + { + m_cholmod.nmethods = 1; + m_cholmod.method[0].ordering = CHOLMOD_NATURAL; + m_cholmod.postorder = 0; + } + m_cholmod.final_ll = 1; 
+ m_cholmodFactor = cholmod_analyze(&A, &m_cholmod); + cholmod_factorize(&A, m_cholmodFactor, &m_cholmod); + + m_status = (m_status & ~SupernodalFactorIsDirty) | MatrixLIsDirty; +} + +template +inline const typename SparseLLT::CholMatrixType& +SparseLLT::matrixL() const +{ + if (m_status & MatrixLIsDirty) + { + ei_assert(!(m_status & SupernodalFactorIsDirty)); + + cholmod_sparse* cmRes = cholmod_factor_to_sparse(m_cholmodFactor, &m_cholmod); + const_cast(m_matrix) = MappedSparseMatrix(*cmRes); + free(cmRes); + + m_status = (m_status & ~MatrixLIsDirty); + } + return m_matrix; +} + +template +template +void SparseLLT::solveInPlace(MatrixBase &b) const +{ + const int size = m_cholmodFactor->n; + ei_assert(size==b.rows()); + + // this uses Eigen's triangular sparse solver +// if (m_status & MatrixLIsDirty) +// matrixL(); +// Base::solveInPlace(b); + // as long as our own triangular sparse solver is not fully optimal, + // let's use CHOLMOD's one: + cholmod_dense cdb = ei_cholmod_map_eigen_to_dense(b); + cholmod_dense* x = cholmod_solve(CHOLMOD_LDLt, m_cholmodFactor, &cdb, &m_cholmod); + b = Matrix::Map(reinterpret_cast(x->x),b.rows()); + cholmod_free_dense(&x, &m_cholmod); +} + +#endif // EIGEN_CHOLMODSUPPORT_H diff --git a/extern/Eigen2/Eigen/src/Sparse/CompressedStorage.h b/extern/Eigen2/Eigen/src/Sparse/CompressedStorage.h new file mode 100644 index 00000000000..4dbd3230985 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Sparse/CompressedStorage.h @@ -0,0 +1,230 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_COMPRESSED_STORAGE_H +#define EIGEN_COMPRESSED_STORAGE_H + +/** Stores a sparse set of values as a list of values and a list of indices. 
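// ---------------------------------------------------------------------------
// Usage sketch (editorial illustration, not part of the vendored Eigen2
// sources or of this patch): solving a sparse SPD system through the
// Cholmod-backed SparseLLT specialization defined above. The backend tag name
// (Cholmod) and the build configuration that enables this section of
// <Eigen/Sparse> follow upstream Eigen2 conventions and are assumptions here;
// the cholmod library itself must be installed and linked.
#include <cholmod.h>
#include <Eigen/Sparse>   // built with Cholmod support enabled

void cholmod_llt_sketch(const Eigen::SparseMatrix<double>& A,  // selfadjoint, positive definite
                        Eigen::VectorXd& b)                    // overwritten with the solution
{
  Eigen::SparseLLT<Eigen::SparseMatrix<double>, Eigen::Cholmod> llt(A);
  llt.solveInPlace(b);
}
// ---------------------------------------------------------------------------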
+ * + */ +template +class CompressedStorage +{ + typedef typename NumTraits::Real RealScalar; + public: + CompressedStorage() + : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0) + {} + + CompressedStorage(size_t size) + : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0) + { + resize(size); + } + + CompressedStorage(const CompressedStorage& other) + : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0) + { + *this = other; + } + + CompressedStorage& operator=(const CompressedStorage& other) + { + resize(other.size()); + memcpy(m_values, other.m_values, m_size * sizeof(Scalar)); + memcpy(m_indices, other.m_indices, m_size * sizeof(int)); + return *this; + } + + void swap(CompressedStorage& other) + { + std::swap(m_values, other.m_values); + std::swap(m_indices, other.m_indices); + std::swap(m_size, other.m_size); + std::swap(m_allocatedSize, other.m_allocatedSize); + } + + ~CompressedStorage() + { + delete[] m_values; + delete[] m_indices; + } + + void reserve(size_t size) + { + size_t newAllocatedSize = m_size + size; + if (newAllocatedSize > m_allocatedSize) + reallocate(newAllocatedSize); + } + + void squeeze() + { + if (m_allocatedSize>m_size) + reallocate(m_size); + } + + void resize(size_t size, float reserveSizeFactor = 0) + { + if (m_allocatedSizestart) + { + size_t mid = (end+start)>>1; + if (m_indices[mid]start && key==m_indices[end-1]) + return m_values[end-1]; + // ^^ optimization: let's first check if it is the last coefficient + // (very common in high level algorithms) + const size_t id = searchLowerIndex(start,end-1,key); + return ((id=m_size || m_indices[id]!=key) + { + resize(m_size+1,1); + for (size_t j=m_size-1; j>id; --j) + { + m_indices[j] = m_indices[j-1]; + m_values[j] = m_values[j-1]; + } + m_indices[id] = key; + m_values[id] = defaultValue; + } + return m_values[id]; + } + + void prune(Scalar reference, RealScalar epsilon = precision()) + { + size_t k = 0; + size_t n = size(); + for (size_t i=0; i +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
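// ---------------------------------------------------------------------------
// Usage sketch (editorial illustration, not part of the vendored Eigen2
// sources or of this patch): the internal CompressedStorage container
// documented above -- parallel arrays of sorted indices and values with
// binary-search access. User code normally reaches it only through the sparse
// matrix classes; atWithInsertion() is assumed, from its use in
// DynamicSparseMatrix::coeffRef() below, to return a reference and to take an
// optional default value.
#include <Eigen/Sparse>

void compressed_storage_sketch()
{
  Eigen::CompressedStorage<double> s;
  s.atWithInsertion(2) = 3.0;    // sorted insertion of index 2
  s.atWithInsertion(7) = -1.0;   // index 7 lands after index 2
  double present = s.at(7);      // binary search -> -1.0
  double absent  = s.at(5);      // index not stored -> 0
  s.prune(1.0);                  // drop coefficients negligible w.r.t. the reference
  (void)present; (void)absent;
}
// ---------------------------------------------------------------------------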
+ +#ifndef EIGEN_COREITERATORS_H +#define EIGEN_COREITERATORS_H + +/* This file contains the respective InnerIterator definition of the expressions defined in Eigen/Core + */ + +/** \class InnerIterator + * \brief An InnerIterator allows to loop over the element of a sparse (or dense) matrix or expression + * + * todo + */ + +// generic version for dense matrix and expressions +template class MatrixBase::InnerIterator +{ + typedef typename Derived::Scalar Scalar; + enum { IsRowMajor = (Derived::Flags&RowMajorBit)==RowMajorBit }; + public: + EIGEN_STRONG_INLINE InnerIterator(const Derived& expr, int outer) + : m_expression(expr), m_inner(0), m_outer(outer), m_end(expr.rows()) + {} + + EIGEN_STRONG_INLINE Scalar value() const + { + return (IsRowMajor) ? m_expression.coeff(m_outer, m_inner) + : m_expression.coeff(m_inner, m_outer); + } + + EIGEN_STRONG_INLINE InnerIterator& operator++() { m_inner++; return *this; } + + EIGEN_STRONG_INLINE int index() const { return m_inner; } + inline int row() const { return IsRowMajor ? m_outer : index(); } + inline int col() const { return IsRowMajor ? index() : m_outer; } + + EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; } + + protected: + const Derived& m_expression; + int m_inner; + const int m_outer; + const int m_end; +}; + +#endif // EIGEN_COREITERATORS_H diff --git a/extern/Eigen2/Eigen/src/Sparse/DynamicSparseMatrix.h b/extern/Eigen2/Eigen/src/Sparse/DynamicSparseMatrix.h new file mode 100644 index 00000000000..7119a84bd51 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Sparse/DynamicSparseMatrix.h @@ -0,0 +1,297 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_DYNAMIC_SPARSEMATRIX_H +#define EIGEN_DYNAMIC_SPARSEMATRIX_H + +/** \class DynamicSparseMatrix + * + * \brief A sparse matrix class designed for matrix assembly purpose + * + * \param _Scalar the scalar type, i.e. the type of the coefficients + * + * Unlike SparseMatrix, this class provides a much higher degree of flexibility. In particular, it allows + * random read/write accesses in log(rho*outer_size) where \c rho is the probability that a coefficient is + * nonzero and outer_size is the number of columns if the matrix is column-major and the number of rows + * otherwise. + * + * Internally, the data are stored as a std::vector of compressed vector. The performances of random writes might + * decrease as the number of nonzeros per inner-vector increase. 
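// ---------------------------------------------------------------------------
// Usage sketch (editorial illustration, not part of the vendored Eigen2
// sources or of this patch): the generic InnerIterator defined above lets one
// loop walk a single inner vector (a column, for the default column-major
// storage) of a dense expression, and the sparse classes in this commit
// expose a compatible InnerIterator of their own, so the same template works
// for both. A double-valued expression is assumed for the accumulator type.
#include <Eigen/Core>

template<typename Derived>
double inner_sum_sketch(const Derived& xpr, int outer)
{
  double sum = 0;
  for (typename Derived::InnerIterator it(xpr, outer); it; ++it)
    sum += it.value();   // dense: every coefficient; sparse: stored coefficients only
  return sum;
}
// ---------------------------------------------------------------------------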
In practice, we observed very good performance + * till about 100 nonzeros/vector, and the performance remains relatively good till 500 nonzeros/vectors. + * + * \see SparseMatrix + */ +template +struct ei_traits > +{ + typedef _Scalar Scalar; + enum { + RowsAtCompileTime = Dynamic, + ColsAtCompileTime = Dynamic, + MaxRowsAtCompileTime = Dynamic, + MaxColsAtCompileTime = Dynamic, + Flags = SparseBit | _Flags, + CoeffReadCost = NumTraits::ReadCost, + SupportedAccessPatterns = OuterRandomAccessPattern + }; +}; + +template +class DynamicSparseMatrix + : public SparseMatrixBase > +{ + public: + EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(DynamicSparseMatrix) + // FIXME: why are these operator already alvailable ??? + // EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(DynamicSparseMatrix, +=) + // EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(DynamicSparseMatrix, -=) + typedef MappedSparseMatrix Map; + + protected: + + enum { IsRowMajor = Base::IsRowMajor }; + typedef DynamicSparseMatrix TransposedSparseMatrix; + + int m_innerSize; + std::vector > m_data; + + public: + + inline int rows() const { return IsRowMajor ? outerSize() : m_innerSize; } + inline int cols() const { return IsRowMajor ? m_innerSize : outerSize(); } + inline int innerSize() const { return m_innerSize; } + inline int outerSize() const { return m_data.size(); } + inline int innerNonZeros(int j) const { return m_data[j].size(); } + + std::vector >& _data() { return m_data; } + const std::vector >& _data() const { return m_data; } + + /** \returns the coefficient value at given position \a row, \a col + * This operation involes a log(rho*outer_size) binary search. + */ + inline Scalar coeff(int row, int col) const + { + const int outer = IsRowMajor ? row : col; + const int inner = IsRowMajor ? col : row; + return m_data[outer].at(inner); + } + + /** \returns a reference to the coefficient value at given position \a row, \a col + * This operation involes a log(rho*outer_size) binary search. If the coefficient does not + * exist yet, then a sorted insertion into a sequential buffer is performed. + */ + inline Scalar& coeffRef(int row, int col) + { + const int outer = IsRowMajor ? row : col; + const int inner = IsRowMajor ? col : row; + return m_data[outer].atWithInsertion(inner); + } + + class InnerIterator; + + inline void setZero() + { + for (int j=0; j0) + { + int reserveSizePerVector = std::max(reserveSize/outerSize(),4); + for (int j=0; j= \a row. Otherwise the matrix is invalid. + * + * \see fillrand(), coeffRef() + */ + inline Scalar& fill(int row, int col) + { + const int outer = IsRowMajor ? row : col; + const int inner = IsRowMajor ? col : row; + ei_assert(outer= startId) && (m_data[outer].index(id) > inner) ) + { + m_data[outer].index(id+1) = m_data[outer].index(id); + m_data[outer].value(id+1) = m_data[outer].value(id); + --id; + } + m_data[outer].index(id+1) = inner; + m_data[outer].value(id+1) = 0; + return m_data[outer].value(id+1); + } + + /** Does nothing. Provided for compatibility with SparseMatrix. 
*/ + inline void endFill() {} + + void prune(Scalar reference, RealScalar epsilon = precision()) + { + for (int j=0; jinnerSize) + { + // remove all coefficients with innerCoord>=innerSize + // TODO + std::cerr << "not implemented yet\n"; + exit(2); + } + if (m_data.size() != outerSize) + { + m_data.resize(outerSize); + } + } + + inline DynamicSparseMatrix() + : m_innerSize(0), m_data(0) + { + ei_assert(innerSize()==0 && outerSize()==0); + } + + inline DynamicSparseMatrix(int rows, int cols) + : m_innerSize(0) + { + resize(rows, cols); + } + + template + inline DynamicSparseMatrix(const SparseMatrixBase& other) + : m_innerSize(0) + { + *this = other.derived(); + } + + inline DynamicSparseMatrix(const DynamicSparseMatrix& other) + : Base(), m_innerSize(0) + { + *this = other.derived(); + } + + inline void swap(DynamicSparseMatrix& other) + { + //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n"); + std::swap(m_innerSize, other.m_innerSize); + //std::swap(m_outerSize, other.m_outerSize); + m_data.swap(other.m_data); + } + + inline DynamicSparseMatrix& operator=(const DynamicSparseMatrix& other) + { + if (other.isRValue()) + { + swap(other.const_cast_derived()); + } + else + { + resize(other.rows(), other.cols()); + m_data = other.m_data; + } + return *this; + } + + template + inline DynamicSparseMatrix& operator=(const SparseMatrixBase& other) + { + return SparseMatrixBase::operator=(other.derived()); + } + + /** Destructor */ + inline ~DynamicSparseMatrix() {} +}; + +template +class DynamicSparseMatrix::InnerIterator : public SparseVector::InnerIterator +{ + typedef typename SparseVector::InnerIterator Base; + public: + InnerIterator(const DynamicSparseMatrix& mat, int outer) + : Base(mat.m_data[outer]), m_outer(outer) + {} + + inline int row() const { return IsRowMajor ? m_outer : Base::index(); } + inline int col() const { return IsRowMajor ? Base::index() : m_outer; } + + + protected: + const int m_outer; +}; + +#endif // EIGEN_DYNAMIC_SPARSEMATRIX_H diff --git a/extern/Eigen2/Eigen/src/Sparse/MappedSparseMatrix.h b/extern/Eigen2/Eigen/src/Sparse/MappedSparseMatrix.h new file mode 100644 index 00000000000..f4935d8344e --- /dev/null +++ b/extern/Eigen2/Eigen/src/Sparse/MappedSparseMatrix.h @@ -0,0 +1,175 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
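The DynamicSparseMatrix class above is intended for matrix assembly: coeffRef() performs a logarithmic search in the target inner vector and, if needed, a sorted insertion. A minimal usage sketch follows; it assumes the bundled <Eigen/Sparse> header pulls in DynamicSparseMatrix (as the Sparse header in this tree appears to) and uses only members shown above (the (rows, cols) constructor, coeffRef and coeff).

#include <Eigen/Sparse>   // Eigen 2 sparse module as bundled in extern/Eigen2

int main()
{
  // Assemble with random-access writes; coeffRef() inserts the coefficient
  // on demand via a sorted insertion into the corresponding inner vector.
  Eigen::DynamicSparseMatrix<double> m(100, 100);
  for (int k = 0; k < 100; ++k)
    m.coeffRef(k, (k * 37) % 100) += 1.0;

  // Random-access read; absent coefficients read as zero.
  double d = m.coeff(0, 0);
  return d >= 0.0 ? 0 : 1;
}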
+ +#ifndef EIGEN_MAPPED_SPARSEMATRIX_H +#define EIGEN_MAPPED_SPARSEMATRIX_H + +/** \class MappedSparseMatrix + * + * \brief Sparse matrix + * + * \param _Scalar the scalar type, i.e. the type of the coefficients + * + * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme. + * + */ +template +struct ei_traits > : ei_traits > +{}; + +template +class MappedSparseMatrix + : public SparseMatrixBase > +{ + public: + EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(MappedSparseMatrix) + + protected: + enum { IsRowMajor = Base::IsRowMajor }; + + int m_outerSize; + int m_innerSize; + int m_nnz; + int* m_outerIndex; + int* m_innerIndices; + Scalar* m_values; + + public: + + inline int rows() const { return IsRowMajor ? m_outerSize : m_innerSize; } + inline int cols() const { return IsRowMajor ? m_innerSize : m_outerSize; } + inline int innerSize() const { return m_innerSize; } + inline int outerSize() const { return m_outerSize; } + inline int innerNonZeros(int j) const { return m_outerIndex[j+1]-m_outerIndex[j]; } + + //---------------------------------------- + // direct access interface + inline const Scalar* _valuePtr() const { return m_values; } + inline Scalar* _valuePtr() { return m_values; } + + inline const int* _innerIndexPtr() const { return m_innerIndices; } + inline int* _innerIndexPtr() { return m_innerIndices; } + + inline const int* _outerIndexPtr() const { return m_outerIndex; } + inline int* _outerIndexPtr() { return m_outerIndex; } + //---------------------------------------- + + inline Scalar coeff(int row, int col) const + { + const int outer = RowMajor ? row : col; + const int inner = RowMajor ? col : row; + + int start = m_outerIndex[outer]; + int end = m_outerIndex[outer+1]; + if (start==end) + return Scalar(0); + else if (end>0 && inner==m_innerIndices[end-1]) + return m_values[end-1]; + // ^^ optimization: let's first check if it is the last coefficient + // (very common in high level algorithms) + + const int* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end-1],inner); + const int id = r-&m_innerIndices[0]; + return ((*r==inner) && (id=start && "you probably called coeffRef on a non finalized matrix"); + ei_assert(end>start && "coeffRef cannot be called on a zero coefficient"); + int* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end],inner); + const int id = r-&m_innerIndices[0]; + ei_assert((*r==inner) && (id +class MappedSparseMatrix::InnerIterator +{ + public: + InnerIterator(const MappedSparseMatrix& mat, int outer) + : m_matrix(mat), + m_outer(outer), + m_id(mat._outerIndexPtr()[outer]), + m_start(m_id), + m_end(mat._outerIndexPtr()[outer+1]) + {} + + template + InnerIterator(const Flagged& mat, int outer) + : m_matrix(mat._expression()), m_id(m_matrix._outerIndexPtr()[outer]), + m_start(m_id), m_end(m_matrix._outerIndexPtr()[outer+1]) + {} + + inline InnerIterator& operator++() { m_id++; return *this; } + + inline Scalar value() const { return m_matrix._valuePtr()[m_id]; } + inline Scalar& valueRef() { return const_cast(m_matrix._valuePtr()[m_id]); } + + inline int index() const { return m_matrix._innerIndexPtr()[m_id]; } + inline int row() const { return IsRowMajor ? m_outer : index(); } + inline int col() const { return IsRowMajor ? 
index() : m_outer; } + + inline operator bool() const { return (m_id < m_end) && (m_id>=m_start); } + + protected: + const MappedSparseMatrix& m_matrix; + const int m_outer; + int m_id; + const int m_start; + const int m_end; +}; + +#endif // EIGEN_MAPPED_SPARSEMATRIX_H diff --git a/extern/Eigen2/Eigen/src/Sparse/RandomSetter.h b/extern/Eigen2/Eigen/src/Sparse/RandomSetter.h new file mode 100644 index 00000000000..d908e315f3b --- /dev/null +++ b/extern/Eigen2/Eigen/src/Sparse/RandomSetter.h @@ -0,0 +1,330 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_RANDOMSETTER_H +#define EIGEN_RANDOMSETTER_H + +/** Represents a std::map + * + * \see RandomSetter + */ +template struct StdMapTraits +{ + typedef int KeyType; + typedef std::map Type; + enum { + IsSorted = 1 + }; + + static void setInvalidKey(Type&, const KeyType&) {} +}; + +#ifdef EIGEN_UNORDERED_MAP_SUPPORT +/** Represents a std::unordered_map + * + * To use it you need to both define EIGEN_UNORDERED_MAP_SUPPORT and include the unordered_map header file + * yourself making sure that unordered_map is defined in the std namespace. 
+ * + * For instance, with current version of gcc you can either enable C++0x standard (-std=c++0x) or do: + * \code + * #include + * #define EIGEN_UNORDERED_MAP_SUPPORT + * namespace std { + * using std::tr1::unordered_map; + * } + * \endcode + * + * \see RandomSetter + */ +template struct StdUnorderedMapTraits +{ + typedef int KeyType; + typedef std::unordered_map Type; + enum { + IsSorted = 0 + }; + + static void setInvalidKey(Type&, const KeyType&) {} +}; +#endif // EIGEN_UNORDERED_MAP_SUPPORT + +#ifdef _DENSE_HASH_MAP_H_ +/** Represents a google::dense_hash_map + * + * \see RandomSetter + */ +template struct GoogleDenseHashMapTraits +{ + typedef int KeyType; + typedef google::dense_hash_map Type; + enum { + IsSorted = 0 + }; + + static void setInvalidKey(Type& map, const KeyType& k) + { map.set_empty_key(k); } +}; +#endif + +#ifdef _SPARSE_HASH_MAP_H_ +/** Represents a google::sparse_hash_map + * + * \see RandomSetter + */ +template struct GoogleSparseHashMapTraits +{ + typedef int KeyType; + typedef google::sparse_hash_map Type; + enum { + IsSorted = 0 + }; + + static void setInvalidKey(Type&, const KeyType&) {} +}; +#endif + +/** \class RandomSetter + * + * \brief The RandomSetter is a wrapper object allowing to set/update a sparse matrix with random access + * + * \param SparseMatrixType the type of the sparse matrix we are updating + * \param MapTraits a traits class representing the map implementation used for the temporary sparse storage. + * Its default value depends on the system. + * \param OuterPacketBits defines the number of rows (or columns) manage by a single map object + * as a power of two exponent. + * + * This class temporarily represents a sparse matrix object using a generic map implementation allowing for + * efficient random access. The conversion from the compressed representation to a hash_map object is performed + * in the RandomSetter constructor, while the sparse matrix is updated back at destruction time. This strategy + * suggest the use of nested blocks as in this example: + * + * \code + * SparseMatrix m(rows,cols); + * { + * RandomSetter > w(m); + * // don't use m but w instead with read/write random access to the coefficients: + * for(;;) + * w(rand(),rand()) = rand; + * } + * // when w is deleted, the data are copied back to m + * // and m is ready to use. + * \endcode + * + * Since hash_map objects are not fully sorted, representing a full matrix as a single hash_map would + * involve a big and costly sort to update the compressed matrix back. To overcome this issue, a RandomSetter + * use multiple hash_map, each representing 2^OuterPacketBits columns or rows according to the storage order. + * To reach optimal performance, this value should be adjusted according to the average number of nonzeros + * per rows/columns. + * + * The possible values for the template parameter MapTraits are: + * - \b StdMapTraits: corresponds to std::map. (does not perform very well) + * - \b GnuHashMapTraits: corresponds to __gnu_cxx::hash_map (available only with GCC) + * - \b GoogleDenseHashMapTraits: corresponds to google::dense_hash_map (best efficiency, reasonable memory consumption) + * - \b GoogleSparseHashMapTraits: corresponds to google::sparse_hash_map (best memory consumption, relatively good performance) + * + * The default map implementation depends on the availability, and the preferred order is: + * GoogleSparseHashMapTraits, GnuHashMapTraits, and finally StdMapTraits. 
+ * + * For performance and memory consumption reasons it is highly recommended to use one of + * the Google's hash_map implementation. To enable the support for them, you have two options: + * - \#include yourself \b before Eigen/Sparse header + * - define EIGEN_GOOGLEHASH_SUPPORT + * In the later case the inclusion of is made for you. + * + * \see http://code.google.com/p/google-sparsehash/ + */ +template class MapTraits = +#if defined _DENSE_HASH_MAP_H_ + GoogleDenseHashMapTraits +#elif defined _HASH_MAP + GnuHashMapTraits +#else + StdMapTraits +#endif + ,int OuterPacketBits = 6> +class RandomSetter +{ + typedef typename ei_traits::Scalar Scalar; + struct ScalarWrapper + { + ScalarWrapper() : value(0) {} + Scalar value; + }; + typedef typename MapTraits::KeyType KeyType; + typedef typename MapTraits::Type HashMapType; + static const int OuterPacketMask = (1 << OuterPacketBits) - 1; + enum { + SwapStorage = 1 - MapTraits::IsSorted, + TargetRowMajor = (SparseMatrixType::Flags & RowMajorBit) ? 1 : 0, + SetterRowMajor = SwapStorage ? 1-TargetRowMajor : TargetRowMajor, + IsUpperTriangular = SparseMatrixType::Flags & UpperTriangularBit, + IsLowerTriangular = SparseMatrixType::Flags & LowerTriangularBit + }; + + public: + + /** Constructs a random setter object from the sparse matrix \a target + * + * Note that the initial value of \a target are imported. If you want to re-set + * a sparse matrix from scratch, then you must set it to zero first using the + * setZero() function. + */ + inline RandomSetter(SparseMatrixType& target) + : mp_target(&target) + { + const int outerSize = SwapStorage ? target.innerSize() : target.outerSize(); + const int innerSize = SwapStorage ? target.outerSize() : target.innerSize(); + m_outerPackets = outerSize >> OuterPacketBits; + if (outerSize&OuterPacketMask) + m_outerPackets += 1; + m_hashmaps = new HashMapType[m_outerPackets]; + // compute number of bits needed to store inner indices + int aux = innerSize - 1; + m_keyBitsOffset = 0; + while (aux) + { + ++m_keyBitsOffset; + aux = aux >> 1; + } + KeyType ik = (1<<(OuterPacketBits+m_keyBitsOffset)); + for (int k=0; k::setInvalidKey(m_hashmaps[k],ik); + + // insert current coeffs + for (int j=0; jouterSize(); ++j) + for (typename SparseMatrixType::InnerIterator it(*mp_target,j); it; ++it) + (*this)(TargetRowMajor?j:it.index(), TargetRowMajor?it.index():j) = it.value(); + } + + /** Destructor updating back the sparse matrix target */ + ~RandomSetter() + { + KeyType keyBitsMask = (1<startFill(nonZeros()); + for (int k=0; kfirst >> m_keyBitsOffset) + outerOffset; + const int inner = it->first & keyBitsMask; + mp_target->fill(TargetRowMajor ? outer : inner, TargetRowMajor ? 
inner : outer) = it->second.value; + } + } + mp_target->endFill(); + } + else + { + VectorXi positions(mp_target->outerSize()); + positions.setZero(); + // pass 1 + for (int k=0; kfirst & keyBitsMask; + ++positions[outer]; + } + } + // prefix sum + int count = 0; + for (int j=0; jouterSize(); ++j) + { + int tmp = positions[j]; + mp_target->_outerIndexPtr()[j] = count; + positions[j] = count; + count += tmp; + } + mp_target->_outerIndexPtr()[mp_target->outerSize()] = count; + mp_target->resizeNonZeros(count); + // pass 2 + for (int k=0; kfirst >> m_keyBitsOffset) + outerOffset; + const int outer = it->first & keyBitsMask; + // sorted insertion + // Note that we have to deal with at most 2^OuterPacketBits unsorted coefficients, + // moreover those 2^OuterPacketBits coeffs are likely to be sparse, an so only a + // small fraction of them have to be sorted, whence the following simple procedure: + int posStart = mp_target->_outerIndexPtr()[outer]; + int i = (positions[outer]++) - 1; + while ( (i >= posStart) && (mp_target->_innerIndexPtr()[i] > inner) ) + { + mp_target->_valuePtr()[i+1] = mp_target->_valuePtr()[i]; + mp_target->_innerIndexPtr()[i+1] = mp_target->_innerIndexPtr()[i]; + --i; + } + mp_target->_innerIndexPtr()[i+1] = inner; + mp_target->_valuePtr()[i+1] = it->second.value; + } + } + } + delete[] m_hashmaps; + } + + /** \returns a reference to the coefficient at given coordinates \a row, \a col */ + Scalar& operator() (int row, int col) + { + ei_assert(((!IsUpperTriangular) || (row<=col)) && "Invalid access to an upper triangular matrix"); + ei_assert(((!IsLowerTriangular) || (col<=row)) && "Invalid access to an upper triangular matrix"); + const int outer = SetterRowMajor ? row : col; + const int inner = SetterRowMajor ? col : row; + const int outerMajor = outer >> OuterPacketBits; // index of the packet/map + const int outerMinor = outer & OuterPacketMask; // index of the inner vector in the packet + const KeyType key = (KeyType(outerMinor)< +// Copyright (C) 2008 Daniel Gomez Ferro +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_SPARSE_BLOCK_H +#define EIGEN_SPARSE_BLOCK_H + +template +struct ei_traits > +{ + typedef typename ei_traits::Scalar Scalar; + enum { + IsRowMajor = (int(MatrixType::Flags)&RowMajorBit)==RowMajorBit, + Flags = MatrixType::Flags, + RowsAtCompileTime = IsRowMajor ? Size : MatrixType::RowsAtCompileTime, + ColsAtCompileTime = IsRowMajor ? 
MatrixType::ColsAtCompileTime : Size, + CoeffReadCost = MatrixType::CoeffReadCost + }; +}; + +template +class SparseInnerVectorSet : ei_no_assignment_operator, + public SparseMatrixBase > +{ + enum { IsRowMajor = ei_traits::IsRowMajor }; + public: + + EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseInnerVectorSet) + class InnerIterator: public MatrixType::InnerIterator + { + public: + inline InnerIterator(const SparseInnerVectorSet& xpr, int outer) + : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer) + {} + }; + + inline SparseInnerVectorSet(const MatrixType& matrix, int outerStart, int outerSize) + : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize) + { + ei_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) ); + } + + inline SparseInnerVectorSet(const MatrixType& matrix, int outer) + : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size) + { + ei_assert(Size!=Dynamic); + ei_assert( (outer>=0) && (outer +// inline SparseInnerVectorSet& operator=(const SparseMatrixBase& other) +// { +// return *this; +// } + +// template +// inline SparseInnerVectorSet& operator=(const SparseMatrixBase& other) +// { +// return *this; +// } + + EIGEN_STRONG_INLINE int rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } + EIGEN_STRONG_INLINE int cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } + + protected: + + const typename MatrixType::Nested m_matrix; + int m_outerStart; + const ei_int_if_dynamic m_outerSize; + +}; + +/*************************************************************************** +* specialisation for DynamicSparseMatrix +***************************************************************************/ + +template +class SparseInnerVectorSet, Size> + : public SparseMatrixBase, Size> > +{ + typedef DynamicSparseMatrix<_Scalar, _Options> MatrixType; + enum { IsRowMajor = ei_traits::IsRowMajor }; + public: + + EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseInnerVectorSet) + class InnerIterator: public MatrixType::InnerIterator + { + public: + inline InnerIterator(const SparseInnerVectorSet& xpr, int outer) + : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer) + {} + }; + + inline SparseInnerVectorSet(const MatrixType& matrix, int outerStart, int outerSize) + : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize) + { + ei_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) ); + } + + inline SparseInnerVectorSet(const MatrixType& matrix, int outer) + : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size) + { + ei_assert(Size!=Dynamic); + ei_assert( (outer>=0) && (outer + inline SparseInnerVectorSet& operator=(const SparseMatrixBase& other) + { + if (IsRowMajor != ((OtherDerived::Flags&RowMajorBit)==RowMajorBit)) + { + // need to transpose => perform a block evaluation followed by a big swap + DynamicSparseMatrix aux(other); + *this = aux.markAsRValue(); + } + else + { + // evaluate/copy vector per vector + for (int j=0; j aux(other.innerVector(j)); + m_matrix.const_cast_derived()._data()[m_outerStart+j].swap(aux._data()); + } + } + return *this; + } + + inline SparseInnerVectorSet& operator=(const SparseInnerVectorSet& other) + { + return operator=(other); + } + +// template +// inline SparseInnerVectorSet& operator=(const SparseMatrixBase& other) +// { +// return *this; +// } + + EIGEN_STRONG_INLINE int rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } + EIGEN_STRONG_INLINE int cols() const { return IsRowMajor ? 
m_matrix.cols() : m_outerSize.value(); } + + protected: + + const typename MatrixType::Nested m_matrix; + int m_outerStart; + const ei_int_if_dynamic m_outerSize; + +}; + + +/*************************************************************************** +* specialisation for SparseMatrix +***************************************************************************/ +/* +template +class SparseInnerVectorSet, Size> + : public SparseMatrixBase, Size> > +{ + typedef DynamicSparseMatrix<_Scalar, _Options> MatrixType; + enum { IsRowMajor = ei_traits::IsRowMajor }; + public: + + EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseInnerVectorSet) + class InnerIterator: public MatrixType::InnerIterator + { + public: + inline InnerIterator(const SparseInnerVectorSet& xpr, int outer) + : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer) + {} + }; + + inline SparseInnerVectorSet(const MatrixType& matrix, int outerStart, int outerSize) + : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize) + { + ei_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) ); + } + + inline SparseInnerVectorSet(const MatrixType& matrix, int outer) + : m_matrix(matrix), m_outerStart(outer) + { + ei_assert(Size==1); + ei_assert( (outer>=0) && (outer + inline SparseInnerVectorSet& operator=(const SparseMatrixBase& other) + { + if (IsRowMajor != ((OtherDerived::Flags&RowMajorBit)==RowMajorBit)) + { + // need to transpose => perform a block evaluation followed by a big swap + DynamicSparseMatrix aux(other); + *this = aux.markAsRValue(); + } + else + { + // evaluate/copy vector per vector + for (int j=0; j aux(other.innerVector(j)); + m_matrix.const_cast_derived()._data()[m_outerStart+j].swap(aux._data()); + } + } + return *this; + } + + inline SparseInnerVectorSet& operator=(const SparseInnerVectorSet& other) + { + return operator=(other); + } + + inline const Scalar* _valuePtr() const + { return m_matrix._valuePtr() + m_matrix._outerIndexPtr()[m_outerStart]; } + inline const int* _innerIndexPtr() const + { return m_matrix._innerIndexPtr() + m_matrix._outerIndexPtr()[m_outerStart]; } + inline const int* _outerIndexPtr() const { return m_matrix._outerIndexPtr() + m_outerStart; } + +// template +// inline SparseInnerVectorSet& operator=(const SparseMatrixBase& other) +// { +// return *this; +// } + + EIGEN_STRONG_INLINE int rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); } + EIGEN_STRONG_INLINE int cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); } + + protected: + + const typename MatrixType::Nested m_matrix; + int m_outerStart; + const ei_int_if_dynamic m_outerSize; + +}; +*/ +//---------- + +/** \returns the i-th row of the matrix \c *this. For row-major matrix only. */ +template +SparseInnerVectorSet SparseMatrixBase::row(int i) +{ + EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES); + return innerVector(i); +} + +/** \returns the i-th row of the matrix \c *this. For row-major matrix only. + * (read-only version) */ +template +const SparseInnerVectorSet SparseMatrixBase::row(int i) const +{ + EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES); + return innerVector(i); +} + +/** \returns the i-th column of the matrix \c *this. For column-major matrix only. */ +template +SparseInnerVectorSet SparseMatrixBase::col(int i) +{ + EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); + return innerVector(i); +} + +/** \returns the i-th column of the matrix \c *this. 
For column-major matrix only. + * (read-only version) */ +template +const SparseInnerVectorSet SparseMatrixBase::col(int i) const +{ + EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); + return innerVector(i); +} + +/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this + * is col-major (resp. row-major). + */ +template +SparseInnerVectorSet SparseMatrixBase::innerVector(int outer) +{ return SparseInnerVectorSet(derived(), outer); } + +/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this + * is col-major (resp. row-major). Read-only. + */ +template +const SparseInnerVectorSet SparseMatrixBase::innerVector(int outer) const +{ return SparseInnerVectorSet(derived(), outer); } + +//---------- + +/** \returns the i-th row of the matrix \c *this. For row-major matrix only. */ +template +SparseInnerVectorSet SparseMatrixBase::subrows(int start, int size) +{ + EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES); + return innerVectors(start, size); +} + +/** \returns the i-th row of the matrix \c *this. For row-major matrix only. + * (read-only version) */ +template +const SparseInnerVectorSet SparseMatrixBase::subrows(int start, int size) const +{ + EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES); + return innerVectors(start, size); +} + +/** \returns the i-th column of the matrix \c *this. For column-major matrix only. */ +template +SparseInnerVectorSet SparseMatrixBase::subcols(int start, int size) +{ + EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); + return innerVectors(start, size); +} + +/** \returns the i-th column of the matrix \c *this. For column-major matrix only. + * (read-only version) */ +template +const SparseInnerVectorSet SparseMatrixBase::subcols(int start, int size) const +{ + EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); + return innerVectors(start, size); +} + +/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this + * is col-major (resp. row-major). + */ +template +SparseInnerVectorSet SparseMatrixBase::innerVectors(int outerStart, int outerSize) +{ return SparseInnerVectorSet(derived(), outerStart, outerSize); } + +/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this + * is col-major (resp. row-major). Read-only. + */ +template +const SparseInnerVectorSet SparseMatrixBase::innerVectors(int outerStart, int outerSize) const +{ return SparseInnerVectorSet(derived(), outerStart, outerSize); } + +# if 0 +template +class Block + : public SparseMatrixBase > +{ +public: + + _EIGEN_GENERIC_PUBLIC_INTERFACE(Block, SparseMatrixBase) + class InnerIterator; + + /** Column or Row constructor + */ + inline Block(const MatrixType& matrix, int i) + : m_matrix(matrix), + // It is a row if and only if BlockRows==1 and BlockCols==MatrixType::ColsAtCompileTime, + // and it is a column if and only if BlockRows==MatrixType::RowsAtCompileTime and BlockCols==1, + // all other cases are invalid. + // The case a 1x1 matrix seems ambiguous, but the result is the same anyway. + m_startRow( (BlockRows==1) && (BlockCols==MatrixType::ColsAtCompileTime) ? i : 0), + m_startCol( (BlockRows==MatrixType::RowsAtCompileTime) && (BlockCols==1) ? 
i : 0), + m_blockRows(matrix.rows()), // if it is a row, then m_blockRows has a fixed-size of 1, so no pb to try to overwrite it + m_blockCols(matrix.cols()) // same for m_blockCols + { + ei_assert( (i>=0) && ( + ((BlockRows==1) && (BlockCols==MatrixType::ColsAtCompileTime) && i= 0 && BlockRows >= 1 && startRow + BlockRows <= matrix.rows() + && startCol >= 0 && BlockCols >= 1 && startCol + BlockCols <= matrix.cols()); + } + + /** Dynamic-size constructor + */ + inline Block(const MatrixType& matrix, + int startRow, int startCol, + int blockRows, int blockCols) + : m_matrix(matrix), m_startRow(startRow), m_startCol(startCol), + m_blockRows(blockRows), m_blockCols(blockCols) + { + ei_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows) + && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols)); + ei_assert(startRow >= 0 && blockRows >= 1 && startRow + blockRows <= matrix.rows() + && startCol >= 0 && blockCols >= 1 && startCol + blockCols <= matrix.cols()); + } + + inline int rows() const { return m_blockRows.value(); } + inline int cols() const { return m_blockCols.value(); } + + inline int stride(void) const { return m_matrix.stride(); } + + inline Scalar& coeffRef(int row, int col) + { + return m_matrix.const_cast_derived() + .coeffRef(row + m_startRow.value(), col + m_startCol.value()); + } + + inline const Scalar coeff(int row, int col) const + { + return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value()); + } + + inline Scalar& coeffRef(int index) + { + return m_matrix.const_cast_derived() + .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), + m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); + } + + inline const Scalar coeff(int index) const + { + return m_matrix + .coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index), + m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0)); + } + + protected: + + const typename MatrixType::Nested m_matrix; + const ei_int_if_dynamic m_startRow; + const ei_int_if_dynamic m_startCol; + const ei_int_if_dynamic m_blockRows; + const ei_int_if_dynamic m_blockCols; + +}; +#endif + +#endif // EIGEN_SPARSE_BLOCK_H diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseCwise.h b/extern/Eigen2/Eigen/src/Sparse/SparseCwise.h new file mode 100644 index 00000000000..2206883cc76 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Sparse/SparseCwise.h @@ -0,0 +1,175 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// Copyright (C) 2008 Benoit Jacob +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_SPARSE_CWISE_H +#define EIGEN_SPARSE_CWISE_H + +/** \internal + * convenient macro to defined the return type of a cwise binary operation */ +#define EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(OP) \ + CwiseBinaryOp::Scalar>, ExpressionType, OtherDerived> + +#define EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE \ + SparseCwiseBinaryOp< \ + ei_scalar_product_op< \ + typename ei_scalar_product_traits< \ + typename ei_traits::Scalar, \ + typename ei_traits::Scalar \ + >::ReturnType \ + >, \ + ExpressionType, \ + OtherDerived \ + > + +/** \internal + * convenient macro to defined the return type of a cwise unary operation */ +#define EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(OP) \ + SparseCwiseUnaryOp::Scalar>, ExpressionType> + +/** \internal + * convenient macro to defined the return type of a cwise comparison to a scalar */ +/*#define EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(OP) \ + CwiseBinaryOp::Scalar>, ExpressionType, \ + NestByValue >*/ + +template class SparseCwise +{ + public: + + typedef typename ei_traits::Scalar Scalar; + typedef typename ei_meta_if::ret, + ExpressionType, const ExpressionType&>::ret ExpressionTypeNested; + typedef CwiseUnaryOp, ExpressionType> ScalarAddReturnType; + + inline SparseCwise(const ExpressionType& matrix) : m_matrix(matrix) {} + + /** \internal */ + inline const ExpressionType& _expression() const { return m_matrix; } + + template + const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE + operator*(const SparseMatrixBase &other) const; + + template + const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE + operator*(const MatrixBase &other) const; + +// template +// const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op) +// operator/(const SparseMatrixBase &other) const; +// +// template +// const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op) +// operator/(const MatrixBase &other) const; + + template + const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_min_op) + min(const SparseMatrixBase &other) const; + + template + const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_max_op) + max(const SparseMatrixBase &other) const; + + const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_abs_op) abs() const; + const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_abs2_op) abs2() const; +// const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_square_op) square() const; +// const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_cube_op) cube() const; +// const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_inverse_op) inverse() const; +// const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_sqrt_op) sqrt() const; +// const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_exp_op) exp() const; +// const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_log_op) log() const; +// const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_cos_op) cos() const; +// const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_sin_op) sin() const; +// const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_pow_op) pow(const Scalar& exponent) const; + + template + inline ExpressionType& operator*=(const SparseMatrixBase &other); + +// template +// inline ExpressionType& operator/=(const SparseMatrixBase &other); + + /* + template const EIGEN_CWISE_BINOP_RETURN_TYPE(std::less) + operator<(const MatrixBase& other) const; + + template const EIGEN_CWISE_BINOP_RETURN_TYPE(std::less_equal) + operator<=(const MatrixBase& other) const; + + template const 
EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater) + operator>(const MatrixBase& other) const; + + template const EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater_equal) + operator>=(const MatrixBase& other) const; + + template const EIGEN_CWISE_BINOP_RETURN_TYPE(std::equal_to) + operator==(const MatrixBase& other) const; + + template const EIGEN_CWISE_BINOP_RETURN_TYPE(std::not_equal_to) + operator!=(const MatrixBase& other) const; + + // comparisons to a scalar value + const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::less) + operator<(Scalar s) const; + + const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::less_equal) + operator<=(Scalar s) const; + + const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::greater) + operator>(Scalar s) const; + + const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::greater_equal) + operator>=(Scalar s) const; + + const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::equal_to) + operator==(Scalar s) const; + + const EIGEN_CWISE_COMP_TO_SCALAR_RETURN_TYPE(std::not_equal_to) + operator!=(Scalar s) const; + */ + + // allow to extend SparseCwise outside Eigen + #ifdef EIGEN_SPARSE_CWISE_PLUGIN + #include EIGEN_SPARSE_CWISE_PLUGIN + #endif + + protected: + ExpressionTypeNested m_matrix; +}; + +template +inline const SparseCwise +SparseMatrixBase::cwise() const +{ + return derived(); +} + +template +inline SparseCwise +SparseMatrixBase::cwise() +{ + return derived(); +} + +#endif // EIGEN_SPARSE_CWISE_H diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseCwiseBinaryOp.h b/extern/Eigen2/Eigen/src/Sparse/SparseCwiseBinaryOp.h new file mode 100644 index 00000000000..d19970efcb1 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Sparse/SparseCwiseBinaryOp.h @@ -0,0 +1,442 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
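The SparseCwise wrapper above routes coefficient-wise operations on sparse expressions through cwise(): coefficient-wise products, min/max against another operand, abs/abs2, and so on. A usage sketch under the following assumptions: the matrices are filled with the Eigen 2 startFill()/fill()/endFill() protocol (fill() expects coefficients in increasing outer, then inner, order), and a sparse expression can be assigned back to a SparseMatrix.

#include <Eigen/Sparse>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;   // column-major by default
  SpMat a(4, 4), b(4, 4);

  a.startFill(8);          // pass coefficients column by column
  a.fill(0, 0) = 1.0;
  a.fill(2, 1) = 2.0;
  a.endFill();

  b.startFill(8);
  b.fill(0, 0) = 5.0;
  b.fill(3, 2) = -1.0;
  b.endFill();

  // Coefficient-wise product: only indices present in both factors survive,
  // so c holds a single nonzero, 5.0 at (0,0).
  SpMat c(4, 4);
  c = a.cwise() * b;

  // Coefficient-wise absolute value and min against another sparse operand.
  SpMat d(4, 4), e(4, 4);
  d = a.cwise().abs();
  e = a.cwise().min(b);

  return (c.rows() == 4) ? 0 : 1;
}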
+ +#ifndef EIGEN_SPARSE_CWISE_BINARY_OP_H +#define EIGEN_SPARSE_CWISE_BINARY_OP_H + +// Here we have to handle 3 cases: +// 1 - sparse op dense +// 2 - dense op sparse +// 3 - sparse op sparse +// We also need to implement a 4th iterator for: +// 4 - dense op dense +// Finally, we also need to distinguish between the product and other operations : +// configuration returned mode +// 1 - sparse op dense product sparse +// generic dense +// 2 - dense op sparse product sparse +// generic dense +// 3 - sparse op sparse product sparse +// generic sparse +// 4 - dense op dense product dense +// generic dense + +template +struct ei_traits > +{ + typedef typename ei_result_of< + BinaryOp( + typename Lhs::Scalar, + typename Rhs::Scalar + ) + >::type Scalar; + typedef typename Lhs::Nested LhsNested; + typedef typename Rhs::Nested RhsNested; + typedef typename ei_unref::type _LhsNested; + typedef typename ei_unref::type _RhsNested; + enum { + LhsCoeffReadCost = _LhsNested::CoeffReadCost, + RhsCoeffReadCost = _RhsNested::CoeffReadCost, + LhsFlags = _LhsNested::Flags, + RhsFlags = _RhsNested::Flags, + RowsAtCompileTime = Lhs::RowsAtCompileTime, + ColsAtCompileTime = Lhs::ColsAtCompileTime, + MaxRowsAtCompileTime = Lhs::MaxRowsAtCompileTime, + MaxColsAtCompileTime = Lhs::MaxColsAtCompileTime, + Flags = (int(LhsFlags) | int(RhsFlags)) & HereditaryBits, + CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + ei_functor_traits::Cost + }; +}; + +template +class SparseCwiseBinaryOp : ei_no_assignment_operator, + public SparseMatrixBase > +{ + public: + + class InnerIterator; + + EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseCwiseBinaryOp) + typedef typename ei_traits::LhsNested LhsNested; + typedef typename ei_traits::RhsNested RhsNested; + typedef typename ei_unref::type _LhsNested; + typedef typename ei_unref::type _RhsNested; + + EIGEN_STRONG_INLINE SparseCwiseBinaryOp(const Lhs& lhs, const Rhs& rhs, const BinaryOp& func = BinaryOp()) + : m_lhs(lhs), m_rhs(rhs), m_functor(func) + { + EIGEN_STATIC_ASSERT((_LhsNested::Flags&RowMajorBit)==(_RhsNested::Flags&RowMajorBit), + BOTH_MATRICES_MUST_HAVE_THE_SAME_STORAGE_ORDER) + EIGEN_STATIC_ASSERT((ei_functor_allows_mixing_real_and_complex::ret + ? 
int(ei_is_same_type::ret) + : int(ei_is_same_type::ret)), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + // require the sizes to match + EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Lhs, Rhs) + ei_assert(lhs.rows() == rhs.rows() && lhs.cols() == rhs.cols()); + } + + EIGEN_STRONG_INLINE int rows() const { return m_lhs.rows(); } + EIGEN_STRONG_INLINE int cols() const { return m_lhs.cols(); } + + EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; } + EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; } + EIGEN_STRONG_INLINE const BinaryOp& functor() const { return m_functor; } + + protected: + const LhsNested m_lhs; + const RhsNested m_rhs; + const BinaryOp m_functor; +}; + +template +class ei_sparse_cwise_binary_op_inner_iterator_selector; + +template +class SparseCwiseBinaryOp::InnerIterator + : public ei_sparse_cwise_binary_op_inner_iterator_selector::InnerIterator> +{ + public: + typedef ei_sparse_cwise_binary_op_inner_iterator_selector< + BinaryOp,Lhs,Rhs, InnerIterator> Base; + + EIGEN_STRONG_INLINE InnerIterator(const SparseCwiseBinaryOp& binOp, int outer) + : Base(binOp,outer) + {} +}; + +/*************************************************************************** +* Implementation of inner-iterators +***************************************************************************/ + +// template struct ei_func_is_conjunction { enum { ret = false }; }; +// template struct ei_func_is_conjunction > { enum { ret = true }; }; + +// TODO generalize the ei_scalar_product_op specialization to all conjunctions if any ! + +// sparse - sparse (generic) +template +class ei_sparse_cwise_binary_op_inner_iterator_selector +{ + typedef SparseCwiseBinaryOp CwiseBinaryXpr; + typedef typename ei_traits::Scalar Scalar; + typedef typename ei_traits::_LhsNested _LhsNested; + typedef typename ei_traits::_RhsNested _RhsNested; + typedef typename _LhsNested::InnerIterator LhsIterator; + typedef typename _RhsNested::InnerIterator RhsIterator; + public: + + EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, int outer) + : m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor()) + { + this->operator++(); + } + + EIGEN_STRONG_INLINE Derived& operator++() + { + if (m_lhsIter && m_rhsIter && (m_lhsIter.index() == m_rhsIter.index())) + { + m_id = m_lhsIter.index(); + m_value = m_functor(m_lhsIter.value(), m_rhsIter.value()); + ++m_lhsIter; + ++m_rhsIter; + } + else if (m_lhsIter && (!m_rhsIter || (m_lhsIter.index() < m_rhsIter.index()))) + { + m_id = m_lhsIter.index(); + m_value = m_functor(m_lhsIter.value(), Scalar(0)); + ++m_lhsIter; + } + else if (m_rhsIter && (!m_lhsIter || (m_lhsIter.index() > m_rhsIter.index()))) + { + m_id = m_rhsIter.index(); + m_value = m_functor(Scalar(0), m_rhsIter.value()); + ++m_rhsIter; + } + else + { + m_id = -1; + } + return *static_cast(this); + } + + EIGEN_STRONG_INLINE Scalar value() const { return m_value; } + + EIGEN_STRONG_INLINE int index() const { return m_id; } + EIGEN_STRONG_INLINE int row() const { return m_lhsIter.row(); } + EIGEN_STRONG_INLINE int col() const { return m_lhsIter.col(); } + + EIGEN_STRONG_INLINE operator bool() const { return m_id>=0; } + + protected: + LhsIterator m_lhsIter; + RhsIterator m_rhsIter; + const BinaryOp& m_functor; + Scalar m_value; + int m_id; +}; + +// sparse - sparse (product) +template +class ei_sparse_cwise_binary_op_inner_iterator_selector, Lhs, Rhs, Derived, IsSparse, IsSparse> 
+{ + typedef ei_scalar_product_op BinaryFunc; + typedef SparseCwiseBinaryOp CwiseBinaryXpr; + typedef typename CwiseBinaryXpr::Scalar Scalar; + typedef typename ei_traits::_LhsNested _LhsNested; + typedef typename _LhsNested::InnerIterator LhsIterator; + typedef typename ei_traits::_RhsNested _RhsNested; + typedef typename _RhsNested::InnerIterator RhsIterator; + public: + + EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, int outer) + : m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor()) + { + while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index())) + { + if (m_lhsIter.index() < m_rhsIter.index()) + ++m_lhsIter; + else + ++m_rhsIter; + } + } + + EIGEN_STRONG_INLINE Derived& operator++() + { + ++m_lhsIter; + ++m_rhsIter; + while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index())) + { + if (m_lhsIter.index() < m_rhsIter.index()) + ++m_lhsIter; + else + ++m_rhsIter; + } + return *static_cast(this); + } + + EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsIter.value(), m_rhsIter.value()); } + + EIGEN_STRONG_INLINE int index() const { return m_lhsIter.index(); } + EIGEN_STRONG_INLINE int row() const { return m_lhsIter.row(); } + EIGEN_STRONG_INLINE int col() const { return m_lhsIter.col(); } + + EIGEN_STRONG_INLINE operator bool() const { return (m_lhsIter && m_rhsIter); } + + protected: + LhsIterator m_lhsIter; + RhsIterator m_rhsIter; + const BinaryFunc& m_functor; +}; + +// sparse - dense (product) +template +class ei_sparse_cwise_binary_op_inner_iterator_selector, Lhs, Rhs, Derived, IsSparse, IsDense> +{ + typedef ei_scalar_product_op BinaryFunc; + typedef SparseCwiseBinaryOp CwiseBinaryXpr; + typedef typename CwiseBinaryXpr::Scalar Scalar; + typedef typename ei_traits::_LhsNested _LhsNested; + typedef typename ei_traits::RhsNested RhsNested; + typedef typename _LhsNested::InnerIterator LhsIterator; + enum { IsRowMajor = (int(Lhs::Flags)&RowMajorBit)==RowMajorBit }; + public: + + EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, int outer) + : m_rhs(xpr.rhs()), m_lhsIter(xpr.lhs(),outer), m_functor(xpr.functor()), m_outer(outer) + {} + + EIGEN_STRONG_INLINE Derived& operator++() + { + ++m_lhsIter; + return *static_cast(this); + } + + EIGEN_STRONG_INLINE Scalar value() const + { return m_functor(m_lhsIter.value(), + m_rhs.coeff(IsRowMajor?m_outer:m_lhsIter.index(),IsRowMajor?m_lhsIter.index():m_outer)); } + + EIGEN_STRONG_INLINE int index() const { return m_lhsIter.index(); } + EIGEN_STRONG_INLINE int row() const { return m_lhsIter.row(); } + EIGEN_STRONG_INLINE int col() const { return m_lhsIter.col(); } + + EIGEN_STRONG_INLINE operator bool() const { return m_lhsIter; } + + protected: + const RhsNested m_rhs; + LhsIterator m_lhsIter; + const BinaryFunc m_functor; + const int m_outer; +}; + +// sparse - dense (product) +template +class ei_sparse_cwise_binary_op_inner_iterator_selector, Lhs, Rhs, Derived, IsDense, IsSparse> +{ + typedef ei_scalar_product_op BinaryFunc; + typedef SparseCwiseBinaryOp CwiseBinaryXpr; + typedef typename CwiseBinaryXpr::Scalar Scalar; + typedef typename ei_traits::_RhsNested _RhsNested; + typedef typename _RhsNested::InnerIterator RhsIterator; + enum { IsRowMajor = (int(Rhs::Flags)&RowMajorBit)==RowMajorBit }; + public: + + EIGEN_STRONG_INLINE ei_sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, int outer) + : m_xpr(xpr), m_rhsIter(xpr.rhs(),outer), 
m_functor(xpr.functor()), m_outer(outer) + {} + + EIGEN_STRONG_INLINE Derived& operator++() + { + ++m_rhsIter; + return *static_cast(this); + } + + EIGEN_STRONG_INLINE Scalar value() const + { return m_functor(m_xpr.lhs().coeff(IsRowMajor?m_outer:m_rhsIter.index(),IsRowMajor?m_rhsIter.index():m_outer), m_rhsIter.value()); } + + EIGEN_STRONG_INLINE int index() const { return m_rhsIter.index(); } + EIGEN_STRONG_INLINE int row() const { return m_rhsIter.row(); } + EIGEN_STRONG_INLINE int col() const { return m_rhsIter.col(); } + + EIGEN_STRONG_INLINE operator bool() const { return m_rhsIter; } + + protected: + const CwiseBinaryXpr& m_xpr; + RhsIterator m_rhsIter; + const BinaryFunc& m_functor; + const int m_outer; +}; + + +/*************************************************************************** +* Implementation of SparseMatrixBase and SparseCwise functions/operators +***************************************************************************/ + +template +template +EIGEN_STRONG_INLINE const SparseCwiseBinaryOp::Scalar>, + Derived, OtherDerived> +SparseMatrixBase::operator-(const SparseMatrixBase &other) const +{ + return SparseCwiseBinaryOp, + Derived, OtherDerived>(derived(), other.derived()); +} + +template +template +EIGEN_STRONG_INLINE Derived & +SparseMatrixBase::operator-=(const SparseMatrixBase &other) +{ + return *this = derived() - other.derived(); +} + +template +template +EIGEN_STRONG_INLINE const SparseCwiseBinaryOp::Scalar>, Derived, OtherDerived> +SparseMatrixBase::operator+(const SparseMatrixBase &other) const +{ + return SparseCwiseBinaryOp, Derived, OtherDerived>(derived(), other.derived()); +} + +template +template +EIGEN_STRONG_INLINE Derived & +SparseMatrixBase::operator+=(const SparseMatrixBase& other) +{ + return *this = derived() + other.derived(); +} + +template +template +EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE +SparseCwise::operator*(const SparseMatrixBase &other) const +{ + return EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE(_expression(), other.derived()); +} + +template +template +EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE +SparseCwise::operator*(const MatrixBase &other) const +{ + return EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE(_expression(), other.derived()); +} + +// template +// template +// EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op) +// SparseCwise::operator/(const SparseMatrixBase &other) const +// { +// return EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op)(_expression(), other.derived()); +// } +// +// template +// template +// EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op) +// SparseCwise::operator/(const MatrixBase &other) const +// { +// return EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op)(_expression(), other.derived()); +// } + +template +template +inline ExpressionType& SparseCwise::operator*=(const SparseMatrixBase &other) +{ + return m_matrix.const_cast_derived() = _expression() * other.derived(); +} + +// template +// template +// inline ExpressionType& SparseCwise::operator/=(const SparseMatrixBase &other) +// { +// return m_matrix.const_cast_derived() = *this / other; +// } + +template +template +EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_min_op) +SparseCwise::min(const SparseMatrixBase &other) const +{ + return EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_min_op)(_expression(), other.derived()); +} + +template +template +EIGEN_STRONG_INLINE const 
EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_max_op) +SparseCwise::max(const SparseMatrixBase &other) const +{ + return EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(ei_scalar_max_op)(_expression(), other.derived()); +} + +// template +// template +// EIGEN_STRONG_INLINE const CwiseBinaryOp +// SparseMatrixBase::binaryExpr(const SparseMatrixBase &other, const CustomBinaryOp& func) const +// { +// return CwiseBinaryOp(derived(), other.derived(), func); +// } + +#endif // EIGEN_SPARSE_CWISE_BINARY_OP_H diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseCwiseUnaryOp.h b/extern/Eigen2/Eigen/src/Sparse/SparseCwiseUnaryOp.h new file mode 100644 index 00000000000..b11c0f8a377 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Sparse/SparseCwiseUnaryOp.h @@ -0,0 +1,183 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
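The generic sparse-sparse InnerIterator above walks the two operands' inner vectors in lock-step and emits the union of their index sets, substituting zero for a missing coefficient, while the product specialization only visits the intersection. The standalone sketch below reproduces the union-merge logic on plain (index, value) lists; cwiseMerge, Coeff and Plus are illustrative names, not Eigen API.

#include <cstddef>
#include <utility>
#include <vector>

typedef std::pair<int, double> Coeff;   // (inner index, value)

// Union-merge of two index-sorted sparse vectors, applying 'op' to aligned
// coefficients and treating missing ones as zero -- the same scheme as the
// generic sparse-sparse inner iterator above.
template <typename BinaryOp>
std::vector<Coeff> cwiseMerge(const std::vector<Coeff>& lhs,
                              const std::vector<Coeff>& rhs, BinaryOp op)
{
  std::vector<Coeff> res;
  std::size_t i = 0, j = 0;
  while (i < lhs.size() || j < rhs.size())
  {
    if (j == rhs.size() || (i < lhs.size() && lhs[i].first < rhs[j].first))
    { res.push_back(Coeff(lhs[i].first, op(lhs[i].second, 0.0))); ++i; }
    else if (i == lhs.size() || rhs[j].first < lhs[i].first)
    { res.push_back(Coeff(rhs[j].first, op(0.0, rhs[j].second))); ++j; }
    else
    { res.push_back(Coeff(lhs[i].first, op(lhs[i].second, rhs[j].second))); ++i; ++j; }
  }
  return res;
}

struct Plus { double operator()(double a, double b) const { return a + b; } };

int main()
{
  std::vector<Coeff> a, b;
  a.push_back(Coeff(1, 2.0)); a.push_back(Coeff(4, 1.0));
  b.push_back(Coeff(1, 3.0)); b.push_back(Coeff(5, 7.0));
  std::vector<Coeff> sum = cwiseMerge(a, b, Plus());  // indices 1, 4, 5
  return sum.size() == 3 ? 0 : 1;
}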
+ +#ifndef EIGEN_SPARSE_CWISE_UNARY_OP_H +#define EIGEN_SPARSE_CWISE_UNARY_OP_H + +template +struct ei_traits > : ei_traits +{ + typedef typename ei_result_of< + UnaryOp(typename MatrixType::Scalar) + >::type Scalar; + typedef typename MatrixType::Nested MatrixTypeNested; + typedef typename ei_unref::type _MatrixTypeNested; + enum { + CoeffReadCost = _MatrixTypeNested::CoeffReadCost + ei_functor_traits::Cost + }; +}; + +template +class SparseCwiseUnaryOp : ei_no_assignment_operator, + public SparseMatrixBase > +{ + public: + + class InnerIterator; +// typedef typename ei_unref::type _LhsNested; + + EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseCwiseUnaryOp) + + inline SparseCwiseUnaryOp(const MatrixType& mat, const UnaryOp& func = UnaryOp()) + : m_matrix(mat), m_functor(func) {} + + EIGEN_STRONG_INLINE int rows() const { return m_matrix.rows(); } + EIGEN_STRONG_INLINE int cols() const { return m_matrix.cols(); } + +// EIGEN_STRONG_INLINE const typename MatrixType::Nested& _matrix() const { return m_matrix; } +// EIGEN_STRONG_INLINE const UnaryOp& _functor() const { return m_functor; } + + protected: + const typename MatrixType::Nested m_matrix; + const UnaryOp m_functor; +}; + + +template +class SparseCwiseUnaryOp::InnerIterator +{ + typedef typename SparseCwiseUnaryOp::Scalar Scalar; + typedef typename ei_traits::_MatrixTypeNested _MatrixTypeNested; + typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator; + public: + + EIGEN_STRONG_INLINE InnerIterator(const SparseCwiseUnaryOp& unaryOp, int outer) + : m_iter(unaryOp.m_matrix,outer), m_functor(unaryOp.m_functor) + {} + + EIGEN_STRONG_INLINE InnerIterator& operator++() + { ++m_iter; return *this; } + + EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_iter.value()); } + + EIGEN_STRONG_INLINE int index() const { return m_iter.index(); } + EIGEN_STRONG_INLINE int row() const { return m_iter.row(); } + EIGEN_STRONG_INLINE int col() const { return m_iter.col(); } + + EIGEN_STRONG_INLINE operator bool() const { return m_iter; } + + protected: + MatrixTypeIterator m_iter; + const UnaryOp m_functor; +}; + +template +template +EIGEN_STRONG_INLINE const SparseCwiseUnaryOp +SparseMatrixBase::unaryExpr(const CustomUnaryOp& func) const +{ + return SparseCwiseUnaryOp(derived(), func); +} + +template +EIGEN_STRONG_INLINE const SparseCwiseUnaryOp::Scalar>,Derived> +SparseMatrixBase::operator-() const +{ + return derived(); +} + +template +EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_abs_op) +SparseCwise::abs() const +{ + return _expression(); +} + +template +EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_UNOP_RETURN_TYPE(ei_scalar_abs2_op) +SparseCwise::abs2() const +{ + return _expression(); +} + +template +EIGEN_STRONG_INLINE typename SparseMatrixBase::ConjugateReturnType +SparseMatrixBase::conjugate() const +{ + return ConjugateReturnType(derived()); +} + +template +EIGEN_STRONG_INLINE const typename SparseMatrixBase::RealReturnType +SparseMatrixBase::real() const { return derived(); } + +template +EIGEN_STRONG_INLINE const typename SparseMatrixBase::ImagReturnType +SparseMatrixBase::imag() const { return derived(); } + +template +template +EIGEN_STRONG_INLINE const SparseCwiseUnaryOp::Scalar, NewType>, Derived> +SparseMatrixBase::cast() const +{ + return derived(); +} + +template +EIGEN_STRONG_INLINE const SparseCwiseUnaryOp::Scalar>, Derived> +SparseMatrixBase::operator*(const Scalar& scalar) const +{ + return SparseCwiseUnaryOp, Derived> + (derived(), ei_scalar_multiple_op(scalar)); +} + +template 
+EIGEN_STRONG_INLINE const SparseCwiseUnaryOp::Scalar>, Derived> +SparseMatrixBase::operator/(const Scalar& scalar) const +{ + return SparseCwiseUnaryOp, Derived> + (derived(), ei_scalar_quotient1_op(scalar)); +} + +template +EIGEN_STRONG_INLINE Derived& +SparseMatrixBase::operator*=(const Scalar& other) +{ + for (int j=0; j +EIGEN_STRONG_INLINE Derived& +SparseMatrixBase::operator/=(const Scalar& other) +{ + for (int j=0; j +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_SPARSE_DIAGONAL_PRODUCT_H +#define EIGEN_SPARSE_DIAGONAL_PRODUCT_H + +// the product a diagonal matrix with a sparse matrix can be easily +// implemented using expression template. We have two very different cases: +// 1 - diag * row-major sparse +// => each inner vector <=> scalar * sparse vector product +// => so we can reuse CwiseUnaryOp::InnerIterator +// 2 - diag * col-major sparse +// => each inner vector <=> densevector * sparse vector cwise product +// => again, we can reuse specialization of CwiseBinaryOp::InnerIterator +// for that particular case +// The two other cases are symmetric. + +template +struct ei_traits > : ei_traits > +{ + typedef typename ei_cleantype::type _Lhs; + typedef typename ei_cleantype::type _Rhs; + enum { + SparseFlags = ((int(_Lhs::Flags)&Diagonal)==Diagonal) ? int(_Rhs::Flags) : int(_Lhs::Flags), + Flags = SparseBit | (SparseFlags&RowMajorBit) + }; +}; + +enum {SDP_IsDiagonal, SDP_IsSparseRowMajor, SDP_IsSparseColMajor}; +template +class ei_sparse_diagonal_product_inner_iterator_selector; + +template +class SparseDiagonalProduct : public SparseMatrixBase >, ei_no_assignment_operator +{ + typedef typename ei_traits::_LhsNested _LhsNested; + typedef typename ei_traits::_RhsNested _RhsNested; + + enum { + LhsMode = (_LhsNested::Flags&Diagonal)==Diagonal ? SDP_IsDiagonal + : (_LhsNested::Flags&RowMajorBit) ? SDP_IsSparseRowMajor : SDP_IsSparseColMajor, + RhsMode = (_RhsNested::Flags&Diagonal)==Diagonal ? SDP_IsDiagonal + : (_RhsNested::Flags&RowMajorBit) ? 
SDP_IsSparseRowMajor : SDP_IsSparseColMajor + }; + + public: + + EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseDiagonalProduct) + + typedef ei_sparse_diagonal_product_inner_iterator_selector + <_LhsNested,_RhsNested,SparseDiagonalProduct,LhsMode,RhsMode> InnerIterator; + + template + EIGEN_STRONG_INLINE SparseDiagonalProduct(const Lhs& lhs, const Rhs& rhs) + : m_lhs(lhs), m_rhs(rhs) + { + ei_assert(lhs.cols() == rhs.rows() && "invalid sparse matrix * diagonal matrix product"); + } + + EIGEN_STRONG_INLINE int rows() const { return m_lhs.rows(); } + EIGEN_STRONG_INLINE int cols() const { return m_rhs.cols(); } + + EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; } + EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; } + + protected: + LhsNested m_lhs; + RhsNested m_rhs; +}; + + +template +class ei_sparse_diagonal_product_inner_iterator_selector + + : public SparseCwiseUnaryOp,Rhs>::InnerIterator +{ + typedef typename SparseCwiseUnaryOp,Rhs>::InnerIterator Base; + public: + inline ei_sparse_diagonal_product_inner_iterator_selector( + const SparseDiagonalProductType& expr, int outer) + : Base(expr.rhs()*(expr.lhs().diagonal().coeff(outer)), outer) + {} +}; + +template +class ei_sparse_diagonal_product_inner_iterator_selector + + : public SparseCwiseBinaryOp< + ei_scalar_product_op, + SparseInnerVectorSet, + typename Lhs::_CoeffsVectorType>::InnerIterator +{ + typedef typename SparseCwiseBinaryOp< + ei_scalar_product_op, + SparseInnerVectorSet, + typename Lhs::_CoeffsVectorType>::InnerIterator Base; + public: + inline ei_sparse_diagonal_product_inner_iterator_selector( + const SparseDiagonalProductType& expr, int outer) + : Base(expr.rhs().innerVector(outer) .cwise()* expr.lhs().diagonal(), 0) + {} +}; + +template +class ei_sparse_diagonal_product_inner_iterator_selector + + : public SparseCwiseUnaryOp,Lhs>::InnerIterator +{ + typedef typename SparseCwiseUnaryOp,Lhs>::InnerIterator Base; + public: + inline ei_sparse_diagonal_product_inner_iterator_selector( + const SparseDiagonalProductType& expr, int outer) + : Base(expr.lhs()*expr.rhs().diagonal().coeff(outer), outer) + {} +}; + +template +class ei_sparse_diagonal_product_inner_iterator_selector + + : public SparseCwiseBinaryOp< + ei_scalar_product_op, + SparseInnerVectorSet, + NestByValue > >::InnerIterator +{ + typedef typename SparseCwiseBinaryOp< + ei_scalar_product_op, + SparseInnerVectorSet, + NestByValue > >::InnerIterator Base; + public: + inline ei_sparse_diagonal_product_inner_iterator_selector( + const SparseDiagonalProductType& expr, int outer) + : Base(expr.lhs().innerVector(outer) .cwise()* expr.rhs().diagonal().transpose().nestByValue(), 0) + {} +}; + +#endif // EIGEN_SPARSE_DIAGONAL_PRODUCT_H diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseDot.h b/extern/Eigen2/Eigen/src/Sparse/SparseDot.h new file mode 100644 index 00000000000..7a26e0f4ba5 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Sparse/SparseDot.h @@ -0,0 +1,97 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. 
+// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_SPARSE_DOT_H +#define EIGEN_SPARSE_DOT_H + +template +template +typename ei_traits::Scalar +SparseMatrixBase::dot(const MatrixBase& other) const +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) + EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived) + EIGEN_STATIC_ASSERT((ei_is_same_type::ret), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + + ei_assert(size() == other.size()); + ei_assert(other.size()>0 && "you are using a non initialized vector"); + + typename Derived::InnerIterator i(derived(),0); + Scalar res = 0; + while (i) + { + res += i.value() * ei_conj(other.coeff(i.index())); + ++i; + } + return res; +} + +template +template +typename ei_traits::Scalar +SparseMatrixBase::dot(const SparseMatrixBase& other) const +{ + EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) + EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) + EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived) + EIGEN_STATIC_ASSERT((ei_is_same_type::ret), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + + ei_assert(size() == other.size()); + + typename Derived::InnerIterator i(derived(),0); + typename OtherDerived::InnerIterator j(other.derived(),0); + Scalar res = 0; + while (i && j) + { + if (i.index()==j.index()) + { + res += i.value() * ei_conj(j.value()); + ++i; ++j; + } + else if (i.index() +inline typename NumTraits::Scalar>::Real +SparseMatrixBase::squaredNorm() const +{ + return ei_real((*this).cwise().abs2().sum()); +} + +template +inline typename NumTraits::Scalar>::Real +SparseMatrixBase::norm() const +{ + return ei_sqrt(squaredNorm()); +} + +#endif // EIGEN_SPARSE_DOT_H diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseFlagged.h b/extern/Eigen2/Eigen/src/Sparse/SparseFlagged.h new file mode 100644 index 00000000000..c47e162f538 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Sparse/SparseFlagged.h @@ -0,0 +1,97 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Benoit Jacob +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. 
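+// [Editorial illustration -- not part of the original Eigen2 sources.]
+// Usage sketch for the sparse dot products and norms defined in SparseDot.h
+// above; SparseVector is assumed to come from SparseVector.h, and both
+// vectors are assumed to be already filled:
+// \code
+//   SparseVector<double> u(1000), v(1000);
+//   // ... fill u and v ...
+//   double d  = u.dot(v);          // walks both inner iterators in increasing index order
+//   double n2 = u.squaredNorm();   // sum of the squared moduli of the stored coefficients
+//   double n  = u.norm();          // sqrt(squaredNorm())
+// \endcode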
+// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_SPARSE_FLAGGED_H +#define EIGEN_SPARSE_FLAGGED_H + +template +struct ei_traits > : ei_traits +{ + enum { Flags = (ExpressionType::Flags | Added) & ~Removed }; +}; + +template class SparseFlagged + : public SparseMatrixBase > +{ + public: + + EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseFlagged) + class InnerIterator; + class ReverseInnerIterator; + + typedef typename ei_meta_if::ret, + ExpressionType, const ExpressionType&>::ret ExpressionTypeNested; + + inline SparseFlagged(const ExpressionType& matrix) : m_matrix(matrix) {} + + inline int rows() const { return m_matrix.rows(); } + inline int cols() const { return m_matrix.cols(); } + + // FIXME should be keep them ? + inline Scalar& coeffRef(int row, int col) + { return m_matrix.const_cast_derived().coeffRef(col, row); } + + inline const Scalar coeff(int row, int col) const + { return m_matrix.coeff(col, row); } + + inline const Scalar coeff(int index) const + { return m_matrix.coeff(index); } + + inline Scalar& coeffRef(int index) + { return m_matrix.const_cast_derived().coeffRef(index); } + + protected: + ExpressionTypeNested m_matrix; +}; + +template + class SparseFlagged::InnerIterator : public ExpressionType::InnerIterator +{ + public: + + EIGEN_STRONG_INLINE InnerIterator(const SparseFlagged& xpr, int outer) + : ExpressionType::InnerIterator(xpr.m_matrix, outer) + {} +}; + +template + class SparseFlagged::ReverseInnerIterator : public ExpressionType::ReverseInnerIterator +{ + public: + + EIGEN_STRONG_INLINE ReverseInnerIterator(const SparseFlagged& xpr, int outer) + : ExpressionType::ReverseInnerIterator(xpr.m_matrix, outer) + {} +}; + +template +template +inline const SparseFlagged +SparseMatrixBase::marked() const +{ + return derived(); +} + +#endif // EIGEN_SPARSE_FLAGGED_H diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseFuzzy.h b/extern/Eigen2/Eigen/src/Sparse/SparseFuzzy.h new file mode 100644 index 00000000000..355f4d52eab --- /dev/null +++ b/extern/Eigen2/Eigen/src/Sparse/SparseFuzzy.h @@ -0,0 +1,41 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. 
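+// [Editorial illustration -- not part of the original Eigen2 sources.]
+// SparseFlagged (via marked()) wraps a sparse expression with extra flags.
+// A hedged example: tagging a matrix whose stored coefficients form a lower
+// triangular matrix before a triangular solve. LowerTriangular is assumed to
+// be the Eigen2 mode constant usable with marked(); b is a dense right hand
+// side:
+// \code
+//   A.marked<LowerTriangular>().solveTriangularInPlace(b);
+// \endcode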
+// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_SPARSE_FUZZY_H +#define EIGEN_SPARSE_FUZZY_H + +// template +// template +// bool SparseMatrixBase::isApprox( +// const OtherDerived& other, +// typename NumTraits::Real prec +// ) const +// { +// const typename ei_nested::type nested(derived()); +// const typename ei_nested::type otherNested(other.derived()); +// return (nested - otherNested).cwise().abs2().sum() +// <= prec * prec * std::min(nested.cwise().abs2().sum(), otherNested.cwise().abs2().sum()); +// } + +#endif // EIGEN_SPARSE_FUZZY_H diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseLDLT.h b/extern/Eigen2/Eigen/src/Sparse/SparseLDLT.h new file mode 100644 index 00000000000..a1bac4d084d --- /dev/null +++ b/extern/Eigen2/Eigen/src/Sparse/SparseLDLT.h @@ -0,0 +1,346 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +/* + +NOTE: the _symbolic, and _numeric functions has been adapted from + the LDL library: + +LDL Copyright (c) 2005 by Timothy A. Davis. All Rights Reserved. + +LDL License: + + Your use or distribution of LDL or any modified version of + LDL implies that you agree to this License. + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 + USA + + Permission is hereby granted to use or copy this program under the + terms of the GNU LGPL, provided that the Copyright, this License, + and the Availability of the original version is retained on all copies. + User documentation of any code that uses this code or any modified + version of this code must cite the Copyright, this License, the + Availability note, and "Used by permission." 
Permission to modify + the code and to distribute modified code is granted, provided the + Copyright, this License, and the Availability note are retained, + and a notice that the code was modified is included. + */ + +#ifndef EIGEN_SPARSELDLT_H +#define EIGEN_SPARSELDLT_H + +/** \ingroup Sparse_Module + * + * \class SparseLDLT + * + * \brief LDLT Cholesky decomposition of a sparse matrix and associated features + * + * \param MatrixType the type of the matrix of which we are computing the LDLT Cholesky decomposition + * + * \sa class LDLT, class LDLT + */ +template +class SparseLDLT +{ + protected: + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef SparseMatrix CholMatrixType; + typedef Matrix VectorType; + + enum { + SupernodalFactorIsDirty = 0x10000, + MatrixLIsDirty = 0x20000 + }; + + public: + + /** Creates a dummy LDLT factorization object with flags \a flags. */ + SparseLDLT(int flags = 0) + : m_flags(flags), m_status(0) + { + ei_assert((MatrixType::Flags&RowMajorBit)==0); + m_precision = RealScalar(0.1) * Eigen::precision(); + } + + /** Creates a LDLT object and compute the respective factorization of \a matrix using + * flags \a flags. */ + SparseLDLT(const MatrixType& matrix, int flags = 0) + : m_matrix(matrix.rows(), matrix.cols()), m_flags(flags), m_status(0) + { + ei_assert((MatrixType::Flags&RowMajorBit)==0); + m_precision = RealScalar(0.1) * Eigen::precision(); + compute(matrix); + } + + /** Sets the relative threshold value used to prune zero coefficients during the decomposition. + * + * Setting a value greater than zero speeds up computation, and yields to an imcomplete + * factorization with fewer non zero coefficients. Such approximate factors are especially + * useful to initialize an iterative solver. + * + * \warning if precision is greater that zero, the LDLT factorization is not guaranteed to succeed + * even if the matrix is positive definite. + * + * Note that the exact meaning of this parameter might depends on the actual + * backend. Moreover, not all backends support this feature. + * + * \sa precision() */ + void setPrecision(RealScalar v) { m_precision = v; } + + /** \returns the current precision. + * + * \sa setPrecision() */ + RealScalar precision() const { return m_precision; } + + /** Sets the flags. 
Possible values are: + * - CompleteFactorization + * - IncompleteFactorization + * - MemoryEfficient (hint to use the memory most efficient method offered by the backend) + * - SupernodalMultifrontal (implies a complete factorization if supported by the backend, + * overloads the MemoryEfficient flags) + * - SupernodalLeftLooking (implies a complete factorization if supported by the backend, + * overloads the MemoryEfficient flags) + * + * \sa flags() */ + void settagss(int f) { m_flags = f; } + /** \returns the current flags */ + int flags() const { return m_flags; } + + /** Computes/re-computes the LDLT factorization */ + void compute(const MatrixType& matrix); + + /** Perform a symbolic factorization */ + void _symbolic(const MatrixType& matrix); + /** Perform the actual factorization using the previously + * computed symbolic factorization */ + bool _numeric(const MatrixType& matrix); + + /** \returns the lower triangular matrix L */ + inline const CholMatrixType& matrixL(void) const { return m_matrix; } + + /** \returns the coefficients of the diagonal matrix D */ + inline VectorType vectorD(void) const { return m_diag; } + + template + bool solveInPlace(MatrixBase &b) const; + + /** \returns true if the factorization succeeded */ + inline bool succeeded(void) const { return m_succeeded; } + + protected: + CholMatrixType m_matrix; + VectorType m_diag; + VectorXi m_parent; // elimination tree + VectorXi m_nonZerosPerCol; +// VectorXi m_w; // workspace + RealScalar m_precision; + int m_flags; + mutable int m_status; + bool m_succeeded; +}; + +/** Computes / recomputes the LDLT decomposition of matrix \a a + * using the default algorithm. + */ +template +void SparseLDLT::compute(const MatrixType& a) +{ + _symbolic(a); + m_succeeded = _numeric(a); +} + +template +void SparseLDLT::_symbolic(const MatrixType& a) +{ + assert(a.rows()==a.cols()); + const int size = a.rows(); + m_matrix.resize(size, size); + m_parent.resize(size); + m_nonZerosPerCol.resize(size); + int * tags = ei_aligned_stack_new(int, size); + + const int* Ap = a._outerIndexPtr(); + const int* Ai = a._innerIndexPtr(); + int* Lp = m_matrix._outerIndexPtr(); + const int* P = 0; + int* Pinv = 0; + + if (P) + { + /* If P is present then compute Pinv, the inverse of P */ + for (int k = 0; k < size; ++k) + Pinv[P[k]] = k; + } + for (int k = 0; k < size; ++k) + { + /* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */ + m_parent[k] = -1; /* parent of k is not yet known */ + tags[k] = k; /* mark node k as visited */ + m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */ + int kk = P ? P[k] : k; /* kth original, or permuted, column */ + int p2 = Ap[kk+1]; + for (int p = Ap[kk]; p < p2; ++p) + { + /* A (i,k) is nonzero (original or permuted A) */ + int i = Pinv ? 
Pinv[Ai[p]] : Ai[p]; + if (i < k) + { + /* follow path from i to root of etree, stop at flagged node */ + for (; tags[i] != k; i = m_parent[i]) + { + /* find parent of i if not yet determined */ + if (m_parent[i] == -1) + m_parent[i] = k; + ++m_nonZerosPerCol[i]; /* L (k,i) is nonzero */ + tags[i] = k; /* mark i as visited */ + } + } + } + } + /* construct Lp index array from m_nonZerosPerCol column counts */ + Lp[0] = 0; + for (int k = 0; k < size; ++k) + Lp[k+1] = Lp[k] + m_nonZerosPerCol[k]; + + m_matrix.resizeNonZeros(Lp[size]); + ei_aligned_stack_delete(int, tags, size); +} + +template +bool SparseLDLT::_numeric(const MatrixType& a) +{ + assert(a.rows()==a.cols()); + const int size = a.rows(); + assert(m_parent.size()==size); + assert(m_nonZerosPerCol.size()==size); + + const int* Ap = a._outerIndexPtr(); + const int* Ai = a._innerIndexPtr(); + const Scalar* Ax = a._valuePtr(); + const int* Lp = m_matrix._outerIndexPtr(); + int* Li = m_matrix._innerIndexPtr(); + Scalar* Lx = m_matrix._valuePtr(); + m_diag.resize(size); + + Scalar * y = ei_aligned_stack_new(Scalar, size); + int * pattern = ei_aligned_stack_new(int, size); + int * tags = ei_aligned_stack_new(int, size); + + const int* P = 0; + const int* Pinv = 0; + bool ok = true; + + for (int k = 0; k < size; ++k) + { + /* compute nonzero pattern of kth row of L, in topological order */ + y[k] = 0.0; /* Y(0:k) is now all zero */ + int top = size; /* stack for pattern is empty */ + tags[k] = k; /* mark node k as visited */ + m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */ + int kk = (P) ? (P[k]) : (k); /* kth original, or permuted, column */ + int p2 = Ap[kk+1]; + for (int p = Ap[kk]; p < p2; ++p) + { + int i = Pinv ? Pinv[Ai[p]] : Ai[p]; /* get A(i,k) */ + if (i <= k) + { + y[i] += Ax[p]; /* scatter A(i,k) into Y (sum duplicates) */ + int len; + for (len = 0; tags[i] != k; i = m_parent[i]) + { + pattern[len++] = i; /* L(k,i) is nonzero */ + tags[i] = k; /* mark i as visited */ + } + while (len > 0) + pattern[--top] = pattern[--len]; + } + } + /* compute numerical values kth row of L (a sparse triangular solve) */ + m_diag[k] = y[k]; /* get D(k,k) and clear Y(k) */ + y[k] = 0.0; + for (; top < size; ++top) + { + int i = pattern[top]; /* pattern[top:n-1] is pattern of L(:,k) */ + Scalar yi = y[i]; /* get and clear Y(i) */ + y[i] = 0.0; + int p2 = Lp[i] + m_nonZerosPerCol[i]; + int p; + for (p = Lp[i]; p < p2; ++p) + y[Li[p]] -= Lx[p] * yi; + Scalar l_ki = yi / m_diag[i]; /* the nonzero entry L(k,i) */ + m_diag[k] -= l_ki * yi; + Li[p] = k; /* store L(k,i) in column form of L */ + Lx[p] = l_ki; + ++m_nonZerosPerCol[i]; /* increment count of nonzeros in col i */ + } + if (m_diag[k] == 0.0) + { + ok = false; /* failure, D(k,k) is zero */ + break; + } + } + + ei_aligned_stack_delete(Scalar, y, size); + ei_aligned_stack_delete(int, pattern, size); + ei_aligned_stack_delete(int, tags, size); + + return ok; /* success, diagonal of D is all nonzero */ +} + +/** Computes b = L^-T L^-1 b */ +template +template +bool SparseLDLT::solveInPlace(MatrixBase &b) const +{ + const int size = m_matrix.rows(); + ei_assert(size==b.rows()); + if (!m_succeeded) + return false; + + if (m_matrix.nonZeros()>0) // otherwise L==I + m_matrix.solveTriangularInPlace(b); + b = b.cwise() / m_diag; + // FIXME should be .adjoint() but it fails to compile... 
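+  // [Editorial illustration -- not part of the original Eigen2 sources.]
+  // Typical use of SparseLDLT as sketched from the interface above; A must be
+  // column major and symmetric positive definite, and only its coefficients
+  // with row <= col are referenced (cf. the "i <= k" test in _numeric()):
+  // \code
+  //   const int n = 100;
+  //   SparseMatrix<double> A(n,n);               // fill (at least) the upper triangular part
+  //   VectorXd b = VectorXd::Ones(n);
+  //   SparseLDLT<SparseMatrix<double> > ldlt(A); // factorizes A = L D L^T
+  //   if (ldlt.succeeded())
+  //     ldlt.solveInPlace(b);                    // b now holds the solution of A x = b
+  // \endcode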
+ + if (m_matrix.nonZeros()>0) // otherwise L==I + m_matrix.transpose().solveTriangularInPlace(b); + + return true; +} + +#endif // EIGEN_SPARSELDLT_H diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseLLT.h b/extern/Eigen2/Eigen/src/Sparse/SparseLLT.h new file mode 100644 index 00000000000..e7c314c2cad --- /dev/null +++ b/extern/Eigen2/Eigen/src/Sparse/SparseLLT.h @@ -0,0 +1,205 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_SPARSELLT_H +#define EIGEN_SPARSELLT_H + +/** \ingroup Sparse_Module + * + * \class SparseLLT + * + * \brief LLT Cholesky decomposition of a sparse matrix and associated features + * + * \param MatrixType the type of the matrix of which we are computing the LLT Cholesky decomposition + * + * \sa class LLT, class LDLT + */ +template +class SparseLLT +{ + protected: + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef SparseMatrix CholMatrixType; + + enum { + SupernodalFactorIsDirty = 0x10000, + MatrixLIsDirty = 0x20000 + }; + + public: + + /** Creates a dummy LLT factorization object with flags \a flags. */ + SparseLLT(int flags = 0) + : m_flags(flags), m_status(0) + { + m_precision = RealScalar(0.1) * Eigen::precision(); + } + + /** Creates a LLT object and compute the respective factorization of \a matrix using + * flags \a flags. */ + SparseLLT(const MatrixType& matrix, int flags = 0) + : m_matrix(matrix.rows(), matrix.cols()), m_flags(flags), m_status(0) + { + m_precision = RealScalar(0.1) * Eigen::precision(); + compute(matrix); + } + + /** Sets the relative threshold value used to prune zero coefficients during the decomposition. + * + * Setting a value greater than zero speeds up computation, and yields to an imcomplete + * factorization with fewer non zero coefficients. Such approximate factors are especially + * useful to initialize an iterative solver. + * + * \warning if precision is greater that zero, the LLT factorization is not guaranteed to succeed + * even if the matrix is positive definite. + * + * Note that the exact meaning of this parameter might depends on the actual + * backend. Moreover, not all backends support this feature. + * + * \sa precision() */ + void setPrecision(RealScalar v) { m_precision = v; } + + /** \returns the current precision. + * + * \sa setPrecision() */ + RealScalar precision() const { return m_precision; } + + /** Sets the flags. 
Possible values are: + * - CompleteFactorization + * - IncompleteFactorization + * - MemoryEfficient (hint to use the memory most efficient method offered by the backend) + * - SupernodalMultifrontal (implies a complete factorization if supported by the backend, + * overloads the MemoryEfficient flags) + * - SupernodalLeftLooking (implies a complete factorization if supported by the backend, + * overloads the MemoryEfficient flags) + * + * \sa flags() */ + void setFlags(int f) { m_flags = f; } + /** \returns the current flags */ + int flags() const { return m_flags; } + + /** Computes/re-computes the LLT factorization */ + void compute(const MatrixType& matrix); + + /** \returns the lower triangular matrix L */ + inline const CholMatrixType& matrixL(void) const { return m_matrix; } + + template + bool solveInPlace(MatrixBase &b) const; + + /** \returns true if the factorization succeeded */ + inline bool succeeded(void) const { return m_succeeded; } + + protected: + CholMatrixType m_matrix; + RealScalar m_precision; + int m_flags; + mutable int m_status; + bool m_succeeded; +}; + +/** Computes / recomputes the LLT decomposition of matrix \a a + * using the default algorithm. + */ +template +void SparseLLT::compute(const MatrixType& a) +{ + assert(a.rows()==a.cols()); + const int size = a.rows(); + m_matrix.resize(size, size); + + // allocate a temporary vector for accumulations + AmbiVector tempVector(size); + RealScalar density = a.nonZeros()/RealScalar(size*size); + + // TODO estimate the number of non zeros + m_matrix.startFill(a.nonZeros()*2); + for (int j = 0; j < size; ++j) + { + Scalar x = ei_real(a.coeff(j,j)); + + // TODO better estimate of the density ! + tempVector.init(density>0.001? IsDense : IsSparse); + tempVector.setBounds(j+1,size); + tempVector.setZero(); + // init with current matrix a + { + typename MatrixType::InnerIterator it(a,j); + ++it; // skip diagonal element + for (; it; ++it) + tempVector.coeffRef(it.index()) = it.value(); + } + for (int k=0; k::Iterator it(tempVector, m_precision*rx); it; ++it) + { + m_matrix.fill(it.index(), j) = it.value() * y; + } + } + m_matrix.endFill(); +} + +/** Computes b = L^-T L^-1 b */ +template +template +bool SparseLLT::solveInPlace(MatrixBase &b) const +{ + const int size = m_matrix.rows(); + ei_assert(size==b.rows()); + + m_matrix.solveTriangularInPlace(b); + // FIXME should be simply .adjoint() but it fails to compile... + if (NumTraits::IsComplex) + { + CholMatrixType aux = m_matrix.conjugate(); + aux.transpose().solveTriangularInPlace(b); + } + else + m_matrix.transpose().solveTriangularInPlace(b); + + return true; +} + +#endif // EIGEN_SPARSELLT_H diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseLU.h b/extern/Eigen2/Eigen/src/Sparse/SparseLU.h new file mode 100644 index 00000000000..1425920509f --- /dev/null +++ b/extern/Eigen2/Eigen/src/Sparse/SparseLU.h @@ -0,0 +1,148 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. 
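+// [Editorial illustration -- not part of the original Eigen2 sources.]
+// Sketch of the SparseLLT interface defined above, including setPrecision()
+// to build an incomplete factor (e.g. as a preconditioner); A is assumed to
+// be a filled, symmetric positive definite SparseMatrix<double> and b a dense
+// right hand side:
+// \code
+//   SparseLLT<SparseMatrix<double> > llt(A);   // complete factorization A = L L^T
+//   llt.solveInPlace(b);                       // b <- A^{-1} b
+//
+//   SparseLLT<SparseMatrix<double> > illt;     // incomplete variant
+//   illt.setPrecision(1e-3);                   // prune small terms during compute()
+//   illt.compute(A);
+// \endcode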
+// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_SPARSELU_H +#define EIGEN_SPARSELU_H + +/** \ingroup Sparse_Module + * + * \class SparseLU + * + * \brief LU decomposition of a sparse matrix and associated features + * + * \param MatrixType the type of the matrix of which we are computing the LU factorization + * + * \sa class LU, class SparseLLT + */ +template +class SparseLU +{ + protected: + typedef typename MatrixType::Scalar Scalar; + typedef typename NumTraits::Real RealScalar; + typedef SparseMatrix LUMatrixType; + + enum { + MatrixLUIsDirty = 0x10000 + }; + + public: + + /** Creates a dummy LU factorization object with flags \a flags. */ + SparseLU(int flags = 0) + : m_flags(flags), m_status(0) + { + m_precision = RealScalar(0.1) * Eigen::precision(); + } + + /** Creates a LU object and compute the respective factorization of \a matrix using + * flags \a flags. */ + SparseLU(const MatrixType& matrix, int flags = 0) + : /*m_matrix(matrix.rows(), matrix.cols()),*/ m_flags(flags), m_status(0) + { + m_precision = RealScalar(0.1) * Eigen::precision(); + compute(matrix); + } + + /** Sets the relative threshold value used to prune zero coefficients during the decomposition. + * + * Setting a value greater than zero speeds up computation, and yields to an imcomplete + * factorization with fewer non zero coefficients. Such approximate factors are especially + * useful to initialize an iterative solver. + * + * Note that the exact meaning of this parameter might depends on the actual + * backend. Moreover, not all backends support this feature. + * + * \sa precision() */ + void setPrecision(RealScalar v) { m_precision = v; } + + /** \returns the current precision. + * + * \sa setPrecision() */ + RealScalar precision() const { return m_precision; } + + /** Sets the flags. Possible values are: + * - CompleteFactorization + * - IncompleteFactorization + * - MemoryEfficient + * - one of the ordering methods + * - etc... 
+ * + * \sa flags() */ + void setFlags(int f) { m_flags = f; } + /** \returns the current flags */ + int flags() const { return m_flags; } + + void setOrderingMethod(int m) + { + ei_assert(m&~OrderingMask == 0 && m!=0 && "invalid ordering method"); + m_flags = m_flags&~OrderingMask | m&OrderingMask; + } + + int orderingMethod() const + { + return m_flags&OrderingMask; + } + + /** Computes/re-computes the LU factorization */ + void compute(const MatrixType& matrix); + + /** \returns the lower triangular matrix L */ + //inline const MatrixType& matrixL() const { return m_matrixL; } + + /** \returns the upper triangular matrix U */ + //inline const MatrixType& matrixU() const { return m_matrixU; } + + template + bool solve(const MatrixBase &b, MatrixBase* x) const; + + /** \returns true if the factorization succeeded */ + inline bool succeeded(void) const { return m_succeeded; } + + protected: + RealScalar m_precision; + int m_flags; + mutable int m_status; + bool m_succeeded; +}; + +/** Computes / recomputes the LU decomposition of matrix \a a + * using the default algorithm. + */ +template +void SparseLU::compute(const MatrixType& a) +{ + ei_assert(false && "not implemented yet"); +} + +/** Computes *x = U^-1 L^-1 b */ +template +template +bool SparseLU::solve(const MatrixBase &b, MatrixBase* x) const +{ + ei_assert(false && "not implemented yet"); + return false; +} + +#endif // EIGEN_SPARSELU_H diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseMatrix.h b/extern/Eigen2/Eigen/src/Sparse/SparseMatrix.h new file mode 100644 index 00000000000..3f09596bc64 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Sparse/SparseMatrix.h @@ -0,0 +1,447 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_SPARSEMATRIX_H +#define EIGEN_SPARSEMATRIX_H + +/** \class SparseMatrix + * + * \brief Sparse matrix + * + * \param _Scalar the scalar type, i.e. the type of the coefficients + * + * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme. 
+ * + */ +template +struct ei_traits > +{ + typedef _Scalar Scalar; + enum { + RowsAtCompileTime = Dynamic, + ColsAtCompileTime = Dynamic, + MaxRowsAtCompileTime = Dynamic, + MaxColsAtCompileTime = Dynamic, + Flags = SparseBit | _Flags, + CoeffReadCost = NumTraits::ReadCost, + SupportedAccessPatterns = InnerRandomAccessPattern + }; +}; + + + +template +class SparseMatrix + : public SparseMatrixBase > +{ + public: + EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseMatrix) + EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=) + EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=) + // FIXME: why are these operator already alvailable ??? + // EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(SparseMatrix, *=) + // EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(SparseMatrix, /=) + + typedef MappedSparseMatrix Map; + + protected: + + enum { IsRowMajor = Base::IsRowMajor }; + typedef SparseMatrix TransposedSparseMatrix; + + int m_outerSize; + int m_innerSize; + int* m_outerIndex; + CompressedStorage m_data; + + public: + + inline int rows() const { return IsRowMajor ? m_outerSize : m_innerSize; } + inline int cols() const { return IsRowMajor ? m_innerSize : m_outerSize; } + + inline int innerSize() const { return m_innerSize; } + inline int outerSize() const { return m_outerSize; } + inline int innerNonZeros(int j) const { return m_outerIndex[j+1]-m_outerIndex[j]; } + + inline const Scalar* _valuePtr() const { return &m_data.value(0); } + inline Scalar* _valuePtr() { return &m_data.value(0); } + + inline const int* _innerIndexPtr() const { return &m_data.index(0); } + inline int* _innerIndexPtr() { return &m_data.index(0); } + + inline const int* _outerIndexPtr() const { return m_outerIndex; } + inline int* _outerIndexPtr() { return m_outerIndex; } + + inline Scalar coeff(int row, int col) const + { + const int outer = IsRowMajor ? row : col; + const int inner = IsRowMajor ? col : row; + return m_data.atInRange(m_outerIndex[outer], m_outerIndex[outer+1], inner); + } + + inline Scalar& coeffRef(int row, int col) + { + const int outer = IsRowMajor ? row : col; + const int inner = IsRowMajor ? 
col : row; + + int start = m_outerIndex[outer]; + int end = m_outerIndex[outer+1]; + ei_assert(end>=start && "you probably called coeffRef on a non finalized matrix"); + ei_assert(end>start && "coeffRef cannot be called on a zero coefficient"); + const int id = m_data.searchLowerIndex(start,end-1,inner); + ei_assert((id=0 && m_outerIndex[i]==0) + { + m_outerIndex[i] = m_data.size(); + --i; + } + m_outerIndex[outer+1] = m_outerIndex[outer]; + } + else + { + ei_assert(m_data.index(m_data.size()-1)=0 && m_outerIndex[i]==0) + { + m_outerIndex[i] = m_data.size(); + --i; + } + m_outerIndex[outer+1] = m_outerIndex[outer]; + } + assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "invalid outer index"); + size_t startId = m_outerIndex[outer]; + // FIXME let's make sure sizeof(long int) == sizeof(size_t) + size_t id = m_outerIndex[outer+1]; + ++m_outerIndex[outer+1]; + + float reallocRatio = 1; + if (m_data.allocatedSize() startId) && (m_data.index(id-1) > inner) ) + { + m_data.index(id) = m_data.index(id-1); + m_data.value(id) = m_data.value(id-1); + --id; + } + + m_data.index(id) = inner; + return (m_data.value(id) = 0); + } + + inline void endFill() + { + int size = m_data.size(); + int i = m_outerSize; + // find the last filled column + while (i>=0 && m_outerIndex[i]==0) + --i; + ++i; + while (i<=m_outerSize) + { + m_outerIndex[i] = size; + ++i; + } + } + + void prune(Scalar reference, RealScalar epsilon = precision()) + { + int k = 0; + for (int j=0; j + inline SparseMatrix(const SparseMatrixBase& other) + : m_outerSize(0), m_innerSize(0), m_outerIndex(0) + { + *this = other.derived(); + } + + inline SparseMatrix(const SparseMatrix& other) + : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0) + { + *this = other.derived(); + } + + inline void swap(SparseMatrix& other) + { + //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n"); + std::swap(m_outerIndex, other.m_outerIndex); + std::swap(m_innerSize, other.m_innerSize); + std::swap(m_outerSize, other.m_outerSize); + m_data.swap(other.m_data); + } + + inline SparseMatrix& operator=(const SparseMatrix& other) + { +// std::cout << "SparseMatrix& operator=(const SparseMatrix& other)\n"; + if (other.isRValue()) + { + swap(other.const_cast_derived()); + } + else + { + resize(other.rows(), other.cols()); + memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(int)); + m_data = other.m_data; + } + return *this; + } + + template + inline SparseMatrix& operator=(const SparseMatrixBase& other) + { + const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit); + if (needToTranspose) + { + // two passes algorithm: + // 1 - compute the number of coeffs per dest inner vector + // 2 - do the actual copy/eval + // Since each coeff of the rhs has to be evaluated twice, let's evauluate it if needed + //typedef typename ei_nested::type OtherCopy; + typedef typename ei_eval::type OtherCopy; + typedef typename ei_cleantype::type _OtherCopy; + OtherCopy otherCopy(other.derived()); + + resize(other.rows(), other.cols()); + Eigen::Map(m_outerIndex,outerSize()).setZero(); + // pass 1 + // FIXME the above copy could be merged with that pass + for (int j=0; j::operator=(other.derived()); + } + } + + friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m) + { + EIGEN_DBG_SPARSE( + s << "Nonzero entries:\n"; + for (int i=0; i&>(m); + return s; + } + + /** Destructor */ + inline ~SparseMatrix() + { + delete[] m_outerIndex; + } +}; + +template +class SparseMatrix::InnerIterator +{ + public: + 
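+    // [Editorial illustration -- not part of the original Eigen2 sources.]
+    // Building a SparseMatrix with the startFill()/fill()/endFill() API shown
+    // above, then visiting the stored coefficients with this InnerIterator:
+    // \code
+    //   SparseMatrix<double> m(3,3);   // column major by default
+    //   m.startFill(4);                // rough estimate of the number of non zeros
+    //   m.fill(0,0) = 1.0;             // one outer vector (here: column) at a time,
+    //   m.fill(2,0) = 2.0;             // with increasing inner (row) indices
+    //   m.fill(1,1) = 3.0;
+    //   m.fill(2,2) = 4.0;
+    //   m.endFill();
+    //   for (int k=0; k<m.outerSize(); ++k)
+    //     for (SparseMatrix<double>::InnerIterator it(m,k); it; ++it)
+    //       std::cout << "(" << it.row() << "," << it.col() << ") = " << it.value() << "\n";
+    // \endcode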
InnerIterator(const SparseMatrix& mat, int outer) + : m_matrix(mat), m_outer(outer), m_id(mat.m_outerIndex[outer]), m_start(m_id), m_end(mat.m_outerIndex[outer+1]) + {} + + template + InnerIterator(const Flagged& mat, int outer) + : m_matrix(mat._expression()), m_outer(outer), m_id(m_matrix.m_outerIndex[outer]), + m_start(m_id), m_end(m_matrix.m_outerIndex[outer+1]) + {} + + inline InnerIterator& operator++() { m_id++; return *this; } + + inline Scalar value() const { return m_matrix.m_data.value(m_id); } + inline Scalar& valueRef() { return const_cast(m_matrix.m_data.value(m_id)); } + + inline int index() const { return m_matrix.m_data.index(m_id); } + inline int row() const { return IsRowMajor ? m_outer : index(); } + inline int col() const { return IsRowMajor ? index() : m_outer; } + + inline operator bool() const { return (m_id < m_end) && (m_id>=m_start); } + + protected: + const SparseMatrix& m_matrix; + const int m_outer; + int m_id; + const int m_start; + const int m_end; +}; + +#endif // EIGEN_SPARSEMATRIX_H diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseMatrixBase.h b/extern/Eigen2/Eigen/src/Sparse/SparseMatrixBase.h new file mode 100644 index 00000000000..468bc9e227c --- /dev/null +++ b/extern/Eigen2/Eigen/src/Sparse/SparseMatrixBase.h @@ -0,0 +1,626 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_SPARSEMATRIXBASE_H +#define EIGEN_SPARSEMATRIXBASE_H + +template class SparseMatrixBase +{ + public: + + typedef typename ei_traits::Scalar Scalar; +// typedef typename Derived::InnerIterator InnerIterator; + + enum { + + RowsAtCompileTime = ei_traits::RowsAtCompileTime, + /**< The number of rows at compile-time. This is just a copy of the value provided + * by the \a Derived type. If a value is not known at compile-time, + * it is set to the \a Dynamic constant. + * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */ + + ColsAtCompileTime = ei_traits::ColsAtCompileTime, + /**< The number of columns at compile-time. This is just a copy of the value provided + * by the \a Derived type. If a value is not known at compile-time, + * it is set to the \a Dynamic constant. + * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */ + + + SizeAtCompileTime = (ei_size_at_compile_time::RowsAtCompileTime, + ei_traits::ColsAtCompileTime>::ret), + /**< This is equal to the number of coefficients, i.e. 
the number of + * rows times the number of columns, or to \a Dynamic if this is not + * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */ + + MaxRowsAtCompileTime = RowsAtCompileTime, + MaxColsAtCompileTime = ColsAtCompileTime, + + MaxSizeAtCompileTime = (ei_size_at_compile_time::ret), + + IsVectorAtCompileTime = RowsAtCompileTime == 1 || ColsAtCompileTime == 1, + /**< This is set to true if either the number of rows or the number of + * columns is known at compile-time to be equal to 1. Indeed, in that case, + * we are dealing with a column-vector (if there is only one column) or with + * a row-vector (if there is only one row). */ + + Flags = ei_traits::Flags, + /**< This stores expression \ref flags flags which may or may not be inherited by new expressions + * constructed from this one. See the \ref flags "list of flags". + */ + + CoeffReadCost = ei_traits::CoeffReadCost, + /**< This is a rough measure of how expensive it is to read one coefficient from + * this expression. + */ + + IsRowMajor = Flags&RowMajorBit ? 1 : 0 + }; + + /** \internal the return type of MatrixBase::conjugate() */ + typedef typename ei_meta_if::IsComplex, + const SparseCwiseUnaryOp, Derived>, + const Derived& + >::ret ConjugateReturnType; + /** \internal the return type of MatrixBase::real() */ + typedef CwiseUnaryOp, Derived> RealReturnType; + /** \internal the return type of MatrixBase::imag() */ + typedef CwiseUnaryOp, Derived> ImagReturnType; + /** \internal the return type of MatrixBase::adjoint() */ + typedef SparseTranspose::type> /*>*/ + AdjointReturnType; + +#ifndef EIGEN_PARSED_BY_DOXYGEN + /** This is the "real scalar" type; if the \a Scalar type is already real numbers + * (e.g. int, float or double) then \a RealScalar is just the same as \a Scalar. If + * \a Scalar is \a std::complex then RealScalar is \a T. + * + * \sa class NumTraits + */ + typedef typename NumTraits::Real RealScalar; + + /** type of the equivalent square matrix */ + typedef Matrix SquareMatrixType; + + inline const Derived& derived() const { return *static_cast(this); } + inline Derived& derived() { return *static_cast(this); } + inline Derived& const_cast_derived() const + { return *static_cast(const_cast(this)); } +#endif // not EIGEN_PARSED_BY_DOXYGEN + + /** \returns the number of rows. \sa cols(), RowsAtCompileTime */ + inline int rows() const { return derived().rows(); } + /** \returns the number of columns. \sa rows(), ColsAtCompileTime*/ + inline int cols() const { return derived().cols(); } + /** \returns the number of coefficients, which is \a rows()*cols(). + * \sa rows(), cols(), SizeAtCompileTime. */ + inline int size() const { return rows() * cols(); } + /** \returns the number of nonzero coefficients which is in practice the number + * of stored coefficients. */ + inline int nonZeros() const { return derived().nonZeros(); } + /** \returns true if either the number of rows or the number of columns is equal to 1. + * In other words, this function returns + * \code rows()==1 || cols()==1 \endcode + * \sa rows(), cols(), IsVectorAtCompileTime. */ + inline bool isVector() const { return rows()==1 || cols()==1; } + /** \returns the size of the storage major dimension, + * i.e., the number of columns for a columns major matrix, and the number of rows otherwise */ + int outerSize() const { return (int(Flags)&RowMajorBit) ? 
this->rows() : this->cols(); } + /** \returns the size of the inner dimension according to the storage order, + * i.e., the number of rows for a columns major matrix, and the number of cols otherwise */ + int innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); } + + bool isRValue() const { return m_isRValue; } + Derived& markAsRValue() { m_isRValue = true; return derived(); } + + SparseMatrixBase() : m_isRValue(false) { /* TODO check flags */ } + + inline Derived& operator=(const Derived& other) + { +// std::cout << "Derived& operator=(const Derived& other)\n"; +// if (other.isRValue()) +// derived().swap(other.const_cast_derived()); +// else + this->operator=(other); + return derived(); + } + + + template + inline void assignGeneric(const OtherDerived& other) + { +// std::cout << "Derived& operator=(const MatrixBase& other)\n"; + //const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit); + ei_assert(( ((ei_traits::SupportedAccessPatterns&OuterRandomAccessPattern)==OuterRandomAccessPattern) || + (!((Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit)))) && + "the transpose operation is supposed to be handled in SparseMatrix::operator="); + + const int outerSize = other.outerSize(); + //typedef typename ei_meta_if, Derived>::ret TempType; + // thanks to shallow copies, we always eval to a tempary + Derived temp(other.rows(), other.cols()); + + temp.startFill(std::max(this->rows(),this->cols())*2); + for (int j=0; j + inline Derived& operator=(const SparseMatrixBase& other) + { +// std::cout << typeid(OtherDerived).name() << "\n"; +// std::cout << Flags << " " << OtherDerived::Flags << "\n"; + const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit); +// std::cout << "eval transpose = " << transpose << "\n"; + const int outerSize = (int(OtherDerived::Flags) & RowMajorBit) ? 
other.rows() : other.cols(); + if ((!transpose) && other.isRValue()) + { + // eval without temporary + derived().resize(other.rows(), other.cols()); + derived().startFill(std::max(this->rows(),this->cols())*2); + for (int j=0; j + inline Derived& operator=(const SparseProduct& product); + + friend std::ostream & operator << (std::ostream & s, const SparseMatrixBase& m) + { + if (Flags&RowMajorBit) + { + for (int row=0; row trans = m.derived(); + s << trans; + } + } + return s; + } + + const SparseCwiseUnaryOp::Scalar>,Derived> operator-() const; + + template + const SparseCwiseBinaryOp::Scalar>, Derived, OtherDerived> + operator+(const SparseMatrixBase &other) const; + + template + const SparseCwiseBinaryOp::Scalar>, Derived, OtherDerived> + operator-(const SparseMatrixBase &other) const; + + template + Derived& operator+=(const SparseMatrixBase& other); + template + Derived& operator-=(const SparseMatrixBase& other); + +// template +// Derived& operator+=(const Flagged, 0, EvalBeforeNestingBit | EvalBeforeAssigningBit>& other); + + Derived& operator*=(const Scalar& other); + Derived& operator/=(const Scalar& other); + + const SparseCwiseUnaryOp::Scalar>, Derived> + operator*(const Scalar& scalar) const; + const SparseCwiseUnaryOp::Scalar>, Derived> + operator/(const Scalar& scalar) const; + + inline friend const SparseCwiseUnaryOp::Scalar>, Derived> + operator*(const Scalar& scalar, const SparseMatrixBase& matrix) + { return matrix*scalar; } + + + template + const typename SparseProductReturnType::Type + operator*(const SparseMatrixBase &other) const; + + // dense * sparse (return a dense object) + template friend + const typename SparseProductReturnType::Type + operator*(const MatrixBase& lhs, const Derived& rhs) + { return typename SparseProductReturnType::Type(lhs.derived(),rhs); } + + template + const typename SparseProductReturnType::Type + operator*(const MatrixBase &other) const; + + template + Derived& operator*=(const SparseMatrixBase& other); + + template + typename ei_plain_matrix_type_column_major::type + solveTriangular(const MatrixBase& other) const; + + template + void solveTriangularInPlace(MatrixBase& other) const; + + template Scalar dot(const MatrixBase& other) const; + template Scalar dot(const SparseMatrixBase& other) const; + RealScalar squaredNorm() const; + RealScalar norm() const; +// const PlainMatrixType normalized() const; +// void normalize(); + + SparseTranspose transpose() { return derived(); } + const SparseTranspose transpose() const { return derived(); } + // void transposeInPlace(); + const AdjointReturnType adjoint() const { return conjugate()/*.nestByValue()*/; } + + // sub-vector + SparseInnerVectorSet row(int i); + const SparseInnerVectorSet row(int i) const; + SparseInnerVectorSet col(int j); + const SparseInnerVectorSet col(int j) const; + SparseInnerVectorSet innerVector(int outer); + const SparseInnerVectorSet innerVector(int outer) const; + + // set of sub-vectors + SparseInnerVectorSet subrows(int start, int size); + const SparseInnerVectorSet subrows(int start, int size) const; + SparseInnerVectorSet subcols(int start, int size); + const SparseInnerVectorSet subcols(int start, int size) const; + SparseInnerVectorSet innerVectors(int outerStart, int outerSize); + const SparseInnerVectorSet innerVectors(int outerStart, int outerSize) const; + +// typename BlockReturnType::Type block(int startRow, int startCol, int blockRows, int blockCols); +// const typename BlockReturnType::Type +// block(int startRow, int startCol, int blockRows, int 
blockCols) const; +// +// typename BlockReturnType::SubVectorType segment(int start, int size); +// const typename BlockReturnType::SubVectorType segment(int start, int size) const; +// +// typename BlockReturnType::SubVectorType start(int size); +// const typename BlockReturnType::SubVectorType start(int size) const; +// +// typename BlockReturnType::SubVectorType end(int size); +// const typename BlockReturnType::SubVectorType end(int size) const; +// +// typename BlockReturnType::Type corner(CornerType type, int cRows, int cCols); +// const typename BlockReturnType::Type corner(CornerType type, int cRows, int cCols) const; +// +// template +// typename BlockReturnType::Type block(int startRow, int startCol); +// template +// const typename BlockReturnType::Type block(int startRow, int startCol) const; + +// template +// typename BlockReturnType::Type corner(CornerType type); +// template +// const typename BlockReturnType::Type corner(CornerType type) const; + +// template typename BlockReturnType::SubVectorType start(void); +// template const typename BlockReturnType::SubVectorType start() const; + +// template typename BlockReturnType::SubVectorType end(); +// template const typename BlockReturnType::SubVectorType end() const; + +// template typename BlockReturnType::SubVectorType segment(int start); +// template const typename BlockReturnType::SubVectorType segment(int start) const; + +// DiagonalCoeffs diagonal(); +// const DiagonalCoeffs diagonal() const; + +// template Part part(); +// template const Part part() const; + + +// static const ConstantReturnType Constant(int rows, int cols, const Scalar& value); +// static const ConstantReturnType Constant(int size, const Scalar& value); +// static const ConstantReturnType Constant(const Scalar& value); + +// template +// static const CwiseNullaryOp NullaryExpr(int rows, int cols, const CustomNullaryOp& func); +// template +// static const CwiseNullaryOp NullaryExpr(int size, const CustomNullaryOp& func); +// template +// static const CwiseNullaryOp NullaryExpr(const CustomNullaryOp& func); + +// static const ConstantReturnType Zero(int rows, int cols); +// static const ConstantReturnType Zero(int size); +// static const ConstantReturnType Zero(); +// static const ConstantReturnType Ones(int rows, int cols); +// static const ConstantReturnType Ones(int size); +// static const ConstantReturnType Ones(); +// static const IdentityReturnType Identity(); +// static const IdentityReturnType Identity(int rows, int cols); +// static const BasisReturnType Unit(int size, int i); +// static const BasisReturnType Unit(int i); +// static const BasisReturnType UnitX(); +// static const BasisReturnType UnitY(); +// static const BasisReturnType UnitZ(); +// static const BasisReturnType UnitW(); + +// const DiagonalMatrix asDiagonal() const; + +// Derived& setConstant(const Scalar& value); +// Derived& setZero(); +// Derived& setOnes(); +// Derived& setRandom(); +// Derived& setIdentity(); + + Matrix toDense() const + { + Matrix res(rows(),cols()); + res.setZero(); + for (int j=0; j + bool isApprox(const SparseMatrixBase& other, + RealScalar prec = precision()) const + { return toDense().isApprox(other.toDense(),prec); } + + template + bool isApprox(const MatrixBase& other, + RealScalar prec = precision()) const + { return toDense().isApprox(other,prec); } +// bool isMuchSmallerThan(const RealScalar& other, +// RealScalar prec = precision()) const; +// template +// bool isMuchSmallerThan(const MatrixBase& other, +// RealScalar prec = precision()) 
const; + +// bool isApproxToConstant(const Scalar& value, RealScalar prec = precision()) const; +// bool isZero(RealScalar prec = precision()) const; +// bool isOnes(RealScalar prec = precision()) const; +// bool isIdentity(RealScalar prec = precision()) const; +// bool isDiagonal(RealScalar prec = precision()) const; + +// bool isUpperTriangular(RealScalar prec = precision()) const; +// bool isLowerTriangular(RealScalar prec = precision()) const; + +// template +// bool isOrthogonal(const MatrixBase& other, +// RealScalar prec = precision()) const; +// bool isUnitary(RealScalar prec = precision()) const; + +// template +// inline bool operator==(const MatrixBase& other) const +// { return (cwise() == other).all(); } + +// template +// inline bool operator!=(const MatrixBase& other) const +// { return (cwise() != other).any(); } + + + template + const SparseCwiseUnaryOp::Scalar, NewType>, Derived> cast() const; + + /** \returns the matrix or vector obtained by evaluating this expression. + * + * Notice that in the case of a plain matrix or vector (not an expression) this function just returns + * a const reference, in order to avoid a useless copy. + */ + EIGEN_STRONG_INLINE const typename ei_eval::type eval() const + { return typename ei_eval::type(derived()); } + +// template +// void swap(const MatrixBase& other); + + template + const SparseFlagged marked() const; +// const Flagged lazy() const; + + /** \returns number of elements to skip to pass from one row (resp. column) to another + * for a row-major (resp. column-major) matrix. + * Combined with coeffRef() and the \ref flags flags, it allows a direct access to the data + * of the underlying matrix. + */ +// inline int stride(void) const { return derived().stride(); } + +// inline const NestByValue nestByValue() const; + + + ConjugateReturnType conjugate() const; + const RealReturnType real() const; + const ImagReturnType imag() const; + + template + const SparseCwiseUnaryOp unaryExpr(const CustomUnaryOp& func = CustomUnaryOp()) const; + +// template +// const CwiseBinaryOp +// binaryExpr(const MatrixBase &other, const CustomBinaryOp& func = CustomBinaryOp()) const; + + + Scalar sum() const; +// Scalar trace() const; + +// typename ei_traits::Scalar minCoeff() const; +// typename ei_traits::Scalar maxCoeff() const; + +// typename ei_traits::Scalar minCoeff(int* row, int* col = 0) const; +// typename ei_traits::Scalar maxCoeff(int* row, int* col = 0) const; + +// template +// typename ei_result_of::Scalar)>::type +// redux(const BinaryOp& func) const; + +// template +// void visit(Visitor& func) const; + + + const SparseCwise cwise() const; + SparseCwise cwise(); + +// inline const WithFormat format(const IOFormat& fmt) const; + +/////////// Array module /////////// + /* + bool all(void) const; + bool any(void) const; + + const PartialRedux rowwise() const; + const PartialRedux colwise() const; + + static const CwiseNullaryOp,Derived> Random(int rows, int cols); + static const CwiseNullaryOp,Derived> Random(int size); + static const CwiseNullaryOp,Derived> Random(); + + template + const Select + select(const MatrixBase& thenMatrix, + const MatrixBase& elseMatrix) const; + + template + inline const Select > + select(const MatrixBase& thenMatrix, typename ThenDerived::Scalar elseScalar) const; + + template + inline const Select, ElseDerived > + select(typename ElseDerived::Scalar thenScalar, const MatrixBase& elseMatrix) const; + + template RealScalar lpNorm() const; + */ + + +// template +// Scalar dot(const MatrixBase& other) const 
+// { +// EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived) +// EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived) +// EIGEN_STATIC_ASSERT((ei_is_same_type::ret), +// YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) +// +// ei_assert(derived().size() == other.size()); +// // short version, but the assembly looks more complicated because +// // of the CwiseBinaryOp iterator complexity +// // return res = (derived().cwise() * other.derived().conjugate()).sum(); +// +// // optimized, generic version +// typename Derived::InnerIterator i(derived(),0); +// typename OtherDerived::InnerIterator j(other.derived(),0); +// Scalar res = 0; +// while (i && j) +// { +// if (i.index()==j.index()) +// { +// // std::cerr << i.value() << " * " << j.value() << "\n"; +// res += i.value() * ei_conj(j.value()); +// ++i; ++j; +// } +// else if (i.index() +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_SPARSEPRODUCT_H +#define EIGEN_SPARSEPRODUCT_H + +template struct ei_sparse_product_mode +{ + enum { + + value = ((Lhs::Flags&Diagonal)==Diagonal || (Rhs::Flags&Diagonal)==Diagonal) + ? DiagonalProduct + : (Rhs::Flags&Lhs::Flags&SparseBit)==SparseBit + ? SparseTimeSparseProduct + : (Lhs::Flags&SparseBit)==SparseBit + ? 
SparseTimeDenseProduct + : DenseTimeSparseProduct }; +}; + +template +struct SparseProductReturnType +{ + typedef const typename ei_nested::type LhsNested; + typedef const typename ei_nested::type RhsNested; + + typedef SparseProduct Type; +}; + +template +struct SparseProductReturnType +{ + typedef const typename ei_nested::type LhsNested; + typedef const typename ei_nested::type RhsNested; + + typedef SparseDiagonalProduct Type; +}; + +// sparse product return type specialization +template +struct SparseProductReturnType +{ + typedef typename ei_traits::Scalar Scalar; + enum { + LhsRowMajor = ei_traits::Flags & RowMajorBit, + RhsRowMajor = ei_traits::Flags & RowMajorBit, + TransposeRhs = (!LhsRowMajor) && RhsRowMajor, + TransposeLhs = LhsRowMajor && (!RhsRowMajor) + }; + + // FIXME if we transpose let's evaluate to a LinkedVectorMatrix since it is the + // type of the temporary to perform the transpose op + typedef typename ei_meta_if, + const typename ei_nested::type>::ret LhsNested; + + typedef typename ei_meta_if, + const typename ei_nested::type>::ret RhsNested; + + typedef SparseProduct Type; +}; + +template +struct ei_traits > +{ + // clean the nested types: + typedef typename ei_cleantype::type _LhsNested; + typedef typename ei_cleantype::type _RhsNested; + typedef typename _LhsNested::Scalar Scalar; + + enum { + LhsCoeffReadCost = _LhsNested::CoeffReadCost, + RhsCoeffReadCost = _RhsNested::CoeffReadCost, + LhsFlags = _LhsNested::Flags, + RhsFlags = _RhsNested::Flags, + + RowsAtCompileTime = _LhsNested::RowsAtCompileTime, + ColsAtCompileTime = _RhsNested::ColsAtCompileTime, + InnerSize = EIGEN_ENUM_MIN(_LhsNested::ColsAtCompileTime, _RhsNested::RowsAtCompileTime), + + MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime, + MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime, + +// LhsIsRowMajor = (LhsFlags & RowMajorBit)==RowMajorBit, +// RhsIsRowMajor = (RhsFlags & RowMajorBit)==RowMajorBit, + + EvalToRowMajor = (RhsFlags & LhsFlags & RowMajorBit), + ResultIsSparse = ProductMode==SparseTimeSparseProduct || ProductMode==DiagonalProduct, + + RemovedBits = ~( (EvalToRowMajor ? 0 : RowMajorBit) | (ResultIsSparse ? 
0 : SparseBit) ), + + Flags = (int(LhsFlags | RhsFlags) & HereditaryBits & RemovedBits) + | EvalBeforeAssigningBit + | EvalBeforeNestingBit, + + CoeffReadCost = Dynamic + }; + + typedef typename ei_meta_if >, + MatrixBase > >::ret Base; +}; + +template +class SparseProduct : ei_no_assignment_operator, + public ei_traits >::Base +{ + public: + + EIGEN_GENERIC_PUBLIC_INTERFACE(SparseProduct) + + private: + + typedef typename ei_traits::_LhsNested _LhsNested; + typedef typename ei_traits::_RhsNested _RhsNested; + + public: + + template + EIGEN_STRONG_INLINE SparseProduct(const Lhs& lhs, const Rhs& rhs) + : m_lhs(lhs), m_rhs(rhs) + { + ei_assert(lhs.cols() == rhs.rows()); + + enum { + ProductIsValid = _LhsNested::ColsAtCompileTime==Dynamic + || _RhsNested::RowsAtCompileTime==Dynamic + || int(_LhsNested::ColsAtCompileTime)==int(_RhsNested::RowsAtCompileTime), + AreVectors = _LhsNested::IsVectorAtCompileTime && _RhsNested::IsVectorAtCompileTime, + SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(_LhsNested,_RhsNested) + }; + // note to the lost user: + // * for a dot product use: v1.dot(v2) + // * for a coeff-wise product use: v1.cwise()*v2 + EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes), + INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS) + EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors), + INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION) + EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT) + } + + EIGEN_STRONG_INLINE int rows() const { return m_lhs.rows(); } + EIGEN_STRONG_INLINE int cols() const { return m_rhs.cols(); } + + EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; } + EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; } + + protected: + LhsNested m_lhs; + RhsNested m_rhs; +}; + +// perform a pseudo in-place sparse * sparse product assuming all matrices are col major +template +static void ei_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res) +{ + typedef typename ei_traits::type>::Scalar Scalar; + + // make sure to call innerSize/outerSize since we fake the storage order. 
+ int rows = lhs.innerSize(); + int cols = rhs.outerSize(); + //int size = lhs.outerSize(); + ei_assert(lhs.outerSize() == rhs.innerSize()); + + // allocate a temporary buffer + AmbiVector tempVector(rows); + + // estimate the number of non zero entries + float ratioLhs = float(lhs.nonZeros())/(float(lhs.rows())*float(lhs.cols())); + float avgNnzPerRhsColumn = float(rhs.nonZeros())/float(cols); + float ratioRes = std::min(ratioLhs * avgNnzPerRhsColumn, 1.f); + + res.resize(rows, cols); + res.startFill(int(ratioRes*rows*cols)); + for (int j=0; j::Iterator it(tempVector); it; ++it) + if (ResultType::Flags&RowMajorBit) + res.fill(j,it.index()) = it.value(); + else + res.fill(it.index(), j) = it.value(); + } + res.endFill(); +} + +template::Flags&RowMajorBit, + int RhsStorageOrder = ei_traits::Flags&RowMajorBit, + int ResStorageOrder = ei_traits::Flags&RowMajorBit> +struct ei_sparse_product_selector; + +template +struct ei_sparse_product_selector +{ + typedef typename ei_traits::type>::Scalar Scalar; + + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) + { + typename ei_cleantype::type _res(res.rows(), res.cols()); + ei_sparse_product_impl(lhs, rhs, _res); + res.swap(_res); + } +}; + +template +struct ei_sparse_product_selector +{ + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) + { + // we need a col-major matrix to hold the result + typedef SparseMatrix SparseTemporaryType; + SparseTemporaryType _res(res.rows(), res.cols()); + ei_sparse_product_impl(lhs, rhs, _res); + res = _res; + } +}; + +template +struct ei_sparse_product_selector +{ + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) + { + // let's transpose the product to get a column x column product + typename ei_cleantype::type _res(res.rows(), res.cols()); + ei_sparse_product_impl(rhs, lhs, _res); + res.swap(_res); + } +}; + +template +struct ei_sparse_product_selector +{ + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) + { + // let's transpose the product to get a column x column product + typedef SparseMatrix SparseTemporaryType; + SparseTemporaryType _res(res.cols(), res.rows()); + ei_sparse_product_impl(rhs, lhs, _res); + res = _res.transpose(); + } +}; + +// NOTE eventually let's transpose one argument even in this case since it might be expensive if +// the result is not dense. +// template +// struct ei_sparse_product_selector +// { +// static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res) +// { +// // trivial product as lhs.row/rhs.col dot products +// // loop over the preferred order of the result +// } +// }; + +// NOTE the 2 others cases (col row *) must never occurs since they are caught +// by ProductReturnType which transform it to (col col *) by evaluating rhs. 
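For reference, a minimal usage sketch of the sparse-times-sparse path implemented above (not part of the patch; sizes and values are arbitrary, and the startFill()/fill()/endFill() sequence is the fill API used elsewhere in this commit):

    #include <Eigen/Sparse>
    using namespace Eigen;

    int main()
    {
      // two small column-major sparse matrices, filled in column order
      SparseMatrix<double> A(3,3), B(3,3);
      A.startFill(4);
      A.fill(0,0) = 1.0;  A.fill(2,0) = 2.0;   // column 0
      A.fill(1,1) = 3.0;                       // column 1
      A.fill(2,2) = 4.0;                       // column 2
      A.endFill();
      B.startFill(3);
      B.fill(1,0) = 5.0;
      B.fill(0,1) = 6.0;
      B.fill(2,2) = 7.0;
      B.endFill();

      // operator* builds a SparseProduct expression; assigning it to a
      // SparseMatrix runs ei_sparse_product_impl() through the storage-order
      // selectors defined above.
      SparseMatrix<double> C = A * B;
      return 0;
    }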
+ + +// template +// template +// inline Derived& SparseMatrixBase::lazyAssign(const SparseProduct& product) +// { +// // std::cout << "sparse product to dense\n"; +// ei_sparse_product_selector< +// typename ei_cleantype::type, +// typename ei_cleantype::type, +// typename ei_cleantype::type>::run(product.lhs(),product.rhs(),derived()); +// return derived(); +// } + +// sparse = sparse * sparse +template +template +inline Derived& SparseMatrixBase::operator=(const SparseProduct& product) +{ + ei_sparse_product_selector< + typename ei_cleantype::type, + typename ei_cleantype::type, + Derived>::run(product.lhs(),product.rhs(),derived()); + return derived(); +} + +// dense = sparse * dense +// template +// template +// Derived& MatrixBase::lazyAssign(const SparseProduct& product) +// { +// typedef typename ei_cleantype::type _Lhs; +// typedef typename _Lhs::InnerIterator LhsInnerIterator; +// enum { LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit }; +// derived().setZero(); +// for (int j=0; j +template +Derived& MatrixBase::lazyAssign(const SparseProduct& product) +{ + typedef typename ei_cleantype::type _Lhs; + typedef typename ei_cleantype::type _Rhs; + typedef typename _Lhs::InnerIterator LhsInnerIterator; + enum { + LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit, + LhsIsSelfAdjoint = (_Lhs::Flags&SelfAdjointBit)==SelfAdjointBit, + ProcessFirstHalf = LhsIsSelfAdjoint + && ( ((_Lhs::Flags&(UpperTriangularBit|LowerTriangularBit))==0) + || ( (_Lhs::Flags&UpperTriangularBit) && !LhsIsRowMajor) + || ( (_Lhs::Flags&LowerTriangularBit) && LhsIsRowMajor) ), + ProcessSecondHalf = LhsIsSelfAdjoint && (!ProcessFirstHalf) + }; + derived().setZero(); + for (int j=0; j res(derived().row(LhsIsRowMajor ? j : 0)); + for (; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i) + { + if (LhsIsSelfAdjoint) + { + int a = LhsIsRowMajor ? j : i.index(); + int b = LhsIsRowMajor ? i.index() : j; + Scalar v = i.value(); + derived().row(a) += (v) * product.rhs().row(b); + derived().row(b) += ei_conj(v) * product.rhs().row(a); + } + else if (LhsIsRowMajor) + res += i.value() * product.rhs().row(i.index()); + else + derived().row(i.index()) += i.value() * product.rhs().row(j); + } + if (ProcessFirstHalf && i && (i.index()==j)) + derived().row(j) += i.value() * product.rhs().row(j); + } + return derived(); +} + +// dense = dense * sparse +template +template +Derived& MatrixBase::lazyAssign(const SparseProduct& product) +{ + typedef typename ei_cleantype::type _Rhs; + typedef typename _Rhs::InnerIterator RhsInnerIterator; + enum { RhsIsRowMajor = (_Rhs::Flags&RowMajorBit)==RowMajorBit }; + derived().setZero(); + for (int j=0; j +template +EIGEN_STRONG_INLINE const typename SparseProductReturnType::Type +SparseMatrixBase::operator*(const SparseMatrixBase &other) const +{ + return typename SparseProductReturnType::Type(derived(), other.derived()); +} + +// sparse * dense +template +template +EIGEN_STRONG_INLINE const typename SparseProductReturnType::Type +SparseMatrixBase::operator*(const MatrixBase &other) const +{ + return typename SparseProductReturnType::Type(derived(), other.derived()); +} + +#endif // EIGEN_SPARSEPRODUCT_H diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseRedux.h b/extern/Eigen2/Eigen/src/Sparse/SparseRedux.h new file mode 100644 index 00000000000..f0d3705488e --- /dev/null +++ b/extern/Eigen2/Eigen/src/Sparse/SparseRedux.h @@ -0,0 +1,40 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. 
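The operator* overloads and lazyAssign() specializations above cover the mixed cases, which produce a dense result. A self-contained sketch (again not part of the patch; VectorXd and RowVectorXd come from Eigen's core module, which the Sparse header pulls in):

    #include <Eigen/Sparse>
    using namespace Eigen;

    int main()
    {
      SparseMatrix<double> A(3,3);
      A.startFill(3);
      A.fill(0,0) = 1.0;
      A.fill(1,1) = 2.0;
      A.fill(2,2) = 3.0;
      A.endFill();

      VectorXd x = VectorXd::Ones(3);
      VectorXd    y = A * x;               // sparse * dense  -> SparseTimeDenseProduct
      RowVectorXd z = x.transpose() * A;   // dense  * sparse -> DenseTimeSparseProduct
      return 0;
    }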
+// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_SPARSEREDUX_H +#define EIGEN_SPARSEREDUX_H + +template +typename ei_traits::Scalar +SparseMatrixBase::sum() const +{ + ei_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix"); + Scalar res = 0; + for (int j=0; j +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_SPARSETRANSPOSE_H +#define EIGEN_SPARSETRANSPOSE_H + +template +struct ei_traits > : ei_traits > +{}; + +template class SparseTranspose + : public SparseMatrixBase > +{ + public: + + EIGEN_GENERIC_PUBLIC_INTERFACE(SparseTranspose) + + class InnerIterator; + class ReverseInnerIterator; + + inline SparseTranspose(const MatrixType& matrix) : m_matrix(matrix) {} + + //EIGEN_INHERIT_ASSIGNMENT_OPERATORS(SparseTranspose) + + inline int rows() const { return m_matrix.cols(); } + inline int cols() const { return m_matrix.rows(); } + inline int nonZeros() const { return m_matrix.nonZeros(); } + + // FIXME should be keep them ? 
+ inline Scalar& coeffRef(int row, int col) + { return m_matrix.const_cast_derived().coeffRef(col, row); } + + inline const Scalar coeff(int row, int col) const + { return m_matrix.coeff(col, row); } + + inline const Scalar coeff(int index) const + { return m_matrix.coeff(index); } + + inline Scalar& coeffRef(int index) + { return m_matrix.const_cast_derived().coeffRef(index); } + + protected: + const typename MatrixType::Nested m_matrix; +}; + +template class SparseTranspose::InnerIterator : public MatrixType::InnerIterator +{ + public: + + EIGEN_STRONG_INLINE InnerIterator(const SparseTranspose& trans, int outer) + : MatrixType::InnerIterator(trans.m_matrix, outer) + {} +}; + +template class SparseTranspose::ReverseInnerIterator : public MatrixType::ReverseInnerIterator +{ + public: + + EIGEN_STRONG_INLINE ReverseInnerIterator(const SparseTranspose& xpr, int outer) + : MatrixType::ReverseInnerIterator(xpr.m_matrix, outer) + {} +}; + +#endif // EIGEN_SPARSETRANSPOSE_H diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseUtil.h b/extern/Eigen2/Eigen/src/Sparse/SparseUtil.h new file mode 100644 index 00000000000..393cdda6ea2 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Sparse/SparseUtil.h @@ -0,0 +1,148 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
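The sum() reduction from SparseRedux.h and the SparseTranspose view above can be exercised as follows (illustrative only; copying the transpose into a column-major SparseMatrix relies on the transposing copy path of SparseMatrix::operator=, added earlier in this commit):

    #include <Eigen/Sparse>
    using namespace Eigen;

    int main()
    {
      SparseMatrix<double> A(2,3);
      A.startFill(3);
      A.fill(0,0) = 1.0;
      A.fill(1,1) = 2.0;
      A.fill(0,2) = 3.0;
      A.endFill();

      double total = A.sum();                   // iterates stored coefficients only: 6.0
      SparseMatrix<double> At = A.transpose();  // SparseTranspose is a view; the copy evaluates it
      return (total == 6.0 && At.nonZeros() == 3) ? 0 : 1;
    }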
+ +#ifndef EIGEN_SPARSEUTIL_H +#define EIGEN_SPARSEUTIL_H + +#ifdef NDEBUG +#define EIGEN_DBG_SPARSE(X) +#else +#define EIGEN_DBG_SPARSE(X) X +#endif + +#define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, Op) \ +template \ +EIGEN_STRONG_INLINE Derived& operator Op(const Eigen::SparseMatrixBase& other) \ +{ \ + return Base::operator Op(other.derived()); \ +} \ +EIGEN_STRONG_INLINE Derived& operator Op(const Derived& other) \ +{ \ + return Base::operator Op(other); \ +} + +#define EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, Op) \ +template \ +EIGEN_STRONG_INLINE Derived& operator Op(const Other& scalar) \ +{ \ + return Base::operator Op(scalar); \ +} + +#define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATORS(Derived) \ +EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, =) \ +EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, +=) \ +EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, -=) \ +EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, *=) \ +EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=) + +#define _EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(Derived, BaseClass) \ +typedef BaseClass Base; \ +typedef typename Eigen::ei_traits::Scalar Scalar; \ +typedef typename Eigen::NumTraits::Real RealScalar; \ +typedef typename Eigen::ei_nested::type Nested; \ +enum { RowsAtCompileTime = Eigen::ei_traits::RowsAtCompileTime, \ + ColsAtCompileTime = Eigen::ei_traits::ColsAtCompileTime, \ + Flags = Eigen::ei_traits::Flags, \ + CoeffReadCost = Eigen::ei_traits::CoeffReadCost, \ + SizeAtCompileTime = Base::SizeAtCompileTime, \ + IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; + +#define EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(Derived) \ +_EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(Derived, Eigen::SparseMatrixBase) + +enum SparseBackend { + DefaultBackend, + Taucs, + Cholmod, + SuperLU, + UmfPack +}; + +// solver flags +enum { + CompleteFactorization = 0x0000, // the default + IncompleteFactorization = 0x0001, + MemoryEfficient = 0x0002, + + // For LLT Cholesky: + SupernodalMultifrontal = 0x0010, + SupernodalLeftLooking = 0x0020, + + // Ordering methods: + NaturalOrdering = 0x0100, // the default + MinimumDegree_AT_PLUS_A = 0x0200, + MinimumDegree_ATA = 0x0300, + ColApproxMinimumDegree = 0x0400, + Metis = 0x0500, + Scotch = 0x0600, + Chaco = 0x0700, + OrderingMask = 0x0f00 +}; + +template class SparseMatrixBase; +template class SparseMatrix; +template class DynamicSparseMatrix; +template class SparseVector; +template class MappedSparseMatrix; + +template class SparseTranspose; +template class SparseInnerVectorSet; +template class SparseCwise; +template class SparseCwiseUnaryOp; +template class SparseCwiseBinaryOp; +template class SparseFlagged; +template class SparseDiagonalProduct; + +template struct ei_sparse_product_mode; +template::value> struct SparseProductReturnType; + +const int CoherentAccessPattern = 0x1; +const int InnerRandomAccessPattern = 0x2 | CoherentAccessPattern; +const int OuterRandomAccessPattern = 0x4 | CoherentAccessPattern; +const int RandomAccessPattern = 0x8 | OuterRandomAccessPattern | InnerRandomAccessPattern; + +// const int AccessPatternNotSupported = 0x0; +// const int AccessPatternSupported = 0x1; +// +// template struct ei_support_access_pattern +// { +// enum { ret = (int(ei_traits::SupportedAccessPatterns) & AccessPattern) == AccessPattern +// ? 
AccessPatternSupported +// : AccessPatternNotSupported +// }; +// }; + +template class ei_eval +{ + typedef typename ei_traits::Scalar _Scalar; + enum { + _Flags = ei_traits::Flags + }; + + public: + typedef SparseMatrix<_Scalar, _Flags> type; +}; + +#endif // EIGEN_SPARSEUTIL_H diff --git a/extern/Eigen2/Eigen/src/Sparse/SparseVector.h b/extern/Eigen2/Eigen/src/Sparse/SparseVector.h new file mode 100644 index 00000000000..8e5a6efeda8 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Sparse/SparseVector.h @@ -0,0 +1,365 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_SPARSEVECTOR_H +#define EIGEN_SPARSEVECTOR_H + +/** \class SparseVector + * + * \brief a sparse vector class + * + * \param _Scalar the scalar type, i.e. the type of the coefficients + * + * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme. + * + */ +template +struct ei_traits > +{ + typedef _Scalar Scalar; + enum { + IsColVector = _Flags & RowMajorBit ? 0 : 1, + + RowsAtCompileTime = IsColVector ? Dynamic : 1, + ColsAtCompileTime = IsColVector ? 1 : Dynamic, + MaxRowsAtCompileTime = RowsAtCompileTime, + MaxColsAtCompileTime = ColsAtCompileTime, + Flags = SparseBit | _Flags, + CoeffReadCost = NumTraits::ReadCost, + SupportedAccessPatterns = InnerRandomAccessPattern + }; +}; + +template +class SparseVector + : public SparseMatrixBase > +{ + public: + EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseVector) + EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=) + EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=) +// EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, =) + + protected: + public: + + typedef SparseMatrixBase SparseBase; + enum { IsColVector = ei_traits::IsColVector }; + + CompressedStorage m_data; + int m_size; + + CompressedStorage& _data() { return m_data; } + CompressedStorage& _data() const { return m_data; } + + public: + + EIGEN_STRONG_INLINE int rows() const { return IsColVector ? m_size : 1; } + EIGEN_STRONG_INLINE int cols() const { return IsColVector ? 
1 : m_size; } + EIGEN_STRONG_INLINE int innerSize() const { return m_size; } + EIGEN_STRONG_INLINE int outerSize() const { return 1; } + EIGEN_STRONG_INLINE int innerNonZeros(int j) const { ei_assert(j==0); return m_size; } + + EIGEN_STRONG_INLINE const Scalar* _valuePtr() const { return &m_data.value(0); } + EIGEN_STRONG_INLINE Scalar* _valuePtr() { return &m_data.value(0); } + + EIGEN_STRONG_INLINE const int* _innerIndexPtr() const { return &m_data.index(0); } + EIGEN_STRONG_INLINE int* _innerIndexPtr() { return &m_data.index(0); } + + inline Scalar coeff(int row, int col) const + { + ei_assert((IsColVector ? col : row)==0); + return coeff(IsColVector ? row : col); + } + inline Scalar coeff(int i) const { return m_data.at(i); } + + inline Scalar& coeffRef(int row, int col) + { + ei_assert((IsColVector ? col : row)==0); + return coeff(IsColVector ? row : col); + } + + /** \returns a reference to the coefficient value at given index \a i + * This operation involes a log(rho*size) binary search. If the coefficient does not + * exist yet, then a sorted insertion into a sequential buffer is performed. + * + * This insertion might be very costly if the number of nonzeros above \a i is large. + */ + inline Scalar& coeffRef(int i) + { + return m_data.atWithInsertion(i); + } + + public: + + class InnerIterator; + + inline void setZero() { m_data.clear(); } + + /** \returns the number of non zero coefficients */ + inline int nonZeros() const { return m_data.size(); } + + /** + */ + inline void reserve(int reserveSize) { m_data.reserve(reserveSize); } + + inline void startFill(int reserve) + { + setZero(); + m_data.reserve(reserve); + } + + /** + */ + inline Scalar& fill(int r, int c) + { + ei_assert(r==0 || c==0); + return fill(IsColVector ? r : c); + } + + inline Scalar& fill(int i) + { + m_data.append(0, i); + return m_data.value(m_data.size()-1); + } + + inline Scalar& fillrand(int r, int c) + { + ei_assert(r==0 || c==0); + return fillrand(IsColVector ? r : c); + } + + /** Like fill() but with random coordinates. + */ + inline Scalar& fillrand(int i) + { + int startId = 0; + int id = m_data.size() - 1; + m_data.resize(id+2,1); + + while ( (id >= startId) && (m_data.index(id) > i) ) + { + m_data.index(id+1) = m_data.index(id); + m_data.value(id+1) = m_data.value(id); + --id; + } + m_data.index(id+1) = i; + m_data.value(id+1) = 0; + return m_data.value(id+1); + } + + inline void endFill() {} + + void prune(Scalar reference, RealScalar epsilon = precision()) + { + m_data.prune(reference,epsilon); + } + + void resize(int rows, int cols) + { + ei_assert(rows==1 || cols==1); + resize(IsColVector ? 
rows : cols); + } + + void resize(int newSize) + { + m_size = newSize; + m_data.clear(); + } + + void resizeNonZeros(int size) { m_data.resize(size); } + + inline SparseVector() : m_size(0) { resize(0); } + + inline SparseVector(int size) : m_size(0) { resize(size); } + + inline SparseVector(int rows, int cols) : m_size(0) { resize(rows,cols); } + + template + inline SparseVector(const MatrixBase& other) + : m_size(0) + { + *this = other.derived(); + } + + template + inline SparseVector(const SparseMatrixBase& other) + : m_size(0) + { + *this = other.derived(); + } + + inline SparseVector(const SparseVector& other) + : m_size(0) + { + *this = other.derived(); + } + + inline void swap(SparseVector& other) + { + std::swap(m_size, other.m_size); + m_data.swap(other.m_data); + } + + inline SparseVector& operator=(const SparseVector& other) + { + if (other.isRValue()) + { + swap(other.const_cast_derived()); + } + else + { + resize(other.size()); + m_data = other.m_data; + } + return *this; + } + + template + inline SparseVector& operator=(const SparseMatrixBase& other) + { + return Base::operator=(other); + } + +// const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit); +// if (needToTranspose) +// { +// // two passes algorithm: +// // 1 - compute the number of coeffs per dest inner vector +// // 2 - do the actual copy/eval +// // Since each coeff of the rhs has to be evaluated twice, let's evauluate it if needed +// typedef typename ei_nested::type OtherCopy; +// OtherCopy otherCopy(other.derived()); +// typedef typename ei_cleantype::type _OtherCopy; +// +// resize(other.rows(), other.cols()); +// Eigen::Map(m_outerIndex,outerSize()).setZero(); +// // pass 1 +// // FIXME the above copy could be merged with that pass +// for (int j=0; j::operator=(other.derived()); +// } +// } + + friend std::ostream & operator << (std::ostream & s, const SparseVector& m) + { + for (unsigned int i=0; i +class SparseVector::InnerIterator +{ + public: + InnerIterator(const SparseVector& vec, int outer=0) + : m_data(vec.m_data), m_id(0), m_end(m_data.size()) + { + ei_assert(outer==0); + } + + InnerIterator(const CompressedStorage& data) + : m_data(data), m_id(0), m_end(m_data.size()) + {} + + template + InnerIterator(const Flagged& vec, int outer) + : m_data(vec._expression().m_data), m_id(0), m_end(m_data.size()) + {} + + inline InnerIterator& operator++() { m_id++; return *this; } + + inline Scalar value() const { return m_data.value(m_id); } + inline Scalar& valueRef() { return const_cast(m_data.value(m_id)); } + + inline int index() const { return m_data.index(m_id); } + inline int row() const { return IsColVector ? index() : 0; } + inline int col() const { return IsColVector ? 0 : index(); } + + inline operator bool() const { return (m_id < m_end); } + + protected: + const CompressedStorage& m_data; + int m_id; + const int m_end; +}; + +#endif // EIGEN_SPARSEVECTOR_H diff --git a/extern/Eigen2/Eigen/src/Sparse/SuperLUSupport.h b/extern/Eigen2/Eigen/src/Sparse/SuperLUSupport.h new file mode 100644 index 00000000000..3c9a4fcced6 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Sparse/SuperLUSupport.h @@ -0,0 +1,565 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. 
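A short usage sketch for the SparseVector class completed above (not part of the patch): fill() expects strictly increasing indices, while fillrand() and coeffRef() tolerate arbitrary order at the cost of a sorted insertion.

    #include <Eigen/Sparse>
    #include <iostream>
    using namespace Eigen;

    int main()
    {
      SparseVector<double> v(10);
      v.startFill(3);
      v.fill(2) = 1.0;       // indices must be increasing with fill()
      v.fill(5) = 2.0;
      v.fill(9) = 3.0;
      v.endFill();

      v.coeffRef(5) += 4.0;  // binary search, coefficient already stored
      v.coeffRef(7)  = 5.0;  // not stored yet: sorted insertion (atWithInsertion)

      // iterate the stored coefficients only
      for (SparseVector<double>::InnerIterator it(v); it; ++it)
        std::cout << it.index() << " -> " << it.value() << "\n";
      return 0;
    }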
+// +// Copyright (C) 2008-2009 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_SUPERLUSUPPORT_H +#define EIGEN_SUPERLUSUPPORT_H + +// declaration of gssvx taken from GMM++ +#define DECL_GSSVX(NAMESPACE,FNAME,FLOATTYPE,KEYTYPE) \ + inline float SuperLU_gssvx(superlu_options_t *options, SuperMatrix *A, \ + int *perm_c, int *perm_r, int *etree, char *equed, \ + FLOATTYPE *R, FLOATTYPE *C, SuperMatrix *L, \ + SuperMatrix *U, void *work, int lwork, \ + SuperMatrix *B, SuperMatrix *X, \ + FLOATTYPE *recip_pivot_growth, \ + FLOATTYPE *rcond, FLOATTYPE *ferr, FLOATTYPE *berr, \ + SuperLUStat_t *stats, int *info, KEYTYPE) { \ + using namespace NAMESPACE; \ + mem_usage_t mem_usage; \ + NAMESPACE::FNAME(options, A, perm_c, perm_r, etree, equed, R, C, L, \ + U, work, lwork, B, X, recip_pivot_growth, rcond, \ + ferr, berr, &mem_usage, stats, info); \ + return mem_usage.for_lu; /* bytes used by the factor storage */ \ + } + +DECL_GSSVX(SuperLU_S,sgssvx,float,float) +DECL_GSSVX(SuperLU_C,cgssvx,float,std::complex) +DECL_GSSVX(SuperLU_D,dgssvx,double,double) +DECL_GSSVX(SuperLU_Z,zgssvx,double,std::complex) + +template +struct SluMatrixMapHelper; + +/** \internal + * + * A wrapper class for SuperLU matrices. It supports only compressed sparse matrices + * and dense matrices. Supernodal and other fancy format are not supported by this wrapper. + * + * This wrapper class mainly aims to avoids the need of dynamic allocation of the storage structure. 
+ */ +struct SluMatrix : SuperMatrix +{ + SluMatrix() + { + Store = &storage; + } + + SluMatrix(const SluMatrix& other) + : SuperMatrix(other) + { + Store = &storage; + storage = other.storage; + } + + SluMatrix& operator=(const SluMatrix& other) + { + SuperMatrix::operator=(static_cast(other)); + Store = &storage; + storage = other.storage; + return *this; + } + + struct + { + union {int nnz;int lda;}; + void *values; + int *innerInd; + int *outerInd; + } storage; + + void setStorageType(Stype_t t) + { + Stype = t; + if (t==SLU_NC || t==SLU_NR || t==SLU_DN) + Store = &storage; + else + { + ei_assert(false && "storage type not supported"); + Store = 0; + } + } + + template + void setScalarType() + { + if (ei_is_same_type::ret) + Dtype = SLU_S; + else if (ei_is_same_type::ret) + Dtype = SLU_D; + else if (ei_is_same_type >::ret) + Dtype = SLU_C; + else if (ei_is_same_type >::ret) + Dtype = SLU_Z; + else + { + ei_assert(false && "Scalar type not supported by SuperLU"); + } + } + + template + static SluMatrix Map(Matrix& mat) + { + typedef Matrix MatrixType; + ei_assert( ((Options&RowMajor)!=RowMajor) && "row-major dense matrices is not supported by SuperLU"); + SluMatrix res; + res.setStorageType(SLU_DN); + res.setScalarType(); + res.Mtype = SLU_GE; + + res.nrow = mat.rows(); + res.ncol = mat.cols(); + + res.storage.lda = mat.stride(); + res.storage.values = mat.data(); + return res; + } + + template + static SluMatrix Map(SparseMatrixBase& mat) + { + SluMatrix res; + if ((MatrixType::Flags&RowMajorBit)==RowMajorBit) + { + res.setStorageType(SLU_NR); + res.nrow = mat.cols(); + res.ncol = mat.rows(); + } + else + { + res.setStorageType(SLU_NC); + res.nrow = mat.rows(); + res.ncol = mat.cols(); + } + + res.Mtype = SLU_GE; + + res.storage.nnz = mat.nonZeros(); + res.storage.values = mat.derived()._valuePtr(); + res.storage.innerInd = mat.derived()._innerIndexPtr(); + res.storage.outerInd = mat.derived()._outerIndexPtr(); + + res.setScalarType(); + + // FIXME the following is not very accurate + if (MatrixType::Flags & UpperTriangular) + res.Mtype = SLU_TRU; + if (MatrixType::Flags & LowerTriangular) + res.Mtype = SLU_TRL; + if (MatrixType::Flags & SelfAdjoint) + ei_assert(false && "SelfAdjoint matrix shape not supported by SuperLU"); + return res; + } +}; + +template +struct SluMatrixMapHelper > +{ + typedef Matrix MatrixType; + static void run(MatrixType& mat, SluMatrix& res) + { + ei_assert( ((Options&RowMajor)!=RowMajor) && "row-major dense matrices is not supported by SuperLU"); + res.setStorageType(SLU_DN); + res.setScalarType(); + res.Mtype = SLU_GE; + + res.nrow = mat.rows(); + res.ncol = mat.cols(); + + res.storage.lda = mat.stride(); + res.storage.values = mat.data(); + } +}; + +template +struct SluMatrixMapHelper > +{ + typedef Derived MatrixType; + static void run(MatrixType& mat, SluMatrix& res) + { + if ((MatrixType::Flags&RowMajorBit)==RowMajorBit) + { + res.setStorageType(SLU_NR); + res.nrow = mat.cols(); + res.ncol = mat.rows(); + } + else + { + res.setStorageType(SLU_NC); + res.nrow = mat.rows(); + res.ncol = mat.cols(); + } + + res.Mtype = SLU_GE; + + res.storage.nnz = mat.nonZeros(); + res.storage.values = mat._valuePtr(); + res.storage.innerInd = mat._innerIndexPtr(); + res.storage.outerInd = mat._outerIndexPtr(); + + res.setScalarType(); + + // FIXME the following is not very accurate + if (MatrixType::Flags & UpperTriangular) + res.Mtype = SLU_TRU; + if (MatrixType::Flags & LowerTriangular) + res.Mtype = SLU_TRL; + if (MatrixType::Flags & SelfAdjoint) + ei_assert(false && 
"SelfAdjoint matrix shape not supported by SuperLU"); + } +}; + +template +SluMatrix SparseMatrixBase::asSluMatrix() +{ + return SluMatrix::Map(derived()); +} + +/** View a Super LU matrix as an Eigen expression */ +template +MappedSparseMatrix::MappedSparseMatrix(SluMatrix& sluMat) +{ + if ((Flags&RowMajorBit)==RowMajorBit) + { + assert(sluMat.Stype == SLU_NR); + m_innerSize = sluMat.ncol; + m_outerSize = sluMat.nrow; + } + else + { + assert(sluMat.Stype == SLU_NC); + m_innerSize = sluMat.nrow; + m_outerSize = sluMat.ncol; + } + m_outerIndex = sluMat.storage.outerInd; + m_innerIndices = sluMat.storage.innerInd; + m_values = reinterpret_cast(sluMat.storage.values); + m_nnz = sluMat.storage.outerInd[m_outerSize]; +} + +template +class SparseLU : public SparseLU +{ + protected: + typedef SparseLU Base; + typedef typename Base::Scalar Scalar; + typedef typename Base::RealScalar RealScalar; + typedef Matrix Vector; + typedef Matrix IntRowVectorType; + typedef Matrix IntColVectorType; + typedef SparseMatrix LMatrixType; + typedef SparseMatrix UMatrixType; + using Base::m_flags; + using Base::m_status; + + public: + + SparseLU(int flags = NaturalOrdering) + : Base(flags) + { + } + + SparseLU(const MatrixType& matrix, int flags = NaturalOrdering) + : Base(flags) + { + compute(matrix); + } + + ~SparseLU() + { + } + + inline const LMatrixType& matrixL() const + { + if (m_extractedDataAreDirty) extractData(); + return m_l; + } + + inline const UMatrixType& matrixU() const + { + if (m_extractedDataAreDirty) extractData(); + return m_u; + } + + inline const IntColVectorType& permutationP() const + { + if (m_extractedDataAreDirty) extractData(); + return m_p; + } + + inline const IntRowVectorType& permutationQ() const + { + if (m_extractedDataAreDirty) extractData(); + return m_q; + } + + Scalar determinant() const; + + template + bool solve(const MatrixBase &b, MatrixBase* x) const; + + void compute(const MatrixType& matrix); + + protected: + + void extractData() const; + + protected: + // cached data to reduce reallocation, etc. 
+ mutable LMatrixType m_l; + mutable UMatrixType m_u; + mutable IntColVectorType m_p; + mutable IntRowVectorType m_q; + + mutable SparseMatrix m_matrix; + mutable SluMatrix m_sluA; + mutable SuperMatrix m_sluL, m_sluU; + mutable SluMatrix m_sluB, m_sluX; + mutable SuperLUStat_t m_sluStat; + mutable superlu_options_t m_sluOptions; + mutable std::vector m_sluEtree; + mutable std::vector m_sluRscale, m_sluCscale; + mutable std::vector m_sluFerr, m_sluBerr; + mutable char m_sluEqued; + mutable bool m_extractedDataAreDirty; +}; + +template +void SparseLU::compute(const MatrixType& a) +{ + const int size = a.rows(); + m_matrix = a; + + set_default_options(&m_sluOptions); + m_sluOptions.ColPerm = NATURAL; + m_sluOptions.PrintStat = NO; + m_sluOptions.ConditionNumber = NO; + m_sluOptions.Trans = NOTRANS; + // m_sluOptions.Equil = NO; + + switch (Base::orderingMethod()) + { + case NaturalOrdering : m_sluOptions.ColPerm = NATURAL; break; + case MinimumDegree_AT_PLUS_A : m_sluOptions.ColPerm = MMD_AT_PLUS_A; break; + case MinimumDegree_ATA : m_sluOptions.ColPerm = MMD_ATA; break; + case ColApproxMinimumDegree : m_sluOptions.ColPerm = COLAMD; break; + default: + std::cerr << "Eigen: ordering method \"" << Base::orderingMethod() << "\" not supported by the SuperLU backend\n"; + m_sluOptions.ColPerm = NATURAL; + }; + + m_sluA = m_matrix.asSluMatrix(); + memset(&m_sluL,0,sizeof m_sluL); + memset(&m_sluU,0,sizeof m_sluU); + m_sluEqued = 'B'; + int info = 0; + + m_p.resize(size); + m_q.resize(size); + m_sluRscale.resize(size); + m_sluCscale.resize(size); + m_sluEtree.resize(size); + + RealScalar recip_pivot_gross, rcond; + RealScalar ferr, berr; + + // set empty B and X + m_sluB.setStorageType(SLU_DN); + m_sluB.setScalarType(); + m_sluB.Mtype = SLU_GE; + m_sluB.storage.values = 0; + m_sluB.nrow = m_sluB.ncol = 0; + m_sluB.storage.lda = size; + m_sluX = m_sluB; + + StatInit(&m_sluStat); + SuperLU_gssvx(&m_sluOptions, &m_sluA, m_q.data(), m_p.data(), &m_sluEtree[0], + &m_sluEqued, &m_sluRscale[0], &m_sluCscale[0], + &m_sluL, &m_sluU, + NULL, 0, + &m_sluB, &m_sluX, + &recip_pivot_gross, &rcond, + &ferr, &berr, + &m_sluStat, &info, Scalar()); + StatFree(&m_sluStat); + + m_extractedDataAreDirty = true; + + // FIXME how to better check for errors ??? + Base::m_succeeded = (info == 0); +} + +template +template +bool SparseLU::solve(const MatrixBase &b, MatrixBase *x) const +{ + const int size = m_matrix.rows(); + const int rhsCols = b.cols(); + ei_assert(size==b.rows()); + + m_sluOptions.Fact = FACTORED; + m_sluOptions.IterRefine = NOREFINE; + + m_sluFerr.resize(rhsCols); + m_sluBerr.resize(rhsCols); + m_sluB = SluMatrix::Map(b.const_cast_derived()); + m_sluX = SluMatrix::Map(x->derived()); + + StatInit(&m_sluStat); + int info = 0; + RealScalar recip_pivot_gross, rcond; + SuperLU_gssvx( + &m_sluOptions, &m_sluA, + m_q.data(), m_p.data(), + &m_sluEtree[0], &m_sluEqued, + &m_sluRscale[0], &m_sluCscale[0], + &m_sluL, &m_sluU, + NULL, 0, + &m_sluB, &m_sluX, + &recip_pivot_gross, &rcond, + &m_sluFerr[0], &m_sluBerr[0], + &m_sluStat, &info, Scalar()); + StatFree(&m_sluStat); + + return info==0; +} + +// +// the code of this extractData() function has been adapted from the SuperLU's Matlab support code, +// +// Copyright (c) 1994 by Xerox Corporation. All rights reserved. +// +// THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY +// EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK. 
+// +template +void SparseLU::extractData() const +{ + if (m_extractedDataAreDirty) + { + int upper; + int fsupc, istart, nsupr; + int lastl = 0, lastu = 0; + SCformat *Lstore = static_cast(m_sluL.Store); + NCformat *Ustore = static_cast(m_sluU.Store); + Scalar *SNptr; + + const int size = m_matrix.rows(); + m_l.resize(size,size); + m_l.resizeNonZeros(Lstore->nnz); + m_u.resize(size,size); + m_u.resizeNonZeros(Ustore->nnz); + + int* Lcol = m_l._outerIndexPtr(); + int* Lrow = m_l._innerIndexPtr(); + Scalar* Lval = m_l._valuePtr(); + + int* Ucol = m_u._outerIndexPtr(); + int* Urow = m_u._innerIndexPtr(); + Scalar* Uval = m_u._valuePtr(); + + Ucol[0] = 0; + Ucol[0] = 0; + + /* for each supernode */ + for (int k = 0; k <= Lstore->nsuper; ++k) + { + fsupc = L_FST_SUPC(k); + istart = L_SUB_START(fsupc); + nsupr = L_SUB_START(fsupc+1) - istart; + upper = 1; + + /* for each column in the supernode */ + for (int j = fsupc; j < L_FST_SUPC(k+1); ++j) + { + SNptr = &((Scalar*)Lstore->nzval)[L_NZ_START(j)]; + + /* Extract U */ + for (int i = U_NZ_START(j); i < U_NZ_START(j+1); ++i) + { + Uval[lastu] = ((Scalar*)Ustore->nzval)[i]; + /* Matlab doesn't like explicit zero. */ + if (Uval[lastu] != 0.0) + Urow[lastu++] = U_SUB(i); + } + for (int i = 0; i < upper; ++i) + { + /* upper triangle in the supernode */ + Uval[lastu] = SNptr[i]; + /* Matlab doesn't like explicit zero. */ + if (Uval[lastu] != 0.0) + Urow[lastu++] = L_SUB(istart+i); + } + Ucol[j+1] = lastu; + + /* Extract L */ + Lval[lastl] = 1.0; /* unit diagonal */ + Lrow[lastl++] = L_SUB(istart + upper - 1); + for (int i = upper; i < nsupr; ++i) + { + Lval[lastl] = SNptr[i]; + /* Matlab doesn't like explicit zero. */ + if (Lval[lastl] != 0.0) + Lrow[lastl++] = L_SUB(istart+i); + } + Lcol[j+1] = lastl; + + ++upper; + } /* for j ... */ + + } /* for k ... */ + + // squeeze the matrices : + m_l.resizeNonZeros(lastl); + m_u.resizeNonZeros(lastu); + + m_extractedDataAreDirty = false; + } +} + +template +typename SparseLU::Scalar SparseLU::determinant() const +{ + if (m_extractedDataAreDirty) + extractData(); + + // TODO this code coule be moved to the default/base backend + // FIXME perhaps we have to take into account the scale factors m_sluRscale and m_sluCscale ??? + Scalar det = Scalar(1); + for (int j=0; j 0) + { + int lastId = m_u._outerIndexPtr()[j+1]-1; + ei_assert(m_u._innerIndexPtr()[lastId]<=j); + if (m_u._innerIndexPtr()[lastId]==j) + { + det *= m_u._valuePtr()[lastId]; + } + } + // std::cout << m_sluRscale[j] << " " << m_sluCscale[j] << " "; + } + return det; +} + +#endif // EIGEN_SUPERLUSUPPORT_H diff --git a/extern/Eigen2/Eigen/src/Sparse/TaucsSupport.h b/extern/Eigen2/Eigen/src/Sparse/TaucsSupport.h new file mode 100644 index 00000000000..4dddca7b622 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Sparse/TaucsSupport.h @@ -0,0 +1,210 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. 
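A hedged usage sketch for the SuperLU-backed SparseLU specialization defined above. It assumes SuperLU itself is installed and that defining EIGEN_SUPERLU_SUPPORT before including <Eigen/Sparse> is what enables this backend in the snapshot; succeeded() is an accessor of the generic SparseLU base class, which is not part of this hunk.

    // assumption: this macro is how the Sparse module header of this snapshot
    // compiles the SuperLU backend in; SuperLU headers must be on the include path
    #define EIGEN_SUPERLU_SUPPORT
    #include <Eigen/Sparse>
    using namespace Eigen;

    void superluExample(const SparseMatrix<double>& A, const VectorXd& b)
    {
      // ordering flags come from the solver-flags enum added in SparseUtil.h
      SparseLU<SparseMatrix<double>, SuperLU> lu(A, ColApproxMinimumDegree);
      if (!lu.succeeded())
        return;                        // factorization failed

      VectorXd x(b.size());
      if (lu.solve(b, &x))             // second gssvx call with Fact = FACTORED
      {
        // L, U, P, Q and the determinant are extracted lazily on first access
        double d = lu.determinant();
        (void)d;
        (void)lu.matrixL();
        (void)lu.matrixU();
      }
    }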
+// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_TAUCSSUPPORT_H +#define EIGEN_TAUCSSUPPORT_H + +template +taucs_ccs_matrix SparseMatrixBase::asTaucsMatrix() +{ + taucs_ccs_matrix res; + res.n = cols(); + res.m = rows(); + res.flags = 0; + res.colptr = derived()._outerIndexPtr(); + res.rowind = derived()._innerIndexPtr(); + res.values.v = derived()._valuePtr(); + if (ei_is_same_type::ret) + res.flags |= TAUCS_INT; + else if (ei_is_same_type::ret) + res.flags |= TAUCS_SINGLE; + else if (ei_is_same_type::ret) + res.flags |= TAUCS_DOUBLE; + else if (ei_is_same_type >::ret) + res.flags |= TAUCS_SCOMPLEX; + else if (ei_is_same_type >::ret) + res.flags |= TAUCS_DCOMPLEX; + else + { + ei_assert(false && "Scalar type not supported by TAUCS"); + } + + if (Flags & UpperTriangular) + res.flags |= TAUCS_UPPER; + if (Flags & LowerTriangular) + res.flags |= TAUCS_LOWER; + if (Flags & SelfAdjoint) + res.flags |= (NumTraits::IsComplex ? TAUCS_HERMITIAN : TAUCS_SYMMETRIC); + else if ((Flags & UpperTriangular) || (Flags & LowerTriangular)) + res.flags |= TAUCS_TRIANGULAR; + + return res; +} + +template +MappedSparseMatrix::MappedSparseMatrix(taucs_ccs_matrix& taucsMat) +{ + m_innerSize = taucsMat.m; + m_outerSize = taucsMat.n; + m_outerIndex = taucsMat.colptr; + m_innerIndices = taucsMat.rowind; + m_values = reinterpret_cast(taucsMat.values.v); + m_nnz = taucsMat.colptr[taucsMat.n]; +} + +template +class SparseLLT : public SparseLLT +{ + protected: + typedef SparseLLT Base; + typedef typename Base::Scalar Scalar; + typedef typename Base::RealScalar RealScalar; + using Base::MatrixLIsDirty; + using Base::SupernodalFactorIsDirty; + using Base::m_flags; + using Base::m_matrix; + using Base::m_status; + + public: + + SparseLLT(int flags = 0) + : Base(flags), m_taucsSupernodalFactor(0) + { + } + + SparseLLT(const MatrixType& matrix, int flags = 0) + : Base(flags), m_taucsSupernodalFactor(0) + { + compute(matrix); + } + + ~SparseLLT() + { + if (m_taucsSupernodalFactor) + taucs_supernodal_factor_free(m_taucsSupernodalFactor); + } + + inline const typename Base::CholMatrixType& matrixL(void) const; + + template + void solveInPlace(MatrixBase &b) const; + + void compute(const MatrixType& matrix); + + protected: + void* m_taucsSupernodalFactor; +}; + +template +void SparseLLT::compute(const MatrixType& a) +{ + if (m_taucsSupernodalFactor) + { + taucs_supernodal_factor_free(m_taucsSupernodalFactor); + m_taucsSupernodalFactor = 0; + } + + if (m_flags & IncompleteFactorization) + { + taucs_ccs_matrix taucsMatA = const_cast(a).asTaucsMatrix(); + taucs_ccs_matrix* taucsRes = taucs_ccs_factor_llt(&taucsMatA, Base::m_precision, 0); + // the matrix returned by Taucs is not necessarily sorted, + // so let's copy it in two steps + DynamicSparseMatrix tmp = MappedSparseMatrix(*taucsRes); + m_matrix = tmp; + free(taucsRes); + m_status = (m_status & ~(CompleteFactorization|MatrixLIsDirty)) + | IncompleteFactorization + | SupernodalFactorIsDirty; + } + else + { + taucs_ccs_matrix taucsMatA = const_cast(a).asTaucsMatrix(); + if ( (m_flags & SupernodalLeftLooking) + || ((!(m_flags & SupernodalMultifrontal)) && (m_flags 
& MemoryEfficient)) ) + { + m_taucsSupernodalFactor = taucs_ccs_factor_llt_ll(&taucsMatA); + } + else + { + // use the faster Multifrontal routine + m_taucsSupernodalFactor = taucs_ccs_factor_llt_mf(&taucsMatA); + } + m_status = (m_status & ~IncompleteFactorization) | CompleteFactorization | MatrixLIsDirty; + } +} + +template +inline const typename SparseLLT::CholMatrixType& +SparseLLT::matrixL() const +{ + if (m_status & MatrixLIsDirty) + { + ei_assert(!(m_status & SupernodalFactorIsDirty)); + + taucs_ccs_matrix* taucsL = taucs_supernodal_factor_to_ccs(m_taucsSupernodalFactor); + + // the matrix returned by Taucs is not necessarily sorted, + // so let's copy it in two steps + DynamicSparseMatrix tmp = MappedSparseMatrix(*taucsL); + const_cast(m_matrix) = tmp; + free(taucsL); + m_status = (m_status & ~MatrixLIsDirty); + } + return m_matrix; +} + +template +template +void SparseLLT::solveInPlace(MatrixBase &b) const +{ + bool inputIsCompatibleWithTaucs = (Derived::Flags&RowMajorBit)==0; + + if (!inputIsCompatibleWithTaucs) + { + matrixL(); + Base::solveInPlace(b); + } + else if (m_flags & IncompleteFactorization) + { + taucs_ccs_matrix taucsLLT = const_cast(m_matrix).asTaucsMatrix(); + typename ei_plain_matrix_type::type x(b.rows()); + for (int j=0; j::type x(b.rows()); + for (int j=0; j +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . 
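Similarly, a hedged sketch for the TAUCS-backed SparseLLT specialization above, using the solver flags introduced in SparseUtil.h; EIGEN_TAUCS_SUPPORT is assumed to be the switch that pulls this backend into <Eigen/Sparse>, and A is assumed symmetric positive definite.

    #define EIGEN_TAUCS_SUPPORT        // assumption: enables TaucsSupport.h in this snapshot
    #include <Eigen/Sparse>
    using namespace Eigen;

    void taucsExample(const SparseMatrix<double>& A, VectorXd& b)
    {
      // complete factorization through the supernodal multifrontal routine
      SparseLLT<SparseMatrix<double>, Taucs> llt(A, SupernodalMultifrontal);
      llt.solveInPlace(b);             // b is overwritten with the solution

      // incomplete factorization, e.g. to build a preconditioner; matrixL()
      // returns the factor copied back into Eigen's own sparse storage
      SparseLLT<SparseMatrix<double>, Taucs> illt(A, IncompleteFactorization);
      illt.matrixL();
    }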
+ +#ifndef EIGEN_SPARSETRIANGULARSOLVER_H +#define EIGEN_SPARSETRIANGULARSOLVER_H + +// forward substitution, row-major +template +struct ei_solve_triangular_selector +{ + typedef typename Rhs::Scalar Scalar; + static void run(const Lhs& lhs, Rhs& other) + { + for(int col=0 ; col +struct ei_solve_triangular_selector +{ + typedef typename Rhs::Scalar Scalar; + static void run(const Lhs& lhs, Rhs& other) + { + for(int col=0 ; col=0 ; --i) + { + Scalar tmp = other.coeff(i,col); + typename Lhs::InnerIterator it(lhs, i); + if (it.index() == i) + ++it; + for(; it; ++it) + { + tmp -= it.value() * other.coeff(it.index(),col); + } + + if (Lhs::Flags & UnitDiagBit) + other.coeffRef(i,col) = tmp; + else + { + typename Lhs::InnerIterator it(lhs, i); + ei_assert(it.index() == i); + other.coeffRef(i,col) = tmp/it.value(); + } + } + } + } +}; + +// forward substitution, col-major +template +struct ei_solve_triangular_selector +{ + typedef typename Rhs::Scalar Scalar; + static void run(const Lhs& lhs, Rhs& other) + { + for(int col=0 ; col +struct ei_solve_triangular_selector +{ + typedef typename Rhs::Scalar Scalar; + static void run(const Lhs& lhs, Rhs& other) + { + for(int col=0 ; col=0; --i) + { + if(!(Lhs::Flags & UnitDiagBit)) + { + // FIXME lhs.coeff(i,i) might not be always efficient while it must simply be the + // last element of the column ! + other.coeffRef(i,col) /= lhs.coeff(i,i); + } + Scalar tmp = other.coeffRef(i,col); + typename Lhs::InnerIterator it(lhs, i); + for(; it && it.index() +template +void SparseMatrixBase::solveTriangularInPlace(MatrixBase& other) const +{ + ei_assert(derived().cols() == derived().rows()); + ei_assert(derived().cols() == other.rows()); + ei_assert(!(Flags & ZeroDiagBit)); + ei_assert(Flags & (UpperTriangularBit|LowerTriangularBit)); + + enum { copy = ei_traits::Flags & RowMajorBit }; + + typedef typename ei_meta_if::type, OtherDerived&>::ret OtherCopy; + OtherCopy otherCopy(other.derived()); + + ei_solve_triangular_selector::type>::run(derived(), otherCopy); + + if (copy) + other = otherCopy; +} + +template +template +typename ei_plain_matrix_type_column_major::type +SparseMatrixBase::solveTriangular(const MatrixBase& other) const +{ + typename ei_plain_matrix_type_column_major::type res(other); + solveTriangularInPlace(res); + return res; +} + +#endif // EIGEN_SPARSETRIANGULARSOLVER_H diff --git a/extern/Eigen2/Eigen/src/Sparse/UmfPackSupport.h b/extern/Eigen2/Eigen/src/Sparse/UmfPackSupport.h new file mode 100644 index 00000000000..b76ffb25248 --- /dev/null +++ b/extern/Eigen2/Eigen/src/Sparse/UmfPackSupport.h @@ -0,0 +1,289 @@ +// This file is part of Eigen, a lightweight C++ template library +// for linear algebra. Eigen itself is part of the KDE project. +// +// Copyright (C) 2008-2009 Gael Guennebaud +// +// Eigen is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 3 of the License, or (at your option) any later version. +// +// Alternatively, you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of +// the License, or (at your option) any later version. +// +// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY +// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +// FOR A PARTICULAR PURPOSE. 
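The triangular-solver specializations above are what SparseMatrixBase::solveTriangularInPlace() dispatches to. A minimal sketch (illustrative; LowerTriangular is the triangular flag constant from Eigen2's core module, and marked<>() only advertises the flag, so the caller must ensure the matrix really is lower triangular with no missing diagonal entries):

    #include <Eigen/Sparse>
    using namespace Eigen;

    void triangularExample(const SparseMatrix<double>& L, VectorXd& b)
    {
      // keep b and get a fresh column-major solution of L * x = b
      VectorXd x = L.marked<LowerTriangular>().solveTriangular(b);
      (void)x;

      // or solve in place: b is replaced by the solution
      L.marked<LowerTriangular>().solveTriangularInPlace(b);
    }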
See the GNU Lesser General Public License or the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public +// License and a copy of the GNU General Public License along with +// Eigen. If not, see . + +#ifndef EIGEN_UMFPACKSUPPORT_H +#define EIGEN_UMFPACKSUPPORT_H + +/* TODO extract L, extract U, compute det, etc... */ + +// generic double/complex wrapper functions: + +inline void umfpack_free_numeric(void **Numeric, double) +{ umfpack_di_free_numeric(Numeric); } + +inline void umfpack_free_numeric(void **Numeric, std::complex) +{ umfpack_zi_free_numeric(Numeric); } + +inline void umfpack_free_symbolic(void **Symbolic, double) +{ umfpack_di_free_symbolic(Symbolic); } + +inline void umfpack_free_symbolic(void **Symbolic, std::complex) +{ umfpack_zi_free_symbolic(Symbolic); } + +inline int umfpack_symbolic(int n_row,int n_col, + const int Ap[], const int Ai[], const double Ax[], void **Symbolic, + const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO]) +{ + return umfpack_di_symbolic(n_row,n_col,Ap,Ai,Ax,Symbolic,Control,Info); +} + +inline int umfpack_symbolic(int n_row,int n_col, + const int Ap[], const int Ai[], const std::complex Ax[], void **Symbolic, + const double Control [UMFPACK_CONTROL], double Info [UMFPACK_INFO]) +{ + return umfpack_zi_symbolic(n_row,n_col,Ap,Ai,&Ax[0].real(),0,Symbolic,Control,Info); +} + +inline int umfpack_numeric( const int Ap[], const int Ai[], const double Ax[], + void *Symbolic, void **Numeric, + const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO]) +{ + return umfpack_di_numeric(Ap,Ai,Ax,Symbolic,Numeric,Control,Info); +} + +inline int umfpack_numeric( const int Ap[], const int Ai[], const std::complex Ax[], + void *Symbolic, void **Numeric, + const double Control[UMFPACK_CONTROL],double Info [UMFPACK_INFO]) +{ + return umfpack_zi_numeric(Ap,Ai,&Ax[0].real(),0,Symbolic,Numeric,Control,Info); +} + +inline int umfpack_solve( int sys, const int Ap[], const int Ai[], const double Ax[], + double X[], const double B[], void *Numeric, + const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO]) +{ + return umfpack_di_solve(sys,Ap,Ai,Ax,X,B,Numeric,Control,Info); +} + +inline int umfpack_solve( int sys, const int Ap[], const int Ai[], const std::complex Ax[], + std::complex X[], const std::complex B[], void *Numeric, + const double Control[UMFPACK_CONTROL], double Info[UMFPACK_INFO]) +{ + return umfpack_zi_solve(sys,Ap,Ai,&Ax[0].real(),0,&X[0].real(),0,&B[0].real(),0,Numeric,Control,Info); +} + +inline int umfpack_get_lunz(int *lnz, int *unz, int *n_row, int *n_col, int *nz_udiag, void *Numeric, double) +{ + return umfpack_di_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric); +} + +inline int umfpack_get_lunz(int *lnz, int *unz, int *n_row, int *n_col, int *nz_udiag, void *Numeric, std::complex) +{ + return umfpack_zi_get_lunz(lnz,unz,n_row,n_col,nz_udiag,Numeric); +} + +inline int umfpack_get_numeric(int Lp[], int Lj[], double Lx[], int Up[], int Ui[], double Ux[], + int P[], int Q[], double Dx[], int *do_recip, double Rs[], void *Numeric) +{ + return umfpack_di_get_numeric(Lp,Lj,Lx,Up,Ui,Ux,P,Q,Dx,do_recip,Rs,Numeric); +} + +inline int umfpack_get_numeric(int Lp[], int Lj[], std::complex Lx[], int Up[], int Ui[], std::complex Ux[], + int P[], int Q[], std::complex Dx[], int *do_recip, double Rs[], void *Numeric) +{ + return umfpack_zi_get_numeric(Lp,Lj,Lx?&Lx[0].real():0,0,Up,Ui,Ux?&Ux[0].real():0,0,P,Q, + Dx?&Dx[0].real():0,0,do_recip,Rs,Numeric); +} + +inline int 
umfpack_get_determinant(double *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO]) +{ + return umfpack_di_get_determinant(Mx,Ex,NumericHandle,User_Info); +} + +inline int umfpack_get_determinant(std::complex *Mx, double *Ex, void *NumericHandle, double User_Info [UMFPACK_INFO]) +{ + return umfpack_zi_get_determinant(&Mx->real(),0,Ex,NumericHandle,User_Info); +} + + +template +class SparseLU : public SparseLU +{ + protected: + typedef SparseLU Base; + typedef typename Base::Scalar Scalar; + typedef typename Base::RealScalar RealScalar; + typedef Matrix Vector; + typedef Matrix IntRowVectorType; + typedef Matrix IntColVectorType; + typedef SparseMatrix LMatrixType; + typedef SparseMatrix UMatrixType; + using Base::m_flags; + using Base::m_status; + + public: + + SparseLU(int flags = NaturalOrdering) + : Base(flags), m_numeric(0) + { + } + + SparseLU(const MatrixType& matrix, int flags = NaturalOrdering) + : Base(flags), m_numeric(0) + { + compute(matrix); + } + + ~SparseLU() + { + if (m_numeric) + umfpack_free_numeric(&m_numeric,Scalar()); + } + + inline const LMatrixType& matrixL() const + { + if (m_extractedDataAreDirty) extractData(); + return m_l; + } + + inline const UMatrixType& matrixU() const + { + if (m_extractedDataAreDirty) extractData(); + return m_u; + } + + inline const IntColVectorType& permutationP() const + { + if (m_extractedDataAreDirty) extractData(); + return m_p; + } + + inline const IntRowVectorType& permutationQ() const + { + if (m_extractedDataAreDirty) extractData(); + return m_q; + } + + Scalar determinant() const; + + template + bool solve(const MatrixBase &b, MatrixBase* x) const; + + void compute(const MatrixType& matrix); + + protected: + + void extractData() const; + + protected: + // cached data: + void* m_numeric; + const MatrixType* m_matrixRef; + mutable LMatrixType m_l; + mutable UMatrixType m_u; + mutable IntColVectorType m_p; + mutable IntRowVectorType m_q; + mutable bool m_extractedDataAreDirty; +}; + +template +void SparseLU::compute(const MatrixType& a) +{ + const int rows = a.rows(); + const int cols = a.cols(); + ei_assert((MatrixType::Flags&RowMajorBit)==0 && "Row major matrices are not supported yet"); + + m_matrixRef = &a; + + if (m_numeric) + umfpack_free_numeric(&m_numeric,Scalar()); + + void* symbolic; + int errorCode = 0; + errorCode = umfpack_symbolic(rows, cols, a._outerIndexPtr(), a._innerIndexPtr(), a._valuePtr(), + &symbolic, 0, 0); + if (errorCode==0) + errorCode = umfpack_numeric(a._outerIndexPtr(), a._innerIndexPtr(), a._valuePtr(), + symbolic, &m_numeric, 0, 0); + + umfpack_free_symbolic(&symbolic,Scalar()); + + m_extractedDataAreDirty = true; + + Base::m_succeeded = (errorCode==0); +} + +template +void SparseLU::extractData() const +{ + if (m_extractedDataAreDirty) + { + // get size of the data + int lnz, unz, rows, cols, nz_udiag; + umfpack_get_lunz(&lnz, &unz, &rows, &cols, &nz_udiag, m_numeric, Scalar()); + + // allocate data + m_l.resize(rows,std::min(rows,cols)); + m_l.resizeNonZeros(lnz); + + m_u.resize(std::min(rows,cols),cols); + m_u.resizeNonZeros(unz); + + m_p.resize(rows); + m_q.resize(cols); + + // extract + umfpack_get_numeric(m_l._outerIndexPtr(), m_l._innerIndexPtr(), m_l._valuePtr(), + m_u._outerIndexPtr(), m_u._innerIndexPtr(), m_u._valuePtr(), + m_p.data(), m_q.data(), 0, 0, 0, m_numeric); + + m_extractedDataAreDirty = false; + } +} + +template +typename SparseLU::Scalar SparseLU::determinant() const +{ + Scalar det; + umfpack_get_determinant(&det, 0, m_numeric, 0); + return det; +} + +template 
+template +bool SparseLU::solve(const MatrixBase &b, MatrixBase *x) const +{ + //const int size = m_matrix.rows(); + const int rhsCols = b.cols(); +// ei_assert(size==b.rows()); + ei_assert((BDerived::Flags&RowMajorBit)==0 && "UmfPack backend does not support non col-major rhs yet"); + ei_assert((XDerived::Flags&RowMajorBit)==0 && "UmfPack backend does not support non col-major result yet"); + + int errorCode; + for (int j=0; j_outerIndexPtr(), m_matrixRef->_innerIndexPtr(), m_matrixRef->_valuePtr(), + &x->col(j).coeffRef(0), &b.const_cast_derived().col(j).coeffRef(0), m_numeric, 0, 0); + if (errorCode!=0) + return false; + } +// errorCode = umfpack_di_solve(UMFPACK_A, +// m_matrixRef._outerIndexPtr(), m_matrixRef._innerIndexPtr(), m_matrixRef._valuePtr(), +// x->derived().data(), b.derived().data(), m_numeric, 0, 0); + + return true; +} + +#endif // EIGEN_UMFPACKSUPPORT_H diff --git a/extern/Eigen2/eigen-update.sh b/extern/Eigen2/eigen-update.sh new file mode 100755 index 00000000000..926a36ef120 --- /dev/null +++ b/extern/Eigen2/eigen-update.sh @@ -0,0 +1,28 @@ +#!/bin/sh + +echo "*** EIGEN2-HG Update utility" +echo "*** This gets a new eigen2-hg tree and adapts it to blenders build structure" +echo "*** Warning! This script will wipe all the header file" + +if [ "x$1" = "x--i-really-know-what-im-doing" ] ; then + echo Proceeding as requested by command line ... +else + echo "*** Please run again with --i-really-know-what-im-doing ..." + exit 1 +fi + +# get the latest revision from repository. +hg clone http://bitbucket.org/eigen/eigen2 +if [ -d eigen2 ] +then + cd eigen2 + # put here the version you want to use + hg up 2.0.6 + rm -f `find Eigen/ -type f -name "CMakeLists.txt"` + cp -r Eigen .. + cd .. + rm -rf eigen2 +else + echo "Did you install Mercurial?" 
+fi + diff --git a/extern/Makefile b/extern/Makefile index b81fbd2b91a..a30cd1d7ca3 100644 --- a/extern/Makefile +++ b/extern/Makefile @@ -33,9 +33,9 @@ DIR = $(OCGDIR)/extern DIRS = glew/src # Cloth requires it -#ifneq ($(NAN_NO_KETSJI), true) -DIRS += bullet2 -#endif +ifeq ($(NAN_USE_BULLET), true) + DIRS += bullet2 +endif ifeq ($(WITH_BINRELOC), true) DIRS += binreloc diff --git a/extern/SConscript b/extern/SConscript index 20604d87e45..af057a73927 100644 --- a/extern/SConscript +++ b/extern/SConscript @@ -22,5 +22,8 @@ if env['WITH_BF_REDCODE'] and env['BF_REDCODE_LIB'] == '': if env['OURPLATFORM'] == 'linux2': SConscript(['binreloc/SConscript']); -SConscript(['lzo/SConscript']) -SConscript(['lzma/SConscript']) +if env['WITH_BF_LZO']: + SConscript(['lzo/SConscript']) + +if env['WITH_BF_LZMA']: + SConscript(['lzma/SConscript']) diff --git a/extern/bullet2/CMakeLists.txt b/extern/bullet2/CMakeLists.txt index 2e2d8920781..3054ed98908 100644 --- a/extern/bullet2/CMakeLists.txt +++ b/extern/bullet2/CMakeLists.txt @@ -32,14 +32,12 @@ FILE(GLOB SRC src/BulletCollision/CollisionShapes/*.cpp src/BulletCollision/NarrowPhaseCollision/*.cpp src/BulletCollision/Gimpact/*.cpp - src/BulletCollision//CollisionDispatch/*.cpp + src/BulletCollision/CollisionDispatch/*.cpp src/BulletDynamics/ConstraintSolver/*.cpp src/BulletDynamics/Vehicle/*.cpp src/BulletDynamics/Dynamics/*.cpp src/BulletSoftBody/*.cpp ) -ADD_DEFINITIONS(-D_LIB) - BLENDERLIB(extern_bullet "${SRC}" "${INC}") -#, libtype=['game2', 'player'], priority=[20, 170], compileflags=cflags ) + diff --git a/extern/bullet2/src/SConscript b/extern/bullet2/src/SConscript index 3d0c645e7a0..c0ee56045d8 100644 --- a/extern/bullet2/src/SConscript +++ b/extern/bullet2/src/SConscript @@ -4,11 +4,11 @@ import os Import('env') -defs = 'USE_DOUBLES QHULL _LIB' +defs = '' cflags = [] if env['OURPLATFORM'] in ('win32-vc', 'win64-vc'): - defs += ' WIN32 NDEBUG _WINDOWS _LIB' + defs += ' WIN32 NDEBUG _WINDOWS' #cflags += ['/MT', '/W3', '/GX', '/O2', '/Op'] cflags += ['/MT', '/W3', '/GX', '/Og', '/Ot', '/Ob1', '/Op', '/G6', '/O3', '/EHcs'] elif env['OURPLATFORM']=='win32-mingw': diff --git a/extern/glew/make/msvc_9_0/glew.vcproj b/extern/glew/make/msvc_9_0/glew.vcproj index f9d8df478ca..a7186f61cbc 100644 --- a/extern/glew/make/msvc_9_0/glew.vcproj +++ b/extern/glew/make/msvc_9_0/glew.vcproj @@ -112,6 +112,7 @@ /> begin(); - while(it != m_playingSounds->end()) { - sound = *it; - // increment the iterator to make sure it's valid, - // in case the sound gets deleted after stopping - ++it; - - // is it a streamed sound? - if(!sound->isBuffered) + // for all sounds + AUD_HandleIterator it = m_playingSounds->begin(); + while(it != m_playingSounds->end()) { - // check for buffer refilling - alGetSourcei(sound->source, AL_BUFFERS_PROCESSED, &info); + sound = *it; + // increment the iterator to make sure it's valid, + // in case the sound gets deleted after stopping + ++it; - if(info) + // is it a streamed sound? + if(!sound->isBuffered) { - specs = sound->reader->getSpecs(); + // check for buffer refilling + alGetSourcei(sound->source, AL_BUFFERS_PROCESSED, &info); - // for all empty buffers - while(info--) + if(info) { - // if there's still data to play back - if(!sound->data_end) - { - // read data - length = m_buffersize; - sound->reader->read(length, buffer); - - // read nothing? 
- if(length == 0) - { - sound->data_end = true; - break; - } - - // unqueue buffer - alSourceUnqueueBuffers(sound->source, 1, - &sound->buffers[sound->current]); - ALenum err; - if((err = alGetError()) != AL_NO_ERROR) - { - sound->data_end = true; - break; - } - - // fill with new data - alBufferData(sound->buffers[sound->current], - sound->format, - buffer, - length * AUD_SAMPLE_SIZE(specs), - specs.rate); + specs = sound->reader->getSpecs(); - if(alGetError() != AL_NO_ERROR) + // for all empty buffers + while(info--) + { + // if there's still data to play back + if(!sound->data_end) { - sound->data_end = true; - break; + // read data + length = m_buffersize; + sound->reader->read(length, buffer); + + // read nothing? + if(length == 0) + { + sound->data_end = true; + break; + } + + // unqueue buffer + alSourceUnqueueBuffers(sound->source, 1, + &sound->buffers[sound->current]); + ALenum err; + if((err = alGetError()) != AL_NO_ERROR) + { + sound->data_end = true; + break; + } + + // fill with new data + alBufferData(sound->buffers[sound->current], + sound->format, + buffer, + length * AUD_SAMPLE_SIZE(specs), + specs.rate); + + if(alGetError() != AL_NO_ERROR) + { + sound->data_end = true; + break; + } + + // and queue again + alSourceQueueBuffers(sound->source, 1, + &sound->buffers[sound->current]); + if(alGetError() != AL_NO_ERROR) + { + sound->data_end = true; + break; + } + + sound->current = (sound->current+1) % + AUD_OPENAL_CYCLE_BUFFERS; } - - // and queue again - alSourceQueueBuffers(sound->source, 1, - &sound->buffers[sound->current]); - if(alGetError() != AL_NO_ERROR) - { - sound->data_end = true; + else break; - } - - sound->current = (sound->current+1) % - AUD_OPENAL_CYCLE_BUFFERS; } - else - break; } } - } - // check if the sound has been stopped - alGetSourcei(sound->source, AL_SOURCE_STATE, &info); + // check if the sound has been stopped + alGetSourcei(sound->source, AL_SOURCE_STATE, &info); - if(info != AL_PLAYING) - { - // if it really stopped - if(sound->data_end) + if(info != AL_PLAYING) { - // pause or - if(sound->keep) - pause(sound); - // stop + // if it really stopped + if(sound->data_end) + { + // pause or + if(sound->keep) + pause(sound); + // stop + else + stop(sound); + } + // continue playing else - stop(sound); + alSourcePlay(sound->source); } - // continue playing - else - alSourcePlay(sound->source); } } @@ -516,60 +518,73 @@ bool AUD_OpenALDevice::getFormat(ALenum &format, AUD_Specs specs) AUD_Handle* AUD_OpenALDevice::play(AUD_IFactory* factory, bool keep) { - // check if it is a buffered factory - for(AUD_BFIterator i = m_bufferedFactories->begin(); - i != m_bufferedFactories->end(); i++) - { - if((*i)->factory == factory) - { - // create the handle - AUD_OpenALHandle* sound = new AUD_OpenALHandle; AUD_NEW("handle") - sound->keep = keep; - sound->current = -1; - sound->isBuffered = true; - sound->data_end = true; + lock(); - alcSuspendContext(m_context); + AUD_OpenALHandle* sound = NULL; - // OpenAL playback code - try + try + { + // check if it is a buffered factory + for(AUD_BFIterator i = m_bufferedFactories->begin(); + i != m_bufferedFactories->end(); i++) + { + if((*i)->factory == factory) { - alGenSources(1, &sound->source); - if(alGetError() != AL_NO_ERROR) - AUD_THROW(AUD_ERROR_OPENAL); + // create the handle + sound = new AUD_OpenALHandle; AUD_NEW("handle") + sound->keep = keep; + sound->current = -1; + sound->isBuffered = true; + sound->data_end = true; + + alcSuspendContext(m_context); + // OpenAL playback code try { - alSourcei(sound->source, 
AL_BUFFER, (*i)->buffer); + alGenSources(1, &sound->source); if(alGetError() != AL_NO_ERROR) AUD_THROW(AUD_ERROR_OPENAL); + + try + { + alSourcei(sound->source, AL_BUFFER, (*i)->buffer); + if(alGetError() != AL_NO_ERROR) + AUD_THROW(AUD_ERROR_OPENAL); + } + catch(AUD_Exception) + { + alDeleteSources(1, &sound->source); + throw; + } } catch(AUD_Exception) { - alDeleteSources(1, &sound->source); + delete sound; AUD_DELETE("handle") + alcProcessContext(m_context); throw; } - } - catch(AUD_Exception) - { - delete sound; AUD_DELETE("handle") - alcProcessContext(m_context); - unlock(); - throw; - } - // play sound - m_playingSounds->push_back(sound); + // play sound + m_playingSounds->push_back(sound); - alSourcei(sound->source, AL_SOURCE_RELATIVE, 1); - start(); + alSourcei(sound->source, AL_SOURCE_RELATIVE, 1); + start(); - alcProcessContext(m_context); - unlock(); - - return sound; + alcProcessContext(m_context); + } } } + catch(AUD_Exception) + { + unlock(); + throw; + } + + unlock(); + + if(sound) + return sound; AUD_IReader* reader = factory->createReader(); @@ -596,7 +611,7 @@ AUD_Handle* AUD_OpenALDevice::play(AUD_IFactory* factory, bool keep) } // create the handle - AUD_OpenALHandle* sound = new AUD_OpenALHandle; AUD_NEW("handle") + sound = new AUD_OpenALHandle; AUD_NEW("handle") sound->keep = keep; sound->reader = reader; sound->current = 0; @@ -683,8 +698,11 @@ AUD_Handle* AUD_OpenALDevice::play(AUD_IFactory* factory, bool keep) bool AUD_OpenALDevice::pause(AUD_Handle* handle) { - // only songs that are played can be paused + bool result = false; + lock(); + + // only songs that are played can be paused for(AUD_HandleIterator i = m_playingSounds->begin(); i != m_playingSounds->end(); i++) { @@ -693,16 +711,20 @@ bool AUD_OpenALDevice::pause(AUD_Handle* handle) m_pausedSounds->push_back(*i); alSourcePause((*i)->source); m_playingSounds->erase(i); - unlock(); - return true; + result = true; + break; } } + unlock(); - return false; + + return result; } bool AUD_OpenALDevice::resume(AUD_Handle* handle) { + bool result = false; + lock(); // only songs that are paused can be resumed @@ -714,19 +736,24 @@ bool AUD_OpenALDevice::resume(AUD_Handle* handle) m_playingSounds->push_back(*i); start(); m_pausedSounds->erase(i); - unlock(); - return true; + result = true; + break; } } + unlock(); - return false; + + return result; } bool AUD_OpenALDevice::stop(AUD_Handle* handle) { AUD_OpenALHandle* sound; + bool result = false; + lock(); + for(AUD_HandleIterator i = m_playingSounds->begin(); i != m_playingSounds->end(); i++) { @@ -741,51 +768,60 @@ bool AUD_OpenALDevice::stop(AUD_Handle* handle) } delete *i; AUD_DELETE("handle") m_playingSounds->erase(i); - unlock(); - return true; + result = true; + break; } } - for(AUD_HandleIterator i = m_pausedSounds->begin(); - i != m_pausedSounds->end(); i++) + if(!result) { - if(*i == handle) + for(AUD_HandleIterator i = m_pausedSounds->begin(); + i != m_pausedSounds->end(); i++) { - sound = *i; - alDeleteSources(1, &sound->source); - if(!sound->isBuffered) + if(*i == handle) { - delete sound->reader; AUD_DELETE("reader") - alDeleteBuffers(AUD_OPENAL_CYCLE_BUFFERS, sound->buffers); + sound = *i; + alDeleteSources(1, &sound->source); + if(!sound->isBuffered) + { + delete sound->reader; AUD_DELETE("reader") + alDeleteBuffers(AUD_OPENAL_CYCLE_BUFFERS, sound->buffers); + } + delete *i; AUD_DELETE("handle") + m_pausedSounds->erase(i); + result = true; + break; } - delete *i; AUD_DELETE("handle") - m_pausedSounds->erase(i); - unlock(); - return true; } } + 
unlock(); - return false; + + return result; } bool AUD_OpenALDevice::setKeep(AUD_Handle* handle, bool keep) { + bool result = false; + lock(); + if(isValid(handle)) { ((AUD_OpenALHandle*)handle)->keep = keep; - unlock(); - return true; + result = true; } + unlock(); - return false; + + return result; } bool AUD_OpenALDevice::sendMessage(AUD_Handle* handle, AUD_Message &message) { - lock(); - bool result = false; + lock(); + if(handle == 0) { for(AUD_HandleIterator i = m_playingSounds->begin(); @@ -800,12 +836,16 @@ bool AUD_OpenALDevice::sendMessage(AUD_Handle* handle, AUD_Message &message) else if(isValid(handle)) if(!((AUD_OpenALHandle*)handle)->isBuffered) result = ((AUD_OpenALHandle*)handle)->reader->notify(message); + unlock(); + return result; } bool AUD_OpenALDevice::seek(AUD_Handle* handle, float position) { + bool result = false; + lock(); if(isValid(handle)) @@ -857,20 +897,19 @@ bool AUD_OpenALDevice::seek(AUD_Handle* handle, float position) alSourceRewind(alhandle->source); } } - unlock(); - return true; + result = true; } unlock(); - return false; + return result; } float AUD_OpenALDevice::getPosition(AUD_Handle* handle) { - lock(); - float position = 0.0; + lock(); + if(isValid(handle)) { AUD_OpenALHandle* h = (AUD_OpenALHandle*)handle; @@ -887,27 +926,35 @@ float AUD_OpenALDevice::getPosition(AUD_Handle* handle) AUD_Status AUD_OpenALDevice::getStatus(AUD_Handle* handle) { + AUD_Status status = AUD_STATUS_INVALID; + lock(); + for(AUD_HandleIterator i = m_playingSounds->begin(); i != m_playingSounds->end(); i++) { if(*i == handle) { - unlock(); - return AUD_STATUS_PLAYING; + status = AUD_STATUS_PLAYING; + break; } } - for(AUD_HandleIterator i = m_pausedSounds->begin(); - i != m_pausedSounds->end(); i++) + if(status == AUD_STATUS_INVALID) { - if(*i == handle) + for(AUD_HandleIterator i = m_pausedSounds->begin(); + i != m_pausedSounds->end(); i++) { - unlock(); - return AUD_STATUS_PAUSED; + if(*i == handle) + { + status = AUD_STATUS_PAUSED; + break; + } } } + unlock(); - return AUD_STATUS_INVALID; + + return status; } void AUD_OpenALDevice::lock() @@ -935,6 +982,7 @@ bool AUD_OpenALDevice::checkCapability(int capability) bool AUD_OpenALDevice::setCapability(int capability, void *value) { + bool result = false; switch(capability) { case AUD_CAPS_VOLUME: @@ -948,8 +996,7 @@ bool AUD_OpenALDevice::setCapability(int capability, void *value) { alSourcef(((AUD_OpenALHandle*)caps->handle)->source, AL_GAIN, caps->value); - unlock(); - return true; + result = true; } unlock(); } @@ -962,8 +1009,7 @@ bool AUD_OpenALDevice::setCapability(int capability, void *value) { alSourcef(((AUD_OpenALHandle*)caps->handle)->source, AL_PITCH, caps->value); - unlock(); - return true; + result = true; } unlock(); } @@ -981,11 +1027,13 @@ bool AUD_OpenALDevice::setCapability(int capability, void *value) { if((*i)->factory == factory) { - unlock(); - return true; + result = true; + break; } } unlock(); + if(result) + return result; AUD_IReader* reader = factory->createReader(); @@ -1104,11 +1152,13 @@ bool AUD_OpenALDevice::setCapability(int capability, void *value) } break; } - return false; + return result; } bool AUD_OpenALDevice::getCapability(int capability, void *value) { + bool result = false; + switch(capability) { case AUD_CAPS_VOLUME: @@ -1122,8 +1172,7 @@ bool AUD_OpenALDevice::getCapability(int capability, void *value) { alGetSourcef(((AUD_OpenALHandle*)caps->handle)->source, AL_GAIN, &caps->value); - unlock(); - return true; + result = true; } unlock(); } @@ -1136,14 +1185,14 @@ bool 
AUD_OpenALDevice::getCapability(int capability, void *value) { alGetSourcef(((AUD_OpenALHandle*)caps->handle)->source, AL_PITCH, &caps->value); - unlock(); - return true; + result = true; } unlock(); } break; } - return false; + + return result; } /******************************************************************************/ @@ -1233,6 +1282,8 @@ float AUD_OpenALDevice::getSetting(AUD_3DSetting setting) bool AUD_OpenALDevice::updateSource(AUD_Handle* handle, AUD_3DData &data) { + bool result = false; + lock(); if(isValid(handle)) @@ -1241,12 +1292,12 @@ bool AUD_OpenALDevice::updateSource(AUD_Handle* handle, AUD_3DData &data) alSourcefv(source, AL_POSITION, (ALfloat*)data.position); alSourcefv(source, AL_VELOCITY, (ALfloat*)data.velocity); alSourcefv(source, AL_DIRECTION, (ALfloat*)&(data.orientation[3])); - unlock(); - return true; + result = true; } unlock(); - return false; + + return result; } bool AUD_OpenALDevice::setSourceSetting(AUD_Handle* handle, diff --git a/intern/audaspace/intern/AUD_C-API.cpp b/intern/audaspace/intern/AUD_C-API.cpp index 45faebc7e97..255d1d2f1f6 100644 --- a/intern/audaspace/intern/AUD_C-API.cpp +++ b/intern/audaspace/intern/AUD_C-API.cpp @@ -516,19 +516,51 @@ AUD_Device* AUD_openReadDevice(AUD_Specs specs) } } -int AUD_playDevice(AUD_Device* device, AUD_Sound* sound) +AUD_Handle* AUD_playDevice(AUD_Device* device, AUD_Sound* sound) { assert(device); assert(sound); try { - return device->play(sound) != NULL; + return device->play(sound); } catch(AUD_Exception) { - return false; + return NULL; + } +} + +int AUD_setDeviceVolume(AUD_Device* device, float volume) +{ + assert(device); + + try + { + return device->setCapability(AUD_CAPS_VOLUME, &volume); + } + catch(AUD_Exception) {} + + return false; +} + +int AUD_setDeviceSoundVolume(AUD_Device* device, AUD_Handle* handle, + float volume) +{ + if(handle) + { + assert(device); + AUD_SourceCaps caps; + caps.handle = handle; + caps.value = volume; + + try + { + return device->setCapability(AUD_CAPS_SOURCE_VOLUME, &caps); + } + catch(AUD_Exception) {} } + return false; } int AUD_readDevice(AUD_Device* device, sample_t* buffer, int length) diff --git a/intern/audaspace/intern/AUD_C-API.h b/intern/audaspace/intern/AUD_C-API.h index 6ec5ec87ad5..66a5a5147b3 100644 --- a/intern/audaspace/intern/AUD_C-API.h +++ b/intern/audaspace/intern/AUD_C-API.h @@ -299,13 +299,32 @@ extern int AUD_setSoundPitch(AUD_Handle* handle, float pitch); */ extern AUD_Device* AUD_openReadDevice(AUD_Specs specs); +/** + * Sets the main volume of a device. + * \param device The device. + * \param volume The new volume, must be between 0.0 and 1.0. + * \return Whether the action succeeded. + */ +extern int AUD_setDeviceVolume(AUD_Device* device, float volume); + /** * Plays back a sound file through a read device. * \param device The read device. * \param sound The handle of the sound file. - * \return Whether the sound could be played back. + * \return A handle to the played back sound. + */ +extern AUD_Handle* AUD_playDevice(AUD_Device* device, AUD_Sound* sound); + +/** + * Sets the volume of a played back sound of a read device. + * \param device The read device. + * \param handle The handle to the sound. + * \param volume The new volume, must be between 0.0 and 1.0. + * \return Whether the action succeeded. */ -extern int AUD_playDevice(AUD_Device* device, AUD_Sound* sound); +extern int AUD_setDeviceSoundVolume(AUD_Device* device, + AUD_Handle* handle, + float volume); /** * Reads the next samples into the supplied buffer. 
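Taken together, the AUD_C-API changes above let callers drive a read device directly: AUD_playDevice() now returns the AUD_Handle* that the new AUD_setDeviceSoundVolume() needs, and AUD_setDeviceVolume() controls the device's master volume. A minimal sketch of the intended call sequence, using only the functions declared above (the wrapper name, the parameter values and the buffer handling are illustrative assumptions; obtaining the AUD_Specs and AUD_Sound*, and closing the device, are left to the caller):

#include "AUD_C-API.h"

// Mix one sound into a read device and pull 'length' rendered samples into 'buffer'.
static void mixdownSketch(AUD_Specs specs, AUD_Sound* sound, sample_t* buffer, int length)
{
    AUD_Device* device = AUD_openReadDevice(specs);      // device that is read from, not played back
    if (!device)
        return;

    AUD_setDeviceVolume(device, 0.8f);                   // master volume, 0.0 .. 1.0

    AUD_Handle* handle = AUD_playDevice(device, sound);  // now returns a handle instead of an int
    if (handle)
        AUD_setDeviceSoundVolume(device, handle, 0.5f);  // per-sound volume via that handle

    AUD_readDevice(device, buffer, length);              // render the next samples into 'buffer'
}

Returning the handle (instead of the old int success flag) is what makes the per-sound volume call possible, since AUD_setDeviceSoundVolume() identifies the playing sound by its handle.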
diff --git a/intern/audaspace/intern/AUD_SoftwareDevice.cpp b/intern/audaspace/intern/AUD_SoftwareDevice.cpp index 174ff8c8979..42a90a6f15e 100644 --- a/intern/audaspace/intern/AUD_SoftwareDevice.cpp +++ b/intern/audaspace/intern/AUD_SoftwareDevice.cpp @@ -94,51 +94,53 @@ void AUD_SoftwareDevice::mix(sample_t* buffer, int length) { lock(); - AUD_SoftwareHandle* sound; - int len; - sample_t* buf; - int sample_size = AUD_SAMPLE_SIZE(m_specs); - std::list stopSounds; - - // for all sounds - AUD_HandleIterator it = m_playingSounds->begin(); - while(it != m_playingSounds->end()) { - sound = *it; - // increment the iterator to make sure it's valid, - // in case the sound gets deleted after stopping - ++it; + AUD_SoftwareHandle* sound; + int len; + sample_t* buf; + int sample_size = AUD_SAMPLE_SIZE(m_specs); + std::list stopSounds; + + // for all sounds + AUD_HandleIterator it = m_playingSounds->begin(); + while(it != m_playingSounds->end()) + { + sound = *it; + // increment the iterator to make sure it's valid, + // in case the sound gets deleted after stopping + ++it; - // get the buffer from the source - len = length; - sound->reader->read(len, buf); + // get the buffer from the source + len = length; + sound->reader->read(len, buf); - m_mixer->add(buf, sound->reader->getSpecs(), len, sound->volume); + m_mixer->add(buf, sound->reader->getSpecs(), len, sound->volume); - // in case the end of the sound is reached - if(len < length) - { - if(sound->keep) - pause(sound); - else - stopSounds.push_back(sound); + // in case the end of the sound is reached + if(len < length) + { + if(sound->keep) + pause(sound); + else + stopSounds.push_back(sound); + } } - } - // fill with silence - if(m_specs.format == AUD_FORMAT_U8) - memset(buffer, 0x80, length * sample_size); - else - memset(buffer, 0, length * sample_size); + // fill with silence + if(m_specs.format == AUD_FORMAT_U8) + memset(buffer, 0x80, length * sample_size); + else + memset(buffer, 0, length * sample_size); - // superpose - m_mixer->superpose(buffer, length, m_volume); + // superpose + m_mixer->superpose(buffer, length, m_volume); - while(!stopSounds.empty()) - { - sound = stopSounds.front(); - stopSounds.pop_front(); - stop(sound); + while(!stopSounds.empty()) + { + sound = stopSounds.front(); + stopSounds.pop_front(); + stop(sound); + } } unlock(); @@ -201,8 +203,11 @@ AUD_Handle* AUD_SoftwareDevice::play(AUD_IFactory* factory, bool keep) bool AUD_SoftwareDevice::pause(AUD_Handle* handle) { - // only songs that are played can be paused + bool result = false; + lock(); + + // only songs that are played can be paused for(AUD_HandleIterator i = m_playingSounds->begin(); i != m_playingSounds->end(); i++) { @@ -212,18 +217,23 @@ bool AUD_SoftwareDevice::pause(AUD_Handle* handle) m_playingSounds->erase(i); if(m_playingSounds->empty()) playing(m_playback = false); - unlock(); - return true; + result = true; + break; } } + unlock(); - return false; + + return result; } bool AUD_SoftwareDevice::resume(AUD_Handle* handle) { - // only songs that are paused can be resumed + bool result = false; + lock(); + + // only songs that are paused can be resumed for(AUD_HandleIterator i = m_pausedSounds->begin(); i != m_pausedSounds->end(); i++) { @@ -233,17 +243,22 @@ bool AUD_SoftwareDevice::resume(AUD_Handle* handle) m_pausedSounds->erase(i); if(!m_playback) playing(m_playback = true); - unlock(); - return true; + result = true; + break; } } + unlock(); - return false; + + return result; } bool AUD_SoftwareDevice::stop(AUD_Handle* handle) { + bool result = 
false; + lock(); + for(AUD_HandleIterator i = m_playingSounds->begin(); i != m_playingSounds->end(); i++) { @@ -254,37 +269,46 @@ bool AUD_SoftwareDevice::stop(AUD_Handle* handle) m_playingSounds->erase(i); if(m_playingSounds->empty()) playing(m_playback = false); - unlock(); - return true; + result = true; + break; } } - for(AUD_HandleIterator i = m_pausedSounds->begin(); - i != m_pausedSounds->end(); i++) + if(!result) { - if(*i == handle) + for(AUD_HandleIterator i = m_pausedSounds->begin(); + i != m_pausedSounds->end(); i++) { - delete (*i)->reader; AUD_DELETE("reader") - delete *i; AUD_DELETE("handle") - m_pausedSounds->erase(i); - unlock(); - return true; + if(*i == handle) + { + delete (*i)->reader; AUD_DELETE("reader") + delete *i; AUD_DELETE("handle") + m_pausedSounds->erase(i); + result = true; + break; + } } } + unlock(); - return false; + + return result; } bool AUD_SoftwareDevice::setKeep(AUD_Handle* handle, bool keep) { + bool result = false; + lock(); + if(isValid(handle)) { ((AUD_SoftwareHandle*)handle)->keep = keep; - unlock(); - return true; + result = true; } + unlock(); - return false; + + return result; } bool AUD_SoftwareDevice::sendMessage(AUD_Handle* handle, AUD_Message &message) @@ -312,16 +336,18 @@ bool AUD_SoftwareDevice::seek(AUD_Handle* handle, float position) { lock(); + bool result = false; + if(isValid(handle)) { AUD_IReader* reader = ((AUD_SoftwareHandle*)handle)->reader; reader->seek((int)(position * reader->getSpecs().rate)); - unlock(); - return true; + result = true; } unlock(); - return false; + + return result; } float AUD_SoftwareDevice::getPosition(AUD_Handle* handle) @@ -337,32 +363,41 @@ float AUD_SoftwareDevice::getPosition(AUD_Handle* handle) } unlock(); + return position; } AUD_Status AUD_SoftwareDevice::getStatus(AUD_Handle* handle) { + AUD_Status status = AUD_STATUS_INVALID; + lock(); + for(AUD_HandleIterator i = m_playingSounds->begin(); i != m_playingSounds->end(); i++) { if(*i == handle) { - unlock(); - return AUD_STATUS_PLAYING; + status = AUD_STATUS_PLAYING; + break; } } - for(AUD_HandleIterator i = m_pausedSounds->begin(); - i != m_pausedSounds->end(); i++) + if(status == AUD_STATUS_INVALID) { - if(*i == handle) + for(AUD_HandleIterator i = m_pausedSounds->begin(); + i != m_pausedSounds->end(); i++) { - unlock(); - return AUD_STATUS_PAUSED; + if(*i == handle) + { + status = AUD_STATUS_PAUSED; + break; + } } } + unlock(); - return AUD_STATUS_INVALID; + + return status; } void AUD_SoftwareDevice::lock() @@ -384,6 +419,8 @@ bool AUD_SoftwareDevice::checkCapability(int capability) bool AUD_SoftwareDevice::setCapability(int capability, void *value) { + bool result = false; + switch(capability) { case AUD_CAPS_VOLUME: @@ -407,18 +444,20 @@ bool AUD_SoftwareDevice::setCapability(int capability, void *value) handle->volume = 1.0; else if(handle->volume < 0.0) handle->volume = 0.0; - unlock(); - return true; + result = true; } unlock(); } break; } - return false; + + return result;; } bool AUD_SoftwareDevice::getCapability(int capability, void *value) { + bool result = false; + switch(capability) { case AUD_CAPS_VOLUME: @@ -429,16 +468,19 @@ bool AUD_SoftwareDevice::getCapability(int capability, void *value) case AUD_CAPS_SOURCE_VOLUME: { AUD_SourceCaps* caps = (AUD_SourceCaps*) value; + lock(); + if(isValid(caps->handle)) { caps->value = ((AUD_SoftwareHandle*)caps->handle)->volume; - unlock(); - return true; + result = true; } + unlock(); } break; } - return false; + + return result; } diff --git 
a/intern/audaspace/make/msvc_9_0/audaspace.vcproj b/intern/audaspace/make/msvc_9_0/audaspace.vcproj index 0d8ade43e07..93dcdd66628 100644 --- a/intern/audaspace/make/msvc_9_0/audaspace.vcproj +++ b/intern/audaspace/make/msvc_9_0/audaspace.vcproj @@ -42,6 +42,7 @@ /> setCursorGrab(grab?true:false); + return window->setCursorGrab(grab?true:false, warp?true:false, restore?true:false); } @@ -629,6 +629,13 @@ GHOST_TSuccess GHOST_SetWindowState(GHOST_WindowHandle windowhandle, } +GHOST_TSuccess GHOST_SetWindowModifiedState(GHOST_WindowHandle windowhandle, GHOST_TUns8 isUnsavedChanges) +{ + GHOST_IWindow* window = (GHOST_IWindow*) windowhandle; + + return window->setModifiedState(isUnsavedChanges); +} + GHOST_TSuccess GHOST_SetWindowOrder(GHOST_WindowHandle windowhandle, GHOST_TWindowOrder order) diff --git a/intern/ghost/intern/GHOST_DisplayManager.cpp b/intern/ghost/intern/GHOST_DisplayManager.cpp index a06692797c7..712ded7ea20 100644 --- a/intern/ghost/intern/GHOST_DisplayManager.cpp +++ b/intern/ghost/intern/GHOST_DisplayManager.cpp @@ -27,8 +27,6 @@ */ /** - - * $Id$ * Copyright (C) 2001 NaN Technologies B.V. * @author Maarten Gribnau * @date September 21, 2001 diff --git a/intern/ghost/intern/GHOST_ISystem.cpp b/intern/ghost/intern/GHOST_ISystem.cpp index 9329e68132b..fc338c182a9 100644 --- a/intern/ghost/intern/GHOST_ISystem.cpp +++ b/intern/ghost/intern/GHOST_ISystem.cpp @@ -44,7 +44,11 @@ # include "GHOST_SystemWin32.h" #else # ifdef __APPLE__ -# include "GHOST_SystemCarbon.h" +# ifdef GHOST_COCOA +# include "GHOST_SystemCocoa.h" +# else +# include "GHOST_SystemCarbon.h" +# endif # else # include "GHOST_SystemX11.h" # endif @@ -62,7 +66,11 @@ GHOST_TSuccess GHOST_ISystem::createSystem() m_system = new GHOST_SystemWin32 (); #else # ifdef __APPLE__ - m_system = new GHOST_SystemCarbon (); +# ifdef GHOST_COCOA + m_system = new GHOST_SystemCocoa (); +# else + m_system = new GHOST_SystemCarbon (); +# endif # else m_system = new GHOST_SystemX11 (); # endif diff --git a/intern/ghost/intern/GHOST_System.cpp b/intern/ghost/intern/GHOST_System.cpp index 229744e2000..84298d3e3ff 100644 --- a/intern/ghost/intern/GHOST_System.cpp +++ b/intern/ghost/intern/GHOST_System.cpp @@ -291,7 +291,7 @@ GHOST_TSuccess GHOST_System::init() #ifdef GHOST_DEBUG if (m_eventManager) { m_eventPrinter = new GHOST_EventPrinter(); - //m_eventManager->addConsumer(m_eventPrinter); + m_eventManager->addConsumer(m_eventPrinter); } #endif // GHOST_DEBUG diff --git a/intern/ghost/intern/GHOST_SystemCarbon.cpp b/intern/ghost/intern/GHOST_SystemCarbon.cpp index fb1b96fcbc7..57d6f6c06cc 100644 --- a/intern/ghost/intern/GHOST_SystemCarbon.cpp +++ b/intern/ghost/intern/GHOST_SystemCarbon.cpp @@ -788,21 +788,21 @@ OSStatus GHOST_SystemCarbon::handleTabletEvent(EventRef event) switch(tabletProximityRecord.pointerType) { case 1: /* stylus */ - ct.Active = 1; + ct.Active = GHOST_kTabletModeStylus; break; case 2: /* puck, not supported so far */ - ct.Active = 0; + ct.Active = GHOST_kTabletModeNone; break; case 3: /* eraser */ - ct.Active = 2; + ct.Active = GHOST_kTabletModeEraser; break; default: - ct.Active = 0; + ct.Active = GHOST_kTabletModeNone; break; } } else { // pointer is leaving - return to mouse - ct.Active = 0; + ct.Active = GHOST_kTabletModeNone; } } } diff --git a/intern/ghost/intern/GHOST_SystemWin32.cpp b/intern/ghost/intern/GHOST_SystemWin32.cpp index 8513d056795..2e89be40bcb 100644 --- a/intern/ghost/intern/GHOST_SystemWin32.cpp +++ b/intern/ghost/intern/GHOST_SystemWin32.cpp @@ -39,7 +39,6 @@ #endif #include 
"GHOST_SystemWin32.h" -//#include //for printf() // win64 doesn't define GWL_USERDATA #ifdef WIN32 @@ -61,6 +60,23 @@ #define WHEEL_DELTA 120 /* Value for rolling one detent, (old convention! MS changed it) */ #endif // WHEEL_DELTA +/* + * Defines for mouse buttons 4 and 5 aka xbutton1 and xbutton2. + * MSDN: Declared in Winuser.h, include Windows.h + * This does not seem to work with MinGW so we define our own here. + */ +#ifndef XBUTTON1 +#define XBUTTON1 0x0001 +#endif // XBUTTON1 +#ifndef XBUTTON2 +#define XBUTTON2 0x0002 +#endif // XBUTTON2 +#ifndef WM_XBUTTONUP +#define WM_XBUTTONUP 524 +#endif // WM_XBUTTONUP +#ifndef WM_XBUTTONDOWN +#define WM_XBUTTONDOWN 523 +#endif // WM_XBUTTONDOWN #include "GHOST_Debug.h" #include "GHOST_DisplayManagerWin32.h" @@ -672,6 +688,14 @@ LRESULT WINAPI GHOST_SystemWin32::s_wndProc(HWND hwnd, UINT msg, WPARAM wParam, window->registerMouseClickEvent(true); event = processButtonEvent(GHOST_kEventButtonDown, window, GHOST_kButtonMaskRight); break; + case WM_XBUTTONDOWN: + window->registerMouseClickEvent(true); + if ((short) HIWORD(wParam) == XBUTTON1){ + event = processButtonEvent(GHOST_kEventButtonDown, window, GHOST_kButtonMaskButton4); + }else if((short) HIWORD(wParam) == XBUTTON2){ + event = processButtonEvent(GHOST_kEventButtonDown, window, GHOST_kButtonMaskButton5); + } + break; case WM_LBUTTONUP: window->registerMouseClickEvent(false); event = processButtonEvent(GHOST_kEventButtonUp, window, GHOST_kButtonMaskLeft); @@ -684,6 +708,14 @@ LRESULT WINAPI GHOST_SystemWin32::s_wndProc(HWND hwnd, UINT msg, WPARAM wParam, window->registerMouseClickEvent(false); event = processButtonEvent(GHOST_kEventButtonUp, window, GHOST_kButtonMaskRight); break; + case WM_XBUTTONUP: + window->registerMouseClickEvent(false); + if ((short) HIWORD(wParam) == XBUTTON1){ + event = processButtonEvent(GHOST_kEventButtonUp, window, GHOST_kButtonMaskButton4); + }else if((short) HIWORD(wParam) == XBUTTON2){ + event = processButtonEvent(GHOST_kEventButtonUp, window, GHOST_kButtonMaskButton5); + } + break; case WM_MOUSEMOVE: event = processCursorEvent(GHOST_kEventCursorMove, window); break; diff --git a/intern/ghost/intern/GHOST_SystemX11.cpp b/intern/ghost/intern/GHOST_SystemX11.cpp index 5dba76adb02..8c87abf16bc 100644 --- a/intern/ghost/intern/GHOST_SystemX11.cpp +++ b/intern/ghost/intern/GHOST_SystemX11.cpp @@ -374,12 +374,12 @@ GHOST_SystemX11::processEvent(XEvent *xe) // Only generate a single expose event // per read of the event queue. 
- g_event = new + g_event = new GHOST_Event( getMilliSeconds(), GHOST_kEventWindowUpdate, window - ); + ); } break; } @@ -388,14 +388,42 @@ GHOST_SystemX11::processEvent(XEvent *xe) { XMotionEvent &xme = xe->xmotion; - g_event = new - GHOST_EventCursor( - getMilliSeconds(), - GHOST_kEventCursorMove, - window, - xme.x_root, - xme.y_root - ); + if(window->getCursorWarp()) { + /* Calculate offscreen location and re-center the mouse */ + GHOST_TInt32 x_warp, y_warp, x_new, y_new, x_accum, y_accum; + + window->getCursorWarpPos(x_warp, y_warp); + getCursorPosition(x_new, y_new); + + if(x_warp != x_new || y_warp != y_new) { + window->getCursorWarpAccum(x_accum, y_accum); + x_accum += x_new - x_warp; + y_accum += y_new - y_warp; + + window->setCursorWarpAccum(x_accum, y_accum); + setCursorPosition(x_warp, y_warp); /* reset */ + + g_event = new + GHOST_EventCursor( + getMilliSeconds(), + GHOST_kEventCursorMove, + window, + x_warp + x_accum, + y_warp + y_accum + ); + + } + } + else { + g_event = new + GHOST_EventCursor( + getMilliSeconds(), + GHOST_kEventCursorMove, + window, + xme.x_root, + xme.y_root + ); + } break; } @@ -444,10 +472,15 @@ GHOST_SystemX11::processEvent(XEvent *xe) XButtonEvent & xbe = xe->xbutton; GHOST_TButtonMask gbmask = GHOST_kButtonMaskLeft; - switch (xbe.button) { case Button1 : gbmask = GHOST_kButtonMaskLeft; break; case Button3 : gbmask = GHOST_kButtonMaskRight; break; + /* It seems events 6 and 7 are for horizontal scrolling. + * you can re-order button mapping like this... (swaps 6,7 with 8,9) + * xmodmap -e "pointer = 1 2 3 4 5 8 9 6 7" + */ + case 8 : gbmask = GHOST_kButtonMaskButton4; break; /* Button4 is the wheel */ + case 9 : gbmask = GHOST_kButtonMaskButton5; break; /* Button5 is a wheel too */ default: case Button2 : gbmask = GHOST_kButtonMaskMiddle; break; } @@ -684,12 +717,12 @@ GHOST_SystemX11::processEvent(XEvent *xe) { XProximityNotifyEvent* data = (XProximityNotifyEvent*)xe; if(data->deviceid == window->GetXTablet().StylusID) - window->GetXTablet().CommonData.Active= 1; + window->GetXTablet().CommonData.Active= GHOST_kTabletModeStylus; else if(data->deviceid == window->GetXTablet().EraserID) - window->GetXTablet().CommonData.Active= 2; + window->GetXTablet().CommonData.Active= GHOST_kTabletModeEraser; } else if(xe->type == window->GetXTablet().ProxOutEvent) - window->GetXTablet().CommonData.Active= 0; + window->GetXTablet().CommonData.Active= GHOST_kTabletModeNone; break; } diff --git a/intern/ghost/intern/GHOST_Window.cpp b/intern/ghost/intern/GHOST_Window.cpp index dee890830a1..94feb83e003 100644 --- a/intern/ghost/intern/GHOST_Window.cpp +++ b/intern/ghost/intern/GHOST_Window.cpp @@ -27,8 +27,6 @@ */ /** - - * $Id$ * Copyright (C) 2001 NaN Technologies B.V. 
* @author Maarten Gribnau * @date May 10, 2001 @@ -50,10 +48,16 @@ GHOST_Window::GHOST_Window( : m_drawingContextType(type), m_cursorVisible(true), - m_cursorGrabbed(true), + m_cursorGrabbed(false), + m_cursorWarp(false), m_cursorShape(GHOST_kStandardCursorDefault), m_stereoVisual(stereoVisual) { + m_isUnsavedChanges = false; + + m_cursorWarpAccumPos[0] = 0; + m_cursorWarpAccumPos[1] = 0; + m_fullScreen = state == GHOST_kWindowStateFullScreen; if (m_fullScreen) { m_fullScreenWidth = width; @@ -94,12 +98,12 @@ GHOST_TSuccess GHOST_Window::setCursorVisibility(bool visible) } } -GHOST_TSuccess GHOST_Window::setCursorGrab(bool grab) +GHOST_TSuccess GHOST_Window::setCursorGrab(bool grab, bool warp, bool restore) { if(m_cursorGrabbed == grab) return GHOST_kSuccess; - if (setWindowCursorGrab(grab)) { + if (setWindowCursorGrab(grab, warp, restore)) { m_cursorGrabbed = grab; return GHOST_kSuccess; } @@ -139,3 +143,15 @@ GHOST_TSuccess GHOST_Window::setCustomCursorShape(GHOST_TUns8 *bitmap, GHOST_TUn } } + +GHOST_TSuccess GHOST_Window::setModifiedState(bool isUnsavedChanges) +{ + m_isUnsavedChanges = isUnsavedChanges; + + return GHOST_kSuccess; +} + +bool GHOST_Window::getModifiedState() +{ + return m_isUnsavedChanges; +} diff --git a/intern/ghost/intern/GHOST_Window.h b/intern/ghost/intern/GHOST_Window.h index 88178bae5b3..786918716c5 100644 --- a/intern/ghost/intern/GHOST_Window.h +++ b/intern/ghost/intern/GHOST_Window.h @@ -158,6 +158,10 @@ public: * @return The visibility state of the cursor. */ inline virtual bool getCursorVisibility() const; + inline virtual bool getCursorWarp() const; + inline virtual bool getCursorWarpPos(GHOST_TInt32 &x, GHOST_TInt32 &y) const; + inline virtual bool getCursorWarpAccum(GHOST_TInt32 &x, GHOST_TInt32 &y) const; + inline virtual bool setCursorWarpAccum(GHOST_TInt32 x, GHOST_TInt32 y); /** * Shows or hides the cursor. @@ -171,8 +175,21 @@ public: * @param grab The new grab state of the cursor. * @return Indication of success. */ - virtual GHOST_TSuccess setCursorGrab(bool grab); + virtual GHOST_TSuccess setCursorGrab(bool grab, bool warp, bool restore); + /** + * Sets the window "modified" status, indicating unsaved changes + * @param isUnsavedChanges Unsaved changes or not + * @return Indication of success. + */ + virtual GHOST_TSuccess setModifiedState(bool isUnsavedChanges); + + /** + * Gets the window "modified" status, indicating unsaved changes + * @return True if there are unsaved changes + */ + virtual bool getModifiedState(); + /** * Returns the type of drawing context used in this window. * @return The current type of drawing context. @@ -230,7 +247,7 @@ protected: * Sets the cursor grab on the window using * native window system calls. */ - virtual GHOST_TSuccess setWindowCursorGrab(bool grab) { return GHOST_kSuccess; }; + virtual GHOST_TSuccess setWindowCursorGrab(bool grab, bool warp, bool restore) { return GHOST_kSuccess; }; /** * Sets the cursor shape on the window using @@ -259,9 +276,21 @@ protected: /** The current grabbed state of the cursor */ bool m_cursorGrabbed; + /** The current warped state of the cursor */ + bool m_cursorWarp; + + /** Initial grab location. */ + GHOST_TInt32 m_cursorWarpInitPos[2]; + + /** Accumulated offset from m_cursorWarpInitPos. */ + GHOST_TInt32 m_cursorWarpAccumPos[2]; + /** The current shape of the cursor */ GHOST_TStandardCursor m_cursorShape; + /** Modified state : are there unsaved changes */ + bool m_isUnsavedChanges; + /** Stores wether this is a full screen window. 
*/ bool m_fullScreen; @@ -288,6 +317,42 @@ inline bool GHOST_Window::getCursorVisibility() const return m_cursorVisible; } +inline bool GHOST_Window::getCursorWarp() const +{ + return m_cursorWarp; +} + +inline bool GHOST_Window::getCursorWarpPos(GHOST_TInt32 &x, GHOST_TInt32 &y) const +{ + if(m_cursorWarp==false) + return GHOST_kFailure; + + x= m_cursorWarpInitPos[0]; + y= m_cursorWarpInitPos[1]; + return GHOST_kSuccess; +} + +inline bool GHOST_Window::getCursorWarpAccum(GHOST_TInt32 &x, GHOST_TInt32 &y) const +{ + if(m_cursorWarp==false) + return GHOST_kFailure; + + x= m_cursorWarpAccumPos[0]; + y= m_cursorWarpAccumPos[1]; + return GHOST_kSuccess; +} + +inline bool GHOST_Window::setCursorWarpAccum(GHOST_TInt32 x, GHOST_TInt32 y) +{ + if(m_cursorWarp==false) + return GHOST_kFailure; + + m_cursorWarpAccumPos[0]= x; + m_cursorWarpAccumPos[1]= y; + + return GHOST_kSuccess; +} + inline GHOST_TStandardCursor GHOST_Window::getCursorShape() const { return m_cursorShape; diff --git a/intern/ghost/intern/GHOST_WindowCarbon.cpp b/intern/ghost/intern/GHOST_WindowCarbon.cpp index 87bb86a37e7..362e949a0a4 100644 --- a/intern/ghost/intern/GHOST_WindowCarbon.cpp +++ b/intern/ghost/intern/GHOST_WindowCarbon.cpp @@ -183,7 +183,7 @@ GHOST_WindowCarbon::GHOST_WindowCarbon( updateDrawingContext(); activateDrawingContext(); - m_tablet.Active = 0; + m_tablet.Active = GHOST_kTabletModeNone; } } diff --git a/intern/ghost/intern/GHOST_WindowManager.cpp b/intern/ghost/intern/GHOST_WindowManager.cpp index af96653db13..15ee41e3dce 100644 --- a/intern/ghost/intern/GHOST_WindowManager.cpp +++ b/intern/ghost/intern/GHOST_WindowManager.cpp @@ -187,10 +187,21 @@ void GHOST_WindowManager::setWindowInactive(const GHOST_IWindow* window) } - std::vector & -GHOST_WindowManager:: -getWindows( -){ +std::vector &GHOST_WindowManager::getWindows() +{ return m_windows; } + +bool GHOST_WindowManager::getAnyModifiedState() +{ + bool isAnyModified = false; + std::vector::iterator iter; + + for (iter = m_windows.begin(); iter != m_windows.end(); iter++) { + if ((*iter)->getModifiedState()) + isAnyModified = true; + } + + return isAnyModified; +} \ No newline at end of file diff --git a/intern/ghost/intern/GHOST_WindowManager.h b/intern/ghost/intern/GHOST_WindowManager.h index 46e80d2c603..3690ad41e2c 100644 --- a/intern/ghost/intern/GHOST_WindowManager.h +++ b/intern/ghost/intern/GHOST_WindowManager.h @@ -133,11 +133,13 @@ public: * this vector. 
Please do not destroy or add windows use the * interface above for this, */ + std::vector & getWindows(); - std::vector & - getWindows( - ); - + /** + * Return true if any windows has a modified status + * @return True if any window has unsaved changes + */ + bool getAnyModifiedState(); protected: /** The list of windows managed */ diff --git a/intern/ghost/intern/GHOST_WindowWin32.cpp b/intern/ghost/intern/GHOST_WindowWin32.cpp index 366adb3ab86..e2caf31edee 100644 --- a/intern/ghost/intern/GHOST_WindowWin32.cpp +++ b/intern/ghost/intern/GHOST_WindowWin32.cpp @@ -244,7 +244,7 @@ GHOST_WindowWin32::GHOST_WindowWin32( m_tablet = fpWTOpen( m_hWnd, &lc, TRUE ); if (m_tablet) { m_tabletData = new GHOST_TabletData(); - m_tabletData->Active = 0; + m_tabletData->Active = GHOST_kTabletModeNone; } } } @@ -704,7 +704,7 @@ void GHOST_WindowWin32::processWin32TabletInitEvent() } } - m_tabletData->Active = 0; + m_tabletData->Active = GHOST_kTabletModeNone; } } } @@ -720,15 +720,15 @@ void GHOST_WindowWin32::processWin32TabletEvent(WPARAM wParam, LPARAM lParam) switch (pkt.pkCursor) { case 0: /* first device */ case 3: /* second device */ - m_tabletData->Active = 0; /* puck - not yet supported */ + m_tabletData->Active = GHOST_kTabletModeNone; /* puck - not yet supported */ break; case 1: case 4: - m_tabletData->Active = 1; /* stylus */ + m_tabletData->Active = GHOST_kTabletModeStylus; /* stylus */ break; case 2: case 5: - m_tabletData->Active = 2; /* eraser */ + m_tabletData->Active = GHOST_kTabletModeEraser; /* eraser */ break; } if (m_maxPressure > 0) { diff --git a/intern/ghost/intern/GHOST_WindowX11.cpp b/intern/ghost/intern/GHOST_WindowX11.cpp index 3aff9d64a17..d197b534352 100644 --- a/intern/ghost/intern/GHOST_WindowX11.cpp +++ b/intern/ghost/intern/GHOST_WindowX11.cpp @@ -414,6 +414,100 @@ static int ApplicationErrorHandler(Display *display, XErrorEvent *theEvent) { return 0 ; } +/* These C functions are copied from Wine 1.1.13's wintab.c */ +#define BOOL int +#define TRUE 1 +#define FALSE 0 + +static bool match_token(const char *haystack, const char *needle) +{ + const char *p, *q; + for (p = haystack; *p; ) + { + while (*p && isspace(*p)) + p++; + if (! *p) + break; + + for (q = needle; *q && *p && tolower(*p) == tolower(*q); q++) + p++; + if (! *q && (isspace(*p) || !*p)) + return TRUE; + + while (*p && ! isspace(*p)) + p++; + } + return FALSE; +} + +/* Determining if an X device is a Tablet style device is an imperfect science. +** We rely on common conventions around device names as well as the type reported +** by Wacom tablets. This code will likely need to be expanded for alternate tablet types +** +** Wintab refers to any device that interacts with the tablet as a cursor, +** (stylus, eraser, tablet mouse, airbrush, etc) +** this is not to be confused with wacom x11 configuration "cursor" device. +** Wacoms x11 config "cursor" refers to its device slot (which we mirror with +** our gSysCursors) for puck like devices (tablet mice essentially). 
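+** For illustration (hypothetical device names): is_stylus() below matches when the name or
+** type contains a whitespace-separated token from its whitelist, so "Wacom Intuos Pen stylus"
+** or a reported type of "STYLUS" match (the comparison is case-insensitive), while is_eraser()
+** only looks for the token "eraser"; a plain "USB Optical Mouse" matches neither.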
+*/ +#if 0 // unused +static BOOL is_tablet_cursor(const char *name, const char *type) +{ + int i; + static const char *tablet_cursor_whitelist[] = { + "wacom", + "wizardpen", + "acecad", + "tablet", + "cursor", + "stylus", + "eraser", + "pad", + NULL + }; + + for (i=0; tablet_cursor_whitelist[i] != NULL; i++) { + if (name && match_token(name, tablet_cursor_whitelist[i])) + return TRUE; + if (type && match_token(type, tablet_cursor_whitelist[i])) + return TRUE; + } + return FALSE; +} +#endif +static BOOL is_stylus(const char *name, const char *type) +{ + int i; + static const char* tablet_stylus_whitelist[] = { + "stylus", + "wizardpen", + "acecad", + NULL + }; + + for (i=0; tablet_stylus_whitelist[i] != NULL; i++) { + if (name && match_token(name, tablet_stylus_whitelist[i])) + return TRUE; + if (type && match_token(type, tablet_stylus_whitelist[i])) + return TRUE; + } + + return FALSE; +} + +static BOOL is_eraser(const char *name, const char *type) +{ + if (name && match_token(name, "eraser")) + return TRUE; + if (type && match_token(type, "eraser")) + return TRUE; + return FALSE; +} +#undef BOOL +#undef TRUE +#undef FALSE +/* end code copied from wine */ + void GHOST_WindowX11::initXInputDevices() { static XErrorHandler old_handler = (XErrorHandler) 0 ; @@ -423,28 +517,21 @@ void GHOST_WindowX11::initXInputDevices() if(version->present) { int device_count; XDeviceInfo* device_info = XListInputDevices(m_display, &device_count); - m_xtablet.StylusDevice = 0; - m_xtablet.EraserDevice = 0; - m_xtablet.CommonData.Active= 0; + m_xtablet.StylusDevice = NULL; + m_xtablet.EraserDevice = NULL; + m_xtablet.CommonData.Active= GHOST_kTabletModeNone; /* Install our error handler to override Xlib's termination behavior */ old_handler = XSetErrorHandler(ApplicationErrorHandler) ; for(int i=0; inum_classes; ++j) { if(ici->c_class==ValuatorClass) { +// printf("\t\tfound ValuatorClass\n"); XValuatorInfo* xvi = (XValuatorInfo*)ici; m_xtablet.PressureLevels = xvi->axes[2].max_value; @@ -469,11 +557,16 @@ void GHOST_WindowX11::initXInputDevices() m_xtablet.StylusID= 0; } } - if(type.find("eraser") != std::string::npos) { + else if(m_xtablet.EraserDevice==NULL && is_eraser(device_info[i].name, device_type)) { +// printf("\tfound eraser\n"); m_xtablet.EraserID= device_info[i].id; m_xtablet.EraserDevice = XOpenDevice(m_display, m_xtablet.EraserID); if (m_xtablet.EraserDevice == NULL) m_xtablet.EraserID= 0; } + + if(device_type) { + XFree((void*)device_type); + } } /* Restore handler */ @@ -1125,6 +1218,13 @@ GHOST_WindowX11:: XFreeCursor(m_display, m_custom_cursor); } + /* close tablet devices */ + if(m_xtablet.StylusDevice) + XCloseDevice(m_display, m_xtablet.StylusDevice); + + if(m_xtablet.EraserDevice) + XCloseDevice(m_display, m_xtablet.EraserDevice); + if (m_context) { if (m_context == s_firstContext) { s_firstContext = NULL; @@ -1300,12 +1400,49 @@ setWindowCursorVisibility( GHOST_TSuccess GHOST_WindowX11:: setWindowCursorGrab( - bool grab + bool grab, bool warp, bool restore ){ - if(grab) + if(grab) { + if(warp) { + m_system->getCursorPosition(m_cursorWarpInitPos[0], m_cursorWarpInitPos[1]); + + setCursorWarpAccum(0, 0); + setWindowCursorVisibility(false); + m_cursorWarp= true; + } XGrabPointer(m_display, m_window, True, ButtonPressMask| ButtonReleaseMask|PointerMotionMask, GrabModeAsync, GrabModeAsync, None, None, CurrentTime); - else + } + else { + if(m_cursorWarp) { /* are we exiting warp */ + setWindowCursorVisibility(true); + /* Almost works without but important otherwise the mouse GHOST location 
can be incorrect on exit */ + if(restore) { + GHOST_Rect bounds; + GHOST_TInt32 x_new, y_new, x_rel, y_rel; + + getClientBounds(bounds); + + x_new= m_cursorWarpInitPos[0]+m_cursorWarpAccumPos[0]; + y_new= m_cursorWarpInitPos[1]+m_cursorWarpAccumPos[1]; + + screenToClient(x_new, y_new, x_rel, y_rel); + + if(x_rel < 0) x_new = (x_new-x_rel) + 2; + if(y_rel < 0) y_new = (y_new-y_rel) + 2; + if(x_rel > bounds.getWidth()) x_new -= (x_rel-bounds.getWidth()) + 2; + if(y_rel > bounds.getHeight()) y_new -= (y_rel-bounds.getHeight()) + 2; + m_system->setCursorPosition(x_new, y_new); + + } + else { + m_system->setCursorPosition(m_cursorWarpInitPos[0], m_cursorWarpInitPos[1]); + } + + setCursorWarpAccum(0, 0); + m_cursorWarp= false; + } XUngrabPointer(m_display, CurrentTime); + } XFlush(m_display); diff --git a/intern/ghost/intern/GHOST_WindowX11.h b/intern/ghost/intern/GHOST_WindowX11.h index 6f8940bdcbb..eb0689ab410 100644 --- a/intern/ghost/intern/GHOST_WindowX11.h +++ b/intern/ghost/intern/GHOST_WindowX11.h @@ -252,10 +252,11 @@ protected: /** * Sets the cursor grab on the window using * native window system calls. + * @param warp Only used when grab is enabled, hides the mouse and allows gragging outside the screen. */ GHOST_TSuccess setWindowCursorGrab( - bool grab + bool grab, bool warp, bool restore ); /** diff --git a/intern/ghost/make/msvc_9_0/ghost.vcproj b/intern/ghost/make/msvc_9_0/ghost.vcproj index fa128786a90..6b3a49cfc9c 100644 --- a/intern/ghost/make/msvc_9_0/ghost.vcproj +++ b/intern/ghost/make/msvc_9_0/ghost.vcproj @@ -42,6 +42,7 @@ /> +#include +#include + +namespace iTaSC { + +// a joint constraint is characterized by 5 values: tolerance, K, alpha, yd, yddot +static const unsigned int constraintCacheSize = 5; +std::string Armature::m_root = "root"; + +Armature::Armature(): + ControlledObject(), + m_tree(), + m_njoint(0), + m_nconstraint(0), + m_noutput(0), + m_neffector(0), + m_finalized(false), + m_cache(NULL), + m_buf(NULL), + m_qCCh(-1), + m_qCTs(0), + m_yCCh(-1), + m_yCTs(0), + m_qKdl(), + m_oldqKdl(), + m_newqKdl(), + m_qdotKdl(), + m_jac(NULL), + m_armlength(0.0), + m_jacsolver(NULL), + m_fksolver(NULL) +{ +} + +Armature::~Armature() +{ + if (m_jac) + delete m_jac; + if (m_jacsolver) + delete m_jacsolver; + if (m_fksolver) + delete m_fksolver; + for (JointConstraintList::iterator it=m_constraints.begin(); it != m_constraints.end(); it++) { + if (*it != NULL) + delete (*it); + } + if (m_buf) + delete [] m_buf; + m_constraints.clear(); +} + +Armature::JointConstraint_struct::JointConstraint_struct(SegmentMap::const_iterator _segment, unsigned int _y_nr, ConstraintCallback _function, void* _param, bool _freeParam, bool _substep): + segment(_segment), value(), values(), function(_function), y_nr(_y_nr), param(_param), freeParam(_freeParam), substep(_substep) +{ + memset(values, 0, sizeof(values)); + memset(value, 0, sizeof(value)); + values[0].feedback = 20.0; + values[1].feedback = 20.0; + values[2].feedback = 20.0; + values[0].tolerance = 1.0; + values[1].tolerance = 1.0; + values[2].tolerance = 1.0; + values[0].values = &value[0]; + values[1].values = &value[1]; + values[2].values = &value[2]; + values[0].number = 1; + values[1].number = 1; + values[2].number = 1; + switch (segment->second.segment.getJoint().getType()) { + case Joint::RotX: + value[0].id = ID_JOINT_RX; + values[0].id = ID_JOINT_RX; + v_nr = 1; + break; + case Joint::RotY: + value[0].id = ID_JOINT_RY; + values[0].id = ID_JOINT_RY; + v_nr = 1; + break; + case Joint::RotZ: + value[0].id = ID_JOINT_RZ; + 
values[0].id = ID_JOINT_RZ; + v_nr = 1; + break; + case Joint::TransX: + value[0].id = ID_JOINT_TX; + values[0].id = ID_JOINT_TX; + v_nr = 1; + break; + case Joint::TransY: + value[0].id = ID_JOINT_TY; + values[0].id = ID_JOINT_TY; + v_nr = 1; + break; + case Joint::TransZ: + value[0].id = ID_JOINT_TZ; + values[0].id = ID_JOINT_TZ; + v_nr = 1; + break; + case Joint::Sphere: + values[0].id = value[0].id = ID_JOINT_RX; + values[1].id = value[1].id = ID_JOINT_RY; + values[2].id = value[2].id = ID_JOINT_RZ; + v_nr = 3; + break; + case Joint::Swing: + values[0].id = value[0].id = ID_JOINT_RX; + values[1].id = value[1].id = ID_JOINT_RZ; + v_nr = 2; + break; + case Joint::None: + break; + } +} + +Armature::JointConstraint_struct::~JointConstraint_struct() +{ + if (freeParam && param) + free(param); +} + +void Armature::initCache(Cache *_cache) +{ + m_cache = _cache; + m_qCCh = -1; + m_yCCh = -1; + m_buf = NULL; + if (m_cache) { + // add a special channel for the joint + m_qCCh = m_cache->addChannel(this, "q", m_qKdl.rows()*sizeof(double)); +#if 0 + // for the constraints, instead of creating many different channels, we will + // create a single channel for all the constraints + if (m_nconstraint) { + m_yCCh = m_cache->addChannel(this, "y", m_nconstraint*constraintCacheSize*sizeof(double)); + m_buf = new double[m_nconstraint*constraintCacheSize]; + } + // store the initial cache position at timestamp 0 + pushConstraints(0); +#endif + pushQ(0); + } +} + +void Armature::pushQ(CacheTS timestamp) +{ + if (m_qCCh >= 0) { + // try to keep the cache if the joints are the same + m_cache->addCacheVectorIfDifferent(this, m_qCCh, timestamp, &m_qKdl(0), m_qKdl.rows(), KDL::epsilon); + m_qCTs = timestamp; + } +} + +/* return true if a m_cache position was loaded */ +bool Armature::popQ(CacheTS timestamp) +{ + if (m_qCCh >= 0) { + double* item; + item = (double*)m_cache->getPreviousCacheItem(this, m_qCCh, ×tamp); + if (item && m_qCTs != timestamp) { + double& q = m_qKdl(0); + memcpy(&q, item, m_qKdl.rows()*sizeof(q)); + m_qCTs = timestamp; + // changing the joint => recompute the jacobian + updateJacobian(); + } + return (item) ? true : false; + } + return true; +} +#if 0 +void Armature::pushConstraints(CacheTS timestamp) +{ + if (m_yCCh >= 0) { + double *buf = NULL; + if (m_nconstraint) { + double *item = m_buf; + for (unsigned int i=0; ivalues.feedback; + *item++ = pConstraint->values.tolerance; + *item++ = pConstraint->value.yd; + *item++ = pConstraint->value.yddot; + *item++ = pConstraint->values.alpha; + } + } + m_cache->addCacheVectorIfDifferent(this, m_yCCh, timestamp, m_buf, m_nconstraint*constraintCacheSize, KDL::epsilon); + m_yCTs = timestamp; + } +} + +/* return true if a cache position was loaded */ +bool Armature::popConstraints(CacheTS timestamp) +{ + if (m_yCCh >= 0) { + double *item = (double*)m_cache->getPreviousCacheItem(this, m_yCCh, ×tamp); + if (item && m_yCTs != timestamp) { + for (unsigned int i=0; ifunction != Joint1DOFLimitCallback) { + pConstraint->values.feedback = *item++; + pConstraint->values.tolerance = *item++; + pConstraint->value.yd = *item++; + pConstraint->value.yddot = *item++; + pConstraint->values.alpha = *item++; + } else { + item += constraintCacheSize; + } + } + m_yCTs = timestamp; + } + return (item) ? 
true : false; + } + return true; +} +#endif + +bool Armature::addSegment(const std::string& segment_name, const std::string& hook_name, const Joint& joint, const double& q_rest, const Frame& f_tip, const Inertia& M) +{ + if (m_finalized) + return false; + + Segment segment(joint, f_tip, M); + if (!m_tree.addSegment(segment, segment_name, hook_name)) + return false; + int ndof = joint.getNDof(); + for (int dof=0; dofsecond.segment.getJoint(); + if (q_size < p_joint->getNDof()) + return false; + p_tip = &sit->second.segment.getFrameToTip(); + for (unsigned int dof=0; dofgetNDof(); dof++) { + (&q_rest)[dof] = m_joints[sit->second.q_nr+dof].rest; + (&q)[dof] = m_qKdl(sit->second.q_nr+dof); + } + return true; +} + +double Armature::getMaxJointChange() +{ + if (!m_finalized) + return 0.0; + double maxJoint = 0.0; + for (unsigned int i=0; i maxDelta) + maxDelta = delta; + delta = twist.vel.Norm(); + if (delta > maxDelta) + maxDelta = delta; + } + return maxDelta; +} + +int Armature::addConstraint(const std::string& segment_name, ConstraintCallback _function, void* _param, bool _freeParam, bool _substep) +{ + SegmentMap::const_iterator segment_it = m_tree.getSegment(segment_name); + // not suitable for NDof joints + if (segment_it == m_tree.getSegments().end()) { + if (_freeParam && _param) + free(_param); + return -1; + } + JointConstraintList::iterator constraint_it; + JointConstraint_struct* pConstraint; + int iConstraint; + for (iConstraint=0, constraint_it=m_constraints.begin(); constraint_it != m_constraints.end(); constraint_it++, iConstraint++) { + pConstraint = *constraint_it; + if (pConstraint->segment == segment_it) { + // redefining a constraint + if (pConstraint->freeParam && pConstraint->param) { + free(pConstraint->param); + } + pConstraint->function = _function; + pConstraint->param = _param; + pConstraint->freeParam = _freeParam; + pConstraint->substep = _substep; + return iConstraint; + } + } + if (m_finalized) { + if (_freeParam && _param) + free(_param); + return -1; + } + // new constraint, append + pConstraint = new JointConstraint_struct(segment_it, m_noutput, _function, _param, _freeParam, _substep); + m_constraints.push_back(pConstraint); + m_noutput += pConstraint->v_nr; + return m_nconstraint++; +} + +int Armature::addLimitConstraint(const std::string& segment_name, unsigned int dof, double _min, double _max) +{ + SegmentMap::const_iterator segment_it = m_tree.getSegment(segment_name); + if (segment_it == m_tree.getSegments().end()) + return -1; + const Joint& joint = segment_it->second.segment.getJoint(); + if (joint.getNDof() != 1 && joint.getType() != Joint::Swing) { + // not suitable for Sphere joints + return -1; + } + if ((joint.getNDof() == 1 && dof > 0) || (joint.getNDof() == 2 && dof > 1)) + return -1; + if (joint.getType() < Joint::TransX || joint.getType() == Joint::Swing) { + // for rotation joint, the limit is given in degree, convert to radian + _min *= KDL::deg2rad; + _max *= KDL::deg2rad; + } + Joint_struct& p_joint = m_joints[segment_it->second.q_nr+dof]; + p_joint.min = _min; + p_joint.max = _max; + p_joint.useLimit = true; + return 0; +} + +int Armature::addEndEffector(const std::string& name) +{ + const SegmentMap& segments = m_tree.getSegments(); + if (segments.find(name) == segments.end()) + return -1; + + EffectorList::const_iterator it; + int ee; + for (it=m_effectors.begin(), ee=0; it!=m_effectors.end(); it++, ee++) { + if (it->name == name) + return ee; + } + if (m_finalized) + return -1; + Effector_struct effector(name); + 
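// register the new end effector and return its index; getPose() and getJq()
+ // later address effectors by this index +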
m_effectors.push_back(effector); + return m_neffector++; +} + +void Armature::finalize() +{ + unsigned int i, j, c; + if (m_finalized) + return; + initialize(m_njoint, m_noutput, m_neffector); + for (i=c=0; iv_nr; j++, c++) { + m_Cq(c,pConstraint->segment->second.q_nr+j) = 1.0; + m_Wy(c) = pConstraint->values[j].alpha/*/(pConstraint->values.tolerance*pConstraint->values.feedback)*/; + } + } + m_jacsolver= new KDL::TreeJntToJacSolver(m_tree); + m_fksolver = new KDL::TreeFkSolverPos_recursive(m_tree); + m_jac = new Jacobian(m_njoint); + m_qKdl.resize(m_njoint); + m_oldqKdl.resize(m_njoint); + m_newqKdl.resize(m_njoint); + m_qdotKdl.resize(m_njoint); + for (i=0; ifirst != "root") { + Frame tip = sit->second.segment.pose(m_qKdl(sit->second.q_nr)); + length += tip.p.Norm(); + sit = sit->second.parent; + } + if (length > m_armlength) + m_armlength = length; + } + if (m_armlength < KDL::epsilon) + m_armlength = KDL::epsilon; + m_finalized = true; +} + +void Armature::pushCache(const Timestamp& timestamp) +{ + if (!timestamp.substep && timestamp.cache) { + pushQ(timestamp.cacheTimestamp); + //pushConstraints(timestamp.cacheTimestamp); + } +} + +bool Armature::setJointArray(const KDL::JntArray& joints) +{ + if (!m_finalized) + return false; + if (joints.rows() != m_qKdl.rows()) + return false; + m_qKdl = joints; + updateJacobian(); + return true; +} + +const KDL::JntArray& Armature::getJointArray() +{ + return m_qKdl; +} + +bool Armature::updateJoint(const Timestamp& timestamp, JointLockCallback& callback) +{ + if (!m_finalized) + return false; + + // integration and joint limit + // for spherical joint we must use a more sophisticated method + unsigned int q_nr; + double* qdot=&m_qdotKdl(0); + double* q=&m_qKdl(0); + double* newq=&m_newqKdl(0); + double norm, qx, qz, CX, CZ, sx, sz; + bool locked = false; + int unlocked = 0; + + for (q_nr=0; q_nrlocked) { + switch (joint->type) { + case KDL::Joint::Swing: + { + KDL::Rotation base = KDL::Rot(KDL::Vector(q[0],0.0,q[1])); + (base*KDL::Rot(KDL::Vector(qdot[0],0.0,qdot[1])*timestamp.realTimestep)).GetXZRot().GetValue(newq); + if (joint[0].useLimit) { + if (joint[1].useLimit) { + // elliptical limit + sx = sz = 1.0; + qx = newq[0]; + qz = newq[1]; + // determine in which quadrant we are + if (qx > 0.0 && qz > 0.0) { + CX = joint[0].max; + CZ = joint[1].max; + } else if (qx <= 0.0 && qz > 0.0) { + CX = -joint[0].min; + CZ = joint[1].max; + qx = -qx; + sx = -1.0; + } else if (qx <= 0.0 && qz <= 0.0) { + CX = -joint[0].min; + CZ = -joint[1].min; + qx = -qx; + qz = -qz; + sx = sz = -1.0; + } else { + CX = joint[0].max; + CZ = -joint[0].min; + qz = -qz; + sz = -1.0; + } + if (CX < KDL::epsilon || CZ < KDL::epsilon) { + // quadrant is degenerated + if (qx > CX) { + newq[0] = CX*sx; + joint[0].locked = true; + } + if (qz > CZ) { + newq[1] = CZ*sz; + joint[0].locked = true; + } + } else { + // general case + qx /= CX; + qz /= CZ; + norm = KDL::sqrt(KDL::sqr(qx)+KDL::sqr(qz)); + if (norm > 1.0) { + norm = 1.0/norm; + newq[0] = qx*norm*CX*sx; + newq[1] = qz*norm*CZ*sz; + joint[0].locked = true; + } + } + } else { + // limit on X only + qx = newq[0]; + if (qx > joint[0].max) { + newq[0] = joint[0].max; + joint[0].locked = true; + } else if (qx < joint[0].min) { + newq[0] = joint[0].min; + joint[0].locked = true; + } + } + } else if (joint[1].useLimit) { + // limit on Z only + qz = newq[1]; + if (qz > joint[1].max) { + newq[1] = joint[1].max; + joint[0].locked = true; + } else if (qz < joint[1].min) { + newq[1] = joint[1].min; + joint[0].locked = true; + } + } + if 
(joint[0].locked) { + // check the difference from previous position + locked = true; + norm = KDL::sqr(newq[0]-q[0])+KDL::sqr(newq[1]-q[1]); + if (norm < KDL::epsilon2) { + // joint didn't move, no need to update the jacobian + callback.lockJoint(q_nr, 2); + } else { + // joint moved, compute the corresponding velocity + double deltaq[2]; + (base.Inverse()*KDL::Rot(KDL::Vector(newq[0],0.0,newq[1]))).GetXZRot().GetValue(deltaq); + deltaq[0] /= timestamp.realTimestep; + deltaq[1] /= timestamp.realTimestep; + callback.lockJoint(q_nr, 2, deltaq); + // no need to update the other joints, it will be done after next rerun + goto end_loop; + } + } else + unlocked++; + break; + } + case KDL::Joint::Sphere: + { + (KDL::Rot(KDL::Vector(q))*KDL::Rot(KDL::Vector(qdot)*timestamp.realTimestep)).GetRot().GetValue(newq); + // no limit on this joint + unlocked++; + break; + } + default: + for (unsigned int i=0; indof; i++) { + newq[i] = q[i]+qdot[i]*timestamp.realTimestep; + if (joint[i].useLimit) { + if (newq[i] > joint[i].max) { + newq[i] = joint[i].max; + joint[0].locked = true; + } else if (newq[i] < joint[i].min) { + newq[i] = joint[i].min; + joint[0].locked = true; + } + } + } + if (joint[0].locked) { + locked = true; + norm = 0.0; + // compute delta to locked position + for (unsigned int i=0; indof; i++) { + qdot[i] = newq[i] - q[i]; + norm += qdot[i]*qdot[i]; + } + if (norm < KDL::epsilon2) { + // joint didn't move, no need to update the jacobian + callback.lockJoint(q_nr, joint->ndof); + } else { + // solver needs velocity, compute equivalent velocity + for (unsigned int i=0; indof; i++) { + qdot[i] /= timestamp.realTimestep; + } + callback.lockJoint(q_nr, joint->ndof, qdot); + goto end_loop; + } + } else + unlocked++; + } + } + qdot += joint->ndof; + q += joint->ndof; + newq += joint->ndof; + q_nr += joint->ndof; + } +end_loop: + // check if there any other unlocked joint + for ( ; q_nrlocked) + unlocked++; + q_nr += joint->ndof; + } + // if all joints have been locked no need to run the solver again + return (unlocked) ? locked : false; +} + +void Armature::updateKinematics(const Timestamp& timestamp){ + + //Integrate m_qdot + if (!m_finalized) + return; + + // the new joint value have been computed already, just copy + memcpy(&m_qKdl(0), &m_newqKdl(0), sizeof(double)*m_qKdl.rows()); + pushCache(timestamp); + updateJacobian(); + // here update the desired output. + // We assume constant desired output for the joint limit constraint, no need to update it. +} + +void Armature::updateJacobian() +{ + //calculate pose and jacobian + for (unsigned int ee=0; eeJntToCart(m_qKdl,m_effectors[ee].pose,m_effectors[ee].name,m_root); + m_jacsolver->JntToJac(m_qKdl,*m_jac,m_effectors[ee].name); + // get the jacobian for the base point, to prepare transformation to world reference + changeRefPoint(*m_jac,-m_effectors[ee].pose.p,*m_jac); + //copy to Jq: + e_matrix& Jq = m_JqArray[ee]; + for(unsigned int i=0;i<6;i++) { + for(unsigned int j=0;j= m_nee) ? F_identity : m_effectors[ee].pose; +} + +bool Armature::getRelativeFrame(Frame& result, const std::string& segment_name, const std::string& base_name) +{ + if (!m_finalized) + return false; + return (m_fksolver->JntToCart(m_qKdl,result,segment_name,base_name) < 0) ? 
false : true; +} + +void Armature::updateControlOutput(const Timestamp& timestamp) +{ + if (!m_finalized) + return; + + + if (!timestamp.substep && !timestamp.reiterate && timestamp.interpolate) { + popQ(timestamp.cacheTimestamp); + //popConstraints(timestamp.cacheTimestamp); + } + + if (!timestamp.substep) { + // save previous joint state for getMaxJointChange() + memcpy(&m_oldqKdl(0), &m_qKdl(0), sizeof(double)*m_qKdl.rows()); + for (unsigned int i=0; isegment->second.q_nr; iv_nr; i++, nr++) { + *(double*)&pConstraint->value[i].y = m_qKdl(nr); + *(double*)&pConstraint->value[i].ydot = m_qdotKdl(nr); + } + if (pConstraint->function && (pConstraint->substep || (!timestamp.reiterate && !timestamp.substep))) { + (*pConstraint->function)(timestamp, pConstraint->values, pConstraint->v_nr, pConstraint->param); + } + // recompute the weight in any case, that's the most likely modification + for (i=0, nr=pConstraint->y_nr; iv_nr; i++, nr++) { + m_Wy(nr) = pConstraint->values[i].alpha/*/(pConstraint->values.tolerance*pConstraint->values.feedback)*/; + m_ydot(nr)=pConstraint->value[i].yddot+pConstraint->values[i].feedback*(pConstraint->value[i].yd-pConstraint->value[i].y); + } + } +} + +bool Armature::setControlParameter(unsigned int constraintId, unsigned int valueId, ConstraintAction action, double value, double timestep) +{ + unsigned int lastid, i; + if (constraintId == CONSTRAINT_ID_ALL) { + constraintId = 0; + lastid = m_nconstraint; + } else if (constraintId < m_nconstraint) { + lastid = constraintId+1; + } else { + return false; + } + for ( ; constraintIdv_nr; i++) { + switch (action) { + case ACT_TOLERANCE: + pConstraint->values[i].tolerance = value; + break; + case ACT_FEEDBACK: + pConstraint->values[i].feedback = value; + break; + case ACT_ALPHA: + pConstraint->values[i].alpha = value; + break; + default: + break; + } + } + } else { + for (i=0; iv_nr; i++) { + if (valueId == pConstraint->value[i].id) { + switch (action) { + case ACT_VALUE: + pConstraint->value[i].yd = value; + break; + case ACT_VELOCITY: + pConstraint->value[i].yddot = value; + break; + case ACT_TOLERANCE: + pConstraint->values[i].tolerance = value; + break; + case ACT_FEEDBACK: + pConstraint->values[i].feedback = value; + break; + case ACT_ALPHA: + pConstraint->values[i].alpha = value; + break; + case ACT_NONE: + break; + } + } + } + } + if (m_finalized) { + for (i=0; iv_nr; i++) + m_Wy(pConstraint->y_nr+i) = pConstraint->values[i].alpha/*/(pConstraint->values.tolerance*pConstraint->values.feedback)*/; + } + } + return true; +} + +} + diff --git a/intern/itasc/Armature.hpp b/intern/itasc/Armature.hpp new file mode 100644 index 00000000000..312ca1b28c3 --- /dev/null +++ b/intern/itasc/Armature.hpp @@ -0,0 +1,137 @@ +/* $Id: Armature.hpp 20853 2009-06-13 12:29:46Z ben2610 $ + * Armature.hpp + * + * Created on: Feb 3, 2009 + * Author: benoitbolsee + */ + +#ifndef ARMATURE_HPP_ +#define ARMATURE_HPP_ + +#include "ControlledObject.hpp" +#include "ConstraintSet.hpp" +#include "kdl/treejnttojacsolver.hpp" +#include "kdl/treefksolverpos_recursive.hpp" +#include + +namespace iTaSC { + +class Armature: public iTaSC::ControlledObject { +public: + Armature(); + virtual ~Armature(); + + bool addSegment(const std::string& segment_name, const std::string& hook_name, const Joint& joint, const double& q_rest, const Frame& f_tip=F_identity, const Inertia& M = Inertia::Zero()); + // general purpose constraint on joint + int addConstraint(const std::string& segment_name, ConstraintCallback _function, void* _param=NULL, bool 
_freeParam=false, bool _substep=false); + // specific limit constraint on joint + int addLimitConstraint(const std::string& segment_name, unsigned int dof, double _min, double _max); + double getMaxJointChange(); + double getMaxEndEffectorChange(); + bool getSegment(const std::string& segment_name, const unsigned int q_size, const Joint* &p_joint, double &q_rest, double &q, const Frame* &p_tip); + bool getRelativeFrame(Frame& result, const std::string& segment_name, const std::string& base_name=m_root); + + virtual void finalize(); + + virtual int addEndEffector(const std::string& name); + virtual const Frame& getPose(const unsigned int end_effector); + virtual bool updateJoint(const Timestamp& timestamp, JointLockCallback& callback); + virtual void updateKinematics(const Timestamp& timestamp); + virtual void pushCache(const Timestamp& timestamp); + virtual void updateControlOutput(const Timestamp& timestamp); + virtual bool setControlParameter(unsigned int constraintId, unsigned int valueId, ConstraintAction action, double value, double timestep=0.0); + virtual void initCache(Cache *_cache); + virtual bool setJointArray(const KDL::JntArray& joints); + virtual const KDL::JntArray& getJointArray(); + + virtual double getArmLength() + { + return m_armlength; + } + + struct Effector_struct { + std::string name; + Frame oldpose; + Frame pose; + Effector_struct(const std::string& _name) {name = _name; oldpose = pose = F_identity;} + }; + typedef std::vector EffectorList; + + enum ID { + ID_JOINT=1, + ID_JOINT_RX=2, + ID_JOINT_RY=3, + ID_JOINT_RZ=4, + ID_JOINT_TX=2, + ID_JOINT_TY=3, + ID_JOINT_TZ=4, + }; + struct JointConstraint_struct { + SegmentMap::const_iterator segment; + ConstraintSingleValue value[3]; + ConstraintValues values[3]; + ConstraintCallback function; + unsigned int v_nr; + unsigned int y_nr; // first coordinate of constraint in Y vector + void* param; + bool freeParam; + bool substep; + JointConstraint_struct(SegmentMap::const_iterator _segment, unsigned int _y_nr, ConstraintCallback _function, void* _param, bool _freeParam, bool _substep); + ~JointConstraint_struct(); + }; + typedef std::vector JointConstraintList; + + struct Joint_struct { + KDL::Joint::JointType type; + unsigned short ndof; + bool useLimit; + bool locked; + double rest; + double min; + double max; + + Joint_struct(KDL::Joint::JointType _type, unsigned int _ndof, double _rest) : + type(_type), ndof(_ndof), rest(_rest) { useLimit=locked=false; min=0.0; max=0.0; } + }; + typedef std::vector JointList; + +protected: + virtual void updateJacobian(); + +private: + static std::string m_root; + Tree m_tree; + unsigned int m_njoint; + unsigned int m_nconstraint; + unsigned int m_noutput; + unsigned int m_neffector; + bool m_finalized; + Cache* m_cache; + double *m_buf; + int m_qCCh; + CacheTS m_qCTs; + int m_yCCh; + CacheTS m_yCTs; + JntArray m_qKdl; + JntArray m_oldqKdl; + JntArray m_newqKdl; + JntArray m_qdotKdl; + Jacobian* m_jac; + double m_armlength; + + KDL::TreeJntToJacSolver* m_jacsolver; + KDL::TreeFkSolverPos_recursive* m_fksolver; + EffectorList m_effectors; + JointConstraintList m_constraints; + JointList m_joints; + + void pushQ(CacheTS timestamp); + bool popQ(CacheTS timestamp); + //void pushConstraints(CacheTS timestamp); + //bool popConstraints(CacheTS timestamp); + +}; + +} + +#endif /* ARMATURE_HPP_ */ diff --git a/intern/itasc/CMakeLists.txt b/intern/itasc/CMakeLists.txt new file mode 100644 index 00000000000..405d74d17ac --- /dev/null +++ b/intern/itasc/CMakeLists.txt @@ -0,0 +1,32 @@ +# $Id: 
CMakeLists.txt 19905 2009-04-23 13:29:54Z ben2610 $ +# ***** BEGIN GPL LICENSE BLOCK ***** +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +# +# The Original Code is Copyright (C) 2006, Blender Foundation +# All rights reserved. +# +# The Original Code is: all of this file. +# +# Contributor(s): Jacques Beaurain. +# +# ***** END GPL LICENSE BLOCK ***** + +SET(INC ../../extern/Eigen2) + +FILE(GLOB SRC *.cpp kdl/*.cpp kdl/utilities/*.cpp) + +BLENDERLIB(bf_ITASC "${SRC}" "${INC}") +#, libtype=['blender'], priority = [10] ) diff --git a/intern/itasc/Cache.cpp b/intern/itasc/Cache.cpp new file mode 100644 index 00000000000..ccd9cef4655 --- /dev/null +++ b/intern/itasc/Cache.cpp @@ -0,0 +1,620 @@ +/* $Id$ + * Cache.cpp + * + * Created on: Feb 24, 2009 + * Author: benoit bolsee + */ +#include +#include +#include +#include +#include "Cache.hpp" + +namespace iTaSC { + +CacheEntry::~CacheEntry() +{ + for (unsigned int id=0; id < m_count; id++) + m_channelArray[id].clear(); + if (m_channelArray) + free(m_channelArray); +} + +CacheItem *CacheChannel::_findBlock(CacheBuffer *buffer, unsigned short timeOffset, unsigned int *retBlock) +{ + // the timestamp is necessarily in this buffer + unsigned int lowBlock, highBlock, midBlock; + if (timeOffset <= buffer->lookup[0].m_timeOffset) { + // special case: the item is in the first block, search from start + *retBlock = 0; + return &buffer->m_firstItem; + } + // general case, the item is in the middle of the buffer + // before doing a dycotomic search, we will assume that timestamp + // are regularly spaced so that we can try to locate the block directly + highBlock = buffer->m_lastItemPositionW>>m_positionToBlockShiftW; + lowBlock = midBlock = (timeOffset*highBlock)/(buffer->m_lastTimestamp-buffer->m_firstTimestamp); + // give some space for security + if (lowBlock > 0) + lowBlock--; + if (timeOffset <= buffer->lookup[lowBlock].m_timeOffset) { + // bad guess, but we know this block is a good high block, just use it + highBlock = lowBlock; + lowBlock = 0; + } else { + // ok, good guess, now check the high block, give some space + if (midBlock < highBlock) + midBlock++; + if (timeOffset <= buffer->lookup[midBlock].m_timeOffset) { + // good guess, keep that block as the high block + highBlock = midBlock; + } + } + // the item is in a different block, do a dycotomic search + // the timestamp is alway > lowBlock and <= highBlock + while (1) { + midBlock = (lowBlock+highBlock)/2; + if (midBlock == lowBlock) { + // low block and high block are contigous, we can start search from the low block + break; + } else if (timeOffset <= buffer->lookup[midBlock].m_timeOffset) { + highBlock = midBlock; + } else { + lowBlock = midBlock; + } + } + assert (lowBlock != highBlock); + *retBlock = highBlock; + return CACHE_BLOCK_ITEM_ADDR(this,buffer,lowBlock); +} + +void CacheChannel::clear() +{ + CacheBuffer *buffer, *next; + 
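// free the whole buffer chain, then release the special timestamp-0 item +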
for (buffer=m_firstBuffer; buffer != 0; buffer = next) { + next = buffer->m_next; + free(buffer); + } + m_firstBuffer = NULL; + m_lastBuffer = NULL; + if (initItem) { + free(initItem); + initItem = NULL; + } +} + +CacheBuffer* CacheChannel::allocBuffer() +{ + CacheBuffer* buffer; + if (!m_busy) + return NULL; + buffer = (CacheBuffer*)malloc(CACHE_BUFFER_HEADER_SIZE+(m_bufferSizeW<<2)); + if (buffer) { + memset(buffer, 0, CACHE_BUFFER_HEADER_SIZE); + } + return buffer; +} + +CacheItem* CacheChannel::findItemOrLater(unsigned int timestamp, CacheBuffer **rBuffer) +{ + CacheBuffer* buffer; + CacheItem *item, *limit; + if (!m_busy) + return NULL; + if (timestamp == 0 && initItem) { + *rBuffer = NULL; + return initItem; + } + for (buffer=m_firstBuffer; buffer; buffer = buffer->m_next) { + if (buffer->m_firstFreePositionW == 0) + // buffer is empty, this must be the last and we didn't find the timestamp + return NULL; + if (timestamp < buffer->m_firstTimestamp) { + *rBuffer = buffer; + return &buffer->m_firstItem; + } + if (timestamp <= buffer->m_lastTimestamp) { + // the timestamp is necessarily in this buffer + unsigned short timeOffset = (unsigned short)(timestamp-buffer->m_firstTimestamp); + unsigned int highBlock; + item = _findBlock(buffer, timeOffset, &highBlock); + // now we do a linear search until we find a timestamp that is equal or higher + // we should normally always find an item but let's put a limit just in case + limit = CACHE_BLOCK_ITEM_ADDR(this,buffer,highBlock); + while (item<=limit && item->m_timeOffset < timeOffset ) + item = CACHE_NEXT_ITEM(item); + assert(item<=limit); + *rBuffer = buffer; + return item; + } + // search in next buffer + } + return NULL; +} + +CacheItem* CacheChannel::findItemEarlier(unsigned int timestamp, CacheBuffer **rBuffer) +{ + CacheBuffer *buffer, *prevBuffer; + CacheItem *item, *limit, *prevItem; + if (!m_busy) + return NULL; + if (timestamp == 0) + return NULL; + for (prevBuffer=NULL, buffer=m_firstBuffer; buffer; prevBuffer = buffer, buffer = buffer->m_next) { + if (buffer->m_firstFreePositionW == 0) + // buffer is empty, this must be the last and we didn't find the timestamp + return NULL; + if (timestamp <= buffer->m_firstTimestamp) { + if (prevBuffer == NULL) { + // no item before, except the initial item + *rBuffer = NULL; + return initItem; + } + // the item is necessarily the last one of previous buffer + *rBuffer = prevBuffer; + return CACHE_ITEM_ADDR(prevBuffer,prevBuffer->m_lastItemPositionW); + } + if (timestamp <= buffer->m_lastTimestamp) { + // the timestamp is necessarily in this buffer + unsigned short timeOffset = (unsigned short)(timestamp-buffer->m_firstTimestamp); + unsigned int highBlock; + item = _findBlock(buffer, timeOffset, &highBlock); + // now we do a linear search until we find a timestamp that is equal or higher + // we should normally always find an item but let's put a limit just in case + limit = CACHE_BLOCK_ITEM_ADDR(this,buffer,highBlock); + prevItem = NULL; + while (item<=limit && item->m_timeOffset < timeOffset) { + prevItem = item; + item = CACHE_NEXT_ITEM(item); + } + assert(item<=limit && prevItem!=NULL); + *rBuffer = buffer; + return prevItem; + } + // search in next buffer + } + // pass all buffer, the last item is the last item of the last buffer + if (prevBuffer == NULL) { + // no item before, except the initial item + *rBuffer = NULL; + return initItem; + } + // the item is necessarily the last one of previous buffer + *rBuffer = prevBuffer; + return 
CACHE_ITEM_ADDR(prevBuffer,prevBuffer->m_lastItemPositionW); +} + + +Cache::Cache() +{ +} + +Cache::~Cache() +{ + CacheMap::iterator it; + for (it=m_cache.begin(); it!=m_cache.end(); it=m_cache.begin()) { + deleteDevice(it->first); + } +} + +int Cache::addChannel(const void *device, const char *name, unsigned int maxItemSize) +{ + CacheMap::iterator it = m_cache.find(device); + CacheEntry *entry; + CacheChannel *channel; + unsigned int id; + + if (maxItemSize > 0x3FFF0) + return -1; + + if (it == m_cache.end()) { + // device does not exist yet, create a new entry + entry = new CacheEntry(); + if (entry == NULL) + return -1; + if (!m_cache.insert(CacheMap::value_type(device,entry)).second) + return -1; + } else { + entry = it->second; + } + // locate a channel with the same name and reuse + for (channel=entry->m_channelArray, id=0; idm_count; id++, channel++) { + if (channel->m_busy && !strcmp(name, channel->m_name)) { + // make this channel free again + deleteChannel(device, id); + // there can only be one channel with the same name + break; + } + } + for (channel=entry->m_channelArray, id=0; idm_count; id++, channel++) { + // locate a free channel + if (!channel->m_busy) + break; + } + if (id == entry->m_count) { + // no channel free, create new channels + int newcount = entry->m_count + CACHE_CHANNEL_EXTEND_SIZE; + channel = (CacheChannel*)realloc(entry->m_channelArray, newcount*sizeof(CacheChannel)); + if (channel == NULL) + return -1; + entry->m_channelArray = channel; + memset(&entry->m_channelArray[entry->m_count], 0, CACHE_CHANNEL_EXTEND_SIZE*sizeof(CacheChannel)); + entry->m_count = newcount; + channel = &entry->m_channelArray[id]; + } + // compute the optimal buffer size + // The buffer size must be selected so that + // - it does not contain more than 1630 items (=1s of cache assuming 25 items per second) + // - it contains at least one item + // - it's not bigger than 256kb and preferably around 32kb + // - it a multiple of 4 + unsigned int bufSize = 1630*(maxItemSize+4); + if (bufSize >= CACHE_DEFAULT_BUFFER_SIZE) + bufSize = CACHE_DEFAULT_BUFFER_SIZE; + if (bufSize < maxItemSize+16) + bufSize = maxItemSize+16; + bufSize = (bufSize + 3) & ~0x3; + // compute block size and offset bit mask + // the block size is computed so that + // - it is a power of 2 + // - there is at least one item per block + // - there is no more than CACHE_LOOKUP_TABLE_SIZE blocks per buffer + unsigned int blockSize = bufSize/CACHE_LOOKUP_TABLE_SIZE; + if (blockSize < maxItemSize+12) + blockSize = maxItemSize+12; + // find the power of 2 that is immediately larger than blockSize + unsigned int m; + unsigned int pwr2Size = blockSize; + while ((m = (pwr2Size & (pwr2Size-1))) != 0) + pwr2Size = m; + blockSize = (pwr2Size < blockSize) ? 
pwr2Size<<1 : pwr2Size; + // convert byte size to word size because all positions and size are expressed in 32 bit words + blockSize >>= 2; + channel->m_blockSizeW = blockSize; + channel->m_bufferSizeW = bufSize>>2; + channel->m_firstBuffer = NULL; + channel->m_lastBuffer = NULL; + channel->m_busy = 1; + channel->initItem = NULL; + channel->m_maxItemSizeB = maxItemSize; + strncpy(channel->m_name, name, sizeof(channel->m_name)); + channel->m_name[sizeof(channel->m_name)-1] = 0; + channel->m_positionToOffsetMaskW = (blockSize-1); + for (m=0; blockSize!=1; m++, blockSize>>=1); + channel->m_positionToBlockShiftW = m; + return (int)id; +} + +int Cache::deleteChannel(const void *device, int id) +{ + CacheMap::iterator it = m_cache.find(device); + CacheEntry *entry; + + if (it == m_cache.end()) { + // device does not exist + return -1; + } + entry = it->second; + if (id < 0 || id >= (int)entry->m_count || !entry->m_channelArray[id].m_busy) + return -1; + entry->m_channelArray[id].clear(); + entry->m_channelArray[id].m_busy = 0; + return 0; +} + +int Cache::deleteDevice(const void *device) +{ + CacheMap::iterator it = m_cache.find(device); + CacheEntry *entry; + + if (it == m_cache.end()) { + // device does not exist + return -1; + } + entry = it->second; + delete entry; + m_cache.erase(it); + return 0; +} + +void Cache::clearCacheFrom(const void *device, CacheTS timestamp) +{ + CacheMap::iterator it = (device) ? m_cache.find(device) : m_cache.begin(); + CacheEntry *entry; + CacheChannel *channel; + CacheBuffer *buffer, *nextBuffer, *prevBuffer; + CacheItem *item, *prevItem, *nextItem; + unsigned int positionW, block; + + while (it != m_cache.end()) { + entry = it->second; + for (unsigned int ch=0; chm_count; ch++) { + channel = &entry->m_channelArray[ch]; + if (channel->m_busy) { + item = channel->findItemOrLater(timestamp, &buffer); + if (item ) { + if (!buffer) { + // this is possible if we return the special timestamp=0 item, delete all buffers + channel->clear(); + } else { + // this item and all later items will be removed, clear any later buffer + while ((nextBuffer = buffer->m_next) != NULL) { + buffer->m_next = nextBuffer->m_next; + free(nextBuffer); + } + positionW = CACHE_ITEM_POSITIONW(buffer,item); + if (positionW == 0) { + // this item is the first one of the buffer, remove the buffer completely + // first find the buffer just before it + nextBuffer = channel->m_firstBuffer; + prevBuffer = NULL; + while (nextBuffer != buffer) { + prevBuffer = nextBuffer; + nextBuffer = nextBuffer->m_next; + // we must quit this loop before reaching the end of the list + assert(nextBuffer); + } + free(buffer); + buffer = prevBuffer; + if (buffer == NULL) + // this was also the first buffer + channel->m_firstBuffer = NULL; + } else { + // removing this item means finding the previous item to make it the last one + block = positionW>>channel->m_positionToBlockShiftW; + if (block == 0) { + // start from first item, we know it is not our item because positionW > 0 + prevItem = &buffer->m_firstItem; + } else { + // no need to check the current block, it will point to our item or a later one + // but the previous block will be a good start for sure. 
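+ // step back one lookup block, then walk the item headers forward
+ // (CACHE_NEXT_ITEM advances by m_sizeW words) until the removed item is reached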
+ block--; + prevItem = CACHE_BLOCK_ITEM_ADDR(channel,buffer,block); + } + while ((nextItem = CACHE_NEXT_ITEM(prevItem)) < item) + prevItem = nextItem; + // we must have found our item + assert(nextItem==item); + // now set the buffer + buffer->m_lastItemPositionW = CACHE_ITEM_POSITIONW(buffer,prevItem); + buffer->m_firstFreePositionW = positionW; + buffer->m_lastTimestamp = buffer->m_firstTimestamp + prevItem->m_timeOffset; + block = buffer->m_lastItemPositionW>>channel->m_positionToBlockShiftW; + buffer->lookup[block].m_offsetW = buffer->m_lastItemPositionW&channel->m_positionToOffsetMaskW; + buffer->lookup[block].m_timeOffset = prevItem->m_timeOffset; + } + // set the channel + channel->m_lastBuffer = buffer; + if (buffer) { + channel->m_lastTimestamp = buffer->m_lastTimestamp; + channel->m_lastItemPositionW = buffer->m_lastItemPositionW; + } + } + } + } + } + if (device) + break; + ++it; + } +} + +void *Cache::addCacheItem(const void *device, int id, unsigned int timestamp, void *data, unsigned int length) +{ + CacheMap::iterator it = m_cache.find(device); + CacheEntry *entry; + CacheChannel *channel; + CacheBuffer *buffer, *next; + CacheItem *item; + unsigned int positionW, sizeW, block; + + if (it == m_cache.end()) { + // device does not exist + return NULL; + } + entry = it->second; + if (id < 0 || id >= (int) entry->m_count || !entry->m_channelArray[id].m_busy) + return NULL; + channel = &entry->m_channelArray[id]; + if (length > channel->m_maxItemSizeB) + return NULL; + if (timestamp == 0) { + // initial item, delete all buffers + channel->clear(); + // and create initial item + item = NULL; + // we will allocate the memory, which is always pointer aligned => compute size + // with NULL will give same result. + sizeW = CACHE_ITEM_SIZEW(item,length); + item = (CacheItem*)calloc(sizeW, 4); + item->m_sizeW = sizeW; + channel->initItem = item; + } else { + if (!channel->m_lastBuffer) { + // no item in buffer, insert item at first position of first buffer + positionW = 0; + if ((buffer = channel->m_firstBuffer) == NULL) { + buffer = channel->allocBuffer(); + channel->m_firstBuffer = buffer; + } + } else if (timestamp > channel->m_lastTimestamp) { + // this is the normal case: we are writing past lastest timestamp + buffer = channel->m_lastBuffer; + positionW = buffer->m_firstFreePositionW; + } else if (timestamp == channel->m_lastTimestamp) { + // common case, rewriting the last timestamp, just reuse the last position + buffer = channel->m_lastBuffer; + positionW = channel->m_lastItemPositionW; + } else { + // general case, write in the middle of the buffer, locate the timestamp + // (or the timestamp just after), clear this item and all future items, + // and write at that position + item = channel->findItemOrLater(timestamp, &buffer); + if (item == NULL) { + // this should not happen + return NULL; + } + // this item will become the last one of this channel, clear any later buffer + while ((next = buffer->m_next) != NULL) { + buffer->m_next = next->m_next; + free(next); + } + // no need to update the buffer, this will be done when the item is written + positionW = CACHE_ITEM_POSITIONW(buffer,item); + } + item = CACHE_ITEM_ADDR(buffer,positionW); + sizeW = CACHE_ITEM_SIZEW(item,length); + // we have positionW pointing where we can put the item + // before we do that we have to check if we can: + // - enough room + // - timestamp not too late + if ((positionW+sizeW > channel->m_bufferSizeW) || + (positionW > 0 && timestamp >= buffer->m_firstTimestamp+0x10000)) { + // we must allocate a 
new buffer to store this item + // but before we must make sure that the current buffer is consistent + if (positionW != buffer->m_firstFreePositionW) { + // This means that we were trying to write in the middle of the buffer. + // We must set the buffer right with positionW being the last position + // and find the item before positionW to make it the last. + block = positionW>>channel->m_positionToBlockShiftW; + CacheItem *previousItem, *nextItem; + if (block == 0) { + // start from first item, we know it is not our item because positionW > 0 + previousItem = &buffer->m_firstItem; + } else { + // no need to check the current block, it will point to our item or a later one + // but the previous block will be a good start for sure. + block--; + previousItem = CACHE_BLOCK_ITEM_ADDR(channel,buffer,block); + } + while ((nextItem = CACHE_NEXT_ITEM(previousItem)) < item) + previousItem = nextItem; + // we must have found our item + assert(nextItem==item); + // now set the buffer + buffer->m_lastItemPositionW = CACHE_ITEM_POSITIONW(buffer,previousItem); + buffer->m_firstFreePositionW = positionW; + buffer->m_lastTimestamp = buffer->m_firstTimestamp + previousItem->m_timeOffset; + block = buffer->m_lastItemPositionW>>channel->m_positionToBlockShiftW; + buffer->lookup[block].m_offsetW = buffer->m_lastItemPositionW&channel->m_positionToOffsetMaskW; + buffer->lookup[block].m_timeOffset = previousItem->m_timeOffset; + // and also the channel, just in case + channel->m_lastBuffer = buffer; + channel->m_lastTimestamp = buffer->m_lastTimestamp; + channel->m_lastItemPositionW = buffer->m_lastItemPositionW; + } + // now allocate a new buffer + buffer->m_next = channel->allocBuffer(); + if (buffer->m_next == NULL) + return NULL; + buffer = buffer->m_next; + positionW = 0; + item = &buffer->m_firstItem; + sizeW = CACHE_ITEM_SIZEW(item,length); + } + // all check passed, ready to write the item + item->m_sizeW = sizeW; + if (positionW == 0) { + item->m_timeOffset = 0; + buffer->m_firstTimestamp = timestamp; + } else { + item->m_timeOffset = (unsigned short)(timestamp-buffer->m_firstTimestamp); + } + buffer->m_lastItemPositionW = positionW; + buffer->m_firstFreePositionW = positionW+sizeW; + buffer->m_lastTimestamp = timestamp; + block = positionW>>channel->m_positionToBlockShiftW; + buffer->lookup[block].m_offsetW = positionW&channel->m_positionToOffsetMaskW; + buffer->lookup[block].m_timeOffset = item->m_timeOffset; + buffer->m_lastItemPositionW = CACHE_ITEM_POSITIONW(buffer,item); + buffer->m_firstFreePositionW = buffer->m_lastItemPositionW+item->m_sizeW; + channel->m_lastBuffer = buffer; + channel->m_lastItemPositionW = positionW; + channel->m_lastTimestamp = timestamp; + } + // now copy the item + void *itemData = CACHE_ITEM_DATA_POINTER(item); + if (data) + memcpy(itemData, data, length); + return itemData; +} + +const void *Cache::getPreviousCacheItem(const void *device, int id, unsigned int *timestamp) +{ + CacheMap::iterator it; + CacheEntry *entry; + CacheChannel *channel; + CacheBuffer *buffer; + CacheItem *item; + + if (device) { + it = m_cache.find(device); + } else { + it = m_cache.begin(); + } + if (it == m_cache.end()) { + // device does not exist + return NULL; + } + entry = it->second; + if (id < 0 || id >= (int) entry->m_count || !entry->m_channelArray[id].m_busy) + return NULL; + channel = &entry->m_channelArray[id]; + if ((item = channel->findItemEarlier(*timestamp,&buffer)) == NULL) + return NULL; + *timestamp = (buffer) ? 
buffer->m_firstTimestamp+item->m_timeOffset : 0; + return CACHE_ITEM_DATA_POINTER(item); +} + +const CacheItem *Cache::getCurrentCacheItemInternal(const void *device, int id, CacheTS timestamp) +{ + CacheMap::iterator it = m_cache.find(device); + CacheEntry *entry; + CacheChannel *channel; + CacheBuffer *buffer; + CacheItem *item; + + if (it == m_cache.end()) { + // device does not exist + return NULL; + } + entry = it->second; + if (id < 0 || id >= (int) entry->m_count || !entry->m_channelArray[id].m_busy) + return NULL; + channel = &entry->m_channelArray[id]; + if ((item = channel->findItemOrLater(timestamp,&buffer)) == NULL) + return NULL; + if (buffer && buffer->m_firstTimestamp+item->m_timeOffset != timestamp) + return NULL; + return item; +} + +const void *Cache::getCurrentCacheItem(const void *device, int channel, unsigned int timestamp) +{ + const CacheItem *item = getCurrentCacheItemInternal(device, channel, timestamp); + return (item) ? CACHE_ITEM_DATA_POINTER(item) : NULL; +} + +double *Cache::addCacheVectorIfDifferent(const void *device, int channel, CacheTS timestamp, double *newdata, unsigned int length, double threshold) +{ + const CacheItem *item = getCurrentCacheItemInternal(device, channel, timestamp); + unsigned int sizeW = CACHE_ITEM_SIZEW(item,length*sizeof(double)); + if (!item || item->m_sizeW != sizeW) + return (double*)addCacheItem(device, channel, timestamp, newdata, length*sizeof(double)); + double *olddata = (double*)CACHE_ITEM_DATA_POINTER(item); + if (!length) + return olddata; + double *ref = olddata; + double *v = newdata; + unsigned int i; + for (i=length; i>0; --i) { + if (fabs(*v-*ref) > threshold) + break; + *ref++ = *v++; + } + if (i) + olddata = (double*)addCacheItem(device, channel, timestamp, newdata, length*sizeof(double)); + return olddata; +} + +} diff --git a/intern/itasc/Cache.hpp b/intern/itasc/Cache.hpp new file mode 100644 index 00000000000..64707782e6f --- /dev/null +++ b/intern/itasc/Cache.hpp @@ -0,0 +1,227 @@ +/* $Id: Cache.hpp 21152 2009-06-25 11:57:19Z ben2610 $ + * Cache.hpp + * + * Created on: Feb 24, 2009 + * Author: benoit tbolsee + */ + +#ifndef CACHE_HPP_ +#define CACHE_HPP_ + +#include + +namespace iTaSC { + +#define CACHE_LOOKUP_TABLE_SIZE 128 +#define CACHE_DEFAULT_BUFFER_SIZE 32768 +#define CACHE_CHANNEL_EXTEND_SIZE 10 +#define CACHE_MAX_ITEM_SIZE 0x3FFF0 + +/* macro to get the alignement gap after an item header */ +#define CACHE_ITEM_GAPB(item) (unsigned int)(((size_t)item+sizeof(CacheItem))&(sizeof(void*)-1)) +/* macro to get item data position, item=CacheItem pointer */ +#define CACHE_ITEM_DATA_POINTER(item) (void*)((unsigned char*)item+sizeof(CacheItem)+CACHE_ITEM_GAPB(item)) +/* macro to get item size in 32bit words from item address and length, item=CacheItem pointer */ +#define CACHE_ITEM_SIZEW(item,length) (unsigned int)((sizeof(CacheItem)+CACHE_ITEM_GAPB(item)+(((length)+3)&~0x3))>>2) +/* macto to move from one item to the next, item=CacheItem pointer, updated by the macro */ +#define CACHE_NEXT_ITEM(item) ((item)+(item)->m_sizeW) +#define CACHE_BLOCK_ITEM_ADDR(chan,buf,block) (&(buf)->m_firstItem+(((unsigned int)(block)<m_positionToBlockShiftW)+(buf)->lookup[block].m_offsetW)) +#define CACHE_ITEM_ADDR(buf,pos) (&(buf)->m_firstItem+(pos)) +#define CACHE_ITEM_POSITIONW(buf,item) (unsigned int)(item-&buf->m_firstItem) + +typedef unsigned int CacheTS; + +struct Timestamp +{ + double realTimestamp; + double realTimestep; + CacheTS cacheTimestamp; + unsigned int numstep:8; + unsigned int substep:1; + unsigned int 
reiterate:1; + unsigned int cache:1; + unsigned int update:1; + unsigned int interpolate:1; + unsigned int dummy:19; + + Timestamp() { memset(this, 0, sizeof(Timestamp)); } +}; + +/* utility function to return second timestamp to millisecond */ +inline void setCacheTimestamp(Timestamp& timestamp) +{ + if (timestamp.realTimestamp < 0.0 || timestamp.realTimestamp > 4294967.295) + timestamp.cacheTimestamp = 0; + else + timestamp.cacheTimestamp = (CacheTS)(timestamp.realTimestamp*1000.0+0.5); +} + + +/* +class Cache: +Top level class, only one instance of this class should exists. +A device (=constraint, object) uses this class to create a cache entry for its data. +A cache entry is divided into cache channels, each providing a separate buffer for cache items. +The cache channels must be declared by the devices before they can be used. +The device must specify the largest cache item (limited to 256Kb) so that the cache +buffer can be organized optimally. +Cache channels are identified by small number (starting from 0) allocated by the cache system. +Cache items are inserted into cache channels ordered by timestamp. Writing is always done +at the end of the cache buffer: writing an item automatically clears all items with +higher timestamp. +A cache item is an array of bytes provided by the device; the format of the cache item is left +to the device. +The device can retrieve a cache item associated with a certain timestamp. The cache system +returns a pointer that points directly in the cache buffer to avoid unnecessary copy. +The pointer is guaranteed to be pointer aligned so that direct mapping to C structure is possible +(=32 bit aligned on 32 systems and 64 bit aligned on 64 bits system). + +Timestamp = rounded time in millisecond. +*/ + +struct CacheEntry; +struct CacheBuffer; +struct CacheItem; +struct CacheChannel; + +class Cache +{ +private: + /* map between device and cache entry. + Dynamically updated when more devices create cache channels */ + typedef std::map CacheMap; + CacheMap m_cache; + const CacheItem *getCurrentCacheItemInternal(const void *device, int channel, CacheTS timestamp); + +public: + Cache(); + ~Cache(); + /* add a cache channel, maxItemSize must be < 256k. + name : channel name, truncated at 31 characters + msxItemSize : maximum size of item in bytes, items of larger size will be rejected + return value >= 0: channel id, -1: error */ + int addChannel(const void *device, const char *name, unsigned int maxItemSize); + + /* delete a cache channel (and all associated buffers and items) */ + int deleteChannel(const void *device, int channel); + /* delete all channels of a device and remove the device from the map */ + int deleteDevice(const void *device); + /* removes all cache items, leaving the special item at timestamp=0. + if device=NULL, apply to all devices. */ + void clearCacheFrom(const void *device, CacheTS timestamp); + + /* add a new cache item + channel: the cache channel (as returned by AddChannel + data, length: the cache item and length in bytes + If data is NULL, the memory is allocated in the cache but not writen + return: error: NULL, success: pointer to item in cache */ + void *addCacheItem(const void *device, int channel, CacheTS timestamp, void *data, unsigned int length); + + /* specialized function to add a vector of double in the cache + It will first check if a vector exist already in the cache for the same timestamp + and compared the cached vector with the new values. 
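+ (Armature::pushQ uses this with a KDL::epsilon threshold so that later cache entries are kept when the joints have not changed.)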
+ If all values are within threshold, the vector is updated but the cache is not deleted + for the future timestamps. */ + double *addCacheVectorIfDifferent(const void *device, int channel, CacheTS timestamp, double *data, unsigned int length, double threshold); + + /* returns the cache item with timestamp that is just before the given timestamp. + returns the data pointer or NULL if there is no cache item before timestamp. + On return, timestamp is updated with the actual timestamp of the item being returned. + Note that the length of the item is not returned, it is up to the device to organize + the data so that length can be retrieved from the data if needed. + Device can NULL, it will then just look the first channel available, useful to + test the status of the cache. */ + const void *getPreviousCacheItem(const void *device, int channel, CacheTS *timestamp); + + /* returns the cache item with the timestamp that is exactly equal to the given timestamp + If there is no cache item for this timestamp, returns NULL.*/ + const void *getCurrentCacheItem(const void *device, int channel, CacheTS timestamp); + +}; + +/* the following structures are not internal use only, they should not be used directly */ + +struct CacheEntry +{ + CacheChannel *m_channelArray; // array of channels, automatically resized if more channels are created + unsigned int m_count; // number of channel in channelArray + CacheEntry() : m_channelArray(NULL), m_count(0) {} + ~CacheEntry(); +}; + +struct CacheChannel +{ + CacheItem* initItem; // item corresponding to timestamp=0 + struct CacheBuffer *m_firstBuffer; // first buffer of list + struct CacheBuffer *m_lastBuffer; // last buffer of list to which an item was written + char m_name[32]; // channel name + unsigned char m_busy; // =0 if channel is free, !=0 when channel is in use + unsigned char m_positionToBlockShiftW; // number of bits to shift a position in word to get the block number + unsigned short m_positionToOffsetMaskW; // bit mask to apply on a position in word to get offset in a block + unsigned int m_maxItemSizeB; // maximum item size in bytes + unsigned int m_bufferSizeW; // size of item buffer in word to allocate when a new buffer must be created + unsigned int m_blockSizeW; // block size in words of the lookup table + unsigned int m_lastTimestamp; // timestamp of the last item that was written + unsigned int m_lastItemPositionW; // position in words in lastBuffer of the last item written + void clear(); + CacheBuffer* allocBuffer(); + CacheItem* findItemOrLater(unsigned int timestamp, CacheBuffer **rBuffer); + CacheItem* findItemEarlier(unsigned int timestamp, CacheBuffer **rBuffer); + // Internal function: finds an item in a buffer that is < timeOffset + // timeOffset must be a valid offset for the buffer and the buffer must not be empty + // on return highBlock contains the block with items above or equal to timeOffset + CacheItem *_findBlock(CacheBuffer *buffer, unsigned short timeOffset, unsigned int *highBlock); +}; + +struct CacheBlock { + unsigned short m_timeOffset; // timestamp relative to m_firstTimestamp + unsigned short m_offsetW; // position in words of item relative to start of block +}; + +/* CacheItem is the header of each item in the buffer, must be 32bit + Items are always 32 bits aligned and size is the number of 32 bit words until the + next item header, including an eventual pre and post padding gap for pointer alignment */ +struct CacheItem +{ + unsigned short m_timeOffset; // timestamp relative to m_firstTimestamp + unsigned short 
m_sizeW; // size of item in 32 bit words + // item data follows header immediately or after a gap if position is not pointer aligned +}; + +// Buffer header +// Defined in a macro to avoid sizeof() potential problem. +// next for linked list. = NULL for last buffer +// m_firstTimestamp timestamp of first item in this buffer +// m_lastTimestamp timestamp of last item in this buffer +// m_lastTimestamp must be < m_firstTimestamp+65536 +// m_lastItemPositionW position in word of last item written +// m_firstFreePositionW position in word where a new item can be written, 0 if buffer is empty +// lookup lookup table for fast access to item by timestamp +// The buffer is divided in blocks of 2**n bytes with n chosen so that +// there are no more than CACHE_LOOKUP_TABLE_SIZE blocks and that each +// block will contain at least one item. +// Each element of the lookup table gives the timestamp and offset +// of the last cache item occupying (=starting in) the corresponding block. +#define CACHE_HEADER \ + struct CacheBuffer *m_next; \ + unsigned int m_firstTimestamp; \ + unsigned int m_lastTimestamp; \ + \ + unsigned int m_lastItemPositionW; \ + unsigned int m_firstFreePositionW;\ + struct CacheBlock lookup[CACHE_LOOKUP_TABLE_SIZE] + +struct CacheBufferHeader { + CACHE_HEADER; +}; +#define CACHE_BUFFER_HEADER_SIZE (sizeof(struct CacheBufferHeader)) +struct CacheBuffer +{ + CACHE_HEADER; + struct CacheItem m_firstItem; // the address of this field marks the start of the buffer +}; + + +} + +#endif /* CACHE_HPP_ */ diff --git a/intern/itasc/ConstraintSet.cpp b/intern/itasc/ConstraintSet.cpp new file mode 100644 index 00000000000..a38db445ea2 --- /dev/null +++ b/intern/itasc/ConstraintSet.cpp @@ -0,0 +1,170 @@ +/* $Id$ + * ConstraintSet.cpp + * + * Created on: Jan 5, 2009 + * Author: rubensmits + */ + +#include "ConstraintSet.hpp" +#include "kdl/utilities/svd_eigen_HH.hpp" + +namespace iTaSC { + +ConstraintSet::ConstraintSet(unsigned int _nc,double accuracy,unsigned int maximum_iterations): + m_nc(_nc), + m_Cf(e_zero_matrix(m_nc,6)), + m_Wy(e_scalar_vector(m_nc,1.0)), + m_y(m_nc),m_ydot(e_zero_vector(m_nc)),m_chi(e_zero_vector(6)), + m_S(6),m_temp(6),m_tdelta(6), + m_Jf(e_identity_matrix(6,6)), + m_U(e_identity_matrix(6,6)),m_V(e_identity_matrix(6,6)),m_B(e_zero_matrix(6,6)), + m_Jf_inv(e_zero_matrix(6,6)), + m_internalPose(F_identity), m_externalPose(F_identity), + m_constraintCallback(NULL), m_constraintParam(NULL), + m_toggle(false),m_substep(false), + m_threshold(accuracy),m_maxIter(maximum_iterations) +{ + m_maxDeltaChi = e_scalar(0.52); +} + +ConstraintSet::ConstraintSet(): + m_nc(0), + m_internalPose(F_identity), m_externalPose(F_identity), + m_constraintCallback(NULL), m_constraintParam(NULL), + m_toggle(false),m_substep(false), + m_threshold(0.0),m_maxIter(0) +{ + m_maxDeltaChi = e_scalar(0.52); +} + +void ConstraintSet::reset(unsigned int _nc,double accuracy,unsigned int maximum_iterations) +{ + m_nc = _nc; + m_Jf = e_identity_matrix(6,6); + m_Cf = e_zero_matrix(m_nc,6); + m_U = e_identity_matrix(6,6); + m_V = e_identity_matrix(6,6); + m_B = e_zero_matrix(6,6); + m_Jf_inv = e_zero_matrix(6,6), + m_Wy = e_scalar_vector(m_nc,1.0), + m_chi = e_zero_vector(6); + m_chidot = e_zero_vector(6); + m_y = e_zero_vector(m_nc); + m_ydot = e_zero_vector(m_nc); + m_S = e_zero_vector(6); + m_temp = e_zero_vector(6); + m_tdelta = e_zero_vector(6); + m_threshold = accuracy; + m_maxIter = maximum_iterations; +} + +ConstraintSet::~ConstraintSet() { + +} + +void ConstraintSet::modelUpdate(Frame& 
_external_pose,const Timestamp& timestamp) +{ + m_chi+=m_chidot*timestamp.realTimestep; + m_externalPose = _external_pose; + + //update the internal pose and Jf + updateJacobian(); + //check if loop is already closed, if not update the pose and Jf + unsigned int iter=0; + while(iter<5&&!closeLoop()) + iter++; +} + +double ConstraintSet::getMaxTimestep(double& timestep) +{ + e_scalar maxChidot = m_chidot.cwise().abs().maxCoeff(); + if (timestep*maxChidot > m_maxDeltaChi) { + timestep = m_maxDeltaChi/maxChidot; + } + return timestep; +} + +bool ConstraintSet::initialise(Frame& init_pose){ + m_externalPose=init_pose; + // get current Jf + updateJacobian(); + + unsigned int iter=0; + while(iter change the reference point to the base frame + Twist twist_delta(diff(m_internalPose,m_externalPose)); + twist_delta=twist_delta.RefPoint(-m_internalPose.p); + for(unsigned int i=0;i<6;i++) + m_tdelta(i)=twist_delta(i); + //TODO: use damping in constraintset inversion? + for(unsigned int i=0;i<6;i++) + if(m_S(i) + +namespace iTaSC { + +enum ConstraintAction { + ACT_NONE= 0, + ACT_VALUE= 1, + ACT_VELOCITY= 2, + ACT_TOLERANCE= 4, + ACT_FEEDBACK= 8, + ACT_ALPHA= 16 +}; + +struct ConstraintSingleValue { + unsigned int id; // identifier of constraint value, depends on constraint + unsigned int action;// action performed, compbination of ACT_..., set on return + const double y; // actual constraint value + const double ydot; // actual constraint velocity + double yd; // current desired constraint value, changed on return + double yddot; // current desired constraint velocity, changed on return + ConstraintSingleValue(): id(0), action(0), y(0.0), ydot(0.0) {} +}; + +struct ConstraintValues { + unsigned int id; // identifier of group of constraint values, depend on constraint + unsigned short number; // number of constraints in list + unsigned short action; // action performed, ACT_..., set on return + double alpha; // constraint activation coefficient, should be [0..1] + double tolerance; // current desired tolerance on constraint, same unit than yd, changed on return + double feedback; // current desired feedback on error, in 1/sec, changed on return + struct ConstraintSingleValue* values; + ConstraintValues(): id(0), number(0), action(0), values(NULL) {} +}; + +class ConstraintSet; +typedef bool (*ConstraintCallback)(const Timestamp& timestamp, struct ConstraintValues* const _values, unsigned int _nvalues, void* _param); + +class ConstraintSet { +protected: + unsigned int m_nc; + e_scalar m_maxDeltaChi; + e_matrix m_Cf; + e_vector m_Wy,m_y,m_ydot; + e_vector6 m_chi,m_chidot,m_S,m_temp,m_tdelta; + e_matrix6 m_Jf,m_U,m_V,m_B,m_Jf_inv; + KDL::Frame m_internalPose,m_externalPose; + ConstraintCallback m_constraintCallback; + void* m_constraintParam; + void* m_poseParam; + bool m_toggle; + bool m_substep; + double m_threshold; + unsigned int m_maxIter; + + friend class Scene; + virtual void modelUpdate(KDL::Frame& _external_pose,const Timestamp& timestamp); + virtual void updateKinematics(const Timestamp& timestamp)=0; + virtual void pushCache(const Timestamp& timestamp)=0; + virtual void updateJacobian()=0; + virtual void updateControlOutput(const Timestamp& timestamp)=0; + virtual void initCache(Cache *_cache) = 0; + virtual bool initialise(KDL::Frame& init_pose); + virtual void reset(unsigned int nc,double accuracy,unsigned int maximum_iterations); + virtual bool closeLoop(); + virtual double getMaxTimestep(double& timestep); + + +public: + ConstraintSet(unsigned int nc,double accuracy,unsigned int 
maximum_iterations); + ConstraintSet(); + virtual ~ConstraintSet(); + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + + virtual bool registerCallback(ConstraintCallback _function, void* _param) + { + m_constraintCallback = _function; + m_constraintParam = _param; + return true; + } + + virtual const e_vector& getControlOutput()const{return m_ydot;}; + virtual const ConstraintValues* getControlParameters(unsigned int* _nvalues) = 0; + virtual bool setControlParameters(ConstraintValues* _values, unsigned int _nvalues, double timestep=0.0) = 0; + bool setControlParameter(int id, ConstraintAction action, double value, double timestep=0.0); + + virtual const e_matrix6& getJf() const{return m_Jf;}; + virtual const KDL::Frame& getPose() const{return m_internalPose;}; + virtual const e_matrix& getCf() const{return m_Cf;}; + + virtual const e_vector& getWy() const {return m_Wy;}; + virtual void setWy(const e_vector& Wy_in){m_Wy = Wy_in;}; + virtual void setJointVelocity(const e_vector chidot_in){m_chidot = chidot_in;}; + + virtual unsigned int getNrOfConstraints(){return m_nc;}; + void substep(bool _substep) {m_substep=_substep;} + bool substep() {return m_substep;} +}; + +} + +#endif /* CONSTRAINTSET_HPP_ */ diff --git a/intern/itasc/ControlledObject.cpp b/intern/itasc/ControlledObject.cpp new file mode 100644 index 00000000000..b987e176031 --- /dev/null +++ b/intern/itasc/ControlledObject.cpp @@ -0,0 +1,61 @@ +/* $Id$ + * ControlledObject.cpp + * + * Created on: Jan 5, 2009 + * Author: rubensmits + */ + +#include "ControlledObject.hpp" + + +namespace iTaSC { +ControlledObject::ControlledObject(): + Object(Controlled),m_nq(0),m_nc(0),m_nee(0) +{ + // max joint variable = 0.52 radian or 0.52 meter in one timestep + m_maxDeltaQ = e_scalar(0.52); +} + +void ControlledObject::initialize(unsigned int _nq,unsigned int _nc, unsigned int _nee) +{ + assert(_nee >= 1); + m_nq = _nq; + m_nc = _nc; + m_nee = _nee; + if (m_nq > 0) { + m_Wq = e_identity_matrix(m_nq,m_nq); + m_qdot = e_zero_vector(m_nq); + } + if (m_nc > 0) { + m_Wy = e_scalar_vector(m_nc,1.0); + m_ydot = e_zero_vector(m_nc); + } + if (m_nc > 0 && m_nq > 0) + m_Cq = e_zero_matrix(m_nc,m_nq); + // clear all Jacobian if any + m_JqArray.clear(); + // reserve one more to have a zero matrix handy + if (m_nq > 0) + m_JqArray.resize(m_nee+1, e_zero_matrix(6,m_nq)); +} + +ControlledObject::~ControlledObject() {} + + + +const e_matrix& ControlledObject::getJq(unsigned int ee) const +{ + assert(m_nq > 0); + return m_JqArray[(ee>m_nee)?m_nee:ee]; +} + +double ControlledObject::getMaxTimestep(double& timestep) +{ + e_scalar maxQdot = m_qdot.cwise().abs().maxCoeff(); + if (timestep*maxQdot > m_maxDeltaQ) { + timestep = m_maxDeltaQ/maxQdot; + } + return timestep; +} + +} diff --git a/intern/itasc/ControlledObject.hpp b/intern/itasc/ControlledObject.hpp new file mode 100644 index 00000000000..2370f6594ed --- /dev/null +++ b/intern/itasc/ControlledObject.hpp @@ -0,0 +1,70 @@ +/* $Id: ControlledObject.hpp 20853 2009-06-13 12:29:46Z ben2610 $ + * ControlledObject.hpp + * + * Created on: Jan 5, 2009 + * Author: rubensmits + */ + +#ifndef CONTROLLEDOBJECT_HPP_ +#define CONTROLLEDOBJECT_HPP_ + +#include "kdl/frames.hpp" +#include "eigen_types.hpp" + +#include "Object.hpp" +#include "ConstraintSet.hpp" +#include + +namespace iTaSC { + +#define CONSTRAINT_ID_ALL ((unsigned int)-1) + +class ControlledObject : public Object { +protected: + e_scalar m_maxDeltaQ; + unsigned int m_nq,m_nc,m_nee; + e_matrix m_Wq,m_Cq; + e_vector m_Wy,m_ydot,m_qdot; + std::vector m_JqArray; +public: + 
ControlledObject(); + virtual ~ControlledObject(); + + class JointLockCallback { + public: + JointLockCallback() {} + virtual ~JointLockCallback() {} + + // lock a joint, no need to update output + virtual void lockJoint(unsigned int q_nr, unsigned int ndof) = 0; + // lock a joint and update output in view of reiteration + virtual void lockJoint(unsigned int q_nr, unsigned int ndof, double* qdot) = 0; + }; + + virtual void initialize(unsigned int _nq,unsigned int _nc, unsigned int _nee); + + // returns true when a joint has been locked via the callback and the solver must run again + virtual bool updateJoint(const Timestamp& timestamp, JointLockCallback& callback) = 0; + virtual void updateControlOutput(const Timestamp& timestamp)=0; + virtual void setJointVelocity(const e_vector qdot_in){m_qdot = qdot_in;}; + virtual double getMaxTimestep(double& timestep); + virtual bool setControlParameter(unsigned int constraintId, unsigned int valueId, ConstraintAction action, e_scalar value, double timestep=0.0)=0; + + virtual const e_vector& getControlOutput() const{return m_ydot;} + + virtual const e_matrix& getJq(unsigned int ee) const; + + virtual const e_matrix& getCq() const{return m_Cq;}; + + virtual e_matrix& getWq() {return m_Wq;}; + virtual void setWq(const e_matrix& Wq_in){m_Wq = Wq_in;}; + + virtual const e_vector& getWy() const {return m_Wy;}; + + virtual const unsigned int getNrOfCoordinates(){return m_nq;}; + virtual const unsigned int getNrOfConstraints(){return m_nc;}; +}; + +} + +#endif /* CONTROLLEDOBJECT_HPP_ */ diff --git a/intern/itasc/CopyPose.cpp b/intern/itasc/CopyPose.cpp new file mode 100644 index 00000000000..69722909ed1 --- /dev/null +++ b/intern/itasc/CopyPose.cpp @@ -0,0 +1,480 @@ +/* $Id$ + * CopyPose.cpp + * + * Created on: Mar 17, 2009 + * Author: benoit bolsee + */ + +#include "CopyPose.hpp" +#include "kdl/kinfam_io.hpp" +#include +#include + +namespace iTaSC +{ + +const unsigned int maxPoseCacheSize = (2*(3+3*2)); +CopyPose::CopyPose(unsigned int control_output, unsigned int dynamic_output, double armlength, double accuracy, unsigned int maximum_iterations): + ConstraintSet(), + m_cache(NULL), + m_poseCCh(-1),m_poseCTs(0) +{ + m_maxerror = armlength/2.0; + m_outputControl = (control_output & CTL_ALL); + unsigned int _nc = nBitsOn(m_outputControl); + if (!_nc) + return; + // reset the constraint set + reset(_nc, accuracy, maximum_iterations); + _nc = 0; + m_nvalues = 0; + int nrot = 0, npos = 0; + int nposCache = 0, nrotCache = 0; + m_outputDynamic = (dynamic_output & m_outputControl); + memset(m_values, 0, sizeof(m_values)); + memset(m_posData, 0, sizeof(m_posData)); + memset(m_rotData, 0, sizeof(m_rotData)); + memset(&m_rot, 0, sizeof(m_rot)); + memset(&m_pos, 0, sizeof(m_pos)); + if (m_outputControl & CTL_POSITION) { + m_pos.alpha = 1.0; + m_pos.K = 20.0; + m_pos.tolerance = 0.05; + m_values[m_nvalues].alpha = m_pos.alpha; + m_values[m_nvalues].feedback = m_pos.K; + m_values[m_nvalues].tolerance = m_pos.tolerance; + m_values[m_nvalues].id = ID_POSITION; + if (m_outputControl & CTL_POSITIONX) { + m_Wy(_nc) = m_pos.alpha/*/(m_pos.tolerance*m_pos.K)*/; + m_Cf(_nc++,0)=1.0; + m_posData[npos++].id = ID_POSITIONX; + if (m_outputDynamic & CTL_POSITIONX) + nposCache++; + } + if (m_outputControl & CTL_POSITIONY) { + m_Wy(_nc) = m_pos.alpha/*/(m_pos.tolerance*m_pos.K)*/; + m_Cf(_nc++,1)=1.0; + m_posData[npos++].id = ID_POSITIONY; + if (m_outputDynamic & CTL_POSITIONY) + nposCache++; + } + if (m_outputControl & CTL_POSITIONZ) { + m_Wy(_nc) = 
m_pos.alpha/*/(m_pos.tolerance*m_pos.K)*/; + m_Cf(_nc++,2)=1.0; + m_posData[npos++].id = ID_POSITIONZ; + if (m_outputDynamic & CTL_POSITIONZ) + nposCache++; + } + m_values[m_nvalues].number = npos; + m_values[m_nvalues++].values = m_posData; + m_pos.firsty = 0; + m_pos.ny = npos; + } + if (m_outputControl & CTL_ROTATION) { + m_rot.alpha = 1.0; + m_rot.K = 20.0; + m_rot.tolerance = 0.05; + m_values[m_nvalues].alpha = m_rot.alpha; + m_values[m_nvalues].feedback = m_rot.K; + m_values[m_nvalues].tolerance = m_rot.tolerance; + m_values[m_nvalues].id = ID_ROTATION; + if (m_outputControl & CTL_ROTATIONX) { + m_Wy(_nc) = m_rot.alpha/*/(m_rot.tolerance*m_rot.K)*/; + m_Cf(_nc++,3)=1.0; + m_rotData[nrot++].id = ID_ROTATIONX; + if (m_outputDynamic & CTL_ROTATIONX) + nrotCache++; + } + if (m_outputControl & CTL_ROTATIONY) { + m_Wy(_nc) = m_rot.alpha/*/(m_rot.tolerance*m_rot.K)*/; + m_Cf(_nc++,4)=1.0; + m_rotData[nrot++].id = ID_ROTATIONY; + if (m_outputDynamic & CTL_ROTATIONY) + nrotCache++; + } + if (m_outputControl & CTL_ROTATIONZ) { + m_Wy(_nc) = m_rot.alpha/*/(m_rot.tolerance*m_rot.K)*/; + m_Cf(_nc++,5)=1.0; + m_rotData[nrot++].id = ID_ROTATIONZ; + if (m_outputDynamic & CTL_ROTATIONZ) + nrotCache++; + } + m_values[m_nvalues].number = nrot; + m_values[m_nvalues++].values = m_rotData; + m_rot.firsty = npos; + m_rot.ny = nrot; + } + assert(_nc == m_nc); + m_Jf=e_identity_matrix(6,6); + m_poseCacheSize = ((nrotCache)?(3+nrotCache*2):0)+((nposCache)?(3+nposCache*2):0); +} + +CopyPose::~CopyPose() +{ +} + +bool CopyPose::initialise(Frame& init_pose) +{ + m_externalPose = m_internalPose = init_pose; + updateJacobian(); + return true; +} + +void CopyPose::modelUpdate(Frame& _external_pose,const Timestamp& timestamp) +{ + m_internalPose = m_externalPose = _external_pose; + updateJacobian(); +} + +void CopyPose::initCache(Cache *_cache) +{ + m_cache = _cache; + m_poseCCh = -1; + if (m_cache) { + // create one channel for the coordinates + m_poseCCh = m_cache->addChannel(this, "Xf", m_poseCacheSize*sizeof(double)); + // don't save initial value, it will be recomputed from external pose + //pushPose(0); + } +} + +double* CopyPose::pushValues(double* item, ControlState* _state, unsigned int mask) +{ + ControlState::ControlValue* _yval; + int i; + + *item++ = _state->alpha; + *item++ = _state->K; + *item++ = _state->tolerance; + + for (i=0, _yval=_state->output; i<_state->ny; mask<<=1) { + if (m_outputControl & mask) { + if (m_outputDynamic & mask) { + *item++ = _yval->yd; + *item++ = _yval->yddot; + } + _yval++; + i++; + } + } + return item; +} + +void CopyPose::pushPose(CacheTS timestamp) +{ + if (m_poseCCh >= 0) { + if (m_poseCacheSize) { + double buf[maxPoseCacheSize]; + double *item = buf; + if (m_outputDynamic & CTL_POSITION) + item = pushValues(item, &m_pos, CTL_POSITIONX); + if (m_outputDynamic & CTL_ROTATION) + item = pushValues(item, &m_rot, CTL_ROTATIONX); + m_cache->addCacheVectorIfDifferent(this, m_poseCCh, timestamp, buf, m_poseCacheSize, KDL::epsilon); + } else + m_cache->addCacheVectorIfDifferent(this, m_poseCCh, timestamp, NULL, 0, KDL::epsilon); + m_poseCTs = timestamp; + } +} + +double* CopyPose::restoreValues(double* item, ConstraintValues* _values, ControlState* _state, unsigned int mask) +{ + ConstraintSingleValue* _data; + ControlState::ControlValue* _yval; + int i, j; + + _values->alpha = _state->alpha = *item++; + _values->feedback = _state->K = *item++; + _values->tolerance = _state->tolerance = *item++; + + for (i=_state->firsty, j=i+_state->ny, _yval=_state->output, 
_data=_values->values; ialpha/*/(_state->tolerance*_state->K)*/; + if (m_outputDynamic & mask) { + _data->yd = _yval->yd = *item++; + _data->yddot = _yval->yddot = *item++; + } + _data++; + _yval++; + i++; + } + } + return item; +} + +bool CopyPose::popPose(CacheTS timestamp) +{ + bool found = false; + if (m_poseCCh >= 0) { + double *item = (double*)m_cache->getPreviousCacheItem(this, m_poseCCh, ×tamp); + if (item) { + found = true; + if (timestamp != m_poseCTs) { + int i=0; + if (m_outputControl & CTL_POSITION) { + if (m_outputDynamic & CTL_POSITION) { + item = restoreValues(item, &m_values[i], &m_pos, CTL_POSITIONX); + } + i++; + } + if (m_outputControl & CTL_ROTATION) { + if (m_outputDynamic & CTL_ROTATION) { + item = restoreValues(item, &m_values[i], &m_rot, CTL_ROTATIONX); + } + i++; + } + m_poseCTs = timestamp; + item = NULL; + } + } + } + return found; +} + +void CopyPose::interpolateOutput(ControlState* _state, unsigned int mask, const Timestamp& timestamp) +{ + ControlState::ControlValue* _yval; + int i; + + for (i=0, _yval=_state->output; i<_state->ny; mask <<= 1) { + if (m_outputControl & mask) { + if (m_outputDynamic & mask) { + if (timestamp.substep && timestamp.interpolate) { + _yval->yd += _yval->yddot*timestamp.realTimestep; + } else { + _yval->yd = _yval->nextyd; + _yval->yddot = _yval->nextyddot; + } + } + i++; + _yval++; + } + } +} + +void CopyPose::pushCache(const Timestamp& timestamp) +{ + if (!timestamp.substep && timestamp.cache) { + pushPose(timestamp.cacheTimestamp); + } +} + +void CopyPose::updateKinematics(const Timestamp& timestamp) +{ + if (timestamp.interpolate) { + if (m_outputDynamic & CTL_POSITION) + interpolateOutput(&m_pos, CTL_POSITIONX, timestamp); + if (m_outputDynamic & CTL_ROTATION) + interpolateOutput(&m_rot, CTL_ROTATIONX, timestamp); + } + pushCache(timestamp); +} + +void CopyPose::updateJacobian() +{ + //Jacobian is always identity at the start of the constraint chain + //instead of going through complicated jacobian operation, implemented direct formula + //m_Jf(1,3) = m_internalPose.p.z(); + //m_Jf(2,3) = -m_internalPose.p.y(); + //m_Jf(0,4) = -m_internalPose.p.z(); + //m_Jf(2,4) = m_internalPose.p.x(); + //m_Jf(0,5) = m_internalPose.p.y(); + //m_Jf(1,5) = -m_internalPose.p.x(); +} + +void CopyPose::updateState(ConstraintValues* _values, ControlState* _state, unsigned int mask, double timestep) +{ + unsigned int id = (mask == CTL_ROTATIONX) ? 
ID_ROTATIONX : ID_POSITIONX; + ControlState::ControlValue* _yval; + ConstraintSingleValue* _data; + int i, j, k; + int action = 0; + + if ((_values->action & ACT_ALPHA) && _values->alpha >= 0.0) { + _state->alpha = _values->alpha; + action |= ACT_ALPHA; + } + if ((_values->action & ACT_TOLERANCE) && _values->tolerance > KDL::epsilon) { + _state->tolerance = _values->tolerance; + action |= ACT_TOLERANCE; + } + if ((_values->action & ACT_FEEDBACK) && _values->feedback > KDL::epsilon) { + _state->K = _values->feedback; + action |= ACT_FEEDBACK; + } + for (i=_state->firsty, j=_state->firsty+_state->ny, _yval=_state->output; ialpha/*/(_state->tolerance*_state->K)*/; + // check if this controlled output is provided + for (k=0, _data=_values->values; k<_values->number; k++, _data++) { + if (_data->id == id) { + switch (_data->action & (ACT_VALUE|ACT_VELOCITY)) { + case 0: + // no indication, keep current values + break; + case ACT_VELOCITY: + // only the velocity is given estimate the new value by integration + _data->yd = _yval->yd+_data->yddot*timestep; + // walkthrough + case ACT_VALUE: + _yval->nextyd = _data->yd; + // if the user sets the value, we assume future velocity is zero + // (until the user changes the value again) + _yval->nextyddot = (_data->action & ACT_VALUE) ? 0.0 : _data->yddot; + if (timestep>0.0) { + _yval->yddot = (_data->yd-_yval->yd)/timestep; + } else { + // allow the user to change target instantenously when this function + // if called from setControlParameter with timestep = 0 + _yval->yd = _yval->nextyd; + _yval->yddot = _yval->nextyddot; + } + break; + case (ACT_VALUE|ACT_VELOCITY): + // the user should not set the value and velocity at the same time. + // In this case, we will assume that he wants to set the future value + // and we compute the current value to match the velocity + _yval->yd = _data->yd - _data->yddot*timestep; + _yval->nextyd = _data->yd; + _yval->nextyddot = _data->yddot; + if (timestep>0.0) { + _yval->yddot = (_data->yd-_yval->yd)/timestep; + } else { + _yval->yd = _yval->nextyd; + _yval->yddot = _yval->nextyddot; + } + break; + } + } + } + _yval++; + i++; + } + } +} + + +bool CopyPose::setControlParameters(struct ConstraintValues* _values, unsigned int _nvalues, double timestep) +{ + while (_nvalues > 0) { + if (_values->id >= ID_POSITION && _values->id <= ID_POSITIONZ && (m_outputControl & CTL_POSITION)) { + updateState(_values, &m_pos, CTL_POSITIONX, timestep); + } + if (_values->id >= ID_ROTATION && _values->id <= ID_ROTATIONZ && (m_outputControl & CTL_ROTATION)) { + updateState(_values, &m_rot, CTL_ROTATIONX, timestep); + } + _values++; + _nvalues--; + } + return true; +} + +void CopyPose::updateValues(Vector& vel, ConstraintValues* _values, ControlState* _state, unsigned int mask) +{ + ConstraintSingleValue* _data; + ControlState::ControlValue* _yval; + int i, j; + + _values->action = 0; + + for (i=_state->firsty, j=0, _yval=_state->output, _data=_values->values; j<3; j++, mask<<=1) { + if (m_outputControl & mask) { + *(double*)&_data->y = vel(j); + *(double*)&_data->ydot = m_ydot(i); + _data->yd = _yval->yd; + _data->yddot = _yval->yddot; + _data->action = 0; + i++; + _data++; + _yval++; + } + } +} + +void CopyPose::updateOutput(Vector& vel, ControlState* _state, unsigned int mask) +{ + ControlState::ControlValue* _yval; + int i, j; + double coef=1.0; + if (mask & CTL_POSITION) { + // put a limit on position error + double len=0.0; + for (j=0, _yval=_state->output; j<3; j++) { + if (m_outputControl & (mask<yd-vel(j)); + _yval++; + } + } + 
len = KDL::sqrt(len); + if (len > m_maxerror) + coef = m_maxerror/len; + } + for (i=_state->firsty, j=0, _yval=_state->output; j<3; j++) { + if (m_outputControl & (mask<yddot+_state->K*coef*(_yval->yd-vel(j)); + _yval++; + i++; + } + } +} + +void CopyPose::updateControlOutput(const Timestamp& timestamp) +{ + //IMO this should be done, no idea if it is enough (wrt Distance impl) + Twist y = diff(F_identity, m_internalPose); + bool found = true; + if (!timestamp.substep) { + if (!timestamp.reiterate) { + found = popPose(timestamp.cacheTimestamp); + } + } + if (m_constraintCallback && (m_substep || (!timestamp.reiterate && !timestamp.substep))) { + // initialize first callback the application to get the current values + int i=0; + if (m_outputControl & CTL_POSITION) { + updateValues(y.vel, &m_values[i++], &m_pos, CTL_POSITIONX); + } + if (m_outputControl & CTL_ROTATION) { + updateValues(y.rot, &m_values[i++], &m_rot, CTL_ROTATIONX); + } + if ((*m_constraintCallback)(timestamp, m_values, m_nvalues, m_constraintParam)) { + setControlParameters(m_values, m_nvalues, (found && timestamp.interpolate)?timestamp.realTimestep:0.0); + } + } + if (m_outputControl & CTL_POSITION) { + updateOutput(y.vel, &m_pos, CTL_POSITIONX); + } + if (m_outputControl & CTL_ROTATION) { + updateOutput(y.rot, &m_rot, CTL_ROTATIONX); + } +} + +const ConstraintValues* CopyPose::getControlParameters(unsigned int* _nvalues) +{ + Twist y = diff(m_internalPose,F_identity); + int i=0; + if (m_outputControl & CTL_POSITION) { + updateValues(y.vel, &m_values[i++], &m_pos, CTL_POSITIONX); + } + if (m_outputControl & CTL_ROTATION) { + updateValues(y.rot, &m_values[i++], &m_rot, CTL_ROTATIONX); + } + if (_nvalues) + *_nvalues=m_nvalues; + return m_values; +} + +double CopyPose::getMaxTimestep(double& timestep) +{ + // CopyPose should not have any limit on linear velocity: + // in case the target is out of reach, this can be very high. 
+ // We will simply limit on rotation + e_scalar maxChidot = m_chidot.block(3,0,3,1).cwise().abs().maxCoeff(); + if (timestep*maxChidot > m_maxDeltaChi) { + timestep = m_maxDeltaChi/maxChidot; + } + return timestep; +} + +} diff --git a/intern/itasc/CopyPose.hpp b/intern/itasc/CopyPose.hpp new file mode 100644 index 00000000000..3a3f60a9f37 --- /dev/null +++ b/intern/itasc/CopyPose.hpp @@ -0,0 +1,99 @@ +/* $Id: CopyPose.hpp 20622 2009-06-04 12:47:59Z ben2610 $ + * CopyPose.h + * + * Created on: Mar 17, 2009 + * Author: benoit bolsee + */ + +#ifndef COPYPOSE_H_ +#define COPYPOSE_H_ + +#include "ConstraintSet.hpp" +namespace iTaSC{ + +using namespace KDL; + +class CopyPose: public iTaSC::ConstraintSet +{ +protected: + virtual void updateKinematics(const Timestamp& timestamp); + virtual void pushCache(const Timestamp& timestamp); + virtual void updateJacobian(); + virtual bool initialise(Frame& init_pose); + virtual void initCache(Cache *_cache); + virtual void updateControlOutput(const Timestamp& timestamp); + virtual void modelUpdate(Frame& _external_pose,const Timestamp& timestamp); + virtual double getMaxTimestep(double& timestep); + +public: + enum ID { // constraint ID in callback and setControlParameter + ID_POSITION=0, + ID_POSITIONX=1, + ID_POSITIONY=2, + ID_POSITIONZ=3, + ID_ROTATION=4, + ID_ROTATIONX=5, + ID_ROTATIONY=6, + ID_ROTATIONZ=7, + }; + enum CTL { // control ID in constructor to specify which output is constrainted + CTL_NONE=0x00, + CTL_POSITIONX=0x01, // the bit order is important: it matches the y output order + CTL_POSITIONY=0x02, + CTL_POSITIONZ=0x04, + CTL_POSITION=0x07, + CTL_ROTATIONX=0x08, + CTL_ROTATIONY=0x10, + CTL_ROTATIONZ=0x20, + CTL_ROTATION=0x38, + CTL_ALL=0x3F, + }; + + // use a combination of CTL_.. in control_output to specify which + CopyPose(unsigned int control_output=CTL_ALL, unsigned int dynamic_output=CTL_NONE, double armlength=1.0, double accuracy=1e-6, unsigned int maximum_iterations=100); + virtual ~CopyPose(); + + virtual bool setControlParameters(struct ConstraintValues* _values, unsigned int _nvalues, double timestep); + virtual const ConstraintValues* getControlParameters(unsigned int* _nvalues); + +private: + struct ConstraintSingleValue m_posData[3]; // index = controlled output in X,Y,Z order + struct ConstraintSingleValue m_rotData[3]; + struct ConstraintValues m_values[2]; // index = group of controlled output, in position, rotation order + Cache* m_cache; + int m_poseCCh; + CacheTS m_poseCTs; + unsigned int m_poseCacheSize; + unsigned int m_outputDynamic; // combination of CTL_... determine which variables are dynamically controlled by the application + unsigned int m_outputControl; // combination of CTL_... 
determine which output are constrained + unsigned int m_nvalues; // number of elements used in m_values[] + double m_maxerror; + + struct ControlState { + int firsty; // first y index + int ny; // number of y in output + double alpha; + double K; + double tolerance; + struct ControlValue { + double yddot; + double yd; + double nextyd; + double nextyddot; + } output[3]; // inded numbex = same as m_rotData + } m_rot, m_pos; + + void pushPose(CacheTS timestamp); + bool popPose(CacheTS timestamp); + int nBitsOn(unsigned int v) + { int n=0; while(v) { if (v&1) n++; v>>=1; } return n; } + double* restoreValues(double* item, ConstraintValues* _values, ControlState* _state, unsigned int mask); + double* pushValues(double* item, ControlState* _state, unsigned int mask); + void updateState(ConstraintValues* _values, ControlState* _state, unsigned int mask, double timestep); + void updateValues(Vector& vel, ConstraintValues* _values, ControlState* _state, unsigned int mask); + void updateOutput(Vector& vel, ControlState* _state, unsigned int mask); + void interpolateOutput(ControlState* _state, unsigned int mask, const Timestamp& timestamp); + +}; +} +#endif /* COPYROTATION_H_ */ diff --git a/intern/itasc/Distance.cpp b/intern/itasc/Distance.cpp new file mode 100644 index 00000000000..bf19a978888 --- /dev/null +++ b/intern/itasc/Distance.cpp @@ -0,0 +1,321 @@ +/* $Id$ + * Distance.cpp + * + * Created on: Jan 30, 2009 + * Author: rsmits + */ + +#include "Distance.hpp" +#include "kdl/kinfam_io.hpp" +#include +#include + +namespace iTaSC +{ +// a distance constraint is characterized by 5 values: alpha, tolerance, K, yd, yddot +static const unsigned int distanceCacheSize = sizeof(double)*5 + sizeof(e_scalar)*6; + +Distance::Distance(double armlength, double accuracy, unsigned int maximum_iterations): + ConstraintSet(1,accuracy,maximum_iterations), + m_chiKdl(6),m_jac(6),m_cache(NULL), + m_distCCh(-1),m_distCTs(0) +{ + m_chain.addSegment(Segment(Joint(Joint::RotZ))); + m_chain.addSegment(Segment(Joint(Joint::RotX))); + m_chain.addSegment(Segment(Joint(Joint::TransY))); + m_chain.addSegment(Segment(Joint(Joint::RotZ))); + m_chain.addSegment(Segment(Joint(Joint::RotY))); + m_chain.addSegment(Segment(Joint(Joint::RotX))); + + m_fksolver = new KDL::ChainFkSolverPos_recursive(m_chain); + m_jacsolver = new KDL::ChainJntToJacSolver(m_chain); + m_Cf(0,2)=1.0; + m_alpha = 1.0; + m_tolerance = 0.05; + m_maxerror = armlength/2.0; + m_K = 20.0; + m_Wy(0) = m_alpha/*/(m_tolerance*m_K)*/; + m_yddot = m_nextyddot = 0.0; + m_yd = m_nextyd = KDL::epsilon; + memset(&m_data, 0, sizeof(m_data)); + // initialize the data with normally fixed values + m_data.id = ID_DISTANCE; + m_values.id = ID_DISTANCE; + m_values.number = 1; + m_values.alpha = m_alpha; + m_values.feedback = m_K; + m_values.tolerance = m_tolerance; + m_values.values = &m_data; +} + +Distance::~Distance() +{ + delete m_fksolver; + delete m_jacsolver; +} + +bool Distance::computeChi(Frame& pose) +{ + double dist, alpha, beta, gamma; + dist = pose.p.Norm(); + Rotation basis; + if (dist < KDL::epsilon) { + // distance is almost 0, no need for initial rotation + m_chi(0) = 0.0; + m_chi(1) = 0.0; + } else { + // find the XZ angles that bring the Y axis to point to init_pose.p + Vector axis(pose.p/dist); + beta = 0.0; + if (fabs(axis(2)) > 1-KDL::epsilon) { + // direction is aligned on Z axis, just rotation on X + alpha = 0.0; + gamma = KDL::sign(axis(2))*KDL::PI/2; + } else { + alpha = -KDL::atan2(axis(0), axis(1)); + gamma = KDL::atan2(axis(2), 
KDL::sqrt(KDL::sqr(axis(0))+KDL::sqr(axis(1)))); + } + // rotation after first 2 joints + basis = Rotation::EulerZYX(alpha, beta, gamma); + m_chi(0) = alpha; + m_chi(1) = gamma; + } + m_chi(2) = dist; + basis = basis.Inverse()*pose.M; + basis.GetEulerZYX(alpha, beta, gamma); + // alpha = rotation on Z + // beta = rotation on Y + // gamma = rotation on X in that order + // it corresponds to the joint order, so just assign + m_chi(3) = alpha; + m_chi(4) = beta; + m_chi(5) = gamma; + return true; +} + +bool Distance::initialise(Frame& init_pose) +{ + // we will initialize m_chi to values that match the pose + m_externalPose=init_pose; + computeChi(m_externalPose); + // get current Jf and update internal pose + updateJacobian(); + return true; +} + +bool Distance::closeLoop() +{ + if (!Equal(m_internalPose.Inverse()*m_externalPose,F_identity,m_threshold)){ + computeChi(m_externalPose); + updateJacobian(); + } + return true; +} + +void Distance::initCache(Cache *_cache) +{ + m_cache = _cache; + m_distCCh = -1; + if (m_cache) { + // create one channel for the coordinates + m_distCCh = m_cache->addChannel(this, "Xf", distanceCacheSize); + // save initial constraint in cache position 0 + pushDist(0); + } +} + +void Distance::pushDist(CacheTS timestamp) +{ + if (m_distCCh >= 0) { + double *item = (double*)m_cache->addCacheItem(this, m_distCCh, timestamp, NULL, distanceCacheSize); + if (item) { + *item++ = m_K; + *item++ = m_tolerance; + *item++ = m_yd; + *item++ = m_yddot; + *item++ = m_alpha; + memcpy(item, &m_chi[0], 6*sizeof(e_scalar)); + } + m_distCTs = timestamp; + } +} + +bool Distance::popDist(CacheTS timestamp) +{ + if (m_distCCh >= 0) { + double *item = (double*)m_cache->getPreviousCacheItem(this, m_distCCh, ×tamp); + if (item && timestamp != m_distCTs) { + m_values.feedback = m_K = *item++; + m_values.tolerance = m_tolerance = *item++; + m_yd = *item++; + m_yddot = *item++; + m_values.alpha = m_alpha = *item++; + memcpy(&m_chi[0], item, 6*sizeof(e_scalar)); + m_distCTs = timestamp; + m_Wy(0) = m_alpha/*/(m_tolerance*m_K)*/; + updateJacobian(); + } + return (item) ? 
true : false; + } + return true; +} + +void Distance::pushCache(const Timestamp& timestamp) +{ + if (!timestamp.substep && timestamp.cache) + pushDist(timestamp.cacheTimestamp); +} + +void Distance::updateKinematics(const Timestamp& timestamp) +{ + if (timestamp.interpolate) { + //the internal pose and Jf is already up to date (see model_update) + //update the desired output based on yddot + if (timestamp.substep) { + m_yd += m_yddot*timestamp.realTimestep; + if (m_yd < KDL::epsilon) + m_yd = KDL::epsilon; + } else { + m_yd = m_nextyd; + m_yddot = m_nextyddot; + } + } + pushCache(timestamp); +} + +void Distance::updateJacobian() +{ + for(unsigned int i=0;i<6;i++) + m_chiKdl(i)=m_chi(i); + + m_fksolver->JntToCart(m_chiKdl,m_internalPose); + m_jacsolver->JntToJac(m_chiKdl,m_jac); + changeRefPoint(m_jac,-m_internalPose.p,m_jac); + for(unsigned int i=0;i<6;i++) + for(unsigned int j=0;j<6;j++) + m_Jf(i,j)=m_jac(i,j); +} + +bool Distance::setControlParameters(struct ConstraintValues* _values, unsigned int _nvalues, double timestep) +{ + int action = 0; + int i; + ConstraintSingleValue* _data; + + while (_nvalues > 0) { + if (_values->id == ID_DISTANCE) { + if ((_values->action & ACT_ALPHA) && _values->alpha >= 0.0) { + m_alpha = _values->alpha; + action |= ACT_ALPHA; + } + if ((_values->action & ACT_TOLERANCE) && _values->tolerance > KDL::epsilon) { + m_tolerance = _values->tolerance; + action |= ACT_TOLERANCE; + } + if ((_values->action & ACT_FEEDBACK) && _values->feedback > KDL::epsilon) { + m_K = _values->feedback; + action |= ACT_FEEDBACK; + } + for (_data = _values->values, i=0; i<_values->number; i++, _data++) { + if (_data->id == ID_DISTANCE) { + switch (_data->action & (ACT_VALUE|ACT_VELOCITY)) { + case 0: + // no indication, keep current values + break; + case ACT_VELOCITY: + // only the velocity is given estimate the new value by integration + _data->yd = m_yd+_data->yddot*timestep; + // walkthrough for negative value correction + case ACT_VALUE: + // only the value is given, estimate the velocity from previous value + if (_data->yd < KDL::epsilon) + _data->yd = KDL::epsilon; + m_nextyd = _data->yd; + // if the user sets the value, we assume future velocity is zero + // (until the user changes the value again) + m_nextyddot = (_data->action & ACT_VALUE) ? 0.0 : _data->yddot; + if (timestep>0.0) { + m_yddot = (_data->yd-m_yd)/timestep; + } else { + // allow the user to change target instantenously when this function + // if called from setControlParameter with timestep = 0 + m_yddot = m_nextyddot; + m_yd = m_nextyd; + } + break; + case (ACT_VALUE|ACT_VELOCITY): + // the user should not set the value and velocity at the same time. 
+ // In this case, we will assume that he want to set the future value + // and we compute the current value to match the velocity + if (_data->yd < KDL::epsilon) + _data->yd = KDL::epsilon; + m_yd = _data->yd - _data->yddot*timestep; + if (m_yd < KDL::epsilon) + m_yd = KDL::epsilon; + m_nextyd = _data->yd; + m_nextyddot = _data->yddot; + if (timestep>0.0) { + m_yddot = (_data->yd-m_yd)/timestep; + } else { + m_yd = m_nextyd; + m_yddot = m_nextyddot; + } + break; + } + } + } + } + _nvalues--; + _values++; + } + if (action & (ACT_TOLERANCE|ACT_FEEDBACK|ACT_ALPHA)) { + // recompute the weight + m_Wy(0) = m_alpha/*/(m_tolerance*m_K)*/; + } + return true; +} + +const ConstraintValues* Distance::getControlParameters(unsigned int* _nvalues) +{ + *(double*)&m_data.y = m_chi(2); + *(double*)&m_data.ydot = m_ydot(0); + m_data.yd = m_yd; + m_data.yddot = m_yddot; + m_data.action = 0; + m_values.action = 0; + if (_nvalues) + *_nvalues=1; + return &m_values; +} + +void Distance::updateControlOutput(const Timestamp& timestamp) +{ + bool cacheAvail = true; + if (!timestamp.substep) { + if (!timestamp.reiterate) + cacheAvail = popDist(timestamp.cacheTimestamp); + } + if (m_constraintCallback && (m_substep || (!timestamp.reiterate && !timestamp.substep))) { + // initialize first callback the application to get the current values + *(double*)&m_data.y = m_chi(2); + *(double*)&m_data.ydot = m_ydot(0); + m_data.yd = m_yd; + m_data.yddot = m_yddot; + m_data.action = 0; + m_values.action = 0; + if ((*m_constraintCallback)(timestamp, &m_values, 1, m_constraintParam)) { + setControlParameters(&m_values, 1, timestamp.realTimestep); + } + } + if (!cacheAvail || !timestamp.interpolate) { + // first position in cache: set the desired output immediately as we cannot interpolate + m_yd = m_nextyd; + m_yddot = m_nextyddot; + } + double error = m_yd-m_chi(2); + if (KDL::Norm(error) > m_maxerror) + error = KDL::sign(error)*m_maxerror; + m_ydot(0)=m_yddot+m_K*error; +} + +} diff --git a/intern/itasc/Distance.hpp b/intern/itasc/Distance.hpp new file mode 100644 index 00000000000..1366693743e --- /dev/null +++ b/intern/itasc/Distance.hpp @@ -0,0 +1,62 @@ +/* $Id: Distance.hpp 19905 2009-04-23 13:29:54Z ben2610 $ + * Distance.hpp + * + * Created on: Jan 30, 2009 + * Author: rsmits + */ + +#ifndef DISTANCE_HPP_ +#define DISTANCE_HPP_ + +#include "ConstraintSet.hpp" +#include "kdl/chain.hpp" +#include "kdl/chainfksolverpos_recursive.hpp" +#include "kdl/chainjnttojacsolver.hpp" + +namespace iTaSC +{ + +class Distance: public iTaSC::ConstraintSet +{ +protected: + virtual void updateKinematics(const Timestamp& timestamp); + virtual void pushCache(const Timestamp& timestamp); + virtual void updateJacobian(); + virtual bool initialise(Frame& init_pose); + virtual void initCache(Cache *_cache); + virtual void updateControlOutput(const Timestamp& timestamp); + virtual bool closeLoop(); + +public: + enum ID { + ID_DISTANCE=1, + }; + Distance(double armlength=1.0, double accuracy=1e-6, unsigned int maximum_iterations=100); + virtual ~Distance(); + + virtual bool setControlParameters(struct ConstraintValues* _values, unsigned int _nvalues, double timestep); + virtual const ConstraintValues* getControlParameters(unsigned int* _nvalues); + +private: + bool computeChi(Frame& pose); + KDL::Chain m_chain; + KDL::ChainFkSolverPos_recursive* m_fksolver; + KDL::ChainJntToJacSolver* m_jacsolver; + KDL::JntArray m_chiKdl; + KDL::Jacobian m_jac; + struct ConstraintSingleValue m_data; + struct ConstraintValues m_values; + Cache* m_cache; + int 
m_distCCh; + CacheTS m_distCTs; + double m_maxerror; + + void pushDist(CacheTS timestamp); + bool popDist(CacheTS timestamp); + + double m_alpha,m_yddot,m_yd,m_nextyd,m_nextyddot,m_K,m_tolerance; +}; + +} + +#endif /* DISTANCE_HPP_ */ diff --git a/intern/itasc/FixedObject.cpp b/intern/itasc/FixedObject.cpp new file mode 100644 index 00000000000..fad77d4825e --- /dev/null +++ b/intern/itasc/FixedObject.cpp @@ -0,0 +1,70 @@ +/* $Id$ + * FixedObject.cpp + * + * Created on: Feb 10, 2009 + * Author: benoitbolsee + */ + +#include "FixedObject.hpp" + +namespace iTaSC{ + + +FixedObject::FixedObject():UncontrolledObject(), + m_finalized(false), m_nframe(0) +{ +} + +FixedObject::~FixedObject() +{ + m_frameArray.clear(); +} + +int FixedObject::addFrame(const std::string& name, const Frame& frame) +{ + if (m_finalized) + return -1; + FrameList::iterator it; + unsigned int i; + for (i=0, it=m_frameArray.begin(); ifirst == name) { + // this frame will replace the old frame + it->second = frame; + return i; + } + } + m_frameArray.push_back(FrameList::value_type(name,frame)); + return m_nframe++; +} + +int FixedObject::addEndEffector(const std::string& name) +{ + // verify that this frame name exist + FrameList::iterator it; + unsigned int i; + for (i=0, it=m_frameArray.begin(); ifirst == name) { + return i; + } + } + return -1; +} + +void FixedObject::finalize() +{ + if (m_finalized) + return; + initialize(0, m_nframe); + m_finalized = true; +} + +const Frame& FixedObject::getPose(const unsigned int frameIndex) +{ + if (frameIndex < m_nframe) { + return m_frameArray[frameIndex].second; + } else { + return F_identity; + } +} + +} diff --git a/intern/itasc/FixedObject.hpp b/intern/itasc/FixedObject.hpp new file mode 100644 index 00000000000..01ab3355259 --- /dev/null +++ b/intern/itasc/FixedObject.hpp @@ -0,0 +1,45 @@ +/* $Id: FixedObject.hpp 19905 2009-04-23 13:29:54Z ben2610 $ + * FixedObject.h + * + * Created on: Feb 10, 2009 + * Author: benoitbolsee + */ + +#ifndef FIXEDOBJECT_HPP_ +#define FIXEDOBJECT_HPP_ + +#include "UncontrolledObject.hpp" +#include + + +namespace iTaSC{ + +class FixedObject: public UncontrolledObject { +public: + FixedObject(); + virtual ~FixedObject(); + + int addFrame(const std::string& name, const Frame& frame); + + virtual void updateCoordinates(const Timestamp& timestamp) {}; + virtual int addEndEffector(const std::string& name); + virtual void finalize(); + virtual const Frame& getPose(const unsigned int frameIndex); + virtual void updateKinematics(const Timestamp& timestamp) {}; + virtual void pushCache(const Timestamp& timestamp) {}; + virtual void initCache(Cache *_cache) {}; + +protected: + virtual void updateJacobian() {} +private: + typedef std::vector > FrameList; + + bool m_finalized; + unsigned int m_nframe; + FrameList m_frameArray; + +}; + +} + +#endif /* FIXEDOBJECT_H_ */ diff --git a/intern/itasc/Makefile b/intern/itasc/Makefile new file mode 100644 index 00000000000..2be46a017df --- /dev/null +++ b/intern/itasc/Makefile @@ -0,0 +1,53 @@ +# +# $Id$ +# +# ***** BEGIN GPL LICENSE BLOCK ***** +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +# +# The Original Code is Copyright (C) 2001-2002 by NaN Holding BV. +# All rights reserved. +# +# The Original Code is: all of this file. +# +# Contributor(s): Hans Lambermont +# +# ***** END GPL LICENSE BLOCK ***** +# iksolver main makefile. +# + +include nan_definitions.mk + +LIBNAME = itasc +SOURCEDIR = intern/$(LIBNAME) +DIR = $(OCGDIR)/$(SOURCEDIR) +DIRS = kdl +include nan_subdirs.mk +include nan_compile.mk + +CPPFLAGS += -I. +CPPFLAGS += -I../../extern/Eigen2 + +install: $(ALL_OR_DEBUG) + @[ -d $(NAN_ITASC) ] || mkdir $(NAN_ITASC) + @[ -d $(NAN_ITASC)/lib/$(DEBUG_DIR) ] || mkdir $(NAN_ITASC)/lib/$(DEBUG_DIR) + @../tools/cpifdiff.sh $(DIR)/$(DEBUG_DIR)libitasc.a $(DIR)/$(DEBUG_DIR)libitasc_kdl.a $(DIR)/$(DEBUG_DIR)libitasc_kdl_util.a $(NAN_ITASC)/lib/$(DEBUG_DIR) +ifeq ($(OS),darwin) + ranlib $(NAN_ITASC)/lib/$(DEBUG_DIR)libitasc.a + ranlib $(NAN_ITASC)/lib/$(DEBUG_DIR)libitasc_kdl.a + ranlib $(NAN_ITASC)/lib/$(DEBUG_DIR)libitasc_kdl_util.a +endif +############################## +include nan_subdirs.mk diff --git a/intern/itasc/MovingFrame.cpp b/intern/itasc/MovingFrame.cpp new file mode 100644 index 00000000000..e923b1fab27 --- /dev/null +++ b/intern/itasc/MovingFrame.cpp @@ -0,0 +1,156 @@ +/* $Id$ + * MovingFrame.cpp + * + * Created on: Feb 10, 2009 + * Author: benoitbolsee + */ + +#include "MovingFrame.hpp" +#include +namespace iTaSC{ + +static const unsigned int frameCacheSize = (sizeof(((Frame*)0)->p.data)+sizeof(((Frame*)0)->M.data))/sizeof(double); + +MovingFrame::MovingFrame(const Frame& frame):UncontrolledObject(), + m_function(NULL), m_param(NULL), m_velocity(), m_poseCCh(-1), m_poseCTs(0) +{ + m_internalPose = m_nextPose = frame; + initialize(6, 1); + e_matrix& Ju = m_JuArray[0]; + Ju = e_identity_matrix(6,6); +} + +MovingFrame::~MovingFrame() +{ +} + +void MovingFrame::finalize() +{ + updateJacobian(); +} + +void MovingFrame::initCache(Cache *_cache) +{ + m_cache = _cache; + m_poseCCh = -1; + if (m_cache) { + m_poseCCh = m_cache->addChannel(this,"pose",frameCacheSize*sizeof(double)); + // don't store the initial pose, it's causing unnecessary large velocity on the first step + //pushInternalFrame(0); + } +} + +void MovingFrame::pushInternalFrame(CacheTS timestamp) +{ + if (m_poseCCh >= 0) { + double buf[frameCacheSize]; + memcpy(buf, m_internalPose.p.data, sizeof(m_internalPose.p.data)); + memcpy(&buf[sizeof(m_internalPose.p.data)/sizeof(double)], m_internalPose.M.data, sizeof(m_internalPose.M.data)); + + m_cache->addCacheVectorIfDifferent(this, m_poseCCh, timestamp, buf, frameCacheSize, KDL::epsilon); + m_poseCTs = timestamp; + } +} + +// load pose just preceeding timestamp +// return false if no cache position was found +bool MovingFrame::popInternalFrame(CacheTS timestamp) +{ + if (m_poseCCh >= 0) { + char *item; + item = (char *)m_cache->getPreviousCacheItem(this, m_poseCCh, ×tamp); + if (item && m_poseCTs != timestamp) { + memcpy(m_internalPose.p.data, item, sizeof(m_internalPose.p.data)); + item += sizeof(m_internalPose.p.data); + memcpy(m_internalPose.M.data, item, sizeof(m_internalPose.M.data)); + m_poseCTs = timestamp; + // changing the starting pose, recompute the jacobian + updateJacobian(); + } + return (item) ? 
true : false; + } + // in case of no cache, there is always a previous position + return true; +} + +bool MovingFrame::setFrame(const Frame& frame) +{ + m_internalPose = m_nextPose = frame; + return true; +} + +bool MovingFrame::setCallback(MovingFrameCallback _function, void* _param) +{ + m_function = _function; + m_param = _param; + return true; +} + +void MovingFrame::updateCoordinates(const Timestamp& timestamp) +{ + // don't compute the velocity during substepping, it is assumed constant. + if (!timestamp.substep) { + bool cacheAvail = true; + if (!timestamp.reiterate) { + cacheAvail = popInternalFrame(timestamp.cacheTimestamp); + if (m_function) + (*m_function)(timestamp, m_internalPose, m_nextPose, m_param); + } + // only compute velocity if we have a previous pose + if (cacheAvail && timestamp.interpolate) { + unsigned int iXu; + m_velocity = diff(m_internalPose, m_nextPose, timestamp.realTimestep); + for (iXu=0; iXu<6; iXu++) + m_xudot(iXu) = m_velocity(iXu); + } else if (!timestamp.reiterate) { + // new position is forced, no velocity as we cannot interpolate + m_internalPose = m_nextPose; + m_velocity = Twist::Zero(); + m_xudot = e_zero_vector(6); + // recompute the jacobian + updateJacobian(); + } + } +} + +void MovingFrame::pushCache(const Timestamp& timestamp) +{ + if (!timestamp.substep && timestamp.cache) + pushInternalFrame(timestamp.cacheTimestamp); +} + +void MovingFrame::updateKinematics(const Timestamp& timestamp) +{ + if (timestamp.interpolate) { + if (timestamp.substep) { + // during substepping, update the internal pose from velocity information + Twist localvel = m_internalPose.M.Inverse(m_velocity); + m_internalPose.Integrate(localvel, 1.0/timestamp.realTimestep); + } else { + m_internalPose = m_nextPose; + } + // m_internalPose is updated, recompute the jacobian + updateJacobian(); + } + pushCache(timestamp); +} + +void MovingFrame::updateJacobian() +{ + Twist m_jac; + e_matrix& Ju = m_JuArray[0]; + + //Jacobian is always identity at position on the object, + //we ust change the reference to the world. 
+ //instead of going through complicated jacobian operation, implemented direct formula + Ju(1,3) = m_internalPose.p.z(); + Ju(2,3) = -m_internalPose.p.y(); + Ju(0,4) = -m_internalPose.p.z(); + Ju(2,4) = m_internalPose.p.x(); + Ju(0,5) = m_internalPose.p.y(); + Ju(1,5) = -m_internalPose.p.x(); + // remember that this object has moved + m_updated = true; +} + +} diff --git a/intern/itasc/MovingFrame.hpp b/intern/itasc/MovingFrame.hpp new file mode 100644 index 00000000000..edaa3136a13 --- /dev/null +++ b/intern/itasc/MovingFrame.hpp @@ -0,0 +1,48 @@ +/* $Id: MovingFrame.hpp 19907 2009-04-23 13:41:59Z ben2610 $ + * MovingFrame.h + * + * Created on: Feb 10, 2009 + * Author: benoitbolsee + */ + +#ifndef MOVINGFRAME_HPP_ +#define MOVINGFRAME_HPP_ + +#include "UncontrolledObject.hpp" +#include + + +namespace iTaSC{ + +typedef bool (*MovingFrameCallback)(const Timestamp& timestamp, const Frame& _current, Frame& _next, void *param); + +class MovingFrame: public UncontrolledObject { +public: + MovingFrame(const Frame& frame=F_identity); + virtual ~MovingFrame(); + + bool setFrame(const Frame& frame); + bool setCallback(MovingFrameCallback _function, void* _param); + + virtual void updateCoordinates(const Timestamp& timestamp); + virtual void updateKinematics(const Timestamp& timestamp); + virtual void pushCache(const Timestamp& timestamp); + virtual void initCache(Cache *_cache); + virtual void finalize(); +protected: + virtual void updateJacobian(); + +private: + void pushInternalFrame(CacheTS timestamp); + bool popInternalFrame(CacheTS timestamp); + MovingFrameCallback m_function; + void* m_param; + Frame m_nextPose; + Twist m_velocity; + int m_poseCCh; // cache channel for pose + unsigned int m_poseCTs; +}; + +} + +#endif /* MOVINGFRAME_H_ */ diff --git a/intern/itasc/Object.hpp b/intern/itasc/Object.hpp new file mode 100644 index 00000000000..5c312cab768 --- /dev/null +++ b/intern/itasc/Object.hpp @@ -0,0 +1,48 @@ +/* $Id: Object.hpp 19907 2009-04-23 13:41:59Z ben2610 $ + * Object.hpp + * + * Created on: Jan 5, 2009 + * Author: rubensmits + */ + +#ifndef OBJECT_HPP_ +#define OBJECT_HPP_ + +#include "Cache.hpp" +#include "kdl/frames.hpp" +#include + +namespace iTaSC{ + +class WorldObject; + +class Object { +public: + enum ObjectType {Controlled, UnControlled}; + static WorldObject world; + +private: + ObjectType m_type; +protected: + Cache *m_cache; + KDL::Frame m_internalPose; + bool m_updated; + virtual void updateJacobian()=0; +public: + Object(ObjectType _type):m_type(_type), m_cache(NULL), m_internalPose(F_identity), m_updated(false) {}; + virtual ~Object(){}; + + virtual int addEndEffector(const std::string& name){return 0;}; + virtual void finalize(){}; + virtual const KDL::Frame& getPose(const unsigned int end_effector=0){return m_internalPose;}; + virtual const ObjectType getType(){return m_type;}; + virtual const unsigned int getNrOfCoordinates(){return 0;}; + virtual void updateKinematics(const Timestamp& timestamp)=0; + virtual void pushCache(const Timestamp& timestamp)=0; + virtual void initCache(Cache *_cache) = 0; + bool updated() {return m_updated;}; + void updated(bool val) {m_updated=val;}; +}; + +} +#endif /* OBJECT_HPP_ */ diff --git a/intern/itasc/SConscript b/intern/itasc/SConscript new file mode 100644 index 00000000000..9e11b6c7119 --- /dev/null +++ b/intern/itasc/SConscript @@ -0,0 +1,11 @@ +#!/usr/bin/python +Import ('env') + +sources = env.Glob('*.cpp') +sources += env.Glob('kdl/*.cpp') +sources += env.Glob('kdl/utilities/*.cpp') + +incs = '. 
../../extern/Eigen2' + +env.BlenderLib ('bf_ITASC', sources, Split(incs), [], libtype=['intern','player'], priority=[20,100] ) + diff --git a/intern/itasc/Scene.cpp b/intern/itasc/Scene.cpp new file mode 100644 index 00000000000..8aa423584f1 --- /dev/null +++ b/intern/itasc/Scene.cpp @@ -0,0 +1,543 @@ +/* $Id$ + * Scene.cpp + * + * Created on: Jan 5, 2009 + * Author: rubensmits + */ + +#include "Scene.hpp" +#include "ControlledObject.hpp" +#include "kdl/utilities/svd_eigen_HH.hpp" +#include + +namespace iTaSC { + +class SceneLock : public ControlledObject::JointLockCallback { +private: + Scene* m_scene; + Range m_qrange; + +public: + SceneLock(Scene* scene) : + m_scene(scene), m_qrange(0,0) {} + virtual ~SceneLock() {} + + void setRange(Range& range) + { + m_qrange = range; + } + // lock a joint, no need to update output + virtual void lockJoint(unsigned int q_nr, unsigned int ndof) + { + q_nr += m_qrange.start; + project(m_scene->m_Wq, Range(q_nr, ndof), m_qrange).setZero(); + } + // lock a joint and update output in view of reiteration + virtual void lockJoint(unsigned int q_nr, unsigned int ndof, double* qdot) + { + q_nr += m_qrange.start; + project(m_scene->m_Wq, Range(q_nr, ndof), m_qrange).setZero(); + // update the ouput vector so that the movement of this joint will be + // taken into account and we can put the joint back in its initial position + // which means that the jacobian doesn't need to be changed + for (unsigned int i=0 ;im_ydot -= m_scene->m_A.col(q_nr)*qdot[i]; + } + } +}; + +Scene::Scene(): + m_A(), m_B(), m_Atemp(), m_Wq(), m_Jf(), m_Jq(), m_Ju(), m_Cf(), m_Cq(), m_Jf_inv(), + m_Vf(),m_Uf(), m_Wy(), m_ydot(), m_qdot(), m_xdot(), m_Sf(),m_tempf(), + m_ncTotal(0),m_nqTotal(0),m_nuTotal(0),m_nsets(0), + m_solver(NULL),m_cache(NULL) +{ + m_minstep = 0.01; + m_maxstep = 0.06; +} + +Scene::~Scene() +{ + ConstraintMap::iterator constraint_it; + while ((constraint_it = constraints.begin()) != constraints.end()) { + delete constraint_it->second; + constraints.erase(constraint_it); + } + ObjectMap::iterator object_it; + while ((object_it = objects.begin()) != objects.end()) { + delete object_it->second; + objects.erase(object_it); + } +} + +bool Scene::setParam(SceneParam paramId, double value) +{ + switch (paramId) { + case MIN_TIMESTEP: + m_minstep = value; + break; + case MAX_TIMESTEP: + m_maxstep = value; + break; + default: + return false; + } + return true; +} + +bool Scene::addObject(const std::string& name, Object* object, UncontrolledObject* base, const std::string& baseFrame) +{ + // finalize the object before adding + object->finalize(); + //Check if Object is controlled or uncontrolled. 
+ if(object->getType()==Object::Controlled){ + int baseFrameIndex = base->addEndEffector(baseFrame); + if (baseFrameIndex < 0) + return false; + std::pair result; + if (base->getNrOfCoordinates() == 0) { + // base is fixed object, no coordinate range + result = objects.insert(ObjectMap::value_type( + name, new Object_struct(object,base,baseFrameIndex, + Range(m_nqTotal,object->getNrOfCoordinates()), + Range(m_ncTotal,((ControlledObject*)object)->getNrOfConstraints()), + Range(0,0)))); + } else { + // base is a moving object, must be in list already + ObjectMap::iterator base_it; + for (base_it=objects.begin(); base_it != objects.end(); base_it++) { + if (base_it->second->object == base) + break; + } + if (base_it == objects.end()) + return false; + result = objects.insert(ObjectMap::value_type( + name, new Object_struct(object,base,baseFrameIndex, + Range(m_nqTotal,object->getNrOfCoordinates()), + Range(m_ncTotal,((ControlledObject*)object)->getNrOfConstraints()), + base_it->second->coordinaterange))); + } + if (!result.second) { + return false; + } + m_nqTotal+=object->getNrOfCoordinates(); + m_ncTotal+=((ControlledObject*)object)->getNrOfConstraints(); + return true; + } + if(object->getType()==Object::UnControlled){ + if ((WorldObject*)base != &Object::world) + return false; + std::pair result = objects.insert(ObjectMap::value_type( + name,new Object_struct(object,base,0, + Range(0,0), + Range(0,0), + Range(m_nuTotal,object->getNrOfCoordinates())))); + if(!result.second) + return false; + m_nuTotal+=object->getNrOfCoordinates(); + return true; + } + return false; +} + +bool Scene::addConstraintSet(const std::string& name,ConstraintSet* task,const std::string& object1,const std::string& object2, const std::string& ee1, const std::string& ee2) +{ + //Check if objects exist: + ObjectMap::iterator object1_it = objects.find(object1); + ObjectMap::iterator object2_it = objects.find(object2); + if(object1_it==objects.end()||object2_it==objects.end()) + return false; + int ee1_index = object1_it->second->object->addEndEffector(ee1); + int ee2_index = object2_it->second->object->addEndEffector(ee2); + if (ee1_index < 0 || ee2_index < 0) + return false; + std::pair result = + constraints.insert(ConstraintMap::value_type(name,new ConstraintSet_struct( + task,object1_it,ee1_index,object2_it,ee2_index, + Range(m_ncTotal,task->getNrOfConstraints()),Range(6*m_nsets,6)))); + if(!result.second) + return false; + m_ncTotal+=task->getNrOfConstraints(); + m_nsets+=1; + return true; +} + +bool Scene::addSolver(Solver* _solver){ + if(m_solver==NULL){ + m_solver=_solver; + return true; + } + else + return false; +} + +bool Scene::addCache(Cache* _cache){ + if(m_cache==NULL){ + m_cache=_cache; + return true; + } + else + return false; +} + +bool Scene::initialize(){ + + //prepare all matrices: + if (m_ncTotal == 0 || m_nqTotal == 0 || m_nsets == 0) + return false; + + m_A = e_zero_matrix(m_ncTotal,m_nqTotal); + if (m_nuTotal > 0) { + m_B = e_zero_matrix(m_ncTotal,m_nuTotal); + m_xdot = e_zero_vector(m_nuTotal); + m_Ju = e_zero_matrix(6*m_nsets,m_nuTotal); + } + m_Atemp = e_zero_matrix(m_ncTotal,6*m_nsets); + m_ydot = e_zero_vector(m_ncTotal); + m_qdot = e_zero_vector(m_nqTotal); + m_Wq = e_zero_matrix(m_nqTotal,m_nqTotal); + m_Wy = e_zero_vector(m_ncTotal); + m_Jq = e_zero_matrix(6*m_nsets,m_nqTotal); + m_Jf = e_zero_matrix(6*m_nsets,6*m_nsets); + m_Jf_inv = m_Jf; + m_Cf = e_zero_matrix(m_ncTotal,m_Jf.rows()); + m_Cq = e_zero_matrix(m_ncTotal,m_nqTotal); + + bool result=true; + // finalize all objects + for 
(ObjectMap::iterator it=objects.begin(); it!=objects.end(); ++it) { + Object_struct* os = it->second; + + os->object->initCache(m_cache); + if (os->constraintrange.count > 0) + project(m_Cq,os->constraintrange,os->jointrange) = (((ControlledObject*)(os->object))->getCq()); + } + + m_ytask.resize(m_ncTotal); + bool toggle=true; + int cnt = 0; + //Initialize all ConstraintSets: + for(ConstraintMap::iterator it=constraints.begin();it!=constraints.end();++it){ + //Calculate the external pose: + ConstraintSet_struct* cs = it->second; + Frame external_pose; + getConstraintPose(cs->task, cs, external_pose); + result&=cs->task->initialise(external_pose); + cs->task->initCache(m_cache); + for (int i=0; iconstraintrange.count; i++, cnt++) { + m_ytask[cnt] = toggle; + } + toggle = !toggle; + project(m_Cf,cs->constraintrange,cs->featurerange)=cs->task->getCf(); + } + + if(m_solver!=NULL) + m_solver->init(m_nqTotal,m_ncTotal,m_ytask); + else + return false; + + + return result; +} + +bool Scene::getConstraintPose(ConstraintSet* constraint, void *_param, KDL::Frame& _pose) +{ + // function called from constraint when they need to get the external pose + ConstraintSet_struct* cs = (ConstraintSet_struct*)_param; + // verification, the pointer MUST match + assert (constraint == cs->task); + Object_struct* ob1 = cs->object1->second; + Object_struct* ob2 = cs->object2->second; + //Calculate the external pose: + _pose=(ob1->base->getPose(ob1->baseFrameIndex)*ob1->object->getPose(cs->ee1index)).Inverse()*(ob2->base->getPose(ob2->baseFrameIndex)*ob2->object->getPose(cs->ee2index)); + return true; +} + +bool Scene::update(double timestamp, double timestep, unsigned int numsubstep, bool reiterate, bool cache, bool interpolate) +{ + // we must have valid timestep and timestamp + if (timestamp < KDL::epsilon || timestep < 0.0) + return false; + Timestamp ts; + ts.realTimestamp = timestamp; + // initially we start with the full timestep to allow velocity estimation over the full interval + ts.realTimestep = timestep; + setCacheTimestamp(ts); + ts.substep = 0; + // for reiteration don't load cache + // reiteration=additional iteration with same timestamp if application finds the convergence not good enough + ts.reiterate = (reiterate) ? 1 : 0; + ts.interpolate = (interpolate) ? 1 : 0; + ts.cache = (cache) ? 1 : 0; + ts.update = 1; + ts.numstep = (numsubstep & 0xFF); + bool autosubstep = (numsubstep == 0) ? 
true : false; + if (numsubstep < 1) + numsubstep = 1; + double timesubstep = timestep/numsubstep; + double timeleft = timestep; + + if (timeleft == 0.0) { + // this special case correspond to a request to cache data + for(ObjectMap::iterator it=objects.begin();it!=objects.end();++it){ + it->second->object->pushCache(ts); + } + //Update the Constraints + for(ConstraintMap::iterator it=constraints.begin();it!=constraints.end();++it){ + it->second->task->pushCache(ts); + } + return true; + } + + double maxqdot; + e_scalar nlcoef; + SceneLock lockCallback(this); + Frame external_pose; + bool locked; + + // initially we keep timestep unchanged so that update function compute the velocity over + while (numsubstep > 0) { + // get objects + for(ObjectMap::iterator it=objects.begin();it!=objects.end();++it) { + Object_struct* os = it->second; + if (os->object->getType()==Object::Controlled) { + ((ControlledObject*)(os->object))->updateControlOutput(ts); + if (os->constraintrange.count > 0) { + project(m_ydot, os->constraintrange) = ((ControlledObject*)(os->object))->getControlOutput(); + project(m_Wy, os->constraintrange) = ((ControlledObject*)(os->object))->getWy(); + // project(m_Cq,os->constraintrange,os->jointrange) = (((ControlledObject*)(os->object))->getCq()); + } + if (os->jointrange.count > 0) { + project(m_Wq,os->jointrange,os->jointrange) = ((ControlledObject*)(os->object))->getWq(); + } + } + if (os->object->getType()==Object::UnControlled && ((UncontrolledObject*)os->object)->getNrOfCoordinates() != 0) { + ((UncontrolledObject*)(os->object))->updateCoordinates(ts); + if (!ts.substep) { + // velocity of uncontrolled object remains constant during substepping + project(m_xdot,os->coordinaterange) = ((UncontrolledObject*)(os->object))->getXudot(); + } + } + } + + //get new Constraints values + for(ConstraintMap::iterator it=constraints.begin();it!=constraints.end();++it) { + ConstraintSet_struct* cs = it->second; + Object_struct* ob1 = cs->object1->second; + Object_struct* ob2 = cs->object2->second; + + if (ob1->base->updated() || ob1->object->updated() || ob2->base->updated() || ob2->object->updated()) { + // the object from which the constraint depends have changed position + // recompute the constraint pose + getConstraintPose(cs->task, cs, external_pose); + cs->task->initialise(external_pose); + } + cs->task->updateControlOutput(ts); + project(m_ydot,cs->constraintrange)=cs->task->getControlOutput(); + if (!ts.substep || cs->task->substep()) { + project(m_Wy,cs->constraintrange)=(cs->task)->getWy(); + //project(m_Cf,cs->constraintrange,cs->featurerange)=cs->task->getCf(); + } + + project(m_Jf,cs->featurerange,cs->featurerange)=cs->task->getJf(); + //std::cout << "Jf = " << Jf << std::endl; + //Transform the reference frame of this jacobian to the world reference frame + Eigen::Block Jf_part = project(m_Jf,cs->featurerange,cs->featurerange); + changeBase(Jf_part,ob1->base->getPose(ob1->baseFrameIndex)*ob1->object->getPose(cs->ee1index)); + //std::cout << "Jf_w = " << Jf << std::endl; + + //calculate the inverse of Jf + KDL::svd_eigen_HH(project(m_Jf,cs->featurerange,cs->featurerange),m_Uf,m_Sf,m_Vf,m_tempf); + for(unsigned int i=0;i<6;++i) + if(m_Sf(i)featurerange,cs->featurerange)=(m_Vf*m_Uf.transpose()).lazy(); + + //Get the robotjacobian associated with this constraintset + //Each jacobian is expressed in robot base frame => convert to world reference + //and negate second robot because it is taken reversed when closing the loop: + if(ob1->object->getType()==Object::Controlled){ + 
project(m_Jq,cs->featurerange,ob1->jointrange) = (((ControlledObject*)(ob1->object))->getJq(cs->ee1index)); + //Transform the reference frame of this jacobian to the world reference frame: + Eigen::Block Jq_part = project(m_Jq,cs->featurerange,ob1->jointrange); + changeBase(Jq_part,ob1->base->getPose(ob1->baseFrameIndex)); + // if the base of this object is moving, get the Ju part + if (ob1->base->getNrOfCoordinates() != 0) { + // Ju is already computed for world reference frame + project(m_Ju,cs->featurerange,ob1->coordinaterange)=ob1->base->getJu(ob1->baseFrameIndex); + } + } else if (ob1->object->getType() == Object::UnControlled && ((UncontrolledObject*)ob1->object)->getNrOfCoordinates() != 0) { + // object1 is uncontrolled moving object + project(m_Ju,cs->featurerange,ob1->coordinaterange)=((UncontrolledObject*)ob1->object)->getJu(cs->ee1index); + } + if(ob2->object->getType()==Object::Controlled){ + //Get the robotjacobian associated with this constraintset + // process a special case where object2 and object1 are equal but using different end effector + if (ob1->object == ob2->object) { + // we must create a temporary matrix + e_matrix JqTemp(((ControlledObject*)(ob2->object))->getJq(cs->ee2index)); + //Transform the reference frame of this jacobian to the world reference frame: + changeBase(JqTemp,ob2->base->getPose(ob2->baseFrameIndex)); + // substract in place + project(m_Jq,cs->featurerange,ob2->jointrange) -= JqTemp; + } else { + project(m_Jq,cs->featurerange,ob2->jointrange) = -(((ControlledObject*)(ob2->object))->getJq(cs->ee2index)); + //Transform the reference frame of this jacobian to the world reference frame: + Eigen::Block Jq_part = project(m_Jq,cs->featurerange,ob2->jointrange); + changeBase(Jq_part,ob2->base->getPose(ob2->baseFrameIndex)); + } + if (ob2->base->getNrOfCoordinates() != 0) { + // if base is the same as first object or first object base, + // that portion of m_Ju has been set already => substract inplace + if (ob2->base == ob1->base || ob2->base == ob1->object) { + project(m_Ju,cs->featurerange,ob2->coordinaterange) -= ob2->base->getJu(ob2->baseFrameIndex); + } else { + project(m_Ju,cs->featurerange,ob2->coordinaterange) = -ob2->base->getJu(ob2->baseFrameIndex); + } + } + } else if (ob2->object->getType() == Object::UnControlled && ((UncontrolledObject*)ob2->object)->getNrOfCoordinates() != 0) { + if (ob2->object == ob1->base || ob2->object == ob1->object) { + project(m_Ju,cs->featurerange,ob2->coordinaterange) -= ((UncontrolledObject*)ob2->object)->getJu(cs->ee2index); + } else { + project(m_Ju,cs->featurerange,ob2->coordinaterange) = -((UncontrolledObject*)ob2->object)->getJu(cs->ee2index); + } + } + } + + //Calculate A + m_Atemp=(m_Cf*m_Jf_inv).lazy(); + m_A = m_Cq-(m_Atemp*m_Jq).lazy(); + if (m_nuTotal > 0) { + m_B=(m_Atemp*m_Ju).lazy(); + m_ydot += (m_B*m_xdot).lazy(); + } + + //Call the solver with A, Wq, Wy, ydot to solver qdot: + if(!m_solver->solve(m_A,m_Wy,m_ydot,m_Wq,m_qdot,nlcoef)) + // this should never happen + return false; + //send result to the objects + for(ObjectMap::iterator it=objects.begin();it!=objects.end();++it) { + Object_struct* os = it->second; + if(os->object->getType()==Object::Controlled) + ((ControlledObject*)(os->object))->setJointVelocity(project(m_qdot,os->jointrange)); + } + // compute the constraint velocity + for(ConstraintMap::iterator it=constraints.begin();it!=constraints.end();++it){ + ConstraintSet_struct* cs = it->second; + Object_struct* ob1 = cs->object1->second; + Object_struct* ob2 = cs->object2->second; + 
//Calculate the twist of the world reference frame due to the robots (Jq*qdot+Ju*chiudot): + e_vector6 external_vel = e_zero_vector(6); + if (ob1->jointrange.count > 0) + external_vel += (project(m_Jq,cs->featurerange,ob1->jointrange)*project(m_qdot,ob1->jointrange)).lazy(); + if (ob2->jointrange.count > 0) + external_vel += (project(m_Jq,cs->featurerange,ob2->jointrange)*project(m_qdot,ob2->jointrange)).lazy(); + if (ob1->coordinaterange.count > 0) + external_vel += (project(m_Ju,cs->featurerange,ob1->coordinaterange)*project(m_xdot,ob1->coordinaterange)).lazy(); + if (ob2->coordinaterange.count > 0) + external_vel += (project(m_Ju,cs->featurerange,ob2->coordinaterange)*project(m_xdot,ob2->coordinaterange)).lazy(); + //the twist caused by the constraint must be opposite because of the closed loop + //estimate the velocity of the joints using the inverse jacobian + e_vector6 estimated_chidot = project(m_Jf_inv,cs->featurerange,cs->featurerange)*(-external_vel); + cs->task->setJointVelocity(estimated_chidot); + } + + if (autosubstep) { + // automatic computing of substep based on maximum joint change + // and joint limit gain variation + // We will pass the joint velocity to each object and they will recommend a maximum timestep + timesubstep = timeleft; + // get armature max joint velocity to estimate the maximum duration of integration + maxqdot = m_qdot.cwise().abs().maxCoeff(); + double maxsubstep = nlcoef*m_maxstep; + if (maxsubstep < m_minstep) + maxsubstep = m_minstep; + if (timesubstep > maxsubstep) + timesubstep = maxsubstep; + for(ObjectMap::iterator it=objects.begin();it!=objects.end();++it){ + Object_struct* os = it->second; + if(os->object->getType()==Object::Controlled) + ((ControlledObject*)(os->object))->getMaxTimestep(timesubstep); + } + for(ConstraintMap::iterator it=constraints.begin();it!=constraints.end();++it){ + ConstraintSet_struct* cs = it->second; + cs->task->getMaxTimestep(timesubstep); + } + // use substep that are even dividers of timestep for more regularity + maxsubstep = 2.0*floor(timestep/2.0/timesubstep-0.66666); + timesubstep = (maxsubstep < 0.0) ? 
timestep : timestep/(2.0+maxsubstep); + if (timesubstep >= timeleft-(m_minstep/2.0)) { + timesubstep = timeleft; + numsubstep = 1; + timeleft = 0.; + } else { + numsubstep = 2; + timeleft -= timesubstep; + } + } + if (numsubstep > 1) { + ts.substep = 1; + } else { + // set substep to false for last iteration so that controlled output + // can be updated in updateKinematics() and model_update)() before next call to Secne::update() + ts.substep = 0; + } + // change timestep so that integration is done correctly + ts.realTimestep = timesubstep; + + do { + ObjectMap::iterator it; + Object_struct* os; + locked = false; + for(it=objects.begin();it!=objects.end();++it){ + os = it->second; + if (os->object->getType()==Object::Controlled) { + lockCallback.setRange(os->jointrange); + if (((ControlledObject*)os->object)->updateJoint(ts, lockCallback)) { + // this means one of the joint was locked and we must rerun + // the solver to update the remaining joints + locked = true; + break; + } + } + } + if (locked) { + // Some rows of m_Wq have been cleared so that the corresponding joint will not move + if(!m_solver->solve(m_A,m_Wy,m_ydot,m_Wq,m_qdot,nlcoef)) + // this should never happen + return false; + + //send result to the objects + for(it=objects.begin();it!=objects.end();++it) { + os = it->second; + if(os->object->getType()==Object::Controlled) + ((ControlledObject*)(os->object))->setJointVelocity(project(m_qdot,os->jointrange)); + } + } + } while (locked); + + //Update the Objects + for(ObjectMap::iterator it=objects.begin();it!=objects.end();++it){ + it->second->object->updateKinematics(ts); + // mark this object not updated since the constraint will be updated anyway + // this flag is only useful to detect external updates + it->second->object->updated(false); + } + //Update the Constraints + for(ConstraintMap::iterator it=constraints.begin();it!=constraints.end();++it){ + ConstraintSet_struct* cs = it->second; + //Calculate the external pose: + getConstraintPose(cs->task, cs, external_pose); + cs->task->modelUpdate(external_pose,ts); + // update the constraint output and cache + cs->task->updateKinematics(ts); + } + numsubstep--; + } + return true; +} + +} diff --git a/intern/itasc/Scene.hpp b/intern/itasc/Scene.hpp new file mode 100644 index 00000000000..a2d63361d95 --- /dev/null +++ b/intern/itasc/Scene.hpp @@ -0,0 +1,104 @@ +/* $Id: Scene.hpp 20622 2009-06-04 12:47:59Z ben2610 $ + * Scene.hpp + * + * Created on: Jan 5, 2009 + * Author: rubensmits + */ + +#ifndef SCENE_HPP_ +#define SCENE_HPP_ + +#include "eigen_types.hpp" + +#include "WorldObject.hpp" +#include "ConstraintSet.hpp" +#include "Solver.hpp" + +#include + +namespace iTaSC { + +class SceneLock; + +class Scene { + friend class SceneLock; +public: + enum SceneParam { + MIN_TIMESTEP = 0, + MAX_TIMESTEP, + + COUNT + }; + + + Scene(); + virtual ~Scene(); + + bool addObject(const std::string& name, Object* object, UncontrolledObject* base=&Object::world, const std::string& baseFrame=""); + bool addConstraintSet(const std::string& name, ConstraintSet* task,const std::string& object1,const std::string& object2,const std::string& ee1="",const std::string& ee2=""); + bool addSolver(Solver* _solver); + bool addCache(Cache* _cache); + bool initialize(); + bool update(double timestamp, double timestep, unsigned int numsubstep=1, bool reiterate=false, bool cache=true, bool interpolate=true); + bool setParam(SceneParam paramId, double value); + + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + +private: + e_matrix 
m_A,m_B,m_Atemp,m_Wq,m_Jf,m_Jq,m_Ju,m_Cf,m_Cq,m_Jf_inv; + e_matrix6 m_Vf,m_Uf; + e_vector m_Wy,m_ydot,m_qdot,m_xdot; + e_vector6 m_Sf,m_tempf; + double m_minstep; + double m_maxstep; + unsigned int m_ncTotal,m_nqTotal,m_nuTotal,m_nsets; + std::vector m_ytask; + + Solver* m_solver; + Cache* m_cache; + + + struct Object_struct{ + Object* object; + UncontrolledObject* base; + unsigned int baseFrameIndex; + Range constraintrange; + Range jointrange; + Range coordinaterange; // Xu range of base when object is controlled + // Xu range of object when object is uncontrolled + + Object_struct(Object* _object,UncontrolledObject* _base,unsigned int _baseFrameIndex,Range nq_range,Range nc_range,Range nu_range): + object(_object),base(_base),baseFrameIndex(_baseFrameIndex),constraintrange(nc_range),jointrange(nq_range),coordinaterange(nu_range) + {}; + }; + typedef std::map ObjectMap; + + struct ConstraintSet_struct{ + ConstraintSet* task; + ObjectMap::iterator object1; + ObjectMap::iterator object2; + Range constraintrange; + Range featurerange; + unsigned int ee1index; + unsigned int ee2index; + ConstraintSet_struct(ConstraintSet* _task, + ObjectMap::iterator _object1,unsigned int _ee1index, + ObjectMap::iterator _object2,unsigned int _ee2index, + Range nc_range,Range coord_range): + task(_task), + object1(_object1),object2(_object2), + constraintrange(nc_range),featurerange(coord_range), + ee1index(_ee1index), ee2index(_ee2index) + {}; + }; + typedef std::map ConstraintMap; + + ObjectMap objects; + ConstraintMap constraints; + + static bool getConstraintPose(ConstraintSet* constraint, void *_param, KDL::Frame& _pose); +}; + +} + +#endif /* SCENE_HPP_ */ diff --git a/intern/itasc/Solver.hpp b/intern/itasc/Solver.hpp new file mode 100644 index 00000000000..e3aa1e1abc8 --- /dev/null +++ b/intern/itasc/Solver.hpp @@ -0,0 +1,33 @@ +/* $Id: Solver.hpp 20622 2009-06-04 12:47:59Z ben2610 $ + * Solver.hpp + * + * Created on: Jan 8, 2009 + * Author: rubensmits + */ + +#ifndef SOLVER_HPP_ +#define SOLVER_HPP_ + +#include +#include "eigen_types.hpp" + +namespace iTaSC{ + +class Solver{ +public: + enum SolverParam { + DLS_QMAX = 0, + DLS_LAMBDA_MAX, + DLS_EPSILON + }; + virtual ~Solver(){}; + + // gc = grouping of constraint output , + // size of vector = nc, alternance of true / false to indicate the grouping of output + virtual bool init(unsigned int nq, unsigned int nc, const std::vector& gc)=0; + virtual bool solve(const e_matrix& A, const e_vector& Wy, const e_vector& ydot, const e_matrix& Wq, e_vector& qdot, e_scalar& nlcoef)=0; + virtual void setParam(SolverParam param, double value)=0; +}; + +} +#endif /* SOLVER_HPP_ */ diff --git a/intern/itasc/UncontrolledObject.cpp b/intern/itasc/UncontrolledObject.cpp new file mode 100644 index 00000000000..4db44aaf7dc --- /dev/null +++ b/intern/itasc/UncontrolledObject.cpp @@ -0,0 +1,43 @@ +/* $Id$ + * UncontrolledObject.cpp + * + * Created on: Jan 5, 2009 + * Author: rubensmits + */ + +#include "UncontrolledObject.hpp" + +namespace iTaSC{ + +UncontrolledObject::UncontrolledObject():Object(UnControlled), + m_nu(0), m_nf(0), m_xudot() +{ +} + +UncontrolledObject::~UncontrolledObject() +{ +} + +void UncontrolledObject::initialize(unsigned int _nu, unsigned int _nf) +{ + assert (_nf >= 1); + m_nu = _nu; + m_nf = _nf; + if (_nu > 0) + m_xudot = e_zero_vector(_nu); + // clear all Jacobian if any + m_JuArray.clear(); + // reserve one more to have an zero matrix handy + if (m_nu > 0) + m_JuArray.resize(m_nf+1, e_zero_matrix(6,m_nu)); +} + +const e_matrix& 
UncontrolledObject::getJu(unsigned int frameIndex) const +{ + assert (m_nu > 0); + return m_JuArray[(frameIndex>m_nf)?m_nf:frameIndex]; +} + + + +} diff --git a/intern/itasc/UncontrolledObject.hpp b/intern/itasc/UncontrolledObject.hpp new file mode 100644 index 00000000000..3b693a0b2ed --- /dev/null +++ b/intern/itasc/UncontrolledObject.hpp @@ -0,0 +1,37 @@ +/* $Id: UncontrolledObject.hpp 19907 2009-04-23 13:41:59Z ben2610 $ + * UncontrolledObject.h + * + * Created on: Jan 5, 2009 + * Author: rubensmits + */ + +#ifndef UNCONTROLLEDOBJECT_HPP_ +#define UNCONTROLLEDOBJECT_HPP_ + +#include "eigen_types.hpp" + +#include "Object.hpp" +namespace iTaSC{ + +class UncontrolledObject: public Object { +protected: + unsigned int m_nu, m_nf; + e_vector m_xudot; + std::vector m_JuArray; + +public: + UncontrolledObject(); + virtual ~UncontrolledObject(); + + virtual void initialize(unsigned int _nu, unsigned int _nf); + virtual const e_matrix& getJu(unsigned int frameIndex) const; + virtual const e_vector& getXudot() const {return m_xudot;} + virtual void updateCoordinates(const Timestamp& timestamp)=0; + virtual const unsigned int getNrOfCoordinates(){return m_nu;}; + virtual const unsigned int getNrOfFrames(){return m_nf;}; + +}; + +} + +#endif /* UNCONTROLLEDOBJECT_H_ */ diff --git a/intern/itasc/WDLSSolver.cpp b/intern/itasc/WDLSSolver.cpp new file mode 100644 index 00000000000..1d0efde54c9 --- /dev/null +++ b/intern/itasc/WDLSSolver.cpp @@ -0,0 +1,101 @@ +/* $Id$ + * WDLSSolver.hpp.cpp + * + * Created on: Jan 8, 2009 + * Author: rubensmits + */ + +#include "WDLSSolver.hpp" +#include "kdl/utilities/svd_eigen_HH.hpp" + +namespace iTaSC { + +WDLSSolver::WDLSSolver() : m_lambda(0.5), m_epsilon(0.1) +{ + // maximum joint velocity + m_qmax = 50.0; +} + +WDLSSolver::~WDLSSolver() { +} + +bool WDLSSolver::init(unsigned int nq, unsigned int nc, const std::vector& gc) +{ + m_ns = std::min(nc,nq); + m_AWq = e_zero_matrix(nc,nq); + m_WyAWq = e_zero_matrix(nc,nq); + m_WyAWqt = e_zero_matrix(nq,nc); + m_S = e_zero_vector(std::max(nc,nq)); + m_Wy_ydot = e_zero_vector(nc); + if (nq > nc) { + m_transpose = true; + m_temp = e_zero_vector(nc); + m_U = e_zero_matrix(nc,nc); + m_V = e_zero_matrix(nq,nc); + m_WqV = e_zero_matrix(nq,nc); + } else { + m_transpose = false; + m_temp = e_zero_vector(nq); + m_U = e_zero_matrix(nc,nq); + m_V = e_zero_matrix(nq,nq); + m_WqV = e_zero_matrix(nq,nq); + } + return true; +} + +bool WDLSSolver::solve(const e_matrix& A, const e_vector& Wy, const e_vector& ydot, const e_matrix& Wq, e_vector& qdot, e_scalar& nlcoef) +{ + double alpha, vmax, norm; + // Create the Weighted jacobian + m_AWq = A*Wq; + for (int i=0; i 0 && (prevS-S) > maxDeltaS) { + maxDeltaS = (prevS-S); + maxS = prevS; + } + lambda = (S < m_epsilon) ? 
(e_scalar(1.0)-KDL::sqr(S/m_epsilon))*m_lambda*m_lambda : e_scalar(0.0); + alpha = m_U.col(i).dot(m_Wy_ydot)*S/(S*S+lambda); + vmax = m_WqV.col(i).cwise().abs().maxCoeff(); + norm = fabs(alpha*vmax); + if (norm > m_qmax) { + qdot += m_WqV.col(i)*(alpha*m_qmax/norm); + } else { + qdot += m_WqV.col(i)*alpha; + } + prevS = S; + } + if (maxDeltaS == e_scalar(0.0)) + nlcoef = e_scalar(KDL::epsilon); + else + nlcoef = (maxS-maxDeltaS)/maxS; + return true; +} + +} diff --git a/intern/itasc/WDLSSolver.hpp b/intern/itasc/WDLSSolver.hpp new file mode 100644 index 00000000000..b56ad1ab2b8 --- /dev/null +++ b/intern/itasc/WDLSSolver.hpp @@ -0,0 +1,48 @@ +/* $Id: WDLSSolver.hpp 20622 2009-06-04 12:47:59Z ben2610 $ + * WDLSSolver.hpp + * + * Created on: Jan 8, 2009 + * Author: rubensmits + */ + +#ifndef WDLSSOLVER_HPP_ +#define WDLSSOLVER_HPP_ + +#include "Solver.hpp" + +namespace iTaSC { + +class WDLSSolver: public iTaSC::Solver { +private: + e_matrix m_AWq,m_WyAWq,m_WyAWqt,m_U,m_V,m_WqV; + e_vector m_S,m_temp,m_Wy_ydot; + double m_lambda; + double m_epsilon; + double m_qmax; + int m_ns; + bool m_transpose; +public: + WDLSSolver(); + virtual ~WDLSSolver(); + + virtual bool init(unsigned int nq, unsigned int nc, const std::vector& gc); + virtual bool solve(const e_matrix& A, const e_vector& Wy, const e_vector& ydot, const e_matrix& Wq, e_vector& qdot, e_scalar& nlcoef); + virtual void setParam(SolverParam param, double value) + { + switch (param) { + case DLS_QMAX: + m_qmax = value; + break; + case DLS_LAMBDA_MAX: + m_lambda = value; + break; + case DLS_EPSILON: + m_epsilon = value; + break; + } + } +}; + +} + +#endif /* WDLSSOLVER_HPP_ */ diff --git a/intern/itasc/WSDLSSolver.cpp b/intern/itasc/WSDLSSolver.cpp new file mode 100644 index 00000000000..9f7ebed960a --- /dev/null +++ b/intern/itasc/WSDLSSolver.cpp @@ -0,0 +1,138 @@ +/* $Id$ + * WDLSSolver.hpp.cpp + * + * Created on: Jan 8, 2009 + * Author: rubensmits + */ + +#include "WSDLSSolver.hpp" +#include "kdl/utilities/svd_eigen_HH.hpp" +#include + +namespace iTaSC { + +WSDLSSolver::WSDLSSolver() : + m_ns(0), m_nc(0), m_nq(0) + +{ + // default maximum speed: 50 rad/s + m_qmax = 50.0; +} + +WSDLSSolver::~WSDLSSolver() { +} + +bool WSDLSSolver::init(unsigned int _nq, unsigned int _nc, const std::vector& gc) +{ + if (_nc == 0 || _nq == 0 || gc.size() != _nc) + return false; + m_nc = _nc; + m_nq = _nq; + m_ns = std::min(m_nc,m_nq); + m_AWq = e_zero_matrix(m_nc,m_nq); + m_WyAWq = e_zero_matrix(m_nc,m_nq); + m_WyAWqt = e_zero_matrix(m_nq,m_nc); + m_S = e_zero_vector(std::max(m_nc,m_nq)); + m_Wy_ydot = e_zero_vector(m_nc); + m_ytask = gc; + if (m_nq > m_nc) { + m_transpose = true; + m_temp = e_zero_vector(m_nc); + m_U = e_zero_matrix(m_nc,m_nc); + m_V = e_zero_matrix(m_nq,m_nc); + m_WqV = e_zero_matrix(m_nq,m_nc); + } else { + m_transpose = false; + m_temp = e_zero_vector(m_nq); + m_U = e_zero_matrix(m_nc,m_nq); + m_V = e_zero_matrix(m_nq,m_nq); + m_WqV = e_zero_matrix(m_nq,m_nq); + } + return true; +} + +bool WSDLSSolver::solve(const e_matrix& A, const e_vector& Wy, const e_vector& ydot, const e_matrix& Wq, e_vector& qdot, e_scalar& nlcoef) +{ + unsigned int i, j, l; + e_scalar N, M; + + // Create the Weighted jacobian + m_AWq = (A*Wq).lazy(); + for (i=0; i 0) { + if ((prevS-S) > maxDeltaS) { + maxDeltaS = (prevS-S); + maxS = prevS; + } + } + N = M = e_scalar(0.); + for (l=0, prev=m_ytask[0], norm=e_scalar(0.); l _qmax) { + damp = Sinv*alpha*_qmax/norm; + } else { + damp = Sinv*alpha; + } + qdot += m_WqV.col(i)*damp; + prevS = S; + } + if (maxDeltaS == 
e_scalar(0.0)) + nlcoef = e_scalar(KDL::epsilon); + else + nlcoef = (maxS-maxDeltaS)/maxS; + return true; +} + +} diff --git a/intern/itasc/WSDLSSolver.hpp b/intern/itasc/WSDLSSolver.hpp new file mode 100644 index 00000000000..0b17f26ef47 --- /dev/null +++ b/intern/itasc/WSDLSSolver.hpp @@ -0,0 +1,43 @@ +/* $Id: WSDLSSolver.hpp 20622 2009-06-04 12:47:59Z ben2610 $ + * WSDLSSolver.hpp + * + * Created on: Mar 26, 2009 + * Author: benoit bolsee + */ + +#ifndef WSDLSSOLVER_HPP_ +#define WSDLSSOLVER_HPP_ + +#include "Solver.hpp" + +namespace iTaSC { + +class WSDLSSolver: public iTaSC::Solver { +private: + e_matrix m_AWq,m_WyAWq,m_WyAWqt,m_U,m_V,m_WqV; + e_vector m_S,m_temp,m_Wy_ydot; + std::vector m_ytask; + e_scalar m_qmax; + unsigned int m_ns, m_nc, m_nq; + bool m_transpose; +public: + WSDLSSolver(); + virtual ~WSDLSSolver(); + + virtual bool init(unsigned int _nq, unsigned int _nc, const std::vector& gc); + virtual bool solve(const e_matrix& A, const e_vector& Wy, const e_vector& ydot, const e_matrix& Wq, e_vector& qdot, e_scalar& nlcoef); + virtual void setParam(SolverParam param, double value) + { + switch (param) { + case DLS_QMAX: + m_qmax = value; + break; + default: + break; + } + } +}; + +} + +#endif /* WSDLSSOLVER_HPP_ */ diff --git a/intern/itasc/WorldObject.cpp b/intern/itasc/WorldObject.cpp new file mode 100644 index 00000000000..99cb8773e77 --- /dev/null +++ b/intern/itasc/WorldObject.cpp @@ -0,0 +1,26 @@ +/* $Id$ + * WorldObject.cpp + * + * Created on: Feb 10, 2009 + * Author: benoitbolsee + */ + +#include "WorldObject.hpp" + +namespace iTaSC{ + +/* special singleton to be used as base for uncontrolled object */ +WorldObject Object::world; + +WorldObject::WorldObject():UncontrolledObject() +{ + initialize(0,1); + m_internalPose = Frame::Identity(); +} + +WorldObject::~WorldObject() +{ +} + + +} diff --git a/intern/itasc/WorldObject.hpp b/intern/itasc/WorldObject.hpp new file mode 100644 index 00000000000..b309545a843 --- /dev/null +++ b/intern/itasc/WorldObject.hpp @@ -0,0 +1,30 @@ +/* $Id: WorldObject.hpp 19907 2009-04-23 13:41:59Z ben2610 $ + * WorldObject.h + * + * Created on: Feb 10, 2009 + * Author: benoitbolsee + */ + +#ifndef WORLDOBJECT_HPP_ +#define WORLDOBJECT_HPP_ + +#include "UncontrolledObject.hpp" +namespace iTaSC{ + +class WorldObject: public UncontrolledObject { +public: + WorldObject(); + virtual ~WorldObject(); + + virtual void updateCoordinates(const Timestamp& timestamp) {}; + virtual void updateKinematics(const Timestamp& timestamp) {}; + virtual void pushCache(const Timestamp& timestamp) {}; + virtual void initCache(Cache *_cache) {}; +protected: + virtual void updateJacobian() {} + +}; + +} + +#endif /* WORLDOBJECT_H_ */ diff --git a/intern/itasc/eigen_types.cpp b/intern/itasc/eigen_types.cpp new file mode 100644 index 00000000000..2aa942f38c7 --- /dev/null +++ b/intern/itasc/eigen_types.cpp @@ -0,0 +1,12 @@ +/* $Id$ + * eigen_types.cpp + * + * Created on: March 19, 2009 + * Author: benoit bolsee + */ + +#include "eigen_types.hpp" + +const KDL::Frame iTaSC::F_identity(Rotation::Identity(),Vector::Zero()); + + diff --git a/intern/itasc/eigen_types.hpp b/intern/itasc/eigen_types.hpp new file mode 100644 index 00000000000..fe46f8b6bb3 --- /dev/null +++ b/intern/itasc/eigen_types.hpp @@ -0,0 +1,84 @@ +/* $Id: eigen_types.hpp 19905 2009-04-23 13:29:54Z ben2610 $ + * eigen_types.hpp + * + * Created on: March 6, 2009 + * Author: benoit bolsee + */ + +#ifndef EIGEN_TYPES_HPP_ +#define EIGEN_TYPES_HPP_ + +#include +#include "kdl/frames.hpp" +#include "kdl/tree.hpp" 
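The damped least-squares update implemented by WDLSSolver::solve() and its selectively damped variant WSDLSSolver::solve() above reduces to two per-singular-value rules: blend in a damping term lambda^2 whenever the singular value S drops below the epsilon threshold, and rescale the resulting joint increment so that the fastest joint never exceeds qmax. The standalone sketch below restates those two rules; the helper names and signatures are illustrative, not part of the library.

#include <cmath>

// Illustrative helpers (not part of the patch) restating the damping and
// clamping rules used by WDLSSolver::solve()/WSDLSSolver::solve() above.
// S         : current singular value of the weighted jacobian
// UtWyYdot  : U.col(i).dot(Wy*ydot), the task component along this direction
// epsilon   : singular-value threshold (DLS_EPSILON)
// lambdaMax : maximum damping factor (DLS_LAMBDA_MAX)
double dampedGain(double S, double UtWyYdot, double epsilon, double lambdaMax)
{
    // damping grows smoothly from 0 (S >= epsilon) to lambdaMax^2 (S -> 0)
    double lambda2 = (S < epsilon)
        ? (1.0 - (S / epsilon) * (S / epsilon)) * lambdaMax * lambdaMax
        : 0.0;
    return UtWyYdot * S / (S * S + lambda2);
}

// Scale the per-column gain down if the fastest joint would exceed qmax
// (vmax is the largest absolute entry of the corresponding Wq*V column).
double clampGain(double alpha, double vmax, double qmax)
{
    double norm = std::fabs(alpha * vmax);
    return (norm > qmax) ? alpha * qmax / norm : alpha;
}

The nlcoef value that both solvers return (derived from the largest drop between consecutive singular values) is what Scene::update() later multiplies with m_maxstep to bound the automatic substep length.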
+#include "kdl/chain.hpp" +#include "kdl/jacobian.hpp" +#include "kdl/jntarray.hpp" + + +namespace iTaSC{ + +using KDL::Twist; +using KDL::Frame; +using KDL::Joint; +using KDL::Inertia; +using KDL::SegmentMap; +using KDL::Tree; +using KDL::JntArray; +using KDL::Jacobian; +using KDL::Segment; +using KDL::Rotation; +using KDL::Vector; +using KDL::Vector2; +using KDL::Chain; + +extern const Frame F_identity; + +#define e_scalar double +#define e_vector Eigen::Matrix +#define e_zero_vector Eigen::Matrix::Zero +#define e_matrix Eigen::Matrix +#define e_matrix6 Eigen::Matrix +#define e_identity_matrix Eigen::Matrix::Identity +#define e_scalar_vector Eigen::Matrix::Constant +#define e_zero_matrix Eigen::Matrix::Zero +#define e_random_matrix Eigen::Matrix::Random +#define e_vector6 Eigen::Matrix +#define e_vector3 Eigen::Matrix + +class Range { +public: + int start; + int count; + Range(int _start, int _count) { start = _start; count=_count; } + Range(const Range& other) { start=other.start; count=other.count; } +}; + +template inline Eigen::Block project(MatrixType& m, Range r) +{ + return Eigen::Block(m,r.start,0,r.count,1); +} + +template inline Eigen::Block project(MatrixType& m, Range r, Range c) +{ + return Eigen::Block(m,r.start,c.start,r.count,c.count); +} + +template inline static int changeBase(Eigen::MatrixBase& J, const Frame& T) { + + if (J.rows() != 6) + return -1; + for (int j = 0; j < J.cols(); ++j) { + typename Derived::ColXpr Jj = J.col(j); + Twist arg; + for(unsigned int i=0;i<6;++i) + arg(i)=Jj[i]; + Twist tmp(T*arg); + for(unsigned int i=0;i<6;++i) + Jj[i]=e_scalar(tmp(i)); + } + return 0; +} + +} +#endif /* UBLAS_TYPES_HPP_ */ diff --git a/intern/itasc/kdl/Makefile b/intern/itasc/kdl/Makefile new file mode 100644 index 00000000000..058f93da4e1 --- /dev/null +++ b/intern/itasc/kdl/Makefile @@ -0,0 +1,43 @@ +# +# $Id$ +# +# ***** BEGIN GPL LICENSE BLOCK ***** +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +# +# The Original Code is Copyright (C) 2001-2002 by NaN Holding BV. +# All rights reserved. +# +# The Original Code is: all of this file. +# +# Contributor(s): Hans Lambermont +# +# ***** END GPL LICENSE BLOCK ***** +# iksolver main makefile. +# + +include nan_definitions.mk + +LIBNAME = itasc_kdl +# Yep, same dir than parent (itasc instead of $(LIBNAME)) +DIR = $(OCGDIR)/intern/itasc +DIRS = utilities +SOURCEDIR = intern/$(LIBNAME)/kdl + +include nan_subdirs.mk +include nan_compile.mk + +CPPFLAGS += -I. 
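The Range/project/changeBase helpers declared in eigen_types.hpp above are the glue that lets Scene address sub-blocks of its global matrices per object or per constraint set: project() returns a writable Eigen block for a (row range, column range) pair, and changeBase() re-expresses each column of a 6xN jacobian in another frame. The following usage sketch of project() is illustrative only, with Eigen::MatrixXd standing in for the library's e_matrix macro and a simplified aggregate Range.

#include <Eigen/Core>

// Usage sketch only: a simplified Range and the project() overload for a
// (row range, column range) pair, mirroring the helpers in eigen_types.hpp.
struct Range { int start; int count; };

template <typename MatrixType>
Eigen::Block<MatrixType> project(MatrixType& m, Range r, Range c)
{
    return Eigen::Block<MatrixType>(m, r.start, c.start, r.count, c.count);
}

int main()
{
    Eigen::MatrixXd Jq   = Eigen::MatrixXd::Zero(6, 12); // stacked robot jacobians
    Eigen::MatrixXd Jarm = Eigen::MatrixXd::Ones(6, 7);  // one robot's jacobian
    Range featurerange = {0, 6};  // rows owned by this constraint set
    Range jointrange   = {3, 7};  // columns owned by this object's joints
    project(Jq, featurerange, jointrange) = Jarm;         // write into that slot
    return 0;
}

Scene::update() uses exactly this pattern when it copies each robot jacobian returned by getJq() into its slot of m_Jq, and the same idea with a single Range for the vectors m_ydot, m_Wy and m_qdot.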
+CPPFLAGS += -I../../../extern/Eigen2 diff --git a/intern/itasc/kdl/chain.cpp b/intern/itasc/kdl/chain.cpp new file mode 100644 index 00000000000..638366c96be --- /dev/null +++ b/intern/itasc/kdl/chain.cpp @@ -0,0 +1,75 @@ +// Copyright (C) 2007 Ruben Smits + +// Version: 1.0 +// Author: Ruben Smits +// Maintainer: Ruben Smits +// URL: http://www.orocos.org/kdl + +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. + +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. + +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +#include "chain.hpp" + +namespace KDL { + using namespace std; + + Chain::Chain(): + segments(0), + nrOfJoints(0), + nrOfSegments(0) + { + } + + Chain::Chain(const Chain& in):nrOfJoints(0), + nrOfSegments(0) + { + for(unsigned int i=0;iaddSegment(in.getSegment(i)); + } + + Chain& Chain::operator=(const Chain& arg) + { + nrOfJoints=0; + nrOfSegments=0; + segments.resize(0); + for(unsigned int i=0;iaddSegment(chain.getSegment(i)); + } + + const Segment& Chain::getSegment(unsigned int nr) const + { + return segments[nr]; + } + + Chain::~Chain() + { + } + +} + diff --git a/intern/itasc/kdl/chain.hpp b/intern/itasc/kdl/chain.hpp new file mode 100644 index 00000000000..0d40690202a --- /dev/null +++ b/intern/itasc/kdl/chain.hpp @@ -0,0 +1,95 @@ +// Copyright (C) 2007 Ruben Smits + +// Version: 1.0 +// Author: Ruben Smits +// Maintainer: Ruben Smits +// URL: http://www.orocos.org/kdl + +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. + +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. + +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +#ifndef KDL_CHAIN_HPP +#define KDL_CHAIN_HPP + +#include "segment.hpp" +#include + +namespace KDL { + /** + * \brief This class encapsulates a serial kinematic + * interconnection structure. It is build out of segments. + * + * @ingroup KinematicFamily + */ + class Chain { + private: + std::vector segments; + unsigned int nrOfJoints; + unsigned int nrOfSegments; + public: + /** + * The constructor of a chain, a new chain is always empty. + * + */ + Chain(); + Chain(const Chain& in); + Chain& operator = (const Chain& arg); + + /** + * Adds a new segment to the end of the chain. + * + * @param segment The segment to add + */ + void addSegment(const Segment& segment); + /** + * Adds a complete chain to the end of the chain + * The added chain is copied. 
+ * + * @param chain The chain to add + */ + void addChain(const Chain& chain); + + /** + * Request the total number of joints in the chain.\n + * Important: It is not the + * same as the total number of segments since a segment does not + * need to have a joint. This function is important when + * creating a KDL::JntArray to use with this chain. + * @return total nr of joints + */ + unsigned int getNrOfJoints()const {return nrOfJoints;}; + /** + * Request the total number of segments in the chain. + * @return total number of segments + */ + unsigned int getNrOfSegments()const {return nrOfSegments;}; + + /** + * Request the nr'd segment of the chain. There is no boundary + * checking. + * + * @param nr the nr of the segment starting from 0 + * + * @return a constant reference to the nr'd segment + */ + const Segment& getSegment(unsigned int nr)const; + + virtual ~Chain(); + }; + + + +}//end of namespace KDL + +#endif diff --git a/intern/itasc/kdl/chainfksolver.hpp b/intern/itasc/kdl/chainfksolver.hpp new file mode 100644 index 00000000000..fa6f625ee9d --- /dev/null +++ b/intern/itasc/kdl/chainfksolver.hpp @@ -0,0 +1,107 @@ +// Copyright (C) 2007 Ruben Smits + +// Version: 1.0 +// Author: Ruben Smits +// Maintainer: Ruben Smits +// URL: http://www.orocos.org/kdl + +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. + +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. + +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +#ifndef KDL_CHAIN_FKSOLVER_HPP +#define KDL_CHAIN_FKSOLVER_HPP + +#include "chain.hpp" +#include "framevel.hpp" +#include "frameacc.hpp" +#include "jntarray.hpp" +#include "jntarrayvel.hpp" +#include "jntarrayacc.hpp" + +namespace KDL { + + /** + * \brief This abstract class encapsulates a + * solver for the forward position kinematics for a KDL::Chain. + * + * @ingroup KinematicFamily + */ + + //Forward definition + class ChainFkSolverPos { + public: + /** + * Calculate forward position kinematics for a KDL::Chain, + * from joint coordinates to cartesian pose. + * + * @param q_in input joint coordinates + * @param p_out reference to output cartesian pose + * + * @return if < 0 something went wrong + */ + virtual int JntToCart(const JntArray& q_in, Frame& p_out,int segmentNr=-1)=0; + virtual ~ChainFkSolverPos(){}; + }; + + /** + * \brief This abstract class encapsulates a solver + * for the forward velocity kinematics for a KDL::Chain. + * + * @ingroup KinematicFamily + */ + class ChainFkSolverVel { + public: + /** + * Calculate forward position and velocity kinematics, from + * joint coordinates to cartesian coordinates. 
+ * + * @param q_in input joint coordinates (position and velocity) + * @param out output cartesian coordinates (position and velocity) + * + * @return if < 0 something went wrong + */ + virtual int JntToCart(const JntArrayVel& q_in, FrameVel& out,int segmentNr=-1)=0; + + virtual ~ChainFkSolverVel(){}; + }; + + /** + * \brief This abstract class encapsulates a solver + * for the forward acceleration kinematics for a KDL::Chain. + * + * @ingroup KinematicFamily + */ + + class ChainFkSolverAcc { + public: + /** + * Calculate forward position, velocity and accelaration + * kinematics, from joint coordinates to cartesian coordinates + * + * @param q_in input joint coordinates (position, velocity and + * acceleration + @param out output cartesian coordinates (position, velocity + * and acceleration + * + * @return if < 0 something went wrong + */ + virtual int JntToCart(const JntArrayAcc& q_in, FrameAcc& out,int segmentNr=-1)=0; + + virtual ~ChainFkSolverAcc()=0; + }; + + +}//end of namespace KDL + +#endif diff --git a/intern/itasc/kdl/chainfksolverpos_recursive.cpp b/intern/itasc/kdl/chainfksolverpos_recursive.cpp new file mode 100644 index 00000000000..46c29c9c6e0 --- /dev/null +++ b/intern/itasc/kdl/chainfksolverpos_recursive.cpp @@ -0,0 +1,61 @@ +// Copyright (C) 2007 Francois Cauwe +// Copyright (C) 2007 Ruben Smits + +// Version: 1.0 +// Author: Ruben Smits +// Maintainer: Ruben Smits +// URL: http://www.orocos.org/kdl + +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. + +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. + +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +#include "chainfksolverpos_recursive.hpp" +#include + +namespace KDL { + + ChainFkSolverPos_recursive::ChainFkSolverPos_recursive(const Chain& _chain): + chain(_chain) + { + } + + int ChainFkSolverPos_recursive::JntToCart(const JntArray& q_in, Frame& p_out, int segmentNr) + { + unsigned int segNr = (unsigned int)segmentNr; + if(segmentNr<0) + segNr=chain.getNrOfSegments(); + + p_out = Frame::Identity(); + + if(q_in.rows()!=chain.getNrOfJoints()) + return -1; + else if(segNr>chain.getNrOfSegments()) + return -1; + else{ + int j=0; + for(unsigned int i=0;i + +// Version: 1.0 +// Author: Ruben Smits +// Maintainer: Ruben Smits +// URL: http://www.orocos.org/kdl + +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. + +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+ +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +#ifndef KDLCHAINFKSOLVERPOS_RECURSIVE_HPP +#define KDLCHAINFKSOLVERPOS_RECURSIVE_HPP + +#include "chainfksolver.hpp" + +namespace KDL { + + /** + * Implementation of a recursive forward position kinematics + * algorithm to calculate the position transformation from joint + * space to Cartesian space of a general kinematic chain (KDL::Chain). + * + * @ingroup KinematicFamily + */ + class ChainFkSolverPos_recursive : public ChainFkSolverPos + { + public: + ChainFkSolverPos_recursive(const Chain& chain); + ~ChainFkSolverPos_recursive(); + + virtual int JntToCart(const JntArray& q_in, Frame& p_out, int segmentNr=-1); + + private: + const Chain chain; + }; + +} + +#endif diff --git a/intern/itasc/kdl/chainjnttojacsolver.cpp b/intern/itasc/kdl/chainjnttojacsolver.cpp new file mode 100644 index 00000000000..4a801c041f3 --- /dev/null +++ b/intern/itasc/kdl/chainjnttojacsolver.cpp @@ -0,0 +1,80 @@ +// Copyright (C) 2007 Ruben Smits + +// Version: 1.0 +// Author: Ruben Smits +// Maintainer: Ruben Smits +// URL: http://www.orocos.org/kdl + +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. + +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. + +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +#include "chainjnttojacsolver.hpp" + +namespace KDL +{ + ChainJntToJacSolver::ChainJntToJacSolver(const Chain& _chain): + chain(_chain) + { + } + + ChainJntToJacSolver::~ChainJntToJacSolver() + { + } + + int ChainJntToJacSolver::JntToJac(const JntArray& q_in,Jacobian& jac) + { + assert(q_in.rows()==chain.getNrOfJoints()&& + q_in.rows()==jac.columns()); + + + Frame T_local, T_joint; + T_total = Frame::Identity(); + SetToZero(t_local); + + int i=chain.getNrOfSegments()-1; + unsigned int q_nr = chain.getNrOfJoints(); + + //Lets recursively iterate until we are in the root segment + while (i >= 0) { + const Segment& segment = chain.getSegment(i); + int ndof = segment.getJoint().getNDof(); + q_nr -= ndof; + + //get the pose of the joint. + T_joint = segment.getJoint().pose(((JntArray&)q_in)(q_nr)); + // combine with the tip to have the tip pose + T_local = T_joint*segment.getFrameToTip(); + //calculate new T_end: + T_total = T_local * T_total; + + for (int dof=0; dof + +// Version: 1.0 +// Author: Ruben Smits +// Maintainer: Ruben Smits +// URL: http://www.orocos.org/kdl + +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+ +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. + +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +#ifndef KDL_CHAINJNTTOJACSOLVER_HPP +#define KDL_CHAINJNTTOJACSOLVER_HPP + +#include "frames.hpp" +#include "jacobian.hpp" +#include "jntarray.hpp" +#include "chain.hpp" + +namespace KDL +{ + /** + * @brief Class to calculate the jacobian of a general + * KDL::Chain, it is used by other solvers. It should not be used + * outside of KDL. + * + * + */ + + class ChainJntToJacSolver + { + public: + ChainJntToJacSolver(const Chain& chain); + ~ChainJntToJacSolver(); + /** + * Calculate the jacobian expressed in the base frame of the + * chain, with reference point at the end effector of the + * *chain. The alghoritm is similar to the one used in + * KDL::ChainFkSolverVel_recursive + * + * @param q_in input joint positions + * @param jac output jacobian + * + * @return always returns 0 + */ + int JntToJac(const JntArray& q_in,Jacobian& jac); + + private: + const Chain chain; + Twist t_local; + Frame T_total; + }; +} +#endif + diff --git a/intern/itasc/kdl/frameacc.cpp b/intern/itasc/kdl/frameacc.cpp new file mode 100644 index 00000000000..9defce0a00e --- /dev/null +++ b/intern/itasc/kdl/frameacc.cpp @@ -0,0 +1,26 @@ +/***************************************************************************** + * \author + * Erwin Aertbelien, Div. PMA, Dep. of Mech. Eng., K.U.Leuven + * + * \version + * ORO_Geometry V0.2 + * + * \par History + * - $log$ + * + * \par Release + * $Id$ + * $Name: $ + ****************************************************************************/ + + +#include "frameacc.hpp" + +namespace KDL { + +#ifndef KDL_INLINE + #include "frameacc.inl" +#endif + +} + diff --git a/intern/itasc/kdl/frameacc.hpp b/intern/itasc/kdl/frameacc.hpp new file mode 100644 index 00000000000..4157237222e --- /dev/null +++ b/intern/itasc/kdl/frameacc.hpp @@ -0,0 +1,259 @@ +/***************************************************************************** + * \file + * This file contains the definition of classes for a + * Rall Algebra of (subset of) the classes defined in frames, + * i.e. classes that contain a set (value,derivative,2nd derivative) + * and define operations on that set + * this classes are usefull for automatic differentiation ( <-> symbolic diff , + * <-> numeric diff). + * Defines VectorAcc, RotationAcc, FrameAcc, doubleAcc. + * Look at the corresponding classes Vector Rotation Frame Twist and + * Wrench for the semantics of the methods. + * + * It also contains the 2nd derivative <-> RFrames.h + * + * \author + * Erwin Aertbelien, Div. PMA, Dep. of Mech. 
Eng., K.U.Leuven + * + * \version + * ORO_Geometry V0.2 + * + * \par History + * - $log$ + * + * \par Release + * $Id: frameacc.hpp 19905 2009-04-23 13:29:54Z ben2610 $ + * $Name: $ + ****************************************************************************/ + +#ifndef RRFRAMES_H +#define RRFRAMES_H + + +#include "utilities/rall2d.h" +#include "frames.hpp" + + + +namespace KDL { + +class TwistAcc; +typedef Rall2d doubleAcc; + + +class VectorAcc +{ +public: + Vector p; //!< position vector + Vector v; //!< velocity vector + Vector dv; //!< acceleration vector +public: + VectorAcc():p(),v(),dv() {} + explicit VectorAcc(const Vector& _p):p(_p),v(Vector::Zero()),dv(Vector::Zero()) {} + VectorAcc(const Vector& _p,const Vector& _v):p(_p),v(_v),dv(Vector::Zero()) {} + VectorAcc(const Vector& _p,const Vector& _v,const Vector& _dv): + p(_p),v(_v),dv(_dv) {} + IMETHOD VectorAcc& operator = (const VectorAcc& arg); + IMETHOD VectorAcc& operator = (const Vector& arg); + IMETHOD VectorAcc& operator += (const VectorAcc& arg); + IMETHOD VectorAcc& operator -= (const VectorAcc& arg); + IMETHOD static VectorAcc Zero(); + IMETHOD void ReverseSign(); + IMETHOD doubleAcc Norm(); + IMETHOD friend VectorAcc operator + (const VectorAcc& r1,const VectorAcc& r2); + IMETHOD friend VectorAcc operator - (const VectorAcc& r1,const VectorAcc& r2); + IMETHOD friend VectorAcc operator + (const Vector& r1,const VectorAcc& r2); + IMETHOD friend VectorAcc operator - (const Vector& r1,const VectorAcc& r2); + IMETHOD friend VectorAcc operator + (const VectorAcc& r1,const Vector& r2); + IMETHOD friend VectorAcc operator - (const VectorAcc& r1,const Vector& r2); + IMETHOD friend VectorAcc operator * (const VectorAcc& r1,const VectorAcc& r2); + IMETHOD friend VectorAcc operator * (const VectorAcc& r1,const Vector& r2); + IMETHOD friend VectorAcc operator * (const Vector& r1,const VectorAcc& r2); + IMETHOD friend VectorAcc operator * (const VectorAcc& r1,double r2); + IMETHOD friend VectorAcc operator * (double r1,const VectorAcc& r2); + IMETHOD friend VectorAcc operator * (const doubleAcc& r1,const VectorAcc& r2); + IMETHOD friend VectorAcc operator * (const VectorAcc& r2,const doubleAcc& r1); + IMETHOD friend VectorAcc operator*(const Rotation& R,const VectorAcc& x); + + IMETHOD friend VectorAcc operator / (const VectorAcc& r1,double r2); + IMETHOD friend VectorAcc operator / (const VectorAcc& r2,const doubleAcc& r1); + + + IMETHOD friend bool Equal(const VectorAcc& r1,const VectorAcc& r2,double eps=epsilon); + IMETHOD friend bool Equal(const Vector& r1,const VectorAcc& r2,double eps=epsilon); + IMETHOD friend bool Equal(const VectorAcc& r1,const Vector& r2,double eps=epsilon); + IMETHOD friend VectorAcc operator - (const VectorAcc& r); + IMETHOD friend doubleAcc dot(const VectorAcc& lhs,const VectorAcc& rhs); + IMETHOD friend doubleAcc dot(const VectorAcc& lhs,const Vector& rhs); + IMETHOD friend doubleAcc dot(const Vector& lhs,const VectorAcc& rhs); +}; + + + +class RotationAcc +{ +public: + Rotation R; //!< rotation matrix + Vector w; //!< angular velocity vector + Vector dw; //!< angular acceration vector +public: + RotationAcc():R(),w() {} + explicit RotationAcc(const Rotation& _R):R(_R),w(Vector::Zero()){} + RotationAcc(const Rotation& _R,const Vector& _w,const Vector& _dw): + R(_R),w(_w),dw(_dw) {} + IMETHOD RotationAcc& operator = (const RotationAcc& arg); + IMETHOD RotationAcc& operator = (const Rotation& arg); + IMETHOD static RotationAcc Identity(); + IMETHOD RotationAcc Inverse() const; + IMETHOD VectorAcc 
Inverse(const VectorAcc& arg) const; + IMETHOD VectorAcc Inverse(const Vector& arg) const; + IMETHOD VectorAcc operator*(const VectorAcc& arg) const; + IMETHOD VectorAcc operator*(const Vector& arg) const; + + // Rotations + // The SetRot.. functions set the value of *this to the appropriate rotation matrix. + // The Rot... static functions give the value of the appropriate rotation matrix back. + // The DoRot... functions apply a rotation R to *this,such that *this = *this * R. + // IMETHOD void DoRotX(const doubleAcc& angle); + // IMETHOD void DoRotY(const doubleAcc& angle); + // IMETHOD void DoRotZ(const doubleAcc& angle); + // IMETHOD static RRotation RotX(const doubleAcc& angle); + // IMETHOD static RRotation RotY(const doubleAcc& angle); + // IMETHOD static RRotation RotZ(const doubleAcc& angle); + + // IMETHOD void SetRot(const Vector& rotaxis,const doubleAcc& angle); + // Along an arbitrary axes. The norm of rotvec is neglected. + // IMETHOD static RotationAcc Rot(const Vector& rotvec,const doubleAcc& angle); + // rotvec has arbitrary norm + // rotation around a constant vector ! + // IMETHOD static RotationAcc Rot2(const Vector& rotvec,const doubleAcc& angle); + // rotvec is normalized. + // rotation around a constant vector ! + + IMETHOD friend RotationAcc operator* (const RotationAcc& r1,const RotationAcc& r2); + IMETHOD friend RotationAcc operator* (const Rotation& r1,const RotationAcc& r2); + IMETHOD friend RotationAcc operator* (const RotationAcc& r1,const Rotation& r2); + IMETHOD friend bool Equal(const RotationAcc& r1,const RotationAcc& r2,double eps=epsilon); + IMETHOD friend bool Equal(const Rotation& r1,const RotationAcc& r2,double eps=epsilon); + IMETHOD friend bool Equal(const RotationAcc& r1,const Rotation& r2,double eps=epsilon); + IMETHOD TwistAcc Inverse(const TwistAcc& arg) const; + IMETHOD TwistAcc Inverse(const Twist& arg) const; + IMETHOD TwistAcc operator * (const TwistAcc& arg) const; + IMETHOD TwistAcc operator * (const Twist& arg) const; +}; + + + + +class FrameAcc +{ +public: + RotationAcc M; //!< Rotation,angular velocity, and angular acceleration of frame. + VectorAcc p; //!< Translation, velocity and acceleration of origin. 
+public: + FrameAcc(){} + explicit FrameAcc(const Frame& _T):M(_T.M),p(_T.p) {} + FrameAcc(const Frame& _T,const Twist& _t,const Twist& _dt): + M(_T.M,_t.rot,_dt.rot),p(_T.p,_t.vel,_dt.vel) {} + FrameAcc(const RotationAcc& _M,const VectorAcc& _p):M(_M),p(_p) {} + + IMETHOD FrameAcc& operator = (const FrameAcc& arg); + IMETHOD FrameAcc& operator = (const Frame& arg); + IMETHOD static FrameAcc Identity(); + IMETHOD FrameAcc Inverse() const; + IMETHOD VectorAcc Inverse(const VectorAcc& arg) const; + IMETHOD VectorAcc operator*(const VectorAcc& arg) const; + IMETHOD VectorAcc operator*(const Vector& arg) const; + IMETHOD VectorAcc Inverse(const Vector& arg) const; + IMETHOD Frame GetFrame() const; + IMETHOD Twist GetTwist() const; + IMETHOD Twist GetAccTwist() const; + IMETHOD friend FrameAcc operator * (const FrameAcc& f1,const FrameAcc& f2); + IMETHOD friend FrameAcc operator * (const Frame& f1,const FrameAcc& f2); + IMETHOD friend FrameAcc operator * (const FrameAcc& f1,const Frame& f2); + IMETHOD friend bool Equal(const FrameAcc& r1,const FrameAcc& r2,double eps=epsilon); + IMETHOD friend bool Equal(const Frame& r1,const FrameAcc& r2,double eps=epsilon); + IMETHOD friend bool Equal(const FrameAcc& r1,const Frame& r2,double eps=epsilon); + + IMETHOD TwistAcc Inverse(const TwistAcc& arg) const; + IMETHOD TwistAcc Inverse(const Twist& arg) const; + IMETHOD TwistAcc operator * (const TwistAcc& arg) const; + IMETHOD TwistAcc operator * (const Twist& arg) const; +}; + + + + + + + + +//very similar to Wrench class. +class TwistAcc +{ +public: + VectorAcc vel; //!< translational velocity and its 1st and 2nd derivative + VectorAcc rot; //!< rotational velocity and its 1st and 2nd derivative +public: + + TwistAcc():vel(),rot() {}; + TwistAcc(const VectorAcc& _vel,const VectorAcc& _rot):vel(_vel),rot(_rot) {}; + + IMETHOD TwistAcc& operator-=(const TwistAcc& arg); + IMETHOD TwistAcc& operator+=(const TwistAcc& arg); + + IMETHOD friend TwistAcc operator*(const TwistAcc& lhs,double rhs); + IMETHOD friend TwistAcc operator*(double lhs,const TwistAcc& rhs); + IMETHOD friend TwistAcc operator/(const TwistAcc& lhs,double rhs); + + IMETHOD friend TwistAcc operator*(const TwistAcc& lhs,const doubleAcc& rhs); + IMETHOD friend TwistAcc operator*(const doubleAcc& lhs,const TwistAcc& rhs); + IMETHOD friend TwistAcc operator/(const TwistAcc& lhs,const doubleAcc& rhs); + + IMETHOD friend TwistAcc operator+(const TwistAcc& lhs,const TwistAcc& rhs); + IMETHOD friend TwistAcc operator-(const TwistAcc& lhs,const TwistAcc& rhs); + IMETHOD friend TwistAcc operator-(const TwistAcc& arg); + + IMETHOD friend void SetToZero(TwistAcc& v); + + static IMETHOD TwistAcc Zero(); + + IMETHOD void ReverseSign(); + + IMETHOD TwistAcc RefPoint(const VectorAcc& v_base_AB); + // Changes the reference point of the RTwist. + // The RVector v_base_AB is expressed in the same base as the RTwist + // The RVector v_base_AB is a RVector from the old point to + // the new point. 
+ // Complexity : 6M+6A + + IMETHOD friend bool Equal(const TwistAcc& a,const TwistAcc& b,double eps=epsilon); + IMETHOD friend bool Equal(const Twist& a,const TwistAcc& b,double eps=epsilon); + IMETHOD friend bool Equal(const TwistAcc& a,const Twist& b,double eps=epsilon); + + + IMETHOD Twist GetTwist() const; + IMETHOD Twist GetTwistDot() const; + + friend class RotationAcc; + friend class FrameAcc; + +}; + + + + + + + +#ifdef KDL_INLINE +#include "frameacc.inl" +#endif + +} + + + + + +#endif diff --git a/intern/itasc/kdl/frameacc.inl b/intern/itasc/kdl/frameacc.inl new file mode 100644 index 00000000000..a8ea35ad436 --- /dev/null +++ b/intern/itasc/kdl/frameacc.inl @@ -0,0 +1,598 @@ +/***************************************************************************** + * \file + * provides inline functions of rrframes.h + * + * \author + * Erwin Aertbelien, Div. PMA, Dep. of Mech. Eng., K.U.Leuven + * + * \version + * ORO_Geometry V0.2 + * + * \par History + * - $log$ + * + * \par Release + * $Id: frameacc.inl 19905 2009-04-23 13:29:54Z ben2610 $ + * $Name: $ + ****************************************************************************/ + + + + +/////////////////// VectorAcc ///////////////////////////////////// + +VectorAcc operator + (const VectorAcc& r1,const VectorAcc& r2) { + return VectorAcc(r1.p+r2.p,r1.v+r2.v,r1.dv+r2.dv); +} + +VectorAcc operator - (const VectorAcc& r1,const VectorAcc& r2) { + return VectorAcc(r1.p-r2.p, r1.v-r2.v, r1.dv-r2.dv); +} +VectorAcc operator + (const Vector& r1,const VectorAcc& r2) { + return VectorAcc(r1+r2.p,r2.v,r2.dv); +} + +VectorAcc operator - (const Vector& r1,const VectorAcc& r2) { + return VectorAcc(r1-r2.p, -r2.v, -r2.dv); +} +VectorAcc operator + (const VectorAcc& r1,const Vector& r2) { + return VectorAcc(r1.p+r2,r1.v,r1.dv); +} + +VectorAcc operator - (const VectorAcc& r1,const Vector& r2) { + return VectorAcc(r1.p-r2, r1.v, r1.dv); +} + +// unary - +VectorAcc operator - (const VectorAcc& r) { + return VectorAcc(-r.p,-r.v,-r.dv); +} + +// cross prod. +VectorAcc operator * (const VectorAcc& r1,const VectorAcc& r2) { + return VectorAcc(r1.p*r2.p, + r1.p*r2.v+r1.v*r2.p, + r1.dv*r2.p+2*r1.v*r2.v+r1.p*r2.dv + ); +} + +VectorAcc operator * (const VectorAcc& r1,const Vector& r2) { + return VectorAcc(r1.p*r2, r1.v*r2, r1.dv*r2 ); +} + +VectorAcc operator * (const Vector& r1,const VectorAcc& r2) { + return VectorAcc(r1*r2.p, r1*r2.v, r1*r2.dv ); +} + + + +// scalar mult. 
+VectorAcc operator * (double r1,const VectorAcc& r2) { + return VectorAcc(r1*r2.p, r1*r2.v, r1*r2.dv ); +} + +VectorAcc operator * (const VectorAcc& r1,double r2) { + return VectorAcc(r1.p*r2, r1.v*r2, r1.dv*r2 ); +} + +VectorAcc operator * (const doubleAcc& r1,const VectorAcc& r2) { + return VectorAcc(r1.t*r2.p, + r1.t*r2.v + r1.d*r2.p, + r1.t*r2.dv + 2*r1.d*r2.v + r1.dd*r2.p + ); +} + +VectorAcc operator * (const VectorAcc& r2,const doubleAcc& r1) { + return VectorAcc(r1.t*r2.p, + r1.t*r2.v + r1.d*r2.p, + r1.t*r2.dv + 2*r1.d*r2.v + r1.dd*r2.p + ); +} + +VectorAcc& VectorAcc::operator = (const VectorAcc& arg) { + p=arg.p; + v=arg.v; + dv=arg.dv; + return *this; +} + +VectorAcc& VectorAcc::operator = (const Vector& arg) { + p=arg; + v=Vector::Zero(); + dv=Vector::Zero(); + return *this; +} + +VectorAcc& VectorAcc::operator += (const VectorAcc& arg) { + p+=arg.p; + v+=arg.v; + dv+= arg.dv; + return *this; +} +VectorAcc& VectorAcc::operator -= (const VectorAcc& arg) { + p-=arg.p; + v-=arg.v; + dv-=arg.dv; + return *this; +} + +VectorAcc VectorAcc::Zero() { + return VectorAcc(Vector::Zero(),Vector::Zero(),Vector::Zero()); +} + +void VectorAcc::ReverseSign() { + p.ReverseSign(); + v.ReverseSign(); + dv.ReverseSign(); +} + +doubleAcc VectorAcc::Norm() { + doubleAcc res; + res.t = p.Norm(); + res.d = dot(p,v)/res.t; + res.dd = (dot(p,dv)+dot(v,v)-res.d*res.d)/res.t; + return res; +} + +doubleAcc dot(const VectorAcc& lhs,const VectorAcc& rhs) { + return doubleAcc( dot(lhs.p,rhs.p), + dot(lhs.p,rhs.v)+dot(lhs.v,rhs.p), + dot(lhs.p,rhs.dv)+2*dot(lhs.v,rhs.v)+dot(lhs.dv,rhs.p) + ); +} + +doubleAcc dot(const VectorAcc& lhs,const Vector& rhs) { + return doubleAcc( dot(lhs.p,rhs), + dot(lhs.v,rhs), + dot(lhs.dv,rhs) + ); +} + +doubleAcc dot(const Vector& lhs,const VectorAcc& rhs) { + return doubleAcc( dot(lhs,rhs.p), + dot(lhs,rhs.v), + dot(lhs,rhs.dv) + ); +} + + +bool Equal(const VectorAcc& r1,const VectorAcc& r2,double eps) { + return (Equal(r1.p,r2.p,eps) + && Equal(r1.v,r2.v,eps) + && Equal(r1.dv,r2.dv,eps) + ); +} + +bool Equal(const Vector& r1,const VectorAcc& r2,double eps) { + return (Equal(r1,r2.p,eps) + && Equal(Vector::Zero(),r2.v,eps) + && Equal(Vector::Zero(),r2.dv,eps) + ); +} + +bool Equal(const VectorAcc& r1,const Vector& r2,double eps) { + return (Equal(r1.p,r2,eps) + && Equal(r1.v,Vector::Zero(),eps) + && Equal(r1.dv,Vector::Zero(),eps) + ); +} + +VectorAcc operator / (const VectorAcc& r1,double r2) { + return r1*(1.0/r2); +} + +VectorAcc operator / (const VectorAcc& r2,const doubleAcc& r1) { + return r2*(1.0/r1); +} + + + +/////////////////// RotationAcc ///////////////////////////////////// + +RotationAcc operator* (const RotationAcc& r1,const RotationAcc& r2) { + return RotationAcc( r1.R * r2.R, + r1.w + r1.R*r2.w, + r1.dw + r1.w*(r1.R*r2.w) + r1.R*r2.dw + ); +} + +RotationAcc operator* (const Rotation& r1,const RotationAcc& r2) { + return RotationAcc( r1*r2.R, r1*r2.w, r1*r2.dw); +} + +RotationAcc operator* (const RotationAcc& r1,const Rotation& r2) { + return RotationAcc( r1.R*r2, r1.w, r1.dw ); +} + +RotationAcc& RotationAcc::operator = (const RotationAcc& arg) { + R=arg.R; + w=arg.w; + dw=arg.dw; + return *this; +} +RotationAcc& RotationAcc::operator = (const Rotation& arg) { + R = arg; + w = Vector::Zero(); + dw = Vector::Zero(); + return *this; +} + +RotationAcc RotationAcc::Identity() { + return RotationAcc(Rotation::Identity(),Vector::Zero(),Vector::Zero()); +} + +RotationAcc RotationAcc::Inverse() const { + return RotationAcc(R.Inverse(),-R.Inverse(w),-R.Inverse(dw)); +} 
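The pattern visible in the VectorAcc and RotationAcc operators above is a second-order Rall algebra: every quantity carries its value together with its first and second derivative, and each arithmetic operator propagates all three with the product rule, which is what makes the FrameAcc family usable for automatic differentiation of poses, twists and accelerations. A minimal scalar analogue is sketched below; it is illustrative only, the library itself uses the Rall2d template for doubleAcc.

// Minimal scalar analogue of the value/derivative/second-derivative triplets
// propagated above. Illustrative only; not the library's Rall2d template.
struct Acc {
    double t;   // value
    double d;   // first derivative
    double dd;  // second derivative
};

// product rule: (ab)' = a'b + ab',  (ab)'' = a''b + 2a'b' + ab''
Acc operator*(const Acc& a, const Acc& b)
{
    return { a.t * b.t,
             a.d * b.t + a.t * b.d,
             a.dd * b.t + 2.0 * a.d * b.d + a.t * b.dd };
}

int main()
{
    Acc x{2.0, 1.0, 0.0};   // x = 2 with unit first derivative
    Acc y = x * x;          // y = x^2
    // y.t == 4, y.d == 2*x == 4, y.dd == 2, as expected for x^2
    return (y.dd == 2.0) ? 0 : 1;
}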
+ +VectorAcc RotationAcc::Inverse(const VectorAcc& arg) const { + VectorAcc tmp; + tmp.p = R.Inverse(arg.p); + tmp.v = R.Inverse(arg.v - w * arg.p); + tmp.dv = R.Inverse(arg.dv - dw*arg.p - w*(arg.v+R*tmp.v)); + return tmp; +} + +VectorAcc RotationAcc::Inverse(const Vector& arg) const { + VectorAcc tmp; + tmp.p = R.Inverse(arg); + tmp.v = R.Inverse(-w*arg); + tmp.dv = R.Inverse(-dw*arg - w*(R*tmp.v)); + return tmp; +} + + +VectorAcc RotationAcc::operator*(const VectorAcc& arg) const { + VectorAcc tmp; + tmp.p = R*arg.p; + tmp.dv = R*arg.v; + tmp.v = w*tmp.p + tmp.dv; + tmp.dv = dw*tmp.p + w*(tmp.v + tmp.dv) + R*arg.dv; + return tmp; +} + +VectorAcc operator*(const Rotation& R,const VectorAcc& x) { + return VectorAcc(R*x.p,R*x.v,R*x.dv); +} + +VectorAcc RotationAcc::operator*(const Vector& arg) const { + VectorAcc tmp; + tmp.p = R*arg; + tmp.v = w*tmp.p; + tmp.dv = dw*tmp.p + w*tmp.v; + return tmp; +} + +/* + // = Rotations + // The Rot... static functions give the value of the appropriate rotation matrix back. + // The DoRot... functions apply a rotation R to *this,such that *this = *this * R. + + void RRotation::DoRotX(const RDouble& angle) { + w+=R*Vector(angle.grad,0,0); + R.DoRotX(angle.t); + } +RotationAcc RotationAcc::RotX(const doubleAcc& angle) { + return RotationAcc(Rotation::RotX(angle.t), + Vector(angle.d,0,0), + Vector(angle.dd,0,0) + ); +} + + void RRotation::DoRotY(const RDouble& angle) { + w+=R*Vector(0,angle.grad,0); + R.DoRotY(angle.t); + } +RotationAcc RotationAcc::RotY(const doubleAcc& angle) { + return RotationAcc( + Rotation::RotX(angle.t), + Vector(0,angle.d,0), + Vector(0,angle.dd,0) + ); +} + + void RRotation::DoRotZ(const RDouble& angle) { + w+=R*Vector(0,0,angle.grad); + R.DoRotZ(angle.t); + } +RotationAcc RotationAcc::RotZ(const doubleAcc& angle) { + return RotationAcc( + Rotation::RotZ(angle.t), + Vector(0,0,angle.d), + Vector(0,0,angle.dd) + ); +} + + + RRotation RRotation::Rot(const Vector& rotvec,const RDouble& angle) + // rotvec has arbitrary norm + // rotation around a constant vector ! + { + Vector v = rotvec.Normalize(); + return RRotation(Rotation::Rot2(v,angle.t),v*angle.grad); + } + + RRotation RRotation::Rot2(const Vector& rotvec,const RDouble& angle) + // rotvec is normalized. 
+ { + return RRotation(Rotation::Rot2(rotvec,angle.t),rotvec*angle.grad); + } + +*/ + +bool Equal(const RotationAcc& r1,const RotationAcc& r2,double eps) { + return (Equal(r1.w,r2.w,eps) && Equal(r1.R,r2.R,eps) && Equal(r1.dw,r2.dw,eps) ); +} +bool Equal(const Rotation& r1,const RotationAcc& r2,double eps) { + return (Equal(Vector::Zero(),r2.w,eps) && Equal(r1,r2.R,eps) && + Equal(Vector::Zero(),r2.dw,eps) ); +} +bool Equal(const RotationAcc& r1,const Rotation& r2,double eps) { + return (Equal(r1.w,Vector::Zero(),eps) && Equal(r1.R,r2,eps) && + Equal(r1.dw,Vector::Zero(),eps) ); +} + + +// Methods and operators related to FrameAcc +// They all delegate most of the work to RotationAcc and VectorAcc +FrameAcc& FrameAcc::operator = (const FrameAcc& arg) { + M=arg.M; + p=arg.p; + return *this; +} + +FrameAcc FrameAcc::Identity() { + return FrameAcc(RotationAcc::Identity(),VectorAcc::Zero()); +} + + +FrameAcc operator *(const FrameAcc& lhs,const FrameAcc& rhs) +{ + return FrameAcc(lhs.M*rhs.M,lhs.M*rhs.p+lhs.p); +} +FrameAcc operator *(const FrameAcc& lhs,const Frame& rhs) +{ + return FrameAcc(lhs.M*rhs.M,lhs.M*rhs.p+lhs.p); +} +FrameAcc operator *(const Frame& lhs,const FrameAcc& rhs) +{ + return FrameAcc(lhs.M*rhs.M,lhs.M*rhs.p+lhs.p); +} + +VectorAcc FrameAcc::operator *(const VectorAcc & arg) const +{ + return M*arg+p; +} +VectorAcc FrameAcc::operator *(const Vector & arg) const +{ + return M*arg+p; +} + +VectorAcc FrameAcc::Inverse(const VectorAcc& arg) const +{ + return M.Inverse(arg-p); +} + +VectorAcc FrameAcc::Inverse(const Vector& arg) const +{ + return M.Inverse(arg-p); +} + +FrameAcc FrameAcc::Inverse() const +{ + return FrameAcc(M.Inverse(),-M.Inverse(p)); +} + +FrameAcc& FrameAcc::operator =(const Frame & arg) +{ + M = arg.M; + p = arg.p; + return *this; +} + +bool Equal(const FrameAcc& r1,const FrameAcc& r2,double eps) { + return (Equal(r1.M,r2.M,eps) && Equal(r1.p,r2.p,eps)); +} +bool Equal(const Frame& r1,const FrameAcc& r2,double eps) { + return (Equal(r1.M,r2.M,eps) && Equal(r1.p,r2.p,eps)); +} +bool Equal(const FrameAcc& r1,const Frame& r2,double eps) { + return (Equal(r1.M,r2.M,eps) && Equal(r1.p,r2.p,eps)); +} + + +Frame FrameAcc::GetFrame() const { + return Frame(M.R,p.p); +} + + +Twist FrameAcc::GetTwist() const { + return Twist(p.v,M.w); +} + + +Twist FrameAcc::GetAccTwist() const { + return Twist(p.dv,M.dw); +} + + + + + + + + + + + + + + + + + +TwistAcc TwistAcc::Zero() +{ + return TwistAcc(VectorAcc::Zero(),VectorAcc::Zero()); +} + + +void TwistAcc::ReverseSign() +{ + vel.ReverseSign(); + rot.ReverseSign(); +} + +TwistAcc TwistAcc::RefPoint(const VectorAcc& v_base_AB) + // Changes the reference point of the TwistAcc. + // The RVector v_base_AB is expressed in the same base as the TwistAcc + // The RVector v_base_AB is a RVector from the old point to + // the new point. 
+ // Complexity : 6M+6A +{ + return TwistAcc(this->vel+this->rot*v_base_AB,this->rot); +} + +TwistAcc& TwistAcc::operator-=(const TwistAcc& arg) +{ + vel-=arg.vel; + rot -=arg.rot; + return *this; +} + +TwistAcc& TwistAcc::operator+=(const TwistAcc& arg) +{ + vel+=arg.vel; + rot +=arg.rot; + return *this; +} + + +TwistAcc operator*(const TwistAcc& lhs,double rhs) +{ + return TwistAcc(lhs.vel*rhs,lhs.rot*rhs); +} + +TwistAcc operator*(double lhs,const TwistAcc& rhs) +{ + return TwistAcc(lhs*rhs.vel,lhs*rhs.rot); +} + +TwistAcc operator/(const TwistAcc& lhs,double rhs) +{ + return TwistAcc(lhs.vel/rhs,lhs.rot/rhs); +} + + +TwistAcc operator*(const TwistAcc& lhs,const doubleAcc& rhs) +{ + return TwistAcc(lhs.vel*rhs,lhs.rot*rhs); +} + +TwistAcc operator*(const doubleAcc& lhs,const TwistAcc& rhs) +{ + return TwistAcc(lhs*rhs.vel,lhs*rhs.rot); +} + +TwistAcc operator/(const TwistAcc& lhs,const doubleAcc& rhs) +{ + return TwistAcc(lhs.vel/rhs,lhs.rot/rhs); +} + + + +// addition of TwistAcc's +TwistAcc operator+(const TwistAcc& lhs,const TwistAcc& rhs) +{ + return TwistAcc(lhs.vel+rhs.vel,lhs.rot+rhs.rot); +} + +TwistAcc operator-(const TwistAcc& lhs,const TwistAcc& rhs) +{ + return TwistAcc(lhs.vel-rhs.vel,lhs.rot-rhs.rot); +} + +// unary - +TwistAcc operator-(const TwistAcc& arg) +{ + return TwistAcc(-arg.vel,-arg.rot); +} + + + + + +TwistAcc RotationAcc::Inverse(const TwistAcc& arg) const +{ + return TwistAcc(Inverse(arg.vel),Inverse(arg.rot)); +} + +TwistAcc RotationAcc::operator * (const TwistAcc& arg) const +{ + return TwistAcc((*this)*arg.vel,(*this)*arg.rot); +} + +TwistAcc RotationAcc::Inverse(const Twist& arg) const +{ + return TwistAcc(Inverse(arg.vel),Inverse(arg.rot)); +} + +TwistAcc RotationAcc::operator * (const Twist& arg) const +{ + return TwistAcc((*this)*arg.vel,(*this)*arg.rot); +} + + +TwistAcc FrameAcc::operator * (const TwistAcc& arg) const +{ + TwistAcc tmp; + tmp.rot = M*arg.rot; + tmp.vel = M*arg.vel+p*tmp.rot; + return tmp; +} + +TwistAcc FrameAcc::operator * (const Twist& arg) const +{ + TwistAcc tmp; + tmp.rot = M*arg.rot; + tmp.vel = M*arg.vel+p*tmp.rot; + return tmp; +} + +TwistAcc FrameAcc::Inverse(const TwistAcc& arg) const +{ + TwistAcc tmp; + tmp.rot = M.Inverse(arg.rot); + tmp.vel = M.Inverse(arg.vel-p*arg.rot); + return tmp; +} + +TwistAcc FrameAcc::Inverse(const Twist& arg) const +{ + TwistAcc tmp; + tmp.rot = M.Inverse(arg.rot); + tmp.vel = M.Inverse(arg.vel-p*arg.rot); + return tmp; +} + +Twist TwistAcc::GetTwist() const { + return Twist(vel.p,rot.p); +} + +Twist TwistAcc::GetTwistDot() const { + return Twist(vel.v,rot.v); +} + +bool Equal(const TwistAcc& a,const TwistAcc& b,double eps) { + return (Equal(a.rot,b.rot,eps)&& + Equal(a.vel,b.vel,eps) ); +} +bool Equal(const Twist& a,const TwistAcc& b,double eps) { + return (Equal(a.rot,b.rot,eps)&& + Equal(a.vel,b.vel,eps) ); +} +bool Equal(const TwistAcc& a,const Twist& b,double eps) { + return (Equal(a.rot,b.rot,eps)&& + Equal(a.vel,b.vel,eps) ); +} + diff --git a/intern/itasc/kdl/frames.cpp b/intern/itasc/kdl/frames.cpp new file mode 100644 index 00000000000..7dcc39f2cd4 --- /dev/null +++ b/intern/itasc/kdl/frames.cpp @@ -0,0 +1,389 @@ +/*************************************************************************** + frames.cxx - description + ------------------------- + begin : June 2006 + copyright : (C) 2006 Erwin Aertbelien + email : firstname.lastname@mech.kuleuven.ac.be + + History (only major changes)( AUTHOR-Description ) : + + *************************************************************************** 
+ * This library is free software; you can redistribute it and/or * + * modify it under the terms of the GNU Lesser General Public * + * License as published by the Free Software Foundation; either * + * version 2.1 of the License, or (at your option) any later version. * + * * + * This library is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * + * Lesser General Public License for more details. * + * * + * You should have received a copy of the GNU Lesser General Public * + * License along with this library; if not, write to the Free Software * + * Foundation, Inc., 59 Temple Place, * + * Suite 330, Boston, MA 02111-1307 USA * + * * + ***************************************************************************/ + +#include "frames.hpp" + +namespace KDL { + +#ifndef KDL_INLINE +#include "frames.inl" +#endif + +void Frame::Make4x4(double * d) +{ + int i; + int j; + for (i=0;i<3;i++) { + for (j=0;j<3;j++) + d[i*4+j]=M(i,j); + d[i*4+3] = p(i)/1000; + } + for (j=0;j<3;j++) + d[12+j] = 0.; + d[15] = 1; +} + +Frame Frame::DH_Craig1989(double a,double alpha,double d,double theta) +// returns Modified Denavit-Hartenberg parameters (According to Craig) +{ + double ct,st,ca,sa; + ct = cos(theta); + st = sin(theta); + sa = sin(alpha); + ca = cos(alpha); + return Frame(Rotation( + ct, -st, 0, + st*ca, ct*ca, -sa, + st*sa, ct*sa, ca ), + Vector( + a, -sa*d, ca*d ) + ); +} + +Frame Frame::DH(double a,double alpha,double d,double theta) +// returns Denavit-Hartenberg parameters (Non-Modified DH) +{ + double ct,st,ca,sa; + ct = cos(theta); + st = sin(theta); + sa = sin(alpha); + ca = cos(alpha); + return Frame(Rotation( + ct, -st*ca, st*sa, + st, ct*ca, -ct*sa, + 0, sa, ca ), + Vector( + a*ct, a*st, d ) + ); +} + +double Vector2::Norm() const +{ + double tmp0 = fabs(data[0]); + double tmp1 = fabs(data[1]); + if (tmp0 >= tmp1) { + if (tmp1 == 0) + return 0; + return tmp0*sqrt(1+sqr(tmp1/tmp0)); + } else { + return tmp1*sqrt(1+sqr(tmp0/tmp1)); + } +} +// makes v a unitvector and returns the norm of v. +// if v is smaller than eps, Vector(1,0,0) is returned with norm 0. +// if this is not good, check the return value of this method. +double Vector2::Normalize(double eps) { + double v = this->Norm(); + if (v < eps) { + *this = Vector2(1,0); + return v; + } else { + *this = (*this)/v; + return v; + } +} + + +// do some effort not to lose precision +double Vector::Norm() const +{ + double tmp1; + double tmp2; + tmp1 = fabs(data[0]); + tmp2 = fabs(data[1]); + if (tmp1 >= tmp2) { + tmp2=fabs(data[2]); + if (tmp1 >= tmp2) { + if (tmp1 == 0) { + // only to everything exactly zero case, all other are handled correctly + return 0; + } + return tmp1*sqrt(1+sqr(data[1]/data[0])+sqr(data[2]/data[0])); + } else { + return tmp2*sqrt(1+sqr(data[0]/data[2])+sqr(data[1]/data[2])); + } + } else { + tmp1=fabs(data[2]); + if (tmp2 > tmp1) { + return tmp2*sqrt(1+sqr(data[0]/data[1])+sqr(data[2]/data[1])); + } else { + return tmp1*sqrt(1+sqr(data[0]/data[2])+sqr(data[1]/data[2])); + } + } +} + +// makes v a unitvector and returns the norm of v. +// if v is smaller than eps, Vector(1,0,0) is returned with norm 0. +// if this is not good, check the return value of this method. 
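+// Example (illustrative only, not from the original sources):
+//   Vector v(3.0, 0.0, 4.0);
+//   double n = v.Normalize(); // n == 5.0, and v becomes (0.6, 0.0, 0.8)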
+double Vector::Normalize(double eps) { + double v = this->Norm(); + if (v < eps) { + *this = Vector(1,0,0); + return v; + } else { + *this = (*this)/v; + return v; + } +} + + +bool Equal(const Rotation& a,const Rotation& b,double eps) { + return (Equal(a.data[0],b.data[0],eps) && + Equal(a.data[1],b.data[1],eps) && + Equal(a.data[2],b.data[2],eps) && + Equal(a.data[3],b.data[3],eps) && + Equal(a.data[4],b.data[4],eps) && + Equal(a.data[5],b.data[5],eps) && + Equal(a.data[6],b.data[6],eps) && + Equal(a.data[7],b.data[7],eps) && + Equal(a.data[8],b.data[8],eps) ); +} + +void Rotation::Ortho() +{ + double n; + n=sqrt(sqr(data[0])+sqr(data[3])+sqr(data[6]));n=(n>1e-10)?1.0/n:0.0;data[0]*=n;data[3]*=n;data[6]*=n; + n=sqrt(sqr(data[1])+sqr(data[4])+sqr(data[7]));n=(n>1e-10)?1.0/n:0.0;data[1]*=n;data[4]*=n;data[7]*=n; + n=sqrt(sqr(data[2])+sqr(data[5])+sqr(data[8]));n=(n>1e-10)?1.0/n:0.0;data[2]*=n;data[5]*=n;data[8]*=n; +} + +Rotation operator *(const Rotation& lhs,const Rotation& rhs) +// Complexity : 27M+27A +{ + return Rotation( + lhs.data[0]*rhs.data[0]+lhs.data[1]*rhs.data[3]+lhs.data[2]*rhs.data[6], + lhs.data[0]*rhs.data[1]+lhs.data[1]*rhs.data[4]+lhs.data[2]*rhs.data[7], + lhs.data[0]*rhs.data[2]+lhs.data[1]*rhs.data[5]+lhs.data[2]*rhs.data[8], + lhs.data[3]*rhs.data[0]+lhs.data[4]*rhs.data[3]+lhs.data[5]*rhs.data[6], + lhs.data[3]*rhs.data[1]+lhs.data[4]*rhs.data[4]+lhs.data[5]*rhs.data[7], + lhs.data[3]*rhs.data[2]+lhs.data[4]*rhs.data[5]+lhs.data[5]*rhs.data[8], + lhs.data[6]*rhs.data[0]+lhs.data[7]*rhs.data[3]+lhs.data[8]*rhs.data[6], + lhs.data[6]*rhs.data[1]+lhs.data[7]*rhs.data[4]+lhs.data[8]*rhs.data[7], + lhs.data[6]*rhs.data[2]+lhs.data[7]*rhs.data[5]+lhs.data[8]*rhs.data[8] + ); + +} + + +Rotation Rotation::RPY(double roll,double pitch,double yaw) + { + double ca1,cb1,cc1,sa1,sb1,sc1; + ca1 = cos(yaw); sa1 = sin(yaw); + cb1 = cos(pitch);sb1 = sin(pitch); + cc1 = cos(roll);sc1 = sin(roll); + return Rotation(ca1*cb1,ca1*sb1*sc1 - sa1*cc1,ca1*sb1*cc1 + sa1*sc1, + sa1*cb1,sa1*sb1*sc1 + ca1*cc1,sa1*sb1*cc1 - ca1*sc1, + -sb1,cb1*sc1,cb1*cc1); + } + +// Gives back a rotation matrix specified with RPY convention +void Rotation::GetRPY(double& roll,double& pitch,double& yaw) const + { + if (fabs(data[6]) > 1.0 - epsilon ) { + roll = -sign(data[6]) * atan2(data[1], data[4]); + pitch= -sign(data[6]) * PI / 2; + yaw = 0.0 ; + } else { + roll = atan2(data[7], data[8]); + pitch = atan2(-data[6], sqrt( sqr(data[0]) +sqr(data[3]) ) ); + yaw = atan2(data[3], data[0]); + } + } + +Rotation Rotation::EulerZYZ(double Alfa,double Beta,double Gamma) { + double sa,ca,sb,cb,sg,cg; + sa = sin(Alfa);ca = cos(Alfa); + sb = sin(Beta);cb = cos(Beta); + sg = sin(Gamma);cg = cos(Gamma); + return Rotation( ca*cb*cg-sa*sg, -ca*cb*sg-sa*cg, ca*sb, + sa*cb*cg+ca*sg, -sa*cb*sg+ca*cg, sa*sb, + -sb*cg , sb*sg, cb + ); + + } + + +void Rotation::GetEulerZYZ(double& alfa,double& beta,double& gamma) const { + if (fabs(data[6]) < epsilon ) { + alfa=0.0; + if (data[8]>0) { + beta = 0.0; + gamma= atan2(-data[1],data[0]); + } else { + beta = PI; + gamma= atan2(data[1],-data[0]); + } + } else { + alfa=atan2(data[5], data[2]); + beta=atan2(sqrt( sqr(data[6]) +sqr(data[7]) ),data[8]); + gamma=atan2(data[7], -data[6]); + } + } + +Rotation Rotation::Rot(const Vector& rotaxis,double angle) { + // The formula is + // V.(V.tr) + st*[V x] + ct*(I-V.(V.tr)) + // can be found by multiplying it with an arbitrary vector p + // and noting that this vector is rotated. 
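+ // (This is the Rodrigues rotation formula with V the normalized rotation
+ // axis; below, ct = cos(angle), st = sin(angle) and vt = 1-ct.)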
+ double ct = cos(angle); + double st = sin(angle); + double vt = 1-ct; + Vector rotvec = rotaxis; + rotvec.Normalize(); + return Rotation( + ct + vt*rotvec(0)*rotvec(0), + -rotvec(2)*st + vt*rotvec(0)*rotvec(1), + rotvec(1)*st + vt*rotvec(0)*rotvec(2), + rotvec(2)*st + vt*rotvec(1)*rotvec(0), + ct + vt*rotvec(1)*rotvec(1), + -rotvec(0)*st + vt*rotvec(1)*rotvec(2), + -rotvec(1)*st + vt*rotvec(2)*rotvec(0), + rotvec(0)*st + vt*rotvec(2)*rotvec(1), + ct + vt*rotvec(2)*rotvec(2) + ); + } + +Rotation Rotation::Rot2(const Vector& rotvec,double angle) { + // rotvec should be normalized ! + // The formula is + // V.(V.tr) + st*[V x] + ct*(I-V.(V.tr)) + // can be found by multiplying it with an arbitrary vector p + // and noting that this vector is rotated. + double ct = cos(angle); + double st = sin(angle); + double vt = 1-ct; + return Rotation( + ct + vt*rotvec(0)*rotvec(0), + -rotvec(2)*st + vt*rotvec(0)*rotvec(1), + rotvec(1)*st + vt*rotvec(0)*rotvec(2), + rotvec(2)*st + vt*rotvec(1)*rotvec(0), + ct + vt*rotvec(1)*rotvec(1), + -rotvec(0)*st + vt*rotvec(1)*rotvec(2), + -rotvec(1)*st + vt*rotvec(2)*rotvec(0), + rotvec(0)*st + vt*rotvec(2)*rotvec(1), + ct + vt*rotvec(2)*rotvec(2) + ); +} + + + +Vector Rotation::GetRot() const + // Returns a vector with the direction of the equiv. axis + // and its norm is angle + { + Vector axis = Vector((data[7]-data[5]), + (data[2]-data[6]), + (data[3]-data[1]) )/2; + + double sa = axis.Norm(); + double ca = (data[0]+data[4]+data[8]-1)/2.0; + double alfa; + if (sa > epsilon) + alfa = ::atan2(sa,ca)/sa; + else { + if (ca < 0.0) { + alfa = KDL::PI; + axis.data[0] = 0.0; + axis.data[1] = 0.0; + axis.data[2] = 0.0; + if (data[0] > 0.0) { + axis.data[0] = 1.0; + } else if (data[4] > 0.0) { + axis.data[1] = 1.0; + } else { + axis.data[2] = 1.0; + } + } else { + alfa = 0.0; + } + } + return axis * alfa; + } + +Vector2 Rotation::GetXZRot() const +{ + // [0,1,0] x Y + Vector2 axis(data[7], -data[1]); + double norm = axis.Normalize(); + if (norm < epsilon) { + norm = (data[4] < 0.0) ? PI : 0.0; + } else { + norm = acos(data[4]); + } + return axis*norm; +} + + +/** Returns the rotation angle around the equiv. axis + * @param axis the rotation axis is returned in this variable + * @param eps : in the case of angle == 0 : rot axis is undefined and choosen + * to be +/- Z-axis + * in the case of angle == PI : 2 solutions, positive Z-component + * of the axis is choosen. 
+ * @result returns the rotation angle (between [0..PI] ) + * /todo : + * Check corresponding routines in rframes and rrframes + */ +double Rotation::GetRotAngle(Vector& axis,double eps) const { + double ca = (data[0]+data[4]+data[8]-1)/2.0; + if (ca>1-eps) { + // undefined choose the Z-axis, and angle 0 + axis = Vector(0,0,1); + return 0; + } + if (ca < -1+eps) { + // two solutions, choose a positive Z-component of the axis + double z = sqrt( (data[8]+1)/2 ); + double x = (data[2])/2/z; + double y = (data[5])/2/z; + axis = Vector( x,y,z ); + return PI; + } + double angle = acos(ca); + double sa = sin(angle); + axis = Vector((data[7]-data[5])/2/sa, + (data[2]-data[6])/2/sa, + (data[3]-data[1])/2/sa ); + return angle; +} + +bool operator==(const Rotation& a,const Rotation& b) { +#ifdef KDL_USE_EQUAL + return Equal(a,b); +#else + return ( a.data[0]==b.data[0] && + a.data[1]==b.data[1] && + a.data[2]==b.data[2] && + a.data[3]==b.data[3] && + a.data[4]==b.data[4] && + a.data[5]==b.data[5] && + a.data[6]==b.data[6] && + a.data[7]==b.data[7] && + a.data[8]==b.data[8] ); +#endif +} +} diff --git a/intern/itasc/kdl/frames.hpp b/intern/itasc/kdl/frames.hpp new file mode 100644 index 00000000000..20590c5303e --- /dev/null +++ b/intern/itasc/kdl/frames.hpp @@ -0,0 +1,1097 @@ +/*************************************************************************** + frames.hpp `- description + ------------------------- + begin : June 2006 + copyright : (C) 2006 Erwin Aertbelien + email : firstname.lastname@mech.kuleuven.be + + History (only major changes)( AUTHOR-Description ) : + + *************************************************************************** + * This library is free software; you can redistribute it and/or * + * modify it under the terms of the GNU Lesser General Public * + * License as published by the Free Software Foundation; either * + * version 2.1 of the License, or (at your option) any later version. * + * * + * This library is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * + * Lesser General Public License for more details. * + * * + * You should have received a copy of the GNU Lesser General Public * + * License along with this library; if not, write to the Free Software * + * Foundation, Inc., 59 Temple Place, * + * Suite 330, Boston, MA 02111-1307 USA * + * * + ***************************************************************************/ + +/** + * \file + * \warning + * Efficienty can be improved by writing p2 = A*(B*(C*p1))) instead of + * p2=A*B*C*p1 + * + * \par PROPOSED NAMING CONVENTION FOR FRAME-like OBJECTS + * + * \verbatim + * A naming convention of objects of the type defined in this file : + * (1) Frame : F... + * Rotation : R ... + * (2) Twist : T ... + * Wrench : W ... + * Vector : V ... + * This prefix is followed by : + * for category (1) : + * F_A_B : w.r.t. frame A, frame B expressed + * ( each column of F_A_B corresponds to an axis of B, + * expressed w.r.t. frame A ) + * in mathematical convention : + * A + * F_A_B == F + * B + * + * for category (2) : + * V_B : a vector expressed w.r.t. frame B + * + * This can also be prepended by a name : + * e.g. 
: temporaryV_B + * + * With this convention one can write : + * + * F_A_B = F_B_A.Inverse(); + * F_A_C = F_A_B * F_B_C; + * V_B = F_B_C * V_C; // both translation and rotation + * V_B = R_B_C * V_C; // only rotation + * \endverbatim + * + * \par CONVENTIONS FOR WHEN USED WITH ROBOTS : + * + * \verbatim + * world : represents the frame ([1 0 0,0 1 0,0 0 1],[0 0 0]') + * mp : represents mounting plate of a robot + * (i.e. everything before MP is constructed by robot manufacturer + * everything after MP is tool ) + * tf : represents task frame of a robot + * (i.e. frame in which motion and force control is expressed) + * sf : represents sensor frame of a robot + * (i.e. frame at which the forces measured by the force sensor + * are expressed ) + * + * Frame F_world_mp=...; + * Frame F_mp_sf(..) + * Frame F_mp_tf(,.) + * + * Wrench are measured in sensor frame SF, so one could write : + * Wrench_tf = F_mp_tf.Inverse()* ( F_mp_sf * Wrench_sf ); + * \endverbatim + * + * \par CONVENTIONS REGARDING UNITS : + * Any consistent series of units can be used, e.g. N,mm,Nmm,..mm/sec + * + * \par Twist and Wrench transformations + * 3 different types of transformations do exist for the twists + * and wrenches. + * + * \verbatim + * 1) Frame * Twist or Frame * Wrench : + * this transforms both the velocity/force reference point + * and the basis to which the twist/wrench are expressed. + * 2) Rotation * Twist or Rotation * Wrench : + * this transforms the basis to which the twist/wrench are + * expressed, but leaves the reference point intact. + * 3) Twist.RefPoint(v_base_AB) or Wrench.RefPoint(v_base_AB) + * this transforms only the reference point. v is expressed + * in the same base as the twist/wrench and points from the + * old reference point to the new reference point. + * \endverbatim + * + * \par Complexity + * Sometimes the amount of work is given in the documentation + * e.g. 6M+3A means 6 multiplications and 3 additions. + * + * \author + * Erwin Aertbelien, Div. PMA, Dep. of Mech. Eng., K.U.Leuven + * + ****************************************************************************/ +#ifndef KDL_FRAMES_H +#define KDL_FRAMES_H + + +#include "utilities/kdl-config.h" +#include "utilities/utility.h" + +///////////////////////////////////////////////////////////// + +namespace KDL { + + + +class Vector; +class Rotation; +class Frame; +class Wrench; +class Twist; +class Vector2; +class Rotation2; +class Frame2; + + + +/** + * \brief A concrete implementation of a 3 dimensional vector class + */ +class Vector +{ +public: + double data[3]; + //! Does not initialise the Vector to zero. use Vector::Zero() or SetToZero for that + inline Vector() {data[0]=data[1]=data[2] = 0.0;} + + //! Constructs a vector out of the three values x, y and z + inline Vector(double x,double y, double z); + + //! Constructs a vector out of an array of three values x, y and z + inline Vector(double* xyz); + + //! Constructs a vector out of an array of three values x, y and z + inline Vector(float* xyz); + + //! Assignment operator. The normal copy by value semantics. + inline Vector(const Vector& arg); + + //! store vector components in array + inline void GetValue(double* xyz) const; + + //! Assignment operator. The normal copy by value semantics. + inline Vector& operator = ( const Vector& arg); + + //! Access to elements, range checked when NDEBUG is not set, from 0..2 + inline double operator()(int index) const; + + //! 
Access to elements, range checked when NDEBUG is not set, from 0..2 + inline double& operator() (int index); + + //! Equivalent to double operator()(int index) const + double operator[] ( int index ) const + { + return this->operator() ( index ); + } + + //! Equivalent to double& operator()(int index) + double& operator[] ( int index ) + { + return this->operator() ( index ); + } + + inline double x() const; + inline double y() const; + inline double z() const; + inline void x(double); + inline void y(double); + inline void z(double); + + //! Reverses the sign of the Vector object itself + inline void ReverseSign(); + + + //! subtracts a vector from the Vector object itself + inline Vector& operator-=(const Vector& arg); + + + //! Adds a vector from the Vector object itself + inline Vector& operator +=(const Vector& arg); + + //! Scalar multiplication is defined + inline friend Vector operator*(const Vector& lhs,double rhs); + //! Scalar multiplication is defined + inline friend Vector operator*(double lhs,const Vector& rhs); + //! Scalar division is defined + + inline friend Vector operator/(const Vector& lhs,double rhs); + inline friend Vector operator+(const Vector& lhs,const Vector& rhs); + inline friend Vector operator-(const Vector& lhs,const Vector& rhs); + inline friend Vector operator*(const Vector& lhs,const Vector& rhs); + inline friend Vector operator-(const Vector& arg); + inline friend double dot(const Vector& lhs,const Vector& rhs); + + //! To have a uniform operator to put an element to zero, for scalar values + //! and for objects. + inline friend void SetToZero(Vector& v); + + //! @return a zero vector + inline static Vector Zero(); + + /** Normalizes this vector and returns it norm + * makes v a unitvector and returns the norm of v. + * if v is smaller than eps, Vector(1,0,0) is returned with norm 0. + * if this is not good, check the return value of this method. + */ + double Normalize(double eps=epsilon); + + //! @return the norm of the vector + double Norm() const; + + + + //! a 3D vector where the 2D vector v is put in the XY plane + inline void Set2DXY(const Vector2& v); + //! a 3D vector where the 2D vector v is put in the YZ plane + inline void Set2DYZ(const Vector2& v); + //! a 3D vector where the 2D vector v is put in the ZX plane + inline void Set2DZX(const Vector2& v); + //! a 3D vector where the 2D vector v_XY is put in the XY plane of the frame F_someframe_XY. + inline void Set2DPlane(const Frame& F_someframe_XY,const Vector2& v_XY); + + + //! do not use operator == because the definition of Equal(.,.) is slightly + //! different. It compares whether the 2 arguments are equal in an eps-interval + inline friend bool Equal(const Vector& a,const Vector& b,double eps=epsilon); + + //! return a normalized vector + inline friend Vector Normalize(const Vector& a, double eps=epsilon); + + //! The literal equality operator==(), also identical. + inline friend bool operator==(const Vector& a,const Vector& b); + //! The literal inequality operator!=(). + inline friend bool operator!=(const Vector& a,const Vector& b); + + friend class Rotation; + friend class Frame; +}; + + +/** + \brief represents rotations in 3 dimensional space. + + This class represents a rotation matrix with the following + conventions : + \verbatim + Suppose V2 = R*V, (1) + V is expressed in frame B + V2 is expressed in frame A + This matrix R consists of 3 collumns [ X,Y,Z ], + X,Y, and Z contain the axes of frame B, expressed in frame A + Because of linearity expr(1) is valid. 
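+ For example, those axes can be read back with R.UnitX(), R.UnitY() and
+ R.UnitZ() (equivalently R*Vector(1,0,0), etc.), giving the axes of B
+ expressed in frame A.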
+ \endverbatim + This class only represents rotational_interpolation, not translation + Two interpretations are possible for rotation angles. + * if you rotate with angle around X frame A to have frame B, + then the result of SetRotX is equal to frame B expressed wrt A. + In code: + \verbatim + Rotation R; + F_A_B = R.SetRotX(angle); + \endverbatim + * Secondly, if you take the following code : + \verbatim + Vector p,p2; Rotation R; + R.SetRotX(angle); + p2 = R*p; + \endverbatim + then the frame p2 is rotated around X axis with (-angle). + Analogue reasonings can be applyd to SetRotY,SetRotZ,SetRot + \par type + Concrete implementation +*/ +class Rotation +{ +public: + double data[9]; + + inline Rotation() { + *this = Rotation::Identity(); + } + inline Rotation(double Xx,double Yx,double Zx, + double Xy,double Yy,double Zy, + double Xz,double Yz,double Zz); + inline Rotation(const Vector& x,const Vector& y,const Vector& z); + // default copy constructor is sufficient + + inline void setValue(float* oglmat); + inline void getValue(float* oglmat) const; + + inline Rotation& operator=(const Rotation& arg); + + //! Defines a multiplication R*V between a Rotation R and a Vector V. + //! Complexity : 9M+6A + inline Vector operator*(const Vector& v) const; + + //! Access to elements 0..2,0..2, bounds are checked when NDEBUG is not set + inline double& operator()(int i,int j); + + //! Access to elements 0..2,0..2, bounds are checked when NDEBUG is not set + inline double operator() (int i,int j) const; + + friend Rotation operator *(const Rotation& lhs,const Rotation& rhs); + + //! Sets the value of *this to its inverse. + inline void SetInverse(); + + //! Gives back the inverse rotation matrix of *this. + inline Rotation Inverse() const; + + //! The same as R.Inverse()*v but more efficient. + inline Vector Inverse(const Vector& v) const; + + //! The same as R.Inverse()*arg but more efficient. + inline Wrench Inverse(const Wrench& arg) const; + + //! The same as R.Inverse()*arg but more efficient. + inline Twist Inverse(const Twist& arg) const; + + //! Gives back an identity rotaton matrix + inline static Rotation Identity(); + + +// = Rotations + //! The Rot... static functions give the value of the appropriate rotation matrix back. + inline static Rotation RotX(double angle); + //! The Rot... static functions give the value of the appropriate rotation matrix back. + inline static Rotation RotY(double angle); + //! The Rot... static functions give the value of the appropriate rotation matrix back. + inline static Rotation RotZ(double angle); + //! The DoRot... functions apply a rotation R to *this,such that *this = *this * Rot.. + //! DoRot... functions are only defined when they can be executed more efficiently + inline void DoRotX(double angle); + //! The DoRot... functions apply a rotation R to *this,such that *this = *this * Rot.. + //! DoRot... functions are only defined when they can be executed more efficiently + inline void DoRotY(double angle); + //! The DoRot... functions apply a rotation R to *this,such that *this = *this * Rot.. + //! DoRot... functions are only defined when they can be executed more efficiently + inline void DoRotZ(double angle); + + //! Along an arbitrary axes. It is not necessary to normalize rotaxis. + //! returns identity rotation matrix in the case that the norm of rotaxis + //! is to small to be used. + // @see Rot2 if you want to handle this error in another way. + static Rotation Rot(const Vector& rotaxis,double angle); + + //! Along an arbitrary axes. 
rotvec should be normalized. + static Rotation Rot2(const Vector& rotvec,double angle); + + // make sure the matrix is a pure rotation (no scaling) + void Ortho(); + + //! Returns a vector with the direction of the equiv. axis + //! and its norm is angle + Vector GetRot() const; + + //! Returns a 2D vector representing the equivalent rotation in the XZ plane that brings the + //! Y axis onto the Matrix Y axis and its norm is angle + Vector2 GetXZRot() const; + + /** Returns the rotation angle around the equiv. axis + * @param axis the rotation axis is returned in this variable + * @param eps : in the case of angle == 0 : rot axis is undefined and choosen + * to be +/- Z-axis + * in the case of angle == PI : 2 solutions, positive Z-component + * of the axis is choosen. + * @result returns the rotation angle (between [0..PI] ) + */ + double GetRotAngle(Vector& axis,double eps=epsilon) const; + + + //! Gives back a rotation matrix specified with EulerZYZ convention : + //! First rotate around Z with alfa, + //! then around the new Y with beta, then around + //! new Z with gamma. + static Rotation EulerZYZ(double Alfa,double Beta,double Gamma); + + //! Gives back the EulerZYZ convention description of the rotation matrix : + //! First rotate around Z with alfa, + //! then around the new Y with beta, then around + //! new Z with gamma. + //! + //! Variables are bound by + //! (-PI <= alfa <= PI), + //! (0 <= beta <= PI), + //! (-PI <= alfa <= PI) + void GetEulerZYZ(double& alfa,double& beta,double& gamma) const; + + + //! Sets the value of this object to a rotation specified with RPY convention: + //! first rotate around X with roll, then around the + //! old Y with pitch, then around old Z with alfa + static Rotation RPY(double roll,double pitch,double yaw); + + //! Gives back a vector in RPY coordinates, variables are bound by + //! -PI <= roll <= PI + //! -PI <= Yaw <= PI + //! -PI/2 <= PITCH <= PI/2 + //! + //! convention : first rotate around X with roll, then around the + //! old Y with pitch, then around old Z with alfa + void GetRPY(double& roll,double& pitch,double& yaw) const; + + + //! Gives back a rotation matrix specified with EulerZYX convention : + //! First rotate around Z with alfa, + //! then around the new Y with beta, then around + //! new X with gamma. + //! + //! closely related to RPY-convention + inline static Rotation EulerZYX(double Alfa,double Beta,double Gamma) { + return RPY(Gamma,Beta,Alfa); + } + + //! GetEulerZYX gets the euler ZYX parameters of a rotation : + //! First rotate around Z with alfa, + //! then around the new Y with beta, then around + //! new X with gamma. + //! + //! Range of the results of GetEulerZYX : + //! -PI <= alfa <= PI + //! -PI <= gamma <= PI + //! -PI/2 <= beta <= PI/2 + //! + //! Closely related to RPY-convention. + inline void GetEulerZYX(double& Alfa,double& Beta,double& Gamma) const { + GetRPY(Gamma,Beta,Alfa); + } + + //! Transformation of the base to which the twist is expressed. + //! Complexity : 18M+12A + //! @see Frame*Twist for a transformation that also transforms + //! the velocity reference point. + inline Twist operator * (const Twist& arg) const; + + //! Transformation of the base to which the wrench is expressed. + //! Complexity : 18M+12A + //! @see Frame*Wrench for a transformation that also transforms + //! the force reference point. + inline Wrench operator * (const Wrench& arg) const; + + //! 
Access to the underlying unitvectors of the rotation matrix + inline Vector UnitX() const { + return Vector(data[0],data[3],data[6]); + } + + //! Access to the underlying unitvectors of the rotation matrix + inline void UnitX(const Vector& X) { + data[0] = X(0); + data[3] = X(1); + data[6] = X(2); + } + + //! Access to the underlying unitvectors of the rotation matrix + inline Vector UnitY() const { + return Vector(data[1],data[4],data[7]); + } + + //! Access to the underlying unitvectors of the rotation matrix + inline void UnitY(const Vector& X) { + data[1] = X(0); + data[4] = X(1); + data[7] = X(2); + } + + //! Access to the underlying unitvectors of the rotation matrix + inline Vector UnitZ() const { + return Vector(data[2],data[5],data[8]); + } + + //! Access to the underlying unitvectors of the rotation matrix + inline void UnitZ(const Vector& X) { + data[2] = X(0); + data[5] = X(1); + data[8] = X(2); + } + + //! do not use operator == because the definition of Equal(.,.) is slightly + //! different. It compares whether the 2 arguments are equal in an eps-interval + friend bool Equal(const Rotation& a,const Rotation& b,double eps=epsilon); + + //! The literal equality operator==(), also identical. + friend bool operator==(const Rotation& a,const Rotation& b); + //! The literal inequality operator!=() + friend bool operator!=(const Rotation& a,const Rotation& b); + + friend class Frame; +}; + bool operator==(const Rotation& a,const Rotation& b); + + + +/** + \brief represents a frame transformation in 3D space (rotation + translation) + + if V2 = Frame*V1 (V2 expressed in frame A, V1 expressed in frame B) + then V2 = Frame.M*V1+Frame.p + + Frame.M contains columns that represent the axes of frame B wrt frame A + Frame.p contains the origin of frame B expressed in frame A. +*/ +class Frame { +public: + Vector p; //!< origine of the Frame + Rotation M; //!< Orientation of the Frame + +public: + + inline Frame(const Rotation& R,const Vector& V); + + //! The rotation matrix defaults to identity + explicit inline Frame(const Vector& V); + //! The position matrix defaults to zero + explicit inline Frame(const Rotation& R); + + inline void setValue(float* oglmat); + inline void getValue(float* oglmat) const; + + inline Frame() {} + //! The copy constructor. Normal copy by value semantics. + inline Frame(const Frame& arg); + + //! Reads data from an double array + //\TODO should be formulated as a constructor + void Make4x4(double* d); + + //! Treats a frame as a 4x4 matrix and returns element i,j + //! Access to elements 0..3,0..3, bounds are checked when NDEBUG is not set + inline double operator()(int i,int j); + + //! Treats a frame as a 4x4 matrix and returns element i,j + //! Access to elements 0..3,0..3, bounds are checked when NDEBUG is not set + inline double operator() (int i,int j) const; + + // = Inverse + //! Gives back inverse transformation of a Frame + inline Frame Inverse() const; + + //! The same as p2=R.Inverse()*p but more efficient. + inline Vector Inverse(const Vector& arg) const; + + //! The same as p2=R.Inverse()*p but more efficient. + inline Wrench Inverse(const Wrench& arg) const; + + //! The same as p2=R.Inverse()*p but more efficient. + inline Twist Inverse(const Twist& arg) const; + + //! Normal copy-by-value semantics. + inline Frame& operator = (const Frame& arg); + + //! Transformation of the base to which the vector + //! is expressed. + inline Vector operator * (const Vector& arg) const; + + //! Transformation of both the force reference point + //! 
and of the base to which the wrench is expressed. + //! look at Rotation*Wrench operator for a transformation + //! of only the base to which the twist is expressed. + //! + //! Complexity : 24M+18A + inline Wrench operator * (const Wrench& arg) const; + + //! Transformation of both the velocity reference point + //! and of the base to which the twist is expressed. + //! look at Rotation*Twist for a transformation of only the + //! base to which the twist is expressed. + //! + //! Complexity : 24M+18A + inline Twist operator * (const Twist& arg) const; + + //! Composition of two frames. + inline friend Frame operator *(const Frame& lhs,const Frame& rhs); + + //! @return the identity transformation Frame(Rotation::Identity(),Vector::Zero()). + inline static Frame Identity(); + + //! The twist is expressed wrt the current + //! frame. This frame is integrated into an updated frame with + //! . Very simple first order integration rule. + inline void Integrate(const Twist& t_this,double frequency); + + /* + // DH_Craig1989 : constructs a transformationmatrix + // T_link(i-1)_link(i) with the Denavit-Hartenberg convention as + // described in the Craigs book: Craig, J. J.,Introduction to + // Robotics: Mechanics and Control, Addison-Wesley, + // isbn:0-201-10326-5, 1986. + // + // Note that the frame is a redundant way to express the information + // in the DH-convention. + // \verbatim + // Parameters in full : a(i-1),alpha(i-1),d(i),theta(i) + // + // axis i-1 is connected by link i-1 to axis i numbering axis 1 + // to axis n link 0 (immobile base) to link n + // + // link length a(i-1) length of the mutual perpendicular line + // (normal) between the 2 axes. This normal runs from (i-1) to + // (i) axis. + // + // link twist alpha(i-1): construct plane perpendicular to the + // normal project axis(i-1) and axis(i) into plane angle from + // (i-1) to (i) measured in the direction of the normal + // + // link offset d(i) signed distance between normal (i-1) to (i) + // and normal (i) to (i+1) along axis i joint angle theta(i) + // signed angle between normal (i-1) to (i) and normal (i) to + // (i+1) along axis i + // + // First and last joints : a(0)= a(n) = 0 + // alpha(0) = alpha(n) = 0 + // + // PRISMATIC : theta(1) = 0 d(1) arbitrarily + // + // REVOLUTE : theta(1) arbitrarily d(1) = 0 + // + // Not unique : if intersecting joint axis 2 choices for normal + // Frame assignment of the DH convention : Z(i-1) follows axis + // (i-1) X(i-1) is the normal between axis(i-1) and axis(i) + // Y(i-1) follows out of Z(i-1) and X(i-1) + // + // a(i-1) = distance from Z(i-1) to Z(i) along X(i-1) + // alpha(i-1) = angle between Z(i-1) to Z(i) along X(i-1) + // d(i) = distance from X(i-1) to X(i) along Z(i) + // theta(i) = angle between X(i-1) to X(i) along X(i) + // \endverbatim + */ + static Frame DH_Craig1989(double a,double alpha,double d,double theta); + + // DH : constructs a transformationmatrix T_link(i-1)_link(i) with + // the Denavit-Hartenberg convention as described in the original + // publictation: Denavit, J. and Hartenberg, R. S., A kinematic + // notation for lower-pair mechanisms based on matrices, ASME + // Journal of Applied Mechanics, 23:215-221, 1955. + + static Frame DH(double a,double alpha,double d,double theta); + + + //! do not use operator == because the definition of Equal(.,.) is slightly + //! different. It compares whether the 2 arguments are equal in an eps-interval + inline friend bool Equal(const Frame& a,const Frame& b,double eps=epsilon); + + //! 
The literal equality operator==(), also identical. + inline friend bool operator==(const Frame& a,const Frame& b); + //! The literal inequality operator!=(). + inline friend bool operator!=(const Frame& a,const Frame& b); +}; + +/** + * \brief represents both translational and rotational velocities. + * + * This class represents a twist. A twist is the combination of translational + * velocity and rotational velocity applied at one point. +*/ +class Twist { +public: + Vector vel; //!< The velocity of that point + Vector rot; //!< The rotational velocity of that point. +public: + + //! The default constructor initialises to Zero via the constructor of Vector. + Twist():vel(),rot() {}; + + Twist(const Vector& _vel,const Vector& _rot):vel(_vel),rot(_rot) {}; + + inline Twist& operator-=(const Twist& arg); + inline Twist& operator+=(const Twist& arg); + //! index-based access to components, first vel(0..2), then rot(3..5) + inline double& operator()(int i); + + //! index-based access to components, first vel(0..2), then rot(3..5) + //! For use with a const Twist + inline double operator()(int i) const; + + double operator[] ( int index ) const + { + return this->operator() ( index ); + } + + double& operator[] ( int index ) + { + return this->operator() ( index ); + } + + inline friend Twist operator*(const Twist& lhs,double rhs); + inline friend Twist operator*(double lhs,const Twist& rhs); + inline friend Twist operator/(const Twist& lhs,double rhs); + inline friend Twist operator+(const Twist& lhs,const Twist& rhs); + inline friend Twist operator-(const Twist& lhs,const Twist& rhs); + inline friend Twist operator-(const Twist& arg); + inline friend double dot(const Twist& lhs,const Wrench& rhs); + inline friend double dot(const Wrench& rhs,const Twist& lhs); + inline friend void SetToZero(Twist& v); + + + //! @return a zero Twist : Twist(Vector::Zero(),Vector::Zero()) + static inline Twist Zero(); + + //! Reverses the sign of the twist + inline void ReverseSign(); + + //! Changes the reference point of the twist. + //! The vector v_base_AB is expressed in the same base as the twist + //! The vector v_base_AB is a vector from the old point to + //! the new point. + //! + //! Complexity : 6M+6A + inline Twist RefPoint(const Vector& v_base_AB) const; + + + //! do not use operator == because the definition of Equal(.,.) is slightly + //! different. It compares whether the 2 arguments are equal in an eps-interval + inline friend bool Equal(const Twist& a,const Twist& b,double eps=epsilon); + + //! The literal equality operator==(), also identical. + inline friend bool operator==(const Twist& a,const Twist& b); + //! The literal inequality operator!=(). + inline friend bool operator!=(const Twist& a,const Twist& b); + +// = Friends + friend class Rotation; + friend class Frame; + +}; + +/** + * \brief represents both translational and rotational acceleration. + * + * This class represents an acceleration twist. A acceleration twist is + * the combination of translational + * acceleration and rotational acceleration applied at one point. +*/ +/* +class AccelerationTwist { +public: + Vector trans; //!< The translational acceleration of that point + Vector rot; //!< The rotational acceleration of that point. +public: + + //! The default constructor initialises to Zero via the constructor of Vector. 
+ AccelerationTwist():trans(),rot() {}; + + AccelerationTwist(const Vector& _trans,const Vector& _rot):trans(_trans),rot(_rot) {}; + + inline AccelerationTwist& operator-=(const AccelerationTwist& arg); + inline AccelerationTwist& operator+=(const AccelerationTwist& arg); + //! index-based access to components, first vel(0..2), then rot(3..5) + inline double& operator()(int i); + + //! index-based access to components, first vel(0..2), then rot(3..5) + //! For use with a const AccelerationTwist + inline double operator()(int i) const; + + double operator[] ( int index ) const + { + return this->operator() ( index ); + } + + double& operator[] ( int index ) + { + return this->operator() ( index ); + } + + inline friend AccelerationTwist operator*(const AccelerationTwist& lhs,double rhs); + inline friend AccelerationTwist operator*(double lhs,const AccelerationTwist& rhs); + inline friend AccelerationTwist operator/(const AccelerationTwist& lhs,double rhs); + inline friend AccelerationTwist operator+(const AccelerationTwist& lhs,const AccelerationTwist& rhs); + inline friend AccelerationTwist operator-(const AccelerationTwist& lhs,const AccelerationTwist& rhs); + inline friend AccelerationTwist operator-(const AccelerationTwist& arg); + //inline friend double dot(const AccelerationTwist& lhs,const Wrench& rhs); + //inline friend double dot(const Wrench& rhs,const AccelerationTwist& lhs); + inline friend void SetToZero(AccelerationTwist& v); + + + //! @return a zero AccelerationTwist : AccelerationTwist(Vector::Zero(),Vector::Zero()) + static inline AccelerationTwist Zero(); + + //! Reverses the sign of the AccelerationTwist + inline void ReverseSign(); + + //! Changes the reference point of the AccelerationTwist. + //! The vector v_base_AB is expressed in the same base as the AccelerationTwist + //! The vector v_base_AB is a vector from the old point to + //! the new point. + //! + //! Complexity : 6M+6A + inline AccelerationTwist RefPoint(const Vector& v_base_AB) const; + + + //! do not use operator == because the definition of Equal(.,.) is slightly + //! different. It compares whether the 2 arguments are equal in an eps-interval + inline friend bool Equal(const AccelerationTwist& a,const AccelerationTwist& b,double eps=epsilon); + + //! The literal equality operator==(), also identical. + inline friend bool operator==(const AccelerationTwist& a,const AccelerationTwist& b); + //! The literal inequality operator!=(). + inline friend bool operator!=(const AccelerationTwist& a,const AccelerationTwist& b); + +// = Friends + friend class Rotation; + friend class Frame; + +}; +*/ +/** + * \brief represents the combination of a force and a torque. + * + * This class represents a Wrench. A Wrench is the force and torque applied at a point + */ +class Wrench +{ +public: + Vector force; //!< Force that is applied at the origin of the current ref frame + Vector torque; //!< Torque that is applied at the origin of the current ref frame +public: + + //! Does initialise force and torque to zero via the underlying constructor of Vector + Wrench():force(),torque() {}; + Wrench(const Vector& _force,const Vector& _torque):force(_force),torque(_torque) {}; + +// = Operators + inline Wrench& operator-=(const Wrench& arg); + inline Wrench& operator+=(const Wrench& arg); + + //! index-based access to components, first force(0..2), then torque(3..5) + inline double& operator()(int i); + + //! index-based access to components, first force(0..2), then torque(3..5) + //! 
for use with a const Wrench + inline double operator()(int i) const; + + double operator[] ( int index ) const + { + return this->operator() ( index ); + } + + double& operator[] ( int index ) + { + return this->operator() ( index ); + } + + //! Scalar multiplication + inline friend Wrench operator*(const Wrench& lhs,double rhs); + //! Scalar multiplication + inline friend Wrench operator*(double lhs,const Wrench& rhs); + //! Scalar division + inline friend Wrench operator/(const Wrench& lhs,double rhs); + + inline friend Wrench operator+(const Wrench& lhs,const Wrench& rhs); + inline friend Wrench operator-(const Wrench& lhs,const Wrench& rhs); + + //! An unary - operator + inline friend Wrench operator-(const Wrench& arg); + + //! Sets the Wrench to Zero, to have a uniform function that sets an object or + //! double to zero. + inline friend void SetToZero(Wrench& v); + + //! @return a zero Wrench + static inline Wrench Zero(); + + //! Reverses the sign of the current Wrench + inline void ReverseSign(); + + //! Changes the reference point of the wrench. + //! The vector v_base_AB is expressed in the same base as the twist + //! The vector v_base_AB is a vector from the old point to + //! the new point. + //! + //! Complexity : 6M+6A + inline Wrench RefPoint(const Vector& v_base_AB) const; + + + //! do not use operator == because the definition of Equal(.,.) is slightly + //! different. It compares whether the 2 arguments are equal in an eps-interval + inline friend bool Equal(const Wrench& a,const Wrench& b,double eps=epsilon); + + //! The literal equality operator==(), also identical. + inline friend bool operator==(const Wrench& a,const Wrench& b); + //! The literal inequality operator!=(). + inline friend bool operator!=(const Wrench& a,const Wrench& b); + + friend class Rotation; + friend class Frame; + + +}; + + +//! 2D version of Vector +class Vector2 +{ + double data[2]; +public: + //! Does not initialise to Zero(). + Vector2() {data[0]=data[1] = 0.0;} + inline Vector2(double x,double y); + inline Vector2(const Vector2& arg); + inline Vector2(double* xyz); + inline Vector2(float* xyz); + + inline Vector2& operator = ( const Vector2& arg); + + //! Access to elements, range checked when NDEBUG is not set, from 0..1 + inline double operator()(int index) const; + + //! Access to elements, range checked when NDEBUG is not set, from 0..1 + inline double& operator() (int index); + + //! store vector components in array + inline void GetValue(double* xy) const; + + inline void ReverseSign(); + inline Vector2& operator-=(const Vector2& arg); + inline Vector2& operator +=(const Vector2& arg); + + + inline friend Vector2 operator*(const Vector2& lhs,double rhs); + inline friend Vector2 operator*(double lhs,const Vector2& rhs); + inline friend Vector2 operator/(const Vector2& lhs,double rhs); + inline friend Vector2 operator+(const Vector2& lhs,const Vector2& rhs); + inline friend Vector2 operator-(const Vector2& lhs,const Vector2& rhs); + inline friend Vector2 operator*(const Vector2& lhs,const Vector2& rhs); + inline friend Vector2 operator-(const Vector2& arg); + inline friend void SetToZero(Vector2& v); + + //! @return a zero 2D vector. + inline static Vector2 Zero(); + + /** Normalizes this vector and returns it norm + * makes v a unitvector and returns the norm of v. + * if v is smaller than eps, Vector(1,0,0) is returned with norm 0. + * if this is not good, check the return value of this method. + */ + double Normalize(double eps=epsilon); + + //! 
@return the norm of the vector + inline double Norm() const; + + //! projects v in its XY plane, and sets *this to these values + inline void Set3DXY(const Vector& v); + + //! projects v in its YZ plane, and sets *this to these values + inline void Set3DYZ(const Vector& v); + + //! projects v in its ZX plane, and sets *this to these values + inline void Set3DZX(const Vector& v); + + //! projects v_someframe in the XY plane of F_someframe_XY, + //! and sets *this to these values + //! expressed wrt someframe. + inline void Set3DPlane(const Frame& F_someframe_XY,const Vector& v_someframe); + + + //! do not use operator == because the definition of Equal(.,.) is slightly + //! different. It compares whether the 2 arguments are equal in an eps-interval + inline friend bool Equal(const Vector2& a,const Vector2& b,double eps=epsilon); + + friend class Rotation2; +}; + + +//! A 2D Rotation class, for conventions see Rotation. For further documentation +//! of the methods see Rotation class. +class Rotation2 +{ + double s,c; + //! c,s represent cos(angle), sin(angle), this also represents first col. of rot matrix + //! from outside, this class behaves as if it would store the complete 2x2 matrix. +public: + //! Default constructor does NOT initialise to Zero(). + Rotation2() {c=1.0;s=0.0;} + + explicit Rotation2(double angle_rad):s(sin(angle_rad)),c(cos(angle_rad)) {} + + Rotation2(double ca,double sa):s(sa),c(ca){} + + inline Rotation2& operator=(const Rotation2& arg); + inline Vector2 operator*(const Vector2& v) const; + //! Access to elements 0..1,0..1, bounds are checked when NDEBUG is not set + inline double operator() (int i,int j) const; + + inline friend Rotation2 operator *(const Rotation2& lhs,const Rotation2& rhs); + + inline void SetInverse(); + inline Rotation2 Inverse() const; + inline Vector2 Inverse(const Vector2& v) const; + + inline void SetIdentity(); + inline static Rotation2 Identity(); + + + //! The SetRot.. functions set the value of *this to the appropriate rotation matrix. + inline void SetRot(double angle); + + //! The Rot... static functions give the value of the appropriate rotation matrix bac + inline static Rotation2 Rot(double angle); + + //! Gets the angle (in radians) + inline double GetRot() const; + + //! do not use operator == because the definition of Equal(.,.) is slightly + //! different. It compares whether the 2 arguments are equal in an eps-interval + inline friend bool Equal(const Rotation2& a,const Rotation2& b,double eps=epsilon); +}; + +//! A 2D frame class, for further documentation see the Frames class +//! for methods with unchanged semantics. +class Frame2 + { +public: + Vector2 p; //!< origine of the Frame + Rotation2 M; //!< Orientation of the Frame + +public: + + inline Frame2(const Rotation2& R,const Vector2& V); + explicit inline Frame2(const Vector2& V); + explicit inline Frame2(const Rotation2& R); + inline Frame2(void); + inline Frame2(const Frame2& arg); + inline void Make4x4(double* d); + + //! Treats a frame as a 3x3 matrix and returns element i,j + //! Access to elements 0..2,0..2, bounds are checked when NDEBUG is not set + inline double operator()(int i,int j); + + //! Treats a frame as a 4x4 matrix and returns element i,j + //! 
Access to elements 0..3,0..3, bounds are checked when NDEBUG is not set + inline double operator() (int i,int j) const; + + inline void SetInverse(); + inline Frame2 Inverse() const; + inline Vector2 Inverse(const Vector2& arg) const; + inline Frame2& operator = (const Frame2& arg); + inline Vector2 operator * (const Vector2& arg); + inline friend Frame2 operator *(const Frame2& lhs,const Frame2& rhs); + inline void SetIdentity(); + inline void Integrate(const Twist& t_this,double frequency); + inline static Frame2 Identity() { + Frame2 tmp; + tmp.SetIdentity(); + return tmp; + } + inline friend bool Equal(const Frame2& a,const Frame2& b,double eps=epsilon); +}; + +IMETHOD Vector diff(const Vector& a,const Vector& b,double dt=1); +IMETHOD Vector diff(const Rotation& R_a_b1,const Rotation& R_a_b2,double dt=1); +IMETHOD Twist diff(const Frame& F_a_b1,const Frame& F_a_b2,double dt=1); +IMETHOD Twist diff(const Twist& a,const Twist& b,double dt=1); +IMETHOD Wrench diff(const Wrench& W_a_p1,const Wrench& W_a_p2,double dt=1); +IMETHOD Vector addDelta(const Vector& a,const Vector&da,double dt=1); +IMETHOD Rotation addDelta(const Rotation& a,const Vector&da,double dt=1); +IMETHOD Frame addDelta(const Frame& a,const Twist& da,double dt=1); +IMETHOD Twist addDelta(const Twist& a,const Twist&da,double dt=1); +IMETHOD Wrench addDelta(const Wrench& a,const Wrench&da,double dt=1); +#ifdef KDL_INLINE +// #include "vector.inl" +// #include "wrench.inl" + //#include "rotation.inl" + //#include "frame.inl" + //#include "twist.inl" + //#include "vector2.inl" + //#include "rotation2.inl" + //#include "frame2.inl" +#include "frames.inl" +#endif + + + +} + + +#endif diff --git a/intern/itasc/kdl/frames.inl b/intern/itasc/kdl/frames.inl new file mode 100644 index 00000000000..9a176070171 --- /dev/null +++ b/intern/itasc/kdl/frames.inl @@ -0,0 +1,1390 @@ +/*************************************************************************** + frames.inl - description + ------------------------- + begin : June 2006 + copyright : (C) 2006 Erwin Aertbelien + email : firstname.lastname@mech.kuleuven.ac.be + + History (only major changes)( AUTHOR-Description ) : + + *************************************************************************** + * This library is free software; you can redistribute it and/or * + * modify it under the terms of the GNU Lesser General Public * + * License as published by the Free Software Foundation; either * + * version 2.1 of the License, or (at your option) any later version. * + * * + * This library is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * + * Lesser General Public License for more details. 
* + * * + * You should have received a copy of the GNU Lesser General Public * + * License along with this library; if not, write to the Free Software * + * Foundation, Inc., 59 Temple Place, * + * Suite 330, Boston, MA 02111-1307 USA * + * * + ***************************************************************************/ + + +IMETHOD Vector::Vector(const Vector & arg) +{ + data[0] = arg.data[0]; + data[1] = arg.data[1]; + data[2] = arg.data[2]; +} + +IMETHOD Vector::Vector(double x,double y, double z) +{ + data[0]=x;data[1]=y;data[2]=z; +} + +IMETHOD Vector::Vector(double* xyz) +{ + data[0]=xyz[0];data[1]=xyz[1];data[2]=xyz[2]; +} + +IMETHOD Vector::Vector(float* xyz) +{ + data[0]=xyz[0];data[1]=xyz[1];data[2]=xyz[2]; +} + +IMETHOD void Vector::GetValue(double* xyz) const +{ + xyz[0]=data[0];xyz[1]=data[1];xyz[2]=data[2]; +} + + +IMETHOD Vector& Vector::operator =(const Vector & arg) +{ + data[0] = arg.data[0]; + data[1] = arg.data[1]; + data[2] = arg.data[2]; + return *this; +} + +IMETHOD Vector operator +(const Vector & lhs,const Vector& rhs) +{ + Vector tmp; + tmp.data[0] = lhs.data[0]+rhs.data[0]; + tmp.data[1] = lhs.data[1]+rhs.data[1]; + tmp.data[2] = lhs.data[2]+rhs.data[2]; + return tmp; +} + +IMETHOD Vector operator -(const Vector & lhs,const Vector& rhs) +{ + Vector tmp; + tmp.data[0] = lhs.data[0]-rhs.data[0]; + tmp.data[1] = lhs.data[1]-rhs.data[1]; + tmp.data[2] = lhs.data[2]-rhs.data[2]; + return tmp; +} + +IMETHOD double Vector::x() const { return data[0]; } +IMETHOD double Vector::y() const { return data[1]; } +IMETHOD double Vector::z() const { return data[2]; } + +IMETHOD void Vector::x( double _x ) { data[0] = _x; } +IMETHOD void Vector::y( double _y ) { data[1] = _y; } +IMETHOD void Vector::z( double _z ) { data[2] = _z; } + +Vector operator *(const Vector& lhs,double rhs) +{ + Vector tmp; + tmp.data[0] = lhs.data[0]*rhs; + tmp.data[1] = lhs.data[1]*rhs; + tmp.data[2] = lhs.data[2]*rhs; + return tmp; +} + +Vector operator *(double lhs,const Vector& rhs) +{ + Vector tmp; + tmp.data[0] = lhs*rhs.data[0]; + tmp.data[1] = lhs*rhs.data[1]; + tmp.data[2] = lhs*rhs.data[2]; + return tmp; +} + +Vector operator /(const Vector& lhs,double rhs) +{ + Vector tmp; + tmp.data[0] = lhs.data[0]/rhs; + tmp.data[1] = lhs.data[1]/rhs; + tmp.data[2] = lhs.data[2]/rhs; + return tmp; +} + +Vector operator *(const Vector & lhs,const Vector& rhs) +// Complexity : 6M+3A +{ + Vector tmp; + tmp.data[0] = lhs.data[1]*rhs.data[2]-lhs.data[2]*rhs.data[1]; + tmp.data[1] = lhs.data[2]*rhs.data[0]-lhs.data[0]*rhs.data[2]; + tmp.data[2] = lhs.data[0]*rhs.data[1]-lhs.data[1]*rhs.data[0]; + return tmp; +} + +Vector& Vector::operator +=(const Vector & arg) +// Complexity : 3A +{ + data[0]+=arg.data[0]; + data[1]+=arg.data[1]; + data[2]+=arg.data[2]; + return *this; +} + +Vector& Vector::operator -=(const Vector & arg) +// Complexity : 3A +{ + data[0]-=arg.data[0]; + data[1]-=arg.data[1]; + data[2]-=arg.data[2]; + return *this; +} + +Vector Vector::Zero() +{ + return Vector(0,0,0); +} + +double Vector::operator()(int index) const { + FRAMES_CHECKI((0<=index)&&(index<=2)); + return data[index]; +} + +double& Vector::operator () (int index) +{ + FRAMES_CHECKI((0<=index)&&(index<=2)); + return data[index]; +} + +IMETHOD Vector Normalize(const Vector& a, double eps) +{ + double l=a.Norm(); + return (lforce, + this->torque+this->force*v_base_AB + ); +} + + +Wrench& Wrench::operator-=(const Wrench& arg) +{ + torque-=arg.torque; + force -=arg.force; + return *this; +} + +Wrench& Wrench::operator+=(const Wrench& arg) 
+{ + torque+=arg.torque; + force +=arg.force; + return *this; +} + +double& Wrench::operator()(int i) +{ + // assert((0<=i)&&(i<6)); done by underlying routines + if (i<3) + return force(i); + else + return torque(i-3); +} + +double Wrench::operator()(int i) const +{ + // assert((0<=i)&&(i<6)); done by underlying routines + if (i<3) + return force(i); + else + return torque(i-3); +} + + +Wrench operator*(const Wrench& lhs,double rhs) +{ + return Wrench(lhs.force*rhs,lhs.torque*rhs); +} + +Wrench operator*(double lhs,const Wrench& rhs) +{ + return Wrench(lhs*rhs.force,lhs*rhs.torque); +} + +Wrench operator/(const Wrench& lhs,double rhs) +{ + return Wrench(lhs.force/rhs,lhs.torque/rhs); +} + +// addition of Wrench's +Wrench operator+(const Wrench& lhs,const Wrench& rhs) +{ + return Wrench(lhs.force+rhs.force,lhs.torque+rhs.torque); +} + +Wrench operator-(const Wrench& lhs,const Wrench& rhs) +{ + return Wrench(lhs.force-rhs.force,lhs.torque-rhs.torque); +} + +// unary - +Wrench operator-(const Wrench& arg) +{ + return Wrench(-arg.force,-arg.torque); +} + +Twist Frame::operator * (const Twist& arg) const +// Complexity : 24M+18A +{ + Twist tmp; + tmp.rot = M*arg.rot; + tmp.vel = M*arg.vel+p*tmp.rot; + return tmp; +} +Twist Frame::Inverse(const Twist& arg) const +{ + Twist tmp; + tmp.rot = M.Inverse(arg.rot); + tmp.vel = M.Inverse(arg.vel-p*arg.rot); + return tmp; +} + +Twist Twist::Zero() +{ + return Twist(Vector::Zero(),Vector::Zero()); +} + + +void Twist::ReverseSign() +{ + vel.ReverseSign(); + rot.ReverseSign(); +} + +Twist Twist::RefPoint(const Vector& v_base_AB) const + // Changes the reference point of the twist. + // The vector v_base_AB is expressed in the same base as the twist + // The vector v_base_AB is a vector from the old point to + // the new point. 
+ // Complexity : 6M+6A +{ + return Twist(this->vel+this->rot*v_base_AB,this->rot); +} + +Twist& Twist::operator-=(const Twist& arg) +{ + vel-=arg.vel; + rot -=arg.rot; + return *this; +} + +Twist& Twist::operator+=(const Twist& arg) +{ + vel+=arg.vel; + rot +=arg.rot; + return *this; +} + +double& Twist::operator()(int i) +{ + // assert((0<=i)&&(i<6)); done by underlying routines + if (i<3) + return vel(i); + else + return rot(i-3); +} + +double Twist::operator()(int i) const +{ + // assert((0<=i)&&(i<6)); done by underlying routines + if (i<3) + return vel(i); + else + return rot(i-3); +} + + +Twist operator*(const Twist& lhs,double rhs) +{ + return Twist(lhs.vel*rhs,lhs.rot*rhs); +} + +Twist operator*(double lhs,const Twist& rhs) +{ + return Twist(lhs*rhs.vel,lhs*rhs.rot); +} + +Twist operator/(const Twist& lhs,double rhs) +{ + return Twist(lhs.vel/rhs,lhs.rot/rhs); +} + +// addition of Twist's +Twist operator+(const Twist& lhs,const Twist& rhs) +{ + return Twist(lhs.vel+rhs.vel,lhs.rot+rhs.rot); +} + +Twist operator-(const Twist& lhs,const Twist& rhs) +{ + return Twist(lhs.vel-rhs.vel,lhs.rot-rhs.rot); +} + +// unary - +Twist operator-(const Twist& arg) +{ + return Twist(-arg.vel,-arg.rot); +} + +Frame::Frame(const Rotation & R) +{ + M=R; + p=Vector::Zero(); +} + +Frame::Frame(const Vector & V) +{ + M = Rotation::Identity(); + p = V; +} + +Frame::Frame(const Rotation & R, const Vector & V) +{ + M = R; + p = V; +} + + Frame operator *(const Frame& lhs,const Frame& rhs) +// Complexity : 36M+36A +{ + return Frame(lhs.M*rhs.M,lhs.M*rhs.p+lhs.p); +} + +Vector Frame::operator *(const Vector & arg) const +{ + return M*arg+p; +} + +Vector Frame::Inverse(const Vector& arg) const +{ + return M.Inverse(arg-p); +} + +Frame Frame::Inverse() const +{ + return Frame(M.Inverse(),-M.Inverse(p)); +} + + +Frame& Frame::operator =(const Frame & arg) +{ + M = arg.M; + p = arg.p; + return *this; +} + +Frame::Frame(const Frame & arg) : + p(arg.p),M(arg.M) +{} + + +void Vector::ReverseSign() +{ + data[0] = -data[0]; + data[1] = -data[1]; + data[2] = -data[2]; +} + + + +Vector operator-(const Vector & arg) +{ + Vector tmp; + tmp.data[0]=-arg.data[0]; + tmp.data[1]=-arg.data[1]; + tmp.data[2]=-arg.data[2]; + return tmp; +} + +void Vector::Set2DXY(const Vector2& v) +// a 3D vector where the 2D vector v is put in the XY plane +{ + data[0]=v(0); + data[1]=v(1); + data[2]=0; + +} +void Vector::Set2DYZ(const Vector2& v) +// a 3D vector where the 2D vector v is put in the YZ plane +{ + data[1]=v(0); + data[2]=v(1); + data[0]=0; + +} + +void Vector::Set2DZX(const Vector2& v) +// a 3D vector where the 2D vector v is put in the ZX plane +{ + data[2]=v(0); + data[0]=v(1); + data[1]=0; + +} + + + + + +double& Rotation::operator()(int i,int j) { + FRAMES_CHECKI((0<=i)&&(i<=2)&&(0<=j)&&(j<=2)); + return data[i*3+j]; +} + +double Rotation::operator()(int i,int j) const { + FRAMES_CHECKI((0<=i)&&(i<=2)&&(0<=j)&&(j<=2)); + return data[i*3+j]; +} + +Rotation::Rotation( double Xx,double Yx,double Zx, + double Xy,double Yy,double Zy, + double Xz,double Yz,double Zz) +{ + data[0] = Xx;data[1]=Yx;data[2]=Zx; + data[3] = Xy;data[4]=Yy;data[5]=Zy; + data[6] = Xz;data[7]=Yz;data[8]=Zz; +} + + +Rotation::Rotation(const Vector& x,const Vector& y,const Vector& z) +{ + data[0] = x.data[0];data[3] = x.data[1];data[6] = x.data[2]; + data[1] = y.data[0];data[4] = y.data[1];data[7] = y.data[2]; + data[2] = z.data[0];data[5] = z.data[1];data[8] = z.data[2]; +} + +Rotation& Rotation::operator=(const Rotation& arg) { + int count=9; + while 
(count--) data[count] = arg.data[count]; + return *this; +} + +Vector Rotation::operator*(const Vector& v) const { +// Complexity : 9M+6A + return Vector( + data[0]*v.data[0] + data[1]*v.data[1] + data[2]*v.data[2], + data[3]*v.data[0] + data[4]*v.data[1] + data[5]*v.data[2], + data[6]*v.data[0] + data[7]*v.data[1] + data[8]*v.data[2] + ); +} + +Twist Rotation::operator * (const Twist& arg) const + // Transformation of the base to which the twist is expressed. + // look at Frame*Twist for a transformation that also transforms + // the velocity reference point. + // Complexity : 18M+12A +{ + return Twist((*this)*arg.vel,(*this)*arg.rot); +} + +Wrench Rotation::operator * (const Wrench& arg) const + // Transformation of the base to which the wrench is expressed. + // look at Frame*Twist for a transformation that also transforms + // the force reference point. +{ + return Wrench((*this)*arg.force,(*this)*arg.torque); +} + +Rotation Rotation::Identity() { + return Rotation(1,0,0,0,1,0,0,0,1); +} +// *this = *this * ROT(X,angle) +void Rotation::DoRotX(double angle) +{ + double cs = cos(angle); + double sn = sin(angle); + double x1,x2,x3; + x1 = cs* (*this)(0,1) + sn* (*this)(0,2); + x2 = cs* (*this)(1,1) + sn* (*this)(1,2); + x3 = cs* (*this)(2,1) + sn* (*this)(2,2); + (*this)(0,2) = -sn* (*this)(0,1) + cs* (*this)(0,2); + (*this)(1,2) = -sn* (*this)(1,1) + cs* (*this)(1,2); + (*this)(2,2) = -sn* (*this)(2,1) + cs* (*this)(2,2); + (*this)(0,1) = x1; + (*this)(1,1) = x2; + (*this)(2,1) = x3; +} + +void Rotation::DoRotY(double angle) +{ + double cs = cos(angle); + double sn = sin(angle); + double x1,x2,x3; + x1 = cs* (*this)(0,0) - sn* (*this)(0,2); + x2 = cs* (*this)(1,0) - sn* (*this)(1,2); + x3 = cs* (*this)(2,0) - sn* (*this)(2,2); + (*this)(0,2) = sn* (*this)(0,0) + cs* (*this)(0,2); + (*this)(1,2) = sn* (*this)(1,0) + cs* (*this)(1,2); + (*this)(2,2) = sn* (*this)(2,0) + cs* (*this)(2,2); + (*this)(0,0) = x1; + (*this)(1,0) = x2; + (*this)(2,0) = x3; +} + +void Rotation::DoRotZ(double angle) +{ + double cs = cos(angle); + double sn = sin(angle); + double x1,x2,x3; + x1 = cs* (*this)(0,0) + sn* (*this)(0,1); + x2 = cs* (*this)(1,0) + sn* (*this)(1,1); + x3 = cs* (*this)(2,0) + sn* (*this)(2,1); + (*this)(0,1) = -sn* (*this)(0,0) + cs* (*this)(0,1); + (*this)(1,1) = -sn* (*this)(1,0) + cs* (*this)(1,1); + (*this)(2,1) = -sn* (*this)(2,0) + cs* (*this)(2,1); + (*this)(0,0) = x1; + (*this)(1,0) = x2; + (*this)(2,0) = x3; +} + + +Rotation Rotation::RotX(double angle) { + double cs=cos(angle); + double sn=sin(angle); + return Rotation(1,0,0,0,cs,-sn,0,sn,cs); +} +Rotation Rotation::RotY(double angle) { + double cs=cos(angle); + double sn=sin(angle); + return Rotation(cs,0,sn,0,1,0,-sn,0,cs); +} +Rotation Rotation::RotZ(double angle) { + double cs=cos(angle); + double sn=sin(angle); + return Rotation(cs,-sn,0,sn,cs,0,0,0,1); +} + + + + +void Frame::Integrate(const Twist& t_this,double samplefrequency) +{ + double n = t_this.rot.Norm()/samplefrequency; + if (n +#include +#include +#include + +namespace KDL { + + +std::ostream& operator << (std::ostream& os,const Vector& v) { + os << "[" << std::setw(KDL_FRAME_WIDTH) << v(0) << "," << std::setw(KDL_FRAME_WIDTH)<> operators. 
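// Editorial sketch (not part of the original KDL sources): the stream operators in
// this file are meant to round-trip values through the bracketed text format
// documented in frames_io.hpp, assuming the Eat/EatWord helpers from
// utilities/utility_io.h skip whitespace and comments as that documentation states.
//
//     #include <sstream>
//     std::stringstream ss;
//     KDL::Vector v(1.0, 2.0, 3.0);
//     ss << v;                      // writes "[ 1, 2, 3]", padded to KDL_FRAME_WIDTH
//     KDL::Vector w;
//     ss >> w;                      // parses the bracketed triple back into w
//     std::stringstream zs("ZERO");
//     zs >> w;                      // the keyword ZERO yields Vector::Zero()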
+std::ostream& operator << (std::ostream& os,const Rotation2& R) { + os << "[" << R.GetRot()*rad2deg << "]"; + return os; +} + +std::ostream& operator << (std::ostream& os, const Frame2& T) +{ + os << T.M << T.p; + return os; +} + +std::istream& operator >> (std::istream& is,Vector& v) +{ IOTrace("Stream input Vector (vector or ZERO)"); + char storage[10]; + EatWord(is,"[]",storage,10); + if (strlen(storage)==0) { + Eat(is,'['); + is >> v(0); + Eat(is,','); + is >> v(1); + Eat(is,','); + is >> v(2); + EatEnd(is,']'); + IOTracePop(); + return is; + } + if (strcmp(storage,"ZERO")==0) { + v = Vector::Zero(); + IOTracePop(); + return is; + } + throw Error_Frame_Vector_Unexpected_id(); +} + +std::istream& operator >> (std::istream& is,Twist& v) +{ IOTrace("Stream input Twist"); + Eat(is,'['); + is >> v.vel(0); + Eat(is,','); + is >> v.vel(1); + Eat(is,','); + is >> v.vel(2); + Eat(is,','); + is >> v.rot(0); + Eat(is,','); + is >> v.rot(1); + Eat(is,','); + is >> v.rot(2); + EatEnd(is,']'); + IOTracePop(); + return is; +} + +std::istream& operator >> (std::istream& is,Wrench& v) +{ IOTrace("Stream input Wrench"); + Eat(is,'['); + is >> v.force(0); + Eat(is,','); + is >> v.force(1); + Eat(is,','); + is >> v.force(2); + Eat(is,','); + is >> v.torque(0); + Eat(is,','); + is >> v.torque(1); + Eat(is,','); + is >> v.torque(2); + EatEnd(is,']'); + IOTracePop(); + return is; +} + +std::istream& operator >> (std::istream& is,Rotation& r) +{ IOTrace("Stream input Rotation (Matrix or EULERZYX, EULERZYZ,RPY, ROT, IDENTITY)"); + char storage[10]; + EatWord(is,"[]",storage,10); + if (strlen(storage)==0) { + Eat(is,'['); + for (int i=0;i<3;i++) { + is >> r(i,0); + Eat(is,',') ; + is >> r(i,1); + Eat(is,','); + is >> r(i,2); + if (i<2) + Eat(is,';'); + else + EatEnd(is,']'); + } + IOTracePop(); + return is; + } + Vector v; + if (strcmp(storage,"EULERZYX")==0) { + is >> v; + v=v*deg2rad; + r = Rotation::EulerZYX(v(0),v(1),v(2)); + IOTracePop(); + return is; + } + if (strcmp(storage,"EULERZYZ")==0) { + is >> v; + v=v*deg2rad; + r = Rotation::EulerZYZ(v(0),v(1),v(2)); + IOTracePop(); + return is; + } + if (strcmp(storage,"RPY")==0) { + is >> v; + v=v*deg2rad; + r = Rotation::RPY(v(0),v(1),v(2)); + IOTracePop(); + return is; + } + if (strcmp(storage,"ROT")==0) { + is >> v; + double angle; + Eat(is,'['); + is >> angle; + EatEnd(is,']'); + r = Rotation::Rot(v,angle*deg2rad); + IOTracePop(); + return is; + } + if (strcmp(storage,"IDENTITY")==0) { + r = Rotation::Identity(); + IOTracePop(); + return is; + } + throw Error_Frame_Rotation_Unexpected_id(); + return is; +} + +std::istream& operator >> (std::istream& is,Frame& T) +{ IOTrace("Stream input Frame (Rotation,Vector) or DH[...]"); + char storage[10]; + EatWord(is,"[",storage,10); + if (strlen(storage)==0) { + Eat(is,'['); + is >> T.M; + is >> T.p; + EatEnd(is,']'); + IOTracePop(); + return is; + } + if (strcmp(storage,"DH")==0) { + double a,alpha,d,theta; + Eat(is,'['); + is >> a; + Eat(is,','); + is >> alpha; + Eat(is,','); + is >> d; + Eat(is,','); + is >> theta; + EatEnd(is,']'); + T = Frame::DH(a,alpha*deg2rad,d,theta*deg2rad); + IOTracePop(); + return is; + } + throw Error_Frame_Frame_Unexpected_id(); + return is; +} + +std::istream& operator >> (std::istream& is,Vector2& v) +{ IOTrace("Stream input Vector2"); + Eat(is,'['); + is >> v(0); + Eat(is,','); + is >> v(1); + EatEnd(is,']'); + IOTracePop(); + return is; +} +std::istream& operator >> (std::istream& is,Rotation2& r) +{ IOTrace("Stream input Rotation2"); + Eat(is,'['); + double val; + is >> val; + 
r.Rot(val*deg2rad);
+   EatEnd(is,']');
+   IOTracePop();
+   return is;
+}
+std::istream& operator >> (std::istream& is,Frame2& T)
+{  IOTrace("Stream input Frame2");
+   is >> T.M;
+   is >> T.p;
+   IOTracePop();
+   return is;
+}
+
+} // namespace Frame
diff --git a/intern/itasc/kdl/frames_io.hpp b/intern/itasc/kdl/frames_io.hpp
new file mode 100644
index 00000000000..a358d27383f
--- /dev/null
+++ b/intern/itasc/kdl/frames_io.hpp
@@ -0,0 +1,114 @@
+/***************************************************************************
+                     frames_io.h -  description
+                     ---------------------------
+   begin     : June 2006
+   copyright : (C) 2006 Erwin Aertbelien
+   email     : firstname.lastname@mech.kuleuven.ac.be
+
+   History (only major changes)( AUTHOR-Description ) :
+
+   Ruben Smits - Added output for jacobian and jntarray 06/2007
+
+ ***************************************************************************
+ *   This library is free software; you can redistribute it and/or         *
+ *   modify it under the terms of the GNU Lesser General Public            *
+ *   License as published by the Free Software Foundation; either          *
+ *   version 2.1 of the License, or (at your option) any later version.    *
+ *                                                                         *
+ *   This library is distributed in the hope that it will be useful,       *
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of        *
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU      *
+ *   Lesser General Public License for more details.                       *
+ *                                                                         *
+ *   You should have received a copy of the GNU Lesser General Public      *
+ *   License along with this library; if not, write to the Free Software   *
+ *   Foundation, Inc., 59 Temple Place,                                    *
+ *   Suite 330, Boston, MA 02111-1307 USA                                  *
+ *                                                                         *
+ ***************************************************************************/
+/**
+//
+// \file
+//   Defines routines for I/O of Frame and related objects.
+// \verbatim
+//   Spaces, tabs and newlines do not have any importance.
+//   Comments are allowed C-style,C++-style, make/perl/csh -style
+//   Description of the I/O :
+//     Vector   : OUTPUT : e.g. [10,20,30]
+//                INPUT  :
+//                   1) [10,20,30]
+//                   2) ZERO
+//     Twist    : e.g. [1,2,3,4,5,6]
+//                where [1,2,3] is velocity vector
+//                where [4,5,6] is rotational velocity vector
+//     Wrench   : e.g. [1,2,3,4,5,6]
+//                where [1,2,3] represents a force vector
+//                where [4,5,6] represents a torque vector
+//     Rotation : output :
+//                 [1,2,3;
+//                  4,5,6;
+//                  7,8,9] cfr definition of Rotation object.
+//                input :
+//                   1) like the output
+//                   2) EULERZYX, EULERZYZ or RPY word followed by a vector, e.g. :
+//                        EULERZYX[10,20,30]
+//                      (ANGLES are always expressed in DEGREES for I/O)
+//                      (ANGLES are always expressed in RADIANS for internal representation)
+//                   3) ROT [1,2,3] [20]  Rotates around axis [1,2,3] with an angle
+//                      of 20 degrees.
+//                   4) IDENTITY returns the identity rotation matrix.
+//     Frames   : output : [ Rotationmatrix positionvector ]
+//                 e.g. [ [1,0,0;0,1,0;0,0,1] [1,2,3] ]
+//                Input :
+//                   1) [ Rotationmatrix positionvector ]
+//                   2) DH [ 10,10,50,30]  Denavit-Hartenberg representation
+//                      ( is in fact not the representation of a Frame, but more
+//                        limited, cfr. documentation of Frame object.)
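//                Illustrative usage (an editorial sketch, not in the original
//                description): reading a Denavit-Hartenberg frame from a stream,
//                with the angles given in degrees as stated above:
//                   std::istringstream is("DH[10,90,5,30]");
//                   KDL::Frame T;
//                   is >> T;   // calls Frame::DH(10, 90*deg2rad, 5, 30*deg2rad)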
+// \endverbatim +// +// \warning +// You can use iostream.h or iostream header files for file I/O, +// if one declares the define WANT_STD_IOSTREAM then the standard C++ +// iostreams headers are included instead of the compiler-dependent version +// + * + ****************************************************************************/ +#ifndef FRAMES_IO_H +#define FRAMES_IO_H + +#include "utilities/utility_io.h" +#include "frames.hpp" +#include "jntarray.hpp" +#include "jacobian.hpp" + +namespace KDL { + + //! width to be used when printing variables out with frames_io.h + //! global variable, can be changed. + + + // I/O to C++ stream. + std::ostream& operator << (std::ostream& os,const Vector& v); + std::ostream& operator << (std::ostream& os,const Rotation& R); + std::ostream& operator << (std::ostream& os,const Frame& T); + std::ostream& operator << (std::ostream& os,const Twist& T); + std::ostream& operator << (std::ostream& os,const Wrench& T); + std::ostream& operator << (std::ostream& os,const Vector2& v); + std::ostream& operator << (std::ostream& os,const Rotation2& R); + std::ostream& operator << (std::ostream& os,const Frame2& T); + + + + std::istream& operator >> (std::istream& is,Vector& v); + std::istream& operator >> (std::istream& is,Rotation& R); + std::istream& operator >> (std::istream& is,Frame& T); + std::istream& operator >> (std::istream& os,Twist& T); + std::istream& operator >> (std::istream& os,Wrench& T); + std::istream& operator >> (std::istream& is,Vector2& v); + std::istream& operator >> (std::istream& is,Rotation2& R); + std::istream& operator >> (std::istream& is,Frame2& T); + + +} // namespace Frame + +#endif diff --git a/intern/itasc/kdl/framevel.cpp b/intern/itasc/kdl/framevel.cpp new file mode 100644 index 00000000000..f70bef2e923 --- /dev/null +++ b/intern/itasc/kdl/framevel.cpp @@ -0,0 +1,27 @@ +/***************************************************************************** + * \author + * Erwin Aertbelien, Div. PMA, Dep. of Mech. Eng., K.U.Leuven + * + * \version + * ORO_Geometry V0.2 + * + * \par History + * - $log$ + * + * \par Release + * $Id$ + * $Name: $ + ****************************************************************************/ + + +#include "framevel.hpp" + +namespace KDL { + +#ifndef KDL_INLINE + #include "framevel.inl" +#endif + + + +} diff --git a/intern/itasc/kdl/framevel.hpp b/intern/itasc/kdl/framevel.hpp new file mode 100644 index 00000000000..21a7844f522 --- /dev/null +++ b/intern/itasc/kdl/framevel.hpp @@ -0,0 +1,382 @@ +/***************************************************************************** + * \file + * This file contains the definition of classes for a + * Rall Algebra of (subset of) the classes defined in frames, + * i.e. classes that contain a pair (value,derivative) and define operations on that pair + * this classes are usefull for automatic differentiation ( <-> symbolic diff , <-> numeric diff) + * Defines VectorVel, RotationVel, FrameVel. Look at Frames.h for details on how to work + * with Frame objects. + * \author + * Erwin Aertbelien, Div. PMA, Dep. of Mech. 
Eng., K.U.Leuven
+ *
+ * \version
+ *      ORO_Geometry V0.2
+ *
+ * \par History
+ *      - $log$
+ *
+ * \par Release
+ *      $Id: framevel.hpp 19905 2009-04-23 13:29:54Z ben2610 $
+ *      $Name: $
+ ****************************************************************************/
+
+#ifndef KDL_FRAMEVEL_H
+#define KDL_FRAMEVEL_H
+
+#include "utilities/utility.h"
+#include "utilities/rall1d.h"
+#include "utilities/traits.h"
+
+#include "frames.hpp"
+
+
+
+namespace KDL {
+
+typedef Rall1d<double> doubleVel;
+
+IMETHOD doubleVel diff(const doubleVel& a,const doubleVel& b,double dt=1.0) {
+    return doubleVel((b.t-a.t)/dt,(b.grad-a.grad)/dt);
+}
+
+IMETHOD doubleVel addDelta(const doubleVel& a,const doubleVel&da,double dt=1.0) {
+    return doubleVel(a.t+da.t*dt,a.grad+da.grad*dt);
+}
+
+IMETHOD void random(doubleVel& F) {
+    random(F.t);
+    random(F.grad);
+}
+IMETHOD void posrandom(doubleVel& F) {
+    posrandom(F.t);
+    posrandom(F.grad);
+}
+
+}
+
+template <>
+struct Traits<KDL::doubleVel> {
+    typedef double valueType;
+    typedef KDL::doubleVel derivType;
+};
+
+namespace KDL {
+
+class TwistVel;
+class VectorVel;
+class FrameVel;
+class RotationVel;
+
+class VectorVel
+// = TITLE
+//     A VectorVel is a Vector and its first derivative
+// = CLASS TYPE
+//     Concrete
+{
+public:
+    Vector p;       // position vector
+    Vector v;       // velocity vector
+public:
+    VectorVel():p(),v(){}
+    VectorVel(const Vector& _p,const Vector& _v):p(_p),v(_v) {}
+    explicit VectorVel(const Vector& _p):p(_p),v(Vector::Zero()) {}
+
+    Vector value() const { return p;}
+    Vector deriv() const { return v;}
+
+    IMETHOD VectorVel& operator = (const VectorVel& arg);
+    IMETHOD VectorVel& operator = (const Vector& arg);
+    IMETHOD VectorVel& operator += (const VectorVel& arg);
+    IMETHOD VectorVel& operator -= (const VectorVel& arg);
+    IMETHOD static VectorVel Zero();
+    IMETHOD void ReverseSign();
+    IMETHOD doubleVel Norm() const;
+    IMETHOD friend VectorVel operator + (const VectorVel& r1,const VectorVel& r2);
+    IMETHOD friend VectorVel operator - (const VectorVel& r1,const VectorVel& r2);
+    IMETHOD friend VectorVel operator + (const Vector& r1,const VectorVel& r2);
+    IMETHOD friend VectorVel operator - (const Vector& r1,const VectorVel& r2);
+    IMETHOD friend VectorVel operator + (const VectorVel& r1,const Vector& r2);
+    IMETHOD friend VectorVel operator - (const VectorVel& r1,const Vector& r2);
+    IMETHOD friend VectorVel operator * (const VectorVel& r1,const VectorVel& r2);
+    IMETHOD friend VectorVel operator * (const VectorVel& r1,const Vector& r2);
+    IMETHOD friend VectorVel operator * (const Vector& r1,const VectorVel& r2);
+    IMETHOD friend VectorVel operator * (const VectorVel& r1,double r2);
+    IMETHOD friend VectorVel operator * (double r1,const VectorVel& r2);
+    IMETHOD friend VectorVel operator * (const doubleVel& r1,const VectorVel& r2);
+    IMETHOD friend VectorVel operator * (const VectorVel& r2,const doubleVel& r1);
+    IMETHOD friend VectorVel operator*(const Rotation& R,const VectorVel& x);
+
+    IMETHOD friend VectorVel operator / (const VectorVel& r1,double r2);
+    IMETHOD friend VectorVel operator / (const VectorVel& r2,const doubleVel& r1);
+    IMETHOD friend void SetToZero(VectorVel& v);
+
+
+    IMETHOD friend bool Equal(const VectorVel& r1,const VectorVel& r2,double eps=epsilon);
+    IMETHOD friend bool Equal(const Vector& r1,const VectorVel& r2,double eps=epsilon);
+    IMETHOD friend bool Equal(const VectorVel& r1,const Vector& r2,double eps=epsilon);
+    IMETHOD friend VectorVel operator - (const VectorVel& r);
+    IMETHOD friend doubleVel dot(const VectorVel& lhs,const VectorVel&
rhs); + IMETHOD friend doubleVel dot(const VectorVel& lhs,const Vector& rhs); + IMETHOD friend doubleVel dot(const Vector& lhs,const VectorVel& rhs); +}; + + + +class RotationVel +// = TITLE +// An RotationVel is a Rotation and its first derivative, a rotation vector +// = CLASS TYPE +// Concrete +{ +public: + Rotation R; // Rotation matrix + Vector w; // rotation vector +public: + RotationVel():R(),w() {} + explicit RotationVel(const Rotation& _R):R(_R),w(Vector::Zero()){} + RotationVel(const Rotation& _R,const Vector& _w):R(_R),w(_w){} + + + Rotation value() const { return R;} + Vector deriv() const { return w;} + + + IMETHOD RotationVel& operator = (const RotationVel& arg); + IMETHOD RotationVel& operator = (const Rotation& arg); + IMETHOD VectorVel UnitX() const; + IMETHOD VectorVel UnitY() const; + IMETHOD VectorVel UnitZ() const; + IMETHOD static RotationVel Identity(); + IMETHOD RotationVel Inverse() const; + IMETHOD VectorVel Inverse(const VectorVel& arg) const; + IMETHOD VectorVel Inverse(const Vector& arg) const; + IMETHOD VectorVel operator*(const VectorVel& arg) const; + IMETHOD VectorVel operator*(const Vector& arg) const; + IMETHOD void DoRotX(const doubleVel& angle); + IMETHOD void DoRotY(const doubleVel& angle); + IMETHOD void DoRotZ(const doubleVel& angle); + IMETHOD static RotationVel RotX(const doubleVel& angle); + IMETHOD static RotationVel RotY(const doubleVel& angle); + IMETHOD static RotationVel RotZ(const doubleVel& angle); + IMETHOD static RotationVel Rot(const Vector& rotvec,const doubleVel& angle); + // rotvec has arbitrary norm + // rotation around a constant vector ! + IMETHOD static RotationVel Rot2(const Vector& rotvec,const doubleVel& angle); + // rotvec is normalized. + // rotation around a constant vector ! + IMETHOD friend RotationVel operator* (const RotationVel& r1,const RotationVel& r2); + IMETHOD friend RotationVel operator* (const Rotation& r1,const RotationVel& r2); + IMETHOD friend RotationVel operator* (const RotationVel& r1,const Rotation& r2); + IMETHOD friend bool Equal(const RotationVel& r1,const RotationVel& r2,double eps=epsilon); + IMETHOD friend bool Equal(const Rotation& r1,const RotationVel& r2,double eps=epsilon); + IMETHOD friend bool Equal(const RotationVel& r1,const Rotation& r2,double eps=epsilon); + + IMETHOD TwistVel Inverse(const TwistVel& arg) const; + IMETHOD TwistVel Inverse(const Twist& arg) const; + IMETHOD TwistVel operator * (const TwistVel& arg) const; + IMETHOD TwistVel operator * (const Twist& arg) const; +}; + + + + +class FrameVel +// = TITLE +// An FrameVel is a Frame and its first derivative, a Twist vector +// = CLASS TYPE +// Concrete +// = CAVEATS +// +{ +public: + RotationVel M; + VectorVel p; +public: + FrameVel(){} + + explicit FrameVel(const Frame& _T): + M(_T.M),p(_T.p) {} + + FrameVel(const Frame& _T,const Twist& _t): + M(_T.M,_t.rot),p(_T.p,_t.vel) {} + + FrameVel(const RotationVel& _M,const VectorVel& _p): + M(_M),p(_p) {} + + + Frame value() const { return Frame(M.value(),p.value());} + Twist deriv() const { return Twist(p.deriv(),M.deriv());} + + + IMETHOD FrameVel& operator = (const Frame& arg); + IMETHOD FrameVel& operator = (const FrameVel& arg); + IMETHOD static FrameVel Identity(); + IMETHOD FrameVel Inverse() const; + IMETHOD VectorVel Inverse(const VectorVel& arg) const; + IMETHOD VectorVel operator*(const VectorVel& arg) const; + IMETHOD VectorVel operator*(const Vector& arg) const; + IMETHOD VectorVel Inverse(const Vector& arg) const; + IMETHOD Frame GetFrame() const; + IMETHOD Twist GetTwist() 
const; + IMETHOD friend FrameVel operator * (const FrameVel& f1,const FrameVel& f2); + IMETHOD friend FrameVel operator * (const Frame& f1,const FrameVel& f2); + IMETHOD friend FrameVel operator * (const FrameVel& f1,const Frame& f2); + IMETHOD friend bool Equal(const FrameVel& r1,const FrameVel& r2,double eps=epsilon); + IMETHOD friend bool Equal(const Frame& r1,const FrameVel& r2,double eps=epsilon); + IMETHOD friend bool Equal(const FrameVel& r1,const Frame& r2,double eps=epsilon); + + IMETHOD TwistVel Inverse(const TwistVel& arg) const; + IMETHOD TwistVel Inverse(const Twist& arg) const; + IMETHOD TwistVel operator * (const TwistVel& arg) const; + IMETHOD TwistVel operator * (const Twist& arg) const; +}; + + + + + +//very similar to Wrench class. +class TwistVel +// = TITLE +// This class represents a TwistVel. This is a velocity and rotational velocity together +{ +public: + VectorVel vel; + VectorVel rot; +public: + +// = Constructors + TwistVel():vel(),rot() {}; + TwistVel(const VectorVel& _vel,const VectorVel& _rot):vel(_vel),rot(_rot) {}; + TwistVel(const Twist& p,const Twist& v):vel(p.vel, v.vel), rot( p.rot, v.rot) {}; + TwistVel(const Twist& p):vel(p.vel), rot( p.rot) {}; + + Twist value() const { + return Twist(vel.value(),rot.value()); + } + Twist deriv() const { + return Twist(vel.deriv(),rot.deriv()); + } +// = Operators + IMETHOD TwistVel& operator-=(const TwistVel& arg); + IMETHOD TwistVel& operator+=(const TwistVel& arg); + +// = External operators + IMETHOD friend TwistVel operator*(const TwistVel& lhs,double rhs); + IMETHOD friend TwistVel operator*(double lhs,const TwistVel& rhs); + IMETHOD friend TwistVel operator/(const TwistVel& lhs,double rhs); + + IMETHOD friend TwistVel operator*(const TwistVel& lhs,const doubleVel& rhs); + IMETHOD friend TwistVel operator*(const doubleVel& lhs,const TwistVel& rhs); + IMETHOD friend TwistVel operator/(const TwistVel& lhs,const doubleVel& rhs); + + IMETHOD friend TwistVel operator+(const TwistVel& lhs,const TwistVel& rhs); + IMETHOD friend TwistVel operator-(const TwistVel& lhs,const TwistVel& rhs); + IMETHOD friend TwistVel operator-(const TwistVel& arg); + IMETHOD friend void SetToZero(TwistVel& v); + + +// = Zero + static IMETHOD TwistVel Zero(); + +// = Reverse Sign + IMETHOD void ReverseSign(); + +// = Change Reference point + IMETHOD TwistVel RefPoint(const VectorVel& v_base_AB); + // Changes the reference point of the TwistVel. + // The VectorVel v_base_AB is expressed in the same base as the TwistVel + // The VectorVel v_base_AB is a VectorVel from the old point to + // the new point. + // Complexity : 6M+6A + + // = Equality operators + // do not use operator == because the definition of Equal(.,.) is slightly + // different. 
It compares whether the 2 arguments are equal in an eps-interval + IMETHOD friend bool Equal(const TwistVel& a,const TwistVel& b,double eps=epsilon); + IMETHOD friend bool Equal(const Twist& a,const TwistVel& b,double eps=epsilon); + IMETHOD friend bool Equal(const TwistVel& a,const Twist& b,double eps=epsilon); + +// = Conversion to other entities + IMETHOD Twist GetTwist() const; + IMETHOD Twist GetTwistDot() const; +// = Friends + friend class RotationVel; + friend class FrameVel; + +}; + +IMETHOD VectorVel diff(const VectorVel& a,const VectorVel& b,double dt=1.0) { + return VectorVel(diff(a.p,b.p,dt),diff(a.v,b.v,dt)); +} + +IMETHOD VectorVel addDelta(const VectorVel& a,const VectorVel&da,double dt=1.0) { + return VectorVel(addDelta(a.p,da.p,dt),addDelta(a.v,da.v,dt)); +} +IMETHOD VectorVel diff(const RotationVel& a,const RotationVel& b,double dt = 1.0) { + return VectorVel(diff(a.R,b.R,dt),diff(a.w,b.w,dt)); +} + +IMETHOD RotationVel addDelta(const RotationVel& a,const VectorVel&da,double dt=1.0) { + return RotationVel(addDelta(a.R,da.p,dt),addDelta(a.w,da.v,dt)); +} + +IMETHOD TwistVel diff(const FrameVel& a,const FrameVel& b,double dt=1.0) { + return TwistVel(diff(a.M,b.M,dt),diff(a.p,b.p,dt)); +} + +IMETHOD FrameVel addDelta(const FrameVel& a,const TwistVel& da,double dt=1.0) { + return FrameVel( + addDelta(a.M,da.rot,dt), + addDelta(a.p,da.vel,dt) + ); +} + +IMETHOD void random(VectorVel& a) { + random(a.p); + random(a.v); +} +IMETHOD void random(TwistVel& a) { + random(a.vel); + random(a.rot); +} + +IMETHOD void random(RotationVel& R) { + random(R.R); + random(R.w); +} + +IMETHOD void random(FrameVel& F) { + random(F.M); + random(F.p); +} +IMETHOD void posrandom(VectorVel& a) { + posrandom(a.p); + posrandom(a.v); +} +IMETHOD void posrandom(TwistVel& a) { + posrandom(a.vel); + posrandom(a.rot); +} + +IMETHOD void posrandom(RotationVel& R) { + posrandom(R.R); + posrandom(R.w); +} + +IMETHOD void posrandom(FrameVel& F) { + posrandom(F.M); + posrandom(F.p); +} + +#ifdef KDL_INLINE +#include "framevel.inl" +#endif + +} // namespace + +#endif + + + + diff --git a/intern/itasc/kdl/framevel.inl b/intern/itasc/kdl/framevel.inl new file mode 100644 index 00000000000..994b3d2028e --- /dev/null +++ b/intern/itasc/kdl/framevel.inl @@ -0,0 +1,534 @@ +/***************************************************************************** + * \file + * provides inline functions of rframes.h + * + * \author + * Erwin Aertbelien, Div. PMA, Dep. of Mech. 
Eng., K.U.Leuven + * + * \version + * ORO_Geometry V0.2 + * + * \par History + * - $log$ + * + * \par Release + * $Id: framevel.inl 19905 2009-04-23 13:29:54Z ben2610 $ + * $Name: $ + ****************************************************************************/ + + +// Methods and operators related to FrameVelVel +// They all delegate most of the work to RotationVelVel and VectorVelVel +FrameVel& FrameVel::operator = (const FrameVel& arg) { + M=arg.M; + p=arg.p; + return *this; +} + +FrameVel FrameVel::Identity() { + return FrameVel(RotationVel::Identity(),VectorVel::Zero()); +} + + +FrameVel operator *(const FrameVel& lhs,const FrameVel& rhs) +{ + return FrameVel(lhs.M*rhs.M,lhs.M*rhs.p+lhs.p); +} +FrameVel operator *(const FrameVel& lhs,const Frame& rhs) +{ + return FrameVel(lhs.M*rhs.M,lhs.M*rhs.p+lhs.p); +} +FrameVel operator *(const Frame& lhs,const FrameVel& rhs) +{ + return FrameVel(lhs.M*rhs.M , lhs.M*rhs.p+lhs.p ); +} + +VectorVel FrameVel::operator *(const VectorVel & arg) const +{ + return M*arg+p; +} +VectorVel FrameVel::operator *(const Vector & arg) const +{ + return M*arg+p; +} + +VectorVel FrameVel::Inverse(const VectorVel& arg) const +{ + return M.Inverse(arg-p); +} + +VectorVel FrameVel::Inverse(const Vector& arg) const +{ + return M.Inverse(arg-p); +} + +FrameVel FrameVel::Inverse() const +{ + return FrameVel(M.Inverse(),-M.Inverse(p)); +} + +FrameVel& FrameVel::operator = (const Frame& arg) { + M = arg.M; + p = arg.p; + return *this; +} +bool Equal(const FrameVel& r1,const FrameVel& r2,double eps) { + return (Equal(r1.M,r2.M,eps) && Equal(r1.p,r2.p,eps)); +} +bool Equal(const Frame& r1,const FrameVel& r2,double eps) { + return (Equal(r1.M,r2.M,eps) && Equal(r1.p,r2.p,eps)); +} +bool Equal(const FrameVel& r1,const Frame& r2,double eps) { + return (Equal(r1.M,r2.M,eps) && Equal(r1.p,r2.p,eps)); +} + +Frame FrameVel::GetFrame() const { + return Frame(M.R,p.p); +} + +Twist FrameVel::GetTwist() const { + return Twist(p.v,M.w); +} + + +RotationVel operator* (const RotationVel& r1,const RotationVel& r2) { + return RotationVel( r1.R*r2.R, r1.w + r1.R*r2.w ); +} + +RotationVel operator* (const Rotation& r1,const RotationVel& r2) { + return RotationVel( r1*r2.R, r1*r2.w ); +} + +RotationVel operator* (const RotationVel& r1,const Rotation& r2) { + return RotationVel( r1.R*r2, r1.w ); +} + +RotationVel& RotationVel::operator = (const RotationVel& arg) { + R=arg.R; + w=arg.w; + return *this; + } +RotationVel& RotationVel::operator = (const Rotation& arg) { + R=arg; + w=Vector::Zero(); + return *this; +} + +VectorVel RotationVel::UnitX() const { + return VectorVel(R.UnitX(),w*R.UnitX()); +} + +VectorVel RotationVel::UnitY() const { + return VectorVel(R.UnitY(),w*R.UnitY()); +} + +VectorVel RotationVel::UnitZ() const { + return VectorVel(R.UnitZ(),w*R.UnitZ()); +} + + + +RotationVel RotationVel::Identity() { + return RotationVel(Rotation::Identity(),Vector::Zero()); +} + +RotationVel RotationVel::Inverse() const { + return RotationVel(R.Inverse(),-R.Inverse(w)); +} + +VectorVel RotationVel::Inverse(const VectorVel& arg) const { + Vector tmp=R.Inverse(arg.p); + return VectorVel(tmp, + R.Inverse(arg.v-w*arg.p) + ); +} + +VectorVel RotationVel::Inverse(const Vector& arg) const { + Vector tmp=R.Inverse(arg); + return VectorVel(tmp, + R.Inverse(-w*arg) + ); +} + + +VectorVel RotationVel::operator*(const VectorVel& arg) const { + Vector tmp=R*arg.p; + return VectorVel(tmp,w*tmp+R*arg.v); +} + +VectorVel RotationVel::operator*(const Vector& arg) const { + Vector tmp=R*arg; + return 
VectorVel(tmp,w*tmp);
+}
+
+
+// = Rotations
+// The Rot... static functions give the value of the appropriate rotation matrix back.
+// The DoRot... functions apply a rotation R to *this,such that *this = *this * R.
+
+void RotationVel::DoRotX(const doubleVel& angle) {
+    w+=R*Vector(angle.grad,0,0);
+    R.DoRotX(angle.t);
+}
+RotationVel RotationVel::RotX(const doubleVel& angle) {
+    return RotationVel(Rotation::RotX(angle.t),Vector(angle.grad,0,0));
+}
+
+void RotationVel::DoRotY(const doubleVel& angle) {
+    w+=R*Vector(0,angle.grad,0);
+    R.DoRotY(angle.t);
+}
+RotationVel RotationVel::RotY(const doubleVel& angle) {
+    return RotationVel(Rotation::RotY(angle.t),Vector(0,angle.grad,0));
+}
+
+void RotationVel::DoRotZ(const doubleVel& angle) {
+    w+=R*Vector(0,0,angle.grad);
+    R.DoRotZ(angle.t);
+}
+RotationVel RotationVel::RotZ(const doubleVel& angle) {
+    return RotationVel(Rotation::RotZ(angle.t),Vector(0,0,angle.grad));
+}
+
+
+RotationVel RotationVel::Rot(const Vector& rotvec,const doubleVel& angle)
+// rotvec has arbitrary norm
+// rotation around a constant vector !
+{
+    Vector v(rotvec);
+    v.Normalize();
+    return RotationVel(Rotation::Rot2(v,angle.t),v*angle.grad);
+}
+
+RotationVel RotationVel::Rot2(const Vector& rotvec,const doubleVel& angle)
+    // rotvec is normalized.
+{
+    return RotationVel(Rotation::Rot2(rotvec,angle.t),rotvec*angle.grad);
+}
+
+
+VectorVel operator + (const VectorVel& r1,const VectorVel& r2) {
+    return VectorVel(r1.p+r2.p,r1.v+r2.v);
+}
+
+VectorVel operator - (const VectorVel& r1,const VectorVel& r2) {
+    return VectorVel(r1.p-r2.p,r1.v-r2.v);
+}
+
+VectorVel operator + (const VectorVel& r1,const Vector& r2) {
+    return VectorVel(r1.p+r2,r1.v);
+}
+
+VectorVel operator - (const VectorVel& r1,const Vector& r2) {
+    return VectorVel(r1.p-r2,r1.v);
+}
+
+VectorVel operator + (const Vector& r1,const VectorVel& r2) {
+    return VectorVel(r1+r2.p,r2.v);
+}
+
+VectorVel operator - (const Vector& r1,const VectorVel& r2) {
+    return VectorVel(r1-r2.p,-r2.v);
+}
+
+// unary -
+VectorVel operator - (const VectorVel& r) {
+    return VectorVel(-r.p,-r.v);
+}
+
+void SetToZero(VectorVel& v){
+    SetToZero(v.p);
+    SetToZero(v.v);
+}
+
+// cross prod.
+VectorVel operator * (const VectorVel& r1,const VectorVel& r2) {
+    return VectorVel(r1.p*r2.p, r1.p*r2.v+r1.v*r2.p);
+}
+
+VectorVel operator * (const VectorVel& r1,const Vector& r2) {
+    return VectorVel(r1.p*r2, r1.v*r2);
+}
+
+VectorVel operator * (const Vector& r1,const VectorVel& r2) {
+    return VectorVel(r1*r2.p, r1*r2.v);
+}
+
+
+
+// scalar mult.
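// Editorial note (an illustrative sketch, not from the original source): the scalar
// overloads below propagate the first derivative by the product rule,
// (f*p)' = f'*p + f*p'.  For example, with f(t) = 2t and p(t) = (t,0,0) at t = 1:
//
//     doubleVel f(2.0, 2.0);                       // value 2, derivative 2
//     VectorVel p(Vector(1,0,0), Vector(1,0,0));   // value (1,0,0), derivative (1,0,0)
//     VectorVel fp = f * p;                        // value (2,0,0), derivative (4,0,0)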
+VectorVel operator * (double r1,const VectorVel& r2) { + return VectorVel(r1*r2.p, r1*r2.v); +} + +VectorVel operator * (const VectorVel& r1,double r2) { + return VectorVel(r1.p*r2, r1.v*r2); +} + + + +VectorVel operator * (const doubleVel& r1,const VectorVel& r2) { + return VectorVel(r1.t*r2.p, r1.t*r2.v + r1.grad*r2.p); +} + +VectorVel operator * (const VectorVel& r2,const doubleVel& r1) { + return VectorVel(r1.t*r2.p, r1.t*r2.v + r1.grad*r2.p); +} + +VectorVel operator / (const VectorVel& r1,double r2) { + return VectorVel(r1.p/r2, r1.v/r2); +} + +VectorVel operator / (const VectorVel& r2,const doubleVel& r1) { + return VectorVel(r2.p/r1.t, r2.v/r1.t - r2.p*r1.grad/r1.t/r1.t); +} + +VectorVel operator*(const Rotation& R,const VectorVel& x) { + return VectorVel(R*x.p,R*x.v); +} + +VectorVel& VectorVel::operator = (const VectorVel& arg) { + p=arg.p; + v=arg.v; + return *this; +} +VectorVel& VectorVel::operator = (const Vector& arg) { + p=arg; + v=Vector::Zero(); + return *this; +} +VectorVel& VectorVel::operator += (const VectorVel& arg) { + p+=arg.p; + v+=arg.v; + return *this; +} +VectorVel& VectorVel::operator -= (const VectorVel& arg) { + p-=arg.p; + v-=arg.v; + return *this; +} + +VectorVel VectorVel::Zero() { + return VectorVel(Vector::Zero(),Vector::Zero()); +} +void VectorVel::ReverseSign() { + p.ReverseSign(); + v.ReverseSign(); +} +doubleVel VectorVel::Norm() const { + double n = p.Norm(); + return doubleVel(n,dot(p,v)/n); +} + +bool Equal(const VectorVel& r1,const VectorVel& r2,double eps) { + return (Equal(r1.p,r2.p,eps) && Equal(r1.v,r2.v,eps)); +} +bool Equal(const Vector& r1,const VectorVel& r2,double eps) { + return (Equal(r1,r2.p,eps) && Equal(Vector::Zero(),r2.v,eps)); +} +bool Equal(const VectorVel& r1,const Vector& r2,double eps) { + return (Equal(r1.p,r2,eps) && Equal(r1.v,Vector::Zero(),eps)); +} + +bool Equal(const RotationVel& r1,const RotationVel& r2,double eps) { + return (Equal(r1.w,r2.w,eps) && Equal(r1.R,r2.R,eps)); +} +bool Equal(const Rotation& r1,const RotationVel& r2,double eps) { + return (Equal(Vector::Zero(),r2.w,eps) && Equal(r1,r2.R,eps)); +} +bool Equal(const RotationVel& r1,const Rotation& r2,double eps) { + return (Equal(r1.w,Vector::Zero(),eps) && Equal(r1.R,r2,eps)); +} +bool Equal(const TwistVel& a,const TwistVel& b,double eps) { + return (Equal(a.rot,b.rot,eps)&& + Equal(a.vel,b.vel,eps) ); +} +bool Equal(const Twist& a,const TwistVel& b,double eps) { + return (Equal(a.rot,b.rot,eps)&& + Equal(a.vel,b.vel,eps) ); +} +bool Equal(const TwistVel& a,const Twist& b,double eps) { + return (Equal(a.rot,b.rot,eps)&& + Equal(a.vel,b.vel,eps) ); +} + + + +IMETHOD doubleVel dot(const VectorVel& lhs,const VectorVel& rhs) { + return doubleVel(dot(lhs.p,rhs.p),dot(lhs.p,rhs.v)+dot(lhs.v,rhs.p)); +} +IMETHOD doubleVel dot(const VectorVel& lhs,const Vector& rhs) { + return doubleVel(dot(lhs.p,rhs),dot(lhs.v,rhs)); +} +IMETHOD doubleVel dot(const Vector& lhs,const VectorVel& rhs) { + return doubleVel(dot(lhs,rhs.p),dot(lhs,rhs.v)); +} + + + + + + + + + + + + +TwistVel TwistVel::Zero() +{ + return TwistVel(VectorVel::Zero(),VectorVel::Zero()); +} + + +void TwistVel::ReverseSign() +{ + vel.ReverseSign(); + rot.ReverseSign(); +} + +TwistVel TwistVel::RefPoint(const VectorVel& v_base_AB) + // Changes the reference point of the TwistVel. + // The VectorVel v_base_AB is expressed in the same base as the TwistVel + // The VectorVel v_base_AB is a VectorVel from the old point to + // the new point. 
+ // Complexity : 6M+6A +{ + return TwistVel(this->vel+this->rot*v_base_AB,this->rot); +} + +TwistVel& TwistVel::operator-=(const TwistVel& arg) +{ + vel-=arg.vel; + rot -=arg.rot; + return *this; +} + +TwistVel& TwistVel::operator+=(const TwistVel& arg) +{ + vel+=arg.vel; + rot +=arg.rot; + return *this; +} + + +TwistVel operator*(const TwistVel& lhs,double rhs) +{ + return TwistVel(lhs.vel*rhs,lhs.rot*rhs); +} + +TwistVel operator*(double lhs,const TwistVel& rhs) +{ + return TwistVel(lhs*rhs.vel,lhs*rhs.rot); +} + +TwistVel operator/(const TwistVel& lhs,double rhs) +{ + return TwistVel(lhs.vel/rhs,lhs.rot/rhs); +} + + +TwistVel operator*(const TwistVel& lhs,const doubleVel& rhs) +{ + return TwistVel(lhs.vel*rhs,lhs.rot*rhs); +} + +TwistVel operator*(const doubleVel& lhs,const TwistVel& rhs) +{ + return TwistVel(lhs*rhs.vel,lhs*rhs.rot); +} + +TwistVel operator/(const TwistVel& lhs,const doubleVel& rhs) +{ + return TwistVel(lhs.vel/rhs,lhs.rot/rhs); +} + + + +// addition of TwistVel's +TwistVel operator+(const TwistVel& lhs,const TwistVel& rhs) +{ + return TwistVel(lhs.vel+rhs.vel,lhs.rot+rhs.rot); +} + +TwistVel operator-(const TwistVel& lhs,const TwistVel& rhs) +{ + return TwistVel(lhs.vel-rhs.vel,lhs.rot-rhs.rot); +} + +// unary - +TwistVel operator-(const TwistVel& arg) +{ + return TwistVel(-arg.vel,-arg.rot); +} + +void SetToZero(TwistVel& v) +{ + SetToZero(v.vel); + SetToZero(v.rot); +} + + + + + +TwistVel RotationVel::Inverse(const TwistVel& arg) const +{ + return TwistVel(Inverse(arg.vel),Inverse(arg.rot)); +} + +TwistVel RotationVel::operator * (const TwistVel& arg) const +{ + return TwistVel((*this)*arg.vel,(*this)*arg.rot); +} + +TwistVel RotationVel::Inverse(const Twist& arg) const +{ + return TwistVel(Inverse(arg.vel),Inverse(arg.rot)); +} + +TwistVel RotationVel::operator * (const Twist& arg) const +{ + return TwistVel((*this)*arg.vel,(*this)*arg.rot); +} + + +TwistVel FrameVel::operator * (const TwistVel& arg) const +{ + TwistVel tmp; + tmp.rot = M*arg.rot; + tmp.vel = M*arg.vel+p*tmp.rot; + return tmp; +} + +TwistVel FrameVel::operator * (const Twist& arg) const +{ + TwistVel tmp; + tmp.rot = M*arg.rot; + tmp.vel = M*arg.vel+p*tmp.rot; + return tmp; +} + +TwistVel FrameVel::Inverse(const TwistVel& arg) const +{ + TwistVel tmp; + tmp.rot = M.Inverse(arg.rot); + tmp.vel = M.Inverse(arg.vel-p*arg.rot); + return tmp; +} + +TwistVel FrameVel::Inverse(const Twist& arg) const +{ + TwistVel tmp; + tmp.rot = M.Inverse(arg.rot); + tmp.vel = M.Inverse(arg.vel-p*arg.rot); + return tmp; +} + +Twist TwistVel::GetTwist() const { + return Twist(vel.p,rot.p); +} + +Twist TwistVel::GetTwistDot() const { + return Twist(vel.v,rot.v); +} diff --git a/intern/itasc/kdl/inertia.cpp b/intern/itasc/kdl/inertia.cpp new file mode 100644 index 00000000000..6c7337d0dc4 --- /dev/null +++ b/intern/itasc/kdl/inertia.cpp @@ -0,0 +1,48 @@ +// Copyright (C) 2007 Ruben Smits + +// Version: 1.0 +// Author: Ruben Smits +// Maintainer: Ruben Smits +// URL: http://www.orocos.org/kdl + +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. + +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+ +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +#include "inertia.hpp" + +#include + +namespace KDL { +using namespace Eigen; + +Inertia::Inertia(double m,double Ixx,double Iyy,double Izz,double Ixy,double Ixz,double Iyz): +data(Matrix::Zero()) +{ + data(0,0)=Ixx; + data(1,1)=Iyy; + data(2,2)=Izz; + data(2,1)=data(1,2)=Ixy; + data(3,1)=data(1,3)=Ixz; + data(3,2)=data(2,3)=Iyz; + + data.block(3,3,3,3)=m*Matrix::Identity(); +} + +Inertia::~Inertia() +{ +} + + + +} diff --git a/intern/itasc/kdl/inertia.hpp b/intern/itasc/kdl/inertia.hpp new file mode 100644 index 00000000000..9f33859671c --- /dev/null +++ b/intern/itasc/kdl/inertia.hpp @@ -0,0 +1,70 @@ +// Copyright (C) 2007 Ruben Smits + +// Version: 1.0 +// Author: Ruben Smits +// Maintainer: Ruben Smits +// URL: http://www.orocos.org/kdl + +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. + +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. + +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +#ifndef KDLINERTIA_HPP +#define KDLINERTIA_HPP + +#include +#include "frames.hpp" + +namespace KDL { + +using namespace Eigen; + +/** + * This class offers the inertia-structure of a body + * An inertia is defined in a certain reference point and a certain reference base. + * The reference point does not have to coincide with the origin of the reference frame. + */ +class Inertia{ +public: + + /** + * This constructor creates a cartesian space inertia matrix, + * the arguments are the mass and the inertia moments in the cog. + */ + Inertia(double m=0,double Ixx=0,double Iyy=0,double Izz=0,double Ixy=0,double Ixz=0,double Iyz=0); + + static inline Inertia Zero(){ + return Inertia(0,0,0,0,0,0,0); + }; + + friend class Rotation; + friend class Frame; + + /** + * F = m*a + */ + // Wrench operator* (const AccelerationTwist& acc); + + + ~Inertia(); +private: + Matrix data; + +}; + + + + +} + +#endif diff --git a/intern/itasc/kdl/jacobian.cpp b/intern/itasc/kdl/jacobian.cpp new file mode 100644 index 00000000000..f8f46b32619 --- /dev/null +++ b/intern/itasc/kdl/jacobian.cpp @@ -0,0 +1,129 @@ +// Copyright (C) 2007 Ruben Smits + +// Version: 1.0 +// Author: Ruben Smits +// Maintainer: Ruben Smits +// URL: http://www.orocos.org/kdl + +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. + +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+ +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +#include "jacobian.hpp" + +namespace KDL +{ + Jacobian::Jacobian(unsigned int _size,unsigned int _nr_blocks): + size(_size),nr_blocks(_nr_blocks) + { + twists = new Twist[size*nr_blocks]; + } + + Jacobian::Jacobian(const Jacobian& arg): + size(arg.columns()), + nr_blocks(arg.nr_blocks) + { + twists = new Twist[size*nr_blocks]; + for(unsigned int i=0;i + +// Version: 1.0 +// Author: Ruben Smits +// Maintainer: Ruben Smits +// URL: http://www.orocos.org/kdl + +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. + +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. + +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +#ifndef KDL_JACOBIAN_HPP +#define KDL_JACOBIAN_HPP + +#include "frames.hpp" + +namespace KDL +{ + //Forward declaration + class ChainJntToJacSolver; + + class Jacobian + { + friend class ChainJntToJacSolver; + private: + unsigned int size; + unsigned int nr_blocks; + public: + Twist* twists; + Jacobian(unsigned int size,unsigned int nr=1); + Jacobian(const Jacobian& arg); + + Jacobian& operator=(const Jacobian& arg); + + bool operator ==(const Jacobian& arg); + bool operator !=(const Jacobian& arg); + + friend bool Equal(const Jacobian& a,const Jacobian& b,double eps=epsilon); + + + ~Jacobian(); + + double operator()(int i,int j)const; + double& operator()(int i,int j); + unsigned int rows()const; + unsigned int columns()const; + + friend void SetToZero(Jacobian& jac); + + friend void changeRefPoint(const Jacobian& src1, const Vector& base_AB, Jacobian& dest); + friend void changeBase(const Jacobian& src1, const Rotation& rot, Jacobian& dest); + friend void changeRefFrame(const Jacobian& src1,const Frame& frame, Jacobian& dest); + + + }; +} + +#endif diff --git a/intern/itasc/kdl/jntarray.cpp b/intern/itasc/kdl/jntarray.cpp new file mode 100644 index 00000000000..2adb76081f3 --- /dev/null +++ b/intern/itasc/kdl/jntarray.cpp @@ -0,0 +1,152 @@ +// Copyright (C) 2007 Ruben Smits + +// Version: 1.0 +// Author: Ruben Smits +// Maintainer: Ruben Smits +// URL: http://www.orocos.org/kdl + +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. + +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+ +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +#include "jntarray.hpp" + +namespace KDL +{ + JntArray::JntArray(): + size(0), + data(NULL) + { + } + + JntArray::JntArray(unsigned int _size): + size(_size) + { + assert(0 < size); + data = new double[size]; + SetToZero(*this); + } + + + JntArray::JntArray(const JntArray& arg): + size(arg.size) + { + data = ((0 < size) ? new double[size] : NULL); + for(unsigned int i=0;i + +// Version: 1.0 +// Author: Ruben Smits +// Maintainer: Ruben Smits +// URL: http://www.orocos.org/kdl + +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. + +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. + +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +#ifndef KDL_JNTARRAY_HPP +#define KDL_JNTARRAY_HPP + +#include "frames.hpp" +#include "jacobian.hpp" + +namespace KDL +{ + /** + * @brief This class represents an fixed size array containing + * joint values of a KDL::Chain. + * + * \warning An object constructed with the default constructor provides + * a valid, but inert, object. Many of the member functions will do + * the correct thing and have no affect on this object, but some + * member functions can _NOT_ deal with an inert/empty object. These + * functions will assert() and exit the program instead. The intended use + * case for the default constructor (in an RTT/OCL setting) is outlined in + * code below - the default constructor plus the resize() function allow + * use of JntArray objects whose size is set within a configureHook() call + * (typically based on a size determined from a property). + +\code +class MyTask : public RTT::TaskContext +{ + JntArray j; + MyTask() + {} // invokes j's default constructor + + bool configureHook() + { + unsigned int size = some_property.rvalue(); + j.resize(size) + ... + } + + void updateHook() + { + ** use j here + } +}; +/endcode + + */ + + class JntArray + { + private: + unsigned int size; + double* data; + public: + /** Construct with _no_ data array + * @post NULL == data + * @post 0 == rows() + * @warning use of an object constructed like this, without + * a resize() first, may result in program exit! See class + * documentation. + */ + JntArray(); + /** + * Constructor of the joint array + * + * @param size size of the array, this cannot be changed + * afterwards. + * @pre 0 < size + * @post NULL != data + * @post 0 < rows() + * @post all elements in data have 0 value + */ + JntArray(unsigned int size); + /** Copy constructor + * @note Will correctly copy an empty object + */ + JntArray(const JntArray& arg); + ~JntArray(); + /** Resize the array + * @warning This causes a dynamic allocation (and potentially + * also a dynamic deallocation). This _will_ negatively affect + * real-time performance! 
+ * + * @post newSize == rows() + * @post NULL != data + * @post all elements in data have 0 value + */ + void resize(unsigned int newSize); + + JntArray& operator = ( const JntArray& arg); + /** + * get_item operator for the joint array, if a second value is + * given it should be zero, since a JntArray resembles a column. + * + * + * @return the joint value at position i, starting from 0 + * @pre 0 != size (ie non-default constructor or resize() called) + */ + double operator()(unsigned int i,unsigned int j=0)const; + /** + * set_item operator, again if a second value is given it + *should be zero. + * + * @return reference to the joint value at position i,starting + *from zero. + * @pre 0 != size (ie non-default constructor or resize() called) + */ + double& operator()(unsigned int i,unsigned int j=0); + /** + * Returns the number of rows (size) of the array + * + */ + unsigned int rows()const; + /** + * Returns the number of columns of the array, always 1. + */ + unsigned int columns()const; + + /** + * Function to add two joint arrays, all the arguments must + * have the same size: A + B = C. This function is + * aliasing-safe, A or B can be the same array as C. + * + * @param src1 A + * @param src2 B + * @param dest C + */ + friend void Add(const JntArray& src1,const JntArray& src2,JntArray& dest); + /** + * Function to subtract two joint arrays, all the arguments must + * have the same size: A - B = C. This function is + * aliasing-safe, A or B can be the same array as C. + * + * @param src1 A + * @param src2 B + * @param dest C + */ + friend void Subtract(const JntArray& src1,const JntArray& src2,JntArray& dest); + /** + * Function to multiply all the array values with a scalar + * factor: A*b=C. This function is aliasing-safe, A can be the + * same array as C. + * + * @param src A + * @param factor b + * @param dest C + */ + friend void Multiply(const JntArray& src,const double& factor,JntArray& dest); + /** + * Function to divide all the array values with a scalar + * factor: A/b=C. This function is aliasing-safe, A can be the + * same array as C. + * + * @param src A + * @param factor b + * @param dest C + */ + friend void Divide(const JntArray& src,const double& factor,JntArray& dest); + /** + * Function to multiply a KDL::Jacobian with a KDL::JntArray + * to get a KDL::Twist, it should not be used to calculate the + * forward velocity kinematics, the solver classes are built + * for this purpose. 
+ * J*q = t + * + * @param jac J + * @param src q + * @param dest t + * @post dest==Twist::Zero() if 0==src.rows() (ie src is empty) + */ + friend void MultiplyJacobian(const Jacobian& jac, const JntArray& src, Twist& dest); + /** + * Function to set all the values of the array to 0 + * + * @param array + */ + friend void SetToZero(JntArray& array); + /** + * Function to check if two arrays are the same with a + *precision of eps + * + * @param src1 + * @param src2 + * @param eps default: epsilon + * @return true if each element of src1 is within eps of the same + * element in src2, or if both src1 and src2 have no data (ie 0==rows()) + */ + friend bool Equal(const JntArray& src1,const JntArray& src2,double eps=epsilon); + + friend bool operator==(const JntArray& src1,const JntArray& src2); + //friend bool operator!=(const JntArray& src1,const JntArray& src2); + }; + + bool operator==(const JntArray& src1,const JntArray& src2); + //bool operator!=(const JntArray& src1,const JntArray& src2); + +} + +#endif diff --git a/intern/itasc/kdl/jntarrayacc.cpp b/intern/itasc/kdl/jntarrayacc.cpp new file mode 100644 index 00000000000..3c9c67d9ef9 --- /dev/null +++ b/intern/itasc/kdl/jntarrayacc.cpp @@ -0,0 +1,170 @@ +// Copyright (C) 2007 Ruben Smits + +// Version: 1.0 +// Author: Ruben Smits +// Maintainer: Ruben Smits +// URL: http://www.orocos.org/kdl + +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. + +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+ +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +#include "jntarrayacc.hpp" + +namespace KDL +{ + JntArrayAcc::JntArrayAcc(unsigned int size): + q(size),qdot(size),qdotdot(size) + { + } + JntArrayAcc::JntArrayAcc(const JntArray& qin, const JntArray& qdotin,const JntArray& qdotdotin): + q(qin),qdot(qdotin),qdotdot(qdotdotin) + { + assert(q.rows()==qdot.rows()&&qdot.rows()==qdotdot.rows()); + } + JntArrayAcc::JntArrayAcc(const JntArray& qin, const JntArray& qdotin): + q(qin),qdot(qdotin),qdotdot(q.rows()) + { + assert(q.rows()==qdot.rows()); + } + JntArrayAcc::JntArrayAcc(const JntArray& qin): + q(qin),qdot(q.rows()),qdotdot(q.rows()) + { + } + + JntArray JntArrayAcc::value()const + { + return q; + } + + JntArray JntArrayAcc::deriv()const + { + return qdot; + } + JntArray JntArrayAcc::dderiv()const + { + return qdotdot; + } + + void Add(const JntArrayAcc& src1,const JntArrayAcc& src2,JntArrayAcc& dest) + { + Add(src1.q,src2.q,dest.q); + Add(src1.qdot,src2.qdot,dest.qdot); + Add(src1.qdotdot,src2.qdotdot,dest.qdotdot); + } + void Add(const JntArrayAcc& src1,const JntArrayVel& src2,JntArrayAcc& dest) + { + Add(src1.q,src2.q,dest.q); + Add(src1.qdot,src2.qdot,dest.qdot); + dest.qdotdot=src1.qdotdot; + } + void Add(const JntArrayAcc& src1,const JntArray& src2,JntArrayAcc& dest) + { + Add(src1.q,src2,dest.q); + dest.qdot=src1.qdot; + dest.qdotdot=src1.qdotdot; + } + + void Subtract(const JntArrayAcc& src1,const JntArrayAcc& src2,JntArrayAcc& dest) + { + Subtract(src1.q,src2.q,dest.q); + Subtract(src1.qdot,src2.qdot,dest.qdot); + Subtract(src1.qdotdot,src2.qdotdot,dest.qdotdot); + } + void Subtract(const JntArrayAcc& src1,const JntArrayVel& src2,JntArrayAcc& dest) + { + Subtract(src1.q,src2.q,dest.q); + Subtract(src1.qdot,src2.qdot,dest.qdot); + dest.qdotdot=src1.qdotdot; + } + void Subtract(const JntArrayAcc& src1,const JntArray& src2,JntArrayAcc& dest) + { + Subtract(src1.q,src2,dest.q); + dest.qdot=src1.qdot; + dest.qdotdot=src1.qdotdot; + } + + void Multiply(const JntArrayAcc& src,const double& factor,JntArrayAcc& dest) + { + Multiply(src.q,factor,dest.q); + Multiply(src.qdot,factor,dest.qdot); + Multiply(src.qdotdot,factor,dest.qdotdot); + } + void Multiply(const JntArrayAcc& src,const doubleVel& factor,JntArrayAcc& dest) + { + Multiply(src.qdot,factor.grad*2,dest.qdot); + Multiply(src.qdotdot,factor.t,dest.qdotdot); + Add(dest.qdot,dest.qdotdot,dest.qdotdot); + Multiply(src.q,factor.grad,dest.q); + Multiply(src.qdot,factor.t,dest.qdot); + Add(dest.qdot,dest.q,dest.qdot); + Multiply(src.q,factor.t,dest.q); + } + void Multiply(const JntArrayAcc& src,const doubleAcc& factor,JntArrayAcc& dest) + { + Multiply(src.q,factor.dd,dest.q); + Multiply(src.qdot,factor.d*2,dest.qdot); + Multiply(src.qdotdot,factor.t,dest.qdotdot); + Add(dest.qdotdot,dest.qdot,dest.qdotdot); + Add(dest.qdotdot,dest.q,dest.qdotdot); + Multiply(src.q,factor.d,dest.q); + Multiply(src.qdot,factor.t,dest.qdot); + Add(dest.qdot,dest.q,dest.qdot); + Multiply(src.q,factor.t,dest.q); + } + + void Divide(const JntArrayAcc& src,const double& factor,JntArrayAcc& dest) + { + Divide(src.q,factor,dest.q); + Divide(src.qdot,factor,dest.qdot); + Divide(src.qdotdot,factor,dest.qdotdot); + } + void Divide(const JntArrayAcc& src,const doubleVel& factor,JntArrayAcc& dest) + { + Multiply(src.q,(2*factor.grad*factor.grad)/(factor.t*factor.t*factor.t),dest.q); + 
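+		// The preceding Multiply and the statements below assemble q(t)/f(t) and its
+		// first two derivatives via the quotient rule, where factor.t is the value of
+		// f and factor.grad its time derivative, e.g.
+		// (q/f)'' = qdotdot/t - 2*qdot*grad/t^2 + 2*q*grad^2/t^3.
+		// dest.q and dest.qdot carry intermediate terms until the last statements
+		// overwrite them with the final value and first derivative.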
Multiply(src.qdot,(2*factor.grad)/(factor.t*factor.t),dest.qdot); + Divide(src.qdotdot,factor.t,dest.qdotdot); + Subtract(dest.qdotdot,dest.qdot,dest.qdotdot); + Add(dest.qdotdot,dest.q,dest.qdotdot); + Multiply(src.q,factor.grad/(factor.t*factor.t),dest.q); + Divide(src.qdot,factor.t,dest.qdot); + Subtract(dest.qdot,dest.q,dest.qdot); + Divide(src.q,factor.t,dest.q); + } + void Divide(const JntArrayAcc& src,const doubleAcc& factor,JntArrayAcc& dest) + { + Multiply(src.q,(2*factor.d*factor.d)/(factor.t*factor.t*factor.t)-factor.dd/(factor.t*factor.t),dest.q); + Multiply(src.qdot,(2*factor.d)/(factor.t*factor.t),dest.qdot); + Divide(src.qdotdot,factor.t,dest.qdotdot); + Subtract(dest.qdotdot,dest.qdot,dest.qdotdot); + Add(dest.qdotdot,dest.q,dest.qdotdot); + Multiply(src.q,factor.d/(factor.t*factor.t),dest.q); + Divide(src.qdot,factor.t,dest.qdot); + Subtract(dest.qdot,dest.q,dest.qdot); + Divide(src.q,factor.t,dest.q); + } + + void SetToZero(JntArrayAcc& array) + { + SetToZero(array.q); + SetToZero(array.qdot); + SetToZero(array.qdotdot); + } + + bool Equal(const JntArrayAcc& src1,const JntArrayAcc& src2,double eps) + { + return (Equal(src1.q,src2.q,eps)&&Equal(src1.qdot,src2.qdot,eps)&&Equal(src1.qdotdot,src2.qdotdot,eps)); + } +} + + diff --git a/intern/itasc/kdl/jntarrayacc.hpp b/intern/itasc/kdl/jntarrayacc.hpp new file mode 100644 index 00000000000..275aa58f21e --- /dev/null +++ b/intern/itasc/kdl/jntarrayacc.hpp @@ -0,0 +1,66 @@ +// Copyright (C) 2007 Ruben Smits + +// Version: 1.0 +// Author: Ruben Smits +// Maintainer: Ruben Smits +// URL: http://www.orocos.org/kdl + +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. + +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+ +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +#ifndef KDL_JNTARRAYACC_HPP +#define KDL_JNTARRAYACC_HPP + +#include "utilities/utility.h" +#include "jntarray.hpp" +#include "jntarrayvel.hpp" +#include "frameacc.hpp" + +namespace KDL +{ + class JntArrayAcc + { + public: + JntArray q; + JntArray qdot; + JntArray qdotdot; + public: + JntArrayAcc(unsigned int size); + JntArrayAcc(const JntArray& q,const JntArray& qdot,const JntArray& qdotdot); + JntArrayAcc(const JntArray& q,const JntArray& qdot); + JntArrayAcc(const JntArray& q); + + JntArray value()const; + JntArray deriv()const; + JntArray dderiv()const; + + friend void Add(const JntArrayAcc& src1,const JntArrayAcc& src2,JntArrayAcc& dest); + friend void Add(const JntArrayAcc& src1,const JntArrayVel& src2,JntArrayAcc& dest); + friend void Add(const JntArrayAcc& src1,const JntArray& src2,JntArrayAcc& dest); + friend void Subtract(const JntArrayAcc& src1,const JntArrayAcc& src2,JntArrayAcc& dest); + friend void Subtract(const JntArrayAcc& src1,const JntArrayVel& src2,JntArrayAcc& dest); + friend void Subtract(const JntArrayAcc& src1,const JntArray& src2,JntArrayAcc& dest); + friend void Multiply(const JntArrayAcc& src,const double& factor,JntArrayAcc& dest); + friend void Multiply(const JntArrayAcc& src,const doubleVel& factor,JntArrayAcc& dest); + friend void Multiply(const JntArrayAcc& src,const doubleAcc& factor,JntArrayAcc& dest); + friend void Divide(const JntArrayAcc& src,const double& factor,JntArrayAcc& dest); + friend void Divide(const JntArrayAcc& src,const doubleVel& factor,JntArrayAcc& dest); + friend void Divide(const JntArrayAcc& src,const doubleAcc& factor,JntArrayAcc& dest); + friend void SetToZero(JntArrayAcc& array); + friend bool Equal(const JntArrayAcc& src1,const JntArrayAcc& src2,double eps=epsilon); + + }; +} + +#endif diff --git a/intern/itasc/kdl/jntarrayvel.cpp b/intern/itasc/kdl/jntarrayvel.cpp new file mode 100644 index 00000000000..df5c7fb0fb3 --- /dev/null +++ b/intern/itasc/kdl/jntarrayvel.cpp @@ -0,0 +1,111 @@ +// Copyright (C) 2007 Ruben Smits + +// Version: 1.0 +// Author: Ruben Smits +// Maintainer: Ruben Smits +// URL: http://www.orocos.org/kdl + +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. + +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+ +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + +#include "jntarrayacc.hpp" + +namespace KDL +{ + JntArrayVel::JntArrayVel(unsigned int size): + q(size),qdot(size) + { + } + JntArrayVel::JntArrayVel(const JntArray& qin, const JntArray& qdotin): + q(qin),qdot(qdotin) + { + assert(q.rows()==qdot.rows()); + } + JntArrayVel::JntArrayVel(const JntArray& qin): + q(qin),qdot(q.rows()) + { + } + + JntArray JntArrayVel::value()const + { + return q; + } + + JntArray JntArrayVel::deriv()const + { + return qdot; + } + + void Add(const JntArrayVel& src1,const JntArrayVel& src2,JntArrayVel& dest) + { + Add(src1.q,src2.q,dest.q); + Add(src1.qdot,src2.qdot,dest.qdot); + } + void Add(const JntArrayVel& src1,const JntArray& src2,JntArrayVel& dest) + { + Add(src1.q,src2,dest.q); + dest.qdot=src1.qdot; + } + + void Subtract(const JntArrayVel& src1,const JntArrayVel& src2,JntArrayVel& dest) + { + Subtract(src1.q,src2.q,dest.q); + Subtract(src1.qdot,src2.qdot,dest.qdot); + } + void Subtract(const JntArrayVel& src1,const JntArray& src2,JntArrayVel& dest) + { + Subtract(src1.q,src2,dest.q); + dest.qdot=src1.qdot; + } + + void Multiply(const JntArrayVel& src,const double& factor,JntArrayVel& dest) + { + Multiply(src.q,factor,dest.q); + Multiply(src.qdot,factor,dest.qdot); + } + void Multiply(const JntArrayVel& src,const doubleVel& factor,JntArrayVel& dest) + { + Multiply(src.q,factor.grad,dest.q); + Multiply(src.qdot,factor.t,dest.qdot); + Add(dest.qdot,dest.q,dest.qdot); + Multiply(src.q,factor.t,dest.q); + } + + void Divide(const JntArrayVel& src,const double& factor,JntArrayVel& dest) + { + Divide(src.q,factor,dest.q); + Divide(src.qdot,factor,dest.qdot); + } + void Divide(const JntArrayVel& src,const doubleVel& factor,JntArrayVel& dest) + { + Multiply(src.q,(factor.grad/factor.t/factor.t),dest.q); + Divide(src.qdot,factor.t,dest.qdot); + Subtract(dest.qdot,dest.q,dest.qdot); + Divide(src.q,factor.t,dest.q); + } + + void SetToZero(JntArrayVel& array) + { + SetToZero(array.q); + SetToZero(array.qdot); + } + + bool Equal(const JntArrayVel& src1,const JntArrayVel& src2,double eps) + { + return Equal(src1.q,src2.q,eps)&&Equal(src1.qdot,src2.qdot,eps); + } +} + + diff --git a/intern/itasc/kdl/jntarrayvel.hpp b/intern/itasc/kdl/jntarrayvel.hpp new file mode 100644 index 00000000000..faa82076ebb --- /dev/null +++ b/intern/itasc/kdl/jntarrayvel.hpp @@ -0,0 +1,59 @@ +// Copyright (C) 2007 Ruben Smits + +// Version: 1.0 +// Author: Ruben Smits +// Maintainer: Ruben Smits +// URL: http://www.orocos.org/kdl + +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. + +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+ +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +#ifndef KDL_JNTARRAYVEL_HPP +#define KDL_JNTARRAYVEL_HPP + +#include "utilities/utility.h" +#include "jntarray.hpp" +#include "framevel.hpp" + +namespace KDL +{ + + class JntArrayVel + { + public: + JntArray q; + JntArray qdot; + public: + JntArrayVel(unsigned int size); + JntArrayVel(const JntArray& q,const JntArray& qdot); + JntArrayVel(const JntArray& q); + + JntArray value()const; + JntArray deriv()const; + + friend void Add(const JntArrayVel& src1,const JntArrayVel& src2,JntArrayVel& dest); + friend void Add(const JntArrayVel& src1,const JntArray& src2,JntArrayVel& dest); + friend void Subtract(const JntArrayVel& src1,const JntArrayVel& src2,JntArrayVel& dest); + friend void Subtract(const JntArrayVel& src1,const JntArray& src2,JntArrayVel& dest); + friend void Multiply(const JntArrayVel& src,const double& factor,JntArrayVel& dest); + friend void Multiply(const JntArrayVel& src,const doubleVel& factor,JntArrayVel& dest); + friend void Divide(const JntArrayVel& src,const double& factor,JntArrayVel& dest); + friend void Divide(const JntArrayVel& src,const doubleVel& factor,JntArrayVel& dest); + friend void SetToZero(JntArrayVel& array); + friend bool Equal(const JntArrayVel& src1,const JntArrayVel& src2,double eps=epsilon); + + }; +} + +#endif diff --git a/intern/itasc/kdl/joint.cpp b/intern/itasc/kdl/joint.cpp new file mode 100644 index 00000000000..dc5f17e5bf7 --- /dev/null +++ b/intern/itasc/kdl/joint.cpp @@ -0,0 +1,153 @@ +// Copyright (C) 2007 Ruben Smits + +// Version: 1.0 +// Author: Ruben Smits +// Maintainer: Ruben Smits +// URL: http://www.orocos.org/kdl + +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. + +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+ +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +#include "joint.hpp" + +namespace KDL { + + Joint::Joint(const JointType& _type, const double& _scale, const double& _offset, + const double& _inertia, const double& _damping, const double& _stiffness): + type(_type),scale(_scale),offset(_offset),inertia(_inertia),damping(_damping),stiffness(_stiffness) + { + // for sphere and swing, offset is not used, assume no offset + } + + Joint::Joint(const Joint& in): + type(in.type),scale(in.scale),offset(in.offset), + inertia(in.inertia),damping(in.damping),stiffness(in.stiffness) + { + } + + Joint& Joint::operator=(const Joint& in) + { + type=in.type; + scale=in.scale; + offset=in.offset; + inertia=in.inertia; + damping=in.damping; + stiffness=in.stiffness; + return *this; + } + + + Joint::~Joint() + { + } + + Frame Joint::pose(const double& q)const + { + + switch(type){ + case RotX: + return Frame(Rotation::RotX(scale*q+offset)); + break; + case RotY: + return Frame(Rotation::RotY(scale*q+offset)); + break; + case RotZ: + return Frame(Rotation::RotZ(scale*q+offset)); + break; + case TransX: + return Frame(Vector(scale*q+offset,0.0,0.0)); + break; + case TransY: + return Frame(Vector(0.0,scale*q+offset,0.0)); + break; + case TransZ: + return Frame(Vector(0.0,0.0,scale*q+offset)); + break; + case Sphere: + // the joint angles represent a rotation vector expressed in the base frame of the joint + // (= the frame you get when there is no offset nor rotation) + return Frame(Rot(Vector((&q)[0], (&q)[1], (&q)[2]))); + break; + case Swing: + // the joint angles represent a 2D rotation vector in the XZ planee of the base frame of the joint + // (= the frame you get when there is no offset nor rotation) + return Frame(Rot(Vector((&q)[0], 0.0, (&q)[1]))); + break; + default: + return Frame::Identity(); + break; + } + } + + Twist Joint::twist(const double& qdot, int dof)const + { + switch(type){ + case RotX: + return Twist(Vector(0.0,0.0,0.0),Vector(scale*qdot,0.0,0.0)); + break; + case RotY: + return Twist(Vector(0.0,0.0,0.0),Vector(0.0,scale*qdot,0.0)); + break; + case RotZ: + return Twist(Vector(0.0,0.0,0.0),Vector(0.0,0.0,scale*qdot)); + break; + case TransX: + return Twist(Vector(scale*qdot,0.0,0.0),Vector(0.0,0.0,0.0)); + break; + case TransY: + return Twist(Vector(0.0,scale*qdot,0.0),Vector(0.0,0.0,0.0)); + break; + case TransZ: + return Twist(Vector(0.0,0.0,scale*qdot),Vector(0.0,0.0,0.0)); + break; + case Swing: + switch (dof) { + case 0: + return Twist(Vector(0.0,0.0,0.0),Vector(scale*qdot,0.0,0.0)); + case 1: + return Twist(Vector(0.0,0.0,0.0),Vector(0.0,0.0,scale*qdot)); + } + return Twist::Zero(); + case Sphere: + switch (dof) { + case 0: + return Twist(Vector(0.0,0.0,0.0),Vector(scale*qdot,0.0,0.0)); + case 1: + return Twist(Vector(0.0,0.0,0.0),Vector(0.0,scale*qdot,0.0)); + case 2: + return Twist(Vector(0.0,0.0,0.0),Vector(0.0,0.0,scale*qdot)); + } + return Twist::Zero(); + default: + return Twist::Zero(); + break; + } + } + + unsigned int Joint::getNDof() const + { + switch (type) { + case Sphere: + return 3; + case Swing: + return 2; + case None: + return 0; + default: + return 1; + } + } + +} // end of namespace KDL + diff --git a/intern/itasc/kdl/joint.hpp b/intern/itasc/kdl/joint.hpp new file mode 100644 index 00000000000..a1291509f0f --- /dev/null +++ b/intern/itasc/kdl/joint.hpp @@ -0,0 +1,138 @@ +// Copyright (C) 
2007 Ruben Smits + +// Version: 1.0 +// Author: Ruben Smits +// Maintainer: Ruben Smits +// URL: http://www.orocos.org/kdl + +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. + +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. + +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +#ifndef KDL_JOINT_HPP +#define KDL_JOINT_HPP + +#include "frames.hpp" +#include + +namespace KDL { + + /** + * \brief This class encapsulates a simple joint, that is with one + * parameterized degree of freedom and with scalar dynamic properties. + * + * A simple joint is described by the following properties : + * - scale: ratio between motion input and motion output + * - offset: between the "physical" and the "logical" zero position. + * - type: revolute or translational, along one of the basic frame axes + * - inertia, stiffness and damping: scalars representing the physical + * effects along/about the joint axis only. + * + * @ingroup KinematicFamily + */ + class Joint { + public: + typedef enum { RotX,RotY,RotZ,TransX,TransY,TransZ,Sphere,Swing,None} JointType; + /** + * Constructor of a joint. + * + * @param type type of the joint, default: Joint::None + * @param scale scale between joint input and actual geometric + * movement, default: 1 + * @param offset offset between joint input and actual + * geometric input, default: 0 + * @param inertia 1D inertia along the joint axis, default: 0 + * @param damping 1D damping along the joint axis, default: 0 + * @param stiffness 1D stiffness along the joint axis, + * default: 0 + */ + Joint(const JointType& type=None,const double& scale=1,const double& offset=0, + const double& inertia=0,const double& damping=0,const double& stiffness=0); + Joint(const Joint& in); + + Joint& operator=(const Joint& arg); + + /** + * Request the 6D-pose between the beginning and the end of + * the joint at joint position q + * + * @param q the 1D joint position + * + * @return the resulting 6D-pose + */ + Frame pose(const double& q)const; + /** + * Request the resulting 6D-velocity with a joint velocity qdot + * + * @param qdot the 1D joint velocity + * + * @return the resulting 6D-velocity + */ + Twist twist(const double& qdot, int dof=0)const; + + /** + * Request the type of the joint. + * + * @return const reference to the type + */ + const JointType& getType() const + { + return type; + }; + + /** + * Request the stringified type of the joint. 
+ * + * @return const string + */ + const std::string getTypeName() const + { + switch (type) { + case RotX: + return "RotX"; + case RotY: + return "RotY"; + case RotZ: + return "RotZ"; + case TransX: + return "TransX"; + case TransY: + return "TransY"; + case TransZ: + return "TransZ"; + case Sphere: + return "Sphere"; + case Swing: + return "Swing"; + case None: + return "None"; + default: + return "None"; + } + }; + unsigned int getNDof() const; + + virtual ~Joint(); + + private: + Joint::JointType type; + double scale; + double offset; + double inertia; + double damping; + double stiffness; + }; + +} // end of namespace KDL + +#endif diff --git a/intern/itasc/kdl/kinfam_io.cpp b/intern/itasc/kdl/kinfam_io.cpp new file mode 100644 index 00000000000..900e2e101a9 --- /dev/null +++ b/intern/itasc/kdl/kinfam_io.cpp @@ -0,0 +1,101 @@ +// Copyright (C) 2007 Ruben Smits + +// Version: 1.0 +// Author: Ruben Smits +// Maintainer: Ruben Smits +// URL: http://www.orocos.org/kdl + +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. + +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. + +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +#include "kinfam_io.hpp" +#include "frames_io.hpp" + +namespace KDL { +std::ostream& operator <<(std::ostream& os, const Joint& joint) { + return os << joint.getTypeName(); +} + +std::istream& operator >>(std::istream& is, Joint& joint) { + return is; +} + +std::ostream& operator <<(std::ostream& os, const Segment& segment) { + os << "[" << segment.getJoint() << ",\n" << segment.getFrameToTip() << "]"; + return os; +} + +std::istream& operator >>(std::istream& is, Segment& segment) { + return is; +} + +std::ostream& operator <<(std::ostream& os, const Chain& chain) { + os << "["; + for (unsigned int i = 0; i < chain.getNrOfSegments(); i++) + os << chain.getSegment(i) << "\n"; + os << "]"; + return os; +} + +std::istream& operator >>(std::istream& is, Chain& chain) { + return is; +} + +std::ostream& operator <<(std::ostream& os, const Tree& tree) { + SegmentMap::const_iterator root = tree.getSegment("root"); + return os << root; +} + +std::ostream& operator <<(std::ostream& os, SegmentMap::const_iterator root) { + //os<first<<": "<second.segment<<"\n"; + os << root->first<<"(q_nr: "<second.q_nr<<")"<<"\n \t"; + for (unsigned int i = 0; i < root->second.children.size(); i++) { + os <<(root->second.children[i])<<"\t"; + } + return os << "\n"; +} + +std::istream& operator >>(std::istream& is, Tree& tree) { + return is; +} + +std::ostream& operator <<(std::ostream& os, const JntArray& array) { + os << "["; + for (unsigned int i = 0; i < array.rows(); i++) + os << std::setw(KDL_FRAME_WIDTH) << array(i); + os << "]"; + return os; +} + +std::istream& operator >>(std::istream& is, JntArray& array) { + return is; +} + +std::ostream& operator <<(std::ostream& os, const Jacobian& jac) { + os << "["; + for (unsigned int i = 0; i < jac.rows(); i++) { + for (unsigned int j = 0; j < jac.columns(); j++) + 
os << std::setw(KDL_FRAME_WIDTH) << jac(i, j); + os << std::endl; + } + os << "]"; + return os; +} + +std::istream& operator >>(std::istream& is, Jacobian& jac) { + return is; +} + +} + diff --git a/intern/itasc/kdl/kinfam_io.hpp b/intern/itasc/kdl/kinfam_io.hpp new file mode 100644 index 00000000000..a8dbfd1c5dc --- /dev/null +++ b/intern/itasc/kdl/kinfam_io.hpp @@ -0,0 +1,70 @@ +// Copyright (C) 2007 Ruben Smits + +// Version: 1.0 +// Author: Ruben Smits +// Maintainer: Ruben Smits +// URL: http://www.orocos.org/kdl + +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. + +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. + +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +#ifndef KDL_KINFAM_IO_HPP +#define KDL_KINFAM_IO_HPP + +#include +#include + +#include "joint.hpp" +#include "segment.hpp" +#include "chain.hpp" +#include "jntarray.hpp" +#include "jacobian.hpp" +#include "tree.hpp" + +namespace KDL { +std::ostream& operator <<(std::ostream& os, const Joint& joint); +std::istream& operator >>(std::istream& is, Joint& joint); +std::ostream& operator <<(std::ostream& os, const Segment& segment); +std::istream& operator >>(std::istream& is, Segment& segment); +std::ostream& operator <<(std::ostream& os, const Chain& chain); +std::istream& operator >>(std::istream& is, Chain& chain); + +std::ostream& operator <<(std::ostream& os, const Tree& tree); +std::istream& operator >>(std::istream& is, Tree& tree); + +std::ostream& operator <<(std::ostream& os, SegmentMap::const_iterator it); + +std::ostream& operator <<(std::ostream& os, const JntArray& array); +std::istream& operator >>(std::istream& is, JntArray& array); +std::ostream& operator <<(std::ostream& os, const Jacobian& jac); +std::istream& operator >>(std::istream& is, Jacobian& jac); + +template +std::ostream& operator<<(std::ostream& os, const std::vector& vec) { + os << "["; + for (unsigned int i = 0; i < vec.size(); i++) + os << vec[i] << " "; + os << "]"; + return os; +} +; + +template +std::istream& operator >>(std::istream& is, std::vector& vec) { + return is; +} +; +} +#endif + diff --git a/intern/itasc/kdl/segment.cpp b/intern/itasc/kdl/segment.cpp new file mode 100644 index 00000000000..02f71d5e9f1 --- /dev/null +++ b/intern/itasc/kdl/segment.cpp @@ -0,0 +1,68 @@ +// Version: 1.0 +// Author: Ruben Smits +// Maintainer: Ruben Smits +// URL: http://www.orocos.org/kdl + +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. + +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. 
+ +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +#include "segment.hpp" + +namespace KDL { + + Segment::Segment(const Joint& _joint, const Frame& _f_tip, const Inertia& _M): + joint(_joint),M(_M), + f_tip(_f_tip) + { + } + + Segment::Segment(const Segment& in): + joint(in.joint),M(in.M), + f_tip(in.f_tip) + { + } + + Segment& Segment::operator=(const Segment& arg) + { + joint=arg.joint; + M=arg.M; + f_tip=arg.f_tip; + return *this; + } + + Segment::~Segment() + { + } + + Frame Segment::pose(const double& q)const + { + return joint.pose(q)*f_tip; + } + + Twist Segment::twist(const double& q, const double& qdot, int dof)const + { + return joint.twist(qdot, dof).RefPoint(pose(q).p); + } + + Twist Segment::twist(const Vector& p, const double& qdot, int dof)const + { + return joint.twist(qdot, dof).RefPoint(p); + } + + Twist Segment::twist(const Frame& f, const double& qdot, int dof)const + { + return (f.M*joint.twist(qdot, dof)).RefPoint(f.p); + } +}//end of namespace KDL + diff --git a/intern/itasc/kdl/segment.hpp b/intern/itasc/kdl/segment.hpp new file mode 100644 index 00000000000..7c82ab418fa --- /dev/null +++ b/intern/itasc/kdl/segment.hpp @@ -0,0 +1,149 @@ +// Copyright (C) 2007 Ruben Smits + +// Version: 1.0 +// Author: Ruben Smits +// Maintainer: Ruben Smits +// URL: http://www.orocos.org/kdl + +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. + +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. + +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + +#ifndef KDL_SEGMENT_HPP +#define KDL_SEGMENT_HPP + +#include "frames.hpp" +#include "inertia.hpp" +#include "joint.hpp" +#include + +namespace KDL { + + /** + * \brief This class encapsulates a simple segment, that is a "rigid + * body" (i.e., a frame and an inertia) with a joint and with + * "handles", root and tip to connect to other segments. + * + * A simple segment is described by the following properties : + * - Joint + * - inertia: of the rigid body part of the Segment + * - Offset from the end of the joint to the tip of the segment: + * the joint is located at the root of the segment. 
+ * + * @ingroup KinematicFamily + */ + class Segment { + friend class Chain; + private: + Joint joint; + Inertia M; + Frame f_tip; + + public: + /** + * Constructor of the segment + * + * @param joint joint of the segment, default: + * Joint(Joint::None) + * @param f_tip frame from the end of the joint to the tip of + * the segment, default: Frame::Identity() + * @param M rigid body inertia of the segment, default: Inertia::Zero() + */ + Segment(const Joint& joint=Joint(), const Frame& f_tip=Frame::Identity(),const Inertia& M = Inertia::Zero()); + Segment(const Segment& in); + Segment& operator=(const Segment& arg); + + virtual ~Segment(); + + /** + * Request the pose of the segment, given the joint position q. + * + * @param q 1D position of the joint + * + * @return pose from the root to the tip of the segment + */ + Frame pose(const double& q)const; + /** + * Request the 6D-velocity of the tip of the segment, given + * the joint position q and the joint velocity qdot. + * + * @param q ND position of the joint + * @param qdot ND velocity of the joint + * + * @return 6D-velocity of the tip of the segment, expressed + *in the base-frame of the segment(root) and with the tip of + *the segment as reference point. + */ + Twist twist(const double& q,const double& qdot, int dof=0)const; + + /** + * Request the 6D-velocity at a given point p, relative to base frame of the segment + * givven the joint velocity qdot. + * + * @param p reference point + * @param qdot ND velocity of the joint + * + * @return 6D-velocity at a given point p, expressed + * in the base-frame of the segment(root) + */ + Twist twist(const Vector& p, const double& qdot, int dof=0)const; + + /** + * Request the 6D-velocity at a given frame origin, relative to base frame of the segment + * assuming the frame rotation is the rotation of the joint. + * + * @param f joint pose frame + reference point + * @param qdot ND velocity of the joint + * + * @return 6D-velocity at frame reference point, expressed + * in the base-frame of the segment(root) + */ + Twist twist(const Frame& f, const double& qdot, int dof)const; + + /** + * Request the joint of the segment + * + * + * @return const reference to the joint of the segment + */ + const Joint& getJoint()const + { + return joint; + } + /** + * Request the inertia of the segment + * + * + * @return const reference to the inertia of the segment + */ + const Inertia& getInertia()const + { + return M; + } + + /** + * Request the pose from the joint end to the tip of the + *segment. + * + * @return const reference to the joint end - segment tip pose. + */ + const Frame& getFrameToTip()const + { + return f_tip; + } + + }; +}//end of namespace KDL + +#endif diff --git a/intern/itasc/kdl/tree.cpp b/intern/itasc/kdl/tree.cpp new file mode 100644 index 00000000000..f117e54959b --- /dev/null +++ b/intern/itasc/kdl/tree.cpp @@ -0,0 +1,117 @@ +// Copyright (C) 2007 Ruben Smits + +// Version: 1.0 +// Author: Ruben Smits +// Maintainer: Ruben Smits +// URL: http://www.orocos.org/kdl + +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. + +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU
+// Lesser General Public License for more details.
+
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+#include "tree.hpp"
+#include <sstream>
+namespace KDL {
+using namespace std;
+
+Tree::Tree() :
+    nrOfJoints(0), nrOfSegments(0) {
+    segments.insert(make_pair("root", TreeElement::Root()));
+}
+
+Tree::Tree(const Tree& in) {
+    segments.clear();
+    nrOfSegments = 0;
+    nrOfJoints = 0;
+
+    segments.insert(make_pair("root", TreeElement::Root()));
+    this->addTree(in, "", "root");
+
+}
+
+Tree& Tree::operator=(const Tree& in) {
+    segments.clear();
+    nrOfSegments = 0;
+    nrOfJoints = 0;
+
+    segments.insert(make_pair("root", TreeElement::Root()));
+    this->addTree(in, "", "root");
+    return *this;
+}
+
+bool Tree::addSegment(const Segment& segment, const std::string& segment_name,
+        const std::string& hook_name) {
+    SegmentMap::iterator parent = segments.find(hook_name);
+    //check if parent exists
+    if (parent == segments.end())
+        return false;
+    pair<SegmentMap::iterator, bool> retval;
+    //insert new element
+    retval = segments.insert(make_pair(segment_name, TreeElement(segment,
+            parent, nrOfJoints)));
+    //check if insertion succeeded
+    if (!retval.second)
+        return false;
+    //add iterator to new element in parents children list
+    parent->second.children.push_back(retval.first);
+    //increase number of segments
+    nrOfSegments++;
+    //increase number of joints
+    nrOfJoints += segment.getJoint().getNDof();
+    return true;
+}
+
+bool Tree::addChain(const Chain& chain, const std::string& chain_name,
+        const std::string& hook_name) {
+    string parent_name = hook_name;
+    for (unsigned int i = 0; i < chain.getNrOfSegments(); i++) {
+        ostringstream segment_name;
+        segment_name << chain_name << "Segment" << i;
+        if (this->addSegment(chain.getSegment(i), segment_name.str(),
+                parent_name))
+            parent_name = segment_name.str();
+        else
+            return false;
+    }
+    return true;
+}
+
+bool Tree::addTree(const Tree& tree, const std::string& tree_name,
+        const std::string& hook_name) {
+    return this->addTreeRecursive(tree.getSegment("root"), tree_name, hook_name);
+}
+
+bool Tree::addTreeRecursive(SegmentMap::const_iterator root,
+        const std::string& tree_name, const std::string& hook_name) {
+    //get iterator for root-segment
+    SegmentMap::const_iterator child;
+    //try to add all of root's children
+    for (unsigned int i = 0; i < root->second.children.size(); i++) {
+        child = root->second.children[i];
+        //Try to add the child
+        if (this->addSegment(child->second.segment, tree_name + child->first,
+                hook_name)) {
+            //if child is added, add all the child's children
+            if (!(this->addTreeRecursive(child, tree_name, tree_name +
+                    child->first)))
+                //if it didn't work, return false
+                return false;
+        } else
+            //If the child could not be added, return false
+            return false;
+    }
+    return true;
+}
+
+}
+
diff --git a/intern/itasc/kdl/tree.hpp b/intern/itasc/kdl/tree.hpp
new file mode 100644
index 00000000000..bdd3aa94572
--- /dev/null
+++ b/intern/itasc/kdl/tree.hpp
@@ -0,0 +1,167 @@
+// Copyright (C) 2007 Ruben Smits
+
+// Version: 1.0
+// Author: Ruben Smits
+// Maintainer: Ruben Smits
+// URL: http://www.orocos.org/kdl
+
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Lesser General Public License for more details.
+
+// You should have received a copy of the GNU Lesser General Public
+// License along with this library; if not, write to the Free Software
+// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+#ifndef KDL_TREE_HPP
+#define KDL_TREE_HPP
+
+#include "segment.hpp"
+#include "chain.hpp"
+
+#include <string>
+#include <map>
+
+namespace KDL
+{
+    //Forward declaration
+    class TreeElement;
+    typedef std::map<std::string,TreeElement> SegmentMap;
+
+    class TreeElement
+    {
+    private:
+        TreeElement():q_nr(0)
+        {};
+    public:
+        Segment segment;
+        unsigned int q_nr;
+        SegmentMap::const_iterator parent;
+        std::vector<SegmentMap::const_iterator> children;
+        TreeElement(const Segment& segment_in,const SegmentMap::const_iterator& parent_in,unsigned int q_nr_in)
+        {
+            q_nr=q_nr_in;
+            segment=segment_in;
+            parent=parent_in;
+        };
+        static TreeElement Root()
+        {
+            return TreeElement();
+        };
+    };
+
+    /**
+     * \brief This class encapsulates a tree
+     * kinematic interconnection structure. It is built out of segments.
+     *
+     * @ingroup KinematicFamily
+     */
+    class Tree
+    {
+    private:
+        SegmentMap segments;
+        unsigned int nrOfJoints;
+        unsigned int nrOfSegments;
+
+        bool addTreeRecursive(SegmentMap::const_iterator root, const std::string& tree_name, const std::string& hook_name);
+
+    public:
+        /**
+         * The constructor of a tree, a new tree is always empty
+         */
+        Tree();
+        Tree(const Tree& in);
+        Tree& operator= (const Tree& arg);
+
+        /**
+         * Adds a new segment to the end of the segment with
+         * hook_name as segment_name
+         *
+         * @param segment new segment to add
+         * @param segment_name name of the new segment
+         * @param hook_name name of the segment to connect this
+         * segment with.
+         *
+         * @return false if hook_name could not be found.
+         */
+        bool addSegment(const Segment& segment, const std::string& segment_name, const std::string& hook_name);
+
+        /**
+         * Adds a complete chain to the end of the segment with
+         * hook_name as segment_name. Segment i of
+         * the chain will get chain_name+".Segment"+i as segment_name.
+         *
+         * @param chain Chain to add
+         * @param chain_name name of the chain
+         * @param hook_name name of the segment to connect the chain with.
+         *
+         * @return false if hook_name could not be found.
+         */
+        bool addChain(const Chain& chain, const std::string& chain_name, const std::string& hook_name);
+
+        /**
+         * Adds a complete tree to the end of the segment with
+         * hook_name as segment_name. The segments of the tree will get
+         * tree_name+segment_name as segment_name.
+         *
+         * @param tree Tree to add
+         * @param tree_name name of the tree
+         * @param hook_name name of the segment to connect the tree with
+         *
+         * @return false if hook_name could not be found
+         */
+        bool addTree(const Tree& tree, const std::string& tree_name,const std::string& hook_name);
+
+        /**
+         * Request the total number of joints in the tree.\n
+         * Important: It is not the same as the
+         * total number of segments since a segment does not need to have
+         * a joint.
+         *
+         * @return total nr of joints
+         */
+        unsigned int getNrOfJoints()const
+        {
+            return nrOfJoints;
+        };
+
+        /**
+         * Request the total number of segments in the tree.
+         * @return total number of segments
+         */
+        unsigned int getNrOfSegments()const {return nrOfSegments;};
+
+        /**
+         * Request the segment of the tree with name segment_name.
+ * + * @param segment_name the name of the requested segment + * + * @return constant iterator pointing to the requested segment + */ + SegmentMap::const_iterator getSegment(const std::string& segment_name)const + { + return segments.find(segment_name); + }; + + + + const SegmentMap& getSegments()const + { + return segments; + } + + virtual ~Tree(){}; + }; +} +#endif + + + + + diff --git a/intern/itasc/kdl/treefksolver.hpp b/intern/itasc/kdl/treefksolver.hpp new file mode 100644 index 00000000000..22d5400ab0a --- /dev/null +++ b/intern/itasc/kdl/treefksolver.hpp @@ -0,0 +1,110 @@ +// Copyright (C) 2007 Ruben Smits +// Copyright (C) 2008 Julia Jesse + +// Version: 1.0 +// Author: Ruben Smits +// Maintainer: Ruben Smits +// URL: http://www.orocos.org/kdl + +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. + +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. + +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +#ifndef KDL_TREE_FKSOLVER_HPP +#define KDL_TREE_FKSOLVER_HPP + +#include + +#include "tree.hpp" +//#include "framevel.hpp" +//#include "frameacc.hpp" +#include "jntarray.hpp" +//#include "jntarrayvel.hpp" +//#include "jntarrayacc.hpp" + +namespace KDL { + + /** + * \brief This abstract class encapsulates a + * solver for the forward position kinematics for a KDL::Tree. + * + * @ingroup KinematicFamily + */ + + //Forward definition + class TreeFkSolverPos { + public: + /** + * Calculate forward position kinematics for a KDL::Tree, + * from joint coordinates to cartesian pose. + * + * @param q_in input joint coordinates + * @param p_out reference to output cartesian pose + * + * @return if < 0 something went wrong + */ + virtual int JntToCart(const JntArray& q_in, Frame& p_out, const std::string& segmentName, const std::string& baseName)=0; + virtual ~TreeFkSolverPos(){}; + }; + + /** + * \brief This abstract class encapsulates a solver + * for the forward velocity kinematics for a KDL::Tree. + * + * @ingroup KinematicFamily + */ +// class TreeFkSolverVel { +// public: + /** + * Calculate forward position and velocity kinematics, from + * joint coordinates to cartesian coordinates. + * + * @param q_in input joint coordinates (position and velocity) + * @param out output cartesian coordinates (position and velocity) + * + * @return if < 0 something went wrong + */ +// virtual int JntToCart(const JntArrayVel& q_in, FrameVel& out,int segmentNr=-1)=0; + +// virtual ~TreeFkSolverVel(){}; +// }; + + /** + * \brief This abstract class encapsulates a solver + * for the forward acceleration kinematics for a KDL::Tree. 
+ * + * @ingroup KinematicFamily + */ + +// class TreeFkSolverAcc { +// public: + /** + * Calculate forward position, velocity and accelaration + * kinematics, from joint coordinates to cartesian coordinates + * + * @param q_in input joint coordinates (position, velocity and + * acceleration + @param out output cartesian coordinates (position, velocity + * and acceleration + * + * @return if < 0 something went wrong + */ +// virtual int JntToCart(const JntArrayAcc& q_in, FrameAcc& out,int segmentNr=-1)=0; + +// virtual ~TreeFkSolverAcc()=0; +// }; + + +}//end of namespace KDL + +#endif diff --git a/intern/itasc/kdl/treefksolverpos_recursive.cpp b/intern/itasc/kdl/treefksolverpos_recursive.cpp new file mode 100644 index 00000000000..f9dcb336d5d --- /dev/null +++ b/intern/itasc/kdl/treefksolverpos_recursive.cpp @@ -0,0 +1,70 @@ +// Copyright (C) 2007 Ruben Smits +// Copyright (C) 2008 Julia Jesse + +// Version: 1.0 +// Author: Ruben Smits +// Maintainer: Ruben Smits +// URL: http://www.orocos.org/kdl + +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. + +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. + +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +#include "treefksolverpos_recursive.hpp" +#include + +namespace KDL { + + TreeFkSolverPos_recursive::TreeFkSolverPos_recursive(const Tree& _tree): + tree(_tree) + { + } + + int TreeFkSolverPos_recursive::JntToCart(const JntArray& q_in, Frame& p_out, const std::string& segmentName, const std::string& baseName) + { + SegmentMap::const_iterator it = tree.getSegment(segmentName); + SegmentMap::const_iterator baseit = tree.getSegment(baseName); + + if(q_in.rows() != tree.getNrOfJoints()) + return -1; + else if(it == tree.getSegments().end()) //if the segment name is not found + return -2; + else if(baseit == tree.getSegments().end()) //if the base segment name is not found + return -3; + else{ + p_out = recursiveFk(q_in, it, baseit); + return 0; + } + } + + Frame TreeFkSolverPos_recursive::recursiveFk(const JntArray& q_in, const SegmentMap::const_iterator& it, const SegmentMap::const_iterator& baseit) + { + //gets the frame for the current element (segment) + const TreeElement& currentElement = it->second; + + if(it == baseit){ + return KDL::Frame::Identity(); + } + else{ + Frame currentFrame = currentElement.segment.pose(((JntArray&)q_in)(currentElement.q_nr)); + SegmentMap::const_iterator parentIt = currentElement.parent; + return recursiveFk(q_in, parentIt, baseit) * currentFrame; + } + } + + TreeFkSolverPos_recursive::~TreeFkSolverPos_recursive() + { + } + + +} diff --git a/intern/itasc/kdl/treefksolverpos_recursive.hpp b/intern/itasc/kdl/treefksolverpos_recursive.hpp new file mode 100644 index 00000000000..c22fe4af75b --- /dev/null +++ b/intern/itasc/kdl/treefksolverpos_recursive.hpp @@ -0,0 +1,53 @@ +// Copyright (C) 2007 Ruben Smits +// Copyright (C) 2008 Julia Jesse + +// Version: 1.0 +// Author: Ruben Smits +// Maintainer: Ruben Smits +// URL: 
http://www.orocos.org/kdl + +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. + +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. + +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +#ifndef KDLTREEFKSOLVERPOS_RECURSIVE_HPP +#define KDLTREEFKSOLVERPOS_RECURSIVE_HPP + +#include "treefksolver.hpp" + +namespace KDL { + + /** + * Implementation of a recursive forward position kinematics + * algorithm to calculate the position transformation from joint + * space to Cartesian space of a general kinematic tree (KDL::Tree). + * + * @ingroup KinematicFamily + */ + class TreeFkSolverPos_recursive : public TreeFkSolverPos + { + public: + TreeFkSolverPos_recursive(const Tree& tree); + ~TreeFkSolverPos_recursive(); + + virtual int JntToCart(const JntArray& q_in, Frame& p_out, const std::string& segmentName, const std::string& baseName); + + private: + const Tree tree; + + Frame recursiveFk(const JntArray& q_in, const SegmentMap::const_iterator& it, const SegmentMap::const_iterator& baseit); + }; + +} + +#endif diff --git a/intern/itasc/kdl/treejnttojacsolver.cpp b/intern/itasc/kdl/treejnttojacsolver.cpp new file mode 100644 index 00000000000..194f18eb959 --- /dev/null +++ b/intern/itasc/kdl/treejnttojacsolver.cpp @@ -0,0 +1,78 @@ +/* + * TreeJntToJacSolver.cpp + * + * Created on: Nov 27, 2008 + * Author: rubensmits + */ + +#include "treejnttojacsolver.hpp" +#include + +namespace KDL { + +TreeJntToJacSolver::TreeJntToJacSolver(const Tree& tree_in) : + tree(tree_in) { +} + +TreeJntToJacSolver::~TreeJntToJacSolver() { +} + +int TreeJntToJacSolver::JntToJac(const JntArray& q_in, Jacobian& jac, + const std::string& segmentname) { + //First we check all the sizes: + if (q_in.rows() != tree.getNrOfJoints() || jac.columns() + != tree.getNrOfJoints()) + return -1; + + //Lets search the tree-element + SegmentMap::const_iterator it = tree.getSegments().find(segmentname); + + //If segmentname is not inside the tree, back out: + if (it == tree.getSegments().end()) + return -2; + + //Let's make the jacobian zero: + SetToZero(jac); + + SegmentMap::const_iterator root = tree.getSegments().find("root"); + + Frame T_total = Frame::Identity(); + Frame T_local, T_joint; + Twist t_local; + //Lets recursively iterate until we are in the root segment + while (it != root) { + //get the corresponding q_nr for this TreeElement: + unsigned int q_nr = it->second.q_nr; + + //get the pose of the joint. 
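+ // Each iteration of this loop moves one segment closer to the root:
+ // T_total accumulates the pose of the end segment relative to the frame just
+ // above the segment being processed, every DOF of the current joint
+ // contributes a unit twist that is shifted to the end-segment origin before
+ // being stored as a Jacobian column, and the final changeBase call
+ // re-expresses all columns in the root ("base") frame.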
+ T_joint = it->second.segment.getJoint().pose(((JntArray&)q_in)(q_nr)); + // combine with the tip to have the tip pose + T_local = T_joint*it->second.segment.getFrameToTip(); + //calculate new T_end: + T_total = T_local * T_total; + + //get the twist of the segment: + int ndof = it->second.segment.getJoint().getNDof(); + for (int dof=0; dofsecond.segment.twist(T_joint, 1.0, dof); + //transform the endpoint of the local twist to the global endpoint: + t_local = t_local.RefPoint(T_total.p - T_local.p); + //transform the base of the twist to the endpoint + t_local = T_total.M.Inverse(t_local); + //store the twist in the jacobian: + jac.twists[q_nr+dof] = t_local; + } + //goto the parent + it = it->second.parent; + }//endwhile + //Change the base of the complete jacobian from the endpoint to the base + changeBase(jac, T_total.M, jac); + + return 0; + +}//end JntToJac +}//end namespace + diff --git a/intern/itasc/kdl/treejnttojacsolver.hpp b/intern/itasc/kdl/treejnttojacsolver.hpp new file mode 100644 index 00000000000..40977dcd577 --- /dev/null +++ b/intern/itasc/kdl/treejnttojacsolver.hpp @@ -0,0 +1,38 @@ +/* + * TreeJntToJacSolver.hpp + * + * Created on: Nov 27, 2008 + * Author: rubensmits + */ + +#ifndef TREEJNTTOJACSOLVER_HPP_ +#define TREEJNTTOJACSOLVER_HPP_ + +#include "tree.hpp" +#include "jacobian.hpp" +#include "jntarray.hpp" + +namespace KDL { + +class TreeJntToJacSolver { +public: + TreeJntToJacSolver(const Tree& tree); + + virtual ~TreeJntToJacSolver(); + + /* + * Calculate the jacobian for a part of the tree: from a certain segment, given by segmentname to the root. + * The resulting jacobian is expressed in the baseframe of the tree ("root"), the reference point is in the end-segment + */ + + int JntToJac(const JntArray& q_in, Jacobian& jac, + const std::string& segmentname); + +private: + KDL::Tree tree; + +}; + +}//End of namespace + +#endif /* TREEJNTTOJACSOLVER_H_ */ diff --git a/intern/itasc/kdl/utilities/Makefile b/intern/itasc/kdl/utilities/Makefile new file mode 100644 index 00000000000..8ee08089e10 --- /dev/null +++ b/intern/itasc/kdl/utilities/Makefile @@ -0,0 +1,40 @@ +# +# $Id$ +# +# ***** BEGIN GPL LICENSE BLOCK ***** +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +# +# The Original Code is Copyright (C) 2001-2002 by NaN Holding BV. +# All rights reserved. +# +# The Original Code is: all of this file. +# +# Contributor(s): Hans Lambermont +# +# ***** END GPL LICENSE BLOCK ***** +# iksolver main makefile. +# + +include nan_definitions.mk + +LIBNAME = itasc_kdl_util +# Same dir than parent (itasc instead of $(LIBNAME)) +DIR = $(OCGDIR)/intern/itasc + +include nan_compile.mk + +CPPFLAGS += -I. 
+CPPFLAGS += -I../../../../extern/Eigen2
diff --git a/intern/itasc/kdl/utilities/error.h b/intern/itasc/kdl/utilities/error.h
new file mode 100644
index 00000000000..868daef3db3
--- /dev/null
+++ b/intern/itasc/kdl/utilities/error.h
@@ -0,0 +1,245 @@
+/***************************************************************************
+ tag: Erwin Aertbelien Mon Jan 10 16:38:38 CET 2005 error.h
+
+ error.h - description
+ -------------------
+ begin : Mon January 10 2005
+ copyright : (C) 2005 Erwin Aertbelien
+ email : erwin.aertbelien@mech.kuleuven.ac.be
+
+ ***************************************************************************
+ * This library is free software; you can redistribute it and/or *
+ * modify it under the terms of the GNU Lesser General Public *
+ * License as published by the Free Software Foundation; either *
+ * version 2.1 of the License, or (at your option) any later version. *
+ * *
+ * This library is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
+ * Lesser General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU Lesser General Public *
+ * License along with this library; if not, write to the Free Software *
+ * Foundation, Inc., 59 Temple Place, *
+ * Suite 330, Boston, MA 02111-1307 USA *
+ * *
+ ***************************************************************************/
+
+
+/*****************************************************************************
+ * \file
+ * Defines the exception classes that can be thrown
+ * \author
+ * Erwin Aertbelien, Div. PMA, Dep. of Mech. Eng., K.U.Leuven
+ *
+ * \version
+ * ORO_Geometry V0.2
+ *
+ * \par History
+ * - $log$
+ *
+ * \par Release
+ * $Id$
+ * $Name: $
+ ****************************************************************************/
+#ifndef ERROR_H_84822 // to make it unique, a random number
+#define ERROR_H_84822
+
+#include "utility.h"
+#include <string>
+
+namespace KDL {
+
+/**
+ * Base class for errors generated by ORO_Geometry
+ */
+class Error {
+public:
+    /** Returns a description string describing the error.
+     * The returned pointer is only guaranteed to exist as long as
+     * the Error object exists.
+ */ + virtual ~Error() {} + virtual const char* Description() const {return "Unspecified Error\n";} + + virtual int GetType() const {return 0;} +}; + + +class Error_IO : public Error { + std::string msg; + int typenr; +public: + Error_IO(const std::string& _msg="Unspecified I/O Error",int typenr=0):msg(_msg) {} + virtual const char* Description() const {return msg.c_str();} + virtual int GetType() const {return typenr;} +}; +class Error_BasicIO : public Error_IO {}; +class Error_BasicIO_File : public Error_BasicIO { +public: + virtual const char* Description() const {return "Error while reading stream";} + virtual int GetType() const {return 1;} +}; +class Error_BasicIO_Exp_Delim : public Error_BasicIO { +public: + virtual const char* Description() const {return "Expected Delimiter not encountered";} + virtual int GetType() const {return 2;} +}; +class Error_BasicIO_Not_A_Space : public Error_BasicIO { +public: + virtual const char* Description() const {return "Expected space,tab or newline not encountered";} + virtual int GetType() const {return 3;} +}; +class Error_BasicIO_Unexpected : public Error_BasicIO { +public: + virtual const char* Description() const {return "Unexpected character";} + virtual int GetType() const {return 4;} +}; + +class Error_BasicIO_ToBig : public Error_BasicIO { +public: + virtual const char* Description() const {return "Word that is read out of stream is bigger than maxsize";} + virtual int GetType() const {return 5;} +}; + +class Error_BasicIO_Not_Opened : public Error_BasicIO { +public: + virtual const char* Description() const {return "File cannot be opened";} + virtual int GetType() const {return 6;} +}; +class Error_FrameIO : public Error_IO {}; +class Error_Frame_Vector_Unexpected_id : public Error_FrameIO { +public: + virtual const char* Description() const {return "Unexpected identifier, expecting a vector (explicit or ZERO)";} + virtual int GetType() const {return 101;} +}; +class Error_Frame_Frame_Unexpected_id : public Error_FrameIO { +public: + virtual const char* Description() const {return "Unexpected identifier, expecting a Frame (explicit or DH)";} + virtual int GetType() const {return 102;} +}; +class Error_Frame_Rotation_Unexpected_id : public Error_FrameIO { +public: + virtual const char* Description() const {return "Unexpected identifier, expecting a Rotation (explicit or EULERZYX, EULERZYZ, RPY,ROT,IDENTITY)";} + virtual int GetType() const {return 103;} +}; +class Error_ChainIO : public Error {}; +class Error_Chain_Unexpected_id : public Error_ChainIO { +public: + virtual const char* Description() const {return "Unexpected identifier, expecting TRANS or ROT";} + virtual int GetType() const {return 201;} +}; +//! Error_Redundancy indicates an error that occured during solving for redundancy. 
+class Error_RedundancyIO:public Error_IO {}; +class Error_Redundancy_Illegal_Resolutiontype : public Error_RedundancyIO { +public: + virtual const char* Description() const {return "Illegal Resolutiontype is used in I/O with ResolutionTask";} + virtual int GetType() const {return 301;} +}; +class Error_Redundancy:public Error {}; +class Error_Redundancy_Unavoidable : public Error_Redundancy { +public: + virtual const char* Description() const {return "Joint limits cannot be avoided";} + virtual int GetType() const {return 1002;} +}; +class Error_Redundancy_Low_Manip: public Error_Redundancy { +public: + virtual const char* Description() const {return "Manipulability is very low";} + virtual int GetType() const {return 1003;} +}; +class Error_MotionIO : public Error {}; +class Error_MotionIO_Unexpected_MotProf : public Error_MotionIO { +public: + virtual const char* Description() const { return "Wrong keyword while reading motion profile";} + virtual int GetType() const {return 2001;} +}; +class Error_MotionIO_Unexpected_Traj : public Error_MotionIO { +public: + virtual const char* Description() const { return "Trajectory type keyword not known";} + virtual int GetType() const {return 2002;} +}; + +class Error_MotionPlanning : public Error {}; + +class Error_MotionPlanning_Circle_ToSmall : public Error_MotionPlanning { +public: + virtual const char* Description() const { return "Circle : radius is to small";} + virtual int GetType() const {return 3001;} +}; + +class Error_MotionPlanning_Circle_No_Plane : public Error_MotionPlanning { +public: + virtual const char* Description() const { return "Circle : Plane for motion is not properly defined";} + virtual int GetType() const {return 3002;} +}; + +class Error_MotionPlanning_Incompatible: public Error_MotionPlanning { +public: + virtual const char* Description() const { return "Acceleration of a rectangular velocityprofile cannot be used";} + virtual int GetType() const {return 3003;} +}; + +class Error_MotionPlanning_Not_Feasible: public Error_MotionPlanning { +public: + virtual const char* Description() const { return "Motion Profile with requested parameters is not feasible";} + virtual int GetType() const {return 3004;} +}; + +class Error_MotionPlanning_Not_Applicable: public Error_MotionPlanning { +public: + virtual const char* Description() const { return "Method is not applicable for this derived object";} + virtual int GetType() const {return 3004;} +}; +//! Abstract subclass of all errors that can be thrown by Adaptive_Integrator +class Error_Integrator : public Error {}; + +//! Error_Stepsize_Underflow is thrown if the stepsize becomes to small +class Error_Stepsize_Underflow : public Error_Integrator { +public: + virtual const char* Description() const { return "Stepsize Underflow";} + virtual int GetType() const {return 4001;} +}; + +//! Error_To_Many_Steps is thrown if the number of steps needed to +//! integrate to the desired accuracy becomes to big. +class Error_To_Many_Steps : public Error_Integrator { +public: + virtual const char* Description() const { return "To many steps"; } + virtual int GetType() const {return 4002;} +}; + +//! 
Error_Stepsize_To_Small is thrown if the stepsize becomes to small +class Error_Stepsize_To_Small : public Error_Integrator { +public: + virtual const char* Description() const { return "Stepsize to small"; } + virtual int GetType() const {return 4003;} +}; + +class Error_Criterium : public Error {}; + +class Error_Criterium_Unexpected_id: public Error_Criterium { +public: + virtual const char* Description() const { return "Unexpected identifier while reading a criterium"; } + virtual int GetType() const {return 5001;} +}; + +class Error_Limits : public Error {}; + +class Error_Limits_Unexpected_id: public Error_Limits { +public: + virtual const char* Description() const { return "Unexpected identifier while reading a jointlimits"; } + virtual int GetType() const {return 6001;} +}; + + +class Error_Not_Implemented: public Error { +public: + virtual const char* Description() const { return "The requested object/method/function is not implemented"; } + virtual int GetType() const {return 7000;} +}; + + + +} + +#endif diff --git a/intern/itasc/kdl/utilities/error_stack.cpp b/intern/itasc/kdl/utilities/error_stack.cpp new file mode 100644 index 00000000000..d55308c7346 --- /dev/null +++ b/intern/itasc/kdl/utilities/error_stack.cpp @@ -0,0 +1,59 @@ +/***************************************************************************** + * \author + * Erwin Aertbelien, Div. PMA, Dep. of Mech. Eng., K.U.Leuven + * + * \version + * ORO_Geometry V0.2 + * + * \par History + * - $log$ + * + * \par Release + * $Id$ + * $Name: $ + ****************************************************************************/ + + +#include "error_stack.h" +#include +#include +#include +#include + +namespace KDL { + +// Trace of the call stack of the I/O routines to help user +// interprete error messages from I/O +typedef std::stack ErrorStack; + +ErrorStack errorstack; +// should be in Thread Local Storage if this gets multithreaded one day... + + +void IOTrace(const std::string& description) { + errorstack.push(description); +} + + +void IOTracePop() { + errorstack.pop(); +} + +void IOTraceOutput(std::ostream& os) { + while (!errorstack.empty()) { + os << errorstack.top().c_str() << std::endl; + errorstack.pop(); + } +} + + +void IOTracePopStr(char* buffer,int size) { + if (errorstack.empty()) { + *buffer = 0; + return; + } + strncpy(buffer,errorstack.top().c_str(),size); + errorstack.pop(); +} + +} diff --git a/intern/itasc/kdl/utilities/error_stack.h b/intern/itasc/kdl/utilities/error_stack.h new file mode 100644 index 00000000000..918bc0786a6 --- /dev/null +++ b/intern/itasc/kdl/utilities/error_stack.h @@ -0,0 +1,70 @@ +/*************************************************************************** + tag: Erwin Aertbelien Mon Jan 10 16:38:39 CET 2005 error_stack.h + + error_stack.h - description + ------------------- + begin : Mon January 10 2005 + copyright : (C) 2005 Erwin Aertbelien + email : erwin.aertbelien@mech.kuleuven.ac.be + + *************************************************************************** + * This library is free software; you can redistribute it and/or * + * modify it under the terms of the GNU Lesser General Public * + * License as published by the Free Software Foundation; either * + * version 2.1 of the License, or (at your option) any later version. * + * * + * This library is distributed in the hope that it will be useful, * + * but WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU * + * Lesser General Public License for more details. * + * * + * You should have received a copy of the GNU Lesser General Public * + * License along with this library; if not, write to the Free Software * + * Foundation, Inc., 59 Temple Place, * + * Suite 330, Boston, MA 02111-1307 USA * + * * + ***************************************************************************/ + + +/** + * \file + * \author Erwin Aertbelien, Div. PMA, Dep. of Mech. Eng., K.U.Leuven + * \version + * ORO_Geometry V0.2 + * + * \par history + * - changed layout of the comments to accomodate doxygen + */ +#ifndef ERROR_STACK_H +#define ERROR_STACK_H + +#include "utility.h" +#include "utility_io.h" +#include + + +namespace KDL { + +/* + * \todo + * IOTrace-routines store in static memory, should be in thread-local memory. + * pushes a description of the current routine on the IO-stack trace + */ +void IOTrace(const std::string& description); + +//! pops a description of the IO-stack +void IOTracePop(); + + +//! outputs the IO-stack to a stream to provide a better errormessage. +void IOTraceOutput(std::ostream& os); + +//! outputs one element of the IO-stack to the buffer (maximally size chars) +//! returns empty string if no elements on the stack. +void IOTracePopStr(char* buffer,int size); + + +} + +#endif + diff --git a/intern/itasc/kdl/utilities/kdl-config.h b/intern/itasc/kdl/utilities/kdl-config.h new file mode 100644 index 00000000000..4d2df2df6c5 --- /dev/null +++ b/intern/itasc/kdl/utilities/kdl-config.h @@ -0,0 +1,33 @@ +/* Copyright (C) 2007 Ruben Smits */ + +/* Version: 1.0 */ +/* Author: Ruben Smits */ +/* Maintainer: Ruben Smits */ +/* URL: http://www.orocos.org/kdl */ + +/* This library is free software; you can redistribute it and/or */ +/* modify it under the terms of the GNU Lesser General Public */ +/* License as published by the Free Software Foundation; either */ +/* version 2.1 of the License, or (at your option) any later version. */ + +/* This library is distributed in the hope that it will be useful, */ +/* but WITHOUT ANY WARRANTY; without even the implied warranty of */ +/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU */ +/* Lesser General Public License for more details. */ + +/* You should have received a copy of the GNU Lesser General Public */ +/* License along with this library; if not, write to the Free Software */ +/* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ + + +/* Methods are inlined */ +#define KDL_INLINE 1 + +/* Column width that is used form printing frames */ +#define KDL_FRAME_WIDTH 12 + +/* Indices are checked when accessing members of the objects */ +#define KDL_INDEX_CHECK 1 + +/* use KDL implementation for == operator */ +#define KDL_USE_EQUAL 1 diff --git a/intern/itasc/kdl/utilities/rall1d.h b/intern/itasc/kdl/utilities/rall1d.h new file mode 100644 index 00000000000..98bd4385d1e --- /dev/null +++ b/intern/itasc/kdl/utilities/rall1d.h @@ -0,0 +1,478 @@ + +/***************************************************************************** + * \file + * class for automatic differentiation on scalar values and 1st + * derivatives . + * + * \author + * Erwin Aertbelien, Div. PMA, Dep. of Mech. Eng., K.U.Leuven + * + * \version + * ORO_Geometry V0.2 + * + * \par Note + * VC6++ contains a bug, concerning the use of inlined friend functions + * in combination with namespaces. So, try to avoid inlined friend + * functions ! 
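Rall1d implements forward-mode automatic differentiation: each object carries a value t and a gradient grad, and every operator and math function in this header propagates both through the chain rule. A minimal sketch of the intended use (not from this patch), instantiated with plain doubles:

#include <iostream>
#include "rall1d.h"

int main()
{
    typedef KDL::Rall1d<double, double, double> AD;

    // Seed the independent variable: value 2.0, derivative dx/dx = 1.0.
    AD x(2.0, 1.0);

    // Value and first derivative of f(x) = x*sin(x) are propagated together.
    AD y = x * sin(x);

    std::cout << "f(2)  = " << y.value() << "\n"    // 2*sin(2)
              << "f'(2) = " << y.deriv() << "\n";   // sin(2) + 2*cos(2)
    return 0;
}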
+ * + * \par History + * - $log$ + * + * \par Release + * $Id$ + * $Name: $ + ****************************************************************************/ + +#ifndef Rall1D_H +#define Rall1D_H +#include +#include "utility.h" + +namespace KDL { +/** + * Rall1d contains a value, and its gradient, and defines an algebraic structure on this pair. + * This template class has 3 template parameters : + * - T contains the type of the value. + * - V contains the type of the gradient (can be a vector-like type). + * - S defines a scalar type that can operate on Rall1d. This is the type that + * is used to give back values of Norm() etc. + * + * S is usefull when you recurse a Rall1d object into itself to create a 2nd, 3th, 4th,.. + * derivatives. (e.g. Rall1d< Rall1d, Rall1d, double> ). + * + * S is always passed by value. + * + * \par Class Type + * Concrete implementation + */ +template +class Rall1d + { + public: + typedef T valuetype; + typedef V gradienttype; + typedef S scalartype; + public : + T t; //!< value + V grad; //!< gradient + public : + INLINE Rall1d() {} + + T value() const { + return t; + } + V deriv() const { + return grad; + } + + explicit INLINE Rall1d(typename TI::Arg c) + {t=T(c);SetToZero(grad);} + + INLINE Rall1d(typename TI::Arg tn, typename TI::Arg afg):t(tn),grad(afg) {} + + INLINE Rall1d(const Rall1d& r):t(r.t),grad(r.grad) {} + //if one defines this constructor, it's better optimized then the + //automatically generated one ( this one set's up a loop to copy + // word by word. + + INLINE T& Value() { + return t; + } + + INLINE V& Gradient() { + return grad; + } + + INLINE static Rall1d Zero() { + Rall1d tmp; + SetToZero(tmp); + return tmp; + } + INLINE static Rall1d Identity() { + Rall1d tmp; + SetToIdentity(tmp); + return tmp; + } + + INLINE Rall1d& operator =(S c) + {t=c;SetToZero(grad);return *this;} + + INLINE Rall1d& operator =(const Rall1d& r) + {t=r.t;grad=r.grad;return *this;} + + INLINE Rall1d& operator /=(const Rall1d& rhs) + { + grad = LinComb(rhs.t,grad,-t,rhs.grad) / (rhs.t*rhs.t); + t /= rhs.t; + return *this; + } + + INLINE Rall1d& operator *=(const Rall1d& rhs) + { + LinCombR(rhs.t,grad,t,rhs.grad,grad); + t *= rhs.t; + return *this; + } + + INLINE Rall1d& operator +=(const Rall1d& rhs) + { + grad +=rhs.grad; + t +=rhs.t; + return *this; + } + + INLINE Rall1d& operator -=(const Rall1d& rhs) + { + grad -= rhs.grad; + t -= rhs.t; + return *this; + } + + INLINE Rall1d& operator /=(S rhs) + { + grad /= rhs; + t /= rhs; + return *this; + } + + INLINE Rall1d& operator *=(S rhs) + { + grad *= rhs; + t *= rhs; + return *this; + } + + INLINE Rall1d& operator +=(S rhs) + { + t += rhs; + return *this; + } + + INLINE Rall1d& operator -=(S rhs) + { + t -= rhs; + return *this; + } + + + + // = operators + /* gives warnings on cygwin + + template + friend INLINE Rall1d operator /(const Rall1d& lhs,const Rall1d& rhs); + + friend INLINE Rall1d operator *(const Rall1d& lhs,const Rall1d& rhs); + friend INLINE Rall1d operator +(const Rall1d& lhs,const Rall1d& rhs); + friend INLINE Rall1d operator -(const Rall1d& lhs,const Rall1d& rhs); + friend INLINE Rall1d operator -(const Rall1d& arg); + friend INLINE Rall1d operator *(S s,const Rall1d& v); + friend INLINE Rall1d operator *(const Rall1d& v,S s); + friend INLINE Rall1d operator +(S s,const Rall1d& v); + friend INLINE Rall1d operator +(const Rall1d& v,S s); + friend INLINE Rall1d operator -(S s,const Rall1d& v); + friend INLINE Rall1d operator -(const Rall1d& v,S s); + friend INLINE Rall1d operator /(S s,const Rall1d& v); 
+ friend INLINE Rall1d operator /(const Rall1d& v,S s); + + // = Mathematical functions that operate on Rall1d objects + friend INLINE Rall1d exp(const Rall1d& arg); + friend INLINE Rall1d log(const Rall1d& arg); + friend INLINE Rall1d sin(const Rall1d& arg); + friend INLINE Rall1d cos(const Rall1d& arg); + friend INLINE Rall1d tan(const Rall1d& arg); + friend INLINE Rall1d sinh(const Rall1d& arg); + friend INLINE Rall1d cosh(const Rall1d& arg); + friend INLINE Rall1d sqr(const Rall1d& arg); + friend INLINE Rall1d pow(const Rall1d& arg,double m) ; + friend INLINE Rall1d sqrt(const Rall1d& arg); + friend INLINE Rall1d atan(const Rall1d& x); + friend INLINE Rall1d hypot(const Rall1d& y,const Rall1d& x); + friend INLINE Rall1d asin(const Rall1d& x); + friend INLINE Rall1d acos(const Rall1d& x); + friend INLINE Rall1d abs(const Rall1d& x); + friend INLINE S Norm(const Rall1d& value) ; + friend INLINE Rall1d tanh(const Rall1d& arg); + friend INLINE Rall1d atan2(const Rall1d& y,const Rall1d& x); + + // = Utility functions to improve performance + + friend INLINE Rall1d LinComb(S alfa,const Rall1d& a, + const T& beta,const Rall1d& b ); + + friend INLINE void LinCombR(S alfa,const Rall1d& a, + const T& beta,const Rall1d& b,Rall1d& result ); + + // = Setting value of a Rall1d object to 0 or 1 + + friend INLINE void SetToZero(Rall1d& value); + friend INLINE void SetToOne(Rall1d& value); + // = Equality in an eps-interval + friend INLINE bool Equal(const Rall1d& y,const Rall1d& x,double eps); + */ + }; + + +template +INLINE Rall1d operator /(const Rall1d& lhs,const Rall1d& rhs) + { + return Rall1d(lhs.t/rhs.t,(lhs.grad*rhs.t-lhs.t*rhs.grad)/(rhs.t*rhs.t)); + } + +template +INLINE Rall1d operator *(const Rall1d& lhs,const Rall1d& rhs) + { + return Rall1d(lhs.t*rhs.t,rhs.t*lhs.grad+lhs.t*rhs.grad); + } + +template +INLINE Rall1d operator +(const Rall1d& lhs,const Rall1d& rhs) + { + return Rall1d(lhs.t+rhs.t,lhs.grad+rhs.grad); + } + + +template +INLINE Rall1d operator -(const Rall1d& lhs,const Rall1d& rhs) + { + return Rall1d(lhs.t-rhs.t,lhs.grad-rhs.grad); + } + +template +INLINE Rall1d operator -(const Rall1d& arg) + { + return Rall1d(-arg.t,-arg.grad); + } + +template +INLINE Rall1d operator *(S s,const Rall1d& v) + { + return Rall1d(s*v.t,s*v.grad); + } + +template +INLINE Rall1d operator *(const Rall1d& v,S s) + { + return Rall1d(v.t*s,v.grad*s); + } + +template +INLINE Rall1d operator +(S s,const Rall1d& v) + { + return Rall1d(s+v.t,v.grad); + } + +template +INLINE Rall1d operator +(const Rall1d& v,S s) + { + return Rall1d(v.t+s,v.grad); + } + +template +INLINE Rall1d operator -(S s,const Rall1d& v) + { + return Rall1d(s-v.t,-v.grad); + } + +template +INLINE Rall1d operator -(const Rall1d& v,S s) + { + return Rall1d(v.t-s,v.grad); + } + +template +INLINE Rall1d operator /(S s,const Rall1d& v) + { + return Rall1d(s/v.t,(-s*v.grad)/(v.t*v.t)); + } + +template +INLINE Rall1d operator /(const Rall1d& v,S s) + { + return Rall1d(v.t/s,v.grad/s); + } + + +template +INLINE Rall1d exp(const Rall1d& arg) + { + T v; + v= (exp(arg.t)); + return Rall1d(v,v*arg.grad); + } + +template +INLINE Rall1d log(const Rall1d& arg) + { + T v; + v=(log(arg.t)); + return Rall1d(v,arg.grad/arg.t); + } + +template +INLINE Rall1d sin(const Rall1d& arg) + { + T v; + v=(sin(arg.t)); + return Rall1d(v,cos(arg.t)*arg.grad); + } + +template +INLINE Rall1d cos(const Rall1d& arg) + { + T v; + v=(cos(arg.t)); + return Rall1d(v,-sin(arg.t)*arg.grad); + } + +template +INLINE Rall1d tan(const Rall1d& arg) + { + T v; + v=(tan(arg.t)); + 
return Rall1d(v,arg.grad/sqr(cos(arg.t))); + } + +template +INLINE Rall1d sinh(const Rall1d& arg) + { + T v; + v=(sinh(arg.t)); + return Rall1d(v,cosh(arg.t)*arg.grad); + } + +template +INLINE Rall1d cosh(const Rall1d& arg) + { + T v; + v=(cosh(arg.t)); + return Rall1d(v,sinh(arg.t)*arg.grad); + } + +template +INLINE Rall1d sqr(const Rall1d& arg) + { + T v; + v=(arg.t*arg.t); + return Rall1d(v,(2.0*arg.t)*arg.grad); + } + +template +INLINE Rall1d pow(const Rall1d& arg,double m) + { + T v; + v=(pow(arg.t,m)); + return Rall1d(v,(m*v/arg.t)*arg.grad); + } + +template +INLINE Rall1d sqrt(const Rall1d& arg) + { + T v; + v=sqrt(arg.t); + return Rall1d(v, (0.5/v)*arg.grad); + } + +template +INLINE Rall1d atan(const Rall1d& x) +{ + T v; + v=(atan(x.t)); + return Rall1d(v,x.grad/(1.0+sqr(x.t))); +} + +template +INLINE Rall1d hypot(const Rall1d& y,const Rall1d& x) +{ + T v; + v=(hypot(y.t,x.t)); + return Rall1d(v,(x.t/v)*x.grad+(y.t/v)*y.grad); +} + +template +INLINE Rall1d asin(const Rall1d& x) +{ + T v; + v=(asin(x.t)); + return Rall1d(v,x.grad/sqrt(1.0-sqr(x.t))); +} + +template +INLINE Rall1d acos(const Rall1d& x) +{ + T v; + v=(acos(x.t)); + return Rall1d(v,-x.grad/sqrt(1.0-sqr(x.t))); +} + +template +INLINE Rall1d abs(const Rall1d& x) +{ + T v; + v=(Sign(x)); + return Rall1d(v*x,v*x.grad); +} + + +template +INLINE S Norm(const Rall1d& value) +{ + return Norm(value.t); +} + +template +INLINE Rall1d tanh(const Rall1d& arg) +{ + T v(tanh(arg.t)); + return Rall1d(v,arg.grad/sqr(cosh(arg.t))); +} + +template +INLINE Rall1d atan2(const Rall1d& y,const Rall1d& x) +{ + T v(x.t*x.t+y.t*y.t); + return Rall1d(atan2(y.t,x.t),(x.t*y.grad-y.t*x.grad)/v); +} + + +template +INLINE Rall1d LinComb(S alfa,const Rall1d& a, + const T& beta,const Rall1d& b ) { + return Rall1d( + LinComb(alfa,a.t,beta,b.t), + LinComb(alfa,a.grad,beta,b.grad) + ); +} + +template +INLINE void LinCombR(S alfa,const Rall1d& a, + const T& beta,const Rall1d& b,Rall1d& result ) { + LinCombR(alfa, a.t, beta, b.t, result.t); + LinCombR(alfa, a.grad, beta, b.grad, result.grad); +} + + +template +INLINE void SetToZero(Rall1d& value) + { + SetToZero(value.grad); + SetToZero(value.t); + } +template +INLINE void SetToIdentity(Rall1d& value) + { + SetToIdentity(value.t); + SetToZero(value.grad); + } + +template +INLINE bool Equal(const Rall1d& y,const Rall1d& x,double eps=epsilon) +{ + return (Equal(x.t,y.t,eps)&&Equal(x.grad,y.grad,eps)); +} + +} + + + +#endif diff --git a/intern/itasc/kdl/utilities/rall2d.h b/intern/itasc/kdl/utilities/rall2d.h new file mode 100644 index 00000000000..cbd9e70b04f --- /dev/null +++ b/intern/itasc/kdl/utilities/rall2d.h @@ -0,0 +1,538 @@ + +/***************************************************************************** + * \file + * class for automatic differentiation on scalar values and 1st + * derivatives and 2nd derivative. + * + * \author + * Erwin Aertbelien, Div. PMA, Dep. of Mech. Eng., K.U.Leuven + * + * \version + * ORO_Geometry V0.2 + * + * \par Note + * VC6++ contains a bug, concerning the use of inlined friend functions + * in combination with namespaces. So, try to avoid inlined friend + * functions ! + * + * \par History + * - $log$ + * + * \par Release + * $Id$ + * $Name: $ + ****************************************************************************/ + +#ifndef Rall2D_H +#define Rall2D_H + +#include +#include +#include "utility.h" + + +namespace KDL { + +/** + * Rall2d contains a value, and its gradient and its 2nd derivative, and defines an algebraic + * structure on this pair. 
+ * This template class has 3 template parameters : + * - T contains the type of the value. + * - V contains the type of the gradient (can be a vector-like type). + * - S defines a scalar type that can operate on Rall1d. This is the type that + * is used to give back values of Norm() etc. + * + * S is usefull when you recurse a Rall1d object into itself to create a 2nd, 3th, 4th,.. + * derivatives. (e.g. Rall1d< Rall1d, Rall1d, double> ). + * + * S is always passed by value. + * + * \par Class Type + * Concrete implementation + */ +template +class Rall2d + { + public : + T t; //!< value + V d; //!< 1st derivative + V dd; //!< 2nd derivative + public : + // = Constructors + INLINE Rall2d() {} + + explicit INLINE Rall2d(typename TI::Arg c) + {t=c;SetToZero(d);SetToZero(dd);} + + INLINE Rall2d(typename TI::Arg tn,const V& afg):t(tn),d(afg) {SetToZero(dd);} + + INLINE Rall2d(typename TI::Arg tn,const V& afg,const V& afg2):t(tn),d(afg),dd(afg2) {} + + // = Copy Constructor + INLINE Rall2d(const Rall2d& r):t(r.t),d(r.d),dd(r.dd) {} + //if one defines this constructor, it's better optimized then the + //automatically generated one ( that one set's up a loop to copy + // word by word. + + // = Member functions to access internal structures : + INLINE T& Value() { + return t; + } + + INLINE V& D() { + return d; + } + + INLINE V& DD() { + return dd; + } + INLINE static Rall2d Zero() { + Rall2d tmp; + SetToZero(tmp); + return tmp; + } + INLINE static Rall2d Identity() { + Rall2d tmp; + SetToIdentity(tmp); + return tmp; + } + + // = assignment operators + INLINE Rall2d& operator =(S c) + {t=c;SetToZero(d);SetToZero(dd);return *this;} + + INLINE Rall2d& operator =(const Rall2d& r) + {t=r.t;d=r.d;dd=r.dd;return *this;} + + INLINE Rall2d& operator /=(const Rall2d& rhs) + { + t /= rhs.t; + d = (d-t*rhs.d)/rhs.t; + dd= (dd - S(2)*d*rhs.d-t*rhs.dd)/rhs.t; + return *this; + } + + INLINE Rall2d& operator *=(const Rall2d& rhs) + { + t *= rhs.t; + d = (d*rhs.t+t*rhs.d); + dd = (dd*rhs.t+S(2)*d*rhs.d+t*rhs.dd); + return *this; + } + + INLINE Rall2d& operator +=(const Rall2d& rhs) + { + t +=rhs.t; + d +=rhs.d; + dd+=rhs.dd; + return *this; + } + + INLINE Rall2d& operator -=(const Rall2d& rhs) + { + t -= rhs.t; + d -= rhs.d; + dd -= rhs.dd; + return *this; + } + + INLINE Rall2d& operator /=(S rhs) + { + t /= rhs; + d /= rhs; + dd /= rhs; + return *this; + } + + INLINE Rall2d& operator *=(S rhs) + { + t *= rhs; + d *= rhs; + dd *= rhs; + return *this; + } + + INLINE Rall2d& operator -=(S rhs) + { + t -= rhs; + return *this; + } + + INLINE Rall2d& operator +=(S rhs) + { + t += rhs; + return *this; + } + + // = Operators between Rall2d objects +/* + friend INLINE Rall2d operator /(const Rall2d& lhs,const Rall2d& rhs); + friend INLINE Rall2d operator *(const Rall2d& lhs,const Rall2d& rhs); + friend INLINE Rall2d operator +(const Rall2d& lhs,const Rall2d& rhs); + friend INLINE Rall2d operator -(const Rall2d& lhs,const Rall2d& rhs); + friend INLINE Rall2d operator -(const Rall2d& arg); + friend INLINE Rall2d operator *(S s,const Rall2d& v); + friend INLINE Rall2d operator *(const Rall2d& v,S s); + friend INLINE Rall2d operator +(S s,const Rall2d& v); + friend INLINE Rall2d operator +(const Rall2d& v,S s); + friend INLINE Rall2d operator -(S s,const Rall2d& v); + friend INLINE INLINE Rall2d operator -(const Rall2d& v,S s); + friend INLINE Rall2d operator /(S s,const Rall2d& v); + friend INLINE Rall2d operator /(const Rall2d& v,S s); + + // = Mathematical functions that operate on Rall2d objects + + friend INLINE Rall2d 
exp(const Rall2d& arg); + friend INLINE Rall2d log(const Rall2d& arg); + friend INLINE Rall2d sin(const Rall2d& arg); + friend INLINE Rall2d cos(const Rall2d& arg); + friend INLINE Rall2d tan(const Rall2d& arg); + friend INLINE Rall2d sinh(const Rall2d& arg); + friend INLINE Rall2d cosh(const Rall2d& arg); + friend INLINE Rall2d tanh(const Rall2d& arg); + friend INLINE Rall2d sqr(const Rall2d& arg); + friend INLINE Rall2d pow(const Rall2d& arg,double m) ; + friend INLINE Rall2d sqrt(const Rall2d& arg); + friend INLINE Rall2d asin(const Rall2d& arg); + friend INLINE Rall2d acos(const Rall2d& arg); + friend INLINE Rall2d atan(const Rall2d& x); + friend INLINE Rall2d atan2(const Rall2d& y,const Rall2d& x); + friend INLINE Rall2d abs(const Rall2d& x); + friend INLINE Rall2d hypot(const Rall2d& y,const Rall2d& x); + // returns sqrt(y*y+x*x), but is optimized for accuracy and speed. + friend INLINE S Norm(const Rall2d& value) ; + // returns Norm( value.Value() ). + + // = Some utility functions to improve performance + // (should also be declared on primitive types to improve uniformity + friend INLINE Rall2d LinComb(S alfa,const Rall2d& a, + TI::Arg beta,const Rall2d& b ); + friend INLINE void LinCombR(S alfa,const Rall2d& a, + TI::Arg beta,const Rall2d& b,Rall2d& result ); + // = Setting value of a Rall2d object to 0 or 1 + friend INLINE void SetToZero(Rall2d& value); + friend INLINE void SetToOne(Rall2d& value); + // = Equality in an eps-interval + friend INLINE bool Equal(const Rall2d& y,const Rall2d& x,double eps); + */ + }; + + + + + +// = Operators between Rall2d objects +template +INLINE Rall2d operator /(const Rall2d& lhs,const Rall2d& rhs) + { + Rall2d tmp; + tmp.t = lhs.t/rhs.t; + tmp.d = (lhs.d-tmp.t*rhs.d)/rhs.t; + tmp.dd= (lhs.dd-S(2)*tmp.d*rhs.d-tmp.t*rhs.dd)/rhs.t; + return tmp; + } + +template +INLINE Rall2d operator *(const Rall2d& lhs,const Rall2d& rhs) + { + Rall2d tmp; + tmp.t = lhs.t*rhs.t; + tmp.d = (lhs.d*rhs.t+lhs.t*rhs.d); + tmp.dd = (lhs.dd*rhs.t+S(2)*lhs.d*rhs.d+lhs.t*rhs.dd); + return tmp; + } + +template +INLINE Rall2d operator +(const Rall2d& lhs,const Rall2d& rhs) + { + return Rall2d(lhs.t+rhs.t,lhs.d+rhs.d,lhs.dd+rhs.dd); + } + +template +INLINE Rall2d operator -(const Rall2d& lhs,const Rall2d& rhs) + { + return Rall2d(lhs.t-rhs.t,lhs.d-rhs.d,lhs.dd-rhs.dd); + } + +template +INLINE Rall2d operator -(const Rall2d& arg) + { + return Rall2d(-arg.t,-arg.d,-arg.dd); + } + +template +INLINE Rall2d operator *(S s,const Rall2d& v) + { + return Rall2d(s*v.t,s*v.d,s*v.dd); + } + +template +INLINE Rall2d operator *(const Rall2d& v,S s) + { + return Rall2d(v.t*s,v.d*s,v.dd*s); + } + +template +INLINE Rall2d operator +(S s,const Rall2d& v) + { + return Rall2d(s+v.t,v.d,v.dd); + } + +template +INLINE Rall2d operator +(const Rall2d& v,S s) + { + return Rall2d(v.t+s,v.d,v.dd); + } + +template +INLINE Rall2d operator -(S s,const Rall2d& v) + { + return Rall2d(s-v.t,-v.d,-v.dd); + } + +template +INLINE Rall2d operator -(const Rall2d& v,S s) + { + return Rall2d(v.t-s,v.d,v.dd); + } + +template +INLINE Rall2d operator /(S s,const Rall2d& rhs) + { + Rall2d tmp; + tmp.t = s/rhs.t; + tmp.d = (-tmp.t*rhs.d)/rhs.t; + tmp.dd= (-S(2)*tmp.d*rhs.d-tmp.t*rhs.dd)/rhs.t; + return tmp; +} + + +template +INLINE Rall2d operator /(const Rall2d& v,S s) + { + return Rall2d(v.t/s,v.d/s,v.dd/s); + } + + +template +INLINE Rall2d exp(const Rall2d& arg) + { + Rall2d tmp; + tmp.t = exp(arg.t); + tmp.d = tmp.t*arg.d; + tmp.dd = tmp.d*arg.d+tmp.t*arg.dd; + return tmp; + } + +template +INLINE Rall2d log(const 
Rall2d& arg) + { + Rall2d tmp; + tmp.t = log(arg.t); + tmp.d = arg.d/arg.t; + tmp.dd = (arg.dd-tmp.d*arg.d)/arg.t; + return tmp; + } + +template +INLINE Rall2d sin(const Rall2d& arg) + { + T v1 = sin(arg.t); + T v2 = cos(arg.t); + return Rall2d(v1,v2*arg.d,v2*arg.dd - (v1*arg.d)*arg.d ); + } + +template +INLINE Rall2d cos(const Rall2d& arg) + { + T v1 = cos(arg.t); + T v2 = -sin(arg.t); + return Rall2d(v1,v2*arg.d, v2*arg.dd - (v1*arg.d)*arg.d); + } + +template +INLINE Rall2d tan(const Rall2d& arg) + { + T v1 = tan(arg.t); + T v2 = S(1)+sqr(v1); + return Rall2d(v1,v2*arg.d, v2*(arg.dd+(S(2)*v1*sqr(arg.d)))); + } + +template +INLINE Rall2d sinh(const Rall2d& arg) + { + T v1 = sinh(arg.t); + T v2 = cosh(arg.t); + return Rall2d(v1,v2*arg.d,v2*arg.dd + (v1*arg.d)*arg.d ); + } + +template +INLINE Rall2d cosh(const Rall2d& arg) + { + T v1 = cosh(arg.t); + T v2 = sinh(arg.t); + return Rall2d(v1,v2*arg.d,v2*arg.dd + (v1*arg.d)*arg.d ); + } + +template +INLINE Rall2d tanh(const Rall2d& arg) + { + T v1 = tanh(arg.t); + T v2 = S(1)-sqr(v1); + return Rall2d(v1,v2*arg.d, v2*(arg.dd-(S(2)*v1*sqr(arg.d)))); + } + +template +INLINE Rall2d sqr(const Rall2d& arg) + { + return Rall2d(arg.t*arg.t, + (S(2)*arg.t)*arg.d, + S(2)*(sqr(arg.d)+arg.t*arg.dd) + ); + } + +template +INLINE Rall2d pow(const Rall2d& arg,double m) + { + Rall2d tmp; + tmp.t = pow(arg.t,m); + T v2 = (m/arg.t)*tmp.t; + tmp.d = v2*arg.d; + tmp.dd = (S((m-1))/arg.t)*tmp.d*arg.d + v2*arg.dd; + return tmp; + } + +template +INLINE Rall2d sqrt(const Rall2d& arg) + { + /* By inversion of sqr(x) :*/ + Rall2d tmp; + tmp.t = sqrt(arg.t); + tmp.d = (S(0.5)/tmp.t)*arg.d; + tmp.dd = (S(0.5)*arg.dd-sqr(tmp.d))/tmp.t; + return tmp; + } + +template +INLINE Rall2d asin(const Rall2d& arg) +{ + /* By inversion of sin(x) */ + Rall2d tmp; + tmp.t = asin(arg.t); + T v = cos(tmp.t); + tmp.d = arg.d/v; + tmp.dd = (arg.dd+arg.t*sqr(tmp.d))/v; + return tmp; +} + +template +INLINE Rall2d acos(const Rall2d& arg) +{ + /* By inversion of cos(x) */ + Rall2d tmp; + tmp.t = acos(arg.t); + T v = -sin(tmp.t); + tmp.d = arg.d/v; + tmp.dd = (arg.dd+arg.t*sqr(tmp.d))/v; + return tmp; + +} + +template +INLINE Rall2d atan(const Rall2d& x) +{ + /* By inversion of tan(x) */ + Rall2d tmp; + tmp.t = atan(x.t); + T v = S(1)+sqr(x.t); + tmp.d = x.d/v; + tmp.dd = x.dd/v-(S(2)*x.t)*sqr(tmp.d); + return tmp; +} + +template +INLINE Rall2d atan2(const Rall2d& y,const Rall2d& x) +{ + Rall2d tmp; + tmp.t = atan2(y.t,x.t); + T v = sqr(y.t)+sqr(x.t); + tmp.d = (x.t*y.d-x.d*y.t)/v; + tmp.dd = ( x.t*y.dd-x.dd*y.t-S(2)*(x.t*x.d+y.t*y.d)*tmp.d ) / v; + return tmp; +} + +template +INLINE Rall2d abs(const Rall2d& x) +{ + T v(Sign(x)); + return Rall2d(v*x,v*x.d,v*x.dd); +} + +template +INLINE Rall2d hypot(const Rall2d& y,const Rall2d& x) +{ + Rall2d tmp; + tmp.t = hypot(y.t,x.t); + tmp.d = (x.t*x.d+y.t*y.d)/tmp.t; + tmp.dd = (sqr(x.d)+x.t*x.dd+sqr(y.d)+y.t*y.dd-sqr(tmp.d))/tmp.t; + return tmp; +} +// returns sqrt(y*y+x*x), but is optimized for accuracy and speed. + +template +INLINE S Norm(const Rall2d& value) +{ + return Norm(value.t); +} +// returns Norm( value.Value() ). 
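Rall2d extends the same scheme to second derivatives: t, d and dd hold the value, first and second derivative, and the functions above propagate all three. A small sketch (not from this patch) with plain doubles:

#include <iostream>
#include "rall2d.h"

int main()
{
    typedef KDL::Rall2d<double, double, double> AD2;

    // Seed: value 0.5, first derivative 1 (dx/dx), second derivative 0.
    AD2 x(0.5, 1.0, 0.0);

    AD2 y = sin(x);

    std::cout << "f(x)   = " << y.t  << "\n"   //  sin(0.5)
              << "f'(x)  = " << y.d  << "\n"   //  cos(0.5)
              << "f''(x) = " << y.dd << "\n";  // -sin(0.5)
    return 0;
}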
+ + +// (should also be declared on primitive types to improve uniformity +template +INLINE Rall2d LinComb(S alfa,const Rall2d& a, + const T& beta,const Rall2d& b ) { + return Rall2d( + LinComb(alfa,a.t,beta,b.t), + LinComb(alfa,a.d,beta,b.d), + LinComb(alfa,a.dd,beta,b.dd) + ); +} + +template +INLINE void LinCombR(S alfa,const Rall2d& a, + const T& beta,const Rall2d& b,Rall2d& result ) { + LinCombR(alfa, a.t, beta, b.t, result.t); + LinCombR(alfa, a.d, beta, b.d, result.d); + LinCombR(alfa, a.dd, beta, b.dd, result.dd); +} + +template +INLINE void SetToZero(Rall2d& value) + { + SetToZero(value.t); + SetToZero(value.d); + SetToZero(value.dd); + } + +template +INLINE void SetToIdentity(Rall2d& value) + { + SetToZero(value.d); + SetToIdentity(value.t); + SetToZero(value.dd); + } + +template +INLINE bool Equal(const Rall2d& y,const Rall2d& x,double eps=epsilon) +{ + return (Equal(x.t,y.t,eps)&& + Equal(x.d,y.d,eps)&& + Equal(x.dd,y.dd,eps) + ); +} + + +} + + +#endif diff --git a/intern/itasc/kdl/utilities/svd_eigen_HH.hpp b/intern/itasc/kdl/utilities/svd_eigen_HH.hpp new file mode 100644 index 00000000000..2bbb8df521f --- /dev/null +++ b/intern/itasc/kdl/utilities/svd_eigen_HH.hpp @@ -0,0 +1,309 @@ +// Copyright (C) 2007 Ruben Smits + +// Version: 1.0 +// Author: Ruben Smits +// Maintainer: Ruben Smits +// URL: http://www.orocos.org/kdl + +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Lesser General Public +// License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. + +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Lesser General Public License for more details. + +// You should have received a copy of the GNU Lesser General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + + +//Based on the svd of the KDL-0.2 library by Erwin Aertbelien +#ifndef SVD_EIGEN_HH_HPP +#define SVD_EIGEN_HH_HPP + + +#include +#include + +namespace KDL +{ + template inline Scalar PYTHAG(Scalar a,Scalar b) { + double at,bt,ct; + at = fabs(a); + bt = fabs(b); + if (at > bt ) { + ct=bt/at; + return Scalar(at*sqrt(1.0+ct*ct)); + } else { + if (bt==0) + return Scalar(0.0); + else { + ct=at/bt; + return Scalar(bt*sqrt(1.0+ct*ct)); + } + } + } + + + template inline Scalar SIGN(Scalar a,Scalar b) { + return ((b) >= Scalar(0.0) ? fabs(a) : -fabs(a)); + } + + /** + * svd calculation of boost ublas matrices + * + * @param A matrix(mxn) + * @param U matrix(mxn) + * @param S vector n + * @param V matrix(nxn) + * @param tmp vector n + * @param maxiter defaults to 150 + * + * @return -2 if maxiter exceeded, 0 otherwise + */ + template + int svd_eigen_HH( + const Eigen::MatrixBase& A, + Eigen::MatrixBase& U, + Eigen::MatrixBase& S, + Eigen::MatrixBase& V, + Eigen::MatrixBase& tmp, + int maxiter=150) + { + //get the rows/columns of the matrix + const int rows = A.rows(); + const int cols = A.cols(); + + U = A; + + int i(-1),its(-1),j(-1),jj(-1),k(-1),nm=0; + int ppi(0); + bool flag; + e_scalar maxarg1,maxarg2,anorm(0),c(0),f(0),h(0),s(0),scale(0),x(0),y(0),z(0),g(0); + + g=scale=anorm=e_scalar(0.0); + + /* Householder reduction to bidiagonal form. */ + for (i=0;i maxarg2 ? 
maxarg1 : maxarg2; + } + /* Accumulation of right-hand transformations. */ + for (i=cols-1;i>=0;i--) { + if (i=0;i--) { + ppi=i+1; + g=S(i); + for (j=ppi;j=0;k--) { /* Loop over singular values. */ + for (its=1;its<=maxiter;its++) { /* Loop over allowed iterations. */ + flag=true; + for (ppi=k;ppi>=0;ppi--) { /* Test for splitting. */ + nm=ppi-1; /* Note that tmp(1) is always zero. */ + if ((fabs(tmp(ppi))+anorm) == anorm) { + flag=false; + break; + } + if ((fabs(S(nm)+anorm) == anorm)) break; + } + if (flag) { + c=e_scalar(0.0); /* Cancellation of tmp(l), if l>1: */ + s=e_scalar(1.); + for (i=ppi;i<=k;i++) { + f=s*tmp(i); + tmp(i)=c*tmp(i); + if ((fabs(f)+anorm) == anorm) break; + g=S(i); + h=PYTHAG(f,g); + S(i)=h; + h=e_scalar(1.0)/h; + c=g*h; + s=(-f*h); + for (j=0;j S_max){ + S_max = Sj; + i_max = j; + } + } + if (i_max != i){ + /* swap eigenvalues */ + e_scalar tmp = S(i); + S(i)=S(i_max); + S(i_max)=tmp; + + /* swap eigenvectors */ + U.col(i).swap(U.col(i_max)); + V.col(i).swap(V.col(i_max)); + } + } + + + if (its == maxiter) + return (-2); + else + return (0); + } + +} +#endif diff --git a/intern/itasc/kdl/utilities/traits.h b/intern/itasc/kdl/utilities/traits.h new file mode 100644 index 00000000000..2656d633653 --- /dev/null +++ b/intern/itasc/kdl/utilities/traits.h @@ -0,0 +1,111 @@ +#ifndef KDLPV_TRAITS_H +#define KDLPV_TRAITS_H + +#include "utility.h" + + +// forwards declarations : +namespace KDL { + class Frame; + class Rotation; + class Vector; + class Twist; + class Wrench; + class FrameVel; + class RotationVel; + class VectorVel; + class TwistVel; +} + + +/** + * @brief Traits are traits classes to determine the type of a derivative of another type. + * + * For geometric objects the "geometric" derivative is chosen. For example the derivative of a Rotation + * matrix is NOT a 3x3 matrix containing the derivative of the elements of a rotation matrix. The derivative + * of the rotation matrix is a Vector corresponding the rotational velocity. Mostly used in template classes + * and routines to derive a correct type when needed. + * + * You can see this as a compile-time lookuptable to find the type of the derivative. 
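Put differently, Traits<T>::derivType answers at compile time the question "what type does the derivative of a T have?". A short sketch of how generic code can rely on it (not from this patch; the include paths follow the layout used elsewhere in this commit, and only the specializations defined in this header are assumed):

#include "kdl/frames.hpp"           // complete KDL::Rotation / KDL::Vector types
#include "kdl/utilities/traits.h"

// Generic holder for a quantity together with its derivative; Traits picks
// the correct derivative type for each geometric object.
template <class T>
struct Stamped {
    typename Traits<T>::valueType value;
    typename Traits<T>::derivType deriv;
};

int main()
{
    Stamped<KDL::Rotation> orientation;  // deriv is a KDL::Vector (rotational velocity)
    Stamped<double>        joint_angle;  // deriv is simply another double
    (void)orientation;
    (void)joint_angle;
    return 0;
}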
+ * + * Example + * \verbatim + Rotation R; + Traits dR; + \endverbatim + */ +template +struct Traits { + typedef T valueType; + typedef T derivType; +}; + +template <> +struct Traits { + typedef KDL::Frame valueType; + typedef KDL::Twist derivType; +}; +template <> +struct Traits { + typedef KDL::Twist valueType; + typedef KDL::Twist derivType; +}; +template <> +struct Traits { + typedef KDL::Wrench valueType; + typedef KDL::Wrench derivType; +}; + +template <> +struct Traits { + typedef KDL::Rotation valueType; + typedef KDL::Vector derivType; +}; + +template <> +struct Traits { + typedef KDL::Vector valueType; + typedef KDL::Vector derivType; +}; + +template <> +struct Traits { + typedef double valueType; + typedef double derivType; +}; + +template <> +struct Traits { + typedef float valueType; + typedef float derivType; +}; + +template <> +struct Traits { + typedef KDL::Frame valueType; + typedef KDL::TwistVel derivType; +}; +template <> +struct Traits { + typedef KDL::Twist valueType; + typedef KDL::TwistVel derivType; +}; + +template <> +struct Traits { + typedef KDL::Rotation valueType; + typedef KDL::VectorVel derivType; +}; + +template <> +struct Traits { + typedef KDL::Vector valueType; + typedef KDL::VectorVel derivType; +}; + + + +#endif + + + diff --git a/intern/itasc/kdl/utilities/utility.cpp b/intern/itasc/kdl/utilities/utility.cpp new file mode 100644 index 00000000000..1ab9cb6f83d --- /dev/null +++ b/intern/itasc/kdl/utilities/utility.cpp @@ -0,0 +1,21 @@ +/** @file utility.cpp + * @author Erwin Aertbelien, Div. PMA, Dep. of Mech. Eng., K.U.Leuven + * @version + * ORO_Geometry V0.2 + * + * @par history + * - changed layout of the comments to accomodate doxygen + */ + +#include "utility.h" + +namespace KDL { + +int STREAMBUFFERSIZE=10000; +int MAXLENFILENAME = 255; +const double PI= 3.1415926535897932384626433832795; +const double deg2rad = 0.01745329251994329576923690768488; +const double rad2deg = 57.2957795130823208767981548141052; +double epsilon = 0.000001; +double epsilon2 = 0.000001*0.000001; +} diff --git a/intern/itasc/kdl/utilities/utility.h b/intern/itasc/kdl/utilities/utility.h new file mode 100644 index 00000000000..7151792536e --- /dev/null +++ b/intern/itasc/kdl/utilities/utility.h @@ -0,0 +1,299 @@ +/***************************************************************************** + * \author + * Erwin Aertbelien, Div. PMA, Dep. of Mech. Eng., K.U.Leuven + * + * \version + * ORO_Geometry V0.2 + * + * \par History + * - $log$ + * + * \par Release + * $Id$ + * $Name: $ + * \file + * Included by most lrl-files to provide some general + * functions and macro definitions. + * + * \par history + * - changed layout of the comments to accomodate doxygen + */ + + +#ifndef KDL_UTILITY_H +#define KDL_UTILITY_H + +#include "kdl-config.h" +#include +#include +#include + + +///////////////////////////////////////////////////////////// +// configurable options for the frames library. + +#ifdef KDL_INLINE + #ifdef _MSC_VER + // Microsoft Visual C + #define IMETHOD __forceinline + #else + // Some other compiler, e.g. gcc + #define IMETHOD inline + #endif +#else + #define IMETHOD +#endif + + + +//! turn on or off frames bounds checking. If turned on, assert() can still +//! be turned off with -DNDEBUG. +#ifdef KDL_INDEX_CHECK + #define FRAMES_CHECKI(a) assert(a) +#else + #define FRAMES_CHECKI(a) +#endif + + +namespace KDL { + +#ifdef __GNUC__ + // so that sin,cos can be overloaded and complete + // resolution of overloaded functions work. 
+ using ::sin; + using ::cos; + using ::exp; + using ::log; + using ::sin; + using ::cos; + using ::tan; + using ::sinh; + using ::cosh; + using ::pow; + using ::sqrt; + using ::atan; + using ::hypot; + using ::asin; + using ::acos; + using ::tanh; + using ::atan2; +#endif +#ifndef __GNUC__ + //only real solution : get Rall1d and varia out of namespaces. + #pragma warning (disable:4786) + + inline double sin(double a) { + return ::sin(a); + } + + inline double cos(double a) { + return ::cos(a); + } + inline double exp(double a) { + return ::exp(a); + } + inline double log(double a) { + return ::log(a); + } + inline double tan(double a) { + return ::tan(a); + } + inline double cosh(double a) { + return ::cosh(a); + } + inline double sinh(double a) { + return ::sinh(a); + } + inline double sqrt(double a) { + return ::sqrt(a); + } + inline double atan(double a) { + return ::atan(a); + } + inline double acos(double a) { + return ::acos(a); + } + inline double asin(double a) { + return ::asin(a); + } + inline double tanh(double a) { + return ::tanh(a); + } + inline double pow(double a,double b) { + return ::pow(a,b); + } + inline double atan2(double a,double b) { + return ::atan2(a,b); + } +#endif + + + + + +/** + * Auxiliary class for argument types (Trait-template class ) + * + * Is used to pass doubles by value, and arbitrary objects by const reference. + * This is TWICE as fast (2 x less memory access) and avoids bugs in VC6++ concerning + * the assignment of the result of intrinsic functions to const double&-typed variables, + * and optimization on. + */ +template +class TI +{ + public: + typedef const T& Arg; //!< Arg is used for passing the element to a function. +}; + +template <> +class TI { +public: + typedef double Arg; +}; + +template <> +class TI { +public: + typedef int Arg; +}; + + + + + +/** + * /note linkage + * Something fishy about the difference between C++ and C + * in C++ const values default to INTERNAL linkage, in C they default + * to EXTERNAL linkage. Here the constants should have EXTERNAL linkage + * because they, for at least some of them, can be changed by the user. + * If you want to explicitly declare internal linkage, use "static". + */ +//! +extern int STREAMBUFFERSIZE; + +//! maximal length of a file name +extern int MAXLENFILENAME; + +//! the value of pi +extern const double PI; + +//! the value pi/180 +extern const double deg2rad; + +//! the value 180/pi +extern const double rad2deg; + +//! default precision while comparing with Equal(..,..) functions. Initialized at 0.0000001. +extern double epsilon; + +//! power or 2 of epsilon +extern double epsilon2; + +//! the number of derivatives used in the RN-... objects. 
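These constants, together with the Equal() helper defined a little further down, are the basis for all approximate comparisons in the library; epsilon is deliberately a variable so applications can tune the tolerance. A tiny sketch (not from this patch; the include path is illustrative):

#include <iostream>
#include "kdl/utilities/utility.h"

int main()
{
    // deg2rad/rad2deg are plain conversion factors, PI the usual constant.
    double angle = 30.0 * KDL::deg2rad;

    // Equal() compares within a tolerance; passing KDL::epsilon makes the
    // default comparison precision explicit.
    if (KDL::Equal(angle, KDL::PI / 6.0, KDL::epsilon))
        std::cout << "30 degrees equals pi/6 radians within epsilon\n";
    return 0;
}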
+extern int VSIZE; + + + +#ifndef _MFC_VER +#undef max +inline double max(double a,double b) { + if (btmp)&& (tmp>-eps) ); +} + +inline void random(double& a) { + a = 1.98*rand()/(double)RAND_MAX -0.99; +} + +inline void posrandom(double& a) { + a = 0.001+0.99*rand()/(double)RAND_MAX; +} + +inline double diff(double a,double b,double dt) { + return (b-a)/dt; +} +//inline float diff(float a,float b,double dt) { +//return (b-a)/dt; +//} +inline double addDelta(double a,double da,double dt) { + return a+da*dt; +} + +//inline float addDelta(float a,float da,double dt) { +// return a+da*dt; +//} + + +} + + + +#endif diff --git a/intern/itasc/kdl/utilities/utility_io.cpp b/intern/itasc/kdl/utilities/utility_io.cpp new file mode 100644 index 00000000000..994567dfdfc --- /dev/null +++ b/intern/itasc/kdl/utilities/utility_io.cpp @@ -0,0 +1,208 @@ +/***************************************************************************** + * \author + * Erwin Aertbelien, Div. PMA, Dep. of Mech. Eng., K.U.Leuven + * + * \version + * ORO_Geometry V0.2 + * + * \par History + * - $log$ + * + * \par Release + * $Id$ + * $Name: $ + * \todo + * make IO routines more robust against the differences between DOS/UNIX end-of-line style. + ****************************************************************************/ + + +#include "utility_io.h" +#include "error.h" + +#include +#include +#include + +namespace KDL { + +// +// _functions are private functions +// + + void _check_istream(std::istream& is) + { + if ((!is.good())&&(is.eof()) ) + { + throw Error_BasicIO_File(); + } + } +// Eats until the end of the line + int _EatUntilEndOfLine( std::istream& is, int* countp=NULL) { + int ch; + int count; + count = 0; + do { + ch = is.get(); + count++; + _check_istream(is); + } while (ch!='\n'); + if (countp!=NULL) *countp = count; + return ch; +} + +// Eats until the end of the comment + int _EatUntilEndOfComment( std::istream& is, int* countp=NULL) { + int ch; + int count; + count = 0; + int prevch; + ch = 0; + do { + prevch = ch; + ch = is.get(); + count++; + _check_istream(is); + if ((prevch=='*')&&(ch=='/')) { + break; + } + } while (true); + if (countp!=NULL) *countp = count; + ch = is.get(); + return ch; +} + +// Eats space-like characters and comments +// possibly returns the number of space-like characters eaten. +int _EatSpace( std::istream& is,int* countp=NULL) { + int ch; + int count; + count=-1; + do { + _check_istream(is); + + ch = is.get(); + count++; + if (ch == '#') { + ch = _EatUntilEndOfLine(is,&count); + } + if (ch == '/') { + ch = is.get(); + if (ch == '/') { + ch = _EatUntilEndOfLine(is,&count); + } else if (ch == '*') { + ch = _EatUntilEndOfComment(is,&count); + } else { + is.putback(ch); + ch = '/'; + } + } + } while ((ch==' ')||(ch=='\n')||(ch=='\t')); + if (countp!=NULL) *countp = count; + return ch; +} + + + +// Eats whites, returns, tabs and the delim character +// Checks wether delim char. is encountered. +void Eat( std::istream& is, int delim ) +{ + int ch; + ch=_EatSpace(is); + if (ch != delim) { + throw Error_BasicIO_Exp_Delim(); + } + ch=_EatSpace(is); + is.putback(ch); +} + +// Eats whites, returns, tabs and the delim character +// Checks wether delim char. is encountered. +// EatEnd does not eat all space-like char's at the end. 
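The Eat/EatWord helpers defined here and just below implement the whitespace- and comment-tolerant scanning used by the KDL text parsers. A rough usage sketch (not from this patch; the input string and the chosen delimiters are only an illustration):

#include <iostream>
#include <sstream>
#include "kdl/utilities/utility_io.h"

int main()
{
    std::istringstream is("ROT [ 1, 2, 3 ]");

    char word[32];
    // Reads "ROT" (upper-cased) and stops at whitespace or at a delimiter from "[".
    KDL::EatWord(is, "[", word, sizeof(word));
    // Consumes the '[' delimiter; throws Error_BasicIO_Exp_Delim if it is missing.
    KDL::Eat(is, '[');

    double x;
    is >> x;            // the numeric fields are read with plain stream extraction
    KDL::Eat(is, ',');

    std::cout << word << ", first component = " << x << "\n";
    return 0;
}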
+void EatEnd( std::istream& is, int delim ) +{ + int ch; + ch=_EatSpace(is); + if (ch != delim) { + throw Error_BasicIO_Exp_Delim(); + } +} + + + +// For each space in descript, this routine eats whites,tabs, and newlines (at least one) +// There should be no consecutive spaces in the description. +// for each letter in descript, its reads the corresponding letter in the output +// the routine is case insensitive. + + +// Simple routine, enough for our purposes. +// works with ASCII chars +inline char Upper(char ch) +{ + /*if (('a'<=ch)&&(ch<='z')) + return (ch-'a'+'A'); + else + return ch; + */ + return toupper(ch); +} + +void Eat(std::istream& is,const char* descript) +{ + // eats whites before word + char ch; + char chdescr; + ch=_EatSpace(is); + is.putback(ch); + const char* p; + p = descript; + while ((*p)!=0) { + chdescr = (char)Upper(*p); + if (chdescr==' ') { + int count=0; + ch=_EatSpace(is,&count); + is.putback(ch); + if (count==0) { + throw Error_BasicIO_Not_A_Space(); + } + } else { + ch=(char)is.get(); + if (chdescr!=Upper(ch)) { + throw Error_BasicIO_Unexpected(); + } + } + p++; + } + +} + + + +void EatWord(std::istream& is,const char* delim,char* storage,int maxsize) +{ + int ch; + char* p; + int size; + // eat white before word + ch=_EatSpace(is); + p = storage; + size=0; + int count = 0; + while ((count==0)&&(strchr(delim,ch)==NULL)) { + *p = (char) toupper(ch); + ++p; + if (size==maxsize) { + throw Error_BasicIO_ToBig(); + } + _check_istream(is); + ++size; + //ch = is.get(); + ch =_EatSpace(is,&count); + } + *p=0; + is.putback(ch); +} + + +} diff --git a/intern/itasc/kdl/utilities/utility_io.h b/intern/itasc/kdl/utilities/utility_io.h new file mode 100644 index 00000000000..2a71ce870a3 --- /dev/null +++ b/intern/itasc/kdl/utilities/utility_io.h @@ -0,0 +1,79 @@ +/***************************************************************************** + * \author + * Erwin Aertbelien, Div. PMA, Dep. of Mech. Eng., K.U.Leuven + * + * \version + * ORO_Geometry V0.2 + * + * \par History + * - $log$ + * + * \par Release + * $Id$ + * $Name: $ + * + * \file utility_io.h + * Included by most lrl-files to provide some general + * functions and macro definitions related to file/stream I/O. + */ + +#ifndef KDL_UTILITY_IO_H_84822 +#define KDL_UTILITY_IO_H_84822 + +//#include + + +// Standard includes +#include +#include +#include + + +namespace KDL { + + +/** + * checks validity of basic io of is + */ +void _check_istream(std::istream& is); + + +/** + * Eats characters of the stream until the character delim is encountered + * @param is a stream + * @param delim eat until this character is encountered + */ +void Eat(std::istream& is, int delim ); + +/** + * Eats characters of the stream as long as they satisfy the description in descript + * @param is a stream + * @param descript description string. A sequence of spaces, tabs, + * new-lines and comments is regarded as 1 space in the description string. + */ +void Eat(std::istream& is,const char* descript); + +/** + * Eats a word of the stream delimited by the letters in delim or space(tabs...) + * @param is a stream + * @param delim a string containing the delimmiting characters + * @param storage for returning the word + * @param maxsize a word can be maximally maxsize-1 long. + */ +void EatWord(std::istream& is,const char* delim,char* storage,int maxsize); + +/** + * Eats characters of the stream until the character delim is encountered + * similar to Eat(is,delim) but spaces at the end are not read. 
+ * @param is a stream + * @param delim eat until this character is encountered + */ +void EatEnd( std::istream& is, int delim ); + + + + +} + + +#endif diff --git a/intern/itasc/make/msvc_9_0/itasc.vcproj b/intern/itasc/make/msvc_9_0/itasc.vcproj new file mode 100644 index 00000000000..f4a81079da0 --- /dev/null +++ b/intern/itasc/make/msvc_9_0/itasc.vcproj @@ -0,0 +1,539 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/intern/itasc/ublas_types.hpp b/intern/itasc/ublas_types.hpp new file mode 100644 index 00000000000..bf9bdcc26f2 --- /dev/null +++ b/intern/itasc/ublas_types.hpp @@ -0,0 +1,82 @@ +/* + * ublas_types.hpp + * + * Created on: Jan 5, 2009 + * Author: rubensmits + */ + +#ifndef UBLAS_TYPES_HPP_ +#define UBLAS_TYPES_HPP_ + +#include +#include +#include +#include +#include "kdl/frames.hpp" +#include "kdl/tree.hpp" +#include "kdl/chain.hpp" +#include "kdl/jacobian.hpp" +#include "kdl/jntarray.hpp" + + +namespace iTaSC{ + +namespace ublas = boost::numeric::ublas; +using KDL::Twist; +using KDL::Frame; +using KDL::Joint; +using KDL::Inertia; +using KDL::SegmentMap; +using KDL::Tree; +using KDL::JntArray; +using KDL::Jacobian; +using KDL::Segment; +using KDL::Rotation; +using KDL::Vector; +using KDL::Chain; + +#define u_scalar double +#define u_vector ublas::vector +#define u_zero_vector ublas::zero_vector +#define u_matrix ublas::matrix +#define u_matrix6 ublas::matrix +#define u_identity_matrix ublas::identity_matrix +#define u_scalar_vector ublas::scalar_vector +#define u_zero_matrix ublas::zero_matrix +#define u_vector6 ublas::bounded_vector + +inline static int changeBase(const u_matrix& J_in, const Frame& T, u_matrix& J_out) { + + if (J_out.size1() != 6 || J_in.size1() != 6 || J_in.size2() != J_out.size2()) + return -1; + for (unsigned int j = 0; j < J_in.size2(); ++j) { + ublas::matrix_column Jj_in = column(J_in,j); + ublas::matrix_column Jj_out = column(J_out,j); + Twist arg; + for(unsigned int i=0;i<6;++i) + arg(i)=Jj_in(i); + Twist tmp(T*arg); + for(unsigned int i=0;i<6;++i) + Jj_out(i)=tmp(i); + } + return 0; +} +inline static int changeBase(const ublas::matrix_range& J_in, const Frame& T, ublas::matrix_range& J_out) { + + if (J_out.size1() != 6 || J_in.size1() != 6 || J_in.size2() != J_out.size2()) + return -1; + for (unsigned int j = 0; j < J_in.size2(); ++j) { + ublas::matrix_column > Jj_in = column(J_in,j); + ublas::matrix_column > Jj_out = column(J_out,j); + Twist arg; + for(unsigned int i=0;i<6;++i) + arg(i)=Jj_in(i); + Twist tmp(T*arg); + for(unsigned int i=0;i<6;++i) + Jj_out(i)=tmp(i); + } + return 0; +} + +} +#endif /* UBLAS_TYPES_HPP_ */ diff --git a/intern/memutil/make/msvc_9_0/memutil.vcproj b/intern/memutil/make/msvc_9_0/memutil.vcproj index 6f642fb16bc..0b8251f0d7e 100644 --- a/intern/memutil/make/msvc_9_0/memutil.vcproj +++ b/intern/memutil/make/msvc_9_0/memutil.vcproj @@ -119,6 +119,7 @@ /> 0.000001f) _pressure[index] /= pcnt; - // test - dg - // if(vp > 0.000001f) - // _zVelocity[index] /= vp; - // TODO? 
set correct velocity bc's // velocities are only set to zero right now // this means it's not a full no-slip boundary condition diff --git a/intern/smoke/intern/FLUID_3D_STATIC.cpp b/intern/smoke/intern/FLUID_3D_STATIC.cpp index 2d3ec125c2b..afeca2b1faa 100644 --- a/intern/smoke/intern/FLUID_3D_STATIC.cpp +++ b/intern/smoke/intern/FLUID_3D_STATIC.cpp @@ -54,23 +54,21 @@ void FLUID_3D::addSmokeTestCase(float* field, Vec3Int res) float yTotal = dx * res[1]; float zTotal = dx * res[2]; - float heighMin = 0.05; - float heighMax = 0.10; - - for (int y = 0; y < res[2]; y++) - for (int z = (int)(heighMin*res[2]); z <= (int)(heighMax * res[2]); z++) - for (int x = 0; x < res[0]; x++) - { - float xLength = x * dx - xTotal * 0.4f; - float yLength = y * dx - yTotal * 0.5f; - float radius = sqrtf(xLength * xLength + yLength * yLength); - - if (radius < 0.075f * xTotal) - { - int index = x + y * res[0] + z * slabSize; - field[index] = 1.0f; - } - } + float heighMin = 0.05; + float heighMax = 0.10; + + for (int y = 0; y < res[2]; y++) + for (int z = (int)(heighMin*res[2]); z <= (int)(heighMax * res[2]); z++) + for (int x = 0; x < res[0]; x++) { + float xLength = x * dx - xTotal * 0.4f; + float yLength = y * dx - yTotal * 0.5f; + float radius = sqrtf(xLength * xLength + yLength * yLength); + + if (radius < 0.075f * xTotal) { + int index = x + y * res[0] + z * slabSize; + field[index] = 1.0f; + } + } } @@ -98,7 +96,7 @@ void FLUID_3D::setNeumannX(float* field, Vec3Int res) for (int z = 0; z < res[2]; z++) { // top slab - int index = y * res[0] + z * slabSize; + index = y * res[0] + z * slabSize; index += res[0] - 1; if(field[index]<0.) field[index] = 0.; index -= 1; @@ -130,7 +128,7 @@ void FLUID_3D::setNeumannY(float* field, Vec3Int res) for (int x = 0; x < res[0]; x++) { // top slab - int index = x + z * slabSize; + index = x + z * slabSize; index += slabSize - res[0]; if(field[index]<0.) field[index] = 0.; index -= res[0]; @@ -164,7 +162,7 @@ void FLUID_3D::setNeumannZ(float* field, Vec3Int res) for (int x = 0; x < res[0]; x++) { // top slab - int index = x + y * res[0]; + index = x + y * res[0]; index += totalCells - slabSize; if(field[index]<0.) 
field[index] = 0.; index -= slabSize; @@ -298,7 +296,7 @@ void FLUID_3D::advectFieldSemiLagrange(const float dt, const float* velx, const const int slabSize = res[0] * res[1]; // scale dt up to grid resolution -#if PARALLEL==1 +#if PARALLEL==1 && !_WIN32 #pragma omp parallel #pragma omp for schedule(static) #endif diff --git a/intern/smoke/intern/WTURBULENCE.cpp b/intern/smoke/intern/WTURBULENCE.cpp index a1b2aaf30f2..7ea4bde3884 100644 --- a/intern/smoke/intern/WTURBULENCE.cpp +++ b/intern/smoke/intern/WTURBULENCE.cpp @@ -735,19 +735,17 @@ void WTURBULENCE::stepTurbulenceFull(float dtOrg, float* xvel, float* yvel, floa // enlarge timestep to match grid const float dt = dtOrg * _amplify; const float invAmp = 1.0f / _amplify; - float *tempBig1 = new float[_totalCellsBig]; - float *tempBig2 = new float[_totalCellsBig]; - float *bigUx = new float[_totalCellsBig]; - float *bigUy = new float[_totalCellsBig]; - float *bigUz = new float[_totalCellsBig]; - float *_energy = new float[_totalCellsSm]; - float *highFreqEnergy = new float[_totalCellsSm]; - float *eigMin = new float[_totalCellsSm]; - float *eigMax = new float[_totalCellsSm]; - - memset(highFreqEnergy, 0, sizeof(float)*_totalCellsSm); - memset(eigMin, 0, sizeof(float)*_totalCellsSm); - memset(eigMax, 0, sizeof(float)*_totalCellsSm); + float *tempBig1 = (float *)calloc(_totalCellsBig, sizeof(float)); + float *tempBig2 = (float *)calloc(_totalCellsBig, sizeof(float)); + float *bigUx = (float *)calloc(_totalCellsBig, sizeof(float)); + float *bigUy = (float *)calloc(_totalCellsBig, sizeof(float)); + float *bigUz = (float *)calloc(_totalCellsBig, sizeof(float)); + float *_energy = (float *)calloc(_totalCellsSm, sizeof(float)); + float *highFreqEnergy = (float *)calloc(_totalCellsSm, sizeof(float)); + float *eigMin = (float *)calloc(_totalCellsSm, sizeof(float)); + float *eigMax = (float *)calloc(_totalCellsSm, sizeof(float)); + + memset(_tcTemp, 0, sizeof(float)*_totalCellsSm); // prepare textures advectTextureCoordinates(dtOrg, xvel,yvel,zvel, tempBig1, tempBig2); @@ -771,16 +769,16 @@ void WTURBULENCE::stepTurbulenceFull(float dtOrg, float* xvel, float* yvel, floa // parallel region setup float maxVelMagThreads[8] = { -1., -1., -1., -1., -1., -1., -1., -1. 
}; -#if PARALLEL==1 +#if PARALLEL==1 && !_WIN32 #pragma omp parallel #endif { float maxVelMag1 = 0.; -#if PARALLEL==1 +#if PARALLEL==1 && !_WIN32 const int id = omp_get_thread_num(); /*, num = omp_get_num_threads(); */ #endif // vector noise main loop -#if PARALLEL==1 +#if PARALLEL==1 && !_WIN32 #pragma omp for schedule(static) #endif for (int zSmall = 0; zSmall < _zResSm; zSmall++) @@ -912,7 +910,7 @@ void WTURBULENCE::stepTurbulenceFull(float dtOrg, float* xvel, float* yvel, floa bigUx[index] = bigUy[index] = bigUz[index] = 0.; } // xyz -#if PARALLEL==1 +#if PARALLEL==1 && !_WIN32 maxVelMagThreads[id] = maxVelMag1; #else maxVelMagThreads[0] = maxVelMag1; @@ -922,7 +920,7 @@ void WTURBULENCE::stepTurbulenceFull(float dtOrg, float* xvel, float* yvel, floa // compute maximum over threads float maxVelMag = maxVelMagThreads[0]; -#if PARALLEL==1 +#if PARALLEL==1 && !_WIN32 for (int i = 1; i < 8; i++) if (maxVelMag < maxVelMagThreads[i]) maxVelMag = maxVelMagThreads[i]; @@ -957,13 +955,13 @@ void WTURBULENCE::stepTurbulenceFull(float dtOrg, float* xvel, float* yvel, floa SWAP_POINTERS(_densityBig, _densityBigOld); } // substep - delete[] tempBig1; - delete[] tempBig2; - delete[] bigUx; - delete[] bigUy; - delete[] bigUz; - delete[] _energy; - delete[] highFreqEnergy; + free(tempBig1); + free(tempBig2); + free(bigUx); + free(bigUy); + free(bigUz); + free(_energy); + free(highFreqEnergy); // wipe the density borders FLUID_3D::setZeroBorder(_densityBig, _resBig); @@ -973,8 +971,8 @@ void WTURBULENCE::stepTurbulenceFull(float dtOrg, float* xvel, float* yvel, floa // eigenvalues stored do not reflect the underlying texture coordinates resetTextureCoordinates(eigMin, eigMax); - delete[] eigMin; - delete[] eigMax; + free(eigMin); + free(eigMax); // output files // string prefix = string("./amplified.preview/density_bigxy_"); @@ -986,4 +984,3 @@ void WTURBULENCE::stepTurbulenceFull(float dtOrg, float* xvel, float* yvel, floa _totalStepsBig++; } - diff --git a/intern/smoke/intern/smoke_API.cpp b/intern/smoke/intern/smoke_API.cpp index 67df6e805d8..2d1d590fcc0 100644 --- a/intern/smoke/intern/smoke_API.cpp +++ b/intern/smoke/intern/smoke_API.cpp @@ -235,6 +235,21 @@ extern "C" float *smoke_get_velocity_z(FLUID_3D *fluid) return fluid->_zVelocity; } +extern "C" float *smoke_get_force_x(FLUID_3D *fluid) +{ + return fluid->_xForce; +} + +extern "C" float *smoke_get_force_y(FLUID_3D *fluid) +{ + return fluid->_yForce; +} + +extern "C" float *smoke_get_force_z(FLUID_3D *fluid) +{ + return fluid->_zForce; +} + extern "C" float *smoke_turbulence_get_density(WTURBULENCE *wt) { return wt ? 
wt->getDensityBig() : NULL; diff --git a/intern/smoke/make/msvc_9_0/smoke.vcproj b/intern/smoke/make/msvc_9_0/smoke.vcproj index aa3779031f0..38a761d5d82 100644 --- a/intern/smoke/make/msvc_9_0/smoke.vcproj +++ b/intern/smoke/make/msvc_9_0/smoke.vcproj @@ -42,6 +42,7 @@ /> @@ -264,6 +265,7 @@ /> + + @@ -627,6 +632,10 @@ RelativePath="..\..\..\source\blender\blenlib\BLI_array.h" > + + diff --git a/projectfiles_vc9/blender/blenpluginapi/blenpluginapi/blenpluginapi.vcproj b/projectfiles_vc9/blender/blenpluginapi/blenpluginapi/blenpluginapi.vcproj index 02ea370e34a..c8e8dffbb24 100644 --- a/projectfiles_vc9/blender/blenpluginapi/blenpluginapi/blenpluginapi.vcproj +++ b/projectfiles_vc9/blender/blenpluginapi/blenpluginapi/blenpluginapi.vcproj @@ -4,6 +4,7 @@ Version="9,00" Name="blenpluginapi" ProjectGUID="{BB6AA598-B336-4F8B-9DF9-8CAE7BE71C23}" + RootNamespace="blenpluginapi" TargetFrameworkVersion="131072" > @@ -42,7 +43,7 @@ @@ -1243,11 +1244,11 @@ > + + @@ -1440,38 +1445,34 @@ - - @@ -1567,6 +1568,26 @@ > + + + + + + + + + + diff --git a/projectfiles_vc9/blender/gpu/BL_gpu.vcproj b/projectfiles_vc9/blender/gpu/BL_gpu.vcproj index 1daf345f638..af0b90796d1 100644 --- a/projectfiles_vc9/blender/gpu/BL_gpu.vcproj +++ b/projectfiles_vc9/blender/gpu/BL_gpu.vcproj @@ -192,6 +192,7 @@ /> + + @@ -502,6 +507,10 @@ Name="Header Files" Filter="h;hpp;hxx;hm;inl" > + + diff --git a/projectfiles_vc9/blender/ikplugin/BIK_ikplugin.vcproj b/projectfiles_vc9/blender/ikplugin/BIK_ikplugin.vcproj new file mode 100644 index 00000000000..48693942798 --- /dev/null +++ b/projectfiles_vc9/blender/ikplugin/BIK_ikplugin.vcproj @@ -0,0 +1,214 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/projectfiles_vc9/blender/imbuf/BL_imbuf.vcproj b/projectfiles_vc9/blender/imbuf/BL_imbuf.vcproj index 0e25b26831b..b0ba1133393 100644 --- a/projectfiles_vc9/blender/imbuf/BL_imbuf.vcproj +++ b/projectfiles_vc9/blender/imbuf/BL_imbuf.vcproj @@ -266,6 +266,7 @@ /> + + @@ -682,6 +681,10 @@ RelativePath="..\..\..\source\blender\makesrna\intern\rna_image.c" > + + @@ -706,6 +709,10 @@ RelativePath="..\..\..\source\blender\makesrna\intern\rna_material.c" > + + @@ -754,6 +761,10 @@ RelativePath="..\..\..\source\blender\makesrna\intern\rna_pose.c" > + + @@ -810,6 +821,10 @@ RelativePath="..\..\..\source\blender\makesrna\intern\rna_text.c" > + + @@ -851,6 +866,22 @@ Name="Header Files" Filter="h;hpp;hxx;hm;inl" > + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/projectfiles_vc9/blender/render/BRE_render.vcproj b/projectfiles_vc9/blender/render/BRE_render.vcproj index 4e354c6dde3..5a40dd4e97e 100644 --- a/projectfiles_vc9/blender/render/BRE_render.vcproj +++ b/projectfiles_vc9/blender/render/BRE_render.vcproj @@ -4,6 +4,7 @@ Version="9,00" Name="BRE_render" ProjectGUID="{106AE171-0083-41D6-A949-20DB0E8DC251}" + RootNamespace="BRE_render" TargetFrameworkVersion="131072" > @@ -41,8 +42,9 @@ /> + + + + + + + + + + diff --git a/projectfiles_vc9/blender/windowmanager/windowmanager.vcproj b/projectfiles_vc9/blender/windowmanager/windowmanager.vcproj index a074f66327d..805ad1f1cf3 100644 --- a/projectfiles_vc9/blender/windowmanager/windowmanager.vcproj +++ b/projectfiles_vc9/blender/windowmanager/windowmanager.vcproj @@ -43,7 +43,7 @@ + + + + + + + + @@ -554,7 +571,15 @@ > + + + + + + @@ -658,6 +663,10 @@ 
RelativePath="..\..\..\source\gameengine\GameLogic\SCA_ANDController.h" > + + diff --git a/projectfiles_vc9/gameengine/gameplayer/axctl/GP_axctl.vcproj b/projectfiles_vc9/gameengine/gameplayer/axctl/GP_axctl.vcproj index ed2dad65374..5bcfe1aabd1 100644 --- a/projectfiles_vc9/gameengine/gameplayer/axctl/GP_axctl.vcproj +++ b/projectfiles_vc9/gameengine/gameplayer/axctl/GP_axctl.vcproj @@ -161,6 +161,7 @@ /> + + @@ -1026,6 +1031,10 @@ + + diff --git a/projectfiles_vc9/gameengine/ketsji/network/KX_network.vcproj b/projectfiles_vc9/gameengine/ketsji/network/KX_network.vcproj index 36c8218b803..92cde144652 100644 --- a/projectfiles_vc9/gameengine/ketsji/network/KX_network.vcproj +++ b/projectfiles_vc9/gameengine/ketsji/network/KX_network.vcproj @@ -42,6 +42,7 @@ /> @@ -116,6 +117,7 @@ /> @@ -191,6 +192,7 @@ /> @@ -264,6 +265,7 @@ /> @@ -264,6 +265,7 @@ /> @@ -189,6 +190,7 @@ /> @@ -266,6 +267,7 @@ /> Copy python infrastructure" @[ ! -d scripts ] || cp -r scripts $(CONFDIR)/scripts - - @echo "----> Copy python UI files" - @[ ! -d ui ] || cp -r ui $(CONFDIR)/ui ifeq ($(OS),darwin) @echo "----> Copy python modules" diff --git a/release/datafiles/blenderbuttons b/release/datafiles/blenderbuttons index 79d6138e3f0..f9a4b934969 100644 Binary files a/release/datafiles/blenderbuttons and b/release/datafiles/blenderbuttons differ diff --git a/release/io/engine_render_pov.py b/release/io/engine_render_pov.py deleted file mode 100644 index c1cd84c8978..00000000000 --- a/release/io/engine_render_pov.py +++ /dev/null @@ -1,928 +0,0 @@ -import bpy - -from math import atan, pi, degrees -import subprocess -import os -import sys -import time - -import platform as pltfrm - -if pltfrm.architecture()[0] == '64bit': - bitness = 64 -else: - bitness = 32 - -def write_pov(filename, scene=None, info_callback = None): - file = open(filename, 'w') - - # Only for testing - if not scene: - scene = bpy.data.scenes[0] - - render = scene.render_data - world = scene.world - - # --- taken from fbx exporter - ## This was used to make V, but faster not to do all that - ##valid = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_,.()[]{}' - ##v = range(255) - ##for c in valid: v.remove(ord(c)) - v = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,42,43,46,47,58,59,60,61,62,63,64,92,94,96,124,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254] - invalid = ''.join([chr(i) for i in v]) - def cleanName(name): - for ch in invalid: name = name.replace(ch, '_') - return name - del v - - # --- done with clean name. 
- - def uniqueName(name, nameSeq): - - if name not in nameSeq: - return name - - name_orig = name - i = 1 - while name in nameSeq: - name = '%s_%.3d' % (name_orig, i) - i+=1 - - return name - - - def writeMatrix(matrix): - file.write('\tmatrix <%.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f>\n' %\ - (matrix[0][0], matrix[0][1], matrix[0][2], matrix[1][0], matrix[1][1], matrix[1][2], matrix[2][0], matrix[2][1], matrix[2][2], matrix[3][0], matrix[3][1], matrix[3][2]) ) - - def writeObjectMaterial(material): - if material and material.transparency_method=='RAYTRACE': - file.write('\tinterior { ior %.6f }\n' % material.raytrace_transparency.ior) - - # Other interior args - # fade_distance 2 - # fade_power [Value] - # fade_color - - # dispersion - # dispersion_samples - - materialNames = {} - DEF_MAT_NAME = 'Default' - def writeMaterial(material): - # Assumes only called once on each material - - if material: - name_orig = material.name - else: - name_orig = DEF_MAT_NAME - - name = materialNames[name_orig] = uniqueName(cleanName(name_orig), materialNames) - - file.write('#declare %s = finish {\n' % name) - - if material: - file.write('\tdiffuse %.3g\n' % material.diffuse_intensity) - file.write('\tspecular %.3g\n' % material.specular_intensity) - - file.write('\tambient %.3g\n' % material.ambient) - #file.write('\tambient rgb <%.3g, %.3g, %.3g>\n' % tuple([c*material.ambient for c in world.ambient_color])) # povray blends the global value - - # map hardness between 0.0 and 1.0 - roughness = ((1.0 - ((material.specular_hardness-1.0)/510.0))) - # scale from 0.0 to 0.1 - roughness *= 0.1 - # add a small value because 0.0 is invalid - roughness += (1/511.0) - - file.write('\troughness %.3g\n' % roughness) - - # 'phong 70.0 ' - - if material.raytrace_mirror.enabled: - raytrace_mirror= material.raytrace_mirror - if raytrace_mirror.reflect_factor: - file.write('\treflection {\n') - file.write('\t\trgb <%.3g, %.3g, %.3g>' % tuple(material.mirror_color)) - file.write('\t\tfresnel 1 falloff %.3g exponent %.3g metallic %.3g} ' % (raytrace_mirror.fresnel, raytrace_mirror.fresnel_factor, raytrace_mirror.reflect_factor)) - - else: - file.write('\tdiffuse 0.8\n') - file.write('\tspecular 0.2\n') - - - - # This is written into the object - ''' - if material and material.transparency_method=='RAYTRACE': - 'interior { ior %.3g} ' % material.raytrace_transparency.ior - ''' - - #file.write('\t\t\tcrand 1.0\n') # Sand granyness - #file.write('\t\t\tmetallic %.6f\n' % material.spec) - #file.write('\t\t\tphong %.6f\n' % material.spec) - #file.write('\t\t\tphong_size %.6f\n' % material.spec) - #file.write('\t\t\tbrilliance %.6f ' % (material.specular_hardness/256.0) # Like hardness - - file.write('}\n') - - def exportCamera(): - camera = scene.camera - matrix = camera.matrix - - # compute resolution - Qsize=float(render.resolution_x)/float(render.resolution_y) - - file.write('camera {\n') - file.write('\tlocation <0, 0, 0>\n') - file.write('\tlook_at <0, 0, -1>\n') - file.write('\tright <%s, 0, 0>\n' % -Qsize) - file.write('\tup <0, 1, 0>\n') - file.write('\tangle %f \n' % (360.0*atan(16.0/camera.data.lens)/pi)) - - file.write('\trotate <%.6f, %.6f, %.6f>\n' % tuple([degrees(e) for e in matrix.rotationPart().toEuler()])) - file.write('\ttranslate <%.6f, %.6f, %.6f>\n' % (matrix[3][0], matrix[3][1], matrix[3][2])) - file.write('}\n') - - - - def exportLamps(lamps): - # Get all lamps - for ob in lamps: - lamp = ob.data - - matrix = ob.matrix - - color = tuple([c * lamp.energy for c in 
lamp.color]) # Colour is modified by energy - - file.write('light_source {\n') - file.write('\t< 0,0,0 >\n') - file.write('\tcolor rgb<%.3g, %.3g, %.3g>\n' % color) - - if lamp.type == 'POINT': # Point Lamp - pass - elif lamp.type == 'SPOT': # Spot - file.write('\tspotlight\n') - - # Falloff is the main radius from the centre line - file.write('\tfalloff %.2f\n' % (lamp.spot_size/2.0) ) # 1 TO 179 FOR BOTH - file.write('\tradius %.6f\n' % ((lamp.spot_size/2.0) * (1-lamp.spot_blend)) ) - - # Blender does not have a tightness equivilent, 0 is most like blender default. - file.write('\ttightness 0\n') # 0:10f - - file.write('\tpoint_at <0, 0, -1>\n') - elif lamp.type == 'SUN': - file.write('\tparallel\n') - file.write('\tpoint_at <0, 0, -1>\n') # *must* be after 'parallel' - - elif lamp.type == 'AREA': - - size_x = lamp.size - samples_x = lamp.shadow_ray_samples_x - if lamp.shape == 'SQUARE': - size_y = size_x - samples_y = samples_x - else: - size_y = lamp.size_y - samples_y = lamp.shadow_ray_samples_y - - - - file.write('\tarea_light <%d,0,0>,<0,0,%d> %d, %d\n' % (size_x, size_y, samples_x, samples_y)) - if lamp.shadow_ray_sampling_method == 'CONSTANT_JITTERED': - if lamp.jitter: - file.write('\tjitter\n') - else: - file.write('\tadaptive 1\n') - file.write('\tjitter\n') - - if lamp.shadow_method == 'NOSHADOW': - file.write('\tshadowless\n') - - file.write('\tfade_distance %.6f\n' % lamp.distance) - file.write('\tfade_power %d\n' % 1) # Could use blenders lamp quad? - writeMatrix(matrix) - - file.write('}\n') - - def exportMeta(metas): - - # TODO - blenders 'motherball' naming is not supported. - - for ob in metas: - meta = ob.data - - file.write('blob {\n') - file.write('\t\tthreshold %.4g\n' % meta.threshold) - - try: - material= meta.materials[0] # lame! - blender cant do enything else. - except: - material= None - - for elem in meta.elements: - - if elem.type not in ('BALL', 'ELLIPSOID'): - continue # Not supported - - loc = elem.location - - stiffness= elem.stiffness - if elem.negative: - stiffness = -stiffness - - if elem.type == 'BALL': - - file.write('\tsphere { <%.6g, %.6g, %.6g>, %.4g, %.4g ' % (loc.x, loc.y, loc.z, elem.radius, stiffness)) - - # After this wecould do something simple like... - # "pigment {Blue} }" - # except we'll write the color - - elif elem.type == 'ELLIPSOID': - # location is modified by scale - file.write('\tsphere { <%.6g, %.6g, %.6g>, %.4g, %.4g ' % (loc.x/elem.size_x, loc.y/elem.size_y, loc.z/elem.size_z, elem.radius, stiffness)) - file.write( 'scale <%.6g, %.6g, %.6g> ' % (elem.size_x, elem.size_y, elem.size_z)) - - if material: - diffuse_color = material.diffuse_color - - if material.transparency and material.transparency_method=='RAYTRACE': trans = 1-material.raytrace_transparency.filter - else: trans = 0.0 - - file.write( - 'pigment {rgbft<%.3g, %.3g, %.3g, %.3g, %.3g>} finish {%s} }\n' % \ - (diffuse_color[0], diffuse_color[1], diffuse_color[2], 1-material.alpha, trans, materialNames[material.name]) - ) - - else: - file.write('pigment {rgb<1 1 1>} finish {%s} }\n' % DEF_MAT_NAME) # Write the finish last. 
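The spot-lamp branch above boils down to a small mapping: POV-Ray's falloff is half of Blender's cone angle, and the hotspot radius shrinks linearly with the blend factor. A sketch of that mapping (the helper name is illustrative; the units follow the exporter, which writes the angle out unconverted):

    def spot_to_pov(spot_size, spot_blend):
        """Map Blender's spot cone settings onto POV-Ray spotlight parameters."""
        falloff = spot_size / 2.0              # outer half-angle of the cone
        radius = falloff * (1.0 - spot_blend)  # inner, fully lit half-angle
        return falloff, radius

    # spot_to_pov(60.0, 0.15) -> (30.0, 25.5)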
- - writeObjectMaterial(material) - - writeMatrix(ob.matrix) - - file.write('}\n') - - - - - def exportMeshs(sel): - - ob_num = 0 - - for ob in sel: - ob_num+= 1 - - if ob.type in ('LAMP', 'CAMERA', 'EMPTY', 'META'): - continue - - me = ob.data - me_materials= me.materials - - me = ob.create_render_mesh(scene) - - if not me: - continue - - if info_callback: - info_callback('Object %2.d of %2.d (%s)' % (ob_num, len(sel), ob.name)) - - #if ob.type!='MESH': - # continue - # me = ob.data - - matrix = ob.matrix - try: uv_layer = me.active_uv_texture.data - except:uv_layer = None - - try: vcol_layer = me.active_vertex_color.data - except:vcol_layer = None - - faces_verts = [f.verts for f in me.faces] - faces_normals = [tuple(f.normal) for f in me.faces] - verts_normals = [tuple(v.normal) for v in me.verts] - - # quads incur an extra face - quadCount = len([f for f in faces_verts if len(f)==4]) - - file.write('mesh2 {\n') - file.write('\tvertex_vectors {\n') - file.write('\t\t%s' % (len(me.verts))) # vert count - for v in me.verts: - file.write(',\n\t\t<%.6f, %.6f, %.6f>' % tuple(v.co)) # vert count - file.write('\n }\n') - - - # Build unique Normal list - uniqueNormals = {} - for fi, f in enumerate(me.faces): - fv = faces_verts[fi] - # [-1] is a dummy index, use a list so we can modify in place - if f.smooth: # Use vertex normals - for v in fv: - key = verts_normals[v] - uniqueNormals[key] = [-1] - else: # Use face normal - key = faces_normals[fi] - uniqueNormals[key] = [-1] - - file.write('\tnormal_vectors {\n') - file.write('\t\t%d' % len(uniqueNormals)) # vert count - idx = 0 - for no, index in uniqueNormals.items(): - file.write(',\n\t\t<%.6f, %.6f, %.6f>' % no) # vert count - index[0] = idx - idx +=1 - file.write('\n }\n') - - - # Vertex colours - vertCols = {} # Use for material colours also. - - if uv_layer: - # Generate unique UV's - uniqueUVs = {} - - for fi, uv in enumerate(uv_layer): - - if len(faces_verts[fi])==4: - uvs = uv.uv1, uv.uv2, uv.uv3, uv.uv4 - else: - uvs = uv.uv1, uv.uv2, uv.uv3 - - for uv in uvs: - uniqueUVs[tuple(uv)] = [-1] - - file.write('\tuv_vectors {\n') - #print unique_uvs - file.write('\t\t%s' % (len(uniqueUVs))) # vert count - idx = 0 - for uv, index in uniqueUVs.items(): - file.write(',\n\t\t<%.6f, %.6f>' % uv) - index[0] = idx - idx +=1 - ''' - else: - # Just add 1 dummy vector, no real UV's - file.write('\t\t1') # vert count - file.write(',\n\t\t<0.0, 0.0>') - ''' - file.write('\n }\n') - - - if me.vertex_colors: - - for fi, f in enumerate(me.faces): - material_index = f.material_index - material = me_materials[material_index] - - if material and material.vertex_color_paint: - - col = vcol_layer[fi] - - if len(faces_verts[fi])==4: - cols = col.color1, col.color2, col.color3, col.color4 - else: - cols = col.color1, col.color2, col.color3 - - for col in cols: - key = col[0], col[1], col[2], material_index # Material index! 
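exportMeshs leans on one idiom throughout: collect candidate tuples (rounded normals, UVs, colour-plus-material keys) as dictionary keys whose placeholder values are later overwritten with running indices, so duplicates collapse into a single entry of the corresponding mesh2 list. A compact restatement of that dedup-and-index pattern (the helper name is hypothetical):

    def index_unique(keys):
        """Assign a stable, first-seen index to every distinct key."""
        table = {}
        for key in keys:
            if key not in table:   # duplicates reuse the existing slot
                table[key] = len(table)
        return table

    # index_unique([(1.0, 0.0, 0.0, 0), (0.0, 1.0, 0.0, 1), (1.0, 0.0, 0.0, 0)])
    # -> {(1.0, 0.0, 0.0, 0): 0, (0.0, 1.0, 0.0, 1): 1}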
- vertCols[key] = [-1] - - else: - if material: - diffuse_color = tuple(material.diffuse_color) - key = diffuse_color[0], diffuse_color[1], diffuse_color[2], material_index - vertCols[key] = [-1] - - - else: - # No vertex colours, so write material colours as vertex colours - for i, material in enumerate(me_materials): - - if material: - diffuse_color = tuple(material.diffuse_color) - key = diffuse_color[0], diffuse_color[1], diffuse_color[2], i # i == f.mat - vertCols[key] = [-1] - - - # Vert Colours - file.write('\ttexture_list {\n') - file.write('\t\t%s' % (len(vertCols))) # vert count - idx=0 - for col, index in vertCols.items(): - - if me_materials: - material = me_materials[col[3]] - material_finish = materialNames[material.name] - - if material.transparency and material.transparency_method=='RAYTRACE': trans = 1-material.raytrace_transparency.filter - else: trans = 0.0 - - else: - material_finish = DEF_MAT_NAME # not working properly, - trans = 0.0 - - #print material.apl - file.write( ',\n\t\ttexture { pigment {rgbft<%.3g, %.3g, %.3g, %.3g, %.3g>} finish {%s}}' % - (col[0], col[1], col[2], 1-material.alpha, trans, material_finish) ) - - index[0] = idx - idx+=1 - - file.write( '\n }\n' ) - - # Face indicies - file.write('\tface_indices {\n') - file.write('\t\t%d' % (len(me.faces) + quadCount)) # faces count - for fi, f in enumerate(me.faces): - fv = faces_verts[fi] - material_index= f.material_index - if len(fv) == 4: indicies = (0,1,2), (0,2,3) - else: indicies = ((0,1,2),) - - if vcol_layer: - col = vcol_layer[fi] - - if len(fv) == 4: - cols = col.color1, col.color2, col.color3, col.color4 - else: - cols = col.color1, col.color2, col.color3 - - - if not me_materials or me_materials[material_index] == None: # No materials - for i1, i2, i3 in indicies: - file.write(',\n\t\t<%d,%d,%d>' % (fv[i1], fv[i2], fv[i3])) # vert count - else: - material = me_materials[material_index] - for i1, i2, i3 in indicies: - if me.vertex_colors and material.vertex_color_paint: - # Colour per vertex - vertex colour - - col1 = cols[i1] - col2 = cols[i2] - col3 = cols[i3] - - ci1 = vertCols[col1[0], col1[1], col1[2], material_index][0] - ci2 = vertCols[col2[0], col2[1], col2[2], material_index][0] - ci3 = vertCols[col3[0], col3[1], col3[2], material_index][0] - else: - # Colour per material - flat material colour - diffuse_color= material.diffuse_color - ci1 = ci2 = ci3 = vertCols[diffuse_color[0], diffuse_color[1], diffuse_color[2], f.material_index][0] - - file.write(',\n\t\t<%d,%d,%d>, %d,%d,%d' % (fv[i1], fv[i2], fv[i3], ci1, ci2, ci3)) # vert count - - - - file.write('\n }\n') - - # normal_indices indicies - file.write('\tnormal_indices {\n') - file.write('\t\t%d' % (len(me.faces) + quadCount)) # faces count - for fi, fv in enumerate(faces_verts): - - if len(fv) == 4: indicies = (0,1,2), (0,2,3) - else: indicies = ((0,1,2),) - - for i1, i2, i3 in indicies: - if f.smooth: - file.write(',\n\t\t<%d,%d,%d>' %\ - (uniqueNormals[verts_normals[fv[i1]]][0],\ - uniqueNormals[verts_normals[fv[i2]]][0],\ - uniqueNormals[verts_normals[fv[i3]]][0])) # vert count - else: - idx = uniqueNormals[faces_normals[fi]][0] - file.write(',\n\t\t<%d,%d,%d>' % (idx, idx, idx)) # vert count - - file.write('\n }\n') - - if uv_layer: - file.write('\tuv_indices {\n') - file.write('\t\t%d' % (len(me.faces) + quadCount)) # faces count - for fi, fv in enumerate(faces_verts): - - if len(fv) == 4: indicies = (0,1,2), (0,2,3) - else: indicies = ((0,1,2),) - - uv = uv_layer[fi] - if len(faces_verts[fi])==4: - uvs = tuple(uv.uv1), 
tuple(uv.uv2), tuple(uv.uv3), tuple(uv.uv4) - else: - uvs = tuple(uv.uv1), tuple(uv.uv2), tuple(uv.uv3) - - for i1, i2, i3 in indicies: - file.write(',\n\t\t<%d,%d,%d>' %\ - (uniqueUVs[uvs[i1]][0],\ - uniqueUVs[uvs[i2]][0],\ - uniqueUVs[uvs[i2]][0])) # vert count - file.write('\n }\n') - - if me.materials: - material = me.materials[0] # dodgy - writeObjectMaterial(material) - - writeMatrix(matrix) - file.write('}\n') - - bpy.data.remove_mesh(me) - - def exportWorld(world): - if not world: - return - - mist = world.mist - - if mist.enabled: - file.write('fog {\n') - file.write('\tdistance %.6f\n' % mist.depth) - file.write('\tcolor rgbt<%.3g, %.3g, %.3g, %.3g>\n' % (tuple(world.horizon_color) + (1-mist.intensity,))) - #file.write('\tfog_offset %.6f\n' % mist.start) - #file.write('\tfog_alt 5\n') - #file.write('\tturbulence 0.2\n') - #file.write('\tturb_depth 0.3\n') - file.write('\tfog_type 1\n') - file.write('}\n') - - def exportGlobalSettings(scene): - - file.write('global_settings {\n') - - if scene.pov_radio_enable: - file.write('\tradiosity {\n') - file.write("\t\tadc_bailout %.4g\n" % scene.pov_radio_adc_bailout) - file.write("\t\talways_sample %d\n" % scene.pov_radio_always_sample) - file.write("\t\tbrightness %.4g\n" % scene.pov_radio_brightness) - file.write("\t\tcount %d\n" % scene.pov_radio_count) - file.write("\t\terror_bound %.4g\n" % scene.pov_radio_error_bound) - file.write("\t\tgray_threshold %.4g\n" % scene.pov_radio_gray_threshold) - file.write("\t\tlow_error_factor %.4g\n" % scene.pov_radio_low_error_factor) - file.write("\t\tmedia %d\n" % scene.pov_radio_media) - file.write("\t\tminimum_reuse %.4g\n" % scene.pov_radio_minimum_reuse) - file.write("\t\tnearest_count %d\n" % scene.pov_radio_nearest_count) - file.write("\t\tnormal %d\n" % scene.pov_radio_normal) - file.write("\t\trecursion_limit %d\n" % scene.pov_radio_recursion_limit) - file.write('\t}\n') - - if world: - file.write("\tambient_light rgb<%.3g, %.3g, %.3g>\n" % tuple(world.ambient_color)) - - file.write('}\n') - - - # Convert all materials to strings we can access directly per vertex. - writeMaterial(None) # default material - - for material in bpy.data.materials: - writeMaterial(material) - - exportCamera() - #exportMaterials() - sel = scene.objects - exportLamps([l for l in sel if l.type == 'LAMP']) - exportMeta([l for l in sel if l.type == 'META']) - exportMeshs(sel) - exportWorld(scene.world) - exportGlobalSettings(scene) - - file.close() - - -def write_pov_ini(filename_ini, filename_pov, filename_image): - scene = bpy.data.scenes[0] - render = scene.render_data - - x= int(render.resolution_x*render.resolution_percentage*0.01) - y= int(render.resolution_y*render.resolution_percentage*0.01) - - file = open(filename_ini, 'w') - - file.write('Input_File_Name="%s"\n' % filename_pov) - file.write('Output_File_Name="%s"\n' % filename_image) - - file.write('Width=%d\n' % x) - file.write('Height=%d\n' % y) - - # Needed for border render. 
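Both write_pov_ini here and the render loop further down derive the effective output size the same way, by scaling the scene resolution with the percentage slider. A worked sketch of that computation (the function name is illustrative):

    def effective_resolution(resolution_x, resolution_y, resolution_percentage):
        # The percentage slider is stored as 0..100, hence the 0.01 factor
        x = int(resolution_x * resolution_percentage * 0.01)
        y = int(resolution_y * resolution_percentage * 0.01)
        return x, y

    # effective_resolution(1920, 1080, 50) -> (960, 540)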
- ''' - file.write('Start_Column=%d\n' % part.x) - file.write('End_Column=%d\n' % (part.x+part.w)) - - file.write('Start_Row=%d\n' % (part.y)) - file.write('End_Row=%d\n' % (part.y+part.h)) - ''' - - file.write('Display=0\n') - file.write('Pause_When_Done=0\n') - file.write('Output_File_Type=T\n') # TGA, best progressive loading - file.write('Output_Alpha=1\n') - - if render.antialiasing: - aa_mapping = {'OVERSAMPLE_5':2, 'OVERSAMPLE_8':3, 'OVERSAMPLE_11':4, 'OVERSAMPLE_16':5} # method 1 assumed - file.write('Antialias=1\n') - file.write('Antialias_Depth=%d\n' % aa_mapping[render.antialiasing_samples]) - else: - file.write('Antialias=0\n') - - file.close() - -# Radiosity panel, use in the scene for now. -FloatProperty= bpy.types.Scene.FloatProperty -IntProperty= bpy.types.Scene.IntProperty -BoolProperty= bpy.types.Scene.BoolProperty - -# Not a real pov option, just to know if we should write -BoolProperty( attr="pov_radio_enable", - name="Enable Radiosity", - description="Enable povrays radiosity calculation.", - default= False) -BoolProperty( attr="pov_radio_display_advanced", - name="Advanced Options", - description="Show advanced options.", - default= False) - -# Real pov options -FloatProperty( attr="pov_radio_adc_bailout", - name="ADC Bailout", - description="The adc_bailout for radiosity rays. Use adc_bailout = 0.01 / brightest_ambient_object for good results.", - min=0.0, max=1000.0, soft_min=0.0, soft_max=1.0, default= 0.01) - -BoolProperty( attr="pov_radio_always_sample", - name="Always Sample", - description="Only use the data from the pretrace step and not gather any new samples during the final radiosity pass..", - default= True) - -FloatProperty( attr="pov_radio_brightness", - name="Brightness", - description="Ammount objects are brightened before being returned upwards to the rest of the system.", - min=0.0, max=1000.0, soft_min=0.0, soft_max=10.0, default= 1.0) - -IntProperty( attr="pov_radio_count", - name="Ray Count", - description="number of rays that are sent out whenever a new radiosity value has to be calculated.", - min=1, max=1600, default= 35) - -FloatProperty( attr="pov_radio_error_bound", - name="Error Bound", - description="one of the two main speed/quality tuning values, lower values are more accurate.", - min=0.0, max=1000.0, soft_min=0.1, soft_max=10.0, default= 1.8) - -FloatProperty( attr="pov_radio_gray_threshold", - name="Gray Threshold", - description="one of the two main speed/quality tuning values, lower values are more accurate.", - min=0.0, max=1.0, soft_min=0, soft_max=1, default= 0.0) - -FloatProperty( attr="pov_radio_low_error_factor", - name="Low Error Factor", - description="If you calculate just enough samples, but no more, you will get an image which has slightly blotchy lighting.", - min=0.0, max=1.0, soft_min=0.0, soft_max=1.0, default= 0.5) - -# max_sample - not available yet -BoolProperty( attr="pov_radio_media", - name="Media", - description="Radiosity estimation can be affected by media.", - default= False) - -FloatProperty( attr="pov_radio_minimum_reuse", - name="Minimum Reuse", - description="Fraction of the screen width which sets the minimum radius of reuse for each sample point (At values higher than 2% expect errors).", - min=0.0, max=1.0, soft_min=0.1, soft_max=0.1, default= 0.015) - -IntProperty( attr="pov_radio_nearest_count", - name="Nearest Count", - description="Number of old ambient values blended together to create a new interpolated value.", - min=1, max=20, default= 5) - -BoolProperty( attr="pov_radio_normal", - 
name="Normals", - description="Radiosity estimation can be affected by normals.", - default= False) - -IntProperty( attr="pov_radio_recursion_limit", - name="Recursion Limit", - description="how many recursion levels are used to calculate the diffuse inter-reflection.", - min=1, max=20, default= 3) - - -class PovrayRender(bpy.types.RenderEngine): - __idname__ = 'POVRAY_RENDER' - __label__ = "Povray" - DELAY = 0.02 - - def _export(self, scene): - import tempfile - - self.temp_file_in = tempfile.mktemp(suffix='.pov') - self.temp_file_out = tempfile.mktemp(suffix='.tga') - self.temp_file_ini = tempfile.mktemp(suffix='.ini') - ''' - self.temp_file_in = '/test.pov' - self.temp_file_out = '/test.tga' - self.temp_file_ini = '/test.ini' - ''' - - def info_callback(txt): - self.update_stats("", "POVRAY: " + txt) - - write_pov(self.temp_file_in, scene, info_callback) - - def _render(self): - - try: os.remove(self.temp_file_out) # so as not to load the old file - except: pass - - write_pov_ini(self.temp_file_ini, self.temp_file_in, self.temp_file_out) - - print ("***-STARTING-***") - - pov_binary = "povray" - - if sys.platform=='win32': - import winreg - regKey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, 'Software\\POV-Ray\\v3.6\\Windows') - - if bitness == 64: - pov_binary = winreg.QueryValueEx(regKey, 'Home')[0] + '\\bin\\pvengine64' - else: - pov_binary = winreg.QueryValueEx(regKey, 'Home')[0] + '\\bin\\pvengine' - - if 1: - self.process = subprocess.Popen([pov_binary, self.temp_file_ini]) # stdout=subprocess.PIPE, stderr=subprocess.PIPE - else: - # This works too but means we have to wait until its done - os.system('%s %s' % (pov_binary, self.temp_file_ini)) - - print ("***-DONE-***") - - def _cleanup(self): - for f in (self.temp_file_in, self.temp_file_ini, self.temp_file_out): - try: os.remove(f) - except: pass - - self.update_stats("", "") - - def render(self, scene): - - self.update_stats("", "POVRAY: Exporting data from Blender") - self._export(scene) - self.update_stats("", "POVRAY: Parsing File") - self._render() - - r = scene.render_data - - # compute resolution - x= int(r.resolution_x*r.resolution_percentage*0.01) - y= int(r.resolution_y*r.resolution_percentage*0.01) - - - - # Wait for the file to be created - while not os.path.exists(self.temp_file_out): - if self.test_break(): - try: self.process.terminate() - except: pass - break - - if self.process.poll() != None: - self.update_stats("", "POVRAY: Failed") - break - - time.sleep(self.DELAY) - - if os.path.exists(self.temp_file_out): - - self.update_stats("", "POVRAY: Rendering") - - prev_size = -1 - - def update_image(): - result = self.begin_result(0, 0, x, y) - lay = result.layers[0] - # possible the image wont load early on. - try: lay.load_from_file(self.temp_file_out) - except: pass - self.end_result(result) - - # Update while povray renders - while True: - - # test if povray exists - if self.process.poll() != None: - update_image(); - break - - # user exit - if self.test_break(): - try: self.process.terminate() - except: pass - - break - - # Would be nice to redirect the output - # stdout_value, stderr_value = self.process.communicate() # locks - - - # check if the file updated - new_size = os.path.getsize(self.temp_file_out) - - if new_size != prev_size: - update_image() - prev_size = new_size - - time.sleep(self.DELAY) - - self._cleanup() - -bpy.types.register(PovrayRender) - -# Use some of the existing buttons. 
-import buttons_scene -buttons_scene.SCENE_PT_render.COMPAT_ENGINES.add('POVRAY_RENDER') -buttons_scene.SCENE_PT_dimensions.COMPAT_ENGINES.add('POVRAY_RENDER') -buttons_scene.SCENE_PT_antialiasing.COMPAT_ENGINES.add('POVRAY_RENDER') -buttons_scene.SCENE_PT_output.COMPAT_ENGINES.add('POVRAY_RENDER') -del buttons_scene - -# Use only a subset of the world panels -import buttons_world -buttons_world.WORLD_PT_preview.COMPAT_ENGINES.add('POVRAY_RENDER') -buttons_world.WORLD_PT_context_world.COMPAT_ENGINES.add('POVRAY_RENDER') -buttons_world.WORLD_PT_world.COMPAT_ENGINES.add('POVRAY_RENDER') -buttons_world.WORLD_PT_mist.COMPAT_ENGINES.add('POVRAY_RENDER') -del buttons_world - -# Example of wrapping every class 'as is' -import buttons_material -for member in dir(buttons_material): - subclass = getattr(buttons_material, member) - try: subclass.COMPAT_ENGINES.add('POVRAY_RENDER') - except: pass -del buttons_material - -class RenderButtonsPanel(bpy.types.Panel): - __space_type__ = 'PROPERTIES' - __region_type__ = 'WINDOW' - __context__ = "scene" - # COMPAT_ENGINES must be defined in each subclass, external engines can add themselves here - - def poll(self, context): - rd = context.scene.render_data - return (rd.use_game_engine==False) and (rd.engine in self.COMPAT_ENGINES) - -class SCENE_PT_povray_radiosity(RenderButtonsPanel): - __label__ = "Radiosity" - COMPAT_ENGINES = set(['POVRAY_RENDER']) - - def draw_header(self, context): - layout = self.layout - scene = context.scene - layout.itemR(scene, "pov_radio_enable", text="") - - def draw(self, context): - layout = self.layout - scene = context.scene - rd = scene.render_data - - layout.active = scene.pov_radio_enable - - split = layout.split() - - col = split.column() - - col.itemR(scene, "pov_radio_count", text="Rays") - col.itemR(scene, "pov_radio_recursion_limit", text="Recursions") - col = split.column() - col.itemR(scene, "pov_radio_error_bound", text="Error") - - layout.itemR(scene, "pov_radio_display_advanced") - - if scene.pov_radio_display_advanced: - split = layout.split() - - col = split.column() - col.itemR(scene, "pov_radio_adc_bailout", slider=True) - col.itemR(scene, "pov_radio_gray_threshold", slider=True) - col.itemR(scene, "pov_radio_low_error_factor", slider=True) - - - - col = split.column() - col.itemR(scene, "pov_radio_brightness") - col.itemR(scene, "pov_radio_minimum_reuse", text="Min Reuse") - col.itemR(scene, "pov_radio_nearest_count") - - - split = layout.split() - - col = split.column() - col.itemL(text="Estimation Influence:") - col.itemR(scene, "pov_radio_media") - col.itemR(scene, "pov_radio_normal") - - col = split.column() - col.itemR(scene, "pov_radio_always_sample") - - -bpy.types.register(SCENE_PT_povray_radiosity) diff --git a/release/io/export_ply.py b/release/io/export_ply.py deleted file mode 100644 index ed983c2b169..00000000000 --- a/release/io/export_ply.py +++ /dev/null @@ -1,286 +0,0 @@ -import bpy - -__author__ = "Bruce Merry" -__version__ = "0.93" -__bpydoc__ = """\ -This script exports Stanford PLY files from Blender. It supports normals, -colours, and texture coordinates per face or per vertex. -Only one mesh can be exported at a time. -""" - -# Copyright (C) 2004, 2005: Bruce Merry, bmerry@cs.uct.ac.za -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# Vector rounding se we can use as keys -# -# Updated on Aug 11, 2008 by Campbell Barton -# - added 'comment' prefix to comments - Needed to comply with the PLY spec. -# -# Updated on Jan 1, 2007 by Gabe Ghearing -# - fixed normals so they are correctly smooth/flat -# - fixed crash when the model doesn't have uv coords or vertex colors -# - fixed crash when the model has vertex colors but doesn't have uv coords -# - changed float32 to float and uint8 to uchar for compatibility -# Errata/Notes as of Jan 1, 2007 -# - script exports texture coords if they exist even if TexFace isn't selected (not a big deal to me) -# - ST(R) should probably be renamed UV(T) like in most PLY files (importer needs to be updated to take either) -# -# Updated on Jan 3, 2007 by Gabe Ghearing -# - fixed "sticky" vertex UV exporting -# - added pupmenu to enable/disable exporting normals, uv coords, and colors -# Errata/Notes as of Jan 3, 2007 -# - ST(R) coords should probably be renamed UV(T) like in most PLY files (importer needs to be updated to take either) -# - edges should be exported since PLY files support them -# - code is getting spaghettish, it should be refactored... -# - - -def rvec3d(v): return round(v[0], 6), round(v[1], 6), round(v[2], 6) -def rvec2d(v): return round(v[0], 6), round(v[1], 6) - -def write(filename, scene, ob, \ - EXPORT_APPLY_MODIFIERS= True,\ - EXPORT_NORMALS= True,\ - EXPORT_UV= True,\ - EXPORT_COLORS= True\ - ): - - if not filename.lower().endswith('.ply'): - filename += '.ply' - - if not ob: - raise Exception("Error, Select 1 active object") - return - - file = open(filename, 'wb') - - - #EXPORT_EDGES = Draw.Create(0) - """ - is_editmode = Blender.Window.EditMode() - if is_editmode: - Blender.Window.EditMode(0, '', 0) - - Window.WaitCursor(1) - """ - - #mesh = BPyMesh.getMeshFromObject(ob, None, EXPORT_APPLY_MODIFIERS, False, scn) # XXX - if EXPORT_APPLY_MODIFIERS: - mesh = ob.create_render_mesh(scene) - else: - mesh = ob.data - - if not mesh: - raise ("Error, could not get mesh data from active object") - return - - # mesh.transform(ob.matrixWorld) # XXX - - faceUV = len(mesh.uv_layers) > 0 - vertexUV = len(mesh.sticky) > 0 - vertexColors = len(mesh.vcol_layers) > 0 - - if (not faceUV) and (not vertexUV): EXPORT_UV = False - if not vertexColors: EXPORT_COLORS = False - - if not EXPORT_UV: faceUV = vertexUV = False - if not EXPORT_COLORS: vertexColors = False - - if faceUV: - active_uv_layer = None - for lay in mesh.uv_layers: - if lay.active: - active_uv_layer= lay.data - break - if not active_uv_layer: - EXPORT_UV = False - faceUV = None - - if vertexColors: - active_col_layer = None - for lay in mesh.vcol_layers: - if lay.active: - active_col_layer= lay.data - if not active_col_layer: - EXPORT_COLORS = False - vertexColors = None - - # incase - color = uvcoord = uvcoord_key = normal = normal_key = None - - mesh_verts = mesh.verts # save a lookup - ply_verts = [] # list of dictionaries - # vdict = {} # (index, normal, uv) -> new index - vdict = [{} for i in xrange(len(mesh_verts))] - ply_faces = [[] for f in 
xrange(len(mesh.faces))] - vert_count = 0 - for i, f in enumerate(mesh.faces): - - - smooth = f.smooth - # XXX need face normals - """ - if not smooth: - normal = tuple(f.no) - normal_key = rvec3d(normal) - """ - if faceUV: - uv = active_uv_layer[i] - uv = uv.uv1, uv.uv2, uv.uv3, uv.uv4 # XXX - crufty :/ - if vertexColors: - col = active_col_layer[i] - col = col.color1, col.color2, col.color3, col.color4 - - f_verts= list(f.verts) - if not f_verts[3]: f_verts.pop() # XXX face length should be 3/4, not always 4 - - pf= ply_faces[i] - for j, vidx in enumerate(f_verts): - v = mesh_verts[vidx] - """ - if smooth: - normal= tuple(v.no) - normal_key = rvec3d(normal) - """ - normal_key = None # XXX - - if faceUV: - uvcoord= uv[j][0], 1.0-uv[j][1] - uvcoord_key = rvec2d(uvcoord) - elif vertexUV: - uvcoord= v.uvco[0], 1.0-v.uvco[1] - uvcoord_key = rvec2d(uvcoord) - - if vertexColors: - color= col[j] - color= int(color[0]*255.0), int(color[1]*255.0), int(color[2]*255.0) - - - key = normal_key, uvcoord_key, color - - vdict_local = vdict[vidx] - pf_vidx = vdict_local.get(key) # Will be None initially - - if pf_vidx == None: # same as vdict_local.has_key(key) - pf_vidx = vdict_local[key] = vert_count; - ply_verts.append((vidx, normal, uvcoord, color)) - vert_count += 1 - - pf.append(pf_vidx) - - file.write('ply\n') - file.write('format ascii 1.0\n') - version = "2.5" # Blender.Get('version') - file.write('comment Created by Blender3D %s - www.blender.org, source file: %s\n' % (version, bpy.data.filename.split('/')[-1].split('\\')[-1] )) - - file.write('element vertex %d\n' % len(ply_verts)) - - file.write('property float x\n') - file.write('property float y\n') - file.write('property float z\n') - - # XXX - """ - if EXPORT_NORMALS: - file.write('property float nx\n') - file.write('property float ny\n') - file.write('property float nz\n') - """ - if EXPORT_UV: - file.write('property float s\n') - file.write('property float t\n') - if EXPORT_COLORS: - file.write('property uchar red\n') - file.write('property uchar green\n') - file.write('property uchar blue\n') - - file.write('element face %d\n' % len(mesh.faces)) - file.write('property list uchar uint vertex_indices\n') - file.write('end_header\n') - - for i, v in enumerate(ply_verts): - file.write('%.6f %.6f %.6f ' % tuple(mesh_verts[v[0]].co)) # co - """ - if EXPORT_NORMALS: - file.write('%.6f %.6f %.6f ' % v[1]) # no - """ - if EXPORT_UV: file.write('%.6f %.6f ' % v[2]) # uv - if EXPORT_COLORS: file.write('%u %u %u' % v[3]) # col - file.write('\n') - - for pf in ply_faces: - if len(pf)==3: file.write('3 %d %d %d\n' % tuple(pf)) - else: file.write('4 %d %d %d %d\n' % tuple(pf)) - - file.close() - print("writing", filename, "done") - - if EXPORT_APPLY_MODIFIERS: - bpy.data.remove_mesh(mesh) - - # XXX - """ - if is_editmode: - Blender.Window.EditMode(1, '', 0) - """ - -class EXPORT_OT_ply(bpy.types.Operator): - ''' - Operator documentatuon text, will be used for the operator tooltip and python docs. - ''' - __label__ = "Export PLY" - - # List of operator properties, the attributes will be assigned - # to the class instance from the operator settings before calling. 
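Before the data rows, the exporter writes a plain ASCII PLY header whose element and property declarations must match the per-vertex and per-face lines that follow. A condensed sketch of that header, assuming a text-mode file object (write_ply_header is a hypothetical wrapper, not part of the script):

    def write_ply_header(file, vert_count, face_count, use_uv=True, use_colors=True):
        # Declaration order must match the order the data rows are written in
        file.write('ply\n')
        file.write('format ascii 1.0\n')
        file.write('element vertex %d\n' % vert_count)
        for prop in ('x', 'y', 'z'):
            file.write('property float %s\n' % prop)
        if use_uv:
            file.write('property float s\n')
            file.write('property float t\n')
        if use_colors:
            for prop in ('red', 'green', 'blue'):
                file.write('property uchar %s\n' % prop)
        file.write('element face %d\n' % face_count)
        file.write('property list uchar uint vertex_indices\n')
        file.write('end_header\n')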
- - __props__ = [ - bpy.props.StringProperty(attr="filename", name="File Name", description="File name used for exporting the PLY file", maxlen= 1024, default= ""), - bpy.props.BoolProperty(attr="use_modifiers", name="Apply Modifiers", description="Apply Modifiers to the exported mesh", default= True), - bpy.props.BoolProperty(attr="use_normals", name="Export Normals", description="Export Normals for smooth and hard shaded faces", default= True), - bpy.props.BoolProperty(attr="use_uvs", name="Export UVs", description="Exort the active UV layer", default= True), - bpy.props.BoolProperty(attr="use_colors", name="Export Vertex Colors", description="Exort the active vertex color layer", default= True) - ] - - def poll(self, context): - print("Poll") - return context.active_object != None - - def execute(self, context): - # print("Selected: " + context.active_object.name) - - if not self.filename: - raise Exception("filename not set") - - write(self.filename, context.scene, context.active_object,\ - EXPORT_APPLY_MODIFIERS = self.use_modifiers, - EXPORT_NORMALS = self.use_normals, - EXPORT_UV = self.use_uvs, - EXPORT_COLORS = self.use_colors, - ) - - return ('FINISHED',) - - def invoke(self, context, event): - wm = context.manager - wm.add_fileselect(self.__operator__) - return ('RUNNING_MODAL',) - - -bpy.ops.add(EXPORT_OT_ply) - -if __name__ == "__main__": - bpy.ops.EXPORT_OT_ply(filename="/tmp/test.ply") - - diff --git a/release/io/netrender/__init__.py b/release/io/netrender/__init__.py deleted file mode 100644 index 1eb91abb938..00000000000 --- a/release/io/netrender/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# This directory is a Python package. - -import model -import operators -import client -import slave -import master -import utils -import ui - -# store temp data in bpy module - -import bpy - -bpy.data.netrender_jobs = [] -bpy.data.netrender_slaves = [] -bpy.data.netrender_blacklist = [] \ No newline at end of file diff --git a/release/io/netrender/client.py b/release/io/netrender/client.py deleted file mode 100644 index a6cfb4e020d..00000000000 --- a/release/io/netrender/client.py +++ /dev/null @@ -1,204 +0,0 @@ -import bpy -import sys, os -import http, http.client, http.server, urllib -import subprocess, shutil, time, hashlib - -import netrender.slave as slave -import netrender.master as master -from netrender.utils import * - - -def clientSendJob(conn, scene, anim = False, chunks = 5): - netsettings = scene.network_render - job = netrender.model.RenderJob() - - if anim: - for f in range(scene.start_frame, scene.end_frame + 1): - job.addFrame(f) - else: - job.addFrame(scene.current_frame) - - filename = bpy.data.filename - job.addFile(filename) - - job_name = netsettings.job_name - path, name = os.path.split(filename) - if job_name == "[default]": - job_name = name - - ########################### - # LIBRARIES - ########################### - for lib in bpy.data.libraries: - lib_path = lib.filename - - if lib_path.startswith("//"): - lib_path = path + os.sep + lib_path[2:] - - job.addFile(lib_path) - - ########################### - # POINT CACHES - ########################### - - root, ext = os.path.splitext(name) - cache_path = path + os.sep + "blendcache_" + root + os.sep # need an API call for that - - if os.path.exists(cache_path): - caches = {} - pattern = re.compile("([a-zA-Z0-9]+)_([0-9]+)_[0-9]+\.bphys") - for cache_file in sorted(os.listdir(cache_path)): - match = pattern.match(cache_file) - - if match: - cache_id = match.groups()[0] - cache_frame = int(match.groups()[1]) - - 
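clientSendJob gathers simulation point caches by matching the blendcache file naming scheme and grouping the hits per cache id and frame number. A self-contained sketch of that grouping step (the helper name and the sample file names are made up for illustration):

    import re

    _BPHYS = re.compile(r"([a-zA-Z0-9]+)_([0-9]+)_[0-9]+\.bphys")

    def group_point_caches(names):
        """Group blendcache file names by cache id, keeping their frame numbers."""
        caches = {}
        for name in sorted(names):
            match = _BPHYS.match(name)
            if match:
                cache_id, frame = match.group(1), int(match.group(2))
                caches.setdefault(cache_id, []).append((frame, name))
        return caches

    # group_point_caches(['a1b2_000010_00.bphys', 'a1b2_000020_00.bphys'])
    # -> {'a1b2': [(10, 'a1b2_000010_00.bphys'), (20, 'a1b2_000020_00.bphys')]}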
cache_files = caches.get(cache_id, []) - cache_files.append((cache_frame, cache_file)) - caches[cache_id] = cache_files - - for cache in caches.values(): - cache.sort() - - if len(cache) == 1: - cache_frame, cache_file = cache[0] - job.addFile(cache_path + cache_file, cache_frame, cache_frame) - else: - for i in range(len(cache)): - current_item = cache[i] - next_item = cache[i+1] if i + 1 < len(cache) else None - previous_item = cache[i - 1] if i > 0 else None - - current_frame, current_file = current_item - - if not next_item and not previous_item: - job.addFile(cache_path + current_file, current_frame, current_frame) - elif next_item and not previous_item: - next_frame = next_item[0] - job.addFile(cache_path + current_file, current_frame, next_frame - 1) - elif not next_item and previous_item: - previous_frame = previous_item[0] - job.addFile(cache_path + current_file, previous_frame + 1, current_frame) - else: - next_frame = next_item[0] - previous_frame = previous_item[0] - job.addFile(cache_path + current_file, previous_frame + 1, next_frame - 1) - - ########################### - # IMAGES - ########################### - for image in bpy.data.images: - if image.source == "FILE" and not image.packed_file: - job.addFile(image.filename) - - # print(job.files) - - job.name = job_name - - for slave in scene.network_render.slaves_blacklist: - job.blacklist.append(slave.id) - - job.chunks = netsettings.chunks - job.priority = netsettings.priority - - # try to send path first - conn.request("POST", "job", repr(job.serialize())) - response = conn.getresponse() - - job_id = response.getheader("job-id") - - # if not ACCEPTED (but not processed), send files - if response.status == http.client.ACCEPTED: - for filepath, start, end in job.files: - f = open(filepath, "rb") - conn.request("PUT", "file", f, headers={"job-id": job_id, "job-file": filepath}) - f.close() - response = conn.getresponse() - - # server will reply with NOT_FOUD until all files are found - - return job_id - -def requestResult(conn, job_id, frame): - conn.request("GET", "render", headers={"job-id": job_id, "job-frame":str(frame)}) - -@rnaType -class NetworkRenderEngine(bpy.types.RenderEngine): - __idname__ = 'NET_RENDER' - __label__ = "Network Render" - def render(self, scene): - if scene.network_render.mode == "RENDER_CLIENT": - self.render_client(scene) - elif scene.network_render.mode == "RENDER_SLAVE": - self.render_slave(scene) - elif scene.network_render.mode == "RENDER_MASTER": - self.render_master(scene) - else: - print("UNKNOWN OPERATION MODE") - - def render_master(self, scene): - netsettings = scene.network_render - - address = "" if netsettings.server_address == "[default]" else netsettings.server_address - - master.runMaster((address, netsettings.server_port), netsettings.server_broadcast, netsettings.path, self.update_stats, self.test_break) - - - def render_slave(self, scene): - slave.render_slave(self, scene) - - def render_client(self, scene): - netsettings = scene.network_render - self.update_stats("", "Network render client initiation") - - - conn = clientConnection(scene) - - if conn: - # Sending file - - self.update_stats("", "Network render exporting") - - job_id = netsettings.job_id - - # reading back result - - self.update_stats("", "Network render waiting for results") - - requestResult(conn, job_id, scene.current_frame) - response = conn.getresponse() - - if response.status == http.client.NO_CONTENT: - netsettings.job_id = clientSendJob(conn, scene) - requestResult(conn, job_id, scene.current_frame) - - 
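The submission handshake used above is worth restating on its own: the client first POSTs only the job description, and uploads the actual .blend, library and cache files only when the master answers ACCEPTED, meaning it could not resolve the paths locally. A condensed sketch, assuming a job object with the serialize()/files interface used here (send_job is a hypothetical helper):

    import http.client

    def send_job(conn, job):
        """Sketch of the client-side job submission handshake."""
        # Send only the job description first (file paths, frames, options)
        conn.request("POST", "job", repr(job.serialize()))
        response = conn.getresponse()
        response.read()                # drain the body so the connection can be reused
        job_id = response.getheader("job-id")

        # ACCEPTED means the master could not find the files itself: upload them
        if response.status == http.client.ACCEPTED:
            for filepath, start, end in job.files:
                with open(filepath, "rb") as f:
                    conn.request("PUT", "file", f,
                                 headers={"job-id": job_id, "job-file": filepath})
                response = conn.getresponse()
                response.read()
        return job_id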
while response.status == http.client.ACCEPTED and not self.test_break(): - print("waiting") - time.sleep(1) - requestResult(conn, job_id, scene.current_frame) - response = conn.getresponse() - - if response.status != http.client.OK: - conn.close() - return - - r = scene.render_data - x= int(r.resolution_x*r.resolution_percentage*0.01) - y= int(r.resolution_y*r.resolution_percentage*0.01) - - f = open(netsettings.path + "output.exr", "wb") - buf = response.read(1024) - - while buf: - f.write(buf) - buf = response.read(1024) - - f.close() - - result = self.begin_result(0, 0, x, y) - result.load_from_file(netsettings.path + "output.exr", 0, 0) - self.end_result(result) - - conn.close() - diff --git a/release/io/netrender/master.py b/release/io/netrender/master.py deleted file mode 100644 index 13e8b399d6c..00000000000 --- a/release/io/netrender/master.py +++ /dev/null @@ -1,635 +0,0 @@ -import sys, os -import http, http.client, http.server, urllib, socket -import subprocess, shutil, time, hashlib - -from netrender.utils import * -import netrender.model - -JOB_WAITING = 0 # before all data has been entered -JOB_PAUSED = 1 # paused by user -JOB_QUEUED = 2 # ready to be dispatched - -class MRenderFile: - def __init__(self, filepath, start, end): - self.filepath = filepath - self.start = start - self.end = end - self.found = False - - def test(self): - self.found = os.path.exists(self.filepath) - return self.found - - -class MRenderSlave(netrender.model.RenderSlave): - def __init__(self, name, address, stats): - super().__init__() - self.id = hashlib.md5(bytes(repr(name) + repr(address), encoding='utf8')).hexdigest() - self.name = name - self.address = address - self.stats = stats - self.last_seen = time.time() - - self.job = None - self.frame = None - - netrender.model.RenderSlave._slave_map[self.id] = self - - def seen(self): - self.last_seen = time.time() - -# sorting key for jobs -def groupKey(job): - return (job.status, job.framesLeft() > 0, job.priority, job.credits) - -class MRenderJob(netrender.model.RenderJob): - def __init__(self, job_id, name, files, chunks = 1, priority = 1, credits = 100.0, blacklist = []): - super().__init__() - self.id = job_id - self.name = name - self.files = files - self.frames = [] - self.chunks = chunks - self.priority = priority - self.credits = credits - self.blacklist = blacklist - self.last_dispatched = time.time() - - # special server properties - self.save_path = "" - self.files_map = {path: MRenderFile(path, start, end) for path, start, end in files} - self.status = JOB_WAITING - - def save(self): - if self.save_path: - f = open(self.save_path + "job.txt", "w") - f.write(repr(self.serialize())) - f.close() - - def testStart(self): - for f in self.files_map.values(): - if not f.test(): - return False - - self.start() - return True - - def start(self): - self.status = JOB_QUEUED - - def update(self): - self.credits -= 5 # cost of one frame - self.credits += (time.time() - self.last_dispatched) / 60 - self.last_dispatched = time.time() - - def addFrame(self, frame_number): - frame = MRenderFrame(frame_number) - self.frames.append(frame) - return frame - - def framesLeft(self): - total = 0 - for j in self.frames: - if j.status == QUEUED: - total += 1 - - return total - - def reset(self, all): - for f in self.frames: - f.reset(all) - - def getFrames(self): - frames = [] - for f in self.frames: - if f.status == QUEUED: - self.update() - frames.append(f) - if len(frames) >= self.chunks: - break - - return frames - -class 
MRenderFrame(netrender.model.RenderFrame): - def __init__(self, frame): - super().__init__() - self.number = frame - self.slave = None - self.time = 0 - self.status = QUEUED - - def reset(self, all): - if all or self.status == ERROR: - self.slave = None - self.time = 0 - self.status = QUEUED - - -# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= -# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- -# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= -# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- - -class RenderHandler(http.server.BaseHTTPRequestHandler): - def send_head(self, code = http.client.OK, headers = {}): - self.send_response(code) - self.send_header("Content-type", "application/octet-stream") - - for key, value in headers.items(): - self.send_header(key, value) - - self.end_headers() - - def do_HEAD(self): - print(self.path) - - if self.path == "status": - job_id = self.headers.get('job-id', "") - job_frame = int(self.headers.get('job-frame', -1)) - - if job_id: - print("status:", job_id, "\n") - - job = self.server.getJobByID(job_id) - if job: - if job_frame != -1: - frame = job[frame] - - if not frame: - # no such frame - self.send_heat(http.client.NO_CONTENT) - return - else: - # no such job id - self.send_head(http.client.NO_CONTENT) - return - - self.send_head() - - # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- - # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= - # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- - # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= - # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- - - def do_GET(self): - print(self.path) - - if self.path == "version": - self.send_head() - self.server.stats("", "New client connection") - self.wfile.write(VERSION) - # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- - elif self.path == "render": - job_id = self.headers['job-id'] - job_frame = int(self.headers['job-frame']) - print("render:", job_id, job_frame) - - job = self.server.getJobByID(job_id) - - if job: - frame = job[job_frame] - - if frame: - if frame.status in (QUEUED, DISPATCHED): - self.send_head(http.client.ACCEPTED) - elif frame.status == DONE: - self.server.stats("", "Sending result back to client") - f = open(job.save_path + "%04d" % job_frame + ".exr", 'rb') - - self.send_head() - - shutil.copyfileobj(f, self.wfile) - - f.close() - elif frame.status == ERROR: - self.send_head(http.client.PARTIAL_CONTENT) - else: - # no such frame - self.send_head(http.client.NO_CONTENT) - else: - # no such job id - self.send_head(http.client.NO_CONTENT) - # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- - elif self.path == "log": - job_id = self.headers['job-id'] - job_frame = int(self.headers['job-frame']) - print("log:", job_id, job_frame) - - job = self.server.getJobByID(job_id) - - if job: - frame = job[job_frame] - - if frame: - if frame.status in (QUEUED, DISPATCHED): - self.send_head(http.client.PROCESSING) - else: - 
self.server.stats("", "Sending log back to client") - f = open(job.save_path + "%04d" % job_frame + ".log", 'rb') - - self.send_head() - - shutil.copyfileobj(f, self.wfile) - - f.close() - else: - # no such frame - self.send_head(http.client.NO_CONTENT) - else: - # no such job id - self.send_head(http.client.NO_CONTENT) - # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- - elif self.path == "status": - job_id = self.headers.get('job-id', "") - job_frame = int(self.headers.get('job-frame', -1)) - - if job_id: - print("status:", job_id, "\n") - - job = self.server.getJobByID(job_id) - if job: - if job_frame != -1: - frame = job[frame] - - if frame: - message = frame.serialize() - else: - # no such frame - self.send_heat(http.client.NO_CONTENT) - return - else: - message = job.serialize() - else: - # no such job id - self.send_head(http.client.NO_CONTENT) - return - else: # status of all jobs - message = [] - - for job in self.server: - message.append(job.serialize()) - - self.send_head() - self.wfile.write(bytes(repr(message), encoding='utf8')) - - # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- - elif self.path == "job": - self.server.update() - - slave_id = self.headers['slave-id'] - - print("slave-id", slave_id) - - slave = self.server.updateSlave(slave_id) - - if slave: # only if slave id is valid - job, frames = self.server.getNewJob(slave_id) - - if job and frames: - for f in frames: - print("dispatch", f.number) - f.status = DISPATCHED - f.slave = slave - - self.send_head(headers={"job-id": job.id}) - - message = job.serialize(frames) - - self.wfile.write(bytes(repr(message), encoding='utf8')) - - self.server.stats("", "Sending job frame to render node") - else: - # no job available, return error code - self.send_head(http.client.ACCEPTED) - else: # invalid slave id - self.send_head(http.client.NO_CONTENT) - # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- - elif self.path == "file": - slave_id = self.headers['slave-id'] - - slave = self.server.updateSlave(slave_id) - - if slave: # only if slave id is valid - job_id = self.headers['job-id'] - job_file = self.headers['job-file'] - print("job:", job_id, "\n") - print("file:", job_file, "\n") - - job = self.server.getJobByID(job_id) - - if job: - render_file = job.files_map.get(job_file, None) - - if render_file: - self.server.stats("", "Sending file to render node") - f = open(render_file.path, 'rb') - - shutil.copyfileobj(f, self.wfile) - - f.close() - else: - # no such file - self.send_head(http.client.NO_CONTENT) - else: - # no such job id - self.send_head(http.client.NO_CONTENT) - else: # invalid slave id - self.send_head(http.client.NO_CONTENT) - # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- - elif self.path == "slave": - message = [] - - for slave in self.server.slaves: - message.append(slave.serialize()) - - self.send_head() - - self.wfile.write(bytes(repr(message), encoding='utf8')) - - - # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- - # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= - # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- - # 
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= - # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- - def do_POST(self): - print(self.path) - - # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- - if self.path == "job": - print("posting job info") - self.server.stats("", "Receiving job") - - length = int(self.headers['content-length']) - - job_info = netrender.model.RenderJob.materialize(eval(str(self.rfile.read(length), encoding='utf8'))) - - job_id = self.server.nextJobID() - - print(job_info.files) - - job = MRenderJob(job_id, job_info.name, job_info.files, chunks = job_info.chunks, priority = job_info.priority, blacklist = job_info.blacklist) - - for frame in job_info.frames: - frame = job.addFrame(frame.number) - - self.server.addJob(job) - - headers={"job-id": job_id} - - if job.testStart(): - self.send_head(headers=headers) - else: - self.send_head(http.client.ACCEPTED, headers=headers) - # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- - elif self.path == "cancel": - job_id = self.headers.get('job-id', "") - if job_id: - print("cancel:", job_id, "\n") - self.server.removeJob(job_id) - else: # cancel all jobs - self.server.clear() - - self.send_head() - # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- - elif self.path == "reset": - job_id = self.headers.get('job-id', "") - job_frame = int(self.headers.get('job-frame', "-1")) - all = bool(self.headers.get('reset-all', "False")) - - job = self.server.getJobByID(job_id) - - if job: - if job_frame != -1: - job[job_frame].reset(all) - else: - job.reset(all) - - self.send_head() - else: # job not found - self.send_head(http.client.NO_CONTENT) - # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- - elif self.path == "slave": - length = int(self.headers['content-length']) - job_frame_string = self.headers['job-frame'] - - slave_info = netrender.model.RenderSlave.materialize(eval(str(self.rfile.read(length), encoding='utf8'))) - - slave_id = self.server.addSlave(slave_info.name, self.client_address, slave_info.stats) - - self.send_head(headers = {"slave-id": slave_id}) - - # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- - # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= - # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- - # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= - # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- - def do_PUT(self): - print(self.path) - - # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- - if self.path == "file": - print("writing blend file") - self.server.stats("", "Receiving job") - - length = int(self.headers['content-length']) - job_id = self.headers['job-id'] - job_file = self.headers['job-file'] - - job = self.server.getJobByID(job_id) - - if job: - - render_file = job.files_map.get(job_file, None) - - if render_file: - main_file = job.files[0] - - main_path, main_name = os.path.split(main_file) - - if job_file != main_file: - file_path 
= prefixPath(job.save_path, job_file, main_path) - else: - file_path = job.save_path + main_name - - buf = self.rfile.read(length) - - # add same temp file + renames as slave - - f = open(file_path, "wb") - f.write(buf) - f.close() - del buf - - render_file.path = file_path # set the new path - - if job.testStart(): - self.send_head(headers=headers) - else: - self.send_head(http.client.ACCEPTED, headers=headers) - else: # invalid file - self.send_head(http.client.NO_CONTENT) - else: # job not found - self.send_head(http.client.NO_CONTENT) - # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- - elif self.path == "render": - print("writing result file") - self.server.stats("", "Receiving render result") - - slave_id = self.headers['slave-id'] - - slave = self.server.updateSlave(slave_id) - - if slave: # only if slave id is valid - job_id = self.headers['job-id'] - - job = self.server.getJobByID(job_id) - - if job: - job_frame = int(self.headers['job-frame']) - job_result = int(self.headers['job-result']) - job_time = float(self.headers['job-time']) - - frame = job[job_frame] - - if job_result == DONE: - length = int(self.headers['content-length']) - buf = self.rfile.read(length) - f = open(job.save_path + "%04d" % job_frame + ".exr", 'wb') - f.write(buf) - f.close() - - del buf - elif job_result == ERROR: - # blacklist slave on this job on error - job.blacklist.append(slave.id) - - frame.status = job_result - frame.time = job_time - - self.server.updateSlave(self.headers['slave-id']) - - self.send_head() - else: # job not found - self.send_head(http.client.NO_CONTENT) - else: # invalid slave id - self.send_head(http.client.NO_CONTENT) - # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- - elif self.path == "log": - print("writing log file") - self.server.stats("", "Receiving log file") - - job_id = self.headers['job-id'] - - job = self.server.getJobByID(job_id) - - if job: - length = int(self.headers['content-length']) - job_frame = int(self.headers['job-frame']) - - buf = self.rfile.read(length) - f = open(job.save_path + "%04d" % job_frame + ".log", 'ab') - f.write(buf) - f.close() - - del buf - - self.server.updateSlave(self.headers['slave-id']) - - self.send_head() - else: # job not found - self.send_head(http.client.NO_CONTENT) - -class RenderMasterServer(http.server.HTTPServer): - def __init__(self, address, handler_class, path): - super().__init__(address, handler_class) - self.jobs = [] - self.jobs_map = {} - self.slaves = [] - self.slaves_map = {} - self.job_id = 0 - self.path = path + "master_" + str(os.getpid()) + os.sep - - if not os.path.exists(self.path): - os.mkdir(self.path) - - def nextJobID(self): - self.job_id += 1 - return str(self.job_id) - - def addSlave(self, name, address, stats): - slave = MRenderSlave(name, address, stats) - self.slaves.append(slave) - self.slaves_map[slave.id] = slave - - return slave.id - - def getSlave(self, slave_id): - return self.slaves_map.get(slave_id, None) - - def updateSlave(self, slave_id): - slave = self.getSlave(slave_id) - if slave: - slave.seen() - - return slave - - def clear(self): - self.jobs_map = {} - self.jobs = [] - - def update(self): - self.jobs.sort(key = groupKey) - - def removeJob(self, id): - job = self.jobs_map.pop(id) - - if job: - self.jobs.remove(job) - - def addJob(self, job): - self.jobs.append(job) - self.jobs_map[job.id] = job - - # create job directory - job.save_path = self.path + "job_" + job.id + 
os.sep - if not os.path.exists(job.save_path): - os.mkdir(job.save_path) - - job.save() - - def getJobByID(self, id): - return self.jobs_map.get(id, None) - - def __iter__(self): - for job in self.jobs: - yield job - - def getNewJob(self, slave_id): - if self.jobs: - for job in reversed(self.jobs): - if job.status == JOB_QUEUED and job.framesLeft() > 0 and slave_id not in job.blacklist: - return job, job.getFrames() - - return None, None - -def runMaster(address, broadcast, path, update_stats, test_break): - httpd = RenderMasterServer(address, RenderHandler, path) - httpd.timeout = 1 - httpd.stats = update_stats - - if broadcast: - s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) - - start_time = time.time() - - while not test_break(): - httpd.handle_request() - - if broadcast: - if time.time() - start_time >= 10: # need constant here - print("broadcasting address") - s.sendto(bytes("%s:%i" % address, encoding='utf8'), 0, ('',address[1])) - start_time = time.time() diff --git a/release/io/netrender/model.py b/release/io/netrender/model.py deleted file mode 100644 index 7803ad034a7..00000000000 --- a/release/io/netrender/model.py +++ /dev/null @@ -1,162 +0,0 @@ -import sys, os -import http, http.client, http.server, urllib -import subprocess, shutil, time, hashlib - -from netrender.utils import * - -class RenderSlave: - _slave_map = {} - - def __init__(self): - self.id = "" - self.name = "" - self.address = (0,0) - self.stats = "" - self.total_done = 0 - self.total_error = 0 - self.last_seen = 0.0 - - def serialize(self): - return { - "id": self.id, - "name": self.name, - "address": self.address, - "stats": self.stats, - "total_done": self.total_done, - "total_error": self.total_error, - "last_seen": self.last_seen - } - - @staticmethod - def materialize(data): - if not data: - return None - - slave_id = data["id"] - - if slave_id in RenderSlave._slave_map: - return RenderSlave._slave_map[slave_id] - else: - slave = RenderSlave() - slave.id = slave_id - slave.name = data["name"] - slave.address = data["address"] - slave.stats = data["stats"] - slave.total_done = data["total_done"] - slave.total_error = data["total_error"] - slave.last_seen = data["last_seen"] - - RenderSlave._slave_map[slave_id] = slave - - return slave - -class RenderJob: - def __init__(self): - self.id = "" - self.name = "" - self.files = [] - self.frames = [] - self.chunks = 0 - self.priority = 0 - self.credits = 0 - self.blacklist = [] - self.last_dispatched = 0.0 - - def addFile(self, file_path, start=-1, end=-1): - self.files.append((file_path, start, end)) - - def addFrame(self, frame_number): - frame = RenderFrame(frame_number) - self.frames.append(frame) - return frame - - def __len__(self): - return len(self.frames) - - def framesStatus(self): - results = { - QUEUED: 0, - DISPATCHED: 0, - DONE: 0, - ERROR: 0 - } - - for frame in self.frames: - results[frame.status] += 1 - - return results - - def __contains__(self, frame_number): - for f in self.frames: - if f.number == frame_number: - return True - else: - return False - - def __getitem__(self, frame_number): - for f in self.frames: - if f.number == frame_number: - return f - else: - return None - - def serialize(self, frames = None): - min_frame = min((f.number for f in frames)) if frames else -1 - max_frame = max((f.number for f in frames)) if frames else -1 - return { - "id": self.id, - "name": self.name, - "files": [f for f in self.files if f[1] == -1 or not frames or (f[1] <= min_frame <= f[2] or 
f[1] <= max_frame <= f[2])], - "frames": [f.serialize() for f in self.frames if not frames or f in frames], - "chunks": self.chunks, - "priority": self.priority, - "credits": self.credits, - "blacklist": self.blacklist, - "last_dispatched": self.last_dispatched - } - - @staticmethod - def materialize(data): - if not data: - return None - - job = RenderJob() - job.id = data["id"] - job.name = data["name"] - job.files = data["files"] - job.frames = [RenderFrame.materialize(f) for f in data["frames"]] - job.chunks = data["chunks"] - job.priority = data["priority"] - job.credits = data["credits"] - job.blacklist = data["blacklist"] - job.last_dispatched = data["last_dispatched"] - - return job - -class RenderFrame: - def __init__(self, number = 0): - self.number = number - self.time = 0 - self.status = QUEUED - self.slave = None - - def serialize(self): - return { - "number": self.number, - "time": self.time, - "status": self.status, - "slave": None if not self.slave else self.slave.serialize() - } - - @staticmethod - def materialize(data): - if not data: - return None - - frame = RenderFrame() - frame.number = data["number"] - frame.time = data["time"] - frame.status = data["status"] - frame.slave = RenderSlave.materialize(data["slave"]) - - return frame diff --git a/release/io/netrender/operators.py b/release/io/netrender/operators.py deleted file mode 100644 index 928c2b9efaf..00000000000 --- a/release/io/netrender/operators.py +++ /dev/null @@ -1,356 +0,0 @@ -import bpy -import sys, os -import http, http.client, http.server, urllib, socket - -from netrender.utils import * -import netrender.client as client -import netrender.model - -@rnaOperator -class RENDER_OT_netclientanim(bpy.types.Operator): - ''' - Operator documentation text, will be used for the operator tooltip and python docs. - ''' - __idname__ = "render.netclientanim" - __label__ = "Net Render Client Anim" - - # List of operator properties, the attributes will be assigned - # to the class instance from the operator settings before calling. - - __props__ = [] - - def poll(self, context): - return True - - def execute(self, context): - scene = context.scene - - conn = clientConnection(scene) - - if conn: - # Sending file - scene.network_render.job_id = client.clientSendJob(conn, scene, True) - conn.close() - - bpy.ops.screen.render('INVOKE_AREA', animation=True) - - return ('FINISHED',) - - def invoke(self, context, event): - return self.execute(context) - -@rnaOperator -class RENDER_OT_netclientsend(bpy.types.Operator): - ''' - Operator documentation text, will be used for the operator tooltip and python docs. - ''' - __idname__ = "render.netclientsend" - __label__ = "Net Render Client Send" - - # List of operator properties, the attributes will be assigned - # to the class instance from the operator settings before calling. 
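# (No operator properties are needed here; the operator reads everything it
# uses from context.scene and its network_render settings.)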
- - __props__ = [] - - def poll(self, context): - return True - - def execute(self, context): - scene = context.scene - - conn = clientConnection(scene) - - if conn: - # Sending file - scene.network_render.job_id = client.clientSendJob(conn, scene, True) - conn.close() - - return ('FINISHED',) - - def invoke(self, context, event): - return self.execute(context) - -@rnaOperator -class RENDER_OT_netclientstatus(bpy.types.Operator): - '''Operator documentation text, will be used for the operator tooltip and python docs.''' - __idname__ = "render.netclientstatus" - __label__ = "Net Render Client Status" - - # List of operator properties, the attributes will be assigned - # to the class instance from the operator settings before calling. - - __props__ = [] - - def poll(self, context): - return True - - def execute(self, context): - netsettings = context.scene.network_render - conn = clientConnection(context.scene) - - if conn: - conn.request("GET", "status") - - response = conn.getresponse() - print( response.status, response.reason ) - - jobs = (netrender.model.RenderJob.materialize(j) for j in eval(str(response.read(), encoding='utf8'))) - - while(len(netsettings.jobs) > 0): - netsettings.jobs.remove(0) - - bpy.data.netrender_jobs = [] - - for j in jobs: - bpy.data.netrender_jobs.append(j) - netsettings.jobs.add() - job = netsettings.jobs[-1] - - j.results = j.framesStatus() # cache frame status - - job.name = j.name - - return ('FINISHED',) - - def invoke(self, context, event): - return self.execute(context) - -@rnaOperator -class RENDER_OT_netclientblacklistslave(bpy.types.Operator): - '''Operator documentation text, will be used for the operator tooltip and python docs.''' - __idname__ = "render.netclientblacklistslave" - __label__ = "Net Render Client Blacklist Slave" - - # List of operator properties, the attributes will be assigned - # to the class instance from the operator settings before calling. - - __props__ = [] - - def poll(self, context): - return True - - def execute(self, context): - netsettings = context.scene.network_render - - if netsettings.active_slave_index >= 0: - - # deal with data - slave = bpy.data.netrender_slaves.pop(netsettings.active_slave_index) - bpy.data.netrender_blacklist.append(slave) - - # deal with rna - netsettings.slaves_blacklist.add() - netsettings.slaves_blacklist[-1].name = slave.name - - netsettings.slaves.remove(netsettings.active_slave_index) - netsettings.active_slave_index = -1 - - return ('FINISHED',) - - def invoke(self, context, event): - return self.execute(context) - -@rnaOperator -class RENDER_OT_netclientwhitelistslave(bpy.types.Operator): - '''Operator documentation text, will be used for the operator tooltip and python docs.''' - __idname__ = "render.netclientwhitelistslave" - __label__ = "Net Render Client Whitelist Slave" - - # List of operator properties, the attributes will be assigned - # to the class instance from the operator settings before calling. 
- - __props__ = [] - - def poll(self, context): - return True - - def execute(self, context): - netsettings = context.scene.network_render - - if netsettings.active_blacklisted_slave_index >= 0: - - # deal with data - slave = bpy.data.netrender_blacklist.pop(netsettings.active_blacklisted_slave_index) - bpy.data.netrender_slaves.append(slave) - - # deal with rna - netsettings.slaves.add() - netsettings.slaves[-1].name = slave.name - - netsettings.slaves_blacklist.remove(netsettings.active_blacklisted_slave_index) - netsettings.active_blacklisted_slave_index = -1 - - return ('FINISHED',) - - def invoke(self, context, event): - return self.execute(context) - - -@rnaOperator -class RENDER_OT_netclientslaves(bpy.types.Operator): - '''Operator documentation text, will be used for the operator tooltip and python docs.''' - __idname__ = "render.netclientslaves" - __label__ = "Net Render Client Slaves" - - # List of operator properties, the attributes will be assigned - # to the class instance from the operator settings before calling. - - __props__ = [] - - def poll(self, context): - return True - - def execute(self, context): - netsettings = context.scene.network_render - conn = clientConnection(context.scene) - - if conn: - conn.request("GET", "slave") - - response = conn.getresponse() - print( response.status, response.reason ) - - slaves = (netrender.model.RenderSlave.materialize(s) for s in eval(str(response.read(), encoding='utf8'))) - - while(len(netsettings.slaves) > 0): - netsettings.slaves.remove(0) - - bpy.data.netrender_slaves = [] - - for s in slaves: - for i in range(len(bpy.data.netrender_blacklist)): - slave = bpy.data.netrender_blacklist[i] - if slave.id == s.id: - bpy.data.netrender_blacklist[i] = s - netsettings.slaves_blacklist[i].name = s.name - break - else: - bpy.data.netrender_slaves.append(s) - - netsettings.slaves.add() - slave = netsettings.slaves[-1] - slave.name = s.name - - return ('FINISHED',) - - def invoke(self, context, event): - return self.execute(context) - -@rnaOperator -class RENDER_OT_netclientcancel(bpy.types.Operator): - '''Operator documentation text, will be used for the operator tooltip and python docs.''' - __idname__ = "render.netclientcancel" - __label__ = "Net Render Client Cancel" - - # List of operator properties, the attributes will be assigned - # to the class instance from the operator settings before calling. - - __props__ = [] - - def poll(self, context): - netsettings = context.scene.network_render - return netsettings.active_job_index >= 0 and len(netsettings.jobs) > 0 - - def execute(self, context): - netsettings = context.scene.network_render - conn = clientConnection(context.scene) - - if conn: - job = bpy.data.netrender_jobs[netsettings.active_job_index] - - conn.request("POST", "cancel", headers={"job-id":job.id}) - - response = conn.getresponse() - print( response.status, response.reason ) - - return ('FINISHED',) - - def invoke(self, context, event): - return self.execute(context) - -@rnaOperator -class netclientdownload(bpy.types.Operator): - '''Operator documentation text, will be used for the operator tooltip and python docs.''' - __idname__ = "render.netclientdownload" - __label__ = "Net Render Client Download" - - # List of operator properties, the attributes will be assigned - # to the class instance from the operator settings before calling. 
- - __props__ = [] - - def poll(self, context): - netsettings = context.scene.network_render - return netsettings.active_job_index >= 0 and len(netsettings.jobs) > 0 - - def execute(self, context): - netsettings = context.scene.network_render - rd = context.scene.render_data - - conn = clientConnection(context.scene) - - if conn: - job = bpy.data.netrender_jobs[netsettings.active_job_index] - - for frame in job.frames: - client.requestResult(conn, job.id, frame.number) - response = conn.getresponse() - - if response.status != http.client.OK: - print("missing", frame.number) - continue - - print("got back", frame.number) - - f = open(netsettings.path + "%06d" % frame.number + ".exr", "wb") - buf = response.read(1024) - - while buf: - f.write(buf) - buf = response.read(1024) - - f.close() - - conn.close() - - return ('FINISHED',) - - def invoke(self, context, event): - return self.execute(context) - -@rnaOperator -class netclientscan(bpy.types.Operator): - '''Operator documentation text, will be used for the operator tooltip and python docs.''' - __idname__ = "render.netclientscan" - __label__ = "Net Render Client Scan" - - # List of operator properties, the attributes will be assigned - # to the class instance from the operator settings before calling. - - __props__ = [] - - def poll(self, context): - return True - - def execute(self, context): - netsettings = context.scene.network_render - - s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) - s.settimeout(30) - - s.bind(('', netsettings.server_port)) - - try: - buf, address = s.recvfrom(128) - - print("received:", buf) - - netsettings.server_address = address[0] - except socket.timeout: - print("no server info") - - return ('FINISHED',) - - def invoke(self, context, event): - return self.execute(context) diff --git a/release/io/netrender/slave.py b/release/io/netrender/slave.py deleted file mode 100644 index 1dcb608931e..00000000000 --- a/release/io/netrender/slave.py +++ /dev/null @@ -1,180 +0,0 @@ -import sys, os -import http, http.client, http.server, urllib -import subprocess, time - -from netrender.utils import * -import netrender.model - -CANCEL_POLL_SPEED = 2 -MAX_TIMEOUT = 10 -INCREMENT_TIMEOUT = 1 - -def slave_Info(): - sysname, nodename, release, version, machine = os.uname() - slave = netrender.model.RenderSlave() - slave.name = nodename - slave.stats = sysname + " " + release + " " + machine - return slave - -def testCancel(conn, job_id): - conn.request("HEAD", "status", headers={"job-id":job_id}) - response = conn.getresponse() - - # cancelled if job isn't found anymore - if response.status == http.client.NO_CONTENT: - return True - else: - return False - -def testFile(conn, JOB_PREFIX, file_path, main_path = None): - job_full_path = prefixPath(JOB_PREFIX, file_path, main_path) - - if not os.path.exists(job_full_path): - temp_path = JOB_PREFIX + "slave.temp.blend" - conn.request("GET", "file", headers={"job-id": job.id, "slave-id":slave_id, "job-file":file_path}) - response = conn.getresponse() - - if response.status != http.client.OK: - return None # file for job not returned by server, need to return an error code to server - - f = open(temp_path, "wb") - buf = response.read(1024) - - while buf: - f.write(buf) - buf = response.read(1024) - - f.close() - - os.renames(temp_path, job_full_path) - - return job_full_path - - -def render_slave(engine, scene): - netsettings = scene.network_render - timeout = 1 - - engine.update_stats("", "Network render node initiation") 
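# Slave-to-master handshake implemented below: the slave POSTs its serialized
# RenderSlave description to "slave", reads its assigned id from the "slave-id"
# response header, then repeatedly GETs "job" with that id until the user
# cancels rendering (engine.test_break()).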
- - conn = clientConnection(scene) - - if conn: - conn.request("POST", "slave", repr(slave_Info().serialize())) - response = conn.getresponse() - - slave_id = response.getheader("slave-id") - - NODE_PREFIX = netsettings.path + "slave_" + slave_id + os.sep - if not os.path.exists(NODE_PREFIX): - os.mkdir(NODE_PREFIX) - - while not engine.test_break(): - - conn.request("GET", "job", headers={"slave-id":slave_id}) - response = conn.getresponse() - - if response.status == http.client.OK: - timeout = 1 # reset timeout on new job - - job = netrender.model.RenderJob.materialize(eval(str(response.read(), encoding='utf8'))) - - JOB_PREFIX = NODE_PREFIX + "job_" + job.id + os.sep - if not os.path.exists(JOB_PREFIX): - os.mkdir(JOB_PREFIX) - - job_path = job.files[0][0] # data in files have format (path, start, end) - main_path, main_file = os.path.split(job_path) - - job_full_path = testFile(conn, JOB_PREFIX, job_path) - print("Fullpath", job_full_path) - print("File:", main_file, "and %i other files" % (len(job.files) - 1,)) - engine.update_stats("", "Render File", main_file, "for job", job.id) - - for file_path, start, end in job.files[1:]: - print("\t", file_path) - testFile(conn, JOB_PREFIX, file_path, main_path) - - frame_args = [] - - for frame in job.frames: - print("frame", frame.number) - frame_args += ["-f", str(frame.number)] - - start_t = time.time() - - process = subprocess.Popen([sys.argv[0], "-b", job_full_path, "-o", JOB_PREFIX + "######", "-E", "BLENDER_RENDER", "-F", "MULTILAYER"] + frame_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - - headers = {"job-id":job.id, "slave-id":slave_id} - - cancelled = False - stdout = bytes() - run_t = time.time() - while process.poll() == None and not cancelled: - stdout += process.stdout.read(32) - current_t = time.time() - cancelled = engine.test_break() - if current_t - run_t > CANCEL_POLL_SPEED: - - # update logs. 
Eventually, it should support one log file for many frames - for frame in job.frames: - headers["job-frame"] = str(frame.number) - conn.request("PUT", "log", stdout, headers=headers) - response = conn.getresponse() - - stdout = bytes() - - run_t = current_t - if testCancel(conn, job.id): - cancelled = True - - if cancelled: - # kill process if needed - if process.poll() == None: - process.terminate() - continue # to next frame - - total_t = time.time() - start_t - - avg_t = total_t / len(job.frames) - - status = process.returncode - - print("status", status) - - # flush the rest of the logs - if stdout: - for frame in job.frames: - headers["job-frame"] = str(frame.number) - conn.request("PUT", "log", stdout, headers=headers) - response = conn.getresponse() - - headers = {"job-id":job.id, "slave-id":slave_id, "job-time":str(avg_t)} - - if status == 0: # non zero status is error - headers["job-result"] = str(DONE) - for frame in job.frames: - headers["job-frame"] = str(frame.number) - # send result back to server - f = open(JOB_PREFIX + "%06d" % frame.number + ".exr", 'rb') - conn.request("PUT", "render", f, headers=headers) - f.close() - response = conn.getresponse() - else: - headers["job-result"] = str(ERROR) - for frame in job.frames: - headers["job-frame"] = str(frame.number) - # send error result back to server - conn.request("PUT", "render", headers=headers) - response = conn.getresponse() - else: - if timeout < MAX_TIMEOUT: - timeout += INCREMENT_TIMEOUT - - for i in range(timeout): - time.sleep(1) - if engine.test_break(): - conn.close() - return - - conn.close() diff --git a/release/io/netrender/ui.py b/release/io/netrender/ui.py deleted file mode 100644 index df2b6288fb0..00000000000 --- a/release/io/netrender/ui.py +++ /dev/null @@ -1,293 +0,0 @@ -import bpy -import sys, os -import http, http.client, http.server, urllib -import subprocess, shutil, time, hashlib - -import netrender.slave as slave -import netrender.master as master - -from netrender.utils import * - -VERSION = b"0.3" - -PATH_PREFIX = "/tmp/" - -QUEUED = 0 -DISPATCHED = 1 -DONE = 2 -ERROR = 3 - -class RenderButtonsPanel(bpy.types.Panel): - __space_type__ = "PROPERTIES" - __region_type__ = "WINDOW" - __context__ = "scene" - # COMPAT_ENGINES must be defined in each subclass, external engines can add themselves here - - def poll(self, context): - rd = context.scene.render_data - return (rd.use_game_engine==False) and (rd.engine in self.COMPAT_ENGINES) - -# Setting panel, use in the scene for now. 
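# Every panel below subclasses RenderButtonsPanel and declares
# COMPAT_ENGINES = set(['NET_RENDER']), so the panels are only drawn while the
# network render engine is the active render engine (see poll() above).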
-@rnaType -class SCENE_PT_network_settings(RenderButtonsPanel): - __label__ = "Network Settings" - COMPAT_ENGINES = set(['NET_RENDER']) - - def draw_header(self, context): - layout = self.layout - scene = context.scene - - def draw(self, context): - layout = self.layout - scene = context.scene - rd = scene.render_data - - layout.active = True - - split = layout.split() - - col = split.column() - - if scene.network_render.mode == "RENDER_CLIENT": - col.itemO("render.netclientanim", icon='ICON_RENDER_ANIMATION', text="Animaton on network") - - col.itemR(scene.network_render, "mode") - col.itemR(scene.network_render, "path") - col.itemR(scene.network_render, "server_address") - col.itemR(scene.network_render, "server_port") - - if scene.network_render.mode == "RENDER_MASTER": - col.itemR(scene.network_render, "server_broadcast") - else: - col.itemO("render.netclientscan", icon="ICON_FILE_REFRESH", text="") - - if scene.network_render.mode == "RENDER_CLIENT": - col.itemO("render.netclientsend", text="send job to server") - col.itemR(scene.network_render, "job_name") - col.itemR(scene.network_render, "priority") - col.itemR(scene.network_render, "chunks") - -@rnaType -class SCENE_PT_network_slaves(RenderButtonsPanel): - __label__ = "Slaves Status" - COMPAT_ENGINES = set(['NET_RENDER']) - - def poll(self, context): - scene = context.scene - return super().poll(context) and scene.network_render.mode == "RENDER_CLIENT" - - def draw(self, context): - layout = self.layout - - scene = context.scene - netsettings = scene.network_render - - row = layout.row() - row.template_list(netsettings, "slaves", netsettings, "active_slave_index", rows=2) - - col = row.column() - - subcol = col.column(align=True) - subcol.itemO("render.netclientslaves", icon="ICON_FILE_REFRESH", text="") - subcol.itemO("render.netclientblacklistslave", icon="ICON_ZOOMOUT", text="") - - if len(bpy.data.netrender_slaves) == 0 and len(netsettings.slaves) > 0: - while(len(netsettings.slaves) > 0): - netsettings.slaves.remove(0) - - if netsettings.active_slave_index >= 0 and len(netsettings.slaves) > 0: - layout.itemS() - - slave = bpy.data.netrender_slaves[netsettings.active_slave_index] - - layout.itemL(text="Name: " + slave.name) - layout.itemL(text="Address: " + slave.address[0]) - layout.itemL(text="Seen: " + time.ctime(slave.last_seen)) - layout.itemL(text="Stats: " + slave.stats) - -@rnaType -class SCENE_PT_network_slaves_blacklist(RenderButtonsPanel): - __label__ = "Slaves Blacklist" - COMPAT_ENGINES = set(['NET_RENDER']) - - def poll(self, context): - scene = context.scene - return super().poll(context) and scene.network_render.mode == "RENDER_CLIENT" - - def draw(self, context): - layout = self.layout - - scene = context.scene - netsettings = scene.network_render - - row = layout.row() - row.template_list(netsettings, "slaves_blacklist", netsettings, "active_blacklisted_slave_index", rows=2) - - col = row.column() - - subcol = col.column(align=True) - subcol.itemO("render.netclientwhitelistslave", icon="ICON_ZOOMOUT", text="") - - if len(bpy.data.netrender_blacklist) == 0 and len(netsettings.slaves_blacklist) > 0: - while(len(netsettings.slaves_blacklist) > 0): - netsettings.slaves_blacklist.remove(0) - - if netsettings.active_blacklisted_slave_index >= 0 and len(netsettings.slaves_blacklist) > 0: - layout.itemS() - - slave = bpy.data.netrender_blacklist[netsettings.active_blacklisted_slave_index] - - layout.itemL(text="Name: " + slave.name) - layout.itemL(text="Address: " + slave.address[0]) - layout.itemL(text="Seen: " + 
slave.last_seen) - layout.itemL(text="Stats: " + time.ctime(slave.stats)) - -@rnaType -class SCENE_PT_network_jobs(RenderButtonsPanel): - __label__ = "Jobs" - COMPAT_ENGINES = set(['NET_RENDER']) - - def poll(self, context): - scene = context.scene - return super().poll(context) and scene.network_render.mode == "RENDER_CLIENT" - - def draw(self, context): - layout = self.layout - - scene = context.scene - netsettings = scene.network_render - - row = layout.row() - row.template_list(netsettings, "jobs", netsettings, "active_job_index", rows=2) - - col = row.column() - - subcol = col.column(align=True) - subcol.itemO("render.netclientstatus", icon="ICON_FILE_REFRESH", text="") - subcol.itemO("render.netclientcancel", icon="ICON_ZOOMOUT", text="") - subcol.itemO("render.netclientdownload", icon='ICON_RENDER_ANIMATION', text="") - - if len(bpy.data.netrender_jobs) == 0 and len(netsettings.jobs) > 0: - while(len(netsettings.jobs) > 0): - netsettings.jobs.remove(0) - - if netsettings.active_job_index >= 0 and len(netsettings.jobs) > 0: - layout.itemS() - - job = bpy.data.netrender_jobs[netsettings.active_job_index] - - layout.itemL(text="Name: %s" % job.name) - layout.itemL(text="Length: %04i" % len(job)) - layout.itemL(text="Done: %04i" % job.results[DONE]) - layout.itemL(text="Error: %04i" % job.results[ERROR]) - -@rnaType -class NetRenderSettings(bpy.types.IDPropertyGroup): - pass - -@rnaType -class NetRenderSlave(bpy.types.IDPropertyGroup): - pass - -@rnaType -class NetRenderJob(bpy.types.IDPropertyGroup): - pass - -bpy.types.Scene.PointerProperty(attr="network_render", type=NetRenderSettings, name="Network Render", description="Network Render Settings") - -NetRenderSettings.StringProperty( attr="server_address", - name="Server address", - description="IP or name of the master render server", - maxlen = 128, - default = "[default]") - -NetRenderSettings.IntProperty( attr="server_port", - name="Server port", - description="port of the master render server", - default = 8000, - min=1, - max=65535) - -NetRenderSettings.BoolProperty( attr="server_broadcast", - name="Broadcast server address", - description="broadcast server address on local network", - default = True) - -NetRenderSettings.StringProperty( attr="path", - name="Path", - description="Path for temporary files", - maxlen = 128, - default = "/tmp/") - -NetRenderSettings.StringProperty( attr="job_name", - name="Job name", - description="Name of the job", - maxlen = 128, - default = "[default]") - -NetRenderSettings.IntProperty( attr="chunks", - name="Chunks", - description="Number of frame to dispatch to each slave in one chunk", - default = 5, - min=1, - max=65535) - -NetRenderSettings.IntProperty( attr="priority", - name="Priority", - description="Priority of the job", - default = 1, - min=1, - max=10) - -NetRenderSettings.StringProperty( attr="job_id", - name="Network job id", - description="id of the last sent render job", - maxlen = 64, - default = "") - -NetRenderSettings.IntProperty( attr="active_slave_index", - name="Index of the active slave", - description="", - default = -1, - min= -1, - max=65535) - -NetRenderSettings.IntProperty( attr="active_blacklisted_slave_index", - name="Index of the active slave", - description="", - default = -1, - min= -1, - max=65535) - -NetRenderSettings.IntProperty( attr="active_job_index", - name="Index of the active job", - description="", - default = -1, - min= -1, - max=65535) - -NetRenderSettings.EnumProperty(attr="mode", - items=( - ("RENDER_CLIENT", "Client", "Act as render client"), - 
("RENDER_MASTER", "Master", "Act as render master"), - ("RENDER_SLAVE", "Slave", "Act as render slave"), - ), - name="network mode", - description="mode of operation of this instance", - default="RENDER_CLIENT") - -NetRenderSettings.CollectionProperty(attr="slaves", type=NetRenderSlave, name="Slaves", description="") -NetRenderSettings.CollectionProperty(attr="slaves_blacklist", type=NetRenderSlave, name="Slaves Blacklist", description="") -NetRenderSettings.CollectionProperty(attr="jobs", type=NetRenderJob, name="Job List", description="") - -NetRenderSlave.StringProperty( attr="name", - name="Name of the slave", - description="", - maxlen = 64, - default = "") - -NetRenderJob.StringProperty( attr="name", - name="Name of the job", - description="", - maxlen = 128, - default = "") diff --git a/release/io/netrender/utils.py b/release/io/netrender/utils.py deleted file mode 100644 index 72a29472748..00000000000 --- a/release/io/netrender/utils.py +++ /dev/null @@ -1,72 +0,0 @@ -import bpy -import sys, os -import re -import http, http.client, http.server, urllib -import subprocess, shutil, time, hashlib - -import netrender.model - -VERSION = b"0.5" - -QUEUED = 0 -DISPATCHED = 1 -DONE = 2 -ERROR = 3 - -def rnaType(rna_type): - bpy.types.register(rna_type) - return rna_type - -def rnaOperator(rna_op): - bpy.ops.add(rna_op) - return rna_op - -def clientConnection(scene): - netsettings = scene.network_render - - if netsettings.server_address == "[default]": - bpy.ops.render.netclientscan() - - conn = http.client.HTTPConnection(netsettings.server_address, netsettings.server_port) - - if clientVerifyVersion(conn): - return conn - else: - conn.close() - return None - -def clientVerifyVersion(conn): - conn.request("GET", "version") - response = conn.getresponse() - - if response.status != http.client.OK: - conn.close() - return False - - server_version = response.read() - - if server_version != VERSION: - print("Incorrect server version!") - print("expected", VERSION, "received", server_version) - return False - - return True - -def prefixPath(prefix_directory, file_path, prefix_path): - if os.path.isabs(file_path): - # if an absolute path, make sure path exists, if it doesn't, use relative local path - full_path = file_path - if not os.path.exists(full_path): - p, n = os.path.split(full_path) - - if main_path and p.startswith(main_path): - directory = prefix_directory + p[len(main_path):] - full_path = directory + n - if not os.path.exists(directory): - os.mkdir(directory) - else: - full_path = prefix_directory + n - else: - full_path = prefix_directory + file_path - - return full_path \ No newline at end of file diff --git a/release/scripts/3ds_export.py b/release/scripts/3ds_export.py deleted file mode 100644 index 87680bce1b0..00000000000 --- a/release/scripts/3ds_export.py +++ /dev/null @@ -1,1019 +0,0 @@ -#!BPY -# coding: utf-8 -""" -Name: '3D Studio (.3ds)...' -Blender: 243 -Group: 'Export' -Tooltip: 'Export to 3DS file format (.3ds).' -""" - -__author__ = ["Campbell Barton", "Bob Holcomb", "Richard Lärkäng", "Damien McGinnes", "Mark Stijnman"] -__url__ = ("blenderartists.org", "www.blender.org", "www.gametutorials.com", "lib3ds.sourceforge.net/") -__version__ = "0.90a" -__bpydoc__ = """\ - -3ds Exporter - -This script Exports a 3ds file. - -Exporting is based on 3ds loader from www.gametutorials.com(Thanks DigiBen) and using information -from the lib3ds project (http://lib3ds.sourceforge.net/) sourcecode. 
-""" - -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Script copyright (C) Bob Holcomb -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - - -###################################################### -# Importing modules -###################################################### - -import Blender -import bpy -from BPyMesh import getMeshFromObject -from BPyObject import getDerivedObjects -try: - import struct -except: - struct = None - -# So 3ds max can open files, limit names to 12 in length -# this is verry annoying for filenames! -name_unique = [] -name_mapping = {} -def sane_name(name): - name_fixed = name_mapping.get(name) - if name_fixed != None: - return name_fixed - - if len(name) > 12: - new_name = name[:12] - else: - new_name = name - - i = 0 - - while new_name in name_unique: - new_name = new_name[:-4] + '.%.3d' % i - i+=1 - - name_unique.append(new_name) - name_mapping[name] = new_name - return new_name - -###################################################### -# Data Structures -###################################################### - -#Some of the chunks that we will export -#----- Primary Chunk, at the beginning of each file -PRIMARY= long("0x4D4D",16) - -#------ Main Chunks -OBJECTINFO = long("0x3D3D",16); #This gives the version of the mesh and is found right before the material and object information -VERSION = long("0x0002",16); #This gives the version of the .3ds file -KFDATA = long("0xB000",16); #This is the header for all of the key frame info - -#------ sub defines of OBJECTINFO -MATERIAL=45055 #0xAFFF // This stored the texture info -OBJECT=16384 #0x4000 // This stores the faces, vertices, etc... - -#>------ sub defines of MATERIAL -MATNAME = long("0xA000",16); # This holds the material name -MATAMBIENT = long("0xA010",16); # Ambient color of the object/material -MATDIFFUSE = long("0xA020",16); # This holds the color of the object/material -MATSPECULAR = long("0xA030",16); # SPecular color of the object/material -MATSHINESS = long("0xA040",16); # ?? 
-MATMAP = long("0xA200",16); # This is a header for a new material -MATMAPFILE = long("0xA300",16); # This holds the file name of the texture - -RGB1= long("0x0011",16) -RGB2= long("0x0012",16) - -#>------ sub defines of OBJECT -OBJECT_MESH = long("0x4100",16); # This lets us know that we are reading a new object -OBJECT_LIGHT = long("0x4600",16); # This lets un know we are reading a light object -OBJECT_CAMERA= long("0x4700",16); # This lets un know we are reading a camera object - -#>------ sub defines of CAMERA -OBJECT_CAM_RANGES= long("0x4720",16); # The camera range values - -#>------ sub defines of OBJECT_MESH -OBJECT_VERTICES = long("0x4110",16); # The objects vertices -OBJECT_FACES = long("0x4120",16); # The objects faces -OBJECT_MATERIAL = long("0x4130",16); # This is found if the object has a material, either texture map or color -OBJECT_UV = long("0x4140",16); # The UV texture coordinates -OBJECT_TRANS_MATRIX = long("0x4160",16); # The Object Matrix - -#>------ sub defines of KFDATA -KFDATA_KFHDR = long("0xB00A",16); -KFDATA_KFSEG = long("0xB008",16); -KFDATA_KFCURTIME = long("0xB009",16); -KFDATA_OBJECT_NODE_TAG = long("0xB002",16); - -#>------ sub defines of OBJECT_NODE_TAG -OBJECT_NODE_ID = long("0xB030",16); -OBJECT_NODE_HDR = long("0xB010",16); -OBJECT_PIVOT = long("0xB013",16); -OBJECT_INSTANCE_NAME = long("0xB011",16); -POS_TRACK_TAG = long("0xB020",16); -ROT_TRACK_TAG = long("0xB021",16); -SCL_TRACK_TAG = long("0xB022",16); - -def uv_key(uv): - return round(uv.x, 6), round(uv.y, 6) - -# size defines: -SZ_SHORT = 2 -SZ_INT = 4 -SZ_FLOAT = 4 - -class _3ds_short(object): - '''Class representing a short (2-byte integer) for a 3ds file. - *** This looks like an unsigned short H is unsigned from the struct docs - Cam***''' - __slots__ = 'value' - def __init__(self, val=0): - self.value=val - - def get_size(self): - return SZ_SHORT - - def write(self,file): - file.write(struct.pack("= mat_ls_len: - mat_index = f.mat = 0 - mat = mat_ls[mat_index] - if mat: mat_name = mat.name - else: mat_name = None - # else there alredy set to none - - img = f.image - if img: img_name = img.name - else: img_name = None - - materialDict.setdefault((mat_name, img_name), (mat, img) ) - - - else: - for mat in mat_ls: - if mat: # material may be None so check its not. - materialDict.setdefault((mat.name, None), (mat, None) ) - - # Why 0 Why! - for f in data.faces: - if f.mat >= mat_ls_len: - f.mat = 0 - - # Make material chunks for all materials used in the meshes: - for mat_and_image in materialDict.itervalues(): - object_info.add_subchunk(make_material_chunk(mat_and_image[0], mat_and_image[1])) - - # Give all objects a unique ID and build a dictionary from object name to object id: - """ - name_to_id = {} - for ob, data in mesh_objects: - name_to_id[ob.name]= len(name_to_id) - #for ob in empty_objects: - # name_to_id[ob.name]= len(name_to_id) - """ - - # Create object chunks for all meshes: - i = 0 - for ob, blender_mesh in mesh_objects: - # create a new object chunk - object_chunk = _3ds_chunk(OBJECT) - - # set the object name - object_chunk.add_variable("name", _3ds_string(sane_name(ob.name))) - - # make a mesh chunk out of the mesh: - object_chunk.add_subchunk(make_mesh_chunk(blender_mesh, materialDict)) - object_info.add_subchunk(object_chunk) - - ''' # COMMENTED OUT FOR 2.42 RELEASE!! 
CRASHES 3DS MAX - # make a kf object node for the object: - kfdata.add_subchunk(make_kf_obj_node(ob, name_to_id)) - ''' - blender_mesh.verts = None - i+=i - - # Create chunks for all empties: - ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX - for ob in empty_objects: - # Empties only require a kf object node: - kfdata.add_subchunk(make_kf_obj_node(ob, name_to_id)) - pass - ''' - - # Add main object info chunk to primary chunk: - primary.add_subchunk(object_info) - - ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX - # Add main keyframe data chunk to primary chunk: - primary.add_subchunk(kfdata) - ''' - - # At this point, the chunk hierarchy is completely built. - - # Check the size: - primary.get_size() - # Open the file for writing: - file = open( filename, 'wb' ) - - # Recursively write the chunks to file: - primary.write(file) - - # Close the file: - file.close() - - # Debugging only: report the exporting time: - Blender.Window.WaitCursor(0) - print "3ds export time: %.2f" % (Blender.sys.time() - time1) - - # Debugging only: dump the chunk hierarchy: - #primary.dump() - - -if __name__=='__main__': - if struct: - Blender.Window.FileSelector(save_3ds, "Export 3DS", Blender.sys.makename(ext='.3ds')) - else: - Blender.Draw.PupMenu("Error%t|This script requires a full python installation") -# save_3ds('/test_b.3ds') diff --git a/release/scripts/3ds_import.py b/release/scripts/3ds_import.py deleted file mode 100644 index bcde82c4869..00000000000 --- a/release/scripts/3ds_import.py +++ /dev/null @@ -1,1007 +0,0 @@ -#!BPY -""" -Name: '3D Studio (.3ds)...' -Blender: 244 -Group: 'Import' -Tooltip: 'Import from 3DS file format (.3ds)' -""" - -__author__= ['Bob Holcomb', 'Richard L?rk?ng', 'Damien McGinnes', 'Campbell Barton', 'Mario Lapin'] -__url__ = ("blenderartists.org", "www.blender.org", "www.gametutorials.com", "lib3ds.sourceforge.net/") -__version__= '0.996' -__bpydoc__= '''\ - -3ds Importer - -This script imports a 3ds file and the materials into Blender for editing. - -Loader is based on 3ds loader from www.gametutorials.com (Thanks DigiBen). - -0.996 by Mario Lapin (mario.lapin@gmail.com) 13/04/200
- - Implemented workaround to correct association between name, geometry and materials of - imported meshes. - - Without this patch, version 0.995 of this importer would associate to each mesh object the - geometry and the materials of the previously parsed mesh object. As a result, the name of the - first mesh object would be thrown away, and the name of the last mesh object would be - automatically merged with a '.001' at the end. No object would disappear; however, object - names and materials would be completely jumbled. - -0.995 by Campbell Barton
-- workaround for buggy mesh vert delete -- minor tweaks - -0.99 by Bob Holcomb
-- added support for floating point color values that previously broke on import. - -0.98 by Campbell Barton
-- import faces and verts to lists instead of a mesh, convert to a mesh later -- use new index mapping feature of mesh to re-map faces that were not added. - -0.97 by Campbell Barton
-- Strip material names of spaces -- Added import as instance to import the 3ds into its own - scene and add a group instance to the current scene -- New option to scale down imported objects so they are within a limited bounding area. - -0.96 by Campbell Barton
-- Added workaround for bug in setting UV's for Zero vert index UV faces. -- Removed unique name function, let blender make the names unique. - -0.95 by Campbell Barton
-- Removed workarounds for Blender 2.41 -- Mesh objects split by material; many 3ds objects used more than 16 per mesh. -- Removed a lot of unneeded variable creation. - -0.94 by Campbell Barton
-- Face import tested at roughly an overall 16x speedup over 0.93. -- Material importing speedup. -- Tested with more models. -- Support some corrupt models. - -0.93 by Campbell Barton
-- Tested with 400 3ds files from turbosquid and samples. -- Tactfully ignore faces that used the same verts twice. -- Rolled back to the 0.83 sloppy un-reorganized code; the reorganization had broken UV coord loading. -- Converted from NMesh to Mesh. -- Faster and cleaner new names. -- Use external comprehensive image loader. -- Re-integrated the 0.92 and 0.9 changes -- Fixes for 2.41 compat. -- Non-textured faces do not use a texture flag. - -0.92
-- Added support for diffuse, alpha, spec, bump maps in a single material - -0.9
-- Reorganized code into object/material block functions
-- Use of Matrix() to copy matrix data
-- added support for material transparency
- -0.83 2005-08-07: Campell Barton -- Aggressive image finding and case insensitivy for posisx systems. - -0.82a 2005-07-22 -- image texture loading (both for face uv and renderer) - -0.82 - image texture loading (for face uv) - -0.81a (fork- not 0.9) Campbell Barton 2005-06-08 -- Simplified import code -- Never overwrite data -- Faster list handling -- Leaves import selected - -0.81 Damien McGinnes 2005-01-09 -- handle missing images better - -0.8 Damien McGinnes 2005-01-08 -- copies sticky UV coords to face ones -- handles images better -- Recommend that you run 'RemoveDoubles' on each imported mesh after using this script - -''' - -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Script copyright (C) Bob Holcomb -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -# Importing modules - -import Blender -import bpy -from Blender import Mesh, Object, Material, Image, Texture, Lamp, Mathutils -from Blender.Mathutils import Vector -import BPyImage - -import BPyMessages - -try: - from struct import calcsize, unpack -except: - calcsize= unpack= None - - - -# If python version is less than 2.4, try to get set stuff from module -try: - set -except: - from sets import Set as set - -BOUNDS_3DS= [] - - -#this script imports uvcoords as sticky vertex coords -#this parameter enables copying these to face uv coords -#which shold be more useful. - -def createBlenderTexture(material, name, image): - texture= bpy.data.textures.new(name) - texture.setType('Image') - texture.image= image - material.setTexture(0, texture, Texture.TexCo.UV, Texture.MapTo.COL) - - - -###################################################### -# Data Structures -###################################################### - -#Some of the chunks that we will see -#----- Primary Chunk, at the beginning of each file -PRIMARY= long('0x4D4D',16) - -#------ Main Chunks -OBJECTINFO = long('0x3D3D',16); #This gives the version of the mesh and is found right before the material and object information -VERSION = long('0x0002',16); #This gives the version of the .3ds file -EDITKEYFRAME= long('0xB000',16); #This is the header for all of the key frame info - -#------ sub defines of OBJECTINFO -MATERIAL=45055 #0xAFFF // This stored the texture info -OBJECT=16384 #0x4000 // This stores the faces, vertices, etc... - -#>------ sub defines of MATERIAL -#------ sub defines of MATERIAL_BLOCK -MAT_NAME = long('0xA000',16) # This holds the material name -MAT_AMBIENT = long('0xA010',16) # Ambient color of the object/material -MAT_DIFFUSE = long('0xA020',16) # This holds the color of the object/material -MAT_SPECULAR = long('0xA030',16) # SPecular color of the object/material -MAT_SHINESS = long('0xA040',16) # ?? 
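# Material colours in a 3ds file may be stored either as three floats
# (MAT_FLOAT_COLOR, chunk 0x0010) or as three bytes (MAT_24BIT_COLOR, chunk
# 0x0011); the importer below reads both forms and normalises the byte form,
# roughly like this (illustrative only, 'data' being the 3 raw bytes):
#
#   rgb = [float(c) / 255 for c in unpack('<3B', data)]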
-MAT_TRANSPARENCY= long('0xA050',16) # Transparency value of material -MAT_SELF_ILLUM = long('0xA080',16) # Self Illumination value of material -MAT_WIRE = long('0xA085',16) # Only render's wireframe - -MAT_TEXTURE_MAP = long('0xA200',16) # This is a header for a new texture map -MAT_SPECULAR_MAP= long('0xA204',16) # This is a header for a new specular map -MAT_OPACITY_MAP = long('0xA210',16) # This is a header for a new opacity map -MAT_REFLECTION_MAP= long('0xA220',16) # This is a header for a new reflection map -MAT_BUMP_MAP = long('0xA230',16) # This is a header for a new bump map -MAT_MAP_FILENAME = long('0xA300',16) # This holds the file name of the texture - -MAT_FLOAT_COLOR = long ('0x0010', 16) #color defined as 3 floats -MAT_24BIT_COLOR = long ('0x0011', 16) #color defined as 3 bytes - -#>------ sub defines of OBJECT -OBJECT_MESH = long('0x4100',16); # This lets us know that we are reading a new object -OBJECT_LAMP = long('0x4600',16); # This lets un know we are reading a light object -OBJECT_LAMP_SPOT = long('0x4610',16); # The light is a spotloght. -OBJECT_LAMP_OFF = long('0x4620',16); # The light off. -OBJECT_LAMP_ATTENUATE = long('0x4625',16); -OBJECT_LAMP_RAYSHADE = long('0x4627',16); -OBJECT_LAMP_SHADOWED = long('0x4630',16); -OBJECT_LAMP_LOCAL_SHADOW = long('0x4640',16); -OBJECT_LAMP_LOCAL_SHADOW2 = long('0x4641',16); -OBJECT_LAMP_SEE_CONE = long('0x4650',16); -OBJECT_LAMP_SPOT_RECTANGULAR= long('0x4651',16); -OBJECT_LAMP_SPOT_OVERSHOOT= long('0x4652',16); -OBJECT_LAMP_SPOT_PROJECTOR= long('0x4653',16); -OBJECT_LAMP_EXCLUDE= long('0x4654',16); -OBJECT_LAMP_RANGE= long('0x4655',16); -OBJECT_LAMP_ROLL= long('0x4656',16); -OBJECT_LAMP_SPOT_ASPECT= long('0x4657',16); -OBJECT_LAMP_RAY_BIAS= long('0x4658',16); -OBJECT_LAMP_INNER_RANGE= long('0x4659',16); -OBJECT_LAMP_OUTER_RANGE= long('0x465A',16); -OBJECT_LAMP_MULTIPLIER = long('0x465B',16); -OBJECT_LAMP_AMBIENT_LIGHT = long('0x4680',16); - - - -OBJECT_CAMERA= long('0x4700',16); # This lets un know we are reading a camera object - -#>------ sub defines of CAMERA -OBJECT_CAM_RANGES= long('0x4720',16); # The camera range values - -#>------ sub defines of OBJECT_MESH -OBJECT_VERTICES = long('0x4110',16); # The objects vertices -OBJECT_FACES = long('0x4120',16); # The objects faces -OBJECT_MATERIAL = long('0x4130',16); # This is found if the object has a material, either texture map or color -OBJECT_UV = long('0x4140',16); # The UV texture coordinates -OBJECT_TRANS_MATRIX = long('0x4160',16); # The Object Matrix - -global scn -scn= None - -#the chunk class -class chunk: - ID=0 - length=0 - bytes_read=0 - - #we don't read in the bytes_read, we compute that - binary_format='3): - print '\tNon-Fatal Error: Version greater than 3, may not load correctly: ', version - - #is it an object info chunk? - elif (new_chunk.ID==OBJECTINFO): - #print 'elif (new_chunk.ID==OBJECTINFO):' - # print 'found an OBJECTINFO chunk' - process_next_chunk(file, new_chunk, importedObjects, IMAGE_SEARCH) - - #keep track of how much we read in the main chunk - new_chunk.bytes_read+=temp_chunk.bytes_read - - #is it an object chunk? - elif (new_chunk.ID==OBJECT): - - if CreateBlenderObject: - putContextMesh(contextMesh_vertls, contextMesh_facels, contextMeshMaterials) - contextMesh_vertls= []; contextMesh_facels= [] - - ## preparando para receber o proximo objeto - contextMeshMaterials= {} # matname:[face_idxs] - contextMeshUV= None - #contextMesh.vertexUV= 1 # Make sticky coords. 
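# Each new OBJECT chunk first flushes the mesh gathered for the previous
# object (putContextMesh above) and then resets the per-object context
# (materials, UVs, matrix) before reading the new object's name.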
- # Reset matrix - contextMatrix_rot= None - #contextMatrix_tx= None - - CreateBlenderObject= True - tempName= read_string(file) - contextObName= tempName - new_chunk.bytes_read += len(tempName)+1 - - #is it a material chunk? - elif (new_chunk.ID==MATERIAL): - #print 'elif (new_chunk.ID==MATERIAL):' - contextMaterial= bpy.data.materials.new('Material') - - elif (new_chunk.ID==MAT_NAME): - #print 'elif (new_chunk.ID==MAT_NAME):' - material_name= read_string(file) - - #plus one for the null character that ended the string - new_chunk.bytes_read+= len(material_name)+1 - - contextMaterial.name= material_name.rstrip() # remove trailing whitespace - MATDICT[material_name]= (contextMaterial.name, contextMaterial) - - elif (new_chunk.ID==MAT_AMBIENT): - #print 'elif (new_chunk.ID==MAT_AMBIENT):' - read_chunk(file, temp_chunk) - if (temp_chunk.ID==MAT_FLOAT_COLOR): - temp_data=file.read(calcsize('3f')) - temp_chunk.bytes_read+=12 - contextMaterial.mirCol=[float(col) for col in unpack('<3f', temp_data)] - elif (temp_chunk.ID==MAT_24BIT_COLOR): - temp_data=file.read(calcsize('3B')) - temp_chunk.bytes_read+= 3 - contextMaterial.mirCol= [float(col)/255 for col in unpack('<3B', temp_data)] # data [0,1,2] == rgb - else: - skip_to_end(file, temp_chunk) - new_chunk.bytes_read+= temp_chunk.bytes_read - - elif (new_chunk.ID==MAT_DIFFUSE): - #print 'elif (new_chunk.ID==MAT_DIFFUSE):' - read_chunk(file, temp_chunk) - if (temp_chunk.ID==MAT_FLOAT_COLOR): - temp_data=file.read(calcsize('3f')) - temp_chunk.bytes_read+=12 - contextMaterial.rgbCol=[float(col) for col in unpack('<3f', temp_data)] - elif (temp_chunk.ID==MAT_24BIT_COLOR): - temp_data=file.read(calcsize('3B')) - temp_chunk.bytes_read+= 3 - contextMaterial.rgbCol= [float(col)/255 for col in unpack('<3B', temp_data)] # data [0,1,2] == rgb - else: - skip_to_end(file, temp_chunk) - new_chunk.bytes_read+= temp_chunk.bytes_read - - elif (new_chunk.ID==MAT_SPECULAR): - #print 'elif (new_chunk.ID==MAT_SPECULAR):' - read_chunk(file, temp_chunk) - if (temp_chunk.ID==MAT_FLOAT_COLOR): - temp_data=file.read(calcsize('3f')) - temp_chunk.bytes_read+=12 - contextMaterial.mirCol=[float(col) for col in unpack('<3f', temp_data)] - elif (temp_chunk.ID==MAT_24BIT_COLOR): - temp_data=file.read(calcsize('3B')) - temp_chunk.bytes_read+= 3 - contextMaterial.mirCol= [float(col)/255 for col in unpack('<3B', temp_data)] # data [0,1,2] == rgb - else: - skip_to_end(file, temp_chunk) - new_chunk.bytes_read+= temp_chunk.bytes_read - - elif (new_chunk.ID==MAT_TEXTURE_MAP): - #print 'elif (new_chunk.ID==MAT_TEXTURE_MAP):' - new_texture= bpy.data.textures.new('Diffuse') - new_texture.setType('Image') - img = None - while (new_chunk.bytes_read BOUNDS_3DS[i+3]: - BOUNDS_3DS[i+3]= v[i] # min - - # Get the max axis x/y/z - max_axis= max(BOUNDS_3DS[3]-BOUNDS_3DS[0], BOUNDS_3DS[4]-BOUNDS_3DS[1], BOUNDS_3DS[5]-BOUNDS_3DS[2]) - # print max_axis - if max_axis < 1<<30: # Should never be false but just make sure. - - # Get a new scale factor if set as an option - SCALE=1.0 - while (max_axis*SCALE) > IMPORT_CONSTRAIN_BOUNDS: - SCALE/=10 - - # SCALE Matrix - SCALE_MAT= Blender.Mathutils.Matrix([SCALE,0,0,0],[0,SCALE,0,0],[0,0,SCALE,0],[0,0,0,1]) - - for ob in importedObjects: - ob.setMatrix(ob.matrixWorld*SCALE_MAT) - - # Done constraining to bounds. - - # Select all new objects. - print 'finished importing: "%s" in %.4f sec.' 
% (filename, (Blender.sys.time()-time1)) - file.close() - Blender.Window.WaitCursor(0) - - -DEBUG= False -if __name__=='__main__' and not DEBUG: - if calcsize==None: - Blender.Draw.PupMenu('Error%t|a full python installation not found') - else: - Blender.Window.FileSelector(load_3ds, 'Import 3DS', '*.3ds') - -# For testing compatibility -#load_3ds('/metavr/convert/vehicle/truck_002/TruckTanker1.3DS', False) -#load_3ds('/metavr/archive/convert/old/arranged_3ds_to_hpx-2/only-need-engine-trains/Engine2.3DS', False) -''' - -else: - import os - # DEBUG ONLY - TIME= Blender.sys.time() - import os - print 'Searching for files' - os.system('find /metavr/ -iname "*.3ds" > /tmp/temp3ds_list') - # os.system('find /storage/ -iname "*.3ds" > /tmp/temp3ds_list') - print '...Done' - file= open('/tmp/temp3ds_list', 'r') - lines= file.readlines() - file.close() - # sort by filesize for faster testing - lines_size = [(os.path.getsize(f[:-1]), f[:-1]) for f in lines] - lines_size.sort() - lines = [f[1] for f in lines_size] - - - def between(v,a,b): - if v <= max(a,b) and v >= min(a,b): - return True - return False - - for i, _3ds in enumerate(lines): - if between(i, 650,800): - #_3ds= _3ds[:-1] - print 'Importing', _3ds, '\nNUMBER', i, 'of', len(lines) - _3ds_file= _3ds.split('/')[-1].split('\\')[-1] - newScn= Blender.Scene.New(_3ds_file) - newScn.makeCurrent() - load_3ds(_3ds, False) - - print 'TOTAL TIME: %.6f' % (Blender.sys.time() - TIME) - -''' diff --git a/release/scripts/Axiscopy.py b/release/scripts/Axiscopy.py deleted file mode 100644 index 6a31432edb6..00000000000 --- a/release/scripts/Axiscopy.py +++ /dev/null @@ -1,125 +0,0 @@ -#!BPY - -""" Registration info for Blender menus: <- these words are ignored -Name: 'Axis Orientation Copy' -Blender: 242 -Group: 'Object' -Tip: 'Copy local axis orientation of active object to all selected meshes (changes mesh data)' -""" - -__author__ = "A Vanpoucke (xand)" -__url__ = ("blenderartists.org", "www.blender.org", -"French Blender support forum, http://www.zoo-logique.org/3D.Blender/newsportal/thread.php?group=3D.Blender") -__version__ = "2 17/12/05" - -__bpydoc__ = """\ -This script copies the axis orientation -- X, Y and Z rotations -- of the -active object to all selected meshes. - -It's useful to align the orientations of all meshes of a structure, a human -skeleton, for example. - -Usage: - -Select all mesh objects that need to have their orientations changed -(reminder: keep SHIFT pressed after the first, to add each new one to the -selection), then select the object whose orientation will be copied from and -finally run this script to update the angles. - -Notes:
- This script changes mesh data: the vertices are transformed.
- Before copying the orientation to each object, the script stores its -transformation matrix. Then the angles are copied and after that the object's -vertices are transformed "back" so that they still have the same positions as -before. In other words, the rotations are updated, but you won't notice that -just from looking at the objects.
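In code terms, the per-object step looks like this (a condensed sketch of the loop body in main() further down; ob_act is the active object, ob_target/me_target the object/mesh pair being updated, names as in that function, Blender 2.4x API):

    rot_target = ob_target.matrixWorld.rotationPart().toEuler().toMatrix()
    rot_source = ob_act.matrixWorld.rotationPart().toEuler().toMatrix()
    rot_source_inv = rot_source.copy().invert()
    tx_mat = rot_target * rot_source_inv   # difference between the two orientations
    tx_mat.resize4x4()
    me_target.transform(tx_mat)            # pre-rotate the vertices so they keep their world positions
    ob_target.rot = ob_act.rot             # ...once the rotation values are copied over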
- Checking their X, Y and Z rotation values with "Transform Properties" in -the 3D View's Object menu shows the angles are now the same of the active -object. Or simply look at the transform manipulator handles in local transform -orientation. -""" - - -# $Id$ -# -#---------------------------------------------- -# A Vanpoucke (xand) -#from the previous script realignaxis -#---------------------------------------------- -# Communiquer les problemes et erreurs sur: -# http://www.zoo-logique.org/3D.Blender/newsportal/thread.php?group=3D.Blender -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Copyright (C) 2003, 2004: A Vanpoucke -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -from Blender import * -from Blender import Mathutils -from Blender.Mathutils import * -import BPyMessages - -def realusers(data): - users = data.users - if data.fakeUser: users -= 1 - return users - - - -def main(): - - scn_obs= Scene.GetCurrent().objects - ob_act = scn_obs.active - scn_obs = scn_obs.context - - if not ob_act: - BPyMessages.Error_NoActive() - - obs = [(ob, ob.getData(mesh=1)) for ob in scn_obs if ob != ob_act] - - for ob, me in obs: - - if ob.type != 'Mesh': - Draw.PupMenu("Error%t|Selection must be made up of mesh objects only") - return - - if realusers(me) != 1: - Draw.PupMenu("Error%t|Meshes must be single user") - return - - if len(obs) < 1: - Draw.PupMenu("Error: you must select at least 2 objects") - return - - result = Draw.PupMenu("Copy axis orientation from: " + ob_act.name + " ?%t|OK") - if result == -1: - return - - for ob_target, me_target in obs: - if ob_act.rot != ob_target.rot: - rot_target = ob_target.matrixWorld.rotationPart().toEuler().toMatrix() - rot_source = ob_act.matrixWorld.rotationPart().toEuler().toMatrix() - rot_source_inv = rot_source.copy().invert() - tx_mat = rot_target * rot_source_inv - tx_mat.resize4x4() - me_target.transform(tx_mat) - ob_target.rot=ob_act.rot - -if __name__ == '__main__': - main() \ No newline at end of file diff --git a/release/scripts/DirectX8Exporter.py b/release/scripts/DirectX8Exporter.py deleted file mode 100644 index 8a0ecaf0eb7..00000000000 --- a/release/scripts/DirectX8Exporter.py +++ /dev/null @@ -1,1196 +0,0 @@ -#!BPY - -""" -# Name: 'DirectX (.x)...' -# Blender: 242 -# Group: 'Export' -# Tooltip: 'Export to DirectX text file format format for XNA Animation Component Library.' 
-""" -__author__ = "vertex color exporting feature is added by mnemoto (original:minahito (original:Arben (Ben) Omari))" -__url__ = ("blender.org", "blenderartists.org", "Adjuster's site http://sunday-lab.blogspot.com/, Author's site http://www.omariben.too.it","Adjuster's site http://ex.homeunix.net/") -__version__ = "3.1" - -__bpydoc__ = """\ -This script exports a Blender mesh with armature to DirectX 8's text file -format. - -Notes:
- Check author's site or the elYsiun forum for a new beta version of the -DX exporter. -""" -# DirectXExporter.py version 3.0 -# Copyright (C) 2006 Arben OMARI -- omariarben@everyday.com -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. - -# This script export meshes created with Blender in DirectX8 file format -# it exports meshes,armatures,materials,normals,texturecoords and animations - -# Grab the latest version here :www.omariben.too.it - -# [Notice] -# This script is the custom version of Mr.Arben Omari's great work. -# If you have a question about the adjusted part, visit http://sunday-lab.blogspot.com/. - -import Blender -from Blender import Types, Object, NMesh, Material,Armature,Mesh -from Blender.Mathutils import * -from Blender import Draw, BGL -from Blender.BGL import * -try: import math -except: math = None - -global mat_flip,index_list,space,bone_list,mat_dict -global anim,flip_norm,swap_zy,flip_z,speed,ticks,no_light,recalc_norm,Bl_norm -bone_list =[] -index_list = [] -mat_dict = {} -space = 0;flip_z = 1;anim=0;swap_yz=0;flip_norm=0;speed=0;ticks= 25 -Bl_norm = 1;recalc_norm = 0;no_light = 0 - -toggle_val = 0 -toggle1_val = 0 -toggle2_val = 0 -toggle3_val = 1 -toggle4_val = 0 -toggle5_val = 1 -toggle6_val = 0 -toggle7_val = 0 -anim_tick = Draw.Create(25) - -#*********************************************** -# DirectX file spec only allows letters, digits, and -# underscore in Names. 
-#*********************************************** -def make_legal_name(starting_name): - new_name = starting_name.replace('.','_') - new_name = new_name.replace(' ','_') - if new_name[0].isdigit(): - new_name = '_' + new_name - return new_name - -#*********************************************** -# MAIN -#*********************************************** - -def my_callback(filename): - if filename.find('.x', -2) <= 0: filename += '.x' - xexport = xExport(filename) - xexport.SelectObjs() - -def my_callback_sel(filename): - if filename.find('.x', -2) <= 0: filename += '.x' - xexport = xExport(filename) - xexport.exportSelMesh() -def event(evt, val): - if evt == Draw.ESCKEY: - Draw.Exit() - return - -def button_event(evt): - global toggle_val,toggle1_val,toggle2_val,toggle3_val,toggle4_val,toggle5_val,toggle6_val,toggle7_val - global flip_z,swap_yz,flip_norm,anim,ticks,speed,no_light,Bl_norm,recalc_norm - arg = __script__['arg'] - if evt == 1: - toggle_val = 1 - toggle_val - anim = toggle_val - Draw.Redraw(1) - if evt == 2: - toggle1_val = 1 - toggle1_val - flip_norm = toggle1_val - Draw.Redraw(1) - if evt == 3: - toggle2_val = 1 - toggle2_val - swap_yz = toggle2_val - Draw.Redraw(1) - if evt == 4: - toggle3_val = 1 - toggle3_val - flip_z = toggle3_val - Draw.Redraw(1) - if evt == 5: - toggle4_val = 1 - toggle4_val - speed = toggle4_val - Draw.Redraw(1) - if evt == 10: - toggle5_val = 1 - toggle5_val - if toggle5_val==1: - toggle6_val = 0 - toggle7_val = 0 - else : - toggle6_val = 1 - toggle7_val = 1 - no_light = toggle7_val - recalc_norm = toggle6_val - Bl_norm = toggle5_val - Draw.Redraw(1) - if evt == 11: - toggle6_val = 1 - toggle6_val - if toggle6_val==1: - toggle5_val = 0 - toggle7_val = 0 - else : - toggle5_val = 1 - toggle7_val = 1 - no_light = toggle7_val - recalc_norm = toggle6_val - Bl_norm = toggle5_val - Draw.Redraw(1) - if evt == 12: - toggle7_val = 1 - toggle7_val - if toggle7_val==1: - toggle6_val = 0 - toggle5_val = 0 - else : - toggle6_val = 1 - toggle5_val = 1 - no_light = toggle7_val - recalc_norm = toggle6_val - Bl_norm = toggle5_val - Draw.Redraw(1) - if evt == 6: - ticks = anim_tick.val - if evt == 7: - fname = Blender.sys.makename(ext = ".x") - Blender.Window.FileSelector(my_callback, "Export DirectX", fname) - if evt == 8: - fname = Blender.sys.makename(ext = ".x") - Blender.Window.FileSelector(my_callback_sel, "Export DirectX", fname) - if evt == 9: - Draw.Exit() - - -def draw(): - global animsg,flipmsg,swapmsg,anim_tick - global flip_z,swap_yz,flip_norm,anim,ticks,speed,recalc_norm,Bl_norm,no_light - glClearColor(0.55,0.6,0.6,1) - glClear(BGL.GL_COLOR_BUFFER_BIT) - #external box - glColor3f(0.2,0.3,0.3) - rect(10,402,300,382) - #-- - #glColor3f(0.3,0.4,0.4) - #rect(11,399,298,398) - #-- - glColor3f(0.5,0.75,0.65) - rect(14,398,292,30) - #-- - glColor3f(0.5,0.75,0.65) - rect(14,366,292,160) - #-- - glColor3f(0.5,0.75,0.65) - rect(14,202,292,60) - #-- - glColor3f(0.5,0.75,0.65) - rect(14,138,292,40) - #-- - glColor3f(0.5,0.75,0.65) - rect(14,94,292,70) - - glColor3f(0.8,.8,0.6) - glRasterPos2i(20, 380) - Draw.Text("DirectX Exporter ",'large') - Draw.Text("(for Blender 2.41)", 'small') - #-------Aniamtion toggle--------------------------------------------- - Draw.Toggle("Anim", 1, 20, 330, 55, 20, toggle_val,"export animations") - if toggle_val : - anim = 1 - animsg = "animation will be exported" - else: - anim = 0 - animsg = "animation will be not exported" - glRasterPos2i(100,335) - Draw.Text(animsg) - #---Flip normals 
toggle----------------------------------------------- - Draw.Toggle("Flip norm", 2, 20, 300, 55, 20, toggle1_val,"invert normals") - if toggle1_val : - flip_norm = 1 - flipmsg = "flipped normals" - else: - flip_norm = 0 - flipmsg = "not flipped normals" - glRasterPos2i(100,305) - Draw.Text(flipmsg) - #------Swap yz toggle---------------------------------------------------------------- - Draw.Toggle("Swap zy", 3, 20, 270, 55, 20, toggle2_val,"swap z,y axis(y up)") - if toggle2_val : - swap_yz = 1 - swapmsg = "Y-axis up" - else: - swap_yz = 0 - swapmsg = "Z-axis up" - glRasterPos2i(100,275) - Draw.Text(swapmsg) - #------Flip z toggle---------------------------------------------------------------- - Draw.Toggle("Flip z", 4, 20, 240, 55, 20, toggle3_val,"flip z axis") - if toggle3_val : - flip_z = 1 - zmsg = "left handed system" - else: - flip_z = 0 - zmsg = "right handed system" - glRasterPos2i(100,245) - Draw.Text(zmsg) - #------Speed toggle---------------------------------------------------------------- - Draw.Toggle("Speed", 5, 20, 210, 55, 20, toggle4_val,"Animation speed") - if toggle4_val : - speed = 1 - spedmsg = "set speed" - anim_tick = Draw.Number("", 6,200, 210, 85, 20, anim_tick.val,1,100000,"ticks per second") - else: - speed = 0 - spedmsg = "" - glRasterPos2i(100,215) - Draw.Text(spedmsg) - #------Blender Normals toggle---------------------------------------------------------------- - Draw.Toggle("Bl.normals", 10, 20, 105, 75, 25, toggle5_val,"export normals as in Blender") - if toggle5_val : - Bl_norm = 1 - #------Recalculute Normals toggle---------------------------------------------------------------- - Draw.Toggle("recalc.no", 11, 120, 105, 75, 25, toggle6_val,"export recalculated normals") - if toggle6_val : - recalc_norm = 1 - #------Recalculute Normals toggle---------------------------------------------------------------- - Draw.Toggle("no smooth", 12, 220, 105, 75, 25, toggle7_val,"every vertex has the face normal,no smoothing") - if toggle7_val : - no_light = 1 - #------Draw Button export---------------------------------------------------------------- - exp_butt = Draw.Button("Export All",7,20, 155, 75, 30, "export all the scene objects") - sel_butt = Draw.Button("Export Sel",8,120, 155, 75, 30, "export the selected object") - exit_butt = Draw.Button("Exit",9,220, 155, 75, 30, "exit") - glRasterPos2i(20,75) - Draw.Text("(C) 2006 Arben OMARI ") - glRasterPos2i(20,55) - Draw.Text("http://www.omariben.too.it") - glRasterPos2i(20,35) - Draw.Text("aromar@tin.it") - -def rect(x,y,width,height): - glBegin(GL_LINE_LOOP) - glVertex2i(x,y) - glVertex2i(x+width,y) - glVertex2i(x+width,y-height) - glVertex2i(x,y-height) - glEnd() - -def rectFill(x,y,width,height): - glBegin(GL_POLYGON) - glVertex2i(x,y) - glVertex2i(x+width,y) - glVertex2i(x+width,y-height) - glVertex2i(x,y-height) - glEnd() - - - -Draw.Register(draw, event, button_event) - - -#*********************************************** -#*********************************************** -# EXPORTER -#*********************************************** -#*********************************************** - -class xExport: - def __init__(self, filename): - self.file = open(filename, "w") - -#********************************************************************************************************************************************* - #*********************************************** - #Select Scene objects - #*********************************************** - def analyzeScene(self): - parent_list = [] - for obj in 
Blender.Scene.GetCurrent().objects: - if obj.type in ('Mesh', 'Armature', 'Empty'): - if obj.parent == None : - parent_list.append(obj) - - return parent_list - - def getChildren(self,obj): - obs = Blender.Scene.GetCurrent().objects - return [ ob for ob in obs if ob.parent == obj ] - - def getArmChildren(self,obj): - for ob in Blender.Scene.GetCurrent().objects: #Object.Get(): - if ob.parent == obj : - return ob - - def getLocMat(self, obj): - pare = obj.parent - mat = obj.matrixWorld - mat_id = Matrix([1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]) - if pare: - mat_p = pare.matrixWorld - mat_c = Matrix(mat_p) - mat_c.invert() - mat_f = mat * mat_c - else : - mat_id.invert() - mat_f = mat * mat_id - return mat_f - - def writeObjFrames(self,obj): - global space,chld_obj,ch_list - mesh = obj.getData() - if obj.type == "Empty" : - mat = self.getLocMat(obj) - mat_c = Matrix(mat) - self.writeArmFrames(mat_c, make_legal_name(obj.name)) - if type(mesh) == Types.ArmatureType : - Child_obj = self.getArmChildren(obj) - chld_obj = obj - ch_list.append(Child_obj) - self.writeRootBone(obj, Child_obj) - if obj.type == 'Mesh' and obj not in ch_list: - self.exportMesh(obj) - - - def writeChildObj(self,obj): - global space,ch_list - space += 1 - if obj : - for ob in obj: - if ob not in ch_list: - self.writeObjFrames(ob) - ch_list.append(ob) - ch_ob = self.getChildren(ob) - self.writeChildObj(ch_ob) - self.closeBrackets() - self.file.write(" // End of the Object %s \n" % (ob.name)) - - - def writeRootFrame(self): - global flip_z,swap_yz,speed - if speed: - self.writeAnimTicks() - if flip_z: - mat_flip = Matrix([1, 0, 0, 0], [0, 1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]) - else : - mat_flip = Matrix([1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]) - if swap_yz : - mat_rot = RotationMatrix(-90, 4, 'x') - mat_flip = mat_rot * mat_flip - self.writeArmFrames(mat_flip, "RootFrame") - - ################################################################## - def SelectObjs(self): - global space,chld_obj,ch_list,flip_z,swap_yz,speed - print "exporting..." 
- self.writeHeader() - self.writeRootFrame() - obj_list = self.analyzeScene() - space += 1 - ch_list = [] - for obj in obj_list: - self.writeObjFrames(obj) - ch_l = self.getChildren(obj) - for ch in ch_l: - - - if ch and ch.type == "Armature": - ch_list.append(ch) - self.writeObjFrames(ch) - else : - self.writeChildObj(ch_l) - if obj.type != "Armature": - self.file.write(" } // SI End of the Object %s \n" % (obj.name)) - - - - self.file.write("} // End of the Root Frame\n") - if anim : - self.file.write("AnimationSet AnimationSet0 {\n") - for obj in Blender.Scene.GetCurrent().objects: - if obj.type in ('Mesh', 'Empty'): - ip_list = obj.ipo - if ip_list != None : - self.writeAnimationObj(obj) - elif obj.type == 'Armature': - act_list = obj.getAction() - if act_list != None : - self.writeAnimation(obj) - #ip_list = obj.ipo - #if ip_list != None : - # self.writeAnimationObj(obj) - - self.file.write("} // End of Animation Set\n") - self.writeEnd() - ####################################################### - - - def writeAnimTicks(self): - global ticks - self.file.write("AnimTicksPerSecond {\n") - self.file.write("%d; \n" % (ticks)) - self.file.write("}\n") - - #*********************************************** - #Export Mesh without Armature - #*********************************************** - def exportMesh(self, obj): - tex = [] - mesh = obj.getData() - self.writeTextures(obj, tex) - self.writeMeshcoordArm(obj, arm_ob = None) - self.writeMeshMaterialList(obj, mesh, tex) - self.writeMeshNormals(obj, mesh) - self.writeMeshTextureCoords(obj, mesh) - self.writeMeshVertexColors(obj, mesh) - self.file.write(" } // End of the Mesh %s \n" % (obj.name)) - - - #*********************************************** - #Export the Selected Mesh - #*********************************************** - def exportSelMesh(self): - print "exporting ..." 
- self.writeHeader() - self.writeRootFrame() - tex = [] - objs = Object.GetSelected() - for obj in objs: - if obj.type == 'Mesh': - mesh = obj.data - self.writeTextures(obj, tex) - self.writeMeshcoordArm(obj, arm_ob = None) - self.writeMeshMaterialList(obj, mesh, tex) - self.writeMeshNormals(obj, mesh) - self.writeMeshTextureCoords(obj, mesh) - self.writeMeshVertexColors(obj, mesh) - self.file.write(" }\n") - self.file.write("}\n") - ind = objs.index(obj) - if ind == len(objs)-1: - self.file.write("}\n") - ip_list = obj.ipo - if ip_list != None : - self.file.write("AnimationSet AnimationSet0 {\n") - self.writeAnimationObj(obj) - self.file.write("}\n") - else : - print "The selected object is not a mesh" - print "...finished" - #*********************************************** - #Export Mesh with Armature - #*********************************************** - def exportMeshArm(self,arm,arm_ob,ch_obj): - tex = [] - mesh = ch_obj.getData() - self.writeTextures(ch_obj, tex) - self.writeMeshcoordArm(ch_obj ,arm_ob) - self.writeMeshMaterialList(ch_obj, mesh, tex) - self.writeMeshNormals(ch_obj, mesh) - self.writeMeshTextureCoords(ch_obj, mesh) - self.writeSkinWeights(arm,mesh) - #self.file.write(" } // End of the Frame %s \n" % (ch_obj.name)) - self.file.write(" } // End of the Object %s \n" % (ch_obj.name)) - - #*********************************************** - #Export Root Bone - #*********************************************** - def writeRootBone(self, chld_obj, child_obj): - global space,root_bon - arms = chld_obj.getData() - mat_arm = self.getLocMat(chld_obj) - for bon in arms.bones.values(): - if bon.hasParent(): - pass - else: - root_bon = bon - space += 1 - mat_r = self.writeAnimCombineMatrix(root_bon,1) - self.writeArmFrames(mat_r, make_legal_name(root_bon.name)) - - bon_c = root_bon.children - self.writeChildren(bon_c) - self.file.write(" } // End of the Bone %s \n" % (root_bon.name)) - self.exportMeshArm(arms, chld_obj ,child_obj) - - #*********************************************** - #Create Children structure - #*********************************************** - def writeBon(self,bon): - global space - mat_r = self.writeAnimCombineMatrix(bon,1) - self.writeArmFrames(mat_r, make_legal_name(bon.name)) - - - def writeChildren(self,bon_c): - global space,bone_list - space += 1 - if bon_c: - for bo in bon_c: - if bo.name not in bone_list: - self.writeBon(bo) - bone_list.append(bo.name) - bo_c = bo.children - self.writeChildren(bo_c) - self.closeBrackets() - - - - def closeBrackets(self): - global space - space = space-1 - tab = " " - self.file.write("%s" % (tab * space)) - self.file.write("}\n") - - - - #*********************************************** - #Offset Matrix - #*********************************************** - def writeMatrixOffset(self,bon): - global chld_obj - Blender.Set('curframe', 1) - pose = chld_obj.getPose() - pos_b = pose.bones[bon.name] - mat_b = pos_b.poseMatrix - mat_c = Matrix(mat_b) - mat_c.invert() - return mat_c - - - #*********************************************** - #Combine Matrix - #*********************************************** - def writeCombineMatrix(self,bon): - global chld_obj - - Blender.Set('curframe', 1) - pose = chld_obj.getPose() - pos_b = pose.bones[bon.name] - mat_b = pos_b.poseMatrix - if bon.hasParent(): - pare = bon.parent - pos_p = pose.bones[pare.name] - mat_p = pos_p.poseMatrix - - else: - mat_p = Matrix([1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]) - mat_c = Matrix(mat_p) - mat_c.invert() - mat_f = mat_b * mat_c - - return mat_f - 
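# writeAnimCombineMatrix below is the per-frame variant of writeCombineMatrix above:
# after Blender.Set('curframe', fre) it returns bone.poseMatrix * inverse(parent.poseMatrix),
# i.e. the bone's pose relative to its parent (the parent term is the identity for root bones).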
#*********************************************** - #Combine Matrix - #*********************************************** - def writeAnimCombineMatrix(self,bon,fre): - global chld_obj - Blender.Set('curframe', fre) - pose = chld_obj.getPose() - pos_b = pose.bones[bon.name] - mat_b = pos_b.poseMatrix - if bon.hasParent(): - pare = bon.parent - pos_p = pose.bones[pare.name] - mat_p = pos_p.poseMatrix - - else: - mat_p = Matrix([1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]) - mat_c = Matrix(mat_p) - mat_c.invert() - mat_f = mat_b * mat_c - - return mat_f - - -#********************************************************************************************************************************************* - #*********************************************** - #Write SkinWeights - #*********************************************** - def writeSkinWeights(self, arm, mesh): - global index_list - v_dict = {} - Blender.Set('curframe',1) - self.file.write(" XSkinMeshHeader {\n") - max_infl = 0 - for bo in arm.bones.values() : - name = bo.name - try : - vertx_list = mesh.getVertsFromGroup(name,1) - for inde in vertx_list : - vert_infl = mesh.getVertexInfluences(inde[0]) - ln_infl = len(vert_infl) - if ln_infl > max_infl : - max_infl = ln_infl - - except: - pass - - self.file.write(" %d; \n" % (max_infl)) - self.file.write(" %d; \n" % (max_infl * 3)) - self.file.write(" %d; \n" % (len(arm.bones.values()))) - self.file.write(" }\n") - - for bo in arm.bones.values() : - bo_list = [] - weight_list = [] - name = bo.name - f_name = make_legal_name(name) - try : - vert_list = mesh.getVertsFromGroup(name,1) - le = 0 - for indx in vert_list: - ver_infl = mesh.getVertexInfluences(indx[0]) - infl = 0.0 - if len(ver_infl) != 0: - sum = 0.0 - for bone_n, weight in ver_infl: - if bone_n == name: - infl = weight - sum += weight - infl /= sum - - i = -1 - for el in index_list : - i += 1 - if el == indx[0] : - le +=1 - bo_list.append(i) - weight_list.append(infl) - - - self.file.write(" SkinWeights {\n") - self.file.write(' "%s"; \n' % (f_name)) - self.file.write(' %d; \n' % (le)) - count = 0 - for ind in bo_list : - count += 1 - if count == len(bo_list): - self.file.write(" %d; \n" % (ind)) - else : - self.file.write(" %d, \n" % (ind)) - cou = 0 - for wegh in weight_list : - cou += 1 - - if cou == len(weight_list): - self.file.write(" %f; \n" % (round(wegh,6))) - else : - self.file.write(" %f, \n" % (round(wegh,6))) - - - matx = self.writeMatrixOffset(bo) - self.writeOffsFrames(matx, name) - except : - pass - self.file.write(" } // End of XSkinMeshHeader\n") - - - #*********************************************** - # Write Matrices - #*********************************************** - def writeArmFrames(self, matx, name): - global space - tab = " " - self.file.write("%s" % (tab * space)) - self.file.write("Frame ") - self.file.write("%s {\n\n" % (name)) - self.file.write("%s" % (tab * space)) - self.file.write(" FrameTransformMatrix {\n") - self.writeFrame(matx) - - #*********************************************** - # Write Frames - #*********************************************** - def writeOffsFrames(self, matx, name): - space = 1 - self.writeFrame(matx) - - #*********************************************** - # Write Frames - #*********************************************** - def writeFrame(self, matx): - tab = " " - self.file.write("%s" % (tab * space)) - self.file.write(" %f,%f,%f,%f,\n" % - (round(matx[0][0],4),round(matx[0][1],4),round(matx[0][2],4),round(matx[0][3],4))) - self.file.write("%s" % (tab * space)) - 
self.file.write(" %f,%f,%f,%f,\n" % - (round(matx[1][0],4),round(matx[1][1],4),round(matx[1][2],4),round(matx[1][3],4))) - self.file.write("%s" % (tab * space)) - self.file.write(" %f,%f,%f,%f,\n" % - (round(matx[2][0],4),round(matx[2][1],4),round(matx[2][2],4),round(matx[2][3],4))) - self.file.write("%s" % (tab * space)) - self.file.write(" %f,%f,%f,%f;;\n" % - (round(matx[3][0],4),round(matx[3][1],4),round(matx[3][2],4),round(matx[3][3],4))) - self.file.write("%s" % (tab * space)) - self.file.write(" }\n") -#********************************************************************************************************************************************* - - #*********************************************** - #HEADER - #*********************************************** - def writeHeader(self): - self.file.write("xof 0303txt 0032\n\n\n") - self.file.write("template VertexDuplicationIndices { \n\ - \n\ - DWORD nIndices;\n\ - DWORD nOriginalVertices;\n\ - array DWORD indices[nIndices];\n\ -}\n\ -template XSkinMeshHeader {\n\ - <3cf169ce-ff7c-44ab-93c0-f78f62d172e2>\n\ - WORD nMaxSkinWeightsPerVertex;\n\ - WORD nMaxSkinWeightsPerFace;\n\ - WORD nBones;\n\ -}\n\ -template SkinWeights {\n\ - <6f0d123b-bad2-4167-a0d0-80224f25fabb>\n\ - STRING transformNodeName;\n\ - DWORD nWeights;\n\ - array DWORD vertexIndices[nWeights];\n\ - array float weights[nWeights];\n\ - Matrix4x4 matrixOffset;\n\ -}\n\n") - - #*********************************************** - #CLOSE FILE - #*********************************************** - def writeEnd(self): - self.file.close() - print "... finished" - - - #*********************************************** - #EXPORT TEXTURES - #*********************************************** - def writeTextures(self,name, tex): - mesh = name.data - for face in mesh.faces: - if face.image and face.image.name not in tex: - tex.append(face.image.name) - - - - #*********************************************** - #EXPORT MESH DATA with Armature - #*********************************************** - def writeMeshcoordArm(self, obj ,arm_ob): - global index_list,flip_z - #TransformMatrix - mat = self.getLocMat(obj) - self.writeArmFrames(mat, make_legal_name(obj.name)) - mesh = NMesh.GetRawFromObject(obj.name) - self.file.write("Mesh {\n") - numface=len(mesh.faces) - #VERTICES NUMBER - numvert = 0 - for face in mesh.faces: - numvert = numvert + len(face.v) - self.file.write("%d;\n" % (numvert)) - if numvert == 0: - print "Mesh named",mesh.name,"has no vertices.Problems may occur using the .x file" - #VERTICES COORDINATES - counter = 0 - for face in mesh.faces: - counter += 1 - for n in range(len(face.v)): - index_list.append(face.v[n].index) - vec_vert = Vector([(face.v[n].co[0]), face.v[n].co[1], face.v[n].co[2], 1]) - if arm_ob : - f_vec_vert = vec_vert * mat - else : - f_vec_vert = vec_vert - self.file.write("%f; %f; %f;" % (round(f_vec_vert[0],4), round(f_vec_vert[1],4), round(f_vec_vert[2],4))) - if counter == numface : - if n == len(face.v)-1 : - self.file.write(";\n") - else : - self.file.write(",\n") - else : - self.file.write(",\n") - if flip_z: - a3 = 0;b3 = 2;c3 = 1 - a4 = 0;b4 = 3;c4 = 2;d4 = 1 - else: - a3 = 0;b3 = 1;c3 = 2 - a4 = 0;b4 = 1;c4 = 2;d4 = 3 - - #FACES NUMBER - self.file.write("%s;\n" % (numface)) - coun,counter = 0, 0 - for face in mesh.faces : - coun += 1 - separator = ',' - if coun == numface: - separator = ';' - if len(face.v) == 3: - self.file.write("3; %d, %d, %d;%c\n" % (counter + a3, counter + b3, counter + c3, separator)) - counter += 3 - elif len(face.v) == 4: - 
self.file.write("4; %d, %d, %d, %d;%c\n" % (counter + a4, counter + b4, counter + c4, counter + d4, separator)) - counter += 4 - elif len(face.v) < 3: - print "WARNING:the mesh has faces with less then 3 vertices" - print " It my be not exported correctly." - - - #*********************************************** - #MESH MATERIAL LIST - #*********************************************** - def writeMeshMaterialList(self, obj, mesh, tex): - self.file.write(" MeshMaterialList {\n") - #HOW MANY MATERIALS ARE USED - count = 0 - for mat in mesh.getMaterials(): - count+=1 - self.file.write(" %d;\n" % (len(tex) + count)) - #HOW MANY FACES IT HAS - numfaces=len(mesh.faces) - self.file.write(" %d;\n" % (numfaces)) - ##MATERIALS INDEX FOR EVERY FACE - counter = 0 - for face in mesh.faces : - counter += 1 - mater = face.materialIndex - if counter == numfaces: - if face.image and face.image.name in tex : - self.file.write(" %d;;\n" % (tex.index(face.image.name) + count)) - else : - self.file.write(" %d;;\n" % (mater)) - else : - if face.image and face.image.name in tex : - self.file.write(" %d,\n" % (tex.index(face.image.name) + count)) - else : - self.file.write(" %d,\n" % (mater)) - - ##MATERIAL NAME - for mat in mesh.getMaterials(): - self.file.write(" Material") - self.file.write(" %s "% (make_legal_name(mat.name))) - self.file.write("{\n") - self.file.write(" %f; %f; %f;" % (mat.R, mat.G, mat.B)) - self.file.write("%s;;\n" % (mat.alpha)) - self.file.write(" %f;\n" % (mat.spec)) - self.file.write(" %f; %f; %f;;\n" % (mat.specR, mat.specG, mat.specB)) - self.file.write(" 0.0; 0.0; 0.0;;\n") - self.file.write(" } //End of Material\n") - - for mat in tex: - self.file.write(" Material Mat") - self.file.write("%s "% (len(tex))) - self.file.write("{\n") - self.file.write(" 1.0; 1.0; 1.0; 1.0;;\n") - self.file.write(" 1.0;\n") - self.file.write(" 1.0; 1.0; 1.0;;\n") - self.file.write(" 0.0; 0.0; 0.0;;\n") - self.file.write(" TextureFilename {") - self.file.write(' "%s";'% (mat)) - self.file.write(" }\n") - self.file.write(" } // End of Material\n") - self.file.write(" } //End of MeshMaterialList\n") - - #*********************************************** - #MESH NORMALS - #*********************************************** - def writeMeshNormals(self,name,mesh): - global flip_norm,flip_z,no_light,recalc_norm,Bl_norm - - self.file.write(" MeshNormals {\n") - #VERTICES NUMBER - numvert = 0 - for face in mesh.faces: - numvert = numvert + len(face.v) - self.file.write("%d;\n" % (numvert)) - numfaces=len(mesh.faces) - if flip_norm : - fl = -1 - else : - fl = 1 - #VERTICES NORMAL - if Bl_norm: - self.writeBlenderNormals(mesh,fl) - if recalc_norm: - self.writeRecalcNormals(mesh,fl) - if no_light: - self.writeNoSmothing(mesh,fl) - - - - if flip_z: - a3 = 0;b3 = 2;c3 = 1 - a4 = 0;b4 = 3;c4 = 2;d4 = 1 - else: - a3 = 0;b3 = 1;c3 = 2 - a4 = 0;b4 = 1;c4 = 2;d4 = 3 - - #FACES NUMBER - self.file.write("%s;\n" % (numfaces)) - coun,counter = 0, 0 - for face in mesh.faces : - coun += 1 - if coun == numfaces: - if len(face.v) == 3: - self.file.write("3; %d, %d, %d;;\n" % (counter + a3, counter + b3, counter + c3)) - counter += 3 - else : - self.file.write("4; %d, %d, %d, %d;;\n" % (counter + a4, counter + b4, counter + c4, counter + d4)) - counter += 4 - else: - - if len(face.v) == 3: - self.file.write("3; %d, %d, %d;,\n" % (counter + a3, counter + b3, counter + c3)) - counter += 3 - else : - self.file.write("4; %d, %d, %d, %d;,\n" % (counter + a4, counter + b4, counter + c4, counter + d4)) - counter += 4 - self.file.write("} //End 
of MeshNormals\n") - - def writeBlenderNormals(self,mesh,fl): - numfaces=len(mesh.faces) - #VERTICES NORMAL - counter = 0 - for face in mesh.faces: - counter += 1 - for n in range(len(face.v)): - self.file.write(" %f; %f; %f;" % ( - (round(face.v[n].no[0],6)*fl),(round(face.v[n].no[1],6)*fl),(round(face.v[n].no[2],6)*fl))) - if counter == numfaces : - if n == len(face.v)-1 : - self.file.write(";\n") - else : - self.file.write(",\n") - else : - self.file.write(",\n") - - def writeRecalcNormals(self,mesh,fl): - numfaces=len(mesh.faces) - normal_list = {} - idx = 0 - for vertex in mesh.verts: - v_norm = Vector([0, 0, 0]) - normal_list[idx] = v_norm - idx += 1 - for face in mesh.faces: - for verts in face.v: - if verts.index == vertex.index : - v_norm[0] += face.no[0] - v_norm[1] += face.no[1] - v_norm[2] += face.no[2] - - v_norm.normalize() - - counter = 0 - for face in mesh.faces: - counter += 1 - n = 0 - for vert in face.v: - n += 1 - norm = normal_list[vert.index] - - self.file.write(" %f; %f; %f;" % ( - (round(norm[0],6)*fl),(round(norm[1],6)*fl),(round(norm[2],6)*fl))) - if counter == numfaces : - if n == len(face.v) : - self.file.write(";\n") - else : - self.file.write(",\n") - else : - self.file.write(",\n") - - def writeNoSmothing(self,mesh,fl): - numfaces=len(mesh.faces) - counter = 0 - for face in mesh.faces: - counter += 1 - n = 0 - for n in range(len(face.v)): - n += 1 - self.file.write(" %f; %f; %f;" % ( - (round(face.no[0],6)*fl),(round(face.no[1],6)*fl),(round(face.no[2],6)*fl))) - - - if counter == numfaces : - if n == len(face.v) : - self.file.write(";\n") - else : - self.file.write(",\n") - else : - self.file.write(",\n") - #*********************************************** - #MESH TEXTURE COORDS - #*********************************************** - def writeMeshTextureCoords(self, name, mesh): - if mesh.hasFaceUV(): - self.file.write("MeshTextureCoords {\n") - #VERTICES NUMBER - numvert = 0 - for face in mesh.faces: - numvert += len(face.v) - self.file.write("%d;\n" % (numvert)) - #UV COORDS - numfaces = len(mesh.faces) - counter = -1 - co = 0 - for face in mesh.faces: - counter += 1 - co += 1 - for n in range(len(face.v)): - self.file.write("%f;%f;" % (mesh.faces[counter].uv[n][0], -mesh.faces[counter].uv[n][1])) - if co == numfaces : - if n == len(face.v) - 1 : - self.file.write(";\n") - else : - self.file.write(",\n") - else : - self.file.write(",\n") - - self.file.write("} //End of MeshTextureCoords\n") - - #*********************************************** - #MESH VORTEX COLORS - #*********************************************** - def writeMeshVertexColors(self, name, mesh): - if mesh.hasVertexColours(): - self.file.write("MeshVertexColors {\n") - #VERTICES NUMBER - numvert = reduce( lambda i,f: len(f)+i, mesh.faces, 0) - self.file.write("%d;\n" % (numvert)) - #VERTEX COLORS - - vcounter =0 - for f in mesh.faces: - col = f.col - for i,c in enumerate(col): - # Note vcol alpha has no meaning - self.file.write("%d;%f;%f;%f;%f;" % (vcounter,c.r/255.0, c.g/255.0, c.b/255.0, 1.0)) # c.a/255.0)) - vcounter+=1 - if vcounter == numvert : - self.file.write(";\n") - else : - self.file.write(",\n") - - self.file.write("} //End of MeshVertexColors\n") - -#***********************************************#***********************************************#*********************************************** - #*********************************************** - #FRAMES - #*********************************************** - def writeFrames(self, matx): - - self.file.write("%f,%f,%f,%f," % - 
(round(matx[0][0],4),round(matx[0][1],4),round(matx[0][2],4),round(matx[0][3],4))) - self.file.write("%f,%f,%f,%f," % - (round(matx[1][0],4),round(matx[1][1],4),round(matx[1][2],4),round(matx[1][3],4))) - self.file.write("%f,%f,%f,%f," % - (round(matx[2][0],4),round(matx[2][1],4),round(matx[2][2],4),round(matx[2][3],4))) - self.file.write("%f,%f,%f,%f;;" % - (round(matx[3][0],4),round(matx[3][1],4),round(matx[3][2],4),round(matx[3][3],4))) - - - - - - #*********************************************** - #WRITE ANIMATION KEYS - #*********************************************** - def writeAnimation(self,arm_ob): - global mat_dict, root_bon - arm = arm_ob.getData() - act_list = arm_ob.getAction() - ip = act_list.getAllChannelIpos() - for bon in arm.bones.values() : - point_list = [] - name = bon.name - name_f = make_legal_name(name) - try : - ip_bon_channel = ip[bon.name] - ip_bon_name = ip_bon_channel.getName() - - ip_bon = Blender.Ipo.Get(ip_bon_name) - poi = ip_bon.getCurves() - - for po in poi[3].getPoints(): - a = po.getPoints() - point_list.append(int(a[0])) - #point_list.pop(0) - - self.file.write(" Animation { \n") - self.file.write(" { %s }\n" %(name_f)) - self.file.write(" AnimationKey { \n") - self.file.write(" 4;\n") - self.file.write(" %d; \n" % (len(point_list))) - - for fr in point_list: - - if name == root_bon.name : - - - mat_b = self.writeAnimCombineMatrix(bon,fr) - mat_arm = self.getLocMat(arm_ob) - mat = mat_b * mat_arm - else: - mat = self.writeAnimCombineMatrix(bon,fr) - - self.file.write(" %d;" % (fr)) - self.file.write("16;") - - self.writeFrames(mat) - - if fr == point_list[len(point_list)-1]: - self.file.write(";\n") - else: - self.file.write(",\n") - self.file.write(" }\n") - self.file.write(" }\n") - self.file.write("\n") - except: - pass - - - - #*********************************************** - #WRITE ANIMATION KEYS - #*********************************************** - def writeAnimationObj(self, obj): - point_list = [] - ip = obj.ipo - poi = ip.getCurves() - for po in poi[0].getPoints(): - a = po.getPoints() - point_list.append(int(a[0])) - - self.file.write(" Animation {\n") - self.file.write(" { ") - self.file.write("%s }\n" % (make_legal_name(obj.name))) - self.file.write(" AnimationKey { \n") - self.file.write(" 4;\n") - self.file.write(" %d; \n" % (len(point_list))) - for fr in point_list: - self.file.write(" %d;" % (fr)) - self.file.write("16;") - Blender.Set('curframe',fr) - - #mat_new = self.getLocMat(obj) - mat_new = obj.matrixLocal - self.writeFrames(mat_new) - - if fr == point_list[len(point_list)-1]: - self.file.write(";\n") - else: - self.file.write(",\n") - self.file.write(" }\n") - self.file.write(" }\n") - - - -#***********************************************#***********************************************#*********************************************** - - - - - diff --git a/release/scripts/DirectX8Importer.py b/release/scripts/DirectX8Importer.py deleted file mode 100644 index 0dda654944d..00000000000 --- a/release/scripts/DirectX8Importer.py +++ /dev/null @@ -1,238 +0,0 @@ -#!BPY - -""" Registration info for Blender menus: -Name: 'DirectX(.x)...' -Blender: 244 -Group: 'Import' - -Tip: 'Import from DirectX text file format format.' 
-""" -# DirectXImporter.py version 1.2 -# Copyright (C) 2005 Arben OMARI -- omariarben@everyday.com -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. - -# This script import meshes from DirectX text file format - -# Grab the latest version here :www.omariben.too.it -import bpy -import Blender -from Blender import Mesh,Object,Material,Texture,Image,Draw - - -class xImport: - def __init__(self, filename): - global my_path - self.file = open(filename, "r") - my_path = Blender.sys.dirname(filename) - - # - self.lines = [l_split for l in self.file.readlines() for l_split in (' '.join(l.split()),) if l_split] - - def Import(self): - lines = self.lines - print "importing into Blender ..." - scene = bpy.data.scenes.active - - mesh_indicies = {} # the index of each 'Mesh' is used as the key for those meshes indicies - context_indicies = None # will raise an error if used! - - - #Get the line of Texture Coords - nr_uv_ind = 0 - - #Get Materials - nr_fac_mat = 0 - i = -1 - mat_list = [] - tex_list = [] - mesh_line_indicies = [] - for j, line in enumerate(lines): - l = line.strip() - words = line.split() - if words[0] == "Material" : - #context_indicies["Material"] = j - self.loadMaterials(j, mat_list, tex_list) - elif words[0] == "MeshTextureCoords" : - context_indicies["MeshTextureCoords"] = j - #nr_uv_ind = j - elif words[0] == "MeshMaterialList" : - context_indicies["MeshMaterialList"] = j+2 - #nr_fac_mat = j + 2 - elif words[0] == "Mesh": # Avoid a second loop - context_indicies = mesh_indicies[j] = {'MeshTextureCoords':0, 'MeshMaterialList':0} - - for mesh_index, value in mesh_indicies.iteritems(): - mesh = Mesh.New() - self.loadVertices(mesh_index, mesh, value['MeshTextureCoords'], value['MeshMaterialList'], tex_list) - - mesh.materials = mat_list[:16] - if value['MeshMaterialList']: - self.loadMeshMaterials(value['MeshMaterialList'], mesh) - scene.objects.new(mesh) - - self.file.close() - print "... 
finished" - - #------------------------------------------------------------------------------ - # CREATE THE MESH - #------------------------------------------------------------------------------ - def loadVertices(self, nr_vr_ind, mesh, nr_uv, nr_fac_mat, tex_list): - v_ind = nr_vr_ind + 1 - lin = self.lines[v_ind] - if lin : - lin_c = self.CleanLine(lin) - nr_vert = int((lin_c.split()[0])) - else : - v_ind = nr_vr_ind + 2 - lin = self.lines[v_ind] - lin_c = self.CleanLine(lin) - nr_vert = int((lin_c.split()[0])) - - #-------------------------------------------------- - nr_fac_li = v_ind + nr_vert +1 - lin_f = self.lines[nr_fac_li] - if lin_f : - lin_fc = self.CleanLine(lin_f) - nr_face = int((lin_fc.split()[0])) - else : - nr_fac_li = v_ind + nr_vert +1 - lin_f = self.lines[nr_fac_li] - lin_fc = self.CleanLine(lin_f) - nr_face = int((lin_fc.split()[0])) - - #Get Coordinates - verts_list = [(0,0,0)] # WARNING - DUMMY VERT - solves EEKADOODLE ERROR - for l in xrange(v_ind + 1, (v_ind + nr_vert +1)): - line_v = self.lines[l] - lin_v = self.CleanLine(line_v) - words = lin_v.split() - if len(words)==3: - verts_list.append((float(words[0]),float(words[1]),float(words[2]))) - - mesh.verts.extend(verts_list) - del verts_list - - face_list = [] - #Make Faces - i = 0 - mesh_verts = mesh.verts - for f in xrange(nr_fac_li + 1, (nr_fac_li + nr_face + 1)): - i += 1 - line_f = self.lines[f] - lin_f = self.CleanLine(line_f) - - # +1 for dummy vert only! - words = lin_f.split() - if len(words) == 5: - face_list.append((1+int(words[1]), 1+int(words[2]), 1+int(words[3]), 1+int(words[4]))) - elif len(words) == 4: - face_list.append((1+int(words[1]), 1+int(words[2]), 1+int(words[3]))) - - mesh.faces.extend(face_list) - del face_list - - if nr_uv : - mesh.faceUV = True - for f in mesh.faces: - fuv = f.uv - for ii, v in enumerate(f): - # _u, _v = self.CleanLine(self.lines[nr_uv + 2 + v.index]).split() - - # Use a dummy vert - _u, _v = self.CleanLine(self.lines[nr_uv + 1 + v.index]).split() - - fuv[ii].x = float(_u) - fuv[ii].y = float(_v) - - if nr_fac_mat : - fac_line = self.lines[nr_fac_mat + i] - fixed_fac = self.CleanLine(fac_line) - w_tex = int(fixed_fac.split()[0]) - f.image = tex_list[w_tex] - - # remove dummy vert - mesh.verts.delete([0,]) - - def CleanLine(self,line): - return line.replace(\ - ";", " ").replace(\ - '"', ' ').replace(\ - "{", " ").replace(\ - "}", " ").replace(\ - ",", " ").replace(\ - "'", " ") - - #------------------------------------------------------------------ - # CREATE MATERIALS - #------------------------------------------------------------------ - def loadMaterials(self, nr_mat, mat_list, tex_list): - - def load_image(name): - try: - return Image.Load(Blender.sys.join(my_path,name)) - except: - return None - - mat = bpy.data.materials.new() - line = self.lines[nr_mat + 1] - fixed_line = self.CleanLine(line) - words = fixed_line.split() - mat.rgbCol = [float(words[0]),float(words[1]),float(words[2])] - mat.setAlpha(float(words[3])) - mat_list.append(mat) - l = self.lines[nr_mat + 5] - fix_3_line = self.CleanLine(l) - tex_n = fix_3_line.split() - - if tex_n and tex_n[0] == "TextureFilename" : - - if len(tex_n) > 1: - tex_list.append(load_image(tex_n[1])) - - if len(tex_n) <= 1 : - - l_succ = self.lines[nr_mat + 6] - fix_3_succ = self.CleanLine(l_succ) - tex_n_succ = fix_3_succ.split() - tex_list.append(load_image(tex_n_succ[0])) - else : - tex_list.append(None) # no texture for this index - - return mat_list, tex_list - 
#------------------------------------------------------------------ - # SET MATERIALS - #------------------------------------------------------------------ - def loadMeshMaterials(self, nr_fc_mat, mesh): - for face in mesh.faces: - nr_fc_mat += 1 - line = self.lines[nr_fc_mat] - fixed_line = self.CleanLine(line) - wrd = fixed_line.split() - mat_idx = int(wrd[0]) - face.mat = mat_idx - -#------------------------------------------------------------------ -# MAIN -#------------------------------------------------------------------ -def my_callback(filename): - if not filename.lower().endswith('.x'): print "Not an .x file" - ximport = xImport(filename) - ximport.Import() - -arg = __script__['arg'] - -if __name__ == '__main__': - Blender.Window.FileSelector(my_callback, "Import DirectX", "*.x") - -#my_callback('/fe/x/directxterrain.x') -#my_callback('/fe/x/Male_Normal_MAX.X') -#my_callback('/fe/x/male_ms3d.x') diff --git a/release/scripts/IDPropBrowser.py b/release/scripts/IDPropBrowser.py deleted file mode 100644 index 2a14760270a..00000000000 --- a/release/scripts/IDPropBrowser.py +++ /dev/null @@ -1,523 +0,0 @@ -#!BPY - -""" -Name: 'ID Property Browser' -Blender: 242 -Group: 'Help' -Tooltip: 'Browse ID properties' -""" - -__author__ = "Joe Eagar" -__version__ = "0.3.108" -__email__ = "joeedh@gmail.com" -__bpydoc__ = """\ - -Allows browsing, creating and editing of ID Properties -for various ID block types such as mesh, scene, object, -etc. -""" - -# -------------------------------------------------------------------------- -# ID Property Browser. -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
-# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - - -from Blender import * -from Blender.BGL import * -from Blender.Types import IDGroupType, IDArrayType -import Blender - -def IsInRectWH(mx, my, x, y, wid, hgt): - if mx >= x and mx <= x + wid: - if my >= y and my <= y + hgt: - return 1 - return 0 - -Button_Back = 1 -Button_New = 2 -Button_MatMenu = 3 -Button_TypeMenu = 4 - -ButStart = 55 - -IDP_String = 0 -IDP_Int = 1 -IDP_Float = 2 -IDP_Array = 5 -IDP_Group = 6 - -ButDelStart = 255 -#max limit for string input button -strmax = 100 - -State_Normal = 0 -State_InArray = 1 - -#IDTypeModules entries are of form [module, active_object_index, module_name] -IDTypeModules = [[Scene, 0, "Scenes"], [Object, 0, "Objects"], [Mesh, 0, "Meshes"]] -IDTypeModules += [[Material, 0, "Materials"], [Texture, 0, "Textures"]] -IDTypeModules += [[Image, 0, "Images"]] - -class IDArrayBrowser: - array = 0 - parentbrowser = 0 - buts = 0 - - def __init__(self): - self.buts = [] - - def Draw(self): - pb = self.parentbrowser - x = pb.x - y = pb.y - width = pb.width - height = pb.height - pad = pb.pad - itemhgt = pb.itemhgt - cellwid = 65 - y = y + height - itemhgt - pad - - Draw.PushButton("Back", Button_Back, x, y, 40, 20) - y -= itemhgt + pad - - self.buts = [] - Draw.BeginAlign() - for i in xrange(len(self.array)): - st = "" - if type(self.array[0]) == float: - st = "%.5f" % self.array[i] - else: st = str(self.array[i]) - - b = Draw.String("", ButStart+i, x, y, cellwid, itemhgt, st, 30) - self.buts.append(b) - x += cellwid + pad - if x + cellwid + pad > width: - x = 0 - y -= itemhgt + pad - Draw.EndAlign() - def Button(self, bval): - if bval == Button_Back: - self.parentbrowser.state = State_Normal - self.parentbrowser.array = 0 - self.buts = [] - Draw.Draw() - self.array = 0 - elif bval >= ButStart: - i = bval - ButStart - st = self.buts[i].val - n = 0 - if type(self.array[0]) == float: - try: - n = int(st) - except: - return - elif type(self.array[0]) == int: - try: - n = float(st) - except: - return - - self.array[i] = n - Draw.Draw() - - def Evt(self, evt, val): - if evt == Draw.ESCKEY: - Draw.Exit() - -class IDPropertyBrowser: - width = 0 - height = 0 - x = 0 - y = 0 - scrollx = 0 - scrolly = 0 - itemhgt = 22 - pad = 2 - - group = 0 - parents = 0 #list stack of parent groups - active_item = -1 - mousecursor = 0 - _i = 0 - buts = [] - - state = 0 - array = 0 - prop = 0 - - IDList = 0 - idindex = 0 - idblock = 0 - - type = 0 # attach buildin type() method to class - # since oddly it's not available to button - # callbacks! EEK! 
:( - - def __init__(self, idgroup, mat, x, y, wid, hgt): - self.group = idgroup - self.prop = idgroup - self.x = x - self.y = y - self.width = wid - self.height = hgt - self.mousecursor = [0, 0] - self.parents = [] - self.idblock = mat - self.type = type - - def DrawBox(self, glmode, x, y, width, height): - glBegin(glmode) - glVertex2f(x, y) - glVertex2f(x+width, y) - glVertex2f(x+width, y+height) - glVertex2f(x, y+height) - glEnd() - - def Draw(self): - global IDTypeModules - - #first draw outlining box :) - glColor3f(0, 0, 0) - self.DrawBox(GL_LINE_LOOP, self.x, self.y, self.width, self.height) - - itemhgt = self.itemhgt - pad = self.pad - x = self.x - y = self.y + self.height - itemhgt - pad - - if self.state == State_InArray: - self.array.Draw() - return - - plist = [] - self.buts = [] - for p in self.group.iteritems(): - plist.append(p) - - #-------do top buttons----------# - Draw.BeginAlign() - Draw.PushButton("New", Button_New, x, y, 40, 20) - x += 40 + pad - #do the menu button for all materials - st = "" - - blocks = IDTypeModules[self.IDList][0].Get() - i = 1 - mi = 0 - for m in blocks: - if m.name == self.idblock.name: - mi = i - st += m.name + " %x" + str(i) + "|" - i += 1 - - self.menubut = Draw.Menu(st, Button_MatMenu, x, y, 100, 20, mi) - - x += 100 + pad - - st = "" - i = 0 - for e in IDTypeModules: - st += e[2] + " %x" + str(i+1) + "|" - i += 1 - - cur = self.IDList + 1 - self.idmenu = Draw.Menu(st, Button_TypeMenu, x, y, 100, 20, cur) - x = self.x - y -= self.itemhgt + self.pad - Draw.EndAlign() - - - #-----------do property items---------# - i = 0 - while y > self.y - 20 - pad and i < len(plist): - k = plist[i][0] - p = plist[i][1] - if i == self.active_item: - glColor3f(0.5, 0.4, 0.3) - self.DrawBox(GL_POLYGON, x+pad, y, self.width-pad*2, itemhgt) - - glColor3f(0, 0, 0) - self.DrawBox(GL_LINE_LOOP, x+pad, y, self.width-pad*2, itemhgt) - - glRasterPos2f(x+pad*2, y+5) - Draw.Text(str(k)) #str(self.mousecursor) + " " + str(self.active_item)) #p.name) - tlen = Draw.GetStringWidth(str(k)) - - type_p = type(p) - if type_p == str: - b = Draw.String("", ButStart+i, x+pad*5+tlen, y, 200, itemhgt, p, strmax) - self.buts.append(b) - elif type_p in [int, float]: - #only do precision to 5 points on floats - st = "" - if type_p == float: - st = "%.5f" % p - else: st = str(p) - b = Draw.String("", ButStart+i, x+pad*5+tlen, y, 75, itemhgt, st, strmax) - self.buts.append(b) - else: - glRasterPos2f(x+pad*2 +tlen+10, y+5) - if type_p == Types.IDArrayType: - Draw.Text('(array, click to edit)') - elif type_p == Types.IDGroupType: - Draw.Text('(group, click to edit)') - - - self.buts.append(None) - - Draw.PushButton("Del", ButDelStart+i, x+self.width-35, y, 30, 20) - - i += 1 - y -= self.itemhgt + self.pad - - if len(self.parents) != 0: - Draw.PushButton("Back", Button_Back, x, y, 40, 20) - x = x + 40 + pad - - def SetActive(self): - m = self.mousecursor - itemhgt = self.itemhgt - pad = self.pad - - x = self.x + pad - y = self.y + self.height - itemhgt - pad - itemhgt - - plist = [] - for p in self.group.iteritems(): - plist.append(p) - - self.active_item = -1 - i = 0 - while y > self.y and i < len(plist): - p = plist[i] - if IsInRectWH(m[0], m[1], x, y, self.width-pad, itemhgt): - self.active_item = i - - i += 1 - y -= self.itemhgt + self.pad - - def EventIn(self, evt, val): - if self.state == State_InArray: - self.array.Evt(evt, val) - - if evt == Draw.ESCKEY: - Draw.Exit() - if evt == Draw.MOUSEX or evt == Draw.MOUSEY: - size = Buffer(GL_FLOAT, 4) - glGetFloatv(GL_SCISSOR_BOX, size) - if 
evt == Draw.MOUSEX: - self.mousecursor[0] = val - size[0] - else: - self.mousecursor[1] = val - size[1] - del size - - self.SetActive() - self._i += 1 - if self._i == 5: - Draw.Draw() - self._i = 0 - - - if evt == Draw.LEFTMOUSE and val == 1: - plist = list(self.group.iteritems()) - a = self.active_item - if a >= 0 and a < len(plist): - p = plist[a] - - basictypes = [IDGroupType, float, str, int] - if type(p[1]) == IDGroupType: - self.parents.append(self.group) - self.group = p[1] - self.active_item = -1 - Draw.Draw() - elif type(p[1]) == IDArrayType: - self.array = IDArrayBrowser() - self.array.array = p[1] - self.array.parentbrowser = self - self.state = State_InArray - Draw.Draw() - - if evt == Draw.TKEY and val == 1: - try: - self.prop['float'] = 0.0 - self.prop['int'] = 1 - self.prop['string'] = "hi!" - self.prop['float array'] = [0, 0, 1.0, 0] - self.prop['int array'] = [0, 0, 0, 0] - self.prop.data['a subgroup'] = {"int": 0, "float": 0.0, "anothergroup": {"a": 0.0, "intarr": [0, 0, 0, 0]}} - Draw.Draw() - except: - Draw.PupMenu("Can only do T once per block, the test names are already taken!") - - - def Button(self, bval): - global IDTypeModules - if self.state == State_InArray: - self.array.Button(bval) - return - - if bval == Button_MatMenu: - global IDTypeModules - - val = self.idindex = self.menubut.val - 1 - i = self.IDList - block = IDTypeModules[i][0].Get()[val] - self.idblock = block - self.prop = block.properties - self.group = self.prop - self.active_item = -1 - self.parents = [] - Draw.Draw() - - if bval == Button_TypeMenu: - i = IDTypeModules[self.idmenu.val-1] - if len(i[0].Get()) == 0: - Draw.PupMenu("Error%t|There are no " + i[2] + "!") - return - - IDTypeModules[self.IDList][1] = self.idindex - self.IDList = self.idmenu.val-1 - val = self.idindex = IDTypeModules[self.IDList][1] - i = self.IDList - block = IDTypeModules[i][0].Get()[val] - self.idblock = block - self.prop = block.properties - self.group = self.prop - self.active_item = -1 - self.parents = [] - Draw.Draw() - - if bval >= ButDelStart: - plist = [p for p in self.group] - prop = plist[bval - ButDelStart] - del self.group[prop] - Draw.Draw() - - elif bval >= ButStart: - plist = list(self.group.iteritems()) - - prop = plist[bval - ButStart] - print prop - - if self.type(prop[1]) == str: - self.group[prop[0]] = self.buts[bval - ButStart].val - elif self.type(prop[1]) == int: - i = self.buts[bval - ButStart].val - try: - i = int(i) - self.group[prop[0]] = i - except: - Draw.Draw() - return - Draw.Draw() - elif self.type(prop[1]) == float: - f = self.buts[bval - ButStart].val - try: - f = float(f) - self.group[prop[0]] = f - except: - Draw.Draw() - return - Draw.Draw() - - elif bval == Button_Back: - self.group = self.parents[len(self.parents)-1] - self.parents.pop(len(self.parents)-1) - Draw.Draw() - - elif bval == Button_New: - name = Draw.Create("untitled") - stype = Draw.Create(0) - gtype = Draw.Create(0) - ftype = Draw.Create(0) - itype = Draw.Create(0) - atype = Draw.Create(0) - - block = [] - block.append(("Name: ", name, 0, 30, "Click to type in the name of the new ID property")) - block.append("Type") - block.append(("String", stype)) - block.append(("Subgroup", gtype)) - block.append(("Float", ftype)) - block.append(("Int", itype)) - block.append(("Array", atype)) - - retval = Blender.Draw.PupBlock("New IDProperty", block) - if retval == 0: return - - name = name.val - i = 1 - stop = 0 - while stop == 0: - stop = 1 - for p in self.group: - if p == name: - d = name.rfind(".") - if d != -1: - name = 
name[:d] - name = name + "." + str(i).zfill(3) - i += 1 - stop = 0 - - type = "String" - if stype.val: - self.group[name] = "" - elif gtype.val: - self.group[name] = {} - elif ftype.val: - self.group[name] = 0.0 - elif itype.val: - self.group[name] = 0 #newProperty("Int", name, 0) - elif atype.val: - arrfloat = Draw.Create(1) - arrint = Draw.Create(0) - arrlen = Draw.Create(3) - block = [] - block.append("Type") - block.append(("Float", arrfloat, "Make a float array")) - block.append(("Int", arrint, "Make an integer array")) - block.append(("Len", arrlen, 2, 200)) - - if Blender.Draw.PupBlock("Array Properties", block): - if arrfloat.val: - tmpl = 0.0 - elif arrint.val: - tmpl = 0 - else: - return - - self.group[name] = [tmpl] * arrlen.val - - - def Go(self): - Draw.Register(self.Draw, self.EventIn, self.Button) - -scenes = Scene.Get() - -size = Window.GetAreaSize() -browser = IDPropertyBrowser(scenes[0].properties, scenes[0], 2, 2, size[0], size[1]) -browser.Go() - -#a = prop.newProperty("String", "hwello!", "bleh") -#b = prop.newProperty("Group", "subgroup") - -#for p in prop: - #print p.name diff --git a/release/scripts/ac3d_export.py b/release/scripts/ac3d_export.py deleted file mode 100644 index 57f27c7e3a2..00000000000 --- a/release/scripts/ac3d_export.py +++ /dev/null @@ -1,828 +0,0 @@ -#!BPY - -""" Registration info for Blender menus: -Name: 'AC3D (.ac)...' -Blender: 243 -Group: 'Export' -Tip: 'Export selected meshes to AC3D (.ac) format' -""" - -__author__ = "Willian P. Germano" -__url__ = ("blender", "blenderartists.org", "AC3D's homepage, http://www.ac3d.org", - "PLib 3d gaming lib, http://plib.sf.net") -__version__ = "2.44 2007-05-05" - -__bpydoc__ = """\ -This script exports selected Blender meshes to AC3D's .ac file format. - -AC3D is a simple commercial 3d modeller also built with OpenGL. -The .ac file format is an easy to parse text format well supported, -for example, by the PLib 3d gaming library (AC3D 3.x). - -Supported:
- UV-textured meshes with hierarchy (grouping) information. - -Missing:
- The 'url' tag, specific to AC3D. It is easy to add by hand to the exported -file, if needed. - -Known issues:
- The ambient and emit data we can retrieve from Blender are single values, -that this script copies to R, G, B, giving shades of gray.
- Loose edges (lines) receive the first material found in the mesh, if any, or a default white material.
- In AC3D 4 "compatibility mode":
- - shininess of materials is taken from the shader specularity value in Blender, mapped from [0.0, 2.0] to [0, 128];
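As a rough illustration of that mapping, a minimal sketch in BPy-era Python (the exporter itself simply writes int(M.spec * 64); the clamp here is an extra safeguard added for illustration, not taken from the script):

def spec_to_ac3d_shi(spec):
    # Blender shader specularity lives in [0.0, 2.0]; AC3D's 'shi' value lives in [0, 128]
    shi = int(spec * 64)
    return max(0, min(128, shi))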
- - crease angle is exported, but in Blender it is limited to [1, 80], since there are other more powerful ways to control surface smoothing. In AC3D 4.0 crease's range is [0.0, 180.0]; - -Config Options:
- toggle:
- - AC3D 4 mode: unset it to export without the 'crease' tag (which was -introduced with AC3D 4.0) and with the old material handling;
- - global coords: transform all vertices of all meshes to global coordinates;
- - skip data: set it if you don't want mesh names (ME:, not OB: field) -to be exported as strings for AC's "data" tags (19 chars max);
- - rgb mirror color can be exported as ambient and/or emissive if needed, -since Blender handles these differently;
- - default mat: a default (white) material is added if some mesh was -left without mats -- it's better to always add your own materials;
- - no split: don't split meshes (see above);
- - set texture dir: override the actual textures path with a given default -path (or simply export the texture names, without dir info, if the path is -empty);
- - per face 1 or 2 sided: override the "Double Sided" button that defines this behavior for the whole mesh in favor of the per-face "twosided" attribute from UV Face Select mode (see the sketch after this option list);
- - only selected: only consider selected objects when looking for meshes -to export (read notes below about tokens, too);
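A minimal sketch of how the 'per face 1 or 2 sided' toggle above is resolved, mirroring the flag logic the exporter uses later in numsurf(); the constants come from Blender's Mesh module exactly as in the script:

from Blender import Mesh

FACE_TWOSIDED = Mesh.FaceModes['TWOSIDE']
MESH_TWOSIDED = Mesh.Modes['TWOSIDED']

def is_two_sided(mesh, face, per_face=True):
    # the per-face attribute only exists on UV-textured meshes (faceUV)
    if per_face and mesh.faceUV:
        return bool(face.mode & FACE_TWOSIDED)
    # otherwise fall back to the mesh-wide "Double Sided" setting
    return bool(mesh.mode & MESH_TWOSIDED)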
- strings:
- - export dir: default dir to export to;
- - texture dir: override textures path with this path if 'set texture dir' -toggle is "on". - -Notes:
- This version updates:
- - modified meshes are correctly exported, no need to apply the modifiers in Blender;
- - correctly export each used material, be it assigned to the object or to its mesh data;
- - exporting lines (edges) is again supported; color comes from first material found in the mesh, if any, or a default white one.
- - there's a new option to choose between exporting meshes with transformed (global) coordinates or local ones;
- Multiple textures per mesh are supported (mesh gets split);
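A minimal sketch of the grouping idea behind that split; the exporter's split_mesh() builds the same kind of per-image face dictionary before creating one sub-mesh per texture:

def faces_by_texture(mesh):
    # group faces by their assigned UV image name; the None bucket collects untextured faces
    buckets = {}
    for f in mesh.faces:
        if f.image:
            key = f.image.name
        else:
            key = None
        buckets.setdefault(key, []).append(f)
    return buckets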
- Parents are exported as a group containing both the parent and its children;
- Start mesh object names (OB: field) with "!" or "#" if you don't want them to be exported;
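A minimal sketch of that naming rule, equivalent to the exporter's dont_export() check; a doubled token (e.g. "!!name") escapes it so the object is still exported:

TOKENS_DONT_EXPORT = ['!', '#']

def dont_export(name):
    # skip objects whose name starts with '!' or '#', unless the token is doubled (escaped)
    if name and name[0] in TOKENS_DONT_EXPORT:
        return not (len(name) > 1 and name[1] == name[0])
    return False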
- Start mesh object names (OB: field) with "=" or "$" to prevent them from being split (meshes with multiple textures or both textured and non textured faces are split unless this trick is used or the "no split" option is set. -""" - -# $Id$ -# -# -------------------------------------------------------------------------- -# AC3DExport version 2.44 -# Program versions: Blender 2.42+ and AC3Db files (means version 0xb) -# new: updated for new Blender version and Mesh module; supports lines (edges) again; -# option to export vertices transformed to global coordinates or not; now the modified -# (by existing mesh modifiers) mesh is exported; materials are properly exported, no -# matter if each of them is linked to the mesh or to the object. New (2.43.1): loose -# edges use color of first material found in the mesh, if any. -# -------------------------------------------------------------------------- -# Thanks: Steve Baker for discussions and inspiration; for testing, bug -# reports, suggestions, patches: David Megginson, Filippo di Natale, -# Franz Melchior, Campbell Barton, Josh Babcock, Ralf Gerlich, Stewart Andreason. -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Copyright (C) 2004-2007: Willian P. Germano, wgermano _at_ ig.com.br -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# -------------------------------------------------------------------------- - -import Blender -from Blender import Object, Mesh, Material, Image, Mathutils, Registry -from Blender import sys as bsys - -# Globals -REPORT_DATA = { - 'main': [], - 'errors': [], - 'warns': [], - 'nosplit': [], - 'noexport': [] -} -TOKENS_DONT_EXPORT = ['!', '#'] -TOKENS_DONT_SPLIT = ['=', '$'] - -MATIDX_ERROR = 0 - -# flags: -LOOSE = Mesh.EdgeFlags['LOOSE'] -FACE_TWOSIDED = Mesh.FaceModes['TWOSIDE'] -MESH_TWOSIDED = Mesh.Modes['TWOSIDED'] - -REG_KEY = 'ac3d_export' - -# config options: -GLOBAL_COORDS = True -SKIP_DATA = False -MIRCOL_AS_AMB = False -MIRCOL_AS_EMIS = False -ADD_DEFAULT_MAT = True -SET_TEX_DIR = True -TEX_DIR = '' -AC3D_4 = True # export crease value, compatible with AC3D 4 loaders -NO_SPLIT = False -ONLY_SELECTED = True -EXPORT_DIR = '' -PER_FACE_1_OR_2_SIDED = True - -tooltips = { - 'GLOBAL_COORDS': "transform all vertices of all meshes to global coordinates", - 'SKIP_DATA': "don't export mesh names as data fields", - 'MIRCOL_AS_AMB': "export mirror color as ambient color", - 'MIRCOL_AS_EMIS': "export mirror color as emissive color", - 'ADD_DEFAULT_MAT': "always add a default white material", - 'SET_TEX_DIR': "don't export default texture paths (edit also \"tex dir\")", - 'EXPORT_DIR': "default / last folder used to export .ac files to", - 'TEX_DIR': "(see \"set tex dir\") dir to prepend to all exported texture names (leave empty for no dir)", - 'AC3D_4': "compatibility mode, adds 'crease' tag and slightly better material support", - 'NO_SPLIT': "don't split meshes with multiple textures (or both textured and non textured polygons)", - 'ONLY_SELECTED': "export only selected objects", - 'PER_FACE_1_OR_2_SIDED': "override \"Double Sided\" button in favor of per face \"twosided\" attribute (UV Face Select mode)" -} - -def update_RegistryInfo(): - d = {} - d['SKIP_DATA'] = SKIP_DATA - d['MIRCOL_AS_AMB'] = MIRCOL_AS_AMB - d['MIRCOL_AS_EMIS'] = MIRCOL_AS_EMIS - d['ADD_DEFAULT_MAT'] = ADD_DEFAULT_MAT - d['SET_TEX_DIR'] = SET_TEX_DIR - d['TEX_DIR'] = TEX_DIR - d['AC3D_4'] = AC3D_4 - d['NO_SPLIT'] = NO_SPLIT - d['EXPORT_DIR'] = EXPORT_DIR - d['ONLY_SELECTED'] = ONLY_SELECTED - d['PER_FACE_1_OR_2_SIDED'] = PER_FACE_1_OR_2_SIDED - d['tooltips'] = tooltips - d['GLOBAL_COORDS'] = GLOBAL_COORDS - Registry.SetKey(REG_KEY, d, True) - -# Looking for a saved key in Blender.Registry dict: -rd = Registry.GetKey(REG_KEY, True) - -if rd: - try: - AC3D_4 = rd['AC3D_4'] - SKIP_DATA = rd['SKIP_DATA'] - MIRCOL_AS_AMB = rd['MIRCOL_AS_AMB'] - MIRCOL_AS_EMIS = rd['MIRCOL_AS_EMIS'] - ADD_DEFAULT_MAT = rd['ADD_DEFAULT_MAT'] - SET_TEX_DIR = rd['SET_TEX_DIR'] - TEX_DIR = rd['TEX_DIR'] - EXPORT_DIR = rd['EXPORT_DIR'] - ONLY_SELECTED = rd['ONLY_SELECTED'] - NO_SPLIT = rd['NO_SPLIT'] - PER_FACE_1_OR_2_SIDED = rd['PER_FACE_1_OR_2_SIDED'] - GLOBAL_COORDS = rd['GLOBAL_COORDS'] - except KeyError: update_RegistryInfo() - -else: - update_RegistryInfo() - -VERBOSE = True -CONFIRM_OVERWRITE = True - -# check General scripts config key for default behaviors -rd = Registry.GetKey('General', True) -if rd: - try: - VERBOSE = rd['verbose'] - CONFIRM_OVERWRITE = rd['confirm_overwrite'] - except: pass - - -# The default material to be used when necessary (see ADD_DEFAULT_MAT) -DEFAULT_MAT = \ -'MATERIAL "DefaultWhite" rgb 1 1 1 amb 1 1 1 emis 0 0 0 \ -spec 0.5 0.5 0.5 shi 64 trans 0' - -# This 
transformation aligns Blender and AC3D coordinate systems: -BLEND_TO_AC3D_MATRIX = Mathutils.Matrix([1,0,0,0], [0,0,-1,0], [0,1,0,0], [0,0,0,1]) - -def Round_s(f): - "Round to default precision and turn value to a string" - r = round(f,6) # precision set to 10e-06 - if r == int(r): - return str(int(r)) - else: - return str(r) - -def transform_verts(verts, m): - vecs = [] - for v in verts: - x, y, z = v.co - vec = Mathutils.Vector([x, y, z, 1]) - vecs.append(vec*m) - return vecs - -def get_loose_edges(mesh): - loose = LOOSE - return [e for e in mesh.edges if e.flag & loose] - -# --- - -# meshes with more than one texture assigned -# are split and saved as these foomeshes -class FooMesh: - - class FooVert: - def __init__(self, v): - self.v = v - self.index = 0 - - class FooFace: - def __init__(self, foomesh, f): - self.f = f - foov = foomesh.FooVert - self.v = [foov(f.v[0]), foov(f.v[1])] - len_fv = len(f.v) - if len_fv > 2 and f.v[2]: - self.v.append(foov(f.v[2])) - if len_fv > 3 and f.v[3]: self.v.append(foov(f.v[3])) - - def __getattr__(self, attr): - if attr == 'v': return self.v - return getattr(self.f, attr) - - def __len__(self): - return len(self.f) - - def __init__(self, tex, faces, mesh): - self.name = mesh.name - self.mesh = mesh - self.looseEdges = [] - self.faceUV = mesh.faceUV - self.degr = mesh.degr - vidxs = [0]*len(mesh.verts) - foofaces = [] - for f in faces: - foofaces.append(self.FooFace(self, f)) - for v in f.v: - if v: vidxs[v.index] = 1 - i = 0 - fooverts = [] - for v in mesh.verts: - if vidxs[v.index]: - fooverts.append(v) - vidxs[v.index] = i - i += 1 - for f in foofaces: - for v in f.v: - if v: v.index = vidxs[v.v.index] - self.faces = foofaces - self.verts = fooverts - - -class AC3DExport: # the ac3d exporter part - - def __init__(self, scene_objects, file): - - global ARG, SKIP_DATA, ADD_DEFAULT_MAT, DEFAULT_MAT - - header = 'AC3Db' - self.file = file - self.buf = '' - self.mbuf = [] - self.mlist = [] - world_kids = 0 - parents_list = self.parents_list = [] - kids_dict = self.kids_dict = {} - objs = [] - exp_objs = self.exp_objs = [] - tree = {} - - file.write(header+'\n') - - objs = \ - [o for o in scene_objects if o.type in ['Mesh', 'Empty']] - - # create a tree from parents to children objects - - for obj in objs[:]: - parent = obj.parent - lineage = [obj] - - while parent: - parents_list.append(parent.name) - obj = parent - parent = parent.getParent() - lineage.insert(0, obj) - - d = tree - for i in xrange(len(lineage)): - lname = lineage[i].getType()[:2] + lineage[i].name - if lname not in d.keys(): - d[lname] = {} - d = d[lname] - - # traverse the tree to get an ordered list of names of objects to export - self.traverse_dict(tree) - - world_kids = len(tree.keys()) - - # get list of objects to export, start writing the .ac file - - objlist = [Object.Get(name) for name in exp_objs] - - meshlist = [o for o in objlist if o.type == 'Mesh'] - - # create a temporary mesh to hold actual (modified) mesh data - TMP_mesh = Mesh.New('tmp_for_ac_export') - - # write materials - - self.MATERIALS(meshlist, TMP_mesh) - mbuf = self.mbuf - if not mbuf or ADD_DEFAULT_MAT: - mbuf.insert(0, "%s\n" % DEFAULT_MAT) - mbuf = "".join(mbuf) - file.write(mbuf) - - file.write('OBJECT world\nkids %s\n' % world_kids) - - # write the objects - - for obj in objlist: - self.obj = obj - - objtype = obj.type - objname = obj.name - kidsnum = kids_dict[objname] - - # A parent plus its children are exported as a group. 
- # If the parent is a mesh, its rot and loc are exported as the - # group rot and loc and the mesh (w/o rot and loc) is added to the group. - if kidsnum: - self.OBJECT('group') - self.name(objname) - if objtype == 'Mesh': - kidsnum += 1 - if not GLOBAL_COORDS: - localmatrix = obj.getMatrix('localspace') - if not obj.getParent(): - localmatrix *= BLEND_TO_AC3D_MATRIX - self.rot(localmatrix.rotationPart()) - self.loc(localmatrix.translationPart()) - self.kids(kidsnum) - - if objtype == 'Mesh': - mesh = TMP_mesh # temporary mesh to hold actual (modified) mesh data - mesh.getFromObject(objname) - self.mesh = mesh - if mesh.faceUV: - meshes = self.split_mesh(mesh) - else: - meshes = [mesh] - if len(meshes) > 1: - if NO_SPLIT or self.dont_split(objname): - self.export_mesh(mesh, ob) - REPORT_DATA['nosplit'].append(objname) - else: - self.OBJECT('group') - self.name(objname) - self.kids(len(meshes)) - counter = 0 - for me in meshes: - self.export_mesh(me, obj, - name = '%s_%s' % (obj.name, counter), foomesh = True) - self.kids() - counter += 1 - else: - self.export_mesh(mesh, obj) - self.kids() - - - def traverse_dict(self, d): - kids_dict = self.kids_dict - exp_objs = self.exp_objs - keys = d.keys() - keys.sort() # sort for predictable output - keys.reverse() - for k in keys: - objname = k[2:] - klen = len(d[k]) - kids_dict[objname] = klen - if self.dont_export(objname): - d.pop(k) - parent = Object.Get(objname).getParent() - if parent: kids_dict[parent.name] -= 1 - REPORT_DATA['noexport'].append(objname) - continue - if klen: - self.traverse_dict(d[k]) - exp_objs.insert(0, objname) - else: - if k.find('Em', 0) == 0: # Empty w/o children - d.pop(k) - parent = Object.Get(objname).getParent() - if parent: kids_dict[parent.name] -= 1 - else: - exp_objs.insert(0, objname) - - def dont_export(self, name): # if name starts with '!' or '#' - length = len(name) - if length >= 1: - if name[0] in TOKENS_DONT_EXPORT: # '!' 
or '#' doubled (escaped): export - if length > 1 and name[1] == name[0]: - return 0 - return 1 - - def dont_split(self, name): # if name starts with '=' or '$' - length = len(name) - if length >= 1: - if name[0] in TOKENS_DONT_SPLIT: # '=' or '$' doubled (escaped): split - if length > 1 and name[1] == name[0]: - return 0 - return 1 - - def split_mesh(self, mesh): - tex_dict = {0:[]} - for f in mesh.faces: - if f.image: - if not f.image.name in tex_dict: tex_dict[f.image.name] = [] - tex_dict[f.image.name].append(f) - else: tex_dict[0].append(f) - keys = tex_dict.keys() - len_keys = len(keys) - if not tex_dict[0]: - len_keys -= 1 - tex_dict.pop(0) - keys.remove(0) - elif len_keys > 1: - lines = [] - anyimgkey = [k for k in keys if k != 0][0] - for f in tex_dict[0]: - if len(f.v) < 3: - lines.append(f) - if len(tex_dict[0]) == len(lines): - for l in lines: - tex_dict[anyimgkey].append(l) - len_keys -= 1 - tex_dict.pop(0) - if len_keys > 1: - foo_meshes = [] - for k in keys: - faces = tex_dict[k] - foo_meshes.append(FooMesh(k, faces, mesh)) - foo_meshes[0].edges = get_loose_edges(mesh) - return foo_meshes - return [mesh] - - def export_mesh(self, mesh, obj, name = None, foomesh = False): - file = self.file - self.OBJECT('poly') - if not name: name = obj.name - self.name(name) - if not SKIP_DATA: - meshname = obj.getData(name_only = True) - self.data(len(meshname), meshname) - if mesh.faceUV: - texline = self.texture(mesh.faces) - if texline: file.write(texline) - if AC3D_4: - self.crease(mesh.degr) - - # If exporting using local coordinates, children object coordinates should not be - # transformed to ac3d's coordinate system, since that will be accounted for in - # their topmost parents (the parents w/o parents) transformations. - if not GLOBAL_COORDS: - # We hold parents in a list, so they also don't get transformed, - # because for each parent we create an ac3d group to hold both the - # parent and its children. 
- if obj.name not in self.parents_list: - localmatrix = obj.getMatrix('localspace') - if not obj.getParent(): - localmatrix *= BLEND_TO_AC3D_MATRIX - self.rot(localmatrix.rotationPart()) - self.loc(localmatrix.translationPart()) - matrix = None - else: - matrix = obj.getMatrix() * BLEND_TO_AC3D_MATRIX - - self.numvert(mesh.verts, matrix) - self.numsurf(mesh, foomesh) - - def MATERIALS(self, meshlist, me): - for meobj in meshlist: - me.getFromObject(meobj) - mats = me.materials - mbuf = [] - mlist = self.mlist - for m in mats: - if not m: continue - name = m.name - if name not in mlist: - mlist.append(name) - M = Material.Get(name) - material = 'MATERIAL "%s"' % name - mirCol = "%s %s %s" % (Round_s(M.mirCol[0]), Round_s(M.mirCol[1]), - Round_s(M.mirCol[2])) - rgb = "rgb %s %s %s" % (Round_s(M.R), Round_s(M.G), Round_s(M.B)) - ambval = Round_s(M.amb) - amb = "amb %s %s %s" % (ambval, ambval, ambval) - spec = "spec %s %s %s" % (Round_s(M.specCol[0]), - Round_s(M.specCol[1]), Round_s(M.specCol[2])) - if AC3D_4: - emit = Round_s(M.emit) - emis = "emis %s %s %s" % (emit, emit, emit) - shival = int(M.spec * 64) - else: - emis = "emis 0 0 0" - shival = 72 - shi = "shi %s" % shival - trans = "trans %s" % (Round_s(1 - M.alpha)) - if MIRCOL_AS_AMB: - amb = "amb %s" % mirCol - if MIRCOL_AS_EMIS: - emis = "emis %s" % mirCol - mbuf.append("%s %s %s %s %s %s %s\n" \ - % (material, rgb, amb, emis, spec, shi, trans)) - self.mlist = mlist - self.mbuf.append("".join(mbuf)) - - def OBJECT(self, type): - self.file.write('OBJECT %s\n' % type) - - def name(self, name): - if name[0] in TOKENS_DONT_EXPORT or name[0] in TOKENS_DONT_SPLIT: - if len(name) > 1: name = name[1:] - self.file.write('name "%s"\n' % name) - - def kids(self, num = 0): - self.file.write('kids %s\n' % num) - - def data(self, num, str): - self.file.write('data %s\n%s\n' % (num, str)) - - def texture(self, faces): - tex = "" - for f in faces: - if f.image: - tex = f.image.name - break - if tex: - image = Image.Get(tex) - texfname = image.filename - if SET_TEX_DIR: - texfname = bsys.basename(texfname) - if TEX_DIR: - texfname = bsys.join(TEX_DIR, texfname) - buf = 'texture "%s"\n' % texfname - xrep = image.xrep - yrep = image.yrep - buf += 'texrep %s %s\n' % (xrep, yrep) - self.file.write(buf) - - def rot(self, matrix): - rot = '' - not_I = 0 # not identity - matstr = [] - for i in [0, 1, 2]: - r = map(Round_s, matrix[i]) - not_I += (r[0] != '0')+(r[1] != '0')+(r[2] != '0') - not_I -= (r[i] == '1') - for j in [0, 1, 2]: - matstr.append(' %s' % r[j]) - if not_I: # no need to write identity - self.file.write('rot%s\n' % "".join(matstr)) - - def loc(self, loc): - loc = map(Round_s, loc) - if loc != ['0', '0', '0']: # no need to write default - self.file.write('loc %s %s %s\n' % (loc[0], loc[1], loc[2])) - - def crease(self, crease): - self.file.write('crease %f\n' % crease) - - def numvert(self, verts, matrix): - file = self.file - nvstr = [] - nvstr.append("numvert %s\n" % len(verts)) - - if matrix: - verts = transform_verts(verts, matrix) - for v in verts: - v = map (Round_s, v) - nvstr.append("%s %s %s\n" % (v[0], v[1], v[2])) - else: - for v in verts: - v = map(Round_s, v.co) - nvstr.append("%s %s %s\n" % (v[0], v[1], v[2])) - - file.write("".join(nvstr)) - - def numsurf(self, mesh, foomesh = False): - - global MATIDX_ERROR - - # local vars are faster and so better in tight loops - lc_ADD_DEFAULT_MAT = ADD_DEFAULT_MAT - lc_MATIDX_ERROR = MATIDX_ERROR - lc_PER_FACE_1_OR_2_SIDED = PER_FACE_1_OR_2_SIDED - lc_FACE_TWOSIDED = FACE_TWOSIDED - 
lc_MESH_TWOSIDED = MESH_TWOSIDED - - faces = mesh.faces - hasFaceUV = mesh.faceUV - if foomesh: - looseEdges = mesh.looseEdges - else: - looseEdges = get_loose_edges(mesh) - - file = self.file - - file.write("numsurf %s\n" % (len(faces) + len(looseEdges))) - - if not foomesh: verts = list(self.mesh.verts) - - materials = self.mesh.materials - mlist = self.mlist - matidx_error_reported = False - objmats = [] - for omat in materials: - if omat: objmats.append(omat.name) - else: objmats.append(None) - for f in faces: - if not objmats: - m_idx = 0 - elif objmats[f.mat] in mlist: - m_idx = mlist.index(objmats[f.mat]) - else: - if not lc_MATIDX_ERROR: - rdat = REPORT_DATA['warns'] - rdat.append("Object %s" % self.obj.name) - rdat.append("has at least one material *index* assigned but not") - rdat.append("defined (not linked to an existing material).") - rdat.append("Result: some faces may be exported with a wrong color.") - rdat.append("You can assign materials in the Edit Buttons window (F9).") - elif not matidx_error_reported: - midxmsg = "- Same for object %s." % self.obj.name - REPORT_DATA['warns'].append(midxmsg) - lc_MATIDX_ERROR += 1 - matidx_error_reported = True - m_idx = 0 - if lc_ADD_DEFAULT_MAT: m_idx -= 1 - refs = len(f) - flaglow = 0 # polygon - if lc_PER_FACE_1_OR_2_SIDED and hasFaceUV: # per face attribute - two_side = f.mode & lc_FACE_TWOSIDED - else: # global, for the whole mesh - two_side = self.mesh.mode & lc_MESH_TWOSIDED - two_side = (two_side > 0) << 1 - flaghigh = f.smooth | two_side - surfstr = "SURF 0x%d%d\n" % (flaghigh, flaglow) - if lc_ADD_DEFAULT_MAT and objmats: m_idx += 1 - matstr = "mat %s\n" % m_idx - refstr = "refs %s\n" % refs - u, v, vi = 0, 0, 0 - fvstr = [] - if foomesh: - for vert in f.v: - fvstr.append(str(vert.index)) - if hasFaceUV: - u = f.uv[vi][0] - v = f.uv[vi][1] - vi += 1 - fvstr.append(" %s %s\n" % (u, v)) - else: - for vert in f.v: - fvstr.append(str(verts.index(vert))) - if hasFaceUV: - u = f.uv[vi][0] - v = f.uv[vi][1] - vi += 1 - fvstr.append(" %s %s\n" % (u, v)) - - fvstr = "".join(fvstr) - - file.write("%s%s%s%s" % (surfstr, matstr, refstr, fvstr)) - - # material for loose edges - edges_mat = 0 # default to first material - for omat in objmats: # but look for a material from this mesh - if omat in mlist: - edges_mat = mlist.index(omat) - if lc_ADD_DEFAULT_MAT: edges_mat += 1 - break - - for e in looseEdges: - fvstr = [] - #flaglow = 2 # 1 = closed line, 2 = line - #flaghigh = 0 - #surfstr = "SURF 0x%d%d\n" % (flaghigh, flaglow) - surfstr = "SURF 0x02\n" - - fvstr.append("%d 0 0\n" % verts.index(e.v1)) - fvstr.append("%d 0 0\n" % verts.index(e.v2)) - fvstr = "".join(fvstr) - - matstr = "mat %d\n" % edges_mat # for now, use first material - refstr = "refs 2\n" # 2 verts - - file.write("%s%s%s%s" % (surfstr, matstr, refstr, fvstr)) - - MATIDX_ERROR = lc_MATIDX_ERROR - -# End of Class AC3DExport - -from Blender.Window import FileSelector - -def report_data(): - global VERBOSE - - if not VERBOSE: return - - d = REPORT_DATA - msgs = { - '0main': '%s\nExporting meshes to AC3D format' % str(19*'-'), - '1warns': 'Warnings', - '2errors': 'Errors', - '3nosplit': 'Not split (because name starts with "=" or "$")', - '4noexport': 'Not exported (because name starts with "!" 
or "#")' - } - if NO_SPLIT: - l = msgs['3nosplit'] - l = "%s (because OPTION NO_SPLIT is set)" % l.split('(')[0] - msgs['3nosplit'] = l - keys = msgs.keys() - keys.sort() - for k in keys: - msgk = msgs[k] - msg = '\n'.join(d[k[1:]]) - if msg: - print '\n-%s:' % msgk - print msg - -# File Selector callback: -def fs_callback(filename): - global EXPORT_DIR, OBJS, CONFIRM_OVERWRITE, VERBOSE - - if not filename.endswith('.ac'): filename = '%s.ac' % filename - - if bsys.exists(filename) and CONFIRM_OVERWRITE: - if Blender.Draw.PupMenu('OVERWRITE?%t|File exists') != 1: - return - - Blender.Window.WaitCursor(1) - starttime = bsys.time() - - export_dir = bsys.dirname(filename) - if export_dir != EXPORT_DIR: - EXPORT_DIR = export_dir - update_RegistryInfo() - - try: - file = open(filename, 'w') - except IOError, (errno, strerror): - error = "IOError #%s: %s" % (errno, strerror) - REPORT_DATA['errors'].append("Saving failed - %s." % error) - error_msg = "Couldn't save file!%%t|%s" % error - Blender.Draw.PupMenu(error_msg) - return - - try: - test = AC3DExport(OBJS, file) - except: - file.close() - raise - else: - file.close() - endtime = bsys.time() - starttime - REPORT_DATA['main'].append("Done. Saved to: %s" % filename) - REPORT_DATA['main'].append("Data exported in %.3f seconds." % endtime) - - if VERBOSE: report_data() - Blender.Window.WaitCursor(0) - - -# -- End of definitions - -scn = Blender.Scene.GetCurrent() - -if ONLY_SELECTED: - OBJS = list(scn.objects.context) -else: - OBJS = list(scn.objects) - -if not OBJS: - Blender.Draw.PupMenu('ERROR: no objects selected') -else: - fname = bsys.makename(ext=".ac") - if EXPORT_DIR: - fname = bsys.join(EXPORT_DIR, bsys.basename(fname)) - FileSelector(fs_callback, "Export AC3D", fname) diff --git a/release/scripts/ac3d_import.py b/release/scripts/ac3d_import.py deleted file mode 100644 index 2f5512e7150..00000000000 --- a/release/scripts/ac3d_import.py +++ /dev/null @@ -1,783 +0,0 @@ -#!BPY - -""" Registration info for Blender menus: -Name: 'AC3D (.ac)...' -Blender: 243 -Group: 'Import' -Tip: 'Import an AC3D (.ac) file.' -""" - -__author__ = "Willian P. Germano" -__url__ = ("blender", "blenderartists.org", "AC3D's homepage, http://www.ac3d.org", - "PLib 3d gaming lib, http://plib.sf.net") -__version__ = "2.48.1 2009-01-11" - -__bpydoc__ = """\ -This script imports AC3D models into Blender. - -AC3D is a simple and affordable commercial 3d modeller also built with OpenGL. -The .ac file format is an easy to parse text format well supported, -for example, by the PLib 3d gaming library. - -Supported:
- UV-textured meshes with hierarchy (grouping) information. - -Missing:
- The url tag is irrelevant for Blender. - -Known issues:
- - Some objects may be imported with wrong normals due to wrong information in the model itself. This can be noticed by strange shading, like darker than expected parts in the model. To fix this, select the mesh with wrong normals, enter edit mode and tell Blender to recalculate the normals, either to make them point outside (the usual case) or inside.
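If you prefer to fix such normals from a script rather than in edit mode, a minimal sketch using the same 2.4x BPy call the importer itself runs on every mesh it builds; the mesh name here is hypothetical:

import Blender
from Blender import Mesh

me = Mesh.Get('ImportedMesh')  # hypothetical name of a mesh with flipped normals
me.calcNormals()               # recalculate normals so they point outwards
Blender.Redraw()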
- -Config Options:
- - display transp (toggle): if "on", objects that have materials with alpha < 1.0 are shown with translucency (transparency) in the 3D View.
- - subdiv (toggle): if "on", ac3d objects meant to be subdivided receive a SUBSURF modifier in Blender.
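A minimal sketch of what the subdiv toggle does per object, using the same Modifier API the importer calls further down; the level values are assumed to be clamped already:

from Blender import Modifier

def add_subsurf(ob, levels, render_levels):
    # attach a SUBSURF modifier and set its viewport/render subdivision levels
    modif = ob.modifiers.append(Modifier.Types.SUBSURF)
    modif[Modifier.Settings.LEVELS] = levels
    modif[Modifier.Settings.RENDLEVELS] = render_levels
    return modif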
- - emis as mircol: store the emissive rgb color from AC3D as mirror color in Blender -- this is a hack to preserve the values and be able to export them using the equivalent option in the exporter.
- - textures dir (string): if non-blank, when the texture paths imported -from the .ac file are wrong, Blender will also look for the textures in this dir. - -Notes:
- - When looking for assigned textures, Blender tries in order: the actual -paths from the .ac file, the .ac file's dir and the default textures dir path -users can configure (see config options above). -""" - -# $Id$ -# -# -------------------------------------------------------------------------- -# AC3DImport version 2.43.1 Feb 21, 2007 -# Program versions: Blender 2.43 and AC3Db files (means version 0xb) -# changed: better triangulation of ngons, more fixes to support bad .ac files, -# option to display transp mats in 3d view, support "subdiv" tag (via SUBSURF modifier) -# -------------------------------------------------------------------------- -# Thanks: Melchior Franz for extensive bug testing and reporting, making this -# version cope much better with old or bad .ac files, among other improvements; -# Stewart Andreason for reporting a serious crash; Francesco Brisa for the -# emis as mircol functionality (w/ patch). -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Copyright (C) 2004-2009: Willian P. Germano, wgermano _at_ ig.com.br -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -from math import radians - -import Blender -from Blender import Scene, Object, Mesh, Lamp, Registry, sys as bsys, Window, Image, Material, Modifier -from Blender.sys import dirsep -from Blender.Mathutils import Vector, Matrix, Euler -from Blender.Geometry import PolyFill - -# Default folder for AC3D textures, to override wrong paths, change to your -# liking or leave as "": -TEXTURES_DIR = "" - -DISPLAY_TRANSP = True - -SUBDIV = True - -EMIS_AS_MIRCOL = False - - -tooltips = { - 'DISPLAY_TRANSP': 'Turn transparency on in the 3d View for objects using materials with alpha < 1.0.', - 'SUBDIV': 'Apply a SUBSURF modifier to objects meant to appear subdivided.', - 'TEXTURES_DIR': 'Additional folder to look for missing textures.', - 'EMIS_AS_MIRCOL': 'Store emis color as mirror color in Blender.' 
-} - -def update_registry(): - global TEXTURES_DIR, DISPLAY_TRANSP, EMIS_AS_MIRCOL - rd = dict([('tooltips', tooltips), ('TEXTURES_DIR', TEXTURES_DIR), ('DISPLAY_TRANSP', DISPLAY_TRANSP), ('SUBDIV', SUBDIV), ('EMIS_AS_MIRCOL', EMIS_AS_MIRCOL)]) - Registry.SetKey('ac3d_import', rd, True) - -rd = Registry.GetKey('ac3d_import', True) - -if rd: - if 'GROUP' in rd: - update_registry() - try: - TEXTURES_DIR = rd['TEXTURES_DIR'] - DISPLAY_TRANSP = rd['DISPLAY_TRANSP'] - SUBDIV = rd['SUBDIV'] - EMIS_AS_MIRCOL = rd['EMIS_AS_MIRCOL'] - except: - update_registry() -else: update_registry() - -if TEXTURES_DIR: - oldtexdir = TEXTURES_DIR - if dirsep == '/': TEXTURES_DIR = TEXTURES_DIR.replace('\\', '/') - if TEXTURES_DIR[-1] != dirsep: TEXTURES_DIR = "%s%s" % (TEXTURES_DIR, dirsep) - if oldtexdir != TEXTURES_DIR: update_registry() - - -VERBOSE = True -rd = Registry.GetKey('General', True) -if rd: - if rd.has_key('verbose'): - VERBOSE = rd['verbose'] - - -errmsg = "" - -# Matrix to align ac3d's coordinate system with Blender's one, -# it's a -90 degrees rotation around the x axis: -AC_TO_BLEND_MATRIX = Matrix([1, 0, 0], [0, 0, 1], [0, -1, 0]) - -AC_WORLD = 0 -AC_GROUP = 1 -AC_POLY = 2 -AC_LIGHT = 3 -AC_OB_TYPES = { - 'world': AC_WORLD, - 'group': AC_GROUP, - 'poly': AC_POLY, - 'light': AC_LIGHT - } - -AC_OB_BAD_TYPES_LIST = [] # to hold references to unknown (wrong) ob types - -def inform(msg): - global VERBOSE - if VERBOSE: print msg - -def euler_in_radians(eul): - "Used while there's a bug in the BPY API" - eul.x = radians(eul.x) - eul.y = radians(eul.y) - eul.z = radians(eul.z) - return eul - -class Obj: - - def __init__(self, type): - self.type = type - self.dad = None - self.name = '' - self.data = '' - self.tex = '' - self.texrep = [1,1] - self.texoff = None - self.loc = [] - self.rot = [] - self.size = [] - self.crease = 30 - self.subdiv = 0 - self.vlist = [] - self.flist_cfg = [] - self.flist_v = [] - self.flist_uv = [] - self.elist = [] - self.matlist = [] - self.kids = 0 - - self.bl_obj = None # the actual Blender object created from this data - -class AC3DImport: - - def __init__(self, filename): - - global errmsg - - self.scene = Scene.GetCurrent() - - self.i = 0 - errmsg = '' - self.importdir = bsys.dirname(filename) - try: - file = open(filename, 'r') - except IOError, (errno, strerror): - errmsg = "IOError #%s: %s" % (errno, strerror) - Blender.Draw.PupMenu('ERROR: %s' % errmsg) - inform(errmsg) - return None - header = file.read(5) - header, version = header[:4], header[-1] - if header != 'AC3D': - file.close() - errmsg = 'AC3D header not found (invalid file)' - Blender.Draw.PupMenu('ERROR: %s' % errmsg) - inform(errmsg) - return None - elif version != 'b': - inform('AC3D file version 0x%s.' 
% version) - inform('This importer is for version 0xb, so it may fail.') - - self.token = {'OBJECT': self.parse_obj, - 'numvert': self.parse_vert, - 'numsurf': self.parse_surf, - 'name': self.parse_name, - 'data': self.parse_data, - 'kids': self.parse_kids, - 'loc': self.parse_loc, - 'rot': self.parse_rot, - 'MATERIAL': self.parse_mat, - 'texture': self.parse_tex, - 'texrep': self.parse_texrep, - 'texoff': self.parse_texoff, - 'subdiv': self.parse_subdiv, - 'crease': self.parse_crease} - - self.objlist = [] - self.mlist = [] - self.kidsnumlist = [] - self.dad = None - - self.lines = file.readlines() - self.lines.append('') - self.parse_file() - file.close() - - self.testAC3DImport() - - def parse_obj(self, value): - kidsnumlist = self.kidsnumlist - if kidsnumlist: - while not kidsnumlist[-1]: - kidsnumlist.pop() - if kidsnumlist: - self.dad = self.dad.dad - else: - inform('Ignoring unexpected data at end of file.') - return -1 # bad file with more objects than reported - kidsnumlist[-1] -= 1 - if value in AC_OB_TYPES: - new = Obj(AC_OB_TYPES[value]) - else: - if value not in AC_OB_BAD_TYPES_LIST: - AC_OB_BAD_TYPES_LIST.append(value) - inform('Unexpected object type keyword: "%s". Assuming it is of type: "poly".' % value) - new = Obj(AC_OB_TYPES['poly']) - new.dad = self.dad - new.name = value - self.objlist.append(new) - - def parse_kids(self, value): - kids = int(value) - if kids: - self.kidsnumlist.append(kids) - self.dad = self.objlist[-1] - self.objlist[-1].kids = kids - - def parse_name(self, value): - name = value.split('"')[1] - self.objlist[-1].name = name - - def parse_data(self, value): - data = self.lines[self.i].strip() - self.objlist[-1].data = data - - def parse_tex(self, value): - line = self.lines[self.i - 1] # parse again to properly get paths with spaces - texture = line.split('"')[1] - self.objlist[-1].tex = texture - - def parse_texrep(self, trash): - trep = self.lines[self.i - 1] - trep = trep.split() - trep = [float(trep[1]), float(trep[2])] - self.objlist[-1].texrep = trep - self.objlist[-1].texoff = [0, 0] - - def parse_texoff(self, trash): - toff = self.lines[self.i - 1] - toff = toff.split() - toff = [float(toff[1]), float(toff[2])] - self.objlist[-1].texoff = toff - - def parse_mat(self, value): - i = self.i - 1 - lines = self.lines - line = lines[i].split() - mat_name = '' - mat_col = mat_amb = mat_emit = mat_spec_col = mat_mir_col = [0,0,0] - mat_alpha = 1 - mat_spec = 1.0 - - while line[0] == 'MATERIAL': - mat_name = line[1].split('"')[1] - mat_col = map(float,[line[3],line[4],line[5]]) - v = map(float,[line[7],line[8],line[9]]) - mat_amb = (v[0]+v[1]+v[2]) / 3.0 - v = map(float,[line[11],line[12],line[13]]) - mat_emit = (v[0]+v[1]+v[2]) / 3.0 - if EMIS_AS_MIRCOL: - mat_emit = 0 - mat_mir_col = map(float,[line[11],line[12],line[13]]) - - mat_spec_col = map(float,[line[15],line[16],line[17]]) - mat_spec = float(line[19]) / 64.0 - mat_alpha = float(line[-1]) - mat_alpha = 1 - mat_alpha - self.mlist.append([mat_name, mat_col, mat_amb, mat_emit, mat_spec_col, mat_spec, mat_mir_col, mat_alpha]) - i += 1 - line = lines[i].split() - - self.i = i - - def parse_rot(self, trash): - i = self.i - 1 - ob = self.objlist[-1] - rot = self.lines[i].split(' ', 1)[1] - rot = map(float, rot.split()) - matrix = Matrix(rot[:3], rot[3:6], rot[6:]) - ob.rot = matrix - size = matrix.scalePart() # vector - ob.size = size - - def parse_loc(self, trash): - i = self.i - 1 - loc = self.lines[i].split(' ', 1)[1] - loc = map(float, loc.split()) - self.objlist[-1].loc = Vector(loc) - - def 
parse_crease(self, value): - # AC3D: range is [0.0, 180.0]; Blender: [1, 80] - value = float(value) - self.objlist[-1].crease = int(value) - - def parse_subdiv(self, value): - self.objlist[-1].subdiv = int(value) - - def parse_vert(self, value): - i = self.i - lines = self.lines - obj = self.objlist[-1] - vlist = obj.vlist - n = int(value) - - while n: - line = lines[i].split() - line = map(float, line) - vlist.append(line) - n -= 1 - i += 1 - - if vlist: # prepend a vertex at 1st position to deal with vindex 0 issues - vlist.insert(0, line) - - self.i = i - - def parse_surf(self, value): - i = self.i - is_smooth = 0 - double_sided = 0 - lines = self.lines - obj = self.objlist[-1] - vlist = obj.vlist - matlist = obj.matlist - numsurf = int(value) - NUMSURF = numsurf - - badface_notpoly = badface_multirefs = 0 - - while numsurf: - flags = lines[i].split()[1][2:] - if len(flags) > 1: - flaghigh = int(flags[0]) - flaglow = int(flags[1]) - else: - flaghigh = 0 - flaglow = int(flags[0]) - - is_smooth = flaghigh & 1 - twoside = flaghigh & 2 - nextline = lines[i+1].split() - if nextline[0] != 'mat': # the "mat" line may be missing (found in one buggy .ac file) - matid = 0 - if not matid in matlist: matlist.append(matid) - i += 2 - else: - matid = int(nextline[1]) - if not matid in matlist: matlist.append(matid) - nextline = lines[i+2].split() - i += 3 - refs = int(nextline[1]) - face = [] - faces = [] - edges = [] - fuv = [] - fuvs = [] - rfs = refs - - while rfs: - line = lines[i].split() - v = int(line[0]) + 1 # + 1 to avoid vindex == 0 - uv = [float(line[1]), float(line[2])] - face.append(v) - fuv.append(Vector(uv)) - rfs -= 1 - i += 1 - - if flaglow: # it's a line or closed line, not a polygon - while len(face) >= 2: - cut = face[:2] - edges.append(cut) - face = face[1:] - - if flaglow == 1 and edges: # closed line - face = [edges[-1][-1], edges[0][0]] - edges.append(face) - - else: # polygon - - # check for bad face, that references same vertex more than once - lenface = len(face) - if lenface < 3: - # less than 3 vertices, not a face - badface_notpoly += 1 - elif sum(map(face.count, face)) != lenface: - # multiple references to the same vertex - badface_multirefs += 1 - else: # ok, seems fine - if len(face) > 4: # ngon, triangulate it - polyline = [] - for vi in face: - polyline.append(Vector(vlist[vi])) - tris = PolyFill([polyline]) - for t in tris: - tri = [face[t[0]], face[t[1]], face[t[2]]] - triuvs = [fuv[t[0]], fuv[t[1]], fuv[t[2]]] - faces.append(tri) - fuvs.append(triuvs) - else: # tri or quad - faces.append(face) - fuvs.append(fuv) - - obj.flist_cfg.extend([[matid, is_smooth, twoside]] * len(faces)) - obj.flist_v.extend(faces) - obj.flist_uv.extend(fuvs) - obj.elist.extend(edges) # loose edges - - numsurf -= 1 - - if badface_notpoly or badface_multirefs: - inform('Object "%s" - ignoring bad faces:' % obj.name) - if badface_notpoly: - inform('\t%d face(s) with less than 3 vertices.' % badface_notpoly) - if badface_multirefs: - inform('\t%d face(s) with multiple references to a same vertex.' % badface_multirefs) - - self.i = i - - def parse_file(self): - i = 1 - lines = self.lines - line = lines[i].split() - - while line: - kw = '' - for k in self.token.keys(): - if line[0] == k: - kw = k - break - i += 1 - if kw: - self.i = i - result = self.token[kw](line[1]) - if result: - break # bad .ac file, stop parsing - i = self.i - line = lines[i].split() - - # for each group of meshes we try to find one that can be used as - # parent of the group in Blender. 
- # If not found, we can use an Empty as parent. - def found_parent(self, groupname, olist): - l = [o for o in olist if o.type == AC_POLY \ - and not o.kids and not o.rot and not o.loc] - if l: - for o in l: - if o.name == groupname: - return o - #return l[0] - return None - - def build_hierarchy(self): - blmatrix = AC_TO_BLEND_MATRIX - - olist = self.objlist[1:] - olist.reverse() - - scene = self.scene - - newlist = [] - - for o in olist: - kids = o.kids - if kids: - children = newlist[-kids:] - newlist = newlist[:-kids] - if o.type == AC_GROUP: - parent = self.found_parent(o.name, children) - if parent: - children.remove(parent) - o.bl_obj = parent.bl_obj - else: # not found, use an empty - empty = scene.objects.new('Empty', o.name) - o.bl_obj = empty - - bl_children = [c.bl_obj for c in children if c.bl_obj != None] - - o.bl_obj.makeParent(bl_children, 0, 1) - for child in children: - blob = child.bl_obj - if not blob: continue - if child.rot: - eul = euler_in_radians(child.rot.toEuler()) - blob.setEuler(eul) - if child.size: - blob.size = child.size - if not child.loc: - child.loc = Vector(0.0, 0.0, 0.0) - blob.setLocation(child.loc) - - newlist.append(o) - - for o in newlist: # newlist now only has objs w/o parents - blob = o.bl_obj - if not blob: - continue - if o.size: - o.bl_obj.size = o.size - if not o.rot: - blob.setEuler([1.5707963267948966, 0, 0]) - else: - matrix = o.rot * blmatrix - eul = euler_in_radians(matrix.toEuler()) - blob.setEuler(eul) - if o.loc: - o.loc *= blmatrix - else: - o.loc = Vector(0.0, 0.0, 0.0) - blob.setLocation(o.loc) # forces DAG update, so we do it even for 0, 0, 0 - - # XXX important: until we fix the BPy API so it doesn't increase user count - # when wrapping a Blender object, this piece of code is needed for proper - # object (+ obdata) deletion in Blender: - for o in self.objlist: - if o.bl_obj: - o.bl_obj = None - - def testAC3DImport(self): - - FACE_TWOSIDE = Mesh.FaceModes['TWOSIDE'] - FACE_TEX = Mesh.FaceModes['TEX'] - MESH_AUTOSMOOTH = Mesh.Modes['AUTOSMOOTH'] - - MAT_MODE_ZTRANSP = Material.Modes['ZTRANSP'] - MAT_MODE_TRANSPSHADOW = Material.Modes['TRANSPSHADOW'] - - scene = self.scene - - bl_images = {} # loaded texture images - missing_textures = [] # textures we couldn't find - - objlist = self.objlist[1:] # skip 'world' - - bmat = [] - has_transp_mats = False - for mat in self.mlist: - name = mat[0] - m = Material.New(name) - m.rgbCol = (mat[1][0], mat[1][1], mat[1][2]) - m.amb = mat[2] - m.emit = mat[3] - m.specCol = (mat[4][0], mat[4][1], mat[4][2]) - m.spec = mat[5] - m.mirCol = (mat[6][0], mat[6][1], mat[6][2]) - m.alpha = mat[7] - if m.alpha < 1.0: - m.mode |= MAT_MODE_ZTRANSP - has_transp_mats = True - bmat.append(m) - - if has_transp_mats: - for mat in bmat: - mat.mode |= MAT_MODE_TRANSPSHADOW - - obj_idx = 0 # index of current obj in loop - for obj in objlist: - if obj.type == AC_GROUP: - continue - elif obj.type == AC_LIGHT: - light = Lamp.New('Lamp') - object = scene.objects.new(light, obj.name) - #object.select(True) - obj.bl_obj = object - if obj.data: - light.name = obj.data - continue - - # type AC_POLY: - - # old .ac files used empty meshes as groups, convert to a real ac group - if not obj.vlist and obj.kids: - obj.type = AC_GROUP - continue - - mesh = Mesh.New() - object = scene.objects.new(mesh, obj.name) - #object.select(True) - obj.bl_obj = object - if obj.data: mesh.name = obj.data - mesh.degr = obj.crease # will auto clamp to [1, 80] - - if not obj.vlist: # no vertices? 
nothing more to do - continue - - mesh.verts.extend(obj.vlist) - - objmat_indices = [] - for mat in bmat: - if bmat.index(mat) in obj.matlist: - objmat_indices.append(bmat.index(mat)) - mesh.materials += [mat] - if DISPLAY_TRANSP and mat.alpha < 1.0: - object.transp = True - - for e in obj.elist: - mesh.edges.extend(e) - - if obj.flist_v: - mesh.faces.extend(obj.flist_v) - - facesnum = len(mesh.faces) - - if facesnum == 0: # shouldn't happen, of course - continue - - mesh.faceUV = True - - # checking if the .ac file had duplicate faces (Blender ignores them) - if facesnum != len(obj.flist_v): - # it has, ugh. Let's clean the uv list: - lenfl = len(obj.flist_v) - flist = obj.flist_v - uvlist = obj.flist_uv - cfglist = obj.flist_cfg - for f in flist: - f.sort() - fi = lenfl - while fi > 0: # remove data related to duplicates - fi -= 1 - if flist[fi] in flist[:fi]: - uvlist.pop(fi) - cfglist.pop(fi) - - img = None - if obj.tex != '': - if obj.tex in bl_images.keys(): - img = bl_images[obj.tex] - elif obj.tex not in missing_textures: - texfname = None - objtex = obj.tex - baseimgname = bsys.basename(objtex) - if bsys.exists(objtex) == 1: - texfname = objtex - elif bsys.exists(bsys.join(self.importdir, objtex)): - texfname = bsys.join(self.importdir, objtex) - else: - if baseimgname.find('\\') > 0: - baseimgname = bsys.basename(objtex.replace('\\','/')) - objtex = bsys.join(self.importdir, baseimgname) - if bsys.exists(objtex) == 1: - texfname = objtex - else: - objtex = bsys.join(TEXTURES_DIR, baseimgname) - if bsys.exists(objtex): - texfname = objtex - if texfname: - try: - img = Image.Load(texfname) - # Commented because it's unnecessary: - #img.xrep = int(obj.texrep[0]) - #img.yrep = int(obj.texrep[1]) - if img: - bl_images[obj.tex] = img - except: - inform("Couldn't load texture: %s" % baseimgname) - else: - missing_textures.append(obj.tex) - inform("Couldn't find texture: %s" % baseimgname) - - for i in range(facesnum): - f = obj.flist_cfg[i] - fmat = f[0] - is_smooth = f[1] - twoside = f[2] - bface = mesh.faces[i] - bface.smooth = is_smooth - if twoside: bface.mode |= FACE_TWOSIDE - if img: - bface.mode |= FACE_TEX - bface.image = img - bface.mat = objmat_indices.index(fmat) - fuv = obj.flist_uv[i] - if obj.texoff: - uoff = obj.texoff[0] - voff = obj.texoff[1] - urep = obj.texrep[0] - vrep = obj.texrep[1] - for uv in fuv: - uv[0] *= urep - uv[1] *= vrep - uv[0] += uoff - uv[1] += voff - - mesh.faces[i].uv = fuv - - # finally, delete the 1st vertex we added to prevent vindices == 0 - mesh.verts.delete(0) - - mesh.calcNormals() - - mesh.mode = MESH_AUTOSMOOTH - - # subdiv: create SUBSURF modifier in Blender - if SUBDIV and obj.subdiv > 0: - subdiv = obj.subdiv - subdiv_render = subdiv - # just to be safe: - if subdiv_render > 6: subdiv_render = 6 - if subdiv > 3: subdiv = 3 - modif = object.modifiers.append(Modifier.Types.SUBSURF) - modif[Modifier.Settings.LEVELS] = subdiv - modif[Modifier.Settings.RENDLEVELS] = subdiv_render - - obj_idx += 1 - - self.build_hierarchy() - scene.update() - -# End of class AC3DImport - -def filesel_callback(filename): - - inform("\nTrying to import AC3D model(s) from:\n%s ..." % filename) - Window.WaitCursor(1) - starttime = bsys.time() - test = AC3DImport(filename) - Window.WaitCursor(0) - endtime = bsys.time() - starttime - inform('Done! 
Data imported in %.3f seconds.\n' % endtime) - -Window.EditMode(0) - -Window.FileSelector(filesel_callback, "Import AC3D", "*.ac") diff --git a/release/scripts/add_mesh_empty.py b/release/scripts/add_mesh_empty.py deleted file mode 100644 index 537bd1e2c3d..00000000000 --- a/release/scripts/add_mesh_empty.py +++ /dev/null @@ -1,13 +0,0 @@ -#!BPY -""" -Name: 'Empty mesh' -Blender: 243 -Group: 'AddMesh' -""" -import BPyAddMesh -import Blender - -def main(): - BPyAddMesh.add_mesh_simple('EmptyMesh', [], [], []) - -main() \ No newline at end of file diff --git a/release/scripts/add_mesh_torus.py b/release/scripts/add_mesh_torus.py deleted file mode 100644 index 2941c56420e..00000000000 --- a/release/scripts/add_mesh_torus.py +++ /dev/null @@ -1,69 +0,0 @@ -#!BPY -""" -Name: 'Torus' -Blender: 243 -Group: 'AddMesh' -""" -import BPyAddMesh -import Blender -try: from math import cos, sin, pi -except: math = None - -def add_torus(PREF_MAJOR_RAD, PREF_MINOR_RAD, PREF_MAJOR_SEG, PREF_MINOR_SEG): - Vector = Blender.Mathutils.Vector - RotationMatrix = Blender.Mathutils.RotationMatrix - verts = [] - faces = [] - i1 = 0 - tot_verts = PREF_MAJOR_SEG * PREF_MINOR_SEG - for major_index in xrange(PREF_MAJOR_SEG): - verts_tmp = [] - mtx = RotationMatrix( 360 * float(major_index)/PREF_MAJOR_SEG, 3, 'z' ) - - for minor_index in xrange(PREF_MINOR_SEG): - angle = 2*pi*minor_index/PREF_MINOR_SEG - - verts.append( Vector(PREF_MAJOR_RAD+(cos(angle)*PREF_MINOR_RAD), 0, (sin(angle)*PREF_MINOR_RAD)) * mtx ) - if minor_index+1==PREF_MINOR_SEG: - i2 = (major_index)*PREF_MINOR_SEG - i3 = i1 + PREF_MINOR_SEG - i4 = i2 + PREF_MINOR_SEG - - else: - i2 = i1 + 1 - i3 = i1 + PREF_MINOR_SEG - i4 = i3 + 1 - - if i2>=tot_verts: i2 = i2-tot_verts - if i3>=tot_verts: i3 = i3-tot_verts - if i4>=tot_verts: i4 = i4-tot_verts - - faces.append( (i3,i4,i2,i1) ) - i1+=1 - - return verts, faces - -def main(): - Draw = Blender.Draw - PREF_MAJOR_RAD = Draw.Create(1.0) - PREF_MINOR_RAD = Draw.Create(0.25) - PREF_MAJOR_SEG = Draw.Create(48) - PREF_MINOR_SEG = Draw.Create(16) - - if not Draw.PupBlock('Add Torus', [\ - ('Major Radius:', PREF_MAJOR_RAD, 0.01, 100, 'Radius for the main ring of the torus'),\ - ('Minor Radius:', PREF_MINOR_RAD, 0.01, 100, 'Radius for the minor ring of the torus setting the thickness of the ring'),\ - ('Major Segments:', PREF_MAJOR_SEG, 3, 256, 'Number of segments for the main ring of the torus'),\ - ('Minor Segments:', PREF_MINOR_SEG, 3, 256, 'Number of segments for the minor ring of the torus'),\ - ]): - return - - verts, faces = add_torus(PREF_MAJOR_RAD.val, PREF_MINOR_RAD.val, PREF_MAJOR_SEG.val, PREF_MINOR_SEG.val) - - BPyAddMesh.add_mesh_simple('Torus', verts, [], faces) - -if cos and sin and pi: - main() -else: - Blender.Draw.PupMenu("Error%t|This script requires a full python installation") - diff --git a/release/scripts/animation_bake_constraints.py b/release/scripts/animation_bake_constraints.py deleted file mode 100644 index 16855828460..00000000000 --- a/release/scripts/animation_bake_constraints.py +++ /dev/null @@ -1,792 +0,0 @@ -#!BPY - -""" -Name: 'Bake Constraints' -Blender: 246 -Group: 'Animation' -Tooltip: 'Bake a Constrained object/rig to IPOs' -Fillename: 'Bake_Constraint.py' -""" - -__author__ = "Roger Wickes (rogerwickes(at)yahoo.com)" -__script__ = "Animation Bake Constraints" -__version__ = "0.7" -__url__ = ["Communicate problems and errors, http://www.blenderartists.com/forum/private.php?do=newpm to PapaSmurf"] -__email__= ["Roger Wickes, rogerwickes@yahoo.com", "scripts"] -__bpydoc__ = """\ 
- -bake_constraints - -This script bakes the real-world LocRot of an object (the net effect of any constraints - -(Copy, Limit, Track, Follow, - that affect Location, Rotation) -(usually one constrained to match another's location and/or Tracked to another) -and creates a clone with a set of Ipo Curves named Ipo -These curves control a non-constrained object and thus make it mimic the constrained object -Actions can then be edited without the need for the drivers/constraining objects - -Developed for use with MoCap data, where a bone is constrained to point at an empty -moving through space and time. This records the actual locrot of the armature -so that the motion can be edited, reoriented, scaled, and used as NLA Actions - -see also wiki Scripts/Manual/ Tutorial/Motion Capture
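A minimal sketch of the core idea: sample the object's constrained world-space location and rotation at the current frame, using the same matrixWorld / translationPart / toEuler calls as the script's getLocReal():

def sample_world_locrot(ob):
    # evaluate the constrained object's real (world-space) transform at the current frame
    m = ob.matrixWorld
    loc = m.translationPart()
    rot = m.toEuler()  # degrees in the 2.4x API
    return (loc.x, loc.y, loc.z, rot.x, rot.y, rot.z)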
- -Usage:
- - Select the reference Object(s) you want to bake
- - Set the frame range to bake in the Anim Panel
- - Set the test code (if you want a self-test) in the RT field in the Anim Panel
- -- Set RT:1 to create a test armature
- -- Set RT: up to 100 for more debug messages and status updates
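For reference, a minimal sketch of how the RT value gates those messages; the script reads it with Blender.Get('rt') and compares it against a per-message level:

import Blender

DEBUG = Blender.Get('rt')  # value of the RT field in the Anim panel

def debug(level, msg):
    # print only messages whose level is at or below the RT value
    if DEBUG >= level:
        print msg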
-
- - Run the script
- - The clone copy of the object is created and it has an IPO curve assigned to it.
- - The clone shadows the object by an offset locrot (see usrDelta)
- - That Object has Ipo Location and Rotation curves that make the clone mimic the movement
- of the selected object, but without using constraints.
- - If the object was an Armature, the clone's bones move identically in relation to the
- original armature, and an Action is created that drives the bone movements.
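A minimal sketch of the bake loop those steps drive: advance the frame so Blender evaluates the constraints, sample the world-space transform, and key the clone's six Ipo curves, following the same BezTriple usage as the script's addPoint(); the ipos list (the clone's LocX..RotZ curves) is assumed to exist:

import Blender
from Blender import BezTriple

def bake_range(ob, ipos, staframe, endframe):
    for frame in range(staframe, endframe + 1):
        Blender.Set('curframe', frame)  # constraints are evaluated on the frame change
        m = ob.matrixWorld              # real (constrained) world-space transform
        loc = m.translationPart()
        rot = m.toEuler()               # degrees; object Ipo rot curves store degrees/10
        key = [loc.x, loc.y, loc.z, rot.x / 10.0, rot.y / 10.0, rot.z / 10.0]
        for curve, value in zip(ipos, key):
            pt = BezTriple.New()        # one new key point per curve
            pt.pt = (frame, value)
            pt.handleTypes = [1, 1]
            curve.append(pt)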
- -Version History: - 0.1: bakes Loc Rot for a constrained object - 0.2: bakes Loc and Rot for the bones within Armature object - 0.3: UI for setting options - 0.3.1 add manual to script library - 0.4: bake multiple objects - 0.5: root bone worldspace rotation - 0.6: re-integration with BPyArmature - 0.7: bakes parents and leaves clones selected - -License, Copyright, and Attribution: - by Roger WICKES May 2008, released under Blender Artistic Licence to Public Domain - feel free to add to any Blender Python Scripts Bundle. - Thanks to Jean-Baptiste PERIN, IdeasMan42 (Campbell Barton), Basil_Fawlty/Cage_drei (Andrew Cruse) - much lifted/learned from blender.org/documentation/245PytonDoc and wiki - some modules based on c3D_Import.py, PoseLib16.py and IPO/Armature code examples e.g. camera jitter - -Pseudocode: - Initialize - If at least one object is selected - For each selected object, - create a cloned object - remove any constraints on the clone - create or reset an ipo curve named like the object - for each frame - set the clone's locrot key based on the reference object - if it's an armature, - create an action (which is an Ipo for each bone) - for each frame of the animation - for each bone in the armature - set the key - Else you're a smurf - -Test Conditions and Regressions: - 1. (v0.1) Non-armatures (the cube), with ipo curve and constraints at the object level - 2. armatures, with ipo curve and constraints at the object level - 3. armatures, with bones that have ipo curves and constraints - 4. objects without parents, children with unselected parents, select children first. - -Naming conventions: - arm = a specific objec type armature - bone = bones that make up the skeleton of an armature - - ob = object, an instance of an object type - ebone = edit bone, a bone in edit mode - pbone = pose bone, a posed bone in an object - tst = testing, self-test routines - usr = user-entered or designated stuff -""" -######################################## - -import Blender -from Blender import * -from Blender.Mathutils import * -import struct -import string -import bpy -import BPyMessages -import BPyArmature -# reload(BPyArmature) -from BPyArmature import getBakedPoseData - -Vector= Blender.Mathutils.Vector -Euler= Blender.Mathutils.Euler -Matrix= Blender.Mathutils.Matrix #invert() function at least -RotationMatrix = Blender.Mathutils.RotationMatrix -TranslationMatrix= Blender.Mathutils.TranslationMatrix -Quaternion = Blender.Mathutils.Quaternion -Vector = Blender.Mathutils.Vector -POSE_XFORM= [Blender.Object.Pose.LOC, Blender.Object.Pose.ROT] - -#================= -# Global Variables -#================= - -# set senstitivity for displaying debug/console messages. 0=none, 100=max -# then call debug(num,string) to conditionally display status/info in console window -MODE=Blender.Get('rt') #execution mode: 0=run normal, 1=make test armature -DEBUG=Blender.Get('rt') #how much detail on internal processing for user to see. range 0-100 -BATCH=False #called from command line? is someone there? Would you like some cake? - -#there are two coordinate systems, the real, or absolute 3D space, -# and the local relative to a parent. -COORDINATE_SYSTEMS = ['local','real'] -COORD_LOCAL = 0 -COORD_REAL = 1 - -# User Settings - Change these options manually or via GUI (future TODO) -usrCoord = COORD_REAL # what the user wants -usrParent = False # True=clone keeps original parent, False = clone's parent is the clone of the original parent (if cloned) -usrFreeze = 2 #2=yes, 0=no. 
Freezes shadow object in place at current frame as origin -# delta is amount to offset/change from the reference object. future set in a ui, so technically not a constant -usrDelta = [10,10,0,0,0,0] #order specific - Loc xyz Rot xyz -usrACTION = True # Offset baked Action frames to start at frame 1 - -CURFRAME = 'curframe' #keyword to use when getting the frame number that the scene is presently on -ARMATURE = 'Armature' #en anglais -BONE_SPACES = ['ARMATURESPACE','BONESPACE'] - # 'ARMATURESPACE' - this matrix of the bone in relation to the armature - # 'BONESPACE' - the matrix of the bone in relation to itself - -#Ipo curves created are prefixed with a name, like Ipo_ or Bake_ followed by the object/bone name -#bakedArmName = "b." #used for both the armature class and object instance -usrObjectNamePrefix= "" -#ipoBoneNamePrefix = "" -# for example, if on entry an armature named Man was selected, and the object prefix was "a." -# on exit an armature and an IPO curve named a.Man exists for the object as a whole -# if that armature had bones (spine, neck, arm) and the bone prefix was "a." -# the bones and IPO curves will be (a.spine, a.neck, a.arm) - -R2D = 18/3.141592653589793 # radian to grad -BLENDER_VERSION = Blender.Get('version') - -# Gets the current scene, there can be many scenes in 1 blend file. -scn = Blender.Scene.GetCurrent() - -#================= -# Methods -#================= -######################################## -def debug(num,msg): #use log4j or just console here. - if DEBUG >= num: - if BATCH == False: - print 'debug: '[:num/10+7]+msg - #TODO: else write out to file (runs faster if it doesnt have to display details) - return - -######################################## -def error(str): - debug(0,'ERROR: '+str) - if BATCH == False: - Draw.PupMenu('ERROR%t|'+str) - return - -######################################## -def getRenderInfo(): - context=scn.getRenderingContext() - staframe = context.startFrame() - endframe = context.endFrame() - if endframe= 1: - for i in range(10): - curFrame+=frameinc - Blender.Set(CURFRAME,curFrame) # computes the constrained location of the 'real' objects - Blender.Redraw() - Blender.Set(CURFRAME, staFrame) - return - -######################################## -def bakeBones(ref_ob,arm_ob): #copy pose from ref_ob to arm_ob - scrub() - staFrame,endFrame,curFrame = getRenderInfo() - act = getBakedPoseData(ref_ob, staFrame, endFrame, ACTION_BAKE = True, ACTION_BAKE_FIRST_FRAME = usrACTION) # bake the pose positions of the reference ob to the armature ob - arm_ob.action = act - scrub() - - # user comprehension feature - change action name and channel ipo names to match the names of the bone they drive - debug (80,'Renaming each action ipo to match the bone they pose') - act.name = arm_ob.name - arm_channels = act.getAllChannelIpos() - pose= arm_ob.getPose() - pbones= pose.bones.values() #we want the bones themselves, not the dictionary lookup - for pbone in pbones: - debug (100,'Channel listing for %s: %s' % (pbone.name,arm_channels[pbone.name] )) - ipo=arm_channels[pbone.name] - ipo.name = pbone.name # since bone names are unique within an armature, the pose names can be the same since they are within an Action - - return - -######################################## -def getOrCreateCurve(ipo, curvename): - """ - Retrieve or create a Blender Ipo Curve named C{curvename} in the C{ipo} Ipo - Either an ipo curve named C{curvename} exists before the call then this curve is returned, - Or such a curve doesn't exist before the call .. 
then it is created into the c{ipo} Ipo and returned - """ - try: - mycurve = ipo.getCurve(curvename) - if mycurve != None: - pass - else: - mycurve = ipo.addCurve(curvename) - except: - mycurve = ipo.addCurve(curvename) - return mycurve - -######################################## -def eraseCurve(ipo,numCurves): - debug(90,'Erasing %i curves for %' % (numCurves,ipo.GetName())) - for i in range(numCurves): - nbBezPoints= ipo.getNBezPoints(i) - for j in range(nbBezPoints): - ipo.delBezPoint(i) - return - -######################################## -def resetIPO(ipo): - debug(60,'Resetting ipo curve named %s' %ipo.name) - numCurves = ipo.getNcurves() #like LocX, LocY, etc - if numCurves > 0: - eraseCurve(ipo, numCurves) #erase data if one exists - return - -######################################## -def resetIPOs(ob): #resets all IPO curvess assocated with an object and its bones - debug(30,'Resetting any ipo curves linked to %s' %ob.getName()) - ipo = ob.getIpo() #may be None - ipoName = ipo.getName() #name of the IPO that guides/controls this object - debug(70,'Object IPO is %s' %ipoName) - try: - ipo = Ipo.Get(ipoName) - except: - ipo = Ipo.New('Object', ipoName) - resetIPO(ipo) - if ob.getType() == ARMATURE: - arm_data=ob.getData() - bones=arm_data.bones.values() - for bone in bones: - #for each bone: get the name and check for a Pose IPO - debug(10,'Processing '+ bone.name) - return - -######################################## -def parse(string,delim): - index = string.find(delim) # -1 if not found, else pointer to delim - if index+1: return string[:index] - return string - -######################################## -def newIpo(ipoName): #add a new Ipo object to the Blender scene - ipo=Blender.Ipo.New('Object',ipoName) - - ipo.addCurve('LocX') - ipo.addCurve('LocY') - ipo.addCurve('LocZ') - ipo.addCurve('RotX') - ipo.addCurve('RotY') - ipo.addCurve('RotZ') - return ipo - -######################################## -def makeUpaName(type,name): #i know this exists in Blender somewhere... - debug(90,'Making up a new %s name using %s as a basis.' % (type,name)) - name = (parse(name,'.')) - if type == 'Ipo': - ipoName = name # maybe we get lucky today - ext = 0 - extlen = 3 # 3 digit extensions, like hello.002 - success = False - while not(success): - try: - debug(100,'Trying %s' % ipoName) - ipo = Ipo.Get(ipoName) - #that one exists if we get here. add on extension and keep trying - ext +=1 - if ext>=10**extlen: extlen +=1 # go to more digits if 999 not found - ipoName = '%s.%s' % (name, str(ext).zfill(extlen)) - except: # could not find it - success = True - name=ipoName - else: - debug (0,'FATAL ERROR: I dont know how to make up a new %s name based on %s' % (type,ob)) - return None - return name - -######################################## -def createIpo(ob): #create an Ipo and curves and link them to this object - #first, we have to create a unique name - #try first with just the name of the object to keep things simple. 
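# Illustration (added sketch, not part of the original script): a minimal,
# Blender-free version of the same ".001"-style fallback that makeUpaName()
# above implements, checked against a plain set of names instead of
# Ipo.Get().  The helper name unique_name() and its arguments are
# hypothetical and exist only for this example.
def unique_name(base, existing, extlen=3):
    name = base
    ext = 0
    while name in existing:            # name taken, try the next numbered suffix
        ext += 1
        if ext >= 10 ** extlen:        # widen past .999 to four digits, and so on
            extlen += 1
        name = '%s.%s' % (base, str(ext).zfill(extlen))
    return name

# unique_name('Man', set(['Man', 'Man.001']))  ->  'Man.002'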
- ipoName = makeUpaName('Ipo',ob.getName()) # make up a name for a new Ipo based on the object name - debug(20,'Ipo and LocRot curves called %s' % ipoName) - ipo=newIpo(ipoName) - ob.setIpo(ipo) #link them - return ipo - -######################################## -def getLocLocal(ob): - key = [ - ob.LocX, - ob.LocY, - ob.LocZ, - ob.RotX*R2D, #get the curves in this order - ob.RotY*R2D, - ob.RotZ*R2D - ] - return key - -######################################## -def getLocReal(ob): - obMatrix = ob.matrixWorld #Thank you IdeasMan42 - loc = obMatrix.translationPart() - rot = obMatrix.toEuler() - key = [ - loc.x, - loc.y, - loc.z, - rot.x/10, - rot.y/10, - rot.z/10 - ] - return key - -######################################## -def getLocRot(ob,space): - if space in xrange(len(COORDINATE_SYSTEMS)): - if space == COORD_LOCAL: - key = getLocLocal(ob) - return key - elif space == COORD_REAL: - key = getLocReal(ob) - return key - else: #hey, programmers make mistakes too. - debug(0,'Fatal Error: getLoc called with %i' % space) - return - -######################################## -def getCurves(ipo): - ipos = [ - ipo[Ipo.OB_LOCX], - ipo[Ipo.OB_LOCY], - ipo[Ipo.OB_LOCZ], - ipo[Ipo.OB_ROTX], #get the curves in this order - ipo[Ipo.OB_ROTY], - ipo[Ipo.OB_ROTZ] - ] - return ipos - -######################################## -def addPoint(time,keyLocRot,ipos): - if BLENDER_VERSION < 245: - debug(0,'WARNING: addPoint uses BezTriple') - for i in range(len(ipos)): - point = BezTriple.New() #this was new with Blender 2.45 API - point.pt = (time, keyLocRot[i]) - point.handleTypes = [1,1] - - ipos[i].append(point) - return ipos - -######################################## -def bakeFrames(ob,myipo): #bakes an object in a scene, returning the IPO containing the curves - myipoName = myipo.getName() - debug(20,'Baking frames for scene %s object %s to ipo %s' % (scn.getName(),ob.getName(),myipoName)) - ipos = getCurves(myipo) - #TODO: Gui setup idea: myOffset - # reset action to start at frame 1 or at location - myOffset=0 #=1-staframe - #loop through frames in the animation. Often, there is rollup and the mocap starts late - staframe,endframe,curframe = getRenderInfo() - for frame in range(staframe, endframe+1): - debug(80,'Baking Frame %i' % frame) - #tell Blender to advace to frame - Blender.Set(CURFRAME,frame) # computes the constrained location of the 'real' objects - if not BATCH: Blender.Redraw() # no secrets, let user see what we are doing - - #using the constrained Loc Rot of the object, set the location of the unconstrained clone. Yea! 
Clones are FreeMen - key = getLocRot(ob,usrCoord) #a key is a set of specifed exact channel values (LocRotScale) for a certain frame - key = [a+b for a,b in zip(key, usrDelta)] #offset to the new location - - myframe= frame+myOffset - Blender.Set(CURFRAME,myframe) - - time = Blender.Get('curtime') #for BezTriple - ipos = addPoint(time,key,ipos) #add this data at this time to the ipos - debug(100,'%s %i %.3f %.2f %.2f %.2f %.2f %.2f %.2f' % (myipoName, myframe, time, key[0], key[1], key[2], key[3], key[4], key[5])) - # eye-candy - smoothly rewind the animation, showing now how the clone match moves - if endframe-staframe <400 and not BATCH: - for frame in range (endframe,staframe,-1): #rewind - Blender.Set(CURFRAME,frame) # computes the constrained location of the 'real' objects - Blender.Redraw() - Blender.Set(CURFRAME,staframe) - Blender.Redraw() - - return ipos - -######################################## -def duplicateLinked(ob): - obType = ob.type - debug(10,'Duplicating %s Object named %s' % (obType,ob.getName())) - scn.objects.selected = [ob] -## rdw: simplified by just duplicating armature. kept code as reference for creating armatures -## disadvantage is that you cant have clone as stick and original as octahedron -## since they share the same Armature. User can click Make Single User button. -## if obType == ARMATURE: #build a copy from scratch -## myob= dupliArmature(ob) -## else: - Blender.Object.Duplicate() # Duplicate linked, including pose constraints. - myobs = Object.GetSelected() #duplicate is top on the list - myob = myobs[0] - if usrParent == False: - myob.clrParent(usrFreeze) - debug(20,'=myob= was created as %s' % myob.getName()) - return myob - -######################################## -def removeConstraints(ob): - for const in ob.constraints: - debug(90,'removed %s => %s' % (ob.name, const)) - ob.constraints.remove(const) - return - -######################################## -def removeConstraintsOb(ob): # from object or armature - debug(40,'Removing constraints from '+ob.getName()) - if BLENDER_VERSION > 241: #constraints module not available before 242 - removeConstraints(ob) - if ob.getType() == ARMATURE: - pose = ob.getPose() - for pbone in pose.bones.values(): - #bone = pose.bones[bonename] - removeConstraints(pbone) - #should also check if it is a deflector? - return - -######################################## -def deLinkOb(type,ob): #remove linkages - if type == 'Ipo': - success = ob.clearIpo() #true=there was one - if success: debug(80,'deLinked Ipo curve to %s' % ob.getName()) - return - -######################################## -def bakeObject(ob): #bakes the core object locrot and assigns the Ipo to a Clone - if ob != None: - # Clone the object - duplicate it, clean the clone, and create an ipo curve for the clone - myob = duplicateLinked(ob) #clone it - myob.setName(usrObjectNamePrefix + ob.getName()) - removeConstraintsOb(myob) #my object is a free man - deLinkOb('Ipo',myob) #kids, it's not nice to share. 
you've been lied to - if ob.getType() != ARMATURE: # baking armatures is based on bones, not object - myipo = createIpo(myob) #create own IPO and curves for the clone object - ipos = bakeFrames(ob,myipo) #bake the locrot for this obj for the scene frames - return myob - -######################################## -def bake(ob,par): #bakes an object of any type, linking it to parent - debug(0,'Baking %s object %s' % (ob.getType(), ob)) - clone = bakeObject(ob) #creates and bakes the object motion - if par!= None: - par.makeParent([clone]) - debug(20,"assigned object to parent %s" % par) - if ob.getType() == ARMATURE: -## error('Object baked. Continue with bones?') - bakeBones(ob,clone) #go into the bones and copy from -> to in frame range - #future idea: bakeMesh (net result of Shapekeys, Softbody, Cloth, Fluidsim,...) - return clone - -######################################## -def tstCreateArm(): #create a test armature in scene - # rip-off from http://www.blender.org/documentation/245PythonDoc/Pose-module.html - thank you! - - debug(0,'Making Test Armature') - # New Armature - arm_data= Armature.New('myArmature') - print arm_data - arm_ob = scn.objects.new(arm_data) - arm_data.makeEditable() - - # Add 4 bones - ebones = [Armature.Editbone(), Armature.Editbone(), Armature.Editbone(), Armature.Editbone()] - - # Name the editbones - ebones[0].name = 'Bone.001' - ebones[1].name = 'Bone.002' - ebones[2].name = 'Bone.003' - ebones[3].name = 'Bone.004' - - # Assign the editbones to the armature - for eb in ebones: - arm_data.bones[eb.name]= eb - - # Set the locations of the bones - ebones[0].head= Mathutils.Vector(0,0,0) - ebones[0].tail= Mathutils.Vector(0,0,1) #tip - ebones[1].head= Mathutils.Vector(0,0,1) - ebones[1].tail= Mathutils.Vector(0,0,2) - ebones[2].head= Mathutils.Vector(0,0,2) - ebones[2].tail= Mathutils.Vector(0,0,3) - ebones[3].head= Mathutils.Vector(0,0,3) - ebones[3].tail= Mathutils.Vector(0,0,4) - - ebones[1].parent= ebones[0] - ebones[2].parent= ebones[1] - ebones[3].parent= ebones[2] - - arm_data.update() - # Done with editing the armature - - # Assign the pose animation - arm_pose = arm_ob.getPose() - - act = arm_ob.getAction() - if not act: # Add a pose action if we dont have one - act = Armature.NLA.NewAction() - act.setActive(arm_ob) - - xbones=arm_ob.data.bones.values() - pbones = arm_pose.bones.values() - - frame = 1 - for pbone in pbones: # set bones to no rotation - pbone.quat[:] = 1.000,0.000,0.000,0.0000 - pbone.insertKey(arm_ob, frame, Object.Pose.ROT) - - # Set a different rotation at frame 25 - pbones[0].quat[:] = 1.000,0.1000,0.2000,0.20000 - pbones[1].quat[:] = 1.000,0.6000,0.5000,0.40000 - pbones[2].quat[:] = 1.000,0.1000,0.3000,0.40000 - pbones[3].quat[:] = 1.000,-0.2000,-0.3000,0.30000 - - frame = 25 - for i in xrange(4): - pbones[i].insertKey(arm_ob, frame, Object.Pose.ROT) - - pbones[0].quat[:] = 1.000,0.000,0.000,0.0000 - pbones[1].quat[:] = 1.000,0.000,0.000,0.0000 - pbones[2].quat[:] = 1.000,0.000,0.000,0.0000 - pbones[3].quat[:] = 1.000,0.000,0.000,0.0000 - - frame = 50 - for pbone in pbones: # set bones to no rotation - pbone.quat[:] = 1.000,0.000,0.000,0.0000 - pbone.insertKey(arm_ob, frame, Object.Pose.ROT) - - return arm_ob - -######################################## -def tstMoveOb(ob): # makes a simple LocRot animation of object in the scene - anim = [ - #Loc Rot/10 - # - ( 0,0,0, 0, 0, 0), #frame 1 origin - ( 1,0,0, 0, 0, 0), #frame 2 - ( 1,1,0, 0, 0, 0), - ( 1,1,1, 0, 0, 0), - ( 1,1,1,4.5, 0, 0), - ( 1,1,1,4.5,4.5, 0), - ( 1,1,1,4.5,4.5,4.5) - ] 
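# Illustration (added sketch, not part of the original script): the Rot
# columns in the anim table above follow Blender's Ipo convention of storing
# rotations as degrees/10, which is what R2D = 18/pi and the rot.x/10
# division in getLocReal() encode.  The helper below is hypothetical and
# only shows the unit conversion: a stored value of 4.5 means 45 degrees.
import math

def ipo_rot_to_radians(stored):
    return stored * 10.0 * math.pi / 180.0     # degrees/10 -> radians

# ipo_rot_to_radians(4.5) -> 0.7853981...  (45 degrees, i.e. pi/4)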
- space = COORD_LOCAL - ipo = createIpo(ob) #create an Ipo and curves for this object - ipos = getCurves(ipo) - - # span this motion over the currently set anim range - # to set points, i need time but do not know how it is computed, so will have to advance the animation - staframe,endframe,curframe = getRenderInfo() - - frame = staframe #x position of new ipo datapoint. set to staframe if you want a match - frameDelta=(endframe-staframe)/(len(anim)) #accomplish the animation in frame range - for key in anim: #effectively does a getLocRot() - #tell Blender to advace to frame - Blender.Set('curframe',frame) # computes the constrained location of the 'real' objects - time = Blender.Get('curtime') - - ipos = addPoint(time,key,ipos) #add this data at this time to the ipos - - debug(100,'%s %i %.3f %.2f %.2f %.2f %.2f %.2f %.2f' % (ipo.name, frame, time, key[0], key[1], key[2], key[3], key[4], key[5])) - frame += frameDelta - Blender.Set(CURFRAME,curframe) # reset back to where we started - return -#================= -# Program Template -#================= -######################################## -def main(): - # return code set via rt button in Blender Buttons Scene Context Anim panel - if MODE == 1: #create test armature #1 - ob = tstCreateArm() # make test arm and select it - tstMoveOb(ob) - scn.objects.selected = [ob] - - obs= Blender.Object.GetSelected() #scn.objects.selected - obs= sortObjects(obs) - debug(0,'Baking %i objects' % len(obs)) - - if len(obs) >= 1: # user might have multiple objects selected - i= 0 - clones=[] # my clone army - for ob in obs: - par= ob.getParent() - if not usrParent: - if par in obs: - par= clones[obs.index(par)] - clones.append(bake(ob,par)) - scn.objects.selected = clones - else: - error('Please select at least one object') - return - -######################################## -def benchmark(): # This lets you benchmark (time) the script's running duration - Window.WaitCursor(1) - t = sys.time() - debug(60,'%s began at %.0f' %(__script__,sys.time())) - - # Run the function on the active scene - in_editmode = Window.EditMode() - if in_editmode: Window.EditMode(0) - - main() - - if in_editmode: Window.EditMode(1) - - # Timing the script is a good way to be aware on any speed hits when scripting - debug(0,'%s Script finished in %.2f seconds' % (__script__,sys.time()-t) ) - Window.WaitCursor(0) - return - -######################################## -# This lets you can import the script without running it -if __name__ == '__main__': - debug(0, "------------------------------------") - debug(0, "%s %s Script begins with mode=%i debug=%i batch=%s" % (__script__,__version__,MODE,DEBUG,BATCH)) - benchmark() diff --git a/release/scripts/animation_clean.py b/release/scripts/animation_clean.py deleted file mode 100644 index fc44f264ac1..00000000000 --- a/release/scripts/animation_clean.py +++ /dev/null @@ -1,192 +0,0 @@ -#!BPY - -""" -Name: 'Clean Animation Curves' -Blender: 249 -Group: 'Animation' -Tooltip: 'Remove unused keyframes for ipo curves' -""" - -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Copyright (C) 2008-2009: Blender Foundation -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# -------------------------------------------------------------------------- - -import bpy -from Blender import IpoCurve, Draw, Window - -def clean_ipos(ipos): - eul = 0.001 - - def isflat(vec): - prev_y = vec[0][1] - mid_y = vec[1][1] - next_y = vec[2][1] - - # flat status for prev and next - return abs(mid_y-prev_y) < eul, abs(mid_y-next_y) < eul - - - - X=0 - Y=1 - PREV=0 - MID=1 - NEXT=2 - - LEFT = 0 - RIGHT = 1 - - TOT = 0 - TOTBEZ = 0 - # for ipo in bpy.data.ipos: - for ipo in ipos: - if ipo.lib: - continue - # print ipo - for icu in ipo: - interp = icu.interpolation - extend = icu.extend - - bezierPoints = icu.bezierPoints - bezierVecs = [bez.vec for bez in bezierPoints] - - l = len(bezierPoints) - - TOTBEZ += l - - # our aim is to simplify this ipo as much as possible! - if interp == IpoCurve.InterpTypes.BEZIER or interp == interp == IpoCurve.InterpTypes.LINEAR: - #print "Not yet supported" - - if interp == IpoCurve.InterpTypes.BEZIER: - flats = [isflat(bez) for bez in bezierVecs] - else: - # A bit of a waste but fake the locations for these so they will always be flats - # IS better then too much duplicate code. - flats = [(True, True)] * l - for v in bezierVecs: - v[PREV][Y] = v[NEXT][Y] = v[MID][Y] - - - # remove middle points - if l>2: - done_nothing = False - - while not done_nothing and len(bezierVecs) > 2: - done_nothing = True - i = l-2 - - while i > 0: - #print i - #print i, len(bezierVecs) - if flats[i]==(True,True) and flats[i-1][RIGHT] and flats[i+1][LEFT]: - - if abs(bezierVecs[i][MID][Y] - bezierVecs[i-1][MID][Y]) < eul and abs(bezierVecs[i][MID][Y] - bezierVecs[i+1][MID][Y]) < eul: - done_nothing = False - - del flats[i] - del bezierVecs[i] - icu.delBezier(i) - TOT += 1 - l-=1 - i-=1 - - # remove endpoints - if extend == IpoCurve.ExtendTypes.CONST and len(bezierVecs) > 1: - #print l, len(bezierVecs) - # start - - while l > 2 and (flats[0][RIGHT] and flats[1][LEFT] and (abs(bezierVecs[0][MID][Y] - bezierVecs[1][MID][Y]) < eul)): - print "\tremoving 1 point from start of the curve" - del flats[0] - del bezierVecs[0] - icu.delBezier(0) - TOT += 1 - l-=1 - - - # End - while l > 2 and flats[-2][RIGHT] and flats[-1][LEFT] and (abs(bezierVecs[-2][MID][Y] - bezierVecs[-1][MID][Y]) < eul): - print "\tremoving 1 point from end of the curve", l - del flats[l-1] - del bezierVecs[l-1] - icu.delBezier(l-1) - TOT += 1 - l-=1 - - - - if l==2: - if isflat( bezierVecs[0] )[RIGHT] and isflat( bezierVecs[1] )[LEFT] and abs(bezierVecs[0][MID][Y] - bezierVecs[1][MID][Y]) < eul: - # remove the second point - print "\tremoving 1 point from 2 point bez curve" - # remove the second point - del flats[1] - del bezierVecs[1] - icu.delBezier(1) - TOT+=1 - l-=1 - - # Change to linear for faster evaluation - ''' - if l==1: - print 'Linear' - icu.interpolation = IpoCurve.InterpTypes.LINEAR - ''' - - - - - if interp== IpoCurve.InterpTypes.CONST: - print "Not yet supported" - - print 'total', TOT, TOTBEZ - return TOT, TOTBEZ - -def main(): - ret = Draw.PupMenu('Clean Selected Objects Ipos%t|Object IPO%x1|Object Action%x2|%l|All IPOs (be careful!)%x3') - - sce = bpy.data.scenes.active - ipos = [] - - if ret == 3: 
- ipos.extend(list(bpy.data.ipos)) - else: - for ob in sce.objects.context: - if ret == 1: - ipo = ob.ipo - if ipo: - ipos.append(ipo) - - elif ret == 2: - action = ob.action - if action: - ipos.extend([ipo for ipo in action.getAllChannelIpos().values() if ipo]) - - - - if not ipos: - Draw.PupMenu('Error%t|No ipos found') - else: - total_removed, total = clean_ipos(ipos) - Draw.PupMenu('Done!%t|Removed ' + str(total_removed) + ' of ' + str(total) + ' points') - - Window.RedrawAll() - - -if __name__ == '__main__': - main() diff --git a/release/scripts/animation_trajectory.py b/release/scripts/animation_trajectory.py deleted file mode 100644 index 55a670b66b1..00000000000 --- a/release/scripts/animation_trajectory.py +++ /dev/null @@ -1,575 +0,0 @@ -#!BPY - -""" Registration info for Blender menus: <- these words are ignored -Name: 'Trajectory' -Blender: 243 -Group: 'Animation' -Tip: 'See Trajectory of selected object' -""" - -__author__ = '3R - R3gis' -__version__ = '2.43' -__url__ = ["Script's site , http://blenderfrance.free.fr/python/Trajectory_en.htm","Author's site , http://cybercreator.free.fr", "French Blender support forum, http://www.zoo-logique.org/3D.Blender/newsportal/thread.php?group=3D.Blender"] -__email__=["3R, r3gis@free.fr"] - - -__bpydoc__ = """ - -Usage: - -* Launch with alt+P (or put it in .script folder) - -Allow to see in real time trajectory of selected object. - -On first run, it ask you -- If you want that actually selected object have they trajectory always shown -- If you want to use Space Handler or a Scriptlink in Redraw mode -- Future and Past : it is the frame in past and future -of the beggining and the end of the path -- Width of line that represent the trajectory - -Then the object's trajectory will be shown in all 3D areas. -When trajectory is red, you can modifiy it by moving object. -When trajectory is blue and you want to be able to modify it, inser a Key (I-Key) - -Points appears on trajectory : -- Left Clic to modify position -- Right Clic to go to the frame it represents - -Notes:
-In scriptlink mode, it create one script link so make sure that 'Enable Script Link' toogle is on -In SpaceHandler mode, you have to go in View>>SpaceHandlerScript menu to activate Trajectory - - -""" - - -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Copyright (C) 2004-2006: Regis Montoya -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- -################################# -# by 3R - 26/08/05 -# for any problem : -# r3gis@free.fr -# ou sur le newsgroup: -# http://zoo-logique.org/3D.Blender/ -################################# -#Many thanks to cambo for his fixes -################################# - - - -import Blender - - -scene= Blender.Scene.GetCurrent() - - -#Writing -def write_script(name, script): - global scene - #List texts and their name - #write : type of writing : 1->New, 2->Overwrite - scripting= None - for text in Blender.Text.Get(): - if text.name==name and text.asLines()[1] != "#"+str(__version__): - scripting = text - scripting.clear() - scripting.write(script) - break - - if not scripting: - scripting= Blender.Text.New(name) - scripting.write(script) - -def link_script(name, type): - global scene - scriptlinks = scene.getScriptLinks(type) # none or list - if not scriptlinks or name not in scriptlinks: - scene.addScriptLink(name, type) - - -#Deleting of a text -def text_remove(name): - global scene - #try to delete text if already linked - try: - text= Blender.Text.Get(name) - # Texte.clear() - scene.clearScriptLinks([name]) - Blender.Text.unlink(text) - except: - print('---Initialisation of Trajectory_'+str(__version__)+'.py---') - -#Whether is already running, also check if it's the last version of the script : second line contain the version fo the script -ask_modif= 0 # Default -for text in Blender.Text.Get(): - if text.name == 'Trajectory' and text.asLines()[1] == "#"+str(__version__): - #We ask if script modify his seetings, keep it or stop script - ask_modif= Blender.Draw.PupMenu("Script already launch %t|Modify settings%x0|Keep settings%x1|Stop script%x2|") - if ask_modif==-1: # user canceled. - ask_modif= 1 - break - -selection_mode= 0 -future= 35 -past= 20 -width= 2 - -#In modify case -if ask_modif==0: - handle_mode= Blender.Draw.Create(0) - selection_mode= Blender.Draw.Create(0) - future= Blender.Draw.Create(35) - past= Blender.Draw.Create(20) - width= Blender.Draw.Create(2) - - block= [] - block.append(("Space Handlers", handle_mode, "You have to activate for each area by View>>SpaceHandler")) #You can delete this option... 
- block.append(("Always Draw", selection_mode, "Selected object will have their trajectory always shown")) - block.append(("Past :", past, 1, 900)) - block.append(("Futur:", future, 1, 900)) - block.append(("Width:", width, 1,5)) - - if not Blender.Draw.PupBlock("Trajectory seetings", block): - ask_modif=1 - - handle_mode= handle_mode.val - selection_mode= selection_mode.val - future= future.val - past= past.val - width= width.val - - -#put names of selected objects in objects_select if option choosen by user -if selection_mode==1: - objects_select= [ob.name for ob in scene.objects.context] -else: - objects_select= [] - - -try: - if handle_mode==1: - DrawPart="#SPACEHANDLER.VIEW3D.DRAW\n" - else: - DrawPart="#!BPY\n" -except:DrawPart="#BadlyMade" - - -#Here is the script to write in Blender and to link, options are also written now -DrawPart=DrawPart+"#"+str(__version__)+""" -#This script is a part of Trajectory.py and have to be linked to the scene in Redraw if not in HANDLER mode. -#Author : 3R - Regis Montoya -#It's better to use the Trajectory_"version_number".py -#You can modify the two following value to change the path settings -future="""+str(future)+""" -past="""+str(past)+""" -object_init_names="""+str(objects_select)+""" - - -import Blender, math -from Blender import BGL, Draw, Ipo -from Blender.BGL import * -from Blender.Draw import * -from math import * - -from Blender.Mathutils import Vector - -#take actual frame -frameC=Blender.Get('curframe') -scene = Blender.Scene.GetCurrent() -render_context=scene.getRenderingContext() -#ajust number of frames with NewMap and OldMapvalue values -k=1.00*render_context.oldMapValue()/render_context.newMapValue() -if k<1: - tr=-1*int(log(k*0.1, 10)) -else: - tr=-1*int(log(k, 10)) -#The real and integer frame to compare to ipos keys frames -frameCtr=round(frameC*k, tr) -frameCr=frameC*k -frameC=int(round(frameC*k, 0)) - - -#List objects that we have to show trajectory in $objects -# In this case, using a dict for unique objects is the fastest way. -object_dict= dict([(ob.name, ob) for ob in scene.objects.context]) -for obname in object_init_names: - if not object_dict.has_key(obname): - try: # Object may be removed. - object_dict[obname]= Blender.Object.Get(obname) - except: - pass # object was removed. 
- -#This fonction give the resulting matrix of all parents at a given frame -#parent_list is the list of all parents [object, matrix, locX_ipo, locY, Z, rotX, Y, Z, sizeX, Y, Z] of current object -def matrixForTraj(frame, parent_list): - DecMatC=Blender.Mathutils.Matrix([1,0,0,0], [0,1,0,0], [0,0,1,0], [0,0,0,1]) - - for parent_data in parent_list: - parent_ob= parent_data[0] - - try: X= parent_data[5][frame]*pi/18 - except: X= parent_ob.RotX - try: Y= parent_data[6][frame]*pi/18 - except: Y= parent_ob.RotY - try: Z= parent_data[7][frame]*pi/18 - except: Z= parent_ob.RotZ - try: LX= parent_data[2][frame] - except: LX= parent_ob.LocX - try: LY= parent_data[3][frame] - except: LY= parent_ob.LocY - try: LZ= parent_data[4][frame] - except: LZ= parent_ob.LocZ - try: SX= parent_data[8][frame] - except: SX= parent_ob.SizeX - try: SY= parent_data[9][frame] - except: SY= parent_ob.SizeY - try: SZ= parent_data[10][frame] - except: SZ= parent_ob.SizeZ - - NMat=Blender.Mathutils.Matrix([cos(Y)*cos(Z)*SX,SX*cos(Y)*sin(Z),-SX*sin(Y),0], - [(-cos(X)*sin(Z)+sin(Y)*sin(X)*cos(Z))*SY,(sin(X)*sin(Y)*sin(Z)+cos(X)*cos(Z))*SY,sin(X)*cos(Y)*SY,0], - [(cos(X)*sin(Y)*cos(Z)+sin(X)*sin(Z))*SZ,(cos(X)*sin(Y)*sin(Z)-sin(X)*cos(Z))*SZ,SZ*cos(X)*cos(Y),0], - [LX,LY,LZ,1]) - DecMatC=DecMatC*parent_data[1]*NMat - return DecMatC - -##### -TestLIST=[] -matview=Blender.Window.GetPerspMatrix() -########### -#Fonction to draw trajectories -########### - -def Trace_Traj(ob): - global TestLIST, matview - #we draw trajectories for all objects in list - - LocX=[] - LocY=[] - LocZ=[] - #List with trajectories' vertexs - vertexX=[] - - contextIpo= ob.ipo - if contextIpo: - ipoLocX=contextIpo[Ipo.OB_LOCX] - ipoLocY=contextIpo[Ipo.OB_LOCY] - ipoLocZ=contextIpo[Ipo.OB_LOCZ] - ipoTime=contextIpo[Ipo.OB_TIME] - else: # only do if there is no IPO (if no ipo curves : return None object and don't go in this except) - ipoLocX= ipoLocY= ipoLocZ= ipoTime= None - - if ipoTime: - return 0 - - #Get all parents of ob - parent=ob.parent - backup_ob= ob - child= ob - parent_list= [] - - #Get parents's infos : - #list of [name, initial matrix at make parent, ipo in X,Y,Z,rotX,rotY,rotZ,sizeX,Y,Z] - while parent: - Init_Mat=Blender.Mathutils.Matrix(child.getMatrix('worldspace')) #must be done like it (it isn't a matrix otherwise) - Init_Mat.invert() - Init_Mat=Init_Mat*child.getMatrix('localspace') - Init_Mat=parent.getMatrix()*Init_Mat - Init_Mat.invert() - - contextIpo= parent.ipo # None or IPO - if contextIpo: - ipo_Parent_LocX=contextIpo[Ipo.OB_LOCX] - ipo_Parent_LocY=contextIpo[Ipo.OB_LOCY] - ipo_Parent_LocZ=contextIpo[Ipo.OB_LOCZ] - ipo_Parent_RotX=contextIpo[Ipo.OB_ROTX] - ipo_Parent_RotY=contextIpo[Ipo.OB_ROTY] - ipo_Parent_RotZ=contextIpo[Ipo.OB_ROTZ] - ipo_Parent_SizeX=contextIpo[Ipo.OB_SIZEX] - ipo_Parent_SizeY=contextIpo[Ipo.OB_SIZEY] - ipo_Parent_SizeZ=contextIpo[Ipo.OB_SIZEZ] - else: - ipo_Parent_LocX=ipo_Parent_LocY=ipo_Parent_LocZ=\ - ipo_Parent_RotX=ipo_Parent_RotY=ipo_Parent_RotZ=\ - ipo_Parent_SizeX=ipo_Parent_SizeY=ipo_Parent_SizeZ= None - - parent_list.append([parent, Init_Mat, ipo_Parent_LocX, ipo_Parent_LocY, ipo_Parent_LocZ, ipo_Parent_RotX, ipo_Parent_RotY, ipo_Parent_RotZ, ipo_Parent_SizeX, ipo_Parent_SizeY, ipo_Parent_SizeZ]) - - child=parent - parent=parent.parent - - #security : if one of parents object are a path>>follow : trajectory don't work properly so it have to draw nothing - for parent in parent_list: - if parent[0].type == 'Curve': - if parent[0].data.flag & 1<<4: # Follow path, 4th bit - return 1 - - #ob >> 
re-assign obj and not parent - ob= backup_ob - ob= backup_ob - - - if ipoLocX: LXC= ipoLocX[frameC] - else: LXC= ob.LocX - if ipoLocY: LYC= ipoLocY[frameC] - else: LYC= ob.LocY - if ipoLocZ: LZC= ipoLocZ[frameC] - else: LZC= ob.LocZ - - vect= Vector([ob.LocX, ob.LocY, ob.LocZ, 1]) - color=[0, 1] - - #If trajectory is being modified and we are at a frame where a ipo key already exist - if round(ob.LocX, 5)!=round(LXC, 5): - for bez in ipoLocX.bezierPoints: - if round(bez.pt[0], tr)==frameCtr: - bez.pt = [frameCr, vect[0]] - ipoLocX.recalc() - if round(ob.LocY, 5)!=round(LYC, 5): - for bez in ipoLocY.bezierPoints: - if round(bez.pt[0], tr)==frameCtr: - bez.pt = [frameCr, vect[1]] - ipoLocY.recalc() - if round(ob.LocZ, 5)!=round(LZC, 5): - for bez in ipoLocZ.bezierPoints: - if round(bez.pt[0], tr)==frameCtr: - bez.pt = [frameCr, vect[2]] - ipoLocZ.recalc() - - #change trajectory color if at an ipoKey - VertexFrame=[] - bezier_Coord=0 - if ipoLocX: # FIXED like others it was just in case ipoLocX==None - for bez in ipoLocX.bezierPoints: - bezier_Coord=round(bez.pt[0], tr) - if bezier_Coord not in VertexFrame: - VertexFrame.append(bezier_Coord) - if bezier_Coord==frameCtr: - color=[1, color[1]-0.3] - if ipoLocY: # FIXED - for bez in ipoLocY.bezierPoints: - bezier_Coord=round(bez.pt[0], tr) - if bezier_Coord not in VertexFrame: - VertexFrame.append(bezier_Coord) - if round(bez.pt[0], tr)==frameCtr: - color=[1, color[1]-0.3] - if ipoLocZ: # FIXED - for bez in ipoLocZ.bezierPoints: - bezier_Coord=round(bez.pt[0], tr) - if bezier_Coord not in VertexFrame: - VertexFrame.append(bezier_Coord) - if round(bez.pt[0], tr)==frameCtr: - color=[1, color[1]-0.3] - - - #put in LocX, LocY and LocZ all points of trajectory - for frame in xrange(frameC-past, frameC+future): - DecMat=matrixForTraj(frame, parent_list) - - if ipoLocX: LX= ipoLocX[frame] - else: LX= ob.LocX - if ipoLocY: LY= ipoLocY[frame] - else: LY= ob.LocY - if ipoLocZ: LZ= ipoLocZ[frame] - else: LZ= ob.LocZ - - vect=Vector(LX, LY, LZ)*DecMat - LocX.append(vect[0]) - LocY.append(vect[1]) - LocZ.append(vect[2]) - - - #draw part : get current view - MatPreBuff= [matview[i][j] for i in xrange(4) for j in xrange(4)] - - MatBuff=BGL.Buffer(GL_FLOAT, 16, MatPreBuff) - - glLoadIdentity() - glMatrixMode(GL_PROJECTION) - glPushMatrix() - glLoadMatrixf(MatBuff) - - #draw trajectory line - glLineWidth("""+str(width)+""") - - glBegin(GL_LINE_STRIP) - for i in xrange(len(LocX)): - glColor3f((i+1)*1.00/len(LocX)*color[0], 0, (i+1)*1.00/len(LocX)*color[1]) - glVertex3f(LocX[i], LocY[i], LocZ[i]) - - glEnd() - - #draw trajectory's "vertexs" - if not Blender.Window.EditMode(): - glPointSize(5) - glBegin(GL_POINTS) - TestPOINTS=[] - TestFRAME=[] - i=0 - for frame in VertexFrame: - ix=int(frame)-frameC+past - if ix>=0 and ixpt[0]-4 and mouse_co[1]>pt[1]-4 and mouse_co[1]R and R>L can use the same code - def IS_XMIRROR_SOURCE(xval): - '''Source means is this the value we want to copy from''' - - if PREF_MODE_L2R: - if xval<0: return True - else: return False - else: # PREF_MODE_R2L - if xval<0: return False - else: return True - - if IS_XMIRROR_SOURCE( h1.x ):# head bone 1s negative, so copy it to h2 - editbone2.head= VecXFlip(h1) - else: - ''' - assume h2.x<0 - not a big deal if were wrong, - its unlikely to ever happen because the bones would both be on the same side. 
- ''' - - # head bone 2s negative, so copy it to h1 - editbone1.head= VecXFlip(h2) - - # Same as above for tail - if IS_XMIRROR_SOURCE(t1.x): - editbone2.tail= VecXFlip(t1) - else: - editbone1.tail= VecXFlip(t2) - - # Copy roll from 1 bone to another, use the head's location to decide which side it's on. - if IS_XMIRROR_SOURCE(editbone1.head): - editbone2.roll= -editbone1.roll - else: - editbone1.roll= -editbone2.roll - - -def armature_symetry(\ - arm_ob,\ - PREF_MAX_DIST,\ - PREF_XMID_SNAP,\ - PREF_XZERO_THRESH,\ - PREF_MODE_L2R,\ - PREF_MODE_R2L,\ - PREF_SEL_ONLY): - - ''' - Main function that does all the work, - return the number of - ''' - arm_data= arm_ob.data - arm_data.makeEditable() - - # Get the bones - bones= [] - HIDDEN_EDIT= Blender.Armature.HIDDEN_EDIT - BONE_SELECTED= Blender.Armature.BONE_SELECTED - - if PREF_SEL_ONLY: - for eb in arm_data.bones.values(): - options= eb.options - if HIDDEN_EDIT not in options and BONE_SELECTED in options: - bones.append(eb) - else: - # All non hidden bones - for eb in arm_data.bones.values(): - options= eb.options - if HIDDEN_EDIT not in options: - bones.append(eb) - - del HIDDEN_EDIT # remove temp variables - del BONE_SELECTED - - # Store the numder of bones we have modified for a message - tot_editbones= len(bones) - tot_editbones_modified= 0 - - if PREF_XMID_SNAP: - # Remove bones that are in the middle (X Zero) - # reverse loop so we can remove items in the list. - for eb_idx in xrange(len(bones)-1, -1, -1): - edit_bone= bones[eb_idx] - if abs(edit_bone.head.x) + abs(edit_bone.tail.x) <= PREF_XZERO_THRESH/2: - - # This is a center bone, clamp and remove from the bone list so we dont use again. - if edit_bone.tail.x or edit_bone.head.x: - tot_editbones_modified += 1 - - edit_bone.tail.x= edit_bone.head.x= 0 - del bones[eb_idx] - - - - - bone_comparisons= [] - - # Compare every bone with every other bone, shouldn't be too slow. - # These 2 "for" loops only compare once - for eb_idx_a in xrange(len(bones)-1, -1, -1): - edit_bone_a= bones[eb_idx_a] - for eb_idx_b in xrange(eb_idx_a-1, -1, -1): - edit_bone_b= bones[eb_idx_b] - # Error float the first value from editbone_mirror_diff() so we can sort the resulting list. 
- bone_comparisons.append(editbone_mirror_diff(edit_bone_a, edit_bone_b)) - - - bone_comparisons.sort() # best matches first - - # Make a dict() of bone names that have been used so we dont mirror more then once - bone_mirrored= {} - - for error, editbone1, editbone2 in bone_comparisons: - # print 'Trying to merge at error %.3f' % error - if error > PREF_MAX_DIST: - # print 'breaking, max error limit reached PREF_MAX_DIST: %.3f' % PREF_MAX_DIST - break - - if not bone_mirrored.has_key(editbone1.name) and not bone_mirrored.has_key(editbone2.name): - # Were not used, execute the mirror - editbone_mirror_merge(editbone1, editbone2, PREF_MODE_L2R, PREF_MODE_R2L) - # print 'Merging bones' - - # Add ourselves so we aren't touched again - bone_mirrored[editbone1.name] = None # dummy value, would use sets in python 2.4 - bone_mirrored[editbone2.name] = None - - # If both options are enabled, then we have changed 2 bones - tot_editbones_modified+= PREF_MODE_L2R + PREF_MODE_R2L - - arm_data.update() # get out of armature editmode - return tot_editbones, tot_editbones_modified - - -def main(): - ''' - User interface function that gets the options and calls armature_symetry() - ''' - - scn= bpy.data.scenes.active - arm_ob= scn.objects.active - - if not arm_ob or arm_ob.type!='Armature': - Blender.Draw.PupMenu('No Armature object selected.') - return - - # Cant be in editmode for armature.makeEditable() - is_editmode= Blender.Window.EditMode() - if is_editmode: Blender.Window.EditMode(0) - Draw= Blender.Draw - - # Defaults for the user input - PREF_XMID_SNAP= Draw.Create(1) - PREF_MAX_DIST= Draw.Create(0.4) - PREF_XZERO_THRESH= Draw.Create(0.02) - - PREF_MODE_L2R= Draw.Create(1) - PREF_MODE_R2L= Draw.Create(0) - PREF_SEL_ONLY= Draw.Create(1) - - pup_block = [\ - 'Left (-), Right (+)',\ - ('Left > Right', PREF_MODE_L2R, 'Copy from the Left to Right of the mesh. Enable Both for a mid loc.'),\ - ('Right > Left', PREF_MODE_R2L, 'Copy from the Right to Left of the mesh. Enable Both for a mid loc.'),\ - '',\ - ('MaxDist:', PREF_MAX_DIST, 0.0, 4.0, 'Maximum difference in mirror bones to match up pairs.'),\ - ('XZero limit:', PREF_XZERO_THRESH, 0.0, 2.0, 'Tolerance for locking bones into the middle (X/zero).'),\ - ('XMidSnap Bones', PREF_XMID_SNAP, 'Snap middle verts to X Zero (uses XZero limit)'),\ - ('Selected Only', PREF_SEL_ONLY, 'Only xmirror selected bones.'),\ - ] - - # Popup, exit if the user doesn't click OK - if not Draw.PupBlock("X Mirror mesh tool", pup_block): - return - - # Replace the variables with their button values. 
- PREF_XMID_SNAP= PREF_XMID_SNAP.val - PREF_MAX_DIST= PREF_MAX_DIST.val - PREF_MODE_L2R= PREF_MODE_L2R.val - PREF_MODE_R2L= PREF_MODE_R2L.val - PREF_XZERO_THRESH= PREF_XZERO_THRESH.val - PREF_SEL_ONLY= PREF_SEL_ONLY.val - - # If both are off assume mid-point and enable both - if not PREF_MODE_R2L and not PREF_MODE_L2R: - PREF_MODE_R2L= PREF_MODE_L2R= True - - - tot_editbones, tot_editbones_modified = armature_symetry(\ - arm_ob,\ - PREF_MAX_DIST,\ - PREF_XMID_SNAP,\ - PREF_XZERO_THRESH,\ - PREF_MODE_L2R,\ - PREF_MODE_R2L,\ - PREF_SEL_ONLY) - - if is_editmode: Blender.Window.EditMode(1) - - # Redraw all views before popup - Blender.Window.RedrawAll() - - # Print results - if PREF_SEL_ONLY: - msg= 'moved %i bones of %i selected' % (tot_editbones_modified, tot_editbones) - else: - msg= 'moved %i bones of %i visible' % (tot_editbones_modified, tot_editbones) - - - Blender.Draw.PupMenu(msg) - -# Check for __main__ so this function can be imported by other scripts without running the script. -if __name__=='__main__': - main() diff --git a/release/scripts/bevel_center.py b/release/scripts/bevel_center.py deleted file mode 100644 index 637ed08127f..00000000000 --- a/release/scripts/bevel_center.py +++ /dev/null @@ -1,474 +0,0 @@ -#!BPY -# -*- coding: utf-8 -*- -""" Registration info for Blender menus -Name: 'Bevel Center' -Blender: 243 -Group: 'Mesh' -Tip: 'Bevel selected faces, edges, and vertices' -""" - -__author__ = "Loic BERTHE" -__url__ = ("blender", "blenderartists.org") -__version__ = "2.0" - -__bpydoc__ = """\ -This script implements vertex and edges bevelling in Blender. - -Usage: - -Select the mesh you want to work on, enter Edit Mode and select the edges -to bevel. Then run this script from the 3d View's Mesh->Scripts menu. - -You can control the thickness of the bevel with the slider -- redefine the -end points for bigger or smaller ranges. The thickness can be changed even -after applying the bevel, as many times as needed. - -For an extra smoothing after or instead of direct bevel, set the level of -recursiveness and use the "Recursive" button. - -This "Recursive" Button, won't work in face select mode, unless you choose -"faces" in the select mode menu. - -Notes:
- You can undo and redo your steps just like with normal mesh operations in -Blender. -""" - -###################################################################### -# Bevel Center v2.0 for Blender - -# This script lets you bevel the selected vertices or edges and control the -# thickness of the bevel - -# (c) 2004-2006 Loïc Berthe (loic+blender@lilotux.net) -# released under Blender Artistic License - -###################################################################### - -import Blender -from Blender import NMesh, Window, Scene -from Blender.Draw import * -from Blender.Mathutils import * -from Blender.BGL import * -import BPyMessages -#PY23 NO SETS# -''' -try: - set() -except: - from sets import set -''' - -###################################################################### -# Functions to handle the global structures of the script NF, NE and NC -# which contain informations about faces and corners to be created - -global E_selected -E_selected = NMesh.EdgeFlags['SELECT'] - -old_dist = None - -def act_mesh_ob(): - scn = Scene.GetCurrent() - ob = scn.objects.active - if ob == None or ob.type != 'Mesh': - BPyMessages.Error_NoMeshActive() - return - - if ob.getData(mesh=1).multires: - BPyMessages.Error_NoMeshMultiresEdit() - return - - return ob - -def make_sel_vert(*co): - v= NMesh.Vert(*co) - v.sel = 1 - me.verts.append(v) - return v - -def make_sel_face(verts): - f = NMesh.Face(verts) - f.sel = 1 - me.addFace(f) - -def add_to_NV(old,dir,new): - try: - NV[old][dir] = new - except: - NV[old] = {dir:new} - -def get_v(old, *neighbors): - # compute the direction of the new vert - if len(neighbors) == 1: dir = (neighbors[0].co - old.co).normalize() - #dir - else: dir = (neighbors[0].co - old.co).normalize() + (neighbors[1].co-old.co).normalize() - - # look in NV if this vert already exists - key = tuple(dir) - if old in NV and key in NV[old] : return NV[old][key] - - # else, create it - new = old.co + dist.val*dir - v = make_sel_vert(new.x,new.y,new.z) - add_to_NV(old,key,v) - return v - -def make_faces(): - """ Analyse the mesh, make the faces corresponding to selected faces and - fill the structures NE and NC """ - - # make the differents flags consistent - for e in me.edges: - if e.flag & E_selected : - e.v1.sel = 1 - e.v2.sel = 1 - - NF =[] # NF : New faces - for f in me.faces: - V = f.v - nV = len(V) - enumV = range(nV) - E = [me.findEdge(V[i],V[(i+1) % nV]) for i in enumV] - Esel = [x.flag & E_selected for x in E] - - # look for selected vertices and creates a list containing the new vertices - newV = V[:] - changes = False - for (i,v) in enumerate(V): - if v.sel : - changes = True - if Esel[i-1] == 0 and Esel[i] == 1 : newV[i] = get_v(v,V[i-1]) - elif Esel[i-1] == 1 and Esel[i] == 0 : newV[i] = get_v(v,V[(i+1) % nV]) - elif Esel[i-1] == 1 and Esel[i] == 1 : newV[i] = get_v(v,V[i-1],V[(i+1) % nV]) - else : newV[i] = [get_v(v,V[i-1]),get_v(v,V[(i+1) % nV])] - - if changes: - # determine and store the face to be created - - lenV = [len(x) for x in newV] - if 2 not in lenV : - new_f = NMesh.Face(newV) - if sum(Esel) == nV : new_f.sel = 1 - NF.append(new_f) - - else : - nb2 = lenV.count(2) - - if nV == 4 : # f is a quad - if nb2 == 1 : - ind2 = lenV.index(2) - NF.append(NMesh.Face([newV[ind2-1],newV[ind2][0],newV[ind2][1],newV[ind2-3]])) - NF.append(NMesh.Face([newV[ind2-1],newV[ind2-2],newV[ind2-3]])) - - elif nb2 == 2 : - # We must know if the tuples are neighbours - ind2 = ''.join([str(x) for x in lenV+lenV[:1]]).find('22') - - if ind2 != -1 : # They are - 
NF.append(NMesh.Face([newV[ind2][0],newV[ind2][1],newV[ind2-3][0],newV[ind2-3][1]])) - NF.append(NMesh.Face([newV[ind2][0],newV[ind2-1],newV[ind2-2],newV[ind2-3][1]])) - - else: # They aren't - ind2 = lenV.index(2) - NF.append(NMesh.Face([newV[ind2][0],newV[ind2][1],newV[ind2-2][0],newV[ind2-2][1]])) - NF.append(NMesh.Face([newV[ind2][1],newV[ind2-3],newV[ind2-2][0]])) - NF.append(NMesh.Face([newV[ind2][0],newV[ind2-1],newV[ind2-2][1]])) - - elif nb2 == 3 : - ind2 = lenV.index(3) - NF.append(NMesh.Face([newV[ind2-1][1],newV[ind2],newV[ind2-3][0]])) - NF.append(NMesh.Face([newV[ind2-1][0],newV[ind2-1][1],newV[ind2-3][0],newV[ind2-3][1]])) - NF.append(NMesh.Face([newV[ind2-3][1],newV[ind2-2][0],newV[ind2-2][1],newV[ind2-1][0]])) - - else: - if (newV[0][1].co-newV[3][0].co).length + (newV[1][0].co-newV[2][1].co).length \ - < (newV[0][0].co-newV[1][1].co).length + (newV[2][0].co-newV[3][1].co).length : - ind2 = 0 - else : - ind2 = 1 - NF.append(NMesh.Face([newV[ind2-1][0],newV[ind2-1][1],newV[ind2][0],newV[ind2][1]])) - NF.append(NMesh.Face([newV[ind2][1],newV[ind2-3][0],newV[ind2-2][1],newV[ind2-1][0]])) - NF.append(NMesh.Face([newV[ind2-3][0],newV[ind2-3][1],newV[ind2-2][0],newV[ind2-2][1]])) - - else : # f is a tri - if nb2 == 1: - ind2 = lenV.index(2) - NF.append(NMesh.Face([newV[ind2-2],newV[ind2-1],newV[ind2][0],newV[ind2][1]])) - - elif nb2 == 2: - ind2 = lenV.index(3) - NF.append(NMesh.Face([newV[ind2-1][1],newV[ind2],newV[ind2-2][0]])) - NF.append(NMesh.Face([newV[ind2-2][0],newV[ind2-2][1],newV[ind2-1][0],newV[ind2-1][1]])) - - else: - ind2 = min( [((newV[i][1].co-newV[i-1][0].co).length, i) for i in enumV] )[1] - NF.append(NMesh.Face([newV[ind2-1][1],newV[ind2][0],newV[ind2][1],newV[ind2-2][0]])) - NF.append(NMesh.Face([newV[ind2-2][0],newV[ind2-2][1],newV[ind2-1][0],newV[ind2-1][1]])) - - # Preparing the corners - for i in enumV: - if lenV[i] == 2 : NC.setdefault(V[i],[]).append(newV[i]) - - - old_faces.append(f) - - # Preparing the Edges - for i in enumV: - if Esel[i]: - verts = [newV[i],newV[(i+1) % nV]] - if V[i].index > V[(i+1) % nV].index : verts.reverse() - NE.setdefault(E[i],[]).append(verts) - - # Create the faces - for f in NF: me.addFace(f) - -def make_edges(): - """ Make the faces corresponding to selected edges """ - - for old,new in NE.iteritems() : - if len(new) == 1 : # This edge was on a border - oldv = [old.v1, old.v2] - if old.v1.index < old.v2.index : oldv.reverse() - - make_sel_face(oldv+new[0]) - - me.findEdge(*oldv).flag |= E_selected - me.findEdge(*new[0]).flag |= E_selected - - #PY23 NO SETS# for v in oldv : NV_ext.add(v) - for v in oldv : NV_ext[v]= None - - else: - make_sel_face(new[0] + new[1][::-1]) - - me.findEdge(*new[0]).flag |= E_selected - me.findEdge(*new[1]).flag |= E_selected - -def make_corners(): - """ Make the faces corresponding to corners """ - - for v in NV.iterkeys(): - V = NV[v].values() - nV = len(V) - - if nV == 1: pass - - elif nV == 2 : - #PY23 NO SETS# if v in NV_ext: - if v in NV_ext.iterkeys(): - make_sel_face(V+[v]) - me.findEdge(*V).flag |= E_selected - - else: - #PY23 NO SETS# if nV == 3 and v not in NV_ext : make_sel_face(V) - if nV == 3 and v not in NV_ext.iterkeys() : make_sel_face(V) - - - else : - - # We need to know which are the edges around the corner. - # First, we look for the quads surrounding the corner. 
- eed = [] - for old, new in NE.iteritems(): - if v in (old.v1,old.v2) : - if v.index == min(old.v1.index,old.v2.index) : ind = 0 - else : ind = 1 - - if len(new) == 1: eed.append([v,new[0][ind]]) - else : eed.append([new[0][ind],new[1][ind]]) - - # We will add the edges coming from faces where only one vertice is selected. - # They are stored in NC. - if v in NC: eed = eed+NC[v] - - # Now we have to sort these vertices - hc = {} - for (a,b) in eed : - hc.setdefault(a,[]).append(b) - hc.setdefault(b,[]).append(a) - - for x0,edges in hc.iteritems(): - if len(edges) == 1 : break - - b = [x0] # b will contain the sorted list of vertices - - for i in xrange(len(hc)-1): - for x in hc[x0] : - if x not in b : break - b.append(x) - x0 = x - - b.append(b[0]) - - # Now we can create the faces - if len(b) == 5: make_sel_face(b[:4]) - - else: - New_V = Vector(0.0, 0.0,0.0) - New_d = [0.0, 0.0,0.0] - - for x in hc.iterkeys(): New_V += x.co - for dir in NV[v] : - for i in xrange(3): New_d[i] += dir[i] - - New_V *= 1./len(hc) - for i in xrange(3) : New_d[i] /= nV - - center = make_sel_vert(New_V.x,New_V.y,New_V.z) - add_to_NV(v,tuple(New_d),center) - - for k in xrange(len(b)-1): make_sel_face([center, b[k], b[k+1]]) - - if 2 < nV and v in NC : - for edge in NC[v] : me.findEdge(*edge).flag |= E_selected - -def clear_old(): - """ Erase old faces and vertices """ - - for f in old_faces: me.removeFace(f) - - for v in NV.iterkeys(): - #PY23 NO SETS# if v not in NV_ext : me.verts.remove(v) - if v not in NV_ext.iterkeys() : me.verts.remove(v) - - for e in me.edges: - if e.flag & E_selected : - e.v1.sel = 1 - e.v2.sel = 1 - - -###################################################################### -# Interface - -global dist - -dist = Create(0.2) -left = Create(0.0) -right = Create(1.0) -num = Create(2) - -# Events -EVENT_NOEVENT = 1 -EVENT_BEVEL = 2 -EVENT_UPDATE = 3 -EVENT_RECURS = 4 -EVENT_EXIT = 5 - -def draw(): - global dist, left, right, num, old_dist - global EVENT_NOEVENT, EVENT_BEVEL, EVENT_UPDATE, EVENT_RECURS, EVENT_EXIT - - glClear(GL_COLOR_BUFFER_BIT) - Button("Bevel",EVENT_BEVEL,10,100,280,25) - - BeginAlign() - left=Number('', EVENT_NOEVENT,10,70,45, 20,left.val,0,right.val,'Set the minimum of the slider') - dist=Slider("Thickness ",EVENT_UPDATE,60,70,180,20,dist.val,left.val,right.val,0, \ - "Thickness of the bevel, can be changed even after bevelling") - right = Number("",EVENT_NOEVENT,245,70,45,20,right.val,left.val,200,"Set the maximum of the slider") - - EndAlign() - glRasterPos2d(8,40) - Text('To finish, you can use recursive bevel to smooth it') - - - if old_dist != None: - num=Number('', EVENT_NOEVENT,10,10,40, 16,num.val,1,100,'Recursion level') - Button("Recursive",EVENT_RECURS,55,10,100,16) - - Button("Exit",EVENT_EXIT,210,10,80,20) - -def event(evt, val): - if ((evt == QKEY or evt == ESCKEY) and not val): Exit() - -def bevent(evt): - if evt == EVENT_EXIT : Exit() - elif evt == EVENT_BEVEL : bevel() - elif evt == EVENT_UPDATE : - try: bevel_update() - except NameError : pass - elif evt == EVENT_RECURS : recursive() - -Register(draw, event, bevent) - -###################################################################### -def bevel(): - """ The main function, which creates the bevel """ - global me,NV,NV_ext,NE,NC, old_faces,old_dist - - ob = act_mesh_ob() - if not ob: return - - Window.WaitCursor(1) # Change the Cursor - t= Blender.sys.time() - is_editmode = Window.EditMode() - if is_editmode: Window.EditMode(0) - - me = ob.data - - NV = {} - #PY23 NO SETS# NV_ext = set() - NV_ext= {} - 
NE = {} - NC = {} - old_faces = [] - - make_faces() - make_edges() - make_corners() - clear_old() - - old_dist = dist.val - print '\tbevel in %.6f sec' % (Blender.sys.time()-t) - me.update(1) - if is_editmode: Window.EditMode(1) - Window.WaitCursor(0) - Blender.Redraw() - -def bevel_update(): - """ Use NV to update the bevel """ - global dist, old_dist - - if old_dist == None: - # PupMenu('Error%t|Must bevel first.') - return - - is_editmode = Window.EditMode() - if is_editmode: Window.EditMode(0) - - fac = dist.val - old_dist - old_dist = dist.val - - for old_v in NV.iterkeys(): - for dir in NV[old_v].iterkeys(): - for i in xrange(3): - NV[old_v][dir].co[i] += fac*dir[i] - - me.update(1) - if is_editmode: Window.EditMode(1) - Blender.Redraw() - -def recursive(): - """ Make a recursive bevel... still experimental """ - global dist - from math import pi, sin - - if num.val > 1: - a = pi/4 - ang = [] - for k in xrange(num.val): - ang.append(a) - a = (pi+2*a)/4 - - l = [2*(1-sin(x))/sin(2*x) for x in ang] - R = dist.val/sum(l) - l = [x*R for x in l] - - dist.val = l[0] - bevel_update() - - for x in l[1:]: - dist.val = x - bevel() - diff --git a/release/scripts/blenderLipSynchro.py b/release/scripts/blenderLipSynchro.py deleted file mode 100644 index c4815811512..00000000000 --- a/release/scripts/blenderLipSynchro.py +++ /dev/null @@ -1,729 +0,0 @@ -#!BPY -# coding: utf-8 -""" -Name: 'BlenderLipSynchro' -Blender: 242 -Group: 'Animation' -Tooltip: 'Import phonemes from Papagayo or JLipSync for lip synchronization' -""" - -__author__ = "Dienben: Benoit Foucque dienben_mail@yahoo.fr" -__url__ = ["blenderLipSynchro Blog, http://blenderlipsynchro.blogspot.com/", -"Papagayo (Python), http://www.lostmarble.com/papagayo/index.shtml", -"JLipSync (Java), http://jlipsync.sourceforge.net/"] -__version__ = "2.0" -__bpydoc__ = """\ -Description: - -This script imports Voice Export made by Papagayo or JLipSync and maps the export with your shapes. - -Usage: - -Import a Papagayo or JLipSync voice export file and link it with your shapes. - -Note:
-- Naturally, you need files exported from one of the supported lip synching -programs. Check their sites to learn more and download them. - -""" - -# -------------------------------------------------------------------------- -# BlenderLipSynchro -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - - - -#il y a 3 etapes -#la deuxieme on charge le dictionnaire de correspondance -#la troisieme on fait le choix des correpondance -#la quatrieme on construit les cles a partir du fichiers frame - -#there are 3 stages -#the second one load the mapping dictionnary -#the tird make the mapping -#the fourth make the key in the IPO Curve - -#voici mes differents imports -#the imports -import os -import Blender - -from Blender import Ipo -from Blender.Draw import * -from Blender.BGL import * -from Blender.sys import basename - - - -#ici commencent mes fonctions -#here begin my functions -#cette fonction trace l'interface graphique -#this functions draw the User interface -def trace(): - #voici mes variables pouvant etre modifie - #my variables - global nbr_phoneme, mon_fichier_dico - global let01, let02, let03, let04,let05, let06, let07, let08, let09, let10 - global let11, let12, let13, let14,let15, let16, let17, let18, let19, let20 - global let21, let22, let23, let24 - - global let01selectkey,let02selectkey,let03selectkey,let04selectkey,let05selectkey - global let06selectkey,let07selectkey,let08selectkey,let09selectkey,let10selectkey,let11selectkey - global let12selectkey,let13selectkey,let14selectkey,let15selectkey,let16selectkey,let17selectkey - global let18selectkey,let19selectkey,let20selectkey,let21selectkey,let22selectkey,let23selectkey - global let24selectkey - - glClearColor(0.4,0.5,0.6 ,0.0) - glClear(GL_COLOR_BUFFER_BIT) - - glColor3d(1,1,1) - glRasterPos2i(87, 375) - Text("Blendersynchro V 2.0") - glColor3d(1,1,1) - glRasterPos2i(84, 360) - Text("Programming: Dienben") - - glColor3d(0,0,0) - glRasterPos2i(13, 342) - Text("Lip Synchronization Tool") - glColor3d(0,0,0) - glRasterPos2i(13, 326) - Text("Thanks to Chris Clawson and Liubomir Kovatchev") - - glColor3d(1,1,1) - glRasterPos2i(5, 320) - Text("_______________________________________________________") - glColor3d(0,0,0) - glRasterPos2i(6, 318) - Text("_______________________________________________________") - - - if (etape==1): - #cette etape permet de choisi la correspondance entre les phonemes et les cles - #this stage offer the possibility to choose the mapping between phonems and shapes - - glColor3d(1,1,1) - glRasterPos2i(140, 300) - Text("Objet: "+Blender.Object.GetSelected()[0].getName() ) - - glColor3d(1,1,1) - glRasterPos2i(5, 215) - 
Text("Assign phonems to shapes:") - - #on mesure la taille de la liste de phonemes - #this is the lenght of the phonem list - nbr_phoneme=len(liste_phoneme) - - #on dessine les listes de choix - #we draw the choice list - - # - if (nbr_phoneme > 0): - let01 = String(" ", 4, 5, 185, 30, 16, liste_phoneme[0], 3) - glColor3d(0,0,0) - glRasterPos2i(40, 188) - Text("=") - let01selectkey = Menu(key_menu, 50, 50, 185, 70, 16, let01selectkey.val) - - # - if (nbr_phoneme > 1): - let02 = String(" ", 4, 150, 185, 30, 16, liste_phoneme[1], 2) - glColor3d(0,0,0) - glRasterPos2i(185, 188) - Text("=") - let02selectkey = Menu(key_menu, 51, 195, 185, 70, 16, let02selectkey.val) - - # - if (nbr_phoneme > 2): - let03 = String(" ", 4, 5, 165, 30, 16, liste_phoneme[2], 2) - glColor3d(0,0,0) - glRasterPos2i(40, 168) - Text("=") - let03selectkey = Menu(key_menu, 52, 50, 165, 70, 16, let03selectkey.val) - - # - if (nbr_phoneme > 3): - let04 = String(" ", 4, 150, 165, 30, 16, liste_phoneme[3], 2) - glColor3d(0,0,0) - glRasterPos2i(185, 168) - Text("=") - let04selectkey = Menu(key_menu, 53, 195, 165, 70, 16, let04selectkey.val) - - # - if (nbr_phoneme > 4): - let05 = String(" ", 4, 5, 145, 30, 16, liste_phoneme[4], 2) - glColor3d(0,0,0) - glRasterPos2i(40, 148) - Text("=") - let05selectkey = Menu(key_menu, 54, 50, 145, 70, 16, let05selectkey.val) - - # - if (nbr_phoneme > 5): - let06 = String(" ", 4, 150, 145, 30, 16, liste_phoneme[5], 2) - glColor3d(0,0,0) - glRasterPos2i(185, 148) - Text("=") - let06selectkey = Menu(key_menu, 55, 195, 145, 70, 16, let06selectkey.val) - - # - if (nbr_phoneme > 6): - let07 = String(" ", 4, 5, 125, 30, 16, liste_phoneme[6], 2) - glColor3d(0,0,0) - glRasterPos2i(40, 128) - Text("=") - let07selectkey = Menu(key_menu, 56, 50, 125, 70, 16, let07selectkey.val) - - # - if (nbr_phoneme > 7): - let08 = String(" ", 4, 150, 125, 30, 16, liste_phoneme[7], 2) - glColor3d(0,0,0) - glRasterPos2i(185, 128) - Text("=") - let08selectkey = Menu(key_menu, 57, 195, 125, 70, 16,let08selectkey.val) - - # - if (nbr_phoneme > 8): - let09 = String(" ", 4, 5, 105, 30, 16, liste_phoneme[8], 2) - glColor3d(0,0,0) - glRasterPos2i(40, 108) - Text("=") - let09selectkey = Menu(key_menu, 58, 50, 105, 70, 16,let09selectkey.val) - - # - if (nbr_phoneme > 9): - let10 = String(" ", 4, 150, 105, 30, 16, liste_phoneme[9], 2) - glColor3d(0,0,0) - glRasterPos2i(185, 108) - Text("=") - let10selectkey = Menu(key_menu, 59, 195, 105, 70, 16, let10selectkey.val) - - # - if (nbr_phoneme > 10): - let11 = String(" ", 4, 5, 85, 30, 16, liste_phoneme[10], 2) - glColor3d(0,0,0) - glRasterPos2i(40, 88) - Text("=") - let11selectkey = Menu(key_menu, 60, 50, 85, 70, 16, let11selectkey.val) - - # - if (nbr_phoneme > 11): - let12 = String(" ", 4, 150, 85, 30, 16, liste_phoneme[11], 2) - glColor3d(0,0,0) - Text("=") - let12selectkey = Menu(key_menu, 61, 195, 85, 70, 16, let12selectkey.val) - - # - if (nbr_phoneme > 12): - let13 = String(" ", 4, 5, 65, 30, 16, liste_phoneme[12], 2) - glColor3d(0,0,0) - glRasterPos2i(40, 68) - Text("=") - let13selectkey = Menu(key_menu, 62, 50, 65, 70, 16, let13selectkey.val) - - # - if (nbr_phoneme > 13): - let14 = String(" ", 4, 150, 65, 30, 16, liste_phoneme[13], 2) - glColor3d(0,0,0) - glRasterPos2i(185, 68) - Text("=") - let14selectkey = Menu(key_menu, 63, 195, 65, 70, 16, let14selectkey.val) - - # - if (nbr_phoneme > 14): - let15 = String(" ", 4, 5, 45, 30, 16, liste_phoneme[14], 2) - glColor3d(0,0,0) - glRasterPos2i(40, 48) - Text("=") - let15selectkey = Menu(key_menu, 64, 50, 45, 70, 16, 
let15selectkey.val) - - # - if (nbr_phoneme > 15): - let16 = String(" ", 4, 150, 45, 30, 16, liste_phoneme[15], 2) - glColor3d(0,0,0) - glRasterPos2i(185, 48) - Text("=") - let16selectkey = Menu(key_menu, 65, 195, 45, 70, 16, let16selectkey.val) - - # - if (nbr_phoneme > 16): - let17 = String(" ", 4, 295, 185, 30, 16, liste_phoneme[16], 2) - glColor3d(0,0,0) - glRasterPos2i(330, 188) - Text("=") - let17selectkey = Menu(key_menu, 66, 340, 185, 70, 16, let17selectkey.val) - - # - if (nbr_phoneme > 17): - let18 = String(" ", 4, 440, 185, 70, 16, liste_phoneme[17], 8) - glColor3d(0,0,0) - glRasterPos2i(515, 188) - Text("=") - let18selectkey = Menu(key_menu, 67, 525, 185, 70, 16, let18selectkey.val) - - # - if (nbr_phoneme > 18): - let19 = String(" ", 4, 295, 165, 30, 16, liste_phoneme[18], 2) - glColor3d(0,0,0) - glRasterPos2i(330, 168) - Text("=") - let19selectkey = Menu(key_menu, 68, 340, 165, 70, 16, let19selectkey.val) - - # - if (nbr_phoneme > 19): - let20 = String(" ", 4, 440, 165, 70, 16, liste_phoneme[19], 8) - glColor3d(0,0,0) - glRasterPos2i(515, 168) - Text("=") - let20selectkey = Menu(key_menu, 69, 525, 165, 70, 16, let20selectkey.val) - - # - if (nbr_phoneme > 20): - let21 = String(" ", 4, 295, 145, 30, 16, liste_phoneme[20], 2) - glColor3d(0,0,0) - glRasterPos2i(330, 148) - Text("=") - let21selectkey = Menu(key_menu, 70, 340, 145, 70, 16, let21selectkey.val) - - # - if (nbr_phoneme > 21): - let22 = String(" ", 4, 440, 145, 70, 16, liste_phoneme[21], 8) - glColor3d(0,0,0) - glRasterPos2i(515, 148) - Text("=") - let22selectkey = Menu(key_menu, 71, 525, 145, 70, 16, let22selectkey.val) - - # - if (nbr_phoneme > 22): - let23 = String(" ", 4, 295, 125, 30, 16, liste_phoneme[22], 2) - glColor3d(0,0,0) - glRasterPos2i(330, 128) - Text("=") - let23selectkey = Menu(key_menu, 72, 340, 125, 70, 16,let23selectkey.val) - - # - if (nbr_phoneme > 23): - let24 = String(" ", 4, 440, 125, 70, 16, liste_phoneme[23], 8) - glColor3d(0,0,0) - glRasterPos2i(515, 128) - Text("=") - let24selectkey = Menu(key_menu, 73, 525, 125, 70, 16, let24selectkey.val) - - # - if (nbr_phoneme > 24): - let25 = String(" ", 4, 295, 105, 30, 16, liste_phoneme[24], 2) - glColor3d(0,0,0) - glRasterPos2i(330, 108) - Text("=") - let25selectkey = Menu(key_menu, 74, 340, 105, 70, 16, let25selectkey.val) - - # - if (nbr_phoneme > 25): - let26 = String(" ", 4, 440, 105, 70, 16, liste_phoneme[25], 8) - glColor3d(0,0,0) - glRasterPos2i(515, 108) - Text("=") - let26selectkey = Menu(key_menu, 75, 525, 105, 70, 16,let26selectkey.val) - - # - if (nbr_phoneme > 26): - let27 = String(" ", 4, 295, 85, 30, 16, liste_phoneme[26], 2) - glColor3d(0,0,0) - glRasterPos2i(330, 88) - Text("=") - let27selectkey = Menu(key_menu, 76, 340, 85, 70, 16, let27selectkey.val) - - # - if (nbr_phoneme > 27): - let28 = String(" ", 4, 440, 85, 70, 16, liste_phoneme[27], 8) - glColor3d(0,0,0) - glRasterPos2i(515, 88) - Text("=") - let28selectkey = Menu(key_menu, 77, 525, 85, 70, 16,let28selectkey.val) - - # - if (nbr_phoneme > 28): - let29 = String(" ", 4, 295, 65, 30, 16, liste_phoneme[28], 2) - glColor3d(0,0,0) - glRasterPos2i(330, 68) - Text("=") - let29selectkey = Menu(key_menu, 78, 340, 65, 70, 16, let29selectkey.val) - - # - if (nbr_phoneme > 29): - let30 = String(" ", 4, 440, 65, 70, 16, liste_phoneme[29], 8) - glColor3d(0,0,0) - glRasterPos2i(515, 68) - Text("=") - let30selectkey = Menu(key_menu, 79, 525, 65, 70, 16, let30selectkey.val) - - # - if (nbr_phoneme > 30): - let31 = String(" ", 4, 295, 45, 30, 16, liste_phoneme[30], 2) - glColor3d(0,0,0) - 
glRasterPos2i(330, 48) - Text("=") - let31selectkey = Menu(key_menu, 80, 340, 45, 70, 16, let31selectkey.val) - - # - if (nbr_phoneme > 31): - let32 = String(" ", 4, 440, 45, 70, 16, liste_phoneme[31], 8) - glColor3d(0,0,0) - glRasterPos2i(515, 48) - Text("=") - let32selectkey = Menu(key_menu, 81, 525, 45, 70, 16, let32selectkey.val) - - Button("Go", 3, 155, 5, 145, 22) - - if (etape==2): - glColor3d(1,1,1) - glRasterPos2i(125, 200) - Text("Operation Completed") - - if (etape==0): - glColor3d(1,1,1) - glRasterPos2i(125, 200) - Text("Please select a Mesh'Object and Create all the IPO Curves for your Shapes") - - if (etape==3): - #this stage permits to load a custom dictionnary - load_file_text = "Load File" - if mon_fichier_dico: - Button("Import Loaded File", 2, 5, 5, 145, 22) - glColor3d(1,1,1) - glRasterPos2i(6, 50) - Text("loaded file: %s" % basename(mon_fichier_dico)) - load_file_text = "Choose Another File" - Button(load_file_text, 8, 125, 180, 145, 22) - - glRasterPos2i(6, 40) - Text("_______________________________________________________") - glColor3d(0,0,0) - glRasterPos2i(6, 38) - Text("_______________________________________________________") - - Button("Exit", 1, 305, 5, 80, 22) - - - -#cette fonction sur evenement quite en cas d'ESC -#this functions catch the ESC event and quit -def event(evt,val): - if (evt == ESCKEY and not val): Exit() - -#cette fonction gere les evenements -#the event functions -def bevent(evt): - global etape,soft_type,liste_phoneme,dico_phoneme_export - - if (evt == 1): - Exit() - - elif (evt == 2): - #c'est l'import du dictionnaire - #we create and import the dictionnary - lecture_chaine(mon_fichier_dico,dico_phoneme_export) - construction_dictionnaire_phoneme() - #we change the stage - etape=1 - - elif (evt == 3): - #c'est l'import - #we import - lecture_chaine(mon_fichier_export,dico_phoneme_export) - construction_dico_correspondance() - construction_lipsynchro() - #on change d'etape - #we change the stage - etape=2 - - elif (evt == 8): - #we choose the file - Blender.Window.FileSelector(selectionner_fichier,"Select File") - - Blender.Redraw() - -#cette fonction recupere le nom et le chemin du fichier dictionnaire -#we catch the name and the path of the dictionnary -def selectionner_fichier(filename): - global mon_fichier_dico,mon_fichier_export - mon_fichier_dico=filename - mon_fichier_export=filename - -#fonction de lecture de la liste frame phoneme -#we read the frame and phonems -def lecture_chaine(fichier,liste): - mon_fichier=open(fichier) - #je lis la premiere ligne qui contiens la version de moho - #first, we read the moho version - mon_fichier.readline() - - #je lis jusqu'a la fin - #then we read until the end of the file - while 1: - ma_ligne=mon_fichier.readline() - if ma_ligne=='': - break - decoup=ma_ligne.split() - liste[decoup[0]]=decoup[1] - print liste - - - - -#fonction qui construit la liste dictionnaire simple -#we make the dictionnary -def construction_dictionnaire_phoneme(): - global liste_phoneme - index_liste=0 - #je transforme mon dictionnaire en list de tulpes - #we transform the list in tulpes - ma_liste=dico_phoneme_export.items() - #je parcours ma liste a la recherche d'elements non existant - #we read the list to find non existing elements - print dico_phoneme - for index in range(len(ma_liste)): - if ma_liste[index][1] not in liste_phoneme: - liste_phoneme[index_liste:index_liste]=[ma_liste[index][1]] - index_liste=index_liste+1 - print liste_phoneme - - -#cette fonction recupere les courbes cible -#this functon catch 
the IPO curve -def recuperation_courbe(): - global key_menu,dico_key - - #on recupere le nom des shapes - #we catch the shapes - key=Blender.Object.GetSelected()[0].getData().getKey().getBlocks() - for n in range(len(key)): - #on vire la première cle (en effet basic n'est pas une cle en tant que telle) - #we threw away the basic shapes - if (n>0): - key_menu=key_menu+key[n].name + " %x" + str(n-1) + "|" - dico_key[str(n-1)]=Blender.Object.GetSelected()[0].getData().getKey().getIpo().getCurves()[n-1] - - - print "dico_key" - print dico_key - print 'end dico_key' - -#cette fonction construit un dictionnaire de correspondance entre les phonemes prononces et les cles a utiliser -#we make the dictionnary for the mapping between shapes and phonems -def construction_dico_correspondance(): - global dico_correspondance - #je parcours les phonemes - #we read the phonems - if (nbr_phoneme>0): - dico_correspondance[liste_phoneme[0]]=dico_key[str(let01selectkey.val)] - if (nbr_phoneme>1): - dico_correspondance[liste_phoneme[1]]=dico_key[str(let02selectkey.val)] - if (nbr_phoneme>2): - dico_correspondance[liste_phoneme[2]]=dico_key[str(let03selectkey.val)] - if (nbr_phoneme>3): - dico_correspondance[liste_phoneme[3]]=dico_key[str(let04selectkey.val)] - if (nbr_phoneme>4): - dico_correspondance[liste_phoneme[4]]=dico_key[str(let05selectkey.val)] - if (nbr_phoneme>5): - dico_correspondance[liste_phoneme[5]]=dico_key[str(let06selectkey.val)] - if (nbr_phoneme>6): - dico_correspondance[liste_phoneme[6]]=dico_key[str(let07selectkey.val)] - if (nbr_phoneme>7): - dico_correspondance[liste_phoneme[7]]=dico_key[str(let08selectkey.val)] - if (nbr_phoneme>8): - dico_correspondance[liste_phoneme[8]]=dico_key[str(let09selectkey.val)] - if (nbr_phoneme>9): - dico_correspondance[liste_phoneme[9]]=dico_key[str(let10selectkey.val)] - if (nbr_phoneme>10): - dico_correspondance[liste_phoneme[10]]=dico_key[str(let11selectkey.val)] - if (nbr_phoneme>11): - dico_correspondance[liste_phoneme[11]]=dico_key[str(let12selectkey.val)] - if (nbr_phoneme>12): - dico_correspondance[liste_phoneme[12]]=dico_key[str(let13selectkey.val)] - if (nbr_phoneme>13): - dico_correspondance[liste_phoneme[13]]=dico_key[str(let14selectkey.val)] - if (nbr_phoneme>14): - dico_correspondance[liste_phoneme[14]]=dico_key[str(let15selectkey.val)] - if (nbr_phoneme>15): - dico_correspondance[liste_phoneme[15]]=dico_key[str(let16selectkey.val)] - if (nbr_phoneme>16): - dico_correspondance[liste_phoneme[16]]=dico_key[str(let17selectkey.val)] - if (nbr_phoneme>17): - dico_correspondance[liste_phoneme[17]]=dico_key[str(let18selectkey.val)] - if (nbr_phoneme>18): - dico_correspondance[liste_phoneme[18]]=dico_key[str(let19selectkey.val)] - if (nbr_phoneme>19): - dico_correspondance[liste_phoneme[19]]=dico_key[str(let20selectkey.val)] - if (nbr_phoneme>20): - dico_correspondance[liste_phoneme[20]]=dico_key[str(let21selectkey.val)] - if (nbr_phoneme>21): - dico_correspondance[liste_phoneme[21]]=dico_key[str(let22selectkey.val)] - if (nbr_phoneme>22): - dico_correspondance[liste_phoneme[22]]=dico_key[str(let23selectkey.val)] - if (nbr_phoneme>23): - dico_correspondance[liste_phoneme[23]]=dico_key[str(let24selectkey.val)] - if (nbr_phoneme>24): - dico_correspondance[liste_phoneme[24]]=dico_key[str(let25selectkey.val)] - if (nbr_phoneme>25): - dico_correspondance[liste_phoneme[25]]=dico_key[str(let26selectkey.val)] - if (nbr_phoneme>26): - dico_correspondance[liste_phoneme[26]]=dico_key[str(let27selectkey.val)] - if (nbr_phoneme>27): - 
dico_correspondance[liste_phoneme[27]]=dico_key[str(let28selectkey.val)] - if (nbr_phoneme>28): - dico_correspondance[liste_phoneme[28]]=dico_key[str(let29selectkey.val)] - if (nbr_phoneme>29): - dico_correspondance[liste_phoneme[29]]=dico_key[str(let30selectkey.val)] - if (nbr_phoneme>30): - dico_correspondance[liste_phoneme[30]]=dico_key[str(let31selectkey.val)] - if (nbr_phoneme>31): - dico_correspondance[liste_phoneme[31]]=dico_key[str(let32selectkey.val)] - - print dico_correspondance - - -#cette fonction ajoute un points a la cle donnee a la frame donnee -#we add a point to the IPO curve Target -def ajoute_point(cle,frame,valeur): - cle.setInterpolation('Linear') - cle.append((frame,valeur)) - cle.Recalc() - -#cette fonction parcours le dictionnaire des frame à ajouter et construit les points -#we add all the point to the IPO Curve -def construction_lipsynchro(): - print "je construit" - doublet_old="" - #construction de la liste des frame - cpt=0 - liste_frame=[] - for frame in dico_phoneme_export: - liste_frame.append(int(frame)) - cpt=cpt+1 - liste_frame.sort() - print "listeframe" - print liste_frame - print "fini" - - for doublet in liste_frame: - ajoute_point(dico_correspondance[dico_phoneme_export[str(doublet)]],doublet,1) - if (doublet_old==""): - ajoute_point(dico_correspondance[dico_phoneme_export[str(doublet)]],(doublet-2),0) - if (doublet_old!=''): - if (dico_correspondance[dico_phoneme_export[str(doublet)]]!=dico_correspondance[dico_phoneme_export[doublet_old]]): - print "doublet:"+str(doublet) - print "doublet old:"+doublet_old - ajoute_point(dico_correspondance[dico_phoneme_export[doublet_old]],(int(doublet_old)+2),0) - ajoute_point(dico_correspondance[dico_phoneme_export[str(doublet)]],(doublet-2),0) - doublet_old=str(doublet) - - -#end of my functions we begin the execution -#je commence l execution----------------------------------------------------------------------------------------------- -#voici mes variables - -#declaration et instanciation -#decleration and instanciation - - -#voici mon objet de travail -objet_travail=Create(0) - -#my soft type -soft_type=1 - -#voici la liste des phoneme effectivement utilise -#the phonems'list -#liste_phoneme_papagayo=['AI','E','O','U','FV','L','WQ','MBP','etc','rest'] -#liste_phoneme_jlipsinch=['A','B','C','Closed','D','E','F','G','I','K','L','M','N','O','P','Q','R','S','SH','T','TH','U','V','W'] - -liste_phoneme=[] -#voici mon dictionnaire des frames o -dico_phoneme_export = Create(0) -dico_phoneme_export={} -dico_phoneme={} - - -#voici mes cle -key_menu="" -dico_key={} - -#voici mes ipo -dico_bloc={} -iponame = Create(0) - -#voici mon dictionnaire de correspondance -dico_correspondance={} - -try: - #on verifie est bien une mesh et qu'il a des courbes - if ((Blender.Object.GetSelected()[0].getType()=='Mesh')): - #on verifie que l'objet a bien toute ses Courbes - if (len(Blender.Object.GetSelected()[0].getData().getKey().getBlocks())-1==Blender.Object.GetSelected()[0].getData().getKey().getIpo().getNcurves()): - etape=3 - #on lance la creation du dictionnaire - recuperation_courbe() - else: - print "not the good number of IPO Curve" - etape = 0 - else: - print "error: bad object Type:" - print Blender.Object.GetSelected()[0].getType() - etape = 0 -except: - print 'error: exception' - etape = 0 - - -#voici le fichier dictionnaire -mon_fichier_dico="" - -#voici le fichier export pamela -mon_fichier_export="" - - -let01selectkey = Create(0) -let02selectkey = Create(0) -let03selectkey = Create(0) -let04selectkey = Create(0) 
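
Editorial note on the removed script: its core is the Moho/Papagayo-style export read by lecture_chaine() (one version line, then whitespace-separated "frame phoneme" pairs) and the key placement done by construction_lipsynchro() (value 1.0 on each phoneme's frame, 0.0 two frames before the first key and around every change of shape). The following is a minimal, standalone sketch of that data flow in plain Python, with hypothetical names (read_moho_export, build_keyframes, phoneme_to_curve) and no Blender/IPO API; it is an illustration, not a drop-in replacement.

def read_moho_export(path):
    """Return {frame_number: phoneme} from a Moho/Papagayo-style export file."""
    frames = {}
    with open(path) as fh:
        fh.readline()                      # first line carries the exporter version
        for line in fh:
            parts = line.split()
            if len(parts) >= 2:
                frames[int(parts[0])] = parts[1]
    return frames

def build_keyframes(frames, phoneme_to_curve):
    """Return {curve: [(frame, value), ...]} mimicking the points the script
    inserts via ajoute_point(): 1.0 on the phoneme's frame, 0.0 two frames
    before/after a change of shape."""
    keys = {}
    def add(curve, frame, value):
        keys.setdefault(curve, []).append((frame, value))
    prev_frame = None
    for frame in sorted(frames):
        curve = phoneme_to_curve[frames[frame]]
        add(curve, frame, 1.0)
        if prev_frame is None:
            add(curve, frame - 2, 0.0)
        else:
            prev_curve = phoneme_to_curve[frames[prev_frame]]
            if prev_curve != curve:
                add(prev_curve, prev_frame + 2, 0.0)
                add(curve, frame - 2, 0.0)
        prev_frame = frame
    return keys

In the original, phoneme_to_curve corresponds to the mapping built by construction_dico_correspondance() from the menu selections; here it is assumed to be any dict from phoneme string to a hashable curve identifier.
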
-let05selectkey = Create(0) -let06selectkey = Create(0) -let07selectkey = Create(0) -let08selectkey = Create(0) -let09selectkey = Create(0) -let10selectkey = Create(0) -let11selectkey = Create(0) -let12selectkey = Create(0) -let13selectkey = Create(0) -let14selectkey = Create(0) -let15selectkey = Create(0) -let16selectkey = Create(0) -let17selectkey = Create(0) -let18selectkey = Create(0) -let19selectkey = Create(0) -let20selectkey = Create(0) -let21selectkey = Create(0) -let22selectkey = Create(0) -let23selectkey = Create(0) -let24selectkey = Create(0) - - -Register (trace,event,bevent) diff --git a/release/scripts/bpydata/KUlang.txt b/release/scripts/bpydata/KUlang.txt deleted file mode 100644 index 38605d69c9f..00000000000 --- a/release/scripts/bpydata/KUlang.txt +++ /dev/null @@ -1,121 +0,0 @@ -Version 3.233-2004 -****************** -Espanol -Sale del programa -Utilidades de...%t|Alinea objetos%x1|Creacion%x2|Edita mallas%x3|Edita objetos%x4 -11 -Mov -Esc -Encaja -Abarca -Separa -Alinea -Rota -Incr. -Crea nuevos objetos -Es+ -Es* -Separar entre:%t|Origenes%x1|Centros geometricos%x2|Minimos%x3|Maximos%x4|Baricentro%x5|Objetos%x6 -Crear%t|Arco (3 ptos.)%x1|Arco (interactivo)%x2|Circunferencia (3 ptos.)%x3 -12 -Puntos -Centro -Orden -Objeto -AngIni: -AngFin: -Angulo: -Radio: -Puntos: -Centro -Nombre: -Puntos -Modifica vertices%t|Subdivide%x1|Envia a un plano%x2|Aplica LocRotSize%x3 -Partes -Proyectar en el plano:%t|Coordenado global...%x1|Coordenado local...%x2 -Actuar sobre el plano%t|Yz%x1|Zx%x2|Xy%x3 -En la dirección%t|X%x1|Y%x2|Z%x3|Ortogonal al plano%x4 -Captura -Buffer%t|Copia vector diferencia%x1|Copia distancia%x2|Copia diferencia de rotacion%x3|Copia media LocRotSiz%x4|Ver buffer en consola%x5 -Transformar LocRotSize%t|Hacia el obj. activo%x1|Aleatoriamente%x2 -Poner a distancia fija%x1|Sumar (desp. absoluto)%x2|Multiplicar (desp. relativo)%x3 -******************** -English -Exit program -Utils about:%t|Align Objects%x1|Create%x2|Edit Meshes%x3|Edit Objects%x4 -11 -Mov -Sca -Fit -Embrace -Separate -Align -Rota -Incr. -Create new objects -Sc+ -Sc* -Separate between:%t|Origins%x1|Geometric centers%x2|Minimum%x3|Maximum%x4|Baricenter%x5|Objects%x6 -Create what%t|Arc (3 pts.)%x1|Arc (interactive)%x2|Circunference (3 pts.)%x3 -12 -Points -Centre -Sort -Object -AngIni: -AngEnd: -Angle: -Radius: -Points: -Centre -ObjName: -Points -Modify vertices%t|Subdivide edges%x1|Send to a plane%x2|Set LocRotSize%x3 -Parts -Project onto the plane:%t|Global coordinated...%x1|Local coordinated...%x2 -Act on plane%t|Yz%x1|Zx%x2|Xy%x3 -In direction%t|X%x1|Y%x2|Z%x3|Ortogonal to plane%x4 -Get -Buffer%t|Copy diference vector%x1|Copy distance%x2|Copy rot diference%x3|Copy LocRotSiz average%x4|Show Buffer in Console%x5 -Transform LocRotSize%t|Close to active%x1|Randomly%x2 -Set at fixed distance%x1|Add (absolute displ.)%x2|Multiply (relative displ.)%x3 -******************** -Catala -Surt del programa -Utilitats de...%t|Alinea objectes%x1|Creacio%x2|Edita malles%x3|Edita objetes%x4 -11 -Mov -Esc -Encaixa -Abarca -Separa -Alinea -Rotacio -Incr. 
-Crea objectes nous -Es+ -Es* -Separa entra:%t|Origens%x1|Centres geometrics%x2|Minims%x3|Maxims%x4|Baricentre%x5|Objectes%x6 -Crear%t|Arc (3 pts.)%x1|Arc (interactiu)%x2|Circumferencia (3 pts.)%x3 -12 -Punts -Centre -Ordre -Objecte -AngIni: -AngFi: -Angle: -Radi: -Punts: -Centre -Nom: -Punts -Modifica vertex%t|Subdivideix%x1|Envia a un pla%x2|Aplica LocRotSize%x3 -Parts -Projectar en el pla:%t|Coordenacio global...%x1|Coordenacio local...%x2 -Actuar sobre el pla%t|Yz%x1|Zx%x2|Xy%x3 -En la direccio%t|X%x1|Y%x2|Z%x3|Ortogonal al pla%x4 -Captura -Buffer%t|Copia vector diferencia%x1|Copia distancia%x2|Copia diferencia de rotacio%x3|Copia mitjana LocRotSiz%x4|Veure buffer en consola%x5 -Transformar LocRotSize%t|Cap al obj. actiu%x1|Aleatoriamente%x2 -Posar a distancia fixa%x1|Sumar (desp. absolut)%x2|Multiplicar (desp. relatiu)%x3 diff --git a/release/scripts/bpydata/config/readme.txt b/release/scripts/bpydata/config/readme.txt deleted file mode 100644 index 4b5cb61b063..00000000000 --- a/release/scripts/bpydata/config/readme.txt +++ /dev/null @@ -1,6 +0,0 @@ -This folder is for automatically saved scripts configuration data. - -To use this feature scripts just need to set a proper Blender.Registry key. - -To know more, check the API Reference doc (specifically the API_related and -Registry parts) and the documentation for the "Scripts Config Editor" script. diff --git a/release/scripts/bpydata/readme.txt b/release/scripts/bpydata/readme.txt deleted file mode 100644 index 3e640e27c4b..00000000000 --- a/release/scripts/bpydata/readme.txt +++ /dev/null @@ -1,9 +0,0 @@ -This directory is the default place for scripts to put their data, -like internal files needed by the script and its saved configuration. - -Scripts can find the path to this dir using Blender.Get("datadir"). 
-Ex: - -import Blender -print Blender.Get("datadir") - diff --git a/release/scripts/bpymodules/BPyAddMesh.py b/release/scripts/bpymodules/BPyAddMesh.py deleted file mode 100644 index 901e68866cc..00000000000 --- a/release/scripts/bpymodules/BPyAddMesh.py +++ /dev/null @@ -1,159 +0,0 @@ -import Blender -from Blender.Window import EditMode, GetCursorPos, GetViewQuat -import bpy -import BPyMessages - -def add_mesh_simple(name, verts, edges, faces): - ''' - Adds a mesh from verts, edges and faces - - name - new object/mesh name - verts - list of 3d vectors - edges - list of int pairs - faces - list of int triplets/quads - ''' - - scn = bpy.data.scenes.active - if scn.lib: return - ob_act = scn.objects.active - - is_editmode = EditMode() - - cursor = GetCursorPos() - quat = None - if is_editmode or Blender.Get('add_view_align'): # Aligning seems odd for editmode, but blender does it, oh well - try: quat = Blender.Mathutils.Quaternion(GetViewQuat()) - except: pass - - # Exist editmode for non mesh types - if ob_act and ob_act.type != 'Mesh' and is_editmode: - EditMode(0) - - # We are in mesh editmode - if EditMode(): - me = ob_act.getData(mesh=1) - - if me.multires: - BPyMessages.Error_NoMeshMultiresEdit() - return - - # Add to existing mesh - # must exit editmode to modify mesh - EditMode(0) - - me.sel = False - - vert_offset = len(me.verts) - edge_offset = len(me.edges) - face_offset = len(me.faces) - - # transform the verts - txmat = Blender.Mathutils.TranslationMatrix(Blender.Mathutils.Vector(cursor)) - if quat: - mat = quat.toMatrix() - mat.invert() - mat.resize4x4() - txmat = mat * txmat - - txmat = txmat * ob_act.matrixWorld.copy().invert() - - - me.verts.extend(verts) - # Transform the verts by the cursor and view rotation - me.transform(txmat, selected_only=True) - - if vert_offset: - me.edges.extend([[i+vert_offset for i in e] for e in edges]) - me.faces.extend([[i+vert_offset for i in f] for f in faces]) - else: - # Mesh with no data, unlikely - me.edges.extend(edges) - me.faces.extend(faces) - else: - - # Object mode add new - - me = bpy.data.meshes.new(name) - me.verts.extend(verts) - me.edges.extend(edges) - me.faces.extend(faces) - me.sel = True - - # Object creation and location - scn.objects.selected = [] - ob_act = scn.objects.new(me, name) - scn.objects.active = ob_act - - if quat: - mat = quat.toMatrix() - mat.invert() - mat.resize4x4() - ob_act.setMatrix(mat) - - ob_act.loc = cursor - - me.calcNormals() - - if is_editmode or Blender.Get('add_editmode'): - EditMode(1) - - - - - -def write_mesh_script(filepath, me): - ''' - filepath - path to py file - me - mesh to write - ''' - - name = me.name - file = open(filepath, 'w') - - file.write('#!BPY\n') - file.write('"""\n') - file.write('Name: \'%s\'\n' % name) - file.write('Blender: 245\n') - file.write('Group: \'AddMesh\'\n') - file.write('"""\n\n') - file.write('import BPyAddMesh\n') - file.write('from Blender.Mathutils import Vector\n\n') - - file.write('verts = [\\\n') - for v in me.verts: - file.write('Vector(%f,%f,%f),\\\n' % tuple(v.co)) - file.write(']\n') - - file.write('edges = []\n') # TODO, write loose edges - - file.write('faces = [\\\n') - for f in me.faces: - file.write('%s,\\\n' % str(tuple([v.index for v in f]))) - file.write(']\n') - - file.write('BPyAddMesh.add_mesh_simple("%s", verts, edges, faces)\n' % name) - -# The script below can make a file from a mesh with teh above function... 
-''' -#!BPY -""" -Name: 'Mesh as AddMesh Script' -Blender: 242 -Group: 'Mesh' -Tip: '' -""" -import BPyAddMesh -reload(BPyAddMesh) - -import bpy - -def main(): - # Add error checking - scn = bpy.data.scenes.active - ob = scn.objects.active - me = ob.getData(mesh=1) - - BPyAddMesh.write_mesh_script('/test.py', me) - -main() -''' diff --git a/release/scripts/bpymodules/BPyArmature.py b/release/scripts/bpymodules/BPyArmature.py deleted file mode 100644 index 63df02d080c..00000000000 --- a/release/scripts/bpymodules/BPyArmature.py +++ /dev/null @@ -1,152 +0,0 @@ -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -# Version History: -# 1.0 original release bakes an armature into a matrix -# 1.1 optional params (ACTION_BAKE, ACTION_BAKE_FIRST_FRAME, direct function to key and return the Action - -import Blender -from Blender import sys -import bpy -def getBakedPoseData(ob_arm, start_frame, end_frame, ACTION_BAKE = False, ACTION_BAKE_FIRST_FRAME = True): - ''' - If you are currently getting IPO's this function can be used to - ACTION_BAKE==False: return a list of frame aligned bone dictionary's - ACTION_BAKE==True: return an action with keys aligned to bone constrained movement - if ACTION_BAKE_FIRST_FRAME is not supplied or is true: keys begin at frame 1 - - The data in these can be swaped in for the IPO loc and quat - - If you want to bake an action, this is not as hard and the ipo hack can be removed. - ''' - - # --------------------------------- Dummy Action! 
Only for this functon - backup_action = ob_arm.action - backup_frame = Blender.Get('curframe') - - DUMMY_ACTION_NAME = '~DONT_USE~' - # Get the dummy action if it has no users - try: - new_action = bpy.data.actions[DUMMY_ACTION_NAME] - if new_action.users: - new_action = None - except: - new_action = None - - if not new_action: - new_action = bpy.data.actions.new(DUMMY_ACTION_NAME) - new_action.fakeUser = False - # ---------------------------------- Done - - Matrix = Blender.Mathutils.Matrix - Quaternion = Blender.Mathutils.Quaternion - Vector = Blender.Mathutils.Vector - POSE_XFORM= [Blender.Object.Pose.LOC, Blender.Object.Pose.ROT] - - # Each dict a frame - bake_data = [{} for i in xrange(1+end_frame-start_frame)] - - pose= ob_arm.getPose() - armature_data= ob_arm.getData(); - pose_bones= pose.bones - - # --------------------------------- Build a list of arma data for reuse - armature_bone_data = [] - bones_index = {} - for bone_name, rest_bone in armature_data.bones.items(): - pose_bone = pose_bones[bone_name] - rest_matrix = rest_bone.matrix['ARMATURESPACE'] - rest_matrix_inv = rest_matrix.copy().invert() - armature_bone_data.append( [len(bones_index), -1, bone_name, rest_bone, rest_matrix, rest_matrix_inv, pose_bone, None ]) - bones_index[bone_name] = len(bones_index) - - # Set the parent ID's - for bone_name, pose_bone in pose_bones.items(): - parent = pose_bone.parent - if parent: - bone_index= bones_index[bone_name] - parent_index= bones_index[parent.name] - armature_bone_data[ bone_index ][1]= parent_index - # ---------------------------------- Done - - - - # --------------------------------- Main loop to collect IPO data - frame_index = 0 - NvideoFrames= end_frame-start_frame - for current_frame in xrange(start_frame, end_frame+1): - if frame_index==0: start=sys.time() - elif frame_index==15: print NvideoFrames*(sys.time()-start),"seconds estimated..." #slows as it grows *3 - elif frame_index >15: - percom= frame_index*100/NvideoFrames - print "Frame %i Overall %i percent complete\r" % (current_frame, percom), - ob_arm.action = backup_action - #pose.update() # not needed - Blender.Set('curframe', current_frame) - #Blender.Window.RedrawAll() - #frame_data = bake_data[frame_index] - ob_arm.action = new_action - ###for i,pose_bone in enumerate(pose_bones): - - for index, parent_index, bone_name, rest_bone, rest_matrix, rest_matrix_inv, pose_bone, ipo in armature_bone_data: - matrix= pose_bone.poseMatrix - parent_bone= rest_bone.parent - if parent_index != -1: - parent_pose_matrix = armature_bone_data[parent_index][6].poseMatrix - parent_bone_matrix_inv = armature_bone_data[parent_index][5] - matrix= matrix * parent_pose_matrix.copy().invert() - rest_matrix= rest_matrix * parent_bone_matrix_inv - - matrix=matrix * rest_matrix.copy().invert() - pose_bone.quat= matrix.toQuat() - pose_bone.loc= matrix.translationPart() - if ACTION_BAKE==False: - pose_bone.insertKey(ob_arm, 1, POSE_XFORM) # always frame 1 - - # THIS IS A BAD HACK! 
IT SUCKS BIGTIME BUT THE RESULT ARE NICE - # - use a temp action and bake into that, always at the same frame - # so as not to make big IPO's, then collect the result from the IPOs - - # Now get the data from the IPOs - if not ipo: ipo = armature_bone_data[index][7] = new_action.getChannelIpo(bone_name) - - loc = Vector() - quat = Quaternion() - - for curve in ipo: - val = curve.evaluate(1) - curve_name= curve.name - if curve_name == 'LocX': loc[0] = val - elif curve_name == 'LocY': loc[1] = val - elif curve_name == 'LocZ': loc[2] = val - elif curve_name == 'QuatW': quat[3] = val - elif curve_name == 'QuatX': quat[0] = val - elif curve_name == 'QuatY': quat[1] = val - elif curve_name == 'QuatZ': quat[2] = val - - bake_data[frame_index][bone_name] = loc, quat - else: - if ACTION_BAKE_FIRST_FRAME: pose_bone.insertKey(ob_arm, frame_index+1, POSE_XFORM) - else: pose_bone.insertKey(ob_arm, current_frame , POSE_XFORM) - frame_index+=1 - print "\nBaking Complete." - ob_arm.action = backup_action - if ACTION_BAKE==False: - Blender.Set('curframe', backup_frame) - return bake_data - elif ACTION_BAKE==True: - return new_action - else: print "ERROR: Invalid ACTION_BAKE %i sent to BPyArmature" % ACTION_BAKE - - - diff --git a/release/scripts/bpymodules/BPyBlender.py b/release/scripts/bpymodules/BPyBlender.py deleted file mode 100644 index 681dff63cf8..00000000000 --- a/release/scripts/bpymodules/BPyBlender.py +++ /dev/null @@ -1,36 +0,0 @@ -# $Id$ -# -# -------------------------------------------------------------------------- -# BPyBlender.py version 0.3 Mar 20, 2005 -# -------------------------------------------------------------------------- -# helper functions to be used by other scripts -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -# Basic set of modules Blender should have in all supported platforms. -# The second and third lines are the contents of the Python23.zip file -# included with Windows Blender binaries along with zlib.pyd. -# Other platforms are assumed to have Python installed. 
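
Editorial note on the removed getBakedPoseData(): per bone it reduces the armature-space pose matrix to a transform relative to the parent and the rest pose before keying loc/quat. A hedged sketch of that reduction using NumPy (an added dependency, not the Blender 2.4 Mathutils API; the multiplication order mirrors the row-vector convention of the code above, and bone_delta is a hypothetical name):

import numpy as np

def bone_delta(pose, rest, parent_pose=None, parent_rest=None):
    """4x4 local transform that layers on top of the rest pose to give `pose`.
    All inputs are armature-space matrices as ndarrays; parents may be None."""
    if parent_pose is not None:
        pose = pose @ np.linalg.inv(parent_pose)   # pose relative to parent pose
        rest = rest @ np.linalg.inv(parent_rest)   # rest relative to parent rest
    return pose @ np.linalg.inv(rest)              # delta that gets keyed as loc/quat
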
-basic_modules = [ -'Blender', -'chunk','colorsys','copy','copy_reg','gzip','os','random','repr','stat', -'string','StringIO','types','UserDict','webbrowser', 'zlib', 'math', -'BPyBlender', 'BPyRegistry' -] diff --git a/release/scripts/bpymodules/BPyCurve.py b/release/scripts/bpymodules/BPyCurve.py deleted file mode 100644 index 3dd5f1784f2..00000000000 --- a/release/scripts/bpymodules/BPyCurve.py +++ /dev/null @@ -1,79 +0,0 @@ -# -------------------------------------------------------------------------- -# BPyImage.py version 0.15 -# -------------------------------------------------------------------------- -# helper functions to be used by other scripts -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -from Blender import * - -def curve2vecs(ob, WORLDSPACE= True): - ''' - Takes a curve object and retuirns a list of vec lists (polylines) - one list per curve - - This is usefull as a way to get a polyline per curve - so as not to have to deal with the spline types directly - ''' - if ob.type != 'Curve': - raise 'must be a curve object' - - me_dummy = Mesh.New() - me_dummy.getFromObject(ob) - - if WORLDSPACE: - me_dummy.transform(ob.matrixWorld) - - # build an edge dict - edges = {} # should be a set - - def sort_pair(i1, i2): - if i1 > i2: return i2, i1 - else: return i1, i2 - - for ed in me_dummy.edges: - edges[sort_pair(ed.v1.index,ed.v2.index)] = None # dummy value - - # now set the curves - first_time = True - - current_vecs = [] - vec_list = [current_vecs] - - for v in me_dummy.verts: - if first_time: - first_time = False - current_vecs.append(v.co.copy()) - last_index = v.index - else: - index = v.index - if edges.has_key(sort_pair(index, last_index)): - current_vecs.append( v.co.copy() ) - else: - current_vecs = [] - vec_list.append(current_vecs) - - last_index = index - - me_dummy.verts = None - - return vec_list - - diff --git a/release/scripts/bpymodules/BPyImage.py b/release/scripts/bpymodules/BPyImage.py deleted file mode 100644 index 504e4ee29ba..00000000000 --- a/release/scripts/bpymodules/BPyImage.py +++ /dev/null @@ -1,318 +0,0 @@ -# -------------------------------------------------------------------------- -# BPyImage.py version 0.15 -# -------------------------------------------------------------------------- -# helper functions to be used by other scripts -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or 
(at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -#===========================================================================# -# Comprehensive image loader, will search and find the image # -# Will return a blender image or a new image if the image is missing # -#===========================================================================# -import bpy -from Blender import sys -try: - import os -except: - os=None - -#==============================================# -# Return directory, where the file is # -#==============================================# -def stripFile(path): - lastSlash = max(path.rfind('\\'), path.rfind('/')) - if lastSlash != -1: - path = path[:lastSlash] - newpath= '%s%s' % (path, sys.sep) - else: - newpath= path - return newpath - -#==============================================# -# Strips the slashes from the back of a string # -#==============================================# -def stripPath(path): - return path.split('/')[-1].split('\\')[-1] - -#====================================================# -# Strips the prefix off the name before writing # -#====================================================# -def stripExt(name): # name is a string - index = name.rfind('.') - if index != -1: - return name[ : index ] - else: - return name - -def getExt(name): - index = name.rfind('.') - if index != -1: - return name[index+1:] - return name - -#====================================================# -# Adds a slash to the end of a path if its not there # -#====================================================# -def addSlash(path): - if not path: - return '' - - elif path.endswith('\\') or path.endswith('/'): - return path - return path + sys.sep - - -def comprehensiveImageLoad(imagePath, filePath, PLACE_HOLDER= True, RECURSIVE=True, VERBOSE=False, CONVERT_CALLBACK=None): - ''' - imagePath: The image filename - If a path precedes it, this will be searched as well. - - filePath: is the directory where the image may be located - any file at teh end will be ignored. - - PLACE_HOLDER: if True a new place holder image will be created. - this is usefull so later you can relink the image to its original data. - - VERBOSE: If True debug info will be printed. - - RECURSIVE: If True, directories will be recursivly searched. - Be carefull with this if you have files in your root directory because it may take a long time. - - CASE_INSENSITIVE: for non win32 systems, find the correct case for the file. - - CONVERT_CALLBACK: a function that takes an existing path and returns a new one. - Use this when loading image formats blender may not support, the CONVERT_CALLBACK - can take the path for a GIF (for example), convert it to a PNG and return the PNG's path. - For formats blender can read, simply return the path that is given. - ''' - - # VERBOSE = True - - if VERBOSE: print 'img:', imagePath, 'file:', filePath - - if os == None and CASE_INSENSITIVE: - CASE_INSENSITIVE = True - - # When we have the file load it with this. 
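
Editorial note: the helpers above (stripFile, stripPath, stripExt, getExt, addSlash) hand-roll path handling so the module keeps working when the os module is unavailable and so both '/' and '\\' separators are accepted on any platform. Where os is available, roughly the same operations map onto os.path; a small equivalence sketch (the path value is a hypothetical example):

import os

path = '/textures/skin/face.TGA'
os.path.dirname(path) + os.sep               # ~ stripFile(path)
os.path.basename(path)                       # ~ stripPath(path)
os.path.splitext(os.path.basename(path))[0]  # ~ stripExt(stripPath(path))
os.path.splitext(path)[1].lstrip('.')        # ~ getExt(path)
os.path.join(path, '')                       # ~ addSlash(path)

The equivalences are approximate: os.path only understands the host platform's separator, which is exactly why the module carries its own versions.
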
try/except niceness. - def imageLoad(path): - #if path.endswith('\\') or path.endswith('/'): - # raise 'INVALID PATH' - - if CONVERT_CALLBACK: - path = CONVERT_CALLBACK(path) - - try: - img = bpy.data.images.new(filename=path) - if VERBOSE: print '\t\tImage loaded "%s"' % path - return img - except: - if VERBOSE: - if sys.exists(path): print '\t\tImage failed loading "%s", mabe its not a format blender can read.' % (path) - else: print '\t\tImage not found, making a place holder "%s"' % (path) - if PLACE_HOLDER: - img= bpy.data.images.new(stripPath(path),4,4) - img.filename= path - return img #blank image - else: - return None - - # Image formats blender can read - IMAGE_EXT = ['jpg', 'jpeg', 'png', 'tga', 'bmp', 'rgb', 'sgi', 'bw', 'iff', 'lbm', # Blender Internal - 'gif', 'psd', 'tif', 'tiff', 'pct', 'pict', 'pntg', 'qtif'] # Quacktime, worth a try. - - imageFileName = stripPath(imagePath) # image path only - imageFileName_lower = imageFileName.lower() # image path only - - if VERBOSE: print '\tSearchingExisting Images for "%s"' % imagePath - for i in bpy.data.images: - if stripPath(i.filename.lower()) == imageFileName_lower: - if VERBOSE: print '\t\tUsing existing image.' - return i - - - if VERBOSE: print '\tAttempting to load "%s"' % imagePath - if sys.exists(imagePath): - if VERBOSE: print '\t\tFile found where expected "%s".' % imagePath - return imageLoad(imagePath) - - - - imageFileName_noext = stripExt(imageFileName) # With no extension. - imageFileName_noext_lower = stripExt(imageFileName_lower) # With no extension. - imageFilePath = stripFile(imagePath) - - # Remove relative path from image path - if imageFilePath.startswith('./') or imageFilePath.startswith('.\\'): - imageFilePath = imageFilePath[2:] - - - # Attempt to load from obj path. - tmpPath = stripFile(filePath) + stripPath(imageFileName) - if sys.exists(tmpPath): - if VERBOSE: print '\t\tFile found in path (1)"%s".' % tmpPath - return imageLoad(tmpPath) - - - # os needed if we go any further. - if not os: - if VERBOSE: print '\t\tCreating a placeholder with a face path: "%s".' % imagePath - return imageLoad(imagePath) # Will jus treturn a placeholder. - - - # We have os. - # GATHER PATHS. - paths = {} # Store possible paths we may use, dict for no doubles. - tmpPath = addSlash(sys.expandpath('//')) # Blenders path - if sys.exists(tmpPath): - if VERBOSE: print '\t\tSearching in %s' % tmpPath - paths[tmpPath] = [os.listdir(tmpPath)] # Orig name for loading - paths[tmpPath].append([f.lower() for f in paths[tmpPath][0]]) # Lower case list. - paths[tmpPath].append([stripExt(f) for f in paths[tmpPath][1]]) # Lower case no ext - else: - if VERBOSE: print '\tNo Path: "%s"' % tmpPath - - tmpPath = imageFilePath - if sys.exists(tmpPath): - if VERBOSE: print '\t\tSearching in %s' % tmpPath - paths[tmpPath] = [os.listdir(tmpPath)] # Orig name for loading - paths[tmpPath].append([f.lower() for f in paths[tmpPath][0]]) # Lower case list. - paths[tmpPath].append([stripExt(f) for f in paths[tmpPath][1]]) # Lower case no ext - else: - if VERBOSE: print '\tNo Path: "%s"' % tmpPath - - tmpPath = stripFile(filePath) - if sys.exists(tmpPath): - if VERBOSE: print '\t\tSearching in %s' % tmpPath - paths[tmpPath] = [os.listdir(tmpPath)] # Orig name for loading - paths[tmpPath].append([f.lower() for f in paths[tmpPath][0]]) # Lower case list. 
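
Editorial note: each candidate directory gathered above is stored with three parallel listings (original names, lower-cased names, lower-cased names without extension) so later lookups can match case-insensitively and across swapped extensions. The repeated stanzas could be produced by one helper; a hedged sketch in plain Python (listing_tables is a hypothetical name, not part of the original module):

import os

def listing_tables(dir_path):
    """Return (orig_names, lower_names, lower_names_no_ext) for one directory,
    or None when it does not exist; mirrors one paths[...] entry above."""
    if not os.path.isdir(dir_path):
        return None
    orig = os.listdir(dir_path)
    lower = [f.lower() for f in orig]
    lower_noext = [os.path.splitext(f)[0] for f in lower]
    return orig, lower, lower_noext
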
- paths[tmpPath].append([stripExt(f) for f in paths[tmpPath][1]]) # Lower case no ext - else: - if VERBOSE: print '\tNo Path: "%s"' % tmpPath - - tmpPath = addSlash(bpy.config.textureDir) - if tmpPath and sys.exists(tmpPath): - if VERBOSE: print '\t\tSearching in %s' % tmpPath - paths[tmpPath] = [os.listdir(tmpPath)] # Orig name for loading - paths[tmpPath].append([f.lower() for f in paths[tmpPath][0]]) # Lower case list. - paths[tmpPath].append([stripExt(f) for f in paths[tmpPath][1]]) # Lower case no ext - else: - if VERBOSE: print '\tNo Path: "%s"' % tmpPath - - # Add path if relative image patrh was given. - tmp_paths= paths.keys() - for k in tmp_paths: - tmpPath = k + imageFilePath - if sys.exists(tmpPath): - paths[tmpPath] = [os.listdir(tmpPath)] # Orig name for loading - paths[tmpPath].append([f.lower() for f in paths[tmpPath][0]]) # Lower case list. - paths[tmpPath].append([stripExt(f) for f in paths[tmpPath][1]]) # Lower case no ext - else: - if VERBOSE: print '\tNo Path: "%s"' % tmpPath - # DONE - # - for path, files in paths.iteritems(): - if sys.exists(path + imageFileName): - if VERBOSE: print '\tFound image at path: "%s" file" "%s"' % (path, imageFileName) - return imageLoad(path + imageFileName) - - # If the files not there then well do a case insensitive seek. - filesOrigCase = files[0] - filesLower = files[1] - filesLowerNoExt = files[2] - - # We are going to try in index the file directly, if its not there just keep on - - index = None - try: - # Is it just a case mismatch? - index = filesLower.index(imageFileName_lower) - except: - try: - # Have the extensions changed? - index = filesLowerNoExt.index(imageFileName_noext_lower) - - ext = getExt( filesLower[index] ) # Get the extension of the file that matches all but ext. - - # Check that the ext is useable eg- not a 3ds file :) - if ext.lower() not in IMAGE_EXT: - index = None - - except: - index = None - - if index != None: - tmpPath = path + filesOrigCase[index] - img = imageLoad( tmpPath ) - if img != None: - if VERBOSE: print '\t\tImage Found "%s"' % tmpPath - return img - - if RECURSIVE: - # IMAGE NOT FOUND IN ANY OF THE DIRS!, DO A RECURSIVE SEARCH. - if VERBOSE: print '\t\tImage Not Found in any of the dirs, doing a recusrive search' - for path in paths.iterkeys(): - # Were not going to use files - if path == '/' or len(path) == 3 and path[1:] == ':\\': - continue - - # print path , 'ASS' - - #------------------ - # finds the file starting at the root. - # def findImage(findRoot, imagePath): - #W--------------- - - # ROOT, DIRS, FILES - pathWalk = os.walk(path) - pathList = [True] - - matchList = [] # Store a list of (match, size), choose the biggest. - while True: - try: - pathList = pathWalk.next() - except: - break - - for file in pathList[2]: - file_lower = file.lower() - # FOUND A MATCH - if (file_lower == imageFileName_lower) or\ - (stripExt(file_lower) == imageFileName_noext_lower and getExt(file_lower) in IMAGE_EXT): - name = pathList[0] + sys.sep + file - size = os.path.getsize(name) - if VERBOSE: print '\t\t\tfound:', name - matchList.append( (name, size) ) - - if matchList: - # Sort by file size - matchList.sort(lambda A, B: cmp(B[1], A[1]) ) - - if VERBOSE: print '\t\tFound "%s"' % matchList[0][0] - - # Loop through all we have found - img = None - for match in matchList: - img = imageLoad(match[0]) # 0 - first, 0 - pathname - if img != None: - break - return img - - # No go. - if VERBOSE: print '\t\tImage Not Found after looking everywhere! 
"%s"' % imagePath - return imageLoad(imagePath) # Will jus treturn a placeholder. diff --git a/release/scripts/bpymodules/BPyMathutils.py b/release/scripts/bpymodules/BPyMathutils.py deleted file mode 100644 index 4882e9aaf21..00000000000 --- a/release/scripts/bpymodules/BPyMathutils.py +++ /dev/null @@ -1,228 +0,0 @@ -# $Id$ -# -# -------------------------------------------------------------------------- -# helper functions to be used by other scripts -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -import Blender -from Blender.Mathutils import * - -# ------ Mersenne Twister - start - -# Copyright (C) 1997 Makoto Matsumoto and Takuji Nishimura. -# Any feedback is very welcome. For any question, comments, -# see http://www.math.keio.ac.jp/matumoto/emt.html or email -# matumoto@math.keio.ac.jp - -# The link above is dead, this is the new one: -# http://www.math.sci.hiroshima-u.ac.jp/m-mat/MT/emt.html -# And here the license info, from Mr. Matsumoto's site: -# Until 2001/4/6, MT had been distributed under GNU Public License, -# but after 2001/4/6, we decided to let MT be used for any purpose, including -# commercial use. 2002-versions mt19937ar.c, mt19937ar-cok.c are considered -# to be usable freely. -# -# So from the year above (1997), this code is under GPL. - -# Period parameters -N = 624 -M = 397 -MATRIX_A = 0x9908b0dfL # constant vector a -UPPER_MASK = 0x80000000L # most significant w-r bits -LOWER_MASK = 0x7fffffffL # least significant r bits - -# Tempering parameters -TEMPERING_MASK_B = 0x9d2c5680L -TEMPERING_MASK_C = 0xefc60000L - -def TEMPERING_SHIFT_U(y): - return (y >> 11) - -def TEMPERING_SHIFT_S(y): - return (y << 7) - -def TEMPERING_SHIFT_T(y): - return (y << 15) - -def TEMPERING_SHIFT_L(y): - return (y >> 18) - -mt = [] # the array for the state vector -mti = N+1 # mti==N+1 means mt[N] is not initialized - -# initializing the array with a NONZERO seed -def sgenrand(seed): - # setting initial seeds to mt[N] using - # the generator Line 25 of Table 1 in - # [KNUTH 1981, The Art of Computer Programming - # Vol. 
2 (2nd Ed.), pp102] - - global mt, mti - - mt = [] - - mt.append(seed & 0xffffffffL) - for i in xrange(1, N + 1): - mt.append((69069 * mt[i-1]) & 0xffffffffL) - - mti = i -# end sgenrand - - -def genrand(): - global mt, mti - - mag01 = [0x0L, MATRIX_A] - # mag01[x] = x * MATRIX_A for x=0,1 - y = 0 - - if mti >= N: # generate N words at one time - if mti == N+1: # if sgenrand() has not been called, - sgenrand(4357) # a default initial seed is used - - for kk in xrange((N-M) + 1): - y = (mt[kk]&UPPER_MASK)|(mt[kk+1]&LOWER_MASK) - mt[kk] = mt[kk+M] ^ (y >> 1) ^ mag01[y & 0x1] - - for kk in xrange(kk, N): - y = (mt[kk]&UPPER_MASK)|(mt[kk+1]&LOWER_MASK) - mt[kk] = mt[kk+(M-N)] ^ (y >> 1) ^ mag01[y & 0x1] - - y = (mt[N-1]&UPPER_MASK)|(mt[0]&LOWER_MASK) - mt[N-1] = mt[M-1] ^ (y >> 1) ^ mag01[y & 0x1] - - mti = 0 - - y = mt[mti] - mti += 1 - y ^= TEMPERING_SHIFT_U(y) - y ^= TEMPERING_SHIFT_S(y) & TEMPERING_MASK_B - y ^= TEMPERING_SHIFT_T(y) & TEMPERING_MASK_C - y ^= TEMPERING_SHIFT_L(y) - - return ( float(y) / 0xffffffffL ) # reals - -#------ Mersenne Twister -- end - - - - -""" 2d convexhull -Based from Dinu C. Gherman's work, -modified for Blender/Mathutils by Campell Barton -""" -###################################################################### -# Public interface -###################################################################### -def convexHull(point_list_2d): - """Calculate the convex hull of a set of vectors - The vectors can be 3 or 4d but only the Xand Y are used. - returns a list of convex hull indicies to the given point list - """ - - ###################################################################### - # Helpers - ###################################################################### - - def _myDet(p, q, r): - """Calc. determinant of a special matrix with three 2D points. - - The sign, "-" or "+", determines the side, right or left, - respectivly, on which the point r lies, when measured against - a directed vector from p to q. - """ - return (q.x*r.y + p.x*q.y + r.x*p.y) - (q.x*p.y + r.x*q.y + p.x*r.y) - - def _isRightTurn((p, q, r)): - "Do the vectors pq:qr form a right turn, or not?" - #assert p[0] != q[0] and q[0] != r[0] and p[0] != r[0] - if _myDet(p[0], q[0], r[0]) < 0: - return 1 - else: - return 0 - - # Get a local list copy of the points and sort them lexically. - points = [(p, i) for i, p in enumerate(point_list_2d)] - - try: points.sort(key = lambda a: (a[0].x, a[0].y)) - except: points.sort(lambda a,b: cmp((a[0].x, a[0].y), (b[0].x, b[0].y))) - - # Build upper half of the hull. - upper = [points[0], points[1]] # cant remove these. - for i in xrange(len(points)-2): - upper.append(points[i+2]) - while len(upper) > 2 and not _isRightTurn(upper[-3:]): - del upper[-2] - - # Build lower half of the hull. - points.reverse() - lower = [points.pop(0), points.pop(1)] - for p in points: - lower.append(p) - while len(lower) > 2 and not _isRightTurn(lower[-3:]): - del lower[-2] - - # Concatenate both halfs and return. 
- return [p[1] for ls in (upper, lower) for p in ls] - - -def plane2mat(plane, normalize= False): - ''' - Takes a plane and converts to a matrix - points between 0 and 1 are up - 1 and 2 are right - assumes the plane has 90d corners - ''' - cent= (plane[0]+plane[1]+plane[2]+plane[3] ) /4.0 - - - up= cent - ((plane[0]+plane[1])/2.0) - right= cent - ((plane[1]+plane[2])/2.0) - z= up.cross(right) - - if normalize: - up.normalize() - right.normalize() - z.normalize() - - mat= Matrix(up, right, z) - - # translate - mat.resize4x4() - tmat= Blender.Mathutils.TranslationMatrix(cent) - return mat * tmat - - -# Used for mesh_solidify.py and mesh_wire.py - -# returns a length from an angle -# Imaging a 2d space. -# there is a hoz line at Y1 going to inf on both X ends, never moves (LINEA) -# down at Y0 is a unit length line point up at (angle) from X0,Y0 (LINEB) -# This function returns the length of LINEB at the point it would intersect LINEA -# - Use this for working out how long to make the vector - differencing it from surrounding faces, -# import math -from math import pi, sin, cos, sqrt - -def angleToLength(angle): - # Alredy accounted for - if angle < 0.000001: return 1.0 - else: return abs(1.0 / cos(pi*angle/180)); diff --git a/release/scripts/bpymodules/BPyMesh.py b/release/scripts/bpymodules/BPyMesh.py deleted file mode 100644 index 292f7a4b91e..00000000000 --- a/release/scripts/bpymodules/BPyMesh.py +++ /dev/null @@ -1,1326 +0,0 @@ -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - - -import Blender -import bpy -import BPyMesh_redux # seperated because of its size. -# reload(BPyMesh_redux) -redux= BPyMesh_redux.redux - -# python 2.3 has no reversed() iterator. this will only work on lists and tuples -try: - reversed -except: - def reversed(l): return l[::-1] - - -# If python version is less than 2.4, try to get set stuff from module -try: - set -except: - try: - from sets import Set as set - except: - set= None - - - - - -def meshWeight2List(me): - ''' Takes a mesh and return its group names and a list of lists, one list per vertex. - aligning the each vert list with the group names, each list contains float value for the weight. - These 2 lists can be modified and then used with list2MeshWeight to apply the changes. - ''' - - # Clear the vert group. - groupNames= me.getVertGroupNames() - len_groupNames= len(groupNames) - - if not len_groupNames: - # no verts? return a vert aligned empty list - return [[] for i in xrange(len(me.verts))], [] - - else: - vWeightList= [[0.0]*len_groupNames for i in xrange(len(me.verts))] - - for group_index, group in enumerate(groupNames): - for vert_index, weight in me.getVertsFromGroup(group, 1): # (i,w) tuples. 
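
Editorial note on the removed convexHull(): it is Andrew's monotone-chain algorithm specialised to Blender Mathutils vectors — sort the points lexically, then build two chains with a right-turn (determinant) test. A self-contained version over plain (x, y) tuples that returns indices like the original (convex_hull_2d is a hypothetical name; a sketch, not a drop-in replacement):

def convex_hull_2d(points):
    """Indices of the convex hull of a list of (x, y) tuples, in
    counter-clockwise order for y-up coordinates."""
    if len(points) <= 2:
        return list(range(len(points)))
    idx = sorted(range(len(points)), key=lambda i: points[i])

    def cross(o, a, b):          # >0 left turn, <0 right turn, 0 collinear
        (ox, oy), (ax, ay), (bx, by) = points[o], points[a], points[b]
        return (ax - ox) * (by - oy) - (ay - oy) * (bx - ox)

    def chain(order):
        hull = []
        for i in order:
            while len(hull) >= 2 and cross(hull[-2], hull[-1], i) <= 0:
                hull.pop()
            hull.append(i)
        return hull

    lower = chain(idx)
    upper = chain(reversed(idx))
    return lower[:-1] + upper[:-1]   # endpoints are shared by both chains
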
- vWeightList[vert_index][group_index]= weight - - # removed this because me may be copying teh vertex groups. - #for group in groupNames: - # me.removeVertGroup(group) - - return groupNames, vWeightList - - -def list2MeshWeight(me, groupNames, vWeightList): - ''' Takes a list of groups and a list of vertex Weight lists as created by meshWeight2List - and applys it to the mesh.''' - - if len(vWeightList) != len(me.verts): - raise 'Error, Lists Differ in size, do not modify your mesh.verts before updating the weights' - - act_group = me.activeGroup - - # Clear the vert group. - currentGroupNames= me.getVertGroupNames() - for group in currentGroupNames: - me.removeVertGroup(group) # messes up the active group. - - # Add clean unused vert groupNames back - currentGroupNames= me.getVertGroupNames() - for group in groupNames: - me.addVertGroup(group) - - add_ = Blender.Mesh.AssignModes.ADD - - vertList= [None] - for i, v in enumerate(me.verts): - vertList[0]= i - for group_index, weight in enumerate(vWeightList[i]): - if weight: - try: - me.assignVertsToGroup(groupNames[group_index], vertList, min(1, max(0, weight)), add_) - except: - pass # vert group is not used anymore. - - try: me.activeGroup = act_group - except: pass - - me.update() - - - - -def meshWeight2Dict(me): - ''' Takes a mesh and return its group names and a list of dicts, one dict per vertex. - using the group as a key and a float value for the weight. - These 2 lists can be modified and then used with dict2MeshWeight to apply the changes. - ''' - - vWeightDict= [dict() for i in xrange(len(me.verts))] # Sync with vertlist. - - # Clear the vert group. - groupNames= me.getVertGroupNames() - - for group in groupNames: - for vert_index, weight in me.getVertsFromGroup(group, 1): # (i,w) tuples. - vWeightDict[vert_index][group]= weight - - # removed this because me may be copying teh vertex groups. - #for group in groupNames: - # me.removeVertGroup(group) - - return groupNames, vWeightDict - - -def dict2MeshWeight(me, groupNames, vWeightDict): - ''' Takes a list of groups and a list of vertex Weight dicts as created by meshWeight2Dict - and applys it to the mesh.''' - - if len(vWeightDict) != len(me.verts): - raise 'Error, Lists Differ in size, do not modify your mesh.verts before updating the weights' - - act_group = me.activeGroup - - # Clear the vert group. - currentGroupNames= me.getVertGroupNames() - for group in currentGroupNames: - if group not in groupNames: - me.removeVertGroup(group) # messes up the active group. - else: - me.removeVertsFromGroup(group) - - # Add clean unused vert groupNames back - currentGroupNames= me.getVertGroupNames() - for group in groupNames: - if group not in currentGroupNames: - me.addVertGroup(group) - - add_ = Blender.Mesh.AssignModes.ADD - - vertList= [None] - for i, v in enumerate(me.verts): - vertList[0]= i - for group, weight in vWeightDict[i].iteritems(): - try: - me.assignVertsToGroup(group, vertList, min(1, max(0, weight)), add_) - except: - pass # vert group is not used anymore. 
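
Editorial note: the removed weight helpers shuttle between two layouts — meshWeight2List() yields the group names plus one float list per vertex, while meshWeight2Dict() yields one {group: weight} dict per vertex. A plain-Python sketch of converting between the two forms, without the Blender API (both function names are hypothetical):

def weights_list_to_dicts(group_names, weight_lists):
    """[[w_per_group]] -> [{group: w}], keeping only non-zero weights."""
    return [
        {group_names[g]: w for g, w in enumerate(row) if w}
        for row in weight_lists
    ]

def weights_dicts_to_list(group_names, weight_dicts):
    """[{group: w}] -> [[w_per_group]] aligned with group_names (missing -> 0.0)."""
    return [
        [wd.get(name, 0.0) for name in group_names]
        for wd in weight_dicts
    ]
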
- - try: me.activeGroup = act_group - except: pass - - me.update() - -def dictWeightMerge(dict_weights): - ''' - Takes dict weight list and merges into 1 weight dict item and returns it - ''' - - if not dict_weights: - return {} - - keys= [] - for weight in dict_weights: - keys.extend([ (k, 0.0) for k in weight.iterkeys() ]) - - new_wdict = dict(keys) - - len_dict_weights= len(dict_weights) - - for weight in dict_weights: - for group, value in weight.iteritems(): - new_wdict[group] += value/len_dict_weights - - return new_wdict - - -FLIPNAMES=[\ -('Left','Right'),\ -('_L','_R'),\ -('-L','-R'),\ -('.L','.R'),\ -] - -def dictWeightFlipGroups(dict_weight, groupNames, createNewGroups): - ''' - Returns a weight with flip names - dict_weight - 1 vert weight. - groupNames - because we may need to add new group names. - dict_weight - Weather to make new groups where needed. - ''' - - def flipName(name): - for n1,n2 in FLIPNAMES: - for nA, nB in ( (n1,n2), (n1.lower(),n2.lower()), (n1.upper(),n2.upper()) ): - if createNewGroups: - newName= name.replace(nA,nB) - if newName!=name: - if newName not in groupNames: - groupNames.append(newName) - return newName - - newName= name.replace(nB,nA) - if newName!=name: - if newName not in groupNames: - groupNames.append(newName) - return newName - - else: - newName= name.replace(nA,nB) - if newName!=name and newName in groupNames: - return newName - - newName= name.replace(nB,nA) - if newName!=name and newName in groupNames: - return newName - - return name - - if not dict_weight: - return dict_weight, groupNames - - - new_wdict = {} - for group, weight in dict_weight.iteritems(): - flipname= flipName(group) - new_wdict[flipname]= weight - - return new_wdict, groupNames - - -def mesh2linkedFaces(me): - ''' - Splits the mesh into connected parts, - these parts are returned as lists of faces. - used for seperating cubes from other mesh elements in the 1 mesh - ''' - - # Build vert face connectivity - vert_faces= [[] for i in xrange(len(me.verts))] - for f in me.faces: - for v in f: - vert_faces[v.index].append(f) - - # sort faces into connectivity groups - face_groups= [[f] for f in me.faces] - face_mapping = range(len(me.faces)) # map old, new face location - - # Now clump faces iterativly - ok= True - while ok: - ok= False - - for i, f in enumerate(me.faces): - mapped_index= face_mapping[f.index] - mapped_group= face_groups[mapped_index] - - for v in f: - for nxt_f in vert_faces[v.index]: - if nxt_f != f: - nxt_mapped_index= face_mapping[nxt_f.index] - - # We are not a part of the same group - if mapped_index != nxt_mapped_index: - - ok= True - - # Assign mapping to this group so they all map to this group - for grp_f in face_groups[nxt_mapped_index]: - face_mapping[grp_f.index] = mapped_index - - # Move faces into this group - mapped_group.extend(face_groups[nxt_mapped_index]) - - # remove reference to the list - face_groups[nxt_mapped_index]= None - - - # return all face groups that are not null - # this is all the faces that are connected in their own lists. - return [fg for fg in face_groups if fg] - - -def getFaceLoopEdges(faces, seams=[]): - ''' - Takes me.faces or a list of faces and returns the edge loops - These edge loops are the edges that sit between quads, so they dont touch - 1 quad, not not connected will make 2 edge loops, both only containing 2 edges. - - return a list of edge key lists - [ [(0,1), (4, 8), (3,8)], ...] 
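An aside on dictWeightMerge() above: it is a plain average of per-vertex weight dicts, with groups absent from a given dict counted as zero. A minimal standalone sketch of the same idea (hypothetical name, no Blender types):

def merge_weight_dicts(dict_weights):
    # Average a list of {group: weight} dicts into one dict; a group
    # missing from a given dict contributes 0.0 to the average.
    if not dict_weights:
        return {}
    n = float(len(dict_weights))
    merged = {}
    for wd in dict_weights:
        for group, value in wd.items():
            merged[group] = merged.get(group, 0.0) + value / n
    return merged

assert merge_weight_dicts([{'a': 1.0}, {'a': 0.0, 'b': 1.0}]) == {'a': 0.5, 'b': 0.5}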
- - optionaly, seams are edge keys that will be removed - ''' - - OTHER_INDEX = 2,3,0,1 # opposite face index - - edges = {} - - for f in faces: - if len(f) == 4: - edge_keys = f.edge_keys - for i, edkey in enumerate(f.edge_keys): - edges.setdefault(edkey, []).append(edge_keys[OTHER_INDEX[i]]) - - for edkey in seams: - edges[edkey] = [] - - # Collect edge loops here - edge_loops = [] - - for edkey, ed_adj in edges.iteritems(): - if 0 face indicies - face_edges[i] -> list referencs local faces v indicies 1,2,3 &| 4 - face_edges[i][j] -> list of faces that this edge uses. - crap this is tricky to explain :/ - ''' - face_edges= [ [-1] * len(f) for f in me.faces ] - - face_edges_dict= dict([(ed.key, []) for ed in me.edges]) - for fidx, f in enumerate(me.faces): - for i, edkey in enumerate(f.edge_keys): - edge_face_users= face_edges_dict[edkey] - edge_face_users.append(f) - face_edges[fidx][i]= edge_face_users - - return face_edges - - -def facesPlanerIslands(me): - - def roundvec(v): - return round(v[0], 4), round(v[1], 4), round(v[2], 4) - - face_props= [(cent, no, roundvec(no), cent.dot(no)) for f in me.faces for no, cent in ((f.no, f.cent),)] - - face_edge_users= face_edges(me) - islands= [] - - used_faces= [0] * len(me.faces) - while True: - new_island= False - for i, used_val in enumerate(used_faces): - if used_val==0: - island= [i] - new_island= True - used_faces[i]= 1 - break - - if not new_island: - break - - island_growing= True - while island_growing: - island_growing= False - for fidx1 in island[:]: - if used_faces[fidx1]==1: - used_faces[fidx1]= 2 - face_prop1= face_props[fidx1] - for ed in face_edge_users[fidx1]: - for f2 in ed: - fidx2= f2.index - if fidx1 != fidx2 and used_faces[fidx2]==0: - island_growing= True - face_prop2= face_props[fidx2] - # normals are the same? 
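An aside on the island test used by facesPlanerIslands() above: two faces join an island when their rounded normals agree and their plane offsets n.c match within a small epsilon. A standalone sketch of that coplanarity check, assuming plain (x, y, z) tuples rather than Blender vectors:

def same_plane(cent1, no1, cent2, no2, eps=1e-6, ndigits=4):
    # Coplanar when the normals agree after rounding and the second
    # centre lies on the first face's plane: |n1.c1 - n1.c2| < eps.
    def rounded(v):
        return tuple(round(c, ndigits) for c in v)
    def dot(a, b):
        return a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
    if rounded(no1) != rounded(no2):
        return False
    return abs(dot(no1, cent1) - dot(no1, cent2)) < eps

# Two unit-Z faces at the same height are coplanar, a raised one is not.
assert same_plane((0, 0, 1), (0, 0, 1), (5, 5, 1), (0, 0, 1))
assert not same_plane((0, 0, 1), (0, 0, 1), (5, 5, 2), (0, 0, 1))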
- if face_prop1[2]==face_prop2[2]: - if abs(face_prop1[3] - face_prop1[1].dot(face_prop2[0])) < 0.000001: - used_faces[fidx2]= 1 - island.append(fidx2) - islands.append([me.faces[i] for i in island]) - return islands - - - -def facesUvIslands(me, PREF_IMAGE_DELIMIT=True): - def roundvec(v): - return round(v[0], 4), round(v[1], 4) - - if not me.faceUV: - return [ list(me.faces), ] - - # make a list of uv dicts - face_uvs= [ [roundvec(uv) for uv in f.uv] for f in me.faces] - - # key - face uv || value - list of face idxs - uv_connect_dict= dict([ (uv, [] ) for f_uvs in face_uvs for uv in f_uvs]) - - for i, f_uvs in enumerate(face_uvs): - for uv in f_uvs: # loops through rounded uv values - uv_connect_dict[uv].append(i) - islands= [] - - used_faces= [0] * len(me.faces) - while True: - new_island= False - for i, used_val in enumerate(used_faces): - if used_val==0: - island= [i] - new_island= True - used_faces[i]= 1 - break - - if not new_island: - break - - island_growing= True - while island_growing: - island_growing= False - for fidx1 in island[:]: - if used_faces[fidx1]==1: - used_faces[fidx1]= 2 - for uv in face_uvs[fidx1]: - for fidx2 in uv_connect_dict[uv]: - if fidx1 != fidx2 and used_faces[fidx2]==0: - if not PREF_IMAGE_DELIMIT or me.faces[fidx1].image==me.faces[fidx2].image: - island_growing= True - used_faces[fidx2]= 1 - island.append(fidx2) - - islands.append([me.faces[i] for i in island]) - return islands - -#def faceUvBounds(me, faces= None): - - -def facesUvRotate(me, deg, faces= None, pivot= (0,0)): - ''' - Faces can be None an all faces will be used - pivot is just the x/y well rotated about - - positive deg value for clockwise rotation - ''' - if faces==None: faces= me.faces - pivot= Blender.Mathutils.Vector(pivot) - - rotmat= Blender.Mathutils.RotationMatrix(-deg, 2) - - for f in faces: - f.uv= [((uv-pivot)*rotmat)+pivot for uv in f.uv] - -def facesUvScale(me, sca, faces= None, pivot= (0,0)): - ''' - Faces can be None an all faces will be used - pivot is just the x/y well rotated about - sca can be wither an int/float or a vector if you want to - scale x/y seperately. - a sca or (1.0, 1.0) will do nothing. - ''' - def vecmulti(v1,v2): - '''V2 is unchanged''' - v1[:]= (v1.x*v2.x, v1.y*v2.y) - return v1 - - sca= Blender.Mathutils.Vector(sca) - if faces==None: faces= me.faces - pivot= Blender.Mathutils.Vector(pivot) - - for f in faces: - f.uv= [vecmulti(uv-pivot, sca)+pivot for uv in f.uv] - - -def facesUvTranslate(me, tra, faces= None, pivot= (0,0)): - ''' - Faces can be None an all faces will be used - pivot is just the x/y well rotated about - ''' - if faces==None: faces= me.faces - tra= Blender.Mathutils.Vector(tra) - - for f in faces: - f.uv= [uv+tra for uv in f.uv] - - - -def edgeFaceUserCount(me, faces= None): - ''' - Return an edge aligned list with the count for all the faces that use that edge. - - can spesify a subset of the faces, so only those will be counted. 
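An aside on facesUvRotate() above: it is a 2D rotation of the UVs about a pivot. A standalone sketch of the same transform using math.cos/math.sin directly instead of Blender's RotationMatrix (name and demo values are made up):

import math

def rotate_uvs(uvs, deg, pivot=(0.0, 0.0)):
    # Clockwise rotation for positive `deg`, matching the docstring of
    # facesUvRotate(); uses the negated angle like RotationMatrix(-deg, 2).
    a = math.radians(-deg)
    ca, sa = math.cos(a), math.sin(a)
    px, py = pivot
    return [(px + (x - px) * ca - (y - py) * sa,
             py + (x - px) * sa + (y - py) * ca) for x, y in uvs]

# 90 degrees clockwise about the origin maps (0, 1) onto (1, 0).
x, y = rotate_uvs([(0.0, 1.0)], 90.0)[0]
assert abs(x - 1.0) < 1e-9 and abs(y) < 1e-9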
- ''' - if faces==None: - faces= me.faces - max_vert= len(me.verts) - else: - # find the lighest vert index - pass - - edge_users= [0] * len(me.edges) - - edges_idx_dict= dict([(ed.key, ed.index) for ed in me.edges]) - - for f in faces: - for edkey in f.edge_keys: - edge_users[edges_idx_dict[edkey]] += 1 - - return edge_users - - -#============================================================================# -# Takes a face, and a pixel x/y on the image and returns a worldspace x/y/z # -# will return none if the pixel is not inside the faces UV # -#============================================================================# -def getUvPixelLoc(face, pxLoc, img_size = None, uvArea = None): - TriangleArea= Blender.Mathutils.TriangleArea - Vector= Blender.Mathutils.Vector - - if not img_size: - w,h = face.image.size - else: - w,h= img_size - - scaled_uvs= [Vector(uv.x*w, uv.y*h) for uv in f.uv] - - if len(scaled_uvs)==3: - indicies= ((0,1,2),) - else: - indicies= ((0,1,2), (0,2,3)) - - for fidxs in indicies: - for i1,i2,i3 in fidxs: - # IS a point inside our triangle? - # UVArea could be cached? - uv_area = TriangleArea(scaled_uvs[i1], scaled_uvs[i2], scaled_uvs[i3]) - area0 = TriangleArea(pxLoc, scaled_uvs[i2], scaled_uvs[i3]) - area1 = TriangleArea(pxLoc, scaled_uvs[i1], scaled_uvs[i3]) - area2 = TriangleArea(pxLoc, scaled_uvs[i1], scaled_uvs[i2]) - if area0 + area1 + area2 > uv_area + 1: # 1 px bleed/error margin. - pass # if were a quad the other side may contain the pixel so keep looking. - else: - # We know the point is in the tri - area0 /= uv_area - area1 /= uv_area - area2 /= uv_area - - # New location - return Vector(\ - face.v[i1].co[0]*area0 + face.v[i2].co[0]*area1 + face.v[i3].co[0]*area2,\ - face.v[i1].co[1]*area0 + face.v[i2].co[1]*area1 + face.v[i3].co[1]*area2,\ - face.v[i1].co[2]*area0 + face.v[i2].co[2]*area1 + face.v[i3].co[2]*area2\ - ) - - return None - - -# Used for debugging ngon -""" -def draw_loops(loops): - - me= Blender.Mesh.New() - for l in loops: - #~ me= Blender.Mesh.New() - - - i= len(me.verts) - me.verts.extend([v[0] for v in l]) - try: - me.verts[0].sel= 1 - except: - pass - me.edges.extend([ (j-1, j) for j in xrange(i+1, len(me.verts)) ]) - # Close the edge? - me.edges.extend((i, len(me.verts)-1)) - - - #~ ob= Blender.Object.New('Mesh') - #~ ob.link(me) - #~ scn= Blender.Scene.GetCurrent() - #~ scn.link(ob) - #~ ob.Layers= scn.Layers - #~ ob.sel= 1 - - - - # Fill - #fill= Blender.Mathutils.PolyFill(loops) - #me.faces.extend(fill) - - - ob= Blender.Object.New('Mesh') - ob.link(me) - scn= Blender.Scene.GetCurrent() - scn.link(ob) - ob.Layers= scn.Layers - ob.sel= 1 - Blender.Window.RedrawAll() -""" - -def ngon(from_data, indices, PREF_FIX_LOOPS= True): - ''' - Takes a polyline of indices (fgon) - and returns a list of face indicie lists. - Designed to be used for importers that need indices for an fgon to create from existing verts. - - from_data: either a mesh, or a list/tuple of vectors. - indices: a list of indicies to use this list is the ordered closed polyline to fill, and can be a subset of the data given. - PREF_FIX_LOOPS: If this is enabled polylines that use loops to make multiple polylines are delt with correctly. - ''' - - if not set: # Need sets for this, otherwise do a normal fill. 
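An aside on getUvPixelLoc() above: it uses sub-triangle areas both as the inside test and as barycentric weights for blending the 3D corner positions. A standalone sketch of that area-ratio technique, with plain tuples instead of Blender vectors and a small epsilon in place of the original's 1 px bleed margin:

def tri_area_2d(a, b, c):
    # Absolute area of a 2D triangle via the cross product.
    return abs((b[0] - a[0]) * (c[1] - a[1]) - (b[1] - a[1]) * (c[0] - a[0])) * 0.5

def uv_point_to_world(pt, uvs, cos, eps=1e-6):
    # uvs: three 2D points (already scaled to pixel space), cos: the
    # matching 3D corner coordinates.  Returns the interpolated 3D point
    # or None when `pt` falls outside the triangle.
    area = tri_area_2d(*uvs)
    if area < eps:
        return None
    a0 = tri_area_2d(pt, uvs[1], uvs[2])
    a1 = tri_area_2d(pt, uvs[0], uvs[2])
    a2 = tri_area_2d(pt, uvs[0], uvs[1])
    if a0 + a1 + a2 > area + eps:      # outside: sub-areas overshoot
        return None
    w0, w1, w2 = a0 / area, a1 / area, a2 / area
    return tuple(cos[0][i] * w0 + cos[1][i] * w1 + cos[2][i] * w2 for i in range(3))

uvs = [(0.0, 0.0), (10.0, 0.0), (0.0, 10.0)]
cos = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)]
assert uv_point_to_world((5.0, 5.0), uvs, cos) == (0.5, 0.5, 0.0)
assert uv_point_to_world((11.0, 0.0), uvs, cos) is None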
- PREF_FIX_LOOPS= False - - Vector= Blender.Mathutils.Vector - if not indices: - return [] - - # return [] - def rvec(co): return round(co.x, 6), round(co.y, 6), round(co.z, 6) - def mlen(co): return abs(co[0])+abs(co[1])+abs(co[2]) # manhatten length of a vector, faster then length - - def vert_treplet(v, i): - return v, rvec(v), i, mlen(v) - - def ed_key_mlen(v1, v2): - if v1[3] > v2[3]: - return v2[1], v1[1] - else: - return v1[1], v2[1] - - - if not PREF_FIX_LOOPS: - ''' - Normal single concave loop filling - ''' - if type(from_data) in (tuple, list): - verts= [Vector(from_data[i]) for ii, i in enumerate(indices)] - else: - verts= [from_data.verts[i].co for ii, i in enumerate(indices)] - - for i in xrange(len(verts)-1, 0, -1): # same as reversed(xrange(1, len(verts))): - if verts[i][1]==verts[i-1][0]: - verts.pop(i-1) - - fill= Blender.Geometry.PolyFill([verts]) - - else: - ''' - Seperate this loop into multiple loops be finding edges that are used twice - This is used by lightwave LWO files a lot - ''' - - if type(from_data) in (tuple, list): - verts= [vert_treplet(Vector(from_data[i]), ii) for ii, i in enumerate(indices)] - else: - verts= [vert_treplet(from_data.verts[i].co, ii) for ii, i in enumerate(indices)] - - edges= [(i, i-1) for i in xrange(len(verts))] - if edges: - edges[0]= (0,len(verts)-1) - - if not verts: - return [] - - - edges_used= set() - edges_doubles= set() - # We need to check if any edges are used twice location based. - for ed in edges: - edkey= ed_key_mlen(verts[ed[0]], verts[ed[1]]) - if edkey in edges_used: - edges_doubles.add(edkey) - else: - edges_used.add(edkey) - - # Store a list of unconnected loop segments split by double edges. - # will join later - loop_segments= [] - - v_prev= verts[0] - context_loop= [v_prev] - loop_segments= [context_loop] - - for v in verts: - if v!=v_prev: - # Are we crossing an edge we removed? - if ed_key_mlen(v, v_prev) in edges_doubles: - context_loop= [v] - loop_segments.append(context_loop) - else: - if context_loop and context_loop[-1][1]==v[1]: - #raise "as" - pass - else: - context_loop.append(v) - - v_prev= v - # Now join loop segments - - def join_seg(s1,s2): - if s2[-1][1]==s1[0][1]: # - s1,s2= s2,s1 - elif s1[-1][1]==s2[0][1]: - pass - else: - return False - - # If were stuill here s1 and s2 are 2 segments in the same polyline - s1.pop() # remove the last vert from s1 - s1.extend(s2) # add segment 2 to segment 1 - - if s1[0][1]==s1[-1][1]: # remove endpoints double - s1.pop() - - s2[:]= [] # Empty this segment s2 so we dont use it again. 
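An aside on the loop-fixing branch of ngon() above: it keys each edge by its endpoints' rounded locations, ordered by manhattan length so direction does not matter, and any key seen twice marks where a self-touching polyline must be split. A standalone sketch of that keying (hypothetical names, plain tuples):

def mlen(co):
    # Manhattan length: cheap, stable tie-breaker for ordering endpoints.
    return abs(co[0]) + abs(co[1]) + abs(co[2])

def edge_key(co1, co2, ndigits=6):
    # Direction-independent key built from rounded endpoint locations.
    a = tuple(round(c, ndigits) for c in co1)
    b = tuple(round(c, ndigits) for c in co2)
    return (b, a) if mlen(co1) > mlen(co2) else (a, b)

def doubled_edges(edges):
    # `edges` is an iterable of (co1, co2) pairs; return keys seen twice.
    seen, doubles = set(), set()
    for co1, co2 in edges:
        key = edge_key(co1, co2)
        (doubles if key in seen else seen).add(key)
    return doubles

a, b, c = (0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (1.0, 1.0, 0.0)
# The a-b edge is walked twice (once per direction) -> it is the split point.
assert doubled_edges([(a, b), (b, c), (c, a), (b, a)]) == {edge_key(a, b)}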
- return True - - joining_segments= True - while joining_segments: - joining_segments= False - segcount= len(loop_segments) - - for j in xrange(segcount-1, -1, -1): #reversed(xrange(segcount)): - seg_j= loop_segments[j] - if seg_j: - for k in xrange(j-1, -1, -1): # reversed(xrange(j)): - if not seg_j: - break - seg_k= loop_segments[k] - - if seg_k and join_seg(seg_j, seg_k): - joining_segments= True - - loop_list= loop_segments - - for verts in loop_list: - while verts and verts[0][1]==verts[-1][1]: - verts.pop() - - loop_list= [verts for verts in loop_list if len(verts)>2] - # DONE DEALING WITH LOOP FIXING - - - # vert mapping - vert_map= [None]*len(indices) - ii=0 - for verts in loop_list: - if len(verts)>2: - for i, vert in enumerate(verts): - vert_map[i+ii]= vert[2] - ii+=len(verts) - - fill= Blender.Geometry.PolyFill([ [v[0] for v in loop] for loop in loop_list ]) - #draw_loops(loop_list) - #raise 'done loop' - # map to original indicies - fill= [[vert_map[i] for i in reversed(f)] for f in fill] - - - if not fill: - print 'Warning Cannot scanfill, fallback on a triangle fan.' - fill= [ [0, i-1, i] for i in xrange(2, len(indices)) ] - else: - # Use real scanfill. - # See if its flipped the wrong way. - flip= None - for fi in fill: - if flip != None: - break - for i, vi in enumerate(fi): - if vi==0 and fi[i-1]==1: - flip= False - break - elif vi==1 and fi[i-1]==0: - flip= True - break - - if not flip: - for i, fi in enumerate(fill): - fill[i]= tuple([ii for ii in reversed(fi)]) - - - - - return fill - - - -# EG -''' -scn= Scene.GetCurrent() -me = scn.getActiveObject().getData(mesh=1) -ind= [v.index for v in me.verts if v.sel] # Get indices - -indices = ngon(me, ind) # fill the ngon. - -# Extand the faces to show what the scanfill looked like. -print len(indices) -me.faces.extend([[me.verts[ii] for ii in i] for i in indices]) -''' - -def meshCalcNormals(me, vertNormals=None): - ''' - takes a mesh and returns very high quality normals 1 normal per vertex. - The normals should be correct, indipendant of topology - - vertNormals - a list of vectors at least as long as the number of verts in the mesh - ''' - Ang= Blender.Mathutils.AngleBetweenVecs - Vector= Blender.Mathutils.Vector - SMALL_NUM=0.000001 - # Weight the edge normals by total angle difference - # EDGE METHOD - - if not vertNormals: - vertNormals= [ Vector() for v in xrange(len(me.verts)) ] - else: - for v in vertNormals: - v.zero() - - edges={} - for f in me.faces: - f_v = f.v - for edkey in f.edge_keys: - edges.setdefault(edkey, []).append(f.no) - - # Weight the edge normals by total angle difference - for fnos in edges.itervalues(): - - len_fnos= len(fnos) - if len_fnos>1: - totAngDiff=0 - for j in xrange(len_fnos-1, -1, -1): # same as reversed(xrange(...)) - for k in xrange(j-1, -1, -1): # same as reversed(xrange(...)) - #print j,k - try: - totAngDiff+= (Ang(fnos[j], fnos[k])) # /180 isnt needed, just to keeop the vert small. - except: - pass # Zero length face - - # print totAngDiff - if totAngDiff > SMALL_NUM: - ''' - average_no= Vector() - for no in fnos: - average_no+=no - ''' - average_no= reduce(lambda a,b: a+b, fnos, Vector()) - fnos.append(average_no*totAngDiff) # average no * total angle diff - #else: - # fnos[0] - else: - fnos.append(fnos[0]) - - for ed, v in edges.iteritems(): - vertNormals[ed[0]]+= v[-1] - vertNormals[ed[1]]+= v[-1] - for i, v in enumerate(me.verts): - v.no= vertNormals[i] - - - - -def pointInsideMesh(ob, pt): - Intersect = Blender.Mathutils.Intersect # 2 less dict lookups. 
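An aside on the fallback at the end of ngon() above: when scanfill returns nothing, the polygon is filled with a simple triangle fan around the first vertex. Restated standalone:

def triangle_fan(indices):
    # Fallback used when scanfill produces no faces: fan every vertex
    # after the first two back to vertex 0 of the polyline.
    return [(0, i - 1, i) for i in range(2, len(indices))]

# A closed 5-gon becomes three triangles sharing corner 0.
assert triangle_fan([10, 11, 12, 13, 14]) == [(0, 1, 2), (0, 2, 3), (0, 3, 4)]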
- Vector = Blender.Mathutils.Vector - - def ptInFaceXYBounds(f, pt): - f_v = f.v - co= f_v[0].co - xmax= xmin= co.x - ymax= ymin= co.y - - co= f_v[1].co - xmax= max(xmax, co.x) - xmin= min(xmin, co.x) - ymax= max(ymax, co.y) - ymin= min(ymin, co.y) - - co= f_v[2].co - xmax= max(xmax, co.x) - xmin= min(xmin, co.x) - ymax= max(ymax, co.y) - ymin= min(ymin, co.y) - - if len(f_v)==4: - co= f_v[3].co - xmax= max(xmax, co.x) - xmin= min(xmin, co.x) - ymax= max(ymax, co.y) - ymin= min(ymin, co.y) - - # Now we have the bounds, see if the point is in it. - if\ - pt.x < xmin or\ - pt.y < ymin or\ - pt.x > xmax or\ - pt.y > ymax: - return False # point is outside face bounds - else: - return True # point inside. - #return xmax, ymax, xmin, ymin - - def faceIntersect(f): - f_v = f.v - isect = Intersect(f_v[0].co, f_v[1].co, f_v[2].co, ray, obSpacePt, 1) # Clipped. - if not isect and len(f) == 4: - isect = Intersect(f_v[0].co, f_v[2].co, f_v[3].co, ray, obSpacePt, 1) # Clipped. - - if isect and isect.z > obSpacePt.z: # This is so the ray only counts if its above the point. - return True - else: - return False - - obSpacePt = pt*ob.matrixWorld.copy().invert() - ray = Vector(0,0,-1) - me= ob.getData(mesh=1) - - # Here we find the number on intersecting faces, return true if an odd number (inside), false (outside) if its true. - return len([None for f in me.faces if ptInFaceXYBounds(f, obSpacePt) if faceIntersect(f)]) % 2 - - -def faceAngles(f): - ''' - Returns the angle between all corners in a tri or a quad - - ''' - AngleBetweenVecs = Blender.Mathutils.AngleBetweenVecs - def Ang(a1,a2): - try: return AngleBetweenVecs(a1,a2) - except: return 180 - - if len(f) == 3: - if type(f) in (tuple, list): v1,v2,v3 = f - else: v1,v2,v3 = [v.co for v in f] - a1= Ang(v2-v1,v3-v1) - a2= Ang(v1-v2,v3-v2) - a3 = 180 - (a1+a2) # a3= Mathutils.AngleBetweenVecs(v2-v3,v1-v3) - return a1,a2,a3 - - else: - if type(f) in (tuple, list): v1,v2,v3,v4 = f - else: v1,v2,v3,v4 = [v.co for v in f] - a1= Ang(v2-v1,v4-v1) - a2= Ang(v1-v2,v3-v2) - a3= Ang(v2-v3,v4-v3) - a4= Ang(v3-v4,v1-v4) - return a1,a2,a3,a4 - -# NMesh wrapper -Vector= Blender.Mathutils.Vector -class NMesh(object): - __slots__= 'verts', 'faces', 'edges', 'faceUV', 'materials', 'realmesh' - def __init__(self, mesh): - ''' - This is an NMesh wrapper that - mesh is an Mesh as returned by Blender.Mesh.New() - This class wraps NMesh like access into Mesh - - Running NMesh.update() - with this wrapper, - Will update the realmesh. - ''' - self.verts= [] - self.faces= [] - self.edges= [] - self.faceUV= False - self.materials= [] - self.realmesh= mesh - - def addFace(self, nmf): - self.faces.append(nmf) - - def Face(self, v=[]): - return NMFace(v) - def Vert(self, x,y,z): - return NMVert(x,y,z) - - def hasFaceUV(self, flag): - if flag: - self.faceUV= True - else: - self.faceUV= False - - def addMaterial(self, mat): - self.materials.append(mat) - - def update(self, recalc_normals=False): # recalc_normals is dummy - mesh= self.realmesh - mesh.verts= None # Clears the - - # Add in any verts from faces we may have not added. 
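An aside on pointInsideMesh() above: it relies on ray-crossing parity, an odd number of face intersections along a vertical ray means the point is inside. The same even-odd rule is easier to show in 2D; this sketch is that 2D analogue, not the deleted 3D routine:

def point_in_polygon(pt, poly):
    # Even-odd rule: cast a horizontal ray from `pt` and count how many
    # polygon edges it crosses; an odd count means the point is inside.
    x, y = pt
    inside = False
    n = len(poly)
    for i in range(n):
        x1, y1 = poly[i]
        x2, y2 = poly[(i + 1) % n]
        crosses = (y1 > y) != (y2 > y)
        if crosses and x < x1 + (y - y1) * (x2 - x1) / (y2 - y1):
            inside = not inside
    return inside

square = [(0.0, 0.0), (2.0, 0.0), (2.0, 2.0), (0.0, 2.0)]
assert point_in_polygon((1.0, 1.0), square)
assert not point_in_polygon((3.0, 1.0), square)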
- for nmf in self.faces: - for nmv in nmf.v: - if nmv.index==-1: - nmv.index= len(self.verts) - self.verts.append(nmv) - - - mesh.verts.extend([nmv.co for nmv in self.verts]) - for i, nmv in enumerate(self.verts): - nmv.index= i - mv= mesh.verts[i] - mv.sel= nmv.sel - - good_faces= [nmf for nmf in self.faces if len(nmf.v) in (3,4)] - #print len(good_faces), 'AAA' - - - #mesh.faces.extend([nmf.v for nmf in self.faces]) - mesh.faces.extend([[mesh.verts[nmv.index] for nmv in nmf.v] for nmf in good_faces]) - if len(mesh.faces): - if self.faceUV: - mesh.faceUV= 1 - - #for i, nmf in enumerate(self.faces): - for i, nmf in enumerate(good_faces): - mf= mesh.faces[i] - if self.faceUV: - if len(nmf.uv) == len(mf.v): - mf.uv= [Vector(uv[0], uv[1]) for uv in nmf.uv] - if len(nmf.col) == len(mf.v): - for c, i in enumerate(mf.col): - c.r, c.g, c.b= nmf.col[i].r, nmf.col[i].g, nmf.col[i].b - if nmf.image: - mf.image= nmf.image - - mesh.materials= self.materials[:16] - -class NMVert(object): - __slots__= 'co', 'index', 'no', 'sel', 'uvco' - def __init__(self, x,y,z): - self.co= Vector(x,y,z) - self.index= None # set on appending. - self.no= Vector(0,0,1) # dummy - self.sel= 0 - self.uvco= None -class NMFace(object): - __slots__= 'col', 'flag', 'hide', 'image', 'mat', 'materialIndex', 'mode', 'normal',\ - 'sel', 'smooth', 'transp', 'uv', 'v' - - def __init__(self, v=[]): - self.col= [] - self.flag= 0 - self.hide= 0 - self.image= None - self.mat= 0 # materialIndex needs support too. - self.mode= 0 - self.normal= Vector(0,0,1) - self.uv= [] - self.sel= 0 - self.smooth= 0 - self.transp= 0 - self.uv= [] - self.v= [] # a list of nmverts. - -class NMCol(object): - __slots__ = 'r', 'g', 'b', 'a' - def __init__(self): - self.r= 255 - self.g= 255 - self.b= 255 - self.a= 255 - - -''' -# -verts_split= [dict() for i in xrange(len(me.verts))] - -tot_verts= 0 -for f in me.faces: - f_uv= f.uv - for i, v in enumerate(f.v): - vert_index= v.index # mesh index - vert_dict= verts_split[vert_index] # get the dict for this vert - - uv= f_uv[i] - # now we have the vert and the face uv well make a unique dict. - - vert_key= v.x, v.y, v.x, uv.x, uv.y # ADD IMAGE NAME HETR IF YOU WANT TO SPLIT BY THAT TOO - value= vert_index, tot_verts # ADD WEIGHT HERE IF YOU NEED. - try: - vert_dict[vert_key] # if this is missing it will fail. - except: - # this stores a mapping between the split and orig vert indicies - vert_dict[vert_key]= value - tot_verts+= 1 - -# a flat list of split verts - can add custom weight data here too if you need -split_verts= [None]*tot_verts - -for vert_split_dict in verts_split: - for key, value in vert_split_dict.iteritems(): - local_index, split_index= value - split_verts[split_index]= key - -# split_verts - Now you have a list of verts split by their UV. -''' diff --git a/release/scripts/bpymodules/BPyMesh_redux.py b/release/scripts/bpymodules/BPyMesh_redux.py deleted file mode 100644 index 5955d696fbd..00000000000 --- a/release/scripts/bpymodules/BPyMesh_redux.py +++ /dev/null @@ -1,652 +0,0 @@ -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# (C) Copyright 2006 MetaVR, Inc. -# http://www.metavr.com -# Written by Campbell Barton -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. 
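An aside on the commented example that closes BPyMesh.py above: it splits vertices by their (position, UV) key, the usual trick for exporters that need one output vertex per unique corner. A standalone sketch with an assumed data layout (verts as tuples, faces as lists of (vert_index, (u, v)) pairs):

def split_verts_by_uv(verts, faces):
    # Returns (split_verts, new_faces) where each split vert is an
    # (x, y, z, u, v) key and faces are remapped to the split indices.
    key_to_index = {}
    split_verts = []
    new_faces = []
    for face in faces:
        new_face = []
        for vert_index, (u, v) in face:
            key = verts[vert_index] + (u, v)
            if key not in key_to_index:
                key_to_index[key] = len(split_verts)
                split_verts.append(key)
            new_face.append(key_to_index[key])
        new_faces.append(new_face)
    return split_verts, new_faces

verts = [(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)]
faces = [[(0, (0.0, 0.0)), (1, (1.0, 0.0)), (2, (0.0, 1.0))],
         [(0, (0.5, 0.5)), (1, (1.0, 0.0)), (2, (0.0, 1.0))]]
split, new_faces = split_verts_by_uv(verts, faces)
# Vertex 0 is used with two different UVs, so it is split into two verts.
assert len(split) == 4 and new_faces == [[0, 1, 2], [3, 1, 2]]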
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -import Blender -import bpy -Vector= Blender.Mathutils.Vector -Ang= Blender.Mathutils.AngleBetweenVecs -MidpointVecs= Blender.Mathutils.MidpointVecs -import BPyMesh - -# If python version is less than 2.4, try to get set stuff from module - -try: - set -except: - try: - from sets import Set as set - except: - set= None - -def uv_key(uv): - return round(uv.x, 5), round(uv.y, 5) - -def uv_key_mix(uv1, uv2, w1, w2): - # Weighted mix. w1+w2==1.0 - return w1*uv1[0]+w2*uv2[0], w1*uv1[1]+w2*uv2[1] - -def col_key(col): - return col.r, col.g, col.b - -def col_key_mix(col1, col2, w1, w2): - # Weighted mix. w1+w2==1.0 - return int(w1*col1[0] + w2*col2[0]), int(w1*col1[1] + w2*col2[1]), int(w1*col1[2]+col2[2]*w2) - - -def redux(ob, REDUX=0.5, BOUNDRY_WEIGHT=2.0, REMOVE_DOUBLES=False, FACE_AREA_WEIGHT=1.0, FACE_TRIANGULATE=True, DO_UV=True, DO_VCOL=True, DO_WEIGHTS=True, VGROUP_INF_REDUX= None, VGROUP_INF_WEIGHT=0.5): - """ - BOUNDRY_WEIGHT - 0 is no boundry weighting. 2.0 will make them twice as unlikely to collapse. - FACE_AREA_WEIGHT - 0 is no weight. 1 is normal, 2.0 is higher. - """ - - if REDUX<0 or REDUX>1.0: - raise 'Error, factor must be between 0 and 1.0' - elif not set: - raise 'Error, this function requires Python 2.4 or a full install of Python 2.3' - - BOUNDRY_WEIGHT= 1+BOUNDRY_WEIGHT - - """ # DEBUG! - if Blender.Get('rt') == 1000: - DEBUG=True - else: - DEBUG= False - """ - - me= ob.getData(mesh=1) - me.hide= False # unhide all data,. - if len(me.faces)<5: - return - - - - if FACE_TRIANGULATE or REMOVE_DOUBLES: - me.sel= True - - if FACE_TRIANGULATE: - me.quadToTriangle() - - if REMOVE_DOUBLES: - me.remDoubles(0.0001) - - vgroups= me.getVertGroupNames() - - if not me.getVertGroupNames(): - DO_WEIGHTS= False - - if (VGROUP_INF_REDUX!= None and VGROUP_INF_REDUX not in vgroups) or\ - VGROUP_INF_WEIGHT==0.0: - VGROUP_INF_REDUX= None - - try: - VGROUP_INF_REDUX_INDEX= vgroups.index(VGROUP_INF_REDUX) - except: - VGROUP_INF_REDUX_INDEX= -1 - - # del vgroups - len_vgroups= len(vgroups) - - - - OLD_MESH_MODE= Blender.Mesh.Mode() - Blender.Mesh.Mode(Blender.Mesh.SelectModes.VERTEX) - - if DO_UV and not me.faceUV: - DO_UV= False - - if DO_VCOL and not me.vertexColors: - DO_VCOL = False - - current_face_count= len(me.faces) - target_face_count= int(current_face_count * REDUX) - # % of the collapseable faces to collapse per pass. - #collapse_per_pass= 0.333 # between 0.1 - lots of small nibbles, slow but high q. and 0.9 - big passes and faster. - collapse_per_pass= 0.333 # between 0.1 - lots of small nibbles, slow but high q. and 0.9 - big passes and faster. - - """# DEBUG! 
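An aside on uv_key_mix() and col_key_mix() above: they are weighted averages with w1 + w2 == 1, with colour channels truncated back to ints. Restated standalone:

def mix_uv(uv1, uv2, w1, w2):
    # Weighted average of two (u, v) pairs; callers ensure w1 + w2 == 1.0.
    return (w1 * uv1[0] + w2 * uv2[0], w1 * uv1[1] + w2 * uv2[1])

def mix_col(col1, col2, w1, w2):
    # Same mix for (r, g, b) byte colours, truncated back to ints.
    return tuple(int(w1 * a + w2 * b) for a, b in zip(col1, col2))

assert mix_uv((0.0, 0.0), (1.0, 1.0), 0.25, 0.75) == (0.75, 0.75)
assert mix_col((0, 0, 0), (255, 255, 255), 0.5, 0.5) == (127, 127, 127)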
- if DEBUG: - COUNT= [0] - def rd(): - if COUNT[0]< 330: - COUNT[0]+=1 - return - me.update() - Blender.Window.RedrawAll() - print 'Press key for next, count "%s"' % COUNT[0] - try: input() - except KeyboardInterrupt: - raise "Error" - except: - pass - - COUNT[0]+=1 - """ - - class collapseEdge(object): - __slots__ = 'length', 'key', 'faces', 'collapse_loc', 'v1', 'v2','uv1', 'uv2', 'col1', 'col2', 'collapse_weight' - def __init__(self, ed): - self.init_from_edge(ed) # So we can re-use the classes without using more memory. - - def init_from_edge(self, ed): - self.key= ed.key - self.length= ed.length - self.faces= [] - self.v1= ed.v1 - self.v2= ed.v2 - if DO_UV or DO_VCOL: - self.uv1= [] - self.uv2= [] - self.col1= [] - self.col2= [] - - # self.collapse_loc= None # new collapse location. - # Basic weighting. - #self.collapse_weight= self.length * (1+ ((ed.v1.no-ed.v2.no).length**2)) - self.collapse_weight= 1.0 - - def collapse_locations(self, w1, w2): - ''' - Generate a smart location for this edge to collapse to - w1 and w2 are vertex location bias - ''' - - v1co= self.v1.co - v2co= self.v2.co - v1no= self.v1.no - v2no= self.v2.no - - # Basic operation, works fine but not as good as predicting the best place. - #between= ((v1co*w1) + (v2co*w2)) - #self.collapse_loc= between - - # normalize the weights of each vert - se we can use them as scalers. - wscale= w1+w2 - if not wscale: # no scale? - w1=w2= 0.5 - else: - w1/=wscale - w2/=wscale - - length= self.length - between= MidpointVecs(v1co, v2co) - - # Collapse - # new_location = between # Replace tricky code below. this code predicts the best collapse location. - - # Make lines at right angles to the normals- these 2 lines will intersect and be - # the point of collapsing. - - # Enlarge so we know they intersect: self.length*2 - cv1= v1no.cross(v1no.cross(v1co-v2co)) - cv2= v2no.cross(v2no.cross(v2co-v1co)) - - # Scale to be less then the edge lengths. - cv2.length = cv1.length = 1 - - cv1 = cv1 * (length* 0.4) - cv2 = cv2 * (length* 0.4) - - smart_offset_loc= between + (cv1 + cv2) - - # Now we need to blend between smart_offset_loc and w1/w2 - # you see were blending between a vert and the edges midpoint, so we cant use a normal weighted blend. - if w1 > 0.5: # between v1 and smart_offset_loc - #self.collapse_loc= v1co*(w2+0.5) + smart_offset_loc*(w1-0.5) - w2*=2 - w1= 1-w2 - new_loc_smart= v1co*w1 + smart_offset_loc*w2 - else: # w between v2 and smart_offset_loc - w1*=2 - w2= 1-w1 - new_loc_smart= v2co*w2 + smart_offset_loc*w1 - - if new_loc_smart.x != new_loc_smart.x: # NAN LOCATION, revert to between - new_loc_smart= None - - return new_loc_smart, between, v1co*0.99999 + v2co*0.00001, v1co*0.00001 + v2co*0.99999 - - - class collapseFace(object): - __slots__ = 'verts', 'normal', 'area', 'index', 'orig_uv', 'orig_col', 'uv', 'col' # , 'collapse_edge_count' - def __init__(self, f): - self.init_from_face(f) - - def init_from_face(self, f): - self.verts= f.v - self.normal= f.no - self.area= f.area - self.index= f.index - if DO_UV: - self.orig_uv= [uv_key(uv) for uv in f.uv] - self.uv= f.uv - if DO_VCOL: - self.orig_col= [col_key(col) for col in f.col] - self.col= f.col - - collapse_edges= collapse_faces= None - - # So meshCalcNormals can avoid making a new list all the time. 
- reuse_vertNormals= [ Vector() for v in xrange(len(me.verts)) ] - - while target_face_count <= len(me.faces): - BPyMesh.meshCalcNormals(me, reuse_vertNormals) - - if DO_WEIGHTS: - #groupNames, vWeightDict= BPyMesh.meshWeight2Dict(me) - groupNames, vWeightList= BPyMesh.meshWeight2List(me) - - # THIS CRASHES? Not anymore. - verts= list(me.verts) - edges= list(me.edges) - faces= list(me.faces) - - # THIS WORKS - #verts= me.verts - #edges= me.edges - #faces= me.faces - - # if DEBUG: DOUBLE_CHECK= [0]*len(verts) - me.sel= False - - if not collapse_faces: # Initialize the list. - collapse_faces= [collapseFace(f) for f in faces] - collapse_edges= [collapseEdge(ed) for ed in edges] - else: - for i, ed in enumerate(edges): - collapse_edges[i].init_from_edge(ed) - - # Strip the unneeded end off the list - collapse_edges[i+1:]= [] - - for i, f in enumerate(faces): - collapse_faces[i].init_from_face(f) - - # Strip the unneeded end off the list - collapse_faces[i+1:]= [] - - - collapse_edges_dict= dict( [(ced.key, ced) for ced in collapse_edges] ) - - # Store verts edges. - vert_ed_users= [[] for i in xrange(len(verts))] - for ced in collapse_edges: - vert_ed_users[ced.key[0]].append(ced) - vert_ed_users[ced.key[1]].append(ced) - - # Store face users - vert_face_users= [[] for i in xrange(len(verts))] - - # Have decieded not to use this. area is better. - #face_perim= [0.0]* len(me.faces) - - for ii, cfa in enumerate(collapse_faces): - for i, v1 in enumerate(cfa.verts): - vert_face_users[v1.index].append( (i,cfa) ) - - # add the uv coord to the vert - v2 = cfa.verts[i-1] - i1= v1.index - i2= v2.index - - if i1>i2: ced= collapse_edges_dict[i2,i1] - else: ced= collapse_edges_dict[i1,i2] - - ced.faces.append(cfa) - if DO_UV or DO_VCOL: - # if the edge is flipped from its order in the face then we need to flip the order indicies. - if cfa.verts[i]==ced.v1: i1,i2 = i, i-1 - else: i1,i2 = i-1, i - - if DO_UV: - ced.uv1.append( cfa.orig_uv[i1] ) - ced.uv2.append( cfa.orig_uv[i2] ) - - if DO_VCOL: - ced.col1.append( cfa.orig_col[i1] ) - ced.col2.append( cfa.orig_col[i2] ) - - - # PERIMITER - #face_perim[ii]+= ced.length - - - - # How weight the verts by the area of their faces * the normal difference. - # when the edge collapses, to vert weights are taken into account - - vert_weights= [0.5] * len(verts) - - for ii, vert_faces in enumerate(vert_face_users): - for f in vert_faces: - try: - no_ang= (Ang(verts[ii].no, f[1].normal)/180) * f[1].area - except: - no_ang= 1.0 - - vert_weights[ii] += no_ang - - # Use a vertex group as a weighting. - if VGROUP_INF_REDUX!=None: - - # Get Weights from a vgroup. - """ - vert_weights_map= [1.0] * len(verts) - for i, wd in enumerate(vWeightDict): - try: vert_weights_map[i]= 1+(wd[VGROUP_INF_REDUX] * VGROUP_INF_WEIGHT) - except: pass - """ - vert_weights_map= [1+(wl[VGROUP_INF_REDUX_INDEX]*VGROUP_INF_WEIGHT) for wl in vWeightList ] - - - # BOUNDRY CHECKING AND WEIGHT EDGES. CAN REMOVE - # Now we know how many faces link to an edge. lets get all the boundry verts - if BOUNDRY_WEIGHT > 0: - verts_boundry= [1] * len(verts) - #for ed_idxs, faces_and_uvs in edge_faces_and_uvs.iteritems(): - for ced in collapse_edges: - if len(ced.faces) < 2: - for key in ced.key: # only ever 2 key indicies. - verts_boundry[key]= 2 - - for ced in collapse_edges: - b1= verts_boundry[ced.key[0]] - b2= verts_boundry[ced.key[1]] - if b1 != b2: - # Edge has 1 boundry and 1 non boundry vert. 
weight higher - ced.collapse_weight= BOUNDRY_WEIGHT - #elif b1==b2==2: # if both are on a seam then weigh half as bad. - # ced.collapse_weight= ((BOUNDRY_WEIGHT-1)/2) +1 - # weight the verts by their boundry status - del b1 - del b2 - - for ii, boundry in enumerate(verts_boundry): - if boundry==2: - vert_weights[ii] *= BOUNDRY_WEIGHT - - vert_collapsed= verts_boundry - del verts_boundry - else: - vert_collapsed= [1] * len(verts) - - - - - # Best method, no quick hacks here, Correction. Should be the best but needs tweaks. - def ed_set_collapse_error(ced): - # Use the vertex weights to bias the new location. - new_locs= ced.collapse_locations(vert_weights[ced.key[0]], vert_weights[ced.key[1]]) - - - # Find the connecting faces of the 2 verts. - i1, i2= ced.key - test_faces= set() - for i in (i1,i2): # faster then LC's - for f in vert_face_users[i]: - test_faces.add(f[1].index) - for f in ced.faces: - test_faces.remove(f.index) - - - v1_orig= Vector(ced.v1.co) - v2_orig= Vector(ced.v2.co) - - def test_loc(new_loc): - ''' - Takes a location and tests the error without changing anything - ''' - new_weight= ced.collapse_weight - ced.v1.co= ced.v2.co= new_loc - - new_nos= [faces[i].no for i in test_faces] - - # So we can compare the befire and after normals - ced.v1.co= v1_orig - ced.v2.co= v2_orig - - # now see how bad the normals are effected - angle_diff= 1.0 - - for ii, i in enumerate(test_faces): # local face index, global face index - cfa= collapse_faces[i] # this collapse face - try: - # can use perim, but area looks better. - if FACE_AREA_WEIGHT: - # Psudo code for wrighting - # angle_diff= The before and after angle difference between the collapsed and un-collapsed face. - # ... devide by 180 so the value will be between 0 and 1.0 - # ... add 1 so we can use it as a multiplyer and not make the area have no eefect (below) - # area_weight= The faces original area * the area weight - # ... add 1.0 so a small area face dosent make the angle_diff have no effect. - # - # Now multiply - (angle_diff * area_weight) - # ... The weight will be a minimum of 1.0 - we need to subtract this so more faces done give the collapse an uneven weighting. - - angle_diff+= ((1+(Ang(cfa.normal, new_nos[ii])/180)) * (1+(cfa.area * FACE_AREA_WEIGHT))) -1 # 4 is how much to influence area - else: - angle_diff+= (Ang(cfa.normal), new_nos[ii])/180 - - except: - pass - - - # This is very arbirary, feel free to modify - try: no_ang= (Ang(ced.v1.no, ced.v2.no)/180) + 1 - except: no_ang= 2.0 - - # do *= because we face the boundry weight to initialize the weight. 1.0 default. - new_weight *= ((no_ang * ced.length) * (1-(1/angle_diff)))# / max(len(test_faces), 1) - return new_weight - # End testloc - - - # Test the collapse locatons - collapse_loc_best= None - collapse_weight_best= 1000000000 - ii= 0 - for collapse_loc in new_locs: - if collapse_loc: # will only ever fail if smart loc is NAN - test_weight= test_loc(collapse_loc) - if test_weight < collapse_weight_best: - iii= ii - collapse_weight_best = test_weight - collapse_loc_best= collapse_loc - ii+=1 - - ced.collapse_loc= collapse_loc_best - ced.collapse_weight= collapse_weight_best - - - # are we using a weight map - if VGROUP_INF_REDUX: - v= vert_weights_map[i1]+vert_weights_map[i2] - ced.collapse_weight*= v - # End collapse Error - - # We can calculate the weights on __init__ but this is higher qualuity. - for ced in collapse_edges: - if ced.faces: # dont collapse faceless edges. - ed_set_collapse_error(ced) - - # Wont use the function again. 
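An aside on the boundary handling above: an edge with fewer than two face users marks both of its vertices as boundary, and those verts and their edges get the extra BOUNDRY_WEIGHT so they collapse last. A standalone sketch of the detection step, assuming faces given as tuples of vertex indices:

def boundary_verts(num_verts, faces):
    # Count face users per undirected edge key and flag both ends of any
    # edge used by fewer than two faces.
    edge_users = {}
    for face in faces:
        for i in range(len(face)):
            a, b = face[i - 1], face[i]
            key = (a, b) if a < b else (b, a)
            edge_users[key] = edge_users.get(key, 0) + 1
    flags = [False] * num_verts
    for (a, b), users in edge_users.items():
        if users < 2:
            flags[a] = flags[b] = True
    return flags

# Two triangles sharing edge (1, 2): only that edge is interior, so every
# vertex here still touches at least one boundary edge.
assert boundary_verts(4, [(0, 1, 2), (1, 3, 2)]) == [True, True, True, True]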
- del ed_set_collapse_error - # END BOUNDRY. Can remove - - # sort by collapse weight - try: collapse_edges.sort(key = lambda ced: ced.collapse_weight) # edges will be used for sorting - except: collapse_edges.sort(lambda ced1, ced2: cmp(ced1.collapse_weight, ced2.collapse_weight)) # edges will be used for sorting - - - vert_collapsed= [0]*len(verts) - - collapse_edges_to_collapse= [] - - # Make a list of the first half edges we can collapse, - # these will better edges to remove. - collapse_count=0 - for ced in collapse_edges: - if ced.faces: - i1, i2= ced.key - # Use vert selections - if vert_collapsed[i1] or vert_collapsed[i2]: - pass - else: - # Now we know the verts havnyt been collapsed. - vert_collapsed[i2]= vert_collapsed[i1]= 1 # Dont collapse again. - collapse_count+=1 - collapse_edges_to_collapse.append(ced) - - # Get a subset of the entire list- the first "collapse_per_pass", that are best to collapse. - if collapse_count > 4: - collapse_count = int(collapse_count*collapse_per_pass) - else: - collapse_count = len(collapse_edges) - # We know edge_container_list_collapse can be removed. - for ced in collapse_edges_to_collapse: - """# DEBUG! - if DEBUG: - if DOUBLE_CHECK[ced.v1.index] or\ - DOUBLE_CHECK[ced.v2.index]: - raise 'Error' - else: - DOUBLE_CHECK[ced.v1.index]=1 - DOUBLE_CHECK[ced.v2.index]=1 - - tmp= (ced.v1.co+ced.v2.co)*0.5 - Blender.Window.SetCursorPos(tmp.x, tmp.y, tmp.z) - Blender.Window.RedrawAll() - """ - - # Chech if we have collapsed our quota. - collapse_count-=1 - if not collapse_count: - break - - current_face_count -= len(ced.faces) - - # Find and assign the real weights based on collapse loc. - - # Find the weights from the collapse error - if DO_WEIGHTS or DO_UV or DO_VCOL: - i1, i2= ced.key - # Dont use these weights since they may not have been used to make the collapse loc. - #w1= vert_weights[i1] - #w2= vert_weights[i2] - w1= (ced.v2.co-ced.collapse_loc).length - w2= (ced.v1.co-ced.collapse_loc).length - - # Normalize weights - wscale= w1+w2 - if not wscale: # no scale? - w1=w2= 0.5 - else: - w1/= wscale - w2/= wscale - - - # Interpolate the bone weights. - if DO_WEIGHTS: - - # add verts vgroups to eachother - wl1= vWeightList[i1] # v1 weight dict - wl2= vWeightList[i2] # v2 weight dict - for group_index in xrange(len_vgroups): - wl1[group_index]= wl2[group_index]= (wl1[group_index]*w1) + (wl2[group_index]*w2) - # Done finding weights. - - - - if DO_UV or DO_VCOL: - # Handel UV's and vert Colors! - for v, my_weight, other_weight, edge_my_uvs, edge_other_uvs, edge_my_cols, edge_other_cols in (\ - (ced.v1, w1, w2, ced.uv1, ced.uv2, ced.col1, ced.col2),\ - (ced.v2, w2, w1, ced.uv2, ced.uv1, ced.col2, ced.col1)\ - ): - uvs_mixed= [ uv_key_mix(edge_my_uvs[iii], edge_other_uvs[iii], my_weight, other_weight) for iii in xrange(len(edge_my_uvs)) ] - cols_mixed= [ col_key_mix(edge_my_cols[iii], edge_other_cols[iii], my_weight, other_weight) for iii in xrange(len(edge_my_cols)) ] - - for face_vert_index, cfa in vert_face_users[v.index]: - if len(cfa.verts)==3 and cfa not in ced.faces: # if the face is apart of this edge then dont bother finding the uvs since the face will be removed anyway. - - if DO_UV: - # UV COORDS - uvk= cfa.orig_uv[face_vert_index] - try: - tex_index= edge_my_uvs.index(uvk) - except: - tex_index= None - """ # DEBUG! - if DEBUG: - print 'not found', uvk, 'in', edge_my_uvs, 'ed index', ii, '\nwhat about', edge_other_uvs - """ - if tex_index != None: # This face uses a uv in the collapsing face. 
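An aside on the selection pass above: each iteration collapses only an independent subset of edges, taken in cost order, skipping any edge whose endpoints are already claimed, and then keeping only the cheapest fraction. A simplified standalone sketch (the original additionally special-cases very small counts):

def pick_collapse_edges(edges_by_weight, num_verts, fraction=0.333):
    # `edges_by_weight`: (v1, v2) index pairs already sorted by cost.
    # Greedily keep edges whose endpoints are still untouched, then
    # return only the cheapest `fraction` of that independent set.
    claimed = [False] * num_verts
    independent = []
    for v1, v2 in edges_by_weight:
        if not (claimed[v1] or claimed[v2]):
            claimed[v1] = claimed[v2] = True
            independent.append((v1, v2))
    keep = max(1, int(len(independent) * fraction))
    return independent[:keep]

sorted_edges = [(0, 1), (1, 2), (2, 3), (4, 5), (5, 6)]
# (1, 2) and (5, 6) are skipped: each shares a vertex with a cheaper pick.
assert pick_collapse_edges(sorted_edges, 7, fraction=1.0) == [(0, 1), (2, 3), (4, 5)]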
- do a merge - other_uv= edge_other_uvs[tex_index] - uv_vec= cfa.uv[face_vert_index] - uv_vec.x, uv_vec.y= uvs_mixed[tex_index] - - # TEXFACE COLORS - if DO_VCOL: - colk= cfa.orig_col[face_vert_index] - try: tex_index= edge_my_cols.index(colk) - except: pass - if tex_index != None: - other_col= edge_other_cols[tex_index] - col_ob= cfa.col[face_vert_index] - col_ob.r, col_ob.g, col_ob.b= cols_mixed[tex_index] - - # DEBUG! if DEBUG: rd() - - # Execute the collapse - ced.v1.sel= ced.v2.sel= True # Select so remove doubles removed the edges and faces that use it - ced.v1.co= ced.v2.co= ced.collapse_loc - - # DEBUG! if DEBUG: rd() - if current_face_count <= target_face_count: - break - - # Copy weights back to the mesh before we remove doubles. - if DO_WEIGHTS: - #BPyMesh.dict2MeshWeight(me, groupNames, vWeightDict) - BPyMesh.list2MeshWeight(me, groupNames, vWeightList) - - doubles= me.remDoubles(0.0001) - current_face_count= len(me.faces) - - if current_face_count <= target_face_count or not doubles: # not doubles shoule never happen. - break - - me.update() - Blender.Mesh.Mode(OLD_MESH_MODE) - - -# Example usage -def main(): - Blender.Window.EditMode(0) - scn= bpy.data.scenes.active - active_ob= scn.objects.active - t= Blender.sys.time() - redux(active_ob, 0.5) - print '%.4f' % (Blender.sys.time()-t) - -if __name__=='__main__': - main() diff --git a/release/scripts/bpymodules/BPyMessages.py b/release/scripts/bpymodules/BPyMessages.py deleted file mode 100644 index 8ee1aa6c707..00000000000 --- a/release/scripts/bpymodules/BPyMessages.py +++ /dev/null @@ -1,61 +0,0 @@ -from Blender import Draw, sys -def Error_NoMeshSelected(): - Draw.PupMenu('Error%t|No mesh objects selected') -def Error_NoActive(): - Draw.PupMenu('Error%t|No active object') -def Error_NoMeshActive(): - Draw.PupMenu('Error%t|Active object is not a mesh') -def Error_NoMeshUvSelected(): - Draw.PupMenu('Error%t|No mesh objects with texface selected') -def Error_NoMeshUvActive(): - Draw.PupMenu('Error%t|Active object is not a mesh with texface') -def Error_NoMeshMultiresEdit(): - Draw.PupMenu('Error%t|Unable to complete action with multires enabled') -def Error_NoMeshFaces(): - Draw.PupMenu('Error%t|Mesh has no faces') - -# File I/O messages -def Error_NoFile(path): - '''True if file missing, False if files there - - Use simply by doing... - if Error_NoFile(path): return - ''' - if not sys.exists(sys.expandpath(path)): - Draw.PupMenu("Error%t|Can't open file: " + path) - return True - return False - -def Error_NoDir(path): - '''True if dirs missing, False if dirs there - - Use simply by doing... 
- if Error_NoDir(path): return - ''' - if not sys.exists(sys.expandpath(path)): - Draw.PupMenu("Error%t|Path does not exist: " + path) - return True - return False - - -def Warning_MeshDistroyLayers(mesh): - '''Returns true if we can continue to edit the mesh, warn when using NMesh''' - if len(mesh.getUVLayerNames()) >1 and len(mesh.getColorLayerNames()) >1: - return True - - ret = Draw.PupMenu('Warning%t|This script will distroy inactive UV and Color layers, OK?') - if ret == -1: - return False - - return True - -def Warning_SaveOver(path): - '''Returns - True to save, False dont save''' - if sys.exists(sys.expandpath(path)): - ret= Draw.PupMenu('Save over%t|' + path) - if ret == -1: - return False - - return True - - diff --git a/release/scripts/bpymodules/BPyNMesh.py b/release/scripts/bpymodules/BPyNMesh.py deleted file mode 100644 index 043d8514db9..00000000000 --- a/release/scripts/bpymodules/BPyNMesh.py +++ /dev/null @@ -1,48 +0,0 @@ -# $Id$ -# -# -------------------------------------------------------------------------- -# BPyNMesh.py version 0.1 -# -------------------------------------------------------------------------- -# helper functions to be used by other scripts -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - - -# -------------------------------------------------------------------------- -# "Apply size and rotation" function by Jonas Petersen -# -------------------------------------------------------------------------- -# This function does (hopefully) exactly what the -# "Apply size and rotation" command does (CTRL-A in Object Mode). -def ApplySizeAndRotation(obj): - if obj.getType() != "Mesh": return - if obj.SizeX==1.0 and obj.SizeY==1.0 and obj.SizeZ==1.0 and obj.RotX == 0.0 and obj.RotY == 0.0 and obj.RotZ == 0.0: return - mesh = obj.getData() - matrix = obj.matrix - v = [0,0,0] - for vert in mesh.verts: - co = vert.co - v[0] = co[0]*matrix[0][0] + co[1]*matrix[1][0] + co[2]*matrix[2][0] - v[1] = co[0]*matrix[0][1] + co[1]*matrix[1][1] + co[2]*matrix[2][1] - v[2] = co[0]*matrix[0][2] + co[1]*matrix[1][2] + co[2]*matrix[2][2] - co[0], co[1], co[2] = v - obj.SizeX = obj.SizeY = obj.SizeZ = 1.0 - obj.RotX = obj.RotY = obj.RotZ = 0.0 - mesh.update() - diff --git a/release/scripts/bpymodules/BPyObject.py b/release/scripts/bpymodules/BPyObject.py deleted file mode 100644 index 54ff949218d..00000000000 --- a/release/scripts/bpymodules/BPyObject.py +++ /dev/null @@ -1,108 +0,0 @@ -import Blender - -def getObjectArmature(ob): - ''' - This returns the first armature the mesh uses. - remember there can be more then 1 armature but most people dont do that. 
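An aside on ApplySizeAndRotation() above: it bakes the object's 3x3 rotation/scale matrix into every vertex with a row-vector convention before resetting the object transform. The per-vertex transform restated standalone (plain tuples, no Blender API):

def apply_matrix3(verts, matrix):
    # Row-vector convention, as in the deleted helper:
    # v'[i] = v[0]*M[0][i] + v[1]*M[1][i] + v[2]*M[2][i]
    return [
        tuple(sum(v[j] * matrix[j][i] for j in range(3)) for i in range(3))
        for v in verts
    ]

scale2 = [[2.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 2.0]]
assert apply_matrix3([(1.0, 2.0, 3.0)], scale2) == [(2.0, 4.0, 6.0)]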
- ''' - if ob.type != 'Mesh': - return None - - arm = ob.parent - if arm and arm.type == 'Armature' and ob.parentType == Blender.Object.ParentTypes.ARMATURE: - return arm - - for m in ob.modifiers: - if m.type== Blender.Modifier.Types.ARMATURE: - arm = m[Blender.Modifier.Settings.OBJECT] - if arm: - return arm - - return None - - -def getDerivedObjects(ob, PARTICLES= True): - ''' - Takes an objects and returnes a list of (ob, maxrix4x4) pairs - that are derived from this object - - This will include the object its self if it would be rendered. - all dupli's for eg are not rendered themselves. - - currently supports - * dupligroups - * dupliverts - * dupliframes - * static particles as a mesh - - it is possible this function will return an empty list. - ''' - - ob_mtx_pairs = ob.DupObjects - effects= ob.effects - - # Ignore self if were a dupli* or our parent is a duplivert. - if ob.enableDupFrames or ob.enableDupGroup or ob.enableDupVerts: - pass - else: - parent= ob.parent - if parent and parent.enableDupVerts: - pass - else: - if effects and (not effects[0].flag & Blender.Effect.Flags.EMESH): - # Particles mesh wont render - pass - else: - ob_mtx_pairs.append((ob, ob.matrixWorld)) - - - if PARTICLES: - type_vec= type(Blender.Mathutils.Vector()) - type_tp= type((0,0)) - type_ls= type([]) - - # TODO, particles per child object. - # TODO Support materials - me= Blender.Mesh.New() - for eff in effects: - par= eff.getParticlesLoc() - - if par: - type_par= type(par[0]) - - if type_par == type_vec: - # point particles - me.verts.extend(par) - - elif type_par == type_tp: - # edge pairs - start_index= len(me.verts) - me.verts.extend([v for p in par for v in p]) - me.edges.extend( [(i, i+1) for i in xrange(start_index, start_index + len(par) - 1 )] ) - - elif type_par == type_ls: - # lines of edges - start_index= len(me.verts) - me.verts.extend([v for line in par for v in line]) - - edges= [] - for line in par: - edges.extend( [(i,i+1) for i in xrange(start_index, start_index+len(line)-1) ] ) - start_index+= len(line) - - me.edges.extend(edges) - - if me.verts: - # If we have verts, then add the mesh - ob_par = Blender.Object.New('Mesh') - ob_par.link( me ) - - LOOSE= Blender.Mesh.EdgeFlags.LOOSE - for ed in me.edges: - ed.flag |= LOOSE - - # Particle's are in worldspace so an identity matrix is fine. - ob_mtx_pairs.append( (ob_par, Blender.Mathutils.Matrix()) ) - - return ob_mtx_pairs - - diff --git a/release/scripts/bpymodules/BPyRegistry.py b/release/scripts/bpymodules/BPyRegistry.py deleted file mode 100644 index 4d681e15937..00000000000 --- a/release/scripts/bpymodules/BPyRegistry.py +++ /dev/null @@ -1,267 +0,0 @@ -# -------------------------------------------------------------------------- -# Module BPyRegistry version 0.1 -# Helper functions to store / restore configuration data. -# -------------------------------------------------------------------------- -# $Id$ -# -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Copyright (C) 2004: Willian P. Germano, wgermano _at_ ig.com.br -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# -------------------------------------------------------------------------- - -# The Registry is a Python dictionary that is kept in Blender for as long as -# the program is running, where scripts can store / restore persistent data -# (data that is not lost when the script exits). This module provides -# functions to save and restore Registry entries as config data in the -# bpydata/config folder. Scripts just need to give an extra parameter to -# the Blender.Registry.Get/Set() functions to have their data automatically -# saved and restored when needed. -# -# Note: entries starting with an underscore are not saved, so script authors -# can use that fact to define data that is not meant to be stored in a -# config file. Example: data to be passed to another script and references to -# invalid data, like Blender objects and any function or method. -# -# Check the Blender.Registry documentation for more information. - -import Blender -from Blender import Registry, sys as bsys - -_EXT = '.cfg' # file extension for saved config data - -# limits: -#MAX_ITEMS_NUM = 60 # max number of keys per dict and itens per list and tuple -#MAX_STR_LEN = 300 # max string length (remember this is just for config data) - -_CFG_DIR = '' -if Blender.Get('udatadir'): - _CFG_DIR = Blender.sys.join(Blender.Get('udatadir'), 'config') -if not _CFG_DIR or not bsys.exists(_CFG_DIR): - _CFG_DIR = Blender.sys.join(Blender.Get('datadir'), 'config') -if not bsys.exists(_CFG_DIR): - _CFG_DIR = '' - -# to compare against, so we don't write to a cvs tree: -_CVS_SUBPATH = 'release/scripts/bpydata/config/' -if bsys.dirsep == '\\': - _CVS_SUBPATH = _CVS_SUBPATH.replace('/', '\\') - -_KEYS = [k for k in Registry.Keys() if k[0] != '_'] - -# _ITEMS_NUM = 0 - -def _sanitize(o): - "Check recursively that all objects are valid, set invalid ones to None" - - # global MAX_ITEMS_NUM, MAX_STR_LEN, _ITEMS_NUM - - valid_types = [int, float, bool, long, type] - valid_checked_types = [str, unicode] - # Only very simple types are considered valid for configuration data, - # functions, methods and Blender objects (use their names instead) aren't. - - t = type(o) - - if t == dict: - ''' - _ITEMS_NUM += len(o) - if _ITEMS_NUM > MAX_ITEMS_NUM: - return None - ''' - for k, v in o.iteritems(): - o[k] = _sanitize(v) - elif t in [list, tuple]: - ''' - _ITEMS_NUM += len(o) - if _ITEMS_NUM > MAX_ITEMS_NUM: - return None - ''' - return [_sanitize(i) for i in o] - elif t in valid_types: - return o - elif t in valid_checked_types: - ''' - if len(o) > MAX_STR_LEN: - o = o[:MAX_STR_LEN] - ''' - return o - else: return None - - return o - - -def _dict_to_str(name, d): - "Return a pretty-print version of the passed dictionary" - if not d: return 'None' # d can be None if there was no config to pass - - if name: l = ['%s = {' % name] - else: l = ['{'] - #keys = d.keys() - for k,v in d.iteritems(): # .keys() - if type(v) == dict: - l.append("'%s': %s" % (k, _dict_to_str(None, v))) - else: - l.append("'%s': %s," % (k, repr(v))) - if name: l.append('}') - else: l.append('},') - return "\n".join(l) - -_HELP_MSG = """ -Please create a valid scripts config dir tree either by -copying release/scripts/ tree to your dir -or by copying release/scripts/bpydata/ tree to a user -defined scripts dir that you can set in the -User Preferences -> Paths tab -> Python path input box. 
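An aside on _sanitize() above: it walks a Registry entry recursively and keeps only simple, config-friendly values, replacing everything else with None. A Python 3 restatement (str covers the original's str/unicode pair, and long no longer exists):

def sanitize(value):
    # Keep dicts, lists/tuples, numbers, bools and strings; replace
    # everything else with None so it never reaches the config file.
    if isinstance(value, dict):
        return {k: sanitize(v) for k, v in value.items()}
    if isinstance(value, (list, tuple)):
        return [sanitize(v) for v in value]
    if isinstance(value, (bool, int, float, str)):
        return value
    return None

cfg = {'size': 3, 'name': 'cube', 'items': [1, object()], 'callback': print}
assert sanitize(cfg) == {'size': 3, 'name': 'cube', 'items': [1, None], 'callback': None}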
-""" - -def _check_dir(): - global _CFG_DIR, _CVS_SUBPATH, _HELP_MSG - - if not _CFG_DIR: - errmsg = "scripts config dir not found!\n%s" % _HELP_MSG - raise IOError, errmsg - elif _CFG_DIR.find(_CVS_SUBPATH) > 0: - errmsg = """ -Your scripts config dir:\n%s -seems to reside in your local Blender's cvs tree.\n%s""" % (_CFG_DIR, _HELP_MSG) - raise SystemError, errmsg - else: return - - -# API: - -BPY_KEY_MISSING = 0 -BPY_KEY_IN_REGISTRY = 1 -BPY_KEY_IN_FILE = 2 - -def HasConfigData (key): - """ - Check if the given key exists, either already loaded in the Registry dict or - as a file in the script data config dir. - @type key: string - @param key: a given key name. - @returns: - - 0: key does not exist; - - 1: key exists in the Registry dict only; - - 2: key exists as a file only; - - 3: key exists in the Registry dict and also as a file. - @note: for readability it's better to check against the constant bitmasks - BPY_KEY_MISSING = 0, BPY_KEY_IN_REGISTRY = 1 and BPY_KEY_IN_FILE = 2. - """ - - fname = bsys.join(_CFG_DIR, "%s%s" % (key, _EXT)) - - result = BPY_KEY_MISSING - if key in Registry.Keys(): result |= BPY_KEY_IN_REGISTRY - if bsys.exists(fname): result |= BPY_KEY_IN_FILE - - return result - - -def LoadConfigData (key = None): - """ - Load config data from file(s) to the Registry dictionary. - @type key: string - @param key: a given key name. If None (default), all available keys are - loaded. - @returns: None - """ - - _check_dir() - - import os - - if not key: - files = \ - [bsys.join(_CFG_DIR, f) for f in os.listdir(_CFG_DIR) if f.endswith(_EXT)] - else: - files = [] - fname = bsys.join(_CFG_DIR, "%s%s" % (key, _EXT)) - if bsys.exists(fname): files.append(fname) - - for p in files: - try: - f = file(p, 'r') - lines = f.readlines() - f.close() - if lines: # Lines may be blank - mainkey = lines[0].split('=')[0].strip() - pysrc = "\n".join(lines) - exec(pysrc) - exec("Registry.SetKey('%s', %s)" % (str(mainkey), mainkey)) - except Exception, e: - raise Warning(e) # Resend exception as warning - - -def RemoveConfigData (key = None): - """ - Remove this key's config file from the <(u)datadir>/config/ folder. - @type key: string - @param key: the name of the key to be removed. If None (default) all - available config files are deleted. - """ - - _check_dir() - - if not key: - files = \ - [bsys.join(_CFG_DIR, f) for f in os.listdir(_CFG_DIR) if f.endswith(_EXT)] - else: - files = [] - fname = bsys.join(_CFG_DIR, "%s%s" % (key, _EXT)) - if bsys.exists(fname): files.append(fname) - - import os - - try: - for p in files: - os.remove(p) # remove the file(s) - except Exception, e: - raise Warning(e) # Resend exception as warning - - -def SaveConfigData (key = None): - """ - Save Registry key(s) as file(s) in the <(u)datadir>/config/ folder. - @type key: string - @param key: the name of the key to be saved. If None (default) all - available keys are saved. 
- """ - - global _KEYS, _CFG_DIR - - _check_dir() - - if key: keys = [key] - else: keys = _KEYS - - for mainkey in keys: - cfgdict = Registry.GetKey(mainkey).copy() - for k in cfgdict: # .keys() - if not k or k[0] == '_': - del cfgdict[k] - - if not cfgdict: continue - - try: - filename = bsys.join(_CFG_DIR, "%s%s" % (mainkey, _EXT)) - f = file(filename, 'w') - output = _dict_to_str(mainkey, _sanitize(cfgdict)) - if output!='None': - f.write(output) - f.close() - except Exception, e: - raise Warning(e) # Resend exception as warning diff --git a/release/scripts/bpymodules/BPyRender.py b/release/scripts/bpymodules/BPyRender.py deleted file mode 100644 index 951e1ae6300..00000000000 --- a/release/scripts/bpymodules/BPyRender.py +++ /dev/null @@ -1,633 +0,0 @@ -import Blender -from Blender import Scene, sys, Camera, Object, Image -from Blender.Scene import Render -Vector= Blender.Mathutils.Vector - - -def extFromFormat(format): - if format == Render.TARGA: return 'tga' - if format == Render.RAWTGA: return 'tga' - if format == Render.HDR: return 'hdr' - if format == Render.PNG: return 'png' - if format == Render.BMP: return 'bmp' - if format == Render.JPEG: return 'jpg' - if format == Render.HAMX: return 'ham' - if format == Render.TIFF: return 'tif' - if format == Render.CINEON: return 'cine' - if format == Render.DPX: return 'tif' - if format == Render.OPENEXR: return 'exr' - if format == Render.IRIS: return 'rgb' - return '' - - - -def imageFromObjectsOrtho(objects, path, width, height, smooth, alpha= True, camera_matrix= None, format=Render.PNG): - ''' - Takes any number of objects and renders them on the z axis, between x:y-0 and x:y-1 - Usefull for making images from a mesh without per pixel operations - - objects must be alredy placed - - smooth, anti alias True/False - - path renders to a PNG image - - alpha weather to render background as alpha - - returns the blender image - ''' - ext = '.' + extFromFormat(format) - print ext - # remove an extension if its alredy there - if path.lower().endswith(ext): - path= path[:-4] - - path_expand= sys.expandpath(path) + ext - - print path_expand, 'path' - - # Touch the path - try: - f= open(path_expand, 'w') - f.close() - except: - raise 'Error, could not write to path:' + path_expand - - - # RENDER THE FACES. - scn= Scene.GetCurrent() - render_scn= Scene.New() - render_scn.makeCurrent() - render_scn.Layers |= (1<<20)-1 # all layers enabled - - # Add objects into the current scene - for ob in objects: - render_scn.link(ob) - - render_context= render_scn.getRenderingContext() - render_context.setRenderPath('') # so we can ignore any existing path and save to the abs path. - - - render_context.imageSizeX(width) - render_context.imageSizeY(height) - - if smooth: - render_context.enableOversampling(True) - render_context.setOversamplingLevel(16) - else: - render_context.enableOversampling(False) - - render_context.setRenderWinSize(100) - render_context.setImageType(format) - render_context.enableExtensions(True) - #render_context.enableSky() # No alpha needed. 
- if alpha: - render_context.alphaMode= 1 - render_context.enableRGBAColor() - else: - render_context.alphaMode= 0 - render_context.enableRGBColor() - - render_context.displayMode= 0 # fullscreen - - # New camera and object - render_cam_data= Camera.New('ortho') - render_cam_ob= Object.New('Camera') - render_cam_ob.link(render_cam_data) - render_scn.link(render_cam_ob) - render_scn.objects.camera = render_cam_ob - - render_cam_data.type= 'ortho' - - - - # Position the camera - if camera_matrix: - render_cam_ob.setMatrix(camera_matrix) - # We need to take into account the matrix scaling when setting the size - # so we get the image bounds defined by the matrix - # first get the x and y factors from the matrix. - # To render the correct dimensions we must use the aspy and aspy to force the matrix scale to - # override the aspect enforced by the width and weight. - cent= Vector() * camera_matrix - xvec= Vector(1,0,0) * camera_matrix - yvec= Vector(0,1,0) * camera_matrix - # zvec= Vector(0,0,1) * camera_matrix - xlen = (cent-xvec).length # half height of the image - ylen = (cent-yvec).length # half width of the image - # zlen = (cent-zvec).length # dist to place the camera? - just use the loc for now. - - - # less then 1.0 portrate, 1.0 or more is portrate - asp_cam_mat= xlen/ylen # divide by zero? - possible but scripters fault. - asp_image_res= float(width)/height - #print 'asp quad', asp_cam_mat, 'asp_image', asp_image_res - #print 'xylen', xlen, ylen, 'w/h', width, height - # Setup the aspect - - if asp_cam_mat > asp_image_res: - # camera is wider then image res. - # to make the image wider, reduce the aspy - asp_diff= asp_image_res/asp_cam_mat - min_asp= asp_diff * 200 - #print 'X', min_asp - - elif asp_cam_mat < asp_image_res: # asp_cam_mat < asp_image_res - # camera is narrower then image res - # to make the image narrower, reduce the aspx - asp_diff= asp_cam_mat/asp_image_res - min_asp= asp_diff * 200 - #print 'Y', min_asp - else: - min_asp= 200 - - # set the camera size - if xlen > ylen: - if asp_cam_mat > asp_image_res: - render_context.aspectX= 200 # get the greatest range possible - render_context.aspectY= min_asp # get the greatest range possible - else: - render_context.aspectY= 200 # get the greatest range possible - render_context.aspectX= min_asp # get the greatest range possible - #print "xlen bigger" - render_cam_data.scale= xlen * 2 - elif xlen < ylen:# ylen is bigger - if asp_cam_mat > asp_image_res: - render_context.aspectX= 200 # get the greatest range possible - render_context.aspectY= min_asp # get the greatest range possible - else: - render_context.aspectY= 200 # get the greatest range possible - render_context.aspectX= min_asp # get the greatest range possible - #print "ylen bigger" - render_cam_data.scale= ylen *2 - else: - # asppect 1:1 - #print 'NOLEN Bigger' - render_cam_data.scale= xlen * 2 - - #print xlen, ylen, 'xlen, ylen' - - else: - if width > height: - min_asp = int((float(height) / width) * 200) - render_context.aspectX= min_asp - render_context.aspectY= 200 - else: - min_asp = int((float(width) / height) * 200) - render_context.aspectX= 200 - render_context.aspectY= min_asp - - - render_cam_data.scale= 1.0 - render_cam_ob.LocZ= 1.0 - render_cam_ob.LocX= 0.5 - render_cam_ob.LocY= 0.5 - - Blender.Window.RedrawAll() - - render_context.render() - render_context.saveRenderedImage(path) - Render.CloseRenderWindow() - #if not B.sys.exists(PREF_IMAGE_PATH_EXPAND): - # raise 'Error!!!' 
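The aspect handling above is the subtle part: the extents taken from the camera matrix (xlen, ylen) have to win over the aspect implied by the pixel width/height, so one of aspectX/aspectY is scaled down from 200. A standalone sketch of that fitting rule (pure arithmetic, no Blender calls; a condensed reading of the branches above, not a drop-in replacement):

    def ortho_aspect(xlen, ylen, width, height):
        # returns (aspectX, aspectY, cam_scale); caller guarantees ylen != 0
        asp_cam = xlen / ylen                # aspect of the camera-matrix extents
        asp_img = float(width) / height      # aspect of the output pixels
        if asp_cam > asp_img:                # camera wider than the image: shrink aspectY
            aspx, aspy = 200, int(200 * asp_img / asp_cam)
        elif asp_cam < asp_img:              # camera narrower: shrink aspectX
            aspx, aspy = int(200 * asp_cam / asp_img), 200
        else:
            aspx = aspy = 200
        return aspx, aspy, 2.0 * max(xlen, ylen)   # ortho scale covers the larger extent

    print ortho_aspect(2.0, 1.0, 512, 512)   # -> (200, 100, 4.0)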
- - scn.makeCurrent() - Scene.Unlink(render_scn) - - # NOW APPLY THE SAVED IMAGE TO THE FACES! - #print PREF_IMAGE_PATH_EXPAND - try: - target_image= Image.Load(path_expand) - return target_image - except: - raise 'Error: Could not render or load the image at path "%s"' % path_expand - return - - - -#-----------------------------------------------------------------------------# -# UV Baking functions, make a picture from mesh(es) uvs # -#-----------------------------------------------------------------------------# - -def mesh2uv(me_s, PREF_SEL_FACES_ONLY=False): - ''' - Converts a uv mapped mesh into a 2D Mesh from UV coords. - returns a triple - - (mesh2d, face_list, col_list) - "mesh" is the new mesh and... - "face_list" is the faces that were used to make the mesh, - "material_list" is a list of materials used by each face - These are in alligned with the meshes faces, so you can easerly copy data between them - - ''' - render_me= Blender.Mesh.New() - render_me.verts.extend( [Vector(0,0,0),] ) # 0 vert uv bugm dummy vert - face_list= [] - material_list= [] - for me in me_s: - me_materials= me.materials - if PREF_SEL_FACES_ONLY: - me_faces= [f for f in me.faces if f.sel] - else: - me_faces= me.faces - - face_list.extend(me_faces) - - # Dittro - if me_materials: - material_list.extend([me_materials[f.mat] for f in me_faces]) - else: - material_list.extend([None]*len(me_faces)) - - # Now add the verts - render_me.verts.extend( [ Vector(uv.x, uv.y, 0) for f in face_list for uv in f.uv ] ) - - # Now add the faces - tmp_faces= [] - vert_offset= 1 - for f in face_list: - tmp_faces.append( [ii+vert_offset for ii in xrange(len(f))] ) - vert_offset+= len(f) - - render_me.faces.extend(tmp_faces) - render_me.faceUV=1 - return render_me, face_list, material_list - - -def uvmesh_apply_normals(render_me, face_list): - '''Worldspace normals to vertex colors''' - for i, f in enumerate(render_me.faces): - face_orig= face_list[i] - f_col= f.col - for j, v in enumerate(face_orig): - c= f_col[j] - nx, ny, nz= v.no - c.r= int((nx+1)*128)-1 - c.g= int((ny+1)*128)-1 - c.b= int((nz+1)*128)-1 - -def uvmesh_apply_image(render_me, face_list): - '''Copy the image and uvs from the original faces''' - for i, f in enumerate(render_me.faces): - f.uv= face_list[i].uv - f.image= face_list[i].image - - -def uvmesh_apply_vcol(render_me, face_list): - '''Copy the vertex colors from the original faces''' - for i, f in enumerate(render_me.faces): - face_orig= face_list[i] - f_col= f.col - for j, c_orig in enumerate(face_orig.col): - c= f_col[j] - c.r= c_orig.r - c.g= c_orig.g - c.b= c_orig.b - -def uvmesh_apply_matcol(render_me, material_list): - '''Get the vertex colors from the original materials''' - for i, f in enumerate(render_me.faces): - mat_orig= material_list[i] - f_col= f.col - if mat_orig: - for c in f_col: - c.r= int(mat_orig.R*255) - c.g= int(mat_orig.G*255) - c.b= int(mat_orig.B*255) - else: - for c in f_col: - c.r= 255 - c.g= 255 - c.b= 255 - -def uvmesh_apply_col(render_me, color): - '''Get the vertex colors from the original materials''' - r,g,b= color - for i, f in enumerate(render_me.faces): - f_col= f.col - for c in f_col: - c.r= r - c.g= g - c.b= b - - -def vcol2image(me_s,\ - PREF_IMAGE_PATH,\ - PREF_IMAGE_SIZE,\ - PREF_IMAGE_BLEED,\ - PREF_IMAGE_SMOOTH,\ - PREF_IMAGE_WIRE,\ - PREF_IMAGE_WIRE_INVERT,\ - PREF_IMAGE_WIRE_UNDERLAY,\ - PREF_USE_IMAGE,\ - PREF_USE_VCOL,\ - PREF_USE_MATCOL,\ - PREF_USE_NORMAL,\ - PREF_USE_TEXTURE,\ - PREF_SEL_FACES_ONLY): - - - def rnd_mat(): - render_mat= 
Blender.Material.New() - mode= render_mat.mode - - # Dont use lights ever - mode |= Blender.Material.Modes.SHADELESS - - if PREF_IMAGE_WIRE: - # Set the wire color - if PREF_IMAGE_WIRE_INVERT: - render_mat.rgbCol= (1,1,1) - else: - render_mat.rgbCol= (0,0,0) - - mode |= Blender.Material.Modes.WIRE - if PREF_USE_VCOL or PREF_USE_MATCOL or PREF_USE_NORMAL: # both vcol and material color use vertex cols to avoid the 16 max limit in materials - mode |= Blender.Material.Modes.VCOL_PAINT - if PREF_USE_IMAGE: - mode |= Blender.Material.Modes.TEXFACE - - # Copy back the mode - render_mat.mode |= mode - return render_mat - - - render_me, face_list, material_list= mesh2uv(me_s, PREF_SEL_FACES_ONLY) - - # Normals exclude all others - if PREF_USE_NORMAL: - uvmesh_apply_normals(render_me, face_list) - else: - if PREF_USE_IMAGE: - uvmesh_apply_image(render_me, face_list) - uvmesh_apply_vcol(render_me, face_list) - - elif PREF_USE_VCOL: - uvmesh_apply_vcol(render_me, face_list) - - elif PREF_USE_MATCOL: - uvmesh_apply_matcol(render_me, material_list) - - elif PREF_USE_TEXTURE: - # if we have more then 16 materials across all the mesh objects were stuffed :/ - # get unique materials - tex_unique_materials= dict([(mat.name, mat) for mat in material_list]).values()[:16] # just incase we have more then 16 - tex_me= Blender.Mesh.New() - - # Backup the original shadless setting - tex_unique_materials_shadeless= [ mat.mode & Blender.Material.Modes.SHADELESS for mat in tex_unique_materials ] - - # Turn shadeless on - for mat in tex_unique_materials: - mat.mode |= Blender.Material.Modes.SHADELESS - - # Assign materials - render_me.materials= tex_unique_materials - - - - tex_material_indicies= dict([(mat.name, i) for i, mat in enumerate(tex_unique_materials)]) - - tex_me.verts.extend([Vector(0,0,0),]) # dummy - tex_me.verts.extend( [ Vector(v.co) for f in face_list for v in f ] ) - - # Now add the faces - tmp_faces= [] - vert_offset= 1 - for f in face_list: - tmp_faces.append( [ii+vert_offset for ii in xrange(len(f))] ) - vert_offset+= len(f) - - tex_me.faces.extend(tmp_faces) - - # Now we have the faces, put materials and normal, uvs into the mesh - if len(tex_me.faces) != len(face_list): - # Should never happen - raise "Error face length mismatch" - - # Copy data to the mesh that could be used as texture coords - for i, tex_face in enumerate(tex_me.faces): - orig_face= face_list[i] - - # Set the material index - try: - render_face.mat= tex_material_indicies[ material_list[i].name ] - except: - # more then 16 materials - pass - - - # set the uvs on the texmesh mesh - tex_face.uv= orig_face.uv - - orig_face_v= orig_face.v - # Set the normals - for j, v in enumerate(tex_face): - v.no= orig_face_v[j].no - - # Set the texmesh - render_me.texMesh= tex_me - # END TEXMESH - - - # Handel adding objects - render_ob= Blender.Object.New('Mesh') - render_ob.link(render_me) - - if not PREF_USE_TEXTURE: # textures use the original materials - render_me.materials= [rnd_mat()] - - - obs= [render_ob] - - - if PREF_IMAGE_WIRE_UNDERLAY: - # Make another mesh with the material colors - render_me_under, face_list, material_list= mesh2uv(me_s, PREF_SEL_FACES_ONLY) - - uvmesh_apply_matcol(render_me_under, material_list) - - # Handel adding objects - render_ob= Blender.Object.New('Mesh') - render_ob.link(render_me_under) - render_ob.LocZ= -0.01 - - # Add material and disable wire - mat= rnd_mat() - mat.rgbCol= 1,1,1 - mat.alpha= 0.5 - mat.mode &= ~Blender.Material.Modes.WIRE - mat.mode |= Blender.Material.Modes.VCOL_PAINT - - 
render_me_under.materials= [mat] - - obs.append(render_ob) - - elif PREF_IMAGE_BLEED and not PREF_IMAGE_WIRE: - # EVIL BLEEDING CODE!! - Just do copys of the mesh and place behind. Crufty but better then many other methods I have seen. - Cam - BLEED_PIXEL= 1.0/PREF_IMAGE_SIZE - z_offset= 0.0 - for i in xrange(PREF_IMAGE_BLEED): - for diag1, diag2 in ((-1,-1),(-1,1),(1,-1),(1,1), (1,0), (0,1), (-1,0), (0, -1)): # This line extends the object in 8 different directions, top avoid bleeding. - - render_ob= Blender.Object.New('Mesh') - render_ob.link(render_me) - - render_ob.LocX= (i+1)*diag1*BLEED_PIXEL - render_ob.LocY= (i+1)*diag2*BLEED_PIXEL - render_ob.LocZ= -z_offset - - obs.append(render_ob) - z_offset += 0.01 - - - - image= imageFromObjectsOrtho(obs, PREF_IMAGE_PATH, PREF_IMAGE_SIZE, PREF_IMAGE_SIZE, PREF_IMAGE_SMOOTH) - - # Clear from memory as best as we can - render_me.verts= None - - if PREF_IMAGE_WIRE_UNDERLAY: - render_me_under.verts= None - - if PREF_USE_TEXTURE: - tex_me.verts= None - # Restire Shadeless setting - for i, mat in enumerate(tex_unique_materials): - # we know there all on so turn it off of its not set - if not tex_unique_materials_shadeless[i]: - mat.mode &= ~Blender.Material.Modes.SHADELESS - - return image - -def bakeToPlane(sce, ob_from, width, height, bakemodes, axis='z', margin=0, depth=32): - ''' - Bakes terrain onto a plane from one object - sce - scene to bake with - ob_from - mesh object - width/height - image size - bakemodes - list of baking modes to use, Blender.Scene.Render.BakeModes.NORMALS, Blender.Scene.Render.BakeModes.AO ... etc - axis - axis to allign the plane to. - margin - margin setting for baking. - depth - bit depth for the images to bake into, (32 or 128 for floating point images) - Example: - import Blender - from Blender import * - import BPyRender - sce = Scene.GetCurrent() - ob = Object.Get('Plane') - BPyRender.bakeToPlane(sce, ob, 512, 512, [Scene.Render.BakeModes.DISPLACEMENT, Scene.Render.BakeModes.NORMALS], 'z', 8 ) - ''' - - # Backup bake settings - rend = sce.render - BACKUP_bakeDist = rend.bakeDist - BACKUP_bakeBias = rend.bakeBias - BACKUP_bakeMode = rend.bakeMode - BACKUP_bakeClear = rend.bakeClear - BACKUP_bakeMargin = rend.bakeMargin - BACKUP_bakeToActive = rend.bakeToActive - BACKUP_bakeNormalize = rend.bakeNormalize - - # Backup object selection - BACKUP_obsel = list(sce.objects.selected) - BACKUP_obact = sce.objects.active - - # New bake settings - rend.bakeClear = True - rend.bakeMargin = margin - rend.bakeToActive = True - rend.bakeNormalize = True - - # Assume a mesh - me_from = ob_from.getData(mesh=1) - - xmin = ymin = zmin = 10000000000 - xmax = ymax = zmax =-10000000000 - - # Dont trust bounding boxes :/ - #bounds = ob_from.boundingBox - #for v in bounds: - # x,y,z = tuple(v) - mtx = ob_from.matrixWorld - for v in me_from.verts: - x,y,z = tuple(v.co*mtx) - - xmax = max(xmax, x) - ymax = max(ymax, y) - zmax = max(zmax, z) - - xmin = min(xmin, x) - ymin = min(ymin, y) - zmin = min(zmin, z) - - if axis=='x': - xmed = (xmin+xmax)/2.0 - co1 = (xmed, ymin, zmin) - co2 = (xmed, ymin, zmax) - co3 = (xmed, ymax, zmax) - co4 = (xmed, ymax, zmin) - rend.bakeDist = ((xmax-xmin)/2.0) + 0.000001 # we need a euler value for this since it - elif axis=='y': - ymed = (ymin+ymax)/2.0 - co1 = (xmin, ymed, zmin) - co2 = (xmin, ymed, zmax) - co3 = (xmax, ymed, zmax) - co4 = (xmax, ymed, zmin) - rend.bakeDist = ((ymax-ymin)/2.0) + 0.000001 - elif axis=='z': - zmed = (zmin+zmax)/2.0 - co1 = (xmin, ymin, zmed) - co2 = (xmin, ymax, zmed) - co3 
= (xmax, ymax, zmed) - co4 = (xmax, ymin, zmed) - rend.bakeDist = ((zmax-zmin)/2.0) + 0.000001 - else: - raise "invalid axis" - me_plane = Blender.Mesh.New() - ob_plane = Blender.Object.New('Mesh') - ob_plane.link(me_plane) - sce.objects.link(ob_plane) - ob_plane.Layers = ob_from.Layers - - ob_from.sel = 1 # make active - sce.objects.active = ob_plane - ob_plane.sel = 1 - - me_plane.verts.extend([co4, co3, co2, co1]) - me_plane.faces.extend([(0,1,2,3)]) - me_plane.faceUV = True - me_plane_face = me_plane.faces[0] - uvs = me_plane_face.uv - uvs[0].x = 0.0; uvs[0].y = 0.0 - uvs[1].x = 0.0; uvs[1].y = 1.0 - uvs[2].x = 1.0; uvs[2].y = 1.0 - uvs[3].x = 1.0; uvs[3].y = 0.0 - - images_return = [] - - for mode in bakemodes: - img = Blender.Image.New('bake', width, height, depth) - - me_plane_face.image = img - rend.bakeMode = mode - rend.bake() - images_return.append( img ) - - # Restore bake settings - #''' - rend.bakeDist = BACKUP_bakeDist - rend.bakeBias = BACKUP_bakeBias - rend.bakeMode = BACKUP_bakeMode - rend.bakeClear = BACKUP_bakeClear - rend.bakeMargin = BACKUP_bakeMargin - rend.bakeToActive = BACKUP_bakeToActive - rend.bakeNormalize = BACKUP_bakeNormalize - - - # Restore obsel - sce.objects.selected = BACKUP_obsel - sce.objects.active = BACKUP_obact - - me_plane.verts = None - sce.objects.unlink(ob_plane) - #''' - - return images_return - diff --git a/release/scripts/bpymodules/BPySys.py b/release/scripts/bpymodules/BPySys.py deleted file mode 100644 index a2d2120ebff..00000000000 --- a/release/scripts/bpymodules/BPySys.py +++ /dev/null @@ -1,74 +0,0 @@ - -## This was used to make V, but faster not to do all that -##valid = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_' -##v = range(255) -##for c in valid: v.remove(ord(c)) -v = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,58,59,60,61,62,63,64,91,92,93,94,96,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254] -invalid = ''.join([chr(i) for i in v]) -## del v, c, i, valid -del v, i - -def cleanName(name): - for ch in invalid: name = name.replace(ch, '_') - return name - -def caseInsensitivePath(path, RET_FOUND=False): - ''' - Get a case insensitive path on a case sensitive system - - RET_FOUND is for internal use only, to avoid too many calls to os.path.exists - # Example usage - getCaseInsensitivePath('/hOmE/mE/sOmEpAtH.tXt') - ''' - import os # todo, what happens with no os? - - if os==None: - if RET_FOUND: ret = path, True - else: ret = path - return ret - - if path=='' or os.path.exists(path): - if RET_FOUND: ret = path, True - else: ret = path - return ret - - f = os.path.basename(path) # f may be a directory or a file - d = os.path.dirname(path) - - suffix = '' - if not f: # dir ends with a slash? 
- if len(d) < len(path): - suffix = path[:len(path)-len(d)] - - f = os.path.basename(d) - d = os.path.dirname(d) - - if not os.path.exists(d): - d, found = caseInsensitivePath(d, True) - - if not found: - if RET_FOUND: ret = path, False - else: ret = path - return ret - - # at this point, the directory exists but not the file - - try: # we are expecting 'd' to be a directory, but it could be a file - files = os.listdir(d) - except: - if RET_FOUND: ret = path, False - else: ret = path - - f_low = f.lower() - - try: f_nocase = [fl for fl in files if fl.lower() == f_low][0] - except: f_nocase = None - - if f_nocase: - if RET_FOUND: ret = os.path.join(d, f_nocase) + suffix, True - else: ret = os.path.join(d, f_nocase) + suffix - return ret - else: - if RET_FOUND: ret = path, False - else: ret = path - return ret # cant find the right one, just return the path as is. \ No newline at end of file diff --git a/release/scripts/bpymodules/BPyTextPlugin.py b/release/scripts/bpymodules/BPyTextPlugin.py deleted file mode 100644 index cd5a085de37..00000000000 --- a/release/scripts/bpymodules/BPyTextPlugin.py +++ /dev/null @@ -1,814 +0,0 @@ -"""The BPyTextPlugin Module - -Use get_cached_descriptor(txt) to retrieve information about the script held in -the txt Text object. - -Use print_cache_for(txt) to print the information to the console. - -Use line, cursor = current_line(txt) to get the logical line and cursor position - -Use get_targets(line, cursor) to find out what precedes the cursor: - aaa.bbb.cc|c.ddd -> ['aaa', 'bbb', 'cc'] - -Use resolve_targets(txt, targets) to turn a target list into a usable object if -one is found to match. -""" - -import bpy, sys, os -import __builtin__, tokenize -from Blender.sys import time -from tokenize import generate_tokens, TokenError, \ - COMMENT, DEDENT, INDENT, NAME, NEWLINE, NL, STRING, NUMBER - -class Definition: - """Describes a definition or defined object through its name, line number - and docstring. This is the base class for definition based descriptors. - """ - - def __init__(self, name, lineno, doc=''): - self.name = name - self.lineno = lineno - self.doc = doc - -class ScriptDesc: - """Describes a script through lists of further descriptor objects (classes, - defs, vars) and dictionaries to built-in types (imports). If a script has - not been fully parsed, its incomplete flag will be set. The time of the last - parse is held by the time field and the name of the text object from which - it was parsed, the name field. - """ - - def __init__(self, name, imports, classes, defs, vars, incomplete=False): - self.name = name - self.imports = imports - self.classes = classes - self.defs = defs - self.vars = vars - self.incomplete = incomplete - self.parse_due = 0 - - def set_delay(self, delay): - self.parse_due = time() + delay - -class ClassDesc(Definition): - """Describes a class through lists of further descriptor objects (defs and - vars). The name of the class is held by the name field and the line on - which it is defined is held in lineno. - """ - - def __init__(self, name, parents, defs, vars, lineno, doc=''): - Definition.__init__(self, name, lineno, doc) - self.parents = parents - self.defs = defs - self.vars = vars - -class FunctionDesc(Definition): - """Describes a function through its name and list of parameters (name, - params) and the line on which it is defined (lineno). 
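These descriptor classes are plain records; for instance, for a line 'def doit(one, two, three):' found on line 10 of a text, the parser below ends up storing something like the following (the values here are hypothetical):

    fd = FunctionDesc('doit', ['one', 'two', 'three'], 10, doc='example docstring')
    print fd.name, fd.params, fd.lineno    # -> doit ['one', 'two', 'three'] 10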
- """ - - def __init__(self, name, params, lineno, doc=''): - Definition.__init__(self, name, lineno, doc) - self.params = params - -class VarDesc(Definition): - """Describes a variable through its name and type (if ascertainable) and the - line on which it is defined (lineno). If no type can be determined, type - will equal None. - """ - - def __init__(self, name, type, lineno): - Definition.__init__(self, name, lineno) - self.type = type # None for unknown (supports: dict/list/str) - -# Context types -CTX_UNSET = -1 -CTX_NORMAL = 0 -CTX_SINGLE_QUOTE = 1 -CTX_DOUBLE_QUOTE = 2 -CTX_COMMENT = 3 - -# Python keywords -KEYWORDS = ['and', 'del', 'from', 'not', 'while', 'as', 'elif', 'global', - 'or', 'with', 'assert', 'else', 'if', 'pass', 'yield', - 'break', 'except', 'import', 'print', 'class', 'exec', 'in', - 'raise', 'continue', 'finally', 'is', 'return', 'def', 'for', - 'lambda', 'try' ] - -# Module file extensions -MODULE_EXTS = ['.py', '.pyc', '.pyo', '.pyw', '.pyd'] - -ModuleType = type(__builtin__) -NoneScriptDesc = ScriptDesc('', dict(), dict(), dict(), dict(), True) - -_modules = {} -_modules_updated = 0 -_parse_cache = dict() - -def _load_module_names(): - """Searches the sys.path for module files and lists them, along with - sys.builtin_module_names, in the global dict _modules. - """ - - global _modules - - for n in sys.builtin_module_names: - _modules[n] = None - for p in sys.path: - if p == '': p = os.curdir - if not os.path.isdir(p): continue - for f in os.listdir(p): - for ext in MODULE_EXTS: - if f.endswith(ext): - _modules[f[:-len(ext)]] = None - break - -_load_module_names() - -def _trim_doc(doc): - """Trims the quotes from a quoted STRING token (eg. "'''text'''" -> "text") - """ - - l = len(doc) - i = 0 - while i < l/2 and (doc[i] == "'" or doc[i] == '"'): - i += 1 - return doc[i:-i] - -def resolve_targets(txt, targets): - """Attempts to return a useful object for the locally or externally defined - entity described by targets. If the object is local (defined in txt), a - Definition instance is returned. If the object is external (imported or - built in), the object itself is returned. If no object can be found, None is - returned. 
- """ - - count = len(targets) - if count==0: return None - - obj = None - local = None - i = 1 - - desc = get_cached_descriptor(txt) - b = targets[0].find('(') - if b==-1: b = None # Trick to let us use [:b] and get the whole string - - if desc.classes.has_key(targets[0][:b]): - local = desc.classes[targets[0][:b]] - elif desc.defs.has_key(targets[0]): - local = desc.defs[targets[0]] - elif desc.vars.has_key(targets[0]): - obj = desc.vars[targets[0]].type - - if local: - while i < count: - b = targets[i].find('(') - if b==-1: b = None - if hasattr(local, 'classes') and local.classes.has_key(targets[i][:b]): - local = local.classes[targets[i][:b]] - elif hasattr(local, 'defs') and local.defs.has_key(targets[i]): - local = local.defs[targets[i]] - elif hasattr(local, 'vars') and local.vars.has_key(targets[i]): - obj = local.vars[targets[i]].type - local = None - i += 1 - break - else: - local = None - break - i += 1 - - if local: return local - - if not obj: - if desc.imports.has_key(targets[0]): - obj = desc.imports[targets[0]] - else: - builtins = get_builtins() - if builtins.has_key(targets[0]): - obj = builtins[targets[0]] - - while obj and i < count: - if hasattr(obj, targets[i]): - obj = getattr(obj, targets[i]) - else: - obj = None - break - i += 1 - - return obj - -def get_cached_descriptor(txt, force_parse=0): - """Returns the cached ScriptDesc for the specified Text object 'txt'. If the - script has not been parsed in the last 'period' seconds it will be reparsed - to obtain this descriptor. - - Specifying TP_AUTO for the period (default) will choose a period based on the - size of the Text object. Larger texts are parsed less often. - """ - - global _parse_cache - - parse = True - key = hash(txt) - if not force_parse and _parse_cache.has_key(key): - desc = _parse_cache[key] - if desc.parse_due > time(): - parse = desc.incomplete - - if parse: - desc = parse_text(txt) - - return desc - -def parse_text(txt): - """Parses an entire script's text and returns a ScriptDesc instance - containing information about the script. - - If the text is not a valid Python script (for example if brackets are left - open), parsing may fail to complete. However, if this occurs, no exception - is thrown. Instead the returned ScriptDesc instance will have its incomplete - flag set and information processed up to this point will still be accessible. - """ - - start_time = time() - txt.reset() - tokens = generate_tokens(txt.readline) # Throws TokenError - - curl, cursor = txt.getCursorPos() - linen = curl + 1 # Token line numbers are one-based - - imports = dict() - imp_step = 0 - - classes = dict() - cls_step = 0 - - defs = dict() - def_step = 0 - - vars = dict() - var1_step = 0 - var2_step = 0 - var3_step = 0 - var_accum = dict() - var_forflag = False - - indent = 0 - prev_type = -1 - prev_text = '' - incomplete = False - - while True: - try: - type, text, start, end, line = tokens.next() - except StopIteration: - break - except (TokenError, IndentationError): - incomplete = True - break - - # Skip all comments and line joining characters - if type == COMMENT or type == NL: - continue - - ################# - ## Indentation ## - ################# - - if type == INDENT: - indent += 1 - elif type == DEDENT: - indent -= 1 - - ######################### - ## Module importing... 
## - ######################### - - imp_store = False - - # Default, look for 'from' or 'import' to start - if imp_step == 0: - if text == 'from': - imp_tmp = [] - imp_step = 1 - elif text == 'import': - imp_from = None - imp_tmp = [] - imp_step = 2 - - # Found a 'from', create imp_from in form '???.???...' - elif imp_step == 1: - if text == 'import': - imp_from = '.'.join(imp_tmp) - imp_tmp = [] - imp_step = 2 - elif type == NAME: - imp_tmp.append(text) - elif text != '.': - imp_step = 0 # Invalid syntax - - # Found 'import', imp_from is populated or None, create imp_name - elif imp_step == 2: - if text == 'as': - imp_name = '.'.join(imp_tmp) - imp_step = 3 - elif type == NAME or text == '*': - imp_tmp.append(text) - elif text != '.': - imp_name = '.'.join(imp_tmp) - imp_symb = imp_name - imp_store = True - - # Found 'as', change imp_symb to this value and go back to step 2 - elif imp_step == 3: - if type == NAME: - imp_symb = text - else: - imp_store = True - - # Both imp_name and imp_symb have now been populated so we can import - if imp_store: - - # Handle special case of 'import *' - if imp_name == '*': - parent = get_module(imp_from) - imports.update(parent.__dict__) - - else: - # Try importing the name as a module - try: - if imp_from: - module = get_module(imp_from +'.'+ imp_name) - else: - module = get_module(imp_name) - except (ImportError, ValueError, AttributeError, TypeError): - # Try importing name as an attribute of the parent - try: - module = __import__(imp_from, globals(), locals(), [imp_name]) - imports[imp_symb] = getattr(module, imp_name) - except (ImportError, ValueError, AttributeError, TypeError): - pass - else: - imports[imp_symb] = module - - # More to import from the same module? - if text == ',': - imp_tmp = [] - imp_step = 2 - else: - imp_step = 0 - - ################### - ## Class parsing ## - ################### - - # If we are inside a class then def and variable parsing should be done - # for the class. Otherwise the definitions are considered global - - # Look for 'class' - if cls_step == 0: - if text == 'class': - cls_name = None - cls_lineno = start[0] - cls_indent = indent - cls_step = 1 - - # Found 'class', look for cls_name followed by '(' parents ')' - elif cls_step == 1: - if not cls_name: - if type == NAME: - cls_name = text - cls_sline = False - cls_parents = dict() - cls_defs = dict() - cls_vars = dict() - elif type == NAME: - if classes.has_key(text): - parent = classes[text] - cls_parents[text] = parent - cls_defs.update(parent.defs) - cls_vars.update(parent.vars) - elif text == ':': - cls_step = 2 - - # Found 'class' name ... 
':', now check if it's a single line statement - elif cls_step == 2: - if type == NEWLINE: - cls_sline = False - else: - cls_sline = True - cls_doc = '' - cls_step = 3 - - elif cls_step == 3: - if not cls_doc and type == STRING: - cls_doc = _trim_doc(text) - if cls_sline: - if type == NEWLINE: - classes[cls_name] = ClassDesc(cls_name, cls_parents, cls_defs, cls_vars, cls_lineno, cls_doc) - cls_step = 0 - else: - if type == DEDENT and indent <= cls_indent: - classes[cls_name] = ClassDesc(cls_name, cls_parents, cls_defs, cls_vars, cls_lineno, cls_doc) - cls_step = 0 - - ################# - ## Def parsing ## - ################# - - # Look for 'def' - if def_step == 0: - if text == 'def': - def_name = None - def_lineno = start[0] - def_step = 1 - - # Found 'def', look for def_name followed by '(' - elif def_step == 1: - if type == NAME: - def_name = text - def_params = [] - elif def_name and text == '(': - def_step = 2 - - # Found 'def' name '(', now identify the parameters upto ')' - # TODO: Handle ellipsis '...' - elif def_step == 2: - if type == NAME: - def_params.append(text) - elif text == ':': - def_step = 3 - - # Found 'def' ... ':', now check if it's a single line statement - elif def_step == 3: - if type == NEWLINE: - def_sline = False - else: - def_sline = True - def_doc = '' - def_step = 4 - - elif def_step == 4: - if type == STRING: - def_doc = _trim_doc(text) - newdef = None - if def_sline: - if type == NEWLINE: - newdef = FunctionDesc(def_name, def_params, def_lineno, def_doc) - else: - if type == NAME: - newdef = FunctionDesc(def_name, def_params, def_lineno, def_doc) - if newdef: - if cls_step > 0: # Parsing a class - cls_defs[def_name] = newdef - else: - defs[def_name] = newdef - def_step = 0 - - ########################## - ## Variable assignation ## - ########################## - - if cls_step > 0: # Parsing a class - # Look for 'self.???' - if var1_step == 0: - if text == 'self': - var1_step = 1 - elif var1_step == 1: - if text == '.': - var_name = None - var1_step = 2 - else: - var1_step = 0 - elif var1_step == 2: - if type == NAME: - var_name = text - if cls_vars.has_key(var_name): - var_step = 0 - else: - var1_step = 3 - elif var1_step == 3: - if text == '=': - var1_step = 4 - elif text != ',': - var1_step = 0 - elif var1_step == 4: - var_type = None - if type == NUMBER: - close = end[1] - if text.find('.') != -1: var_type = float - else: var_type = int - elif type == STRING: - close = end[1] - var_type = str - elif text == '[': - close = line.find(']', end[1]) - var_type = list - elif text == '(': - close = line.find(')', end[1]) - var_type = tuple - elif text == '{': - close = line.find('}', end[1]) - var_type = dict - elif text == 'dict': - close = line.find(')', end[1]) - var_type = dict - if var_type and close+1 < len(line): - if line[close+1] != ' ' and line[close+1] != '\t': - var_type = None - cls_vars[var_name] = VarDesc(var_name, var_type, start[0]) - var1_step = 0 - - elif def_step > 0: # Parsing a def - # Look for 'global ???[,???]' - if var2_step == 0: - if text == 'global': - var2_step = 1 - elif var2_step == 1: - if type == NAME: - if not vars.has_key(text): - vars[text] = VarDesc(text, None, start[0]) - elif text != ',' and type != NL: - var2_step == 0 - - else: # In global scope - if var3_step == 0: - # Look for names - if text == 'for': - var_accum = dict() - var_forflag = True - elif text == '=' or (var_forflag and text == 'in'): - var_forflag = False - var3_step = 1 - elif type == NAME: - if prev_text != '.' 
and not vars.has_key(text): - var_accum[text] = VarDesc(text, None, start[0]) - elif not text in [',', '(', ')', '[', ']']: - var_accum = dict() - var_forflag = False - elif var3_step == 1: - if len(var_accum) != 1: - var_type = None - vars.update(var_accum) - else: - var_name = var_accum.keys()[0] - var_type = None - if type == NUMBER: - if text.find('.') != -1: var_type = float - else: var_type = int - elif type == STRING: var_type = str - elif text == '[': var_type = list - elif text == '(': var_type = tuple - elif text == '{': var_type = dict - vars[var_name] = VarDesc(var_name, var_type, start[0]) - var3_step = 0 - - ####################### - ## General utilities ## - ####################### - - prev_type = type - prev_text = text - - desc = ScriptDesc(txt.name, imports, classes, defs, vars, incomplete) - desc.set_delay(10 * (time()-start_time) + 0.05) - - global _parse_cache - _parse_cache[hash(txt)] = desc - return desc - -def get_modules(since=1): - """Returns the set of built-in modules and any modules that have been - imported into the system upto 'since' seconds ago. - """ - - global _modules, _modules_updated - - t = time() - if _modules_updated < t - since: - _modules.update(sys.modules) - _modules_updated = t - return _modules.keys() - -def suggest_cmp(x, y): - """Use this method when sorting a list of suggestions. - """ - - return cmp(x[0].upper(), y[0].upper()) - -def get_module(name): - """Returns the module specified by its name. The module itself is imported - by this method and, as such, any initialization code will be executed. - """ - - mod = __import__(name) - components = name.split('.') - for comp in components[1:]: - mod = getattr(mod, comp) - return mod - -def type_char(v): - """Returns the character used to signify the type of a variable. Use this - method to identify the type character for an item in a suggestion list. - - The following values are returned: - 'm' if the parameter is a module - 'f' if the parameter is callable - 'v' if the parameter is variable or otherwise indeterminable - - """ - - if isinstance(v, ModuleType): - return 'm' - elif callable(v): - return 'f' - else: - return 'v' - -def get_context(txt): - """Establishes the context of the cursor in the given Blender Text object - - Returns one of: - CTX_NORMAL - Cursor is in a normal context - CTX_SINGLE_QUOTE - Cursor is inside a single quoted string - CTX_DOUBLE_QUOTE - Cursor is inside a double quoted string - CTX_COMMENT - Cursor is inside a comment - - """ - - l, cursor = txt.getCursorPos() - lines = txt.asLines(0, l+1) - - # FIXME: This method is too slow in large files for it to be called as often - # as it is. So for lines below the 1000th line we do this... 
(quorn) - if l > 1000: return CTX_NORMAL - - # Detect context (in string or comment) - in_str = CTX_NORMAL - for line in lines: - if l == 0: - end = cursor - else: - end = len(line) - l -= 1 - - # Comments end at new lines - if in_str == CTX_COMMENT: - in_str = CTX_NORMAL - - for i in range(end): - if in_str == 0: - if line[i] == "'": in_str = CTX_SINGLE_QUOTE - elif line[i] == '"': in_str = CTX_DOUBLE_QUOTE - elif line[i] == '#': in_str = CTX_COMMENT - else: - if in_str == CTX_SINGLE_QUOTE: - if line[i] == "'": - in_str = CTX_NORMAL - # In again if ' escaped, out again if \ escaped, and so on - for a in range(i-1, -1, -1): - if line[a] == '\\': in_str = 1-in_str - else: break - elif in_str == CTX_DOUBLE_QUOTE: - if line[i] == '"': - in_str = CTX_NORMAL - # In again if " escaped, out again if \ escaped, and so on - for a in range(i-1, -1, -1): - if line[i-a] == '\\': in_str = 2-in_str - else: break - - return in_str - -def current_line(txt): - """Extracts the Python script line at the cursor in the Blender Text object - provided and cursor position within this line as the tuple pair (line, - cursor). - """ - - lineindex, cursor = txt.getCursorPos() - lines = txt.asLines() - line = lines[lineindex] - - # Join previous lines to this line if spanning - i = lineindex - 1 - while i > 0: - earlier = lines[i].rstrip() - if earlier.endswith('\\'): - line = earlier[:-1] + ' ' + line - cursor += len(earlier) - i -= 1 - - # Join later lines while there is an explicit joining character - i = lineindex - while i < len(lines)-1 and lines[i].rstrip().endswith('\\'): - later = lines[i+1].strip() - line = line + ' ' + later[:-1] - i += 1 - - return line, cursor - -def get_targets(line, cursor): - """Parses a period separated string of valid names preceding the cursor and - returns them as a list in the same order. - """ - - brk = 0 - targets = [] - j = cursor - i = j-1 - while i >= 0: - if line[i] == ')': brk += 1 - elif brk: - if line[i] == '(': brk -= 1 - else: - if line[i] == '.': - targets.insert(0, line[i+1:j]); j=i - elif not (line[i].isalnum() or line[i] == '_' or line[i] == '.'): - break - i -= 1 - targets.insert(0, line[i+1:j]) - return targets - -def get_defs(txt): - """Returns a dictionary which maps definition names in the source code to - a list of their parameter names. - - The line 'def doit(one, two, three): print one' for example, results in the - mapping 'doit' : [ 'one', 'two', 'three' ] - """ - - return get_cached_descriptor(txt).defs - -def get_vars(txt): - """Returns a dictionary of variable names found in the specified Text - object. This method locates all names followed directly by an equal sign: - 'a = ???' or indirectly as part of a tuple/list assignment or inside a - 'for ??? in ???:' block. - """ - - return get_cached_descriptor(txt).vars - -def get_imports(txt): - """Returns a dictionary which maps symbol names in the source code to their - respective modules. - - The line 'from Blender import Text as BText' for example, results in the - mapping 'BText' : - - Note that this method imports the modules to provide this mapping as as such - will execute any initilization code found within. - """ - - return get_cached_descriptor(txt).imports - -def get_builtins(): - """Returns a dictionary of built-in modules, functions and variables.""" - - return __builtin__.__dict__ - - -################################# -## Debugging utility functions ## -################################# - -def print_cache_for(txt, period=sys.maxint): - """Prints out the data cached for a given Text object. 
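The get_targets() splitting defined above is easy to sanity-check in isolation; this reproduces the module docstring's own example (BPyTextPlugin imports Blender, so run it inside Blender 2.4x):

    import BPyTextPlugin
    line = 'aaa.bbb.ccc.ddd'
    cursor = 10                                        # cursor between 'cc' and 'c': aaa.bbb.cc|c.ddd
    print BPyTextPlugin.get_targets(line, cursor)      # -> ['aaa', 'bbb', 'cc']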
If no period is - given the text will not be reparsed and the cached version will be returned. - Otherwise if the period has expired the text will be reparsed. - """ - - desc = get_cached_descriptor(txt, period) - print '================================================' - print 'Name:', desc.name, '('+str(hash(txt))+')' - print '------------------------------------------------' - print 'Defs:' - for name, ddesc in desc.defs.items(): - print ' ', name, ddesc.params, ddesc.lineno - print ' ', ddesc.doc - print '------------------------------------------------' - print 'Vars:' - for name, vdesc in desc.vars.items(): - print ' ', name, vdesc.type, vdesc.lineno - print '------------------------------------------------' - print 'Imports:' - for name, item in desc.imports.items(): - print ' ', name.ljust(15), item - print '------------------------------------------------' - print 'Classes:' - for clsnme, clsdsc in desc.classes.items(): - print ' *********************************' - print ' Name:', clsnme - print ' ', clsdsc.doc - print ' ---------------------------------' - print ' Defs:' - for name, ddesc in clsdsc.defs.items(): - print ' ', name, ddesc.params, ddesc.lineno - print ' ', ddesc.doc - print ' ---------------------------------' - print ' Vars:' - for name, vdesc in clsdsc.vars.items(): - print ' ', name, vdesc.type, vdesc.lineno - print ' *********************************' - print '================================================' diff --git a/release/scripts/bpymodules/BPyWindow.py b/release/scripts/bpymodules/BPyWindow.py deleted file mode 100644 index d3fd4fa88b5..00000000000 --- a/release/scripts/bpymodules/BPyWindow.py +++ /dev/null @@ -1,206 +0,0 @@ -import Blender -from Blender import Mathutils, Window, Scene, Draw, Mesh -from Blender.Mathutils import Matrix, Vector, Intersect - -# DESCRIPTION: -# screen_x, screen_y the origin point of the pick ray -# it is either the mouse location -# localMatrix is used if you want to have the returned values in an objects localspace. -# this is usefull when dealing with an objects data such as verts. -# or if useMid is true, the midpoint of the current 3dview -# returns -# Origin - the origin point of the pick ray -# Direction - the direction vector of the pick ray -# in global coordinates -epsilon = 1e-3 # just a small value to account for floating point errors - -def mouseViewRay(screen_x, screen_y, localMatrix=None, useMid = False): - - # Constant function variables - p = mouseViewRay.p - d = mouseViewRay.d - - for win3d in Window.GetScreenInfo(Window.Types.VIEW3D): # we search all 3dwins for the one containing the point (screen_x, screen_y) (could be the mousecoords for example) - win_min_x, win_min_y, win_max_x, win_max_y = win3d['vertices'] - # calculate a few geometric extents for this window - - win_mid_x = (win_max_x + win_min_x + 1.0) * 0.5 - win_mid_y = (win_max_y + win_min_y + 1.0) * 0.5 - win_size_x = (win_max_x - win_min_x + 1.0) * 0.5 - win_size_y = (win_max_y - win_min_y + 1.0) * 0.5 - - #useMid is for projecting the coordinates when we subdivide the screen into bins - if useMid: # == True - screen_x = win_mid_x - screen_y = win_mid_y - - # if the given screencoords (screen_x, screen_y) are within the 3dwin we fount the right one... - if (win_max_x > screen_x > win_min_x) and ( win_max_y > screen_y > win_min_y): - # first we handle all pending events for this window (otherwise the matrices might come out wrong) - Window.QHandle(win3d['id']) - - # now we get a few matrices for our window... 
- # sorry - i cannot explain here what they all do - # - if you're not familiar with all those matrices take a look at an introduction to OpenGL... - pm = Window.GetPerspMatrix() # the prespective matrix - pmi = Matrix(pm); pmi.invert() # the inverted perspective matrix - - if (1.0 - epsilon < pmi[3][3] < 1.0 + epsilon): - # pmi[3][3] is 1.0 if the 3dwin is in ortho-projection mode (toggled with numpad 5) - hms = mouseViewRay.hms - ortho_d = mouseViewRay.ortho_d - - # ortho mode: is a bit strange - actually there's no definite location of the camera ... - # but the camera could be displaced anywhere along the viewing direction. - - ortho_d.x, ortho_d.y, ortho_d.z = Window.GetViewVector() - ortho_d.w = 0 - - # all rays are parallel in ortho mode - so the direction vector is simply the viewing direction - #hms.x, hms.y, hms.z, hms.w = (screen_x-win_mid_x) /win_size_x, (screen_y-win_mid_y) / win_size_y, 0.0, 1.0 - hms[:] = (screen_x-win_mid_x) /win_size_x, (screen_y-win_mid_y) / win_size_y, 0.0, 1.0 - - # these are the homogenious screencoords of the point (screen_x, screen_y) ranging from -1 to +1 - p=(hms*pmi) + (1000*ortho_d) - p.resize3D() - d[:] = ortho_d[:3] - - - # Finally we shift the position infinitely far away in - # the viewing direction to make sure the camera if outside the scene - # (this is actually a hack because this function - # is used in sculpt_mesh to initialize backface culling...) - else: - # PERSPECTIVE MODE: here everything is well defined - all rays converge at the camera's location - vmi = Matrix(Window.GetViewMatrix()); vmi.invert() # the inverse viewing matrix - fp = mouseViewRay.fp - - dx = pm[3][3] * (((screen_x-win_min_x)/win_size_x)-1.0) - pm[3][0] - dy = pm[3][3] * (((screen_y-win_min_y)/win_size_y)-1.0) - pm[3][1] - - fp[:] = \ - pmi[0][0]*dx+pmi[1][0]*dy,\ - pmi[0][1]*dx+pmi[1][1]*dy,\ - pmi[0][2]*dx+pmi[1][2]*dy - - # fp is a global 3dpoint obtained from "unprojecting" the screenspace-point (screen_x, screen_y) - #- figuring out how to calculate this took me quite some time. - # The calculation of dxy and fp are simplified versions of my original code - #- so it's almost impossible to explain what's going on geometrically... sorry - - p[:] = vmi[3][:3] - - # the camera's location in global 3dcoords can be read directly from the inverted viewmatrix - #d.x, d.y, d.z =normalize_v3(sub_v3v3(p, fp)) - d[:] = p.x-fp.x, p.y-fp.y, p.z-fp.z - - #print 'd', d, 'p', p, 'fp', fp - - - # the direction vector is simply the difference vector from the virtual camera's position - #to the unprojected (screenspace) point fp - - # Do we want to return a direction in object's localspace? - - if localMatrix: - localInvMatrix = Matrix(localMatrix) - localInvMatrix.invert() - localInvMatrix_notrans = localInvMatrix.rotationPart() - p = p * localInvMatrix - d = d * localInvMatrix # normalize_v3 - - # remove the translation from d - d.x -= localInvMatrix[3][0] - d.y -= localInvMatrix[3][1] - d.z -= localInvMatrix[3][2] - - - d.normalize() - ''' - # Debugging - me = Blender.Mesh.New() - me.verts.extend([p[0:3]]) - me.verts.extend([(p-d)[0:3]]) - me.edges.extend([0,1]) - ob = Blender.Scene.GetCurrent().objects.new(me) - ''' - return True, p, d # Origin, Direction - - # Mouse is not in any view, return None. 
- return False, None, None - -# Constant function variables -mouseViewRay.d = Vector(0,0,0) # Perspective, 3d -mouseViewRay.p = Vector(0,0,0) -mouseViewRay.fp = Vector(0,0,0) - -mouseViewRay.hms = Vector(0,0,0,0) # ortho only 4d -mouseViewRay.ortho_d = Vector(0,0,0,0) # ortho only 4d - - -LMB= Window.MButs['L'] -def mouseup(): - # Loop until click - mouse_buttons = Window.GetMouseButtons() - while not mouse_buttons & LMB: - Blender.sys.sleep(10) - mouse_buttons = Window.GetMouseButtons() - while mouse_buttons & LMB: - Blender.sys.sleep(10) - mouse_buttons = Window.GetMouseButtons() - - -if __name__=='__main__': - mouseup() - x,y= Window.GetMouseCoords() - isect, point, dir= mouseViewRay(x,y) - if isect: - scn= Blender.Scene.GetCurrent() - me = Blender.Mesh.New() - ob= Blender.Object.New('Mesh') - ob.link(me) - scn.link(ob) - ob.sel= 1 - me.verts.extend([point, dir]) - me.verts[0].sel= 1 - - print isect, point, dir - - - -def spaceRect(): - ''' - Returns the space rect - xmin,ymin,width,height - ''' - - __UI_RECT__ = Blender.BGL.Buffer(Blender.BGL.GL_FLOAT, 4) - Blender.BGL.glGetFloatv(Blender.BGL.GL_SCISSOR_BOX, __UI_RECT__) - __UI_RECT__ = __UI_RECT__.list - __UI_RECT__ = int(__UI_RECT__[0]), int(__UI_RECT__[1]), int(__UI_RECT__[2])-1, int(__UI_RECT__[3]) - - return __UI_RECT__ - -def mouseRelativeLoc2d(__UI_RECT__= None): - if not __UI_RECT__: - __UI_RECT__ = spaceRect() - - mco = Window.GetMouseCoords() - if mco[0] > __UI_RECT__[0] and\ - mco[1] > __UI_RECT__[1] and\ - mco[0] < __UI_RECT__[0] + __UI_RECT__[2] and\ - mco[1] < __UI_RECT__[1] + __UI_RECT__[3]: - - return (mco[0] - __UI_RECT__[0], mco[1] - __UI_RECT__[1]) - - else: - return None - - - - - - - - - \ No newline at end of file diff --git a/release/scripts/bpymodules/blend2renderinfo.py b/release/scripts/bpymodules/blend2renderinfo.py deleted file mode 100644 index 1b9dec58d55..00000000000 --- a/release/scripts/bpymodules/blend2renderinfo.py +++ /dev/null @@ -1,95 +0,0 @@ -#!/usr/bin/python - -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -import struct - -# In Blender, selecting scenes in the databrowser (shift+f4) will tag for rendering. - -# This struct wont change according to ton. 
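For reference, a standalone sketch of the same .blend header walk performed by the parser below, for the common little-endian 32-bit case (the file path is hypothetical; the real function handles both endiannesses and both pointer sizes):

    import struct

    f = open('/tmp/scene.blend', 'rb')           # hypothetical path
    assert f.read(7) == 'BLENDER'                # file magic
    is64bit = f.read(1) == '-'                   # '-' = 64bit pointers, '_' = 32bit
    big_endian = f.read(1) == 'V'                # 'V' = big endian (ppc), 'v' = little endian (x86)
    f.read(3)                                    # version digits, e.g. '249'

    if f.read(4) == 'REND':                      # first BHead code; scenes tagged for rendering
        fmt = '>i' if big_endian else '<i'
        rend_length = struct.unpack(fmt, f.read(4))[0]
        print rend_length                        # payload length of this REND block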
-# Note that the size differs on 32/64bit -''' -typedef struct BHead { - int code, len; - void *old; - int SDNAnr, nr; -} BHead; -''' - - -def read_blend_rend_chunk(path): - file = open(path, 'rb') - - if file.read(len('BLENDER')) != 'BLENDER': - return [] - - # - if file.read(1) == '-': - is64bit = True - else: # '_' - is64bit = False - - if file.read(1) == 'V': - isBigEndian = True # ppc - else: # 'V' - isBigEndian = False # x86 - - - # Now read the bhead chunk!!! - file.read(3) # skip the version - - scenes = [] - - while file.read(4) == 'REND': - - if is64bit: sizeof_bhead = sizeof_bhead_left = 24 # 64bit - else: sizeof_bhead = sizeof_bhead_left = 20 # 32bit - - sizeof_bhead_left -= 4 - - if isBigEndian: rend_length = struct.unpack('>i', file.read(4))[0] - else: rend_length = struct.unpack('<i', file.read(4))[0] - - sizeof_bhead_left -= 4 - - file.read(sizeof_bhead_left) - - if isBigEndian: start_frame, end_frame = struct.unpack('>2i', file.read(8)) - else: start_frame, end_frame = struct.unpack('<2i', file.read(8)) - - scene_name = file.read(24) - scene_name = scene_name[ : scene_name.index('\0') ] - - scenes.append( (start_frame, end_frame, scene_name) ) - return scenes - -def main(): - import sys - for arg in sys.argv[1:]: - if arg.lower().endswith('.blend'): - print read_blend_rend_chunk(arg) - -if __name__ == '__main__': - main() - diff --git a/release/scripts/bpymodules/defaultdoodads.py b/release/scripts/bpymodules/defaultdoodads.py deleted file mode 100644 index 987b8b8ae71..00000000000 --- a/release/scripts/bpymodules/defaultdoodads.py +++ /dev/null @@ -1,941 +0,0 @@ -# Default Doodad Set for Discombobulator -# by Evan J. Rosky, 2005 -# GPL- http://www.gnu.org/copyleft/gpl.html -# -# $Id$ -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Copyright (C) 2005: Evan J. Rosky -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - - -#Run discombobulator.py, not this. - -import Blender -from Blender import NMesh,Object,Material -from Blender.NMesh import Vert,Face -from Blender.Mathutils import * - -import BPyMathutils -from BPyMathutils import genrand -a = BPyMathutils.sgenrand(4859) - -#Create random numbers -def randnum(low,high): - num = genrand() - num = num*(high-low) - num = num+low - return num - -face = Face() -xmin = Vector([0,0,0]) -xmax = Vector([0,0,0]) -ymin = Vector([0,0,0]) -ymax = Vector([0,0,0]) -mxmin = Vector([0,0,0]) -mxmax = Vector([0,0,0]) -mymin = Vector([0,0,0]) -mymax = Vector([0,0,0]) -doodadCenter = Vector([0,0,0]) -orientation = 0 -center = Vector([0,0,0]) -tosel = 0 -seltopsonly = 0 -tempx = [] -doodadMesh = NMesh.GetRaw() - -global materialArray -global reassignMats -global thereAreMats -global currmat -global doodSideMat -global doodTopMat - -#face is the face to add the doodad to. 
-#sizeX and sizeY are values from 0.0 to 1.0 that represents a percentage the face that is covered by the doodad. -#height is how tall the doodad is. - -def settings(seltops,matArr,reasMats,therMats,sidemat,topmat): - global seltopsonly - global materialArray - global reassignMats - global thereAreMats - global currmat - global doodSideMat - global doodTopMat - materialArray = matArr - reassignMats = reasMats - thereAreMats = therMats - seltopsonly = seltops - doodSideMat = sidemat - doodTopMat = topmat - -def setCurrMat(curma): - global currmat - currmat = curma - -#Find center and orientation of doodad -def findDoodadCenter(sizeX, sizeY): - #globalizing junk - global face - global xmin - global xmax - global ymin - global ymax - global orientation - global doodadCenter - global center - global tosel - global mxmin - global mxmax - global mymin - global mymax - global tempx - global seltopsonly - - #Find the center of the face - center = Vector([0,0,0]) - for pt in face.v: - center = center + pt.co - center = divideVectorByInt(center,len(face.v)) - - #Find Temp Location Range by looking at the sizes - txmin = ((divideVectorByInt((face.v[0].co + face.v[3].co),2)) - center)*(1-sizeX) + center - txmax = ((divideVectorByInt((face.v[1].co + face.v[2].co),2)) - center)*(1-sizeX) + center - tymin = ((divideVectorByInt((face.v[0].co + face.v[1].co),2)) - center)*(1-sizeY) + center - tymax = ((divideVectorByInt((face.v[2].co + face.v[3].co),2)) - center)*(1-sizeY) + center - - #Find Center of doodad - amtx = randnum(0.0,1.0) - amty = randnum(0.0,1.0) - thepoint = (((((txmin - txmax)*amtx + txmax) - ((tymin - tymax)*amty + tymax))*.5 + ((tymin - tymax)*amty + tymax)) - center)*2 + center - doodadCenter = Vector([thepoint[0],thepoint[1],thepoint[2]]) - - #Find Main Range by looking at the sizes - mxmin = divideVectorByInt((face.v[0].co + face.v[3].co),2) - mxmax = divideVectorByInt((face.v[1].co + face.v[2].co),2) - mymin = divideVectorByInt((face.v[0].co + face.v[1].co),2) - mymax = divideVectorByInt((face.v[2].co + face.v[3].co),2) - - #Find x/y equivs for whole face - ve1 = (txmin - txmax)*amtx + txmax - ve1 = ve1 - mxmax - nax = ve1.length - ve1 = (mxmin - mxmax) - nax = nax/ve1.length - - ve1 = (tymin - tymax)*amty + tymax - ve1 = ve1 - mymax - nay = ve1.length - ve1 = (mymin - mymax) - nay = nay/ve1.length - - #Find new box thing - tempx = [] - amtx = nax-sizeX/2 - amty = nay-sizeY/2 - tempx.append((((((mxmin - mxmax)*amtx + mxmax) - ((mymin - mymax)*amty + mymax))*.5 + ((mymin - mymax)*amty + mymax)) - center)*2 + center) - - amtx = nax-sizeX/2 - amty = nay+sizeY/2 - tempx.append((((((mxmin - mxmax)*amtx + mxmax) - ((mymin - mymax)*amty + mymax))*.5 + ((mymin - mymax)*amty + mymax)) - center)*2 + center) - - amtx = nax+sizeX/2 - amty = nay+sizeY/2 - tempx.append((((((mxmin - mxmax)*amtx + mxmax) - ((mymin - mymax)*amty + mymax))*.5 + ((mymin - mymax)*amty + mymax)) - center)*2 + center) - - amtx = nax+sizeX/2 - amty = nay-sizeY/2 - tempx.append((((((mxmin - mxmax)*amtx + mxmax) - ((mymin - mymax)*amty + mymax))*.5 + ((mymin - mymax)*amty + mymax)) - center)*2 + center) - - #Find New Location Range by looking at the sizes - xmin = divideVectorByInt((tempx[0] + tempx[3]),2) - xmax = divideVectorByInt((tempx[1] + tempx[2]),2) - ymin = divideVectorByInt((tempx[0] + tempx[1]),2) - ymax = divideVectorByInt((tempx[2] + tempx[3]),2) - -#Make a point -def makePoint(x,y,z=0): - global xmin - global xmax - global ymin - global ymax - global doodadCenter - global tosel - global seltopsonly - global face 
- - amtx = x - amty = y - thepoint = (((((xmin - xmax)*amtx + xmax) - ((ymin - ymax)*amty + ymax))*.5 + ((ymin - ymax)*amty + ymax)) - doodadCenter)*2 + doodadCenter - thepoint = thepoint + z*Vector(face.no) - tver = Vert(thepoint[0],thepoint[1],thepoint[2]) - if tosel == 1 and seltopsonly == 0 and z == 0: - tver.sel = 1 - return tver - -#extrude ground-plane(s) -def extrudedoodad(vArray,heig): - global face - global doodadMesh - global tosel - - topVArray = [] - - doodadMesh.verts.extend(vArray) - - #Create array for extruded verts - for ind in range(0,(len(vArray))): - point = vArray[ind].co + heig*Vector(face.no) - ver = Vert(point[0],point[1],point[2]) - if tosel == 1: - ver.sel = 1 - topVArray.append(ver) - doodadMesh.verts.append(topVArray[ind]) - - #make faces around sides - for ind in range(0,(len(vArray) - 1)): - face = Face() - face.v.extend([vArray[ind],vArray[ind+1],topVArray[ind+1],topVArray[ind]]) - if tosel == 1 and seltopsonly == 0: face.sel = 1 - if thereAreMats == 1: - if reassignMats == 0 or doodSideMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodSideMat-1 - doodadMesh.faces.append(face) - face = Face() - face.v.extend([vArray[len(vArray) - 1],vArray[0],topVArray[0],topVArray[len(topVArray) - 1]]) - if tosel == 1 and seltopsonly == 0: - face.sel = 1 - if thereAreMats == 1: - if reassignMats == 0 or doodSideMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodSideMat-1 - doodadMesh.faces.append(face) - - return topVArray - -#For switching face vertices -def fixvertindex(ind): - if ind > 3: - indx = ind - 4 - else: - indx = ind - return indx - -#runs doodads -def createDoodad(indexArray,facec,minsi,maxsi,minhei,maxhei,selec,amtmin,amtmax,facpercent): - global doodadMesh - global seltopsonly - global tosel - - doodadMesh = NMesh.GetRaw() - - theamt = round(randnum(amtmin,amtmax),0) - theamt = int(theamt) - tosel = selec - - for i in range(0,(theamt)): - if randnum(0,1) <= facpercent: - index = round(randnum(1,len(indexArray)),0) - index = indexArray[(int(index) - 1)] - - Xsi = randnum(minsi,maxsi) - Ysi = randnum(minsi,maxsi) - hei = randnum(minhei,maxhei) - - #Determine orientation - orient = int(round(randnum(0.0,3.0))) - - #face to use as range - facer = Face() - facer.v.extend([facec.v[orient],facec.v[fixvertindex(1+orient)],facec.v[fixvertindex(2+orient)],facec.v[fixvertindex(3+orient)]]) - - if index == 1: - singleBox(facer,Xsi,Ysi,hei) - if index == 2: - doubleBox(facer,Xsi,Ysi,hei) - if index == 3: - tripleBox(facer,Xsi,Ysi,hei) - if index == 4: - LShape(facer,Xsi,Ysi,hei) - if index == 5: - TShape(facer,Xsi,Ysi,hei) - if index == 6: - if randnum(0.0,1.0) > .5: - SShape(facer,Xsi,Ysi,hei) - else: - ZShape(facer,Xsi,Ysi,hei) - - return doodadMesh - -def divideVectorByInt(thevect,theint): - thevect.x = thevect.x/theint - thevect.y = thevect.y/theint - thevect.z = thevect.z/theint - return thevect - -#Single Box Doodad -def singleBox(facel, Xsize, Ysize, height): - #globaling junk - global face - global tosel - global doodadMesh - - face = Face() - face = facel - - findDoodadCenter(Xsize, Ysize) - - vertArray = [] - - #place four points - vertArray.append(makePoint(0,0)) - vertArray.append(makePoint(0,1)) - vertArray.append(makePoint(1,1)) - vertArray.append(makePoint(1,0)) - topVertArray = extrudedoodad(vertArray,height) - - face = Face() - face.v.extend(vertArray) - face.v.reverse() - doodadMesh.faces.append(face) - face = Face() - face.v.extend(topVertArray) - if tosel == 1: - face.sel = 1 - if thereAreMats == 1: - 
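#doodTopMat/doodSideMat passed in through settings() appear to be stored
#1-based, with 0 meaning "keep the current material"; hence the -1 wherever
#a dedicated doodad material is assigned.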
if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - -#Double Box Doodad -def doubleBox(facel, Xsize, Ysize, height): - #globaling junk - global face - global tosel - global doodadMesh - - face = Face() - face = facel - - findDoodadCenter(Xsize, Ysize) - - vertArray = [] - - #place first box - vertArray.append(makePoint(0,0)) - vertArray.append(makePoint(0,1)) - vertArray.append(makePoint(0.45,1)) - vertArray.append(makePoint(0.45,0)) - topVertArray = extrudedoodad(vertArray,height) - - face = Face() - face.v.extend(vertArray) - face.v.reverse() - doodadMesh.faces.append(face) - face = Face() - face.v.extend(topVertArray) - if tosel == 1: - face.sel = 1 - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - - vertArray = [] - - #place second box - vertArray.append(makePoint(0.55,0)) - vertArray.append(makePoint(0.55,1)) - vertArray.append(makePoint(1,1)) - vertArray.append(makePoint(1,0)) - topVertArray = extrudedoodad(vertArray,height) - - face = Face() - face.v.extend(vertArray) - face.v.reverse() - doodadMesh.faces.append(face) - face = Face() - face.v.extend(topVertArray) - if tosel == 1: - face.sel = 1 - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - -#Triple Box Doodad -def tripleBox(facel, Xsize, Ysize, height): - #globaling junk - global face - global tosel - global doodadMesh - - face = Face() - face = facel - - findDoodadCenter(Xsize, Ysize) - - vertArray = [] - - #place first box - vertArray.append(makePoint(0,0)) - vertArray.append(makePoint(0,1)) - vertArray.append(makePoint(0.3,1)) - vertArray.append(makePoint(0.3,0)) - topVertArray = extrudedoodad(vertArray,height) - - face = Face() - face.v.extend(vertArray) - face.v.reverse() - doodadMesh.faces.append(face) - face = Face() - face.v.extend(topVertArray) - if tosel == 1: - face.sel = 1 - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - - vertArray = [] - - #place second box - vertArray.append(makePoint(0.35,0)) - vertArray.append(makePoint(0.35,1)) - vertArray.append(makePoint(0.65,1)) - vertArray.append(makePoint(0.65,0)) - topVertArray = extrudedoodad(vertArray,height) - - face = Face() - face.v.extend(vertArray) - face.v.reverse() - doodadMesh.faces.append(face) - face = Face() - face.v.extend(topVertArray) - if tosel == 1: - face.sel = 1 - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - - vertArray = [] - - #place third box - vertArray.append(makePoint(0.7,0)) - vertArray.append(makePoint(0.7,1)) - vertArray.append(makePoint(1,1)) - vertArray.append(makePoint(1,0)) - topVertArray = extrudedoodad(vertArray,height) - - face = Face() - face.v.extend(vertArray) - face.v.reverse() - doodadMesh.faces.append(face) - face = Face() - face.v.extend(topVertArray) - if tosel == 1: - face.sel = 1 - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - -#The "L" Shape -def LShape(facel, Xsize, Ysize, height): - #globaling junk - 
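#LShape builds an L-shaped footprint from eight perimeter points, with the
#two leg widths picked at random (rcon1, rcon2), extrudes it by height, and
#caps the bottom and the top with three quads each.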
global face - global tosel - global doodadMesh - - face = Face() - face = facel - - findDoodadCenter(Xsize, Ysize) - - rcon1 = randnum(0.2,0.8) - rcon2 = randnum(0.2,0.8) - - vertArray = [] - - #place L shape - vertArray.append(makePoint(0,0)) - vertArray.append(makePoint(0,rcon1)) - vertArray.append(makePoint(0,1)) - vertArray.append(makePoint(rcon2,1)) - vertArray.append(makePoint(rcon2,rcon1)) - vertArray.append(makePoint(1,rcon1)) - vertArray.append(makePoint(1,0)) - vertArray.append(makePoint(rcon2,0)) - topVertArray = extrudedoodad(vertArray,height) - - #This fills in the bottom of doodad with faceness - face = Face() - face.v.extend([vertArray[0],vertArray[1],vertArray[4],vertArray[7]]) - face.v.reverse() - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - face = Face() - face.v.extend([vertArray[1],vertArray[2],vertArray[3],vertArray[4]]) - face.v.reverse() - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - face = Face() - face.v.extend([vertArray[4],vertArray[5],vertArray[6],vertArray[7]]) - face.v.reverse() - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - - #This fills in the top with faceness - face = Face() - face.v.extend([topVertArray[0],topVertArray[1],topVertArray[4],topVertArray[7]]) - if tosel == 1: - face.sel = 1 - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - face = Face() - face.v.extend([topVertArray[1],topVertArray[2],topVertArray[3],topVertArray[4]]) - if tosel == 1: - face.sel = 1 - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - face = Face() - face.v.extend([topVertArray[4],topVertArray[5],topVertArray[6],topVertArray[7]]) - if tosel == 1: - face.sel = 1 - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - -#The "T" Shape -def TShape(facel, Xsize, Ysize, height): - #globaling junk - global face - global tosel - global doodadMesh - - face = Face() - face = facel - - findDoodadCenter(Xsize, Ysize) - - rcony = randnum(0.25,0.75) - rconx1 = randnum(0.1,0.49) - rconx2 = randnum(0.51,0.9) - - vertArray = [] - - #place T shape - vertArray.append(makePoint(0,0)) - vertArray.append(makePoint(0,rcony)) - vertArray.append(makePoint(rconx1,rcony)) - vertArray.append(makePoint(rconx1,1)) - vertArray.append(makePoint(rconx2,1)) - vertArray.append(makePoint(rconx2,rcony)) - vertArray.append(makePoint(1,rcony)) - vertArray.append(makePoint(1,0)) - vertArray.append(makePoint(rconx2,0)) - vertArray.append(makePoint(rconx1,0)) - topVertArray = extrudedoodad(vertArray,height) - - #fills bottom with faceness - face = Face() - face.v.extend([vertArray[0],vertArray[1],vertArray[2],vertArray[9]]) - face.v.reverse() - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - face = Face() - 
face.v.extend([vertArray[2],vertArray[3],vertArray[4],vertArray[5]]) - face.v.reverse() - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - face = Face() - face.v.extend([vertArray[5],vertArray[6],vertArray[7],vertArray[8]]) - face.v.reverse() - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - face = Face() - face.v.extend([vertArray[8],vertArray[9],vertArray[2],vertArray[5]]) - face.v.reverse() - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - - #fills top with faceness - face = Face() - face.v.extend([topVertArray[0],topVertArray[1],topVertArray[2],topVertArray[9]]) - if tosel == 1: - face.sel = 1 - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - face = Face() - face.v.extend([topVertArray[2],topVertArray[3],topVertArray[4],topVertArray[5]]) - if tosel == 1: - face.sel = 1 - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - face = Face() - face.v.extend([topVertArray[5],topVertArray[6],topVertArray[7],topVertArray[8]]) - if tosel == 1: - face.sel = 1 - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - face = Face() - face.v.extend([topVertArray[8],topVertArray[9],topVertArray[2],topVertArray[5]]) - if tosel == 1: - face.sel = 1 - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - -#The "S" or "Z" Shapes -def SShape(facel, Xsize, Ysize, height): - #globaling junk - global face - global tosel - global doodadMesh - - face = Face() - face = facel - - findDoodadCenter(Xsize, Ysize) - - rcony1 = randnum(0.1,0.49) - rcony2 = randnum(0.51,0.9) - rconx1 = randnum(0.1,0.49) - rconx2 = randnum(0.51,0.9) - - vertArray = [] - - #place S shape - vertArray.append(makePoint(0,0)) - vertArray.append(makePoint(0,rcony1)) - vertArray.append(makePoint(rconx1,rcony1)) - vertArray.append(makePoint(rconx1,rcony2)) - vertArray.append(makePoint(rconx1,1)) - vertArray.append(makePoint(rconx2,1)) - vertArray.append(makePoint(1,1)) - vertArray.append(makePoint(1,rcony2)) - vertArray.append(makePoint(rconx2,rcony2)) - vertArray.append(makePoint(rconx2,rcony1)) - vertArray.append(makePoint(rconx2,0)) - vertArray.append(makePoint(rconx1,0)) - topVertArray = extrudedoodad(vertArray,height) - - #fills bottom with faceness - face = Face() - face.v.extend([vertArray[0],vertArray[1],vertArray[2],vertArray[11]]) - face.v.reverse() - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - face = Face() - face.v.extend([vertArray[2],vertArray[9],vertArray[10],vertArray[11]]) - face.v.reverse() - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - 
doodadMesh.faces.append(face) - face = Face() - face.v.extend([vertArray[2],vertArray[3],vertArray[8],vertArray[9]]) - face.v.reverse() - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - face = Face() - face.v.extend([vertArray[3],vertArray[4],vertArray[5],vertArray[8]]) - face.v.reverse() - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - face = Face() - face.v.extend([vertArray[5],vertArray[6],vertArray[7],vertArray[8]]) - face.v.reverse() - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - - #fills top with faceness - face = Face() - face.v.extend([topVertArray[0],topVertArray[1],topVertArray[2],topVertArray[11]]) - if tosel == 1: - face.sel = 1 - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - face = Face() - face.v.extend([topVertArray[2],topVertArray[9],topVertArray[10],topVertArray[11]]) - if tosel == 1: - face.sel = 1 - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - face = Face() - face.v.extend([topVertArray[2],topVertArray[3],topVertArray[8],topVertArray[9]]) - if tosel == 1: - face.sel = 1 - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - face = Face() - face.v.extend([topVertArray[3],topVertArray[4],topVertArray[5],topVertArray[8]]) - if tosel == 1: - face.sel = 1 - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - face = Face() - face.v.extend([topVertArray[5],topVertArray[6],topVertArray[7],topVertArray[8]]) - if tosel == 1: - face.sel = 1 - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - -def ZShape(facel, Xsize, Ysize, height): - #globaling junk - global face - global tosel - global doodadMesh - - face = Face() - face = facel - - findDoodadCenter(Xsize, Ysize) - - rcony1 = randnum(0.1,0.49) - rcony2 = randnum(0.51,0.9) - rconx1 = randnum(0.1,0.49) - rconx2 = randnum(0.51,0.9) - - vertArray = [] - - #place Z shape - vertArray.append(makePoint(0,0)) - vertArray.append(makePoint(0,rcony1)) - vertArray.append(makePoint(0,rcony2)) - vertArray.append(makePoint(rconx1,rcony2)) - vertArray.append(makePoint(rconx2,rcony2)) - vertArray.append(makePoint(rconx2,1)) - vertArray.append(makePoint(1,1)) - vertArray.append(makePoint(1,rcony2)) - vertArray.append(makePoint(1,rcony1)) - vertArray.append(makePoint(rconx2,rcony1)) - vertArray.append(makePoint(rconx1,rcony1)) - vertArray.append(makePoint(rconx1,0)) - topVertArray = extrudedoodad(vertArray,height) - - #fills bottom with faceness - face = Face() - face.v.extend([vertArray[0],vertArray[1],vertArray[10],vertArray[11]]) - face.v.reverse() - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - 
face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - face = Face() - face.v.extend([vertArray[1],vertArray[2],vertArray[3],vertArray[10]]) - face.v.reverse() - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - face = Face() - face.v.extend([vertArray[3],vertArray[4],vertArray[9],vertArray[10]]) - face.v.reverse() - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - face = Face() - face.v.extend([vertArray[4],vertArray[7],vertArray[8],vertArray[9]]) - face.v.reverse() - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - face = Face() - face.v.extend([vertArray[4],vertArray[5],vertArray[6],vertArray[7]]) - face.v.reverse() - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - - #fills top with faceness - face = Face() - face.v.extend([topVertArray[0],topVertArray[1],topVertArray[10],topVertArray[11]]) - if tosel == 1: - face.sel = 1 - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - face = Face() - face.v.extend([topVertArray[1],topVertArray[2],topVertArray[3],topVertArray[10]]) - if tosel == 1: - face.sel = 1 - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - face = Face() - face.v.extend([topVertArray[3],topVertArray[4],topVertArray[9],topVertArray[10]]) - if tosel == 1: - face.sel = 1 - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - face = Face() - face.v.extend([topVertArray[4],topVertArray[7],topVertArray[8],topVertArray[9]]) - if tosel == 1: - face.sel = 1 - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - face = Face() - face.v.extend([topVertArray[4],topVertArray[5],topVertArray[6],topVertArray[7]]) - if tosel == 1: - face.sel = 1 - if thereAreMats == 1: - if reassignMats == 0 or doodTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = doodTopMat-1 - doodadMesh.faces.append(face) - diff --git a/release/scripts/bpymodules/dxfColorMap.py b/release/scripts/bpymodules/dxfColorMap.py deleted file mode 100644 index 66c0bd4e9a2..00000000000 --- a/release/scripts/bpymodules/dxfColorMap.py +++ /dev/null @@ -1,282 +0,0 @@ -# dictionary mapping AutoCAD color indexes with Blender colors - -# -------------------------------------------------------------------------- -# color_map.py Final by Ed Blake (AKA Kitsu) -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -color_map = { - 0:[0.0, 0.0, 0.0], - 1:[0.99609375, 0.0, 0.0], - 2:[0.99609375, 0.99609375, 0.0], - 3:[0.0, 0.99609375, 0.0], - 4:[0.0, 0.99609375, 0.99609375], - 5:[0.0, 0.0, 0.99609375], - 6:[0.99609375, 0.0, 0.99609375], - 7:[0.99609375, 0.99609375, 0.99609375], - 8:[0.25390625, 0.25390625, 0.25390625], - 9:[0.5, 0.5, 0.5], - 10:[0.99609375, 0.0, 0.0], - 11:[0.99609375, 0.6640625, 0.6640625], - 12:[0.73828125, 0.0, 0.0], - 13:[0.73828125, 0.4921875, 0.4921875], - 14:[0.50390625, 0.0, 0.0], - 15:[0.50390625, 0.3359375, 0.3359375], - 16:[0.40625, 0.0, 0.0], - 17:[0.40625, 0.26953125, 0.26953125], - 18:[0.30859375, 0.0, 0.0], - 19:[0.30859375, 0.20703125, 0.20703125], - 20:[0.99609375, 0.24609375, 0.0], - 21:[0.99609375, 0.74609375, 0.6640625], - 22:[0.73828125, 0.1796875, 0.0], - 23:[0.73828125, 0.55078125, 0.4921875], - 24:[0.50390625, 0.12109375, 0.0], - 25:[0.50390625, 0.375, 0.3359375], - 26:[0.40625, 0.09765625, 0.0], - 27:[0.40625, 0.3046875, 0.26953125], - 28:[0.30859375, 0.07421875, 0.0], - 29:[0.30859375, 0.23046875, 0.20703125], - 30:[0.99609375, 0.49609375, 0.0], - 31:[0.99609375, 0.828125, 0.6640625], - 32:[0.73828125, 0.3671875, 0.0], - 33:[0.73828125, 0.61328125, 0.4921875], - 34:[0.50390625, 0.25, 0.0], - 35:[0.50390625, 0.41796875, 0.3359375], - 36:[0.40625, 0.203125, 0.0], - 37:[0.40625, 0.3359375, 0.26953125], - 38:[0.30859375, 0.15234375, 0.0], - 39:[0.30859375, 0.2578125, 0.20703125], - 40:[0.99609375, 0.74609375, 0.0], - 41:[0.99609375, 0.9140625, 0.6640625], - 42:[0.73828125, 0.55078125, 0.0], - 43:[0.73828125, 0.67578125, 0.4921875], - 44:[0.50390625, 0.375, 0.0], - 45:[0.50390625, 0.4609375, 0.3359375], - 46:[0.40625, 0.3046875, 0.0], - 47:[0.40625, 0.37109375, 0.26953125], - 48:[0.30859375, 0.23046875, 0.0], - 49:[0.30859375, 0.28515625, 0.20703125], - 50:[0.99609375, 0.99609375, 0.0], - 51:[0.99609375, 0.99609375, 0.6640625], - 52:[0.73828125, 0.73828125, 0.0], - 53:[0.73828125, 0.73828125, 0.4921875], - 54:[0.50390625, 0.50390625, 0.0], - 55:[0.50390625, 0.50390625, 0.3359375], - 56:[0.40625, 0.40625, 0.0], - 57:[0.40625, 0.40625, 0.26953125], - 58:[0.30859375, 0.30859375, 0.0], - 59:[0.30859375, 0.30859375, 0.20703125], - 60:[0.74609375, 0.99609375, 0.0], - 61:[0.9140625, 0.99609375, 0.6640625], - 62:[0.55078125, 0.73828125, 0.0], - 63:[0.67578125, 0.73828125, 0.4921875], - 64:[0.375, 0.50390625, 0.0], - 65:[0.4609375, 0.50390625, 0.3359375], - 66:[0.3046875, 0.40625, 0.0], - 67:[0.37109375, 0.40625, 0.26953125], - 68:[0.23046875, 0.30859375, 0.0], - 69:[0.28515625, 0.30859375, 0.20703125], - 70:[0.49609375, 0.99609375, 0.0], - 71:[0.828125, 0.99609375, 0.6640625], - 72:[0.3671875, 0.73828125, 0.0], - 73:[0.61328125, 0.73828125, 0.4921875], - 74:[0.25, 0.50390625, 0.0], - 75:[0.41796875, 0.50390625, 0.3359375], - 76:[0.203125, 0.40625, 0.0], - 77:[0.3359375, 0.40625, 0.26953125], - 78:[0.15234375, 0.30859375, 0.0], - 79:[0.2578125, 0.30859375, 0.20703125], - 80:[0.24609375, 0.99609375, 
0.0], - 81:[0.74609375, 0.99609375, 0.6640625], - 82:[0.1796875, 0.73828125, 0.0], - 83:[0.55078125, 0.73828125, 0.4921875], - 84:[0.12109375, 0.50390625, 0.0], - 85:[0.375, 0.50390625, 0.3359375], - 86:[0.09765625, 0.40625, 0.0], - 87:[0.3046875, 0.40625, 0.26953125], - 88:[0.07421875, 0.30859375, 0.0], - 89:[0.23046875, 0.30859375, 0.20703125], - 90:[0.0, 0.99609375, 0.0], - 91:[0.6640625, 0.99609375, 0.6640625], - 92:[0.0, 0.73828125, 0.0], - 93:[0.4921875, 0.73828125, 0.4921875], - 94:[0.0, 0.50390625, 0.0], - 95:[0.3359375, 0.50390625, 0.3359375], - 96:[0.0, 0.40625, 0.0], - 97:[0.26953125, 0.40625, 0.26953125], - 98:[0.0, 0.30859375, 0.0], - 99:[0.20703125, 0.30859375, 0.20703125], - 100:[0.0, 0.99609375, 0.24609375], - 101:[0.6640625, 0.99609375, 0.74609375], - 102:[0.0, 0.73828125, 0.1796875], - 103:[0.4921875, 0.73828125, 0.55078125], - 104:[0.0, 0.50390625, 0.12109375], - 105:[0.3359375, 0.50390625, 0.375], - 106:[0.0, 0.40625, 0.09765625], - 107:[0.26953125, 0.40625, 0.3046875], - 108:[0.0, 0.30859375, 0.07421875], - 109:[0.20703125, 0.30859375, 0.23046875], - 110:[0.0, 0.99609375, 0.49609375], - 111:[0.6640625, 0.99609375, 0.828125], - 112:[0.0, 0.73828125, 0.3671875], - 113:[0.4921875, 0.73828125, 0.61328125], - 114:[0.0, 0.50390625, 0.25], - 115:[0.3359375, 0.50390625, 0.41796875], - 116:[0.0, 0.40625, 0.203125], - 117:[0.26953125, 0.40625, 0.3359375], - 118:[0.0, 0.30859375, 0.15234375], - 119:[0.20703125, 0.30859375, 0.2578125], - 120:[0.0, 0.99609375, 0.74609375], - 121:[0.6640625, 0.99609375, 0.9140625], - 122:[0.0, 0.73828125, 0.55078125], - 123:[0.4921875, 0.73828125, 0.67578125], - 124:[0.0, 0.50390625, 0.375], - 125:[0.3359375, 0.50390625, 0.4609375], - 126:[0.0, 0.40625, 0.3046875], - 127:[0.26953125, 0.40625, 0.37109375], - 128:[0.0, 0.30859375, 0.23046875], - 129:[0.20703125, 0.30859375, 0.28515625], - 130:[0.0, 0.99609375, 0.99609375], - 131:[0.6640625, 0.99609375, 0.99609375], - 132:[0.0, 0.73828125, 0.73828125], - 133:[0.4921875, 0.73828125, 0.73828125], - 134:[0.0, 0.50390625, 0.50390625], - 135:[0.3359375, 0.50390625, 0.50390625], - 136:[0.0, 0.40625, 0.40625], - 137:[0.26953125, 0.40625, 0.40625], - 138:[0.0, 0.30859375, 0.30859375], - 139:[0.20703125, 0.30859375, 0.30859375], - 140:[0.0, 0.74609375, 0.99609375], - 141:[0.6640625, 0.9140625, 0.99609375], - 142:[0.0, 0.55078125, 0.73828125], - 143:[0.4921875, 0.67578125, 0.73828125], - 144:[0.0, 0.375, 0.50390625], - 145:[0.3359375, 0.4609375, 0.50390625], - 146:[0.0, 0.3046875, 0.40625], - 147:[0.26953125, 0.37109375, 0.40625], - 148:[0.0, 0.23046875, 0.30859375], - 149:[0.20703125, 0.28515625, 0.30859375], - 150:[0.0, 0.49609375, 0.99609375], - 151:[0.6640625, 0.828125, 0.99609375], - 152:[0.0, 0.3671875, 0.73828125], - 153:[0.4921875, 0.61328125, 0.73828125], - 154:[0.0, 0.25, 0.50390625], - 155:[0.3359375, 0.41796875, 0.50390625], - 156:[0.0, 0.203125, 0.40625], - 157:[0.26953125, 0.3359375, 0.40625], - 158:[0.0, 0.15234375, 0.30859375], - 159:[0.20703125, 0.2578125, 0.30859375], - 160:[0.0, 0.24609375, 0.99609375], - 161:[0.6640625, 0.74609375, 0.99609375], - 162:[0.0, 0.1796875, 0.73828125], - 163:[0.4921875, 0.55078125, 0.73828125], - 164:[0.0, 0.12109375, 0.50390625], - 165:[0.3359375, 0.375, 0.50390625], - 166:[0.0, 0.09765625, 0.40625], - 167:[0.26953125, 0.3046875, 0.40625], - 168:[0.0, 0.07421875, 0.30859375], - 169:[0.20703125, 0.23046875, 0.30859375], - 170:[0.0, 0.0, 0.99609375], - 171:[0.6640625, 0.6640625, 0.99609375], - 172:[0.0, 0.0, 0.73828125], - 173:[0.4921875, 0.4921875, 0.73828125], - 
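# (each RGB component above and below is an 8-bit value divided by 256,
#  so full intensity appears as 0.99609375 = 255/256 rather than 1.0)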
174:[0.0, 0.0, 0.50390625], - 175:[0.3359375, 0.3359375, 0.50390625], - 176:[0.0, 0.0, 0.40625], - 177:[0.26953125, 0.26953125, 0.40625], - 178:[0.0, 0.0, 0.30859375], - 179:[0.20703125, 0.20703125, 0.30859375], - 180:[0.24609375, 0.0, 0.99609375], - 181:[0.74609375, 0.6640625, 0.99609375], - 182:[0.1796875, 0.0, 0.73828125], - 183:[0.55078125, 0.4921875, 0.73828125], - 184:[0.12109375, 0.0, 0.50390625], - 185:[0.375, 0.3359375, 0.50390625], - 186:[0.09765625, 0.0, 0.40625], - 187:[0.3046875, 0.26953125, 0.40625], - 188:[0.07421875, 0.0, 0.30859375], - 189:[0.23046875, 0.20703125, 0.30859375], - 190:[0.49609375, 0.0, 0.99609375], - 191:[0.828125, 0.6640625, 0.99609375], - 192:[0.3671875, 0.0, 0.73828125], - 193:[0.61328125, 0.4921875, 0.73828125], - 194:[0.25, 0.0, 0.50390625], - 195:[0.41796875, 0.3359375, 0.50390625], - 196:[0.203125, 0.0, 0.40625], - 197:[0.3359375, 0.26953125, 0.40625], - 198:[0.15234375, 0.0, 0.30859375], - 199:[0.2578125, 0.20703125, 0.30859375], - 200:[0.74609375, 0.0, 0.99609375], - 201:[0.9140625, 0.6640625, 0.99609375], - 202:[0.55078125, 0.0, 0.73828125], - 203:[0.67578125, 0.4921875, 0.73828125], - 204:[0.375, 0.0, 0.50390625], - 205:[0.4609375, 0.3359375, 0.50390625], - 206:[0.3046875, 0.0, 0.40625], - 207:[0.37109375, 0.26953125, 0.40625], - 208:[0.23046875, 0.0, 0.30859375], - 209:[0.28515625, 0.20703125, 0.30859375], - 210:[0.99609375, 0.0, 0.99609375], - 211:[0.99609375, 0.6640625, 0.99609375], - 212:[0.73828125, 0.0, 0.73828125], - 213:[0.73828125, 0.4921875, 0.73828125], - 214:[0.50390625, 0.0, 0.50390625], - 215:[0.50390625, 0.3359375, 0.50390625], - 216:[0.40625, 0.0, 0.40625], - 217:[0.40625, 0.26953125, 0.40625], - 218:[0.30859375, 0.0, 0.30859375], - 219:[0.30859375, 0.20703125, 0.30859375], - 220:[0.99609375, 0.0, 0.74609375], - 221:[0.99609375, 0.6640625, 0.9140625], - 222:[0.73828125, 0.0, 0.55078125], - 223:[0.73828125, 0.4921875, 0.67578125], - 224:[0.50390625, 0.0, 0.375], - 225:[0.50390625, 0.3359375, 0.4609375], - 226:[0.40625, 0.0, 0.3046875], - 227:[0.40625, 0.26953125, 0.37109375], - 228:[0.30859375, 0.0, 0.23046875], - 229:[0.30859375, 0.20703125, 0.28515625], - 230:[0.99609375, 0.0, 0.49609375], - 231:[0.99609375, 0.6640625, 0.828125], - 232:[0.73828125, 0.0, 0.3671875], - 233:[0.73828125, 0.4921875, 0.61328125], - 234:[0.50390625, 0.0, 0.25], - 235:[0.50390625, 0.3359375, 0.41796875], - 236:[0.40625, 0.0, 0.203125], - 237:[0.40625, 0.26953125, 0.3359375], - 238:[0.30859375, 0.0, 0.15234375], - 239:[0.30859375, 0.20703125, 0.2578125], - 240:[0.99609375, 0.0, 0.24609375], - 241:[0.99609375, 0.6640625, 0.74609375], - 242:[0.73828125, 0.0, 0.1796875], - 243:[0.73828125, 0.4921875, 0.55078125], - 244:[0.50390625, 0.0, 0.12109375], - 245:[0.50390625, 0.3359375, 0.375], - 246:[0.40625, 0.0, 0.09765625], - 247:[0.40625, 0.26953125, 0.3046875], - 248:[0.30859375, 0.0, 0.07421875], - 249:[0.30859375, 0.20703125, 0.23046875], - 250:[0.19921875, 0.19921875, 0.19921875], - 251:[0.3125, 0.3125, 0.3125], - 252:[0.41015625, 0.41015625, 0.41015625], - 253:[0.5078125, 0.5078125, 0.5078125], - 254:[0.7421875, 0.7421875, 0.7421875], - 255:[0.99609375, 0.99609375, 0.99609375], -} diff --git a/release/scripts/bpymodules/dxfImportObjects.py b/release/scripts/bpymodules/dxfImportObjects.py deleted file mode 100644 index 960c4c1ac15..00000000000 --- a/release/scripts/bpymodules/dxfImportObjects.py +++ /dev/null @@ -1,1326 +0,0 @@ -"""This module provides wrapper objects for dxf entities. - - The wrappers expect a "dxf object" as input. 
The dxf object is - an object with a type and a data attribute. Type is a lowercase - string matching the 0 code of a dxf entity. Data is a list containing - dxf objects or lists of [code, data] pairs. - - This module is not general, and is only for dxf import. -""" - -# -------------------------------------------------------------------------- -# DXF Import Objects v0.8 by Ed Blake (AKA Kitsu) -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- -from math import * - - -# from Stani's dxf writer v1.1 (c)www.stani.be (GPL) -#---color values -BYBLOCK=0 -BYLAYER=256 - -#---block-type flags (bit coded values, may be combined): -ANONYMOUS =1 # This is an anonymous block generated by hatching, associative dimensioning, other internal operations, or an application -NON_CONSTANT_ATTRIBUTES =2 # This block has non-constant attribute definitions (this bit is not set if the block has any attribute definitions that are constant, or has no attribute definitions at all) -XREF =4 # This block is an external reference (xref) -XREF_OVERLAY =8 # This block is an xref overlay -EXTERNAL =16 # This block is externally dependent -RESOLVED =32 # This is a resolved external reference, or dependent of an external reference (ignored on input) -REFERENCED =64 # This definition is a referenced external reference (ignored on input) - -#---mtext flags -#attachment point -TOP_LEFT = 1 -TOP_CENTER = 2 -TOP_RIGHT = 3 -MIDDLE_LEFT = 4 -MIDDLE_CENTER = 5 -MIDDLE_RIGHT = 6 -BOTTOM_LEFT = 7 -BOTTOM_CENTER = 8 -BOTTOM_RIGHT = 9 -#drawing direction -LEFT_RIGHT = 1 -TOP_BOTTOM = 3 -BY_STYLE = 5 #the flow direction is inherited from the associated text style -#line spacing style (optional): -AT_LEAST = 1 #taller characters will override -EXACT = 2 #taller characters will not override - -#---polyline flags -CLOSED =1 # This is a closed polyline (or a polygon mesh closed in the M direction) -CURVE_FIT =2 # Curve-fit vertices have been added -SPLINE_FIT =4 # Spline-fit vertices have been added -POLYLINE_3D =8 # This is a 3D polyline -POLYGON_MESH =16 # This is a 3D polygon mesh -CLOSED_N =32 # The polygon mesh is closed in the N direction -POLYFACE_MESH =64 # The polyline is a polyface mesh -CONTINOUS_LINETYPE_PATTERN =128 # The linetype pattern is generated continuously around the vertices of this polyline - -#---text flags -#horizontal -LEFT = 0 -CENTER = 1 -RIGHT = 2 -ALIGNED = 3 #if vertical alignment = 0 -MIDDLE = 4 #if vertical alignment = 0 -FIT = 5 #if vertical alignment = 0 -#vertical -BASELINE = 0 -BOTTOM = 1 -MIDDLE = 2 -TOP = 3 -class Object: - """Empty container class for dxf objects""" - - def __init__(self, _type=''): - """_type expects 
a string value.""" - self.type = _type - self.name = '' - self.data = [] - - def __str__(self): - if self.name: - return self.name - else: - return self.type - - def __repr__(self): - return str(self.data) - - def get_type(self, kind=''): - """Despite the name, this method actually returns all objects of type 'kind' from self.data.""" - if type: - objects = [] - for item in self.data: - if type(item) != list and item.type == kind: - # we want this type of object - objects.append(item) - elif type(item) == list and item[0] == kind: - # we want this type of data - objects.append(item[1]) - return objects - - -class Layer: - """Class for objects representing dxf layers.""" - - def __init__(self, obj): - """Expects an entity object of type line as input.""" - self.type = obj.type - self.data = obj.data[:] - - self.name = obj.get_type(2)[0] - self.color = obj.get_type(62)[0] - self.flags = obj.get_type(70)[0] - self.frozen = self.flags&1 - - - - def __repr__(self): - return "%s: name - %s, color - %s" %(self.__class__.__name__, self.name, self.color) - - - -class Line: - """Class for objects representing dxf lines.""" - - def __init__(self, obj): - """Expects an entity object of type line as input.""" - if not obj.type == 'line': - raise TypeError, "Wrong type %s for line object!" %obj.type - self.type = obj.type - self.data = obj.data[:] - - self.space = obj.get_type(67) - if self.space: - self.space = self.space[0] - else: - self.space = 0 - - self.color_index = obj.get_type(62) - if self.color_index: - self.color_index = self.color_index[0] - else: - self.color_index = BYLAYER - - discard, self.layer, discard_index = get_layer(obj.data) - del obj.data[discard_index] - - self.points = self.get_points(obj.data) - - - - - def get_points(self, data): - """Gets start and end points for a line type object. - - Lines have a fixed number of points (two) and fixed codes for each value. - """ - - # start x, y, z and end x, y, z = 0 - sx, sy, sz, ex, ey, ez = 0, 0, 0, 0, 0, 0 - for item in data: - if item[0] == 10: # 10 = x - sx = item[1] - elif item[0] == 20: # 20 = y - sy = item[1] - elif item[0] == 30: # 30 = z - sz = item[1] - elif item[0] == 11: # 11 = x - ex = item[1] - elif item[0] == 21: # 21 = y - ey = item[1] - elif item[0] == 31: # 31 = z - ez = item[1] - return [[sx, sy, sz], [ex, ey, ez]] - - - - def __repr__(self): - return "%s: layer - %s, points - %s" %(self.__class__.__name__, self.layer, self.points) - - - -class LWpolyline: - """Class for objects representing dxf LWpolylines.""" - - def __init__(self, obj): - """Expects an entity object of type lwpolyline as input.""" - if not obj.type == 'lwpolyline': - raise TypeError, "Wrong type %s for polyline object!" 
%obj.type - self.type = obj.type - self.data = obj.data[:] - - # required data - self.num_points = obj.get_type(90)[0] - - # optional data (with defaults) - self.space = obj.get_type(67) - if self.space: - self.space = self.space[0] - else: - self.space = 0 - - self.color_index = obj.get_type(62) - if self.color_index: - self.color_index = self.color_index[0] - else: - self.color_index = BYLAYER - - self.elevation = obj.get_type(38) - if self.elevation: - self.elevation = self.elevation[0] - else: - self.elevation = 0 - - self.flags = obj.get_type(70) - if self.flags: - self.flags = self.flags[0] - else: - self.flags = 0 - - self.closed = self.flags&1 # byte coded, 1 = closed, 128 = plinegen - discard, self.layer, discard_index = get_layer(obj.data) - del obj.data[discard_index] - self.points = self.get_points(obj.data) - self.extrusion = self.get_extrusion(obj.data) - - - - - - - def get_points(self, data): - """Gets points for a polyline type object. - - Polylines have no fixed number of verts, and - each vert can have a number of properties. - Verts should be coded as - 10:xvalue - 20:yvalue - 40:startwidth or 0 - 41:endwidth or 0 - 42:bulge or 0 - for each vert - """ - num = self.num_points - point = None - points = [] - for item in data: - if item[0] == 10: # 10 = x - if point: - points.append(point) - point = Vertex() - point.x = item[1] - elif item[0] == 20: # 20 = y - point.y = item[1] - elif item[0] == 40: # 40 = start width - point.swidth = item[1] - elif item[0] == 41: # 41 = end width - point.ewidth = item[1] - elif item[0] == 42: # 42 = bulge - point.bulge = item[1] - points.append(point) - return points - - - def get_extrusion(self, data): - """Find the axis of extrusion. - - Used to get the objects Object Coordinate System (ocs). - """ - vec = [0,0,1] - for item in data: - if item[0] == 210: # 210 = x - vec[0] = item[1] - elif item[0] == 220: # 220 = y - vec[1] = item[1] - elif item[0] == 230: # 230 = z - vec[2] = item[1] - return vec - - - def __repr__(self): - return "%s: layer - %s, points - %s" %(self.__class__.__name__, self.layer, self.points) - - - -class Polyline: - """Class for objects representing dxf LWpolylines.""" - - def __init__(self, obj): - """Expects an entity object of type polyline as input.""" - if not obj.type == 'polyline': - raise TypeError, "Wrong type %s for polyline object!" %obj.type - self.type = obj.type - self.data = obj.data[:] - self.points = [] - - # optional data (with defaults) - self.space = obj.get_type(67) - if self.space: - self.space = self.space[0] - else: - self.space = 0 - - self.color_index = obj.get_type(62) - if self.color_index: - self.color_index = self.color_index[0] - else: - self.color_index = BYLAYER - - self.elevation = obj.get_type(30) - if self.elevation: - self.elevation = self.elevation[0] - else: - self.elevation = 0 - - self.flags = obj.get_type(70) - if self.flags: - self.flags = self.flags[0] - else: - self.flags = 0 - - self.closed = self.flags&1 # byte coded, 1 = closed, 128 = plinegen - - discard, self.layer, discard_index = get_layer(obj.data) - del obj.data[discard_index] - self.extrusion = self.get_extrusion(obj.data) - - - - - - def get_extrusion(self, data): - """Find the axis of extrusion. - - Used to get the objects Object Coordinate System (ocs). 
- """ - vec = [0,0,1] - for item in data: - if item[0] == 210: # 210 = x - vec[0] = item[1] - elif item[0] == 220: # 220 = y - vec[1] = item[1] - elif item[0] == 230: # 230 = z - vec[2] = item[1] - return vec - - - def __repr__(self): - return "%s: layer - %s, points - %s" %(self.__class__.__name__, self.layer, self.points) - - - -class Vertex(object): - """Generic vertex object used by polylines (and maybe others).""" - - def __init__(self, obj=None): - """Initializes vertex data. - - The optional obj arg is an entity object of type vertex. - """ - self.loc = [0,0,0] - self.bulge = 0 - self.swidth = 0 - self.ewidth = 0 - self.flags = 0 - - if obj is not None: - if not obj.type == 'vertex': - raise TypeError, "Wrong type %s for vertex object!" %obj.type - self.type = obj.type - self.data = obj.data[:] - - self.get_props(obj.data) - - - def get_props(self, data): - """Gets coords for a vertex type object. - - Each vert can have a number of properties. - Verts should be coded as - 10:xvalue - 20:yvalue - 40:startwidth or 0 - 41:endwidth or 0 - 42:bulge or 0 - """ - for item in data: - if item[0] == 10: # 10 = x - self.x = item[1] - elif item[0] == 20: # 20 = y - self.y = item[1] - elif item[0] == 30: # 30 = z - self.z = item[1] - elif item[0] == 40: # 40 = start width - self.swidth = item[1] - elif item[0] == 41: # 41 = end width - self.ewidth = item[1] - elif item[0] == 42: # 42 = bulge - self.bulge = item[1] - elif item[0] == 70: # 70 = vert flags - self.flags = item[1] - - - def __len__(self): - return 3 - - - def __getitem__(self, key): - return self.loc[key] - - - def __setitem__(self, key, value): - if key in [0,1,2]: - self.loc[key] - - - def __iter__(self): - return self.loc.__iter__() - - - def __str__(self): - return str(self.loc) - - - def __repr__(self): - return "Vertex %s, swidth=%s, ewidth=%s, bulge=%s" %(self.loc, self.swidth, self.ewidth, self.bulge) - - - def getx(self): - return self.loc[0] - - def setx(self, value): - self.loc[0] = value - - x = property(getx, setx) - - - def gety(self): - return self.loc[1] - - def sety(self, value): - self.loc[1] = value - - y = property(gety, sety) - - - def getz(self): - return self.loc[2] - - def setz(self, value): - self.loc[2] = value - - z = property(getz, setz) - - - -class Text: - """Class for objects representing dxf Text.""" - - def __init__(self, obj): - """Expects an entity object of type text as input.""" - if not obj.type == 'text': - raise TypeError, "Wrong type %s for text object!" %obj.type - self.type = obj.type - self.data = obj.data[:] - - # required data - self.height = obj.get_type(40)[0] - self.value = obj.get_type(1)[0] # The text string value - - # optional data (with defaults) - self.space = obj.get_type(67) - if self.space: - self.space = self.space[0] - else: - self.space = 0 - - self.color_index = obj.get_type(62) - if self.color_index: - self.color_index = self.color_index[0] - else: - self.color_index = BYLAYER - - self.rotation = obj.get_type(50) # radians? - if not self.rotation: - self.rotation = 0 - else: - self.rotation = self.rotation[0] - - self.width_factor = obj.get_type(41) # Scaling factor along local x axis - if not self.width_factor: - self.width_factor = 1 - else: - self.width_factor = self.width_factor[0] - - self.oblique = obj.get_type(51) # skew in degrees -90 <= oblique <= 90 - if not self.oblique: - self.oblique = 0 - else: - self.oblique = self.oblique[0] - - self.halignment = obj.get_type(72) # horiz. 
alignment - if not self.halignment: # 0=left, 1=center, 2=right, 3=aligned, 4=middle, 5=fit - self.halignment = 0 - else: - self.halignment = self.halignment[0] - - self.valignment = obj.get_type(73) # vert. alignment - if not self.valignment: # 0=baseline, 1=bottom, 2=middle, 3=top - self.valignment = 0 - else: - self.valignment = self.valignment[0] - - discard, self.layer, discard_index = get_layer(obj.data) - del obj.data[discard_index] - self.loc = self.get_loc(obj.data, self.halignment, self.valignment) - self.extrusion = self.get_extrusion(obj.data) - - - - - def get_loc(self, data, halign, valign): - """Gets adjusted location for text type objects. - - If group 72 and/or 73 values are nonzero then the first alignment point values - are ignored and AutoCAD calculates new values based on the second alignment - point and the length and height of the text string itself (after applying the - text style). If the 72 and 73 values are zero or missing, then the second - alignment point is meaningless. - - I don't know how to calc text size... - """ - # bottom left x, y, z and justification x, y, z = 0 - x, y, z, jx, jy, jz = 0, 0, 0, 0, 0, 0 - for item in data: - if item[0] == 10: # 10 = x - x = item[1] - elif item[0] == 20: # 20 = y - y = item[1] - elif item[0] == 30: # 30 = z - z = item[1] - elif item[0] == 11: # 11 = x - jx = item[1] - elif item[0] == 21: # 21 = y - jy = item[1] - elif item[0] == 31: # 31 = z - jz = item[1] - - if halign or valign: - x, y, z = jx, jy, jz - return [x, y, z] - - def get_extrusion(self, data): - """Find the axis of extrusion. - - Used to get the objects Object Coordinate System (ocs). - """ - vec = [0,0,1] - for item in data: - if item[0] == 210: # 210 = x - vec[0] = item[1] - elif item[0] == 220: # 220 = y - vec[1] = item[1] - elif item[0] == 230: # 230 = z - vec[2] = item[1] - return vec - - - def __repr__(self): - return "%s: layer - %s, value - %s" %(self.__class__.__name__, self.layer, self.value) - - - -class Mtext: - """Class for objects representing dxf Mtext.""" - - def __init__(self, obj): - """Expects an entity object of type mtext as input.""" - if not obj.type == 'mtext': - raise TypeError, "Wrong type %s for mtext object!" 
%obj.type - self.type = obj.type - self.data = obj.data[:] - - # required data - self.height = obj.get_type(40)[0] - self.width = obj.get_type(41)[0] - self.alignment = obj.get_type(71)[0] # alignment 1=TL, 2=TC, 3=TR, 4=ML, 5=MC, 6=MR, 7=BL, 8=BC, 9=BR - self.value = self.get_text(obj.data) # The text string value - - # optional data (with defaults) - self.space = obj.get_type(67) - if self.space: - self.space = self.space[0] - else: - self.space = 0 - - self.color_index = obj.get_type(62) - if self.color_index: - self.color_index = self.color_index[0] - else: - self.color_index = BYLAYER - - self.rotation = obj.get_type(50) # radians - if not self.rotation: - self.rotation = 0 - else: - self.rotation = self.rotation[0] - - self.width_factor = obj.get_type(42) # Scaling factor along local x axis - if not self.width_factor: - self.width_factor = 1 - else: - self.width_factor = self.width_factor[0] - - self.line_space = obj.get_type(44) # percentage of default - if not self.line_space: - self.line_space = 1 - else: - self.line_space = self.line_space[0] - - discard, self.layer, discard_index = get_layer(obj.data) - del obj.data[discard_index] - self.loc = self.get_loc(obj.data) - self.extrusion = self.get_extrusion(obj.data) - - - - - - def get_text(self, data): - """Reconstructs mtext data from dxf codes.""" - primary = '' - secondary = [] - for item in data: - if item[0] == 1: # There should be only one primary... - primary = item[1] - elif item[0] == 3: # There may be any number of extra strings (in order) - secondary.append(item[1]) - if not primary: - #raise ValueError, "Empty Mtext Object!" - string = "Empty Mtext Object!" - if not secondary: - string = primary.replace(r'\P', '\n') - else: - string = ''.join(secondary)+primary - string = string.replace(r'\P', '\n') - return string - def get_loc(self, data): - """Gets location for a mtext type objects. - - Mtext objects have only one point indicating location. - """ - loc = [0,0,0] - for item in data: - if item[0] == 10: # 10 = x - loc[0] = item[1] - elif item[0] == 20: # 20 = y - loc[1] = item[1] - elif item[0] == 30: # 30 = z - loc[2] = item[1] - return loc - - - - - def get_extrusion(self, data): - """Find the axis of extrusion. - - Used to get the objects Object Coordinate System (ocs). - """ - vec = [0,0,1] - for item in data: - if item[0] == 210: # 210 = x - vec[0] = item[1] - elif item[0] == 220: # 220 = y - vec[1] = item[1] - elif item[0] == 230: # 230 = z - vec[2] = item[1] - return vec - - - def __repr__(self): - return "%s: layer - %s, value - %s" %(self.__class__.__name__, self.layer, self.value) - - - -class Circle: - """Class for objects representing dxf Circles.""" - - def __init__(self, obj): - """Expects an entity object of type circle as input.""" - if not obj.type == 'circle': - raise TypeError, "Wrong type %s for circle object!" %obj.type - self.type = obj.type - self.data = obj.data[:] - - # required data - self.radius = obj.get_type(40)[0] - - # optional data (with defaults) - self.space = obj.get_type(67) - if self.space: - self.space = self.space[0] - else: - self.space = 0 - - self.color_index = obj.get_type(62) - if self.color_index: - self.color_index = self.color_index[0] - else: - self.color_index = BYLAYER - - discard, self.layer, discard_index = get_layer(obj.data) - del obj.data[discard_index] - self.loc = self.get_loc(obj.data) - self.extrusion = self.get_extrusion(obj.data) - - - - - - def get_loc(self, data): - """Gets the center location for circle type objects. 
- - Circles have a single coord location. - """ - loc = [0, 0, 0] - for item in data: - if item[0] == 10: # 10 = x - loc[0] = item[1] - elif item[0] == 20: # 20 = y - loc[1] = item[1] - elif item[0] == 30: # 30 = z - loc[2] = item[1] - return loc - - - - def get_extrusion(self, data): - """Find the axis of extrusion. - - Used to get the objects Object Coordinate System (ocs). - """ - vec = [0,0,1] - for item in data: - if item[0] == 210: # 210 = x - vec[0] = item[1] - elif item[0] == 220: # 220 = y - vec[1] = item[1] - elif item[0] == 230: # 230 = z - vec[2] = item[1] - return vec - - - def __repr__(self): - return "%s: layer - %s, radius - %s" %(self.__class__.__name__, self.layer, self.radius) - - - -class Arc: - """Class for objects representing dxf arcs.""" - - def __init__(self, obj): - """Expects an entity object of type arc as input.""" - if not obj.type == 'arc': - raise TypeError, "Wrong type %s for arc object!" %obj.type - self.type = obj.type - self.data = obj.data[:] - - # required data - self.radius = obj.get_type(40)[0] - self.start_angle = obj.get_type(50)[0] - self.end_angle = obj.get_type(51)[0] - - # optional data (with defaults) - self.space = obj.get_type(67) - if self.space: - self.space = self.space[0] - else: - self.space = 0 - - self.color_index = obj.get_type(62) - if self.color_index: - self.color_index = self.color_index[0] - else: - self.color_index = BYLAYER - - discard, self.layer, discard_index = get_layer(obj.data) - del obj.data[discard_index] - self.loc = self.get_loc(obj.data) - self.extrusion = self.get_extrusion(obj.data) - - - - - - def get_loc(self, data): - """Gets the center location for arc type objects. - - Arcs have a single coord location. - """ - loc = [0, 0, 0] - for item in data: - if item[0] == 10: # 10 = x - loc[0] = item[1] - elif item[0] == 20: # 20 = y - loc[1] = item[1] - elif item[0] == 30: # 30 = z - loc[2] = item[1] - return loc - - - - def get_extrusion(self, data): - """Find the axis of extrusion. - - Used to get the objects Object Coordinate System (ocs). - """ - vec = [0,0,1] - for item in data: - if item[0] == 210: # 210 = x - vec[0] = item[1] - elif item[0] == 220: # 220 = y - vec[1] = item[1] - elif item[0] == 230: # 230 = z - vec[2] = item[1] - return vec - - - def __repr__(self): - return "%s: layer - %s, radius - %s" %(self.__class__.__name__, self.layer, self.radius) - - - -class BlockRecord: - """Class for objects representing dxf block_records.""" - - def __init__(self, obj): - """Expects an entity object of type block_record as input.""" - if not obj.type == 'block_record': - raise TypeError, "Wrong type %s for block_record object!" %obj.type - self.type = obj.type - self.data = obj.data[:] - - # required data - self.name = obj.get_type(2)[0] - - # optional data (with defaults) - self.insertion_units = obj.get_type(70) - if not self.insertion_units: - self.insertion_units = None - else: - self.insertion_units = self.insertion_units[0] - - self.insert_units = obj.get_type(1070) - if not self.insert_units: - self.insert_units = None - else: - self.insert_units = self.insert_units[0] - - - - - - - def __repr__(self): - return "%s: name - %s, insert units - %s" %(self.__class__.__name__, self.name, self.insertion_units) - - - - -class Block: - """Class for objects representing dxf blocks.""" - - def __init__(self, obj): - """Expects an entity object of type block as input.""" - if not obj.type == 'block': - raise TypeError, "Wrong type %s for block object!" 
%obj.type - self.type = obj.type - self.data = obj.data[:] - - # required data - self.flags = obj.get_type(70)[0] - self.entities = Object('block_contents') - self.entities.data = objectify([ent for ent in obj.data if type(ent) != list]) - - # optional data (with defaults) - self.name = obj.get_type(3) - if self.name: - self.name = self.name[0] - else: - self.name = '' - - self.path = obj.get_type(1) - if self.path: - self.path = self.path[0] - else: - self.path = '' - - self.discription = obj.get_type(4) - if self.discription: - self.discription = self.discription[0] - else: - self.discription = '' - - discard, self.layer, discard_index = get_layer(obj.data) - del obj.data[discard_index] - self.loc = self.get_loc(obj.data) - - - - - - def get_loc(self, data): - """Gets the insert point of the block.""" - loc = [0, 0, 0] - for item in data: - if type(item) != list: - continue - if item[0] == 10: # 10 = x - loc[0] = item[1] - elif item[0] == 20: # 20 = y - loc[1] = item[1] - elif item[0] == 30: # 30 = z - loc[2] = item[1] - return loc - - - - def __repr__(self): - return "%s: name - %s, description - %s, xref-path - %s" %(self.__class__.__name__, self.name, self.discription, self.path) - - - - -class Insert: - """Class for objects representing dxf inserts.""" - - def __init__(self, obj): - """Expects an entity object of type insert as input.""" - if not obj.type == 'insert': - raise TypeError, "Wrong type %s for insert object!" %obj.type - self.type = obj.type - self.data = obj.data[:] - - # required data - self.block = obj.get_type(2)[0] - - # optional data (with defaults) - self.rotation = obj.get_type(50) - if self.rotation: - self.rotation = self.rotation[0] - else: - self.rotation = 0 - - self.space = obj.get_type(67) - if self.space: - self.space = self.space[0] - else: - self.space = 0 - - self.color_index = obj.get_type(62) - if self.color_index: - self.color_index = self.color_index[0] - else: - self.color_index = BYLAYER - - discard, self.layer, discard_index = get_layer(obj.data) - del obj.data[discard_index] - self.loc = self.get_loc(obj.data) - self.scale = self.get_scale(obj.data) - self.rows, self.columns = self.get_array(obj.data) - self.extrusion = self.get_extrusion(obj.data) - - - - - - def get_loc(self, data): - """Gets the center location for circle type objects. - - Circles have a single coord location. - """ - loc = [0, 0, 0] - for item in data: - if item[0] == 10: # 10 = x - loc[0] = item[1] - elif item[0] == 20: # 20 = y - loc[1] = item[1] - elif item[0] == 30: # 30 = z - loc[2] = item[1] - return loc - - - - def get_scale(self, data): - """Gets the x/y/z scale factor for the block. - """ - scale = [1, 1, 1] - for item in data: - if item[0] == 41: # 41 = x scale - scale[0] = item[1] - elif item[0] == 42: # 42 = y scale - scale[1] = item[1] - elif item[0] == 43: # 43 = z scale - scale[2] = item[1] - return scale - - - - def get_array(self, data): - """Returns the pair (row number, row spacing), (column number, column spacing).""" - columns = 1 - rows = 1 - cspace = 0 - rspace = 0 - for item in data: - if item[0] == 70: # 70 = columns - columns = item[1] - elif item[0] == 71: # 71 = rows - rows = item[1] - if item[0] == 44: # 44 = columns - cspace = item[1] - elif item[0] == 45: # 45 = rows - rspace = item[1] - return (rows, rspace), (columns, cspace) - - - - def get_extrusion(self, data): - """Find the axis of extrusion. - - Used to get the objects Object Coordinate System (ocs). 
- """ - vec = [0,0,1] - for item in data: - if item[0] == 210: # 210 = x - vec[0] = item[1] - elif item[0] == 220: # 220 = y - vec[1] = item[1] - elif item[0] == 230: # 230 = z - vec[2] = item[1] - return vec - - - def __repr__(self): - return "%s: layer - %s, block - %s" %(self.__class__.__name__, self.layer, self.block) - - - - -class Ellipse: - """Class for objects representing dxf ellipses.""" - - def __init__(self, obj): - """Expects an entity object of type ellipse as input.""" - if not obj.type == 'ellipse': - raise TypeError, "Wrong type %s for ellipse object!" %obj.type - self.type = obj.type - self.data = obj.data[:] - - # required data - self.ratio = obj.get_type(40)[0] - self.start_angle = obj.get_type(41)[0] - self.end_angle = obj.get_type(42)[0] - - # optional data (with defaults) - self.space = obj.get_type(67) - if self.space: - self.space = self.space[0] - else: - self.space = 0 - - self.color_index = obj.get_type(62) - if self.color_index: - self.color_index = self.color_index[0] - else: - self.color_index = BYLAYER - - discard, self.layer, discard_index = get_layer(obj.data) - del obj.data[discard_index] - self.loc = self.get_loc(obj.data) - self.major = self.get_major(obj.data) - self.extrusion = self.get_extrusion(obj.data) - self.radius = sqrt(self.major[0]**2 + self.major[0]**2 + self.major[0]**2) - - - - - def get_loc(self, data): - """Gets the center location for arc type objects. - - Arcs have a single coord location. - """ - loc = [0, 0, 0] - for item in data: - if item[0] == 10: # 10 = x - loc[0] = item[1] - elif item[0] == 20: # 20 = y - loc[1] = item[1] - elif item[0] == 30: # 30 = z - loc[2] = item[1] - return loc - - - - def get_major(self, data): - """Gets the major axis for ellipse type objects. - - The ellipse major axis defines the rotation of the ellipse and its radius. - """ - loc = [0, 0, 0] - for item in data: - if item[0] == 11: # 11 = x - loc[0] = item[1] - elif item[0] == 21: # 21 = y - loc[1] = item[1] - elif item[0] == 31: # 31 = z - loc[2] = item[1] - return loc - - - - def get_extrusion(self, data): - """Find the axis of extrusion. - - Used to get the objects Object Coordinate System (ocs). - """ - vec = [0,0,1] - for item in data: - if item[0] == 210: # 210 = x - vec[0] = item[1] - elif item[0] == 220: # 220 = y - vec[1] = item[1] - elif item[0] == 230: # 230 = z - vec[2] = item[1] - return vec - - - def __repr__(self): - return "%s: layer - %s, radius - %s" %(self.__class__.__name__, self.layer, self.radius) - - - -class Face: - """Class for objects representing dxf 3d faces.""" - - def __init__(self, obj): - """Expects an entity object of type 3dfaceplot as input.""" - if not obj.type == '3dface': - raise TypeError, "Wrong type %s for 3dface object!" %obj.type - self.type = obj.type - self.data = obj.data[:] - - # optional data (with defaults) - self.space = obj.get_type(67) - if self.space: - self.space = self.space[0] - else: - self.space = 0 - - self.color_index = obj.get_type(62) - if self.color_index: - self.color_index = self.color_index[0] - else: - self.color_index = BYLAYER - - discard, self.layer, discard_index = get_layer(obj.data) - del obj.data[discard_index] - self.points = self.get_points(obj.data) - - - - - def get_points(self, data): - """Gets 3-4 points for a 3d face type object. - - Faces have three or optionally four verts. 
- """ - - a = [0, 0, 0] - b = [0, 0, 0] - c = [0, 0, 0] - d = False - for item in data: - # ----------- a ------------- - if item[0] == 10: # 10 = x - a[0] = item[1] - elif item[0] == 20: # 20 = y - a[1] = item[1] - elif item[0] == 30: # 30 = z - a[2] = item[1] - # ----------- b ------------- - elif item[0] == 11: # 11 = x - b[0] = item[1] - elif item[0] == 21: # 21 = y - b[1] = item[1] - elif item[0] == 31: # 31 = z - b[2] = item[1] - # ----------- c ------------- - elif item[0] == 12: # 12 = x - c[0] = item[1] - elif item[0] == 22: # 22 = y - c[1] = item[1] - elif item[0] == 32: # 32 = z - c[2] = item[1] - # ----------- d ------------- - elif item[0] == 13: # 13 = x - d = [0, 0, 0] - d[0] = item[1] - elif item[0] == 23: # 23 = y - d[1] = item[1] - elif item[0] == 33: # 33 = z - d[2] = item[1] - out = [a,b,c] - if d: - out.append(d) - return out - - - def __repr__(self): - return "%s: layer - %s, points - %s" %(self.__class__.__name__, self.layer, self.points) - - -def get_name(data): - """Get the name of an object from its object data. - - Returns a pair of (data_item, name) where data_item is the list entry where the name was found - (the data_item can be used to remove the entry from the object data). Be sure to check - name not None before using the returned values! - """ - value = None - for i, item in enumerate(data): - if item[0] == 2: - value = item[1] - break - return item, value, i - -def get_layer(data): - """Expects object data as input. - - Returns (entry, layer_name, entry_index) where entry is the data item that provided the layer name. - """ - value = None - for i, item in enumerate(data): - if item[0] == 8: - value = item[1] - break - return item, value, i - - -# type to object map -type_map = { - 'line':Line, - 'lwpolyline':LWpolyline, - 'text':Text, - 'mtext':Mtext, - 'circle':Circle, - 'arc':Arc, - 'layer':Layer, - 'block_record':BlockRecord, - 'block':Block, - 'insert':Insert, - 'ellipse':Ellipse, - '3dface':Face -} - -def objectify(data): - """Expects a section type object's data as input. - - Maps object data to the correct object type. - """ - objects = [] # colector for finished objects - known_types = type_map.keys() # so we don't have to call foo.keys() every iteration - index = 0 - while index < len(data): - item = data[index] - if type(item) != list and item.type in known_types: - # proccess the object and append the resulting object - objects.append(type_map[item.type](item)) - elif type(item) != list and item.type == 'table': - item.data = objectify(item.data) # tables have sub-objects - objects.append(item) - elif type(item) != list and item.type == 'polyline': - pline = Polyline(item) - while 1: - index += 1 - item = data[index] - if item.type == 'vertex': - v = Vertex(item) - pline.points.append(v) - elif item.type == 'seqend': - break - else: - print "Error: non-vertex found before seqend!" - index -= 1 - break - objects.append(pline) - else: - # we will just let the data pass un-harrased - objects.append(item) - index += 1 - return objects -if __name__ == "__main__": - print "No example yet!" 
\ No newline at end of file diff --git a/release/scripts/bpymodules/dxfLibrary.py b/release/scripts/bpymodules/dxfLibrary.py deleted file mode 100644 index ccd8ef9b625..00000000000 --- a/release/scripts/bpymodules/dxfLibrary.py +++ /dev/null @@ -1,880 +0,0 @@ -#dxfLibrary.py : provides functions for generating DXF files -# -------------------------------------------------------------------------- -__version__ = "v1.33 - 2009.06.16" -__author__ = "Stani Michiels(Stani), Remigiusz Fiedler(migius)" -__license__ = "GPL" -__url__ = "http://wiki.blender.org/index.php/Scripts/Manual/Export/autodesk_dxf" -__bpydoc__ ="""The library to export geometry data to DXF format r12 version. - -Copyright %s -Version %s -License %s -Homepage %s - -See the homepage for documentation. -Dedicated thread on BlenderArtists: http://blenderartists.org/forum/showthread.php?t=136439 - -IDEAs: -- - -TODO: -- add support for DXFr14 (needs extended file header) -- add support for SPLINEs (possible first in DXFr14 version) -- add user preset for floating point precision (3-16?) - -History -v1.33 - 2009.06.16 by migius - - modif _point(): converts all coords to floats - - modif LineType class: implement elements - - added VPORT class, incl. defaults - - fix Insert class -v1.32 - 2009.06.06 by migius - - modif Style class: changed defaults to widthFactor=1.0, obliqueAngle=0.0 - - modif Text class: alignment parameter reactivated -v1.31 - 2009.06.02 by migius - - modif _Entity class: added paperspace,elevation -v1.30 - 2009.05.28 by migius - - bugfix 3dPOLYLINE/POLYFACE: VERTEX needs x,y,z coordinates, index starts with 1 not 0 -v1.29 - 2008.12.28 by Yorik - - modif POLYLINE to support bulge segments -v1.28 - 2008.12.13 by Steeve/BlenderArtists - - bugfix for EXTMIN/EXTMAX to suit Cycas-CAD -v1.27 - 2008.10.07 by migius - - beautifying output code: keys whitespace prefix - - refactoring DXF-strings format: NewLine moved to the end of -v1.26 - 2008.10.05 by migius - - modif POLYLINE to support POLYFACE -v1.25 - 2008.09.28 by migius - - modif FACE class for r12 -v1.24 - 2008.09.27 by migius - - modif POLYLINE class for r12 - - changing output format from r9 to r12(AC1009) -v1.1 (20/6/2005) by www.stani.be/python/sdxf - - Python library to generate dxf drawings -______________________________________________________________ -""" % (__author__,__version__,__license__,__url__) - -# -------------------------------------------------------------------------- -# DXF Library: copyright (C) 2005 by Stani Michiels (AKA Stani) -# 2008/2009 modif by Remigiusz Fiedler (AKA migius) -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
-# -# ***** END GPL LICENCE BLOCK ***** - - -#import Blender -#from Blender import Mathutils, Window, Scene, sys, Draw -#import BPyMessages - -try: - import copy - #from struct import pack -except: - copy = None - -####1) Private (only for developpers) -_HEADER_POINTS=['insbase','extmin','extmax'] - -#---helper functions----------------------------------- -def _point(x,index=0): - """Convert tuple to a dxf point""" - #print 'deb: _point=', x #------------- - return '\n'.join([' %s\n%s'%((i+1)*10+index,float(x[i])) for i in range(len(x))]) - -def _points(plist): - """Convert a list of tuples to dxf points""" - out = '\n'.join([_point(plist[i],i)for i in range(len(plist))]) - return out - -#---base classes---------------------------------------- -class _Call: - """Makes a callable class.""" - def copy(self): - """Returns a copy.""" - return copy.deepcopy(self) - - def __call__(self,**attrs): - """Returns a copy with modified attributes.""" - copied=self.copy() - for attr in attrs:setattr(copied,attr,attrs[attr]) - return copied - -#------------------------------------------------------- -class _Entity(_Call): - """Base class for _common group codes for entities.""" - def __init__(self,paperspace=None,color=None,layer='0', - lineType=None,lineTypeScale=None,lineWeight=None, - extrusion=None,elevation=None,thickness=None, - parent=None): - """None values will be omitted.""" - self.paperspace = paperspace - self.color = color - self.layer = layer - self.lineType = lineType - self.lineTypeScale = lineTypeScale - self.lineWeight = lineWeight - self.extrusion = extrusion - self.elevation = elevation - self.thickness = thickness - #self.visible = visible - self.parent = parent - - def _common(self): - """Return common group codes as a string.""" - if self.parent:parent=self.parent - else:parent=self - result ='' - if parent.paperspace==1: result+=' 67\n1\n' - if parent.layer!=None: result+=' 8\n%s\n'%parent.layer - if parent.color!=None: result+=' 62\n%s\n'%parent.color - if parent.lineType!=None: result+=' 6\n%s\n'%parent.lineType - # TODO: if parent.lineWeight!=None: result+='370\n%s\n'%parent.lineWeight - # TODO: if parent.visible!=None: result+='60\n%s\n'%parent.visible - if parent.lineTypeScale!=None: result+=' 48\n%s\n'%parent.lineTypeScale - if parent.elevation!=None: result+=' 38\n%s\n'%parent.elevation - if parent.thickness!=None: result+=' 39\n%s\n'%parent.thickness - if parent.extrusion!=None: result+='%s\n'%_point(parent.extrusion,200) - return result - -#-------------------------- -class _Entities: - """Base class to deal with composed objects.""" - def __dxf__(self): - return [] - - def __str__(self): - return ''.join([str(x) for x in self.__dxf__()]) - -#-------------------------- -class _Collection(_Call): - """Base class to expose entities methods to main object.""" - def __init__(self,entities=[]): - self.entities=copy.copy(entities) - #link entities methods to drawing - for attr in dir(self.entities): - if attr[0]!='_': - attrObject=getattr(self.entities,attr) - if callable(attrObject): - setattr(self,attr,attrObject) - -####2) Constants -#---color values -BYBLOCK=0 -BYLAYER=256 - -#---block-type flags (bit coded values, may be combined): -ANONYMOUS =1 # This is an anonymous block generated by hatching, associative dimensioning, other internal operations, or an application -NON_CONSTANT_ATTRIBUTES =2 # This block has non-constant attribute definitions (this bit is not set if the block has any attribute definitions that are constant, or has no attribute definitions at all) -XREF 
=4 # This block is an external reference (xref) -XREF_OVERLAY =8 # This block is an xref overlay -EXTERNAL =16 # This block is externally dependent -RESOLVED =32 # This is a resolved external reference, or dependent of an external reference (ignored on input) -REFERENCED =64 # This definition is a referenced external reference (ignored on input) - -#---mtext flags -#attachment point -TOP_LEFT = 1 -TOP_CENTER = 2 -TOP_RIGHT = 3 -MIDDLE_LEFT = 4 -MIDDLE_CENTER = 5 -MIDDLE_RIGHT = 6 -BOTTOM_LEFT = 7 -BOTTOM_CENTER = 8 -BOTTOM_RIGHT = 9 -#drawing direction -LEFT_RIGHT = 1 -TOP_BOTTOM = 3 -BY_STYLE = 5 #the flow direction is inherited from the associated text style -#line spacing style (optional): -AT_LEAST = 1 #taller characters will override -EXACT = 2 #taller characters will not override - -#---polyline flags -CLOSED =1 # This is a closed polyline (or a polygon mesh closed in the M direction) -CURVE_FIT =2 # Curve-fit vertices have been added -SPLINE_FIT =4 # Spline-fit vertices have been added -POLYLINE_3D =8 # This is a 3D polyline -POLYGON_MESH =16 # This is a 3D polygon mesh -CLOSED_N =32 # The polygon mesh is closed in the N direction -POLYFACE_MESH =64 # The polyline is a polyface mesh -CONTINOUS_LINETYPE_PATTERN =128 # The linetype pattern is generated continuously around the vertices of this polyline - -#---text flags -#horizontal -LEFT = 0 -CENTER = 1 -RIGHT = 2 -ALIGNED = 3 #if vertical alignment = 0 -MIDDLE = 4 #if vertical alignment = 0 -FIT = 5 #if vertical alignment = 0 -#vertical -BASELINE = 0 -BOTTOM = 1 -MIDDLE = 2 -TOP = 3 - -####3) Classes -#---entitities ----------------------------------------------- -#-------------------------- -class Arc(_Entity): - """Arc, angles in degrees.""" - def __init__(self,center=(0,0,0),radius=1, - startAngle=0.0,endAngle=90,**common): - """Angles in degrees.""" - _Entity.__init__(self,**common) - self.center=center - self.radius=radius - self.startAngle=startAngle - self.endAngle=endAngle - def __str__(self): - return ' 0\nARC\n%s%s\n 40\n%s\n 50\n%s\n 51\n%s\n'%\ - (self._common(),_point(self.center), - self.radius,self.startAngle,self.endAngle) - -#----------------------------------------------- -class Circle(_Entity): - """Circle""" - def __init__(self,center=(0,0,0),radius=1,**common): - _Entity.__init__(self,**common) - self.center=center - self.radius=radius - def __str__(self): - return ' 0\nCIRCLE\n%s%s\n 40\n%s\n'%\ - (self._common(),_point(self.center),self.radius) - -#----------------------------------------------- -class Face(_Entity): - """3dface""" - def __init__(self,points,**common): - _Entity.__init__(self,**common) - while len(points)<4: #fix for r12 format - points.append(points[-1]) - self.points=points - - def __str__(self): - out = ' 0\n3DFACE\n%s%s\n' %(self._common(),_points(self.points)) - #print 'deb:out=', out #------------------- - return out - -#----------------------------------------------- -class Insert(_Entity): - """Block instance.""" - def __init__(self,name,point=(0,0,0), - xscale=None,yscale=None,zscale=None, - cols=None,colspacing=None,rows=None,rowspacing=None, - rotation=None, - **common): - _Entity.__init__(self,**common) - self.name=name - self.point=point - self.xscale=xscale - self.yscale=yscale - self.zscale=zscale - self.cols=cols - self.colspacing=colspacing - self.rows=rows - self.rowspacing=rowspacing - self.rotation=rotation - - def __str__(self): - result=' 0\nINSERT\n 2\n%s\n%s%s\n'%\ - (self.name,self._common(),_point(self.point)) - if self.xscale!=None:result+=' 41\n%s\n'%self.xscale - if 
self.yscale!=None:result+=' 42\n%s\n'%self.yscale - if self.zscale!=None:result+=' 43\n%s\n'%self.zscale - if self.rotation:result+=' 50\n%s\n'%self.rotation - if self.cols!=None:result+=' 70\n%s\n'%self.cols - if self.colspacing!=None:result+=' 44\n%s\n'%self.colspacing - if self.rows!=None:result+=' 71\n%s\n'%self.rows - if self.rowspacing!=None:result+=' 45\n%s\n'%self.rowspacing - return result - -#----------------------------------------------- -class Line(_Entity): - """Line""" - def __init__(self,points,**common): - _Entity.__init__(self,**common) - self.points=points - def __str__(self): - return ' 0\nLINE\n%s%s\n' %( - self._common(), _points(self.points)) - - -#----------------------------------------------- -class PolyLine(_Entity): - def __init__(self,points,org_point=[0,0,0],flag=0,width=None,**common): - #width = number, or width = list [width_start=None, width_end=None] - #for 2d-polyline: points = [ [x, y, z, width_start=None, width_end=None, bulge=0 or None], ...] - #for 3d-polyline: points = [ [x, y, z], ...] - #for polyface: points = [points_list, faces_list] - _Entity.__init__(self,**common) - self.points=points - self.org_point=org_point - self.flag=flag - self.polyface = False - self.polyline2d = False - self.faces = [] # dummy value - self.width= None # dummy value - if self.flag & POLYFACE_MESH: - self.polyface=True - self.points=points[0] - self.faces=points[1] - self.p_count=len(self.points) - self.f_count=len(self.faces) - elif not self.flag & POLYLINE_3D: - self.polyline2d = True - if width: - if type(width)!='list': - width=[width,width] - self.width=width - - def __str__(self): - result= ' 0\nPOLYLINE\n%s 70\n%s\n' %(self._common(),self.flag) - result+=' 66\n1\n' - result+='%s\n' %_point(self.org_point) - if self.polyface: - result+=' 71\n%s\n' %self.p_count - result+=' 72\n%s\n' %self.f_count - elif self.polyline2d: - if self.width!=None: result+=' 40\n%s\n 41\n%s\n' %(self.width[0],self.width[1]) - for point in self.points: - result+=' 0\nVERTEX\n' - result+=' 8\n%s\n' %self.layer - if self.polyface: - result+='%s\n' %_point(point[0:3]) - result+=' 70\n192\n' - elif self.polyline2d: - result+='%s\n' %_point(point[0:2]) - if len(point)>4: - width1, width2 = point[3], point[4] - if width1!=None: result+=' 40\n%s\n' %width1 - if width2!=None: result+=' 41\n%s\n' %width2 - if len(point)==6: - bulge = point[5] - if bulge: result+=' 42\n%s\n' %bulge - else: - result+='%s\n' %_point(point[0:3]) - for face in self.faces: - result+=' 0\nVERTEX\n' - result+=' 8\n%s\n' %self.layer - result+='%s\n' %_point(self.org_point) - result+=' 70\n128\n' - result+=' 71\n%s\n' %face[0] - result+=' 72\n%s\n' %face[1] - result+=' 73\n%s\n' %face[2] - if len(face)==4: result+=' 74\n%s\n' %face[3] - result+=' 0\nSEQEND\n' - result+=' 8\n%s\n' %self.layer - return result - -#----------------------------------------------- -class Point(_Entity): - """Point.""" - def __init__(self,points=None,**common): - _Entity.__init__(self,**common) - self.points=points - def __str__(self): # TODO: - return ' 0\nPOINT\n%s%s\n' %(self._common(), - _points(self.points) - ) - -#----------------------------------------------- -class Solid(_Entity): - """Colored solid fill.""" - def __init__(self,points=None,**common): - _Entity.__init__(self,**common) - self.points=points - def __str__(self): - return ' 0\nSOLID\n%s%s\n' %(self._common(), - _points(self.points[:2]+[self.points[3],self.points[2]]) - ) - - -#----------------------------------------------- -class Text(_Entity): - """Single text line.""" - 
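# Group codes emitted by __str__ below: 40 = height, 1 = the text string,
# 50 = rotation, 41 = relative x scale, 51 = oblique angle, 7 = style name,
# 71 = text generation flag, 72/73 = horizontal/vertical justification,
# 11/21/31 = second alignment point (written via _point(alignment, 1)).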
def __init__(self,text='',point=(0,0,0),alignment=None, - flag=None,height=1,justifyhor=None,justifyver=None, - rotation=None,obliqueAngle=None,style=None,xscale=None,**common): - _Entity.__init__(self,**common) - self.text=text - self.point=point - self.alignment=alignment - self.flag=flag - self.height=height - self.justifyhor=justifyhor - self.justifyver=justifyver - self.rotation=rotation - self.obliqueAngle=obliqueAngle - self.style=style - self.xscale=xscale - def __str__(self): - result= ' 0\nTEXT\n%s%s\n 40\n%s\n 1\n%s\n'%\ - (self._common(),_point(self.point),self.height,self.text) - if self.rotation: result+=' 50\n%s\n'%self.rotation - if self.xscale: result+=' 41\n%s\n'%self.xscale - if self.obliqueAngle: result+=' 51\n%s\n'%self.obliqueAngle - if self.style: result+=' 7\n%s\n'%self.style - if self.flag: result+=' 71\n%s\n'%self.flag - if self.justifyhor: result+=' 72\n%s\n'%self.justifyhor - if self.alignment: result+='%s\n'%_point(self.alignment,1) - if self.justifyver: result+=' 73\n%s\n'%self.justifyver - return result - -#----------------------------------------------- -class Mtext(Text): - """Surrogate for mtext, generates some Text instances.""" - def __init__(self,text='',point=(0,0,0),width=250,spacingFactor=1.5,down=0,spacingWidth=None,**options): - Text.__init__(self,text=text,point=point,**options) - if down:spacingFactor*=-1 - self.spacingFactor=spacingFactor - self.spacingWidth=spacingWidth - self.width=width - self.down=down - def __str__(self): - texts=self.text.replace('\r\n','\n').split('\n') - if not self.down:texts.reverse() - result='' - x=y=0 - if self.spacingWidth:spacingWidth=self.spacingWidth - else:spacingWidth=self.height*self.spacingFactor - for text in texts: - while text: - result+='%s\n'%Text(text[:self.width], - point=(self.point[0]+x*spacingWidth, - self.point[1]+y*spacingWidth, - self.point[2]), - alignment=self.alignment,flag=self.flag,height=self.height, - justifyhor=self.justifyhor,justifyver=self.justifyver, - rotation=self.rotation,obliqueAngle=self.obliqueAngle, - style=self.style,xscale=self.xscale,parent=self - ) - text=text[self.width:] - if self.rotation:x+=1 - else:y+=1 - return result[1:] - -#----------------------------------------------- -##class _Mtext(_Entity): -## """Mtext not functioning for minimal dxf.""" -## def __init__(self,text='',point=(0,0,0),attachment=1, -## charWidth=None,charHeight=1,direction=1,height=100,rotation=0, -## spacingStyle=None,spacingFactor=None,style=None,width=100, -## xdirection=None,**common): -## _Entity.__init__(self,**common) -## self.text=text -## self.point=point -## self.attachment=attachment -## self.charWidth=charWidth -## self.charHeight=charHeight -## self.direction=direction -## self.height=height -## self.rotation=rotation -## self.spacingStyle=spacingStyle -## self.spacingFactor=spacingFactor -## self.style=style -## self.width=width -## self.xdirection=xdirection -## def __str__(self): -## input=self.text -## text='' -## while len(input)>250: -## text+='3\n%s\n'%input[:250] -## input=input[250:] -## text+='1\n%s\n'%input -## result= '0\nMTEXT\n%s\n%s\n40\n%s\n41\n%s\n71\n%s\n72\n%s%s\n43\n%s\n50\n%s\n'%\ -## (self._common(),_point(self.point),self.charHeight,self.width, -## self.attachment,self.direction,text, -## self.height, -## self.rotation) -## if self.style:result+='7\n%s\n'%self.style -## if self.xdirection:result+='%s\n'%_point(self.xdirection,1) -## if self.charWidth:result+='42\n%s\n'%self.charWidth -## if self.spacingStyle:result+='73\n%s\n'%self.spacingStyle -## if 
self.spacingFactor:result+='44\n%s\n'%self.spacingFactor -## return result - -#---tables --------------------------------------------------- -#----------------------------------------------- -class Block(_Collection): - """Use list methods to add entities, eg append.""" - def __init__(self,name,layer='0',flag=0,base=(0,0,0),entities=[]): - self.entities=copy.copy(entities) - _Collection.__init__(self,entities) - self.layer=layer - self.name=name - self.flag=0 - self.base=base - def __str__(self): # TODO: - e=''.join([str(x)for x in self.entities]) - return ' 0\nBLOCK\n 8\n%s\n 2\n%s\n 70\n%s\n%s\n 3\n%s\n%s 0\nENDBLK\n'%\ - (self.layer,self.name.upper(),self.flag,_point(self.base),self.name.upper(),e) - -#----------------------------------------------- -class Layer(_Call): - """Layer""" - def __init__(self,name='pydxf',color=7,lineType='continuous',flag=64): - self.name=name - self.color=color - self.lineType=lineType - self.flag=flag - def __str__(self): - return ' 0\nLAYER\n 2\n%s\n 70\n%s\n 62\n%s\n 6\n%s\n'%\ - (self.name.upper(),self.flag,self.color,self.lineType) - -#----------------------------------------------- -class LineType(_Call): - """Custom linetype""" - def __init__(self,name='CONTINUOUS',description='Solid line',elements=[0.0],flag=0): - self.name=name - self.description=description - self.elements=copy.copy(elements) - self.flag=flag - def __str__(self): - result = ' 0\nLTYPE\n 2\n%s\n 70\n%s\n 3\n%s\n 72\n65\n'%\ - (self.name.upper(),self.flag,self.description) - if self.elements: - elements = ' 73\n%s\n' %(len(self.elements)-1) - elements += ' 40\n%s\n' %(self.elements[0]) - for e in self.elements[1:]: - elements += ' 49\n%s\n' %e - result += elements - return result - - -#----------------------------------------------- -class Style(_Call): - """Text style""" - def __init__(self,name='standard',flag=0,height=0,widthFactor=1.0,obliqueAngle=0.0, - mirror=0,lastHeight=1,font='arial.ttf',bigFont=''): - self.name=name - self.flag=flag - self.height=height - self.widthFactor=widthFactor - self.obliqueAngle=obliqueAngle - self.mirror=mirror - self.lastHeight=lastHeight - self.font=font - self.bigFont=bigFont - def __str__(self): - return ' 0\nSTYLE\n 2\n%s\n 70\n%s\n 40\n%s\n 41\n%s\n 50\n%s\n 71\n%s\n 42\n%s\n 3\n%s\n 4\n%s\n'%\ - (self.name.upper(),self.flag,self.flag,self.widthFactor, - self.obliqueAngle,self.mirror,self.lastHeight, - self.font.upper(),self.bigFont.upper()) - -#----------------------------------------------- -class VPort(_Call): - def __init__(self,name,flag=0, - leftBottom=(0.0,0.0), - rightTop=(1.0,1.0), - center=(0.5,0.5), - snap_base=(0.0,0.0), - snap_spacing=(0.1,0.1), - grid_spacing=(0.1,0.1), - direction=(0.0,0.0,1.0), - target=(0.0,0.0,0.0), - height=1.0, - ratio=1.0, - lens=50, - frontClipping=0, - backClipping=0, - snap_rotation=0, - twist=0, - mode=0, - circle_zoom=100, - fast_zoom=1, - ucsicon=1, - snap_on=0, - grid_on=0, - snap_style=0, - snap_isopair=0 - ): - self.name=name - self.flag=flag - self.leftBottom=leftBottom - self.rightTop=rightTop - self.center=center - self.snap_base=snap_base - self.snap_spacing=snap_spacing - self.grid_spacing=grid_spacing - self.direction=direction - self.target=target - self.height=float(height) - self.ratio=float(ratio) - self.lens=float(lens) - self.frontClipping=float(frontClipping) - self.backClipping=float(backClipping) - self.snap_rotation=float(snap_rotation) - self.twist=float(twist) - self.mode=mode - self.circle_zoom=circle_zoom - self.fast_zoom=fast_zoom - self.ucsicon=ucsicon - 
self.snap_on=snap_on - self.grid_on=grid_on - self.snap_style=snap_style - self.snap_isopair=snap_isopair - def __str__(self): - output = [' 0', 'VPORT', - ' 2', self.name, - ' 70', self.flag, - _point(self.leftBottom), - _point(self.rightTop,1), - _point(self.center,2), # View center point (in DCS) - _point(self.snap_base,3), - _point(self.snap_spacing,4), - _point(self.grid_spacing,5), - _point(self.direction,6), #view direction from target (in WCS) - _point(self.target,7), - ' 40', self.height, - ' 41', self.ratio, - ' 42', self.lens, - ' 43', self.frontClipping, - ' 44', self.backClipping, - ' 50', self.snap_rotation, - ' 51', self.twist, - ' 71', self.mode, - ' 72', self.circle_zoom, - ' 73', self.fast_zoom, - ' 74', self.ucsicon, - ' 75', self.snap_on, - ' 76', self.grid_on, - ' 77', self.snap_style, - ' 78', self.snap_isopair - ] - - output_str = '' - for s in output: - output_str += '%s\n' %s - return output_str - - - -#----------------------------------------------- -class View(_Call): - def __init__(self,name,flag=0, - width=1, - height=1, - center=(0.5,0.5), - direction=(0,0,1), - target=(0,0,0), - lens=50, - frontClipping=0, - backClipping=0, - twist=0,mode=0 - ): - self.name=name - self.flag=flag - self.width=float(width) - self.height=float(height) - self.center=center - self.direction=direction - self.target=target - self.lens=float(lens) - self.frontClipping=float(frontClipping) - self.backClipping=float(backClipping) - self.twist=float(twist) - self.mode=mode - def __str__(self): - output = [' 0', 'VIEW', - ' 2', self.name, - ' 70', self.flag, - ' 40', self.height, - _point(self.center), - ' 41', self.width, - _point(self.direction,1), - _point(self.target,2), - ' 42', self.lens, - ' 43', self.frontClipping, - ' 44', self.backClipping, - ' 50', self.twist, - ' 71', self.mode - ] - output_str = '' - for s in output: - output_str += '%s\n' %s - return output_str - -#----------------------------------------------- -def ViewByWindow(name,leftBottom=(0,0),rightTop=(1,1),**options): - width=abs(rightTop[0]-leftBottom[0]) - height=abs(rightTop[1]-leftBottom[1]) - center=((rightTop[0]+leftBottom[0])*0.5,(rightTop[1]+leftBottom[1])*0.5) - return View(name=name,width=width,height=height,center=center,**options) - -#---drawing -#----------------------------------------------- -class Drawing(_Collection): - """Dxf drawing. 
Use append or any other list methods to add objects.""" - def __init__(self,insbase=(0.0,0.0,0.0),extmin=(0.0,0.0,0.0),extmax=(0.0,0.0,0.0), - layers=[Layer()],linetypes=[LineType()],styles=[Style()],blocks=[], - views=[],vports=[],entities=None,fileName='test.dxf'): - # TODO: replace list with None,arial - if not entities: - entities=[] - _Collection.__init__(self,entities) - self.insbase=insbase - self.extmin=extmin - self.extmax=extmax - self.layers=copy.copy(layers) - self.linetypes=copy.copy(linetypes) - self.styles=copy.copy(styles) - self.views=copy.copy(views) - self.vports=copy.copy(vports) - self.blocks=copy.copy(blocks) - self.fileName=fileName - #private - #self.acadver='9\n$ACADVER\n1\nAC1006\n' - self.acadver=' 9\n$ACADVER\n 1\nAC1009\n' - """DXF AutoCAD-Release format codes - AC1021 2008, 2007 - AC1018 2006, 2005, 2004 - AC1015 2002, 2000i, 2000 - AC1014 R14,14.01 - AC1012 R13 - AC1009 R12,11 - AC1006 R10 - AC1004 R9 - AC1002 R2.6 - AC1.50 R2.05 - """ - - def _name(self,x): - """Helper function for self._point""" - return ' 9\n$%s\n' %x.upper() - - def _point(self,name,x): - """Point setting from drawing like extmin,extmax,...""" - return '%s%s' %(self._name(name),_point(x)) - - def _section(self,name,x): - """Sections like tables,blocks,entities,...""" - if x: xstr=''.join(x) - else: xstr='' - return ' 0\nSECTION\n 2\n%s\n%s 0\nENDSEC\n'%(name.upper(),xstr) - - def _table(self,name,x): - """Tables like ltype,layer,style,...""" - if x: xstr=''.join(x) - else: xstr='' - return ' 0\nTABLE\n 2\n%s\n 70\n%s\n%s 0\nENDTAB\n'%(name.upper(),len(x),xstr) - - def __str__(self): - """Returns drawing as dxf string.""" - header=[self.acadver]+[self._point(attr,getattr(self,attr))+'\n' for attr in _HEADER_POINTS] - header=self._section('header',header) - - tables=[self._table('vport',[str(x) for x in self.vports]), - self._table('ltype',[str(x) for x in self.linetypes]), - self._table('layer',[str(x) for x in self.layers]), - self._table('style',[str(x) for x in self.styles]), - self._table('view',[str(x) for x in self.views]), - ] - tables=self._section('tables',tables) - - blocks=self._section('blocks',[str(x) for x in self.blocks]) - - entities=self._section('entities',[str(x) for x in self.entities]) - - all=''.join([header,tables,blocks,entities,' 0\nEOF\n']) - return all - - def saveas(self,fileName): - self.fileName=fileName - self.save() - - def save(self): - test=open(self.fileName,'w') - test.write(str(self)) - test.close() - - -#---extras -#----------------------------------------------- -class Rectangle(_Entity): - """Rectangle, creates lines.""" - def __init__(self,point=(0,0,0),width=1,height=1,solid=None,line=1,**common): - _Entity.__init__(self,**common) - self.point=point - self.width=width - self.height=height - self.solid=solid - self.line=line - def __str__(self): - result='' - points=[self.point,(self.point[0]+self.width,self.point[1],self.point[2]), - (self.point[0]+self.width,self.point[1]+self.height,self.point[2]), - (self.point[0],self.point[1]+self.height,self.point[2]),self.point] - if self.solid: - result+= Solid(points=points[:-1],parent=self.solid) - if self.line: - for i in range(4): - result+= Line(points=[points[i],points[i+1]],parent=self) - return result[1:] - -#----------------------------------------------- -class LineList(_Entity): - """Like polyline, but built of individual lines.""" - def __init__(self,points=[],org_point=[0,0,0],closed=0,**common): - _Entity.__init__(self,**common) - self.closed=closed - self.points=copy.copy(points) - def 
__str__(self): - if self.closed:points=self.points+[self.points[0]] - else: points=self.points - result='' - for i in range(len(points)-1): - result+= Line(points=[points[i],points[i+1]],parent=self) - return result[1:] - -#----------------------------------------------------- -def test(): - #Blocks - b=Block('test') - b.append(Solid(points=[(0,0,0),(1,0,0),(1,1,0),(0,1,0)],color=1)) - b.append(Arc(center=(1,0,0),color=2)) - - #Drawing - d=Drawing() - #tables - d.blocks.append(b) #table blocks - d.styles.append(Style()) #table styles - d.views.append(View('Normal')) #table view - d.views.append(ViewByWindow('Window',leftBottom=(1,0),rightTop=(2,1))) #idem - - #entities - d.append(Circle(center=(1,1,0),color=3)) - d.append(Face(points=[(0,0,0),(1,0,0),(1,1,0),(0,1,0)],color=4)) - d.append(Insert('test',point=(3,3,3),cols=5,colspacing=2)) - d.append(Line(points=[(0,0,0),(1,1,1)])) - d.append(Mtext('Click on Ads\nmultiple lines with mtext',point=(1,1,1),color=5,rotation=90)) - d.append(Text('Please donate!',point=(3,0,1))) - #d.append(Rectangle(point=(2,2,2),width=4,height=3,color=6,solid=Solid(color=2))) - d.append(Solid(points=[(4,4,0),(5,4,0),(7,8,0),(9,9,0)],color=3)) - #d.append(PolyLine(points=[(1,1,1),(2,1,1),(2,2,1),(1,2,1)],flag=1,color=1)) - - #d.saveas('c:\\test.dxf') - d.saveas('test.dxf') - -#----------------------------------------------------- -if __name__=='__main__': - if not copy: - Draw.PupMenu('Error%t|This script requires a full python install') - else: test() - \ No newline at end of file diff --git a/release/scripts/bpymodules/dxfReader.py b/release/scripts/bpymodules/dxfReader.py deleted file mode 100644 index df4ebc309e4..00000000000 --- a/release/scripts/bpymodules/dxfReader.py +++ /dev/null @@ -1,381 +0,0 @@ -"""This module provides a function for reading dxf files and parsing them into a useful tree of objects and data. - - The convert function is called by the readDXF fuction to convert dxf strings into the correct data based - on their type code. readDXF expects a (full path) file name as input. -""" - -# -------------------------------------------------------------------------- -# DXF Reader v0.9 by Ed Blake (AKA Kitsu) -# 2008.05.08 modif.def convert() by Remigiusz Fiedler (AKA migius) -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
-# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - - -#from dxfImportObjects import * - -class Object: - """Empty container class for dxf objects""" - - def __init__(self, _type='', block=False): - """_type expects a string value.""" - self.type = _type - self.name = '' - self.data = [] - - def __str__(self): - if self.name: - return self.name - else: - return self.type - - def __repr__(self): - return str(self.data) - - def get_type(self, kind=''): - """Despite the name, this method actually returns all objects of type 'kind' from self.data.""" - if type: - objects = [] - for item in self.data: - if type(item) != list and item.type == kind: - # we want this type of object - objects.append(item) - elif type(item) == list and item[0] == kind: - # we want this type of data - objects.append(item[1]) - return objects - - -class InitializationError(Exception): pass - -class StateMachine: - """(finite) State Machine from the great David Mertz's great Charming Python article.""" - - def __init__(self): - self.handlers = [] - self.startState = None - self.endStates = [] - - def add_state(self, handler, end_state=0): - """All states and handlers are functions which return - a state and a cargo.""" - self.handlers.append(handler) - if end_state: - self.endStates.append(handler) - def set_start(self, handler): - """Sets the starting handler function.""" - self.startState = handler - - - def run(self, cargo=None): - if not self.startState: - raise InitializationError,\ - "must call .set_start() before .run()" - if not self.endStates: - raise InitializationError, \ - "at least one state must be an end_state" - handler = self.startState - while 1: - (newState, cargo) = handler(cargo) - #print cargo - if newState in self.endStates: - return newState(cargo) - #break - elif newState not in self.handlers: - raise RuntimeError, "Invalid target %s" % newState - else: - handler = newState - -def get_name(data): - """Get the name of an object from its object data. - - Returns a pair of (data_item, name) where data_item is the list entry where the name was found - (the data_item can be used to remove the entry from the object data). Be sure to check - name not None before using the returned values! - """ - value = None - for item in data: - if item[0] == 2: - value = item[1] - break - return item, value - -def get_layer(data): - """Expects object data as input. - - Returns (entry, layer_name) where entry is the data item that provided the layer name. - """ - value = None - for item in data: - if item[0] == 8: - value = item[1] - break - return item, value - - -def convert(code, value): - """Convert a string to the correct Python type based on its dxf code. - code types: - ints = 60-79, 170-179, 270-289, 370-389, 400-409, 1060-1070 - longs = 90-99, 420-429, 440-459, 1071 - floats = 10-39, 40-59, 110-139, 140-149, 210-239, 460-469, 1010-1059 - hex = 105, 310-379, 390-399 - strings = 0-9, 100, 102, 300-309, 410-419, 430-439, 470-479, 999, 1000-1009 - """ - if 59 < code < 80 or 169 < code < 180 or 269 < code < 290 or 369 < code < 390 or 399 < code < 410 or 1059 < code < 1071: - value = int(float(value)) - elif 89 < code < 100 or 419 < code < 430 or 439 < code < 460 or code == 1071: - value = long(float(value)) - elif 9 < code < 60 or 109 < code < 150 or 209 < code < 240 or 459 < code < 470 or 1009 < code < 1060: - value = float(value) - elif code == 105 or 309 < code < 380 or 389 < code < 400: - value = int(value, 16) # should be left as string? 
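	# Examples of the mapping above (illustrative values only):
	#   convert(10, '1.5')  -> 1.5      (float: 10-59 are coordinates/sizes)
	#   convert(70, '64')   -> 64       (int: 60-79 are flags/counts)
	#   convert(330, '1F')  -> 31       (hex handle: 310-379)
	#   convert(8, 'WALLS') -> 'WALLS'  (string: codes 0-9 stay as text)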
- else: # it's already a string so do nothing - pass - return value - - -def findObject(infile, kind=''): - """Finds the next occurance of an object.""" - obj = False - while 1: - line = infile.readline() - if not line: # readline returns '' at eof - return False - if not obj: # We're still looking for our object code - if line.lower().strip() == '0': - obj = True # found it - else: # we are in an object definition - if kind: # if we're looking for a particular kind - if line.lower().strip() == kind: - obj = Object(line.lower().strip()) - break - else: # otherwise take anything non-numeric - if line.lower().strip() not in string.digits: - obj = Object(line.lower().strip()) - break - obj = False # whether we found one or not it's time to start over - return obj - -def handleObject(infile): - """Add data to an object until end of object is found.""" - line = infile.readline() - if line.lower().strip() == 'section': - return 'section' # this would be a problem - elif line.lower().strip() == 'endsec': - return 'endsec' # this means we are done with a section - else: # add data to the object until we find a new object - obj = Object(line.lower().strip()) - obj.name = obj.type - done = False - data = [] - while not done: - line = infile.readline() - if not data: - if line.lower().strip() == '0': - #we've found an object, time to return - return obj - else: - # first part is always an int - data.append(int(line.lower().strip())) - else: - data.append(convert(data[0], line.strip())) - obj.data.append(data) - data = [] - -def handleTable(table, infile): - """Special handler for dealing with nested table objects.""" - item, name = get_name(table.data) - if name: # We should always find a name - table.data.remove(item) - table.name = name.lower() - # This next bit is from handleObject - # handleObject should be generalized to work with any section like object - while 1: - obj = handleObject(infile) - if obj.type == 'table': - print "Warning: previous table not closed!" - return table - elif obj.type == 'endtab': - return table # this means we are done with the table - else: # add objects to the table until one of the above is found - table.data.append(obj) - - - - -def handleBlock(block, infile): - """Special handler for dealing with nested table objects.""" - item, name = get_name(block.data) - if name: # We should always find a name - block.data.remove(item) - block.name = name - # This next bit is from handleObject - # handleObject should be generalized to work with any section like object - while 1: - obj = handleObject(infile) - if obj.type == 'block': - print "Warning: previous block not closed!" - return block - elif obj.type == 'endblk': - return block # this means we are done with the table - else: # add objects to the table until one of the above is found - block.data.append(obj) - - - - -"""These are the states/functions used in the State Machine. -states: - start - find first section - start_section - add data, find first object - object - add obj-data, watch for next obj (called directly by start_section) - end_section - look for next section or eof - end - return results -""" - -def start(cargo): - """Expects the infile as cargo, initializes the cargo.""" - #print "Entering start state!" 
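	# Every state handler shares the contract StateMachine.run() relies on:
	# it takes the current cargo and returns (next_handler, new_cargo), so the
	# parser threads (infile, drawing, ...) from state to state until it
	# reaches an end state (end or error).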
- infile = cargo - drawing = Object('drawing') - section = findObject(infile, 'section') - if section: - return start_section, (infile, drawing, section) - else: - return error, (infile, "Failed to find any sections!") - -def start_section(cargo): - """Expects [infile, drawing, section] as cargo, builds a nested section object.""" - #print "Entering start_section state!" - infile = cargo[0] - drawing = cargo[1] - section = cargo[2] - # read each line, if it is an object declaration go to object mode - # otherwise create a [index, data] pair and add it to the sections data. - done = False - data = [] - while not done: - line = infile.readline() - - if not data: # if we haven't found a dxf code yet - if line.lower().strip() == '0': - # we've found an object - while 1: # no way out unless we find an end section or a new section - obj = handleObject(infile) - if obj == 'section': # shouldn't happen - print "Warning: failed to close previous section!" - return end_section, (infile, drawing) - elif obj == 'endsec': # This section is over, look for the next - drawing.data.append(section) - return end_section, (infile, drawing) - elif obj.type == 'table': # tables are collections of data - obj = handleTable(obj, infile) # we need to find all there contents - section.data.append(obj) # before moving on - elif obj.type == 'block': # the same is true of blocks - obj = handleBlock(obj, infile) # we need to find all there contents - section.data.append(obj) # before moving on - else: # found another sub-object - section.data.append(obj) - else: - data.append(int(line.lower().strip())) - else: # we have our code, now we just need to convert the data and add it to our list. - data.append(convert(data[0], line.strip())) - section.data.append(data) - data = [] -def end_section(cargo): - """Expects (infile, drawing) as cargo, searches for next section.""" - #print "Entering end_section state!" - infile = cargo[0] - drawing = cargo[1] - section = findObject(infile, 'section') - if section: - return start_section, (infile, drawing, section) - else: - return end, (infile, drawing) - -def end(cargo): - """Expects (infile, drawing) as cargo, called when eof has been reached.""" - #print "Entering end state!" - infile = cargo[0] - drawing = cargo[1] - #infile.close() - return drawing - -def error(cargo): - """Expects a (infile, string) as cargo, called when there is an error during processing.""" - #print "Entering error state!" - infile = cargo[0] - err = cargo[1] - infile.close() - print "There has been an error:" - print err - return False - -def readDXF(filename, objectify): - """Given a file name try to read it as a dxf file. - - Output is an object with the following structure - drawing - header - header data - classes - class data - tables - table data - blocks - block data - entities - entity data - objects - object data - where foo data is a list of sub-objects. True object data - is of the form [code, data]. 
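	Once parsing succeeds, each named section is also exposed as an attribute
	of the returned drawing, e.g. drawing.entities.data holds the entity
	objects after the supplied objectify function has been applied.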
-""" - infile = open(filename) - - sm = StateMachine() - sm.add_state(error, True) - sm.add_state(end, True) - sm.add_state(start_section) - sm.add_state(end_section) - sm.add_state(start) - sm.set_start(start) - try: - drawing = sm.run(infile) - if drawing: - drawing.name = filename - for obj in drawing.data: - item, name = get_name(obj.data) - if name: - obj.data.remove(item) - obj.name = name.lower() - setattr(drawing, name.lower(), obj) - # Call the objectify function to cast - # raw objects into the right types of object - obj.data = objectify(obj.data) - #print obj.name - finally: - infile.close() - return drawing -if __name__ == "__main__": - filename = r".\examples\block-test.dxf" - drawing = readDXF(filename) - for item in drawing.entities.data: - print item diff --git a/release/scripts/bpymodules/mesh_gradient.py b/release/scripts/bpymodules/mesh_gradient.py deleted file mode 100644 index e582a30152b..00000000000 --- a/release/scripts/bpymodules/mesh_gradient.py +++ /dev/null @@ -1,229 +0,0 @@ -# This is not to be used directly, vertexGradientPick can be used externaly - -import Blender -import BPyMesh -import BPyWindow - -mouseViewRay= BPyWindow.mouseViewRay -from Blender import Mathutils, Window, Scene, Draw, sys -from Blender.Mathutils import Vector, Intersect, LineIntersect, AngleBetweenVecs -LMB= Window.MButs['L'] - -def mouseup(): - # Loop until click - mouse_buttons = Window.GetMouseButtons() - while not mouse_buttons & LMB: - sys.sleep(10) - mouse_buttons = Window.GetMouseButtons() - while mouse_buttons & LMB: - sys.sleep(10) - mouse_buttons = Window.GetMouseButtons() - -def mousedown_wait(): - # If the menu has just been pressed dont use its mousedown, - mouse_buttons = Window.GetMouseButtons() - while mouse_buttons & LMB: - mouse_buttons = Window.GetMouseButtons() - -eps= 0.0001 -def vertexGradientPick(ob, MODE): - #MODE 0 == VWEIGHT, 1 == VCOL - - me= ob.getData(mesh=1) - if not me.faceUV: me.faceUV= True - - Window.DrawProgressBar (0.0, '') - - mousedown_wait() - - if MODE==0: - act_group= me.activeGroup - if act_group == None: - mousedown_wait() - Draw.PupMenu('Error, mesh has no active group.') - return - - # Loop until click - Window.DrawProgressBar (0.25, 'Click to set gradient start') - mouseup() - - obmat= ob.matrixWorld - screen_x, screen_y = Window.GetMouseCoords() - mouseInView, OriginA, DirectionA = mouseViewRay(screen_x, screen_y, obmat) - if not mouseInView or not OriginA: - return - - # get the mouse weight - - if MODE==0: - pickValA= BPyMesh.pickMeshGroupWeight(me, act_group, OriginA, DirectionA) - if MODE==1: - pickValA= BPyMesh.pickMeshGroupVCol(me, OriginA, DirectionA) - - Window.DrawProgressBar (0.75, 'Click to set gradient end') - mouseup() - - TOALPHA= Window.GetKeyQualifiers() & Window.Qual.SHIFT - - screen_x, screen_y = Window.GetMouseCoords() - mouseInView, OriginB, DirectionB = mouseViewRay(screen_x, screen_y, obmat) - if not mouseInView or not OriginB: - return - - if not TOALPHA: # Only get a second opaque value if we are not blending to alpha - if MODE==0: pickValB= BPyMesh.pickMeshGroupWeight(me, act_group, OriginB, DirectionB) - else: - pickValB= BPyMesh.pickMeshGroupVCol(me, OriginB, DirectionB) - else: - if MODE==0: pickValB= 0.0 - else: pickValB= [0.0, 0.0, 0.0] # Dummy value - - # Neither points touched a face - if pickValA == pickValB == None: - return - - # clicking on 1 non face is fine. 
just set the weight to 0.0 - if pickValA==None: - pickValA= 0.0 - - # swap A/B - OriginA, OriginB= OriginB, OriginA - DirectionA, DirectionB= DirectionB, DirectionA - pickValA, pickValB= pickValA, pickValB - - TOALPHA= True - - if pickValB==None: - pickValB= 0.0 - TOALPHA= True - - # set up 2 lines so we can measure their distances and calc the gradient - - # make a line 90d to the grad in screenspace. - if (OriginA-OriginB).length <= eps: # Persp view. same origin different direction - cross_grad= DirectionA.cross(DirectionB) - ORTHO= False - - else: # Ortho - Same direction, different origin - cross_grad= DirectionA.cross(OriginA-OriginB) - ORTHO= True - - cross_grad.normalize() - cross_grad= cross_grad * 100 - - lineA= (OriginA, OriginA+(DirectionA*100)) - lineB= (OriginB, OriginB+(DirectionB*100)) - - if not ORTHO: - line_angle= AngleBetweenVecs(lineA[1], lineB[1])/2 - line_mid= (lineA[1]+lineB[1])*0.5 - - VSEL= [False] * (len(me.verts)) - - # Get the selected faces and apply the selection to the verts. - for f in me.faces: - if f.sel: - for v in f.v: - VSEL[v.index]= True - groupNames, vWeightDict= BPyMesh.meshWeight2Dict(me) - - - - def grad_weight_from_co(v): - ''' - Takes a vert and retuens its gradient radio between A and B - ''' - - if not VSEL[v.index]: # Not bart of a selected face? - return None, None - - v_co= v.co - # make a line 90d to the 2 lines the user clicked. - vert_line= (v_co - cross_grad, v_co + cross_grad) - - xA= LineIntersect(vert_line[0], vert_line[1], lineA[0], lineA[1]) - xB= LineIntersect(vert_line[0], vert_line[1], lineB[0], lineB[1]) - - if not xA or not xB: # Should never happen but support it anyhow - return None, None - - wA= (xA[0]-xA[1]).length - wB= (xB[0]-xB[1]).length - - wTot= wA+wB - if not wTot: # lines are on the same point. - return None, None - - ''' - Get the length of the line between both intersections on the - 2x view lines. - if the dist between lineA+VertLine and lineB+VertLine is - greater then the lenth between lineA and lineB intersection points, it means - that the verts are not inbetween the 2 lines. - ''' - lineAB_length= (xA[1]-xB[1]).length - - # normalzie - wA= wA/wTot - wB= wB/wTot - - if ORTHO: # Con only use line length method with parelelle lines - if wTot > lineAB_length+eps: - # vert is outside the range on 1 side. see what side of the grad - if wA>wB: wA, wB= 1.0, 0.0 - else: wA, wB= 0.0, 1.0 - else: - # PERSP, lineA[0] is the same origin as lineB[0] - - # Either xA[0] or xB[0] can be used instead of a possible x_mid between the 2 - # as long as the point is inbetween lineA and lineB it dosent matter. - a= AngleBetweenVecs(lineA[0]-xA[0], line_mid) - if a>line_angle: - # vert is outside the range on 1 side. see what side of the grad - if wA>wB: wA, wB= 1.0, 0.0 - else: wA, wB= 0.0, 1.0 - - return wA, wB - - - grad_weights= [grad_weight_from_co(v) for v in me.verts] - - - if MODE==0: - for v in me.verts: - i= v.index - if VSEL[i]: - wA, wB = grad_weights[i] - if wA != None: # and wB - if TOALPHA: - # Do alpha by using the exiting weight for - try: pickValB= vWeightDict[i][act_group] - except: pickValB= 0.0 # The weights not there? 
assume zero - # Mix2 2 opaque weights - vWeightDict[i][act_group]= pickValB*wA + pickValA*wB - - else: # MODE==1 VCol - for f in me.faces: - if f.sel: - f_v= f.v - for i in xrange(len(f_v)): - v= f_v[i] - wA, wB = grad_weights[v.index] - - c= f.col[i] - - if TOALPHA: - pickValB= c.r, c.g, c.b - - c.r = int(pickValB[0]*wA + pickValA[0]*wB) - c.g = int(pickValB[1]*wA + pickValA[1]*wB) - c.b = int(pickValB[2]*wA + pickValA[2]*wB) - - - - - # Copy weights back to the mesh. - BPyMesh.dict2MeshWeight(me, groupNames, vWeightDict) - Window.DrawProgressBar (1.0, '') - - diff --git a/release/scripts/bpymodules/meshtools.py b/release/scripts/bpymodules/meshtools.py deleted file mode 100644 index 274a12ea6da..00000000000 --- a/release/scripts/bpymodules/meshtools.py +++ /dev/null @@ -1,355 +0,0 @@ -# $Id$ -# -# +---------------------------------------------------------+ -# | Copyright (c) 2001 Anthony D'Agostino | -# | http://www.redrival.com/scorpius | -# | scorpius@netzero.com | -# | September 28, 2002 | -# +---------------------------------------------------------+ -# | Common Functions & Global Variables For All IO Modules | -# +---------------------------------------------------------+ - -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** - -import Blender -import sys - -show_progress = 1 # Set to 0 for faster performance -average_vcols = 1 # Off for per-face, On for per-vertex -overwrite_mesh_name = 0 # Set to 0 to increment object-name version - -blender_version = Blender.Get('version') -blender_version_str = `blender_version`[0] + '.' + `blender_version`[1:] - -try: - import operator -except: - msg = "Error: you need a full Python install to run this script." - meshtools.print_boxed(msg) - Blender.Draw.PupMenu("ERROR%t|"+msg) - -# ================================= -# === Append Faces To Face List === -# ================================= -def append_faces(mesh, faces, facesuv, uvcoords): - for i in xrange(len(faces)): - if not i%100 and show_progress: Blender.Window.DrawProgressBar(float(i)/len(faces), "Generating Faces") - numfaceverts=len(faces[i]) - if numfaceverts == 2: #This is not a face is an edge - if mesh.edges == None: #first run - mesh.addEdgeData() - #rev_face = revert(cur_face) - i1 = faces[i][0] - i2 = faces[i][1] - ee = mesh.addEdge(mesh.verts[i1],mesh.verts[i2]) - ee.flag |= Blender.NMesh.EdgeFlags.EDGEDRAW - ee.flag |= Blender.NMesh.EdgeFlags.EDGERENDER - elif numfaceverts in [3,4]: # This face is a triangle or quad - face = Blender.NMesh.Face() - for j in xrange(numfaceverts): - index = faces[i][j] - face.v.append(mesh.verts[index]) - if len(uvcoords) > 1: - uvidx = facesuv[i][j] - face.uv.append(uvcoords[uvidx]) - face.mode = 0 - face.col = [Blender.NMesh.Col()]*4 - mesh.faces.append(face) - else: # Triangulate n-sided convex polygon. 
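		# Fan triangulation: index a stays pinned at 0 while (b, c) walks
		# around the polygon, yielding triangles (0,1,2), (0,2,3), (0,3,4), ...
		# which is valid for the convex faces this branch handles.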
- a, b, c = 0, 1, 2 # Indices of first triangle. - for j in xrange(numfaceverts-2): # Number of triangles in polygon. - face = Blender.NMesh.Face() - face.v.append(mesh.verts[faces[i][a]]) - face.v.append(mesh.verts[faces[i][b]]) - face.v.append(mesh.verts[faces[i][c]]) - b = c; c += 1 - mesh.faces.append(face) - #face.smooth = 1 - -# =================================== -# === Append Verts to Vertex List === -# =================================== -def append_verts(mesh, verts, normals): - #print "Number of normals:", len(normals) - #print "Number of verts :", len(verts) - for i in xrange(len(verts)): - if not i%100 and show_progress: Blender.Window.DrawProgressBar(float(i)/len(verts), "Generating Verts") - x, y, z = verts[i] - mesh.verts.append(Blender.NMesh.Vert(x, y, z)) - if normals: - mesh.verts[i].no[0] = normals[i][0] - mesh.verts[i].no[1] = normals[i][1] - mesh.verts[i].no[2] = normals[i][2] - -# =========================== -# === Create Blender Mesh === -# =========================== -def create_mesh(verts, faces, objname, facesuv=[], uvcoords=[], normals=[]): - if normals: normal_flag = 0 - else: normal_flag = 1 - mesh = Blender.NMesh.GetRaw() - append_verts(mesh, verts, normals) - append_faces(mesh, faces, facesuv, uvcoords) - if not overwrite_mesh_name: - objname = versioned_name(objname) - ob= Blender.NMesh.PutRaw(mesh, objname, normal_flag) # Name the Mesh - ob.name= objname # Name the Object - Blender.Redraw() - -# ============================== -# === Increment Name Version === -# ============================== -def versioned_name(objname): - existing_names = [] - for object in Blender.Object.Get(): - existing_names.append(object.name) - existing_names.append(object.getData(name_only=1)) - if objname in existing_names: # don't over-write other names - try: - name, ext = objname.split('.') - except ValueError: - name, ext = objname, '' - try: - num = int(ext) - root = name - except ValueError: - root = objname - for i in xrange(1, 1000): - objname = "%s.%03d" % (root, i) - if objname not in existing_names: - break - return objname - -# =========================== -# === Print Text In A Box === -# =========================== -def print_boxed(text): - lines = text.splitlines() - maxlinelen = max(map(len, lines)) - if sys.platform[:3] == "win": - print chr(218)+chr(196) + chr(196)*maxlinelen + chr(196)+chr(191) - for line in lines: - print chr(179) + ' ' + line.ljust(maxlinelen) + ' ' + chr(179) - print chr(192)+chr(196) + chr(196)*maxlinelen + chr(196)+chr(217) - else: - print '+-' + '-'*maxlinelen + '-+' - for line in lines: print '| ' + line.ljust(maxlinelen) + ' |' - print '+-' + '-'*maxlinelen + '-+' - print '\a\r', # beep when done - -# =============================================== -# === Get euler angles from a rotation matrix === -# =============================================== -def mat2euler(mat): - angle_y = -math.asin(mat[0][2]) - c = math.cos(angle_y) - if math.fabs(c) > 0.005: - angle_x = math.atan2(mat[1][2]/c, mat[2][2]/c) - angle_z = math.atan2(mat[0][1]/c, mat[0][0]/c) - else: - angle_x = 0.0 - angle_z = -math.atan2(mat[1][0], mat[1][1]) - return (angle_x, angle_y, angle_z) - -# ========================== -# === Transpose A Matrix === -# ========================== -def transpose(A): - S = len(A) - T = len(A[0]) - B = [[None]*S for i in xrange(T)] - for i in xrange(T): - for j in xrange(S): - B[i][j] = A[j][i] - return B - -# ======================= -# === Apply Transform === -# ======================= -def apply_transform(vertex, matrix): - x, y, z = vertex 
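	# Row-vector convention: the result is [x y z 1] * matrix, so the upper
	# 3x3 rows are the basis axes and row 3 of the 4x4 matrix carries the
	# translation.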
- xloc, yloc, zloc = matrix[3][0], matrix[3][1], matrix[3][2] - xcomponent = x*matrix[0][0] + y*matrix[1][0] + z*matrix[2][0] + xloc - ycomponent = x*matrix[0][1] + y*matrix[1][1] + z*matrix[2][1] + yloc - zcomponent = x*matrix[0][2] + y*matrix[1][2] + z*matrix[2][2] + zloc - vertex = [xcomponent, ycomponent, zcomponent] - return vertex - -# ========================= -# === Has Vertex Colors === -# ========================= -def has_vertex_colors(mesh): - # My replacement/workaround for hasVertexColours() - # The docs say: - # "Warning: If a mesh has both vertex colours and textured faces, - # this function will return False. This is due to the way Blender - # deals internally with the vertex colours array (if there are - # textured faces, it is copied to the textured face structure and - # the original array is freed/deleted)." - try: - return mesh.faces[0].col[0] - except: - return 0 - -# =========================== -# === Generate Edge Table === -# =========================== -def generate_edgetable(mesh): - edge_table = {} - numfaces = len(mesh.faces) - - for i in xrange(numfaces): - if not i%100 and show_progress: - Blender.Window.DrawProgressBar(float(i)/numfaces, "Generating Edge Table") - if len(mesh.faces[i].v) == 4: # Process Quadrilaterals - generate_entry_from_quad(mesh, i, edge_table) - elif len(mesh.faces[i].v) == 3: # Process Triangles - generate_entry_from_tri(mesh, i, edge_table) - else: # Skip This Face - print "Face #", i, "was skipped." - - # === Sort Edge_Table Keys & Add Edge Indices === - i = 0 - keys = edge_table.keys() - keys.sort() - for key in keys: - edge_table[key][6] = i - i += 1 - - # === Replace Tuples With Indices === - for key in keys: - for i in [2,3,4,5]: - if edge_table.has_key(edge_table[key][i]): - edge_table[key][i] = edge_table[edge_table[key][i]][6] - else: - keyrev = (edge_table[key][i][1], edge_table[key][i][0]) - edge_table[key][i] = edge_table[keyrev][6] - - return edge_table - -# ================================ -# === Generate Entry From Quad === -# ================================ -def generate_entry_from_quad(mesh, i, edge_table): - vertex4, vertex3, vertex2, vertex1 = mesh.faces[i].v - - if has_vertex_colors(mesh): - vcolor4, vcolor3, vcolor2, vcolor1 = mesh.faces[i].col - Acol = (vcolor1.r/255.0, vcolor1.g/255.0, vcolor1.b/255.0) - Bcol = (vcolor2.r/255.0, vcolor2.g/255.0, vcolor2.b/255.0) - Ccol = (vcolor3.r/255.0, vcolor3.g/255.0, vcolor3.b/255.0) - Dcol = (vcolor4.r/255.0, vcolor4.g/255.0, vcolor4.b/255.0) - - # === verts are upper case, edges are lower case === - A, B, C, D = vertex1.index, vertex2.index, vertex3.index, vertex4.index - a, b, c, d = (A, B), (B, C), (C, D), (D, A) - - if edge_table.has_key((B, A)): - edge_table[(B, A)][1] = i - edge_table[(B, A)][4] = d - edge_table[(B, A)][5] = b - if has_vertex_colors(mesh): edge_table[(B, A)][8] = Bcol - else: - if has_vertex_colors(mesh): - edge_table[(A, B)] = [i, None, d, b, None, None, None, Bcol, None] - else: - edge_table[(A, B)] = [i, None, d, b, None, None, None] - - if edge_table.has_key((C, B)): - edge_table[(C, B)][1] = i - edge_table[(C, B)][4] = a - edge_table[(C, B)][5] = c - if has_vertex_colors(mesh): edge_table[(C, B)][8] = Ccol - else: - if has_vertex_colors(mesh): - edge_table[(B, C)] = [i, None, a, c, None, None, None, Ccol, None] - else: - edge_table[(B, C)] = [i, None, a, c, None, None, None] - - if edge_table.has_key((D, C)): - edge_table[(D, C)][1] = i - edge_table[(D, C)][4] = b - edge_table[(D, C)][5] = d - if has_vertex_colors(mesh): edge_table[(D, C)][8] = 
Dcol - else: - if has_vertex_colors(mesh): - edge_table[(C, D)] = [i, None, b, d, None, None, None, Dcol, None] - else: - edge_table[(C, D)] = [i, None, b, d, None, None, None] - - if edge_table.has_key((A, D)): - edge_table[(A, D)][1] = i - edge_table[(A, D)][4] = c - edge_table[(A, D)][5] = a - if has_vertex_colors(mesh): edge_table[(A, D)][8] = Acol - else: - if has_vertex_colors(mesh): - edge_table[(D, A)] = [i, None, c, a, None, None, None, Acol, None] - else: - edge_table[(D, A)] = [i, None, c, a, None, None, None] - -# ==================================== -# === Generate Entry From Triangle === -# ==================================== -def generate_entry_from_tri(mesh, i, edge_table): - vertex3, vertex2, vertex1 = mesh.faces[i].v - - if has_vertex_colors(mesh): - vcolor3, vcolor2, vcolor1, _vcolor4_ = mesh.faces[i].col - Acol = (vcolor1.r/255.0, vcolor1.g/255.0, vcolor1.b/255.0) - Bcol = (vcolor2.r/255.0, vcolor2.g/255.0, vcolor2.b/255.0) - Ccol = (vcolor3.r/255.0, vcolor3.g/255.0, vcolor3.b/255.0) - - # === verts are upper case, edges are lower case === - A, B, C = vertex1.index, vertex2.index, vertex3.index - a, b, c = (A, B), (B, C), (C, A) - - if edge_table.has_key((B, A)): - edge_table[(B, A)][1] = i - edge_table[(B, A)][4] = c - edge_table[(B, A)][5] = b - if has_vertex_colors(mesh): edge_table[(B, A)][8] = Bcol - else: - if has_vertex_colors(mesh): - edge_table[(A, B)] = [i, None, c, b, None, None, None, Bcol, None] - else: - edge_table[(A, B)] = [i, None, c, b, None, None, None] - - if edge_table.has_key((C, B)): - edge_table[(C, B)][1] = i - edge_table[(C, B)][4] = a - edge_table[(C, B)][5] = c - if has_vertex_colors(mesh): edge_table[(C, B)][8] = Ccol - else: - if has_vertex_colors(mesh): - edge_table[(B, C)] = [i, None, a, c, None, None, None, Ccol, None] - else: - edge_table[(B, C)] = [i, None, a, c, None, None, None] - - if edge_table.has_key((A, C)): - edge_table[(A, C)][1] = i - edge_table[(A, C)][4] = b - edge_table[(A, C)][5] = a - if has_vertex_colors(mesh): edge_table[(A, C)][8] = Acol - else: - if has_vertex_colors(mesh): - edge_table[(C, A)] = [i, None, b, a, None, None, None, Acol, None] - else: - edge_table[(C, A)] = [i, None, b, a, None, None, None] - diff --git a/release/scripts/bpymodules/paths_ai2obj.py b/release/scripts/bpymodules/paths_ai2obj.py deleted file mode 100644 index 6eb5023a8d4..00000000000 --- a/release/scripts/bpymodules/paths_ai2obj.py +++ /dev/null @@ -1,506 +0,0 @@ -# -*- coding: latin-1 -*- -""" -paths_ai2obj.py -# --------------------------------------------------------------- -Copyright (c) jm soler juillet/novembre 2004-april 2007, -# --------------------------------------------------------------- - released under GNU Licence - for the Blender 2.45 Python Scripts Bundle. -Ce programme est libre, vous pouvez le redistribuer et/ou -le modifier selon les termes de la Licence Publique Générale GNU -publiée par la Free Software Foundation (version 2 ou bien toute -autre version ultérieure choisie par vous). - -Ce programme est distribué car potentiellement utile, mais SANS -AUCUNE GARANTIE, ni explicite ni implicite, y compris les garanties -de commercialisation ou d'adaptation dans un but spécifique. -Reportez-vous à la Licence Publique Générale GNU pour plus de détails. - -Vous devez avoir reçu une copie de la Licence Publique Générale GNU -en même temps que ce programme ; si ce n'est pas le cas, écrivez à la -Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, -MA 02111-1307, États-Unis. 
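The exporter code above builds its edge table the winged-edge way: every edge is keyed by an ordered pair of vertex indices, the reversed key is looked up to find the face already registered on the other side, and each record collects the two adjacent faces plus the neighbouring edges (and, optionally, vertex colours). The snippet below is only a minimal standalone sketch of the core idea, per-edge face adjacency, using plain Python containers instead of Blender's NMesh API; the function and variable names are illustrative, not taken from the script.

def build_edge_table(faces):
    # Map each undirected edge (lower index, higher index) to the faces sharing it.
    edge_table = {}
    for face_index, verts in enumerate(faces):
        for k in range(len(verts)):
            a, b = verts[k], verts[(k + 1) % len(verts)]
            key = (a, b) if a < b else (b, a)      # direction-independent key
            edge_table.setdefault(key, []).append(face_index)
    return edge_table

if __name__ == '__main__':
    quads = [(0, 1, 2, 3), (1, 4, 5, 2)]           # two quads sharing edge (1, 2)
    print(build_edge_table(quads)[(1, 2)])         # -> [0, 1]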
- - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; either version 2 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# --------------------------------------------------------------- -#---------------------------------------------- -# -# Page officielle : -# http://jmsoler.free.fr/didacticiel/blender/tutor/cpl_import_ai_en.htm -# Communiquer les problemes et erreurs sur: -# http://www.zoo-logique.org/3D.Blender/newsportal/thread.php?group=3D.Blender -#---------------------------------------------- - -#Changelog -#---------------------------------------------- -# 0.1.1 : 2004/08/03, bug in boundingbox reading when Value are negative -# 0.1.2 : 2005/06/12, gmove tranformation properties -# 0.1.3 : 2005/06/25, added a __name__ test to use the script alone -# 0.1.4 : 2005/06/25, closepath improvements -# 0.1.5 : 2005/06/25, ... -# 0.1.6 : 2005/06/26, warning for compacted file - compatibility increased up to AI 10.0 plain text -# 0.1.7 : 2005/06/25, two more closepath improvements -# -# 0.1.8 : 2006/07/03, two more closepath improvements -# 0.1.9 : 2007/05/06, modif on the method that gets the last object on - the list data -# 2008/03/12, Added character encoding line so french text -# does not break python interpreters. - -""" -SHARP_IMPORT=0 -SCALE=1 -NOTHING_TODO=1 -AI_VERSION='' - -GSTACK = [] -GSCALE = [] -GTRANSLATE = [] - -import sys -#oldpath=sys.path -import Blender -BLversion=Blender.Get('version') - -try: - import nt - os=nt - os.sep='\\' - -except: - import posix - os=posix - os.sep='/' - -def isdir(path): - try: - st = os.stat(path) - return 1 - except: - return 0 - -def split(pathname): - if pathname.find(os.sep)!=-1: - k0=pathname.split(os.sep) - else: - if os.sep=='/': - k0=pathname.split('\\') - else: - k0=pathname.split('/') - - directory=pathname.replace(k0[len(k0)-1],'') - Name=k0[len(k0)-1] - return directory, Name - -def join(l0,l1): - return l0+os.sep+l1 - -os.isdir=isdir -os.split=split -os.join=join - -def filtreFICHIER(nom): - f=open(nom,'rU') - t=f.readlines() - f.close() - - if len(t)>1 and t[0].find('EPSF')==-1: - return t - else: - name = "OK?%t| Not a valid file or an empty file ... 
" # if no %xN int is set, indices start from 1 - result = Blender.Draw.PupMenu(name) - - return 'false' - -#=============================== -# Data -#=============================== -#=============================== -# Blender Curve Data -#=============================== -objBEZIER=0 -objSURFACE=5 -typBEZIER3D=1 #3D -typBEZIER2D=9 #2D - -class Bez: - def __init__(self): - self.co=[] - self.ha=[0,0] - self.tag='' - -class ITEM: - def __init__(self): - self.type = typBEZIER3D, - self.pntsUV = [0,0] - self.resolUV = [32,0] - self.orderUV = [0,0] - self.flagUV = [0,0] - self.Origine = [0.0,0.0] - self.beziers_knot = [] - -class COURBE: - def __init__(self): - self.magic_number='3DG3' - self.type = objBEZIER - self.number_of_items = 0 - self.ext1_ext2 = [0,0] - self.matrix = """0.0 0.0 1.0 0.0 -0.0 1.0 0.0 0.0 -0.0 0.0 1.0 0.0 -0.0 0.0 0.0 1.0 """ - self.ITEM = {} - -courbes=COURBE() - -PATTERN={} - -BOUNDINGBOX={'rec':[],'coef':1.0} -npat=0 -#===================================================================== -#======== name of the curve in teh courbes dictionnary =============== -#===================================================================== -n0=0 - -#===================================================================== -#====================== current Point ================================ -#===================================================================== -CP=[0.0,0.0] #currentPoint - - -# modifs 12/06/2005 -#===================================================================== -#====================== current transform ============================ -#===================================================================== -class transform: - def __init__(self,matrix=[1,0,01],x=0.0,y=0.0): - self.matrix=matrix[:] - self.xy=[x,y] - -def G_move(l,a): - global GSCALE, GTRANSLATE, GSTACK - #print GSCALE, GTRANSLATE, GSTACK - return str((float(l)+GTRANSLATE[a]+GSTACK[-1].xy[a])*GSCALE[a]) -# modifs 12/06/2005 - - -#===================================================================== -#===== to compare last position to the original move to displacement = -#===== needed for cyclic efinition ================================= -#===================================================================== -def test_egalitedespositions(f1,f2): - if f1[0]==f2[0] and f1[1]==f2[1]: - return Blender.TRUE - else: - return Blender.FALSE - - -def Open_GEOfile(dir,nom): - if BLversion>=233: - in_editmode = Blender.Window.EditMode() - if in_editmode: Blender.Window.EditMode(0) - Blender.Load(dir+nom+'OOO.obj', 1) - BO=Blender.Scene.GetCurrent().objects.active - BO.RotY=0.0 - BO.RotX=1.57 - BO.makeDisplayList() - Blender.Window.RedrawAll() - else: - print "Not yet implemented" - -def create_GEOtext(courbes): - global SCALE, B, BOUNDINGBOX - r=BOUNDINGBOX['rec'] - - if SCALE==1: - SCALE=1.0 - elif SCALE==2: - SCALE=r[2]-r[0] - elif SCALE==3: - SCALE=r[3]-r[1] - - t=[] - t.append(courbes.magic_number+'\n') - t.append(str(courbes.type)+'\n') - t.append(str(courbes.number_of_items)+'\n') - t.append(str(courbes.ext1_ext2[0])+' '+str(courbes.ext1_ext2[1])+'\n') - t.append(courbes.matrix+'\n') - - for k in courbes.ITEM.keys(): - if len(courbes.ITEM[k].beziers_knot)>1 : - t.append("%s\n"%courbes.ITEM[k].type) - t.append("%s %s \n"%(courbes.ITEM[k].pntsUV[0],courbes.ITEM[k].pntsUV[1])) - t.append("%s %s \n"%(courbes.ITEM[k].resolUV[0],courbes.ITEM[k].resolUV[1])) - t.append("%s %s \n"%(courbes.ITEM[k].orderUV[0],courbes.ITEM[k].orderUV[1])) - t.append("%s %s \n"%(courbes.ITEM[k].flagUV[0],courbes.ITEM[k].flagUV[1])) 
- - flag =courbes.ITEM[k].flagUV[0] - - for k2 in range(len(courbes.ITEM[k].beziers_knot)): - #print k2 - k1 =courbes.ITEM[k].beziers_knot[k2] - t.append("%4f 0.0 %4f \n"%(float(k1.co[2])/SCALE,float(k1.co[3])/SCALE)) - t.append("%4f 0.0 %4f \n"%(float(k1.co[4])/SCALE,float(k1.co[5])/SCALE)) - t.append("%4f 0.0 %4f \n"%(float(k1.co[0])/SCALE,float(k1.co[1])/SCALE)) - - t.append(str(k1.ha[0])+' '+str(k1.ha[1])+'\n') - return t - -def save_GEOfile(dir,nom,t): - f=open(dir+nom+'OOO.obj','w') - f.writelines(t) - f.close() - #warning = "REMINDER : %t | Do not forget to rename your blender file NOW ! %x1" - #result = Blender.Draw.PupMenu(warning) - - -#===================================================================== -#===== AI format : DEBUT ========================= -#===================================================================== -def mouvement_vers(l,n0,CP): - if n0 in courbes.ITEM.keys(): - n0+=1 - - CP=[l[-3].replace('d',''),l[-2]] - courbes.ITEM[n0]=ITEM() - courbes.ITEM[n0].Origine=[l[-3].replace('d',''),l[-2]] - - B=Bez() - B.co=[CP[0],CP[1],CP[0],CP[1],CP[0],CP[1]] - B.ha=[0,0] - B.tag=l[-1] - - courbes.ITEM[n0].beziers_knot.append(B) - - return courbes,n0,CP - -def courbe_vers_c(l,l2, n0,CP): #c,C - - B=Bez() - B.co=[l[4],l[5],l[2],l[3],l[4],l[5]] - B.tag=l[-1] - B.ha=[0,0] - - BP=courbes.ITEM[n0].beziers_knot[-1] - - BP.co[0]=l[0] - BP.co[1]=l[1] - - courbes.ITEM[n0].beziers_knot.append(B) - - CP=[B.co[4],B.co[5]] - return courbes,n0,CP - - -def courbe_vers_v(l,n0,CP): #v-V - - B=Bez() - B.tag=l[-1] - B.co=[l[2],l[3],l[0],l[1],l[2],l[3]] - B.ha=[0,0] - - courbes.ITEM[n0].beziers_knot.append(B) - - CP=[B.co[4],B.co[5]] - return courbes,n0,CP - -def courbe_vers_y(l,n0,CP): #y - B=Bez() - B.tag=l[-1] - B.co=[l[2],l[3],l[2],l[3],l[2],l[3]] - B.ha=[0,0] - - BP=courbes.ITEM[n0].beziers_knot[-1] - BP.co[0]=l[0] - BP.co[1]=l[1] - - courbes.ITEM[n0].beziers_knot.append(B) - CP=[B.co[4],B.co[5]] - return courbes,n0,CP - - -def ligne_tracee_l(l,n0,CP): - B=Bez() - B.tag=l[-1] - B.co=[l[0],l[1],l[0],l[1],l[0],l[1]] - B.ha=[0,0] - - BP=courbes.ITEM[n0].beziers_knot[-1] - - courbes.ITEM[n0].beziers_knot.append(B) - CP=[B.co[4],B.co[5]] - return courbes,n0,CP - -def ligne_fermee(l,n0,CP): - courbes.ITEM[n0].flagUV[0]=1 - - if len(courbes.ITEM[n0].beziers_knot)>1: - BP=courbes.ITEM[n0].beziers_knot[-1] - BP0=courbes.ITEM[n0].beziers_knot[0] - - if BP.tag not in ['l','L']: - BP.co[0]=BP0.co[0] #4-5 point prec - BP.co[1]=BP0.co[1] - - del courbes.ITEM[n0].beziers_knot[0] - return courbes,n0,CP - -def passe(l,n0,CP): - return courbes,n0,CP - -Actions= { "C" : courbe_vers_c, - "c" : courbe_vers_c, - "V" : courbe_vers_v, - "v" : courbe_vers_v, - "Y" : courbe_vers_y, - "y" : courbe_vers_y, - "m" : mouvement_vers, - "l" : ligne_tracee_l, - "L" : ligne_tracee_l, - "F" : passe, - "f" : ligne_fermee, - "B" : passe, - "b" : ligne_fermee, - "S" : passe, - "s" : ligne_fermee, - "N" : ligne_fermee, - "n" : passe, - } - -TAGcourbe=Actions.keys() - -def pik_pattern(t,l): - global npat, PATTERN, BOUNDINGBOX, AI_VERSION - while t[l].find('%%EndSetup')!=0: - if t[l].find('%%Creator: Adobe Illustrator(R)')!=-1: - print t[l] - AI_VERSION=t[l].split()[-1] - print AI_VERSION - - if t[l].find('%%BoundingBox:')!=-1: - t[l]=t[l][t[l].find(':')+1:] - l0=t[l].split() - BOUNDINGBOX['rec']=[float(l0[-4]),float(l0[-3]),float(l0[-2]),float(l0[-1])] - r=BOUNDINGBOX['rec'] - BOUNDINGBOX['coef']=(r[3]-r[1])/(r[2]-r[0]) - #print l, - if t[l].find('BeginPattern')!=-1: - nomPattern=t[l][t[l].find('(')+1:t[l].find(')')] - 
PATTERN[nomPattern]={} - - if t[l].find('BeginPatternLayer')!=-1: - npat+=1 - PATTERN[nomPattern][npat]=[] - while t[l].find('EndPatternLayer')==-1: - #print t[l] - PATTERN[nomPattern][npat].append(l) - l+=1 - if l+10: - if len(PATTERN.keys() )>0: - #print len(PATTERN.keys() ) - warning = "Pattern list (for info not used): %t| " - p0=1 - for P in PATTERN.keys(): - warning+="%s %%x%s|"%(P,p0) - p0+=1 - Padd = Blender.Draw.PupMenu(warning) - - t=create_GEOtext(courbes) - save_GEOfile(dir,name[0],t) - - # 0.1.8 --------------------------------- - # [O.select(0) for O in Blender.Scene.getCurrent().getChildren()] - # 0.1.8 --------------------------------- - - Open_GEOfile(dir,name[0]) - - # 0.1.8 --------------------------------- - Blender.Object.Get()[-1].setName(name[0]) - # 0.1.8 --------------------------------- - - else: - pass -#===================================================================== -#====================== AI format mouvements ========================= -#===================================================================== -#========================================================= -# une sorte de contournement qui permet d'utiliser la fonction -# et de documenter les variables Window.FileSelector -#========================================================= -def fonctionSELECT(nom): - global NOTHING_TODO,AI_VERSION - scan_FILE(nom) - if NOTHING_TODO==1: - warning = "AI %s compatible file "%AI_VERSION+" but nothing to do ? %t| Perhaps a compacted file ... " - NOTHING = Blender.Draw.PupMenu(warning) - -if __name__=="__main__": - Blender.Window.FileSelector (fonctionSELECT, 'SELECT AI FILE') -#sys.path=oldpath diff --git a/release/scripts/bpymodules/paths_eps2obj.py b/release/scripts/bpymodules/paths_eps2obj.py deleted file mode 100644 index e1643c3bf40..00000000000 --- a/release/scripts/bpymodules/paths_eps2obj.py +++ /dev/null @@ -1,452 +0,0 @@ -#---------------------------------------------- -# (c) jm soler juillet 2004-juin 2005 , released under Blender Artistic Licence -# for the Blender 2.34-2.37 Python Scripts Bundle. -# -# last update: 06/05/2007 -#---------------------------------------------- -# Page officielle : -# http://jmsoler.free.fr/didacticiel/blender/tutor/cpl_import_eps.htm -# Communiquer les problemes et erreurs sur: -# http://www.zoo-logique.org/3D.Blender/newsportal/thread.php?group=3D.Blender -#---------------------------------------------- -SHARP_IMPORT = 0 -SCALE = 1.0 -scale = 1 - -import sys -#oldpath=sys.path - -import Blender -from Blender import Draw -BLversion=Blender.Get('version') - -try: - import nt - os=nt - os.sep='\\' - -except: - import posix - os=posix - os.sep='/' - -def isdir(path): - try: - st = os.stat(path) - return 1 - except: - return 0 - -def split(pathname): - if pathname.find(os.sep)!=-1: - k0=pathname.split(os.sep) - else: - if os.sep=='/': - k0=pathname.split('\\') - else: - k0=pathname.split('/') - - directory=pathname.replace(k0[len(k0)-1],'') - Name=k0[len(k0)-1] - return directory, Name - -def join(l0,l1): - return l0+os.sep+l1 - -os.isdir=isdir -os.split=split -os.join=join - -def filtreFICHIER(nom): - f=open(nom,'rU') - t=f.readlines() - f.close() - if len(t)==1 and t[0].find('\r'): - t=t[0].split('\r') - if len(t)>1 and t[0].find('PS-Adobe-3.0')==-1 and t[0].find('EPSF')==-1: - return t - else: - name = "OK?%t| Not a valid file or an empty file or... 
%x1| not a pure PS-Adobe-2.0 file %x2 " - result = Blender.Draw.PupMenu(name) - return 'false' - -#=============================== -# Data -#=============================== -#=============================== -# Blender Curve Data -#=============================== -objBEZIER=0 -objSURFACE=5 -typBEZIER3D=1 #3D -typBEZIER2D=9 #2D - -class Bez: - def __init__(self): - self.co=[] - self.ha=[0,0] - -class ITEM: - def __init__(self): - self.type = typBEZIER3D, - self.pntsUV = [0,0] - self.resolUV = [32,0] - self.orderUV = [0,0] - self.flagUV = [0,0] - self.Origine = [0.0,0.0] - self.beziers_knot = [] - -class COURBE: - def __init__(self): - self.magic_number='3DG3' - self.type = objBEZIER - self.number_of_items = 0 - self.ext1_ext2 = [0,0] - self.matrix = """0.0 0.0 1.0 0.0 -0.0 1.0 0.0 0.0 -0.0 0.0 1.0 0.0 -0.0 0.0 0.0 1.0 """ #- right-handed object matrix. Used to determine position, rotation and size - self.ITEM = {} - -courbes=COURBE() -PATTERN={} -BOUNDINGBOX={'rec':[],'coef':1.0} -npat=0 -#===================================================================== -#======== name of the curve in teh courbes dictionnary =============== -#===================================================================== -n0=0 - -#===================================================================== -#====================== current Point ================================ -#===================================================================== -CP=[0.0,0.0] #currentPoint - -# modifs 12/06/2005 -#===================================================================== -#====================== current transform ============================ -#===================================================================== -class transform: - def __init__(self,matrix=[1,0,01],x=0.0,y=0.0): - self.matrix=matrix[:] - self.xy=[x,y] - -GSTACK = [] -stack=transform() -GSTACK.append(stack) - -GSCALE = [1.0,1.0] -GTRANSLATE = [0.0,0.0] - -def G_move(l,a): - global GSCALE, GTRANSLATE, GSTACK - #print GSCALE, GTRANSLATE, GSTACK - return str((float(l)+GTRANSLATE[a]+GSTACK[-1].xy[a])*GSCALE[a]) -# modifs 12/06/2005 - -#===================================================================== -#===== to compare last position to the original move to displacement = -#===== needed for cyclic efinition ================================= -#===================================================================== -def test_egalitedespositions(f1,f2): - if f1[0]==f2[0] and f1[1]==f2[1]: - return Blender.TRUE - else: - return Blender.FALSE - - -def Open_GEOfile(dir,nom): - global SCALE,BOUNDINGBOX, scale - if BLversion>=233: - Blender.Load(dir+nom+'OOO.obj', 1) - BO=Blender.Scene.GetCurrent().objects.active - BO.RotY=3.1416 - BO.RotZ=3.1416 - BO.RotX=3.1416/2.0 - if scale==1: - BO.LocY+=BOUNDINGBOX['rec'][3] - else: - BO.LocY+=BOUNDINGBOX['rec'][3]/SCALE - - BO.makeDisplayList() - Blender.Window.RedrawAll() - else: - print "Not yet implemented" - -def create_GEOtext(courbes): - global SCALE, B, BOUNDINGBOX,scale - r=BOUNDINGBOX['rec'] - - if scale==1: - SCALE=1.0 - elif scale==2: - SCALE=r[2]-r[0] - elif scale==3: - SCALE=r[3]-r[1] - - t=[] - t.append(courbes.magic_number+'\n') - t.append(str(courbes.type)+'\n') - t.append(str(courbes.number_of_items)+'\n') - t.append(str(courbes.ext1_ext2[0])+' '+str(courbes.ext1_ext2[1])+'\n') - t.append(courbes.matrix+'\n') - - for k in courbes.ITEM.keys(): - t.append("%s\n"%courbes.ITEM[k].type) - t.append("%s %s \n"%(courbes.ITEM[k].pntsUV[0],courbes.ITEM[k].pntsUV[1])) - t.append("%s %s 
\n"%(courbes.ITEM[k].resolUV[0],courbes.ITEM[k].resolUV[1])) - t.append("%s %s \n"%(courbes.ITEM[k].orderUV[0],courbes.ITEM[k].orderUV[1])) - t.append("%s %s \n"%(courbes.ITEM[k].flagUV[0],courbes.ITEM[k].flagUV[1])) - - flag =courbes.ITEM[k].flagUV[0] - - for k2 in range(flag,len(courbes.ITEM[k].beziers_knot)): - k1 =courbes.ITEM[k].beziers_knot[k2] - t.append("%4f 0.0 %4f \n"%(float(k1.co[0])/SCALE,float(k1.co[1])/SCALE)) - t.append("%4f 0.0 %4f \n"%(float(k1.co[2])/SCALE,float(k1.co[3])/SCALE)) - t.append("%4f 0.0 %4f \n"%(float(k1.co[4])/SCALE,float(k1.co[5])/SCALE)) - t.append(str(k1.ha[0])+' '+str(k1.ha[1])+'\n') - return t - -def save_GEOfile(dir,nom,t): - f=open(dir+nom+'OOO.obj','w') - f.writelines(t) - f.close() - - #name = "REMINDER : %t | Do not forget to rename your blender file NOW ! %x1" - #result = Blender.Draw.PupMenu(name) - - -#===================================================================== -#===== EPS format : DEBUT ========================= -#===================================================================== -def mouvement_vers(l,n0,CP): - if n0 in courbes.ITEM.keys(): - #if test_egalitedespositions(courbes.ITEM[n0].Origine,CP): - # courbes.ITEM[n0].flagUV[0]=1 - n0+=1 - CP=[l[-3].replace('d',''),l[-2]] - else: - CP=[l[-3].replace('d',''),l[-2]] - #i= - courbes.ITEM[n0]=ITEM() - courbes.ITEM[n0].Origine=[l[-3].replace('d',''),l[-2]] - - B=Bez() - B.co=[G_move(CP[0],0), - G_move(CP[1],1), - G_move(CP[0],0), - G_move(CP[1],1), - G_move(CP[0],0), - G_move(CP[1],1)] - - B.ha=[0,0] - courbes.ITEM[n0].beziers_knot.append(B) - - return courbes,n0,CP - -def rmouvement_vers(l,n0,CP): - if n0 in courbes.ITEM.keys(): - #if test_egalitedespositions(courbes.ITEM[n0].Origine,CP): - # courbes.ITEM[n0].flagUV[0]=1 - n0+=1 - CP=["%4f"%(float(l[-3])+float(CP[0])),"%4f"%(float(l[-2])+float(CP[1]))] - else: - CP=["%4f"%(float(l[-3])+float(CP[0])),"%4f"%(float(l[-2])+float(CP[1]))] - #i= - courbes.ITEM[n0]=ITEM() - courbes.ITEM[n0].Origine=[l[-3].replace('d',''),l[-2]] - - B=Bez() - B.co=[CP[0],CP[1],CP[0],CP[1],CP[0],CP[1]] - B.ha=[0,0] - courbes.ITEM[n0].beziers_knot.append(B) - return courbes,n0,CP - -def courbe_vers_c(l, l2, n0,CP): #c,C - """ - B=Bez() - B.co=[l[0],l[1],l[2],l[3],l[4],l[5]] - B.ha=[0,0] - - courbes.ITEM[n0].beziers_knot.append(B) - """ - B=Bez() - B.co=[G_move(l[2],0), - G_move(l[3],1), - G_move(l[4],0), - G_move(l[5],1), - G_move(l[0],0), - G_move(l[1],1)] - if len(courbes.ITEM[n0].beziers_knot)==1: - CP=[l[0],l[1]] - courbes.ITEM[n0].Origine=[l[0],l[1]] - if l[-1]=='C': - B.ha=[2,2] - else: - B.ha=[0,0] - courbes.ITEM[n0].beziers_knot.append(B) - if len(l2)>1 and l2[-1] in Actions.keys(): - B.co[-2]=G_move(l2[0],0) - B.co[-1]=G_move(l2[1],1) - else: - B.co[-2]=G_move(CP[0],0) - B.co[-1]=G_move(CP[1],1) - return courbes,n0,CP - -def ligne_tracee_l(l,n0,CP): - B=Bez() - B.co=[G_move(l[0],0), - G_move(l[1],1), - G_move(l[0],0), - G_move(l[1],1), - G_move(l[0],0), - G_move(l[1],1)] - B.ha=[0,0] - courbes.ITEM[n0].beziers_knot.append(B) - CP=[l[0],l[1]] - return courbes,n0,CP - -def rligne_tracee_l(l,n0,CP): - B=Bez() - B.co=["%4f"%(float(l[0])+float(CP[0])), - "%4f"%(float(l[1])+float(CP[1])), - "%4f"%(float(l[0])+float(CP[0])), - "%4f"%(float(l[1])+float(CP[1])), - "%4f"%(float(l[0])+float(CP[0])), - "%4f"%(float(l[1])+float(CP[1]))] - B.ha=[0,0] - courbes.ITEM[n0].beziers_knot.append(B) - CP=[l[0],l[1]] - return courbes,n0,CP - -Actions= { "curveto" : courbe_vers_c, - "curveto" : courbe_vers_c, - "moveto" : mouvement_vers, - "rmoveto" : mouvement_vers, - 
"lineto" : ligne_tracee_l, - "rlineto" : rligne_tracee_l -} - -TAGcourbe=Actions.keys() - -""" -def pik_pattern(t,l): - global npat, PATTERN, BOUNDINGBOX - while t[l].find('%%EndSetup')!=0: - if t[l].find('%%BoundingBox:')!=-1: - l0=t[l].split() - BOUNDINGBOX['rec']=[float(l0[-4]),float(l0[-3]),float(l0[-2]),float(l0[-1])] - r=BOUNDINGBOX['rec'] - BOUNDINGBOX['coef']=(r[3]-r[1])/(r[2]-r[0]) - print l, - if t[l].find('BeginPatternLayer')!=-1: - npat+=1 - PATTERN[npat]=[] - while t[l].find('EndPatternLayer')==-1: - print t[l] - PATTERN[npat].append(l) - l+=1 - if l+10: - if len(PATTERN.keys() )>0: - #print len(PATTERN.keys() ) - pass - t=create_GEOtext(courbes) - save_GEOfile(dir,name[0],t) - Open_GEOfile(dir,name[0]) - - # 03 juillet 2006 ---------------------- - Blender.Object.Get()[-1].setName(name[0]) - # 03 juillet 2006 ---------------------- - - else: - pass - - -#===================================================================== -#====================== EPS format mouvements ========================= -#===================================================================== -#========================================================= -# une sorte de contournement qui permet d'utiliser la fonction -# et de documenter les variables Window.FileSelector -#========================================================= -def fonctionSELECT(nom): - scan_FILE(nom) - -if __name__=="__main__": - Blender.Window.FileSelector (fonctionSELECT, 'SELECT EPS FILE') - - diff --git a/release/scripts/bpymodules/paths_gimp2obj.py b/release/scripts/bpymodules/paths_gimp2obj.py deleted file mode 100644 index c2ce9718c71..00000000000 --- a/release/scripts/bpymodules/paths_gimp2obj.py +++ /dev/null @@ -1,363 +0,0 @@ -# -*- coding: latin-1 -*- -""" -#---------------------------------------------- -# (c) jm soler juillet 2004, -#---------------------------------------------- - released under GNU Licence - for the Blender 2.45 Python Scripts Bundle. -Ce programme est libre, vous pouvez le redistribuer et/ou -le modifier selon les termes de la Licence Publique Générale GNU -publiée par la Free Software Foundation (version 2 ou bien toute -autre version ultérieure choisie par vous). - -Ce programme est distribué car potentiellement utile, mais SANS -AUCUNE GARANTIE, ni explicite ni implicite, y compris les garanties -de commercialisation ou d'adaptation dans un but spécifique. -Reportez-vous à la Licence Publique Générale GNU pour plus de détails. - -Vous devez avoir reçu une copie de la Licence Publique Générale GNU -en même temps que ce programme ; si ce n'est pas le cas, écrivez à la -Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, -MA 02111-1307, États-Unis. - - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; either version 2 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. 
- -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - -""" - -# --------------------------------------------------------------- -# last update : 07/05/2007 -#---------------------------------------------- -# Page officielle : -# http://jmsoler.free.fr/didacticiel/blender/tutor/cpl_import_gimp.htm -# Communiquer les problemes et erreurs sur: -# http://www.zoo-logique.org/3D.Blender/newsportal/thread.php?group=3D.Blender -# Modification History: -# 2008-03-12 Added character encoding line so french text does not break -# python interpreters. -#--------------------------------------------- - -SHARP_IMPORT=0 -SCALE=1 - -import sys -#oldpath=sys.path - -import Blender -BLversion=Blender.Get('version') - -try: - import nt - os=nt - os.sep='\\' - -except: - import posix - os=posix - os.sep='/' - -def isdir(path): - try: - st = os.stat(path) - return 1 - except: - return 0 - -def split(pathname): - if pathname.find(os.sep)!=-1: - k0=pathname.split(os.sep) - else: - if os.sep=='/': - k0=pathname.split('\\') - else: - k0=pathname.split('/') - - directory=pathname.replace(k0[len(k0)-1],'') - Name=k0[len(k0)-1] - return directory, Name - -def join(l0,l1): - return l0+os.sep+l1 - -os.isdir=isdir -os.split=split -os.join=join - -def filtreFICHIER(nom): - f=open(nom,'r') - t=f.readlines() - f.close() - if len(t)==1 and t[0].find('\r'): - t=t[0].split('\r') - if len(t)>1 and t[1].find('#POINTS:')==0: - return t - else: - warning = "OK?%t| Not a valid file or an empty file ... " # if no %xN int is set, indices start from 1 - result = Blender.Draw.PupMenu(warning) - return "false" - -#=============================== -# Data -#=============================== -#=============================== -# Blender Curve Data -#=============================== -objBEZIER=0 -objSURFACE=5 -typBEZIER3D=1 #3D -typBEZIER2D=9 #2D - -class Bez: - def __init__(self): - self.co=[] - self.ha=[0,0] - - def echo(self): - #print 'co = ', self.co - #print 'ha = ', self.ha - pass - -class ITEM: - def __init__(self): - self.type = typBEZIER3D, - self.pntsUV = [0,0] - self.resolUV = [32,0] - self.orderUV = [0,0] - self.flagUV = [0,0] - self.Origine = [0.0,0.0] - self.beziers_knot = [] - -class COURBE: - def __init__(self): - self.magic_number='3DG3' - self.type = objBEZIER - self.number_of_items = 0 - self.ext1_ext2 = [0,0] - self.matrix = """0.0 0.0 1.0 0.0 -0.0 1.0 0.0 0.0 -0.0 0.0 1.0 0.0 -0.0 0.0 0.0 1.0 """ #- right-handed object matrix. 
Used to determine position, rotation and size - self.ITEM = {} - -courbes=COURBE() -PATTERN={} -BOUNDINGBOX={'rec':[],'coef':1.0} -npat=0 -#===================================================================== -#======== name of the curve in the courbes dictionnary =============== -#===================================================================== -n0=0 - -#===================================================================== -#====================== current Point ================================ -#===================================================================== -CP=[0.0,0.0] #currentPoint - -def MINMAX(b): - global BOUNDINGBOX - r=BOUNDINGBOX['rec'] - for m in range(0,len(b)-2,2): - #print m, m+1 , len(b)-1 - #print b[m], r, r[0] - if float(b[m])r[2]: r[2]=float(b[m]) - - if float(b[m+1])r[3]: r[3]=float(b[m+1]) - -#===================================================================== -#===== to compare last position to the original move to displacement = -#===== needed for cyclic efinition ================================= -#===================================================================== -def test_egalitedespositions(f1,f2): - if f1[0]==f2[0] and f1[1]==f2[1]: - return Blender.TRUE - else: - return Blender.FALSE - - -def Open_GEOfile(dir,nom): - if BLversion>=233: - Blender.Load(dir+nom+'OOO.obj', 1) - BO=Blender.Scene.GetCurrent().objects.active - BO.LocZ=1.0 - BO.makeDisplayList() - Blender.Window.RedrawAll() - else: - print "Not yet implemented" - -def create_GEOtext(courbes): - global SCALE, B, BOUNDINGBOX - r=BOUNDINGBOX['rec'] - if SCALE==1: - SCALE=1.0 - elif SCALE==2: - SCALE=r[2]-r[0] - elif SCALE==3: - SCALE=r[3]-r[1] - - t=[] - t.append(courbes.magic_number+'\n') - t.append(str(courbes.type)+'\n') - t.append(str(courbes.number_of_items)+'\n') - t.append(str(courbes.ext1_ext2[0])+' '+str(courbes.ext1_ext2[1])+'\n') - t.append(courbes.matrix+'\n') - - for k in courbes.ITEM.keys(): - - t.append("%s\n"%courbes.ITEM[k].type) - - t.append("%s %s \n"%(courbes.ITEM[k].pntsUV[0],courbes.ITEM[k].pntsUV[1])) - t.append("%s %s \n"%(courbes.ITEM[k].resolUV[0],courbes.ITEM[k].resolUV[1])) - t.append("%s %s \n"%(courbes.ITEM[k].orderUV[0],courbes.ITEM[k].orderUV[1])) - t.append("%s %s \n"%(courbes.ITEM[k].flagUV[0],courbes.ITEM[k].flagUV[1])) - - flag =0#courbes.ITEM[k].flagUV[0] - - for k2 in range(flag,len(courbes.ITEM[k].beziers_knot)): - k1 =courbes.ITEM[k].beziers_knot[k2] - t.append("%4f 0.0 %4f \n"%(float(k1.co[0])/SCALE,float(k1.co[1])/SCALE)) - t.append("%4f 0.0 %4f \n"%(float(k1.co[4])/SCALE,float(k1.co[5])/SCALE)) - t.append("%4f 0.0 %4f \n"%(float(k1.co[2])/SCALE,float(k1.co[3])/SCALE)) - t.append(str(k1.ha[0])+' '+str(k1.ha[1])+'\n') - return t - -def save_GEOfile(dir,nom,t): - f=open(dir+nom+'OOO.obj','w') - f.writelines(t) - f.close() - #warning = "REMINDER : %t | Do not forget to rename your blender file NOW ! 
%x1" - #result = Blender.Draw.PupMenu(warning) - - -#===================================================================== -#===== GIMP format : DEBUT ========================= -#===================================================================== -CLOSED=0 - -def mouvement_vers(l,l1,l2,n0): - global BOUNDINGBOX, CP - if l[1] == '3' : - n0+=1 - courbes.ITEM[n0]=ITEM() - courbes.ITEM[n0].Origine=[l[-3],l[-1],] - courbes.ITEM[n0-1].beziers_knot[0].co[0]=CP[0] - courbes.ITEM[n0-1].beziers_knot[0].co[1]=CP[1] - CP=[l2[-3], l2[-1]] - - elif l[1]=='1' and (n0 not in courbes.ITEM.keys()): - courbes.ITEM[n0]=ITEM() - courbes.ITEM[n0].Origine=[l[-3],l[-1],] - CP=[l2[-3], l2[-1]] - - B=Bez() - B.co=[ CP[0],CP[1], - l1[-3], l1[-1], - l[-3], l[-1]] - - CP=[l2[-3], l2[-1]] - - if BOUNDINGBOX['rec']==[]: - BOUNDINGBOX['rec']=[float(l2[-3]), float(l2[-1]), float(l[-3]), float(l[-1])] - B.ha=[0,0] - - """ - if len( courbes.ITEM[n0].beziers_knot)>=1: - courbes.ITEM[n0].beziers_knot[-1].co[2]=l1[-3] - courbes.ITEM[n0].beziers_knot[-1].co[3]=l1[-1] - """ - - MINMAX(B.co) - courbes.ITEM[n0].beziers_knot.append(B) - return courbes,n0 - -Actions= { "1" : mouvement_vers, - "3" : mouvement_vers } - -TAGcourbe=Actions.keys() - -def scan_FILE(nom): - global CP, courbes, SCALE, MAX, MIN, CLOSED - dir,name=split(nom) - name=name.split('.') - #print name - n0=0 - result=0 - t=filtreFICHIER(nom) - if t!="false": - if not SHARP_IMPORT: - warning = "Select Size : %t| As is %x1 | Scale on Height %x2| Scale on Width %x3" - SCALE = Blender.Draw.PupMenu(warning) - npat=0 - l=0 - while l 0: - t=create_GEOtext(courbes) - save_GEOfile(dir,name[0],t) - Open_GEOfile(dir,name[0]) - # 0.1.8 --------------------------------- - Blender.Object.Get()[-1].setName(name[0]) - # 0.1.8 --------------------------------- - - else: - pass - -#===================================================================== -#====================== GIMP Path format mouvements ========================= -#===================================================================== -#========================================================= -# une sorte de contournement qui permet d'utiliser la fonction -# et de documenter les variables Window.FileSelector -#========================================================= -def fonctionSELECT(nom): - scan_FILE(nom) - -if __name__=="__main__": - Blender.Window.FileSelector (fonctionSELECT, 'SELECT GIMP FILE') - diff --git a/release/scripts/bpymodules/paths_svg2obj.py b/release/scripts/bpymodules/paths_svg2obj.py deleted file mode 100644 index 6bab6dcbfd8..00000000000 --- a/release/scripts/bpymodules/paths_svg2obj.py +++ /dev/null @@ -1,1651 +0,0 @@ -# -*- coding: latin-1 -*- -""" -SVG 2 OBJ translater, 0.5.9o -Copyright (c) jm soler juillet/novembre 2004-april 2009, -# --------------------------------------------------------------- - released under GNU Licence - for the Blender 2.42 Python Scripts Bundle. -Ce programme est libre, vous pouvez le redistribuer et/ou -le modifier selon les termes de la Licence Publique Générale GNU -publiée par la Free Software Foundation (version 2 ou bien toute -autre version ultérieure choisie par vous). - -Ce programme est distribué car potentiellement utile, mais SANS -AUCUNE GARANTIE, ni explicite ni implicite, y compris les garanties -de commercialisation ou d'adaptation dans un but spécifique. -Reportez-vous à la Licence Publique Générale GNU pour plus de détails. 
- -Vous devez avoir reçu une copie de la Licence Publique Générale GNU -en même temps que ce programme ; si ce n'est pas le cas, écrivez à la -Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, -MA 02111-1307, États-Unis. - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; either version 2 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. - -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -# --------------------------------------------------------------- -# -#--------------------------------------------------------------------------- -# Page officielle : -# http://jmsoler.free.fr/didacticiel/blender/tutor/cpl_import_svg.htm -# http://jmsoler.free.fr/didacticiel/blender/tutor/cpl_import_svg_en.htm -# Communiquer les problemes et erreurs sur: -# http://www.zoo-logique.org/3D.Blender/newsportal/thread.php?group=3D.Blender -#--------------------------------------------------------------------------- - ---Old Concept : translate SVG file in GEO .obj file and try to load it. - was removed for the Blender 2.4x release. - .-- Curiousity : the original matrix must be : - | - | 0.0 0.0 1.0 0.0 - | 0.0 1.0 0.0 0.0 - | 0.0 0.0 1.0 0.0 - | 0.0 0.0 0.0 1.0 - | - | and not: - | 1.0 0.0 0.0 0.0 - | 0.0 1.0 0.0 0.0 - | 0.0 0.0 1.0 0.0 - | 0.0 0.0 0.0 1.0 - | - '-- Possible bug : sometime, the new curves object's RotY value - jumps to -90.0 degrees without any reason. - ---Options : - SHARP_IMPORT = 0 - choise between "As is", "Devide by height" and "Devide by width" - SHARP_IMPORT = 1 - no choise - - - -All commands are managed: - M : absolute move to - Z : close path - L : absolute line to - C : absolute curve to - S : absolute curve to with only one handle - H : absolute horizontal line to - V : absolute vertical line to - - l : relative line to 2004/08/03 - c : relative curve to 2004/08/03 - s : relative curve to with only one handle - h : relative horizontal line to - v : relative vertical line to - - A : curve_to_a, - V : draw_line_v, - H : draw_line_h, - Z : close_z, - Q : curve_to_q, - T : curve_to_t, - a : curve_to_a, - v : draw_line_v, - h : draw_line_h, - z : close_z, - q : curve_to_q, - - transfrom for tag - transform for tag - -The circle, rectangle closed or open polygons lines are managed too. 
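The command list above is the whole surface the parser has to cover: each letter selects a handler, and the lowercase relative forms are resolved against the current point before bezier knots are emitted. Below is a minimal standalone sketch of that tokenise-and-normalise step for just M/m, L/l and Z/z (implicit line-to pairs after a move are ignored, and the regex and names are illustrative, not the script's):

import re

TOKEN = re.compile(r'([MmLlZz])|(-?(?:\d+\.?\d*|\.\d+)(?:[eE][-+]?\d+)?)')

def absolute_points(d):
    cx = cy = 0.0
    cmd, nums, out = None, [], []
    for letter, number in TOKEN.findall(d):
        if letter:
            cmd, nums = letter, []
            if cmd in 'Zz':
                out.append(('Z',))         # close the current sub-path
            continue
        nums.append(float(number))
        if len(nums) == 2:
            x, y = nums
            if cmd in 'ml':                # relative forms: offset from the current point
                x, y = cx + x, cy + y
            cx, cy = x, y
            out.append(('M' if cmd in 'Mm' else 'L', x, y))
            nums = []
    return out

print(absolute_points('M10,20 l30,40 L5,5 z'))
# -> [('M', 10.0, 20.0), ('L', 40.0, 60.0), ('L', 5.0, 5.0), ('Z',)]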
- -Changelog: - 0.1.1 : - control file without extension - 0.2.0 : - improved reading of several data of the same type - following the same command (for gimp import) - 0.2.1 : - better choice for viewboxing ( takes the viewbox if found, - instead of x,y,width and height - 0.2.2 : - read compact path data from Illustrator 10 - 0.2.3 : - read a few new relative displacements - 0.2.4 : - better hash for command followed by a lone data - (h,v) or uncommun number (a) - 0.2.5 : - correction for gimp import - 0.2.6 : - correction for illustrator 10 SVG - 0.2.7 : - correction for inskape 0.40 cvs SVG - 0.2.8 : - correction for inskape plain SVG - 0.3 : - reading of the transform properties added : - translate - 0.3.1 : - compatibility restored with gimp - 0.3.2 : - transform properties added (june, 15-16): - scale, - rotate, - matrix, - skew - - added a test on __name__ to load the script - outside from the blender menu - 0.3.3 : - matrix transform content control - 0.3.4 : - paths data reading rewritten (19/06/05) - 0.3.5 : - test on empty curve (22/06/05) - - removed overlayed points - 0.3.6 : - rewriting of the bezier point contruction to correct - a problem in the connection between L type point and - C or S type point - 0.3.7 : - code correction for bezier knot in Curveto command when - the command close a path - 0.3.8 : - code was aded to manage quadratic bezier, - Q,q command and T,t commands, as a normal blender's bezier point - - The last modications does not work with gimp 2.0 svg export . - corrected too . - 0.3.9 : - Path's A,a command for ellipse's arc . - 0.4.0 : - To speed up the function filter_DATA was removed and text - variables are changed into numeric variables - 0.4.1 : - svg, groups and shapes hierarchy added - - now transform properties are computed using a stack with all - parented groups - - removed or replaced useless functions : - - skewY, skewX transforms - - radians in rotate transform - 0.4.2 : - Added functon to translate others shapes in path - rect, line, polyline, polygon - 0.4.3 : - various corrections - text font (id property exported by Adobe Illustrator are between coma) - function to code s tag has been rewritten - 0.4.4 : - various corrections - to oblige the script to understand a line feed just after - a tag . Rarely encountered problem, but it exits in a svg file - format exported by a outliner script for mesh . - 0.4.5 : - update for CVS only, at least blender 2.38 and upper - no BezTriple module in older version - added a createCURVES function to avoid to use - the OBJ format export/import . - Perhaps problems with cyclic curves . If a closed curve - does not appear closed in blender, enter edit mode select - all knot with Akey, do a Hkey to set handle type (without - this the knot are recalculated) , and finally use the Ckey - to close the curve . - Should work ... not guaranted . - 0.4.6 : - cyclic flag ... - 0.4.7 : - Management of the svgz files . the complete python or the - gzip.py file is needed . - Little improvement of the curve drawing using the createCURVES - function - 0.4.8 : - short modif for a fantasy font case in the OOo svg format - ('viewbox' is written 'viewBox', for instance) . - Note that (at this time, 2006/05/01, 1OOo exports in svg - but does not read its own export - 0.4.9 : - skipped version : private test - 0.5.0 : - the script worked perfectly with Blender 2.41 but in Blender - 2.42, use the original svg name file + 'OOO.obj' to - write a videoscape file made blender crash under window XP when - the script loaded it . 
Curiously, use a more simple - name with a sole 'O' solved this problem . - - script returned errors on open path : corrected - - in b2.42, several successive imports seem to be added to - the same original curve . So now the script automaticaly - renames the last group of imported curve with the original - name file . - 0.5.1 : - without join option in the internal curve creation function - 0.5.2 : - the createCURVES() function has been cleanded . Now it works - fine but all bezier curves are joined in the same curve object . - 0.5.3 : - removed two things : - 1/ the ajustement function to increase speed . 35 % faster : - 5690 curves and 30254 points in 11 seconds . User should do - a ctrl-a on the object . - 2/ the import method menu . No reason to choose between the - old extern curve creat and the new intern curve creation - this last one is largely faster . - 0.5.4 : - translation of the functions' name + improvment in the dict lookup . - Quite 15% faster . 9.75 seconds instead of 11 to load the file test . - A test was also added to find the fill style so now the script closes - these curves even if they are not defined as closed in the strict path - commands . - The old non used functions have been completely removed . - 0.5.5 : - Modifs for architect users . - 0.5.6 : - Exec was removed from the collect_ATTRIBUTS function . - Other uses was evaluated. - 0.5.7 : - Wash down of some handle problems. - - 0.5.8 : - 2007/3/9 - Wash down of the last exec and correction of a - problem with the curve's first beztriple handle - which was not recorded at first time . - - Added some units managements - - Correction of the rotate matrix - - Correction of the skew matrix - - change in the wash_DATA function suggested by cambo - - added __slot__ in class Bez, ITEM and CURVE suggested by cambo - - remove unused properties in class ITEM and CURVE - - 0.5.9 : - 2007/3/28 - - many improvements for faster and clearer code suggested by cambo and martin. - replacement of "%s" statement by str function. - - correction of an error in the scale transform management - - correction in the management of the stack transformation that rise an error - under python 2.5 but curiously not with python 2.4 - - 0.5.9a : - 2007/3/29 - - Again a lot of minors corrections - - Backward to 0.5.8 of the function that manages float numbers exported - by the Adobe Illustrator's SVG. After a lot of tests it seems that this oldest - version is also faster too . - - correction (bad) on handle management with V and H commands. - - 0.5.9b : - 2007/3/31 - - one or two minor corrections - - now the new object curve is added in the current layer. - - short modif in the scale menu... - - 0.5.9d : - 2007/4/5 - - when a svg file containts several curves they can be imported in - separate object. - - managment of paths' name when paths are imported as separate curves. - - a menu was added to select between separate or joined curves - - management of colors - - 0.5.9e : - 2007/4/7 - - corrected a scale problem that only appears when one uses beveldepth - - in separate curve option, name is also given to the curve data - - added the list of svg's color names (147) and modified the color's method - to work with. - - 0.5.9h : - 2007/5/2 - - script was updated with the modifs by cambo - - removed all debug statements - - correction of a zero division error in the calc_arc function. - - 0.5.9f: - 2007/15/7 - - Correction de plusieurs bugs sur l'attributions des couleurs et le nommage - des courbes - - 0.5.9i : - ??/??/?? 
- - Patch externe réalisé sur blender.org project. - - 0.5.9j : - 08/11/2008 - 0.5.9k : - 14/01/2009 - 0.5.9l : - 31/01/2009 - 0.5.9n : - 01/02/2009 - 0.5.9o : - 04/04/2009, remove pattern if it made with path. - - -================================================================================== -==================================================================================""" -SHARP_IMPORT=0 -SCALE=1 -scale_=1 -DEBUG = 0 -DEVELOPPEMENT=0 -TESTCOLOR=0 - -LAST_ID='' -LAST_COLOR=[0.0,0.0,0.0,0.0] -SEPARATE_CURVES=0 -USE_COLORS=0 -PATTERN=0 - -SVGCOLORNAMELIST={ 'aliceblue':[240, 248, 255] ,'antiquewhite':[250, 235, 215] -,'aqua':[ 0, 255, 255], 'aquamarine':[127, 255, 212] -,'azure':[240, 255, 255], 'beige':[245, 245, 220] -,'bisque':[255, 228, 196], 'black':[ 0, 0, 0] -,'blanchedalmond':[255, 235, 205] ,'blue':[ 0, 0, 255] -,'blueviolet':[138, 43, 226],'brown':[165, 42, 42] -,'burlywood':[222, 184, 135],'cadetblue':[ 95, 158, 160] -,'chartreuse':[127, 255, 0] ,'chocolate':[210, 105, 30] -,'coral':[255, 127, 80],'cornflowerblue':[100, 149, 237] -,'cornsilk':[255, 248, 220],'crimson':[220, 20, 60] -,'cyan':[ 0, 255, 255],'darkblue':[ 0, 0, 139] -,'darkcyan':[ 0, 139, 139],'darkgoldenrod':[184, 134, 11] -,'darkgray':[169, 169, 169],'darkgreen':[ 0, 100, 0] -,'darkgrey':[169, 169, 169],'darkkhaki':[189, 183, 107] -,'darkmagenta':[139, 0, 139],'darkolivegreen':[ 85, 107, 47] -,'darkorange':[255, 140, 0],'darkorchid':[153, 50, 204] -,'darkred':[139, 0, 0],'darksalmon':[233, 150, 122] -,'darkseagreen':[143, 188, 143],'darkslateblue':[ 72, 61, 139] -,'darkslategray':[ 47, 79, 79],'darkslategrey':[ 47, 79, 79] -,'darkturquoise':[ 0, 206, 209],'darkviolet':[148, 0, 211] -,'deeppink':[255, 20, 147],'deepskyblue':[ 0, 191, 255] -,'dimgray':[105, 105, 105],'dimgrey':[105, 105, 105] -,'dodgerblue':[ 30, 144, 255],'firebrick':[178, 34, 34] -,'floralwhite':[255, 250, 240],'forestgreen':[ 34, 139, 34] -,'fuchsia':[255, 0, 255],'gainsboro':[220, 220, 220] -,'ghostwhite':[248, 248, 255],'gold':[255, 215, 0] -,'goldenrod':[218, 165, 32],'gray':[128, 128, 128] -,'grey':[128, 128, 128],'green':[ 0, 128, 0] -,'greenyellow':[173, 255, 47],'honeydew':[240, 255, 240] -,'hotpink':[255, 105, 180],'indianred':[205, 92, 92] -,'indigo':[ 75, 0, 130],'ivory':[255, 255, 240] -,'khaki':[240, 230, 140],'lavender':[230, 230, 250] -,'lavenderblush':[255, 240, 245],'lawngreen':[124, 252, 0] -,'lemonchiffon':[255, 250, 205],'lightblue':[173, 216, 230] -,'lightcoral':[240, 128, 128],'lightcyan':[224, 255, 255] -,'lightgoldenrodyellow':[250, 250, 210],'lightgray':[211, 211, 211] -,'lightgreen':[144, 238, 144],'lightgrey':[211, 211, 211] -,'lightpink':[255, 182, 193],'lightsalmon':[255, 160, 122] -,'lightseagreen':[ 32, 178, 170],'lightskyblue':[135, 206, 250] -,'lightslategray':[119, 136, 153],'lightslategrey':[119, 136, 153] -,'lightsteelblue':[176, 196, 222],'lightyellow':[255, 255, 224] -,'lime':[ 0, 255, 0],'limegreen':[ 50, 205, 50] -,'linen':[250, 240, 230],'magenta':[255, 0, 255] -,'maroon':[128, 0, 0],'mediumaquamarine':[102, 205, 170] -,'mediumblue':[ 0, 0, 205],'mediumorchid':[186, 85, 211] -,'mediumpurple':[147, 112, 219],'mediumseagreen':[ 60, 179, 113] -,'mediumslateblue':[123, 104, 238],'mediumspringgreen':[ 0, 250, 154] -,'mediumturquoise':[ 72, 209, 204],'mediumvioletred':[199, 21, 133] -,'midnightblue':[ 25, 25, 112],'mintcream':[245, 255, 250] -,'mistyrose':[255, 228, 225],'moccasin':[255, 228, 181] -,'navajowhite':[255, 222, 173],'navy':[ 0, 0, 128] -,'oldlace':[253, 245, 230],'olive':[128, 128, 0] 
-,'olivedrab':[107, 142, 35],'orange':[255, 165, 0] -,'orangered':[255, 69, 0],'orchid':[218, 112, 214] -,'palegoldenrod':[238, 232, 170],'palegreen':[152, 251, 152] -,'paleturquoise':[175, 238, 238],'palevioletred':[219, 112, 147] -,'papayawhip':[255, 239, 213],'peachpuff':[255, 218, 185] -,'peru':[205, 133, 63],'pink':[255, 192, 203] -,'plum':[221, 160, 221],'powderblue':[176, 224, 230] -,'purple':[128, 0, 128],'red':[255, 0, 0] -,'rosybrown':[188, 143, 143],'royalblue':[ 65, 105, 225] -,'saddlebrown':[139, 69, 19],'salmon':[250, 128, 114] -,'sandybrown':[244, 164, 96],'seagreen':[ 46, 139, 87] -,'seashell':[255, 245, 238],'sienna':[160, 82, 45] -,'silver':[192, 192, 192],'skyblue':[135, 206, 235] -,'slateblue':[106, 90, 205],'slategray':[112, 128, 144] -,'slategrey':[112, 128, 144],'snow':[255, 250, 250] -,'springgreen':[ 0, 255, 127],'steelblue':[ 70, 130, 180] -,'tan':[210, 180, 140],'teal':[ 0, 128, 128] -,'thistle':[216, 191, 216],'tomato':[255, 99, 71] -,'turquoise':[ 64, 224, 208],'violet':[238, 130, 238] -,'wheat':[245, 222, 179],'white':[255, 255, 255] -,'whitesmoke':[245, 245, 245],'yellow':[255, 255, 0] -,'yellowgreen':[154, 205, 50]} - - -import sys -from math import cos,sin,tan, atan2, pi, ceil -PI=pi -import Blender -from Blender import Mathutils - -try: - import nt - os=nt - os.sep='\\' - -except: - import posix - os=posix - os.sep='/' - -def isdir(path): - try: - st = os.stat(path) - return 1 - except: - return 0 - -def split(pathname): - if os.sep in pathname: - k0=pathname.split(os.sep) - else: - if os.sep=='/': - k0=pathname.split('\\') - else: - k0=pathname.split('/') - directory=pathname.replace(k0[len(k0)-1],'') - Name=k0[len(k0)-1] - return directory, Name - -def join(l0,l1): - return l0+os.sep+l1 - -os.isdir=isdir -os.split=split -os.join=join - -def filterFILE(nom): - """ - Function filterFILE - - in : string nom , filename - out : string t , if correct filecontaint - - read the file's content and try to see if the format - is correct . - - Lit le contenu du fichier et en fait une pre-analyse - pour savoir s'il merite d'etre traite . - """ - # ---------- - # 0.4.7 - # ---------- - if nom.upper().endswith('.SVGZ'): - try : - import gzip - tz=gzip.GzipFile(nom) - t=tz.read() - except: - name = "ERROR: fail to import gzip module or gzip error ... " - result = Blender.Draw.PupMenu(name) - return "false" - else: - f=open(nom,'rU') - t=f.read() - f.close() - # ---------- - # 0.4.7 : end - # ---------- - # ----------------- - # pre-format ... - # ----------------- - # -------------------- - # 0.4.4 '\r','' --> '\r',' ' - # '\n','' --> '\n',' ' - #-------------------- - t=t.replace('\r',' ') - t=t.replace('\n',' ') - t=t.replace('svg:','') - #-------------------- - # may be needed in some import case when the - # file is saved from a mozilla display - #-------------------- - t=t.replace(chr(0),'') - if not '= 0.0\ - and abs(f1[5])-abs(f2[5])< EPSILON and abs(f1[5])-abs(f2[5])>= 0.0 : - return 1 - else: - return 0 - - -#-------------------- -# 0.4.5 : for blender cvs 2.38 .... 
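SVGCOLORNAMELIST above maps the 147 SVG colour keywords to 0-255 triples; when colour import is enabled, the fill colour ends up as a material whose rgbCol is each channel divided by 255 (see new_MATERIAL below). A minimal sketch of that lookup and conversion; the small table and the '#rrggbb' fallback here are illustrative stand-ins, not the script's own handling.

SVG_COLORS = {'red': [255, 0, 0], 'olive': [128, 128, 0], 'navy': [0, 0, 128]}
# (tiny excerpt; the script's table holds all 147 keyword colours)

def color_to_rgbcol(value, table=SVG_COLORS):
    # Return a 0.0-1.0 RGB triple suitable for a material colour.
    if value.startswith('#') and len(value) == 7:      # hypothetical hex fallback
        rgb = [int(value[i:i + 2], 16) for i in (1, 3, 5)]
    else:
        rgb = table.get(value.lower(), [0, 0, 0])
    return [c / 255.0 for c in rgb]

print(color_to_rgbcol('olive'))        # -> [0.50196..., 0.50196..., 0.0]
print(color_to_rgbcol('#ff8000'))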
-#-------------------- -def createCURVES(curves, name): - """ - internal curves creation - """ - global SCALE, B, BOUNDINGBOX,scale_, SEPARATE_CURVES - global USE_COLORS - from Blender import Curve, Object, Scene, BezTriple - HANDLE={'C':BezTriple.HandleTypes.FREE,'L':BezTriple.HandleTypes.VECT} - r=BOUNDINGBOX['rec'] - - if scale_==3: - SCALE=1.0 - elif scale_==1: - SCALE=r[2]-r[0] - elif scale_==2: - SCALE=r[3]-r[1] - - scene = Scene.GetCurrent() - scene.objects.selected = [] - - if not SEPARATE_CURVES: - c = Curve.New() - c.setResolu(24) - - MATNAME=[] - nloc=0.0 - - def new_MATERIAL(val): - # ----------------------- - # have to create a material - #------------------------ - if val.matname and val.matname in MATNAME: - mat = Blender.Material.Get(val.matname) - elif val.matname: - mat = Blender.Material.New(val.matname) - mat.rgbCol = [val.color[0]/255.0, val.color[1]/255.0, val.color[2]/255.0] - else: - mat = Blender.Material.New(val.id) - mat.rgbCol = [val.color[0]/255.0, val.color[1]/255.0, val.color[2]/255.0] - return [mat] - - for I,val in curves.ITEM.iteritems(): - if SEPARATE_CURVES: - c = Curve.New() - c.setResolu(24) - if USE_COLORS and val.mat: - c.materials=new_MATERIAL(val) - - bzn=0 - if val.beziers_knot[-1].tag in ['L','l','V','v','H','h'] and\ - test_samelocations(val.beziers_knot[-1].co,val.beziers_knot[0].co): - del val.beziers_knot[-1] - - for k2 in xrange(0,len(val.beziers_knot)): - bz= [co for co in val.beziers_knot[k2].co] - if bzn==0: - cp1 = bz[4]/SCALE, bz[5]/-SCALE,0.0, bz[0]/SCALE, bz[1]/-SCALE,0.0, bz[2]/SCALE,bz[3]/-SCALE,0.0, - beztriple1 = BezTriple.New(cp1) - bez = c.appendNurb(beztriple1) - bez[0].handleTypes=(HANDLE[val.beziers_knot[k2].ha[0]],HANDLE[val.beziers_knot[k2].ha[1]]) - bzn = 1 - else: - cp2 = bz[4]/SCALE,bz[5]/-SCALE,0.0 , bz[0]/SCALE, bz[1]/-SCALE,0.0, bz[2]/SCALE,bz[3]/-SCALE,0.0 - beztriple2 = BezTriple.New(cp2) - beztriple2.handleTypes= (HANDLE[val.beziers_knot[k2].ha[0]],HANDLE[val.beziers_knot[k2].ha[1]]) - bez.append(beztriple2) - - if val.flagUV[0]==1 or val.fill==1: - #-------------------- - # 0.4.6 : cyclic flag ... 
- #-------------------- - bez.flagU += 1 - - if SEPARATE_CURVES: - ob = scene.objects.new(c,val.id) - scene.objects.active = ob - ob.setLocation(0.0,0.0,nloc) - nloc+=0.0001 - c.update() - - if not SEPARATE_CURVES: - ob = scene.objects.new(c,name) - scene.objects.active = ob - c.update() - -#===================================================================== -#===== SVG format : DEBUT ========================= -#===================================================================== -#-------------------- -# 0.5.8, needed with the new -# tranform evaluation -#-------------------- -pxUNIT={'pt':1.25, - 'pc':15.0, - 'mm':3.543307, - 'cm':35.43307, - 'in':90.0, - 'em':1.0, # should be taken from font size - # but not currently managed - 'ex':1.0, # should be taken from font size - # but not currently managed - '%':1.0, - } - -#-------------------- -# 0.4.2 -# 0.5.8, to remove exec -#-------------------- -def rect(prp): - """ - build rectangle paths - """ - D=[] - if 'x' not in prp: x=0.0 - else : x=float(prp['x']) - if 'y' not in prp: y=0.0 - else : y=float(prp['y']) - #-------------------- - # 0.5.8 - #-------------------- - try: - height=float(prp['height']) - except: - pxUNIT['%']=(BOUNDINGBOX['rec'][3]-BOUNDINGBOX['rec'][1])/100.0 - for key in pxUNIT:#.keys(): - if key in prp['height']: - height=float(prp['height'].replace(key,''))*pxUNIT[key] - try: - width=float(prp['width']) - except: - pxUNIT['%']=(BOUNDINGBOX['rec'][2]-BOUNDINGBOX['rec'][0])/100.0 - for key in pxUNIT:#.keys(): - if key in prp['width']: - width=float(prp['width'].replace(key,''))*pxUNIT[key] - #-------------------- - # 0.5.8, end - #-------------------- - """ - normal rect - x,y - h1 - *----------* - | | - | | - | | - *----------* v1 - h2 - """ - if 'rx' not in prp or 'rx' not in prp: - D=['M',str(x),str(y),'h',str(width),'v',str(height),'h',str(-width),'z'] - else : - rx=float(prp['rx']) - if 'ry' not in prp : - ry=float(prp['rx']) - else : ry=float(prp['ry']) - if 'rx' in prp and prp['rx']<0.0: rx*=-1 - if 'ry' in prp and prp['ry']<0.0: ry*=-1 - """ - rounded corner - - x,y M h1 - ---*----------* - / \ - / \ - v2 * * c1 - | | - | | - | | - c3 * * v2 - \ / - \ / - *----------* - h2 c2 - """ - - D=['M',str(x+rx),str(y), - 'h',str(width-2*rx), - 'c',str(rx),'0.0',str(rx),str(ry),str(rx),str(ry), - 'v',str(height-ry), - 'c','0.0',str(ry),str(-rx),str(ry),str(-rx),str(ry), - 'h',str(-width+2*rx), - 'c',str(-rx),'0.0',str(-rx),str(-ry),str(-rx),str(-ry), - 'v',str(-height+ry), - 'c','0.0','0.0','0.0',str(-ry),str(rx),str(-ry), - 'z'] - - return D - -#-------------------- -# 0.4.2 -# 0.5.8, to remove exec -#-------------------- -def circle(prp): - if 'cx' not in prp: cx=0.0 - else : cx =float(prp['cx']) - if 'cy' not in prp: cy=0.0 - else : cy =float(prp['cy']) - r = float(prp['r']) - D=['M',str(cx),str(cy+r), - 'C',str(cx-r), str(cy+r*0.552),str(cx-0.552*r),str(cy+r), str(cx),str(cy+r), - 'C',str(cx+r*0.552), str(cy+r), str(cx+r), str(cy+r*0.552), str(cx+r),str(cy), - 'C',str(cx+r), str(cy-r*0.552),str(cx+r*0.552),str(cy-r),str(cx), str(cy-r), - 'C',str(cx-r*0.552), str(cy-r), str(cx-r), str(cy-r*0.552),str(cx-r),str(cy), - 'Z'] - return D - -#-------------------- -# 0.4.2 -# 0.5.8, to remove exec -#-------------------- -def ellipse(prp): - if 'cx' not in prp: cx=0.0 - else : cx =float(prp['cx']) - if 'cy' not in prp: cy=0.0 - else : cy =float(prp['cy']) - ry = float(prp['rx']) - rx = float(prp['ry']) - D=['M',str(cx),str(cy+rx), - 'C',str(cx-ry),str(cy+rx*0.552),str(cx-0.552*ry),str(cy+rx),str(cx),str(cy+rx), - 
'C',str(cx+ry*0.552),str(cy+rx),str(cx+ry),str(cy+rx*0.552),str(cx+ry),str(cy), - 'C',str(cx+ry),str(cy-rx*0.552),str(cx+ry*0.552),str(cy-rx),str(cx),str(cy-rx), - 'C',str(cx-ry*0.552),str(cy-rx),str(cx-ry),str(cy-rx*0.552),str(cx-ry),str(cy), - 'z'] - return D - -#-------------------- -# 0.4.2 -# 0.5.8, to remove exec -#-------------------- -def line(prp): - D=['M',str(prp['x1']),str(prp['y1']), - 'L',str(prp['x2']),str(prp['y2'])] - return D - -#-------------------- -# 0.4.2 -# 0.5.8, to remove exec -#-------------------- -def polyline(prp): - if 'points' in prp: - points=prp['points'].split(' ') - np=0 - for p in points: - if p!='': - p=p.split(',') - if np==0: - D=['M',str(p[0]),str(p[1])] - np+=1 - else: - D.append('L'); D.append(str(p[0])); D.append(str(p[1])) - return D - else: - return [] - -#-------------------- -# 0.4.2 -# 0.5.8, to remove exec -#-------------------- -def polygon(prp): - D=polyline(prp) - if D!=[]: - D.append('Z') - return D - - -#-------------------- -# 0.5.8, to remove exec -#-------------------- -OTHERSSHAPES={ 'rect' : rect, - 'line' : line, - 'polyline': polyline, - 'polygon' : polygon, - 'circle' : circle, - 'ellipse' : ellipse} - -#-------------------- -# 0.3.9 -#-------------------- -def calc_arc (cpx,cpy, rx, ry, ang, fa , fs , x, y) : - """ - Calc arc paths - """ - rx=abs(rx) - ry=abs(ry) - px=abs((cos(ang)*(cpx-x)+sin(ang)*(cpy-y))*0.5)**2.0 - py=abs((cos(ang)*(cpy-y)-sin(ang)*(cpx-x))*0.5)**2.0 - rpx=rpy=0.0 - if abs(rx)>0.0: rpx=px/(rx**2.0) - if abs(ry)>0.0: rpy=py/(ry**2.0) - pl=rpx+rpy - if pl>1.0: - pl=pl**0.5;rx*=pl;ry*=pl - carx=sarx=cary=sary=0.0 - if abs(rx)>0.0: - carx=cos(ang)/rx;sarx=sin(ang)/rx - if abs(ry)>0.0: - cary=cos(ang)/ry;sary=sin(ang)/ry - x0=(carx)*cpx+(sarx)*cpy - y0=(-sary)*cpx+(cary)*cpy - x1=(carx)*x+(sarx)*y - y1=(-sary)*x+(cary)*y - d=(x1-x0)*(x1-x0)+(y1-y0)*(y1-y0) - if abs(d)>0.0 :sq=1.0/d-0.25 - else: sq=-0.25 - if sq<0.0 :sq=0.0 - sf=sq**0.5 - if fs==fa :sf=-sf - xc=0.5*(x0+x1)-sf*(y1-y0) - yc=0.5*(y0+y1)+sf*(x1-x0) - ang_0=atan2(y0-yc,x0-xc) - ang_1=atan2(y1-yc,x1-xc) - ang_arc=ang_1-ang_0; - if (ang_arc < 0.0 and fs==1) : - ang_arc += 2.0 * PI - elif (ang_arc>0.0 and fs==0) : - ang_arc-=2.0*PI - n_segs=int(ceil(abs(ang_arc*2.0/(PI*0.5+0.001)))) - P=[] - for i in xrange(n_segs): - ang0=ang_0+i*ang_arc/n_segs - ang1=ang_0+(i+1)*ang_arc/n_segs - ang_demi=0.25*(ang1-ang0) - t=2.66666*sin(ang_demi)*sin(ang_demi)/sin(ang_demi*2.0) - x1=xc+cos(ang0)-t*sin(ang0) - y1=yc+sin(ang0)+t*cos(ang0) - x2=xc+cos(ang1) - y2=yc+sin(ang1) - x3=x2+t*sin(ang1) - y3=y2-t*cos(ang1) - P.append([[(cos(ang)*rx)*x1+(-sin(ang)*ry)*y1, - (sin(ang)*rx)*x1+(cos(ang)*ry)*y1], - [(cos(ang)*rx)*x3+(-sin(ang)*ry)*y3, - (sin(ang)*rx)*x3+(cos(ang)*ry)*y3], - [(cos(ang)*rx)*x2+(-sin(ang)*ry)*y2, - (sin(ang)*rx)*x2+(cos(ang)*ry)*y2]]) - return P - -#-------------------- -# 0.3.9 -#-------------------- -def curve_to_a(curves, c,D,n0,CP): #A,a - global SCALE - l=[float(D[c[1]+1]),float(D[c[1]+2]),float(D[c[1]+3]), - int(D[c[1]+4]),int(D[c[1]+5]),float(D[c[1]+6]),float(D[c[1]+7])] - if c[0]=='a': - l[5]=l[5] + CP[0] - l[6]=l[6] + CP[1] - B=Bez() - B.co=[ CP[0], CP[1], CP[0], CP[1], CP[0], CP[1] ] - B.ha=['C','C'] - B.tag=c[0] - POINTS= calc_arc (CP[0],CP[1], - l[0], l[1], l[2]*(PI / 180.0), - l[3], l[4], - l[5], l[6] ) - for p in POINTS : - B=Bez() - B.co=[ p[2][0],p[2][1], p[0][0],p[0][1], p[1][0],p[1][1]] - B.ha=['C','C'] - B.tag='C' - BP=curves.ITEM[n0].beziers_knot[-1] - BP.co[2]=B.co[2] - BP.co[3]=B.co[3] - curves.ITEM[n0].beziers_knot.append(B) - 
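[editor's note] calc_arc above turns an SVG 'A' segment from endpoint form (current point, radii, x-axis rotation, flags, end point) into a centre, start angle and sweep before splitting it into cubic pieces. For reference, this is the textbook endpoint-to-centre conversion from the SVG implementation notes; it is not a drop-in replacement for calc_arc, and degenerate inputs (coincident endpoints, zero radii) are not handled.

from math import atan2, cos, pi, sin, sqrt

def arc_endpoint_to_center(x1, y1, x2, y2, rx, ry, phi, large_arc, sweep):
    """Return (cx, cy, rx, ry, theta1, dtheta) for an SVG elliptical arc."""
    rx, ry = abs(rx), abs(ry)
    # 1. Rotate into the ellipse's frame and take the half-chord.
    dx, dy = (x1 - x2) / 2.0, (y1 - y2) / 2.0
    xp = cos(phi) * dx + sin(phi) * dy
    yp = -sin(phi) * dx + cos(phi) * dy
    # 2. Grow the radii if they cannot span the two endpoints.
    lam = xp * xp / (rx * rx) + yp * yp / (ry * ry)
    if lam > 1.0:
        rx, ry = rx * sqrt(lam), ry * sqrt(lam)
    # 3. Centre in the rotated frame; the sign picks one of the two candidates.
    num = rx * rx * ry * ry - rx * rx * yp * yp - ry * ry * xp * xp
    den = rx * rx * yp * yp + ry * ry * xp * xp
    coef = sqrt(max(0.0, num / den))
    if large_arc == sweep:
        coef = -coef
    cxp, cyp = coef * rx * yp / ry, -coef * ry * xp / rx
    # 4. Back to user space.
    cx = cos(phi) * cxp - sin(phi) * cyp + (x1 + x2) / 2.0
    cy = sin(phi) * cxp + cos(phi) * cyp + (y1 + y2) / 2.0
    # 5. Start angle and sweep, forced into the requested direction.
    theta1 = atan2((yp - cyp) / ry, (xp - cxp) / rx)
    theta2 = atan2((-yp - cyp) / ry, (-xp - cxp) / rx)
    dtheta = theta2 - theta1
    if sweep and dtheta < 0.0:
        dtheta += 2.0 * pi
    elif not sweep and dtheta > 0.0:
        dtheta -= 2.0 * pi
    return cx, cy, rx, ry, theta1, dtheta

# Quarter of a unit circle from (1, 0) to (0, 1), sweep flag set:
print(arc_endpoint_to_center(1.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0, 1))
# -> centre (0, 0), radii 1, start angle 0, sweep pi/2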
BP=curves.ITEM[n0].beziers_knot[-1] - BP.co[2]=BP.co[0] - BP.co[3]=BP.co[1] - CP=[l[5], l[6]] - #---------- 059m------------ - if len(D)>c[1]+7 and D[c[1]+8] not in TAGcourbe : - c[1]+=7 - curves,n0,CP=curve_to_a(curves, c, D, n0,CP) - #---------- 059m------------ - return curves,n0,CP - -def move_to(curves, c, D, n0,CP, proprietes): - global DEBUG,TAGcourbe, LAST_ID - global USE_COLORS - - l=[float(D[c[1]+1]),float(D[c[1]+2])] - - if c[0]=='m': - l=[l[0]+CP[0], - l[1] + CP[1]] - - if n0 in curves.ITEM: - n0+=1 - CP=[l[0],l[1]] - curves.ITEM[n0]=ITEM() - - if 'id' in proprietes: - curves.ITEM[n0].id=proprietes['id'] - else: - curves.ITEM[n0].id=LAST_ID - - proprietes['n'].append(n0) - if USE_COLORS: - pr= proprietes.get('fill') # None or the property - if pr != None: - if '#' in pr: - i=1 - curves.ITEM[n0].color=[int(pr[i:i+2],16),int(pr[i+2:i+4],16),int(pr[i+4:i+6],16)] - curves.ITEM[n0].mat=1 - elif pr in SVGCOLORNAMELIST: - Courbe[n].color=SVGCOLORNAMELIST[pr] - Courbe[n].mat=1 - - B=Bez() - B.co=[CP[0],CP[1],CP[0],CP[1],CP[0],CP[1]] - B.ha=['L','C'] - B.tag=c[0] - curves.ITEM[n0].beziers_knot.append(B) - return curves,n0,CP - -def close_z(curves, c,D,n0,CP): #Z,z - curves.ITEM[n0].flagUV[0]=1 - if len(curves.ITEM[n0].beziers_knot)>1: - BP=curves.ITEM[n0].beziers_knot[-1] - BP0=curves.ITEM[n0].beziers_knot[0] - if BP.tag in ['c','C','s','S',]: - BP.co[2]=BP0.co[2] #4-5 point prec - BP.co[3]=BP0.co[3] - del curves.ITEM[n0].beziers_knot[0] - else: - del curves.ITEM[n0] - n0-=1 - return curves,n0,CP - -def curve_to_q(curves, c,D,n0,CP): #Q,q - l=[float(D[c[1]+1]),float(D[c[1]+2]),float(D[c[1]+3]),float(D[c[1]+4])] - if c[0]=='q': - l=[l[0]+CP[0], l[1]+CP[1], l[2]+CP[0], l[3]+CP[1]] - B=Bez() - B.co=[l[2], l[3], l[2], l[3], l[0], l[1]] #plus toucher au 2-3 - B.ha=['C','C'] - B.tag=c[0] - BP=curves.ITEM[n0].beziers_knot[-1] - BP.co[2]=BP.co[0] - BP.co[3]=BP.co[1] - curves.ITEM[n0].beziers_knot.append(B) - CP=[l[2],l[3]] - #if DEBUG==1: pass - if len(D)>c[1]+5 and D[c[1]+5] not in TAGcourbe : - c[1]+=4 - curves,n0,CP=curve_to_q(curves, c, D, n0,CP) - return curves,n0,CP - -def curve_to_t(curves, c,D,n0,CP): #T,t - l=[float(D[c[1]+1]),float(D[c[1]+2])] - if c[0]=='t': - l=[l[0]+CP[0], l[1]+CP[1]] - B=Bez() - B.co=[l[0], l[1], l[0], l[1], l[0], l[1]] #plus toucher au 2-3 - B.ha=['C','C'] - B.tag=c[0] - BP=curves.ITEM[n0].beziers_knot[-1] - l0=build_SYMETRIC([BP.co[0],BP.co[1],BP.co[4],BP.co[5]]) - if BP.tag in ['q','Q','t','T','m','M']: - BP.co[2]=l0[2] - BP.co[3]=l0[3] - curves.ITEM[n0].beziers_knot.append(B) - CP=[l[0],l[1]] - if len(D)>c[1]+3 and D[c[1]+3] not in TAGcourbe : - c[1]+=4 - curves,n0,CP=curve_to_t(curves, c, D, n0,CP) - return curves,n0,CP - -#-------------------- -# 0.4.3 : rewritten -#-------------------- -def build_SYMETRIC(l): - X=l[2]-(l[0]-l[2]) - Y=l[3]-(l[1]-l[3]) - return X,Y - -def curve_to_s(curves, c,D,n0,CP): #S,s - l=[float(D[c[1]+1]), - float(D[c[1]+2]), - float(D[c[1]+3]), - float(D[c[1]+4])] - if c[0]=='s': - l=[l[0]+CP[0], l[1]+CP[1], - l[2]+CP[0], l[3]+CP[1]] - B=Bez() - B.co=[l[2],l[3],l[2],l[3],l[0],l[1]] #plus toucher au 2-3 - B.ha=['C','C'] - B.tag=c[0] - BP=curves.ITEM[n0].beziers_knot[-1] - #-------------------- - # 0.4.3 - #-------------------- - BP.co[2],BP.co[3]=build_SYMETRIC([BP.co[4],BP.co[5],BP.co[0],BP.co[1]]) - curves.ITEM[n0].beziers_knot.append(B) - #-------------------- - # 0.4.3 - #-------------------- - CP=[l[2],l[3]] - if len(D)>c[1]+5 and D[c[1]+5] not in TAGcourbe : - c[1]+=4 - curves,n0,CP=curve_to_c(curves, c, D, n0,CP) - return 
curves,n0,CP - -def curve_to_c(curves, c, D, n0,CP): #c,C - l=[float(D[c[1]+1]),float(D[c[1]+2]),float(D[c[1]+3]), - float(D[c[1]+4]),float(D[c[1]+5]),float(D[c[1]+6])] - if c[0]=='c': - l=[l[0]+CP[0], - l[1]+CP[1], - l[2]+CP[0], - l[3]+CP[1], - l[4]+CP[0], - l[5]+CP[1]] - B=Bez() - B.co=[l[4], - l[5], - l[4], - l[5], - l[2], - l[3]] #plus toucher au 2-3 - - - B.ha=['C','C'] - B.tag=c[0] - BP=curves.ITEM[n0].beziers_knot[-1] - BP.co[2]=l[0] - BP.co[3]=l[1] - BP.ha[1]='C' - curves.ITEM[n0].beziers_knot.append(B) - CP=[l[4],l[5]] - if len(D)>c[1]+7 and D[c[1]+7] not in TAGcourbe : - c[1]+=6 - curves,n0,CP=curve_to_c(curves, c, D, n0,CP) - return curves,n0,CP - -def draw_line_l(curves, c, D, n0,CP): #L,l - - l=[float(D[c[1]+1]),float(D[c[1]+2])] - if c[0]=='l': - l=[l[0]+CP[0], - l[1]+CP[1]] - B=Bez() - B.co=[l[0],l[1], - l[0],l[1], - l[0],l[1]] - - B.ha=['L','L'] - B.tag=c[0] - BP=curves.ITEM[n0].beziers_knot[-1] - BP.ha[1]='L' - - curves.ITEM[n0].beziers_knot.append(B) - CP=[B.co[4],B.co[5]] - - if len(D)>c[1]+3 and D[c[1]+3] not in TAGcourbe : - c[1]+=2 - curves,n0,CP=draw_line_l(curves, c, D, n0,CP) #L - - return curves,n0,CP - -def draw_line_h(curves, c,D,n0,CP): #H,h - if c[0]=='h': - l=[float(D[c[1]+1])+float(CP[0]),CP[1]] - else: - l=[float(D[c[1]+1]),CP[1]] - B=Bez() - B.co=[l[0],l[1],l[0],l[1],l[0],l[1]] - B.ha=['L','L'] - B.tag=c[0] - #BP=curves.ITEM[n0].beziers_knot[-1] - #BP.ha[0]='L' - curves.ITEM[n0].beziers_knot.append(B) - CP=[l[0],l[1]] - return curves,n0,CP - -def draw_line_v(curves, c,D,n0,CP): #V, v - if c[0]=='v': - l=[CP[0], float(D[c[1]+1])+CP[1]] - else: - l=[CP[0], float(D[c[1]+1])] - - B=Bez() - B.co=[l[0],l[1],l[0],l[1],l[0],l[1]] - B.ha=['L','L'] - B.tag=c[0] - #BP=curves.ITEM[n0].beziers_knot[-1] - #BP.ha[0]='L' - curves.ITEM[n0].beziers_knot.append(B) - CP=[l[0],l[1]] - return curves,n0,CP - -Actions= { "C" : curve_to_c, - "A" : curve_to_a, - "S" : curve_to_s, - "M" : move_to, - "V" : draw_line_v, - "L" : draw_line_l, - "H" : draw_line_h, - "Z" : close_z, - "Q" : curve_to_q, - "T" : curve_to_t, - - "c" : curve_to_c, - "a" : curve_to_a, - "s" : curve_to_s, - "m" : move_to, - "v" : draw_line_v, - "l" : draw_line_l, - "h" : draw_line_h, - "z" : close_z, - "q" : curve_to_q, - "T" : curve_to_t -} - -TAGcourbe=Actions.keys() -TAGtransform=['M','L','C','S','H','V','T','Q'] -tagTRANSFORM=0 - -def wash_DATA(ndata): - if ndata: - ndata = ndata.strip() - - if ndata[0]==',':ndata=ndata[1:] - if ndata[-1]==',':ndata=ndata[:-1] - - #-------------------- - # 0.4.0 : 'e' - #-------------------- - ni=0 - i = ndata.find('-',ni) - if i != -1: - while i>-1 : - i = ndata.find('-',ni) - # 059l ------ - if i>0 : - if ndata[i-1] not in [' ',',','e']: - ndata=ndata[:i]+','+ndata[i:] - ni=i+2 - else: - ni=i+1 - elif i>-1: - ni=1 - # 059l ------ - - ndata=ndata.replace(',,',',') - ndata=ndata.replace(' ',',') - ndata=ndata.split(',') - ndata=[i for i in ndata if i] #059a - - return ndata - -#-------------------- -# 0.3.4 : - read data rewrittten -#-------------------- -def list_DATA(DATA): - """ - This function translate a text in a list of - correct commandswith the right number of waited - values for each of them . For example : - d="'M0,14.0 z" becomes ['M','0.0','14.0','z'] - """ - # ---------------------------------------- - # borner les differents segments qui devront etre - # traites - # pour cela construire une liste avec chaque - # position de chaque emplacement tag de type - # commande path... 
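[editor's note] wash_DATA above, together with list_DATA (whose body continues below), flattens the raw 'd' attribute into a list of command letters and numeric strings by scanning for tag positions and patching separators. A shorter regex-based sketch of the same idea, shown only for comparison; it is not the parser this script uses.

import re

# One path command letter, or one number (sign, decimals and exponent allowed).
_PATH_TOKEN = re.compile(r"[MmZzLlHhVvCcSsQqTtAa]|[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?")

def tokenize_path(d):
    """Split an SVG path 'd' string into command letters and number strings."""
    return _PATH_TOKEN.findall(d)

print(tokenize_path("M0,14.0 c-1.5e-2,3 4,5 6,7z"))
# ['M', '0', '14.0', 'c', '-1.5e-2', '3', '4', '5', '6', '7', 'z']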
- # ---------------------------------------- - tagplace=[] - for d in Actions: - b1=0 - while True: - i = DATA.find(d,b1) - if i==-1: break - tagplace.append(i) - b1=i+1 - #------------------------------------------ - # cette liste doit etre traites dans l'ordre - # d'apparition des tags - #------------------------------------------ - tagplace.sort() - - tpn=range(len(tagplace)) - - - #-------------------- - # 0.3.5 :: short data, only one tag - #-------------------- - if len(tagplace)-1>0: - DATA2=[] - for t in tpn[:-1]: - DATA2.append(DATA[tagplace[t]:tagplace[t]+1]) - ndata=DATA[tagplace[t]+1:tagplace[t+1]] - - if DATA2[-1] not in ['z','Z'] : - ndata=wash_DATA(ndata) - DATA2.extend(ndata) - - DATA2.append(DATA[tagplace[t+1]:tagplace[t+1]+1]) - - if DATA2[-1] not in ['z','Z'] and len(DATA)-1>=tagplace[t+1]+1: - ndata=DATA[tagplace[t+1]+1:] - ndata=wash_DATA(ndata) - DATA2.extend(ndata) #059a - - else: - #-------------------- - # 0.3.5 : short data,only one tag - #-------------------- - DATA2=[] - DATA2.append(DATA[tagplace[0]:tagplace[0]+1]) - ndata=DATA[tagplace[0]+1:] - ndata=wash_DATA(ndata) - DATA2.extend(ndata) - return DATA2 - -#---------------------------------------------- -# 0.3 -# 0.5.8, to remove exec -#---------------------------------------------- -def translate(t): - tx=t[0] - ty=t[1] - return [1, 0, tx], [0, 1, ty],[0,0,1] - -#---------------------------------------------- -# 0.3.2 -# 0.5.8, to remove exec -#---------------------------------------------- -def scale(s): - sx=s[0] - if len(s)>1: sy=s[1] - else: sy=sx - return [sx, 0, 0], [0, sy, 0],[0,0,1] - -#---------------------------------------------- -# 0.4.1 : transslate a in radians -# 0.5.8, to remove exec -#---------------------------------------------- -def rotate(t): - a=t[0] - return [cos(a*3.1416/180.0), -sin(a*3.1416/180.0), 0], [sin(a*3.1416/180.0), cos(a*3.1416/180.0),0],[0,0,1] - -#---------------------------------------------- -# 0.3.2 -# 0.5.8, to remove exec -#---------------------------------------------- -def skewx(t): - a=t[0] - return [1, tan(a*3.1416/180.0), 0], [0, 1, 0],[0,0,1] - -#---------------------------------------------- -# 0.4.1 -# 0.5.8, to remove exec -#---------------------------------------------- -def skewy(t): - a=t[0] - return [1, 0, 0], [tan(a*3.1416/180.0), 1 , 0],[0,0,1] - -#---------------------------------------------- -# 0.3.2 -# 0.5.8, to remove exec -#---------------------------------------------- -def matrix(t): - a,b,c,d,e,f=t - return [a,c,e],[b,d,f],[0,0,1] - -#-------------------- -# 0.5.8, to remove exec -#-------------------- -matrixTRANSFORM={ 'translate':translate, - 'scale':scale, - 'rotate':rotate, - 'skewx':skewx, - 'skewy':skewy, - 'matrix':matrix - } - -#---------------------------------------------- -# 0.4.2 : rewritten -# 0.5.8 : to remove exec uses. 
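[editor's note] The transform helpers above each return one 3x3 affine matrix as three row lists. When an element carries several transforms, e.g. translate(...) rotate(...), those matrices are multiplied in order before being applied to the path coordinates. A small self-contained sketch of that composition with plain lists and column vectors [x, y, 1]; the helper names are illustrative and independent of the script's.

from math import cos, radians, sin

def mat_mul(A, B):
    """Multiply two 3x3 matrices stored as lists of rows."""
    return [[sum(A[i][k] * B[k][j] for k in range(3)) for j in range(3)]
            for i in range(3)]

def apply_to_point(M, x, y):
    """Apply a 3x3 affine matrix to the column vector (x, y, 1)."""
    return (M[0][0] * x + M[0][1] * y + M[0][2],
            M[1][0] * x + M[1][1] * y + M[1][2])

def translate(tx, ty):
    return [[1.0, 0.0, tx], [0.0, 1.0, ty], [0.0, 0.0, 1.0]]

def rotate(deg):
    a = radians(deg)
    return [[cos(a), -sin(a), 0.0], [sin(a), cos(a), 0.0], [0.0, 0.0, 1.0]]

# 'translate(10,20) rotate(90)' composes as M = T * R; the point is rotated
# first (rightmost transform) and then moved.
M = mat_mul(translate(10.0, 20.0), rotate(90.0))
print(apply_to_point(M, 1.0, 0.0))  # approximately (10.0, 21.0)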
-#---------------------------------------------- -def control_CONTAINT(txt): - """ - the transforms' descriptions can be sole or several - and separators might be forgotten - """ - t0=0 - tlist=[] - while txt.count(')',t0)>0: - t1=txt.find(')',t0) - nt0=txt[t0:t1+1] - t2=nt0[nt0.find('(')+1:-1] - val=nt0[:nt0.find('(')] - - while t2.find(' ')!=-1: - t2=t2.replace(' ',' ') - while t2.find(', ')!=-1: #059l - t2=t2.replace(', ',',') #059l - - t2=t2.replace(' ',',') - t2=[float(t) for t in t2.split(',')] - - if val=='rotate' : - t3=t2 - if len(t3)==3: - tlist.append(['translate',[t3[1],t3[2]]]) - tlist.append(['rotate',[t3[0]/180.0*3.1416]]) - tlist.append(['translate',[-t3[1],-t3[2]]]) - else: - tlist.append(['rotate',[t3[0]]]) - else: - tlist.append([val,t2]) - t0=t1+1 - return tlist - - -def curve_FILL(Courbe,proprietes): - global USE_COLORS - for n in proprietes['n']: - pr = proprietes['style'] - if n in Courbe and 'fill:' in pr: - if not 'fill:none' in pr: - Courbe[n].fill=1 - if USE_COLORS: - i= pr.find('fill:#') - if i != -1: - i= i+6 - Courbe[n].color=[int(pr[i:i+2],16),int(pr[i+2:i+4],16),int(pr[i+4:i+6],16)] - Courbe[n].mat=1 - elif ';fill-opacity' in pr: - if pr.find('fill:url')==-1: - i= pr.find('fill:')+5 - i2= pr.find(';',i) - COLORNAME= pr[i:i2] - Courbe[n].color=SVGCOLORNAMELIST[COLORNAME] - Courbe[n].mat=1 - elif 'color:' in pr: - i= pr.find('color:')+6 - i2= pr.find(';',i) - COLORNAME= pr[i:i2] - Courbe[n].color=SVGCOLORNAMELIST[COLORNAME] - Courbe[n].mat=1 - else : - COLORNAME= 'white' - Courbe[n].color=SVGCOLORNAMELIST[COLORNAME] - Courbe[n].mat=1 - -#---------------------------------------------- -# 0.4.1 : apply transform stack -#---------------------------------------------- -def curve_TRANSFORM(Courbe,proprietes): - # 1/ unpack the STACK - # create a matrix for each transform - ST=[] - for st in proprietes['stack'] : - if st and type(st)==list: - for t in st: - code = control_CONTAINT(t) - a,b,c=matrixTRANSFORM[code[0][0]](code[0][1][:]) - T=Mathutils.Matrix(a,b,c) - ST.append(T) - elif st : - code = control_CONTAINT(st) - a,b,c=matrixTRANSFORM[code[0][0]](code[0][1][:]) - T=Mathutils.Matrix(a,b,c) - ST.append(T) - if 'transform' in proprietes: - for trans in control_CONTAINT(proprietes['transform']): - #-------------------- - # 0.5.8, to remove exec - #-------------------- - a,b,c=matrixTRANSFORM[trans[0].strip()](trans[1][:]) #059 - T=Mathutils.Matrix(a,b,c) - ST.append(T) - ST.reverse() - for n in proprietes['n']: - if n in Courbe: - for bez0 in Courbe[n].beziers_knot: - bez=bez0.co - for b in [0,2,4]: - for t in ST: - v=t * Mathutils.Vector([bez[b],bez[b+1],1.0]) #059a - bez[b]=v[0] - bez[b+1]=v[1] - -def filter(d): - for nn in d: - if nn not in '0123456789.': #059a - d=d.replace(nn,"") - return d - -def get_BOUNDBOX(BOUNDINGBOX,SVG): - if 'viewbox' not in SVG: - h=float(filter(SVG['height'])) - - w=float(filter(SVG['width'])) - BOUNDINGBOX['rec']=[0.0,0.0,w,h] - r=BOUNDINGBOX['rec'] - BOUNDINGBOX['coef']=w/h - else: - viewbox=SVG['viewbox'].split() - BOUNDINGBOX['rec']=[float(viewbox[0]),float(viewbox[1]),float(viewbox[2]),float(viewbox[3])] - r=BOUNDINGBOX['rec'] - BOUNDINGBOX['coef']=(r[2]-r[0])/(r[3]-r[1]) - return BOUNDINGBOX - -#---------------------------------------------- -# 0.4.1 : attributs ex : 'id=', 'transform=', 'd=' ... 
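[editor's note] curve_FILL and move_to above read the fill colour either from a 'fill:#rrggbb' entry or from the SVG colour-name table at the top of the script, and new_MATERIAL in createCURVES divides each component by 255.0 for mat.rgbCol. A compact sketch of that conversion, assuming a plain '#rrggbb' string; 3-digit shorthand and rgb(...) syntax are deliberately ignored here.

def hex_fill_to_rgb(value):
    """'#rrggbb' -> ([0-255 ints], [0.0-1.0 floats]) as a material would need."""
    digits = value.lstrip('#')
    ints = [int(digits[i:i + 2], 16) for i in (0, 2, 4)]
    floats = [c / 255.0 for c in ints]
    return ints, floats

print(hex_fill_to_rgb('#6b8e23'))
# ([107, 142, 35], [0.419..., 0.556..., 0.137...]) -- the 'olivedrab' entry above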
-#---------------------------------------------- -def collect_ATTRIBUTS(data): - #---------------------------------------------- - # 0.4.8 : short modif for a fantasy font case - # in the OOo svg format ('viewbox' is - # written 'viewBox', for instance) - #---------------------------------------------- - data=data.replace(' ',' ').lower() - ELEM={'TYPE':data[1:data.find(' ')]} - t1=len(data) - t2=0 - ct=data.count('="') - while ct>0: - t0=data.find('="',t2) - t2=data.find(' ',t2)+1 - id=data[t2:t0] - t2=data.find('"',t0+2) - if id!='d': - ELEM[id]=data[t0+2:t2].replace('\\','/') - else: - ELEM[id]=[] - ELEM[id].append(t0+2) - ELEM[id].append(t2) - ct=data.count('="',t2) - return ELEM - -# -------------------------------------------- -# 0.4.1 : to avoid to use sax and ths xml -# tools of the complete python -# -------------------------------------------- -def build_HIERARCHY(t): - global CP, curves, SCALE, DEBUG, BOUNDINGBOX, scale_, tagTRANSFORM - global LAST_ID, PATTERN - TRANSFORM=0 - t=t.replace('\t',' ') - while t.find(' ')!=-1: t=t.replace(' ',' ') - n0=0 - t0=t1=0 - #baliste=[] - balisetype=['?','?','/','/','!','!'] - BALISES=['D', #DECL_TEXTE', - 'D', #DECL_TEXTE', - 'F', #FERMANTE', - 'E', #ELEM_VIDE', - 'd', #DOC', - 'R', #REMARQUES', - 'C', #CONTENU', - 'O' #OUVRANTE' - ] - STACK=[] - while t1-1: - t0=t.find('<',t0) - t1=t.find('>',t0) - ouvrante=0 - #-------------------- - # 0.4.4 , add 'else:' and 'break' to the 'if' statement - #-------------------- - if t0>-1 and t1>-1: - if t[t0+1] in balisetype: - b=balisetype.index(t[t0+1]) - - if t[t0+2]=='-': - b=balisetype.index(t[t0+1])+1 - - balise=BALISES[b] - - if b==2: - parent=STACK.pop(-1) - if parent!=None and TRANSFORM>0: - TRANSFORM-=1 - - elif t[t1-1] in balisetype: - balise=BALISES[balisetype.index(t[t1-1])+1] - - else: - t2=t.find(' ',t0) - if t2>t1: t2=t1 - ouvrante=1 - NOM=t[t0+1:t2] - - - if '-1: - balise=BALISES[-1] - if NOM=='pattern' and not PATTERN: - t1=t.find('',t0)+len('') - balise=BALISES[-3] - else: - balise=BALISES[-2] - - if balise=='E' or balise=='O': - - proprietes=collect_ATTRIBUTS(t[t0:t1+ouvrante]) - - if 'id' in proprietes: - LAST_ID=proprietes['id'] - - if balise=='O' and 'transform' in proprietes: - STACK.append(proprietes['transform']) - TRANSFORM+=1 - elif balise=='O' : - STACK.append(None) - - proprietes['stack']=STACK[:] - D=[] - - if proprietes['TYPE'] in ['path'] and (proprietes['d'][1]-proprietes['d'][0]>1): - D=list_DATA(t[proprietes['d'][0]+t0:proprietes['d'][1]+t0]) - - elif proprietes['TYPE'] in OTHERSSHAPES: - #-------------------- - # 0.5.8, to remove exec - #-------------------- - D=OTHERSSHAPES[proprietes['TYPE']](proprietes) - - #elif proprietes['TYPE'] in ['pattern']: - # print 'pattern' - # D='' - - CP=[0.0,0.0] - if len(D)>0: - cursor=0 - proprietes['n']=[] - for cell in D: - - if len(cell)>=1 and cell[0] in TAGcourbe: - #-------------------- - # 0.5.8, to remove exec - #-------------------- - if cell[0] in ['m','M']: - curves,n0,CP=Actions[cell](curves, [cell,cursor], D, n0,CP,proprietes) - else: - curves,n0,CP=Actions[cell](curves, [cell,cursor], D, n0,CP) - - cursor+=1 - if TRANSFORM>0 or 'transform' in proprietes : - curve_TRANSFORM(curves.ITEM,proprietes) - - if 'style' in proprietes : - curve_FILL(curves.ITEM,proprietes) - - - elif proprietes['TYPE'] == 'svg': - BOUNDINGBOX = get_BOUNDBOX(BOUNDINGBOX,proprietes) - else: - #-------------------- - # 0.4.4 - #-------------------- - break - t1+=1 - t0=t1 - -def scan_FILE(nom): - global CP, curves, SCALE, DEBUG, BOUNDINGBOX, scale_, 
tagTRANSFORM - global SEPARATE_CURVES, USE_COLORS, PATTERN - - dir,name=split(nom) - name=name.split('.') - result=0 - #Choise=1 - t1=Blender.sys.time() - t=filterFILE(nom) - if t!='false': - Blender.Window.EditMode(0) - if not SHARP_IMPORT: - togH = Blender.Draw.Create(1) - togW = Blender.Draw.Create(0) - togAS = Blender.Draw.Create(0) - togSP = Blender.Draw.Create(0) - togCOL = Blender.Draw.Create(0) - Pattern= Blender.Draw.Create(0) - block=[\ - ("Clamp Width 1", togW, "Rescale the import with a Width of one unit"),\ - ("Clamp Height 1", togH, "Rescale the import with a Heightof one unit"),\ - ("No Rescaling", togAS, "No rescaling, the result can be very large"),\ - ("Separate Curves", togSP, "Create an object for each curve, Slower. May manage colors"),\ - ("Import Colors", togCOL, "try to import color if the path is set as 'fill'. Only With separate option"),\ - ("Import Patterns", Pattern, "import pattern content if it is made with paths.")] - retval = Blender.Draw.PupBlock("Import Options", block) - if togW.val: scale_=1 - elif togH.val: scale_=2 - elif togAS.val: scale_=3 - - if togSP.val: SEPARATE_CURVES=1 - - if togCOL.val and SEPARATE_CURVES : USE_COLORS=1 - - if Pattern.val : PATTERN =1 - - t1=Blender.sys.time() - # 0.4.1 : to avoid to use sax and the xml - # tools of the complete python - build_HIERARCHY(t) - r=BOUNDINGBOX['rec'] - curves.number_of_items=len(curves.ITEM) - for k, val in curves.ITEM.iteritems(): - val.pntsUV[0] =len(val.beziers_knot) - if curves.number_of_items>0 : #and Choise==1 : - #-------------------- - # 0.4.5 and 0.4.9 - #-------------------- - createCURVES(curves, name[0]) - else: - pass - print ' elapsed time : ',Blender.sys.time()-t1 - Blender.Redraw() - -#===================================================================== -#====================== SVG format mouvements ======================== -#===================================================================== -def functionSELECT(nom): - scan_FILE(nom) - - -if __name__=='__main__': - Blender.Window.FileSelector (functionSELECT, 'SELECT an .SVG FILE', '*.svg') \ No newline at end of file diff --git a/release/scripts/bvh_import.py b/release/scripts/bvh_import.py deleted file mode 100644 index 4134503c511..00000000000 --- a/release/scripts/bvh_import.py +++ /dev/null @@ -1,757 +0,0 @@ -#!BPY - -""" -Name: 'Motion Capture (.bvh)...' -Blender: 242 -Group: 'Import' -Tip: 'Import a (.bvh) motion capture file' -""" - -__author__ = "Campbell Barton" -__url__ = ("blender.org", "blenderartists.org") -__version__ = "1.90 06/08/01" - -__bpydoc__ = """\ -This script imports BVH motion capture data to Blender. -as empties or armatures. -""" - -# -------------------------------------------------------------------------- -# BVH Import v2.0 by Campbell Barton (AKA Ideasman) -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -import Blender -import bpy -import BPyMessages -Vector= Blender.Mathutils.Vector -Euler= Blender.Mathutils.Euler -Matrix= Blender.Mathutils.Matrix -RotationMatrix = Blender.Mathutils.RotationMatrix -TranslationMatrix= Blender.Mathutils.TranslationMatrix - -DEG2RAD = 0.017453292519943295 - -class bvh_node_class(object): - __slots__=(\ - 'name',# bvh joint name - 'parent',# bvh_node_class type or None for no parent - 'children',# a list of children of this type. - 'rest_head_world',# worldspace rest location for the head of this node - 'rest_head_local',# localspace rest location for the head of this node - 'rest_tail_world',# # worldspace rest location for the tail of this node - 'rest_tail_local',# # worldspace rest location for the tail of this node - 'channels',# list of 6 ints, -1 for an unused channel, otherwise an index for the BVH motion data lines, lock triple then rot triple - 'rot_order',# a triple of indicies as to the order rotation is applied. [0,1,2] is x/y/z - [None, None, None] if no rotation. - 'anim_data',# a list one tuple's one for each frame. (locx, locy, locz, rotx, roty, rotz) - 'has_loc',# Conveinience function, bool, same as (channels[0]!=-1 or channels[1]!=-1 channels[2]!=-1) - 'has_rot',# Conveinience function, bool, same as (channels[3]!=-1 or channels[4]!=-1 channels[5]!=-1) - 'temp')# use this for whatever you want - - def __init__(self, name, rest_head_world, rest_head_local, parent, channels, rot_order): - self.name= name - self.rest_head_world= rest_head_world - self.rest_head_local= rest_head_local - self.rest_tail_world= None - self.rest_tail_local= None - self.parent= parent - self.channels= channels - self.rot_order= rot_order - - # convenience functions - self.has_loc= channels[0] != -1 or channels[1] != -1 or channels[2] != -1 - self.has_rot= channels[3] != -1 or channels[4] != -1 or channels[5] != -1 - - - self.children= [] - - # list of 6 length tuples: (lx,ly,lz, rx,ry,rz) - # even if the channels arnt used they will just be zero - # - self.anim_data= [(0,0,0,0,0,0)] - - - def __repr__(self): - return 'BVH name:"%s", rest_loc:(%.3f,%.3f,%.3f), rest_tail:(%.3f,%.3f,%.3f)' %\ - (self.name,\ - self.rest_head_world.x, self.rest_head_world.y, self.rest_head_world.z,\ - self.rest_head_world.x, self.rest_head_world.y, self.rest_head_world.z) - - - -# Change the order rotation is applied. -MATRIX_IDENTITY_3x3 = Matrix([1,0,0],[0,1,0],[0,0,1]) -MATRIX_IDENTITY_4x4 = Matrix([1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]) - -def eulerRotate(x,y,z, rot_order): - # Clamp all values between 0 and 360, values outside this raise an error. - mats=[RotationMatrix(x%360,3,'x'), RotationMatrix(y%360,3,'y'), RotationMatrix(z%360,3,'z')] - # print rot_order - # Standard BVH multiplication order, apply the rotation in the order Z,X,Y - return (mats[rot_order[2]]*(mats[rot_order[1]]* (mats[rot_order[0]]* MATRIX_IDENTITY_3x3))).toEuler() - -def read_bvh(file_path, GLOBAL_SCALE=1.0): - # File loading stuff - # Open the file for importing - file = open(file_path, 'rU') - - # Seperate into a list of lists, each line a list of words. - file_lines = file.readlines() - # Non standard carrage returns? 
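[editor's note] read_bvh, which continues just below, walks the CHANNELS tokens of every joint, records a global column index for each position/rotation channel and remembers the order in which rotation channels appear; that order is what rot_order later feeds to eulerRotate. A standalone, slightly simplified sketch of that bookkeeping using the same -1 convention for missing channels; the function name is illustrative.

def map_channels(tokens, first_index):
    """Map BVH CHANNELS tokens to motion-line columns.

    Returns (channels, rot_order): channels is [xpos, ypos, zpos, xrot, yrot, zrot]
    with -1 for channels the joint does not have; rot_order lists the rotation
    axes (0=x, 1=y, 2=z) in the order their channels appear in the file.
    """
    slots = {'xposition': 0, 'yposition': 1, 'zposition': 2,
             'xrotation': 3, 'yrotation': 4, 'zrotation': 5}
    channels = [-1] * 6
    rot_order = []
    index = first_index
    for tok in tokens:
        slot = slots[tok.lower()]
        channels[slot] = index
        if slot >= 3:                 # a rotation channel: remember its axis
            rot_order.append(slot - 3)
        index += 1
    return channels, rot_order

# A typical non-root joint: three rotation channels in Z, X, Y order.
print(map_channels(['Zrotation', 'Xrotation', 'Yrotation'], 6))
# ([-1, -1, -1, 7, 8, 6], [2, 0, 1])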
- if len(file_lines) == 1: - file_lines = file_lines[0].split('\r') - - # Split by whitespace. - file_lines =[ll for ll in [ l.split() for l in file_lines] if ll] - - - # Create Hirachy as empties - - if file_lines[0][0].lower() == 'hierarchy': - #print 'Importing the BVH Hierarchy for:', file_path - pass - else: - raise 'ERROR: This is not a BVH file' - - bvh_nodes= {None:None} - bvh_nodes_serial = [None] - - channelIndex = -1 - - - lineIdx = 0 # An index for the file. - while lineIdx < len(file_lines) -1: - #... - if file_lines[lineIdx][0].lower() == 'root' or file_lines[lineIdx][0].lower() == 'joint': - - # Join spaces into 1 word with underscores joining it. - if len(file_lines[lineIdx]) > 2: - file_lines[lineIdx][1] = '_'.join(file_lines[lineIdx][1:]) - file_lines[lineIdx] = file_lines[lineIdx][:2] - - # MAY NEED TO SUPPORT MULTIPLE ROOT's HERE!!!, Still unsure weather multiple roots are possible.?? - - # Make sure the names are unique- Object names will match joint names exactly and both will be unique. - name = file_lines[lineIdx][1] - - #print '%snode: %s, parent: %s' % (len(bvh_nodes_serial) * ' ', name, bvh_nodes_serial[-1]) - - lineIdx += 2 # Incriment to the next line (Offset) - rest_head_local = Vector( GLOBAL_SCALE*float(file_lines[lineIdx][1]), GLOBAL_SCALE*float(file_lines[lineIdx][2]), GLOBAL_SCALE*float(file_lines[lineIdx][3]) ) - lineIdx += 1 # Incriment to the next line (Channels) - - # newChannel[Xposition, Yposition, Zposition, Xrotation, Yrotation, Zrotation] - # newChannel references indecies to the motiondata, - # if not assigned then -1 refers to the last value that will be added on loading at a value of zero, this is appended - # We'll add a zero value onto the end of the MotionDATA so this is always refers to a value. - my_channel = [-1, -1, -1, -1, -1, -1] - my_rot_order= [None, None, None] - rot_count= 0 - for channel in file_lines[lineIdx][2:]: - channel= channel.lower() - channelIndex += 1 # So the index points to the right channel - if channel == 'xposition': my_channel[0] = channelIndex - elif channel == 'yposition': my_channel[1] = channelIndex - elif channel == 'zposition': my_channel[2] = channelIndex - - elif channel == 'xrotation': - my_channel[3] = channelIndex - my_rot_order[rot_count]= 0 - rot_count+=1 - elif channel == 'yrotation': - my_channel[4] = channelIndex - my_rot_order[rot_count]= 1 - rot_count+=1 - elif channel == 'zrotation': - my_channel[5] = channelIndex - my_rot_order[rot_count]= 2 - rot_count+=1 - - channels = file_lines[lineIdx][2:] - - my_parent= bvh_nodes_serial[-1] # account for none - - - # Apply the parents offset accumletivly - if my_parent==None: - rest_head_world= Vector(rest_head_local) - else: - rest_head_world= my_parent.rest_head_world + rest_head_local - - bvh_node= bvh_nodes[name]= bvh_node_class(name, rest_head_world, rest_head_local, my_parent, my_channel, my_rot_order) - - # If we have another child then we can call ourselves a parent, else - bvh_nodes_serial.append(bvh_node) - - # Account for an end node - if file_lines[lineIdx][0].lower() == 'end' and file_lines[lineIdx][1].lower() == 'site': # There is somtimes a name after 'End Site' but we will ignore it. 
- lineIdx += 2 # Incriment to the next line (Offset) - rest_tail = Vector( GLOBAL_SCALE*float(file_lines[lineIdx][1]), GLOBAL_SCALE*float(file_lines[lineIdx][2]), GLOBAL_SCALE*float(file_lines[lineIdx][3]) ) - - bvh_nodes_serial[-1].rest_tail_world= bvh_nodes_serial[-1].rest_head_world + rest_tail - bvh_nodes_serial[-1].rest_tail_local= rest_tail - - - # Just so we can remove the Parents in a uniform way- End end never has kids - # so this is a placeholder - bvh_nodes_serial.append(None) - - if len(file_lines[lineIdx]) == 1 and file_lines[lineIdx][0] == '}': # == ['}'] - bvh_nodes_serial.pop() # Remove the last item - - if len(file_lines[lineIdx]) == 1 and file_lines[lineIdx][0].lower() == 'motion': - #print '\nImporting motion data' - lineIdx += 3 # Set the cursor to the first frame - break - - lineIdx += 1 - - - # Remove the None value used for easy parent reference - del bvh_nodes[None] - # Dont use anymore - del bvh_nodes_serial - - bvh_nodes_list= bvh_nodes.values() - - while lineIdx < len(file_lines): - line= file_lines[lineIdx] - for bvh_node in bvh_nodes_list: - #for bvh_node in bvh_nodes_serial: - lx= ly= lz= rx= ry= rz= 0.0 - channels= bvh_node.channels - anim_data= bvh_node.anim_data - if channels[0] != -1: - lx= GLOBAL_SCALE * float( line[channels[0]] ) - - if channels[1] != -1: - ly= GLOBAL_SCALE * float( line[channels[1]] ) - - if channels[2] != -1: - lz= GLOBAL_SCALE * float( line[channels[2]] ) - - if channels[3] != -1 or channels[4] != -1 or channels[5] != -1: - rx, ry, rz = eulerRotate(float( line[channels[3]] ), float( line[channels[4]] ), float( line[channels[5]] ), bvh_node.rot_order) - #x,y,z = x/10.0, y/10.0, z/10.0 # For IPO's 36 is 360d - - # Make interpolation not cross between 180d, thjis fixes sub frame interpolation and time scaling. - # Will go from (355d to 365d) rather then to (355d to 5d) - inbetween these 2 there will now be a correct interpolation. - - while anim_data[-1][3] - rx > 180: rx+=360 - while anim_data[-1][3] - rx < -180: rx-=360 - - while anim_data[-1][4] - ry > 180: ry+=360 - while anim_data[-1][4] - ry < -180: ry-=360 - - while anim_data[-1][5] - rz > 180: rz+=360 - while anim_data[-1][5] - rz < -180: rz-=360 - - # Done importing motion data # - anim_data.append( (lx, ly, lz, rx, ry, rz) ) - lineIdx += 1 - - # Assign children - for bvh_node in bvh_nodes.itervalues(): - bvh_node_parent= bvh_node.parent - if bvh_node_parent: - bvh_node_parent.children.append(bvh_node) - - # Now set the tip of each bvh_node - for bvh_node in bvh_nodes.itervalues(): - - if not bvh_node.rest_tail_world: - if len(bvh_node.children)==0: - # could just fail here, but rare BVH files have childless nodes - bvh_node.rest_tail_world = Vector(bvh_node.rest_head_world) - bvh_node.rest_tail_local = Vector(bvh_node.rest_head_local) - elif len(bvh_node.children)==1: - bvh_node.rest_tail_world= Vector(bvh_node.children[0].rest_head_world) - bvh_node.rest_tail_local= Vector(bvh_node.children[0].rest_head_local) - else: - # allow this, see above - #if not bvh_node.children: - # raise 'error, bvh node has no end and no children. bad file' - - # Removed temp for now - rest_tail_world= Vector(0,0,0) - rest_tail_local= Vector(0,0,0) - for bvh_node_child in bvh_node.children: - rest_tail_world += bvh_node_child.rest_head_world - rest_tail_local += bvh_node_child.rest_head_local - - bvh_node.rest_tail_world= rest_tail_world * (1.0/len(bvh_node.children)) - bvh_node.rest_tail_local= rest_tail_local * (1.0/len(bvh_node.children)) - - # Make sure tail isnt the same location as the head. 
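[editor's note] The while loops above keep each new rotation value within 180 degrees of the previous frame, so a step from 355d to 5d is stored as 355d to 365d and interpolation does not swing the long way round. The same unwrap written as a small helper, for reference only.

def unwrap_degrees(previous, current):
    """Shift current by whole turns until it lies within 180 deg of previous."""
    while previous - current > 180.0:
        current += 360.0
    while previous - current < -180.0:
        current -= 360.0
    return current

print(unwrap_degrees(355.0, 5.0))    # 365.0 -- keeps the short path
print(unwrap_degrees(10.0, 350.0))   # -10.0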
- if (bvh_node.rest_tail_local-bvh_node.rest_head_local).length <= 0.001*GLOBAL_SCALE: - - bvh_node.rest_tail_local.y= bvh_node.rest_tail_local.y + GLOBAL_SCALE/10 - bvh_node.rest_tail_world.y= bvh_node.rest_tail_world.y + GLOBAL_SCALE/10 - - - - return bvh_nodes - - - -def bvh_node_dict2objects(bvh_nodes, IMPORT_START_FRAME= 1, IMPORT_LOOP= False): - - if IMPORT_START_FRAME<1: - IMPORT_START_FRAME= 1 - - scn= bpy.data.scenes.active - scn.objects.selected = [] - - objects= [] - - def add_ob(name): - ob = scn.objects.new('Empty') - objects.append(ob) - return ob - - # Add objects - for name, bvh_node in bvh_nodes.iteritems(): - bvh_node.temp= add_ob(name) - - # Parent the objects - for bvh_node in bvh_nodes.itervalues(): - bvh_node.temp.makeParent([ bvh_node_child.temp for bvh_node_child in bvh_node.children ], 1, 0) # ojbs, noninverse, 1 = not fast. - - # Offset - for bvh_node in bvh_nodes.itervalues(): - # Make relative to parents offset - bvh_node.temp.loc= bvh_node.rest_head_local - - # Add tail objects - for name, bvh_node in bvh_nodes.iteritems(): - if not bvh_node.children: - ob_end= add_ob(name + '_end') - bvh_node.temp.makeParent([ob_end], 1, 0) # ojbs, noninverse, 1 = not fast. - ob_end.loc= bvh_node.rest_tail_local - - - # Animate the data, the last used bvh_node will do since they all have the same number of frames - for current_frame in xrange(len(bvh_node.anim_data)): - Blender.Set('curframe', current_frame+IMPORT_START_FRAME) - - for bvh_node in bvh_nodes.itervalues(): - lx,ly,lz,rx,ry,rz= bvh_node.anim_data[current_frame] - - rest_head_local= bvh_node.rest_head_local - bvh_node.temp.loc= rest_head_local.x+lx, rest_head_local.y+ly, rest_head_local.z+lz - - bvh_node.temp.rot= rx*DEG2RAD,ry*DEG2RAD,rz*DEG2RAD - - bvh_node.temp.insertIpoKey(Blender.Object.IpoKeyTypes.LOCROT) - - scn.update(1) - return objects - - - -def bvh_node_dict2armature(bvh_nodes, IMPORT_START_FRAME= 1, IMPORT_LOOP= False): - - if IMPORT_START_FRAME<1: - IMPORT_START_FRAME= 1 - - - # Add the new armature, - scn = bpy.data.scenes.active - scn.objects.selected = [] - - arm_data= bpy.data.armatures.new() - arm_ob = scn.objects.new(arm_data) - scn.objects.context = [arm_ob] - scn.objects.active = arm_ob - - # Put us into editmode - arm_data.makeEditable() - - # Get the average bone length for zero length bones, we may not use this. - average_bone_length= 0.0 - nonzero_count= 0 - for bvh_node in bvh_nodes.itervalues(): - l= (bvh_node.rest_head_local-bvh_node.rest_tail_local).length - if l: - average_bone_length+= l - nonzero_count+=1 - - # Very rare cases all bones couldbe zero length??? - if not average_bone_length: - average_bone_length = 0.1 - else: - # Normal operation - average_bone_length = average_bone_length/nonzero_count - - - - ZERO_AREA_BONES= [] - for name, bvh_node in bvh_nodes.iteritems(): - # New editbone - bone= bvh_node.temp= Blender.Armature.Editbone() - - bone.name= name - arm_data.bones[name]= bone - - bone.head= bvh_node.rest_head_world - bone.tail= bvh_node.rest_tail_world - - # ZERO AREA BONES. - if (bone.head-bone.tail).length < 0.001: - if bvh_node.parent: - ofs= bvh_node.parent.rest_head_local- bvh_node.parent.rest_tail_local - if ofs.length: # is our parent zero length also?? 
unlikely - bone.tail= bone.tail+ofs - else: - bone.tail.y= bone.tail.y+average_bone_length - else: - bone.tail.y= bone.tail.y+average_bone_length - - ZERO_AREA_BONES.append(bone.name) - - - for bvh_node in bvh_nodes.itervalues(): - if bvh_node.parent: - # bvh_node.temp is the Editbone - - # Set the bone parent - bvh_node.temp.parent= bvh_node.parent.temp - - # Set the connection state - if not bvh_node.has_loc and\ - bvh_node.parent and\ - bvh_node.parent.temp.name not in ZERO_AREA_BONES and\ - bvh_node.parent.rest_tail_local == bvh_node.rest_head_local: - bvh_node.temp.options= [Blender.Armature.CONNECTED] - - # Replace the editbone with the editbone name, - # to avoid memory errors accessing the editbone outside editmode - for bvh_node in bvh_nodes.itervalues(): - bvh_node.temp= bvh_node.temp.name - - arm_data.update() - - # Now Apply the animation to the armature - - # Get armature animation data - pose= arm_ob.getPose() - pose_bones= pose.bones - - action = Blender.Armature.NLA.NewAction("Action") - action.setActive(arm_ob) - #xformConstants= [ Blender.Object.Pose.LOC, Blender.Object.Pose.ROT ] - - # Replace the bvh_node.temp (currently an editbone) - # With a tuple (pose_bone, armature_bone, bone_rest_matrix, bone_rest_matrix_inv) - for bvh_node in bvh_nodes.itervalues(): - bone_name= bvh_node.temp # may not be the same name as the bvh_node, could have been shortened. - pose_bone= pose_bones[bone_name] - rest_bone= arm_data.bones[bone_name] - bone_rest_matrix = rest_bone.matrix['ARMATURESPACE'].rotationPart() - - bone_rest_matrix_inv= Matrix(bone_rest_matrix) - bone_rest_matrix_inv.invert() - - bone_rest_matrix_inv.resize4x4() - bone_rest_matrix.resize4x4() - bvh_node.temp= (pose_bone, bone, bone_rest_matrix, bone_rest_matrix_inv) - - - # Make a dict for fast access without rebuilding a list all the time. - xformConstants_dict={ - (True,True): [Blender.Object.Pose.LOC, Blender.Object.Pose.ROT],\ - (False,True): [Blender.Object.Pose.ROT],\ - (True,False): [Blender.Object.Pose.LOC],\ - (False,False): [],\ - } - - - # KEYFRAME METHOD, SLOW, USE IPOS DIRECT - - # Animate the data, the last used bvh_node will do since they all have the same number of frames - for current_frame in xrange(len(bvh_node.anim_data)-1): # skip the first frame (rest frame) - # print current_frame - - #if current_frame==40: # debugging - # break - - # Dont neet to set the current frame - for bvh_node in bvh_nodes.itervalues(): - pose_bone, bone, bone_rest_matrix, bone_rest_matrix_inv= bvh_node.temp - lx,ly,lz,rx,ry,rz= bvh_node.anim_data[current_frame+1] - - if bvh_node.has_rot: - # Set the rotation, not so simple - bone_rotation_matrix= Euler(rx,ry,rz).toMatrix() - bone_rotation_matrix.resize4x4() - pose_bone.quat= (bone_rest_matrix * bone_rotation_matrix * bone_rest_matrix_inv).toQuat() - - if bvh_node.has_loc: - # Set the Location, simple too - pose_bone.loc= (\ - TranslationMatrix(Vector(lx, ly, lz) - bvh_node.rest_head_local ) *\ - bone_rest_matrix_inv).translationPart() # WHY * 10? 
- just how pose works - - # Get the transform - xformConstants= xformConstants_dict[bvh_node.has_loc, bvh_node.has_rot] - - - if xformConstants: - # Insert the keyframe from the loc/quat - pose_bone.insertKey(arm_ob, current_frame+IMPORT_START_FRAME, xformConstants, True ) - - # First time, set the IPO's to linear - if current_frame==0: - for ipo in action.getAllChannelIpos().itervalues(): - if ipo: - for cur in ipo: - cur.interpolation = Blender.IpoCurve.InterpTypes.LINEAR - if IMPORT_LOOP: - cur.extend = Blender.IpoCurve.ExtendTypes.CYCLIC - - - - - # END KEYFRAME METHOD - - - """ - # IPO KEYFRAME SETTING - # Add in the IPOs by adding keyframes, AFAIK theres no way to add IPOs to an action so I do this :/ - for bvh_node in bvh_nodes.itervalues(): - pose_bone, bone, bone_rest_matrix, bone_rest_matrix_inv= bvh_node.temp - - # Get the transform - xformConstants= xformConstants_dict[bvh_node.has_loc, bvh_node.has_rot] - if xformConstants: - pose_bone.loc[:]= 0,0,0 - pose_bone.quat[:]= 0,0,1,0 - # Insert the keyframe from the loc/quat - pose_bone.insertKey(arm_ob, IMPORT_START_FRAME, xformConstants) - - - action_ipos= action.getAllChannelIpos() - - - for bvh_node in bvh_nodes.itervalues(): - has_loc= bvh_node.has_loc - has_rot= bvh_node.has_rot - - if not has_rot and not has_loc: - # No animation data - continue - - ipo= action_ipos[bvh_node.temp[0].name] # posebones name as key - - if has_loc: - curve_xloc= ipo[Blender.Ipo.PO_LOCX] - curve_yloc= ipo[Blender.Ipo.PO_LOCY] - curve_zloc= ipo[Blender.Ipo.PO_LOCZ] - - curve_xloc.interpolation= \ - curve_yloc.interpolation= \ - curve_zloc.interpolation= \ - Blender.IpoCurve.InterpTypes.LINEAR - - - if has_rot: - curve_wquat= ipo[Blender.Ipo.PO_QUATW] - curve_xquat= ipo[Blender.Ipo.PO_QUATX] - curve_yquat= ipo[Blender.Ipo.PO_QUATY] - curve_zquat= ipo[Blender.Ipo.PO_QUATZ] - - curve_wquat.interpolation= \ - curve_xquat.interpolation= \ - curve_yquat.interpolation= \ - curve_zquat.interpolation= \ - Blender.IpoCurve.InterpTypes.LINEAR - - # Get the bone - pose_bone, bone, bone_rest_matrix, bone_rest_matrix_inv= bvh_node.temp - - - def pose_rot(anim_data): - bone_rotation_matrix= Euler(anim_data[3], anim_data[4], anim_data[5]).toMatrix() - bone_rotation_matrix.resize4x4() - return tuple((bone_rest_matrix * bone_rotation_matrix * bone_rest_matrix_inv).toQuat()) # qw,qx,qy,qz - - def pose_loc(anim_data): - return tuple((TranslationMatrix(Vector(anim_data[0], anim_data[1], anim_data[2])) * bone_rest_matrix_inv).translationPart()) - - - last_frame= len(bvh_node.anim_data)+IMPORT_START_FRAME-1 - - if has_loc: - pose_locations= [pose_loc(anim_key) for anim_key in bvh_node.anim_data] - - # Add the start at the end, we know the start is just 0,0,0 anyway - curve_xloc.append((last_frame, pose_locations[-1][0])) - curve_yloc.append((last_frame, pose_locations[-1][1])) - curve_zloc.append((last_frame, pose_locations[-1][2])) - - if len(pose_locations) > 1: - ox,oy,oz= pose_locations[0] - x,y,z= pose_locations[1] - - for i in xrange(1, len(pose_locations)-1): # from second frame to second last frame - - nx,ny,nz= pose_locations[i+1] - xset= yset= zset= True # we set all these by default - if abs((ox+nx)/2 - x) < 0.00001: xset= False - if abs((oy+ny)/2 - y) < 0.00001: yset= False - if abs((oz+nz)/2 - z) < 0.00001: zset= False - - if xset: curve_xloc.append((i+IMPORT_START_FRAME, x)) - if yset: curve_yloc.append((i+IMPORT_START_FRAME, y)) - if zset: curve_zloc.append((i+IMPORT_START_FRAME, z)) - - # Set the old and use the new - ox,oy,oz= x,y,z - x,y,z= nx,ny,nz - - 
- if has_rot: - pose_rotations= [pose_rot(anim_key) for anim_key in bvh_node.anim_data] - - # Add the start at the end, we know the start is just 0,0,0 anyway - curve_wquat.append((last_frame, pose_rotations[-1][0])) - curve_xquat.append((last_frame, pose_rotations[-1][1])) - curve_yquat.append((last_frame, pose_rotations[-1][2])) - curve_zquat.append((last_frame, pose_rotations[-1][3])) - - - if len(pose_rotations) > 1: - ow,ox,oy,oz= pose_rotations[0] - w,x,y,z= pose_rotations[1] - - for i in xrange(1, len(pose_rotations)-1): # from second frame to second last frame - - nw, nx,ny,nz= pose_rotations[i+1] - wset= xset= yset= zset= True # we set all these by default - if abs((ow+nw)/2 - w) < 0.00001: wset= False - if abs((ox+nx)/2 - x) < 0.00001: xset= False - if abs((oy+ny)/2 - y) < 0.00001: yset= False - if abs((oz+nz)/2 - z) < 0.00001: zset= False - - if wset: curve_wquat.append((i+IMPORT_START_FRAME, w)) - if xset: curve_xquat.append((i+IMPORT_START_FRAME, x)) - if yset: curve_yquat.append((i+IMPORT_START_FRAME, y)) - if zset: curve_zquat.append((i+IMPORT_START_FRAME, z)) - - # Set the old and use the new - ow,ox,oy,oz= w,x,y,z - w,x,y,z= nw,nx,ny,nz - - # IPO KEYFRAME SETTING - """ - pose.update() - return arm_ob - - -#=============# -# TESTING # -#=============# - -#('/metavr/mocap/bvh/boxer.bvh') -#('/d/staggered_walk.bvh') -#('/metavr/mocap/bvh/dg-306-g.bvh') # Incompleate EOF -#('/metavr/mocap/bvh/wa8lk.bvh') # duplicate joint names, \r line endings. -#('/metavr/mocap/bvh/walk4.bvh') # 0 channels - -''' -import os -DIR = '/metavr/mocap/bvh/' -for f in ('/d/staggered_walk.bvh',): - #for f in os.listdir(DIR)[5:6]: - #for f in os.listdir(DIR): - if f.endswith('.bvh'): - s = Blender.Scene.New(f) - s.makeCurrent() - #file= DIR + f - file= f - print f - bvh_nodes= read_bvh(file, 1.0) - bvh_node_dict2armature(bvh_nodes, 1) -''' - -def load_bvh_ui(file, PREF_UI= True): - - if BPyMessages.Error_NoFile(file): - return - - Draw= Blender.Draw - - IMPORT_SCALE = Draw.Create(0.1) - IMPORT_START_FRAME = Draw.Create(1) - IMPORT_AS_ARMATURE = Draw.Create(1) - IMPORT_AS_EMPTIES = Draw.Create(0) - IMPORT_LOOP = Draw.Create(0) - - # Get USER Options - if PREF_UI: - pup_block = [\ - ('As Armature', IMPORT_AS_ARMATURE, 'Imports the BVH as an armature'),\ - ('As Empties', IMPORT_AS_EMPTIES, 'Imports the BVH as empties'),\ - ('Scale: ', IMPORT_SCALE, 0.001, 100.0, 'Scale the BVH, Use 0.01 when 1.0 is 1 metre'),\ - ('Start Frame: ', IMPORT_START_FRAME, 1, 30000, 'Frame to start BVH motion'),\ - ('Loop Animation', IMPORT_LOOP, 'Enable cyclic IPOs'),\ - ] - - if not Draw.PupBlock('BVH Import...', pup_block): - return - - print 'Attempting import BVH', file - - IMPORT_SCALE = IMPORT_SCALE.val - IMPORT_START_FRAME = IMPORT_START_FRAME.val - IMPORT_AS_ARMATURE = IMPORT_AS_ARMATURE.val - IMPORT_AS_EMPTIES = IMPORT_AS_EMPTIES.val - IMPORT_LOOP = IMPORT_LOOP.val - - if not IMPORT_AS_ARMATURE and not IMPORT_AS_EMPTIES: - Blender.Draw.PupMenu('No import option selected') - return - Blender.Window.WaitCursor(1) - # Get the BVH data and act on it. 
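[editor's note] On the pose maths in bvh_node_dict2armature above: the channel rotation is conjugated by the bone's rest matrix (bone_rest_matrix * bone_rotation_matrix * bone_rest_matrix_inv) before being turned into a pose quaternion, i.e. the same rotation re-expressed relative to the bone's rest frame. The sketch below only checks the general change-of-basis identity (B R B^-1)(B v) = B (R v) numerically with plain 3x3 matrices and column vectors, using the transpose as the inverse of a rotation; it makes no claim about Blender's exact matrix conventions.

from math import cos, radians, sin

def matmul(A, B):
    return [[sum(A[i][k] * B[k][j] for k in range(3)) for j in range(3)]
            for i in range(3)]

def matvec(A, v):
    return [sum(A[i][k] * v[k] for k in range(3)) for i in range(3)]

def transpose(A):
    return [[A[j][i] for j in range(3)] for i in range(3)]

def rot_z(deg):
    a = radians(deg)
    return [[cos(a), -sin(a), 0.0], [sin(a), cos(a), 0.0], [0.0, 0.0, 1.0]]

def rot_x(deg):
    a = radians(deg)
    return [[1.0, 0.0, 0.0], [0.0, cos(a), -sin(a)], [0.0, sin(a), cos(a)]]

B = rot_z(90.0)            # stand-in for a bone's rest orientation
R = rot_x(30.0)            # stand-in for the BVH channel rotation
v = [0.3, 0.7, 0.2]

conj = matmul(matmul(B, R), transpose(B))   # B * R * B^-1 (inverse = transpose)
lhs = matvec(conj, matvec(B, v))            # (B R B^-1) applied after B
rhs = matvec(B, matvec(R, v))               # B applied after R
print(lhs)
print(rhs)                                  # same vector, up to rounding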
- t1= Blender.sys.time() - print '\tparsing bvh...', - bvh_nodes= read_bvh(file, IMPORT_SCALE) - print '%.4f' % (Blender.sys.time()-t1) - t1= Blender.sys.time() - print '\timporting to blender...', - if IMPORT_AS_ARMATURE: bvh_node_dict2armature(bvh_nodes, IMPORT_START_FRAME, IMPORT_LOOP) - if IMPORT_AS_EMPTIES: bvh_node_dict2objects(bvh_nodes, IMPORT_START_FRAME, IMPORT_LOOP) - - print 'Done in %.4f\n' % (Blender.sys.time()-t1) - Blender.Window.WaitCursor(0) - -def main(): - Blender.Window.FileSelector(load_bvh_ui, 'Import BVH', '*.bvh') - -if __name__ == '__main__': - #def foo(): - main() - ''' - scn = bpy.data.scenes.active - for ob in list(scn.objects): - if ob.name!='arm__': - scn.objects.unlink(ob) - load_bvh_ui('/test.bvh', False) - ''' \ No newline at end of file diff --git a/release/scripts/c3d_import.py b/release/scripts/c3d_import.py deleted file mode 100644 index 98f643cbab9..00000000000 --- a/release/scripts/c3d_import.py +++ /dev/null @@ -1,1244 +0,0 @@ -#!BPY -# -*- coding: latin-1 -*- -""" -Name: 'Motion Capture (.c3d)...' -Blender: 246 -Group: 'Import' -Tooltip: 'Import a C3D Motion Capture file' -""" -__script__ = "C3D Motion Capture file import" -__author__ = " Jean-Baptiste PERIN, Roger D. Wickes (rogerwickes@yahoo.com)" -__version__ = "0.9" -__url__ = ["Communicate problems and errors, BlenderArtists.org, Python forum"] -__email__= ["rogerwickes@yahoo.com", "c3d script"] -__bpydoc__ = """\ -c3d_import.py v0.8 - -Script loading Graphics Lab Motion Capture file, -Usage:
- - Run the script
- - Choose the file to open
- - Press Import C3D button
- -Version History: - 0.4: PERIN Released under Blender Artistic Licence - 0.5: WICKES used marker names, fixed 2.45 depricated call - 0.6: WICKES creates armature for each subject - 0.7: WICKES constrains armature to follow the empties (markers). Verified for shake hands s - 0.8: WICKES resolved DEC support issue - 0.9: BARTON removed scene name change, whitespace edits. WICKES added IK layers -""" - -#---------------------------------------------- -# (c) Jean-Baptiste PERIN december 2005, released under Blender Artistic Licence -# for the Blender 2.40 Python Scripts Bundle. -#---------------------------------------------- - -###################################################### -# This script imports a C3D file into blender. -# Loader is based on MATLAB C3D loader from -# Alan Morris, Toronto, October 1998 -# Jaap Harlaar, Amsterdam, april 2002 -###################################################### - -import string -import Blender -from Blender import * -import bpy -import struct -import BPyMessages -Vector= Blender.Mathutils.Vector -Euler= Blender.Mathutils.Euler -Matrix= Blender.Mathutils.Matrix -RotationMatrix = Blender.Mathutils.RotationMatrix -TranslationMatrix= Blender.Mathutils.TranslationMatrix - -#================= -# Global Variables, Constants, Defaults, and Shorthand References -#================= -# set senstitivity for displaying debug/console messages. 0=few, 100=max, including clicks at major steps -# debug(num,string) to conditionally display status/info in console window -DEBUG=Blender.Get('rt') - -# marker sets known in the world -HUMAN_CMU= "HumanRTKm.mkr" # The Human Real-Time capture marker set used by CMU -HUMAN_CMU2="HumanRT.mkr" # found in another file, seems same as others in that series -MARKER_SETS = [ HUMAN_CMU, HUMAN_CMU2 ] # marker sets that this program supports (can make an armature for) -XYZ_LIMIT= 10000 #max value for coordinates if in integer format - -# what layers to put stuff on in scene. 1 is selected, so everything goes there -# selecting only layer 2 shows only the armature moving, 12 shows only the empties -LAYERS_ARMOB= [1,2] -LAYERS_MARKER=[1,12] -LAYERS_IK=[1,11] -IK_PREFIX="ik_" # prefix in empty name: ik_prefix+subject prefix+bone name - -CLEAN=True # Should program ignore markers at (0,0,0) and beyond the outer limits? 
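[editor's note] CLEAN and XYZ_LIMIT above control whether obviously bad samples are ignored: the comment notes that markers at (0,0,0) and coordinates beyond the outer limit should be skipped, which typically covers dropped-out markers and integer-format overflow. A small sketch of such a validity test, assuming a sample given as an (x, y, z) tuple; the helper name is illustrative.

XYZ_LIMIT = 10000  # same outer limit the importer uses for integer-format files

def marker_sample_ok(sample, limit=XYZ_LIMIT):
    """Reject markers parked at the origin and out-of-range coordinates."""
    x, y, z = sample
    if x == 0.0 and y == 0.0 and z == 0.0:
        return False                      # marker missing for this frame
    return all(abs(c) < limit for c in (x, y, z))

print(marker_sample_ok((131.2, -54.0, 1042.7)))   # True
print(marker_sample_ok((0.0, 0.0, 0.0)))          # False: marker at the origin
print(marker_sample_ok((120000.0, 3.0, 9.0)))     # False: beyond XYZ_LIMIT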
- -scn = Blender.Scene.GetCurrent() - -BCS=Blender.Constraint.Settings # shorthand dictionary - define with brace, reference with bracket -trackto={"+x":BCS.TRACKX, "+y":BCS.TRACKY, "+z":BCS.TRACKZ, "-x":BCS.TRACKNEGX, "-y":BCS.TRACKNEGY, "-z":BCS.TRACKNEGZ} -trackup={"x":BCS.UPX, "y":BCS.UPY, "z":BCS.UPZ} - -#=============================# -# Classes -#=============================# -class Marker: - def __init__(self, x, y, z): - self.x=0.0 - self.y=0.0 - self.z=0.0 - - def __repr__(self): #report on self, as in if just printed - return str("[x = "+str(self.x) +" y = " + str(self.y)+" z = "+ str(self.z)+"]") - -class ParameterGroup: - def __init__(self, nom, description, parameter): - self.name = nom - self.description = description - self.parameter = parameter - - def __repr__(self): - return self.name, " ", self.description, " ", self.parameter - -class Parameter: - def __init__(self, name, datatype, dim, data, description): - self.name = name - self.datatype = datatype - self.dim = dim - self.data = data - self.description = description - - def __repr__(self): - return self.name, " ", self.description, " ", self.dim - -class MyVector: - def __init__(self, fx,fy,fz): - self.x=fx - self.y=fy - self.z=fz - -class Mybone: - "information structure for bone generation and posing" - def __init__(self, name,vec,par,head,tail,const): - self.name=name # name of this bone. must be unique within armature - self.vec=vec # edit bone vector it points - self.parent=par # name of parent bone to locate head and form a chain - self.headMark=head # list of 0+ markers where the head of this non-parented bone should be placed - self.tailMark=tail # list of 0+ markers where the tip should be placed - self.const=const # list of 0+ constraint tuples to control posing - self.head=MyVector(0,0,0) #T-pose location - self.tail=MyVector(0,0,0) - def __repr__(self): - return '[Mybone "%s"]' % self.name - - -#=============================# -# functions/modules -#=============================# -def error(str): - Draw.PupMenu('ERROR%t|'+str) - return -def status(str): - Draw.PupMenu('STATUS%t|'+str+"|Continue?") - return -def debug(num,msg): #use log4j or just console here. - if DEBUG >= num: - print 'debug:', (' '*num), msg - #TODO: if level 0, make a text file in Blender file to record major stuff - return - -def names(ob): return ob.name - - -######### -# Cette fonction renvoie la liste des empties -# in : -# out : emp_list (List of Object) la liste des objets de type "Empty" -######### -def getEmpty(name): - obs = [ob for ob in scn.objects if ob.type=="Empty" and ob.name==name] - if len(obs)==0: - return None - elif len(obs)==1: - return obs[0] - else: - error("FATAL ERROR: %i empties %s in file" % (len(obs),ob[0])) -######### -# Cette fonction renvoie un empty -# in : objname : le nom de l'empty recherche -# out : myobj : l'empty cree ou retrouve -######### -def getOrCreateEmpty(objname): - myobj= getEmpty(objname) - if myobj==None: - myobj = scn.objects.new("Empty",objname) - debug(50,'Marker/Empty created %s' % myobj) - return myobj - -def getOrCreateCurve(ipo, curvename): - """ - Retrieve or create a Blender Ipo Curve named C{curvename} in the C{ipo} Ipo - - >>> import mylib - - >>> lIpo = GetOrCreateIPO("Une IPO") - >>> laCurve = getOrCreateCurve(lIpo, "RotX") - - Either an ipo curve named C{curvename} exists before the call then this curve is returned, - Or such a curve doesn't exist before the call .. 
then it is created into the c{ipo} Ipo and returned - - @type ipo: Blender Ipo - @param ipo: the Ipo in which the curve must be retrieved or created. - @type curvename: string - @param curvename: name of the IPO. - @rtype: Blender Curve - @return: a Blender Curve named C{curvename} in the C{ipo} Ipo - """ - try: - mycurve = ipo.getCurve(curvename) - if mycurve != None: - pass - else: - mycurve = ipo.addCurve(curvename) - except: - mycurve = ipo.addCurve(curvename) - return mycurve - -def eraseIPO (objectname): - object = Blender.Object.Get(objectname) - lIpo = object.getIpo() - if lIpo != None: - nbCurves = lIpo.getNcurves() - for i in range(nbCurves): - nbBezPoints = lIpo.getNBezPoints(i) - for j in range(nbBezPoints): - lIpo.delBezPoint(i) - -def comp_loc(emptyNameList): - myloc=Vector(0,0,0) - for emName in emptyNameList: - myobj = Blender.Object.Get(emName) - for i in range(3): - myloc[i]= myloc[i]+(myobj.loc[i]/len(emptyNameList)) #take the average loc of all marks - return myloc - -def comp_len(head, tail): # computes the length of a bone - headvec=comp_loc(head) - tailvec=comp_loc(tail) - netvec=headvec-tailvec - return netvec.length - -def createHumanCMU(): # human bone structure, makes a node set for CMU MoCap Lab - # order of bones: "spine","chest","neck","head",...face toward you in front view - # pose constraints are tuples of (type,target,influence,other-as-needed) - # constraint stack order is important. for proper bone pointing and orinetation: - # IK, then TT +YZ in world space. then LR XZ to 0 in world space, this points the bone, twists it, but then - # limits the rotation to the sidebar enpty with the Z facing it, and Y pointing along the bone. - nodes=[] # bonename, vector, parent, head targets, tail targets, constraint list - for i in range(23): nodes.append(Mybone("name","vec","par",[],[],[])) - nodes[0]= Mybone("root", "-Y","",["RBWT", "LBWT"],["RFWT", "LFWT", "RBWT", "LBWT"],[("LOC","RBWT",1.0),("LOC","LBWT",0.5),("IK","RFWT",1.0),("IK","LFWT",0.5),("TT","RBWT",1,"+YZ"),("LR","XZ",1)]) - nodes[1]= Mybone("spine","+Z","root",[],["STRN","T10"],[("IK","STRN",1.0),("IK","T10",0.5),("TT","STRN",1,"+YZ"),("LR","XZ",1)]) - nodes[2]= Mybone("chest","+Z","spine",[],["CLAV","C7"],[("IK","CLAV",1.0),("IK","C7",0.5),("TT","CLAV",1,"+YZ"),("LR","XZ",1)]) - nodes[3]= Mybone("neck", "+Z","chest",[],["RBHD","LBHD"],[("IK","RBHD",1.0),("IK","LBHD",0.5),("TT","LBHD",1,"+YZ"),("LR","XZ",1)]) - nodes[4]= Mybone("head" ,"-Y","neck",[],["RFHD","LFHD"],[("IK","RFHD",1.0),("IK","LFHD",0.5),("TT","LFHD",1,"+YZ"),("LR","XZ",1)]) - - nodes[5]= Mybone("shoulder.R","-X","chest",[],["RSHO"],[("IK","RSHO",1.0)]) - nodes[6]= Mybone("toparm.R", "-X","shoulder.R",[],["RELB"],[("IK","RELB",1.0),("TT","RUPA",1,"+YZ"),("LR","XZ",1)]) - nodes[7]= Mybone("lowarm.R", "-X","toparm.R",[],["RWRA","RWRB"],[("IK","RWRA",1.0),("IK","RWRB",0.5),("TT","RFRM",1,"+YZ"),("LR","XZ",1)]) - nodes[8]= Mybone("hand.R", "-X","lowarm.R",[],["RFIN"],[("IK","RFIN",1.0),("TT","RWRA",1,"+YZ"),("LR","XZ",1)]) #missing ,"RTHM" - - nodes[9]= Mybone("hip.R", "-X","root",[],["RFWT","RBWT"],[("IK","RFWT",1.0),("IK","RBWT",0.5)]) - nodes[10]=Mybone("topleg.R","-Z","hip.R",[],["RKNE"],[("IK","RKNE",1),("TT","RTHI",1,"+YZ"),("LR","XZ",1)]) - nodes[11]=Mybone("lowleg.R","-Z","topleg.R",[],["RANK","RHEE"],[("IK","RHEE",1.0),("TT","RSHN",1,"+YZ"),("LR","XZ",1)]) - nodes[12]=Mybone("foot.R", "-Y","lowleg.R",[],["RTOE","RMT5"],[("IK","RTOE",1.0),("IK","RMT5",0.2),("TT","RMT5",1,"+YZ")]) - nodes[13]=Mybone("toes.R", 
"-Y","foot.R",[],["RTOE"],[("IK","RTOE",1.0)]) - - nodes[14]=Mybone("shoulder.L","+X","chest",[],["LSHO"],[("IK","LSHO",1.0)]) - nodes[15]=Mybone("toparm.L", "+X","shoulder.L",[],["LELB"],[("IK","LELB",1.0),("TT","LUPA",1,"+YZ"),("LR","XZ",1)]) - nodes[16]=Mybone("lowarm.L", "+X","toparm.L",[],["LWRA","LWRB"],[("IK","LWRA",1.0),("IK","LWRB",0.5),("TT","LFRM",1,"+YZ"),("LR","XZ",1)]) - nodes[17]=Mybone("hand.L", "+X","lowarm.L",[],["LFIN"],[("IK","LFIN",1.0),("TT","RWRA",1,"+YZ"),("LR","XZ",1)]) #missing ,"LTHM" - - nodes[18]=Mybone("hip.L", "+X","root",[],["LFWT","LBWT"],[("IK","LFWT",1.0),("IK","LBWT",0.5)]) - nodes[19]=Mybone("topleg.L","-Z","hip.L",[],["LKNE"],[("IK","LKNE",1),("TT","LTHI",1,"+YZ"),("LR","XZ",1)]) - nodes[20]=Mybone("lowleg.L","-Z","topleg.L",[],["LANK","LHEE"],[("IK","LHEE",1.0),("TT","LSHN",1,"+YZ"),("LR","XZ",1)]) - nodes[21]=Mybone("foot.L", "-Y","lowleg.L",[],["LTOE","LMT5"],[("IK","LTOE",1.0),("IK","LMT5",0.2),("TT","LMT5",1,"+YZ"),("LR","XZ",1)]) - nodes[22]=Mybone("toes.L", "-Y","foot.L",[],["LTOE"],[("IK","LTOE",1.0)]) - return nodes - -def createNodes(marker_set): # make a list of bone name, parent, edit head loc, edit tail loc, pose constraints - #ultimately, I want to read in an XML file here that specifies the node trees for various marker sets - if marker_set==HUMAN_CMU: nodes= createHumanCMU() #load up and verify the file has the CMU marker set - elif marker_set==HUMAN_CMU2: nodes= createHumanCMU() - else: nodes=[] - return nodes -def findEntry(item,list): - for i in range(len(list)): - if item==list[i]: break - debug(100,"findEtnry %s is %i in list of %i items" % (item,i,len(list))) - return i -def makeNodes(prefix, markerList, empties, marker_set): #make sure the file has the nodes selected - nodes= createNodes(marker_set) # list has generic marker names; replace them with the actual object names created - #each entry in markerlist has a corresponding entry in empties in the same order - errList=[] - for i in range(len(nodes)): - node= nodes[i] - debug(60,"Adapting node %s to prefix %s" % (node,prefix)) - - #replace generic head markers with actual empty names - for im in range(len(node.headMark)): - marker= node.headMark[im] - mark= prefix+marker - imn= findEntry(mark,markerList) - if imn < len(markerList): - debug(90,"Adapating head marker %s to %s" % (marker,empties[imn].name)) - nodes[i].headMark[im]= empties[imn].name - else: errList.append([node.name,"head location",mark,node,2]) - - #replace generic tail markers with actual empty names - for im in range(len(node.tailMark)): - marker= node.tailMark[im] - mark= prefix+marker - imn= findEntry(mark,markerList) - if imn < len(markerList): - debug(90,"Adapating marker %s to %s" % (marker,empties[imn].name)) - nodes[i].tailMark[im]= empties[imn].name - else: errList.append([node.name,"tail location",mark,node,2]) - - #replace generic constraint markers (if the constraint references a marker) with empty name - for im in range(len(node.const)): - const=node.const[im] - if const[0] in ("LOC","IK","TT"): - marker=const[1] - mark= prefix+marker - imn= findEntry(mark,markerList) - if imn < len(markerList): - debug(90,"Adapating %s constraint marker %s to %s" % (const[0],marker,empties[imn].name)) - if const[0] in ("IK","LR","LOC"): - nodes[i].const[im]=(const[0], empties[imn].name, const[2]) - else: nodes[i].const[im]=(const[0], empties[imn].name, const[2], const[3]) - else: errList.append([node.name,const[0]+" constraint",mark,node,4]) - - if errList!=[]: #we have issues. 
- for err in errList: - debug(0,"Bone "+err[0]+" specifies "+err[2]+" as "+err[1]+"which was not specified in file.") - #need a popup here to ignore/cleanup node tree, or add the marker(?) or abort - usrOption= 1 - if usrOption==0: #ignore this marker (remove it) - for node in nodes: #find the bone in error - if node.name==err[0]: - print "Before",node - if err[3] in range(2,3): - node[err[3]].remove(err[2]) #find the marker in error and remove it - elif err[3]==4: #find the constraint and remove it - for const in node.const: - if const[1]==err[2]: node.const.remove(const) - print "After",node - elif usrOption==1: #add these markers as static empties, and user will automate them later - #and the bones will be keyed to them, so it will all be good. - #file may have just mis-named the empty, or the location can be derived based on other markers - em= getOrCreateEmpty(err[2]) - em.layers= LAYERS_MARKER - else: abort() #abend - if DEBUG==100: status("Nodes Updated") - return nodes #nodes may be updated - -def makeBones(arm,nodes): - debug(20,"Making %i edit bones" % len(nodes)) - for node in nodes: - bone= Blender.Armature.Editbone() - bone.name= node.name - arm.bones[bone.name]= bone #add it to the armature - debug(50,"Bone added: %s" % bone) - if bone.name <> node.name: - debug(0,"ERROR: duplicate node % name specified" % node.name) - node.name= bone.name #you may not get what you asked for - if node.parent!="": #parent - debug(60,"Bone parent: %s"%node.parent) - bone.parent= arm.bones[node.parent] - bone.options = [Armature.CONNECTED] - #compute head = average of the reference empties - if node.headMark==[]: # no head explicitly stated, must be tail of parent - for parnode in nodes: - if node.parent==parnode.name: break - node.headMark= parnode.tailMark - node.head= parnode.tail - else: node.head= comp_loc(node.headMark) #node head is specified, probably only for root. - - bone.head= node.head - debug(60,"%s bone head: (%0.2f, %0.2f, %0.2f)" % (bone.name,bone.head.x, bone.head.y, bone.head.z)) - mylen=comp_len(node.headMark,node.tailMark) # length of the bone as it was recorded for that person - # for our T position, compute the bone length, add it to the head vector component to get the tail - if node.vec[0]=="-": mylen=-mylen - debug(80,"Bone vector %s length %0.2f" %(node.vec,mylen)) - node.tail= Vector(node.head) - myvec=node.vec[1].lower() - if myvec=="x": node.tail.x+=mylen - elif myvec=="y": node.tail.y+=mylen - elif myvec=="z": node.tail.z+=mylen - else: - debug(0,"%s %s %s %s" % (node.vec,myvec,node.vec[0],node.vec[1])) - error("ERROR IN BONE SPEC ") - bone.tail= node.tail - debug(60,"Bone tail: (%i,%i,%i)" %(bone.tail.x, bone.tail.y, bone.tail.z)) - #Armature created in the T postion, but with bone lengths to match the marker set and subject - #when this is constrained to the markers, the recorded action will be relative to a know Rotation - #so that all recorded actions should be interchangeable. wooot! - #Only have to adjust starting object loc when matching up actions. 
- return #arm #updated - -def makeConstLoc(pbone,const): - const_new= pbone.constraints.append(Constraint.Type.COPYLOC) - const_new.name = const[0]+"-"+const[1] - const_target=Blender.Object.Get(const[1]) - const_new[BCS.TARGET]= const_target - const_new.influence = const[2] - return - -def makeConstLimRot(pbone,const): - const_new= pbone.constraints.append(Constraint.Type.LIMITROT) - const_new.name = const[0]+"-"+const[1] - for axis in const[1]: - if axis.lower()=="x": const_new[BCS.LIMIT] |= BCS.LIMIT_XROT #set - if axis.lower()=="y": const_new[BCS.LIMIT] |= BCS.LIMIT_YROT #set - if axis.lower()=="z": const_new[BCS.LIMIT] |= BCS.LIMIT_ZROT #set - const_new[BCS.OWNERSPACE]= BCS.SPACE_LOCAL - const_new.influence = const[2] - # fyi, const[Constraint.Settings.LIMIT] &= ~Constraint.Settings.LIMIT_XROT #reset - return - -def makeConstIK(prefix,pbone,const): - #Blender 246 only supports one IK Solver per bone, but we might want many, - # so we need to create a reference empty named after the bone - # that floats between the markers, so the bone can point to it as a singularity - myob= getOrCreateEmpty(IK_PREFIX+prefix+pbone.name) - myob.layers= LAYERS_IK - # note that this empty gets all the IK constraints added on as location constraints - myconst= myob.constraints.append(Constraint.Type.COPYLOC) - myconst.name=const[0]+"-"+const[1] - myconst[Constraint.Settings.TARGET]= Blender.Object.Get(const[1]) - myconst.influence = const[2] - - #point the bone once to the empty via IK - success=False - for myconst in pbone.constraints: - if myconst.type == Constraint.Type.IKSOLVER: success=True - if not(success): #add an IK constraint to the bone to point to the empty - #print pbone - myconst= pbone.constraints.append(Constraint.Type.IKSOLVER) - myconst.name = const[1] - myconst[BCS.TARGET]= myob - myconst.influence = const[2] - #const_new[Constraint.Settings.BONE]= ? - myconst[BCS.CHAINLEN]= 1 - myconst[BCS.USETIP]= True - myconst[BCS.STRETCH]= False - return - -def makeConstTT(pbone,const): - myconst= pbone.constraints.append(Constraint.Type.TRACKTO) - myconst.name=const[0]+"-"+const[1] - debug(70,"%s %s" % (myconst,const[3])) - myob= getEmpty(const[1]) - if myob!= None: - myconst[BCS.TARGET]= myob - myconst.influence = const[2] - #const[3] is the Track and the thrird char is the Up indicator - myconst[BCS.TRACK]= trackto[const[3][0:2].lower()] - myconst[BCS.UP]=trackup[const[3][2].lower()]#up direction - myconst[BCS.OWNERSPACE]= BCS.SPACE_LOCAL - myconst[BCS.TARGETSPACE]= [BCS.SPACE_LOCAL] - if const[3][1]==const[3][2]: debug(0,"WARNING: Track To axis and up axis should not be the same. Constraint is INACTIVE") - else: #marker not found. could be missing from this file, or an error in node spec - error("TrackTo Constraint for %s |specifies unknown marker %s" % (pbone.name,const[1])) - return - -def makePoses(prefix,arm_ob,nodes): # pose this armature object based on node requirements - #this is constraint-based posing, not hard-keyed posing. 
- #we do constraint-based first so that user can adjust the constraints, possibly smooth/tweak motion - # add additional bones or referneces/constraints, before baking to hard keyframes - - pose= arm_ob.getPose() - debug(0,"Posing %s %s" % (arm_ob, pose)) - for node in nodes: - debug(30, "examining %s" %node) - if len(node.const)>0: #constraints for this bone are desired - pbone = pose.bones[node.name] - debug(40,"Posing bone %s" %pbone) - for const in node.const: - debug(50,"Constraining %s by %s" %(pbone,const)) - if const[0]=="LOC":makeConstLoc(pbone,const) - elif const[0]=="IK": makeConstIK(prefix,pbone,const) - elif const[0]=="LR": makeConstLimRot(pbone,const) - elif const[0]=="TT": makeConstTT(pbone,const) - else: - error("FATAL: constraint %s not supported" %const[0]) - break - debug(10, "Posing complete. Cycling pose and edit mode") - pose.update() - return - -def make_arm(subject,prefix,markerList, emptyList,marker_set): - debug(10,"**************************") - debug(00, "**** Making Armature for %s..." % subject) - debug(10, "**************************") - # copied from bvh import bvh_node_dict2armature; trying to use similar process for further integtration down the road - # Add the new armature, - - nodes= makeNodes(prefix, markerList, emptyList, marker_set) #assume everyone in file uses the same mocap suit - # each person in the file may be different height, so each needs their own new armature to match marker location - -## obs= Blender.Object.Get() -## success=False -## for ob in obs: -## if ob.name==subject: -## success=True -## if success: -## menu="Human Armature already exists for this subject." -## menu+="%t|Create another in this scene" -## menu+="%l|Start a new scene" -## menu+="%l|Use this armature" -## menusel= Draw.PupMenu(menu) - - arm= Blender.Armature.New(subject) #make an armature. - debug(10,"Created Armature %s" % arm) - # Put us into editmode - arm.makeEditable() - arm.drawType = Armature.OCTAHEDRON - makeBones(arm,nodes) - scn = Blender.Scene.GetCurrent() #add it to the current scene. could create new scenes here as yaf - arm_ob= scn.objects.new(arm) #instance it in the scene. this is the new way for 2.46 to instance objects - arm_ob.name= subject #name it something like the person it represents - arm_ob.layers= LAYERS_ARMOB - debug(20,"Instanced Armature %s" % arm_ob) - arm.update() #exit editmode. 
Arm must be instanced as an object before you can save changes or pose it - Blender.Redraw() # show the world - if DEBUG==100: status("T-Bones made.") - - makePoses(prefix,arm_ob,nodes) #constrain arm_ob with these markers - - scn.update(1) #make everyone behave themselves in the scene, and respect the new constraints - return arm_ob - -def setupAnim(StartFrame, EndFrame, VideoFrameRate): - debug(100, 'VideoFrameRate is %i' %VideoFrameRate) - if VideoFrameRate<1: VideoFrameRate=1 - if VideoFrameRate>120: VideoFrameRate=120 - # set up anim panel for them - context=scn.getRenderingContext() - context.sFrame=StartFrame - context.eFrame=EndFrame - context.fps=int(VideoFrameRate) - - Blender.Set("curframe",StartFrame) - Blender.Redraw() - return - -def makeCloud(Nmarkers,markerList,StartFrame,EndFrame,Markers): - debug(10, "**************************") - debug(00, "*** Making Cloud Formation") - debug(10, "**************************") - empties=[] - ipos=[] - curvesX=[] - curvesY=[] - curvesZ=[] - debug(0, "%i Markers (empty cloud) will be put on layers %s" % (Nmarkers,LAYERS_MARKER)) - # Empty Cloud formation - for i in range(Nmarkers): - debug(100,"%i marker %s"%(i, markerList[i])) - emptyname = markerList[i] # rdw: to use meaningful names from Points parameter - em= getOrCreateEmpty(emptyname) #in this scene - em.layers= LAYERS_MARKER - #make a list of the actual empty - empties.append(em) - #assign it an ipo with the loc xyz curves - lipo = Ipo.New("Object",em.name) - ipos.append(lipo) - curvesX.append(getOrCreateCurve(ipos[i],'LocX')) - curvesY.append(getOrCreateCurve(ipos[i],'LocY')) - curvesZ.append(getOrCreateCurve(ipos[i],'LocZ')) - empties[i].setIpo(ipos[i]) - debug(30,"Cloud of %i empties created." % len(empties)) - NvideoFrames= EndFrame-StartFrame+1 - debug(10, "**************************") - debug(00, "**** Calculating Marker Ipo Curves over %i Frames ..." % NvideoFrames) - debug(10, "**************************") - err= index=0 #number of errors, logical frame - for frame in range(StartFrame,EndFrame+1): - if index==0: start=sys.time() - elif index==100: - tmp=(NvideoFrames-100)*(sys.time()-start)/6000 - debug(0,"%i minutes process time estimated" % tmp) - elif index >100: print index*100/(NvideoFrames-1),"% complete\r", - for i in range(Nmarkers): - if Markers[index][i].z < 0: Markers[index][i].z= -Markers[index][i].z - success=True - if CLEAN: #check for good data - # C3D marker decoding may have coordinates negative (improper sign bit decoding?) - myX= abs(Markers[index][i].x) - myY= abs(Markers[index][i].y) - myZ= Markers[index][i].z - if myX > 10000 or myY > 10000 or myZ > 10000: success=False - if myX <.01 and myY <.01 and myZ <.01: success=False # discontinuity in marker tracking (lost marker) - - if success: - curvesX[i].append((frame, Markers[index][i].x)) #2.46 knot method - curvesY[i].append((frame, Markers[index][i].y)) - curvesZ[i].append((frame, Markers[index][i].z)) - if frame==StartFrame: debug(40, "%s loc frame %i: (%0.2f, %0.2f, %0.2f)" % (markerList[i],frame,Markers[index][i].x,Markers[index][i].y,Markers[index][i].z)) - else: - err+=1 # some files have thousands... - #debug(30,"Point ignored for marker:%s frame %i: (%i, %i, %i)" % (markerList[i],frame,Markers[index][i].x,Markers[index][i].y,Markers[index][i].z)) - index += 1 - debug(70, "%i points ignored across all markers and frames. Recalculating..." 
% err) - - for i in range(Nmarkers): - curvesX[i].Recalc() - curvesY[i].Recalc() - curvesZ[i].Recalc() - Blender.Set('curframe', StartFrame) - Blender.Redraw() - if DEBUG==100: status("Clound formed") - return empties - -def getNumber(str, length): - if length==2: # unsigned short - return struct.unpack('H',str[0:2])[0], str[2:] - sum = 0 - for i in range(length): - #sum = (sum << 8) + ord(str[i]) for big endian - sum = sum + ord(str[i])*(2**(8*i)) - return sum, str[length:] -def unpackFloat(chunk,proctype): - #print proctype - myvar=chunk[0:4] - if proctype==2: #DEC-VAX - myvar=chunk[2:4]+chunk[0:2] #swap lo=hi word order pair - return struct.unpack('f',myvar[0:4])[0] - -def getFloat(chunk,proctype): - return unpackFloat(chunk, proctype), chunk[4:] -def parseFloat(chunk,ptr,proctype): - return unpackFloat(chunk[ptr:ptr+4], proctype), ptr+4 - - -def load_c3d(FullFileName): -# Input: FullFileName - file (including path) to be read -# -# Variable: -# Markers 3D-marker data [Nmarkers x NvideoFrames x Ndim(=3)] -# VideoFrameRate Frames/sec -# AnalogSignals Analog signals [Nsignals x NanalogSamples ] -# AnalogFrameRate Samples/sec -# Event Event(Nevents).time ..value ..name -# ParameterGroup ParameterGroup(Ngroups).Parameters(Nparameters).data ..etc. -# CameraInfo MarkerRelated CameraInfo [Nmarkers x NvideoFrames] -# ResidualError MarkerRelated ErrorInfo [Nmarkers x NvideoFrames] - - Markers=[]; - VideoFrameRate=120; - AnalogSignals=[]; - AnalogFrameRate=0; - Event=[]; - ParameterGroups=[]; - CameraInfo=[]; - ResidualError=[]; - - debug(10, "*********************") - debug(10, "**** Opening File ***") - debug(10, "*********************") - - #ind=findstr(FullFileName,'\'); - #if ind>0, FileName=FullFileName(ind(length(ind))+1:length(FullFileName)); else FileName=FullFileName; end - debug(0, "FileName = " + FullFileName) - fid=open(FullFileName,'rb'); # native format (PC-intel). ideasman says maybe rU - content = fid.read(); - content_memory = content - #Header section - NrecordFirstParameterblock, content = getNumber(content,1) # Reading record number of parameter section - - key, content = getNumber(content,1) - if key!=80: - error('File: does not comply to the C3D format') - fid.close() - return - #Paramter section - content = content[512*(NrecordFirstParameterblock-1)+1:] # first word ignored - #file format spec says that 3rd byte=NumberofParmaterRecords... but is ignored here. 
- proctype,content =getNumber(content,1) - proctype = proctype-83 - proctypes= ["unknown","(INTEL-PC)","(DEC-VAX)","(MIPS-SUN/SGI)"] - - if proctype in (1,2): debug(0, "Processor coding %s"%proctypes[proctype]) - elif proctype==3: debug(0,"Program untested with %s"%proctypes[proctype]) - else: - debug(0, "INVALID processor type %i"%proctype) - proctype=1 - debug(0,"OVERRIDE processor type %i"%proctype) - - #if proctype==2, - # fclose(fid); - # fid=fopen(FullFileName,'r','d'); % DEC VAX D floating point and VAX ordering - #end - debug(10, "***********************") - debug(00, "**** Reading Header ***") - debug(10, "***********************") - - # ############################################### - # ## ## - # ## read header ## - # ## ## - # ############################################### - - #%NrecordFirstParameterblock=fread(fid,1,'int8'); % Reading record number of parameter section - #%key1=fread(fid,1,'int8'); % key = 80; - - content = content_memory - #fseek(fid,2,'bof'); - content = content[2:] - - # - Nmarkers, content=getNumber(content, 2) - NanalogSamplesPerVideoFrame, content = getNumber(content, 2) - StartFrame, content = getNumber(content, 2) - EndFrame, content = getNumber(content, 2) - MaxInterpolationGap, content = getNumber(content, 2) - - Scale, content = getFloat(content,proctype) - - NrecordDataBlock, content = getNumber(content, 2) - NanalogFramesPerVideoFrame, content = getNumber(content, 2) - - if NanalogFramesPerVideoFrame > 0: - NanalogChannels=NanalogSamplesPerVideoFrame/NanalogFramesPerVideoFrame - else: - NanalogChannels=0 - - VideoFrameRate, content = getFloat(content,proctype) - - AnalogFrameRate=VideoFrameRate*NanalogFramesPerVideoFrame - NvideoFrames = EndFrame - StartFrame + 1 - - debug(0, "Scale= %0.2f" %Scale) - debug(0, "NanalogFramesPerVideoFrame= %i" %NanalogFramesPerVideoFrame) - debug(0, "Video Frame Rate= %i" %VideoFrameRate) - debug(0, "AnalogFrame Rate= %i"%AnalogFrameRate) - debug(0, "# markers= %i" %Nmarkers) - debug(0, "StartFrame= %i" %StartFrame) - debug(0, "EndFrame= %i" %EndFrame) - debug(0, "# Video Frames= %i" %NvideoFrames) - - if Scale>0: - debug(0, "Marker data is in integer format") - if Scale>(XYZ_LIMIT/32767): - Scale=XYZ_LIMIT/32767.0 - debug(0, "OVERRIDE: Max coordinate is %i, Scale changed to %0.2f" % (XYZ_LIMIT,Scale)) - else: debug(0, "Marker data is in floating point format") - if VideoFrameRate<1 or VideoFrameRate>120: - VideoFrameRate= 120 - debug(0, "OVERRIDE Video Frame Rate= %i" %VideoFrameRate) - if proctype not in (1,2): # Intel, DEC are known good - debug(0, "OVERRIDE|Program not tested with this encoding. Set to Intel") - proctype= 1 - - debug(10, "***********************") - debug(10, "**** Reading Events ...") - debug(10, "***********************") - - content = content_memory - content = content[298:] #bizarre .. ce devrait être 150 selon la doc rdw skips first 299 bytes? - - EventIndicator, content = getNumber(content, 2) - EventTime=[] - EventValue=[] - EventName=[] - - debug(0, "Event Indicator = %i" %EventIndicator) - if EventIndicator==12345: #rdw: somehow, this original code seems fishy, but I cannot deny it. 
- Nevents, content = getNumber(content, 2) - debug(0, "Nevents= %i" %Nevents) - content = content[2:] - if Nevents>0: - for i in range(Nevents): - letime, content = getFloat(content,proctype) - EventTime.append(letime) - content = content_memory - content = content[188*2:] - for i in range(Nevents): - lavalue, content = getNumber(content, 1) - EventValue.append(lavalue) - content = content_memory - content = content[198*2:] - for i in range(Nevents): - lenom = content[0:4] - content = content[4:] - EventName.append(lenom) - - debug(00, "***************************") - debug(00, "**** Reading Parameters ...") - debug(10, "***************************") - subjects=[] # a name would be nice, but human will do - prefixes=[] # added on to mocap marker names, one for each subject - marker_subjects = [] # hopefully will be specified in the file and known to this program - markerList=[] - ParameterGroups = [] - ParameterNumberIndex = [] - - content = content_memory - content = content[512*(NrecordFirstParameterblock-1):] - - dat1, content = getNumber(content, 1) - key2, content = getNumber(content, 1) - - NparameterRecords, content = getNumber(content, 1) - debug(100, "NparameterRecords=%i"%NparameterRecords) - proctype,content =getNumber(content,1) - proctype = proctype-83 # proctype: 1(INTEL-PC); 2(DEC-VAX); 3(MIPS-SUN/SGI) - - for i in range(NparameterRecords): - leparam = ParameterGroup(None, None, []) - ParameterGroups.append(leparam) - ParameterNumberIndex.append(0) - # - Ncharacters, content = getNumber(content, 1) - if Ncharacters>=128: - Ncharacters = -(2**8)+(Ncharacters) - GroupNumber, content = getNumber(content, 1) - if GroupNumber>=128: - GroupNumber = -(2**8)+(GroupNumber) - debug(80,"GroupNumber = %i, Nchar=%i" %(GroupNumber,Ncharacters)) - - while Ncharacters > 0: - if GroupNumber<0: - GroupNumber=abs(GroupNumber) - GroupName = content[0:Ncharacters] - content = content[Ncharacters:] - #print "Group Number = ", GroupNumber - ParameterGroups[GroupNumber].name = GroupName - #print "ParameterGroupName =", GroupName - offset, content = getNumber(content, 2) - deschars, content = getNumber(content, 1) - GroupDescription = content[0:deschars] - content = content[deschars:] - ParameterGroups[GroupNumber].description = GroupDescription - # - ParameterNumberIndex[GroupNumber]=0 - content = content[offset-3-deschars:] - else: - - ParameterNumberIndex[GroupNumber]=ParameterNumberIndex[GroupNumber]+1 - ParameterNumber=ParameterNumberIndex[GroupNumber] - #print "ParameterNumber=", ParameterNumber - ParameterGroups[GroupNumber].parameter.append(Parameter(None, None, [], [], None)) - ParameterName = content[0:Ncharacters] - content = content[Ncharacters:] - #print "ParameterName = ",ParameterName - if len(ParameterName)>0: - ParameterGroups[GroupNumber].parameter[ParameterNumber-1].name=ParameterName - offset, content = getNumber(content, 2) - filepos = len(content_memory)-len(content) - nextrec = filepos+offset-2 - - type, content=getNumber(content, 1) - if type>=128: - type = -(2**8)+type - ParameterGroups[GroupNumber].parameter[ParameterNumber-1].type=type - - dimnum, content=getNumber(content, 1) - if dimnum == 0: - datalength = abs(type) - else: - mult=1 - dimension=[] - for j in range (dimnum): - ladim, content = getNumber(content, 1) - dimension.append(ladim) - mult=mult*dimension[j] - ParameterGroups[GroupNumber].parameter[ParameterNumber-1].dim.append(dimension[j]) - datalength = abs(type)*mult - - #print "ParameterNumber = ", ParameterNumber, " Group Number = ", GroupNumber - - if 
type==-1: - data = "" - wordlength=dimension[0] - if dimnum==2 and datalength>0: - for j in range(dimension[1]): - data=string.rstrip(content[0:wordlength]) - content = content[wordlength:] - ParameterGroups[GroupNumber].parameter[ParameterNumber-1].data.append(data) - elif dimnum==1 and datalength>0: - data=content[0:wordlength] - ParameterGroups[GroupNumber].parameter[ParameterNumber-1].data.append(data) # ??? - - myParam=string.rstrip(ParameterName) - myGroup=string.rstrip(GroupName) - msg= "-%s-%s-" % (myGroup,myParam) - if myGroup == "POINT": - if myParam== "LABELS": - # named in form of subject:marker. - # the list "empties" is a corresponding list of actual empty object names that make up the cloud - markerList= ParameterGroups[GroupNumber].parameter[ParameterNumber-1].data - debug(0, "%sLABELS = %i %s" %(msg, len(markerList),markerList)) #list of logical markers from 0 to n corresponding to points - elif myParam== "LABELS2": #more labels - # named in form of subject:marker. - # the list "empties" is a corresponding list of actual empty object names that make up the cloud - momarkList= ParameterGroups[GroupNumber].parameter[ParameterNumber-1].data - markerList+=momarkList - debug(0, "%sLABELS2 = %i %s" %(msg, len(momarkList),momarkList)) #list of logical markers from 0 to n corresponding to points - else: debug(70, "%s UNUSED = %s" %(msg,ParameterGroups[GroupNumber].parameter[ParameterNumber-1].data)) - elif myGroup in ["SUBJECT", "SUBJECTS"]: #info about the actor - if myParam in ["NAME", "NAMES"]: - subjects= ParameterGroups[GroupNumber].parameter[ParameterNumber-1].data - debug(0, "%sNames of Subjects = %s" %(msg, subjects)) # might be useful in naming armatures - for i in range(len(subjects)): - subjects[i]=subjects[i].rstrip() - if subjects[i]=="": subjects[i]="Human" - elif myParam == "LABEL_PREFIXES": - prefixes = ParameterGroups[GroupNumber].parameter[ParameterNumber-1].data - debug(0, "%sMarker Prefixes = %s" %(msg, prefixes)) # to xlate marker name to that in file - for i in range(len(prefixes)): - prefixes[i]=prefixes[i].rstrip() - elif myParam== "MARKER_SETS": - marker_subjects= ParameterGroups[GroupNumber].parameter[ParameterNumber-1].data - debug(0, "%sMarker Set = %s"%(msg, marker_subjects)) # marker set that each subject was wearing - elif myParam== "MODEL_PARAM": - action= ParameterGroups[GroupNumber].parameter[ParameterNumber-1].data - debug(0, "%sModel Paramter = %s"%(msg,action)) # might be a good name for the blender scene - elif myParam== "LABELS": - # named in form of subject:marker. 
- # the list "empties" is a corresponding list of actual empty object names that make up the cloud - markerList= ParameterGroups[GroupNumber].parameter[ParameterNumber-1].data - debug(0, "%sLABELS = %i %s"%(msg, len(markerList),markerList)) #list of logical markers from 0 to n corresponding to points - else: debug(70, "%sUNUSED = %s"%(msg, ParameterGroups[GroupNumber].parameter[ParameterNumber-1].data)) - else: - debug(70, "%sUNUSED = %s"%(msg, ParameterGroups[GroupNumber].parameter[ParameterNumber-1].data)) - elif type == 1: - debug(100,"Block type %i is largely unsupported and untested."%type) - data = [] - Nparameters=datalength/abs(type) - debug(100, "Nparameters=%i"%Nparameters) - for i in range(Nparameters): - ladata,content = getNumber(content, 1) - data.append(ladata) - ParameterGroups[GroupNumber].parameter[ParameterNumber-1].data=data - #print ParameterGroups[GroupNumber].parameter[ParameterNumber-1].data - - #print "type boolean" - elif type == 2 and datalength>0: - debug(100,"Block type %i is largely unsupported and untested."%type) - data = [] - Nparameters=datalength/abs(type) - debug(100, "Nparameters=%i"%Nparameters) - for i in range(Nparameters): - ladata,content = getNumber(content, 2) - data.append(ladata) - #ParameterGroups[GroupNumber].parameter[ParameterNumber-1].data=data - if dimnum>1: - #???? print "arg je comprends pas" - ParameterGroups[GroupNumber].parameter[ParameterNumber-1].data=data - #???ParameterGroups[GroupNumber].parameter[ParameterNumber-1].data=reshape(data,dimension) - else: - ParameterGroups[GroupNumber].parameter[ParameterNumber-1].data=data - #print ParameterGroups[GroupNumber].parameter[ParameterNumber-1].data - #pass - #print "type integer" - elif type == 4 and datalength>0: - debug(100,"Block type %i is largely unsupported and untested."%type) - data = [] - Nparameters=datalength/abs(type) - debug(100, "Nparameters=%i"%Nparameters) - for i in range(Nparameters): - ladata,content = getFloat(content,proctype) - data.append(ladata) - if dimnum>1: - ParameterGroups[GroupNumber].parameter[ParameterNumber-1].data=data - #print "arg je comprends pas" - #???ParameterGroups[GroupNumber].parameter[ParameterNumber-1].data=reshape(data,dimension) - else: - ParameterGroups[GroupNumber].parameter[ParameterNumber-1].data=data - #print ParameterGroups[GroupNumber].parameter[ParameterNumber-1].data - else: - debug(100,"Block type %i is largely unsupported and untested."%type) - #print "error" - pass - deschars, content= getNumber(content, 1) - if deschars>0: - description = content[0:deschars] - content = content[deschars:] - ParameterGroups[GroupNumber].parameter[ParameterNumber-1].description=description - - content = content_memory - content = content[nextrec:] - - Ncharacters,content = getNumber(content, 1) - if Ncharacters>=128: - Ncharacters = -(2**8)+(Ncharacters) - GroupNumber,content = getNumber(content, 1) - if GroupNumber>=128: - GroupNumber = -(2**8)+(GroupNumber) - debug(80,"GroupNumber = %i, Nchar=%i" %(GroupNumber,Ncharacters)) - - debug(00, "***************************") - debug(00, "**** Examining Parameters ...") - debug(10, "***************************") - - if len(subjects)==0: subjects=["Test"] #well, somebody got mocapped! 
- for i in range(0, len(subjects)-len(prefixes)): prefixes.append("") - for i in range(0, len(subjects)-len(marker_subjects)): marker_subjects.append(subjects[i]) - - #make a markerlist if they didn't - debug(0, "%i Markers specified, %i marker names supplied" %(Nmarkers,len(markerList))) - if len(markerList)==0: - debug(0, "File missing any POINT LABELS marker list. Making defaults") - #I guess just make cloud of empty.xxx - if len(markerList)XYZ_LIMIT or abs(myy)>XYZ_LIMIT or abs(myz)>XYZ_LIMIT: - err+=1 - if err>100: - debug(0, "Warning: 100 data points for markers seem way out there") - debug(0, "data read: (%i, %i, %i)" %(x,y,z)) - debug(0, "Consider revising Scale %0.2f" % Scale) - debug(0, "which now givs coordinates: (%i, %i, %i)" %(x*Scale,y*Scale,z*Scale)) - err=-0 - if abs(myx)>XYZ_LIMIT: myx= XYZ_LIMIT*myx/abs(myx) #preserve sign - if abs(myy)>XYZ_LIMIT: myy= XYZ_LIMIT*myy/abs(myy) #preserve sign - if abs(myz)>XYZ_LIMIT: myz= XYZ_LIMIT*myz/abs(myz) #preserve sign - Markers[i][j].x = myx - Markers[i][j].y = myy - Markers[i][j].z = myz - - a,ptr_read = parseFloat(content, ptr_read, proctype) - a = int(a) - highbyte = int(a/256) - lowbyte=a-highbyte*256 - CameraInfo[i][j] = highbyte - ResidualError[i][j] = lowbyte*abs(Scale) - #Monitor marker location to ensure data block is being parsed properly - if j==0: debug(90,"Frame %i loc of %s: (%i, %i, %i)" % (i,markerList[j],myx,myy,myz)) - if i==0: debug(50, "Initial loc of %s: (%i, %i, %i)" % (markerList[j],myx,myy,myz)) - - ptr_read+=residuals #skip over the following - #for j in range (NanalogFramesPerVideoFrame): - # for k in range(NanalogChannels): - # val, content = getNumber(content, 2) - # AnalogSignals[j+NanalogFramesPerVideoFrame*(i)][k]=val #??? i-1 - #else - # for i=1:NvideoFrames - # for j=1:Nmarkers - # Markers(i,j,1:3)=fread(fid,3,'int16')'.*Scale; - # ResidualError(i,j)=fread(fid,1,'int8'); - # CameraInfo(i,j)=fread(fid,1,'int8'); - # end - # waitbar(i/NvideoFrames) - # for j=1:NanalogFramesPerVideoFrame, - # AnalogSignals(j+NanalogFramesPerVideoFrame*(i-1),1:NanalogChannels)=... - # fread(fid,NanalogChannels,'int16')'; - # end - # end - #end - - else: #Scale is positive, but should be <1 to scale down, like 0.05 - two16= -2**16 - if len(content) < NvideoFrames*(Nmarkers*(6+2)+residuals): - error("%i bytes is not enough data for |%i frames|%i markers|%i residual" %(len(content),NvideoFrames,Nmarkers,residuals)) - #Note: I really tried to optimize this loop, since it was taking hours to process - for i in range(NvideoFrames): - if i==0: start=sys.time() - elif i==10: - tmp=(sys.time()-start)*NvideoFrames/600 - debug(0,"%i minutes remaining..." % tmp) - else: print "%i percent complete. On Frame %i Points procesed: %i\r" % (i*100/NvideoFrames,i,i*Nmarkers), - - for j in range(Nmarkers): - #x, content = getNumber(content,2) - # this is old skool signed int, not but not a short. 
- x = ord(content[ptr_read+0]) + (ord(content[ptr_read+1])<<8) - if x>32768: x+=two16 - y = ord(content[ptr_read+2]) + (ord(content[ptr_read+3])<<8) - if y>32768: y+=two16 - z = ord(content[ptr_read+4]) + (ord(content[ptr_read+5])<<8) - if z>32768: z+=two16 - -## -## x = ord(content[ptr_read]) + ord(content[ptr_read+1])*(2**8) -## ptr_read+=2 -## if x > 32768: -## x=-(2**16)+(x) -## #y, content = getNumber(content,2) -## y = ord(content[ptr_read]) + ord(content[ptr_read+1])*(2**8) -## ptr_read+=2 -## if y > 32768: -## y=-(2**16)+(y) -## #z, content = getNumber(content,2) -## z = ord(content[ptr_read]) + ord(content[ptr_read+1])*(2**8) -## ptr_read+=2 -## if z > 32768: -## z=-(2**16)+(z) -## -## print "(%i=%i, %i=%i, %i=%i)" %(x,myx,y,myy,z,myz) - - # for integers, I changed Scale above to avoid getting impossible numbers - Markers[i][j].x = x*Scale - Markers[i][j].y = y*Scale - Markers[i][j].z = z*Scale - -## ResidualError[i][j], content = getNumber(content, 1) -## CameraInfo[i][j], content = getNumber(content, 1) - #try to improve performance by: - ResidualError[i][j]= ord(content[ptr_read+6]) - CameraInfo[i][j]= ord(content[ptr_read+7]) - - content= content[ptr_read+8:] - ptr_read=0 - - if j==0: debug(100,"Frame %i loc of %s: %s" % (i,markerList[j],Markers[i][j])) - if i==0: debug(50, "Initial loc of %s: (%s)" % (markerList[j],Markers[i][j])) - - #for j in range (NanalogFramesPerVideoFrame): - # for k in range(NanalogChannels): - # val, content = getNumber(content, 2) - #AnalogSignals(j+NanalogFramesPerVideoFrame*(i-1),1:NanalogChannels)=val - ptr_read= residuals # skip over the above - print "\ndone with file." - fid.close() - - cloud= makeCloud(Nmarkers,markerList,StartFrame,EndFrame,Markers) - - setupAnim(StartFrame, EndFrame,VideoFrameRate) - - debug(10, "**************************") - debug(00, "**** Making %i Armatures" % len(subjects)) - debug(10, "**************************") - for i in range(len(subjects)): - marker_set= marker_subjects[i] - success=False - if len(marker_set)>0: - for trymark in MARKER_SETS: - if trymark[0:len(marker_set)]==marker_set: - marker_set=trymark - success=True - if success: - debug(0, "Armature for %s will be put on layers %s" % (subjects[i],LAYERS_ARMOB)) - debug(0, " based on an markers beginning with %s" % prefixes[i]) - ob= make_arm(subjects[i],prefixes[i],markerList,cloud,marker_set) - else: - debug(00, "Presently, this program can automatically create a constrained armature for marker sets %s" % MARKER_SETS) - debug(00, "%s uses an unknown marker set %s" % (subjects[i],marker_set)) - debug(10, "Have a nice day! If you figure out an armature node system for this cloud, please add it to the program.") - - debug(10, "**************************") - debug(00, "**** Conclusion") - minmax=[0,0,0,0,0,0] - for i in range(NvideoFrames): - for j in range(Nmarkers): - if minmax[0]>Markers[i][j].x: minmax[0]=Markers[i][j].x - if minmax[1]>Markers[i][j].y: minmax[1]=Markers[i][j].y - if minmax[2]>Markers[i][j].z: minmax[2]=Markers[i][j].z - if minmax[3] - - a camera called "10" will become active at frame 10.
- - a camera called "10,25,185" will become active at frames 10, 25 and 185. - -Notes:
- - This script creates another script named camera.py, which is linked to the current scene.
- - If there is already a text called "camera.py", but it's from an old version or is not recognized, -you can choose whether to rename it or overwrite it. - - Script inspired by Jean-Michel (jms) Soler's:
- http://jmsoler.free.fr/didacticiel/blender/tutor/cpl_changerdecamera.htm -""" - - -# $Id$ -# -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Copyright (C) 2004-2005: Regis Montoya -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -#Script inspired of the idea of this one : -#http://jmsoler.free.fr/didacticiel/blender/tutor/cpl_changerdecamera.htm -# -#---------------------------------------------- -# R3gis Montoya (3R) -# -# Pout tout probleme a: -# cybercreator@free.fr -# --------------------------------------------- - -import Blender -from Blender import * -import string - -header = '# camera.py 1.3 scriptlink' - -camera_change_scriptlink = header + \ -''' -import Blender -def main(): - scn = Blender.Scene.GetCurrent() - frame = str(Blender.Get('curframe')) - - # change the camera if it has the current frame - for ob_cam in [ob for ob in scn.objects if ob.type == 'Camera']: - for number in ob_cam.name.split(','): - if number == frame: - scn.setCurrentCamera(ob_cam) - return -main() -''' - -def main(): - - # Get the text - try: cam_text = Blender.Text.Get('camera.py') - except: cam_text = None - - if cam_text: - if cam_text.asLines()[0] != header: - ret = Blender.Draw.PupMenu("WARNING: An old camera.py exists%t|Overwrite|Rename old version text") - if ret == -1: return # EXIT DO NOTHING - elif ret == 1: Text.unlink(cam_text) - elif ret == 2: cam_text.name = 'old_camera.txt' - cam_text = None - - if not cam_text: - scripting=Blender.Text.New('camera.py') - scripting.write(camera_change_scriptlink) - - scn=Scene.GetCurrent() - scriptlinks = scn.getScriptLinks('FrameChanged') - if not scriptlinks or ('camera.py' not in scriptlinks): - scn.addScriptLink('camera.py','FrameChanged') - Blender.Draw.PupMenu('FrameChange Scriptlink Added%t|Name camera objects to their activation frame numbers(s) seperated by commas|valid names are "1,10,46" or "1,10,200" or "200" (without quotation marks)') - Blender.Window.RedrawAll() - -if __name__ == '__main__': - main() \ No newline at end of file diff --git a/release/scripts/config.py b/release/scripts/config.py deleted file mode 100644 index cbf8e272b91..00000000000 --- a/release/scripts/config.py +++ /dev/null @@ -1,801 +0,0 @@ -#!BPY - -""" -Name: 'Scripts Config Editor' -Blender: 236 -Group: 'System' -Tooltip: 'View and edit available scripts configuration data' -""" - -__author__ = "Willian P. Germano" -__version__ = "0.1 2005/04/14" -__email__ = ('scripts', 'Author, wgermano:ig*com*br') -__url__ = ('blender', 'blenderartists.org') - -__bpydoc__ ="""\ -This script can be used to view and edit configuration data stored -by other scripts. 
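The storage mechanism is spelled out in the Technical note that follows; as a concrete illustration, a script using the same Blender 2.4x Registry calls this editor relies on would round-trip its key roughly as below. The key name and fields are placeholders, not values this editor defines:

from Blender import Registry

KEY = 'myscript'   # placeholder key name

# Store the key; the final True also caches it in a file under bpydata/config/.
Registry.SetKey(KEY, {'verbose': True, 'limit': 10}, True)

# Read it back; True means the on-disk config file is consulted as well.
cfg = Registry.GetKey(KEY, True)
if cfg and cfg.has_key('verbose'):
    print 'verbose =', cfg['verbose']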
- -Technical: this data is saved as dictionary keys with the -Blender.Registry module functions. It is persistent while Blender is -running and, if the script's author chose to, is also saved to a file -in the scripts config data dir. - -Usage: - -- Start Screen: - -To access any available key, select it from (one of) the menu(s). - -Hotkeys:
- ESC or Q: [Q]uit
- H: [H]elp - -- Keys Config Screen: - -This screen exposes the configuration data for the chosen script key. If the -buttons don't fit completely on the screen, you can scroll up or down with -arrow keys or a mouse wheel. Leave the mouse pointer over any button to get -a tooltip about that option. - -Any change can be reverted -- unless you have already applied it. - -If the key is already stored in a config file, there will be a toggle button -(called 'file') that controls whether the changes will be written back to -the file or not. If you just want to change the configuration for the current -session, simply unset that button. Note, though, that data from files has -precedence over those keys already loaded in Blender, so if you re-run this -config editor, unsaved changes will not be seen. - -Hotkeys:
- ESC: back to Start Screen
- Q: [Q]uit
- U: [U]ndo changes
- ENTER: apply changes (once applied, they cannot be reverted)
- UP, DOWN Arrows and mouse wheel: scroll text up / down - -Notes: - -a) Available keys are determined by which scripts you use. If the key you -expect isn't available (or maybe there are none or too few keys), either the -related script doesn't need or still doesn't support this feature or the key -has not been stored yet, in which case you just need to run that script once -to make its config data available. - -b) There are two places where config data files can be saved: the -bpydata/config/ dir (1) inside the default scripts dir or (2) inside the user -defined Python scripts dir -(User Preferences window -> File Paths tab -> Python path). If available, -(2) is the default and also the recommended option, because then fresh Blender -installations won't delete your config data. To use this option, simply set a -dir for Python scripts at the User Preferences window and make sure this dir -has the subdirs bpydata/ and bpydata/config/ inside it. - -c) The key called "General" in the "Other" menu has general config options. -All scripts where that data is relevant are recommended to access it and set -behaviors accordingly. -""" - -# $Id$ -# -# -------------------------------------------------------------------------- -# config.py version 0.1 2005/04/08 -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Copyright (C) 2004: Willian P. Germano, wgermano _at_ ig.com.br -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -import Blender -from Blender import Draw, BGL, Registry, Window, sys as bsys -from Blender.Window import Theme -from BPyRegistry import LoadConfigData, SaveConfigData, HasConfigData,\ - BPY_KEY_IN_FILE - -MAX_STR_LEN = 300 # max length for a string -MAX_ITEMS_NUM = 100 # max number for each type of button - -# --- -# The "General" configure options key is managed from this script. 
-verbose = True -confirm_overwrite = True - -tooltips = { - 'verbose': 'print script messages (info, warnings, errors) to the console', - 'confirm_overwrite': 'scripts should always confirm before overwriting files' -} - -CFG_LIST = ['verbose', 'confirm_overwrite', 'tooltips'] -KEY_NAME = 'General' - -def update_registry(): - rd = {} - for var in CFG_LIST: - exec("rd['%s']=%s" % (var, var)) - Registry.SetKey(KEY_NAME, rd, True) - -rd = Registry.GetKey('General', True) -if rd: - try: - for var in CFG_LIST[:-1]: # no need to update tooltips - exec("%s=rd['%s']" % (var, var)) - except: update_registry() - -else: - update_registry() -# --- - -# script globals: -CFGKEY = '' -LABELS = [] -GD = {} # groups dict (includes "Other" for unmapped keys) -INDEX = 0 # to pass button indices to fs callbacks -FREEKEY_IDX = 0 # index of set of keys not mapped to a script name -KEYMENUS = [] -ALL_SCRIPTS = {} -ALL_GROUPS = [] -START_SCREEN = 0 -CONFIG_SCREEN = 1 -DISK_UPDATE = True # write changed data to its config file - -ACCEPTED_TYPES = [bool, int, float, str, unicode] - -SCREEN = START_SCREEN - -SCROLL_DOWN = 0 - -# events: -BEVT_START = 50 -BEVT_EXIT = 0 + BEVT_START -BEVT_BACK = 1 + BEVT_START -BEVT_DISK = 2 + BEVT_START -BEVT_CANCEL = 3 + BEVT_START -BEVT_APPLY = 4 + BEVT_START -BEVT_HELP = 5 + BEVT_START -BEVT_DEL = 6 + BEVT_START -BEVT_KEYMENU = [] -BUT_KEYMENU = [] -BEVT_BOOL = 100 -BEVT_INT = BEVT_BOOL + MAX_ITEMS_NUM -BEVT_FLOAT = BEVT_BOOL + 2*MAX_ITEMS_NUM -BEVT_STR = BEVT_BOOL + 3*MAX_ITEMS_NUM -BEVT_BROWSEDIR = BEVT_BOOL + 4*MAX_ITEMS_NUM -BEVT_BROWSEFILE = BEVT_BOOL + 5*MAX_ITEMS_NUM -BUT_TYPES = { - bool: 0, - int: 0, - float: 0, - str: 0 -} - -# Function definitions: - -def get_keys(): - LoadConfigData() # loads all data from files in (u)scripts/bpydata/config/ - return [k for k in Registry.Keys() if k[0] != "_"] - - -def show_help(script = 'config.py'): - Blender.ShowHelp(script) - - -def fs_dir_callback(pathname): - global CFGKEY, INDEX - - pathname = bsys.dirname(pathname) - datatypes = CFGKEY.sorteddata - datatypes[str][INDEX][1] = pathname - - -def fs_file_callback(pathname): - global CFGKEY, INDEX - - datatypes = CFGKEY.sorteddata - datatypes[str][INDEX][1] = pathname - - -# parse Bpymenus file to get all script filenames -# (used to show help for a given key) -def fill_scripts_dict(): - global ALL_SCRIPTS, ALL_GROUPS - - group = '' - group_len = 0 - sep = bsys.sep - home = Blender.Get('homedir') - if not home: - errmsg = """ -Can't find Blender's home dir and so can't find the -Bpymenus file automatically stored inside it, which -is needed by this script. Please run the -Help -> System -> System Information script to get -information about how to fix this. 
-""" - raise SystemError, errmsg - fname = bsys.join(home, 'Bpymenus') - if not bsys.exists(fname): return False - f = file(fname, 'r') - lines = f.readlines() - f.close() - for l in lines: - if l.rfind('{') > 0: - group = l.split()[0] - ALL_GROUPS.append(group) - group_len += 1 - continue - elif l[0] != "'": continue - fields = l.split("'") - if len(fields) > 2: - menuname = fields[1].replace('...','') - fields = fields[2].split() - if len(fields) > 1: - fname = fields[1].split(sep)[-1] - i = 1 - while not fname.endswith('.py'): - i += 1 - fname = "%s %s" % (fname, fields[i]) - ALL_SCRIPTS[fname] = (menuname, group_len - 1) - return True - - -def map_to_registered_script(name): - global ALL_SCRIPTS - - if not name.endswith('.py'): - name = "%s.py" % name - if ALL_SCRIPTS.has_key(name): - return ALL_SCRIPTS[name] # == (menuname, group index) - return None - - -def reset(): - global LABELS, GD, KEYMENUS, KEYS - - # init_data is recalled when a key is deleted, so: - LABELS = [] - GD = {} - KEYMENUS = [] - KEYS = get_keys() - - -# gather all script info, fill gui menus -def init_data(): - global KEYS, GD, ALL_GROUPS, ALL_SCRIPTS, KEYMENUS, LABELS - global BUT_KEYMENU, BEVT_KEYMENU, FREEKEY_IDX - - for k in ALL_GROUPS: - GD[k] = [] - GD[None] = [] - - for k in KEYS: - res = map_to_registered_script(k) - if res: - GD[ALL_GROUPS[res[1]]].append((k, res[0])) - else: GD[None].append((k, k)) - - for k in GD.keys(): - if not GD[k]: GD.pop(k) - - if GD.has_key(None): - GD['Other'] = GD[None] - GD.pop(None) - FREEKEY_IDX = -1 - - BUT_KEYMENU = range(len(GD)) - - for k in GD.keys(): - kmenu = ['Configuration Keys: %s%%t' % k] - for j in GD[k]: - kmenu.append(j[1]) - kmenu = "|".join(kmenu) - KEYMENUS.append(kmenu) - LABELS.append(k) - - if FREEKEY_IDX < 0: - FREEKEY_IDX = LABELS.index('Other') - - length = len(KEYMENUS) - BEVT_KEYMENU = range(1, length + 1) - BUT_KEYMENU = range(length) - - -# for theme colors: -def float_colors(cols): - return map(lambda x: x / 255.0, cols) - - - -class Config: - - def __init__(self, key, has_group = True): - global DISK_UPDATE - - self.key = key - self.has_group = has_group - self.name = key - self.fromdisk = HasConfigData(key) & BPY_KEY_IN_FILE - if not self.fromdisk: DISK_UPDATE = False - else: DISK_UPDATE = True - - self.origdata = Registry.GetKey(key, True) - data = self.data = self.origdata.copy() - - if not data: - Draw.PupMenu('ERROR: couldn\'t find requested data') - self.data = None - return - - keys = data.keys() - nd = {} - for k in keys: - nd[k.lower()] = k - - if nd.has_key('tooltips'): - ndval = nd['tooltips'] - self.tips = data[ndval] - data.pop(ndval) - else: self.tips = 0 - - if nd.has_key('limits'): - ndval = nd['limits'] - self.limits = data[ndval] - data.pop(ndval) - else: self.limits = 0 - - if self.has_group: - scriptname = key - if not scriptname.endswith('.py'): - scriptname = "%s.py" % scriptname - elif nd.has_key('script'): - ndval = nd['script'] - scriptname = data[ndval] - data.pop(ndval) - if not scriptname.endswith('.py'): - scriptname = "%s.py" % scriptname - else: scriptname = None - - self.scriptname = scriptname - - self.sort() - - - def needs_update(self): # check if user changed data - data = self.data - new = self.sorteddata - - for vartype in new.keys(): - for i in new[vartype]: - if data[i[0]] != i[1]: return 1 - - return 0 # no changes - - - def update(self): # update original key - global DISK_UPDATE - - data = self.data - odata = self.origdata - new = self.sorteddata - for vartype in new.keys(): - for i in new[vartype]: - if 
data[i[0]] != i[1]: data[i[0]] = i[1] - if odata[i[0]] != i[1]: odata[i[0]] = i[1] - - if DISK_UPDATE: Registry.SetKey(self.key, odata, True) - - def delete(self): - global DISK_UPDATE - - delmsg = 'OK?%t|Delete key from memory' - if DISK_UPDATE: - delmsg = "%s and from disk" % delmsg - if Draw.PupMenu(delmsg) == 1: - Registry.RemoveKey(self.key, DISK_UPDATE) - return True - - return False - - - def revert(self): # revert to original key - data = self.data - new = self.sorteddata - for vartype in new.keys(): - for i in new[vartype]: - if data[i[0]] != i[1]: i[1] = data[i[0]] - - - def sort(self): # create a new dict with types as keys - global ACCEPTED_TYPES, BUT_TYPES - - data = self.data - datatypes = {} - keys = [k for k in data.keys() if k[0] != '_'] - for k in keys: - val = data[k] - tval = type(val) - if tval not in ACCEPTED_TYPES: continue - if not datatypes.has_key(tval): - datatypes[tval] = [] - datatypes[type(val)].append([k, val]) - if datatypes.has_key(unicode): - if not datatypes.has_key(str): datatypes[str] = datatypes[unicode] - else: - for i in datatypes[unicode]: datatypes[str].append(i) - datatypes.pop(unicode) - for k in datatypes.keys(): - dk = datatypes[k] - dk.sort() - dk.reverse() - BUT_TYPES[k] = range(len(dk)) - self.sorteddata = datatypes - - -# GUI: - -# gui callbacks: - -def gui(): # drawing the screen - - global SCREEN, START_SCREEN, CONFIG_SCREEN, KEYMENUS, LABELS - global BEVT_KEYMENU, BUT_KEYMENU, CFGKEY - global BUT_TYPES, SCROLL_DOWN, VARS_NUM - - WIDTH, HEIGHT = Window.GetAreaSize() - - theme = Theme.Get()[0] - tui = theme.get('ui') - ttxt = theme.get('text') - - COL_BG = float_colors(ttxt.back) - COL_TXT = ttxt.text - COL_TXTHI = ttxt.text_hi - - BGL.glClearColor(COL_BG[0],COL_BG[1],COL_BG[2],COL_BG[3]) - BGL.glClear(BGL.GL_COLOR_BUFFER_BIT) - BGL.glColor3ub(COL_TXT[0],COL_TXT[1], COL_TXT[2]) - - if SCREEN == START_SCREEN: - x = 10 - y = 10 - h = 20 - w = 90 - BGL.glRasterPos2i(x, y) - Draw.Text('Select a configuration key to access it. 
Press Q or ESC to leave.') - km_len = len(KEYMENUS) - km_columns = (WIDTH - x) / w - if km_columns == 0: km_rows = km_len - else: - km_rows = km_len / km_columns - if (km_len % km_columns): km_rows += 1 - if km_rows == 0: km_rows = 1 - ystart = y + 2*h*km_rows - if ystart > (HEIGHT - 70): ystart = HEIGHT - 70 - y = ystart - column = 1 - for i, km in enumerate(KEYMENUS): - column += 1 - BGL.glRasterPos2i(x + 2, y + h + 5) - Draw.Text(LABELS[i]) - BUT_KEYMENU[i] = Draw.Menu(km, BEVT_KEYMENU[i], - x, y, w - 10, h, 0, 'Choose a key to access its configuration data') - if column > km_columns: - column = 1 - y -= 2*h - if y < 35: break - x = 10 - else: x += w - x = 10 - y = 50 + ystart - BGL.glColor3ub(COL_TXTHI[0], COL_TXTHI[1], COL_TXTHI[2]) - BGL.glRasterPos2i(x, y) - Draw.Text('Scripts Configuration Editor') - Draw.PushButton('help', BEVT_HELP, x, 22, 45, 16, - 'View help information about this script (hotkey: H)') - - elif SCREEN == CONFIG_SCREEN: - x = y = 10 - h = 18 - data = CFGKEY.sorteddata - tips = CFGKEY.tips - fromdisk = CFGKEY.fromdisk - limits = CFGKEY.limits - VARS_NUM = 0 - for k in data.keys(): - VARS_NUM += len(data[k]) - lines = VARS_NUM + 5 # to account for header and footer - y = lines*h - if y > HEIGHT - 20: y = HEIGHT - 20 - BGL.glColor3ub(COL_TXTHI[0],COL_TXTHI[1], COL_TXTHI[2]) - BGL.glRasterPos2i(x, y) - Draw.Text('Scripts Configuration Editor') - y -= 20 - BGL.glColor3ub(COL_TXT[0],COL_TXT[1], COL_TXT[2]) - txtsize = 10 - if HEIGHT < lines*h: - BGL.glRasterPos2i(10, 5) - txtsize += Draw.Text('Arrow keys or mouse wheel to scroll, ') - BGL.glRasterPos2i(txtsize, 5) - Draw.Text('Q or ESC to return.') - BGL.glRasterPos2i(x, y) - Draw.Text('Key: "%s"' % CFGKEY.name) - bh = 16 - bw = 45 - by = 16 - i = -1 - if CFGKEY.scriptname: - i = 0 - Draw.PushButton('help', BEVT_HELP, x, by, bw, bh, - 'Show documentation for the script that owns this key (hotkey: H)') - Draw.PushButton('back', BEVT_BACK, x + (1+i)*bw, by, bw, bh, - 'Back to config keys selection screen (hotkey: ESC)') - Draw.PushButton('exit', BEVT_EXIT, x + (2+i)*bw, by, bw, bh, - 'Exit from Scripts Config Editor (hotkey: Q)') - Draw.PushButton('revert', BEVT_CANCEL, x + (3+i)*bw, by, bw, bh, - 'Revert data to original values (hotkey: U)') - Draw.PushButton('apply', BEVT_APPLY, x + (4+i)*bw, by, bw, bh, - 'Apply changes, if any (hotkey: ENTER)') - delmsg = 'Delete this data key from memory' - if fromdisk: delmsg = "%s and from disk" % delmsg - Draw.PushButton('delete', BEVT_DEL, x + (5+i)*bw, by, bw, bh, - '%s (hotkey: DELETE)' % delmsg) - if fromdisk: - Draw.Toggle("file", BEVT_DISK, x + 3 + (6+i)*bw, by, bw, bh, DISK_UPDATE, - 'Update also the file where this config key is stored') - i = -1 - top = -1 - y -= 20 - yend = 30 - if data.has_key(bool) and y > 0: - lst = data[bool] - for l in lst: - top += 1 - i += 1 - if top < SCROLL_DOWN: continue - y -= h - if y < yend: break - w = 20 - tog = data[bool][i][1] - if tips and tips.has_key(l[0]): tooltip = tips[l[0]] - else: tooltip = "click to toggle" - BUT_TYPES[bool][i] = Draw.Toggle("", BEVT_BOOL + i, - x, y, w, h, tog, tooltip) - BGL.glRasterPos2i(x + w + 3, y + 5) - Draw.Text(l[0].lower().replace('_', ' ')) - i = -1 - y -= 5 - if data.has_key(int) and y > 0: - lst = data[int] - for l in lst: - w = 70 - top += 1 - i += 1 - if top < SCROLL_DOWN: continue - y -= h - if y < yend: break - val = data[int][i][1] - if limits: min, max = limits[l[0]] - else: min, max = 0, 10 - if tips and tips.has_key(l[0]): tooltip = tips[l[0]] - else: tooltip = "click / drag to change" - 
BUT_TYPES[int][i] = Draw.Number("", BEVT_INT + i, - x, y, w, h, val, min, max, tooltip) - BGL.glRasterPos2i(x + w + 3, y + 3) - Draw.Text(l[0].lower().replace('_', ' ')) - i = -1 - y -= 5 - if data.has_key(float) and y > 0: - lst = data[float] - for l in lst: - w = 70 - top += 1 - i += 1 - if top < SCROLL_DOWN: continue - y -= h - if y < yend: break - val = data[float][i][1] - if limits: min, max = limits[l[0]] - else: min, max = 0.0, 1.0 - if tips and tips.has_key(l[0]): tooltip = tips[l[0]] - else: tooltip = "click and drag to change" - BUT_TYPES[float][i] = Draw.Number("", BEVT_FLOAT + i, - x, y, w, h, val, min, max, tooltip) - BGL.glRasterPos2i(x + w + 3, y + 3) - Draw.Text(l[0].lower().replace('_', ' ')) - i = -1 - y -= 5 - if data.has_key(str) and y > 0: - lst = data[str] - for l in lst: - top += 1 - i += 1 - if top < SCROLL_DOWN: continue - y -= h - if y < yend: break - name = l[0].lower() - is_dir = is_file = False - if name.find('_dir', -4) > 0: is_dir = True - elif name.find('_file', -5) > 0: is_file = True - w = WIDTH - 20 - wbrowse = 50 - if is_dir and w > wbrowse: w -= wbrowse - if tips and tips.has_key(l[0]): tooltip = tips[l[0]] - else: tooltip = "click to write a new string" - name = name.replace('_',' ') + ': ' - if len(l[1]) > MAX_STR_LEN: - l[1] = l[1][:MAX_STR_LEN] - BUT_TYPES[str][i] = Draw.String(name, BEVT_STR + i, - x, y, w, h, l[1], MAX_STR_LEN, tooltip) - if is_dir: - Draw.PushButton('browse', BEVT_BROWSEDIR + i, x+w+1, y, wbrowse, h, - 'click to open a file selector (pick any file in the desired dir)') - elif is_file: - Draw.PushButton('browse', BEVT_BROWSEFILE + i, x + w + 1, y, 50, h, - 'click to open a file selector') - - -def fit_scroll(): - global SCROLL_DOWN, VARS_NUM - max = VARS_NUM - 1 # so last item is always visible - if SCROLL_DOWN > max: - SCROLL_DOWN = max - elif SCROLL_DOWN < 0: - SCROLL_DOWN = 0 - - -def event(evt, val): # input events - - global SCREEN, START_SCREEN, CONFIG_SCREEN - global SCROLL_DOWN, CFGKEY - - if not val: return - - if evt == Draw.ESCKEY: - if SCREEN == START_SCREEN: Draw.Exit() - else: - if CFGKEY.needs_update(): - if Draw.PupMenu('UPDATE?%t|Data was changed') == 1: - CFGKEY.update() - SCREEN = START_SCREEN - SCROLL_DOWN = 0 - Draw.Redraw() - return - elif evt == Draw.QKEY: - if SCREEN == CONFIG_SCREEN and CFGKEY.needs_update(): - if Draw.PupMenu('UPDATE?%t|Data was changed') == 1: - CFGKEY.update() - Draw.Exit() - return - elif evt == Draw.HKEY: - if SCREEN == START_SCREEN: show_help() - elif CFGKEY.scriptname: show_help(CFGKEY.scriptname) - return - - elif SCREEN == CONFIG_SCREEN: - if evt in [Draw.DOWNARROWKEY, Draw.WHEELDOWNMOUSE]: - SCROLL_DOWN += 1 - fit_scroll() - elif evt in [Draw.UPARROWKEY, Draw.WHEELUPMOUSE]: - SCROLL_DOWN -= 1 - fit_scroll() - elif evt == Draw.UKEY: - if CFGKEY.needs_update(): - CFGKEY.revert() - elif evt == Draw.RETKEY or evt == Draw.PADENTER: - if CFGKEY.needs_update(): - CFGKEY.update() - elif evt == Draw.DELKEY: - if CFGKEY.delete(): - reset() - init_data() - SCREEN = START_SCREEN - SCROLL_DOWN = 0 - else: return - Draw.Redraw() - - -def button_event(evt): # gui button events - - global SCREEN, START_SCREEN, CONFIG_SCREEN, CFGKEY, DISK_UPDATE - global BEVT_KEYMENU, BUT_KEYMENU, BUT_TYPES, SCROLL_DOWN, GD, INDEX - global BEVT_EXIT, BEVT_BACK, BEVT_APPLY, BEVT_CANCEL, BEVT_HELP, FREEKEY_IDX - - if SCREEN == START_SCREEN: - for e in BEVT_KEYMENU: - if evt == e: - index = e - 1 - k = BUT_KEYMENU[index].val - 1 - CFGKEY = Config(GD[LABELS[index]][k][0], index != FREEKEY_IDX) - if CFGKEY.data: - 
SCREEN = CONFIG_SCREEN - Draw.Redraw() - return - if evt == BEVT_EXIT: - Draw.Exit() - elif evt == BEVT_HELP: - show_help() - return - - elif SCREEN == CONFIG_SCREEN: - datatypes = CFGKEY.sorteddata - if evt >= BEVT_BROWSEFILE: - INDEX = evt - BEVT_BROWSEFILE - Window.FileSelector(fs_file_callback, 'Choose file') - elif evt >= BEVT_BROWSEDIR: - INDEX = evt - BEVT_BROWSEDIR - Window.FileSelector(fs_dir_callback, 'Choose any file') - elif evt >= BEVT_STR: - var = BUT_TYPES[str][evt - BEVT_STR].val - datatypes[str][evt - BEVT_STR][1] = var - elif evt >= BEVT_FLOAT: - var = BUT_TYPES[float][evt - BEVT_FLOAT].val - datatypes[float][evt - BEVT_FLOAT][1] = var - elif evt >= BEVT_INT: - var = BUT_TYPES[int][evt - BEVT_INT].val - datatypes[int][evt - BEVT_INT][1] = var - elif evt >= BEVT_BOOL: - var = datatypes[bool][evt - BEVT_BOOL][1] - if var == True: var = False - else: var = True - datatypes[bool][evt - BEVT_BOOL][1] = var - - elif evt == BEVT_BACK: - if SCREEN == CONFIG_SCREEN: - SCREEN = START_SCREEN - SCROLL_DOWN = 0 - Draw.Redraw() - elif evt == BEVT_EXIT: - if CFGKEY.needs_update(): - if Draw.PupMenu('UPDATE?%t|Data was changed') == 1: - CFGKEY.update() - Draw.Exit() - return - elif evt == BEVT_APPLY: - if CFGKEY.needs_update(): - CFGKEY.update() - elif evt == BEVT_CANCEL: - if CFGKEY.needs_update(): - CFGKEY.revert() - elif evt == BEVT_DEL: - if CFGKEY.delete(): - reset() - init_data() - SCREEN = START_SCREEN - SCROLL_DOWN = 0 - elif evt == BEVT_DISK: - if DISK_UPDATE: DISK_UPDATE = False - else: DISK_UPDATE = True - elif evt == BEVT_HELP: - show_help(CFGKEY.scriptname) - return - else: - return - Draw.Redraw() - -# End of definitions - - -KEYS = get_keys() - -if not KEYS: - Draw.PupMenu("NO DATA: please read this help screen") - Blender.ShowHelp('config.py') -else: - fill_scripts_dict() - init_data() - Draw.Register(gui, event, button_event) diff --git a/release/scripts/console.py b/release/scripts/console.py deleted file mode 100644 index c6ae22a86f5..00000000000 --- a/release/scripts/console.py +++ /dev/null @@ -1,861 +0,0 @@ -#!BPY - -""" -Name: 'Interactive Python Console' -Blender: 245 -Group: 'System' -Tooltip: 'Interactive Python Console' -""" - -__author__ = "Campbell Barton aka ideasman42" -__url__ = ["www.blender.org", "blenderartists.org", "www.python.org"] -__bpydoc__ = """\ -This is an interactive console, similar to Python's own command line interpreter. Since it is embedded in Blender, it has access to all Blender Python modules. - -Those completely new to Python are recommended to check the link button above -that points to its official homepage, with news, downloads and documentation. - -Usage:
- Type your code and hit "Enter" to get it executed.
- - Right mouse click: Console Menu (Save output, etc);
- - Mousewheel: Scroll text - - Arrow keys: command history and cursor;
- - Shift + Backspace: Backspace whole word;
- - Shift + Arrow keys: jump words;
- - Ctrl + (+/- or mousewheel): Zoom text size;
- - Ctrl + Enter: auto complete based on variable names and modules loaded -- multiple choices pop up a menu;
- - Shift + Enter: multiline functions -- delays executing code until only Enter is pressed. -""" - -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -import Blender -import bpy -from Blender import * -import sys as python_sys -import StringIO - -# Constants -__DELIMETERS__ = '. ,=+-*/%<>&~][{}():\t' -__VARIABLE_DELIMETERS__ = ' ,=+-*/%<>&~{}():\t' - -__LINE_HISTORY__ = 500 - -global __FONT_SIZE__ - -__FONT_SIZES__ = ( ('tiny', 10), ('small', 12), ('normalfix', 14), ('large', 16) ) -__FONT_SIZE__ = 2 # index for the list above, normal default. - -global __CONSOLE_LINE_OFFSET__ -__CONSOLE_LINE_OFFSET__ = 0 - -cmdBuffer = [] # dosnt need to be global - -''' -# Generic Blender functions -def getActScriptWinRect(): - area = Window.GetAreaSize() - area = (area[0]-1, area[1]-1) - for scrInfo in Window.GetScreenInfo(Window.Types['SCRIPT'], 'win', ''): - if scrInfo['vertices'][2]-scrInfo['vertices'][0] == area[0]: - if scrInfo['vertices'][3]-scrInfo['vertices'][1] == area[1]: - return scrInfo['vertices'] - return None -''' - - -# menuText, # per group -def PupMenuLess(menu, groupSize=35): - more = [' more...'] - less = [' less...'] - - menuList= menu.split('|') - - # No Less Needed, just call. - if len(menuList) < groupSize: - return Draw.PupMenu(menu) - - title = menuList[0].split('%t')[0] - - # Split the list into groups - menuGroups = [[]] - for li in menuList[1:]: - if len(menuGroups[-1]) < groupSize: - menuGroups[-1].append(li) - else: - menuGroups.append([li]) - - # Stores teh current menu group we are looking at - groupIdx = 0 - while 1: - # Give us a title with the menu number - numTitle = [ ' '.join([title, str(groupIdx + 1), 'of', str(len(menuGroups)), '%t'])] - if groupIdx == 0: - menuString = '|'.join(numTitle + menuGroups[groupIdx] + more) - elif groupIdx == len(menuGroups)-1: - menuString = '|'.join(numTitle + less + menuGroups[groupIdx]) - else: # In the middle somewhere so Show a more and less - menuString = '|'.join(numTitle + less + menuGroups[groupIdx] + more) - result = Draw.PupMenu(menuString) - # User Exit - if result == -1: - return -1 - - if groupIdx == 0: # First menu - if result-1 < groupSize: - return result - else: # must be more - groupIdx +=1 - elif groupIdx == len(menuGroups): # Last Menu - if result == 1: # Must be less - groupIdx -= 1 - else: # Must be a choice - return result + (groupIdx*groupSize) - - else: - if result == 1: # Must be less - groupIdx -= 1 - elif result-2 == groupSize: - groupIdx +=1 - else: - return result - 1 + (groupIdx*groupSize) - - - -# Use newstyle classes, Im not bothering with inheretence -# but slots are faster. 
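# A minimal, hypothetical illustration of the __slots__ trade-off noted above
# (SlotsDemo is not part of this script): declaring __slots__ replaces the
# per-instance __dict__ with fixed attribute storage, so instances are smaller
# and attribute access is a little faster, at the cost of only allowing the
# declared names.
class SlotsDemo(object):
	__slots__ = ('cmd', 'exe')	# only these attribute names may exist
	def __init__(self, cmd, exe):
		self.cmd = cmd		# ok: 'cmd' is a declared slot
		self.exe = exe
# SlotsDemo('print 1', 0).extra = 1 would raise AttributeError.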
-class cmdLine(object): - __slots__ = [\ - 'cmd', # is the command string, or any other message - 'type',# type: 0:user input 1:program feedback 2:error message. 3:option feedback - 'exe' # 0- not yet executed 1:executed - ] - def __init__(self, cmd, type, exe): - self.cmd = cmd - self.type = type - self.exe = exe - -# Include external file with internal namespace -def include(filename): - file = open(filename, 'r') - filedata = file.read() - file.close() - return compile(filedata, filename, 'exec') - -# Writes command line data to a blender text file. -def writeCmdData(type): - newText = Text.New('command_output.py', 1) - if type == 3: newText.write('\n'.join( [ myCmd.cmd for myCmd in cmdBuffer ] )) - else: newText.write('\n'.join( [ myCmd.cmd for myCmd in cmdBuffer if myCmd.type is type] )) - Draw.PupMenu('%s written' % newText.name) - -def insertCmdData(): - texts = list(bpy.data.texts) - textNames = [tex.name for tex in texts] - if textNames: - choice = Draw.PupMenu('|'.join(textNames)) - if choice != -1: - text = texts[choice-1] - - # Add the text! - for l in text.asLines(): - cmdBuffer.append(cmdLine('%s ' % l, 0, 0)) - Draw.Redraw() - - -COLLECTED_VAR_NAMES = {} # a list of keys, each key has a list of absolute paths - -# Pain and simple recursice dir(), accepts a string -unused_types = str, dict, list, float, int, str, type, tuple, type(dir), type(None) -def rdir(dirString, depth=0): - #print ' ' * depth, dirString - # MAX DEPTH SET HERE - if depth > 5: - # print 'maxdepoth reached.' - return - - global COLLECTED_VAR_NAMES - dirStringSplit = dirString.split('.') - - exec('value=' + dirString) - - if type(value) in unused_types: - # print 'bad type' - return - dirList = dir(value) - - for dirItem in dirList: - if dirItem.startswith('_'): - continue - - dirData = None - try: - # Rare cases this can mess up, material.shader was a problem. - exec('dirData = %s.%s' % (dirString, dirItem)) - #print dirData - except: - # Dont bother with this data. - continue - #print 'HEY', dirData, dirItem - #if type(dirItem) != str: - # print dirItem, type(dirItem) - - if dirItem not in COLLECTED_VAR_NAMES: # .keys() - COLLECTED_VAR_NAMES[dirItem] = [] - - # Add the string - # splitD = dirString.split('"')[-2] - - # Example of dirString - # __CONSOLE_VAR_DICT__["Main"].scenes.active.render - - # Works but can be faster - # splitD = dirString.replace('__CONSOLE_VAR_DICT__["', '').replace('"]', '') - - splitD = dirString[22:].replace('"]', '') - - if splitD not in COLLECTED_VAR_NAMES[dirItem]: - # print dirItem, dirString, splitD, - COLLECTED_VAR_NAMES[dirItem].append(splitD) - - - # Stops recursice stuff, overlooping - #print type(dirItem) - #if type(dirData) == types.ClassType or \ - # type(dirData) == types.ModuleType: - type_dirData = type(dirData) - if type_dirData not in unused_types: - # print type(dirData), dirItem - # Dont loop up dirs for strings ints etc. - if dirItem not in dirStringSplit: - rdir( '%s.%s' % (dirString, dirItem), depth+1) - ''' - elif depth == 0: # Add local variables - # print type(dirData), dirItem - # Dont loop up dirs for strings ints etc. 
- if dirItem not in dirStringSplit: - rdir( '%s.%s' % (dirString, dirItem), depth+1) - ''' - -def recursive_dir(): - global COLLECTED_VAR_NAMES - - for name in __CONSOLE_VAR_DICT__: # .keys() - if not name.startswith('_'): # Dont pick names like __name__ - rdir('__CONSOLE_VAR_DICT__["%s"]' % name) - #print COLLECTED_VAR_NAMES - COLLECTED_VAR_NAMES[name] = [''] - return COLLECTED_VAR_NAMES - -# Runs the code line(s) the user has entered and handle errors -# As well as feeding back the output into the blender window. -def runUserCode(__USER_CODE_STRING__): - global __CONSOLE_VAR_DICT__ # We manipulate the variables here. loading and saving from localspace to this global var. - - # Open A File like object to write all output to, that would useually be printed. - python_sys.stdout.flush() # Get rid of whatever came before - __FILE_LIKE_STRING__ = StringIO.StringIO() # make a new file like string, this saves up from making a file. - __STD_OUTPUT__ = python_sys.stdout # we need to store the normal output. - python_sys.stdout=__FILE_LIKE_STRING__ # Now anything printed will be written to the file like string. - - # Try and run the user entered line(s) - try: - # Load all variabls from global dict to local space. - __TMP_VAR_NAME__ = __TMP_VAR__ = '' # so as not to raise an error when del'ing - - for __TMP_VAR_NAME__, __TMP_VAR__ in __CONSOLE_VAR_DICT__.items(): - exec('%s%s' % (__TMP_VAR_NAME__,'=__TMP_VAR__')) - del __TMP_VAR_NAME__ - del __TMP_VAR__ - - # Now all the vars are loaded, execute the code. # Newline thanks to phillip, - exec(compile(__USER_CODE_STRING__, 'blender_cmd.py', 'single')) #exec(compile(__USER_CODE_STRING__, 'blender_cmd.py', 'exec')) - - # Flush global dict, allow the user to remove items. - __CONSOLE_VAR_DICT__ = {} - - __TMP_VAR_NAME__ = '' # so as not to raise an error when del'ing - # Write local veriables to global __CONSOLE_VAR_DICT__ - for __TMP_VAR_NAME__ in dir(): - if __TMP_VAR_NAME__ != '__FILE_LIKE_STRING__' and\ - __TMP_VAR_NAME__ != '__STD_OUTPUT__' and\ - __TMP_VAR_NAME__ != '__TMP_VAR_NAME__' and\ - __TMP_VAR_NAME__ != '__USER_CODE_STRING__': - - # Execute the local > global coversion. - exec('%s%s' % ('__CONSOLE_VAR_DICT__[__TMP_VAR_NAME__]=', __TMP_VAR_NAME__)) - del __TMP_VAR_NAME__ - - except: # Prints the REAL exception. - error = '%s: %s' % (python_sys.exc_type, python_sys.exc_value) - for errorLine in error.split('\n'): - cmdBuffer.append(cmdLine(errorLine, 2, None)) # new line to type into - - python_sys.stdout = __STD_OUTPUT__ # Go back to output to the normal blender console - - # Copy all new output to cmdBuffer - - __FILE_LIKE_STRING__.seek(0) # the readline function requires that we go back to the start of the file. 
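	# The capture pattern used above, shown in isolation as a sketch (the
	# names buf/old_stdout/user_code are illustrative only):
	#   buf = StringIO.StringIO()
	#   old_stdout, python_sys.stdout = python_sys.stdout, buf
	#   try:
	#       exec(user_code)
	#   finally:
	#       python_sys.stdout = old_stdout
	#   captured = buf.getvalue().splitlines()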
- - for line in __FILE_LIKE_STRING__.readlines(): - cmdBuffer.append(cmdLine(line, 1, None)) - - cmdBuffer.append(cmdLine(' ', 0, 0)) # new line to type into - python_sys.stdout=__STD_OUTPUT__ - __FILE_LIKE_STRING__.close() - - - - - -#------------------------------------------------------------------------------# -# event handling code # -#------------------------------------------------------------------------------# -def handle_event(evt, val): - - # Insert Char into the cammand line - def insCh(ch): # Instert a char - global cursor - # Later account for a cursor variable - cmdBuffer[-1].cmd = ('%s%s%s' % ( cmdBuffer[-1].cmd[:cursor], ch, cmdBuffer[-1].cmd[cursor:])) - - #------------------------------------------------------------------------------# - # Define Complex Key Actions # - #------------------------------------------------------------------------------# - def actionEnterKey(): - global histIndex, cursor - - def getIndent(string): - # Gather white space to add in the previous line - # Ignore the last char since its padding. - whiteSpace = '' - #for i in range(len(cmdBuffer[-1].cmd)): - for i in xrange(len(string)-1): - if cmdBuffer[-1].cmd[i] == ' ' or cmdBuffer[-1].cmd[i] == '\t': - whiteSpace += string[i] - else: - break - return whiteSpace - - # Autocomplete - if Window.GetKeyQualifiers() & Window.Qual.CTRL: - actionAutoCompleate() - return - - # Are we in the middle of a multiline part or not? - # try be smart about it - if cmdBuffer[-1].cmd.split('#')[0].rstrip().endswith(':'): - # : indicates an indent is needed - cmdBuffer.append(cmdLine('\t%s ' % getIndent(cmdBuffer[-1].cmd), 0, 0)) - print ': indicates an indent is needed' - - elif cmdBuffer[-1].cmd[0] in [' ', '\t'] and len(cmdBuffer[-1].cmd) > 1 and cmdBuffer[-1].cmd.split(): - # white space at the start means he havnt finished the multiline. - cmdBuffer.append(cmdLine('%s ' % getIndent(cmdBuffer[-1].cmd), 0, 0)) - print 'white space at the start means he havnt finished the multiline.' - - elif Window.GetKeyQualifiers() & Window.Qual.SHIFT: - # Crtl forces multiline - cmdBuffer.append(cmdLine('%s ' % getIndent(cmdBuffer[-1].cmd), 0, 0)) - print 'Crtl forces multiline' - - else: # Execute multiline code block - - # Multiline code will still run with 1 line, - multiLineCode = ['if 1:'] # End of the multiline first. - - # Seek the start of the file multiline - i = 1 - while cmdBuffer[-i].exe == 0: - i+=1 - - while i > 1: - i-=1 - - if cmdBuffer[-i].cmd == ' ':# Tag as an output type so its not used in the key history - cmdBuffer[-i].type = 1 - else: # Tab added at the start for added if 1: statement - multiLineCode.append('\t%s' % cmdBuffer[-i].cmd ) - - # Mark as executed - cmdBuffer[-i].exe = 1 - - multiLineCode.append('\tpass') # reverse will make this the start. - - # Dubug, print the code that is executed. 
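	# Sketch of the wrapping trick used here (values illustrative only): the
	# buffered lines are each indented one tab and nested under a dummy
	# "if 1:" header, closed with "pass", so a multi-line block compiles as a
	# single unit, e.g.
	#   '\n'.join(['if 1:', '\tx = 1', '\tprint x', '\tpass'])
	# which runUserCode() then compiles and executes.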
- #for m in multiLineCode: print m - - runUserCode('\n'.join(multiLineCode)) - - # Clear the output based on __LINE_HISTORY__ - if len(cmdBuffer) > __LINE_HISTORY__: - cmdBuffer[:__LINE_HISTORY__] = [] - - histIndex = cursor = -1 # Reset cursor and history - - def actionUpKey(): - global histIndex - if abs(histIndex)+1 >= len(cmdBuffer): - histIndex = -1 - - # When wrapping allow 1 plank lines - if cmdBuffer[-1].cmd != ' ': - cmdBuffer[-1].cmd = ' ' - return - - histIndex_orig = histIndex - histIndex -= 1 - - while (cmdBuffer[histIndex].type != 0 and abs(histIndex) < len(cmdBuffer)) or \ - ( cmdBuffer[histIndex].cmd == cmdBuffer[histIndex_orig].cmd): - histIndex -= 1 - - if cmdBuffer[histIndex].type == 0: # we found one - cmdBuffer[-1].cmd = cmdBuffer[histIndex].cmd - - def actionDownKey(): - global histIndex - if histIndex >= -2: - histIndex = -len(cmdBuffer) - - # When wrapping allow 1 plank lines - if cmdBuffer[-1].cmd != ' ': - cmdBuffer[-1].cmd = ' ' - return - - histIndex_orig = histIndex - histIndex += 1 - while (cmdBuffer[histIndex].type != 0 and histIndex != -2) or \ - ( cmdBuffer[histIndex].cmd == cmdBuffer[histIndex_orig].cmd): - - histIndex += 1 - - if cmdBuffer[histIndex].type == 0: # we found one - cmdBuffer[-1].cmd = cmdBuffer[histIndex].cmd - - def actionRightMouse(): - global __FONT_SIZE__ - choice = Draw.PupMenu('Console Menu%t|Write Input Data (white)|Write Output Data (blue)|Write Error Data (red)|Write All Text|%l|Insert Blender text|%l|Font Size|%l|Clear Output|Quit') - - if choice == 1: - writeCmdData(0) # type 0 user - elif choice == 2: - writeCmdData(1) # type 1 user output - elif choice == 3: - writeCmdData(2) # type 2 errors - elif choice == 4: - writeCmdData(3) # All - elif choice == 6: - insertCmdData() # Insert text from Blender and run it. - elif choice == 8: - # Fontsize. - font_choice = Draw.PupMenu('Font Size%t|Large|Normal|Small|Tiny') - if font_choice != -1: - if font_choice == 1: - __FONT_SIZE__ = 3 - elif font_choice == 2: - __FONT_SIZE__ = 2 - elif font_choice == 3: - __FONT_SIZE__ = 1 - elif font_choice == 4: - __FONT_SIZE__ = 0 - Draw.Redraw() - elif choice == 10: # Clear all output - cmdBuffer[:] = [cmd for cmd in cmdBuffer if cmd.type == 0] # keep user input - Draw.Redraw() - elif choice == 11: # Exit - Draw.Exit() - - - # Auto compleating, quite complex- use recutsice dir for the moment. - def actionAutoCompleate(): # Ctrl + Tab - if not cmdBuffer[-1].cmd[:cursor].split(): - return - - - RECURSIVE_DIR = recursive_dir() - - # get last name of user input - editVar = cmdBuffer[-1].cmd[:cursor] - # Split off spaces operators etc from the staryt of the command so we can use the startswith function. - for splitChar in __VARIABLE_DELIMETERS__: - editVar = editVar[:-1].split(splitChar)[-1] + editVar[-1] - - - # Now we should have the var by its self - if editVar: - possibilities = [] - - for __TMP_VAR_NAME__ in RECURSIVE_DIR: #.keys(): - #print '\t', __TMP_VAR_NAME__ - if __TMP_VAR_NAME__ == editVar: - # print 'ADITVAR IS A VAR' - pass - ''' - elif __TMP_VAR_NAME__.startswith( editVar ): - print __TMP_VAR_NAME__, 'aaa' - possibilities.append( __TMP_VAR_NAME__ ) - ''' - possibilities.append( __TMP_VAR_NAME__ ) - - if len(possibilities) == 1: - cmdBuffer[-1].cmd = ('%s%s%s' % (cmdBuffer[-1].cmd[:cursor - len(editVar)], possibilities[0], cmdBuffer[-1].cmd[cursor:])) - - elif possibilities: # If its not just [] - # -1 with insert is the second last. 
- - # Text choice - #cmdBuffer.insert(-1, cmdLine('options: %s' % ' '.join(possibilities), 3, None)) - - menuList = [] # A lits of tuples- ABSOLUTE, RELATIVE - - for __TMP_VAR_NAME__ in possibilities: - for usage in RECURSIVE_DIR[__TMP_VAR_NAME__]: - # Account for non absolute (variables for eg.) - if usage: # not '' - absName = '%s.%s' % (usage, __TMP_VAR_NAME__) - - if __TMP_VAR_NAME__.startswith(editVar): - menuList.append( # Used for names and can be entered when pressing shift. - (absName, # Absolute name - __TMP_VAR_NAME__) # Relative name, non shift - ) - - #else: - # if absName.find(editVar) != -1: - # menuList.append((__TMP_VAR_NAME__, __TMP_VAR_NAME__)) # Used for names and can be entered when pressing shift. - - # No items to display? no menu - if not menuList: - return - - menuList.sort() - - choice = PupMenuLess( # Menu for the user to choose the autocompleate - 'Choices (Shift for local name, Ctrl for Docs)%t|' + # Title Text - '|'.join(['%s, %s' % m for m in menuList])) # Use Absolute names m[0] - - if choice != -1: - if Window.GetKeyQualifiers() & Window.Qual.CTRL: # Help - cmdBuffer[-1].cmd = ('help(%s%s) ' % (cmdBuffer[-1].cmd[:cursor - len(editVar)], menuList[choice-1][0])) - elif Window.GetKeyQualifiers() & Window.Qual.SHIFT: # Put in the long name - cmdBuffer[-1].cmd = ('%s%s%s' % (cmdBuffer[-1].cmd[:cursor - len(editVar)], menuList[choice-1][1], cmdBuffer[-1].cmd[cursor:])) - else: # Only paste in the Short name - cmdBuffer[-1].cmd = ('%s%s%s' % (cmdBuffer[-1].cmd[:cursor - len(editVar)], menuList[choice-1][0], cmdBuffer[-1].cmd[cursor:])) - - - else: - # print 'NO EDITVAR' - return - - # ------------------end------------------ # - - # Quit from menu only - #if (evt == Draw.ESCKEY and not val): - # Draw.Exit() - if evt == Draw.MOUSEX or evt == Draw.MOUSEY: # AVOID TOO MANY REDRAWS. - return - - - global cursor - global histIndex - global __FONT_SIZE__ - global __CONSOLE_LINE_OFFSET__ - - ascii = Blender.event - - resetScroll = True - - #------------------------------------------------------------------------------# - # key codes and key handling # - #------------------------------------------------------------------------------# - - # UP DOWN ARROW KEYS, TO TRAVERSE HISTORY - if (evt == Draw.UPARROWKEY and val): actionUpKey() - elif (evt == Draw.DOWNARROWKEY and val): actionDownKey() - - elif (evt == Draw.RIGHTARROWKEY and val): - if Window.GetKeyQualifiers() & Window.Qual.SHIFT: - wordJump = False - newCursor = cursor+1 - while newCursor<0: - - if cmdBuffer[-1].cmd[newCursor] not in __DELIMETERS__: - newCursor+=1 - else: - wordJump = True - break - if wordJump: # Did we find a new cursor pos? - cursor = newCursor - else: - cursor = -1 # end of line - else: - cursor +=1 - if cursor > -1: - cursor = -1 - - elif (evt == Draw.LEFTARROWKEY and val): - if Window.GetKeyQualifiers() & Window.Qual.SHIFT: - wordJump = False - newCursor = cursor-1 - while abs(newCursor) < len(cmdBuffer[-1].cmd): - - if cmdBuffer[-1].cmd[newCursor] not in __DELIMETERS__ or\ - newCursor == cursor: - newCursor-=1 - else: - wordJump = True - break - if wordJump: # Did we find a new cursor pos? 
- cursor = newCursor - else: - cursor = -len(cmdBuffer[-1].cmd) # Start of line - - else: - if len(cmdBuffer[-1].cmd) > abs(cursor): - cursor -=1 - - elif (evt == Draw.HOMEKEY and val): - cursor = -len(cmdBuffer[-1].cmd) - - elif (evt == Draw.ENDKEY and val): - cursor = -1 - - elif (evt == Draw.TABKEY and val): - insCh('\t') - - elif (evt == Draw.BACKSPACEKEY and val): - if Window.GetKeyQualifiers() & Window.Qual.SHIFT: - i = -1 - for d in __DELIMETERS__: - i = max(i, cmdBuffer[-1].cmd[:cursor-1].rfind(d)) - if i == -1: - i=0 - cmdBuffer[-1].cmd = ('%s%s' % (cmdBuffer[-1].cmd[:i] , cmdBuffer[-1].cmd[cursor:])) - - else: - # Normal backspace. - cmdBuffer[-1].cmd = ('%s%s' % (cmdBuffer[-1].cmd[:cursor-1] , cmdBuffer[-1].cmd[cursor:])) - - elif (evt == Draw.DELKEY and val) and cursor < -1: - cmdBuffer[-1].cmd = cmdBuffer[-1].cmd[:cursor] + cmdBuffer[-1].cmd[cursor+1:] - cursor +=1 - - elif ((evt == Draw.RETKEY or evt == Draw.PADENTER) and val): - actionEnterKey() - - elif (evt == Draw.RIGHTMOUSE and not val): actionRightMouse(); return - - elif (evt == Draw.PADPLUSKEY or evt == Draw.EQUALKEY or evt == Draw.WHEELUPMOUSE) and val and Window.GetKeyQualifiers() & Window.Qual.CTRL: - __FONT_SIZE__ += 1 - __FONT_SIZE__ = min(len(__FONT_SIZES__)-1, __FONT_SIZE__) - elif (evt == Draw.PADMINUS or evt == Draw.MINUSKEY or evt == Draw.WHEELDOWNMOUSE) and val and Window.GetKeyQualifiers() & Window.Qual.CTRL: - __FONT_SIZE__ -=1 - __FONT_SIZE__ = max(0, __FONT_SIZE__) - - - elif evt == Draw.WHEELUPMOUSE and val: - __CONSOLE_LINE_OFFSET__ += 1 - __CONSOLE_LINE_OFFSET__ = min(len(cmdBuffer)-2, __CONSOLE_LINE_OFFSET__) - resetScroll = False - - elif evt == Draw.WHEELDOWNMOUSE and val: - __CONSOLE_LINE_OFFSET__ -= 1 - __CONSOLE_LINE_OFFSET__ = max(0, __CONSOLE_LINE_OFFSET__) - resetScroll = False - - - elif ascii: - insCh(chr(ascii)) - else: - return # dont redraw. - - # If the user types in anything then scroll to bottom. - if resetScroll: - __CONSOLE_LINE_OFFSET__ = 0 - Draw.Redraw() - - -def draw_gui(): - # Get the bounds from ObleGL directly - __CONSOLE_RECT__ = BGL.Buffer(BGL.GL_FLOAT, 4) - BGL.glGetFloatv(BGL.GL_SCISSOR_BOX, __CONSOLE_RECT__) - __CONSOLE_RECT__= __CONSOLE_RECT__.list - - # Clear the screen - BGL.glClearColor(0.0, 0.0, 0.0, 1.0) - BGL.glClear(BGL.GL_COLOR_BUFFER_BIT) # use it to clear the color buffer - - - # Fixed margin. use a margin since 0 margin can be hard to seewhen close to a crt's edge. 
- margin = 4 - - # Convenience - FNT_NAME, FNT_HEIGHT = __FONT_SIZES__[__FONT_SIZE__] - - # Draw cursor location colour - if __CONSOLE_LINE_OFFSET__ == 0: - cmd2curWidth = Draw.GetStringWidth(cmdBuffer[-1].cmd[:cursor], FNT_NAME) - BGL.glColor3f(0.8, 0.2, 0.2) - if cmd2curWidth == 0: - BGL.glRecti(margin,2,margin+2, FNT_HEIGHT+2) - else: - BGL.glRecti(margin + cmd2curWidth-2,2, margin+cmd2curWidth, FNT_HEIGHT+2) - - BGL.glColor3f(1,1,1) - # Draw the set of cammands to the buffer - consoleLineIdx = __CONSOLE_LINE_OFFSET__ + 1 - wrapLineIndex = 0 - while consoleLineIdx < len(cmdBuffer) and __CONSOLE_RECT__[3] > (consoleLineIdx - __CONSOLE_LINE_OFFSET__) * FNT_HEIGHT: - if cmdBuffer[-consoleLineIdx].type == 0: - BGL.glColor3f(1, 1, 1) - elif cmdBuffer[-consoleLineIdx].type == 1: - BGL.glColor3f(.3, .3, 1) - elif cmdBuffer[-consoleLineIdx].type == 2: - BGL.glColor3f(1.0, 0, 0) - elif cmdBuffer[-consoleLineIdx].type == 3: - BGL.glColor3f(0, 0.8, 0) - else: - BGL.glColor3f(1, 1, 0) - - if consoleLineIdx == 1: # user input - BGL.glRasterPos2i(margin, (FNT_HEIGHT * (consoleLineIdx-__CONSOLE_LINE_OFFSET__)) - 8) - Draw.Text(cmdBuffer[-consoleLineIdx].cmd, FNT_NAME) - else: # WRAP - lwid = Draw.GetStringWidth(cmdBuffer[-consoleLineIdx].cmd, FNT_NAME) - if margin + lwid > __CONSOLE_RECT__[2]: - wrapLineList = [] - wtext = cmdBuffer[-consoleLineIdx].cmd - wlimit = len(wtext) - chunksz = int(( __CONSOLE_RECT__[2] - margin ) / (lwid / len(wtext))) - lstart = 0 - fsize = FNT_NAME - while lstart < wlimit: - lend = min(lstart+chunksz,wlimit) - ttext = wtext[lstart:lend] - while lend < wlimit and Draw.GetStringWidth(ttext, fsize) + margin < __CONSOLE_RECT__[2]: - lend += 1 - ttext = wtext[lstart:lend] - while lend > lstart+1 and Draw.GetStringWidth(ttext, fsize) + margin > __CONSOLE_RECT__[2]: - lend -= 1 - ttext = wtext[lstart:lend] - wrapLineList.append(ttext) - lstart = lend - # Now we have a list of lines, draw them (OpenGLs reverse ordering requires this odd change) - wrapLineList.reverse() - for wline in wrapLineList: - BGL.glRasterPos2i(margin, (FNT_HEIGHT*((consoleLineIdx-__CONSOLE_LINE_OFFSET__) + wrapLineIndex)) - 8) - Draw.Text(wline, FNT_NAME) - wrapLineIndex += 1 - wrapLineIndex-=1 # otherwise we get a silly extra line. - - else: # no wrapping. - - BGL.glRasterPos2i(margin, (FNT_HEIGHT * ((consoleLineIdx-__CONSOLE_LINE_OFFSET__)+wrapLineIndex)) - 8) - Draw.Text(cmdBuffer[-consoleLineIdx].cmd, FNT_NAME) - consoleLineIdx += 1 - -# This recieves the event index, call a function from here depending on the event. -def handle_button_event(evt): - pass - - -# Run the console -__CONSOLE_VAR_DICT__ = {} # Initialize var dict - - -# Print Startup lines, add __bpydoc__ to the console startup. -for l in __bpydoc__.split('
'): - cmdBuffer.append( cmdLine(l, 1, None) ) - - -histIndex = cursor = -1 # How far back from the first letter are we? - in current CMD line, history if for moving up and down lines. - -# Autoexec, startup code. -scriptDir = Get('scriptsdir') -console_autoexec = None -if scriptDir: - if not scriptDir.endswith(Blender.sys.sep): - scriptDir += Blender.sys.sep - - console_autoexec = '%s%s' % (scriptDir, 'console_autoexec.py') - - if not sys.exists(console_autoexec): - # touch the file - try: - open(console_autoexec, 'w').close() - cmdBuffer.append(cmdLine('...console_autoexec.py not found, making new in scripts dir', 1, None)) - except: - cmdBuffer.append(cmdLine('...console_autoexec.py could not write, this is ok', 1, None)) - scriptDir = None # make sure we only use this for console_autoexec.py - - if not sys.exists(console_autoexec): - console_autoexec = None - - else: - cmdBuffer.append(cmdLine('...Using existing console_autoexec.py in scripts dir', 1, None)) - - - -#-Autoexec---------------------------------------------------------------------# -# Just use the function to jump into local naming mode. -# This is so we can loop through all of the autoexec functions / vars and add them to the __CONSOLE_VAR_DICT__ -def include_console(includeFile): - global __CONSOLE_VAR_DICT__ # write autoexec vars to this. - - # Execute an external py file as if local - exec(include(includeFile)) - -def standard_imports(): - # Write local to global __CONSOLE_VAR_DICT__ for reuse, - - exec('%s%s' % ('__CONSOLE_VAR_DICT__["bpy"]=', 'bpy')) - exec('%s%s' % ('__CONSOLE_VAR_DICT__["Blender"]=', 'Blender')) - - for ls in (dir(), dir(Blender)): - for __TMP_VAR_NAME__ in ls: - # Execute the local > global coversion. - exec('%s%s' % ('__CONSOLE_VAR_DICT__[__TMP_VAR_NAME__]=', __TMP_VAR_NAME__)) - - # Add dummy imports to input so output scripts to a text file work as expected - cmdBuffer.append(cmdLine('import bpy', 0, 1)) - cmdBuffer.append(cmdLine('import Blender', 0, 1)) # pretend we have been executed, as we kindof have. - cmdBuffer.append(cmdLine('from Blender import *', 0, 1)) - -if scriptDir and console_autoexec: - include_console(console_autoexec) # pass the blender module - -standard_imports() # import Blender and bpy - -#-end autoexec-----------------------------------------------------------------# - - -# Append new line to write to -cmdBuffer.append(cmdLine(' ', 0, 0)) - -#------------------------------------------------------------------------------# -# register the event handling code, GUI # -#------------------------------------------------------------------------------# -def main(): - Draw.Register(draw_gui, handle_event, handle_button_event) - -if __name__ == '__main__': - main() diff --git a/release/scripts/discombobulator.py b/release/scripts/discombobulator.py deleted file mode 100644 index 6dbb4e5382b..00000000000 --- a/release/scripts/discombobulator.py +++ /dev/null @@ -1,1526 +0,0 @@ -#!BPY - -""" -Name: 'Discombobulator' -Blender: 237 -Group: 'Mesh' -Tip: 'Adds random geometry to a mesh' -""" - -__author__ = "Evan J. Rosky (syrux)" -__url__ = ("Script's homepage, http://evan.nerdsofparadise.com/programs/discombobulator/index.html") -__version__ = "237" -__bpydoc__ = """\ -Discombobulator adds random geometry to a mesh. - -As an example, this script can easily give a "high-tech" -look to walls and spaceships. - -Definitions:
- - Protrusions: extrusions of each original face on the mesh. -You may have from 1 to 4 protrusions on each face.
- - Taper: The tips of each protrusion will be a percentage -smaller than the base.
- - Doodads: small extruded blocks/shapes that are randomly placed -about the top of a protrusion or face. - - -Usage:
- Input your settings, make sure the mesh you would like to modify -is selected (active) and then click on "Discombobulate".
- See the scripts tutorial page (on the homepage) for more info. - - -New Features:
- - Will use existing materials if there are any.
- - Clicking "Assign materials by part" will allow assigning -of different material indices to Protrusion or Doodad Sides -and Tops in the gui element below it.
- - Setting a material index to 0 will use whatever material -is assigned to the face that is discombobulated. - - You can now scroll using the arrow keys. - - -Notes:
- - Modifications can be restricted to selected faces -by setting "Only selected faces" for protrusions and/or -doodads.
- - It's possible to restrict mesh generation to add only -protrusions or only doodads instead of both.
- - You may also choose to have Discombobulator select the -tops of created protrusions by clicking the corresponding -number of protrusion buttons under "Select Tops". You may -also do the same for doodads by choosing "Select Doodads" and -"Only Select Tops". You may choose to select the whole doodads -by leaving "Only Select Tops" off.
- - By selecting "Deselect Selected" you can have -discombobulator deselect everything but the selections it -makes.
- - The "Face %" option will set the percentage of faces that -will be modified either for the doodads or the protrusions.
- - "Copy Before Modifying" will create a new object with the -modifications where leaving it off will overwrite the original -mesh.
- -You can find more information at the Link above. -""" - - -# $Id$ -# -# Updated 2006-09-26 -# Changes since last version: -# > Works with Blender CVS and hopefully with Blender 2.40. -# > Swaps min/max values when min>max rather than complaining. -# > Will keep previously assigned materials. -# > Now allows user to assign custom material indices to -# Protrusion and Doodad Sides and Tops. -# > The initial Gui Layout will change depending on the aspect -# ratio of the window it is in. -# > Using the arrow keys will scroll the gui. -# -# -------------------------------------------------------------------------- -# Discombobulator v2.1b -# by Evan J. Rosky, 2005 -# This plugin is protected by the GPL: Gnu Public Licence -# GPL - http://www.gnu.org/copyleft/gpl.html -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Copyright (C) 2005: Evan J. Rosky -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -#Hit Alt-P to run - -import Blender -from Blender import NMesh,Object,Material,Window,Types,Scene -from Blender.NMesh import Vert,Face -from Blender.Mathutils import * - -import defaultdoodads -import BPyMathutils -from BPyMathutils import genrand -a = BPyMathutils.sgenrand(int(round(Rand(1000,99999),0))) - -#Create random numbers -def randnum(low,high): - num = genrand() - num = num*(high-low) - num = num+low - return num - -#Object Vars -newmesh = NMesh.GetRaw() -materialArray = [0] - -#Material Vars -reassignMats = 0 -protSideMat = 1 -protTopMat = 2 -doodSideMat = 3 -doodTopMat = 4 -thereAreMats = 0 -currmat = 0 - -#Global Vars -makenewobj = 1 -errortext = "Remember to select an object." 
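# Sketch of the scaling done by randnum() above: a 0..1 sample is stretched to
# the requested range via sample*(high-low)+low. The stand-in below uses
# Python's random module instead of BPyMathutils.genrand() (an illustrative
# assumption only; the script itself keeps using genrand):
#	import random
#	def randnum_sketch(low, high):
#		return random.random()*(high - low) + low	# e.g. 0.2..0.4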
-editmode = 0 - -#Protrusion Vars -makeprots = 1 -faceschangedpercent = 1.0 -minimumheight = 0.2 -maximumheight = 0.4 -subface1 = 1 -subface2 = 1 -subface3 = 1 -subface4 = 1 -subfaceArray = [1,2,3,4] -minsubfaces = 1 -minimumtaperpercent = 0.15 -maximumtaperpercent = 0.35 -useselectedfaces = 0 -selectface1 = 1 -selectface2 = 1 -selectface3 = 1 -selectface4 = 1 -deselface = 1 - -#Doodad Vars -makedoodads = 1 -doodadfacepercent = 1.0 -selectdoodad = 0 -onlyonprotrusions = 0 -doodonselectedfaces = 0 -selectdoodadtoponly = 0 -doodad1 = 1 -doodad2 = 1 -doodad3 = 1 -doodad4 = 1 -doodad5 = 1 -doodad6 = 1 -doodadminperface = 2 -doodadmaxperface = 6 -doodadminsize = 0.15 -doodadmaxsize = 0.45 -doodadminheight = 0.0 -doodadmaxheight = 0.1 -doodadArray = [1,2,3,4,5,6] - -def makeSubfaceArray(): - global subfaceArray - global subface1 - global subface2 - global subface3 - global subface4 - - subfaceArray = [] - if subface1 > 0: - subfaceArray.append(1) - if subface2 > 0: - subfaceArray.append(2) - if subface3 > 0: - subfaceArray.append(3) - if subface4 > 0: - subfaceArray.append(4) - -def makeDoodadArray(): - global doodadArray - global doodad1 - global doodad2 - global doodad3 - global doodad4 - global doodad5 - global doodad6 - - doodadArray = [] - if doodad1 > 0: - doodadArray.append(1) - if doodad2 > 0: - doodadArray.append(2) - if doodad3 > 0: - doodadArray.append(3) - if doodad4 > 0: - doodadArray.append(4) - if doodad5 > 0: - doodadArray.append(5) - if doodad6 > 0: - doodadArray.append(6) - -def extrude(mid,nor,protrusion,v1,v2,v3,v4,tosel=1,flipnor=0): - taper = 1 - randnum(minimumtaperpercent,maximumtaperpercent) - newmesh_verts = newmesh.verts - newmesh_faces = newmesh.faces - - vert = newmesh_verts[v1] - point = (vert.co - mid)*taper + mid + protrusion*Vector(nor) - ver = Vert(point[0],point[1],point[2]) - ver.sel = tosel - newmesh_verts.append(ver) - vert = newmesh_verts[v2] - point = (vert.co - mid)*taper + mid + protrusion*Vector(nor) - ver = Vert(point[0],point[1],point[2]) - ver.sel = tosel - newmesh_verts.append(ver) - vert = newmesh_verts[v3] - point = (vert.co - mid)*taper + mid + protrusion*Vector(nor) - ver = Vert(point[0],point[1],point[2]) - ver.sel = tosel - newmesh_verts.append(ver) - vert = newmesh_verts[v4] - point = (vert.co - mid)*taper + mid + protrusion*Vector(nor) - ver = Vert(point[0],point[1],point[2]) - ver.sel = tosel - newmesh_verts.append(ver) - - faceindex = len(newmesh_verts) - 4 - - #side face 1 - face = Face([newmesh_verts[v1], newmesh_verts[v2], newmesh_verts[faceindex+1], newmesh_verts[faceindex]]) - if flipnor != 0: - face.v.reverse() - if thereAreMats == 1: - if reassignMats == 0 or protSideMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = protSideMat-1 - newmesh_faces.append(face) - - #side face 2 - face = Face([newmesh_verts[v2], newmesh_verts[v3], newmesh_verts[faceindex+2], newmesh_verts[faceindex+1]]) - if flipnor != 0: - face.v.reverse() - if thereAreMats == 1: - if reassignMats == 0 or protSideMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = protSideMat-1 - newmesh_faces.append(face) - - #side face 3 - face = Face([newmesh_verts[v3], newmesh_verts[v4], newmesh_verts[faceindex+3], newmesh_verts[faceindex+2]]) - if flipnor != 0: - face.v.reverse() - if thereAreMats == 1: - if reassignMats == 0 or protSideMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = protSideMat-1 - newmesh_faces.append(face) - - #side face 4 - face = Face([newmesh_verts[v4], newmesh_verts[v1], 
newmesh_verts[faceindex], newmesh_verts[faceindex+3]]) - if flipnor != 0: - face.v.reverse() - if thereAreMats == 1: - if reassignMats == 0 or protSideMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = protSideMat-1 - newmesh_faces.append(face) - - #top face - face = Face(newmesh_verts[-4:]) - if flipnor != 0: - face.v.reverse() - if tosel == 1: - face.sel = 1 - if thereAreMats == 1: - if reassignMats == 0 or protTopMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = protTopMat-1 - newmesh_faces.append(face) - return face - -#Sets the global protrusion values -def setProtrusionValues(p0,p1,p2,p3,p4,p5,p6,p7,p8,p9,p10,p11,p12,p13,p14,p15): - - #Protrusions - global makeprots - global minimumtaperpercent - global maximumtaperpercent - global faceschangedpercent - global minimumheight - global maximumheight - global subface1 - global subface2 - global subface3 - global subface4 - global useselectedfaces - global selectface1 - global selectface2 - global selectface3 - global selectface4 - global deselface - global subfaceArray - - #Protrusions - makeprots = p0 - faceschangedpercent = p1 - minimumheight = p2 - maximumheight = p3 - subface1 = p4 - subface2 = p5 - subface3 = p6 - subface4 = p7 - minimumtaperpercent = p8 - maximumtaperpercent = p9 - useselectedfaces = p10 - selectface1 = p11 - selectface2 = p12 - selectface3 = p13 - selectface4 = p14 - deselface = p15 - makeSubfaceArray() - if len(subfaceArray) == 0: - makeprots = 0 - - if minimumheight > maximumheight: - a = maximumheight - maximimheight = minimumheight - minimumheight = a - elif minimumtaperpercent > maximumtaperpercent: - a = maximumtaperpercent - maximimtaperpercent = minimumtaperpercent - minimumtaperpercent = a - -#Sets the global Doodad values -def setDoodadValues(d0,d1,d2,d3,d4,d5,d6,d7,d8,d9,d10,d11,d12,d13,d14,d15,d16,d17): - - #Doodads - global makedoodads - global doodadfacepercent - global selectdoodad - global onlyonprotrusions - global doodad1 - global doodad2 - global doodad3 - global doodad4 - global doodad5 - global doodad6 - global doodadminperface - global doodadmaxperface - global doodadminsize - global doodadmaxsize - global doodadminheight - global doodadmaxheight - global doodadArray - global doodonselectedfaces - global selectdoodadtoponly - - #Doodads - makedoodads = d0 - doodadfacepercent = d1 - selectdoodad = d2 - onlyonprotrusions = d3 - doodad1 = d4 - doodad2 = d5 - doodad3 = d6 - doodad4 = d7 - doodad5 = d8 - doodad6 = d9 - doodadminperface = d10 - doodadmaxperface = d11 - doodadminsize = d12 - doodadmaxsize = d13 - doodadminheight = d14 - doodadmaxheight = d15 - doodonselectedfaces = d16 - selectdoodadtoponly = d17 - makeDoodadArray() - if len(doodadArray) == 0: - makedoodads = 0 - - elif doodadminperface > doodadmaxperface: - a = doodadmaxperface - doodadmaxperface = doodadminperface - doodadminperface = a - elif doodadminsize > doodadmaxsize: - a = doodadmaxsize - doodadmaxsize = doodadminsize - doodadminsize = a - elif doodadminheight > doodadmaxheight: - a = doodadmaxheight - doodadmaxheight = doodadminheight - doodadminheight = a - -#Sets other global values -def setOtherValues(g0,m0,m1,m2,m3,m4): - - #Global - global reassignMats - global makenewobj - global protSideMat - global protTopMat - global doodSideMat - global doodTopMat - - #Get Misc Variables - makenewobj = g0 - reassignMats = m0 - protSideMat = m1 - protTopMat = m2 - doodSideMat = m3 - doodTopMat = m4 - -def discombobulate(): - - #Global - global origmesh - global newmesh - global makenewobj 
- global origobj - global newobj - global messagetext - global errortext - global editmode - - #Protrusions - global makeprots - global minimumtaperpercent - global maximumtaperpercent - global faceschangedpercent - global minimumheight - global maximumheight - global subface1 - global subface2 - global subface3 - global subface4 - global useselectedfaces - global selectface1 - global selectface2 - global selectface3 - global selectface4 - global deselface - global subfaceArray - - #Doodads - global makedoodads - global doodadfacepercent - global selectdoodad - global onlyonprotrusions - global doodad1 - global doodad2 - global doodad3 - global doodad4 - global doodad5 - global doodad6 - global doodadminperface - global doodadmaxperface - global doodadminsize - global doodadmaxsize - global doodadminheight - global doodadmaxheight - global doodadArray - global doodonselectedfaces - global selectdoodadtoponly - - #Global - global materialArray - global reassignMats - global protSideMat - global protTopMat - global doodSideMat - global doodTopMat - global thereAreMats - global currmat - - origobj = Scene.GetCurrent().objects.active - if not origobj: - glRasterPos2d(10,50) - errortext = "YOU MUST SELECT AN OBJECT!" - messagetext = ErrorText(errortext) - Blender.Redraw() - return - - #Leave Editmode - editmode = Window.EditMode() - if editmode: Window.EditMode(0) - - #Get Major Variables - - origmesh = origobj.getData() - - if origobj.type != 'Mesh': - glRasterPos2d(10,50) - errortext = "OBJECT MUST BE MESH!" - messagetext = ErrorText(errortext) - Blender.Redraw() - return - - newmesh = NMesh.GetRaw() - materialArray = origmesh.getMaterials() - if len(materialArray) < 1: - thereAreMats = 0 - else: - thereAreMats = 1 - - #add material indices if necessary (only up to 4) - if thereAreMats == 1 and reassignMats == 1: - if len(materialArray) < 4: - if protSideMat > 4: protSideMat = 4 - if protTopMat > 4: protTopMat = 4 - if doodSideMat > 4: doodSideMat = 4 - if doodTopMat > 4: doodTopMat = 4 - else: - if protSideMat > len(materialArray): protSideMat = len(materialArray) - if protTopMat > len(materialArray): protTopMat = len(materialArray) - if doodSideMat > len(materialArray): doodSideMat = len(materialArray) - if doodTopMat > len(materialArray): doodTopMat = len(materialArray) - - #This only does something if there are less than 4 verts - for matind in [protSideMat,protTopMat,doodSideMat,doodTopMat]: - if matind > len(materialArray) and matind <= 4: - for i in xrange(len(materialArray),matind+1): - materialArray.append(Material.New("AddedMat " + str(i))) - - #Sets the materials - newmesh.setMaterials(materialArray) - - #Set the doodad settings - defaultdoodads.settings(selectdoodadtoponly,materialArray,reassignMats,thereAreMats,doodSideMat,doodTopMat) - #defaultdoodads.settings(selectdoodadtoponly,materialArray,reassignMats,thereAreMats,currmat) - - newmesh.verts.extend(origmesh.verts) - - #Start modifying faces - for currface in origmesh.faces: - - currmat = currface.materialIndex - defaultdoodads.setCurrMat(currmat) - - #Check if it is a triangle - if len(currface.v)<4: - face = Face([newmesh.verts[currface.v[0].index],newmesh.verts[currface.v[1].index],newmesh.verts[currface.v[2].index]]) - if thereAreMats == 1: - face.materialIndex = currmat - newmesh.faces.append(face) - continue - - #Check whether or not to make protrusions - if makeprots == 0: - face = 
Face([newmesh.verts[currface.v[0].index],newmesh.verts[currface.v[1].index],newmesh.verts[currface.v[2].index],newmesh.verts[currface.v[3].index]]) - if thereAreMats == 1: - face.materialIndex = currmat - newmesh.faces.append(face) - if makedoodads == 1 and onlyonprotrusions == 0: - if doodonselectedfaces == 1: - if currface.sel: - tempmesh = NMesh.GetRaw() - tempmesh = defaultdoodads.createDoodad(doodadArray,face, doodadminsize, doodadmaxsize, doodadminheight,doodadmaxheight, selectdoodad, doodadminperface, doodadmaxperface, doodadfacepercent) - newmesh.verts.extend(tempmesh.verts) - newmesh.faces.extend(tempmesh.faces) - else: - tempmesh = NMesh.GetRaw() - tempmesh = defaultdoodads.createDoodad(doodadArray,face, doodadminsize, doodadmaxsize, doodadminheight,doodadmaxheight, selectdoodad, doodadminperface, doodadmaxperface, doodadfacepercent) - newmesh.verts.extend(tempmesh.verts) - newmesh.faces.extend(tempmesh.faces) - continue - - #Check if only changing selected faces - if useselectedfaces == 1: - #check if currface is selected - if currface.sel: - a = 1 - else: - face = Face([newmesh.verts[currface.v[0].index],newmesh.verts[currface.v[1].index],newmesh.verts[currface.v[2].index],newmesh.verts[currface.v[3].index]]) - if thereAreMats == 1: - face.materialIndex = currmat - newmesh.faces.append(face) - if makedoodads == 1 and onlyonprotrusions == 0: - if doodonselectedfaces != 1: - tempmesh = NMesh.GetRaw() - tempmesh = defaultdoodads.createDoodad(doodadArray,face, doodadminsize, doodadmaxsize, doodadminheight,doodadmaxheight, selectdoodad, doodadminperface, doodadmaxperface, doodadfacepercent) - newmesh.verts.extend(tempmesh.verts) - newmesh.faces.extend(tempmesh.faces) - continue - #Check if face should be modified by random chance - if randnum(0,1)>faceschangedpercent: - face = Face([newmesh.verts[currface.v[0].index],newmesh.verts[currface.v[1].index],newmesh.verts[currface.v[2].index],newmesh.verts[currface.v[3].index]]) - if thereAreMats == 1: - face.materialIndex = currmat - newmesh.faces.append(face) - if makedoodads == 1 and onlyonprotrusions == 0: - if doodonselectedfaces == 1: - if currface.sel: - tempmesh = NMesh.GetRaw() - tempmesh = defaultdoodads.createDoodad(doodadArray,face, doodadminsize, doodadmaxsize, doodadminheight,doodadmaxheight, selectdoodad, doodadminperface, doodadmaxperface, doodadfacepercent) - newmesh.verts.extend(tempmesh.verts) - newmesh.faces.extend(tempmesh.faces) - else: - tempmesh = NMesh.GetRaw() - tempmesh = defaultdoodads.createDoodad(doodadArray,face, doodadminsize, doodadmaxsize, doodadminheight,doodadmaxheight, selectdoodad, doodadminperface, doodadmaxperface, doodadfacepercent) - newmesh.verts.extend(tempmesh.verts) - newmesh.faces.extend(tempmesh.faces) - continue - - center = Vector([0,0,0]) - for pt in currface.v: - center = center + pt.co - center = center / len(currface.v) - - #Determine amount of subfaces - subfaces = round(randnum(1,len(subfaceArray)),0) - subfaces = subfaceArray[(int(subfaces) - 1)] - - ######################## START DEALING WITH PROTRUSIONS ##################### - - if subfaces == 1: - prot = randnum(minimumheight,maximumheight) - tempface = extrude(center,currface.no,prot,currface.v[0].index,currface.v[1].index,currface.v[2].index,currface.v[3].index,selectface1) - if makedoodads == 1: - if doodonselectedfaces == 1: - if currface.sel: - tempmesh = NMesh.GetRaw() - tempmesh = defaultdoodads.createDoodad(doodadArray, tempface, doodadminsize, doodadmaxsize, doodadminheight,doodadmaxheight, selectdoodad, doodadminperface, 
doodadmaxperface, doodadfacepercent) - newmesh.verts.extend(tempmesh.verts) - newmesh.faces.extend(tempmesh.faces) - else: - tempmesh = NMesh.GetRaw() - tempmesh = defaultdoodads.createDoodad(doodadArray, tempface, doodadminsize, doodadmaxsize, doodadminheight,doodadmaxheight, selectdoodad, doodadminperface, doodadmaxperface, doodadfacepercent) - newmesh.verts.extend(tempmesh.verts) - newmesh.faces.extend(tempmesh.faces) - - elif subfaces == 2: - orientation = int(round(randnum(0,1))) - p1 = currface.v[orientation] - p2 = currface.v[orientation + 1] - p3 = ((p2.co - p1.co)/2) + p1.co - ve1 = Vert(p3[0],p3[1],p3[2]) - ve1.sel = 0 - p1 = currface.v[2 + orientation] - if orientation < 0.5: - p2 = currface.v[3] - else: - p2 = currface.v[0] - p3 = ((p2.co - p1.co)/2) + p1.co - ve2 = Vert(p3[0],p3[1],p3[2]) - ve2.sel = 0 - if orientation < 0.5: - verti = currface.v[3] - p3 = verti.index - v1 = p3 - verti = currface.v[0] - p0 = verti.index - v2 = p0 - else: - verti = currface.v[0] - p0 = verti.index - v1 = p0 - verti = currface.v[1] - p1 = verti.index - v2 = p1 - newmesh.verts.append(ve1) - newmesh.verts.append(ve2) - index = len(newmesh.verts) - 2 - v4 = index + 1 - v3 = index - center = Vector([0, 0, 0]) - for pt in [newmesh.verts[v1],newmesh.verts[v2],newmesh.verts[v3],newmesh.verts[v4]]: - center += pt.co - center = center/4 - prot = randnum(minimumheight,maximumheight) - tempface = extrude(center,currface.no,prot,v1,v2,v3,v4,selectface2) - if makedoodads == 1: - if doodonselectedfaces == 1: - if currface.sel: - tempmesh = NMesh.GetRaw() - tempmesh = defaultdoodads.createDoodad(doodadArray, tempface, doodadminsize, doodadmaxsize, doodadminheight,doodadmaxheight, selectdoodad, doodadminperface, doodadmaxperface, doodadfacepercent) - newmesh.verts.extend(tempmesh.verts) - newmesh.faces.extend(tempmesh.faces) - else: - tempmesh = NMesh.GetRaw() - tempmesh = defaultdoodads.createDoodad(doodadArray, tempface, doodadminsize, doodadmaxsize, doodadminheight,doodadmaxheight, selectdoodad, doodadminperface, doodadmaxperface, doodadfacepercent) - newmesh.verts.extend(tempmesh.verts) - newmesh.faces.extend(tempmesh.faces) - if orientation < 0.5: - verti = currface.v[1] - p1 = verti.index - v1 = p1 - verti = currface.v[2] - p2 = verti.index - v2 = p2 - else: - verti = currface.v[2] - p2 = verti.index - v1 = p2 - verti = currface.v[3] - p3 = verti.index - v2 = p3 - center = Vector([0]*3) - for pt in [newmesh.verts[v1],newmesh.verts[v2],newmesh.verts[v3],newmesh.verts[v4]]: - center += pt.co - center = center/4 - prot = randnum(minimumheight,maximumheight) - tempface = extrude(center,currface.no,prot,v1,v2,v4,v3,selectface2) - if makedoodads == 1: - if doodonselectedfaces == 1: - if currface.sel: - tempmesh = NMesh.GetRaw() - tempmesh = defaultdoodads.createDoodad(doodadArray, tempface, doodadminsize, doodadmaxsize, doodadminheight,doodadmaxheight, selectdoodad, doodadminperface, doodadmaxperface, doodadfacepercent) - newmesh.verts.extend(tempmesh.verts) - newmesh.faces.extend(tempmesh.faces) - else: - tempmesh = NMesh.GetRaw() - tempmesh = defaultdoodads.createDoodad(doodadArray, tempface, doodadminsize, doodadmaxsize, doodadminheight,doodadmaxheight, selectdoodad, doodadminperface, doodadmaxperface, doodadfacepercent) - newmesh.verts.extend(tempmesh.verts) - newmesh.faces.extend(tempmesh.faces) - if orientation < 0.5: - face = Face([newmesh.verts[p0],newmesh.verts[p1],newmesh.verts[v3]]) - if thereAreMats == 1: - if reassignMats == 0 or protSideMat == 0: - face.materialIndex = currmat - else: - 
face.materialIndex = protSideMat-1 - newmesh.faces.append(face) - face = Face([newmesh.verts[p2],newmesh.verts[p3],newmesh.verts[v4]]) - if thereAreMats == 1: - if reassignMats == 0 or protSideMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = protSideMat-1 - newmesh.faces.append(face) - else: - face = Face([newmesh.verts[p1],newmesh.verts[p2],newmesh.verts[v3]]) - if thereAreMats == 1: - if reassignMats == 0 or protSideMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = protSideMat-1 - newmesh.faces.append(face) - face = Face([newmesh.verts[p3],newmesh.verts[p0],newmesh.verts[v4]]) - if thereAreMats == 1: - if reassignMats == 0 or protSideMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = protSideMat-1 - newmesh.faces.append(face) - - elif subfaces == 3: - layer2inds = [] - layer2verts = [] - orientation = int(round(randnum(0,1))) - rotation = int(round(randnum(0,1))) - p1 = currface.v[orientation] - p2 = currface.v[orientation + 1] - p3 = ((p2.co - p1.co)/2) + p1.co - ve1 = Vert(p3[0],p3[1],p3[2]) - ve1.sel = 0 - p1 = currface.v[2 + orientation] - if orientation < 0.5: - p2 = currface.v[3] - else: - p2 = currface.v[0] - p3 = ((p2.co - p1.co)/2) + p1.co - ve2 = Vert(p3[0],p3[1],p3[2]) - ve2.sel = 0 - fp = [] - - #make first protrusion - if rotation < 0.5: - if orientation < 0.5: - verti = currface.v[3] - fp.append(verti.index) - v1 = verti.index - verti = currface.v[0] - fp.append(verti.index) - v2 = verti.index - layer2verts.extend([newmesh.verts[currface.v[1].index],newmesh.verts[currface.v[2].index]]) - else: - verti = currface.v[0] - fp.append(verti.index) - v1 = verti.index - verti = currface.v[1] - fp.append(verti.index) - v2 = verti.index - layer2verts.extend([newmesh.verts[currface.v[2].index],newmesh.verts[currface.v[3].index]]) - newmesh.verts.append(ve1) - newmesh.verts.append(ve2) - index = len(newmesh.verts) - 2 - v4 = index + 1 - v3 = index - center = Vector([0]*3) - for pt in [newmesh.verts[v1],newmesh.verts[v2],newmesh.verts[v3],newmesh.verts[v4]]: - center += pt.co - center = center/4 - prot = randnum(minimumheight,maximumheight) - layer2inds.extend([v3,v4]) - tempface = extrude(center,currface.no,prot,v1,v2,v3,v4,selectface3) - if makedoodads == 1: - if doodonselectedfaces == 1: - if currface.sel: - tempmesh = NMesh.GetRaw() - tempmesh = defaultdoodads.createDoodad(doodadArray, tempface, doodadminsize, doodadmaxsize, doodadminheight,doodadmaxheight, selectdoodad, doodadminperface, doodadmaxperface, doodadfacepercent) - newmesh.verts.extend(tempmesh.verts) - newmesh.faces.extend(tempmesh.faces) - else: - tempmesh = NMesh.GetRaw() - tempmesh = defaultdoodads.createDoodad(doodadArray, tempface, doodadminsize, doodadmaxsize, doodadminheight,doodadmaxheight, selectdoodad, doodadminperface, doodadmaxperface, doodadfacepercent) - newmesh.verts.extend(tempmesh.verts) - newmesh.faces.extend(tempmesh.faces) - #Still first protrusion - else: - if orientation < 0.5: - verti = currface.v[1] - fp.append(verti.index) - v1 = verti.index - verti = currface.v[2] - fp.append(verti.index) - v2 = verti.index - layer2verts.extend([newmesh.verts[currface.v[0].index],newmesh.verts[currface.v[3].index]]) - else: - verti = currface.v[2] - fp.append(verti.index) - v1 = verti.index - verti = currface.v[3] - fp.append(verti.index) - v2 = verti.index - layer2verts.extend([newmesh.verts[currface.v[1].index],newmesh.verts[currface.v[0].index]]) - newmesh.verts.append(ve2) - newmesh.verts.append(ve1) - index = len(newmesh.verts) - 2 - v4 = 
index - v3 = index + 1 - center = Vector([0]*3) - for pt in [newmesh.verts[v1],newmesh.verts[v2],newmesh.verts[v3],newmesh.verts[v4]]: - center += pt.co - center = center/4 - prot = randnum(minimumheight,maximumheight) - layer2inds.extend([index, index +1]) - tempface = extrude(center,currface.no,prot,v1,v2,v4,v3,selectface3) - if makedoodads == 1: - if doodonselectedfaces == 1: - if currface.sel: - tempmesh = NMesh.GetRaw() - tempmesh = defaultdoodads.createDoodad(doodadArray, tempface, doodadminsize, doodadmaxsize, doodadminheight,doodadmaxheight, selectdoodad, doodadminperface, doodadmaxperface, doodadfacepercent) - newmesh.verts.extend(tempmesh.verts) - newmesh.faces.extend(tempmesh.faces) - else: - tempmesh = NMesh.GetRaw() - tempmesh = defaultdoodads.createDoodad(doodadArray, tempface, doodadminsize, doodadmaxsize, doodadminheight,doodadmaxheight, selectdoodad, doodadminperface, doodadmaxperface, doodadfacepercent) - newmesh.verts.extend(tempmesh.verts) - newmesh.faces.extend(tempmesh.faces) - - #split next rect(pre-arranged, no orientation crud)--make flag in extruder for only one existing vert in mesh - p1 = newmesh.verts[layer2inds[0]] - p2 = newmesh.verts[layer2inds[1]] - p3 = ((p2.co - p1.co)/2) + p1.co - ve3 = Vert(p3[0],p3[1],p3[2]) - ve3.sel = 0 - p1 = layer2verts[0] - p2 = layer2verts[1] - p3 = ((p2.co - p1.co)/2) + p1.co - ve4 = Vert(p3[0],p3[1],p3[2]) - ve4.sel = 0 - newmesh.verts.append(ve3) - newmesh.verts.append(ve4) - tempindex = len(newmesh.verts) - 2 - v5 = tempindex - v6 = tempindex + 1 - verti = layer2verts[0] - t0 = verti.index - center = Vector([0]*3) - for pt in [newmesh.verts[v5],newmesh.verts[v6],newmesh.verts[t0],newmesh.verts[v3]]: - center += pt.co - center = center/4 - prot = randnum(minimumheight,maximumheight) - if rotation < 0.5: flino = 1 - else: flino = 0 - tempface = extrude(center,currface.no,prot,v3,v5,v6,t0,selectface3,flino) - if makedoodads == 1: - if doodonselectedfaces == 1: - if currface.sel: - tempmesh = NMesh.GetRaw() - tempmesh = defaultdoodads.createDoodad(doodadArray, tempface, doodadminsize, doodadmaxsize, doodadminheight,doodadmaxheight, selectdoodad, doodadminperface, doodadmaxperface, doodadfacepercent) - newmesh.verts.extend(tempmesh.verts) - newmesh.faces.extend(tempmesh.faces) - tempmesh = NMesh.GetRaw() - tempmesh = defaultdoodads.createDoodad(doodadArray, tempface, doodadminsize, doodadmaxsize, doodadminheight,doodadmaxheight, selectdoodad, doodadminperface, doodadmaxperface, doodadfacepercent) - newmesh.verts.extend(tempmesh.verts) - newmesh.faces.extend(tempmesh.faces) - if rotation < 0.5: - fpt = t0 - face = Face([newmesh.verts[fp[1]],newmesh.verts[fpt],newmesh.verts[v3]]) - if thereAreMats == 1: - if reassignMats == 0 or protSideMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = protSideMat-1 - newmesh.faces.append(face) - else: - fpt = t0 - face = Face([newmesh.verts[fp[0]],newmesh.verts[v3],newmesh.verts[fpt]]) - if thereAreMats == 1: - if reassignMats == 0 or protSideMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = protSideMat-1 - newmesh.faces.append(face) - verti = layer2verts[1] - tempindex = verti.index - center = Vector([0]*3) - for pt in [newmesh.verts[v5],newmesh.verts[v6],newmesh.verts[tempindex],newmesh.verts[v4]]: - center += pt.co - center = center/4 - prot = randnum(minimumheight,maximumheight) - tempface = extrude(center,currface.no,prot,v6,v5,v4,tempindex,selectface3,flino) - if makedoodads == 1: - if doodonselectedfaces == 1: - if currface.sel: - tempmesh = 
NMesh.GetRaw() - tempmesh = defaultdoodads.createDoodad(doodadArray, tempface, doodadminsize, doodadmaxsize, doodadminheight,doodadmaxheight, selectdoodad, doodadminperface, doodadmaxperface, doodadfacepercent) - newmesh.verts.extend(tempmesh.verts) - newmesh.faces.extend(tempmesh.faces) - else: - tempmesh = NMesh.GetRaw() - tempmesh = defaultdoodads.createDoodad(doodadArray, tempface, doodadminsize, doodadmaxsize, doodadminheight,doodadmaxheight, selectdoodad, doodadminperface, doodadmaxperface, doodadfacepercent) - newmesh.verts.extend(tempmesh.verts) - newmesh.faces.extend(tempmesh.faces) - if rotation < 0.5: - face = Face([newmesh.verts[tempindex],newmesh.verts[fp[0]],newmesh.verts[v4]]) - if thereAreMats == 1: - if reassignMats == 0 or protSideMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = protSideMat-1 - newmesh.faces.append(face) - face = Face([newmesh.verts[fpt],newmesh.verts[tempindex],newmesh.verts[v6]]) - if thereAreMats == 1: - if reassignMats == 0 or protSideMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = protSideMat-1 - newmesh.faces.append(face) - else: - face = Face([newmesh.verts[tempindex],newmesh.verts[v4],newmesh.verts[fp[1]]]) - if thereAreMats == 1: - if reassignMats == 0 or protSideMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = protSideMat-1 - newmesh.faces.append(face) - face = Face([newmesh.verts[tempindex],newmesh.verts[fpt],newmesh.verts[v6]]) - if thereAreMats == 1: - if reassignMats == 0 or protSideMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = protSideMat-1 - newmesh.faces.append(face) - - else: - #get all points - verti = currface.v[0] - p0 = verti.index - - verti = currface.v[1] - p1 = verti.index - - pt = ((newmesh.verts[p1].co - newmesh.verts[p0].co)/2) + newmesh.verts[p0].co - v1 = Vert(pt[0],pt[1],pt[2]) - v1.sel = 0 - - verti = currface.v[2] - p2 = verti.index - - pt = ((newmesh.verts[p2].co - newmesh.verts[p1].co)/2) + newmesh.verts[p1].co - v2 = Vert(pt[0],pt[1],pt[2]) - v2.sel = 0 - - verti = currface.v[3] - p3 = verti.index - - pt = ((newmesh.verts[p3].co - newmesh.verts[p2].co)/2) + newmesh.verts[p2].co - v3 = Vert(pt[0],pt[1],pt[2]) - v3.sel = 0 - - pt = ((newmesh.verts[p0].co - newmesh.verts[p3].co)/2) + newmesh.verts[p3].co - v4 = Vert(pt[0],pt[1],pt[2]) - v4.sel = 0 - - pt = ((v3.co - v1.co)/2) + v1.co - m = Vert(pt[0],pt[1],pt[2]) - m.sel = 0 - - #extrusion 1 - newmesh.verts.extend([v1,m,v4]) - index = len(newmesh.verts) - 3 - v1 = index - m = index + 1 - v4 = index + 2 - center = Vector([0]*3) - for pt in [newmesh.verts[p0],newmesh.verts[v1],newmesh.verts[m],newmesh.verts[v4]]: - center += pt.co - center = center/4 - prot = randnum(minimumheight,maximumheight) - tempface = extrude(center,currface.no,prot,p0,v1,m,v4,selectface4) - if makedoodads == 1: - if doodonselectedfaces == 1: - if currface.sel: - tempmesh = NMesh.GetRaw() - tempmesh = defaultdoodads.createDoodad(doodadArray, tempface, doodadminsize, doodadmaxsize, doodadminheight,doodadmaxheight, selectdoodad, doodadminperface, doodadmaxperface, doodadfacepercent) - newmesh.verts.extend(tempmesh.verts) - newmesh.faces.extend(tempmesh.faces) - else: - tempmesh = NMesh.GetRaw() - tempmesh = defaultdoodads.createDoodad(doodadArray, tempface, doodadminsize, doodadmaxsize, doodadminheight,doodadmaxheight, selectdoodad, doodadminperface, doodadmaxperface, doodadfacepercent) - newmesh.verts.extend(tempmesh.verts) - newmesh.faces.extend(tempmesh.faces) - - #extrusion 2 - newmesh.verts.extend([v2]) 
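# The extrusion blocks in this branch all repeat one geometric step: average the
# face's four corners to get a centre, draw a random height with randnum(), and
# push the centre out along the face normal via the script's extrude() helper.
# A minimal pure-Python sketch of that step (no Blender API; vec3_*, quad_centroid
# and protrusion_tip are illustrative names, not part of the script):
import random

def vec3_add(a, b):
    return [a[0] + b[0], a[1] + b[1], a[2] + b[2]]

def vec3_scale(a, s):
    return [a[0] * s, a[1] * s, a[2] * s]

def quad_centroid(v1, v2, v3, v4):
    # same idea as the running "center += pt.co; center = center/4" loops here
    c = [0.0, 0.0, 0.0]
    for v in (v1, v2, v3, v4):
        c = vec3_add(c, v)
    return vec3_scale(c, 0.25)

def protrusion_tip(center, normal, min_h, max_h):
    # random height along the (unit) face normal, like randnum(minimumheight, maximumheight)
    h = random.uniform(min_h, max_h)
    return vec3_add(center, vec3_scale(normal, h))

# example: unit quad in the XY plane, normal +Z
print protrusion_tip(quad_centroid([0,0,0], [1,0,0], [1,1,0], [0,1,0]), [0,0,1], 0.5, 1.5)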
- index = len(newmesh.verts) - 1 - v2 = index - center = Vector([0]*3) - for pt in [newmesh.verts[m],newmesh.verts[v1],newmesh.verts[p1],newmesh.verts[v2]]: - center += pt.co - center = center/4 - prot = randnum(minimumheight,maximumheight) - tempface = extrude(center,currface.no,prot,m,v1,p1,v2,selectface4) - if makedoodads == 1: - if doodonselectedfaces == 1: - if currface.sel: - tempmesh = NMesh.GetRaw() - tempmesh = defaultdoodads.createDoodad(doodadArray, tempface, doodadminsize, doodadmaxsize, doodadminheight,doodadmaxheight, selectdoodad, doodadminperface, doodadmaxperface, doodadfacepercent) - newmesh.verts.extend(tempmesh.verts) - newmesh.faces.extend(tempmesh.faces) - else: - tempmesh = NMesh.GetRaw() - tempmesh = defaultdoodads.createDoodad(doodadArray, tempface, doodadminsize, doodadmaxsize, doodadminheight,doodadmaxheight, selectdoodad, doodadminperface, doodadmaxperface, doodadfacepercent) - newmesh.verts.extend(tempmesh.verts) - newmesh.faces.extend(tempmesh.faces) - - #extrusion 3 - newmesh.verts.extend([v3]) - index = len(newmesh.verts) - 1 - v3 = index - center = Vector([0]*3) - for pt in [newmesh.verts[m],newmesh.verts[v2],newmesh.verts[p2],newmesh.verts[v3]]: - center += pt.co - center = center/4 - prot = randnum(minimumheight,maximumheight) - tempface = extrude(center,currface.no,prot,m,v2,p2,v3,selectface4) - if makedoodads == 1: - if doodonselectedfaces == 1: - if currface.sel: - tempmesh = NMesh.GetRaw() - tempmesh = defaultdoodads.createDoodad(doodadArray, tempface, doodadminsize, doodadmaxsize, doodadminheight,doodadmaxheight, selectdoodad, doodadminperface, doodadmaxperface, doodadfacepercent) - newmesh.verts.extend(tempmesh.verts) - newmesh.faces.extend(tempmesh.faces) - else: - tempmesh = NMesh.GetRaw() - tempmesh = defaultdoodads.createDoodad(doodadArray, tempface, doodadminsize, doodadmaxsize, doodadminheight,doodadmaxheight, selectdoodad, doodadminperface, doodadmaxperface, doodadfacepercent) - newmesh.verts.extend(tempmesh.verts) - newmesh.faces.extend(tempmesh.faces) - - #extrusion 4 - center = Vector([0]*3) - for pt in [newmesh.verts[m],newmesh.verts[v3],newmesh.verts[p3],newmesh.verts[v4]]: - center += pt.co - center = center/4 - prot = randnum(minimumheight,maximumheight) - tempface = extrude(center,currface.no,prot,v4,m,v3,p3,selectface4) - if makedoodads == 1: - if doodonselectedfaces == 1: - if currface.sel: - tempmesh = NMesh.GetRaw() - tempmesh = defaultdoodads.createDoodad(doodadArray, tempface, doodadminsize, doodadmaxsize, doodadminheight,doodadmaxheight, selectdoodad, doodadminperface, doodadmaxperface, doodadfacepercent) - newmesh.verts.extend(tempmesh.verts) - newmesh.faces.extend(tempmesh.faces) - else: - tempmesh = NMesh.GetRaw() - tempmesh = defaultdoodads.createDoodad(doodadArray, tempface, doodadminsize, doodadmaxsize, doodadminheight,doodadmaxheight, selectdoodad, doodadminperface, doodadmaxperface, doodadfacepercent) - newmesh.verts.extend(tempmesh.verts) - newmesh.faces.extend(tempmesh.faces) - - face = Face([newmesh.verts[p0],newmesh.verts[p1],newmesh.verts[v1]]) - if thereAreMats == 1: - if reassignMats == 0 or protSideMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = protSideMat-1 - newmesh.faces.append(face) - face = Face([newmesh.verts[p1],newmesh.verts[p2],newmesh.verts[v2]]) - if thereAreMats == 1: - if reassignMats == 0 or protSideMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = protSideMat-1 - newmesh.faces.append(face) - face = 
Face([newmesh.verts[p2],newmesh.verts[p3],newmesh.verts[v3]]) - if thereAreMats == 1: - if reassignMats == 0 or protSideMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = protSideMat-1 - newmesh.faces.append(face) - face = Face([newmesh.verts[p3],newmesh.verts[p0],newmesh.verts[v4]]) - if thereAreMats == 1: - if reassignMats == 0 or protSideMat == 0: - face.materialIndex = currmat - else: - face.materialIndex = protSideMat-1 - newmesh.faces.append(face) - - #NMesh.PutRaw(newmesh) - if deselface == 1: - for unvert in origmesh.verts: - newmesh.verts[unvert.index].sel = 0 - if makenewobj == 1: - newobj = origobj.__copy__() - newobj.link(newmesh) - scene = Blender.Scene.GetCurrent() - scene.objects.link(newobj) - origobj.sel = 0 - else: - origobj.link(newmesh) - - #Return to Editmode if previously in it - if editmode: Window.EditMode(1) - -####################### gui ###################### -from Blender.BGL import * -from Blender.Draw import * - -def ErrorText(errortext): - Window.WaitCursor(0) - Text(errortext) - PupMenu("ERROR: %s" % errortext.lower()) - -#Global Buttons -makenewobject = Create(makenewobj) -messagetext = Create(errortext) - -#Protrusion Buttons -doprots = Create(makeprots) -facechange = Create(faceschangedpercent*100) -minheight = Create(minimumheight) -maxheight = Create(maximumheight) -sub1 = Create(subface1) -sub2 = Create(subface2) -sub3 = Create(subface3) -sub4 = Create(subface4) -mintaper = Create(minimumtaperpercent*100) -maxtaper = Create(maximumtaperpercent*100) -useselected = Create(useselectedfaces) -selface1 = Create(selectface1) -selface2 = Create(selectface2) -selface3 = Create(selectface3) -selface4 = Create(selectface4) -deselectvertices = Create(deselface) -#selectbyverts = Create(vertselected) - -#Doodad Buttons -dodoodads = Create(makedoodads) -doodadfacechange = Create(doodadfacepercent*100) -seldoodad = Create(selectdoodad) -onprot = Create(onlyonprotrusions) -dood1 = Create(doodad1) -dood2 = Create(doodad2) -dood3 = Create(doodad3) -dood4 = Create(doodad4) -dood5 = Create(doodad5) -dood6 = Create(doodad6) -doodadminamount = Create(doodadminperface) -doodadmaxamount = Create(doodadmaxperface) -doodsizemin = Create(doodadminsize*100) -doodsizemax = Create(doodadmaxsize*100) -doodheightmin = Create(doodadminheight) -doodheightmax = Create(doodadmaxheight) -doodonselface = Create(doodonselectedfaces) -seldoodtop = Create(selectdoodadtoponly) - -#Material Buttons -assignNewMats = Create(reassignMats) -replProtSideIndex = Create(protSideMat) -replProtTopIndex = Create(protTopMat) -replDoodSideIndex = Create(doodSideMat) -replDoodTopIndex = Create(doodTopMat) - -# Events -EVENT_NONE = 1 -EVENT_DISCOMBOBULATE = 2 -EVENT_EXIT = 3 - -# Additions for moving gui -hadd = 0 -wadd = 0 -thadd = 410 -phadd = 245 -pwadd = 0 -dhadd = 55 -dwadd = 0 -ghadd = 10 -gwadd = 0 -mhadd = 55 -mwadd = 312 - -def colorbox(x,y,xright,bottom): - glColor3f(0.75, 0.75, 0.75) - glRecti(x + 1, y + 1, xright - 1, bottom - 1) - -firstDraw = 1 - -def draw(): - - #Protrusions - global doprots - global facechange - global minheight - global maxheight - global sub1 - global sub2 - global sub3 - global sub4 - global mintaper - global maxtaper - global useselected - global selface1 - global selface2 - global selface3 - global selface4 - global deselectvertices - #global selectbyverts - - #Doodads - global dodoodads - global doodadfacechange - global seldoodad - global onprot - global dood1 - global dood2 - global dood3 - global dood4 - global dood5 - global dood6 - global 
doodadminamount - global doodadmaxamount - global doodsizemin - global doodsizemax - global doodheightmin - global doodheightmax - global doodonselface - global seldoodtop - - #Materials - global assignNewMats - global replProtSideIndex - global replProtTopIndex - global replDoodSideIndex - global replDoodTopIndex - - #Global Settings - global makenewobject - global messagetext - global errortext - global EVENT_NONE,EVENT_DRAW,EVENT_EXIT,EVENT_UP,EVENT_DOWN,EVENT_LEFT,EVENT_RIGHT - - # Additions for moving gui - global hadd - global wadd - global thadd - global phadd - global pwadd - global dhadd - global dwadd - global ghadd - global gwadd - global mhadd - global mwadd - - #This is for creating the initial layout - global firstDraw - if(firstDraw == 1): - if(((Window.GetAreaSize()[1])*1.7) < Window.GetAreaSize()[0]): - thadd = 180 - phadd = 10 - dhadd = 10 - mhadd = 55 - ghadd = 10 - pwadd = 0 - dwadd = 305 - mwadd = 610 - gwadd = 610 - else: - thadd = 505 - phadd = 346 - dhadd = 160 - mhadd = 56 - ghadd = 10 - pwadd = 0 - dwadd = 0 - mwadd = 0 - gwadd = 0 - firstDraw = 0 - - - #Title :420high - glClearColor(0.6, 0.6, 0.6, 1.0) - glClear(GL_COLOR_BUFFER_BIT) - glColor3f(0.0,0.0,0.0) - glRasterPos2d(8+wadd, thadd+hadd) - Text("Discombobulator v2.1b") - - #Protrusion - colorbox(8+pwadd+wadd,150+phadd+hadd,312+pwadd+wadd,phadd-5+hadd) - glColor3f(0.0,0.0,0.0) - glRasterPos2d(12+pwadd+wadd, 140+phadd+hadd) - Text("Protrusions:") - doprots = Toggle("Make Protrusions",EVENT_NONE,12+pwadd+wadd,117+phadd+hadd,145,18,doprots.val,"Make Protrusions?") - facechange = Number("Face %: ",EVENT_NONE,162+pwadd+wadd,117+phadd+hadd,145,18,facechange.val,0,100,"Percentage of faces that will grow protrusions") - useselected = Toggle("Only selected faces",EVENT_NONE,12+pwadd+wadd,97+phadd+hadd,145,18,useselected.val,"If on, only selected faces will be modified") - deselectvertices = Toggle("Deselect Selected",EVENT_NONE,162+pwadd+wadd,97+phadd+hadd,145,18,deselectvertices.val,"Deselects any selected vertex except for ones selected by \"Select Tops\"") - - #Protrusion properties - glColor3f(0.0,0.0,0.0) - glRasterPos2d(12+pwadd+wadd, 80+phadd+hadd) - Text("Protrusion Properties:") - BeginAlign() - minheight = Number("Min Height: ",EVENT_NONE,12+pwadd+wadd,57+phadd+hadd,145,18,minheight.val,-100.0,100.0,"Minimum height of any protrusion") - maxheight = Number("Max Height: ",EVENT_NONE,162+pwadd+wadd,57+phadd+hadd,145,18,maxheight.val,-100.0,100.0,"Maximum height of any protrusion") - EndAlign() - BeginAlign() - mintaper = Number("Min Taper %: ",EVENT_NONE,12+pwadd+wadd,37+phadd+hadd,145,18,mintaper.val,0,100,"Minimum taper percentage of protrusion") - maxtaper = Number("Max Taper %: ",EVENT_NONE,162+pwadd+wadd,37+phadd+hadd,145,18,maxtaper.val,0,100,"Maximum taper percentage of protrusion") - EndAlign() - glRasterPos2d(19+pwadd+wadd, 22+phadd+hadd) - Text("Number of protrusions:") - BeginAlign() - sub1 = Toggle("1",EVENT_NONE,12+pwadd+wadd,phadd+hadd,34,18,sub1.val,"One Protrusion") - sub2 = Toggle("2",EVENT_NONE,48+pwadd+wadd,phadd+hadd,34,18,sub2.val,"Two Protrusions") - sub3 = Toggle("3",EVENT_NONE,84+pwadd+wadd,phadd+hadd,34,18,sub3.val,"Three Protrusions") - sub4 = Toggle("4",EVENT_NONE,120+pwadd+wadd,phadd+hadd,34,18,sub4.val,"Four Protrusions") - EndAlign() - glRasterPos2d(195+pwadd+wadd, 22+phadd+hadd) - Text("Select tops of:") - BeginAlign() - selface1 = Toggle("1",EVENT_NONE,165+pwadd+wadd,phadd+hadd,34,18,selface1.val,"Select the tip of the protrusion when it is created") - selface2 = 
Toggle("2",EVENT_NONE,201+pwadd+wadd,phadd+hadd,34,18,selface2.val,"Select the tips of each protrusion when they are created") - selface3 = Toggle("3",EVENT_NONE,237+pwadd+wadd,phadd+hadd,34,18,selface3.val,"Select the tips of each protrusion when they are created") - selface4 = Toggle("4",EVENT_NONE,273+pwadd+wadd,phadd+hadd,34,18,selface4.val,"Select the tips of each protrusion when they are created") - EndAlign() - #Doodads - colorbox(8+dwadd+wadd,175+dhadd+hadd,312+dwadd+wadd,dhadd-5+hadd) - glColor3f(0.0,0.0,0.0) - glRasterPos2d(12+dwadd+wadd, 165+dhadd+hadd) - Text("Doodads:") - BeginAlign() - dood1 = Toggle("1 Box",EVENT_NONE,12+dwadd+wadd,142+dhadd+hadd,45,18,dood1.val,"Creates a rectangular box") - dood2 = Toggle("2 Box",EVENT_NONE,61+dwadd+wadd,142+dhadd+hadd,45,18,dood2.val,"Creates 2 side-by-side rectangular boxes") - dood3 = Toggle("3 Box",EVENT_NONE,110+dwadd+wadd,142+dhadd+hadd,45,18,dood3.val,"Creates 3 side-by-side rectangular boxes") - EndAlign() - BeginAlign() - dood4 = Toggle("\"L\"",EVENT_NONE,164+dwadd+wadd,142+dhadd+hadd,45,18,dood4.val,"Creates a Tetris-style \"L\" shape") - dood5 = Toggle("\"T\"",EVENT_NONE,213+dwadd+wadd,142+dhadd+hadd,45,18,dood5.val,"Creates a Tetris-style \"T\" shape") - dood6 = Toggle("\"S\"",EVENT_NONE,262+dwadd+wadd,142+dhadd+hadd,45,18,dood6.val,"Creates a sort-of \"S\" or \"Z\" shape") - EndAlign() - dodoodads = Toggle("Make Doodads",EVENT_NONE,12+dwadd+wadd,120+dhadd+hadd,145,18,dodoodads.val,"Make Doodads?") - doodadfacechange = Number("Face %: ",EVENT_NONE,162+dwadd+wadd,120+dhadd+hadd,145,18,doodadfacechange.val,0,100,"Percentage of faces that will gain doodads") - seldoodad = Toggle("Select Doodads",EVENT_NONE,12+dwadd+wadd,100+dhadd+hadd,145,18,seldoodad.val,"Selects doodads when they are created") - seldoodtop = Toggle("Only Select Tops",EVENT_NONE,162+dwadd+wadd,100+dhadd+hadd,145,18,seldoodtop.val,"Only Selects tops of doodads when\"Select Doodads\" is on") - doodonselface = Toggle("Only selected faces",EVENT_NONE,12+dwadd+wadd,80+dhadd+hadd,145,18,doodonselface.val,"Only create doodads on selected faces") - onprot = Toggle("Only on Protrusions",EVENT_NONE,162+dwadd+wadd,80+dhadd+hadd,145,18,onprot.val,"Only place doodads on protrusions") - - #Doodad Properties - glColor3f(0.0,0.0,0.0) - glRasterPos2d(12+dwadd+wadd, 63+dhadd+hadd) - Text("Doodad Properties:") - BeginAlign() - doodadminamount = Number("Min Amount: ",EVENT_NONE,12+dwadd+wadd,40+dhadd+hadd,145,18,doodadminamount.val,0,100,"Minimum number of doodads per face") - doodadmaxamount = Number("Max Amount: ",EVENT_NONE,162+dwadd+wadd,40+dhadd+hadd,145,18,doodadmaxamount.val,0,100,"Maximum number of doodads per face") - EndAlign() - BeginAlign() - doodheightmin = Number("Min Height: ",EVENT_NONE,12+dwadd+wadd,20+dhadd+hadd,145,18,doodheightmin.val,0.0,100.0,"Minimum height of any doodad") - doodheightmax = Number("Max Height: ",EVENT_NONE,162+dwadd+wadd,20+dhadd+hadd,145,18,doodheightmax.val,0.0,100.0,"Maximum height of any doodad") - EndAlign() - BeginAlign() - doodsizemin = Number("Min Size %: ",EVENT_NONE,12+dwadd+wadd,dhadd+hadd,145,18,doodsizemin.val,0.0,100.0,"Minimum size of any doodad in percentage of face") - doodsizemax = Number("Max Size %: ",EVENT_NONE,162+dwadd+wadd,dhadd+hadd,145,18,doodsizemax.val,0.0,100.0,"Maximum size of any doodad in percentage of face") - EndAlign() - - #Materials - colorbox(8+mwadd+wadd,93+mhadd+hadd,312+mwadd+wadd,mhadd-5+hadd) - glColor3f(0.0,0.0,0.0) - glRasterPos2d(12+mwadd+wadd, 83+mhadd+hadd) - Text("Materials:") - 
glRasterPos2d(12+mwadd+wadd, 43+mhadd+hadd) - Text("Assigned Material Indices:") - assignNewMats = Toggle("Assign materials by part",EVENT_NONE,32+mwadd+wadd,60+mhadd+hadd,256,18,assignNewMats.val,"Otherwise, previous materials will be preserved") - replProtSideIndex = Number("Protrusion Sides:",EVENT_NONE,12+mwadd+wadd,20+mhadd+hadd,145,18,replProtSideIndex.val,0,16,"Material index assigned to sides of protrusions") - replProtTopIndex = Number("Protrusion Tops:",EVENT_NONE,162+mwadd+wadd,20+mhadd+hadd,145,18,replProtTopIndex.val,0,16,"Material index assigned to tops of protrusions") - replDoodSideIndex = Number("Doodad Sides:",EVENT_NONE,12+mwadd+wadd,mhadd+hadd,145,18,replDoodSideIndex.val,0,16,"Material index assigned to sides of doodads") - replDoodTopIndex = Number("Doodad Tops:",EVENT_NONE,162+mwadd+wadd,mhadd+hadd,145,18,replDoodTopIndex.val,0,16,"Material index assigned to tops and bottoms of doodads") - - #Global Parts - colorbox(8+gwadd+wadd,35+ghadd+hadd,312+gwadd+wadd,ghadd-5+hadd) - glColor3f(1.0,0.0,0.0) - glRasterPos2d(12+gwadd+wadd,25+ghadd+hadd) - messagetext = Text(errortext) - glColor3f(0.0,0.0,0.0) - makenewobject = Toggle("Copy Before Modifying",EVENT_NONE,162+gwadd+wadd,ghadd+hadd,145,18,makenewobject.val,"If selected, the original object will be copied before it is changed") - Button("Discombobulate",EVENT_DISCOMBOBULATE,12+gwadd+wadd,ghadd+hadd,100,18) - Button("Exit",EVENT_EXIT,120+gwadd+wadd,ghadd+hadd,30,18) - -def event(evt, val): - global wadd - global hadd - - if (evt == RIGHTARROWKEY and val): - wadd = wadd + 20 - Redraw(1) - if (evt == LEFTARROWKEY and val): - wadd = wadd - 20 - Redraw(1) - if (evt == UPARROWKEY and val): - hadd = hadd + 20 - Redraw(1) - if (evt == DOWNARROWKEY and val): - hadd = hadd - 20 - Redraw(1) - if (evt == QKEY and not val): - Exit() - -def bevent(evt): - - #Protrusions - global doprots - global facechange - global minheight - global maxheight - global sub1 - global sub2 - global sub3 - global sub4 - global mintaper - global maxtaper - global useselected - global selface1 - global selface2 - global selface3 - global selface4 - global deselectvertices - #global selectbyverts - - #Doodads - global dodoodads - global doodadfacechange - global seldoodad - global onprot - global dood1 - global dood2 - global dood3 - global dood4 - global dood5 - global dood6 - global doodadminamount - global doodadmaxamount - global doodsizemin - global doodsizemax - global doodheightmin - global doodheightmax - global doodonselface - global seldoodtop - - #Materials - global assignNewMats - global replProtSideIndex - global replProtTopIndex - global replDoodSideIndex - global replDoodTopIndex - - #Global Settings - global makenewobject - global messagetext - global errortext - global EVENT_NONE,EVENT_DRAW,EVENT_EXIT - - ######### Manages GUI events - if evt==EVENT_EXIT: - Exit() - elif evt==EVENT_DISCOMBOBULATE: - Window.WaitCursor(1) - setProtrusionValues(doprots.val,facechange.val/100,minheight.val,maxheight.val,sub1.val,sub2.val,sub3.val,sub4.val,mintaper.val/100,maxtaper.val/100,useselected.val,selface1.val,selface2.val,selface3.val,selface4.val,deselectvertices.val) - setDoodadValues(dodoodads.val,doodadfacechange.val/100,seldoodad.val,onprot.val,dood1.val,dood2.val,dood3.val,dood4.val,dood5.val,dood6.val,doodadminamount.val,doodadmaxamount.val,doodsizemin.val/100,doodsizemax.val/100,doodheightmin.val,doodheightmax.val,doodonselface.val,seldoodtop.val) - 
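# This bevent() handler, together with the module-level Create() calls, draw(),
# event() and the Register() call just below, is the standard Blender 2.4x Draw
# GUI pattern. A stripped-down sketch of that pattern using the same Blender.Draw
# calls as this script (widget positions and values are placeholders):
from Blender import Draw

EV_NONE, EV_RUN, EV_EXIT = 1, 2, 3
amount = Draw.Create(1.0)            # keeps the widget value between redraws

def gui():
    global amount
    amount = Draw.Number("Amount: ", EV_NONE, 10, 35, 200, 18,
                         amount.val, 0.0, 10.0, "Example value")
    Draw.Button("Run",  EV_RUN,  10, 10, 100, 18)
    Draw.Button("Exit", EV_EXIT, 115, 10, 85, 18)

def key_event(evt, val):
    if evt == Draw.QKEY and not val:
        Draw.Exit()

def button_event(evt):
    if evt == EV_EXIT:
        Draw.Exit()
    elif evt == EV_RUN:
        print "amount =", amount.val   # a real script would call its worker here
        Draw.Redraw(1)

Draw.Register(gui, key_event, button_event)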
setOtherValues(makenewobject.val,assignNewMats.val,replProtSideIndex.val,replProtTopIndex.val,replDoodSideIndex.val,replDoodTopIndex.val) - discombobulate() - Window.WaitCursor(0) - Blender.Redraw() - -Register(draw, event, bevent) diff --git a/release/scripts/envelope_symmetry.py b/release/scripts/envelope_symmetry.py deleted file mode 100644 index a72e8c060b4..00000000000 --- a/release/scripts/envelope_symmetry.py +++ /dev/null @@ -1,174 +0,0 @@ -#!BPY - -""" -Name: 'Envelope Symmetry' -Blender: 234 -Group: 'Animation' -Tooltip: 'Make envelope symmetrical' -""" - -__author__ = "Jonas Petersen" -__url__ = ("blender", "blenderartists.org", "Script's homepage, http://www.mindfloaters.de/blender/", "thread at blender.org, http://www.blender.org/modules.php?op=modload&name=phpBB2&file=viewtopic&t=4858 ") -__version__ = "0.9 2004-11-10" -__doc__ = """\ -This script creates perfectly symmetrical envelope sets. It is part of the -envelop assignment tools. - -"Envelopes" are Mesh objects with names following this naming convention: - -: - -Please check the script's homepage and the thread at blender.org (last link button above) for more info. - -For this version users need to edit the script code to change default options. -""" - -# -------------------------------------------------------------------------- -# "Envelope Symmetry" by Jonas Petersen -# Version 0.9 - 10th November 2004 - first public release -# -------------------------------------------------------------------------- -# -# A script for creating perfectly symmetrical envelope sets. It is -# part of the envelope assignment tool. -# -# It is available in Object Mode via the menu item: -# -# Object -> Scripts -> Envelope Symmetry -# -# With default settings it will: -# -# - Look for bones -# -# Find the latest version at: http://www.mindfloaters.de/blender/ -# -# -------------------------------------------------------------------------- -# $Id$ -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Copyright (C) 2004: Jonas Petersen, jonas at mindfloaters dot de -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** - -# -------------------------------------------------------------------------- -# CONFIGURATION -# -------------------------------------------------------------------------- - -# Note: Theses values will later be editable via a gui interface -# within Blender. - -# The suffix for the reference and opposite envelope. -# The configuration of of the opposite envelope will be overwritten by -# the configuration of the reference envelope (shape, position, bone, weight). -# The default is REF_SUFFIX = '.L' and OPP_SUFFIX = '.R'. -REF_SUFFIX = '.R' -OPP_SUFFIX = '.L' - -# MIRROR_AXIS defines the axis in which bones are mirrored/aligned. 
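# MIRROR_AXIS drives the core of the script: the main loop further down copies each
# reference vertex, negates the MIRROR_AXIS coordinate (mir.co[MIRROR_AXIS] *= -1)
# and, when it has to build the opposite mesh, reverses face winding with flipFace()
# so the mirrored normals still point outwards. A minimal pure-Python sketch of that
# idea (plain lists of coordinates and index tuples, no Blender API):
MIRROR_AXIS_DEMO = 0  # 0=X, 1=Y, 2=Z, matching the values documented below

def mirror_verts(verts, axis=MIRROR_AXIS_DEMO):
    out = []
    for co in verts:
        co = list(co)
        co[axis] = -co[axis]
        out.append(co)
    return out

def flip_winding(face):
    # reversing the vertex order flips the face normal (cf. flipFace() below)
    return list(reversed(face))

verts = [[1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [1.0, 1.0, 1.0]]
print mirror_verts(verts), flip_winding([0, 1, 2])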
-# Values: -# 0 for X (default) -# 1 for Y -# 2 for Z -MIRROR_AXIS = 0 - -# SEPARATOR is the character used to delimit the bone name and the weight -# in the envelope name. -SEPARATOR = ":" - -# -------------------------------------------------------------------------- -# END OF CONFIGURATION -# -------------------------------------------------------------------------- - -import Blender, math, sys -from Blender import Mathutils -from BPyNMesh import * - -def flipFace(v): - if len(v) == 3: v[0], v[1], v[2] = v[2], v[1], v[0] - elif len(v) == 4: v[0], v[1], v[2], v[3] = v[3], v[2], v[1], v[0] - -# return object with given object name (with variable parts) and mesh name -def getObjectByName(obj_name, mesh_name): - for obj in Blender.Scene.GetCurrent().objects: - if obj.type == "Mesh": -# if obj.getName()[0:len(obj_name)] == obj_name and obj.getData().name == mesh_name: - # use only mesh_name so bone name and weight (in the envelope name) - # can be changed by the user and mirrored by the script. - if obj.getData(name_only=1) == mesh_name: - return obj - return False - -SUFFIX_LEN = len(REF_SUFFIX); - -Blender.Window.EditMode(0) - -count = 0 -for obj in Blender.Scene.GetCurrent().objects: - if obj.type != 'Mesh': - continue - - count += 1 - name = obj.name - pos = name.find(SEPARATOR) - if (pos > -1): - ApplySizeAndRotation(obj) - - base_name = name[0:pos-SUFFIX_LEN] - suffix = name[pos-SUFFIX_LEN:pos] - weight = name[pos:len(name)] # the SEPARATOR following a float value - - if suffix == REF_SUFFIX: - mesh = obj.getData() - mirror_name = base_name + OPP_SUFFIX + weight - mirror_mesh_name = mesh.name + ".mirror" - - mirror_obj = getObjectByName(base_name + OPP_SUFFIX, mirror_mesh_name) - - if mirror_obj: - - # update vertices - - mirror_mesh = mirror_obj.getData() - for i in xrange(len(mesh.verts)): - org = mesh.verts[i] - mir = mirror_mesh.verts[i] - mir.co[0], mir.co[1], mir.co[2] = org.co[0], org.co[1], org.co[2] - mir.co[MIRROR_AXIS] *= -1 - - mirror_mesh.update() - else: - - # create mirror object - - mirror_mesh = obj.data - for face in mirror_mesh.faces: - flipFace(face.v) - for vert in mirror_mesh.verts: - vert.co[MIRROR_AXIS] *= -1 - - mirror_obj = Blender.NMesh.PutRaw(mirror_mesh, mirror_mesh_name) - - # update name, drawType and location - - mirror_obj.setName(mirror_name) - mirror_obj.drawType = obj.drawType - - loc = [obj.LocX, obj.LocY, obj.LocZ] - loc[MIRROR_AXIS] *= -1 - mirror_obj.setLocation(loc) - -Blender.Window.EditMode(0) diff --git a/release/scripts/export-iv-0.1.py b/release/scripts/export-iv-0.1.py deleted file mode 100644 index 647dd9c5518..00000000000 --- a/release/scripts/export-iv-0.1.py +++ /dev/null @@ -1,304 +0,0 @@ -#!BPY - -""" -Name: 'OpenInventor (.iv)...' -Blender: 236 -Group: 'Export' -Tip: 'Export to OpenInventor file format. (.iv)' -""" -__author__ = ("Radek Barton") -__url__ = ["http://blackhex.no-ip.org/"] -__email__ = ["scripts"] -__version__ = "0.1" - - -__bpydoc__ = """\ -This script exports to the Open Inventor format. - -Usage: - -Run this script from "File->Export" menu. - -Note: -""" -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Script copyright (C) Radek Barton -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# - -import Blender -math_pi= 3.1415926535897931 - -def WriteHeader(file): - file.write("#Inventor V2.1 ascii\n\n") - file.write("Separator\n") - file.write("{\n") - file.write(" ShapeHints\n") - file.write(" {\n") - file.write(" vertexOrdering COUNTERCLOCKWISE\n") - file.write(" }\n") - -def WriteFooter(file): - file.write("}\n") - -def WriteMesh(file, ob): - file.write(" Separator\n") - file.write(" {\n") - file.write(" # %s\n" % ob.name) - WriteMatrix(file, ob) - mesh = ob.getData() - WriteMaterials(file, mesh) - WriteTexture(file, mesh) - WriteNormals(file, mesh) - WriteVertices(file, mesh) - WriteFaces(file, mesh) - file.write(" }\n") - -def WriteMatrix(file, ob): - matrix = ob.getMatrix() - file.write(" MatrixTransform\n") - file.write(" {\n") - file.write(" matrix\n") - for line in matrix: - file.write(" %.6f %.6f %.6f %.6f\n" % (line[0], line[1], line[2], line[3])) - file.write(" }\n") - -def WriteColors(file, mesh): - file.write(" vertexProperty VertexProperty\n") - file.write(" {\n") - file.write(" orderedRGBA\n") - file.write(" [\n") - for face in mesh.faces: - for I in xrange(len(face)): - file.write(" 0x%02x%02x%02x%02x,\n" % (face.col[I].r, - face.col[I].g, face.col[I].b, face.col[I].a)) - file.write(" ]\n") - file.write(" materialBinding PER_VERTEX\n") - file.write(" }\n") - -def WriteMaterials(file, mesh): - if mesh.materials: - file.write(" Material\n") - file.write(" {\n") - file.write(" ambientColor\n") - file.write(" [\n") - for mat in mesh.materials: - file.write(" %.6f %.6f %.6f,\n" % (mat.mirCol[0], mat.mirCol[1], - mat.mirCol[2])) - file.write(" ]\n") - file.write(" diffuseColor\n") - file.write(" [\n") - for mat in mesh.materials: - file.write(" %.6f %.6f %.6f,\n" % (mat.rgbCol[0], mat.rgbCol[1], - mat.rgbCol[2])) - file.write(" ]\n") - file.write(" specularColor\n") - file.write(" [\n") - for mat in mesh.materials: - file.write(" %.6f %.6f %.6f,\n" % (mat.specCol[0] * mat.spec / 2.0, - mat.specCol[1] * mat.spec / 2.0, mat.specCol[2] * mat.spec / 2.0)) - file.write(" ]\n") - file.write(" emissiveColor\n") - file.write(" [\n") - for mat in mesh.materials: - file.write(" %.6f %.6f %.6f,\n" % (mat.rgbCol[0] * mat.emit, - mat.rgbCol[1] * mat.emit, mat.rgbCol[0] * mat.emit)) - file.write(" ]\n") - file.write(" shininess\n") - file.write(" [\n") - for mat in mesh.materials: - file.write(" %.6f,\n" % (mat.hard / 255.0)) - file.write(" ]\n") - file.write(" transparency\n") - file.write(" [\n") - for mat in mesh.materials: - file.write(" %.6f,\n" % (1.0 - mat.alpha)) - file.write(" ]\n") - file.write(" }\n") - file.write(" MaterialBinding\n") - file.write(" {\n") - file.write(" value PER_FACE_INDEXED\n") - file.write(" }\n") - -def WriteTexture(file, mesh): - texture = mesh.faces[0].image # BAD Ju Ju - if texture: - file.write(" Texture2\n") - file.write(" {\n") - file.write(' filename "%s"\n' % texture.getName()) - file.write(" }\n") - file.write(" TextureCoordinate2\n") - file.write(" {\n") - file.write(" point\n") - file.write(" [\n") - if mesh.hasVertexUV(): - for vert in 
mesh.verts: - file.write(" %s %s,\n" % (vert.uvco[0], vert.uvco[1])) - file.write(" ]\n") - file.write(" }\n") - file.write(" TextureCoordinateBinding\n") - file.write(" {\n") - file.write(" value PER_VERTEX_INDEXED\n") - file.write(" }\n") - elif mesh.hasFaceUV(): - for face in mesh.faces: - for uv in face.uv: - file.write(" %.6f %.6f,\n" % (uv[0], uv[1])) - file.write(" ]\n") - file.write(" }\n") - file.write(" TextureCoordinateBinding\n") - file.write(" {\n") - file.write(" value PER_VERTEX\n") - file.write(" }\n") - -def WriteVertices(file, mesh): - file.write(" Coordinate3\n") - file.write(" {\n") - file.write(" point\n") - file.write(" [\n") - for vert in mesh.verts: - file.write(" %.6f %.6f %.6f,\n" % (vert[0], vert[1], vert[2])) - file.write(" ]\n") - file.write(" }\n") - -def WriteNormals(file, mesh): - file.write(" Normal\n") - file.write(" {\n") - file.write(" vector\n") - file.write(" [\n") - - # make copy of vertex normals - normals = [] - for face in mesh.faces: - if len(face.v) in [3, 4]: - if face.smooth: - for v in face.v: - normals.append(v.no) - else: - for v in face.v: - normals.append(face.no) - - # write normals - for no in normals: - file.write(" %.6f %.6f %.6f,\n" % (no[0], no[1], no[2])) - file.write(" ]\n") - file.write(" }\n") - - # write way how normals are binded - file.write(" NormalBinding\n") - file.write(" {\n") - file.write(" value PER_VERTEX\n") - file.write(" }\n") - -def WriteFaces(file, mesh): - file.write(" IndexedFaceSet\n") - file.write(" {\n") - - # write vertex paint - if mesh.hasVertexColours(): - WriteColors(file, mesh) - - # write material indexes - file.write(" materialIndex\n") - file.write(" [\n") - for face in mesh.faces: - file.write(" %i,\n" % face.mat); - file.write(" ]\n") - - # write faces with coordinate indexes - file.write(" coordIndex\n") - file.write(" [\n") - for face in mesh.faces: - face_v= face.v - if len(face_v) == 3: - file.write(" %i, %i, %i, -1,\n" % (face_v[0].index, - face_v[1].index, face_v[2].index)) - elif len(face_v) == 4: - file.write(" %i, %i, %i, %i, -1,\n" % (face_v[0].index, - face_v[1].index, face_v[2].index, face_v[3].index)) - file.write(" ]\n") - file.write(" }\n") - - -def WriteCamera(file, ob): - camera = ob.getData(); - # perspective camera - if camera.type == 0: - file.write(" PerspectiveCamera\n") - file.write(" {\n") - file.write(" nearDistance %s\n" % (camera.clipStart)) - file.write(" farDistance %s\n" % (camera.clipEnd)) - file.write(" }\n") - # ortho camera - else: - print camera.type - -def WriteLamp(file, ob): - lamp = ob.getData(); - # spot lamp - if lamp.type == 2: - file.write(" SpotLight\n") - file.write(" {\n") - file.write(" intensity %s\n" % (lamp.energy / 10.0)) - file.write(" color %s %s %s\n" % (lamp.col[0], lamp.col[1], lamp.col[2])) - #file.write(" location %s\n" % ()) - #file.write(" direction %s\n" % ()) - file.write(" dropOffRate %s\n" % (lamp.spotBlend)) - file.write(" cutOffAngle %s\n" % (lamp.spotSize * math_pi / 180.0)) - file.write(" }\n") - -# script main function -def ExportToIv(file_name): - scene = Blender.Scene.GetCurrent() - file = open(file_name, "w") - - # make lists of individual ob types - meshes = [] - lamps = [] - cameras = [] - for ob in scene.objects: - obtype= ob.type - if obtype == "Mesh": - meshes.append(ob); - #elif obtype == "Lamp": - # lamps.append(ob); - #elif obtype == "Camera": - # cameras.append(ob); - #else: - # print "Exporting %s objects isn't supported!" 
% ob.type - - # write header, footer and groups of ob types - WriteHeader(file); - #for camera in cameras: - # WriteCamera(file, camera); - #for lamp in lamps: - # WriteLamp(file, lamp) - for mesh in meshes: - WriteMesh(file, mesh) - WriteFooter(file) - - file.close() - -def FileSelectorCB(file_name): - if not file_name.lower().endswith('.iv'): - file_name += '.iv' - ExportToIv(file_name) - -if __name__ == '__main__': - Blender.Window.FileSelector(FileSelectorCB, "Export IV", Blender.sys.makename(ext='.iv')) diff --git a/release/scripts/export_dxf.py b/release/scripts/export_dxf.py deleted file mode 100644 index 17f2132fbe8..00000000000 --- a/release/scripts/export_dxf.py +++ /dev/null @@ -1,3041 +0,0 @@ -#!BPY - -""" - Name: 'Autodesk DXF (.dxf/dwg)' - Blender: 249 - Group: 'Export' - Tooltip: 'Export geometry to DXF/DWG-r12 (Drawing eXchange Format).' -""" - -__version__ = "1.35 - 2009.06.18" -__author__ = "Remigiusz Fiedler (AKA migius)" -__license__ = "GPL" -__url__ = "http://wiki.blender.org/index.php/Scripts/Manual/Export/autodesk_dxf" -__bpydoc__ ="""The script exports Blender geometry to DXF format r12 version. - -Version %s -Copyright %s -License %s - -extern dependances: dxfLibrary.py, dxfColorMap.py (optionaly: DConvertCon.exe) - -CONTRIBUTORS: -Remigiusz Fiedler (AKA migius) -Alexandros Sigalas (AKA alxarch) -Stani Michiels (AKA stani) - -See the homepage for documentation. -url: %s - -IDEAs: -- HPGL output, usefull for correct scaled printing of 2d drawings - -TODO: -- export dupligroups and dupliverts as blocks (as option) -- optimize POLYFACE routine: remove double-vertices -- fix support for X,Y-rotated curves(to POLYLINEs): fix blender negative-matrix.invert() -- support hierarchies: groups, instances, parented structures -- support n/f-gons as POLYFACEs with invisible edges -- mapping materials to DXF-styles -- ProgressBar -- export rotation of Camera to VIEW/VPORT -- export parented Cameras to VIEW/VPORT -- wip: write drawing extends for automatic view positioning in CAD -- wip: fix text-objects in persp-projection -- wip: translate current 3D-View to *ACTIVE-VPORT -- wip: fix support Include-Duplis, cause not conform with INSERT-method - -History -v1.35 - 2009.06.18 by migius -- export multiple-instances of Curve-Objects as BLOCK/INSERTs -- added export Cameras (ortho and persp) to VPORTs, incl. clipping -- added export Cameras (ortho and persp) to VIEWs, incl. 
clipping -- export multiple-instances of Mesh-Objects as BLOCK/INSERTs -- on start prints dxfLibrary version -v1.34 - 2009.06.08 by migius -- export Lamps and Cameras as POINTs -- export passepartout for perspective projection -- added option for export objects only from visible layers -- optimized POLYFACE output: remove loose vertices in back-faces-mode -- cleaning code -- fix nasty bug in getExtrusion() -- support text-objects, also in ortho/persp-projection -- support XYmirrored 2d-curves to 2dPOLYLINEs -- support thickness and elevation for curve-objects -- fix extrusion 210-code (3d orientation vector) -- fix POLYFACE export, synchronized also dxfLibrary.py -- changed to the new 2.49 method Vector.cross() -- output style manager (first try) -v1.33 - 2009.05.25 by migius -- bugfix flipping normals in mirrored mesh-objects -- added UI-Button for future Shadow Generator -- support curve objects in projection-2d mode -- UI stuff: camera selector/manager -v1.32 - 2009.05.22 by migius -- debug mode for curve-objects: output redirect to Blender -- wip support 210-code(extrusion) calculation -- default settings for 2D and 3D export -v1.31 - 2009.05.18 by migius -- globals translated to GUI_A/B dictionary -- optimizing back-faces removal for "hidden-lines" mode -- presets for global location and scale (architecture) -- UI layout: scrollbars, pan with MMB/WHEEL, dynamic width -- new GUI with Draw.Register() from DXF-importer.py -v1.30 - 2008.12.14 by migius -- started work on GUI with Draw.Register() -v1.29 - 2009.04.11 by stani -- added DWG support, Stani Michiels idea for binding an extern DXF-DWG-converter -v1.28 - 2009.02.05 by Alexandros Sigalas (alxarch) -- added option to apply modifiers on exported meshes -- added option to also export duplicates (from dupliverts etc) -v1.28 - 2008.10.22 by migius -- workaround for PVert-bug on ubuntu (reported by Yorik) -- add support for FGons - ignore invisible_tagged edges -- add support for camera: ortho and perspective -v1.27 - 2008.10.07 by migius -- exclude Stani's DXF-Library to extern module -v1.26 - 2008.10.05 by migius -- add "hidden mode" substitut: back-faces removal -- add support for mesh ->POLYFACE -- optimized code for "Flat" procedure -v1.25 - 2008.09.28 by migius -- modif FACE class for r12 -- add mesh-polygon -> Bezier-curve converter (Yorik's code) -- add support for curves ->POLYLINEs -- add "3d-View to Flat" - geometry projection to XY-plane -v1.24 - 2008.09.27 by migius -- add start UI with preferences -- modif POLYLINE class for r12 -- changing output format from r9 to r12(AC1009) -v1.23 - 2008.09.26 by migius -- add finish message-box -v1.22 - 2008.09.26 by migius -- add support for curves ->LINEs -- add support for mesh-edges ->LINEs -v1.21 - 2008.06.04 by migius -- initial adaptation for Blender -v1.1 (20/6/2005) by Stani Michiels www.stani.be/python/sdxf -- Python library to generate dxf drawings -______________________________________________________________ -""" % (__author__,__version__,__license__,__url__) - -# -------------------------------------------------------------------------- -# Script copyright (C) 2008 Remigiusz Fiedler (AKA migius) -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** - - -import Blender -from Blender import Mathutils, Window, Scene, Draw, Camera, BezTriple -from Blender import Registry, Object, Mesh, Curve -import os -import subprocess - -try: - import dxfLibrary as DXF - #reload(DXF) - #reload(dxfLibrary) - #from dxfLibrary import * -except: - DXF=None - print "DXF-Exporter: error! found no dxfLibrary.py module in Blender script folder" - Draw.PupMenu("Error%t|found no dxfLibrary.py module in script folder") - - -import math -from math import atan, atan2, log10, sin, cos - -#pi = math.pi -#pi = 3.14159265359 -r2d = 180.0 / math.pi -d2r = math.pi / 180.0 -#note: d2r * angle == math.radians(angle) -#note: r2d * angle == math.degrees(angle) - - -#DEBUG = True #activates debug mode - - -#----globals------------------------------------------ -ONLYSELECTED = 1 # 0/1 = False/True -ONLYVISIBLE = 1 # ignore objects on invisible layers -POLYLINES = 1 # prefer POLYLINEs not LINEs -POLYFACES = 1 # prefer POLYFACEs not 3DFACEs -PROJECTION = 0 # output geometry will be projected to XYplane with Z=0.0 -HIDDEN_LINES = 0 #filter out hidden geometry -SHADOWS = 0 # sun/shadows simulation -CAMERA = 1 # selected camera index -PERSPECTIVE = 0 # projection (camera) type: perspective, opposite to orthographic -CAMERAVIEW = 0 # use camera for projection, opposite is 3d-view -INSTANCES = 1 # Export instances of Mesh/Curve as BLOCK/INSERTs on/off -APPLY_MODIFIERS = 1 -INCLUDE_DUPLIS = 0 -OUTPUT_DWG = 0 #optional save to DWG with extern converter - -G_SCALE = 1.0 #(0.0001-1000) global scaling factor for output dxf data -G_ORIGIN = [0.0,0.0,0.0] #global translation-vector (x,y,z) in Blender units -ELEVATION = 0.0 #standard elevation = coordinate Z value in Blender units - -BYBLOCK = 0 #DXF-attribute: assign property to BLOCK defaults -BYLAYER = None #256 #DXF-attribute: assign property to LAYER defaults -PREFIX = 'BF_' #used as prefix for DXF names -LAYERNAME_DEF = '' #default layer name -LAYERCOLOR_DEF = 7 #default layer color index -LAYERLTYPE_DEF = 0 #'CONTINUOUS' - default layer lineType -ENTITYLAYER_DEF = LAYERNAME_DEF #default entity color index -ENTITYCOLOR_DEF = BYLAYER #default entity color index -ENTITYLTYPE_DEF = BYLAYER #default entity lineType - -E_M = 0 -LAB = "scroll MMB/WHEEL . wip .. todo" #"*) parts under construction" -M_OBJ = 0 - -FILENAME_MAX = 180 #max length of path+file_name string (FILE_MAXDIR + FILE_MAXFILE) -NAMELENGTH_MAX = 80 #max_obnamelength in DXF, (limited to 256? 
) -INIFILE_DEFAULT_NAME = 'exportDXF' -INIFILE_EXTENSION = '.ini' -INIFILE_HEADER = '#ExportDXF.py ver.1.0 config data' -INFFILE_HEADER = '#ExportDXF.py ver.1.0 analyze of DXF-data' - -BLOCKREGISTRY = {} # registry and map for BLOCKs -SCENE = None -WORLDX = Mathutils.Vector((1,0,0)) -WORLDY = Mathutils.Vector((0,1,0)) -WORLDZ = Mathutils.Vector((0,0,1)) - -AUTO = BezTriple.HandleTypes.AUTO -FREE = BezTriple.HandleTypes.FREE -VECT = BezTriple.HandleTypes.VECT -ALIGN = BezTriple.HandleTypes.ALIGN - - -#-------- DWG support ------------------------------------------ -extCONV_OK = True -extCONV = 'DConvertCon.exe' -extCONV_PATH = os.path.join(Blender.Get('scriptsdir'),extCONV) -if not os.path.isfile(extCONV_PATH): - extCONV_OK = False - extCONV_TEXT = 'DWG-Exporter: Abort, nothing done!|\ -Copy first %s into Blender script directory.|\ -More details in online Help.' %extCONV -else: - if not os.sys.platform.startswith('win'): - # check if Wine installed: - if subprocess.Popen(('which', 'winepath'), stdout=subprocess.PIPE).stdout.read().strip(): - extCONV_PATH = 'wine %s'%extCONV_PATH - else: - extCONV_OK = False - extCONV_TEXT = 'DWG-Exporter: Abort, nothing done!|\ -The external DWG-converter (%s) needs Wine installed on your system.|\ -More details in online Help.' %extCONV -#print 'extCONV_PATH = ', extCONV_PATH - - -#---------------------------------------------- -def updateMenuCAMERA(): - global CAMERAS - global MenuCAMERA - global MenuLIGHT - - scn = Scene.GetCurrent() - objs = scn.getChildren() - currcam = scn.getCurrentCamera() - if currcam: currcam = currcam.getName() - maincams = [] - MenuCAMERA = "Select Camera%t" - for cam in objs: - if cam.getType() == 'Camera': - if cam.getName()[0:4] != "Temp": - maincams.append(cam.getName()) - maincams.sort() - maincams.reverse() - CAMERAS = maincams - for i, cam in enumerate(CAMERAS): - if cam==currcam: - MenuCAMERA += "|* " + cam - else: MenuCAMERA += "| " + cam - MenuCAMERA += "|current 3d-View" - MenuLIGHT = "Select Sun%t| *todo" - - -#---------------------------------------------- -def updateCAMERA(): - global CAMERA, GUI_A - #CAMERA = 1 - scn = Scene.GetCurrent() - currcam = scn.getCurrentCamera() - if currcam: currcam = currcam.getName() - if currcam in CAMERAS: - CAMERA = CAMERAS.index(currcam)+1 - GUI_A['camera_selected'].val = CAMERA - -#---------------------------------------------- -def gotoCAMERA(): - cam = Object.Get(CAMERAS[CAMERA-1]) - #print 'deb: CAMERA, cam',CAMERA, cam - if cam.getType() != 'Camera': - sure = Draw.PupMenu("Info: %t| It is not a Camera Object.") - else: - scn = Scene.getCurrent() - scn.setCurrentCamera(cam) - Window.CameraView(0) - Window.Redraw() - updateMenuCAMERA() - - -#------- Duplicates support ---------------------------------------------- -def dupTest(object): - """ - Checks objects for duplicates enabled (any type) - object: Blender Object. - Returns: Boolean - True if object has any kind of duplicates enabled. 
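# The extCONV/Wine block earlier in this file illustrates a general pattern: locate
# an optional external converter beside the scripts and, on non-Windows systems, run
# it through Wine only if Wine is actually present. A self-contained sketch of that
# check (the tool name and return convention here are placeholders, not the
# exporter's real configuration):
import os, subprocess

def find_converter(directory, exe_name="SomeConverter.exe"):
    path = os.path.join(directory, exe_name)
    if not os.path.isfile(path):
        return None                                   # converter not installed
    if not os.sys.platform.startswith('win'):
        # same Wine probe as above: 'which winepath' prints a path when Wine exists
        probe = subprocess.Popen(('which', 'winepath'), stdout=subprocess.PIPE)
        if not probe.stdout.read().strip():
            return None                               # no Wine, converter unusable
        path = 'wine %s' % path
    return path

print find_converter(os.getcwd())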
- """ - if (object.enableDupFrames or \ - object.enableDupGroup or \ - object.enableDupVerts): - return True - else: - return False - -def getObjectsAndDuplis(oblist,MATRICES=False,HACK=False): - """ - Return a list of real objects and duplicates and optionally their matrices - oblist: List of Blender Objects - MATRICES: Boolean - Check to also get the objects matrices, default=False - HACK: Boolean - See note, default=False - Returns: List of objects or - List of tuples of the form:(ob,matrix) if MATRICES is set to True - NOTE: There is an ugly hack here that excludes all objects whose name - starts with "dpl_" to exclude objects that are parented to a duplicating - object, User must name objects properly if hack is used. - """ - - result = [] - for ob in oblist: - if INCLUDE_DUPLIS and dupTest(ob): - dup_obs=ob.DupObjects - if len(dup_obs): - for dup_ob, dup_mx in dup_obs: - if MATRICES: - result.append((dup_ob,dup_mx)) - else: - result.append(dup_ob) - else: - if HACK: - if ob.getName()[0:4] != "dpl_": - if MATRICES: - mx = ob.mat - result.append((ob,mx)) - else: - result.append(ob) - else: - if MATRICES: - mx = ob.mat - result.append((ob,mx)) - else: - result.append(ob) - return result - -#----------------------------------------------------- -def hidden_status(faces, mx, mx_n): - # sort out back-faces = with normals pointed away from camera - #print 'HIDDEN_LINES: caution! not full implemented yet' - front_faces = [] - front_edges = [] - for f in faces: - #print 'deb: face=', f #--------- - #print 'deb: dir(face)=', dir(f) #--------- - # get its normal-vector in localCS - vec_normal = f.no.copy() - #print 'deb: vec_normal=', vec_normal #------------------ - # must be transfered to camera/view-CS - vec_normal *= mx_n - #vec_normal *= mb.rotationPart() - #print 'deb:2vec_normal=', vec_normal #------------------ - #vec_normal *= mw0.rotationPart() - #print 'deb:3vec_normal=', vec_normal, '\n' #------------------ - - - frontFace = False - if not PERSPECTIVE: #for ortho mode ---------- - # normal must point the Z direction-hemisphere - if vec_normal[2] > 0.00001: - frontFace = True - else: - v = f.verts[0] - vert = Mathutils.Vector(v.co) * mx - if Mathutils.DotVecs(vert, vec_normal) < 0.00001: - frontFace = True - - if frontFace: - front_faces.append(f.index) - for key in f.edge_keys: - #this test can be done faster with set() - if key not in front_edges: - front_edges.append(key) - - #print 'deb: amount of visible faces=', len(front_faces) #--------- - #print 'deb: visible faces=', front_faces #--------- - #print 'deb: amount of visible edges=', len(front_edges) #--------- - #print 'deb: visible edges=', front_edges #--------- - return front_faces, front_edges - - -#---- migration to 2.49------------------------------------------------- -if 'cross' in dir(Mathutils.Vector()): - #Draw.PupMenu('DXF exporter: Abort%t|This script version works for Blender up 2.49 only!') - def M_CrossVecs(v1,v2): - return v1.cross(v2) #for up2.49 - def M_DotVecs(v1,v2): - return v1.dot(v2) #for up2.49 -else: - def M_CrossVecs(v1,v2): - return Mathutils.CrossVecs(v1,v2) #for pre2.49 - def M_DotVecs(v1,v2): - return Mathutils.DotVecs(v1,v2) #for pre2.49 - - -#----------------------------------------------------- -def getExtrusion(matrix): - """calculates DXF-Extrusion = Arbitrary Xaxis and Zaxis vectors - - """ - AZaxis = matrix[2].copy().resize3D().normalize() # = ArbitraryZvector - Extrusion = [AZaxis[0],AZaxis[1],AZaxis[2]] - if AZaxis[2]==1.0: - Extrusion = None - AXaxis = matrix[0].copy().resize3D() # = 
ArbitraryXvector - else: - threshold = 1.0 / 64.0 - if abs(AZaxis[0]) < threshold and abs(AZaxis[1]) < threshold: - # AXaxis is the intersection WorldPlane and ExtrusionPlane - AXaxis = M_CrossVecs(WORLDY,AZaxis) - else: - AXaxis = M_CrossVecs(WORLDZ,AZaxis) - #print 'deb:\n' #------------- - #print 'deb:getExtrusion() Extrusion=', Extrusion #--------- - return Extrusion, AXaxis.normalize() - - -#----------------------------------------------------- -def getZRotation(AXaxis, rot_matrix_invert): - """calculates ZRotation = angle between ArbitraryXvector and obj.matrix.Xaxis - - """ - # this works: Xaxis is the obj.matrix-Xaxis vector - # but not correct for all orientations - #Xaxis = matrix[0].copy().resize3D() # = ArbitraryXvector - ##Xaxis.normalize() # = ArbitraryXvector - #ZRotation = - Mathutils.AngleBetweenVecs(Xaxis,AXaxis) #output in radians - - # this works for all orientations, maybe a bit faster - # transform AXaxis into OCS:Object-Coord-System - #rot_matrix = normalizeMat(matrix.rotationPart()) - #rot_matrix_invert = rot_matrix.invert() - vec = AXaxis * rot_matrix_invert - ##vec = AXaxis * matrix.copy().invert() - ##vec.normalize() # not needed for atan2() - #print '\ndeb:getExtrusion() vec=', vec #--------- - ZRotation = - atan2(vec[1],vec[0]) #output in radians - - #print 'deb:ZRotation() ZRotation=', ZRotation*r2d #--------- - return ZRotation - - -#------------------------------------------ -def normalizeMat(matrix): - mat12 = matrix.copy() - mat12 = [Mathutils.Vector(v).normalize() for v in mat12] - if len(mat12)>3: - matr12 = Mathutils.Matrix(mat12[0],mat12[1],mat12[2],mat12[3]) - else: - matr12 = Mathutils.Matrix(mat12[0],mat12[1],mat12[2]) - return matr12 - - -#----------------------------------------------------- -def projected_co(verts, matrix): - """ converts coordinates of points from OCS to WCS->ScreenCS - needs matrix: a projection matrix - needs verts: a list of vectors[x,y,z] - returns a list of [x,y,z] - """ - #print 'deb:projected_co() verts=', verts #--------- - temp_verts = [Mathutils.Vector(v)*matrix for v in verts] - #print 'deb:projected_co() temp_verts=', temp_verts #--------- - - if GUI_A['Z_force_on'].val: locZ = GUI_A['Z_elev'].val - else: locZ = 0.0 - - if PROJECTION: - if PERSPECTIVE: - clipStart = 10.0 - for v in temp_verts: - coef = - clipStart / v[2] - v[0] *= coef - v[1] *= coef - v[2] = locZ - for v in temp_verts: - v[2] = locZ - temp_verts = [v[:3] for v in temp_verts] - #print 'deb:projected_co() out_verts=', temp_verts #--------- - return temp_verts - - -#----------------------------------------------------- -def isLeftHand(matrix): - #Is the matrix a left-hand-system, or not? - ma = matrix.rotationPart() - crossXY = M_CrossVecs(ma[0], ma[1]) - check = M_DotVecs(ma[2], crossXY) - if check < 0.00001: return 1 - return 0 - - -#----------------------------------------------------- -def exportMesh(ob, mx, mx_n, me=None, **common): - """converts Mesh-Object to desired projection and representation(DXF-Entity type) - """ - global BLOCKREGISTRY - entities = [] - block = None - #print 'deb:exportMesh() given common=', common #--------- - if me==None: - me = ob.getData(mesh=1) - else: - me.getFromObject(ob) - # idea: me.transform(mx); get verts data; me.transform(mx_inv)= back to the origin state - # the .transform-method is fast, but bad, cause invasive: - # it manipulates original geometry and by retransformation lefts back rounding-errors - # we dont want to manipulate original data! - #temp_verts = me.verts[:] #doesn't work on ubuntu(Yorik), bug? 
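# getExtrusion() above implements the DXF "arbitrary axis algorithm": the entity's
# local Z axis (group codes 210/220/230) is the object matrix Z axis, and the local
# X axis is built by crossing a world axis with it -- world Y when the normal is
# nearly parallel to world Z (|Nx| and |Ny| below 1/64), world Z otherwise. A small
# pure-Python sketch of that rule (no Blender API; cross/normalize are local helpers):
def cross(a, b):
    return [a[1]*b[2] - a[2]*b[1],
            a[2]*b[0] - a[0]*b[2],
            a[0]*b[1] - a[1]*b[0]]

def normalize(v):
    l = (v[0]*v[0] + v[1]*v[1] + v[2]*v[2]) ** 0.5
    return [v[0]/l, v[1]/l, v[2]/l]

def arbitrary_x_axis(normal):
    # normal = extrusion direction, assumed unit length
    if abs(normal[0]) < 1.0/64.0 and abs(normal[1]) < 1.0/64.0:
        ax = cross([0.0, 1.0, 0.0], normal)   # world Y x N
    else:
        ax = cross([0.0, 0.0, 1.0], normal)   # world Z x N
    return normalize(ax)

print arbitrary_x_axis([0.0, 0.0, 1.0])   # -> [1.0, 0.0, 0.0]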
- if me.verts: - #print 'deb:exportMesh() started' #--------- - - #print 'deb:exportMesh() ob.name=', ob.name #--------- - #print 'deb:exportMesh() me.name=', me.name #--------- - #print 'deb:exportMesh() me.users=', me.users #--------- - # check if there are more instances of this mesh (if used by other objects), then write to BLOCK/INSERT - if GUI_A['instances_on'].val and me.users>1 and not PROJECTION: - if me.name in BLOCKREGISTRY.keys(): - insert_name = BLOCKREGISTRY[me.name] - # write INSERT to entities - entities = exportInsert(ob, mx,insert_name, **common) - else: - # generate geom_output in ObjectCS - allpoints = [v.co for v in me.verts] - identity_matrix = Mathutils.Matrix().identity() - allpoints = projected_co(allpoints, identity_matrix) - #allpoints = toGlobalOrigin(allpoints) - faces=[] - edges=[] - for e in me.edges: edges.append(e.key) - faces = [[v.index for v in f.verts] for f in me.faces] - entities = writeMeshEntities(allpoints, edges, faces, **common) - if entities: # if not empty block - # write BLOCK definition and INSERT entity - # BLOCKREGISTRY = dictionary 'blender_name':'dxf_name'.append(me.name) - BLOCKREGISTRY[me.name]=validDXFr12name(('ME_'+ me.name)) - insert_name = BLOCKREGISTRY[me.name] - block = DXF.Block(insert_name,flag=0,base=(0,0,0),entities=entities) - # write INSERT as entity - entities = exportInsert(ob, mx, insert_name, **common) - - else: # no other instances, so go the standard way - allpoints = [v.co for v in me.verts] - allpoints = projected_co(allpoints, mx) - allpoints = toGlobalOrigin(allpoints) - faces=[] - edges=[] - if me.faces and PROJECTION and HIDDEN_LINES: - #if DEBUG: print 'deb:exportMesh HIDDEN_LINES mode' #--------- - faces, edges = hidden_status(me.faces, mx, mx_n) - faces = [[v.index for v in me.faces[f_nr].verts] for f_nr in faces] - else: - #if DEBUG: print 'deb:exportMesh STANDARD mode' #--------- - for e in me.edges: edges.append(e.key) - #faces = [f.index for f in me.faces] - faces = [[v.index for v in f.verts] for f in me.faces] - #faces = [[allpoints[v.index] for v in f.verts] for f in me.faces] - #print 'deb: allpoints=\n', allpoints #--------- - #print 'deb: edges=\n', edges #--------- - #print 'deb: faces=\n', faces #--------- - if isLeftHand(mx): # then change vertex-order in every face - for f in faces: - f.reverse() - #f = [f[-1]] + f[:-1] #TODO: might be needed - #print 'deb: faces=\n', faces #--------- - entities = writeMeshEntities(allpoints, edges, faces, **common) - - return entities, block - - -#------------------------------------------------- -def writeMeshEntities(allpoints, edges, faces, **common): - """help routine for exportMesh() - """ - entities = [] - - c = mesh_as_list[GUI_A['mesh_as'].val] - if 'POINTs'==c: # export Mesh as multiple POINTs - for p in allpoints: - dxfPOINT = DXF.Point(points=[p],**common) - entities.append(dxfPOINT) - elif 'LINEs'==c or (not faces): - if edges and allpoints: - if DEBUG: mesh_drawBlender(allpoints, edges, None) #deb: draw to blender scene - for e in edges: - points = [allpoints[e[0]], allpoints[e[1]]] - dxfLINE = DXF.Line(points, **common) - entities.append(dxfLINE) - elif faces: - if c in ('POLYFACE','POLYLINE'): - if allpoints: - #TODO: purge allpoints: left only vertices used by faces - if DEBUG: mesh_drawBlender(allpoints, None, faces) #deb: draw to scene - if not (PROJECTION and HIDDEN_LINES): - faces = [[v+1 for v in f] for f in faces] - else: - # for back-Faces-mode remove face-free verts - map=verts_state= [0]*len(allpoints) - for f in faces: - for v in f: - 
verts_state[v]=1 - if 0 in verts_state: # if dirty state - i,newverts=0,[] - for used_i,used in enumerate(verts_state): - if used: - newverts.append(allpoints[used_i]) - map[used_i]=i - i+=1 - allpoints = newverts - faces = [[map[v]+1 for v in f] for f in faces] - dxfPOLYFACE = DXF.PolyLine([allpoints, faces], flag=64, **common) - #print '\n deb: dxfPOLYFACE=',dxfPOLYFACE #------------- - entities.append(dxfPOLYFACE) - elif '3DFACEs'==c: - if DEBUG: mesh_drawBlender(allpoints, None, faces) #deb: draw to scene - for f in faces: - #print 'deb: face=', f #--------- - points = [allpoints[key] for key in f] - #points = [p.co[:3] for p in points] - #print 'deb: pointsXX=\n', points #--------- - dxfFACE = DXF.Face(points, **common) - entities.append(dxfFACE) - - return entities - - -#----------------------------------------------------- -def mesh_drawBlender(vertList, edgeList, faceList, name="dxfMesh", flatten=False, AT_CUR=True, link=True): - #print 'deb:mesh_drawBlender started XXXXXXXXXXXXXXXXXX' #--------- - ob = Object.New("Mesh",name) - me = Mesh.New(name) - #print 'deb: vertList=\n', vertList #--------- - #print 'deb: edgeList=\n', edgeList #--------- - #print 'deb: faceList=\n', faceList #--------- - me.verts.extend(vertList) - if edgeList: me.edges.extend(edgeList) - if faceList: me.faces.extend(faceList) - if flatten: - for v in me.verts: v.co.z = 0.0 - ob.link(me) - if link: - sce = Scene.getCurrent() - sce.objects.link(ob) - #me.triangleToQuad() - if AT_CUR: - cur_loc = Window.GetCursorPos() - ob.setLocation(cur_loc) - Blender.Redraw() - #return ob - -#----------------------------------------------------- -def curve_drawBlender(vertList, org_point=[0.0,0.0,0.0], closed=0, name="dxfCurve", flatten=False, AT_CUR=True, link=True): - #print 'deb:curve_drawBlender started XXXXXXXXXXXXXXXXXX' #--------- - ob = Object.New("Curve",name) - cu = Curve.New(name) - #print 'deb: vertList=\n', vertList #--------- - curve = cu.appendNurb(BezTriple.New(vertList[0])) - for p in vertList[1:]: - curve.append(BezTriple.New(p)) - for point in curve: - #point.handleTypes = [VECT, VECT] - point.handleTypes = [FREE, FREE] - point.radius = 1.0 - curve.flagU = closed # 0 sets the curve not cyclic=open - cu.setResolu(6) - cu.update() #important for handles calculation - if flatten: - for v in cu.verts: v.co.z = 0.0 - ob.link(cu) - if link: - sce = Scene.getCurrent() - sce.objects.link(ob) - #me.triangleToQuad() - if AT_CUR: - cur_loc = Window.GetCursorPos() - ob.setLocation(cur_loc) - elif org_point: - cur_loc=org_point - ob.setLocation(cur_loc) - Blender.Redraw() - #return ob - - -#----------------------------------------------------- -def toGlobalOrigin(points): - """relocates points to the new location - needs a list of points [x,y,z] - """ - if GUI_A['g_origin_on'].val: - for p in points: - p[0] += G_ORIGIN[0] - p[1] += G_ORIGIN[1] - p[2] += G_ORIGIN[2] - return points - - -#----------------------------------------------------- -def exportEmpty(ob, mx, mw, **common): - """converts Empty-Object to desired projection and representation(DXF-Entity type) - """ - p = Mathutils.Vector(ob.loc) - [p] = projected_co([p], mx) - [p] = toGlobalOrigin([p]) - - entities = [] - c = empty_as_list[GUI_A['empty_as'].val] - if c=="POINT": # export Empty as POINT - dxfPOINT = DXF.Point(points=[p],**common) - entities.append(dxfPOINT) - return entities - -#----------------------------------------------------- -def exportCamera(ob, mx, mw, **common): - """converts Camera-Object to desired projection and 
representation(DXF-Entity type) - """ - location = Mathutils.Vector(ob.loc) - [location] = projected_co([location], mx) - [location] = toGlobalOrigin([location]) - view_name=validDXFr12name(('CAM_'+ ob.name)) - - camera = Camera.Get(ob.getData(name_only=True)) - #print 'deb: camera=', dir(camera) #------------------ - if camera.type=='persp': - mode = 1+2+4+16 - # mode flags: 1=persp, 2=frontclip, 4=backclip,16=FrontZ - elif camera.type=='ortho': - mode = 0+2+4+16 - - leftBottom=(0.0,0.0) # default - rightTop=(1.0,1.0) # default - center=(0.0,0.0) # default - - direction = Mathutils.Vector(0.0,0.0,1.0) * mx.rotationPart() # in W-C-S - direction.normalize() - target=Mathutils.Vector(ob.loc) - direction # in W-C-S - #ratio=1.0 - width=height= camera.scale # for ortho-camera - lens = camera.lens # for persp-camera - frontClipping = -(camera.clipStart - 1.0) - backClipping = -(camera.clipEnd - 1.0) - - entities, vport, view = [], None, None - c = camera_as_list[GUI_A['camera_as'].val] - if c=="POINT": # export as POINT - dxfPOINT = DXF.Point(points=[location],**common) - entities.append(dxfPOINT) - elif c=="VIEW": # export as VIEW - view = DXF.View(name=view_name, - center=center, width=width, height=height, - frontClipping=frontClipping,backClipping=backClipping, - direction=direction,target=target,lens=lens,mode=mode - ) - elif c=="VPORT": # export as VPORT - vport = DXF.VPort(name=view_name, - center=center, ratio=1.0, height=height, - frontClipping=frontClipping,backClipping=backClipping, - direction=direction,target=target,lens=lens,mode=mode - ) - return entities, vport, view - -#----------------------------------------------------- -def exportLamp(ob, mx, mw, **common): - """converts Lamp-Object to desired projection and representation(DXF-Entity type) - """ - p = Mathutils.Vector(ob.loc) - [p] = projected_co([p], mx) - [p] = toGlobalOrigin([p]) - - entities = [] - c = lamp_as_list[GUI_A['lamp_as'].val] - if c=="POINT": # export as POINT - dxfPOINT = DXF.Point(points=[p],**common) - entities.append(dxfPOINT) - return entities - -#----------------------------------------------------- -def exportInsert(ob, mx, insert_name, **common): - """converts Object to DXF-INSERT in given orientation - """ - WCS_loc = ob.loc # WCS_loc is object location in WorldCoordSystem - sizeX = ob.SizeX - sizeY = ob.SizeY - sizeZ = ob.SizeZ - rotX = ob.RotX - rotY = ob.RotY - rotZ = ob.RotZ - #print 'deb: sizeX=%s, sizeY=%s' %(sizeX, sizeY) #--------- - - Thickness,Extrusion,ZRotation,Elevation = None,None,None,None - - AXaxis = mx[0].copy().resize3D() # = ArbitraryXvector - if not PROJECTION: - #Extrusion, ZRotation, Elevation = getExtrusion(mx) - Extrusion, AXaxis = getExtrusion(mx) - - entities = [] - - if 1: - if not PROJECTION: - ZRotation,Zrotmatrix,OCS_origin,ECS_origin = getTargetOrientation(mx,Extrusion,\ - AXaxis,WCS_loc,sizeX,sizeY,sizeZ,rotX,rotY,rotZ) - ZRotation *= r2d - point = ECS_origin - else: #TODO: fails correct location - point1 = Mathutils.Vector(ob.loc) - [point] = projected_co([point1], mx) - if PERSPECTIVE: - clipStart = 10.0 - coef = -clipStart / (point1*mx)[2] - #print 'deb: coef=', coef #-------------- - #TODO: ? 
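A few lines up, exportCamera() derives the VIEW/VPORT direction by pushing the camera's local +Z axis through its rotation (with the 2.4x row-vector convention that is simply the third row of the 3x3 rotation part), normalising it, and taking the target as the camera location minus that unit vector. A standalone sketch of that derivation with plain nested lists:

def camera_direction_and_target(loc, rot3):
    # (0, 0, 1) * rot3 in row-vector convention == third row of the matrix
    d = list(rot3[2])
    length = (d[0] ** 2 + d[1] ** 2 + d[2] ** 2) ** 0.5
    d = [c / length for c in d]
    target = [loc[i] - d[i] for i in range(3)]
    return d, target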
sizeX *= coef - #sizeY *= coef - #sizeZ *= coef - - #print 'deb: point=', point #-------------- - [point] = toGlobalOrigin([point]) - - #if DEBUG: text_drawBlender(textstr,points,OCS_origin) #deb: draw to scene - common['extrusion']= Extrusion - #common['elevation']= Elevation - #print 'deb: common=', common #------------------ - if 0: #DEBUG - #linepoints = [[0,0,0], [AXaxis[0],AXaxis[1],AXaxis[2]]] - linepoints = [[0,0,0], point] - dxfLINE = DXF.Line(linepoints,**common) - entities.append(dxfLINE) - - xscale=sizeX - yscale=sizeY - zscale=sizeZ - cols=None - colspacing=None - rows=None - rowspacing=None - - dxfINSERT = DXF.Insert(insert_name,point=point,rotation=ZRotation,\ - xscale=xscale,yscale=yscale,zscale=zscale,\ - cols=cols,colspacing=colspacing,rows=rows,rowspacing=rowspacing,\ - **common) - entities.append(dxfINSERT) - - return entities - - -#----------------------------------------------------- -def exportText(ob, mx, mw, **common): - """converts Text-Object to desired projection and representation(DXF-Entity type) - """ - text3d = ob.getData() - textstr = text3d.getText() - WCS_loc = ob.loc # WCS_loc is object location in WorldCoordSystem - sizeX = ob.SizeX - sizeY = ob.SizeY - sizeZ = ob.SizeZ - rotX = ob.RotX - rotY = ob.RotY - rotZ = ob.RotZ - #print 'deb: sizeX=%s, sizeY=%s' %(sizeX, sizeY) #--------- - - Thickness,Extrusion,ZRotation,Elevation = None,None,None,None - - AXaxis = mx[0].copy().resize3D() # = ArbitraryXvector - if not PROJECTION: - #Extrusion, ZRotation, Elevation = getExtrusion(mx) - Extrusion, AXaxis = getExtrusion(mx) - - # no thickness/width for TEXTs converted into ScreenCS - if text3d.getExtrudeDepth(): - Thickness = text3d.getExtrudeDepth() * sizeZ - - #Horizontal text justification type, code 72, (optional, default = 0) - # integer codes (not bit-coded) - #0=left, 1=center, 2=right - #3=aligned, 4=middle, 5=fit - Alignment = None - alignment = text3d.getAlignment().value - if alignment in (1,2): Alignment = alignment - - textHeight = text3d.getSize() / 1.7 - textFlag = 0 - if sizeX < 0.0: textFlag |= 2 # set flag for horizontal mirrored - if sizeZ < 0.0: textFlag |= 4 # vertical mirrored - - entities = [] - c = text_as_list[GUI_A['text_as'].val] - - if c=="TEXT": # export text as TEXT - if not PROJECTION: - ZRotation,Zrotmatrix,OCS_origin,ECS_origin = getTargetOrientation(mx,Extrusion,\ - AXaxis,WCS_loc,sizeX,sizeY,sizeZ,rotX,rotY,rotZ) - ZRotation *= r2d - point = ECS_origin - else: #TODO: fails correct location - point1 = Mathutils.Vector(ob.loc) - [point] = projected_co([point1], mx) - if PERSPECTIVE: - clipStart = 10.0 - coef = -clipStart / (point1*mx)[2] - textHeight *= coef - #print 'deb: coef=', coef #-------------- - - #print 'deb: point=', point #-------------- - [point] = toGlobalOrigin([point]) - point2 = point - - #if DEBUG: text_drawBlender(textstr,points,OCS_origin) #deb: draw to scene - common['extrusion']= Extrusion - #common['elevation']= Elevation - common['thickness']= Thickness - #print 'deb: common=', common #------------------ - if 0: #DEBUG - #linepoints = [[0,0,0], [AXaxis[0],AXaxis[1],AXaxis[2]]] - linepoints = [[0,0,0], point] - dxfLINE = DXF.Line(linepoints,**common) - entities.append(dxfLINE) - - dxfTEXT = DXF.Text(text=textstr,point=point,alignment=point2,rotation=ZRotation,\ - flag=textFlag,height=textHeight,justifyhor=Alignment,**common) - entities.append(dxfTEXT) - if Thickness: - common['thickness']= -Thickness - dxfTEXT = DXF.Text(text=textstr,point=point,alignment=point2,rotation=ZRotation,\ - 
flag=textFlag,height=textHeight,justifyhor=Alignment,**common) - entities.append(dxfTEXT) - return entities - - -#------------------------------------------- -def euler2matrix(rx, ry, rz): - """creates full 3D rotation matrix (optimized) - needs rx, ry, rz angles in radians - """ - #print 'rx, ry, rz: ', rx, ry, rz - A, B = sin(rx), cos(rx) - C, D = sin(ry), cos(ry) - E, F = sin(rz), cos(rz) - AC, BC = A*C, B*C - return Mathutils.Matrix([D*F, D*E, -C], - [AC*F-B*E, AC*E+B*F, A*D], - [BC*F+A*E, BC*E-A*F, B*D]) - - -#----------------------------------------------------- -def getTargetOrientation(mx,Extrusion,AXaxis,WCS_loc,sizeX,sizeY,sizeZ,rotX,rotY,rotZ): - """given - """ - if 1: - rot_matrix = normalizeMat(mx.rotationPart()) - #TODO: workaround for blender negative-matrix.invert() - # partially done: works only for rotX,rotY==0.0 - if sizeX<0.0: rot_matrix[0] *= -1 - if sizeY<0.0: rot_matrix[1] *= -1 - #if sizeZ<0.0: rot_matrix[2] *= -1 - rot_matrix_invert = rot_matrix.invert() - else: #TODO: to check, why below rot_matrix_invert is not equal above one - rot_euler_matrix = euler2matrix(rotX,rotY,rotZ) - rot_matrix_invert = euler2matrix(-rotX,-rotY,-rotZ) - - # OCS_origin is Global_Origin in ObjectCoordSystem - OCS_origin = Mathutils.Vector(WCS_loc) * rot_matrix_invert - #print 'deb: OCS_origin=', OCS_origin #--------- - - ZRotation = rotZ - if Extrusion!=None: - ZRotation = getZRotation(AXaxis,rot_matrix_invert) - #Zrotmatrix = Mathutils.RotationMatrix(-ZRotation, 3, "Z") - rs, rc = sin(ZRotation), cos(ZRotation) - Zrotmatrix = Mathutils.Matrix([rc, rs,0.0],[-rs,rc,0.0],[0.0,0.0,1.0]) - #print 'deb: Zrotmatrix=\n', Zrotmatrix #-------------- - - # ECS_origin is Global_Origin in EntityCoordSystem - ECS_origin = OCS_origin * Zrotmatrix - #print 'deb: ECS_origin=', ECS_origin #--------- - #TODO: it doesnt work yet for negative scaled curve-objects! 
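The hand-expanded terms in euler2matrix() above are the product Rx*Ry*Rz for the row-vector convention the 2.4x API uses (v * M), i.e. rotate about X, then Y, then Z. A plain-Python reference that can be used to spot-check those terms, with nested lists standing in for Mathutils.Matrix:

from math import sin, cos

def _rot_x(a): return [[1, 0, 0], [0, cos(a), sin(a)], [0, -sin(a), cos(a)]]
def _rot_y(a): return [[cos(a), 0, -sin(a)], [0, 1, 0], [sin(a), 0, cos(a)]]
def _rot_z(a): return [[cos(a), sin(a), 0], [-sin(a), cos(a), 0], [0, 0, 1]]

def _mat_mul(m, n):
    return [[sum(m[i][k] * n[k][j] for k in range(3)) for j in range(3)]
            for i in range(3)]

def euler_to_matrix_reference(rx, ry, rz):
    # same result as euler2matrix(rx, ry, rz), built from the single-axis matrices
    return _mat_mul(_mat_mul(_rot_x(rx), _rot_y(ry)), _rot_z(rz))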
- return ZRotation,Zrotmatrix,OCS_origin,ECS_origin - - -#----------------------------------------------------- -def exportCurve(ob, mx, mw, **common): - """converts Curve-Object to desired projection and representation(DXF-Entity type) - """ - entities = [] - block = None - curve = ob.getData() - #print 'deb: curve=', dir(curve) #--------- - # TODO: should be: if curve.users>1 and not (PERSPECTIVE or (PROJECTION and HIDDEN_MODE): - if GUI_A['instances_on'].val and curve.users>1 and not PROJECTION: - if curve.name in BLOCKREGISTRY.keys(): - insert_name = BLOCKREGISTRY[curve.name] - # write INSERT to entities - entities = exportInsert(ob, mx,insert_name, **common) - else: - # generate geom_output in ObjectCS - imx = Mathutils.Matrix().identity() - WCS_loc = [0,0,0] # WCS_loc is object location in WorldCoordSystem - #print 'deb: WCS_loc=', WCS_loc #--------- - sizeX = sizeY = sizeZ = 1.0 - rotX = rotY = rotZ = 0.0 - Thickness,Extrusion,ZRotation,Elevation = None,None,None,None - ZRotation,Zrotmatrix,OCS_origin,ECS_origin = None,None,None,None - AXaxis = imx[0].copy().resize3D() # = ArbitraryXvector - OCS_origin = [0,0,0] - if not PROJECTION: - #Extrusion, ZRotation, Elevation = getExtrusion(mx) - Extrusion, AXaxis = getExtrusion(imx) - - # no thickness/width for POLYLINEs converted into Screen-C-S - #print 'deb: curve.ext1=', curve.ext1 #--------- - if curve.ext1: Thickness = curve.ext1 * sizeZ - if curve.ext2 and sizeX==sizeY: - Width = curve.ext2 * sizeX - if "POLYLINE"==curve_as_list[GUI_A['curve_as'].val]: # export as POLYLINE - ZRotation,Zrotmatrix,OCS_origin,ECS_origin = getTargetOrientation(imx,Extrusion,\ - AXaxis,WCS_loc,sizeX,sizeY,sizeZ,rotX,rotY,rotZ) - - entities = writeCurveEntities(curve, imx, - Thickness,Extrusion,ZRotation,Elevation,AXaxis,Zrotmatrix, - WCS_loc,OCS_origin,ECS_origin,sizeX,sizeY,sizeZ, - **common) - - if entities: # if not empty block - # write BLOCK definition and INSERT entity - # BLOCKREGISTRY = dictionary 'blender_name':'dxf_name'.append(me.name) - BLOCKREGISTRY[curve.name]=validDXFr12name(('CU_'+ curve.name)) - insert_name = BLOCKREGISTRY[curve.name] - block = DXF.Block(insert_name,flag=0,base=(0,0,0),entities=entities) - # write INSERT as entity - entities = exportInsert(ob, mx, insert_name, **common) - - else: # no other instances, so go the standard way - WCS_loc = ob.loc # WCS_loc is object location in WorldCoordSystem - #print 'deb: WCS_loc=', WCS_loc #--------- - sizeX = ob.SizeX - sizeY = ob.SizeY - sizeZ = ob.SizeZ - rotX = ob.RotX - rotY = ob.RotY - rotZ = ob.RotZ - #print 'deb: sizeX=%s, sizeY=%s' %(sizeX, sizeY) #--------- - - Thickness,Extrusion,ZRotation,Elevation = None,None,None,None - ZRotation,Zrotmatrix,OCS_origin,ECS_origin = None,None,None,None - AXaxis = mx[0].copy().resize3D() # = ArbitraryXvector - OCS_origin = [0,0,0] - if not PROJECTION: - #Extrusion, ZRotation, Elevation = getExtrusion(mx) - Extrusion, AXaxis = getExtrusion(mx) - - # no thickness/width for POLYLINEs converted into Screen-C-S - #print 'deb: curve.ext1=', curve.ext1 #--------- - if curve.ext1: Thickness = curve.ext1 * sizeZ - if curve.ext2 and sizeX==sizeY: - Width = curve.ext2 * sizeX - if "POLYLINE"==curve_as_list[GUI_A['curve_as'].val]: # export as POLYLINE - ZRotation,Zrotmatrix,OCS_origin,ECS_origin = getTargetOrientation(mx,Extrusion,\ - AXaxis,WCS_loc,sizeX,sizeY,sizeZ,rotX,rotY,rotZ) - entities = writeCurveEntities(curve, mx, - Thickness,Extrusion,ZRotation,Elevation,AXaxis,Zrotmatrix, - WCS_loc,OCS_origin,ECS_origin,sizeX,sizeY,sizeZ, - **common) - - 
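Both exportMesh() and exportCurve() above share the same instancing pattern: the first time a multi-user datablock is met its geometry is written into a BLOCK and recorded in BLOCKREGISTRY, and every later object that uses it only emits an INSERT. Stripped of the DXF specifics, the pattern is roughly this (make_block and make_insert are hypothetical stand-ins for the DXF.Block/DXF.Insert calls):

def export_with_instances(registry, data_name, prefix, make_block, make_insert):
    """Return (entities, new_block); new_block is None when the BLOCK already exists."""
    if data_name in registry:
        # datablock seen before: just reference the existing BLOCK
        return [make_insert(registry[data_name])], None
    block_name = prefix + data_name      # the script also runs this through validDXFr12name()
    registry[data_name] = block_name
    return [make_insert(block_name)], make_block(block_name)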
return entities, block - - -#------------------------------------------------- -def writeCurveEntities(curve, mx, - Thickness,Extrusion,ZRotation,Elevation,AXaxis,Zrotmatrix, - WCS_loc,OCS_origin,ECS_origin,sizeX,sizeY,sizeZ, - **common): - """help routine for exportCurve() - """ - entities = [] - - if 1: - for cur in curve: - #print 'deb: START cur=', cur #-------------- - points = [] - if cur.isNurb(): - for point in cur: - #print 'deb:isNurb point=', point #--------- - vec = point[0:3] - #print 'deb: vec=', vec #--------- - pkt = Mathutils.Vector(vec) - #print 'deb: pkt=', pkt #--------- - points.append(pkt) - else: - for point in cur: - #print 'deb:isBezier point=', point.getTriple() #--------- - vec = point.getTriple()[1] - #print 'deb: vec=', vec #--------- - pkt = Mathutils.Vector(vec) - #print 'deb: pkt=', pkt #--------- - points.append(pkt) - - #print 'deb: points', points #-------------- - if len(points)>1: - c = curve_as_list[GUI_A['curve_as'].val] - - if c=="POLYLINE": # export Curve as POLYLINE - if not PROJECTION: - # recalculate points(2d=X,Y) into Entity-Coords-System - for p in points: # list of vectors - p[0] *= sizeX - p[1] *= sizeY - p2 = p * Zrotmatrix - p2[0] += ECS_origin[0] - p2[1] += ECS_origin[1] - p[0],p[1] = p2[0],p2[1] - else: - points = projected_co(points, mx) - #print 'deb: points', points #-------------- - - if cur.isCyclic(): closed = 1 - else: closed = 0 - points = toGlobalOrigin(points) - - if DEBUG: curve_drawBlender(points,OCS_origin,closed) #deb: draw to scene - - common['extrusion']= Extrusion - ##common['rotation']= ZRotation - ##common['elevation']= Elevation - common['thickness']= Thickness - #print 'deb: common=', common #------------------ - - if 0: #DEBUG - p=AXaxis[:3] - entities.append(DXF.Line([[0,0,0], p],**common)) - p=ECS_origin[:3] - entities.append(DXF.Line([[0,0,0], p],**common)) - common['color']= 5 - p=OCS_origin[:3] - entities.append(DXF.Line([[0,0,0], p],**common)) - #OCS_origin=[0,0,0] #only debug---------------- - dxfPLINE = DXF.PolyLine(points,OCS_origin,closed,**common) - entities.append(dxfPLINE) - - dxfPLINE = DXF.PolyLine(points,OCS_origin,closed,**common) - entities.append(dxfPLINE) - if Thickness: - common['thickness']= -Thickness - dxfPLINE = DXF.PolyLine(points,OCS_origin,closed,**common) - entities.append(dxfPLINE) - - elif c=="LINEs": # export Curve as multiple LINEs - points = projected_co(points, mx) - if cur.isCyclic(): points.append(points[0]) - #print 'deb: points', points #-------------- - points = toGlobalOrigin(points) - - if DEBUG: curve_drawBlender(points,WCS_loc,closed) #deb: draw to scene - common['extrusion']= Extrusion - common['elevation']= Elevation - common['thickness']= Thickness - #print 'deb: common=', common #------------------ - for i in range(len(points)-1): - linepoints = [points[i], points[i+1]] - dxfLINE = DXF.Line(linepoints,**common) - entities.append(dxfLINE) - if Thickness: - common['thickness']= -Thickness - for i in range(len(points)-1): - linepoints = [points[i], points[i+1]] - dxfLINE = DXF.Line(linepoints,**common) - entities.append(dxfLINE) - - elif c=="POINTs": # export Curve as multiple POINTs - points = projected_co(points, mx) - for p in points: - dxfPOINT = DXF.Point(points=[p],**common) - entities.append(dxfPOINT) - return entities - - -#----------------------------------------------------- -def getClipBox(camera): - """calculates Field-of-View-Clipping-Box of given Camera - returns clip_box: a list of vertices - returns matr: translation matrix - """ - sce = Scene.GetCurrent() 
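In the POLYLINE branch of writeCurveEntities() above, each control point is mapped into the entity coordinate system by scaling with the object size, rotating about Z with the matrix built in getTargetOrientation(), and shifting by ECS_origin. The same per-point arithmetic written out without Mathutils (angle in radians, points as (x, y) tuples):

from math import sin, cos

def to_entity_cs(points, size_x, size_y, z_rotation, ecs_origin):
    rs, rc = sin(z_rotation), cos(z_rotation)
    out = []
    for x, y in points:
        x, y = x * size_x, y * size_y
        # row vector times [[rc, rs, 0], [-rs, rc, 0], [0, 0, 1]]
        out.append((x * rc - y * rs + ecs_origin[0],
                    x * rs + y * rc + ecs_origin[1]))
    return out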
- context = sce.getRenderingContext() - #print 'deb: context=\n', context #------------------ - sizeX = context.sizeX - sizeY = context.sizeY - ratioXY = sizeX/float(sizeY) - #print 'deb: size X,Y, ratio=', sizeX, sizeY, ratioXY #------------------ - - clip1_Z = - camera.clipStart - clip2_Z = - camera.clipEnd - #print 'deb: clip Start=', camera.clipStart #------------------ - #print 'deb: clip End=', camera.clipEnd #------------------ - - if camera.type=='ortho': - scale = camera.scale - #print 'deb: camscale=', scale #------------------ - clip1shiftX = clip2shiftX = camera.shiftX * scale - clip1shiftY = clip2shiftY = camera.shiftY * scale - clip1_X = scale * 0.5 - clip1_Y = scale * 0.5 - if ratioXY > 1.0: clip1_Y /= ratioXY - else: clip1_X *= ratioXY - clip2_X = clip1_X - clip2_Y = clip1_Y - - near = clip1_Z - far = clip2_Z - right, left = clip1_X, -clip1_X - top, bottom = clip1_Y, -clip1_Y - - scaleX = 2.0/float(right - left) - x3 = -float(right + left)/float(right - left) - scaleY = 2.0/float(top - bottom) - y3 = -float(top + bottom)/float(top - bottom) - scaleZ = 1.0/float(far - near) - z3 = -float(near)/float(far - near) - - matrix = Mathutils.Matrix( [scaleX, 0.0, 0.0, x3], - [0.0, scaleY, 0.0, y3], - [0.0, 0.0, scaleZ, z3], - [0.0, 0.0, 0.0, 1.0]) - - elif camera.type=='persp': - #viewpoint = [0.0, 0.0, 0.0] #camera's coordinate system, hehe - #lens = camera.lens - angle = camera.angle - #print 'deb: cam angle=', angle #------------------ - shiftX = camera.shiftX - shiftY = camera.shiftY - fov_coef = atan(angle * d2r) - fov_coef *= 1.3 #incl. passpartou - clip1_k = clip1_Z * fov_coef - clip2_k = clip2_Z * fov_coef - clip1shiftX = - camera.shiftX * clip1_k - clip2shiftX = - camera.shiftX * clip2_k - clip1shiftY = - camera.shiftY * clip1_k - clip2shiftY = - camera.shiftY * clip2_k - clip1_X = clip1_Y = clip1_k * 0.5 - clip2_X = clip2_Y = clip2_k * 0.5 - if ratioXY > 1.0: - clip1_Y /= ratioXY - clip2_Y /= ratioXY - else: - clip1_X *= ratioXY - clip2_X *= ratioXY - - near = clip1_Z - far = clip2_Z - right, left = clip1_X, -clip1_X - top, bottom = clip1_Y, -clip1_Y - #return Matrix( [scaleX, 0.0, x2, 0.0], - #[0.0, scaleY, y2, 0.0], - #[0.0, 0.0, scaleZ, wZ], - #[0.0, 0.0, -1.0, 0.0]) - matrix = Mathutils.Matrix( [(2.0 * near)/float(right - left), 0.0, float(right + left)/float(right - left), 0.0], - [0.0, (2.0 * near)/float(top - bottom), float(top + bottom)/float(top - bottom), 0.0], - [0.0, 0.0, -float(far + near)/float(far - near), -(2.0 * far * near)/float(far - near)], - [0.0, 0.0, -1.0, 0.0]) - - - clip_box = [ - -clip1_X + clip1shiftX, clip1_X + clip1shiftX, - -clip1_Y + clip1shiftY, clip1_Y + clip1shiftY, - -clip2_X + clip2shiftX, clip2_X + clip2shiftX, - -clip2_Y + clip2shiftY, clip2_Y + clip2shiftY, - clip1_Z, clip2_Z] - #print 'deb: clip_box=\n', clip_box #------------------ - #drawClipBox(clip_box) - return clip_box, matrix - - -#----------------------------------------------------- -def drawClipBox(clip_box): - """debug tool: draws Clipping-Box of a Camera View - """ - min_X1, max_X1, min_Y1, max_Y1,\ - min_X2, max_X2, min_Y2, max_Y2,\ - min_Z, max_Z = clip_box - verts = [] - verts.append([min_X1, min_Y1, min_Z]) - verts.append([max_X1, min_Y1, min_Z]) - verts.append([max_X1, max_Y1, min_Z]) - verts.append([min_X1, max_Y1, min_Z]) - verts.append([min_X2, min_Y2, max_Z]) - verts.append([max_X2, min_Y2, max_Z]) - verts.append([max_X2, max_Y2, max_Z]) - verts.append([min_X2, max_Y2, max_Z]) - faces = [[0,1,2,3],[4,5,6,7]] - newmesh = Mesh.New() - newmesh.verts.extend(verts) - 
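The orthographic branch of getClipBox() above assembles a standard ortho normalisation matrix from the camera scale and the render aspect ratio; the rows it builds correspond to this helper (left/right/top/bottom/near/far as computed in the script):

def ortho_matrix(left, right, bottom, top, near, far):
    return [[2.0 / (right - left), 0.0, 0.0, -(right + left) / (right - left)],
            [0.0, 2.0 / (top - bottom), 0.0, -(top + bottom) / (top - bottom)],
            [0.0, 0.0, 1.0 / (far - near), -near / (far - near)],
            [0.0, 0.0, 0.0, 1.0]]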
newmesh.faces.extend(faces) - - plan = Object.New('Mesh','clip_box') - plan.link(newmesh) - sce = Scene.GetCurrent() - sce.objects.link(plan) - plan.setMatrix(sce.objects.camera.matrix) - - -#------------------------------------------------- -def getCommons(ob): - """set up common attributes for output style: - color=None - extrusion=None - layer='0', - lineType=None - lineTypeScale=None - lineWeight=None - thickness=None - parent=None - """ - - layers = ob.layers #gives a list e.g.[1,5,19] - if layers: ob_layer_nr = layers[0] - #print 'ob_layer_nr=', ob_layer_nr #-------------- - - materials = ob.getMaterials() - if materials: - ob_material = materials[0] - ob_mat_color = ob_material.rgbCol - else: ob_mat_color, ob_material = None, None - #print 'ob_mat_color, ob_material=', ob_mat_color, ob_material #-------------- - - data = ob.getData() - data_materials = ob.getMaterials() - if data_materials: - data_material = data_materials[0] - data_mat_color = data_material.rgbCol - else: data_mat_color, data_material = None, None - #print 'data_mat_color, data_material=', data_mat_color, data_material #-------------- - - entitylayer = ENTITYLAYER_DEF - c = entitylayer_from_list[GUI_A['entitylayer_from'].val] - #["default_LAYER","obj.name","obj.layer","obj.material","obj.data.name","obj.data.material","..vertexgroup","..group","..map_table"] - if c=="default_LAYER": - entitylayer = LAYERNAME_DEF - elif c=="obj.layer" and ob_layer_nr: - entitylayer = 'LAYER'+ str(ob_layer_nr) - elif c=="obj.material" and ob_material: - entitylayer = ob_material.name - elif c=="obj.name": - entitylayer = ob.name - elif c=="obj.data.material" and ob_material: - entitylayer = data_material.name - elif c=="obj.data.name": - entitylayer = data.name - entitylayer = validDXFr12name(PREFIX+entitylayer) - if entitylayer=="": entitylayer = "BF_0" - - entitycolor = ENTITYCOLOR_DEF - c = entitycolor_from_list[GUI_A['entitycolor_from'].val] - if c=="default_COLOR": - entitycolor = LAYERCOLOR_DEF - elif c=="BYLAYER": - entitycolor = BYLAYER - elif c=="BYBLOCK": - entitycolor = BYBLOCK - elif c=="obj.layer" and ob_layer_nr: - entitycolor = ob_layer_nr - elif c=="obj.color" and ob.color: - entitycolor = col2DXF(ob.color) - elif c=="obj.material" and ob_mat_color: - entitycolor = col2DXF(ob_mat_color) - elif c=="obj.data.material" and data_mat_color: - entitycolor = col2DXF(data_mat_color) - #if entitycolor!=None: layercolor = entitycolor - - entityltype = ENTITYLTYPE_DEF - c = entityltype_from_list[GUI_A['entityltype_from'].val] - if c=="default_LTYPE": - entityltype = LAYERLTYPE_DEF - elif c=="BYLAYER": - entityltype = BYLAYER - elif c=="BYBLOCK": - entityltype = BYBLOCK - elif c: - entityltype = c - - return entitylayer,entitycolor,entityltype - - -#----------------------------------------------------- -def do_export(export_list, filepath): - global PERSPECTIVE, CAMERAVIEW, BLOCKREGISTRY - Window.WaitCursor(1) - t = Blender.sys.time() - - # init Drawing --------------------- - d=DXF.Drawing() - # add Tables ----------------- - # initialized automatic: d.blocks.append(b) #section BLOCKS - # initialized automatic: d.styles.append(DXF.Style()) #table STYLE - - #table LTYPE --------------- - #d.linetypes.append(DXF.LineType(name='CONTINUOUS',description='--------',elements=[0.0])) - d.linetypes.append(DXF.LineType(name='DOT',description='. . . . . . 
.',elements=[0.25, 0.0, -0.25])) - d.linetypes.append(DXF.LineType(name='DASHED',description='__ __ __ __ __',elements=[0.8, 0.5, -0.3])) - d.linetypes.append(DXF.LineType(name='DASHDOT',description='__ . __ . __ .',elements=[1.0, 0.5, -0.25, 0.0, -0.25])) - d.linetypes.append(DXF.LineType(name='DIVIDE',description='____ . . ____ . . ',elements=[1.25, 0.5, -0.25, 0.0, -0.25, 0.0, -0.25])) - d.linetypes.append(DXF.LineType(name='BORDER',description='__ __ . __ __ . ',elements=[1.75, 0.5, -0.25, 0.5, -0.25, 0.0, -0.25])) - d.linetypes.append(DXF.LineType(name='HIDDEN',description='__ __ __ __ __',elements=[0.4, 0.25, -0.25])) - d.linetypes.append(DXF.LineType(name='CENTER',description='____ _ ____ _ __',elements=[2.0, 1.25, -0.25, 0.25, -0.25])) - - #d.vports.append(DXF.VPort('*ACTIVE')) - d.vports.append(DXF.VPort('*ACTIVE',center=(-5.0,1.0),height=10.0)) - #d.vports.append(DXF.VPort('*ACTIVE',leftBottom=(-100.0,-60.0),rightTop=(100.0,60.0))) - #d.views.append(DXF.View('Normal')) #table view - d.views.append(DXF.ViewByWindow('BF_TOPVIEW',leftBottom=(-100,-60),rightTop=(100,60))) #idem - - # add Entities -------------------- - BLOCKREGISTRY = {} # registry and map for BLOCKs - PERSPECTIVE = 0 - something_ready = 0 - selected_len = len(export_list) - sce = Scene.GetCurrent() - - mw = Mathutils.Matrix( [1.0, 0.0, 0.0, 0.0], - [0.0, 1.0, 0.0, 0.0], - [0.0, 0.0, 1.0, 0.0], - [0.0, 0.0, 0.0, 1.0]) - if PROJECTION: - if CAMERA ',\n' - # replace: '{' -> '\n{\n' - # replace: '}' -> '\n}\n' - output_str = ',\n'.join(output_str.split(',')) - output_str = '\n}'.join(output_str.split('}')) - output_str = '{\n'.join(output_str.split('{')) - try: - f = file(iniFile, 'w') - f.write(INIFILE_HEADER + '\n# this is a comment line\n') - f.write(output_str) - f.close() - #Draw.PupMenu('DXF-Exporter: INI-file: Done!%t|config-data saved in ' + '\'%s\'' %iniFile) - except: - Draw.PupMenu('DXF-Exporter: INI-file: Error!%t|failure by writing to ' + '\'%s\'|no config-data saved!' %iniFile) - - else: - Draw.PupMenu('DXF-Exporter: INI-file: Alert!%t|no valid name/extension for INI-file selected!') - print "DXF-Exporter: Alert!: no valid INI-file selected." - if not iniFile: - if dxfFileName.val.lower().endswith('.dxf'): - iniFileName.val = dxfFileName.val[0:-4] + INIFILE_EXTENSION - - -def loadConfig(): #remi--todo----------------------------------------------- - """Load settings/config/materials from INI-file. - - TODO: Read material-assignements from config-file. - """ - #20070724 buggy Window.FileSelector(loadConfigFile, 'Load config data from INI-file', inifilename) - global iniFileName, GUI_A, GUI_B - - iniFile = iniFileName.val - update_RegistryKey('iniFileName', iniFile) - #print 'deb:loadConfig iniFile: ', iniFile #---------------------- - if iniFile.lower().endswith(INIFILE_EXTENSION) and Blender.sys.exists(iniFile): - f = file(iniFile, 'r') - header_str = f.readline() - if header_str.startswith(INIFILE_HEADER): - data_str = f.read() - f.close() - #print 'deb:loadConfig data_str from %s: \n' %iniFile , data_str #----------------- - data = eval(data_str) - for k, v in data[0].iteritems(): - try: GUI_A[k].val = v - except: GUI_A[k] = Draw.Create(v) - for k, v in data[1].iteritems(): - try: GUI_B[k].val = v - except: GUI_B[k] = Draw.Create(v) - else: - f.close() - Draw.PupMenu('DXF-Exporter: INI-file: Alert!%t|no valid header in INI-file: ' + '\'%s\'' %iniFile) - else: - Draw.PupMenu('DXF-Exporter: INI-file: Alert!%t|no valid INI-file selected!') - print "DXF-Exporter: Alert!: no valid INI-file selected." 
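saveConfig() and loadConfig() above round-trip the GUI state as the repr() of two dicts behind a header line, read back with eval(). A minimal sketch of that scheme (the header string and file handling are simplified here; the script additionally reflows braces onto their own lines and stores Draw.Create values):

INI_HEADER = '#EXPORT-DXF config'   # stands in for the script's INIFILE_HEADER

def save_settings(path, keywords, draw_types):
    body = repr((keywords, draw_types)).replace(',', ',\n')
    f = open(path, 'w')
    f.write(INI_HEADER + '\n' + body)
    f.close()

def load_settings(path):
    f = open(path, 'r')
    header, body = f.readline(), f.read()
    f.close()
    if not header.startswith(INI_HEADER):
        return None
    return eval(body)   # the exporter trusts its own INI files here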
- if not iniFileName: - if dxfFileName.val.lower().endswith('.dxf'): - iniFileName.val = dxfFileName.val[0:-4] + INIFILE_EXTENSION - - - -def updateConfig(keywords, drawTypes): #----------------------------------------------- - """updates GUI_settings with given dictionaries - - """ - global GUI_A, GUI_B - #print 'deb:lresetDefaultConfig keywords_org: \n', keywords_org #--------- - for k, v in keywords.iteritems(): - GUI_A[k].val = v - for k, v in drawTypes.iteritems(): - GUI_B[k].val = v - -def resetDefaultConfig(): #----------------------------------------------- - """Resets settings/config/materials to defaults. - - """ - #print 'deb:lresetDefaultConfig keywords_org: \n', keywords_org #--------- - updateConfig(keywords_org, drawTypes_org) - - -def presetConfig_polyline(activate): #----------------------------------------------- - """Sets settings/config for polygon representation: POLYLINE(FACE) or LINEs/3DFACEs. - - """ - global GUI_A - if activate: - GUI_A['to_polyline_on'].val = 1 - GUI_A['mesh_as'].val = 1 - GUI_A['curve_as'].val = 1 - else: - GUI_A['to_polyline_on'].val = 0 - GUI_A['mesh_as'].val = 0 - GUI_A['curve_as'].val = 0 - -def resetDefaultConfig_2D(): #----------------------------------------------- - """Sets settings/config/materials to defaults 2D. - - """ - keywords2d = { - 'projection_on' : 1, - 'fill_on' : 1, - 'text_as' : 0, - 'group_as' : 0, - } - - drawTypes2d = { - 'bmesh' : 1, - 'bcurve': 1, - 'surface':0, - 'bmeta' : 0, - 'text' : 1, - 'empty' : 1, - 'group' : 1, - 'parent' : 1, - #'proxy' : 0, - #'camera': 0, - #'lamp' : 0, - - } - presetConfig_polyline(1) - updateConfig(keywords2d, drawTypes2d) - -def resetDefaultConfig_3D(): #----------------------------------------------- - """Sets settings/config/materials to defaults 3D. 
- - """ - keywords3d = { - 'projection_on' : 0, - 'fill_on' : 0, - 'text_as' : 0, - 'group_as' : 0, - } - - drawTypes3d = { - 'bmesh' : 1, - 'bcurve': 1, - 'surface':0, - 'bmeta' : 0, - 'text' : 0, - 'empty' : 1, - 'group' : 1, - 'parent' : 1, - #'proxy' : 0, - #'camera': 1, - #'lamp' : 1, - } - presetConfig_polyline(1) - updateConfig(keywords3d, drawTypes3d) - - -def inputGlobalScale(): - """Pop-up UI-Block for global scale factor - """ - global GUI_A - #print 'deb:inputGlobalScale ##########' #------------ - x_scale = Draw.Create(GUI_A['g_scale'].val) - block = [] - #block.append("global translation vector:") - block.append(("", x_scale, 0.0, 10000000.0)) - - retval = Draw.PupBlock("set global scale factor:", block) - - GUI_A['g_scale'].val = float(x_scale.val) - - -def inputOriginVector(): - """Pop-up UI-Block for global translation vector - """ - global GUI_A - #print 'deb:inputOriginVector ##########' #------------ - x_origin = Draw.Create(GUI_A['g_originX'].val) - y_origin = Draw.Create(GUI_A['g_originY'].val) - z_origin = Draw.Create(GUI_A['g_originZ'].val) - block = [] - #block.append("global translation vector:") - block.append(("X: ", x_origin, -100000000.0, 100000000.0)) - block.append(("Y: ", y_origin, -100000000.0, 100000000.0)) - block.append(("Z: ", z_origin, -100000000.0, 100000000.0)) - - retval = Draw.PupBlock("set global translation vector:", block) - - GUI_A['g_originX'].val = x_origin.val - GUI_A['g_originY'].val = y_origin.val - GUI_A['g_originZ'].val = z_origin.val - - -def update_globals(): #----------------------------------------------------------------- - """ update globals if GUI_A changed - """ - global ONLYSELECTED,ONLYVISIBLE, DEBUG,\ - PROJECTION, HIDDEN_LINES, CAMERA, \ - G_SCALE, G_ORIGIN,\ - PREFIX, LAYERNAME_DEF, LAYERCOLOR_DEF, LAYERLTYPE_DEF,\ - APPLY_MODIFIERS, INCLUDE_DUPLIS,\ - OUTPUT_DWG - #global POLYLINES - - ONLYSELECTED = GUI_A['only_selected_on'].val - ONLYVISIBLE = GUI_A['only_visible_on'].val - """ - POLYLINES = GUI_A['to_polyline_on'].val - if GUI_A['curve_as'].val==1: POLYLINES=1 - else: POLYLINES=0 - """ - - if GUI_A['optimization'].val==0: DEBUG = 1 - else: DEBUG = 0 - PROJECTION = GUI_A['projection_on'].val - HIDDEN_LINES = GUI_A['hidden_lines_on'].val - CAMERA = GUI_A['camera_selected'].val - G_SCALE = GUI_A['g_scale'].val - if GUI_A['g_origin_on'].val: - G_ORIGIN[0] = GUI_A['g_originX'].val - G_ORIGIN[1] = GUI_A['g_originY'].val - G_ORIGIN[2] = GUI_A['g_originZ'].val - if GUI_A['g_scale_on'].val: - G_ORIGIN[0] *= G_SCALE - G_ORIGIN[1] *= G_SCALE - G_ORIGIN[2] *= G_SCALE - - PREFIX = GUI_A['prefix_def'].val - LAYERNAME_DEF = GUI_A['layername_def'].val - LAYERCOLOR_DEF = GUI_A['layercolor_def'].val - LAYERLTYPE_DEF = layerltype_def_list[GUI_A['layerltype_def'].val] - - APPLY_MODIFIERS = GUI_A['apply_modifiers_on'].val - INCLUDE_DUPLIS = GUI_A['include_duplis_on'].val - OUTPUT_DWG = GUI_A['outputDWG_on'].val - #print 'deb: GUI HIDDEN_LINES=', HIDDEN_LINES #--------- - #print 'deb: GUI GUI_A: ', GUI_A['hidden_lines_on'].val #--------------- - #print 'deb: GUI GUI_B: ', GUI_B #--------------- - - -def draw_UI(): #----------------------------------------------------------------- - """ Draw startUI and setup Settings. 
- """ - global GUI_A, GUI_B #__version__ - global user_preset, iniFileName, dxfFileName, config_UI, g_scale_as - global model_space_on - global SCROLL - - global mPAN_X, menu_orgX, mPAN_Xmax - global mPAN_Y, menu_orgY, mPAN_Ymax - global menu__Area, headerArea, screenArea, scrollArea - - size=Buffer(GL_FLOAT, 4) - glGetFloatv(GL_SCISSOR_BOX, size) #window X,Y,sizeX,sizeY - size= size.list - #print '-------------size:', size #-------------------------- - for s in [0,1,2,3]: size[s]=int(size[s]) - window_Area = [0,0,size[2],size[3]-2] - scrollXArea = [0,0,window_Area[2],15] - scrollYArea = [0,0,15,window_Area[3]] - - menu_orgX = -mPAN_X - #menu_orgX = 0 #scrollW - #if menu_pan: menu_orgX -= mPAN_X - if menu_orgX < -mPAN_Xmax: menu_orgX, mPAN_X = -mPAN_Xmax,mPAN_Xmax - if menu_orgX > 0: menu_orgX, mPAN_X = 0,0 - - menu_orgY = -mPAN_Y - #if menu_pan: menu_orgY -= mPAN_Y - if menu_orgY < -mPAN_Ymax: menu_orgY, mPAN_Y = -mPAN_Ymax,mPAN_Ymax - if menu_orgY > 0: menu_orgY, mPAN_Y = 0,0 - - - menu_margin = 10 - butt_margin = 10 - common_column = int((window_Area[2] - (3 * butt_margin) - (2 * menu_margin)-30) / 4.0) - common_column = 70 - # This is for easy layout changes - but_0c = common_column #button 1.column width - but_1c = common_column #button 1.column width - but_2c = common_column #button 2.column - but_3c = common_column #button 3.column - menu_w = (3 * butt_margin) + but_0c + but_1c + but_2c + but_3c #menu width - - simple_menu_h = 260 - extend_menu_h = 345 - menu_h = simple_menu_h # y is menu upper.y - if config_UI.val: - menu_h += extend_menu_h - - mPAN_Xmax = menu_w-window_Area[2]+50 - mPAN_Ymax = menu_h-window_Area[3]+30 - - y = menu_h - x = 0 #menu left.x - x +=menu_orgX+20 - y +=menu_orgY+20 - - - but0c = x + menu_margin #buttons 0.column position.x - but1c = but0c + but_0c + butt_margin - but2c = but1c + but_1c + butt_margin - but3c = but2c + but_2c + butt_margin - but4c = but3c + but_3c - - # Here starts menu ----------------------------------------------------- - #glClear(GL_COLOR_BUFFER_BIT) - #glRasterPos2d(8, 125) - - - ui_box(x, y, x+menu_w+menu_margin*2, y-menu_h) - y -= 20 - Draw.Label("DXF(r12)-Exporter v" + __version__, but0c, y, menu_w, 20) - - if config_UI.val: - b0, b0_ = but0c, but_0c-20 + butt_margin - b1, b1_ = but1c-20, but_1c+20 - y_top = y - - y -= 10 - y -= 20 - Draw.BeginAlign() - GUI_B['bmesh'] = Draw.Toggle('Mesh', EVENT_REDRAW, b0, y, b0_, 20, GUI_B['bmesh'].val, "Export Mesh-Objects on/off") - if GUI_B['bmesh'].val: - GUI_A['mesh_as'] = Draw.Menu(mesh_as_menu, EVENT_NONE, b1, y, b1_, 20, GUI_A['mesh_as'].val, "Select target DXF-object") - Draw.EndAlign() - - y -= 20 - Draw.BeginAlign() - GUI_B['bcurve'] = Draw.Toggle('Curve', EVENT_REDRAW, b0, y, b0_, 20, GUI_B['bcurve'].val, "Export Curve-Objects on/off") - if GUI_B['bcurve'].val: - GUI_A['curve_as'] = Draw.Menu(curve_as_menu, EVENT_NONE, b1, y, b1_, 20, GUI_A['curve_as'].val, "Select target DXF-object") - Draw.EndAlign() - - y -= 20 - Draw.BeginAlign() - GUI_B['surface'] = Draw.Toggle('..Surface', EVENT_REDRAW, b0, y, b0_, 20, GUI_B['surface'].val, "(*todo) Export Surface-Objects on/off") - if GUI_B['surface'].val: - GUI_A['surface_as'] = Draw.Menu(surface_as_menu, EVENT_NONE, b1, y, b1_, 20, GUI_A['surface_as'].val, "Select target DXF-object") - Draw.EndAlign() - - y -= 20 - Draw.BeginAlign() - GUI_B['bmeta'] = Draw.Toggle('..Meta', EVENT_REDRAW, b0, y, b0_, 20, GUI_B['bmeta'].val, "(*todo) Export Meta-Objects on/off") - if GUI_B['bmeta'].val: - GUI_A['meta_as'] = Draw.Menu(meta_as_menu, EVENT_NONE, b1, 
y, b1_, 20, GUI_A['meta_as'].val, "Select target DXF-object") - Draw.EndAlign() - - y -= 20 - Draw.BeginAlign() - GUI_B['text'] = Draw.Toggle('Text', EVENT_REDRAW, b0, y, b0_, 20, GUI_B['text'].val, "Export Text-Objects on/off") - if GUI_B['text'].val: - GUI_A['text_as'] = Draw.Menu(text_as_menu, EVENT_NONE, b1, y, b1_, 20, GUI_A['text_as'].val, "Select target DXF-object") - Draw.EndAlign() - - y -= 20 - Draw.BeginAlign() - GUI_B['empty'] = Draw.Toggle('Empty', EVENT_REDRAW, b0, y, b0_, 20, GUI_B['empty'].val, "Export Empty-Objects on/off") - if GUI_B['empty'].val: - GUI_A['empty_as'] = Draw.Menu(empty_as_menu, EVENT_NONE, b1, y, b1_, 20, GUI_A['empty_as'].val, "Select target DXF-object") - Draw.EndAlign() - - y_down = y - # ----------------------------------------------- - - y = y_top - b0, b0_ = but2c, but_2c-20 + butt_margin - b1, b1_ = but3c-20, but_3c+20 - - y -= 10 - y -= 20 - Draw.BeginAlign() - GUI_B['group'] = Draw.Toggle('..Group', EVENT_REDRAW, b0, y, b0_, 20, GUI_B['group'].val, "(*todo) Export Group-Relationships on/off") - if GUI_B['group'].val: - GUI_A['group_as'] = Draw.Menu(group_as_menu, EVENT_NONE, b1, y, b1_, 20, GUI_A['group_as'].val, "Select target DXF-object") - Draw.EndAlign() - - y -= 20 - Draw.BeginAlign() - GUI_B['parent'] = Draw.Toggle('..Parent', EVENT_REDRAW, b0, y, b0_, 20, GUI_B['parent'].val, "(*todo) Export Parent-Relationships on/off") - if GUI_B['parent'].val: - GUI_A['parent_as'] = Draw.Menu(parent_as_menu, EVENT_NONE, b1, y, b1_, 20, GUI_A['parent_as'].val, "Select target DXF-object") - Draw.EndAlign() - - y -= 20 - Draw.BeginAlign() - GUI_B['proxy'] = Draw.Toggle('..Proxy', EVENT_REDRAW, b0, y, b0_, 20, GUI_B['proxy'].val, "(*todo) Export Proxy-Objects on/off") - if GUI_B['proxy'].val: - GUI_A['proxy_as'] = Draw.Menu(proxy_as_menu, EVENT_NONE, b1, y, b1_, 20, GUI_A['proxy_as'].val, "Select target DXF-object") - Draw.EndAlign() - - y -= 20 - Draw.BeginAlign() - GUI_B['camera'] = Draw.Toggle('Camera', EVENT_REDRAW, b0, y, b0_, 20, GUI_B['camera'].val, "(*wip) Export Camera-Objects on/off") - if GUI_B['camera'].val: - GUI_A['camera_as'] = Draw.Menu(camera_as_menu, EVENT_NONE, b1, y, b1_, 20, GUI_A['camera_as'].val, "Select target DXF-object") - Draw.EndAlign() - - y -= 20 - Draw.BeginAlign() - GUI_B['lamp'] = Draw.Toggle('Lamp', EVENT_REDRAW, b0, y, b0_, 20, GUI_B['lamp'].val, "(*wip) Export Lamp-Objects on/off") - if GUI_B['lamp'].val: - GUI_A['lamp_as'] = Draw.Menu(lamp_as_menu, EVENT_NONE, b1, y, b1_, 20, GUI_A['lamp_as'].val, "Select target DXF-object") - Draw.EndAlign() - - - if y < y_down: y_down = y - # -----end supported objects-------------------------------------- - - y_top = y_down - y = y_top - y -= 10 - y -= 20 - but_ = menu_w / 6 - b0 = but0c + (menu_w - but_*6)/2 - Draw.BeginAlign() - #GUI_A['dummy_on'] = Draw.Toggle('-', EVENT_NONE, b0+but_*0, y, but_, 20, GUI_A['dummy_on'].val, "placeholder only on/off") - GUI_A['paper_space_on'] = Draw.Toggle('Paper', EVENT_NONE, b0+but_*0, y, but_, 20, GUI_A['paper_space_on'].val, "Export to Paper-Space, otherwise to Model-Space on/off") - GUI_A['layFrozen_on'] = Draw.Toggle ('..frozen', EVENT_NONE, b0+but_*1, y, but_, 20, GUI_A['layFrozen_on'].val, "(*todo) Support LAYER.frozen status on/off") - GUI_A['materialFilter_on'] = Draw.Toggle('..material', EVENT_NONE, b0+but_*2, y, but_, 20, GUI_A['materialFilter_on'].val, "(*todo) Material filtering on/off") - GUI_A['colorFilter_on'] = Draw.Toggle('..color', EVENT_NONE, b0+but_*3, y, but_, 20, GUI_A['colorFilter_on'].val, "(*todo) Color filtering on/off") - 
GUI_A['groupFilter_on'] = Draw.Toggle('..group', EVENT_NONE, b0+but_*4, y, but_, 20, GUI_A['groupFilter_on'].val, "(*todo) Group filtering on/off") - GUI_A['objectFilter_on'] = Draw.Toggle('..object', EVENT_NONE, b0+but_*5, y, but_, 20, GUI_A['objectFilter_on'].val, "(*todo) Object filtering on/off") - Draw.EndAlign() - - # -----end filters-------------------------------------- - - b0, b0_ = but0c, but_0c + butt_margin - b1, b1_ = but1c, but_1c - - y -= 10 - y -= 20 - Draw.BeginAlign() - GUI_A['g_origin_on'] = Draw.Toggle('Location', EVENT_REDRAW, b0, y, b0_, 20, GUI_A['g_origin_on'].val, "Global relocate all objects on/off") - if GUI_A['g_origin_on'].val: - tmp = Draw.PushButton('=', EVENT_ORIGIN, b1, y, 20, 20, "Edit relocation-vector (x,y,z in DXF units)") - origin_str = '(%.4f, %.4f, %.4f)' % ( - GUI_A['g_originX'].val, - GUI_A['g_originY'].val, - GUI_A['g_originZ'].val - ) - tmp = Draw.Label(origin_str, b1+20, y, 300, 20) - Draw.EndAlign() - - y -= 20 - Draw.BeginAlign() - GUI_A['g_scale_on'] = Draw.Toggle('Scale', EVENT_REDRAW, b0, y, b0_, 20, GUI_A['g_scale_on'].val, "Global scale all objects on/off") - if GUI_A['g_scale_on'].val: - g_scale_as = Draw.Menu(g_scale_list, EVENT_SCALE, b1, y, 45, 20, g_scale_as.val, "Factor for scaling the DXFdata") - if g_scale_as.val == 12: - pass - else: - if g_scale_as.val == 6: #scale inches to meters - GUI_A['g_scale'].val = 0.0254000 - elif g_scale_as.val == 7: #scale feets to meters - GUI_A['g_scale'].val = 0.3048000 - elif g_scale_as.val == 8: #scale yards to meters - GUI_A['g_scale'].val = 0.9144000 - else: - GUI_A['g_scale'].val = 10.0 ** int(g_scale_as.val) - scale_float = GUI_A['g_scale'].val - if scale_float < 0.000001 or scale_float > 1000000: - scale_str = ' = %s' % GUI_A['g_scale'].val - else: - scale_str = ' = %.6f' % GUI_A['g_scale'].val - Draw.Label(scale_str, b1+45, y, 200, 20) - Draw.EndAlign() - - y -= 20 - Draw.BeginAlign() - GUI_A['Z_force_on'] = Draw.Toggle('Elevation', EVENT_REDRAW, b0, y, b0_, 20, GUI_A['Z_force_on'].val, "Overwrite Z-coordinates (flatten geometry) on/off") - if GUI_A['Z_force_on'].val: - GUI_A['Z_elev'] = Draw.Number('', EVENT_NONE, b1, y, b1_, 20, GUI_A['Z_elev'].val, -1000, 1000, "Set value for default Z-coordinate (in DXF units)") - Draw.EndAlign() - - """ - y -= 30 - Draw.BeginAlign() - GUI_A['material_on'] = Draw.Toggle('.material', EVENT_REDRAW, b0, y, b0_-20, 20, GUI_A['material_on'].val, "Support for material assignment on/off") - if GUI_A['material_on'].val: - GUI_A['material_to'] = Draw.Menu(material_to_menu, EVENT_NONE, b1-20, y, b1_+20, 20, GUI_A['material_to'].val, "Material assigned to?") - Draw.EndAlign() - """ - - #b0, b0_ = but0c, but_0c + butt_margin - b0, b0_ = but0c, 50 - b1, b1_ = b0+b0_, but_0c-b0_+ but_1c + butt_margin - b2, b2_ = but2c, but_2c - b3, b3_ = but3c, but_3c - - y -= 30 - Draw.Label('Output:', b0, y, b0_, 20) - Draw.Label('LAYER:', b1, y, b1_, 20) - Draw.Label('COLOR:', b2, y, b2_, 20) - Draw.Label('LINETYPE:', b3, y, b3_, 20) - #Draw.Label('LINESIZE:', b4, y, b4_, 20) - - y -= 20 - Draw.BeginAlign() - GUI_A['prefix_def'] = Draw.String('', EVENT_NONE, b0, y, b0_, 20, GUI_A['prefix_def'].val, 10, "Type Prefix for LAYERs") - GUI_A['layername_def'] = Draw.String('', EVENT_NONE, b1, y, b1_, 20, GUI_A['layername_def'].val, 10, "Type default LAYER name") - GUI_A['layercolor_def'] = Draw.Number('', EVENT_NONE, b2, y, b2_, 20, GUI_A['layercolor_def'].val, 1, 255, "Set default COLOR. 
(0=BYBLOCK,256=BYLAYER)") - GUI_A['layerltype_def'] = Draw.Menu(layerltype_def_menu, EVENT_NONE, b3, y, b3_, 20, GUI_A['layerltype_def'].val, "Set default LINETYPE") - Draw.EndAlign() - - y -= 25 - Draw.Label('Style:', b0, y, b0_, 20) - Draw.BeginAlign() - GUI_A['entitylayer_from'] = Draw.Menu(entitylayer_from_menu, EVENT_NONE, b1, y, b1_, 20, GUI_A['entitylayer_from'].val, "entity LAYER assigned to?") - GUI_A['entitycolor_from'] = Draw.Menu(entitycolor_from_menu, EVENT_NONE, b2, y, b2_, 20, GUI_A['entitycolor_from'].val, "entity COLOR assigned to?") - GUI_A['entityltype_from'] = Draw.Menu(entityltype_from_menu, EVENT_NONE, b3, y, b3_, 20, GUI_A['entityltype_from'].val, "Set entity LINETYPE") - Draw.EndAlign() - - y -= 10 - - y_down = y - # -----end material,translate,scale------------------------------------------ - - - #-------------------------------------- - y_top = y_down - y = y_top - - y -= 30 - Draw.BeginAlign() - Draw.PushButton('INI file >', EVENT_CHOOSE_INI, but0c, y, but_0c, 20, 'Select INI-file with file selector') - iniFileName = Draw.String(' :', EVENT_NONE, but1c, y, menu_w-but_1c-60, 20, iniFileName.val, FILENAME_MAX, "Write here the name of the INI-file") - but = but4c-60 - Draw.PushButton('#', EVENT_PRESETS, but, y, 20, 20, "Toggle Preset-INI-files") - Draw.PushButton('L', EVENT_LOAD_INI, but+20, y, 20, 20, 'Loads configuration from selected ini-file: %s' % iniFileName.val) - Draw.PushButton('S', EVENT_SAVE_INI, but+40, y, 20, 20, 'Saves configuration to selected ini-file: %s' % iniFileName.val) - Draw.EndAlign() - - bm = butt_margin/2 - - y -= 10 - y -= 20 - Draw.BeginAlign() - Draw.PushButton('DXFfile >', EVENT_CHOOSE_DXF, but0c, y, but_0c, 20, 'Select DXF-file with file selector') - dxfFileName = Draw.String(' :', EVENT_NONE, but1c, y, menu_w-but_0c-menu_margin, 20, dxfFileName.val, FILENAME_MAX, "Type path/name of output DXF-file") - Draw.EndAlign() - - y -= 30 - config_UI = Draw.Toggle('CONFIG', EVENT_REDRAW, but0c, y, but_0c+bm, 20, config_UI.val, 'Advanced configuration on/off' ) - Draw.BeginAlign() - but, but_ = but1c, but_1c+bm - but_ /= 3 - Draw.PushButton('X', EVENT_RESET, but, y, 15, 20, "Reset configuration to defaults") - Draw.PushButton('2D', EVENT_PRESET2D, but+but_, y, but_, 20, 'Set to standard configuration for 2D export') - Draw.PushButton('3D', EVENT_PRESET3D, but+(but_*2), y, but_, 20, 'Set to standard configuration for 3D import') - Draw.EndAlign() - - - y -= 30 - b0, b0_ = but0c, but_0c + butt_margin +but_1c - GUI_A['only_selected_on'] = Draw.Toggle('Export Selection', EVENT_NONE, b0, y, b0_, 20, GUI_A['only_selected_on'].val, "Export only selected geometry on/off") - b0, b0_ = but2c, but_2c + butt_margin + but_3c - Draw.BeginAlign() - GUI_A['projection_on'] = Draw.Toggle('2d Projection', EVENT_REDRAW, b0, y, b0_, 20, GUI_A['projection_on'].val, "Export a 2d Projection according 3d-View or Camera-View on/off") - if GUI_A['projection_on'].val: - GUI_A['camera_selected'] = Draw.Menu(MenuCAMERA, EVENT_CAMERA, b0, y-20, b0_-20, 20, GUI_A['camera_selected'].val, 'Choose the camera to be rendered') - Draw.PushButton('>', EVENT_setCAMERA, b0+b0_-20, y-20, 20, 20, 'switch to selected Camera - make it active') - GUI_A['hidden_lines_on'] = Draw.Toggle('Remove backFaces', EVENT_NONE, b0, y-40, b0_, 20, GUI_A['hidden_lines_on'].val, "Filter out backFaces on/off") - #GUI_A['shadows_on'] = Draw.Toggle('..Shadows', EVENT_REDRAW, b0, y-60, but_2c, 20, GUI_A['shadows_on'].val, "(*todo) Shadow tracing on/off") - #GUI_A['light_on'] = Draw.Menu(MenuLIGHT, 
EVENT_LIGHT, but3c, y-60, but_3c, 20, GUI_A['light_on'].val, '(*todo) Choose the light source(sun) to be rendered') - Draw.EndAlign() - - y -= 20 - b0, b0_ = but0c, but_0c + butt_margin +but_1c - GUI_A['only_visible_on'] = Draw.Toggle('Visible only', EVENT_PRESETPLINE, b0, y, b0_, 20, GUI_A['only_visible_on'].val, "Export only from visible layers on/off") - #b0, b0_ = but2c, but_2c + butt_margin + but_3c - - y -= 20 - b0, b0_ = but0c, but_0c + butt_margin +but_1c - GUI_A['to_polyline_on'] = Draw.Toggle('POLYLINE-Mode', EVENT_PRESETPLINE, b0, y, b0_, 20, GUI_A['to_polyline_on'].val, "Export to POLYLINE/POLYFACEs, otherwise to LINEs/3DFACEs on/off") - #b0, b0_ = but2c, but_2c + butt_margin + but_3c - - y -= 20 - b0, b0_ = but0c, but_0c + butt_margin +but_1c - GUI_A['instances_on'] = Draw.Toggle('Instances as BLOCKs', EVENT_NONE, b0, y, b0_, 20, GUI_A['instances_on'].val, "Export instances (multi-users) of Mesh/Curve as BLOCK/INSERTs on/off") - #b0, b0_ = but2c, but_2c + butt_margin + but_3c - - y -= 20 - b0, b0_ = but0c, but_0c + butt_margin +but_1c - GUI_A['apply_modifiers_on'] = Draw.Toggle('Apply Modifiers', EVENT_NONE, b0, y, b0_, 20, GUI_A['apply_modifiers_on'].val, "Apply modifier stack to mesh objects before export on/off") - #b0, b0_ = but2c, but_2c + butt_margin + but_3c - - y -= 20 - b0, b0_ = but0c, but_0c + butt_margin +but_1c - GUI_A['include_duplis_on'] = Draw.Toggle('Include Duplis', EVENT_NONE, b0, y, b0_, 20, GUI_A['include_duplis_on'].val, "Export Duplicates (dupliverts, dupliframes, dupligroups) on/off") - #b0, b0_ = but2c, but_2c + butt_margin + but_3c - - - - y -= 30 - Draw.PushButton('EXIT', EVENT_EXIT, but0c, y, but_0c+bm, 20, '' ) - Draw.PushButton('HELP', EVENT_HELP, but1c, y, but_1c+bm, 20, 'goes to online-Manual on wiki.blender.org') - GUI_A['optimization'] = Draw.Number('', EVENT_NONE, but2c, y, 40, 20, GUI_A['optimization'].val, 0, 3, "Optimization Level: 0=Debug/Draw-in, 1=Verbose, 2=ProgressBar, 3=SilentMode") - GUI_A['outputDWG_on'] = Draw.Toggle('DWG*', EVENT_NONE, but2c, y+20, 40, 20, GUI_A['outputDWG_on'].val, "converts DXF to DWG (needs external converter) on/off") - - Draw.BeginAlign() - Draw.PushButton('START EXPORT', EVENT_START, but2c+40, y, but_2c-40+but_3c+butt_margin, 40, 'Start the export process. 
For Cancel go to console and hit Ctrl-C') - Draw.EndAlign() - - y -= 20 - #Draw.BeginAlign() - #Draw.Label(' ', but0c-menu_margin, y, menu_margin, 20) - #Draw.Label(LAB, but0c, y, menu_w, 20) - Draw.Label(LAB, 30, y, menu_w, 20) - #Draw.Label(' ', but0c+menu_w, y, menu_margin, 20) - #Draw.EndAlign() - - ui_scrollbarX(menu_orgX, menu_w+50, scrollXArea, c_fg, c_bg) - ui_scrollbarY(menu_orgY, menu_h+30, scrollYArea, c_fg, c_bg) - - - - -#-- END GUI Stuf----------------------------------------------------- - -c0=[0.2,0.2,0.2,0.0] -c1=[0.7,0.7,0.9,0.0] -c2=[0.71,0.71,0.71,0.0] -c3=[0.4,0.4,0.4,0.0] -c4=[0.95,0.95,0.9,0.0] -c5=[0.64,0.64,0.64,0] -c6=[0.75,0.75,0.75,0] -c7=[0.6,0.6,0.6,0] -c8=[1.0,0.0,0.0,0] -c9=[0.7,0.0,0.0,0] -c10=[0.64,0.81,0.81,0] -c11=[0.57,0.71,0.71,0] -c_nor= c5[:3] -c_act= c10[:3] -c_sel= c11[:3] -c_tx = c0[:3] -c_fg = c2[:3] -c_bg = c5[:3] - -def ui_rect(coords,color): - [X1,Y1,X2,Y2],[r,g,b] = coords,color - glColor3f(r,g,b) - glRecti(X1,Y1,X2,Y2) -def ui_rectA(coords,color): - [X1,Y1,X2,Y2],[r,g,b,a] = coords,color - glColor4f(r,g,b,a) - glRecti(X1,Y1,X2,Y2) #integer coords - #glRectf(X1,Y1,X2,Y2) #floating coords -def ui_line(coords,color): - [X1,Y1,X2,Y2],[r,g,b] = coords,color - glColor3f(r,g,b) - glBegin(GL_LINES) - glVertex2i(X1,Y1) - glVertex2i(X2,Y2) - glEnd() -def ui_panel(posX,posY,L,H,color): - [r,g,b] = color - ui_rect([posX+4,posY-4,posX+L+4,posY-H-4],[.55,.55,.55]) #1st shadow - ui_rect([posX+3,posY-3,posX+L+3,posY-H-3],[.45,.45,.45]) - ui_rect([posX+3,posY-3,posX+L+2,posY-H-2],[.30,.30,.30]) #2nd shadow - ui_rect([posX,posY-H,posX+L,posY],[r,g,b]) #Main - ui_rect([posX+3,posY-19,posX+L-3,posY-2],[.75*r,.75*g,.75*b]) #Titlebar - ui_line([posX+3,posY-19,posX+3,posY-2],[.25,.25,.25]) - ui_line([posX+4,posY-19,posX+4,posY-2],[(r+.75)/4,(g+.75)/4,(b+.75)/4]) - ui_line([posX+4,posY-2,posX+L-3,posY-2],[(r+.75)/4,(g+.75)/4,(b+.75)/4]) -def ui_box(x,y,xright,bottom): - color = [0.75, 0.75, 0.75] - coords = x+1,y+1,xright-1,bottom-1 - ui_rect(coords,color) - -def ui_scrollbarX(Focus,PanelH,Area, color_fg, color_bg): - # Area = ScrollBarArea - # point1=down/left, point2=top/right - P1X,P1Y,P2X,P2Y = Area - AreaH = P2X-P1X - if PanelH > AreaH: - Slider = int(AreaH * (AreaH / float(PanelH))) - if Slider<3: Slider = 3 #minimal slider heigh - posX = -int(AreaH * (Focus / float(PanelH))) - ui_rect([P1X,P1Y,P2X,P2Y], color_bg) - ui_rect([P1X+posX,P1Y+3,P1X+posX+Slider,P2Y-3], color_fg) - -def ui_scrollbarY(Focus,PanelH,Area, color_fg, color_bg): - # Area = ScrollBarArea - # point1=down/left, point2=top/right - P1X,P1Y,P2X,P2Y = Area - AreaH = P2Y-P1Y - if PanelH > AreaH: - Slider = int(AreaH * (AreaH / float(PanelH))) - if Slider<3: Slider = 3 #minimal slider heigh - posY = -int(AreaH * (Focus / float(PanelH))) - ui_rect([P1X,P1Y,P2X-1,P2Y], color_bg) - #ui_rect([P1X+3,P2Y-posY,P2X-4,P2Y-posY-Slider], color_fg) - ui_rect([P1X+3,P1Y+posY,P2X-4,P1Y+posY+Slider], color_fg) - - -#------------------------------------------------------------ -def dxf_callback(input_filename): - global dxfFileName - dxfFileName.val=input_filename -# dirname == Blender.sys.dirname(Blender.Get('filename')) -# update_RegistryKey('DirName', dirname) -# update_RegistryKey('dxfFileName', input_filename) - -def ini_callback(input_filename): - global iniFileName - iniFileName.val=input_filename - -#------------------------------------------------------------ -def getSpaceRect(): - __UI_RECT__ = Buffer(GL_FLOAT, 4) - glGetFloatv(GL_SCISSOR_BOX, __UI_RECT__) - __UI_RECT__ = __UI_RECT__.list - return 
(int(__UI_RECT__[0]), int(__UI_RECT__[1]), int(__UI_RECT__[2]), int(__UI_RECT__[3])) - -def getRelMousePos(mco, winRect): - # mco = Blender.Window.GetMouseCoords() - if pointInRect(mco, winRect): - return (mco[0] - winRect[0], mco[1] - winRect[1]) - return None - - -def pointInRect(pt, rect): - if rect[0] < pt[0] < rect[0]+rect[2] and\ - rect[1] < pt[1] < rect[1]+rect[3]: - return True - else: - return False - - - -#--- variables UI menu --------------------------- -mco = [0,0] # mouse coordinaten -mbX, mbY = 0,0 # mouse buffer coordinaten -scrollW = 20 # width of scrollbar -rowH = 20 # height of menu raw -menu__H = 2 * rowH +5 # height of menu bar -headerH = 1 * rowH # height of column header bar -scroll_left = True # position of scrollbar -menu_bottom = False # position of menu -edit_mode = False # indicator/activator -iconlib_mode = False # indicator/activator -icon_maps = [] #[['blenderbuttons.png',12,25,20,21], -#['referenceicons.png',12,25,20,21]] -help_text = False # indicator/activator -menu_pan = False # indicator/activator -compact_DESIGN = True # toggle UI -showLINK = True # toggle Links -filterList=[-1,-1,-1,-1,-1] -dubbleclik_delay = 0.25 - -PAN_X,PAN_Y = 0,0 # pan coordinates in characters -mPAN_X,mPAN_Y = 0,0 # manu pan coordinates in characters -menu_orgX = 0 -menu_orgY = 0 -mPAN_Xmax = 800 -mPAN_Ymax = 800 - - -#------------------------------------------------------------ -def event(evt, val): - global mbX, mbY, UP, UP0, scroll_pan, FOCUS_fix - global menu_bottom, scroll_left, mco - global PAN_X, PAN_Y, PAN_X0, PAN_Y0 - global mPAN_X, mPAN_Y, mPAN_X0, mPAN_Y0, menu_pan - - #if Blender.event: - # print 'Blender.event:%s, evt:%s' %(Blender.event, evt) #------------ - - if evt in (Draw.QKEY, Draw.ESCKEY) and not val: - print 'DXF-Exporter *** end ***' #--------------------- - Draw.Exit() - - elif val: - if evt==Draw.MIDDLEMOUSE: - mco2 = Window.GetMouseCoords() - relativeMouseCo = getRelMousePos(mco2, getSpaceRect()) - if relativeMouseCo != None: - #rect = [menu__X1,menu__Y1,menu__X2,menu__Y2] - if 1: #pointInRect(relativeMouseCo, menu__Area): - menu_pan = True - mPAN_X0 = mPAN_X - mPAN_Y0 = mPAN_Y - mco = mco2 - elif evt == Draw.MOUSEY or evt == Draw.MOUSEX: - if menu_pan: - mco2 = Window.GetMouseCoords() - mbX = mco2[0]-mco[0] - mbY = mco2[1]-mco[1] - mPAN_X = mPAN_X0 - mbX - mPAN_Y = mPAN_Y0 - mbY - #print mbX, mbY #-------------------- - Draw.Redraw() - elif evt == Draw.WHEELDOWNMOUSE: - mPAN_Y -= 80 - Draw.Redraw() - elif evt == Draw.WHEELUPMOUSE: - mPAN_Y += 80 - Draw.Redraw() - else: # = if val==False: - if evt==Draw.LEFTMOUSE: - scroll_pan = False - elif evt==Draw.MIDDLEMOUSE: - menu_pan = False - -def bevent(evt): - global config_UI, user_preset - global CAMERA, GUI_A - - ######### Manages GUI events - if (evt==EVENT_EXIT): - Draw.Exit() - print 'DXF-Exporter *** end ***' #--------------------- - elif (evt==EVENT_CHOOSE_INI): - Window.FileSelector(ini_callback, "INI-file Selection", '*.ini') - elif (evt==EVENT_REDRAW): - Draw.Redraw() - elif (evt==EVENT_RESET): - resetDefaultConfig() - Draw.Redraw() - elif (evt==EVENT_PRESET2D): - resetDefaultConfig_2D() - Draw.Redraw() - elif (evt==EVENT_PRESET3D): - resetDefaultConfig_3D() - Draw.Redraw() - elif evt in (EVENT_CAMERA,EVENT_LIGHT): - CAMERA = GUI_A['camera_selected'].val - if CAMERA==len(CAMERAS)+1: - doAllCameras = True - else: - pass #print 'deb: CAMERAS=',CAMERAS #---------------- - Draw.Redraw() - elif (evt==EVENT_setCAMERA): - if CAMERA 5: user_preset = 0; index = '' - iniFileName.val = INIFILE_DEFAULT_NAME + 
index + INIFILE_EXTENSION - Draw.Redraw() - elif (evt==EVENT_HELP): - try: - import webbrowser - webbrowser.open('http://wiki.blender.org/index.php?title=Scripts/Manual/Export/autodesk_dxf') - except: - Draw.PupMenu('DXF-Exporter: HELP Alert!%t|no connection to manual-page on Blender-Wiki! try:|\ -http://wiki.blender.org/index.php?title=Scripts/Manual/Export/autodesk_dxf') - Draw.Redraw() - elif (evt==EVENT_LOAD_INI): - loadConfig() - Draw.Redraw() - elif (evt==EVENT_SAVE_INI): - saveConfig() - Draw.Redraw() - elif (evt==EVENT_DXF_DIR): - dxfFile = dxfFileName.val - dxfPathName = '' - if '/' in dxfFile: - dxfPathName = '/'.join(dxfFile.split('/')[:-1]) + '/' - elif '\\' in dxfFile: - dxfPathName = '\\'.join(dxfFile.split('\\')[:-1]) + '\\' - dxfFileName.val = dxfPathName + '*.dxf' -# dirname == Blender.sys.dirname(Blender.Get('filename')) -# update_RegistryKey('DirName', dirname) -# update_RegistryKey('dxfFileName', dxfFileName.val) - GUI_A['only_selected_on'].val = 1 - Draw.Redraw() - elif (evt==EVENT_CHOOSE_DXF): - filename = '' # '*.dxf' - if dxfFileName.val: filename = dxfFileName.val - Window.FileSelector(dxf_callback, "DXF-file Selection", filename) - elif (evt==EVENT_START): - dxfFile = dxfFileName.val - #print 'deb: dxfFile file: ', dxfFile #---------------------- - if E_M: dxfFileName.val, dxfFile = e_mode(dxfFile) #evaluation mode - update_RegistryKey('dxfFileName', dxfFileName.val) - update_globals() - if dxfFile.lower().endswith('*.dxf'): - if Draw.PupMenu('DXF-Exporter: OK?|will write multiple DXF-files, one for each Scene, in:|%s' % dxfFile) == 1: - global UI_MODE - UI_MODE = False - #TODO: multi_export(dxfFile[:-5]) # cut last 5 characters '*.dxf' - Draw.Redraw() - UI_MODE = True - else: - Draw.Redraw() - elif dxfFile.lower()[-4:] in ('.dxf','.dwg'): # and Blender.sys.exists(dxfFile): - print 'preparing for export ---' #Standard Mode: activated - filepath = dxfFile - sce = Scene.GetCurrent() - if ONLYSELECTED: sel_group = sce.objects.selected - else: sel_group = sce.objects - - if ONLYVISIBLE: - sel_group_temp = [] - layerlist = sce.getLayers() - for ob in sel_group: - for lay in ob.layers: - if lay in layerlist: - sel_group_temp.append(ob) - break - sel_group = sel_group_temp - - export_list = getObjectsAndDuplis(sel_group,MATRICES=True) - - if export_list: do_export(export_list, filepath) - else: - print "Abort: selection was empty, no object to export!" - Draw.PupMenu('DXF Exporter: nothing exported!|empty selection!') - else: - Draw.PupMenu('DXF-Exporter: Alert!%t|no valid DXF-file selected!') - print "DXF-Exporter: error, no valid DXF-file selected! try again" - Draw.Redraw() - - - - -def multi_export(DIR): #TODO: - """Imports all DXF-files from directory DIR. - - """ - global SCENE - batchTIME = Blender.sys.time() - #if #DIR == "": DIR = os.path.curdir - if DIR == "": DIR = Blender.sys.dirname(Blender.Get('filename')) - print 'Multifiles Import from %s' %DIR - files = \ - [Blender.sys.join(DIR, f) for f in os.listdir(DIR) if f.lower().endswith('.dxf')] - if not files: - print '...None DXF-files found. Abort!' - return - - i = 0 - for dxfFile in files: - i += 1 - print '\nDXF-file', i, 'of', len(files) #,'\nImporting', dxfFile - if ONLYSELECTED: - _dxf_file = dxfFile.split('/')[-1].split('\\')[-1] - _dxf_file = _dxf_file[:-4] # cut last char:'.dxf' - _dxf_file = _dxf_file[:NAMELENGTH_MAX] #? [-NAMELENGTH_MAX:]) - SCENE = Blender.Scene.New(_dxf_file) - SCENE.makeCurrent() - #or so? 
Blender.Scene.makeCurrent(_dxf_file) - #sce = bpy.data.scenes.new(_dxf_file) - #bpy.data.scenes.active = sce - else: - SCENE = Blender.Scene.GetCurrent() - SCENE.objects.selected = [] # deselect all - main(dxfFile) - #Blender.Redraw() - - print 'TOTAL TIME: %.6f' % (Blender.sys.time() - batchTIME) - print '\a\r', # beep when done - - -#----------------------------------------------------- -if __name__=='__main__': - - if DXF: - print '\n\n\n' - print 'DXF-Exporter v%s *** start ***' %(__version__) #--------------------- - print 'with Library %s' %(DXF.__version__) #--------------------- - if not DXF.copy: - print "DXF-Exporter: dxfLibrary.py script requires a full Python install" - Draw.PupMenu('Error%t|The dxfLibrary.py script requires a full Python install') - else: - #Window.FileSelector(dxf_export_ui, 'EXPORT DXF', Blender.sys.makename(ext='.dxf')) - # recall last used DXF-file and INI-file names - dxffilename = check_RegistryKey('dxfFileName') - #print 'deb:start dxffilename:', dxffilename #---------------- - if dxffilename: dxfFileName.val = dxffilename - else: - dirname = Blender.sys.dirname(Blender.Get('filename')) - #print 'deb:start dirname:', dirname #---------------- - dxfFileName.val = Blender.sys.join(dirname, '') - inifilename = check_RegistryKey('iniFileName') - if inifilename: iniFileName.val = inifilename - - updateMenuCAMERA() - updateCAMERA() - - Draw.Register(draw_UI, event, bevent) - - \ No newline at end of file diff --git a/release/scripts/export_fbx.py b/release/scripts/export_fbx.py deleted file mode 100644 index 50357cbfa75..00000000000 --- a/release/scripts/export_fbx.py +++ /dev/null @@ -1,3084 +0,0 @@ -#!BPY -""" -Name: 'Autodesk FBX (.fbx)...' -Blender: 249 -Group: 'Export' -Tooltip: 'Selection to an ASCII Autodesk FBX ' -""" -__author__ = "Campbell Barton" -__url__ = ['www.blender.org', 'blenderartists.org'] -__version__ = "1.2" - -__bpydoc__ = """\ -This script is an exporter to the FBX file format. - -http://wiki.blender.org/index.php/Scripts/Manual/Export/autodesk_fbx -""" -# -------------------------------------------------------------------------- -# FBX Export v0.1 by Campbell Barton (AKA Ideasman) -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -try: - import time - # import os # only needed for batch export, nbot used yet -except: - time = None # use this to check if they have python modules installed - -# for python 2.3 support -try: - set() -except: - try: - from sets import Set as set - except: - set = None # so it complains you dont have a ! 
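
The multi_export() helper deleted above walks a directory, keeps every *.dxf file, and runs the exporter once per file while timing the whole batch. A minimal, Blender-free sketch of that scan-and-loop pattern, using only the standard library (scan_dxf_files/batch_process and the handler argument are illustrative names, not from the script):

    import os
    import time

    def scan_dxf_files(directory):
        """Full paths of all .dxf files in `directory` (non-recursive),
        mirroring the os.listdir()/endswith('.dxf') filter above."""
        return [os.path.join(directory, f)
                for f in os.listdir(directory)
                if f.lower().endswith('.dxf')]

    def batch_process(directory, handler):
        """Call `handler` once per DXF file and report total wall time."""
        start = time.time()
        files = scan_dxf_files(directory)
        if not files:
            print('No DXF files found, aborting.')
            return
        for i, path in enumerate(files, 1):
            print('DXF file %d of %d: %s' % (i, len(files), path))
            handler(path)   # stands in for the script's main(dxfFile)
        print('TOTAL TIME: %.6f' % (time.time() - start))
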
- -# os is only needed for batch 'own dir' option -try: - import os -except: - os = None - -import Blender -import bpy -from Blender.Mathutils import Matrix, Vector, RotationMatrix - -import BPyObject -import BPyMesh -import BPySys -import BPyMessages - -## This was used to make V, but faster not to do all that -##valid = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_,.()[]{}' -##v = range(255) -##for c in valid: v.remove(ord(c)) -v = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,42,43,47,58,59,60,61,62,63,64,92,94,96,124,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254] -invalid = ''.join([chr(i) for i in v]) -def cleanName(name): - for ch in invalid: name = name.replace(ch, '_') - return name -del v, i - - -def copy_file(source, dest): - file = open(source, 'rb') - data = file.read() - file.close() - - file = open(dest, 'wb') - file.write(data) - file.close() - - -def copy_images(dest_dir, textures): - if not dest_dir.endswith(Blender.sys.sep): - dest_dir += Blender.sys.sep - - image_paths = set() - for tex in textures: - image_paths.add(Blender.sys.expandpath(tex.filename)) - - # Now copy images - copyCount = 0 - for image_path in image_paths: - if Blender.sys.exists(image_path): - # Make a name for the target path. - dest_image_path = dest_dir + image_path.split('\\')[-1].split('/')[-1] - if not Blender.sys.exists(dest_image_path): # Image isnt alredy there - print '\tCopying "%s" > "%s"' % (image_path, dest_image_path) - try: - copy_file(image_path, dest_image_path) - copyCount+=1 - except: - print '\t\tWarning, file failed to copy, skipping.' - - print '\tCopied %d images' % copyCount - -mtx4_identity = Matrix() - -# testing -mtx_x90 = RotationMatrix( 90, 3, 'x') # used -#mtx_x90n = RotationMatrix(-90, 3, 'x') -#mtx_y90 = RotationMatrix( 90, 3, 'y') -#mtx_y90n = RotationMatrix(-90, 3, 'y') -#mtx_z90 = RotationMatrix( 90, 3, 'z') -#mtx_z90n = RotationMatrix(-90, 3, 'z') - -#mtx4_x90 = RotationMatrix( 90, 4, 'x') -mtx4_x90n = RotationMatrix(-90, 4, 'x') # used -#mtx4_y90 = RotationMatrix( 90, 4, 'y') -mtx4_y90n = RotationMatrix(-90, 4, 'y') # used -mtx4_z90 = RotationMatrix( 90, 4, 'z') # used -mtx4_z90n = RotationMatrix(-90, 4, 'z') # used - -def strip_path(p): - return p.split('\\')[-1].split('/')[-1] - -# Used to add the scene name into the filename without using odd chars -sane_name_mapping_ob = {} -sane_name_mapping_mat = {} -sane_name_mapping_tex = {} -sane_name_mapping_take = {} -sane_name_mapping_group = {} - -# Make sure reserved names are not used -sane_name_mapping_ob['Scene'] = 'Scene_' -sane_name_mapping_ob['blend_root'] = 'blend_root_' - -def increment_string(t): - name = t - num = '' - while name and name[-1].isdigit(): - num = name[-1] + num - name = name[:-1] - if num: return '%s%d' % (name, int(num)+1) - else: return name + '_0' - - - -# todo - Disallow the name 'Scene' and 'blend_root' - it will bugger things up. 
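
The cleanName() and increment_string() helpers deleted above implement the exporter's name sanitising: characters outside a small safe set become '_', and a trailing number is bumped (or '_0' appended) until the candidate no longer collides with an existing name. A standalone sketch of the same idea; the whitelist below is an assumption for illustration, whereas the original builds a blacklist from ASCII codes:

    import string

    VALID = set(string.ascii_letters + string.digits + '-_,.()[]{}')

    def clean_name(name):
        """Replace every character outside the whitelist with '_'."""
        return ''.join(ch if ch in VALID else '_' for ch in name)

    def increment_string(name):
        """Bump a trailing integer, or append '_0' if there is none."""
        digits = ''
        while name and name[-1].isdigit():
            digits = name[-1] + digits
            name = name[:-1]
        return '%s%d' % (name, int(digits) + 1) if digits else name + '_0'

    def unique_name(name, taken):
        """Keep incrementing until `name` is absent from the set `taken`."""
        name = clean_name(name)
        while name in taken:
            name = increment_string(name)
        taken.add(name)
        return name
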
-def sane_name(data, dct): - #if not data: return None - - if type(data)==tuple: # materials are paired up with images - data, other = data - use_other = True - else: - other = None - use_other = False - - if data: name = data.name - else: name = None - orig_name = name - - if other: - orig_name_other = other.name - name = '%s #%s' % (name, orig_name_other) - else: - orig_name_other = None - - # dont cache, only ever call once for each data type now, - # so as to avoid namespace collision between types - like with objects <-> bones - #try: return dct[name] - #except: pass - - if not name: - name = 'unnamed' # blank string, ASKING FOR TROUBLE! - else: - #name = BPySys.cleanName(name) - name = cleanName(name) # use our own - - while name in dct.itervalues(): name = increment_string(name) - - if use_other: # even if other is None - orig_name_other will be a string or None - dct[orig_name, orig_name_other] = name - else: - dct[orig_name] = name - - return name - -def sane_obname(data): return sane_name(data, sane_name_mapping_ob) -def sane_matname(data): return sane_name(data, sane_name_mapping_mat) -def sane_texname(data): return sane_name(data, sane_name_mapping_tex) -def sane_takename(data): return sane_name(data, sane_name_mapping_take) -def sane_groupname(data): return sane_name(data, sane_name_mapping_group) - -def derived_paths(fname_orig, basepath, FORCE_CWD=False): - ''' - fname_orig - blender path, can be relative - basepath - fname_rel will be relative to this - FORCE_CWD - dont use the basepath, just add a ./ to the filename. - use when we know the file will be in the basepath. - ''' - fname = Blender.sys.expandpath(fname_orig) - fname_strip = strip_path(fname) - if FORCE_CWD: fname_rel = '.' + Blender.sys.sep + fname_strip - else: fname_rel = Blender.sys.relpath(fname, basepath) - if fname_rel.startswith('//'): fname_rel = '.' + Blender.sys.sep + fname_rel[2:] - return fname, fname_strip, fname_rel - - -def mat4x4str(mat): - return '%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f' % tuple([ f for v in mat for f in v ]) - -def meshNormalizedWeights(me): - try: # account for old bad BPyMesh - groupNames, vWeightList = BPyMesh.meshWeight2List(me) - except: - return [],[] - - if not groupNames: - return [],[] - - for i, vWeights in enumerate(vWeightList): - tot = 0.0 - for w in vWeights: - tot+=w - - if tot: - for j, w in enumerate(vWeights): - vWeights[j] = w/tot - - return groupNames, vWeightList - -header_comment = \ -'''; FBX 6.1.0 project file -; Created by Blender FBX Exporter -; for support mail: ideasman42@gmail.com -; ---------------------------------------------------- - -''' - -# This func can be called with just the filename -def write(filename, batch_objects = None, \ - EXP_OBS_SELECTED = True, - EXP_MESH = True, - EXP_MESH_APPLY_MOD = True, - EXP_MESH_HQ_NORMALS = False, - EXP_ARMATURE = True, - EXP_LAMP = True, - EXP_CAMERA = True, - EXP_EMPTY = True, - EXP_IMAGE_COPY = False, - GLOBAL_MATRIX = Matrix(), - ANIM_ENABLE = True, - ANIM_OPTIMIZE = True, - ANIM_OPTIMIZE_PRECISSION = 6, - ANIM_ACTION_ALL = False, - BATCH_ENABLE = False, - BATCH_GROUP = True, - BATCH_SCENE = False, - BATCH_FILE_PREFIX = '', - BATCH_OWN_DIR = False - ): - - # ----------------- Batch support! 
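
meshNormalizedWeights() in the chunk above rescales each vertex's group weights so they sum to 1.0 before they are written into the FBX sub-deformers. The same normalisation in isolation, on plain lists instead of BPyMesh data (a sketch, not the original helper):

    def normalize_vertex_weights(vweight_list):
        """vweight_list[i] holds the group weights of vertex i; each row is
        scaled in place to sum to 1.0 (all-zero rows are left untouched)."""
        for vweights in vweight_list:
            total = sum(vweights)
            if total:
                for j, w in enumerate(vweights):
                    vweights[j] = w / total
        return vweight_list

    # Example: two vertices with two group weights each.
    print(normalize_vertex_weights([[0.2, 0.6], [0.0, 0.0]]))
    # -> [[0.25, 0.75], [0.0, 0.0]]
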
- if BATCH_ENABLE: - if os == None: BATCH_OWN_DIR = False - - fbxpath = filename - - # get the path component of filename - tmp_exists = Blender.sys.exists(fbxpath) - - if tmp_exists != 2: # a file, we want a path - while fbxpath and fbxpath[-1] not in ('/', '\\'): - fbxpath = fbxpath[:-1] - if not filename: - Draw.PupMenu('Error%t|Directory does not exist!') - return - - tmp_exists = Blender.sys.exists(fbxpath) - - if tmp_exists != 2: - Draw.PupMenu('Error%t|Directory does not exist!') - return - - if not fbxpath.endswith(Blender.sys.sep): - fbxpath += Blender.sys.sep - del tmp_exists - - - if BATCH_GROUP: - data_seq = bpy.data.groups - else: - data_seq = bpy.data.scenes - - # call this function within a loop with BATCH_ENABLE == False - orig_sce = bpy.data.scenes.active - - - new_fbxpath = fbxpath # own dir option modifies, we need to keep an original - for data in data_seq: # scene or group - newname = BATCH_FILE_PREFIX + BPySys.cleanName(data.name) - - - if BATCH_OWN_DIR: - new_fbxpath = fbxpath + newname + Blender.sys.sep - # path may alredy exist - # TODO - might exist but be a file. unlikely but should probably account for it. - - if Blender.sys.exists(new_fbxpath) == 0: - os.mkdir(new_fbxpath) - - - filename = new_fbxpath + newname + '.fbx' - - print '\nBatch exporting %s as...\n\t"%s"' % (data, filename) - - if BATCH_GROUP: #group - # group, so objects update properly, add a dummy scene. - sce = bpy.data.scenes.new() - sce.Layers = (1<<20) -1 - bpy.data.scenes.active = sce - for ob_base in data.objects: - sce.objects.link(ob_base) - - sce.update(1) - - # TODO - BUMMER! Armatures not in the group wont animate the mesh - - else:# scene - - - data_seq.active = data - - - # Call self with modified args - # Dont pass batch options since we alredy usedt them - write(filename, data.objects, - False, - EXP_MESH, - EXP_MESH_APPLY_MOD, - EXP_MESH_HQ_NORMALS, - EXP_ARMATURE, - EXP_LAMP, - EXP_CAMERA, - EXP_EMPTY, - EXP_IMAGE_COPY, - GLOBAL_MATRIX, - ANIM_ENABLE, - ANIM_OPTIMIZE, - ANIM_OPTIMIZE_PRECISSION, - ANIM_ACTION_ALL - ) - - if BATCH_GROUP: - # remove temp group scene - bpy.data.scenes.unlink(sce) - - bpy.data.scenes.active = orig_sce - - return # so the script wont run after we have batch exported. 
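
The batch branch above reduces the chosen filename to its directory, optionally creates a per-group/per-scene subdirectory (BATCH_OWN_DIR), and builds each target as prefix + sanitised name + '.fbx'. A short sketch of that path handling using os.path instead of the manual trailing-separator loop (function and argument names are illustrative):

    import os

    def batch_target_path(base, name, prefix='', own_dir=False, ext='.fbx'):
        """Output path for one scene/group in a batch export.

        base    -- path picked by the user (a file path is reduced to its dir)
        name    -- already-sanitised scene or group name
        own_dir -- if True, write into a subdirectory named after the item
        """
        if not os.path.isdir(base):
            base = os.path.dirname(base)      # a file was picked; use its folder
        out_dir = os.path.join(base, name) if own_dir else base
        if own_dir and not os.path.exists(out_dir):
            os.mkdir(out_dir)                 # the original also creates it on demand
        return os.path.join(out_dir, prefix + name + ext)
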
- - # end batch support - - # Use this for working out paths relative to the export location - basepath = Blender.sys.dirname(filename) - - # ---------------------------------------------- - # storage classes - class my_bone_class: - __slots__ =(\ - 'blenName',\ - 'blenBone',\ - 'blenMeshes',\ - 'restMatrix',\ - 'parent',\ - 'blenName',\ - 'fbxName',\ - 'fbxArm',\ - '__pose_bone',\ - '__anim_poselist') - - def __init__(self, blenBone, fbxArm): - - # This is so 2 armatures dont have naming conflicts since FBX bones use object namespace - self.fbxName = sane_obname(blenBone) - - self.blenName = blenBone.name - self.blenBone = blenBone - self.blenMeshes = {} # fbxMeshObName : mesh - self.fbxArm = fbxArm - self.restMatrix = blenBone.matrix['ARMATURESPACE'] - - # not used yet - # self.restMatrixInv = self.restMatrix.copy().invert() - # self.restMatrixLocal = None # set later, need parent matrix - - self.parent = None - - # not public - pose = fbxArm.blenObject.getPose() - self.__pose_bone = pose.bones[self.blenName] - - # store a list if matricies here, (poseMatrix, head, tail) - # {frame:posematrix, frame:posematrix, ...} - self.__anim_poselist = {} - - ''' - def calcRestMatrixLocal(self): - if self.parent: - self.restMatrixLocal = self.restMatrix * self.parent.restMatrix.copy().invert() - else: - self.restMatrixLocal = self.restMatrix.copy() - ''' - def setPoseFrame(self, f): - # cache pose info here, frame must be set beforehand - - # Didnt end up needing head or tail, if we do - here it is. - ''' - self.__anim_poselist[f] = (\ - self.__pose_bone.poseMatrix.copy(),\ - self.__pose_bone.head.copy(),\ - self.__pose_bone.tail.copy() ) - ''' - - self.__anim_poselist[f] = self.__pose_bone.poseMatrix.copy() - - # get pose from frame. - def getPoseMatrix(self, f):# ---------------------------------------------- - return self.__anim_poselist[f] - ''' - def getPoseHead(self, f): - #return self.__pose_bone.head.copy() - return self.__anim_poselist[f][1].copy() - def getPoseTail(self, f): - #return self.__pose_bone.tail.copy() - return self.__anim_poselist[f][2].copy() - ''' - # end - - def getAnimParRelMatrix(self, frame): - #arm_mat = self.fbxArm.matrixWorld - #arm_mat = self.fbxArm.parRelMatrix() - if not self.parent: - #return mtx4_z90 * (self.getPoseMatrix(frame) * arm_mat) # dont apply arm matrix anymore - return mtx4_z90 * self.getPoseMatrix(frame) - else: - #return (mtx4_z90 * ((self.getPoseMatrix(frame) * arm_mat))) * (mtx4_z90 * (self.parent.getPoseMatrix(frame) * arm_mat)).invert() - return (mtx4_z90 * (self.getPoseMatrix(frame))) * (mtx4_z90 * self.parent.getPoseMatrix(frame)).invert() - - # we need thes because cameras and lights modified rotations - def getAnimParRelMatrixRot(self, frame): - return self.getAnimParRelMatrix(frame) - - def flushAnimData(self): - self.__anim_poselist.clear() - - - class my_object_generic: - # Other settings can be applied for each type - mesh, armature etc. - def __init__(self, ob, matrixWorld = None): - self.fbxName = sane_obname(ob) - self.blenObject = ob - self.fbxGroupNames = [] - self.fbxParent = None # set later on IF the parent is in the selection. 
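
The my_bone_class storage class above caches one pose matrix per frame in a private dict and later derives parent-relative transforms by multiplying with the inverse of the parent's matrix for the same frame (my_object_generic below does the same for objects). A minimal numpy sketch of that caching pattern; numpy stands in for Blender.Mathutils, and the multiplication order follows the script's row-vector convention (child * parent.invert()):

    import numpy as np

    class PoseCache:
        """Cache a 4x4 world matrix per frame and derive parent-relative ones."""
        def __init__(self, parent=None):
            self.parent = parent      # another PoseCache, or None
            self._poses = {}          # frame -> 4x4 ndarray

        def set_pose(self, frame, world_matrix):
            self._poses[frame] = np.asarray(world_matrix, dtype=float).copy()

        def parent_relative(self, frame):
            m = self._poses[frame]
            if self.parent is None:
                return m
            # Row-vector convention as in the script: child * parent^-1.
            return m @ np.linalg.inv(self.parent._poses[frame])

        def clear(self):              # analogous to flushAnimData()
            self._poses.clear()
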
- if matrixWorld: self.matrixWorld = matrixWorld * GLOBAL_MATRIX - else: self.matrixWorld = ob.matrixWorld * GLOBAL_MATRIX - self.__anim_poselist = {} # we should only access this - - def parRelMatrix(self): - if self.fbxParent: - return self.matrixWorld * self.fbxParent.matrixWorld.copy().invert() - else: - return self.matrixWorld - - def setPoseFrame(self, f): - self.__anim_poselist[f] = self.blenObject.matrixWorld.copy() - - def getAnimParRelMatrix(self, frame): - if self.fbxParent: - #return (self.__anim_poselist[frame] * self.fbxParent.__anim_poselist[frame].copy().invert() ) * GLOBAL_MATRIX - return (self.__anim_poselist[frame] * GLOBAL_MATRIX) * (self.fbxParent.__anim_poselist[frame] * GLOBAL_MATRIX).invert() - else: - return self.__anim_poselist[frame] * GLOBAL_MATRIX - - def getAnimParRelMatrixRot(self, frame): - type = self.blenObject.type - if self.fbxParent: - matrix_rot = (((self.__anim_poselist[frame] * GLOBAL_MATRIX) * (self.fbxParent.__anim_poselist[frame] * GLOBAL_MATRIX).invert())).rotationPart() - else: - matrix_rot = (self.__anim_poselist[frame] * GLOBAL_MATRIX).rotationPart() - - # Lamps need to be rotated - if type =='Lamp': - matrix_rot = mtx_x90 * matrix_rot - elif ob and type =='Camera': - y = Vector(0,1,0) * matrix_rot - matrix_rot = matrix_rot * RotationMatrix(90, 3, 'r', y) - - return matrix_rot - - # ---------------------------------------------- - - - - - - print '\nFBX export starting...', filename - start_time = Blender.sys.time() - try: - file = open(filename, 'w') - except: - return False - - sce = bpy.data.scenes.active - world = sce.world - - - # ---------------------------- Write the header first - file.write(header_comment) - if time: - curtime = time.localtime()[0:6] - else: - curtime = (0,0,0,0,0,0) - # - file.write(\ -'''FBXHeaderExtension: { - FBXHeaderVersion: 1003 - FBXVersion: 6100 - CreationTimeStamp: { - Version: 1000 - Year: %.4i - Month: %.2i - Day: %.2i - Hour: %.2i - Minute: %.2i - Second: %.2i - Millisecond: 0 - } - Creator: "FBX SDK/FBX Plugins build 20070228" - OtherFlags: { - FlagPLE: 0 - } -}''' % (curtime)) - - file.write('\nCreationTime: "%.4i-%.2i-%.2i %.2i:%.2i:%.2i:000"' % curtime) - file.write('\nCreator: "Blender3D version %.2f"' % Blender.Get('version')) - - pose_items = [] # list of (fbxName, matrix) to write pose data for, easier to collect allong the way - - # --------------- funcs for exporting - def object_tx(ob, loc, matrix, matrix_mod = None): - ''' - Matrix mod is so armature objects can modify their bone matricies - ''' - if isinstance(ob, Blender.Types.BoneType): - - # we know we have a matrix - # matrix = mtx4_z90 * (ob.matrix['ARMATURESPACE'] * matrix_mod) - matrix = mtx4_z90 * ob.matrix['ARMATURESPACE'] # dont apply armature matrix anymore - - parent = ob.parent - if parent: - #par_matrix = mtx4_z90 * (parent.matrix['ARMATURESPACE'] * matrix_mod) - par_matrix = mtx4_z90 * parent.matrix['ARMATURESPACE'] # dont apply armature matrix anymore - matrix = matrix * par_matrix.copy().invert() - - matrix_rot = matrix.rotationPart() - - loc = tuple(matrix.translationPart()) - scale = tuple(matrix.scalePart()) - rot = tuple(matrix_rot.toEuler()) - - else: - # This is bad because we need the parent relative matrix from the fbx parent (if we have one), dont use anymore - #if ob and not matrix: matrix = ob.matrixWorld * GLOBAL_MATRIX - if ob and not matrix: raise "error: this should never happen!" 
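
The header written above takes the six integers of time.localtime()[0:6] and feeds them to both the CreationTimeStamp node and the "CreationTime" string. A self-contained sketch of that formatting (the out file handle and the blender_version parameter are illustrative stand-ins; the original reads the version via Blender.Get('version')):

    import sys
    import time

    def fbx_header_times(out, blender_version=2.49):
        """Write CreationTimeStamp fields plus CreationTime/Creator lines."""
        year, month, day, hour, minute, second = time.localtime()[0:6]
        out.write('\tCreationTimeStamp: {\n')
        out.write('\t\tVersion: 1000\n')
        out.write('\t\tYear: %.4i\n\t\tMonth: %.2i\n\t\tDay: %.2i\n' % (year, month, day))
        out.write('\t\tHour: %.2i\n\t\tMinute: %.2i\n\t\tSecond: %.2i\n' % (hour, minute, second))
        out.write('\t\tMillisecond: 0\n\t}\n')
        out.write('CreationTime: "%.4i-%.2i-%.2i %.2i:%.2i:%.2i:000"\n'
                  % (year, month, day, hour, minute, second))
        out.write('Creator: "Blender3D version %.2f"\n' % blender_version)

    fbx_header_times(sys.stdout)
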
- - matrix_rot = matrix - #if matrix: - # matrix = matrix_scale * matrix - - if matrix: - loc = tuple(matrix.translationPart()) - scale = tuple(matrix.scalePart()) - - matrix_rot = matrix.rotationPart() - # Lamps need to be rotated - if ob and ob.type =='Lamp': - matrix_rot = mtx_x90 * matrix_rot - rot = tuple(matrix_rot.toEuler()) - elif ob and ob.type =='Camera': - y = Vector(0,1,0) * matrix_rot - matrix_rot = matrix_rot * RotationMatrix(90, 3, 'r', y) - rot = tuple(matrix_rot.toEuler()) - else: - rot = tuple(matrix_rot.toEuler()) - else: - if not loc: - loc = 0,0,0 - scale = 1,1,1 - rot = 0,0,0 - - return loc, rot, scale, matrix, matrix_rot - - def write_object_tx(ob, loc, matrix, matrix_mod= None): - ''' - We have loc to set the location if non blender objects that have a location - - matrix_mod is only used for bones at the moment - ''' - loc, rot, scale, matrix, matrix_rot = object_tx(ob, loc, matrix, matrix_mod) - - file.write('\n\t\t\tProperty: "Lcl Translation", "Lcl Translation", "A+",%.15f,%.15f,%.15f' % loc) - file.write('\n\t\t\tProperty: "Lcl Rotation", "Lcl Rotation", "A+",%.15f,%.15f,%.15f' % rot) - file.write('\n\t\t\tProperty: "Lcl Scaling", "Lcl Scaling", "A+",%.15f,%.15f,%.15f' % scale) - return loc, rot, scale, matrix, matrix_rot - - def write_object_props(ob=None, loc=None, matrix=None, matrix_mod=None): - # if the type is 0 its an empty otherwise its a mesh - # only difference at the moment is one has a color - file.write(''' - Properties60: { - Property: "QuaternionInterpolate", "bool", "",0 - Property: "Visibility", "Visibility", "A+",1''') - - loc, rot, scale, matrix, matrix_rot = write_object_tx(ob, loc, matrix, matrix_mod) - - # Rotation order, note, for FBX files Iv loaded normal order is 1 - # setting to zero. - # eEULER_XYZ = 0 - # eEULER_XZY - # eEULER_YZX - # eEULER_YXZ - # eEULER_ZXY - # eEULER_ZYX - - file.write(''' - Property: "RotationOffset", "Vector3D", "",0,0,0 - Property: "RotationPivot", "Vector3D", "",0,0,0 - Property: "ScalingOffset", "Vector3D", "",0,0,0 - Property: "ScalingPivot", "Vector3D", "",0,0,0 - Property: "TranslationActive", "bool", "",0 - Property: "TranslationMin", "Vector3D", "",0,0,0 - Property: "TranslationMax", "Vector3D", "",0,0,0 - Property: "TranslationMinX", "bool", "",0 - Property: "TranslationMinY", "bool", "",0 - Property: "TranslationMinZ", "bool", "",0 - Property: "TranslationMaxX", "bool", "",0 - Property: "TranslationMaxY", "bool", "",0 - Property: "TranslationMaxZ", "bool", "",0 - Property: "RotationOrder", "enum", "",0 - Property: "RotationSpaceForLimitOnly", "bool", "",0 - Property: "AxisLen", "double", "",10 - Property: "PreRotation", "Vector3D", "",0,0,0 - Property: "PostRotation", "Vector3D", "",0,0,0 - Property: "RotationActive", "bool", "",0 - Property: "RotationMin", "Vector3D", "",0,0,0 - Property: "RotationMax", "Vector3D", "",0,0,0 - Property: "RotationMinX", "bool", "",0 - Property: "RotationMinY", "bool", "",0 - Property: "RotationMinZ", "bool", "",0 - Property: "RotationMaxX", "bool", "",0 - Property: "RotationMaxY", "bool", "",0 - Property: "RotationMaxZ", "bool", "",0 - Property: "RotationStiffnessX", "double", "",0 - Property: "RotationStiffnessY", "double", "",0 - Property: "RotationStiffnessZ", "double", "",0 - Property: "MinDampRangeX", "double", "",0 - Property: "MinDampRangeY", "double", "",0 - Property: "MinDampRangeZ", "double", "",0 - Property: "MaxDampRangeX", "double", "",0 - Property: "MaxDampRangeY", "double", "",0 - Property: "MaxDampRangeZ", "double", "",0 - Property: "MinDampStrengthX", 
"double", "",0 - Property: "MinDampStrengthY", "double", "",0 - Property: "MinDampStrengthZ", "double", "",0 - Property: "MaxDampStrengthX", "double", "",0 - Property: "MaxDampStrengthY", "double", "",0 - Property: "MaxDampStrengthZ", "double", "",0 - Property: "PreferedAngleX", "double", "",0 - Property: "PreferedAngleY", "double", "",0 - Property: "PreferedAngleZ", "double", "",0 - Property: "InheritType", "enum", "",0 - Property: "ScalingActive", "bool", "",0 - Property: "ScalingMin", "Vector3D", "",1,1,1 - Property: "ScalingMax", "Vector3D", "",1,1,1 - Property: "ScalingMinX", "bool", "",0 - Property: "ScalingMinY", "bool", "",0 - Property: "ScalingMinZ", "bool", "",0 - Property: "ScalingMaxX", "bool", "",0 - Property: "ScalingMaxY", "bool", "",0 - Property: "ScalingMaxZ", "bool", "",0 - Property: "GeometricTranslation", "Vector3D", "",0,0,0 - Property: "GeometricRotation", "Vector3D", "",0,0,0 - Property: "GeometricScaling", "Vector3D", "",1,1,1 - Property: "LookAtProperty", "object", "" - Property: "UpVectorProperty", "object", "" - Property: "Show", "bool", "",1 - Property: "NegativePercentShapeSupport", "bool", "",1 - Property: "DefaultAttributeIndex", "int", "",0''') - if ob and type(ob) != Blender.Types.BoneType: - # Only mesh objects have color - file.write('\n\t\t\tProperty: "Color", "Color", "A",0.8,0.8,0.8') - file.write('\n\t\t\tProperty: "Size", "double", "",100') - file.write('\n\t\t\tProperty: "Look", "enum", "",1') - - return loc, rot, scale, matrix, matrix_rot - - - # -------------------------------------------- Armatures - #def write_bone(bone, name, matrix_mod): - def write_bone(my_bone): - file.write('\n\tModel: "Model::%s", "Limb" {' % my_bone.fbxName) - file.write('\n\t\tVersion: 232') - - #poseMatrix = write_object_props(my_bone.blenBone, None, None, my_bone.fbxArm.parRelMatrix())[3] - poseMatrix = write_object_props(my_bone.blenBone)[3] # dont apply bone matricies anymore - pose_items.append( (my_bone.fbxName, poseMatrix) ) - - - # file.write('\n\t\t\tProperty: "Size", "double", "",%.6f' % ((my_bone.blenData.head['ARMATURESPACE'] - my_bone.blenData.tail['ARMATURESPACE']) * my_bone.fbxArm.parRelMatrix()).length) - file.write('\n\t\t\tProperty: "Size", "double", "",1') - - #((my_bone.blenData.head['ARMATURESPACE'] * my_bone.fbxArm.matrixWorld) - (my_bone.blenData.tail['ARMATURESPACE'] * my_bone.fbxArm.parRelMatrix())).length) - - """ - file.write('\n\t\t\tProperty: "LimbLength", "double", "",%.6f' %\ - ((my_bone.blenBone.head['ARMATURESPACE'] - my_bone.blenBone.tail['ARMATURESPACE']) * my_bone.fbxArm.parRelMatrix()).length) - """ - - file.write('\n\t\t\tProperty: "LimbLength", "double", "",%.6f' %\ - (my_bone.blenBone.head['ARMATURESPACE'] - my_bone.blenBone.tail['ARMATURESPACE']).length) - - #file.write('\n\t\t\tProperty: "LimbLength", "double", "",1') - file.write('\n\t\t\tProperty: "Color", "ColorRGB", "",0.8,0.8,0.8') - file.write('\n\t\t\tProperty: "Color", "Color", "A",0.8,0.8,0.8') - file.write('\n\t\t}') - file.write('\n\t\tMultiLayer: 0') - file.write('\n\t\tMultiTake: 1') - file.write('\n\t\tShading: Y') - file.write('\n\t\tCulling: "CullingOff"') - file.write('\n\t\tTypeFlags: "Skeleton"') - file.write('\n\t}') - - def write_camera_switch(): - file.write(''' - Model: "Model::Camera Switcher", "CameraSwitcher" { - Version: 232''') - - write_object_props() - file.write(''' - Property: "Color", "Color", "A",0.8,0.8,0.8 - Property: "Camera Index", "Integer", "A+",100 - } - MultiLayer: 0 - MultiTake: 1 - Hidden: "True" - Shading: W - Culling: "CullingOff" - 
Version: 101 - Name: "Model::Camera Switcher" - CameraId: 0 - CameraName: 100 - CameraIndexName: - }''') - - def write_camera_dummy(name, loc, near, far, proj_type, up): - file.write('\n\tModel: "Model::%s", "Camera" {' % name ) - file.write('\n\t\tVersion: 232') - write_object_props(None, loc) - - file.write('\n\t\t\tProperty: "Color", "Color", "A",0.8,0.8,0.8') - file.write('\n\t\t\tProperty: "Roll", "Roll", "A+",0') - file.write('\n\t\t\tProperty: "FieldOfView", "FieldOfView", "A+",40') - file.write('\n\t\t\tProperty: "FieldOfViewX", "FieldOfView", "A+",1') - file.write('\n\t\t\tProperty: "FieldOfViewY", "FieldOfView", "A+",1') - file.write('\n\t\t\tProperty: "OpticalCenterX", "Real", "A+",0') - file.write('\n\t\t\tProperty: "OpticalCenterY", "Real", "A+",0') - file.write('\n\t\t\tProperty: "BackgroundColor", "Color", "A+",0.63,0.63,0.63') - file.write('\n\t\t\tProperty: "TurnTable", "Real", "A+",0') - file.write('\n\t\t\tProperty: "DisplayTurnTableIcon", "bool", "",1') - file.write('\n\t\t\tProperty: "Motion Blur Intensity", "Real", "A+",1') - file.write('\n\t\t\tProperty: "UseMotionBlur", "bool", "",0') - file.write('\n\t\t\tProperty: "UseRealTimeMotionBlur", "bool", "",1') - file.write('\n\t\t\tProperty: "ResolutionMode", "enum", "",0') - file.write('\n\t\t\tProperty: "ApertureMode", "enum", "",2') - file.write('\n\t\t\tProperty: "GateFit", "enum", "",0') - file.write('\n\t\t\tProperty: "FocalLength", "Real", "A+",21.3544940948486') - file.write('\n\t\t\tProperty: "CameraFormat", "enum", "",0') - file.write('\n\t\t\tProperty: "AspectW", "double", "",320') - file.write('\n\t\t\tProperty: "AspectH", "double", "",200') - file.write('\n\t\t\tProperty: "PixelAspectRatio", "double", "",1') - file.write('\n\t\t\tProperty: "UseFrameColor", "bool", "",0') - file.write('\n\t\t\tProperty: "FrameColor", "ColorRGB", "",0.3,0.3,0.3') - file.write('\n\t\t\tProperty: "ShowName", "bool", "",1') - file.write('\n\t\t\tProperty: "ShowGrid", "bool", "",1') - file.write('\n\t\t\tProperty: "ShowOpticalCenter", "bool", "",0') - file.write('\n\t\t\tProperty: "ShowAzimut", "bool", "",1') - file.write('\n\t\t\tProperty: "ShowTimeCode", "bool", "",0') - file.write('\n\t\t\tProperty: "NearPlane", "double", "",%.6f' % near) - file.write('\n\t\t\tProperty: "FarPlane", "double", "",%.6f' % far) - file.write('\n\t\t\tProperty: "FilmWidth", "double", "",0.816') - file.write('\n\t\t\tProperty: "FilmHeight", "double", "",0.612') - file.write('\n\t\t\tProperty: "FilmAspectRatio", "double", "",1.33333333333333') - file.write('\n\t\t\tProperty: "FilmSqueezeRatio", "double", "",1') - file.write('\n\t\t\tProperty: "FilmFormatIndex", "enum", "",4') - file.write('\n\t\t\tProperty: "ViewFrustum", "bool", "",1') - file.write('\n\t\t\tProperty: "ViewFrustumNearFarPlane", "bool", "",0') - file.write('\n\t\t\tProperty: "ViewFrustumBackPlaneMode", "enum", "",2') - file.write('\n\t\t\tProperty: "BackPlaneDistance", "double", "",100') - file.write('\n\t\t\tProperty: "BackPlaneDistanceMode", "enum", "",0') - file.write('\n\t\t\tProperty: "ViewCameraToLookAt", "bool", "",1') - file.write('\n\t\t\tProperty: "LockMode", "bool", "",0') - file.write('\n\t\t\tProperty: "LockInterestNavigation", "bool", "",0') - file.write('\n\t\t\tProperty: "FitImage", "bool", "",0') - file.write('\n\t\t\tProperty: "Crop", "bool", "",0') - file.write('\n\t\t\tProperty: "Center", "bool", "",1') - file.write('\n\t\t\tProperty: "KeepRatio", "bool", "",1') - file.write('\n\t\t\tProperty: "BackgroundMode", "enum", "",0') - file.write('\n\t\t\tProperty: 
"BackgroundAlphaTreshold", "double", "",0.5') - file.write('\n\t\t\tProperty: "ForegroundTransparent", "bool", "",1') - file.write('\n\t\t\tProperty: "DisplaySafeArea", "bool", "",0') - file.write('\n\t\t\tProperty: "SafeAreaDisplayStyle", "enum", "",1') - file.write('\n\t\t\tProperty: "SafeAreaAspectRatio", "double", "",1.33333333333333') - file.write('\n\t\t\tProperty: "Use2DMagnifierZoom", "bool", "",0') - file.write('\n\t\t\tProperty: "2D Magnifier Zoom", "Real", "A+",100') - file.write('\n\t\t\tProperty: "2D Magnifier X", "Real", "A+",50') - file.write('\n\t\t\tProperty: "2D Magnifier Y", "Real", "A+",50') - file.write('\n\t\t\tProperty: "CameraProjectionType", "enum", "",%i' % proj_type) - file.write('\n\t\t\tProperty: "UseRealTimeDOFAndAA", "bool", "",0') - file.write('\n\t\t\tProperty: "UseDepthOfField", "bool", "",0') - file.write('\n\t\t\tProperty: "FocusSource", "enum", "",0') - file.write('\n\t\t\tProperty: "FocusAngle", "double", "",3.5') - file.write('\n\t\t\tProperty: "FocusDistance", "double", "",200') - file.write('\n\t\t\tProperty: "UseAntialiasing", "bool", "",0') - file.write('\n\t\t\tProperty: "AntialiasingIntensity", "double", "",0.77777') - file.write('\n\t\t\tProperty: "UseAccumulationBuffer", "bool", "",0') - file.write('\n\t\t\tProperty: "FrameSamplingCount", "int", "",7') - file.write('\n\t\t}') - file.write('\n\t\tMultiLayer: 0') - file.write('\n\t\tMultiTake: 0') - file.write('\n\t\tHidden: "True"') - file.write('\n\t\tShading: Y') - file.write('\n\t\tCulling: "CullingOff"') - file.write('\n\t\tTypeFlags: "Camera"') - file.write('\n\t\tGeometryVersion: 124') - file.write('\n\t\tPosition: %.6f,%.6f,%.6f' % loc) - file.write('\n\t\tUp: %i,%i,%i' % up) - file.write('\n\t\tLookAt: 0,0,0') - file.write('\n\t\tShowInfoOnMoving: 1') - file.write('\n\t\tShowAudio: 0') - file.write('\n\t\tAudioColor: 0,1,0') - file.write('\n\t\tCameraOrthoZoom: 1') - file.write('\n\t}') - - def write_camera_default(): - # This sucks but to match FBX converter its easier to - # write the cameras though they are not needed. - write_camera_dummy('Producer Perspective', (0,71.3,287.5), 10, 4000, 0, (0,1,0)) - write_camera_dummy('Producer Top', (0,4000,0), 1, 30000, 1, (0,0,-1)) - write_camera_dummy('Producer Bottom', (0,-4000,0), 1, 30000, 1, (0,0,-1)) - write_camera_dummy('Producer Front', (0,0,4000), 1, 30000, 1, (0,1,0)) - write_camera_dummy('Producer Back', (0,0,-4000), 1, 30000, 1, (0,1,0)) - write_camera_dummy('Producer Right', (4000,0,0), 1, 30000, 1, (0,1,0)) - write_camera_dummy('Producer Left', (-4000,0,0), 1, 30000, 1, (0,1,0)) - - def write_camera(my_cam): - ''' - Write a blender camera - ''' - render = sce.render - width = render.sizeX - height = render.sizeY - aspect = float(width)/height - - data = my_cam.blenObject.data - - file.write('\n\tModel: "Model::%s", "Camera" {' % my_cam.fbxName ) - file.write('\n\t\tVersion: 232') - loc, rot, scale, matrix, matrix_rot = write_object_props(my_cam.blenObject, None, my_cam.parRelMatrix()) - - file.write('\n\t\t\tProperty: "Roll", "Roll", "A+",0') - file.write('\n\t\t\tProperty: "FieldOfView", "FieldOfView", "A+",%.6f' % data.angle) - file.write('\n\t\t\tProperty: "FieldOfViewX", "FieldOfView", "A+",1') - file.write('\n\t\t\tProperty: "FieldOfViewY", "FieldOfView", "A+",1') - file.write('\n\t\t\tProperty: "FocalLength", "Real", "A+",14.0323972702026') - file.write('\n\t\t\tProperty: "OpticalCenterX", "Real", "A+",%.6f' % data.shiftX) # not sure if this is in the correct units? 
- file.write('\n\t\t\tProperty: "OpticalCenterY", "Real", "A+",%.6f' % data.shiftY) # ditto - file.write('\n\t\t\tProperty: "BackgroundColor", "Color", "A+",0,0,0') - file.write('\n\t\t\tProperty: "TurnTable", "Real", "A+",0') - file.write('\n\t\t\tProperty: "DisplayTurnTableIcon", "bool", "",1') - file.write('\n\t\t\tProperty: "Motion Blur Intensity", "Real", "A+",1') - file.write('\n\t\t\tProperty: "UseMotionBlur", "bool", "",0') - file.write('\n\t\t\tProperty: "UseRealTimeMotionBlur", "bool", "",1') - file.write('\n\t\t\tProperty: "ResolutionMode", "enum", "",0') - file.write('\n\t\t\tProperty: "ApertureMode", "enum", "",2') - file.write('\n\t\t\tProperty: "GateFit", "enum", "",0') - file.write('\n\t\t\tProperty: "CameraFormat", "enum", "",0') - file.write('\n\t\t\tProperty: "AspectW", "double", "",%i' % width) - file.write('\n\t\t\tProperty: "AspectH", "double", "",%i' % height) - - '''Camera aspect ratio modes. - 0 If the ratio mode is eWINDOW_SIZE, both width and height values aren't relevant. - 1 If the ratio mode is eFIXED_RATIO, the height value is set to 1.0 and the width value is relative to the height value. - 2 If the ratio mode is eFIXED_RESOLUTION, both width and height values are in pixels. - 3 If the ratio mode is eFIXED_WIDTH, the width value is in pixels and the height value is relative to the width value. - 4 If the ratio mode is eFIXED_HEIGHT, the height value is in pixels and the width value is relative to the height value. - - Definition at line 234 of file kfbxcamera.h. ''' - - file.write('\n\t\t\tProperty: "PixelAspectRatio", "double", "",2') - - file.write('\n\t\t\tProperty: "UseFrameColor", "bool", "",0') - file.write('\n\t\t\tProperty: "FrameColor", "ColorRGB", "",0.3,0.3,0.3') - file.write('\n\t\t\tProperty: "ShowName", "bool", "",1') - file.write('\n\t\t\tProperty: "ShowGrid", "bool", "",1') - file.write('\n\t\t\tProperty: "ShowOpticalCenter", "bool", "",0') - file.write('\n\t\t\tProperty: "ShowAzimut", "bool", "",1') - file.write('\n\t\t\tProperty: "ShowTimeCode", "bool", "",0') - file.write('\n\t\t\tProperty: "NearPlane", "double", "",%.6f' % data.clipStart) - file.write('\n\t\t\tProperty: "FarPlane", "double", "",%.6f' % data.clipStart) - file.write('\n\t\t\tProperty: "FilmWidth", "double", "",1.0') - file.write('\n\t\t\tProperty: "FilmHeight", "double", "",1.0') - file.write('\n\t\t\tProperty: "FilmAspectRatio", "double", "",%.6f' % aspect) - file.write('\n\t\t\tProperty: "FilmSqueezeRatio", "double", "",1') - file.write('\n\t\t\tProperty: "FilmFormatIndex", "enum", "",0') - file.write('\n\t\t\tProperty: "ViewFrustum", "bool", "",1') - file.write('\n\t\t\tProperty: "ViewFrustumNearFarPlane", "bool", "",0') - file.write('\n\t\t\tProperty: "ViewFrustumBackPlaneMode", "enum", "",2') - file.write('\n\t\t\tProperty: "BackPlaneDistance", "double", "",100') - file.write('\n\t\t\tProperty: "BackPlaneDistanceMode", "enum", "",0') - file.write('\n\t\t\tProperty: "ViewCameraToLookAt", "bool", "",1') - file.write('\n\t\t\tProperty: "LockMode", "bool", "",0') - file.write('\n\t\t\tProperty: "LockInterestNavigation", "bool", "",0') - file.write('\n\t\t\tProperty: "FitImage", "bool", "",0') - file.write('\n\t\t\tProperty: "Crop", "bool", "",0') - file.write('\n\t\t\tProperty: "Center", "bool", "",1') - file.write('\n\t\t\tProperty: "KeepRatio", "bool", "",1') - file.write('\n\t\t\tProperty: "BackgroundMode", "enum", "",0') - file.write('\n\t\t\tProperty: "BackgroundAlphaTreshold", "double", "",0.5') - file.write('\n\t\t\tProperty: "ForegroundTransparent", "bool", "",1') 
- file.write('\n\t\t\tProperty: "DisplaySafeArea", "bool", "",0') - file.write('\n\t\t\tProperty: "SafeAreaDisplayStyle", "enum", "",1') - file.write('\n\t\t\tProperty: "SafeAreaAspectRatio", "double", "",%.6f' % aspect) - file.write('\n\t\t\tProperty: "Use2DMagnifierZoom", "bool", "",0') - file.write('\n\t\t\tProperty: "2D Magnifier Zoom", "Real", "A+",100') - file.write('\n\t\t\tProperty: "2D Magnifier X", "Real", "A+",50') - file.write('\n\t\t\tProperty: "2D Magnifier Y", "Real", "A+",50') - file.write('\n\t\t\tProperty: "CameraProjectionType", "enum", "",0') - file.write('\n\t\t\tProperty: "UseRealTimeDOFAndAA", "bool", "",0') - file.write('\n\t\t\tProperty: "UseDepthOfField", "bool", "",0') - file.write('\n\t\t\tProperty: "FocusSource", "enum", "",0') - file.write('\n\t\t\tProperty: "FocusAngle", "double", "",3.5') - file.write('\n\t\t\tProperty: "FocusDistance", "double", "",200') - file.write('\n\t\t\tProperty: "UseAntialiasing", "bool", "",0') - file.write('\n\t\t\tProperty: "AntialiasingIntensity", "double", "",0.77777') - file.write('\n\t\t\tProperty: "UseAccumulationBuffer", "bool", "",0') - file.write('\n\t\t\tProperty: "FrameSamplingCount", "int", "",7') - - file.write('\n\t\t}') - file.write('\n\t\tMultiLayer: 0') - file.write('\n\t\tMultiTake: 0') - file.write('\n\t\tShading: Y') - file.write('\n\t\tCulling: "CullingOff"') - file.write('\n\t\tTypeFlags: "Camera"') - file.write('\n\t\tGeometryVersion: 124') - file.write('\n\t\tPosition: %.6f,%.6f,%.6f' % loc) - file.write('\n\t\tUp: %.6f,%.6f,%.6f' % tuple(Vector(0,1,0) * matrix_rot) ) - file.write('\n\t\tLookAt: %.6f,%.6f,%.6f' % tuple(Vector(0,0,-1)*matrix_rot) ) - - #file.write('\n\t\tUp: 0,0,0' ) - #file.write('\n\t\tLookAt: 0,0,0' ) - - file.write('\n\t\tShowInfoOnMoving: 1') - file.write('\n\t\tShowAudio: 0') - file.write('\n\t\tAudioColor: 0,1,0') - file.write('\n\t\tCameraOrthoZoom: 1') - file.write('\n\t}') - - def write_light(my_light): - light = my_light.blenObject.data - file.write('\n\tModel: "Model::%s", "Light" {' % my_light.fbxName) - file.write('\n\t\tVersion: 232') - - write_object_props(my_light.blenObject, None, my_light.parRelMatrix()) - - # Why are these values here twice?????? 
- oh well, follow the holy sdk's output - - # Blender light types match FBX's, funny coincidence, we just need to - # be sure that all unsupported types are made into a point light - #ePOINT, - #eDIRECTIONAL - #eSPOT - light_type = light.type - if light_type > 2: light_type = 1 # hemi and area lights become directional - - mode = light.mode - if mode & Blender.Lamp.Modes.RayShadow or mode & Blender.Lamp.Modes.Shadows: - do_shadow = 1 - else: - do_shadow = 0 - - if mode & Blender.Lamp.Modes.OnlyShadow or (mode & Blender.Lamp.Modes.NoDiffuse and mode & Blender.Lamp.Modes.NoSpecular): - do_light = 0 - else: - do_light = 1 - - scale = abs(GLOBAL_MATRIX.scalePart()[0]) # scale is always uniform in this case - - file.write('\n\t\t\tProperty: "LightType", "enum", "",%i' % light_type) - file.write('\n\t\t\tProperty: "CastLightOnObject", "bool", "",1') - file.write('\n\t\t\tProperty: "DrawVolumetricLight", "bool", "",1') - file.write('\n\t\t\tProperty: "DrawGroundProjection", "bool", "",1') - file.write('\n\t\t\tProperty: "DrawFrontFacingVolumetricLight", "bool", "",0') - file.write('\n\t\t\tProperty: "GoboProperty", "object", ""') - file.write('\n\t\t\tProperty: "Color", "Color", "A+",1,1,1') - file.write('\n\t\t\tProperty: "Intensity", "Intensity", "A+",%.2f' % (min(light.energy*100, 200))) # clamp below 200 - file.write('\n\t\t\tProperty: "Cone angle", "Cone angle", "A+",%.2f' % (light.spotSize * scale)) - file.write('\n\t\t\tProperty: "Fog", "Fog", "A+",50') - file.write('\n\t\t\tProperty: "Color", "Color", "A",%.2f,%.2f,%.2f' % tuple(light.col)) - file.write('\n\t\t\tProperty: "Intensity", "Intensity", "A+",%.2f' % (min(light.energy*100, 200))) # clamp below 200 - file.write('\n\t\t\tProperty: "Cone angle", "Cone angle", "A+",%.2f' % (light.spotSize * scale)) - file.write('\n\t\t\tProperty: "Fog", "Fog", "A+",50') - file.write('\n\t\t\tProperty: "LightType", "enum", "",%i' % light_type) - file.write('\n\t\t\tProperty: "CastLightOnObject", "bool", "",%i' % do_light) - file.write('\n\t\t\tProperty: "DrawGroundProjection", "bool", "",1') - file.write('\n\t\t\tProperty: "DrawFrontFacingVolumetricLight", "bool", "",0') - file.write('\n\t\t\tProperty: "DrawVolumetricLight", "bool", "",1') - file.write('\n\t\t\tProperty: "GoboProperty", "object", ""') - file.write('\n\t\t\tProperty: "DecayType", "enum", "",0') - file.write('\n\t\t\tProperty: "DecayStart", "double", "",%.2f' % light.dist) - file.write('\n\t\t\tProperty: "EnableNearAttenuation", "bool", "",0') - file.write('\n\t\t\tProperty: "NearAttenuationStart", "double", "",0') - file.write('\n\t\t\tProperty: "NearAttenuationEnd", "double", "",0') - file.write('\n\t\t\tProperty: "EnableFarAttenuation", "bool", "",0') - file.write('\n\t\t\tProperty: "FarAttenuationStart", "double", "",0') - file.write('\n\t\t\tProperty: "FarAttenuationEnd", "double", "",0') - file.write('\n\t\t\tProperty: "CastShadows", "bool", "",%i' % do_shadow) - file.write('\n\t\t\tProperty: "ShadowColor", "ColorRGBA", "",0,0,0,1') - file.write('\n\t\t}') - file.write('\n\t\tMultiLayer: 0') - file.write('\n\t\tMultiTake: 0') - file.write('\n\t\tShading: Y') - file.write('\n\t\tCulling: "CullingOff"') - file.write('\n\t\tTypeFlags: "Light"') - file.write('\n\t\tGeometryVersion: 124') - file.write('\n\t}') - - # matrixOnly is not used at the moment - def write_null(my_null = None, fbxName = None, matrixOnly = None): - # ob can be null - if not fbxName: fbxName = my_null.fbxName - - file.write('\n\tModel: "Model::%s", "Null" {' % fbxName) - file.write('\n\t\tVersion: 232') - - # 
only use this for the root matrix at the moment - if matrixOnly: - poseMatrix = write_object_props(None, None, matrixOnly)[3] - - else: # all other Null's - if my_null: poseMatrix = write_object_props(my_null.blenObject, None, my_null.parRelMatrix())[3] - else: poseMatrix = write_object_props()[3] - - pose_items.append((fbxName, poseMatrix)) - - file.write(''' - } - MultiLayer: 0 - MultiTake: 1 - Shading: Y - Culling: "CullingOff" - TypeFlags: "Null" - }''') - - # Material Settings - if world: world_amb = world.getAmb() - else: world_amb = (0,0,0) # Default value - - def write_material(matname, mat): - file.write('\n\tMaterial: "Material::%s", "" {' % matname) - - # Todo, add more material Properties. - if mat: - mat_cold = tuple(mat.rgbCol) - mat_cols = tuple(mat.specCol) - #mat_colm = tuple(mat.mirCol) # we wont use the mirror color - mat_colamb = tuple([c for c in world_amb]) - - mat_dif = mat.ref - mat_amb = mat.amb - mat_hard = (float(mat.hard)-1)/5.10 - mat_spec = mat.spec/2.0 - mat_alpha = mat.alpha - mat_emit = mat.emit - mat_shadeless = mat.mode & Blender.Material.Modes.SHADELESS - if mat_shadeless: - mat_shader = 'Lambert' - else: - if mat.diffuseShader == Blender.Material.Shaders.DIFFUSE_LAMBERT: - mat_shader = 'Lambert' - else: - mat_shader = 'Phong' - else: - mat_cols = mat_cold = 0.8, 0.8, 0.8 - mat_colamb = 0.0,0.0,0.0 - # mat_colm - mat_dif = 1.0 - mat_amb = 0.5 - mat_hard = 20.0 - mat_spec = 0.2 - mat_alpha = 1.0 - mat_emit = 0.0 - mat_shadeless = False - mat_shader = 'Phong' - - file.write('\n\t\tVersion: 102') - file.write('\n\t\tShadingModel: "%s"' % mat_shader.lower()) - file.write('\n\t\tMultiLayer: 0') - - file.write('\n\t\tProperties60: {') - file.write('\n\t\t\tProperty: "ShadingModel", "KString", "", "%s"' % mat_shader) - file.write('\n\t\t\tProperty: "MultiLayer", "bool", "",0') - file.write('\n\t\t\tProperty: "EmissiveColor", "ColorRGB", "",%.4f,%.4f,%.4f' % mat_cold) # emit and diffuse color are he same in blender - file.write('\n\t\t\tProperty: "EmissiveFactor", "double", "",%.4f' % mat_emit) - - file.write('\n\t\t\tProperty: "AmbientColor", "ColorRGB", "",%.4f,%.4f,%.4f' % mat_colamb) - file.write('\n\t\t\tProperty: "AmbientFactor", "double", "",%.4f' % mat_amb) - file.write('\n\t\t\tProperty: "DiffuseColor", "ColorRGB", "",%.4f,%.4f,%.4f' % mat_cold) - file.write('\n\t\t\tProperty: "DiffuseFactor", "double", "",%.4f' % mat_dif) - file.write('\n\t\t\tProperty: "Bump", "Vector3D", "",0,0,0') - file.write('\n\t\t\tProperty: "TransparentColor", "ColorRGB", "",1,1,1') - file.write('\n\t\t\tProperty: "TransparencyFactor", "double", "",%.4f' % (1.0 - mat_alpha)) - if not mat_shadeless: - file.write('\n\t\t\tProperty: "SpecularColor", "ColorRGB", "",%.4f,%.4f,%.4f' % mat_cols) - file.write('\n\t\t\tProperty: "SpecularFactor", "double", "",%.4f' % mat_spec) - file.write('\n\t\t\tProperty: "ShininessExponent", "double", "",80.0') - file.write('\n\t\t\tProperty: "ReflectionColor", "ColorRGB", "",0,0,0') - file.write('\n\t\t\tProperty: "ReflectionFactor", "double", "",1') - file.write('\n\t\t\tProperty: "Emissive", "ColorRGB", "",0,0,0') - file.write('\n\t\t\tProperty: "Ambient", "ColorRGB", "",%.1f,%.1f,%.1f' % mat_colamb) - file.write('\n\t\t\tProperty: "Diffuse", "ColorRGB", "",%.1f,%.1f,%.1f' % mat_cold) - if not mat_shadeless: - file.write('\n\t\t\tProperty: "Specular", "ColorRGB", "",%.1f,%.1f,%.1f' % mat_cols) - file.write('\n\t\t\tProperty: "Shininess", "double", "",%.1f' % mat_hard) - file.write('\n\t\t\tProperty: "Opacity", "double", "",%.1f' % mat_alpha) - if 
not mat_shadeless: - file.write('\n\t\t\tProperty: "Reflectivity", "double", "",0') - - file.write('\n\t\t}') - file.write('\n\t}') - - def write_video(texname, tex): - # Same as texture really! - file.write('\n\tVideo: "Video::%s", "Clip" {' % texname) - - file.write(''' - Type: "Clip" - Properties60: { - Property: "FrameRate", "double", "",0 - Property: "LastFrame", "int", "",0 - Property: "Width", "int", "",0 - Property: "Height", "int", "",0''') - if tex: - fname, fname_strip, fname_rel = derived_paths(tex.filename, basepath, EXP_IMAGE_COPY) - else: - fname = fname_strip = fname_rel = '' - - file.write('\n\t\t\tProperty: "Path", "charptr", "", "%s"' % fname_strip) - - - file.write(''' - Property: "StartFrame", "int", "",0 - Property: "StopFrame", "int", "",0 - Property: "PlaySpeed", "double", "",1 - Property: "Offset", "KTime", "",0 - Property: "InterlaceMode", "enum", "",0 - Property: "FreeRunning", "bool", "",0 - Property: "Loop", "bool", "",0 - Property: "AccessMode", "enum", "",0 - } - UseMipMap: 0''') - - file.write('\n\t\tFilename: "%s"' % fname_strip) - if fname_strip: fname_strip = '/' + fname_strip - file.write('\n\t\tRelativeFilename: "%s"' % fname_rel) # make relative - file.write('\n\t}') - - - def write_texture(texname, tex, num): - # if tex == None then this is a dummy tex - file.write('\n\tTexture: "Texture::%s", "TextureVideoClip" {' % texname) - file.write('\n\t\tType: "TextureVideoClip"') - file.write('\n\t\tVersion: 202') - # TODO, rare case _empty_ exists as a name. - file.write('\n\t\tTextureName: "Texture::%s"' % texname) - - file.write(''' - Properties60: { - Property: "Translation", "Vector", "A+",0,0,0 - Property: "Rotation", "Vector", "A+",0,0,0 - Property: "Scaling", "Vector", "A+",1,1,1''') - file.write('\n\t\t\tProperty: "Texture alpha", "Number", "A+",%i' % num) - - - # WrapModeU/V 0==rep, 1==clamp, TODO add support - file.write(''' - Property: "TextureTypeUse", "enum", "",0 - Property: "CurrentTextureBlendMode", "enum", "",1 - Property: "UseMaterial", "bool", "",0 - Property: "UseMipMap", "bool", "",0 - Property: "CurrentMappingType", "enum", "",0 - Property: "UVSwap", "bool", "",0''') - - file.write('\n\t\t\tProperty: "WrapModeU", "enum", "",%i' % tex.clampX) - file.write('\n\t\t\tProperty: "WrapModeV", "enum", "",%i' % tex.clampY) - - file.write(''' - Property: "TextureRotationPivot", "Vector3D", "",0,0,0 - Property: "TextureScalingPivot", "Vector3D", "",0,0,0 - Property: "VideoProperty", "object", "" - }''') - - file.write('\n\t\tMedia: "Video::%s"' % texname) - - if tex: - fname, fname_strip, fname_rel = derived_paths(tex.filename, basepath, EXP_IMAGE_COPY) - else: - fname = fname_strip = fname_rel = '' - - file.write('\n\t\tFileName: "%s"' % fname_strip) - file.write('\n\t\tRelativeFilename: "%s"' % fname_rel) # need some make relative command - - file.write(''' - ModelUVTranslation: 0,0 - ModelUVScaling: 1,1 - Texture_Alpha_Source: "None" - Cropping: 0,0,0,0 - }''') - - def write_deformer_skin(obname): - ''' - Each mesh has its own deformer - ''' - file.write('\n\tDeformer: "Deformer::Skin %s", "Skin" {' % obname) - file.write(''' - Version: 100 - MultiLayer: 0 - Type: "Skin" - Properties60: { - } - Link_DeformAcuracy: 50 - }''') - - # in the example was 'Bip01 L Thigh_2' - def write_sub_deformer_skin(my_mesh, my_bone, weights): - - ''' - Each subdeformer is spesific to a mesh, but the bone it links to can be used by many sub-deformers - So the SubDeformer needs the mesh-object name as a prefix to make it unique - - Its possible that there is no 
matching vgroup in this mesh, in that case no verts are in the subdeformer, - a but silly but dosnt really matter - ''' - file.write('\n\tDeformer: "SubDeformer::Cluster %s %s", "Cluster" {' % (my_mesh.fbxName, my_bone.fbxName)) - - file.write(''' - Version: 100 - MultiLayer: 0 - Type: "Cluster" - Properties60: { - Property: "SrcModel", "object", "" - Property: "SrcModelReference", "object", "" - } - UserData: "", ""''') - - # Support for bone parents - if my_mesh.fbxBoneParent: - if my_mesh.fbxBoneParent == my_bone: - # TODO - this is a bit lazy, we could have a simple write loop - # for this case because all weights are 1.0 but for now this is ok - # Parent Bones arent used all that much anyway. - vgroup_data = [(j, 1.0) for j in xrange(len(my_mesh.blenData.verts))] - else: - # This bone is not a parent of this mesh object, no weights - vgroup_data = [] - - else: - # Normal weight painted mesh - if my_bone.blenName in weights[0]: - # Before we used normalized wright list - #vgroup_data = me.getVertsFromGroup(bone.name, 1) - group_index = weights[0].index(my_bone.blenName) - vgroup_data = [(j, weight[group_index]) for j, weight in enumerate(weights[1]) if weight[group_index]] - else: - vgroup_data = [] - - file.write('\n\t\tIndexes: ') - - i = -1 - for vg in vgroup_data: - if i == -1: - file.write('%i' % vg[0]) - i=0 - else: - if i==23: - file.write('\n\t\t') - i=0 - file.write(',%i' % vg[0]) - i+=1 - - file.write('\n\t\tWeights: ') - i = -1 - for vg in vgroup_data: - if i == -1: - file.write('%.8f' % vg[1]) - i=0 - else: - if i==38: - file.write('\n\t\t') - i=0 - file.write(',%.8f' % vg[1]) - i+=1 - - if my_mesh.fbxParent: - # TODO FIXME, this case is broken in some cases. skinned meshes just shouldnt have parents where possible! - m = mtx4_z90 * (my_bone.restMatrix * my_bone.fbxArm.matrixWorld.copy() * my_mesh.matrixWorld.copy().invert() ) - else: - # Yes! this is it... - but dosnt work when the mesh is a. 
- m = mtx4_z90 * (my_bone.restMatrix * my_bone.fbxArm.matrixWorld.copy() * my_mesh.matrixWorld.copy().invert() ) - - #m = mtx4_z90 * my_bone.restMatrix - matstr = mat4x4str(m) - matstr_i = mat4x4str(m.invert()) - - file.write('\n\t\tTransform: %s' % matstr_i) # THIS IS __NOT__ THE GLOBAL MATRIX AS DOCUMENTED :/ - file.write('\n\t\tTransformLink: %s' % matstr) - file.write('\n\t}') - - def write_mesh(my_mesh): - - me = my_mesh.blenData - - # if there are non NULL materials on this mesh - if my_mesh.blenMaterials: do_materials = True - else: do_materials = False - - if my_mesh.blenTextures: do_textures = True - else: do_textures = False - - do_uvs = me.faceUV - - - file.write('\n\tModel: "Model::%s", "Mesh" {' % my_mesh.fbxName) - file.write('\n\t\tVersion: 232') # newline is added in write_object_props - - poseMatrix = write_object_props(my_mesh.blenObject, None, my_mesh.parRelMatrix())[3] - pose_items.append((my_mesh.fbxName, poseMatrix)) - - file.write('\n\t\t}') - file.write('\n\t\tMultiLayer: 0') - file.write('\n\t\tMultiTake: 1') - file.write('\n\t\tShading: Y') - file.write('\n\t\tCulling: "CullingOff"') - - - # Write the Real Mesh data here - file.write('\n\t\tVertices: ') - i=-1 - - for v in me.verts: - if i==-1: - file.write('%.6f,%.6f,%.6f' % tuple(v.co)); i=0 - else: - if i==7: - file.write('\n\t\t'); i=0 - file.write(',%.6f,%.6f,%.6f'% tuple(v.co)) - i+=1 - - file.write('\n\t\tPolygonVertexIndex: ') - i=-1 - for f in me.faces: - fi = [v.index for v in f] - # flip the last index, odd but it looks like - # this is how fbx tells one face from another - fi[-1] = -(fi[-1]+1) - fi = tuple(fi) - if i==-1: - if len(f) == 3: file.write('%i,%i,%i' % fi ) - else: file.write('%i,%i,%i,%i' % fi ) - i=0 - else: - if i==13: - file.write('\n\t\t') - i=0 - if len(f) == 3: file.write(',%i,%i,%i' % fi ) - else: file.write(',%i,%i,%i,%i' % fi ) - i+=1 - - file.write('\n\t\tEdges: ') - i=-1 - for ed in me.edges: - if i==-1: - file.write('%i,%i' % (ed.v1.index, ed.v2.index)) - i=0 - else: - if i==13: - file.write('\n\t\t') - i=0 - file.write(',%i,%i' % (ed.v1.index, ed.v2.index)) - i+=1 - - file.write('\n\t\tGeometryVersion: 124') - - file.write(''' - LayerElementNormal: 0 { - Version: 101 - Name: "" - MappingInformationType: "ByVertice" - ReferenceInformationType: "Direct" - Normals: ''') - - i=-1 - for v in me.verts: - if i==-1: - file.write('%.15f,%.15f,%.15f' % tuple(v.no)); i=0 - else: - if i==2: - file.write('\n '); i=0 - file.write(',%.15f,%.15f,%.15f' % tuple(v.no)) - i+=1 - file.write('\n\t\t}') - - # Write Face Smoothing - file.write(''' - LayerElementSmoothing: 0 { - Version: 102 - Name: "" - MappingInformationType: "ByPolygon" - ReferenceInformationType: "Direct" - Smoothing: ''') - - i=-1 - for f in me.faces: - if i==-1: - file.write('%i' % f.smooth); i=0 - else: - if i==54: - file.write('\n '); i=0 - file.write(',%i' % f.smooth) - i+=1 - - file.write('\n\t\t}') - - # Write Edge Smoothing - file.write(''' - LayerElementSmoothing: 1 { - Version: 101 - Name: "" - MappingInformationType: "ByEdge" - ReferenceInformationType: "Direct" - Smoothing: ''') - - SHARP = Blender.Mesh.EdgeFlags.SHARP - i=-1 - for ed in me.edges: - if i==-1: - file.write('%i' % ((ed.flag&SHARP)!=0)); i=0 - else: - if i==54: - file.write('\n '); i=0 - file.write(',%i' % ((ed.flag&SHARP)!=0)) - i+=1 - - file.write('\n\t\t}') - del SHARP - - - # Write VertexColor Layers - # note, no programs seem to use this info :/ - collayers = [] - if me.vertexColors: - collayers = me.getColorLayerNames() - collayer_orig = 
me.activeColorLayer - for colindex, collayer in enumerate(collayers): - me.activeColorLayer = collayer - file.write('\n\t\tLayerElementColor: %i {' % colindex) - file.write('\n\t\t\tVersion: 101') - file.write('\n\t\t\tName: "%s"' % collayer) - - file.write(''' - MappingInformationType: "ByPolygonVertex" - ReferenceInformationType: "IndexToDirect" - Colors: ''') - - i = -1 - ii = 0 # Count how many Colors we write - - for f in me.faces: - for col in f.col: - if i==-1: - file.write('%.4f,%.4f,%.4f,1' % (col[0]/255.0, col[1]/255.0, col[2]/255.0)) - i=0 - else: - if i==7: - file.write('\n\t\t\t\t') - i=0 - file.write(',%.4f,%.4f,%.4f,1' % (col[0]/255.0, col[1]/255.0, col[2]/255.0)) - i+=1 - ii+=1 # One more Color - - file.write('\n\t\t\tColorIndex: ') - i = -1 - for j in xrange(ii): - if i == -1: - file.write('%i' % j) - i=0 - else: - if i==55: - file.write('\n\t\t\t\t') - i=0 - file.write(',%i' % j) - i+=1 - - file.write('\n\t\t}') - - - - # Write UV and texture layers. - uvlayers = [] - if do_uvs: - uvlayers = me.getUVLayerNames() - uvlayer_orig = me.activeUVLayer - for uvindex, uvlayer in enumerate(uvlayers): - me.activeUVLayer = uvlayer - file.write('\n\t\tLayerElementUV: %i {' % uvindex) - file.write('\n\t\t\tVersion: 101') - file.write('\n\t\t\tName: "%s"' % uvlayer) - - file.write(''' - MappingInformationType: "ByPolygonVertex" - ReferenceInformationType: "IndexToDirect" - UV: ''') - - i = -1 - ii = 0 # Count how many UVs we write - - for f in me.faces: - for uv in f.uv: - if i==-1: - file.write('%.6f,%.6f' % tuple(uv)) - i=0 - else: - if i==7: - file.write('\n ') - i=0 - file.write(',%.6f,%.6f' % tuple(uv)) - i+=1 - ii+=1 # One more UV - - file.write('\n\t\t\tUVIndex: ') - i = -1 - for j in xrange(ii): - if i == -1: - file.write('%i' % j) - i=0 - else: - if i==55: - file.write('\n\t\t\t\t') - i=0 - file.write(',%i' % j) - i+=1 - - file.write('\n\t\t}') - - if do_textures: - file.write('\n\t\tLayerElementTexture: %i {' % uvindex) - file.write('\n\t\t\tVersion: 101') - file.write('\n\t\t\tName: "%s"' % uvlayer) - - if len(my_mesh.blenTextures) == 1: - file.write('\n\t\t\tMappingInformationType: "AllSame"') - else: - file.write('\n\t\t\tMappingInformationType: "ByPolygon"') - - file.write('\n\t\t\tReferenceInformationType: "IndexToDirect"') - file.write('\n\t\t\tBlendMode: "Translucent"') - file.write('\n\t\t\tTextureAlpha: 1') - file.write('\n\t\t\tTextureId: ') - - if len(my_mesh.blenTextures) == 1: - file.write('0') - else: - texture_mapping_local = {None:-1} - - i = 0 # 1 for dummy - for tex in my_mesh.blenTextures: - if tex: # None is set above - texture_mapping_local[tex] = i - i+=1 - - i=-1 - for f in me.faces: - img_key = f.image - - if i==-1: - i=0 - file.write( '%s' % texture_mapping_local[img_key]) - else: - if i==55: - file.write('\n ') - i=0 - - file.write(',%s' % texture_mapping_local[img_key]) - i+=1 - - else: - file.write(''' - LayerElementTexture: 0 { - Version: 101 - Name: "" - MappingInformationType: "NoMappingInformation" - ReferenceInformationType: "IndexToDirect" - BlendMode: "Translucent" - TextureAlpha: 1 - TextureId: ''') - file.write('\n\t\t}') - - me.activeUVLayer = uvlayer_orig - - # Done with UV/textures. 
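# [Editor's note: illustrative sketch, not part of the original patch.]
# write_mesh() above repeats one idiom for every FBX array (Vertices,
# PolygonVertexIndex, Edges, Normals, UVs, ...): the first value is written
# without a comma, later values are comma-prefixed, and a newline plus indent
# is inserted every N values.  It also terminates each polygon in
# PolygonVertexIndex by storing the last vertex index as -(index + 1).
# A minimal, self-contained sketch of both ideas; the helper names are
# hypothetical and the wrap count is approximate:

def write_fbx_array(out, values, per_line, indent='\n\t\t'):
	# Comma-separated values, wrapped roughly every `per_line` entries.
	for i, v in enumerate(values):
		if i == 0:
			out.write('%s' % v)
		else:
			if i % per_line == 0:
				out.write(indent)
			out.write(',%s' % v)

def encode_polygon(indices):
	# FBX marks the end of a face by negating the last index: i -> -(i + 1).
	return indices[:-1] + [-(indices[-1] + 1)]

if __name__ == '__main__':
	import sys
	write_fbx_array(sys.stdout, encode_polygon([0, 1, 2, 3]), 13)  # prints 0,1,2,-4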
- - if do_materials: - file.write('\n\t\tLayerElementMaterial: 0 {') - file.write('\n\t\t\tVersion: 101') - file.write('\n\t\t\tName: ""') - - if len(my_mesh.blenMaterials) == 1: - file.write('\n\t\t\tMappingInformationType: "AllSame"') - else: - file.write('\n\t\t\tMappingInformationType: "ByPolygon"') - - file.write('\n\t\t\tReferenceInformationType: "IndexToDirect"') - file.write('\n\t\t\tMaterials: ') - - if len(my_mesh.blenMaterials) == 1: - file.write('0') - else: - # Build a material mapping for this - material_mapping_local = {} # local-mat & tex : global index. - - for j, mat_tex_pair in enumerate(my_mesh.blenMaterials): - material_mapping_local[mat_tex_pair] = j - - len_material_mapping_local = len(material_mapping_local) - - mats = my_mesh.blenMaterialList - - i=-1 - for f in me.faces: - try: mat = mats[f.mat] - except:mat = None - - if do_uvs: tex = f.image # WARNING - MULTI UV LAYER IMAGES NOT SUPPORTED :/ - else: tex = None - - if i==-1: - i=0 - file.write( '%s' % (material_mapping_local[mat, tex])) # None for mat or tex is ok - else: - if i==55: - file.write('\n\t\t\t\t') - i=0 - - file.write(',%s' % (material_mapping_local[mat, tex])) - i+=1 - - file.write('\n\t\t}') - - file.write(''' - Layer: 0 { - Version: 100 - LayerElement: { - Type: "LayerElementNormal" - TypedIndex: 0 - }''') - - if do_materials: - file.write(''' - LayerElement: { - Type: "LayerElementMaterial" - TypedIndex: 0 - }''') - - # Always write this - if do_textures: - file.write(''' - LayerElement: { - Type: "LayerElementTexture" - TypedIndex: 0 - }''') - - if me.vertexColors: - file.write(''' - LayerElement: { - Type: "LayerElementColor" - TypedIndex: 0 - }''') - - if do_uvs: # same as me.faceUV - file.write(''' - LayerElement: { - Type: "LayerElementUV" - TypedIndex: 0 - }''') - - - file.write('\n\t\t}') - - if len(uvlayers) > 1: - for i in xrange(1, len(uvlayers)): - - file.write('\n\t\tLayer: %i {' % i) - file.write('\n\t\t\tVersion: 100') - - file.write(''' - LayerElement: { - Type: "LayerElementUV"''') - - file.write('\n\t\t\t\tTypedIndex: %i' % i) - file.write('\n\t\t\t}') - - if do_textures: - - file.write(''' - LayerElement: { - Type: "LayerElementTexture"''') - - file.write('\n\t\t\t\tTypedIndex: %i' % i) - file.write('\n\t\t\t}') - - file.write('\n\t\t}') - - if len(collayers) > 1: - # Take into account any UV layers - layer_offset = 0 - if uvlayers: layer_offset = len(uvlayers)-1 - - for i in xrange(layer_offset, len(collayers)+layer_offset): - file.write('\n\t\tLayer: %i {' % i) - file.write('\n\t\t\tVersion: 100') - - file.write(''' - LayerElement: { - Type: "LayerElementColor"''') - - file.write('\n\t\t\t\tTypedIndex: %i' % i) - file.write('\n\t\t\t}') - file.write('\n\t\t}') - file.write('\n\t}') - - def write_group(name): - file.write('\n\tGroupSelection: "GroupSelection::%s", "Default" {' % name) - - file.write(''' - Properties60: { - Property: "MultiLayer", "bool", "",0 - Property: "Pickable", "bool", "",1 - Property: "Transformable", "bool", "",1 - Property: "Show", "bool", "",1 - } - MultiLayer: 0 - }''') - - - # add meshes here to clear because they are not used anywhere. 
- meshes_to_clear = [] - - ob_meshes = [] - ob_lights = [] - ob_cameras = [] - # in fbx we export bones as children of the mesh - # armatures not a part of a mesh, will be added to ob_arms - ob_bones = [] - ob_arms = [] - ob_null = [] # emptys - - # List of types that have blender objects (not bones) - ob_all_typegroups = [ob_meshes, ob_lights, ob_cameras, ob_arms, ob_null] - - groups = [] # blender groups, only add ones that have objects in the selections - materials = {} # (mat, image) keys, should be a set() - textures = {} # should be a set() - - tmp_ob_type = ob_type = None # incase no objects are exported, so as not to raise an error - - # if EXP_OBS_SELECTED is false, use sceens objects - if not batch_objects: - if EXP_OBS_SELECTED: tmp_objects = sce.objects.context - else: tmp_objects = sce.objects - else: - tmp_objects = batch_objects - - if EXP_ARMATURE: - # This is needed so applying modifiers dosnt apply the armature deformation, its also needed - # ...so mesh objects return their rest worldspace matrix when bone-parents are exported as weighted meshes. - # set every armature to its rest, backup the original values so we done mess up the scene - ob_arms_orig_rest = [arm.restPosition for arm in bpy.data.armatures] - - for arm in bpy.data.armatures: - arm.restPosition = True - - if ob_arms_orig_rest: - for ob_base in bpy.data.objects: - #if ob_base.type == 'Armature': - ob_base.makeDisplayList() - - # This causes the makeDisplayList command to effect the mesh - Blender.Set('curframe', Blender.Get('curframe')) - - - for ob_base in tmp_objects: - for ob, mtx in BPyObject.getDerivedObjects(ob_base): - #for ob in [ob_base,]: - tmp_ob_type = ob.type - if tmp_ob_type == 'Camera': - if EXP_CAMERA: - ob_cameras.append(my_object_generic(ob, mtx)) - elif tmp_ob_type == 'Lamp': - if EXP_LAMP: - ob_lights.append(my_object_generic(ob, mtx)) - elif tmp_ob_type == 'Armature': - if EXP_ARMATURE: - # TODO - armatures dont work in dupligroups! - if ob not in ob_arms: ob_arms.append(ob) - # ob_arms.append(ob) # replace later. was "ob_arms.append(sane_obname(ob), ob)" - elif tmp_ob_type == 'Empty': - if EXP_EMPTY: - ob_null.append(my_object_generic(ob, mtx)) - elif EXP_MESH: - origData = True - if tmp_ob_type != 'Mesh': - me = bpy.data.meshes.new() - try: me.getFromObject(ob) - except: me = None - if me: - meshes_to_clear.append( me ) - mats = me.materials - origData = False - else: - # Mesh Type! - if EXP_MESH_APPLY_MOD: - me = bpy.data.meshes.new() - me.getFromObject(ob) - - # so we keep the vert groups - if EXP_ARMATURE: - orig_mesh = ob.getData(mesh=1) - if orig_mesh.getVertGroupNames(): - ob.copy().link(me) - # If new mesh has no vgroups we can try add if verts are teh same - if not me.getVertGroupNames(): # vgroups were not kept by the modifier - if len(me.verts) == len(orig_mesh.verts): - groupNames, vWeightDict = BPyMesh.meshWeight2Dict(orig_mesh) - BPyMesh.dict2MeshWeight(me, groupNames, vWeightDict) - - # print ob, me, me.getVertGroupNames() - meshes_to_clear.append( me ) - origData = False - mats = me.materials - else: - me = ob.getData(mesh=1) - mats = me.materials - - # Support object colors - tmp_colbits = ob.colbits - if tmp_colbits: - tmp_ob_mats = ob.getMaterials(1) # 1 so we get None's too. 
- for i in xrange(16): - if tmp_colbits & (1< fbxObject mapping - # this is needed for groups as well as fbxParenting - bpy.data.objects.tag = False - tmp_obmapping = {} - for ob_generic in ob_all_typegroups: - for ob_base in ob_generic: - ob_base.blenObject.tag = True - tmp_obmapping[ob_base.blenObject] = ob_base - - # Build Groups from objects we export - for blenGroup in bpy.data.groups: - fbxGroupName = None - for ob in blenGroup.objects: - if ob.tag: - if fbxGroupName == None: - fbxGroupName = sane_groupname(blenGroup) - groups.append((fbxGroupName, blenGroup)) - - tmp_obmapping[ob].fbxGroupNames.append(fbxGroupName) # also adds to the objects fbxGroupNames - - groups.sort() # not really needed - - # Assign parents using this mapping - for ob_generic in ob_all_typegroups: - for my_ob in ob_generic: - parent = my_ob.blenObject.parent - if parent and parent.tag: # does it exist and is it in the mapping - my_ob.fbxParent = tmp_obmapping[parent] - - - del tmp_obmapping - # Finished finding groups we use - - - materials = [(sane_matname(mat_tex_pair), mat_tex_pair) for mat_tex_pair in materials.iterkeys()] - textures = [(sane_texname(tex), tex) for tex in textures.iterkeys() if tex] - materials.sort() # sort by name - textures.sort() - - camera_count = 8 - file.write(''' - -; Object definitions -;------------------------------------------------------------------ - -Definitions: { - Version: 100 - Count: %i''' % (\ - 1+1+camera_count+\ - len(ob_meshes)+\ - len(ob_lights)+\ - len(ob_cameras)+\ - len(ob_arms)+\ - len(ob_null)+\ - len(ob_bones)+\ - bone_deformer_count+\ - len(materials)+\ - (len(textures)*2))) # add 1 for the root model 1 for global settings - - del bone_deformer_count - - file.write(''' - ObjectType: "Model" { - Count: %i - }''' % (\ - 1+camera_count+\ - len(ob_meshes)+\ - len(ob_lights)+\ - len(ob_cameras)+\ - len(ob_arms)+\ - len(ob_null)+\ - len(ob_bones))) # add 1 for the root model - - file.write(''' - ObjectType: "Geometry" { - Count: %i - }''' % len(ob_meshes)) - - if materials: - file.write(''' - ObjectType: "Material" { - Count: %i - }''' % len(materials)) - - if textures: - file.write(''' - ObjectType: "Texture" { - Count: %i - }''' % len(textures)) # add 1 for an empty tex - file.write(''' - ObjectType: "Video" { - Count: %i - }''' % len(textures)) # add 1 for an empty tex - - tmp = 0 - # Add deformer nodes - for my_mesh in ob_meshes: - if my_mesh.fbxArm: - tmp+=1 - - # Add subdeformers - for my_bone in ob_bones: - tmp += len(my_bone.blenMeshes) - - if tmp: - file.write(''' - ObjectType: "Deformer" { - Count: %i - }''' % tmp) - del tmp - - # we could avoid writing this possibly but for now just write it - - file.write(''' - ObjectType: "Pose" { - Count: 1 - }''') - - if groups: - file.write(''' - ObjectType: "GroupSelection" { - Count: %i - }''' % len(groups)) - - file.write(''' - ObjectType: "GlobalSettings" { - Count: 1 - } -}''') - - file.write(''' - -; Object properties -;------------------------------------------------------------------ - -Objects: {''') - - # To comply with other FBX FILES - write_camera_switch() - - # Write the null object - write_null(None, 'blend_root')# , GLOBAL_MATRIX) - - for my_null in ob_null: - write_null(my_null) - - for my_arm in ob_arms: - write_null(my_arm) - - for my_cam in ob_cameras: - write_camera(my_cam) - - for my_light in ob_lights: - write_light(my_light) - - for my_mesh in ob_meshes: - write_mesh(my_mesh) - - #for bonename, bone, obname, me, armob in ob_bones: - for my_bone in ob_bones: - write_bone(my_bone) - - 
write_camera_default() - - for matname, (mat, tex) in materials: - write_material(matname, mat) # We only need to have a material per image pair, but no need to write any image info into the material (dumb fbx standard) - - # each texture uses a video, odd - for texname, tex in textures: - write_video(texname, tex) - i = 0 - for texname, tex in textures: - write_texture(texname, tex, i) - i+=1 - - for groupname, group in groups: - write_group(groupname) - - # NOTE - c4d and motionbuilder dont need normalized weights, but deep-exploration 5 does and (max?) do. - - # Write armature modifiers - # TODO - add another MODEL? - because of this skin definition. - for my_mesh in ob_meshes: - if my_mesh.fbxArm: - write_deformer_skin(my_mesh.fbxName) - - # Get normalized weights for temorary use - if my_mesh.fbxBoneParent: - weights = None - else: - weights = meshNormalizedWeights(my_mesh.blenData) - - #for bonename, bone, obname, bone_mesh, armob in ob_bones: - for my_bone in ob_bones: - if me in my_bone.blenMeshes.itervalues(): - write_sub_deformer_skin(my_mesh, my_bone, weights) - - # Write pose's really weired, only needed when an armature and mesh are used together - # each by themselves dont need pose data. for now only pose meshes and bones - - file.write(''' - Pose: "Pose::BIND_POSES", "BindPose" { - Type: "BindPose" - Version: 100 - Properties60: { - } - NbPoseNodes: ''') - file.write(str(len(pose_items))) - - - for fbxName, matrix in pose_items: - file.write('\n\t\tPoseNode: {') - file.write('\n\t\t\tNode: "Model::%s"' % fbxName ) - if matrix: file.write('\n\t\t\tMatrix: %s' % mat4x4str(matrix)) - else: file.write('\n\t\t\tMatrix: %s' % mat4x4str(mtx4_identity)) - file.write('\n\t\t}') - - file.write('\n\t}') - - - # Finish Writing Objects - # Write global settings - file.write(''' - GlobalSettings: { - Version: 1000 - Properties60: { - Property: "UpAxis", "int", "",1 - Property: "UpAxisSign", "int", "",1 - Property: "FrontAxis", "int", "",2 - Property: "FrontAxisSign", "int", "",1 - Property: "CoordAxis", "int", "",0 - Property: "CoordAxisSign", "int", "",1 - Property: "UnitScaleFactor", "double", "",100 - } - } -''') - file.write('}') - - file.write(''' - -; Object relations -;------------------------------------------------------------------ - -Relations: {''') - - file.write('\n\tModel: "Model::blend_root", "Null" {\n\t}') - - for my_null in ob_null: - file.write('\n\tModel: "Model::%s", "Null" {\n\t}' % my_null.fbxName) - - for my_arm in ob_arms: - file.write('\n\tModel: "Model::%s", "Null" {\n\t}' % my_arm.fbxName) - - for my_mesh in ob_meshes: - file.write('\n\tModel: "Model::%s", "Mesh" {\n\t}' % my_mesh.fbxName) - - # TODO - limbs can have the same name for multiple armatures, should prefix. 
- #for bonename, bone, obname, me, armob in ob_bones: - for my_bone in ob_bones: - file.write('\n\tModel: "Model::%s", "Limb" {\n\t}' % my_bone.fbxName) - - for my_cam in ob_cameras: - file.write('\n\tModel: "Model::%s", "Camera" {\n\t}' % my_cam.fbxName) - - for my_light in ob_lights: - file.write('\n\tModel: "Model::%s", "Light" {\n\t}' % my_light.fbxName) - - file.write(''' - Model: "Model::Producer Perspective", "Camera" { - } - Model: "Model::Producer Top", "Camera" { - } - Model: "Model::Producer Bottom", "Camera" { - } - Model: "Model::Producer Front", "Camera" { - } - Model: "Model::Producer Back", "Camera" { - } - Model: "Model::Producer Right", "Camera" { - } - Model: "Model::Producer Left", "Camera" { - } - Model: "Model::Camera Switcher", "CameraSwitcher" { - }''') - - for matname, (mat, tex) in materials: - file.write('\n\tMaterial: "Material::%s", "" {\n\t}' % matname) - - if textures: - for texname, tex in textures: - file.write('\n\tTexture: "Texture::%s", "TextureVideoClip" {\n\t}' % texname) - for texname, tex in textures: - file.write('\n\tVideo: "Video::%s", "Clip" {\n\t}' % texname) - - # deformers - modifiers - for my_mesh in ob_meshes: - if my_mesh.fbxArm: - file.write('\n\tDeformer: "Deformer::Skin %s", "Skin" {\n\t}' % my_mesh.fbxName) - - #for bonename, bone, obname, me, armob in ob_bones: - for my_bone in ob_bones: - for fbxMeshObName in my_bone.blenMeshes: # .keys() - fbxMeshObName - # is this bone effecting a mesh? - file.write('\n\tDeformer: "SubDeformer::Cluster %s %s", "Cluster" {\n\t}' % (fbxMeshObName, my_bone.fbxName)) - - # This should be at the end - # file.write('\n\tPose: "Pose::BIND_POSES", "BindPose" {\n\t}') - - for groupname, group in groups: - file.write('\n\tGroupSelection: "GroupSelection::%s", "Default" {\n\t}' % groupname) - - file.write('\n}') - file.write(''' - -; Object connections -;------------------------------------------------------------------ - -Connections: {''') - - # NOTE - The FBX SDK dosnt care about the order but some importers DO! - # for instance, defining the material->mesh connection - # before the mesh->blend_root crashes cinema4d - - - # write the fake root node - file.write('\n\tConnect: "OO", "Model::blend_root", "Model::Scene"') - - for ob_generic in ob_all_typegroups: # all blender 'Object's we support - for my_ob in ob_generic: - if my_ob.fbxParent: - file.write('\n\tConnect: "OO", "Model::%s", "Model::%s"' % (my_ob.fbxName, my_ob.fbxParent.fbxName)) - else: - file.write('\n\tConnect: "OO", "Model::%s", "Model::blend_root"' % my_ob.fbxName) - - if materials: - for my_mesh in ob_meshes: - # Connect all materials to all objects, not good form but ok for now. 
- for mat, tex in my_mesh.blenMaterials: - if mat: mat_name = mat.name - else: mat_name = None - - if tex: tex_name = tex.name - else: tex_name = None - - file.write('\n\tConnect: "OO", "Material::%s", "Model::%s"' % (sane_name_mapping_mat[mat_name, tex_name], my_mesh.fbxName)) - - if textures: - for my_mesh in ob_meshes: - if my_mesh.blenTextures: - # file.write('\n\tConnect: "OO", "Texture::_empty_", "Model::%s"' % my_mesh.fbxName) - for tex in my_mesh.blenTextures: - if tex: - file.write('\n\tConnect: "OO", "Texture::%s", "Model::%s"' % (sane_name_mapping_tex[tex.name], my_mesh.fbxName)) - - for texname, tex in textures: - file.write('\n\tConnect: "OO", "Video::%s", "Texture::%s"' % (texname, texname)) - - for my_mesh in ob_meshes: - if my_mesh.fbxArm: - file.write('\n\tConnect: "OO", "Deformer::Skin %s", "Model::%s"' % (my_mesh.fbxName, my_mesh.fbxName)) - - #for bonename, bone, obname, me, armob in ob_bones: - for my_bone in ob_bones: - for fbxMeshObName in my_bone.blenMeshes: # .keys() - file.write('\n\tConnect: "OO", "SubDeformer::Cluster %s %s", "Deformer::Skin %s"' % (fbxMeshObName, my_bone.fbxName, fbxMeshObName)) - - # limbs -> deformers - # for bonename, bone, obname, me, armob in ob_bones: - for my_bone in ob_bones: - for fbxMeshObName in my_bone.blenMeshes: # .keys() - file.write('\n\tConnect: "OO", "Model::%s", "SubDeformer::Cluster %s %s"' % (my_bone.fbxName, fbxMeshObName, my_bone.fbxName)) - - - #for bonename, bone, obname, me, armob in ob_bones: - for my_bone in ob_bones: - # Always parent to armature now - if my_bone.parent: - file.write('\n\tConnect: "OO", "Model::%s", "Model::%s"' % (my_bone.fbxName, my_bone.parent.fbxName) ) - else: - # the armature object is written as an empty and all root level bones connect to it - file.write('\n\tConnect: "OO", "Model::%s", "Model::%s"' % (my_bone.fbxName, my_bone.fbxArm.fbxName) ) - - # groups - if groups: - for ob_generic in ob_all_typegroups: - for ob_base in ob_generic: - for fbxGroupName in ob_base.fbxGroupNames: - file.write('\n\tConnect: "OO", "Model::%s", "GroupSelection::%s"' % (ob_base.fbxName, fbxGroupName)) - - for my_arm in ob_arms: - file.write('\n\tConnect: "OO", "Model::%s", "Model::blend_root"' % my_arm.fbxName) - - file.write('\n}') - - - # Needed for scene footer as well as animation - render = sce.render - - # from the FBX sdk - #define KTIME_ONE_SECOND KTime (K_LONGLONG(46186158000)) - def fbx_time(t): - # 0.5 + val is the same as rounding. - return int(0.5 + ((t/fps) * 46186158000)) - - fps = float(render.fps) - start = render.sFrame - end = render.eFrame - if end < start: start, end = end, start - if start==end: ANIM_ENABLE = False - - # animations for these object types - ob_anim_lists = ob_bones, ob_meshes, ob_null, ob_cameras, ob_lights, ob_arms - - if ANIM_ENABLE and [tmp for tmp in ob_anim_lists if tmp]: - - frame_orig = Blender.Get('curframe') - - if ANIM_OPTIMIZE: - ANIM_OPTIMIZE_PRECISSION_FLOAT = 0.1 ** ANIM_OPTIMIZE_PRECISSION - - # default action, when no actions are avaioable - tmp_actions = [None] # None is the default action - blenActionDefault = None - action_lastcompat = None - - if ANIM_ACTION_ALL: - bpy.data.actions.tag = False - tmp_actions = list(bpy.data.actions) - - - # find which actions are compatible with the armatures - # blenActions is not yet initialized so do it now. 
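# [Editor's note: illustrative sketch, not part of the original patch.]
# fbx_time() above maps a frame count to FBX KTime units: the FBX SDK define
# quoted above sets one second to 46186158000 KTime ticks, so
# ticks = (frame / fps) * 46186158000, and adding 0.5 before int() rounds to
# the nearest tick.  Standalone version (the helper name is hypothetical):

KTIME_ONE_SECOND = 46186158000  # from the FBX SDK define quoted above

def frame_to_ktime(frame, fps):
	# 0.5 + value emulates rounding for the non-negative values used here.
	return int(0.5 + (frame / float(fps)) * KTIME_ONE_SECOND)

# frame_to_ktime(24, 24.0) == 46186158000, i.e. exactly one second.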
- tmp_act_count = 0 - for my_arm in ob_arms: - - # get the default name - if not blenActionDefault: - blenActionDefault = my_arm.blenAction - - arm_bone_names = set([my_bone.blenName for my_bone in my_arm.fbxBones]) - - for action in tmp_actions: - - action_chan_names = arm_bone_names.intersection( set(action.getChannelNames()) ) - - if action_chan_names: # at least one channel matches. - my_arm.blenActionList.append(action) - action.tag = True - tmp_act_count += 1 - - # incase there is no actions applied to armatures - action_lastcompat = action - - if tmp_act_count: - # unlikely to ever happen but if no actions applied to armatures, just use the last compatible armature. - if not blenActionDefault: - blenActionDefault = action_lastcompat - - del action_lastcompat - - file.write(''' -;Takes and animation section -;---------------------------------------------------- - -Takes: {''') - - if blenActionDefault: - file.write('\n\tCurrent: "%s"' % sane_takename(blenActionDefault)) - else: - file.write('\n\tCurrent: "Default Take"') - - for blenAction in tmp_actions: - # we have tagged all actious that are used be selected armatures - if blenAction: - if blenAction.tag: - print '\taction: "%s" exporting...' % blenAction.name - else: - print '\taction: "%s" has no armature using it, skipping' % blenAction.name - continue - - if blenAction == None: - # Warning, this only accounts for tmp_actions being [None] - file.write('\n\tTake: "Default Take" {') - act_start = start - act_end = end - else: - # use existing name - if blenAction == blenActionDefault: # have we alredy got the name - file.write('\n\tTake: "%s" {' % sane_name_mapping_take[blenAction.name]) - else: - file.write('\n\tTake: "%s" {' % sane_takename(blenAction)) - - tmp = blenAction.getFrameNumbers() - if tmp: - act_start = min(tmp) - act_end = max(tmp) - del tmp - else: - # Fallback on this, theres not much else we can do? :/ - # when an action has no length - act_start = start - act_end = end - - # Set the action active - for my_bone in ob_arms: - if blenAction in my_bone.blenActionList: - ob.action = blenAction - # print '\t\tSetting Action!', blenAction - # sce.update(1) - - file.write('\n\t\tFileName: "Default_Take.tak"') # ??? - not sure why this is needed - file.write('\n\t\tLocalTime: %i,%i' % (fbx_time(act_start-1), fbx_time(act_end-1))) # ??? - not sure why this is needed - file.write('\n\t\tReferenceTime: %i,%i' % (fbx_time(act_start-1), fbx_time(act_end-1))) # ??? - not sure why this is needed - - file.write(''' - - ;Models animation - ;----------------------------------------------------''') - - - # set pose data for all bones - # do this here incase the action changes - ''' - for my_bone in ob_bones: - my_bone.flushAnimData() - ''' - i = act_start - while i <= act_end: - Blender.Set('curframe', i) - for ob_generic in ob_anim_lists: - for my_ob in ob_generic: - #Blender.Window.RedrawAll() - if ob_generic == ob_meshes and my_ob.fbxArm: - # We cant animate armature meshes! - pass - else: - my_ob.setPoseFrame(i) - - i+=1 - - - #for bonename, bone, obname, me, armob in ob_bones: - for ob_generic in (ob_bones, ob_meshes, ob_null, ob_cameras, ob_lights, ob_arms): - - for my_ob in ob_generic: - - if ob_generic == ob_meshes and my_ob.fbxArm: - # do nothing, - pass - else: - - file.write('\n\t\tModel: "Model::%s" {' % my_ob.fbxName) # ??? 
- not sure why this is needed - file.write('\n\t\t\tVersion: 1.1') - file.write('\n\t\t\tChannel: "Transform" {') - - context_bone_anim_mats = [ (my_ob.getAnimParRelMatrix(frame), my_ob.getAnimParRelMatrixRot(frame)) for frame in xrange(act_start, act_end+1) ] - - # ---------------- - # ---------------- - for TX_LAYER, TX_CHAN in enumerate('TRS'): # transform, rotate, scale - - if TX_CHAN=='T': context_bone_anim_vecs = [mtx[0].translationPart() for mtx in context_bone_anim_mats] - elif TX_CHAN=='S': context_bone_anim_vecs = [mtx[0].scalePart() for mtx in context_bone_anim_mats] - elif TX_CHAN=='R': - # Was.... - # elif TX_CHAN=='R': context_bone_anim_vecs = [mtx[1].toEuler() for mtx in context_bone_anim_mats] - # - # ...but we need to use the previous euler for compatible conversion. - context_bone_anim_vecs = [] - prev_eul = None - for mtx in context_bone_anim_mats: - if prev_eul: prev_eul = mtx[1].toEuler(prev_eul) - else: prev_eul = mtx[1].toEuler() - context_bone_anim_vecs.append(prev_eul) - - file.write('\n\t\t\t\tChannel: "%s" {' % TX_CHAN) # translation - - for i in xrange(3): - # Loop on each axis of the bone - file.write('\n\t\t\t\t\tChannel: "%s" {'% ('XYZ'[i])) # translation - file.write('\n\t\t\t\t\t\tDefault: %.15f' % context_bone_anim_vecs[0][i] ) - file.write('\n\t\t\t\t\t\tKeyVer: 4005') - - if not ANIM_OPTIMIZE: - # Just write all frames, simple but in-eficient - file.write('\n\t\t\t\t\t\tKeyCount: %i' % (1 + act_end - act_start)) - file.write('\n\t\t\t\t\t\tKey: ') - frame = act_start - while frame <= act_end: - if frame!=act_start: - file.write(',') - - # Curve types are 'C,n' for constant, 'L' for linear - # C,n is for bezier? - linear is best for now so we can do simple keyframe removal - file.write('\n\t\t\t\t\t\t\t%i,%.15f,L' % (fbx_time(frame-1), context_bone_anim_vecs[frame-act_start][i] )) - frame+=1 - else: - # remove unneeded keys, j is the frame, needed when some frames are removed. - context_bone_anim_keys = [ (vec[i], j) for j, vec in enumerate(context_bone_anim_vecs) ] - - # last frame to fisrt frame, missing 1 frame on either side. - # removeing in a backwards loop is faster - #for j in xrange( (act_end-act_start)-1, 0, -1 ): - # j = (act_end-act_start)-1 - j = len(context_bone_anim_keys)-2 - while j > 0 and len(context_bone_anim_keys) > 2: - # print j, len(context_bone_anim_keys) - # Is this key the same as the ones next to it? - - # co-linear horizontal... 
- if abs(context_bone_anim_keys[j][0] - context_bone_anim_keys[j-1][0]) < ANIM_OPTIMIZE_PRECISSION_FLOAT and\ - abs(context_bone_anim_keys[j][0] - context_bone_anim_keys[j+1][0]) < ANIM_OPTIMIZE_PRECISSION_FLOAT: - - del context_bone_anim_keys[j] - - else: - frame_range = float(context_bone_anim_keys[j+1][1] - context_bone_anim_keys[j-1][1]) - frame_range_fac1 = (context_bone_anim_keys[j+1][1] - context_bone_anim_keys[j][1]) / frame_range - frame_range_fac2 = 1.0 - frame_range_fac1 - - if abs(((context_bone_anim_keys[j-1][0]*frame_range_fac1 + context_bone_anim_keys[j+1][0]*frame_range_fac2)) - context_bone_anim_keys[j][0]) < ANIM_OPTIMIZE_PRECISSION_FLOAT: - del context_bone_anim_keys[j] - else: - j-=1 - - # keep the index below the list length - if j > len(context_bone_anim_keys)-2: - j = len(context_bone_anim_keys)-2 - - if len(context_bone_anim_keys) == 2 and context_bone_anim_keys[0][0] == context_bone_anim_keys[1][0]: - # This axis has no moton, its okay to skip KeyCount and Keys in this case - pass - else: - # We only need to write these if there is at least one - file.write('\n\t\t\t\t\t\tKeyCount: %i' % len(context_bone_anim_keys)) - file.write('\n\t\t\t\t\t\tKey: ') - for val, frame in context_bone_anim_keys: - if frame != context_bone_anim_keys[0][1]: # not the first - file.write(',') - # frame is alredy one less then blenders frame - file.write('\n\t\t\t\t\t\t\t%i,%.15f,L' % (fbx_time(frame), val )) - - if i==0: file.write('\n\t\t\t\t\t\tColor: 1,0,0') - elif i==1: file.write('\n\t\t\t\t\t\tColor: 0,1,0') - elif i==2: file.write('\n\t\t\t\t\t\tColor: 0,0,1') - - file.write('\n\t\t\t\t\t}') - file.write('\n\t\t\t\t\tLayerType: %i' % (TX_LAYER+1) ) - file.write('\n\t\t\t\t}') - - # --------------- - - file.write('\n\t\t\t}') - file.write('\n\t\t}') - - # end the take - file.write('\n\t}') - - # end action loop. set original actions - # do this after every loop incase actions effect eachother. 
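# [Editor's note: illustrative sketch, not part of the original patch.]
# The ANIM_OPTIMIZE branch above thins the baked keyframes: a key is dropped
# when its value is within ANIM_OPTIMIZE_PRECISSION_FLOAT of both neighbours,
# or within that tolerance of the straight line between them, which is safe
# because every key is written with linear ('L') interpolation.  The sketch
# below collapses both tests into one interpolation check; the name is
# hypothetical and it works on plain (frame, value) pairs:

def decimate_linear_keys(keys, tol):
	# keys: list of (frame, value) pairs, frames strictly increasing.
	keys = list(keys)
	j = len(keys) - 2
	while j > 0 and len(keys) > 2:
		(f0, v0), (f1, v1), (f2, v2) = keys[j - 1], keys[j], keys[j + 1]
		t = (f1 - f0) / float(f2 - f0)
		predicted = v0 + (v2 - v0) * t  # value the neighbours already imply
		if abs(v1 - predicted) < tol:
			del keys[j]  # redundant at this tolerance
		else:
			j -= 1
		j = min(j, len(keys) - 2)  # stay inside the shrinking list
	return keys

# decimate_linear_keys([(0, 0.0), (1, 0.5), (2, 1.0), (3, 5.0)], 0.01)
# returns [(0, 0.0), (2, 1.0), (3, 5.0)]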
- for my_bone in ob_arms: - my_bone.blenObject.action = my_bone.blenAction - - file.write('\n}') - - Blender.Set('curframe', frame_orig) - - else: - # no animation - file.write('\n;Takes and animation section') - file.write('\n;----------------------------------------------------') - file.write('\n') - file.write('\nTakes: {') - file.write('\n\tCurrent: ""') - file.write('\n}') - - - # write meshes animation - #for obname, ob, mtx, me, mats, arm, armname in ob_meshes: - - - # Clear mesh data Only when writing with modifiers applied - for me in meshes_to_clear: - me.verts = None - - - - # --------------------------- Footer - if world: - has_mist = world.mode & 1 - mist_intense, mist_start, mist_end, mist_height = world.mist - world_hor = world.hor - else: - has_mist = mist_intense = mist_start = mist_end = mist_height = 0 - world_hor = 0,0,0 - - file.write('\n;Version 5 settings') - file.write('\n;------------------------------------------------------------------') - file.write('\n') - file.write('\nVersion5: {') - file.write('\n\tAmbientRenderSettings: {') - file.write('\n\t\tVersion: 101') - file.write('\n\t\tAmbientLightColor: %.1f,%.1f,%.1f,0' % tuple(world_amb)) - file.write('\n\t}') - file.write('\n\tFogOptions: {') - file.write('\n\t\tFlogEnable: %i' % has_mist) - file.write('\n\t\tFogMode: 0') - file.write('\n\t\tFogDensity: %.3f' % mist_intense) - file.write('\n\t\tFogStart: %.3f' % mist_start) - file.write('\n\t\tFogEnd: %.3f' % mist_end) - file.write('\n\t\tFogColor: %.1f,%.1f,%.1f,1' % tuple(world_hor)) - file.write('\n\t}') - file.write('\n\tSettings: {') - file.write('\n\t\tFrameRate: "%i"' % int(fps)) - file.write('\n\t\tTimeFormat: 1') - file.write('\n\t\tSnapOnFrames: 0') - file.write('\n\t\tReferenceTimeIndex: -1') - file.write('\n\t\tTimeLineStartTime: %i' % fbx_time(start-1)) - file.write('\n\t\tTimeLineStopTime: %i' % fbx_time(end-1)) - file.write('\n\t}') - file.write('\n\tRendererSetting: {') - file.write('\n\t\tDefaultCamera: "Producer Perspective"') - file.write('\n\t\tDefaultViewingMode: 0') - file.write('\n\t}') - file.write('\n}') - file.write('\n') - - # Incase sombody imports this, clean up by clearing global dicts - sane_name_mapping_ob.clear() - sane_name_mapping_mat.clear() - sane_name_mapping_tex.clear() - - ob_arms[:] = [] - ob_bones[:] = [] - ob_cameras[:] = [] - ob_lights[:] = [] - ob_meshes[:] = [] - ob_null[:] = [] - - - # copy images if enabled - if EXP_IMAGE_COPY: - copy_images( basepath, [ tex[1] for tex in textures if tex[1] != None ]) - - print 'export finished in %.4f sec.' % (Blender.sys.time() - start_time) - return True - - -# -------------------------------------------- -# UI Function - not a part of the exporter. -# this is to seperate the user interface from the rest of the exporter. 
-from Blender import Draw, Window -EVENT_NONE = 0 -EVENT_EXIT = 1 -EVENT_REDRAW = 2 -EVENT_FILESEL = 3 - -GLOBALS = {} - -# export opts - -def do_redraw(e,v): GLOBALS['EVENT'] = e - -# toggle between these 2, only allow one on at once -def do_obs_sel(e,v): - GLOBALS['EVENT'] = e - GLOBALS['EXP_OBS_SCENE'].val = 0 - GLOBALS['EXP_OBS_SELECTED'].val = 1 - -def do_obs_sce(e,v): - GLOBALS['EVENT'] = e - GLOBALS['EXP_OBS_SCENE'].val = 1 - GLOBALS['EXP_OBS_SELECTED'].val = 0 - -def do_obs_sce(e,v): - GLOBALS['EVENT'] = e - GLOBALS['EXP_OBS_SCENE'].val = 1 - GLOBALS['EXP_OBS_SELECTED'].val = 0 - -def do_batch_type_grp(e,v): - GLOBALS['EVENT'] = e - GLOBALS['BATCH_GROUP'].val = 1 - GLOBALS['BATCH_SCENE'].val = 0 - -def do_batch_type_sce(e,v): - GLOBALS['EVENT'] = e - GLOBALS['BATCH_GROUP'].val = 0 - GLOBALS['BATCH_SCENE'].val = 1 - -def do_anim_act_all(e,v): - GLOBALS['EVENT'] = e - GLOBALS['ANIM_ACTION_ALL'][0].val = 1 - GLOBALS['ANIM_ACTION_ALL'][1].val = 0 - -def do_anim_act_cur(e,v): - if GLOBALS['BATCH_ENABLE'].val and GLOBALS['BATCH_GROUP'].val: - Draw.PupMenu('Warning%t|Cant use this with batch export group option') - else: - GLOBALS['EVENT'] = e - GLOBALS['ANIM_ACTION_ALL'][0].val = 0 - GLOBALS['ANIM_ACTION_ALL'][1].val = 1 - -def fbx_ui_exit(e,v): - GLOBALS['EVENT'] = e - -def do_help(e,v): - url = 'http://wiki.blender.org/index.php/Scripts/Manual/Export/autodesk_fbx' - print 'Trying to open web browser with documentation at this address...' - print '\t' + url - - try: - import webbrowser - webbrowser.open(url) - except: - Blender.Draw.PupMenu("Error%t|Opening a webbrowser requires a full python installation") - print '...could not open a browser window.' - - - -# run when export is pressed -#def fbx_ui_write(e,v): -def fbx_ui_write(filename): - - # Dont allow overwriting files when saving normally - if not GLOBALS['BATCH_ENABLE'].val: - if not BPyMessages.Warning_SaveOver(filename): - return - - GLOBALS['EVENT'] = EVENT_EXIT - - # Keep the order the same as above for simplicity - # the [] is a dummy arg used for objects - - Blender.Window.WaitCursor(1) - - # Make the matrix - GLOBAL_MATRIX = mtx4_identity - GLOBAL_MATRIX[0][0] = GLOBAL_MATRIX[1][1] = GLOBAL_MATRIX[2][2] = GLOBALS['_SCALE'].val - if GLOBALS['_XROT90'].val: GLOBAL_MATRIX = GLOBAL_MATRIX * mtx4_x90n - if GLOBALS['_YROT90'].val: GLOBAL_MATRIX = GLOBAL_MATRIX * mtx4_y90n - if GLOBALS['_ZROT90'].val: GLOBAL_MATRIX = GLOBAL_MATRIX * mtx4_z90n - - ret = write(\ - filename, None,\ - GLOBALS['EXP_OBS_SELECTED'].val,\ - GLOBALS['EXP_MESH'].val,\ - GLOBALS['EXP_MESH_APPLY_MOD'].val,\ - GLOBALS['EXP_MESH_HQ_NORMALS'].val,\ - GLOBALS['EXP_ARMATURE'].val,\ - GLOBALS['EXP_LAMP'].val,\ - GLOBALS['EXP_CAMERA'].val,\ - GLOBALS['EXP_EMPTY'].val,\ - GLOBALS['EXP_IMAGE_COPY'].val,\ - GLOBAL_MATRIX,\ - GLOBALS['ANIM_ENABLE'].val,\ - GLOBALS['ANIM_OPTIMIZE'].val,\ - GLOBALS['ANIM_OPTIMIZE_PRECISSION'].val,\ - GLOBALS['ANIM_ACTION_ALL'][0].val,\ - GLOBALS['BATCH_ENABLE'].val,\ - GLOBALS['BATCH_GROUP'].val,\ - GLOBALS['BATCH_SCENE'].val,\ - GLOBALS['BATCH_FILE_PREFIX'].val,\ - GLOBALS['BATCH_OWN_DIR'].val,\ - ) - - Blender.Window.WaitCursor(0) - GLOBALS.clear() - - if ret == False: - Draw.PupMenu('Error%t|Path cannot be written to!') - - -def fbx_ui(): - # Only to center the UI - x,y = GLOBALS['MOUSE'] - x-=180; y-=0 # offset... 
just to get it centered - - Draw.Label('Export Objects...', x+20,y+165, 200, 20) - - if not GLOBALS['BATCH_ENABLE'].val: - Draw.BeginAlign() - GLOBALS['EXP_OBS_SELECTED'] = Draw.Toggle('Selected Objects', EVENT_REDRAW, x+20, y+145, 160, 20, GLOBALS['EXP_OBS_SELECTED'].val, 'Export selected objects on visible layers', do_obs_sel) - GLOBALS['EXP_OBS_SCENE'] = Draw.Toggle('Scene Objects', EVENT_REDRAW, x+180, y+145, 160, 20, GLOBALS['EXP_OBS_SCENE'].val, 'Export all objects in this scene', do_obs_sce) - Draw.EndAlign() - - Draw.BeginAlign() - GLOBALS['_SCALE'] = Draw.Number('Scale:', EVENT_NONE, x+20, y+120, 140, 20, GLOBALS['_SCALE'].val, 0.01, 1000.0, 'Scale all data, (Note! some imports dont support scaled armatures)') - GLOBALS['_XROT90'] = Draw.Toggle('Rot X90', EVENT_NONE, x+160, y+120, 60, 20, GLOBALS['_XROT90'].val, 'Rotate all objects 90 degrese about the X axis') - GLOBALS['_YROT90'] = Draw.Toggle('Rot Y90', EVENT_NONE, x+220, y+120, 60, 20, GLOBALS['_YROT90'].val, 'Rotate all objects 90 degrese about the Y axis') - GLOBALS['_ZROT90'] = Draw.Toggle('Rot Z90', EVENT_NONE, x+280, y+120, 60, 20, GLOBALS['_ZROT90'].val, 'Rotate all objects 90 degrese about the Z axis') - Draw.EndAlign() - - y -= 35 - - Draw.BeginAlign() - GLOBALS['EXP_EMPTY'] = Draw.Toggle('Empty', EVENT_NONE, x+20, y+120, 60, 20, GLOBALS['EXP_EMPTY'].val, 'Export empty objects') - GLOBALS['EXP_CAMERA'] = Draw.Toggle('Camera', EVENT_NONE, x+80, y+120, 60, 20, GLOBALS['EXP_CAMERA'].val, 'Export camera objects') - GLOBALS['EXP_LAMP'] = Draw.Toggle('Lamp', EVENT_NONE, x+140, y+120, 60, 20, GLOBALS['EXP_LAMP'].val, 'Export lamp objects') - GLOBALS['EXP_ARMATURE'] = Draw.Toggle('Armature', EVENT_NONE, x+200, y+120, 60, 20, GLOBALS['EXP_ARMATURE'].val, 'Export armature objects') - GLOBALS['EXP_MESH'] = Draw.Toggle('Mesh', EVENT_REDRAW, x+260, y+120, 80, 20, GLOBALS['EXP_MESH'].val, 'Export mesh objects', do_redraw) #, do_axis_z) - Draw.EndAlign() - - if GLOBALS['EXP_MESH'].val: - # below mesh but - Draw.BeginAlign() - GLOBALS['EXP_MESH_APPLY_MOD'] = Draw.Toggle('Modifiers', EVENT_NONE, x+260, y+100, 80, 20, GLOBALS['EXP_MESH_APPLY_MOD'].val, 'Apply modifiers to mesh objects') #, do_axis_z) - GLOBALS['EXP_MESH_HQ_NORMALS'] = Draw.Toggle('HQ Normals', EVENT_NONE, x+260, y+80, 80, 20, GLOBALS['EXP_MESH_HQ_NORMALS'].val, 'Generate high quality normals') #, do_axis_z) - Draw.EndAlign() - - GLOBALS['EXP_IMAGE_COPY'] = Draw.Toggle('Copy Image Files', EVENT_NONE, x+20, y+80, 160, 20, GLOBALS['EXP_IMAGE_COPY'].val, 'Copy image files to the destination path') #, do_axis_z) - - - Draw.Label('Export Armature Animation...', x+20,y+45, 300, 20) - - GLOBALS['ANIM_ENABLE'] = Draw.Toggle('Enable Animation', EVENT_REDRAW, x+20, y+25, 160, 20, GLOBALS['ANIM_ENABLE'].val, 'Export keyframe animation', do_redraw) - if GLOBALS['ANIM_ENABLE'].val: - Draw.BeginAlign() - GLOBALS['ANIM_OPTIMIZE'] = Draw.Toggle('Optimize Keyframes', EVENT_REDRAW, x+20, y+0, 160, 20, GLOBALS['ANIM_OPTIMIZE'].val, 'Remove double keyframes', do_redraw) - if GLOBALS['ANIM_OPTIMIZE'].val: - GLOBALS['ANIM_OPTIMIZE_PRECISSION'] = Draw.Number('Precission: ', EVENT_NONE, x+180, y+0, 160, 20, GLOBALS['ANIM_OPTIMIZE_PRECISSION'].val, 1, 16, 'Tolerence for comparing double keyframes (higher for greater accuracy)') - Draw.EndAlign() - - Draw.BeginAlign() - GLOBALS['ANIM_ACTION_ALL'][1] = Draw.Toggle('Current Action', EVENT_REDRAW, x+20, y-25, 160, 20, GLOBALS['ANIM_ACTION_ALL'][1].val, 'Use actions currently applied to the armatures (use scene start/end frame)', do_anim_act_cur) - 
GLOBALS['ANIM_ACTION_ALL'][0] = Draw.Toggle('All Actions', EVENT_REDRAW, x+180,y-25, 160, 20, GLOBALS['ANIM_ACTION_ALL'][0].val, 'Use all actions for armatures', do_anim_act_all) - Draw.EndAlign() - - - Draw.Label('Export Batch...', x+20,y-60, 300, 20) - GLOBALS['BATCH_ENABLE'] = Draw.Toggle('Enable Batch', EVENT_REDRAW, x+20, y-80, 160, 20, GLOBALS['BATCH_ENABLE'].val, 'Automate exporting multiple scenes or groups to files', do_redraw) - - if GLOBALS['BATCH_ENABLE'].val: - Draw.BeginAlign() - GLOBALS['BATCH_GROUP'] = Draw.Toggle('Group > File', EVENT_REDRAW, x+20, y-105, 160, 20, GLOBALS['BATCH_GROUP'].val, 'Export each group as an FBX file', do_batch_type_grp) - GLOBALS['BATCH_SCENE'] = Draw.Toggle('Scene > File', EVENT_REDRAW, x+180, y-105, 160, 20, GLOBALS['BATCH_SCENE'].val, 'Export each scene as an FBX file', do_batch_type_sce) - - # Own dir requires OS module - if os: - GLOBALS['BATCH_OWN_DIR'] = Draw.Toggle('Own Dir', EVENT_NONE, x+20, y-125, 80, 20, GLOBALS['BATCH_OWN_DIR'].val, 'Create a dir for each exported file') - GLOBALS['BATCH_FILE_PREFIX'] = Draw.String('Prefix: ', EVENT_NONE, x+100, y-125, 240, 20, GLOBALS['BATCH_FILE_PREFIX'].val, 64, 'Prefix each file with this name ') - else: - GLOBALS['BATCH_FILE_PREFIX'] = Draw.String('Prefix: ', EVENT_NONE, x+20, y-125, 320, 20, GLOBALS['BATCH_FILE_PREFIX'].val, 64, 'Prefix each file with this name ') - - - Draw.EndAlign() - - #y+=80 - - ''' - Draw.BeginAlign() - GLOBALS['FILENAME'] = Draw.String('path: ', EVENT_NONE, x+20, y-170, 300, 20, GLOBALS['FILENAME'].val, 64, 'Prefix each file with this name ') - Draw.PushButton('..', EVENT_FILESEL, x+320, y-170, 20, 20, 'Select the path', do_redraw) - ''' - # Until batch is added - # - - - #Draw.BeginAlign() - Draw.PushButton('Online Help', EVENT_REDRAW, x+20, y-160, 100, 20, 'Open online help in a browser window', do_help) - Draw.PushButton('Cancel', EVENT_EXIT, x+130, y-160, 100, 20, 'Exit the exporter', fbx_ui_exit) - Draw.PushButton('Export', EVENT_FILESEL, x+240, y-160, 100, 20, 'Export the fbx file', do_redraw) - - #Draw.PushButton('Export', EVENT_EXIT, x+180, y-160, 160, 20, 'Export the fbx file', fbx_ui_write) - #Draw.EndAlign() - - # exit when mouse out of the view? 
- # GLOBALS['EVENT'] = EVENT_EXIT - -#def write_ui(filename): -def write_ui(): - - # globals - GLOBALS['EVENT'] = EVENT_REDRAW - #GLOBALS['MOUSE'] = Window.GetMouseCoords() - GLOBALS['MOUSE'] = [i/2 for i in Window.GetScreenSize()] - GLOBALS['FILENAME'] = '' - ''' - # IF called from the fileselector - if filename == None: - GLOBALS['FILENAME'] = filename # Draw.Create(Blender.sys.makename(ext='.fbx')) - else: - GLOBALS['FILENAME'].val = filename - ''' - GLOBALS['EXP_OBS_SELECTED'] = Draw.Create(1) # dont need 2 variables but just do this for clarity - GLOBALS['EXP_OBS_SCENE'] = Draw.Create(0) - - GLOBALS['EXP_MESH'] = Draw.Create(1) - GLOBALS['EXP_MESH_APPLY_MOD'] = Draw.Create(1) - GLOBALS['EXP_MESH_HQ_NORMALS'] = Draw.Create(0) - GLOBALS['EXP_ARMATURE'] = Draw.Create(1) - GLOBALS['EXP_LAMP'] = Draw.Create(1) - GLOBALS['EXP_CAMERA'] = Draw.Create(1) - GLOBALS['EXP_EMPTY'] = Draw.Create(1) - GLOBALS['EXP_IMAGE_COPY'] = Draw.Create(0) - # animation opts - GLOBALS['ANIM_ENABLE'] = Draw.Create(1) - GLOBALS['ANIM_OPTIMIZE'] = Draw.Create(1) - GLOBALS['ANIM_OPTIMIZE_PRECISSION'] = Draw.Create(4) # decimal places - GLOBALS['ANIM_ACTION_ALL'] = [Draw.Create(0), Draw.Create(1)] # not just the current action - - # batch export options - GLOBALS['BATCH_ENABLE'] = Draw.Create(0) - GLOBALS['BATCH_GROUP'] = Draw.Create(1) # cant have both of these enabled at once. - GLOBALS['BATCH_SCENE'] = Draw.Create(0) # see above - GLOBALS['BATCH_FILE_PREFIX'] = Draw.Create(Blender.sys.makename(ext='_').split('\\')[-1].split('/')[-1]) - GLOBALS['BATCH_OWN_DIR'] = Draw.Create(0) - # done setting globals - - # Used by the user interface - GLOBALS['_SCALE'] = Draw.Create(1.0) - GLOBALS['_XROT90'] = Draw.Create(True) - GLOBALS['_YROT90'] = Draw.Create(False) - GLOBALS['_ZROT90'] = Draw.Create(False) - - # best not do move the cursor - # Window.SetMouseCoords(*[i/2 for i in Window.GetScreenSize()]) - - # hack so the toggle buttons redraw. this is not nice at all - while GLOBALS['EVENT'] != EVENT_EXIT: - - if GLOBALS['BATCH_ENABLE'].val and GLOBALS['BATCH_GROUP'].val and GLOBALS['ANIM_ACTION_ALL'][1].val: - #Draw.PupMenu("Warning%t|Cant batch export groups with 'Current Action' ") - GLOBALS['ANIM_ACTION_ALL'][0].val = 1 - GLOBALS['ANIM_ACTION_ALL'][1].val = 0 - - if GLOBALS['EVENT'] == EVENT_FILESEL: - if GLOBALS['BATCH_ENABLE'].val: - txt = 'Batch FBX Dir' - name = Blender.sys.expandpath('//') - else: - txt = 'Export FBX' - name = Blender.sys.makename(ext='.fbx') - - Blender.Window.FileSelector(fbx_ui_write, txt, name) - #fbx_ui_write('/test.fbx') - break - - Draw.UIBlock(fbx_ui, 0) - - - # GLOBALS.clear() -#test = [write_ui] -if __name__ == '__main__': - # Cant call the file selector first because of a bug in the interface that crashes it. - # Blender.Window.FileSelector(write_ui, 'Export FBX', Blender.sys.makename(ext='.fbx')) - #write('/scratch/test.fbx') - #write_ui('/scratch/test.fbx') - - if not set: - Draw.PupMenu('Error%t|A full install of python2.3 or python 2.4+ is needed to run this script.') - else: - write_ui() diff --git a/release/scripts/export_lightwave_motion.py b/release/scripts/export_lightwave_motion.py deleted file mode 100644 index 562e44f3a2b..00000000000 --- a/release/scripts/export_lightwave_motion.py +++ /dev/null @@ -1,157 +0,0 @@ -#!BPY - -""" Registration info for Blender menus: <- these words are ignored -Name: 'Lightwave Motion (.mot)...' 
-Blender: 241 -Group: 'Export' -Tip: 'Export Loc Rot Size chanels to a Lightwave .mot file' -""" - -__author__ = "Daniel Salazar (ZanQdo)" -__url__ = ("blender", "blenderartists.org", -"e-mail: zanqdo@gmail.com") -__version__ = "16/04/08" - -__bpydoc__ = """\ -This script exports the selected object's motion channels to Lightwave -motion files (.mot). - -Usage: -Run the script with one or more objects selected (any kind), frames exported -are between Start and End frames in Render buttons. - -""" - -# $Id$ -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Copyright (C) 2003, 2004: A Vanpoucke -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- -import Blender as B -import math as M -#------------------------------------ -#Declarados: -TotalCanales = 9 -#------------------------------------ - -def FuncionPrincipal (Dir): - B.Window.WaitCursor(1) - ObjSelect = B.Object.GetSelected() - - if not ObjSelect: - B.Draw.PupMenu('Select 1 or more objects, aborting.') - return - - if not Dir.lower().endswith('.mot'): - Dir += '.mot' - - - SC = B.Scene.GetCurrent() - SCR = SC.getRenderingContext() - - for ob in ObjSelect: - origName= NombreObjeto= ob.name - print '----\nExporting Object "%s" motion file...' % origName - - FrameA = B.Get('curframe') - FrameP = B.Get('staframe') - FrameF = B.Get('endframe') - - FrameRate = float(SCR.framesPerSec()) - - #--------------------------------------------- - - # Replace danger characters by '_' - for ch in ' /\\~!@#$%^&*()+=[];\':",./<>?\t\r\n': - NombreObjeto = NombreObjeto.replace(ch, '_') - - # Check for file path extension - if len(ObjSelect) > 1: - DirN= '%s_%s.mot' % (Dir[:-4], NombreObjeto) - else: - DirN= Dir - - # Open the file - File = open(DirN,'w') - File.write ('LWMO\n3\n\n') # 3 is the version number. 
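# [Editor's note: illustrative sketch, not part of the original patch.]
# CicloPrimario() below emits the nine Lightwave motion channels per object:
# position X,Y,Z, then the three rotation channels (heading/pitch/bank in
# Lightwave terms), then scale X,Y,Z.  To go from Blender's Z-up coordinates
# to Lightwave's Y-up convention it swaps the Y and Z components and negates
# the Euler angles before converting degrees to radians.  A standalone sketch
# of that remapping (the helper name is hypothetical):

import math

def blender_to_lightwave_channels(loc, euler_deg, scale):
	x, y, z = loc
	rx, ry, rz = euler_deg
	sx, sy, sz = scale
	return [x, z, y,                                                  # channels 0-2: position
		math.radians(-rz), math.radians(-rx), math.radians(-ry),  # channels 3-5: rotation
		sx, sz, sy]                                               # channels 6-8: scale

# blender_to_lightwave_channels((1, 2, 3), (0, 0, 90), (1, 1, 1))
# returns [1, 3, 2, -1.5707963..., -0.0, -0.0, 1, 1, 1]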
- - # number of channels - File.write ('NumChannels %i\n' % TotalCanales) - - # ---------------------------- - # Main Cycle - - def CicloPrimario(NumCanal): - B.Set('curframe', FrameP) - - File.write ('Channel %i\n{ Envelope\n %i\n' % (NumCanal, (FrameF - FrameP + 1))) - - FrameA = FrameP - while FrameA < (FrameF + 1): - - B.Set('curframe', FrameA) - - mat= ob.mat # Worldspace matrix - - if NumCanal == 0: - Val = mat.translationPart().x - elif NumCanal == 1: - Val = mat.translationPart().z - elif NumCanal == 2: - Val = mat.translationPart().y - elif NumCanal == 3: - Val = M.radians (-mat.toEuler().z) - elif NumCanal == 4: - Val = M.radians (-mat.toEuler().x) - elif NumCanal == 5: - Val = M.radians (-mat.toEuler().y) - elif NumCanal == 6: - Val = mat.scalePart().x - elif NumCanal == 7: - Val = mat.scalePart().z - elif NumCanal == 8: - Val = mat.scalePart().y - File.write (' Key %f %f 3 0 0 0 0 0 0\n' % (Val, (FrameA/FrameRate))) - - FrameA += 1 - # Ending Stuff - File.write (' Behaviors 1 1\n}\n') - - NumObjetoActual = len(ObjSelect) - Iteraciones = 0 - ProgBarVal = 0.0 - while Iteraciones < TotalCanales: - CicloPrimario(Iteraciones) - - # Start Progress Bar - B.Window.DrawProgressBar(ProgBarVal, origName) - ProgBarVal = (float(Iteraciones) / TotalCanales) * 0.98 - Iteraciones += 1 - - B.Window.DrawProgressBar(1.0, '') # Done - print '\nDone, %s motion file location is:\n%s\n' % (origName, DirN) - B.Window.WaitCursor(0) - -# Check if there are selected objects -def main(): - B.Window.FileSelector(FuncionPrincipal, "Write .mot File", B.sys.makename(ext='.mot')) - -if __name__=='__main__': - main() diff --git a/release/scripts/export_m3g.py b/release/scripts/export_m3g.py deleted file mode 100644 index c74e7acbcd3..00000000000 --- a/release/scripts/export_m3g.py +++ /dev/null @@ -1,3074 +0,0 @@ -#!BPY -# coding: utf-8 -""" Registration info for Blender menus: -Name: 'M3G (.m3g, .java)...' -Blender: 244 -Group: 'Export' -Tooltip: 'Export to M3G' -""" -#------------------------------------------------------------------------ -# M3G exporter for blender 2.37 or above -# -# Source: http://www.nelson-games.de/bl2m3g/source -# -# $Id$ -# -# Author: Gerhard Völkl -# -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Copyright (C) 2005: gerhard völkl gkvoelkl@yahoo.de -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - -# ***** END GPL LICENCE BLOCK ***** -# -# To use script: -# 1.) load this file in the text window. -# (press SHIFT+F11, Open New via Datablock button) -# 2.) make sure your mouse is over the text edit window and -# run this script. (press ALT+P) -# Or: -# copy to the scripts directory and it will appear in the -# export list. 
(Needs 2.32 or higher) -# -# Based on informations from: -# wrl2export.py from Rick Kimball and others -# --------------------------------------------------------------------------# -# History 0.2 -# * maximal Precision in VertexArray (with algorithms from Kalle Raita) -# * IPO Animation with mesh: Rotation, Translation and Size -# History 0.3 -# * to find a 3d object in your java programm you can assign a userID -# your blender object has name 'cube#01' your 3d object will have ID 01 -# the number after '#' is taken -# * more than one material per mesh can be used -# * uv texture support (implemented by Aki Koskinen and Juha Laitinen) -# The image which is bound to the faces will be exportet within m3g-file -# Limitations by M3G-API: -# The width and height of the image must be non-negative powers of two, -# but they need not to be equal. Maximum value is 256. -# *.java export: Only PNG images can be used. -# History 0.4 -# * check limitation of texture images (credit to MASTER_ZION for Brasil) -# * Better light: The light modeles of Blender and M3G are naturally -# different. So the export script trys to translate as much as possible -# -# M3G Light type Blender Light type -# -------------------------------------------------------------- -# AMIENT Light Not available as light type in Blender -# DIRECTIONAL Light SUN -# OMNIdirectional light LAMP -# SPOT light SPOT -# not translated HEMI -# not translated AREA -# -# Attributs of M3G Lights: -# -# Attenuation (OMNI,SPOT): -# Intensity of light changes with distance -# The attenuation factor is 1 / (c + l d + q d2) -# where d is the distance between the light and the vertex being lighted -# and c, l, q are the constant, linear, and quadratic coefficients. -# In Blender exists much complex posibilies. To simplify exporter uses -# only button Dist: distance at which the light intensity is half -# the Energy -# Color (ALL) -# Color of light -# Intensity (ALL) -# The RGB color of this Light is multiplied component-wise with the -# intensity. In Blender : energy -# SpotAngle (SPOT) -# the spot cone angle for this Light -# In Blender: spotSize -# SpotExponent (SPOT) -# The spot exponent controls the distribution of the intensity of -# this Light within the spot cone, such that larger values yield -# a more concentrated cone. In Blender: SpotBl -# -# * Some GUI for options -# First prototype of GUI was created using RipSting's Blender-Python -# GUI designer. Download at Http://oregonstate.edu/~dennisa/Blender/BPG/ -# -# * Ambiente light -# Information is taken by world ambiente attribute -# -# * Parenting Part 1 -# In Blender the Empty object is used to group objects. All objects -# which have the same empty as parent are the member of the same group. -# -# empty <-- Parent of -- element 1 -# <-- Parent of -- element 2 -# -# is translated in M3G -# -# group-Node -- Member --> element 1 -# -- Member --> element 2 -# -# In Blender every object can be the parent of every other object -# In M3G that is not possible. Only a group object can be parent. -# (Or the world object which is derived from group). -# That will come later as Parenting Part 2 -# -# * Backface Culling -# you can use backface culling, if option "use backface culloing" is on. -# Culling will be set in PolygonMode object of every mesh. The correct -# winding is controlled. -# History 0.5 -#* Bone Animation - Armature (Part 1) -# -# Armature is the skeleton for skinned meshes. 
It stores the bones in -# rest position (more information http://www.blender.org/cms/How_Armatures_work.634.0.html) -# You can work in Blender with bones and meshes in different ways. In -# this first attempt only the use of vertex groups is assisted. -# -# Blender-Objekts translated into M3G-Objects -# -# MESH SkinnedMesh -# | | -# v v -# ARMATURE Group -# | | -# v v -# BONE_1 Group -# Group_second -# | | -# V v -# BONE_2 Group -# Group_secound -# -# Every bone is translated into two groups at the moment, because -# the second bone is needed to do the animation in an easy way. -# -# The animations in Blender for meshes are stored in action objects. -# -# Blender Objects translated into M3G-Objects -# -# ARMATURE -# | activ -# v -# ACTION ANIMATIONCONTROLLER -# | 1..n ^ -# v ANIMATIONTRACK --> Group_second -# IPOs | -# v -# KEYSEQUENZE -# -# One action is translated into one animationcontroller. One IPO is -# translated in one KEYSEQUENZE and one ANIMATIONTRACK. -# -# At the moment only the active action of the armature object is translated. -# -#* Print Info, if type of light is used that is not supported -# -# History 0.5 -# -#* New Option exportAllAction (default value: false) -# If that option is true, all actions will be exported - not only the active -# action. -# At the moment you can only assign one action to one armature. -# To know which action is used with which armature the action -# needs a special name : -# #AE# - -# Example: Name of action : walk#A10E250#02 -# Name of armature : man#10 -# End Frame: 250 -# -# History 0.6 -# Include the same image only one time into the m3g-file -# -# All the following changes of this version was made by Claus Hoefele -# -#* Until now all vertices of the faces was been written. -# Now the vertices will be used again if possible: -# normal and texture coordinates of to vertices have to be the same -# -#* Smooth/solid shading can now be defined for every single material: -# in Editing panel (F9)>Link and Materials -# -#* This script uses now correctly the TexFace and Shadless Buttons in -# Shading panel (F5)>Material buttons>Material box. -# TexFace switches on/off the Export of texture coordinates. -# Shadeless does the some with the normal coordinates -# -#* The GUI was redesigned in a PupBlock -# -#* Options: -# -#** Texturing Enabled: Switches on/off export of textures and texture -# coordinates. Attention: the TextFace button switches only -# for one mesh -#** Texturing External: the textures will be included it mg3-file or -# exported in seperate file -#** Lighting Enabled: turns on/off export of lights and normal completly -# Attention: Shadeless only for one mesh -#** Persp. Correction: turns on/off perspective correction in PolygonMode. -#** Smooth Shading: turns on/off smooth shading in PolygonMode. -# -#* Textures in external references are used again (with ImageFactory) -# -#* Blender function: Double Sided button in Editing Context -# (F9)>Mesh panel) -# turn on/off PolygonMode.CULL_BACK anzuschalten. -# -#* Script ingnores meshes that have no faces -# -# History 0.7 -# -# * Exporter can work with texture coordinates greater 1 and smaller 0 -# -# * Adler32 did not work always correct. New implementation made. -# -# * Modul shutil is not needed any longer. Exporter has its own copy_file. 
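# Editor's note: the history entry above mentions reimplementing Adler-32; each section
# of an .m3g file ends with an Adler-32 checksum of the preceding section bytes.  A
# minimal sketch using the zlib module this script already imports -- the section
# content here is made up purely for illustration.
import struct, zlib

section_body = struct.pack('<BII', 0, 21, 13) + b'dummy objects'   # made-up section data
checksum = zlib.adler32(section_body) & 0xffffffff                 # mask keeps it unsigned on all Python versions
section = section_body + struct.pack('<I', checksum)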
-# (realized and inspired by ideasman_42 and Martin Neumann) -# -# History 0.8 -# -# * Blender works with SpotAngles 1..180 but M3G works only with 0..90 -# M3G use the 'half angle' (cut off angle) (Thanks to Martin Storsjö) -# -# * Error fixed: Texture coordinates was not calculated correct. -# (Thanks to Milan Piskla, Vlad, Max Gilead, Regis Cosnier ...) -# -# * New options in GUI: -# M3G Version 2.0 : Will export M3G files Vers. 2.0 in future -# Game Physics: Adds Game Physics infos for NOPE API -# -# --------------------------------------------------------------------------# -# TODO: Export only selected mesh -# TODO: Optimize Bones <--> Vertex Group mapping -# TODO: Compressed File -# TODO: MTex - Support -# TODO: By Rotating use SQUAD instead of Beziere. It's smoother -import Blender -from Blender import Types,Lamp,Material,Texture,Window,Registry,Draw -from Blender.BGL import * -from Blender.Object import * -from Blender.Camera import * -from Blender.Mesh import * -from array import array -import sys, struct, zlib -from inspect import * -from types import * -from Blender.Mathutils import * -from os.path import * -#import rpdb2 - -# ---- Helper Functions -------------------------------------------------------# -def copy_file(source, dest): - file = open(source, 'rb') - data = file.read() - file.close() - - file = open(dest, 'wb') - file.write(data) - file.close() - -def tracer(frame, event, arg): - '''Global trace function''' - if event=='call': - tmp = getargvalues(frame) - print event, frame.f_code.co_name, frame.f_lineno, \ - formatargvalues(tmp[0],tmp[1],tmp[2],tmp[3]) - elif event=='line': - print event, frame.f_code.co_name, frame.f_lineno - #print event, frame.f_code.co_name, frame.f_lineno, \ - # getsourcelines(frame.f_code)[frame.f_lineno] - elif event=='return': - print event, frame.f_code.co_name, frame.f_lineno, "->", arg - return tracer - -def doSearchDeep(inList,outList): - '''Does deepsearch for all elements in inList''' - for element in inList: - if element != None : outList = element.searchDeep(outList) - return outList - - -def getId(aObject): - ''' returns 0 if Object is None: M3G value for null''' - if aObject == None: return 0 - return aObject.id - -def toJavaBoolean(aValue): - ''' returns java equivalent to boolean''' - if aValue: - return 'true' - else : - return 'false' - -def sign(a): - if a<0 : return -1 - elif a>0 : return 1 - else : return 0 - -def isOrderClockWise(v,normal): - ''' returns true, if order of vertices is clockwise. Important for - culling ''' - # (v2-v0)x(v2-v1)=surface_normal - # - if type(v[0]) is Types.MVertType: - mNormal = TriangleNormal(Vector(v[0].co),Vector(v[1].co),Vector(v[2].co)) - else: - mNormal = TriangleNormal(Vector(v[0]),Vectot(v[1]),Vector(v[2])) - #print "normal ",mNormal.normalize() - #print "BNormal ",normal.normalize() - - # Do not use any longer. 
Blender does it correct - - result = (sign(normal.x)==sign(mNormal.x) and - sign(normal.y)==sign(mNormal.y) and - sign(normal.z)==sign(mNormal.z)) - #print "Result ",result - - return True - - -# ---- M3G Types --------------------------------------------------------------# -class M3GVertexList: - def __init__(self, wrapList): - self.mlist = wrapList - - def __getitem__(self, key): - item = self.mlist[key] - if type(item) is Types.MVertType: - result =(item.co[0],item.co[1],item.co[2]) - else: - result = item - return result - -class M3GBoneReference: - def __init__(self,first,count): - self.firstVertex=first #UInt32 - self.vertexCount=count #UInt32 - - -class M3GBone: - def __init__(self): - self.verts=[] #List of influenced verts - self.transformNode=None #ObjectIndex - self.references = [] #References to Verts that are needed - self.weight=0 #Int32 - - - def setVerts(self,aVerts): - self.verts = aVerts - self.createReferences() - - def createReferences(self): - #print "createReference::len(verts) ",len(self.verts) - if len(self.verts)==0: return #No Verts available - self.verts.sort() - ref = [] - list = [] - last = self.verts[0]-1 - count = 0 - for vert in self.verts: - #print "vert ",vert - if vert==last+1: - list.append(vert) - else: - ref.append(M3GBoneReference(list[0],len(list))) - #print list[0],len(list) - list=[vert] - last=vert - #print "list ",list - if len(list)>0: - ref.append(M3GBoneReference(list[0],len(list))) - self.references = ref - - -class M3GVector3D: - def __init__(self,ax=0.0,ay=0.0,az=0.0): - self.x = ax #Float32 - self.y = ay #Float32 - self.z = az #Float32 - - def writeJava(self): - return str(self.x)+"f, "+str(self.y)+"f, "+str(self.z)+"f" - - def getData(self): - return struct.pack("<3f",self.x,self.y,self.z) - - def getDataLength(self): - return struct.calcsize("<3f") - -class M3GMatrix: - """ A 4x4 generalized matrix. The 16 elements of the - matrix are output in the same order as they are - retrieved using the API Transform.get method. In - other words, in this order: - 0 1 2 3 - 4 5 6 7 - 8 9 10 11 - 12 13 14 15 """ - def __init__(self): - self.elements=16 * [0.0] #Float32 - - def identity(self): - self.elements[ 0] = 1.0 - self.elements[ 5] = 1.0 - self.elements[10] = 1.0 - self.elements[15] = 1.0 - - def getData(self): - return struct.pack('<16f',self.elements[0],self.elements[1], - self.elements[2],self.elements[3], - self.elements[4],self.elements[5], - self.elements[6],self.elements[7], - self.elements[8],self.elements[9], - self.elements[10],self.elements[11], - self.elements[12],self.elements[13], - self.elements[14],self.elements[15]) - - def getDataLength(self): - return struct.calcsize('<16f') - - -class M3GColorRGB: - """ A color, with no alpha information. Each compo- - nent is scaled so that 0x00 is 0.0, and 0xFF is 1.0. - """ - def __init__(self,ared=0,agreen=0,ablue=0): - self.red = ared #Byte - self.green = agreen #Byte - self.blue = ablue #Byte - - def writeJava(self): - return "0x"+("%02X%02X%02X%02X" % (0.0, self.red, self.green, self.blue)) - - def getData(self): - return struct.pack('3B',self.red,self.green,self.blue) - - def getDataLength(self): - return struct.calcsize('3B') - - -class M3GColorRGBA: - """ A color, with alpha information. Each component - is scaled so that 0x00 is 0.0, and 0xFF is 1.0. The - alpha value is scaled so that 0x00 is completely - transparent, and 0xFF is completely opaque. 
- """ - def __init__(self,ared=0,agreen=0,ablue=0,aalpha=0): - self.red = ared #Byte - self.green = agreen #Byte - self.blue = ablue #Byte - self.alpha = aalpha #Byte - - def writeJava(self): - return "0x"+("%02X%02X%02X%02X" % (self.alpha, self.red, self.green, self.blue)) - - def getData(self): - return struct.pack('4B',self.red,self.green,self.blue,self.alpha) - - def getDataLength(self): - return struct.calcsize('4B') - - -#ObjectIndex -#The index of a previously encountered object in -#the file. Although this is serialized as a single -#unsigned integer, it is included in the compound -#type list because of the additional semantic infor- -#mation embodied in its type. A value of 0 is -#reserved to indicate a null reference; actual object indices start from 1. Object indices must refer -#only to null or to an object which has already been -#created during the input deserialization of a file - -#they must be less than or equal to the index of the -#object in which they appear. Other values are dis- -#allowed and must be treated as errors. -#UInt32 -#index; - -# ---- M3G Proxy --------------------------------------------------------------- # -class M3GProxy: - def __init__(self): - self.name = "" - self.id=0 - self.ObjectType=0 - self.binaryFormat='' - - def __repr__(self): - return "<"+str(self.__class__)[9:] + ":" + str(self.name) + ":" + str(self.id) + ">" - - -class M3GHeaderObject(M3GProxy): - def __init__(self): - M3GProxy.__init__(self) - self.M3GHeaderObject_binaryFormat = ' 0: - value += struct.calcsize('<'+str(len(self.animationTracks))+'I') - return value - - def writeJava(self,aWriter,aCreate): - if aCreate : pass #Abstract! Could not be created - if len(self.animationTracks) > 0 : - aWriter.write(2) - for iTrack in self.animationTracks: - aWriter.write(2,"BL%i.addAnimationTrack(BL%i);" % (self.id,iTrack.id)) - - -class M3GTransformable(M3GObject3D): - def __init__(self): - M3GObject3D.__init__(self) - self.hasComponentTransform=False #Boolean - #IF hasComponentTransform==TRUE, THEN - self.translation=M3GVector3D(0,0,0) #Vector3D - self.scale=M3GVector3D(1,1,1) #Vector3D - self.orientationAngle=0 #Float32 - self.orientationAxis=M3GVector3D(0,0,0) #Vector3D undefined - #END - self.hasGeneralTransform=False #Boolean - #IF hasGeneralTransform==TRUE, THEN - self.transform = M3GMatrix() #Matrix identity - self.transform.identity() - #END - #If either hasComponentTransform or hasGeneralTransform is false, the omitted fields will be - #initialized to their default values (equivalent to an identity transform in both cases). - - def writeJava(self,aWriter,aCreate): - if aCreate: pass #Abstract Base Class! 
Cant't be created - M3GObject3D.writeJava(self,aWriter,False) - if self.hasGeneralTransform : - aWriter.write(2,"float[] BL%i_matrix = {" % (self.id)) - aWriter.writeList(self.transform.elements,4,"f") - aWriter.write(2,"};") - aWriter.write(2) - aWriter.write(2,"Transform BL%i_transform = new Transform();" % (self.id)) - aWriter.write(2,"BL%i_transform.set(BL%i_matrix);" % (self.id,self.id)) - aWriter.write(2,"BL%i.setTransform(BL%i_transform);" % (self.id,self.id)) - aWriter.write(2) - if self.hasComponentTransform: - aWriter.write(2,("BL%i.setTranslation("+self.translation.writeJava()+");") - %(self.id)) - - def getData(self): - data = M3GObject3D.getData(self) - data += struct.pack(" 1: - aWriter.write(2,"IndexBuffer[] BL%i_indexArray = {" % (self.id)) - aWriter.write(4,",".join(["BL%i" %(i.id) for i in self.indexBuffer ])) - aWriter.write(2," };") - aWriter.write(2) - aWriter.write(2,"Appearance[] BL%i_appearanceArray = {" % (self.id)) - aWriter.write(4,",".join(["BL%i" %(i.id) for i in self.appearance ])) - aWriter.write(2," };") - aWriter.write(2) - aWriter.write(2,"%s BL%i = new %s(BL%i,BL%i_indexArray,BL%i_appearanceArray%s);" % \ - (aClassName,self.id,aClassName,self.vertexBuffer.id, self.id,self.id,aExtension)) - else: - #print "indexBuffer", len(self.indexBuffer) - #print "appearance", len(self.appearance) - aWriter.write(2,"%s BL%i = new %s(BL%i,BL%i,BL%i%s);" % \ - (aClassName, - self.id, - aClassName, - self.vertexBuffer.id, - self.indexBuffer[0].id, - self.appearance[0].id, - aExtension)) - M3GNode.writeJava(self,aWriter,False) - aWriter.write(2) - - -class M3GSkinnedMesh(M3GMesh): - def __init__(self,aVertexBuffer=None, aIndexBuffer=[], aAppearance=[]): - M3GMesh.__init__(self,aVertexBuffer, aIndexBuffer, aAppearance) - self.ObjectType=16 - self.skeleton=None #ObjectIndex - self.bones={} - #print"M3GSkinnedMesh.__init__::self.vertexBuffer:",self.vertexBuffer - ##ObjectIndex skeleton; - ##UInt32 transformReferenceCount; - ##FOR each bone reference... 
- ## ObjectIndex transformNode; - ## UInt32 firstVertex; - ## UInt32 vertexCount; - ## Int32 weight; - ##END - - def searchDeep(self,alist): - alist = doSearchDeep([self.skeleton],alist) - return M3GMesh.searchDeep(self,alist) - - def addSecondBone(self): - secondBones = {} - for bone in self.bones.values(): - bone2 = M3GBone() - bone2.verts=bone.verts - bone.verts=[] - mGroup = M3GGroup() - mGroup.name=bone.transformNode.name+"_second" - bone2.transformNode=mGroup - bone2.references = bone.references - bone.references = [] - bone2.weight = bone.weight - bone.weight=0 - mGroup.children = bone.transformNode.children - bone.transformNode.children = [mGroup] - mGroup.animationTracks=bone.transformNode.animationTracks - bone.transformNode.animationTracks = [] - secondBones[bone.transformNode.name+"_second"]=bone2 - for bone in secondBones.values(): - self.bones[bone.transformNode.name] = bone - - def getBlenderIndexes(self): - #print "M3GSkinnedMesh.vertexBuffer:",self.vertexBuffer - return self.vertexBuffer.positions.blenderIndexes - - def writeJava(self,aWriter,aCreate): - self.writeBaseJava(aWriter,aCreate,"SkinnedMesh", - (",BL%i" % (self.skeleton.id))) - aWriter.write(2,"//Transforms") - for bone in self.bones.values(): - #print "bone: ", bone - #print "bone.references: ", bone.references - for ref in bone.references: - aWriter.write(2,"BL%i.addTransform(BL%i,%i,%i,%i);" % - (self.id, - bone.transformNode.id,bone.weight, - ref.firstVertex, ref.vertexCount)) - aWriter.write(2) - - def getDataLength(self): - value = M3GMesh.getDataLength(self) - value += struct.calcsize(' element[i] : minimum[i] = element[i] - if maximum[i] < element[i] : maximum[i] = element[i] - #print i, maximum[i],element[i] - lrange=[0,0,0] - maxRange=0.0 - maxDimension=-1 - for i in range(3): #set bias - lrange[i] = maximum[i]-minimum[i] - self.bias[i] = minimum[i]*0.5+maximum[i]*0.5 - #print "Bias",i,self.bias[i],"min-max",minimum[i],maximum[i],"lrang",lrange[i] - if lrange[i] > maxRange: - maxRange = lrange[i] - maxDimension=i - self.scale = maxRange/65533.0 - #print "MaxRange ",maxRange - #print "scale",self.scale - - - def internalAutoScaling(self): - print "internalAutoScaling" - #Already done? 
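# Editor's note: useMaxPrecision() above picks one bias per axis and a shared scale so
# that float positions fit signed 16-bit shorts (the quantized values go into the M3G
# VertexArray, while scale and bias are stored on the VertexBuffer).  A standalone
# sketch of the same quantization, with made-up sample vertices:
verts = [(-1.0, 2.5, 0.0), (3.0, -0.5, 1.25)]

mins = [min(v[i] for v in verts) for i in range(3)]
maxs = [max(v[i] for v in verts) for i in range(3)]
bias = [(mins[i] + maxs[i]) * 0.5 for i in range(3)]
scale = max(maxs[i] - mins[i] for i in range(3)) / 65533.0   # keeps values inside the short range

quantized = [tuple(int((v[i] - bias[i]) / scale) for i in range(3)) for v in verts]
# decoding side: original value ~= quantized * scale + bias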
- print self.components.typecode - if not self.autoscaling or self.components.typecode!="f":return - #Find bais and scale - minimum=[] - maximum=[] - for i in range(self.componentCount): - minimum.append(self.components[i]) - maximum.append(self.components[i]) - for i in range(0,len(self.components),self.componentCount): - for j in range(self.componentCount): - if minimum[j] > self.components[i+j] : minimum[j] = self.components[i+j] - if maximum[j] < self.components[i+j] : maximum[j] = self.components[i+j] - #print "i+j=",i+j,"min=",minimum[j],"max=",maximum[j],"elem=",self.components[i+j] - #print "min=", minimum - #print "max=", maximum - lrange=[0] * self.componentCount - maxRange=0.0 - maxDimension=-1 - for i in range(self.componentCount): #set bias - lrange[i] = maximum[i]-minimum[i] - self.bias[i] = minimum[i]*0.5+maximum[i]*0.5 - #print "Bias",i,self.bias[i],"min-max",minimum[i],maximum[i],"lrang",lrange[i] - if lrange[i] > maxRange: - maxRange = lrange[i] - maxDimension=i - maxValue=(2**(8*self.componentSize)*1.0)-2.0 - #print "MaxValue=",maxValue - self.scale = maxRange/maxValue - #print "MaxRange ",maxRange - #print "scale",self.scale - #Copy Components - oldArray=self.components - self.components=self.createComponentArray() - for i in range(0,len(oldArray),self.componentCount): - for j in range(self.componentCount): - element=int((oldArray[i+j]-self.bias[j])/self.scale) - #print "element",element - self.components.append(element) - # Reverse t coordinate because M3G uses a different 2D coordinate system than Blender. - if self.uvmapping: - for i in range(0,len(self.components),2): - self.components[i+1]= int(self.components[i+1]*(-1)) #Error in Version 0.7 - for i in range(len(self.components)): - if abs(self.components[i])>maxValue:raise Exception( i+". 
element too great/small!") - - def writeJava(self,aWriter,aCreate): - self.internalAutoScaling() - if aCreate: - aWriter.write(2,"// VertexArray " + self.name) - if self.componentSize == 1: - aWriter.write(2,"byte[] BL%i_array = {" % (self.id)) - else: - aWriter.write(2,"short[] BL%i_array = {" % (self.id)) - aWriter.writeList(self.components) - aWriter.write(2,"};") - aWriter.write(2) - aWriter.write(2,"VertexArray BL%i = new VertexArray(BL%i_array.length/%i,%i,%i);" % - (self.id,self.id, - self.componentCount,self.componentCount,self.componentSize)) - aWriter.write(2,"BL%i.set(0,BL%i_array.length/%i,BL%i_array);" % - (self.id,self.id,self.componentCount,self.id)) - M3GObject3D.writeJava(self,aWriter,False) - aWriter.write(2) - - - def getData(self): - self.internalAutoScaling() - self.vertexCount = len(self.components)/self.componentCount - data = M3GObject3D.getData(self) - data += struct.pack('<3BH',self.componentSize, - self.componentCount, - self.encoding, - self.vertexCount) - componentType = "" - if self.componentSize == 1: - componentType = "b" - else: - componentType = "h" - for element in self.components: - data += struct.pack('<'+componentType,element) - return data - - def getDataLength(self): - self.internalAutoScaling() - value = M3GObject3D.getDataLength(self) - value += struct.calcsize('<3BH') - componentType = "" - if self.componentSize == 1: - componentType = "b" - else: - componentType = "h" - value += struct.calcsize('<'+str(len(self.components))+componentType) - return value - - def append(self,element,index=None): - #print "type(element):",type(element) - if type(element) is Types.vectorType : - for i in range(3): - value = int((element[i]-self.bias[i])/self.scale) - #print "append:",i,element[i],(element[i]-self.bias[i]),value - self.components.append(value) - elif type(element) is Types.MVertType: - for i in range(3): - value = int((element.co[i]-self.bias[i])/self.scale) - #print "append:",i,element[i],(element[i]-self.bias[i]),value - self.components.append(value) - if index!=None: - key=str(len(self.blenderIndexes)) - #print"key,index:",key,index - self.blenderIndexes[key]=index - #print"blenderIndexes",self.blenderIndexes - else: - print "VertexArray.append: element=",element - self.components.append(element) - -class M3GVertexBuffer(M3GObject3D): - def __init__(self): - M3GObject3D.__init__(self) - self.ObjectType=21 - self.defaultColor=M3GColorRGBA(255,255,255) #ColorRGBA 0xFFFFFFFF (opaque white). - self.positions = None #ObjectIndex - self.positionBias = [0.0,0.0,0.0] #Float32[3] - self.positionScale = 1.0 #Float32 - self.normals = None #ObjectIndex - self.colors = None #ObjectIndex - self.texCoordArrays = [] - self.texcoordArrayCount = 0 #UInt32 -## #FOR each texture coordinate array... -## self.texCoords = [] #ObjectIndex -## self.texCoordBias=[] #Float32[3] -## self.texCoordScale=[] #;Float32 -## #END -## #If a texture coordinate array has only two components, the corresponding texCoordBias[2] element -## #must be 0.0. -## #Null texture coordinate arrays are never serialized, regardless of their position. A single texture -## #coordinate array will therefore always be serialized as belonging to texturing unit 0, regardless of -## #its original unit it was assigned to. -## #There are as many references in the texture coordinates array as there are active texture units for -## #this geometry. The texture coordinate references are loaded sequentially from texture unit 0. 
If the -## #implementation supports more texture units than are specified, these are left in their default, inactive -## #state, with a null texture coordinate reference and an undefined bias and scale. -## #If more texture coordinate references are specified than are supported by the implementation, then -## #this must be treated as an error, as it would be in the API. The application can then decide on an -## #appropriate course of action to handle this case. - - def searchDeep(self,alist): - if self.positions!=None: alist = self.positions.searchDeep(alist) - if self.normals != None: alist = self.normals.searchDeep(alist) - if self.colors!= None: alist = self.colors.searchDeep(alist) - alist = doSearchDeep(self.texCoordArrays, alist) - return M3GObject3D.searchDeep(self,alist) - - def setPositions(self,aVertexArray): - self.positions = aVertexArray - self.positionBias = aVertexArray.bias - self.positionScale = aVertexArray.scale - - def writeJava(self,aWriter,aCreate): - if aCreate: - aWriter.write(2,"//VertexBuffer"+self.name ) - aWriter.write(2,"VertexBuffer BL%i = new VertexBuffer();" % (self.id)) - aWriter.write(2,"float BL%i_Bias[] = { %ff, %ff, %ff};" % - (self.id,self.positionBias[0], - self.positionBias[1],self.positionBias[2])) - aWriter.write(2,"BL%i.setPositions(BL%i,%ff,BL%i_Bias);" % - (self.id, self.positions.id, - self.positionScale,self.id)) - aWriter.write(2,"BL%i.setNormals(BL%i);" % (self.id,self.normals.id)) - #if self.colors != None: aWriter.write(2,"BL%i.setTexCoords(0,BL%i,1.0f,null);" % - # (self.id,self.colors.id)) - lIndex = 0 - for iTexCoord in self.texCoordArrays: - aWriter.write(2,"float BL%i_%i_TexBias[] = { %ff, %ff, %ff};" % - (self.id,lIndex, iTexCoord.bias[0], - iTexCoord.bias[1],iTexCoord.bias[2])) - #int index, javax.microedition.m3g.VertexArray194 texCoords, float scale, float[] bias - aWriter.write(2,"BL%i.setTexCoords(%i,BL%i,%ff,BL%i_%i_TexBias);" % - (self.id, lIndex, iTexCoord.id, iTexCoord.scale,self.id,lIndex)) - lIndex += 1 - - M3GObject3D.writeJava(self,aWriter,False) - - - def getData(self): - self.texcoordArrayCount = len(self.texCoordArrays) - data = M3GObject3D.getData(self) - data += self.defaultColor.getData() - data += struct.pack(' 0 : - value += struct.calcsize('<' + str(len(self.indices)) + 'I') - value += struct.calcsize(' 0: - value+= struct.calcsize('<'+str(len(self.stripLengths))+'I') - return value - - -class M3GAppearance(M3GObject3D): - def __init__(self): - M3GObject3D.__init__(self) - self.ObjectType=3 - self.layer=0 #Byte - self.compositingMode=None #ObjectIndex - self.fog=None #ObjectIndex - self.polygonMode=None #ObjectIndex - self.material=None #ObjectIndex - self.textures=[] #;ObjectIndex[] - - def searchDeep(self,alist): - alist = doSearchDeep([self.compositingMode,self.fog, - self.polygonMode,self.material] - + self.textures,alist) - return M3GObject3D.searchDeep(self,alist) - - def getData(self): - data = M3GObject3D.getData(self) - data += struct.pack(" 0 : - value += struct.calcsize("<"+str(len(self.textures))+'I') - return value - - - def writeJava(self,aWriter,aCreate): - if aCreate: - aWriter.write(2,"//Appearance") - aWriter.write(2,"Appearance BL%i = new Appearance();" % (self.id)) - if self.compositingMode!= None: - aWriter.write(2,"BL%i.setPolygonMode(BL%i);" % - (self.id,self.compositingMode.id)) - if self.fog!=None: - aWriter.write(2,"BL%i.setFog(BL%i);" % - (self.id,self.fog.id)) - if self.polygonMode!=None: - aWriter.write(2,"BL%i.setPolygonMode(BL%i);" % - (self.id,self.polygonMode.id)) - if 
self.material!=None: - aWriter.write(2,"BL%i.setMaterial(BL%i);" % - (self.id,self.material.id)) - i=0 - for itexture in self.textures: - aWriter.write(2,"BL%i.setTexture(%i,BL%i);" % - (self.id,i,itexture.id)) - i =+ 1 - M3GObject3D.writeJava(self,aWriter,False) - aWriter.write(2) - -class M3GTexture2D(M3GTransformable): - #M3G imposes the following restrictions when assigning textures to a model: - #The dimensions must be powers of two (4, 8, 16, 32, 64, 128...). - - WRAP_REPEAT = 241 - WRAP_CLAMP = 240 - FILTER_BASE_LEVEL=208 - FILTER_LINEAR=209 - FILTER_NEAREST=210 - FUNC_ADD=224 - FUNC_BLEND=225 - FUNC_DECAL=226 - FUNC_MODULATE=227 - FUNC_REPLACE=228 - - def __init__(self,aImage): - M3GTransformable.__init__(self) - self.ObjectType=17 - self.Image = aImage #ObjectIndex - self.blendColor=M3GColorRGB(0,0,0) - self.blending=M3GTexture2D.FUNC_MODULATE #Byte - self.wrappingS=M3GTexture2D.WRAP_REPEAT #Byte - self.wrappingT=M3GTexture2D.WRAP_REPEAT #Byte - self.levelFilter=M3GTexture2D.FILTER_BASE_LEVEL #Byte - self.imageFilter=M3GTexture2D.FILTER_NEAREST #Byte - - def searchDeep(self,alist): - alist = doSearchDeep([self.Image],alist) - return M3GTransformable.searchDeep(self,alist) - - def getData(self): - data = M3GTransformable.getData(self) - data += struct.pack('#AE# - lError = "Armature name " + name + " is not ok. Perhaps you should set option 'ExportAllAction' to false." - #print "name ", name - lLetter = name.find("#") - if lLetter == -1 :raise Exception(lError) - if name[lLetter+1]!='A': raise Exception(lError) - lName = name[lLetter+2:] - #print "lName ", lName - lLetter = lName.find("E") - #print "lLetter ", lLetter - if lLetter == -1 :raise Exception(lError) - #print "lName[:]", lName[:0] - lArmatureID = int(lName[:lLetter]) - lName = lName[lLetter+1:] - lLetter = lName.find("#") - if lLetter == -1:raise Exception(lError) - lEndFrame = int(lName[:lLetter]) - lActionID = int(lName[lLetter+1:]) - return (lArmatureID,lEndFrame,lActionID) - - - def translateWorld(self,scene): - "creates world object" - world = M3GWorld() - - #Background - world.background = M3GBackground() - blWorld= scene.world - #AllWorlds = Blender.World.Get() # Set Color - #if len(AllWorlds)>=1: # world object available - if blWorld != None: - world.background.backgroundColor=self.translateRGBA(blWorld.getHor(),0) # horizon color of the first world - if mOptions.createAmbientLight & mOptions.lightingEnabled: - lLight = M3GLight() - lLight.mode = lLight.modes['AMBIENT'] - lLight.color = self.translateRGB(blWorld.getAmb()) - self.nodes.append(lLight) - - #TODO: Set background picture from world - - return world - - def translateEmpty(self,obj): - print "translate empty ..." - mGroup = M3GGroup() - self.translateToNode(obj,mGroup) - - def translateCamera(self,obj): - print "translate camera ..." - camera = obj.getData() - if camera.getType()!=0: - print "Only perscpectiv cameras will work korrekt" - return #Type=0 'perspectiv' Camera will be translated - mCamera = M3GCamera() - mCamera.projectionType=mCamera.PERSPECTIVE - mCamera.fovy=60.0 # TODO: Calculate fovy from Blender.lens - mCamera.AspectRatio=4.0/3.0 # TODO: different in every device - mCamera.near=camera.getClipStart() - mCamera.far=camera.getClipEnd() - self.translateToNode(obj,mCamera) - self.world.activeCamera = mCamera # Last one is always the active one - - - def translateMaterials(self, aMaterial, aMesh, aMatIndex, createNormals, createUvs): - print "translate materials ..." 
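# Editor's note: translateMaterials() below doubles a counter until it reaches the image
# width/height to verify the power-of-two texture sizes M3G requires (see the history
# notes above).  An equivalent standalone check in bit-trick form; the sample size is
# illustrative.
def is_power_of_two(n):
    return n > 0 and (n & (n - 1)) == 0

width, height = 128, 64          # dimensions of a hypothetical texture
assert is_power_of_two(width) and is_power_of_two(height)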
- - mAppearance = M3GAppearance() - - if createNormals: - mMaterial = M3GMaterial() - mMaterial.name = aMaterial.name - mMaterial.diffuseColor = self.translateRGBA(aMaterial.rgbCol,1.0) #ColorRGBA - #material.specularColor= self.translateRGB(mat.specCol) #ColorRGB - mAppearance.material = mMaterial - - if createUvs: - # Search file name in mesh face. - lImage = None - for iface in aMesh.faces: - if iface.mat==aMatIndex: - if iface.image != None: - lImage = iface.image - break - if lImage==None: - raise Exception("Mesh " + aMesh.name + ": No image found for uv-texture! Perhaps no uv-coordinates ?") - - # M3G requires textures to have power-of-two dimensions. - [width, height] = lImage.getSize() - powerWidth = 1 - while (powerWidth < width): - powerWidth *= 2 - powerHeight = 1 - while (powerHeight < height): - powerHeight *= 2 - if powerWidth != width or powerHeight != height: - raise Exception("Image " + lImage.filename + ": width and height must be power-of-two!") - - # ImageFactory reuses existing images. - mImage = ImageFactory.getImage(lImage, mOptions.textureExternal) - mTexture = M3GTexture2D(mImage) - mAppearance.textures.append(mTexture) - - mPolygonMode=M3GPolygonMode() - mPolygonMode.perspectiveCorrectionEnabled = mOptions.perspectiveCorrection - if not aMesh.mode & Modes.TWOSIDED: - mPolygonMode.culling=M3GPolygonMode.CULL_BACK - else: - mPolygonMode.culling=M3GPolygonMode.CULL_NONE - if mOptions.smoothShading: - mPolygonMode.shading=M3GPolygonMode.SHADE_SMOOTH - else: - mPolygonMode.shading=M3GPolygonMode.SHADE_FLAT - - mAppearance.polygonMode = mPolygonMode - - return mAppearance - - - def translateMesh(self,obj): - print "translate mesh ..." + str(obj) - - # Mesh data. - mesh = obj.getData(False, True) # get Mesh not NMesh object - if len(mesh.faces) <= 0: # no need to process empty meshes - print "Empty mesh " + str(obj) + " not processed." - return - - vertexBuffer = M3GVertexBuffer() - positions = M3GVertexArray(3, 2) # 3 coordinates - 2 bytes - if mOptions.autoscaling: positions.useMaxPrecision(mesh.verts) - indexBuffers = [] - appearances = [] - print str(len(mesh.materials)) + " material(s) found." - - # Texture coordinates. - createUvs = False - if mOptions.textureEnabled & mesh.faceUV: - for material in mesh.materials: - if material.getMode() & Material.Modes.TEXFACE: createUvs = True; - - if createUvs: - if mOptions.autoscaling: - uvCoordinates = M3GVertexArray(2,2,True,True) #2 coordinates - 2 bytes - autoscaling - else: - uvCoordinates = M3GVertexArray(2, 2) #2 coordinates - 2 bytes - uvCoordinates.bias[0] = 0.5 - uvCoordinates.bias[1] = 0.5 - uvCoordinates.bias[2] = 0.5 - uvCoordinates.scale = 1.0/65535.0 - else: - uvCoordinates = None - - # Normals. - createNormals = False - if mOptions.lightingEnabled: - for material in mesh.materials: - if not (material.getMode() & Material.Modes.SHADELESS): createNormals = True; - - if createNormals: - normals = M3GVertexArray(3, 1) # 3 coordinates - 1 byte - else: - normals = None - - # Create a submesh for each material. - for materialIndex, material in enumerate(mesh.materials): - faces = [face for face in mesh.faces if face.mat == materialIndex] - if len(faces) >= 0: - indexBuffers.append(self.translateFaces(faces, positions, normals, uvCoordinates, createNormals, createUvs)) - appearances.append(self.translateMaterials(material, mesh, materialIndex, createNormals, createUvs)) - - # If the above didn't result in any IndexBuffer (e.g. 
there's no material), write a single IndexBuffer - # with all faces and a default Appearance. - if len(indexBuffers) == 0: - indexBuffers.append(self.translateFaces(mesh.faces, positions, normals, uvCoordinates, createNormals, createUvs)) - appearances.append(M3GAppearance()) - - vertexBuffer.setPositions(positions) - if createNormals: vertexBuffer.normals = normals - if createUvs: vertexBuffer.texCoordArrays.append(uvCoordinates) - - parent = obj.getParent() - if parent!=None and parent.getType()=='Armature': #Armatures ? - mMesh = M3GSkinnedMesh(vertexBuffer,indexBuffers,appearances) - #print"vertexBuffer.positions:",vertexBuffer.positions - print"mMesh.vertexBuffer:",mMesh.vertexBuffer - self.translateArmature(parent,obj,mMesh) - else: - mMesh = M3GMesh(vertexBuffer,indexBuffers,appearances) - - self.translateToNode(obj,mMesh) - - #Do Animation - self.translateObjectIpo(obj,mMesh) - - def translateFaces(self, faces, positions, normals, uvCoordinates, createNormals, createUvs): - """Translates a list of faces into vertex data and triangle strips.""" - - # Create vertices and triangle strips. - indices = [0, 0, 0, 0] - triangleStrips = M3GTriangleStripArray() - - for face in faces: - for vertexIndex, vertex in enumerate(face.verts): - # Find candidates for sharing (vertices with same Blender ID). - vertexCandidateIds = [int(k) for k, v in positions.blenderIndexes.items() if v == vertex.index] - - # Check normal. - if createNormals and not face.smooth: - # For solid faces, a vertex can only be shared if the the face normal is - # the same as the normal of the shared vertex. - for candidateId in vertexCandidateIds[:]: - for j in range(3): - if face.no[j]*127 != normals.components[candidateId*3 + j]: - vertexCandidateIds.remove(candidateId) - break - - # Check texture coordinates. - if createUvs: - # If texture coordinates are required, a vertex can only be shared if the - # texture coordinates match. - for candidateId in vertexCandidateIds[:]: - s = int((face.uv[vertexIndex][0]-0.5)*65535) - t = int((0.5-face.uv[vertexIndex][1])*65535) - if (s != uvCoordinates.components[candidateId*2 + 0]) or (t != uvCoordinates.components[candidateId*2 + 1]): - vertexCandidateIds.remove(candidateId) - - if len(vertexCandidateIds) > 0: - # Share the vertex. - indices[vertexIndex] = vertexCandidateIds[0] - else: - # Create new vertex. - positions.append(vertex, vertex.index) - indices[vertexIndex] = len(positions.components)/3 - 1 - - # Normal. - if createNormals: - for j in range(3): - if face.smooth: - normals.append(int(vertex.no[j]*127)) # vertex normal - else: - normals.append(int(face.no[j]*127)) # face normal - - # Texture coordinates. - if createUvs: - lUvCoordinatesFound = True - print "face.uv[vertexIndex][0]:",face.uv[vertexIndex][0] - print "face.uv[vertexIndex][1]:",face.uv[vertexIndex][1] - if mOptions.autoscaling: - uvCoordinates.append(face.uv[vertexIndex][0]) - uvCoordinates.append(face.uv[vertexIndex][1]) - else: - uvCoordinates.append(int((face.uv[vertexIndex][0]-0.5)*65535)) - # Reverse t coordinate because M3G uses a different 2D coordinate system than Blender. - uvCoordinates.append(int((0.5-face.uv[vertexIndex][1])*65535)) - - # IndexBuffer. 
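# Editor's note: the quad case below emits the face's corners in the order 1, 2, 0, 3;
# read as a triangle strip that expands to the triangles (1,2,0) and (2,0,3), i.e. the
# quad 0-1-2-3 split along its 0-2 diagonal.  A tiny helper (name illustrative) to
# unroll a strip and check the ordering outside Blender:
def strip_triangles(strip):
    return [(strip[i], strip[i + 1], strip[i + 2]) for i in range(len(strip) - 2)]

assert strip_triangles([1, 2, 0, 3]) == [(1, 2, 0), (2, 0, 3)]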
- triangleStrips.stripLengths.append(len(face.verts)) - if len(face.verts) > 3 : - triangleStrips.indices += [indices[1], indices[2], indices[0], indices[3]] # quad - else : - triangleStrips.indices += [indices[0], indices[1], indices[2]] # tri - - return triangleStrips - - - def translateObjectIpo(self,obj,aM3GObject): - if obj.getIpo() == None : return #No Ipo available - print "translate Ipo ..." - - lIpo = obj.getIpo() - self.translateIpo(lIpo,aM3GObject) - - - def translateIpo(self,aIpo,aM3GObject,aM3GAnimContr=None,aEndFrame=0): - #Print info about curves - #for iCurve in lIpo.getCurves(): - # print "Extrapolation",iCurve.getExtrapolation() #Constant, Extrapolation, Cyclic or Cyclic_extrapolation - # print "Interpolation",iCurve.getInterpolation() #Constant, Bezier, or Linear - # print "Name",iCurve.getName() - # for iPoint in iCurve.getPoints(): - # print "Knode points",iPoint.getPoints() - types = ['Loc','Rot','Size','Quat'] - - for type in types: - if aIpo.getCurve(type+'X'): - self.translateIpoCurve(aIpo,aM3GObject,type,aM3GAnimContr,aEndFrame) - - - def translateIpoCurve(self,aIpo,aM3GObject,aCurveType,aM3GAnimContr,aEndFrame=0): - - lContext = self.scene.getRenderingContext() - if aEndFrame==0: - lEndFrame = lContext.endFrame() - else: - lEndFrame = aEndFrame - - lTimePerFrame = 1.0 / lContext.framesPerSec() * 1000 - - lCurveX = aIpo.getCurve(aCurveType+'X') - lCurveY = aIpo.getCurve(aCurveType+'Y') - lCurveZ = aIpo.getCurve(aCurveType+'Z') - if aCurveType=='Quat': lCurveW = aIpo.getCurve(aCurveType+'W') - - lInterpolation = None - if aCurveType == 'Rot' or aCurveType == 'Quat': - lTrackType = M3GAnimationTrack.ORIENTATION - lNumComponents=4 - lCurveFactor= 10 #45 Degrees = 4,5 - if aCurveType == 'Quat': - lTrackType = M3GAnimationTrack.ORIENTATION - lNumComponents=4 - lCurveFactor= 1 - lInterpolation = M3GKeyframeSequence.SLERP - #lInterpolation = M3GKeyframeSequence.SQUAD - elif aCurveType == 'Size': - lTrackType = M3GAnimationTrack.SCALE - lNumComponents=3 - lCurveFactor=1 - else: - lTrackType = M3GAnimationTrack.TRANSLATION - lNumComponents=3 - lCurveFactor=1 - - mSequence = M3GKeyframeSequence(len(lCurveX.getPoints()), - lNumComponents, - lCurveX.getInterpolation(), - lInterpolation) - - #print 'ComponentCount',mSequence.componentCount - - mSequence.duration = lEndFrame * lTimePerFrame - mSequence.setRepeatMode(lCurveX.getExtrapolation()) - - lIndex = 0 - for iPoint in lCurveX.getPoints(): - lPoint = iPoint.getPoints() - - lPointList = [(lPoint[1]*lCurveFactor), - (lCurveY.evaluate(lPoint[0])*lCurveFactor), - (lCurveZ.evaluate(lPoint[0])*lCurveFactor)] - - #print "aCurveType ", aCurveType - - if aCurveType == 'Loc': - #print "PointList ", lPointList - #lorgTransVector = aM3GObject.blenderTransformMatrix.translationPart() - #ltrans = TranslationMatrix(Vector(lPointList)) - #ltrans2 = self.calculateChildMatrix(ltrans,aM3GObject.blenderTransformMatrix) - #lVector = ltrans2.translationPart() + lorgTransVector - #lPointList = [lVector.x, lVector.y,lVector.z] - #print "PointList ", lPointList - pass - - if aCurveType == 'Quat': - lPointList.append(lCurveW.evaluate(lPoint[0])*lCurveFactor) - #lQuat = Quaternion([lPointList[3],lPointList[0],lPointList[1],lPointList[2]]) - #print "Quat ", lQuat - #print "Quat.angel ", lQuat.angle - #print "Quat.axis ", lQuat.axis - #print "PointList ", lPointList - - #print "PointList",lPointList - - if aCurveType =='Rot': - lQuat = Euler(lPointList).toQuat() - #lPointList = [lQuat.w,lQuat.x,lQuat.y,lQuat.z] - lPointList = 
[lQuat.x,lQuat.y,lQuat.z,lQuat.w] - #print " Quat=", lPointList - - mSequence.setKeyframe(lIndex, - lPoint[0]*lTimePerFrame, - lPointList) - lIndex += 1 - mSequence.validRangeFirst = 0 - mSequence.validRangeLast = lIndex - 1 - - mTrack = M3GAnimationTrack(mSequence,lTrackType) - aM3GObject.animationTracks.append(mTrack) - if aM3GAnimContr==None: aM3GAnimContr = M3GAnimationController() - mTrack.animationController = aM3GAnimContr - - - def translateLamp(self,obj): - print "translate lamp ..." - lamp = obj.getData() - - #Type - lampType=lamp.getType() - if not lampType in [Lamp.Types.Lamp,Lamp.Types.Spot,Lamp.Types.Sun]: - print "INFO: Type of light is not supported. See documentation" - return #create not light; type not supported - mLight = M3GLight() - if lampType == Lamp.Types.Lamp: - mLight.mode = mLight.modes['OMNI'] - elif lampType == Lamp.Types.Spot: - mLight.mode = mLight.modes['SPOT'] - elif lampType == Lamp.Types.Sun: - mLight.mode = mLight.modes['DIRECTIONAL'] - #Attenuation (OMNI,SPOT): - if lampType in [Lamp.Types.Lamp,Lamp.Types.Spot]: - mLight.attenuationConstant = 0.0 - mLight.attenuationLinear = 2.0/lamp.dist - mLight.attenuationQuadratic = 0.0 - #Color - mLight.color = self.translateRGB(lamp.col) - #Energy - mLight.intensity = lamp.energy - #SpotAngle, SpotExponent (SPOT) - if lampType == Lamp.Types.Spot: - mLight.spotAngle = lamp.spotSize/2 - mLight.spotExponent = lamp.spotBlend - self.translateToNode(obj,mLight) - - - def translateCore(self,obj,node): - #Name - node.name = obj.name - node.userID = self.translateUserID(obj.name) - #Location - #node.translation=self.translateLoc(obj.LocX,obj.LocY,obj.LocZ - #node.hasComponentTransform=True - #Transform - #node.transform = self.translateMatrix(obj.getMatrix('localspace')) - if type(obj) is Types.BoneType: - #print "BoneMatrix ",obj.matrix['BONESPACE'] - node.transform = self.translateMatrix(obj.matrix['ARMATURESPACE']) - #'ARMATURESPACE' - this matrix of the bone in relation to the armature - #'BONESPACE' - the matrix of the bone in relation to itself - else: - node.transform = self.translateMatrix(obj.matrixWorld) - node.hasGeneralTransform=True - - - def translateToNode(self,obj,node): - self.translateCore(obj,node) - #Nodes - self.nodes.append(node) - #Link to Blender Object - node.blenderObj = obj - node.blenderMatrixWorld = obj.matrixWorld - lparent = None - if obj.getParent()!=None: - if obj.getParent().getType()!='Armature': - lparent = obj.getParent() - else: - if obj.getParent().getParent()!=None and obj.getParent().getParent().getType()!='Armature': - lparent = obj.getParent().getParent() - node.parentBlenderObj = lparent - - - def translateUserID(self, name): - id = 0 - start = name.find('#') - - # Use digits that follow the # sign for id. 
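# Editor's note: a compact approximation of the id extraction below (the digits that
# immediately follow a '#' in the object name), written with a regular expression; the
# sample name matches the 'cube#01' convention described in the history notes above.
import re

def user_id(name):
    match = re.search(r'#(\d+)', name)
    return int(match.group(1)) if match else 0

assert user_id('cube#01') == 1 and user_id('plain_name') == 0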
- if start != -1: - start += 1 - end = start - for char in name[start:]: - if char.isdigit(): - end += 1 - else: - break - - if end > start: - id = int(name[start:end]) - - return id - - def translateLoc(self,aLocX,aLocY,aLocZ): - return M3GVector3D(aLocX,aLocY,aLocZ) - - def translateRGB(self,color): - return M3GColorRGB(int(color[0]*255), - int(color[1]*255), - int(color[2]*255)) - - def translateRGBA(self,color,alpha): - return M3GColorRGBA(int(color[0]*255), - int(color[1]*255), - int(color[2]*255), - int(alpha*255)) - - def translateMatrix(self,aPyMatrix): - """ -  0   1   2   3  - 4   5   6   7  - 8   9  10  11 - 12  13  14  15 """ - #print "Matrix:", aPyMatrix - lMatrix = M3GMatrix() - lMatrix.elements[0] = aPyMatrix[0][0] - lMatrix.elements[1] = aPyMatrix[1][0] - lMatrix.elements[2] = aPyMatrix[2][0] - lMatrix.elements[3] = aPyMatrix[3][0] - lMatrix.elements[4] = aPyMatrix[0][1] - lMatrix.elements[5] = aPyMatrix[1][1] - lMatrix.elements[6] = aPyMatrix[2][1] - lMatrix.elements[7] = aPyMatrix[3][1] - lMatrix.elements[8] = aPyMatrix[0][2] - lMatrix.elements[9] = aPyMatrix[1][2] - lMatrix.elements[10] = aPyMatrix[2][2] - lMatrix.elements[11] = aPyMatrix[3][2] - lMatrix.elements[12] = aPyMatrix[0][3] - lMatrix.elements[13] = aPyMatrix[1][3] - lMatrix.elements[14] = aPyMatrix[2][3] - lMatrix.elements[15] = aPyMatrix[3][3] - return lMatrix - - -# ---- Exporter ---------------------------------------------------------------- # - -class M3GExporter: - "Exports Blender-Scene to M3D" - def __init__(self, aWriter): - self.writer = aWriter - - - def start(self): - print "Info: starting export ..." - #rpdb2.start_embedded_debugger("t",True) - Translator = M3GTranslator() - world = Translator.start() - - #sys.settrace(tracer) - exportList = self.createDeepSearchList(world) - externalReferences = [element for element in exportList if element.__class__ is M3GExternalReference] - exportList = [element for element in exportList if element.__class__ is not M3GExternalReference] - #sys.settrace(None) - - # 1 is reservated for HeaderObject. - i=1 - - # Next are the external references. - for element in externalReferences: - i += 1 - element.id = i - print "object ",element.id, element - - # And the standard scene objects. 
- for element in exportList: - i += 1 - element.id = i - print "object ",element.id, element - - self.writer.writeFile(world, exportList, externalReferences) - - print("Ready!") - - - def createDeepSearchList(self,aWorld): - "creates the right order for saving m3g : leafs first" - return aWorld.searchDeep([]) - - - -# ---- Writer ---------------------------------------------------------------- # -class JavaWriter: - "writes a java class which creates m3g-Scene in a j2me programm" - def __init__(self,aFilename): - self.filename = aFilename - self.classname = Blender.sys.basename(aFilename) - self.classname = self.classname[:-5] #without extention ".java" - self.outFile = file(aFilename,"w") - - def write(self, tab, zeile=""): - "writes to file" - #print "\t" * tab + zeile - print >>self.outFile, "\t" * tab + zeile - - def writeFile(self,aWorld,aExportList,externalReferences): - self.world = aWorld - self.writeHeader() - for element in aExportList: - element.writeJava(self,True) - self.writeFooter() - self.outFile.close() - - def writeHeader(self): - "writes class header" - self.write(0,"import javax.microedition.lcdui.Image;") - self.write(0,"import javax.microedition.m3g.*;") - self.write(0,"public final class "+self.classname+" {") - self.write(1,"public static World getRoot(Canvas3D aCanvas) {") - - def writeFooter(self): - self.write(1) - self.write(1,"return BL"+str(self.world.id)+";") - self.write(0,"}}") - - def writeList(self,alist,numberOfElementsPerLine=12,aType=""): - '''Writes numberOfElementsPerLine''' - line="" - lastLine="" - counter=0 - for element in alist: - if counter!=0: - line = line + "," + str(element) + aType - else: - line = str(element) + aType - counter = counter + 1 - if counter == numberOfElementsPerLine: - if len(lastLine)>0: - self.write(3,lastLine+",") - lastLine=line - line="" - counter = 0 - if len(lastLine)>0: - if len(line)>0: - self.write(3,lastLine+",") - else: - self.write(3,lastLine) - if len(line) > 0: self.write(3,line) - - def writeClass(self,aName,aM3GObject): - self.write(2) - self.write(2,"//"+aName+":"+aM3GObject.name) - - -class M3GSectionObject: - def __init__(self,aObject): - """Object Structure - Each object in the file represents one object in the - scene graph tree, and is stored in a chunk. The - structure of an object chunk is as follows: - Byte ObjectType - UInt32 Length - Byte[] Data""" - #ObjectType - #This field describes what type of object has been serialized. - #The values 0 and 0xFF are special: 0 represents the header object, - #and 0xFF represents an external reference. 
- #Example: Byte ObjectType = 14 - self.ObjectType = aObject.ObjectType - self.data = aObject.getData() - self.length = aObject.getDataLength() - - def getData(self): - data = struct.pack(' 2,1,0 - for v in f.v[2::-1]: - file.write(format_vec % tuple(v.co) ) - - try: mode= f.mode - except: mode= 0 - - if mode & Mesh.FaceModes.INVISIBLE: - file.write(PREF_INVIS_TEX.val) - else: - try: image= f.image - except: image= None - - if image: file.write(sys.splitext(sys.basename(image.filename))[0]) - else: file.write(PREF_NULL_TEX.val) - - # Texture stuff ignored for now - file.write(PREF_DEF_TEX_OPTS.val) - file.write('}\n') - - -def round_vec(v): - if PREF_GRID_SNAP.val: - return round(v.x), round(v.y), round(v.z) - else: - return tuple(v) - -def write_face2brush(file, face): - ''' - takes a face and writes it as a brush - each face is a cube/brush - ''' - - if PREF_GRID_SNAP.val: format_vec= '( %d %d %d ) ' - else: format_vec= '( %.8f %.8f %.8f ) ' - - - image_text= PREF_NULL_TEX.val - - try: mode= face.mode - except: mode= 0 - - if mode & Mesh.FaceModes.INVISIBLE: - image_text= PREF_INVIS_TEX.val - else: - try: image= face.image - except: image= None - if image: image_text = sys.splitext(sys.basename(image.filename))[0] - - # original verts as tuples for writing - orig_vco= [tuple(v.co) for v in face] - - # new verts that give the face a thickness - dist= PREF_SCALE.val * PREF_FACE_THICK.val - new_vco= [round_vec(v.co - (v.no * dist)) for v in face] - #new_vco= [round_vec(v.co - (face.no * dist)) for v in face] - - file.write('// brush from face\n{\n') - # front - for co in orig_vco[2::-1]: - file.write(format_vec % co ) - file.write(image_text) - # Texture stuff ignored for now - file.write(PREF_DEF_TEX_OPTS.val) - - - for co in new_vco[:3]: - file.write(format_vec % co ) - if mode & Mesh.FaceModes.TWOSIDE: - file.write(image_text) - else: - file.write(PREF_INVIS_TEX.val) - - # Texture stuff ignored for now - file.write(PREF_DEF_TEX_OPTS.val) - - # sides. - if len(orig_vco)==3: # Tri, it seemms tri brushes are supported. - index_pairs= ((0,1), (1,2), (2,0)) - else: - index_pairs= ((0,1), (1,2), (2,3), (3,0)) - - for i1, i2 in index_pairs: - for co in orig_vco[i1], orig_vco[i2], new_vco[i2]: - file.write( format_vec % co ) - file.write(PREF_INVIS_TEX.val) - file.write(PREF_DEF_TEX_OPTS.val) - - file.write('}\n') - -def is_cube_facegroup(faces): - ''' - Returens a bool, true if the faces make up a cube - ''' - # cube must have 6 faces - if len(faces) != 6: - print '1' - return False - - # Check for quads and that there are 6 unique verts - verts= {} - for f in faces: - if len(f)!= 4: - return False - - for v in f: - verts[v.index]= 0 - - if len(verts) != 8: - return False - - # Now check that each vert has 3 face users - for f in faces: - for v in f: - verts[v.index] += 1 - - for v in verts.itervalues(): - if v != 3: # vert has 3 users? - return False - - # Could we check for 12 unique edges??, probably not needed. 
- return True - -def is_tricyl_facegroup(faces): - ''' - is the face group a tri cylinder - Returens a bool, true if the faces make an extruded tri solid - ''' - - # cube must have 5 faces - if len(faces) != 5: - print '1' - return False - - # Check for quads and that there are 6 unique verts - verts= {} - tottri= 0 - for f in faces: - if len(f)== 3: - tottri+=1 - - for v in f: - verts[v.index]= 0 - - if len(verts) != 6 or tottri != 2: - return False - - # Now check that each vert has 3 face users - for f in faces: - for v in f: - verts[v.index] += 1 - - for v in verts.itervalues(): - if v != 3: # vert has 3 users? - return False - - # Could we check for 12 unique edges??, probably not needed. - return True - -def write_node_map(file, ob): - ''' - Writes the properties of an object (empty in this case) - as a MAP node as long as it has the property name - classname - returns True/False based on weather a node was written - ''' - props= [(p.name, p.data) for p in ob.game_properties] - - IS_MAP_NODE= False - for name, value in props: - if name=='classname': - IS_MAP_NODE= True - break - - if not IS_MAP_NODE: - return False - - # Write a node - file.write('{\n') - for name_value in props: - file.write('"%s" "%s"\n' % name_value) - if PREF_GRID_SNAP.val: - file.write('"origin" "%d %d %d"\n' % tuple([round(axis*PREF_SCALE.val) for axis in ob.getLocation('worldspace')]) ) - else: - file.write('"origin" "%.6f %.6f %.6f"\n' % tuple([axis*PREF_SCALE.val for axis in ob.getLocation('worldspace')]) ) - file.write('}\n') - return True - - -def export_map(filepath): - - pup_block = [\ - ('Scale:', PREF_SCALE, 1, 1000, 'Scale the blender scene by this value.'),\ - ('Face Width:', PREF_FACE_THICK, 0.01, 10, 'Thickness of faces exported as brushes.'),\ - ('Grid Snap', PREF_GRID_SNAP, 'snaps floating point values to whole numbers.'),\ - 'Null Texture',\ - ('', PREF_NULL_TEX, 1, 128, 'Export textureless faces with this texture'),\ - 'Unseen Texture',\ - ('', PREF_INVIS_TEX, 1, 128, 'Export invisible faces with this texture'),\ - ] - - if not Draw.PupBlock('map export', pup_block): - return - - Window.WaitCursor(1) - time= sys.time() - print 'Map Exporter 0.0' - file= open(filepath, 'w') - - - obs_mesh= [] - obs_lamp= [] - obs_surf= [] - obs_empty= [] - - SCALE_MAT= Mathutils.Matrix() - SCALE_MAT[0][0]= SCALE_MAT[1][1]= SCALE_MAT[2][2]= PREF_SCALE.val - - dummy_mesh= Mesh.New() - - TOTBRUSH= TOTLAMP= TOTNODE= 0 - - for ob in Object.GetSelected(): - type= ob.type - if type == 'Mesh': obs_mesh.append(ob) - elif type == 'Surf': obs_surf.append(ob) - elif type == 'Lamp': obs_lamp.append(ob) - elif type == 'Empty': obs_empty.append(ob) - - if obs_mesh or obs_surf: - # brushes and surf's must be under worldspan - file.write('\n// entity 0\n') - file.write('{\n') - file.write('"classname" "worldspawn"\n') - - - print '\twriting cubes from meshes' - for ob in obs_mesh: - dummy_mesh.getFromObject(ob.name) - - #print len(mesh_split2connected(dummy_mesh)) - - # Is the object 1 cube? 
- object-is-a-brush - dummy_mesh.transform(ob.matrixWorld*SCALE_MAT) # 1 to tx the normals also - - if PREF_GRID_SNAP.val: - for v in dummy_mesh.verts: - co= v.co - co.x= round(co.x) - co.y= round(co.y) - co.z= round(co.z) - - # High quality normals - BPyMesh.meshCalcNormals(dummy_mesh) - - # Split mesh into connected regions - for face_group in BPyMesh.mesh2linkedFaces(dummy_mesh): - if is_cube_facegroup(face_group): - write_cube2brush(file, face_group) - TOTBRUSH+=1 - elif is_tricyl_facegroup(face_group): - write_cube2brush(file, face_group) - TOTBRUSH+=1 - else: - for f in face_group: - write_face2brush(file, f) - TOTBRUSH+=1 - - #print 'warning, not exporting "%s" it is not a cube' % ob.name - - - dummy_mesh.verts= None - - - valid_dims= 3,5,7,9,11,13,15 - for ob in obs_surf: - ''' - Surf, patches - ''' - surf_name= ob.getData(name_only=1) - data= Curve.Get(surf_name) - mat = ob.matrixWorld*SCALE_MAT - - # This is what a valid patch looks like - - """ -// brush 0 -{ -patchDef2 -{ -NULL -( 3 3 0 0 0 ) -( -( ( -64 -64 0 0 0 ) ( -64 0 0 0 -2 ) ( -64 64 0 0 -4 ) ) -( ( 0 -64 0 2 0 ) ( 0 0 0 2 -2 ) ( 0 64 0 2 -4 ) ) -( ( 64 -64 0 4 0 ) ( 64 0 0 4 -2 ) ( 80 88 0 4 -4 ) ) -) -} -} - """ - for i, nurb in enumerate(data): - u= nurb.pointsU - v= nurb.pointsV - if u in valid_dims and v in valid_dims: - - file.write('// brush %d surf_name\n' % i) - file.write('{\n') - file.write('patchDef2\n') - file.write('{\n') - file.write('NULL\n') - file.write('( %d %d 0 0 0 )\n' % (u, v) ) - file.write('(\n') - - u_iter = 0 - for p in nurb: - - if u_iter == 0: - file.write('(') - - u_iter += 1 - - # add nmapping 0 0 ? - if PREF_GRID_SNAP.val: - file.write(' ( %d %d %d 0 0 )' % round_vec(Mathutils.Vector(p[0:3]) * mat)) - else: - file.write(' ( %.6f %.6f %.6f 0 0 )' % tuple(Mathutils.Vector(p[0:3]) * mat)) - - # Move to next line - if u_iter == u: - file.write(' )\n') - u_iter = 0 - - file.write(')\n') - file.write('}\n') - file.write('}\n') - - - # Debugging - # for p in nurb: print 'patch', p - - else: - print "NOT EXPORTING PATCH", surf_name, u,v, 'Unsupported' - - - if obs_mesh or obs_surf: - file.write('}\n') # end worldspan - - - print '\twriting lamps' - for ob in obs_lamp: - print '\t\t%s' % ob.name - lamp= ob.data - file.write('{\n') - file.write('"classname" "light"\n') - file.write('"light" "%.6f"\n' % (lamp.dist* PREF_SCALE.val)) - if PREF_GRID_SNAP.val: - file.write('"origin" "%d %d %d"\n' % tuple([round(axis*PREF_SCALE.val) for axis in ob.getLocation('worldspace')]) ) - else: - file.write('"origin" "%.6f %.6f %.6f"\n' % tuple([axis*PREF_SCALE.val for axis in ob.getLocation('worldspace')]) ) - file.write('"_color" "%.6f %.6f %.6f"\n' % tuple(lamp.col)) - file.write('"style" "0"\n') - file.write('}\n') - TOTLAMP+=1 - - - print '\twriting empty objects as nodes' - for ob in obs_empty: - if write_node_map(file, ob): - print '\t\t%s' % ob.name - TOTNODE+=1 - else: - print '\t\tignoring %s' % ob.name - - Window.WaitCursor(0) - - print 'Exported Map in %.4fsec' % (sys.time()-time) - print 'Brushes: %d Nodes: %d Lamps %d\n' % (TOTBRUSH, TOTNODE, TOTLAMP) - - -def main(): - Window.FileSelector(export_map, 'EXPORT MAP', '*.map') - -if __name__ == '__main__': main() -# export_map('/foo.map') diff --git a/release/scripts/export_mdd.py b/release/scripts/export_mdd.py deleted file mode 100644 index 4f99c9175fd..00000000000 --- a/release/scripts/export_mdd.py +++ /dev/null @@ -1,168 +0,0 @@ -#!BPY - -""" - Name: 'Vertex Keyframe Animation (.mdd)...' 
- Blender: 242 - Group: 'Export' - Tooltip: 'Animated mesh to MDD vertex keyframe file.' -""" - -__author__ = "Bill L.Nieuwendorp" -__bpydoc__ = """\ -This script Exports Lightwaves MotionDesigner format. - -The .mdd format has become quite a popular Pipeline format
-for moving animations from package to package. - -Be sure not to use modifiers that change the number or order of verts in the mesh -""" -#Please send any fixes,updates,bugs to Slow67_at_Gmail.com or cbarton_at_metavr.com -#Bill Niewuendorp -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** - -import bpy -import Blender -from Blender import * -import BPyMessages -try: - from struct import pack -except: - pack = None - - -def zero_file(filepath): - ''' - If a file fails, this replaces it with 1 char, better not remove it? - ''' - file = open(filepath, 'w') - file.write('\n') # aparently macosx needs some data in a blank file? - file.close() - - -def check_vertcount(mesh,vertcount): - ''' - check and make sure the vertcount is consistent throghout the frame range - ''' - if len(mesh.verts) != vertcount: - Blender.Draw.PupMenu('Error%t|Number of verts has changed during animation|cannot export') - f.close() - zero_file(filepath) - return - - -def mdd_export(filepath, ob, PREF_STARTFRAME, PREF_ENDFRAME, PREF_FPS): - - Window.EditMode(0) - Blender.Window.WaitCursor(1) - mesh_orig = Mesh.New() - mesh_orig.getFromObject(ob.name) - - #Flip y and z - ''' - mat = Mathutils.Matrix() - mat[2][2] = -1 - rotmat = Mathutils.RotationMatrix(90, 4, 'x') - mat_flip = mat*rotmat - ''' - # Above results in this matrix - mat_flip= Mathutils.Matrix(\ - [1.0, 0.0, 0.0, 0.0],\ - [0.0, 0.0, 1.0, 0.0],\ - [0.0, 1.0, 0.0, 0.0],\ - [0.0, 0.0, 0.0, 1.0],\ - ) - - me_tmp = Mesh.New() # container mesh - - numverts = len(mesh_orig.verts) - numframes = PREF_ENDFRAME-PREF_STARTFRAME+1 - PREF_FPS= float(PREF_FPS) - f = open(filepath, 'wb') #no Errors yet:Safe to create file - - # Write the header - f.write(pack(">2i", numframes, numverts)) - - # Write the frame times (should we use the time IPO??) 
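# Editor's note: a standalone sketch of the .mdd layout written above and below -- all
# values big-endian: an int pair (frame count, vertex count), one float per frame giving
# its time in seconds, then three floats (x, y, z) per vertex for every frame.  The
# two-frame, two-vertex sample data and the output filename are made up.
import struct

frames = [[(0.0, 0.0, 0.0), (1.0, 0.0, 0.0)],     # frame 0: two vertices
          [(0.0, 0.1, 0.0), (1.0, 0.1, 0.0)]]     # frame 1: the same vertices, displaced
fps = 25.0

out = open('sample.mdd', 'wb')
out.write(struct.pack('>2i', len(frames), len(frames[0])))
out.write(struct.pack('>%df' % len(frames), *[i / fps for i in range(len(frames))]))
for frame in frames:
    out.write(struct.pack('>%df' % (len(frame) * 3), *[c for v in frame for c in v]))
out.close()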
- f.write( pack(">%df" % (numframes), *[frame/PREF_FPS for frame in xrange(numframes)]) ) # seconds - - #rest frame needed to keep frames in sync - Blender.Set('curframe', PREF_STARTFRAME) - me_tmp.getFromObject(ob.name) - check_vertcount(me_tmp,numverts) - me_tmp.transform(ob.matrixWorld * mat_flip) - f.write(pack(">%df" % (numverts*3), *[axis for v in me_tmp.verts for axis in v.co])) - me_tmp.verts= None - - for frame in xrange(PREF_STARTFRAME,PREF_ENDFRAME+1):#in order to start at desired frame - Blender.Set('curframe', frame) - - me_tmp.getFromObject(ob.name) - - check_vertcount(me_tmp,numverts) - - me_tmp.transform(ob.matrixWorld * mat_flip) - - # Write the vertex data - f.write(pack(">%df" % (numverts*3), *[axis for v in me_tmp.verts for axis in v.co])) - - me_tmp.verts= None - f.close() - - print'MDD Exported: %s frames:%d\n'% (filepath, numframes-1) - Blender.Window.WaitCursor(0) - - -def mdd_export_ui(filepath): - # Dont overwrite - if not BPyMessages.Warning_SaveOver(filepath): - return - - scn= bpy.data.scenes.active - ob_act= scn.objects.active - if not ob_act or ob_act.type != 'Mesh': - BPyMessages.Error_NoMeshActive() - - ctx = scn.getRenderingContext() - orig_frame = Blender.Get('curframe') - PREF_STARTFRAME= Blender.Draw.Create(int(ctx.startFrame())) - PREF_ENDFRAME= Blender.Draw.Create(int(ctx.endFrame())) - PREF_FPS= Blender.Draw.Create(ctx.fps) - - block = [\ - ("Start Frame: ", PREF_STARTFRAME, 1, 30000, "Start Bake from what frame?: Default 1"),\ - ("End Frame: ", PREF_ENDFRAME, 1, 30000, "End Bake on what Frame?"),\ - ("FPS: ", PREF_FPS, 1, 100, "Frames per second")\ - ] - - if not Blender.Draw.PupBlock("Export MDD", block): - return - - PREF_STARTFRAME, PREF_ENDFRAME=\ - min(PREF_STARTFRAME.val, PREF_ENDFRAME.val),\ - max(PREF_STARTFRAME.val, PREF_ENDFRAME.val) - - print (filepath, ob_act, PREF_STARTFRAME, PREF_ENDFRAME, PREF_FPS.val) - mdd_export(filepath, ob_act, PREF_STARTFRAME, PREF_ENDFRAME, PREF_FPS.val) - Blender.Set('curframe', orig_frame) - -if __name__=='__main__': - if not pack: - Draw.PupMenu('Error%t|This script requires a full python install') - - Blender.Window.FileSelector(mdd_export_ui, 'EXPORT MDD', sys.makename(ext='.mdd')) \ No newline at end of file diff --git a/release/scripts/export_obj.py b/release/scripts/export_obj.py deleted file mode 100644 index 7dffb5d2048..00000000000 --- a/release/scripts/export_obj.py +++ /dev/null @@ -1,933 +0,0 @@ -#!BPY - -""" -Name: 'Wavefront (.obj)...' -Blender: 249 -Group: 'Export' -Tooltip: 'Save a Wavefront OBJ File' -""" - -__author__ = "Campbell Barton, Jiri Hnidek, Paolo Ciccone" -__url__ = ['http://wiki.blender.org/index.php/Scripts/Manual/Export/wavefront_obj', 'www.blender.org', 'blenderartists.org'] -__version__ = "1.22" - -__bpydoc__ = """\ -This script is an exporter to OBJ file format. - -Usage: - -Select the objects you wish to export and run this script from "File->Export" menu. -Selecting the default options from the popup box will be good in most cases. -All objects that can be represented as a mesh (mesh, curve, metaball, surface, text3d) -will be exported as mesh data. -""" - - -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Script copyright (C) Campbell J Barton 2007-2009 -# - V1.22- bspline import/export added (funded by PolyDimensions GmbH) -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - - -import Blender -from Blender import Mesh, Scene, Window, sys, Image, Draw -import BPyMesh -import BPyObject -import BPySys -import BPyMessages - -# Returns a tuple - path,extension. -# 'hello.obj' > ('hello', '.obj') -def splitExt(path): - dotidx = path.rfind('.') - if dotidx == -1: - return path, '' - else: - return path[:dotidx], path[dotidx:] - -def fixName(name): - if name == None: - return 'None' - else: - return name.replace(' ', '_') - -# A Dict of Materials -# (material.name, image.name):matname_imagename # matname_imagename has gaps removed. -MTL_DICT = {} - -def write_mtl(filename): - - world = Blender.World.GetCurrent() - if world: - worldAmb = world.getAmb() - else: - worldAmb = (0,0,0) # Default value - - file = open(filename, "w") - file.write('# Blender3D MTL File: %s\n' % Blender.Get('filename').split('\\')[-1].split('/')[-1]) - file.write('# Material Count: %i\n' % len(MTL_DICT)) - # Write material/image combinations we have used. - for key, (mtl_mat_name, mat, img) in MTL_DICT.iteritems(): - - # Get the Blender data for the material and the image. - # Having an image named None will make a bug, dont do it :) - - file.write('newmtl %s\n' % mtl_mat_name) # Define a new material: matname_imgname - - if mat: - file.write('Ns %.6f\n' % ((mat.getHardness()-1) * 1.9607843137254901) ) # Hardness, convert blenders 1-511 to MTL's - file.write('Ka %.6f %.6f %.6f\n' % tuple([c*mat.amb for c in worldAmb]) ) # Ambient, uses mirror colour, - file.write('Kd %.6f %.6f %.6f\n' % tuple([c*mat.ref for c in mat.rgbCol]) ) # Diffuse - file.write('Ks %.6f %.6f %.6f\n' % tuple([c*mat.spec for c in mat.specCol]) ) # Specular - file.write('Ni %.6f\n' % mat.IOR) # Refraction index - file.write('d %.6f\n' % mat.alpha) # Alpha (obj uses 'd' for dissolve) - - # 0 to disable lighting, 1 for ambient & diffuse only (specular color set to black), 2 for full lighting. - if mat.getMode() & Blender.Material.Modes['SHADELESS']: - file.write('illum 0\n') # ignore lighting - elif mat.getSpec() == 0: - file.write('illum 1\n') # no specular. - else: - file.write('illum 2\n') # light normaly - - else: - #write a dummy material here? - file.write('Ns 0\n') - file.write('Ka %.6f %.6f %.6f\n' % tuple([c for c in worldAmb]) ) # Ambient, uses mirror colour, - file.write('Kd 0.8 0.8 0.8\n') - file.write('Ks 0.8 0.8 0.8\n') - file.write('d 1\n') # No alpha - file.write('illum 2\n') # light normaly - - # Write images! - if img: # We have an image on the face! - file.write('map_Kd %s\n' % img.filename.split('\\')[-1].split('/')[-1]) # Diffuse mapping image - - elif mat: # No face image. if we havea material search for MTex image. - for mtex in mat.getTextures(): - if mtex and mtex.tex.type == Blender.Texture.Types.IMAGE: - try: - filename = mtex.tex.image.filename.split('\\')[-1].split('/')[-1] - file.write('map_Kd %s\n' % filename) # Diffuse mapping image - break - except: - # Texture has no image though its an image type, best ignore. 
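As a side note on the 'Ns' line in write_mtl above: the constant 1.9607843137254901 is simply 1000/510, rescaling Blender's 1..511 hardness onto the MTL 0..1000 range; for example:

(1 - 1) * (1000.0 / 510.0)      # hardness 1   -> Ns 0.0
(256 - 1) * (1000.0 / 510.0)    # hardness 256 -> Ns 500.0
(511 - 1) * (1000.0 / 510.0)    # hardness 511 -> Ns 1000.0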
- pass - - file.write('\n\n') - - file.close() - -def copy_file(source, dest): - file = open(source, 'rb') - data = file.read() - file.close() - - file = open(dest, 'wb') - file.write(data) - file.close() - - -def copy_images(dest_dir): - if dest_dir[-1] != sys.sep: - dest_dir += sys.sep - - # Get unique image names - uniqueImages = {} - for matname, mat, image in MTL_DICT.itervalues(): # Only use image name - # Get Texface images - if image: - uniqueImages[image] = image # Should use sets here. wait until Python 2.4 is default. - - # Get MTex images - if mat: - for mtex in mat.getTextures(): - if mtex and mtex.tex.type == Blender.Texture.Types.IMAGE: - image_tex = mtex.tex.image - if image_tex: - try: - uniqueImages[image_tex] = image_tex - except: - pass - - # Now copy images - copyCount = 0 - - for bImage in uniqueImages.itervalues(): - image_path = sys.expandpath(bImage.filename) - if sys.exists(image_path): - # Make a name for the target path. - dest_image_path = dest_dir + image_path.split('\\')[-1].split('/')[-1] - if not sys.exists(dest_image_path): # Image isnt alredy there - print '\tCopying "%s" > "%s"' % (image_path, dest_image_path) - copy_file(image_path, dest_image_path) - copyCount+=1 - print '\tCopied %d images' % copyCount - - -def test_nurbs_compat(ob): - if ob.type != 'Curve': - return False - - for nu in ob.data: - if (not nu.knotsV) and nu.type != 1: # not a surface and not bezier - return True - - return False - -def write_nurb(file, ob, ob_mat): - tot_verts = 0 - cu = ob.data - - # use negative indices - Vector = Blender.Mathutils.Vector - for nu in cu: - - if nu.type==0: DEG_ORDER_U = 1 - else: DEG_ORDER_U = nu.orderU-1 # Tested to be correct - - if nu.type==1: - print "\tWarning, bezier curve:", ob.name, "only poly and nurbs curves supported" - continue - - if nu.knotsV: - print "\tWarning, surface:", ob.name, "only poly and nurbs curves supported" - continue - - if len(nu) <= DEG_ORDER_U: - print "\tWarning, orderU is lower then vert count, skipping:", ob.name - continue - - pt_num = 0 - do_closed = (nu.flagU & 1) - do_endpoints = (do_closed==0) and (nu.flagU & 2) - - for pt in nu: - pt = Vector(pt[0], pt[1], pt[2]) * ob_mat - file.write('v %.6f %.6f %.6f\n' % (pt[0], pt[1], pt[2])) - pt_num += 1 - tot_verts += pt_num - - file.write('g %s\n' % (fixName(ob.name))) # fixName(ob.getData(1)) could use the data name too - file.write('cstype bspline\n') # not ideal, hard coded - file.write('deg %d\n' % DEG_ORDER_U) # not used for curves but most files have it still - - curve_ls = [-(i+1) for i in xrange(pt_num)] - - # 'curv' keyword - if do_closed: - if DEG_ORDER_U == 1: - pt_num += 1 - curve_ls.append(-1) - else: - pt_num += DEG_ORDER_U - curve_ls = curve_ls + curve_ls[0:DEG_ORDER_U] - - file.write('curv 0.0 1.0 %s\n' % (' '.join( [str(i) for i in curve_ls] ))) # Blender has no U and V values for the curve - - # 'parm' keyword - tot_parm = (DEG_ORDER_U + 1) + pt_num - tot_parm_div = float(tot_parm-1) - parm_ls = [(i/tot_parm_div) for i in xrange(tot_parm)] - - if do_endpoints: # end points, force param - for i in xrange(DEG_ORDER_U+1): - parm_ls[i] = 0.0 - parm_ls[-(1+i)] = 1.0 - - file.write('parm u %s\n' % ' '.join( [str(i) for i in parm_ls] )) - - file.write('end\n') - - return tot_verts - -def write(filename, objects,\ -EXPORT_TRI=False, EXPORT_EDGES=False, EXPORT_NORMALS=False, EXPORT_NORMALS_HQ=False,\ -EXPORT_UV=True, EXPORT_MTL=True, EXPORT_COPY_IMAGES=False,\ -EXPORT_APPLY_MODIFIERS=True, EXPORT_ROTX90=True, EXPORT_BLEN_OBS=True,\ -EXPORT_GROUP_BY_OB=False, 
EXPORT_GROUP_BY_MAT=False, EXPORT_KEEP_VERT_ORDER=False,\ -EXPORT_POLYGROUPS=False, EXPORT_CURVE_AS_NURBS=True): - ''' - Basic write function. The context and options must be alredy set - This can be accessed externaly - eg. - write( 'c:\\test\\foobar.obj', Blender.Object.GetSelected() ) # Using default options. - ''' - - def veckey3d(v): - return round(v.x, 6), round(v.y, 6), round(v.z, 6) - - def veckey2d(v): - return round(v.x, 6), round(v.y, 6) - - def findVertexGroupName(face, vWeightMap): - """ - Searches the vertexDict to see what groups is assigned to a given face. - We use a frequency system in order to sort out the name because a given vetex can - belong to two or more groups at the same time. To find the right name for the face - we list all the possible vertex group names with their frequency and then sort by - frequency in descend order. The top element is the one shared by the highest number - of vertices is the face's group - """ - weightDict = {} - for vert in face: - vWeights = vWeightMap[vert.index] - for vGroupName, weight in vWeights: - weightDict[vGroupName] = weightDict.get(vGroupName, 0) + weight - - if weightDict: - alist = [(weight,vGroupName) for vGroupName, weight in weightDict.iteritems()] # sort least to greatest amount of weight - alist.sort() - return(alist[-1][1]) # highest value last - else: - return '(null)' - - - print 'OBJ Export path: "%s"' % filename - temp_mesh_name = '~tmp-mesh' - - time1 = sys.time() - scn = Scene.GetCurrent() - - file = open(filename, "w") - - # Write Header - file.write('# Blender3D v%s OBJ File: %s\n' % (Blender.Get('version'), Blender.Get('filename').split('/')[-1].split('\\')[-1] )) - file.write('# www.blender3d.org\n') - - # Tell the obj file what material file to use. - if EXPORT_MTL: - mtlfilename = '%s.mtl' % '.'.join(filename.split('.')[:-1]) - file.write('mtllib %s\n' % ( mtlfilename.split('\\')[-1].split('/')[-1] )) - - # Get the container mesh. - used for applying modifiers and non mesh objects. - containerMesh = meshName = tempMesh = None - for meshName in Blender.NMesh.GetNames(): - if meshName.startswith(temp_mesh_name): - tempMesh = Mesh.Get(meshName) - if not tempMesh.users: - containerMesh = tempMesh - if not containerMesh: - containerMesh = Mesh.New(temp_mesh_name) - - if EXPORT_ROTX90: - mat_xrot90= Blender.Mathutils.RotationMatrix(-90, 4, 'x') - - del meshName - del tempMesh - - # Initialize totals, these are updated each object - totverts = totuvco = totno = 1 - - face_vert_index = 1 - - globalNormals = {} - - # Get all meshes - for ob_main in objects: - for ob, ob_mat in BPyObject.getDerivedObjects(ob_main): - - # Nurbs curve support - if EXPORT_CURVE_AS_NURBS and test_nurbs_compat(ob): - if EXPORT_ROTX90: - ob_mat = ob_mat * mat_xrot90 - - totverts += write_nurb(file, ob, ob_mat) - - continue - # end nurbs - - # Will work for non meshes now! :) - # getMeshFromObject(ob, container_mesh=None, apply_modifiers=True, vgroups=True, scn=None) - me= BPyMesh.getMeshFromObject(ob, containerMesh, EXPORT_APPLY_MODIFIERS, EXPORT_POLYGROUPS, scn) - if not me: - continue - - if EXPORT_UV: - faceuv= me.faceUV - else: - faceuv = False - - # We have a valid mesh - if EXPORT_TRI and me.faces: - # Add a dummy object to it. 
- has_quads = False - for f in me.faces: - if len(f) == 4: - has_quads = True - break - - if has_quads: - oldmode = Mesh.Mode() - Mesh.Mode(Mesh.SelectModes['FACE']) - - me.sel = True - tempob = scn.objects.new(me) - me.quadToTriangle(0) # more=0 shortest length - oldmode = Mesh.Mode(oldmode) - scn.objects.unlink(tempob) - - Mesh.Mode(oldmode) - - # Make our own list so it can be sorted to reduce context switching - faces = [ f for f in me.faces ] - - if EXPORT_EDGES: - edges = me.edges - else: - edges = [] - - if not (len(faces)+len(edges)+len(me.verts)): # Make sure there is somthing to write - continue # dont bother with this mesh. - - if EXPORT_ROTX90: - me.transform(ob_mat*mat_xrot90) - else: - me.transform(ob_mat) - - # High Quality Normals - if EXPORT_NORMALS and faces: - if EXPORT_NORMALS_HQ: - BPyMesh.meshCalcNormals(me) - else: - # transforming normals is incorrect - # when the matrix is scaled, - # better to recalculate them - me.calcNormals() - - # # Crash Blender - #materials = me.getMaterials(1) # 1 == will return None in the list. - materials = me.materials - - materialNames = [] - materialItems = materials[:] - if materials: - for mat in materials: - if mat: # !=None - materialNames.append(mat.name) - else: - materialNames.append(None) - # Cant use LC because some materials are None. - # materialNames = map(lambda mat: mat.name, materials) # Bug Blender, dosent account for null materials, still broken. - - # Possible there null materials, will mess up indicies - # but at least it will export, wait until Blender gets fixed. - materialNames.extend((16-len(materialNames)) * [None]) - materialItems.extend((16-len(materialItems)) * [None]) - - # Sort by Material, then images - # so we dont over context switch in the obj file. - if EXPORT_KEEP_VERT_ORDER: - pass - elif faceuv: - try: faces.sort(key = lambda a: (a.mat, a.image, a.smooth)) - except: faces.sort(lambda a,b: cmp((a.mat, a.image, a.smooth), (b.mat, b.image, b.smooth))) - elif len(materials) > 1: - try: faces.sort(key = lambda a: (a.mat, a.smooth)) - except: faces.sort(lambda a,b: cmp((a.mat, a.smooth), (b.mat, b.smooth))) - else: - # no materials - try: faces.sort(key = lambda a: a.smooth) - except: faces.sort(lambda a,b: cmp(a.smooth, b.smooth)) - - # Set the default mat to no material and no image. - contextMat = (0, 0) # Can never be this, so we will label a new material teh first chance we get. - contextSmooth = None # Will either be true or false, set bad to force initialization switch. - - if EXPORT_BLEN_OBS or EXPORT_GROUP_BY_OB: - name1 = ob.name - name2 = ob.getData(1) - if name1 == name2: - obnamestring = fixName(name1) - else: - obnamestring = '%s_%s' % (fixName(name1), fixName(name2)) - - if EXPORT_BLEN_OBS: - file.write('o %s\n' % obnamestring) # Write Object name - else: # if EXPORT_GROUP_BY_OB: - file.write('g %s\n' % obnamestring) - - - # Vert - for v in me.verts: - file.write('v %.6f %.6f %.6f\n' % tuple(v.co)) - - # UV - if faceuv: - uv_face_mapping = [[0,0,0,0] for f in faces] # a bit of a waste for tri's :/ - - uv_dict = {} # could use a set() here - for f_index, f in enumerate(faces): - - for uv_index, uv in enumerate(f.uv): - uvkey = veckey2d(uv) - try: - uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] - except: - uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] = len(uv_dict) - file.write('vt %.6f %.6f\n' % tuple(uv)) - - uv_unique_count = len(uv_dict) - del uv, uvkey, uv_dict, f_index, uv_index - # Only need uv_unique_count and uv_face_mapping - - # NORMAL, Smooth/Non smoothed. 
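The 'vt' block above and the 'vn' block below rely on the same indexing idiom: coordinates are rounded (veckey2d/veckey3d) and used as dictionary keys so repeated values collapse to a single 1-based OBJ index. A minimal standalone sketch of that idiom, with illustrative names not taken from the script:

def build_obj_index(vectors, ndigits=6):
    index = {}    # rounded tuple -> 1-based index of the written line
    unique = []   # unique vectors in first-seen order, i.e. what gets written
    for vec in vectors:
        key = tuple(round(c, ndigits) for c in vec)
        if key not in index:
            index[key] = len(unique) + 1
            unique.append(key)
    return index, unique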
- if EXPORT_NORMALS: - for f in faces: - if f.smooth: - for v in f: - noKey = veckey3d(v.no) - if not globalNormals.has_key( noKey ): - globalNormals[noKey] = totno - totno +=1 - file.write('vn %.6f %.6f %.6f\n' % noKey) - else: - # Hard, 1 normal from the face. - noKey = veckey3d(f.no) - if not globalNormals.has_key( noKey ): - globalNormals[noKey] = totno - totno +=1 - file.write('vn %.6f %.6f %.6f\n' % noKey) - - if not faceuv: - f_image = None - - if EXPORT_POLYGROUPS: - # Retrieve the list of vertex groups - vertGroupNames = me.getVertGroupNames() - - currentVGroup = '' - # Create a dictionary keyed by face id and listing, for each vertex, the vertex groups it belongs to - vgroupsMap = [[] for _i in xrange(len(me.verts))] - for vertexGroupName in vertGroupNames: - for vIdx, vWeight in me.getVertsFromGroup(vertexGroupName, 1): - vgroupsMap[vIdx].append((vertexGroupName, vWeight)) - - for f_index, f in enumerate(faces): - f_v= f.v - f_smooth= f.smooth - f_mat = min(f.mat, len(materialNames)-1) - if faceuv: - f_image = f.image - f_uv= f.uv - - # MAKE KEY - if faceuv and f_image: # Object is always true. - key = materialNames[f_mat], f_image.name - else: - key = materialNames[f_mat], None # No image, use None instead. - - # Write the vertex group - if EXPORT_POLYGROUPS: - if vertGroupNames: - # find what vertext group the face belongs to - theVGroup = findVertexGroupName(f,vgroupsMap) - if theVGroup != currentVGroup: - currentVGroup = theVGroup - file.write('g %s\n' % theVGroup) - - # CHECK FOR CONTEXT SWITCH - if key == contextMat: - pass # Context alredy switched, dont do anything - else: - if key[0] == None and key[1] == None: - # Write a null material, since we know the context has changed. - if EXPORT_GROUP_BY_MAT: - file.write('g %s_%s\n' % (fixName(ob.name), fixName(ob.getData(1))) ) # can be mat_image or (null) - file.write('usemtl (null)\n') # mat, image - - else: - mat_data= MTL_DICT.get(key) - if not mat_data: - # First add to global dict so we can export to mtl - # Then write mtl - - # Make a new names from the mat and image name, - # converting any spaces to underscores with fixName. 
- - # If none image dont bother adding it to the name - if key[1] == None: - mat_data = MTL_DICT[key] = ('%s'%fixName(key[0])), materialItems[f_mat], f_image - else: - mat_data = MTL_DICT[key] = ('%s_%s' % (fixName(key[0]), fixName(key[1]))), materialItems[f_mat], f_image - - if EXPORT_GROUP_BY_MAT: - file.write('g %s_%s_%s\n' % (fixName(ob.name), fixName(ob.getData(1)), mat_data[0]) ) # can be mat_image or (null) - - file.write('usemtl %s\n' % mat_data[0]) # can be mat_image or (null) - - contextMat = key - if f_smooth != contextSmooth: - if f_smooth: # on now off - file.write('s 1\n') - contextSmooth = f_smooth - else: # was off now on - file.write('s off\n') - contextSmooth = f_smooth - - file.write('f') - if faceuv: - if EXPORT_NORMALS: - if f_smooth: # Smoothed, use vertex normals - for vi, v in enumerate(f_v): - file.write( ' %d/%d/%d' % (\ - v.index+totverts,\ - totuvco + uv_face_mapping[f_index][vi],\ - globalNormals[ veckey3d(v.no) ])) # vert, uv, normal - - else: # No smoothing, face normals - no = globalNormals[ veckey3d(f.no) ] - for vi, v in enumerate(f_v): - file.write( ' %d/%d/%d' % (\ - v.index+totverts,\ - totuvco + uv_face_mapping[f_index][vi],\ - no)) # vert, uv, normal - - else: # No Normals - for vi, v in enumerate(f_v): - file.write( ' %d/%d' % (\ - v.index+totverts,\ - totuvco + uv_face_mapping[f_index][vi])) # vert, uv - - face_vert_index += len(f_v) - - else: # No UV's - if EXPORT_NORMALS: - if f_smooth: # Smoothed, use vertex normals - for v in f_v: - file.write( ' %d//%d' % (\ - v.index+totverts,\ - globalNormals[ veckey3d(v.no) ])) - else: # No smoothing, face normals - no = globalNormals[ veckey3d(f.no) ] - for v in f_v: - file.write( ' %d//%d' % (\ - v.index+totverts,\ - no)) - else: # No Normals - for v in f_v: - file.write( ' %d' % (\ - v.index+totverts)) - - file.write('\n') - - # Write edges. - if EXPORT_EDGES: - LOOSE= Mesh.EdgeFlags.LOOSE - for ed in edges: - if ed.flag & LOOSE: - file.write('f %d %d\n' % (ed.v1.index+totverts, ed.v2.index+totverts)) - - # Make the indicies global rather then per mesh - totverts += len(me.verts) - if faceuv: - totuvco += uv_unique_count - me.verts= None - file.close() - - - # Now we have all our materials, save them - if EXPORT_MTL: - write_mtl(mtlfilename) - if EXPORT_COPY_IMAGES: - dest_dir = filename - # Remove chars until we are just the path. - while dest_dir and dest_dir[-1] not in '\\/': - dest_dir = dest_dir[:-1] - if dest_dir: - copy_images(dest_dir) - else: - print '\tError: "%s" could not be used as a base for an image path.' 
% filename - - print "OBJ Export time: %.2f" % (sys.time() - time1) - - - -def write_ui(filename): - - if not filename.lower().endswith('.obj'): - filename += '.obj' - - if not BPyMessages.Warning_SaveOver(filename): - return - - global EXPORT_APPLY_MODIFIERS, EXPORT_ROTX90, EXPORT_TRI, EXPORT_EDGES,\ - EXPORT_NORMALS, EXPORT_NORMALS_HQ, EXPORT_UV,\ - EXPORT_MTL, EXPORT_SEL_ONLY, EXPORT_ALL_SCENES,\ - EXPORT_ANIMATION, EXPORT_COPY_IMAGES, EXPORT_BLEN_OBS,\ - EXPORT_GROUP_BY_OB, EXPORT_GROUP_BY_MAT, EXPORT_KEEP_VERT_ORDER,\ - EXPORT_POLYGROUPS, EXPORT_CURVE_AS_NURBS - - EXPORT_APPLY_MODIFIERS = Draw.Create(0) - EXPORT_ROTX90 = Draw.Create(1) - EXPORT_TRI = Draw.Create(0) - EXPORT_EDGES = Draw.Create(1) - EXPORT_NORMALS = Draw.Create(0) - EXPORT_NORMALS_HQ = Draw.Create(0) - EXPORT_UV = Draw.Create(1) - EXPORT_MTL = Draw.Create(1) - EXPORT_SEL_ONLY = Draw.Create(1) - EXPORT_ALL_SCENES = Draw.Create(0) - EXPORT_ANIMATION = Draw.Create(0) - EXPORT_COPY_IMAGES = Draw.Create(0) - EXPORT_BLEN_OBS = Draw.Create(0) - EXPORT_GROUP_BY_OB = Draw.Create(0) - EXPORT_GROUP_BY_MAT = Draw.Create(0) - EXPORT_KEEP_VERT_ORDER = Draw.Create(1) - EXPORT_POLYGROUPS = Draw.Create(0) - EXPORT_CURVE_AS_NURBS = Draw.Create(1) - - - # Old UI - ''' - # removed too many options are bad! - - # Get USER Options - pup_block = [\ - ('Context...'),\ - ('Selection Only', EXPORT_SEL_ONLY, 'Only export objects in visible selection. Else export whole scene.'),\ - ('All Scenes', EXPORT_ALL_SCENES, 'Each scene as a separate OBJ file.'),\ - ('Animation', EXPORT_ANIMATION, 'Each frame as a numbered OBJ file.'),\ - ('Object Prefs...'),\ - ('Apply Modifiers', EXPORT_APPLY_MODIFIERS, 'Use transformed mesh data from each object. May break vert order for morph targets.'),\ - ('Rotate X90', EXPORT_ROTX90 , 'Rotate on export so Blenders UP is translated into OBJs UP'),\ - ('Keep Vert Order', EXPORT_KEEP_VERT_ORDER, 'Keep vert and face order, disables some other options.'),\ - ('Extra Data...'),\ - ('Edges', EXPORT_EDGES, 'Edges not connected to faces.'),\ - ('Normals', EXPORT_NORMALS, 'Export vertex normal data (Ignored on import).'),\ - ('High Quality Normals', EXPORT_NORMALS_HQ, 'Calculate high quality normals for rendering.'),\ - ('UVs', EXPORT_UV, 'Export texface UV coords.'),\ - ('Materials', EXPORT_MTL, 'Write a separate MTL file with the OBJ.'),\ - ('Copy Images', EXPORT_COPY_IMAGES, 'Copy image files to the export directory, never overwrite.'),\ - ('Triangulate', EXPORT_TRI, 'Triangulate quads.'),\ - ('Grouping...'),\ - ('Objects', EXPORT_BLEN_OBS, 'Export blender objects as "OBJ objects".'),\ - ('Object Groups', EXPORT_GROUP_BY_OB, 'Export blender objects as "OBJ Groups".'),\ - ('Material Groups', EXPORT_GROUP_BY_MAT, 'Group by materials.'),\ - ] - - if not Draw.PupBlock('Export...', pup_block): - return - ''' - - # BEGIN ALTERNATIVE UI ******************* - if True: - - EVENT_NONE = 0 - EVENT_EXIT = 1 - EVENT_REDRAW = 2 - EVENT_EXPORT = 3 - - GLOBALS = {} - GLOBALS['EVENT'] = EVENT_REDRAW - #GLOBALS['MOUSE'] = Window.GetMouseCoords() - GLOBALS['MOUSE'] = [i/2 for i in Window.GetScreenSize()] - - def obj_ui_set_event(e,v): - GLOBALS['EVENT'] = e - - def do_split(e,v): - global EXPORT_BLEN_OBS, EXPORT_GROUP_BY_OB, EXPORT_GROUP_BY_MAT, EXPORT_APPLY_MODIFIERS, KEEP_VERT_ORDER, EXPORT_POLYGROUPS - if EXPORT_BLEN_OBS.val or EXPORT_GROUP_BY_OB.val or EXPORT_GROUP_BY_MAT.val or EXPORT_APPLY_MODIFIERS.val: - EXPORT_KEEP_VERT_ORDER.val = 0 - else: - EXPORT_KEEP_VERT_ORDER.val = 1 - - def do_vertorder(e,v): - global EXPORT_BLEN_OBS, 
EXPORT_GROUP_BY_OB, EXPORT_GROUP_BY_MAT, EXPORT_APPLY_MODIFIERS, KEEP_VERT_ORDER - if EXPORT_KEEP_VERT_ORDER.val: - EXPORT_BLEN_OBS.val = EXPORT_GROUP_BY_OB.val = EXPORT_GROUP_BY_MAT.val = EXPORT_APPLY_MODIFIERS.val = 0 - else: - if not (EXPORT_BLEN_OBS.val or EXPORT_GROUP_BY_OB.val or EXPORT_GROUP_BY_MAT.val or EXPORT_APPLY_MODIFIERS.val): - EXPORT_KEEP_VERT_ORDER.val = 1 - - - def do_help(e,v): - url = __url__[0] - print 'Trying to open web browser with documentation at this address...' - print '\t' + url - - try: - import webbrowser - webbrowser.open(url) - except: - print '...could not open a browser window.' - - def obj_ui(): - ui_x, ui_y = GLOBALS['MOUSE'] - - # Center based on overall pup size - ui_x -= 165 - ui_y -= 140 - - global EXPORT_APPLY_MODIFIERS, EXPORT_ROTX90, EXPORT_TRI, EXPORT_EDGES,\ - EXPORT_NORMALS, EXPORT_NORMALS_HQ, EXPORT_UV,\ - EXPORT_MTL, EXPORT_SEL_ONLY, EXPORT_ALL_SCENES,\ - EXPORT_ANIMATION, EXPORT_COPY_IMAGES, EXPORT_BLEN_OBS,\ - EXPORT_GROUP_BY_OB, EXPORT_GROUP_BY_MAT, EXPORT_KEEP_VERT_ORDER,\ - EXPORT_POLYGROUPS, EXPORT_CURVE_AS_NURBS - - Draw.Label('Context...', ui_x+9, ui_y+239, 220, 20) - Draw.BeginAlign() - EXPORT_SEL_ONLY = Draw.Toggle('Selection Only', EVENT_NONE, ui_x+9, ui_y+219, 110, 20, EXPORT_SEL_ONLY.val, 'Only export objects in visible selection. Else export whole scene.') - EXPORT_ALL_SCENES = Draw.Toggle('All Scenes', EVENT_NONE, ui_x+119, ui_y+219, 110, 20, EXPORT_ALL_SCENES.val, 'Each scene as a separate OBJ file.') - EXPORT_ANIMATION = Draw.Toggle('Animation', EVENT_NONE, ui_x+229, ui_y+219, 110, 20, EXPORT_ANIMATION.val, 'Each frame as a numbered OBJ file.') - Draw.EndAlign() - - - Draw.Label('Output Options...', ui_x+9, ui_y+189, 220, 20) - Draw.BeginAlign() - EXPORT_APPLY_MODIFIERS = Draw.Toggle('Apply Modifiers', EVENT_REDRAW, ui_x+9, ui_y+170, 110, 20, EXPORT_APPLY_MODIFIERS.val, 'Use transformed mesh data from each object. 
May break vert order for morph targets.', do_split) - EXPORT_ROTX90 = Draw.Toggle('Rotate X90', EVENT_NONE, ui_x+119, ui_y+170, 110, 20, EXPORT_ROTX90.val, 'Rotate on export so Blenders UP is translated into OBJs UP') - EXPORT_COPY_IMAGES = Draw.Toggle('Copy Images', EVENT_NONE, ui_x+229, ui_y+170, 110, 20, EXPORT_COPY_IMAGES.val, 'Copy image files to the export directory, never overwrite.') - Draw.EndAlign() - - - Draw.Label('Export...', ui_x+9, ui_y+139, 220, 20) - Draw.BeginAlign() - EXPORT_EDGES = Draw.Toggle('Edges', EVENT_NONE, ui_x+9, ui_y+120, 50, 20, EXPORT_EDGES.val, 'Edges not connected to faces.') - EXPORT_TRI = Draw.Toggle('Triangulate', EVENT_NONE, ui_x+59, ui_y+120, 70, 20, EXPORT_TRI.val, 'Triangulate quads.') - Draw.EndAlign() - Draw.BeginAlign() - EXPORT_MTL = Draw.Toggle('Materials', EVENT_NONE, ui_x+139, ui_y+120, 70, 20, EXPORT_MTL.val, 'Write a separate MTL file with the OBJ.') - EXPORT_UV = Draw.Toggle('UVs', EVENT_NONE, ui_x+209, ui_y+120, 31, 20, EXPORT_UV.val, 'Export texface UV coords.') - Draw.EndAlign() - Draw.BeginAlign() - EXPORT_NORMALS = Draw.Toggle('Normals', EVENT_NONE, ui_x+250, ui_y+120, 59, 20, EXPORT_NORMALS.val, 'Export vertex normal data (Ignored on import).') - EXPORT_NORMALS_HQ = Draw.Toggle('HQ', EVENT_NONE, ui_x+309, ui_y+120, 31, 20, EXPORT_NORMALS_HQ.val, 'Calculate high quality normals for rendering.') - Draw.EndAlign() - EXPORT_POLYGROUPS = Draw.Toggle('Polygroups', EVENT_REDRAW, ui_x+9, ui_y+95, 120, 20, EXPORT_POLYGROUPS.val, 'Export vertex groups as OBJ groups (one group per face approximation).') - - EXPORT_CURVE_AS_NURBS = Draw.Toggle('Nurbs', EVENT_NONE, ui_x+139, ui_y+95, 100, 20, EXPORT_CURVE_AS_NURBS.val, 'Export 3D nurbs curves and polylines as OBJ curves, (bezier not supported).') - - - Draw.Label('Blender Objects as OBJ:', ui_x+9, ui_y+59, 220, 20) - Draw.BeginAlign() - EXPORT_BLEN_OBS = Draw.Toggle('Objects', EVENT_REDRAW, ui_x+9, ui_y+39, 60, 20, EXPORT_BLEN_OBS.val, 'Export blender objects as "OBJ objects".', do_split) - EXPORT_GROUP_BY_OB = Draw.Toggle('Groups', EVENT_REDRAW, ui_x+69, ui_y+39, 60, 20, EXPORT_GROUP_BY_OB.val, 'Export blender objects as "OBJ Groups".', do_split) - EXPORT_GROUP_BY_MAT = Draw.Toggle('Material Groups', EVENT_REDRAW, ui_x+129, ui_y+39, 100, 20, EXPORT_GROUP_BY_MAT.val, 'Group by materials.', do_split) - Draw.EndAlign() - - EXPORT_KEEP_VERT_ORDER = Draw.Toggle('Keep Vert Order', EVENT_REDRAW, ui_x+239, ui_y+39, 100, 20, EXPORT_KEEP_VERT_ORDER.val, 'Keep vert and face order, disables some other options. Use for morph targets.', do_vertorder) - - Draw.BeginAlign() - Draw.PushButton('Online Help', EVENT_REDRAW, ui_x+9, ui_y+9, 110, 20, 'Load the wiki page for this script', do_help) - Draw.PushButton('Cancel', EVENT_EXIT, ui_x+119, ui_y+9, 110, 20, '', obj_ui_set_event) - Draw.PushButton('Export', EVENT_EXPORT, ui_x+229, ui_y+9, 110, 20, 'Export with these settings', obj_ui_set_event) - Draw.EndAlign() - - - # hack so the toggle buttons redraw. 
this is not nice at all - while GLOBALS['EVENT'] not in (EVENT_EXIT, EVENT_EXPORT): - Draw.UIBlock(obj_ui, 0) - - if GLOBALS['EVENT'] != EVENT_EXPORT: - return - - # END ALTERNATIVE UI ********************* - - - if EXPORT_KEEP_VERT_ORDER.val: - EXPORT_BLEN_OBS.val = False - EXPORT_GROUP_BY_OB.val = False - EXPORT_GROUP_BY_MAT.val = False - EXPORT_APPLY_MODIFIERS.val = False - - Window.EditMode(0) - Window.WaitCursor(1) - - EXPORT_APPLY_MODIFIERS = EXPORT_APPLY_MODIFIERS.val - EXPORT_ROTX90 = EXPORT_ROTX90.val - EXPORT_TRI = EXPORT_TRI.val - EXPORT_EDGES = EXPORT_EDGES.val - EXPORT_NORMALS = EXPORT_NORMALS.val - EXPORT_NORMALS_HQ = EXPORT_NORMALS_HQ.val - EXPORT_UV = EXPORT_UV.val - EXPORT_MTL = EXPORT_MTL.val - EXPORT_SEL_ONLY = EXPORT_SEL_ONLY.val - EXPORT_ALL_SCENES = EXPORT_ALL_SCENES.val - EXPORT_ANIMATION = EXPORT_ANIMATION.val - EXPORT_COPY_IMAGES = EXPORT_COPY_IMAGES.val - EXPORT_BLEN_OBS = EXPORT_BLEN_OBS.val - EXPORT_GROUP_BY_OB = EXPORT_GROUP_BY_OB.val - EXPORT_GROUP_BY_MAT = EXPORT_GROUP_BY_MAT.val - EXPORT_KEEP_VERT_ORDER = EXPORT_KEEP_VERT_ORDER.val - EXPORT_POLYGROUPS = EXPORT_POLYGROUPS.val - EXPORT_CURVE_AS_NURBS = EXPORT_CURVE_AS_NURBS.val - - - base_name, ext = splitExt(filename) - context_name = [base_name, '', '', ext] # basename, scene_name, framenumber, extension - - # Use the options to export the data using write() - # def write(filename, objects, EXPORT_EDGES=False, EXPORT_NORMALS=False, EXPORT_MTL=True, EXPORT_COPY_IMAGES=False, EXPORT_APPLY_MODIFIERS=True): - orig_scene = Scene.GetCurrent() - if EXPORT_ALL_SCENES: - export_scenes = Scene.Get() - else: - export_scenes = [orig_scene] - - # Export all scenes. - for scn in export_scenes: - scn.makeCurrent() # If alredy current, this is not slow. - context = scn.getRenderingContext() - orig_frame = Blender.Get('curframe') - - if EXPORT_ALL_SCENES: # Add scene name into the context_name - context_name[1] = '_%s' % BPySys.cleanName(scn.name) # WARNING, its possible that this could cause a collision. we could fix if were feeling parranoied. - - # Export an animation? - if EXPORT_ANIMATION: - scene_frames = xrange(context.startFrame(), context.endFrame()+1) # up to and including the end frame. - else: - scene_frames = [orig_frame] # Dont export an animation. - - # Loop through all frames in the scene and export. - for frame in scene_frames: - if EXPORT_ANIMATION: # Add frame to the filename. - context_name[2] = '_%.6d' % frame - - Blender.Set('curframe', frame) - if EXPORT_SEL_ONLY: - export_objects = scn.objects.context - else: - export_objects = scn.objects - - full_path= ''.join(context_name) - - # erm... bit of a problem here, this can overwrite files when exporting frames. not too bad. - # EXPORT THE FILE. - write(full_path, export_objects,\ - EXPORT_TRI, EXPORT_EDGES, EXPORT_NORMALS,\ - EXPORT_NORMALS_HQ, EXPORT_UV, EXPORT_MTL,\ - EXPORT_COPY_IMAGES, EXPORT_APPLY_MODIFIERS,\ - EXPORT_ROTX90, EXPORT_BLEN_OBS,\ - EXPORT_GROUP_BY_OB, EXPORT_GROUP_BY_MAT, EXPORT_KEEP_VERT_ORDER,\ - EXPORT_POLYGROUPS, EXPORT_CURVE_AS_NURBS) - - Blender.Set('curframe', orig_frame) - - # Restore old active scene. 
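To make the per-scene/per-frame naming above concrete, this is what ''.join(context_name) yields for hypothetical values (the scene and frame parts stay empty unless 'All Scenes' or 'Animation' are enabled):

context_name = ['/tmp/model', '_Scene', '_%.6d' % 25, '.obj']   # hypothetical values
full_path = ''.join(context_name)   # -> '/tmp/model_Scene_000025.obj'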
- orig_scene.makeCurrent() - Window.WaitCursor(0) - - -if __name__ == '__main__': - Window.FileSelector(write_ui, 'Export Wavefront OBJ', sys.makename(ext='.obj')) diff --git a/release/scripts/faceselect_same_weights.py b/release/scripts/faceselect_same_weights.py deleted file mode 100644 index 967aedec363..00000000000 --- a/release/scripts/faceselect_same_weights.py +++ /dev/null @@ -1,111 +0,0 @@ -#!BPY -""" -Name: 'Same Weights...' -Blender: 245 -Group: 'FaceSelect' -Tooltip: 'Select same faces with teh same weight for the active group.' -""" - -__author__ = ["Campbell Barton aka ideasman42"] -__url__ = ["www.blender.org", "blenderartists.org", "www.python.org"] -__version__ = "0.1" -__bpydoc__ = """\ - -Select Same Weights - -Select same weights as the active face on the active group. -""" - -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Script copyright (C) Campbell J Barton -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -from Blender import Scene, Draw, Mesh -import BPyMesh - -def selSameWeights(me, PREF_TOLERENCE): - - # Check for missing data - if not me.faceUV: return - - act_group= me.activeGroup - if not act_group: return - - act_face = me.faces[me.activeFace] - if act_face == None: return - - - - groupNames, vWeightDict= BPyMesh.meshWeight2Dict(me) - - def get_face_weight(f): - ''' - Return the faces median weight and weight range. - ''' - wmin = 1.0 - wmax = 0.0 - w = 0.0 - for v in f: - try: - new_weight = vWeightDict[v.index][act_group] - if wmin > new_weight: wmin = new_weight - if wmax < new_weight: wmax = new_weight - w += new_weight - except: - pass - return w, wmax-wmin # weight, range - - weight_from, weight_range_from = get_face_weight(act_face) - for f in me.faces: - if (not f.sel) and f != act_face: - weight, weight_range = get_face_weight(f) - - # Compare the 2 faces weight difference and difference in their contrast. 
- if\ - abs(weight - weight_from) <= PREF_TOLERENCE and\ - abs(weight_range - weight_range_from) <= PREF_TOLERENCE: - f.sel = True - - -def main(): - scn= Scene.GetCurrent() - ob= scn.objects.active - - if not ob or ob.type != 'Mesh': - Draw.PupMenu('Error, no active mesh object, aborting.') - return - - me= ob.getData(mesh=1) - - PREF_TOLERENCE= Draw.Create(0.1) - - pup_block= [\ - ('Tolerence:', PREF_TOLERENCE, 0.01, 1.0, 'Tolerence for selecting faces of the same weight.'),\ - ] - - if not Draw.PupBlock('Select Same Weight...', pup_block): - return - - PREF_TOLERENCE= PREF_TOLERENCE.val - - selSameWeights(me, PREF_TOLERENCE) - -if __name__=='__main__': - main() \ No newline at end of file diff --git a/release/scripts/flt_defaultp.py b/release/scripts/flt_defaultp.py deleted file mode 100644 index 5c44fe29a6f..00000000000 --- a/release/scripts/flt_defaultp.py +++ /dev/null @@ -1 +0,0 @@ -pal = [-1,255,16711935,-16776961,-19529729,-19726337,-19922945,-20119553,-20316161,-20578305,-20840449,-21102593,-21364737,-21692417,-22020097,-22413313,-22806529,-23199745,-23658497,-24117249,-24641537,-25165825,-25755649,-26411009,-27066369,-27787265,-28573697,-29425665,-30343169,-31326209,-32374785,-33488897,-354549761,-371458049,-388366337,-405274625,-422182913,-439156737,-456130561,-473104385,-506855425,-540672001,-574488577,-608305153,-642121729,-676003841,-709885953,-760610817,-811335681,-862060545,-912850945,-980418561,-1048051713,-1115684865,-1183383553,-1267924993,-1352466433,-1453850625,-1555300353,-1656815617,-1775173633,-1893597185,-2028863489,2130771967,-1010376193,-1043996161,-1077681665,-1111367169,-1145052673,-1178738177,-1229200897,-1279663617,-1330126337,-1380654593,-1431182849,-1498488321,-1565793793,-1633164801,-1700535809,-1784684033,-1868832257,-1969823233,-2070814209,2123096575,2005262847,1887429119,1752752639,1601298943,1449779711,1281417727,1096278527,911073791,709026303,490201599,254534143,2023935,-1380857601,-1397700353,-1431320321,-1464940289,-1498560257,-1532180225,-1565865729,-1599551233,-1650013953,-1700476673,-1750939393,-1801402113,-1851864833,-1919170305,-1986475777,-2053781249,-2121086721,2089797887,2005649663,1904724223,1803798783,1686030591,1568262399,1450494207,1315883263,1164495103,1013041407,844810495,659736831,457885951,239192319,3655935,-1767919617,-1784762369,-1801605121,-1818447873,-1852067841,-1885687809,-1919307777,-1952927745,-1986547713,-2020167681,-2070564865,-2120962049,2123542527,2073079807,2022617087,1955377151,1888137215,1820897279,1736880127,1652797439,1568714751,1467854847,1366994943,1249357823,1131655167,997175295,862695423,711372799,560050175,391950335,207007743,5287935,2139657983,2122880767,2106103551,2089326335,2072549119,2055771903,2022217471,1988663039,1955108607,1921554175,1887934207,1854314239,1803917055,1753519871,1703122687,1652725503,1602328319,1535153919,1467979519,1400805119,1316853503,1232901887,1148950271,1048221439,947427071,846632703,729061119,611489535,477140735,326014719,174888703,6919935,1837268479,1820491263,1803714047,1786936831,1770159615,1753382399,1736605183,1719827967,1686273535,1652719103,1619164671,1585610239,1552055807,1518501375,1468169727,1417838079,1367506431,1317174783,1266843135,1199734271,1132625407,1065516543,998407679,914521599,830635519,729972223,629308927,528645631,411205119,293764607,159546879,8551935,-2086957569,-2103734785,-2120512001,-2137289217,2140900863,2107346431,2073791999,2040237311,2006682623,1973127935,1939573247,1906018559,1855686655,1805354751,1755022847,1704690943,1654359039,1587249919,1520140799,
1453031679,1369145343,1285258751,1201372159,1100708351,1000044543,882603519,765162495,630943999,496725503,345729791,177956863,10183935,-1699437825,-1716215297,-1732992769,-1766547457,-1800102145,-1833656833,-1867211521,-1900766209,-1934320897,-1967875585,-2018207489,-2068539649,-2118871809,2125763327,2058653951,1991544575,1924435199,1857325823,1773438975,1689552127,1588888063,1488223999,1387559679,1270118143,1152676607,1018457855,884238847,733242623,565468927,397695231,213144319,11815935,-1311918593,-1345473281,-1379027969,-1412582657,-1446137345,-1479692289,-1513247233,-1546802177,-1597134337,-1647466497,-1697798657,-1748130817,-1798463233,-1865572865,-1932682497,-1999792129,-2083678977,2127401215,2043514111,1942849791,1842185215,1724743423,1607301631,1473082367,1338863103,1187866367,1020092415,852318207,667766783,466437887,248331519,13447935,-924398849,-957953793,-991508737,-1025063681,-1058618625,-1092173569,-1142505729,-1192837889,-1243170305,-1293502721,-1343835137,-1410944769,-1478054401,-1545164289,-1629051393,-1712938497,-1796825857,-1897490433,-1998155009,-2098819841,2078705407,1961263103,1827043583,1676046591,1525049599,1357275135,1172723199,971394047,753287423,518403327,283518975,15079935,-570434049,-603988993,-637543937,-671098881,-704653825,-754986241,-805318657,-855651073,-905983489,-973093377,-1040203265,-1107313153,-1174423041,-1258310401,-1342197761,-1442862593,-1543527425,-1644192257,-1761634561,-1879076865,-2013296641,2147450879,1996453631,1828678911,1660904191,1476351999,1275022335,1056915199,822030591,570368511,301928959,16711935,-503325185,-536880129,-570435073,-603990017,-637544961,-671100161,-721432577,-771764993,-822097409,-872430081,-922762753,-989872641,-1056982529,-1124092673,-1191202817,-1275090433,-1358978049,-1459642881,-1560307969,-1660973057,-1778415617,-1895858177,-2030078209,2113891583,1962894079,1795119103,1610566655,1426013951,1224683775,1006576127,771691007,520028415,-452993537,-469771265,-503326209,-536881153,-570436097,-603991297,-637546497,-671101697,-721434113,-771766785,-822099457,-872432129,-922764801,-989874945,-1056985089,-1124095489,-1191205889,-1275093505,-1358981377,-1459646465,-1560311809,-1677754369,-1795197185,-1912640257,-2046860545,2097108991,1946110975,1778335487,1593782527,1392452095,1174344191,939458815,-419439105,-436216833,-452994561,-469772289,-503327233,-536882433,-570437633,-603992833,-637548033,-671103489,-721436161,-771768833,-822101505,-872434433,-922767361,-989877761,-1056988161,-1124098561,-1207986433,-1291874305,-1375762433,-1476427777,-1577093377,-1694536449,-1811979521,-1946200065,-2080420865,2063548159,1912549631,1744773631,1560220159,1358889215,-385884673,-402662401,-419440129,-436217857,-452995585,-469773569,-503328769,-536883969,-570439169,-603994625,-637550081,-671105537,-721438209,-771771137,-822104065,-872437249,-922770433,-989880833,-1056991489,-1124102145,-1191213057,-1275101185,-1358989569,-1459655425,-1560321281,-1677764609,-1795208193,-1912652033,-2046873345,2097095167,1946096127,1778319615,-335553025,-352330753,-369108481,-385886209,-402663937,-419441921,-436219905,-452997889,-486553089,-520108545,-553664001,-587219457,-620774913,-654330625,-687886337,-738219521,-788552705,-838885889,-889219329,-939552769,-1006663681,-1073774593,-1140885761,-1224774401,-1308663041,-1392551937,-1493218305,-1593884929,-1711329025,-1828773377,-1962995201,-2097217281,-285221377,-301999105,-318776833,-335554561,-352332289,-369110273,-385888257,-402666241,-419444225,-436222465,-453000705,-469778945,-503334401,-536890113,-570445825,
-604001793,-637557761,-671113729,-721447169,-771780609,-822114305,-872448001,-922781953,-989893377,-1057004801,-1124116481,-1191228417,-1275117825,-1359007489,-1459674625,-1560342017,-1677786881,-234889729,-234890241,-251667969,-268445697,-285223425,-302001409,-318779393,-335557377,-352335361,-369113601,-385891841,-402670081,-419448321,-436226817,-453005313,-469784065,-503340033,-536896001,-570452225,-604008449,-637564929,-671121409,-704678145,-755012353,-805346561,-855681025,-906015745,-973127937,-1040240385,-1107353089,-1174466049,-1258356481,-234889729,-234890241,-234890753,-234891265,-234891777,-234892545,-234893313,-234894081,-234894849,-251673089,-268451329,-285229569,-302007809,-318786305,-335564801,-352343553,-369122305,-385901057,-402680065,-419459073,-436238337,-453017601,-486574337,-520131329,-553688321,-587245569,-620803073,-654360833,-687918849,-738254337,-788590081,-838926081,-234889729,-234890241,-234890753,-234891265,-234891777,-234892545,-234893313,-234894081,-234894849,-234895873,-234896897,-234897921,-234898945,-234900225,-234901505,-234903041,-234904577,-234906113,-234907905,-234909697,-234911745,-251691009,-268470529,-285250305,-302030081,-318810113,-335590401,-352370945,-369151745,-385932801,-402714113,-419495681,-8705,-9217,-9729,-10241,-10753,-11521,-12289,-13057,-13825,-14849,-15873,-16897,-17921,-19201,-20481,-22017,-23553,-25089,-26881,-28673,-30721,-32769,-35073,-37633,-40193,-43009,-46081,-49409,-52993,-56833,-60929,-65281,-926209,-926721,-927233,-927745,-928257,-929025,-929793,-930561,-931329,-932353,-933377,-934401,-935425,-936705,-937985,-939521,-941057,-1008129,-1075457,-1142785,-1210369,-1277953,-1345793,-1413889,-1481985,-1550337,-1618945,-1687809,-1756929,-1826305,-1895937,-2031361,-926209,-926721,-927233,-927745,-928257,-929025,-929793,-996097,-1062401,-1128961,-1195521,-1262081,-1328641,-1395457,-1462273,-1529345,-1596417,-1663489,-1730817,-1798145,-1865729,-1998849,-2132225,-2265857,-2399489,-2533377,-2667521,-2867457,-3067649,-3268097,-3468801,-3669761,-926209,-992257,-1058305,-1124353,-1190401,-1256705,-1323009,-1389313,-1455617,-1522177,-1588737,-1655297,-1721857,-1788673,-1855489,-1988097,-2120705,-2253313,-2386177,-2519041,-2652161,-2785281,-2984193,-3183361,-3382529,-3581953,-3847169,-4112641,-4378369,-4644353,-4976129,-5308161,-1188353,-1254401,-1320449,-1386497,-1452545,-1518849,-1585153,-1651457,-1717761,-1784321,-1850881,-1982977,-2115073,-2247425,-2379777,-2512385,-2644993,-2777601,-2976001,-3174401,-3373057,-3571713,-3836161,-4100865,-4365569,-4630529,-4961281,-5292289,-5689089,-6086145,-6483457,-6946561,-1384961,-1451009,-1517057,-1583105,-1649153,-1715457,-1781761,-1848065,-1979905,-2112001,-2244097,-2376193,-2508289,-2640641,-2838529,-3036673,-3234817,-3432961,-3631361,-3895297,-4159489,-4423681,-4688129,-5018369,-5348609,-5744641,-6140929,-6537473,-6999809,-7462401,-7990785,-8584961,-1581569,-1647617,-1713665,-1779713,-1845761,-1977601,-2109441,-2241281,-2373121,-2505217,-2637313,-2769409,-2967041,-3164929,-3362817,-3560961,-3759105,-4022785,-4286721,-4550657,-4880385,-5210113,-5540097,-5935873,-6331649,-6793217,-7255041,-7782657,-8310529,-8904193,-9563649,-10223361,-1712641,-1778689,-1844737,-1976321,-2107905,-2239745,-2371585,-2503425,-2635265,-2767361,-2964993,-3162625,-3360257,-3558145,-3821569,-4085249,-4348929,-4612609,-4942081,-5271553,-5666817,-6062081,-6457601,-6918913,-7380225,-7907329,-8434689,-9027841,-9686785,-10345985,-11070977,-11861761,-1843713,-1975297,-2106881,-2238465,-2370049,-2501889,-2633729,-2765569,-2962945,-3160
577,-3358209,-3555841,-3753473,-4016897,-4280321,-4544001,-4873217,-5202433,-5531905,-5926913,-6322177,-6782977,-7244033,-7770881,-8297729,-8890369,-9548801,-10207489,-10931969,-11722241,-12578305,-13500161,-1974785,-2106369,-2237953,-2369537,-2501121,-2632961,-2830337,-3027713,-3225089,-3422721,-3620353,-3883521,-4146689,-4410113,-4673537,-5002753,-5331969,-5726721,-6121729,-6582273,-7043073,-7503873,-8030465,-8622849,-9215233,-9873409,-10597377,-11387137,-12242689,-13164033,-14085633,-15138561,-2236929,-2368513,-2500097,-2631681,-2763265,-2960641,-3158017,-3355393,-3552769,-3815937,-4079105,-4342273,-4605441,-4934401,-5263361,-5658113,-6052865,-6447617,-6908161,-7368705,-7895041,-8421377,-9013505,-9671425,-10329345,-11053057,-11842561,-12697857,-13618945,-14605825,-15658497,-16776961] \ No newline at end of file diff --git a/release/scripts/flt_dofedit.py b/release/scripts/flt_dofedit.py deleted file mode 100644 index 36e8e4d2501..00000000000 --- a/release/scripts/flt_dofedit.py +++ /dev/null @@ -1,835 +0,0 @@ -#!BPY - -""" -Name: 'FLT DOF Editor' -Blender: 240 -Group: 'Misc' -Tooltip: 'Degree of Freedom editor for FLT nodes' -""" - -__author__ = "Geoffrey Bantle" -__version__ = "1.0 11/21/07" -__email__ = ('scripts', 'Author, ') -__url__ = ('blender', 'blenderartists.org') - -__bpydoc__ ="""\ -This script provides tools for working with OpenFlight databases in Blender. OpenFlight is a -registered trademark of MultiGen-Paradigm, Inc. - -Feature overview and more availible at: -http://wiki.blender.org/index.php/Scripts/Manual/FLTools -""" - -# -------------------------------------------------------------------------- -# flt_palettemanager.py version 0.1 2005/04/08 -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Copyright (C) 2007: Blender Foundation -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
-# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -import Blender.Draw as Draw -from Blender.BGL import * -import Blender -import flt_properties -reload(flt_properties) -from flt_properties import * - -#event codes -evcode = { - "DOF_MAKE" : 100, - "DOF_UPDATE" : 138, - "DOF_DELETE" : 101, - "DOF_TRANSX" : 102, - "DOF_TRANSY" : 103, - "DOF_TRANSZ" : 104, - "DOF_ROTX" : 105, - "DOF_ROTY" : 106, - "DOF_ROTZ" : 107, - "DOF_SCALEX" : 108, - "DOF_SCALEY" : 109, - "DOF_SCALEZ" : 110, - "DOF_MIN_TRANSX" : 111, - "DOF_MIN_TRANSY" : 112, - "DOF_MIN_TRANSZ" : 113, - "DOF_MIN_ROTX" : 114, - "DOF_MIN_ROTY" : 115, - "DOF_MIN_ROTZ" : 116, - "DOF_MIN_SCALEX" : 117, - "DOF_MIN_SCALEY" : 118, - "DOF_MIN_SCALEZ" : 119, - "DOF_MAX_TRANSX" : 120, - "DOF_MAX_TRANSY" : 121, - "DOF_MAX_TRANSZ" : 122, - "DOF_MAX_ROTX" : 123, - "DOF_MAX_ROTY" : 124, - "DOF_MAX_ROTZ" : 125, - "DOF_MAX_SCALEX" : 126, - "DOF_MAX_SCALEY" : 127, - "DOF_MAX_SCALEZ" : 128, - "DOF_STEP_TRANSX" : 129, - "DOF_STEP_TRANSY" : 130, - "DOF_STEP_TRANSZ" : 131, - "DOF_STEP_ROTX" : 132, - "DOF_STEP_ROTY" : 133, - "DOF_STEP_ROTZ" : 134, - "DOF_STEP_SCALEX" : 135, - "DOF_STEP_SCALEY" : 136, - "DOF_STEP_SCALEZ" : 137 -} - -#system -DOF_MAKE = None -DOF_UPDATE = None -DOF_DELETE = None - -#toggle buttons -DOF_TRANSX = None -DOF_TRANSY = None -DOF_TRANSZ = None -DOF_ROTX = None -DOF_ROTY = None -DOF_ROTZ = None -DOF_SCALEX = None -DOF_SCALEY = None -DOF_SCALEZ = None - -#Minimums -DOF_MIN_TRANSX = None -DOF_MIN_TRANSY = None -DOF_MIN_TRANSZ = None -DOF_MIN_ROTX = None -DOF_MIN_ROTY = None -DOF_MIN_ROTZ = None -DOF_MIN_SCALEX = None -DOF_MIN_SCALEY = None -DOF_MIN_SCALEZ = None - -#maximums -DOF_MAX_TRANSX = None -DOF_MAX_TRANSY = None -DOF_MAX_TRANSZ = None -DOF_MAX_ROTX = None -DOF_MAX_ROTY = None -DOF_MAX_ROTZ = None -DOF_MAX_SCALEX = None -DOF_MAX_SCALEY = None -DOF_MAX_SCALEZ = None - -#step -DOF_STEP_TRANSX = None -DOF_STEP_TRANSY = None -DOF_STEP_TRANSZ = None -DOF_STEP_ROTX = None -DOF_STEP_ROTY = None -DOF_STEP_ROTZ = None -DOF_STEP_SCALEX = None -DOF_STEP_SCALEY = None -DOF_STEP_SCALEZ = None - -#labels -DOF_ROTSTRING = None -DOF_TRANSTRING = None -DOF_SCALESTRING = None -DOF_EDITLABEL = None - -#make ID props easier/morereadable -zmin = '14d!ZMIN' -zmax = '15d!ZMAX' -zcur = '16d!ZCUR' -zstep = '17d!ZSTEP' -ymin = '18d!YMIN' -ymax = '19d!YMAX' -ycur = '20d!YCUR' -ystep = '21d!YSTEP' -xmin = '22d!XMIN' -xmax = '23d!XMAX' -xcur = '24d!XCUR' -xstep = '25d!XSTEP' -pitchmin = '26d!PITCH-MIN' -pitchmax = '27d!PITCH-MAX' -pitchcur = '28d!PITCH-CUR' -pitchstep = '29d!PITCH-STEP' -rollmin = '30d!ROLL-MIN' -rollmax = '31d!ROLL-MAX' -rollcur = '32d!ROLL-CUR' -rollstep = '33d!ROLL-STEP' -yawmin = '34d!YAW-MIN' -yawmax = '35d!YAW-MAX' -yawcur = '36d!YAW-CUR' -yawstep = '37d!YAW-STEP' -zscalemin = '38d!ZSIZE-MIN' -zscalemax = '39d!ZSIZE-MAX' -zscalecur = '40d!ZSIZE-CUR' -zscalestep = '41d!ZSIZE-STEP' -yscalemin = '42d!YSIZE-MIN' -yscalemax = '43d!YSIZE-MAX' -yscalecur = '44d!YSIZE-CUR' -yscalestep = '45d!YSIZE-STEP' -xscalemin = '46d!XSIZE-MIN' -xscalemax = '47d!XSIZE-MAX' -xscalecur = '48d!XSIZE-CUR' -xscalestep = '49d!XSIZE-STEP' - - - -def update_state(): - state = dict() - state["activeScene"] = Blender.Scene.GetCurrent() - state["activeObject"] = state["activeScene"].objects.active - if state["activeObject"] and not state["activeObject"].sel: - state["activeObject"] = None - state["activeMesh"] = None - if state["activeObject"] and state["activeObject"].type == 'Mesh': - 
state["activeMesh"] = state["activeObject"].getData(mesh=True) - - - state["activeFace"] = None - if state["activeMesh"]: - if state["activeMesh"].faceUV and state["activeMesh"].activeFace != None: - state["activeFace"] = state["activeMesh"].faces[state["activeMesh"].activeFace] - - - #update editmode - state["editmode"] = Blender.Window.EditMode() - - return state - -def idprops_append(object, typecode, props): - object.properties["FLT"] = dict() - object.properties["FLT"]['type'] = typecode - for prop in props: - object.properties["FLT"][prop] = props[prop] - object.properties["FLT"]['3t8!id'] = object.name - -def idprops_kill(): - state = update_state() - if state["activeObject"] and state["activeObject"].properties.has_key('FLT'): - state["activeObject"].properties.pop('FLT') - -def idprops_copy(source): - state = update_state() - if source.properties.has_key('FLT'): - for object in state["activeScene"].objects: - if object.sel and object != source and (state["activeScene"].Layers & object.Layers): - idprops_kill(object) - object.properties['FLT'] = dict() - for key in source.properties['FLT']: - object.properties['FLT'][key] = source.properties['FLT'][key] - -def select_by_typecode(typecode): - state = update_state() - - for object in state["activeScene"].objects: - if object.properties.has_key('FLT') and object.properties['FLT']['type'] == typecode and state["activeScene"].Layers & object.Layers: - object.select(1) - -def DOF_get_frame(): - state = update_state() - - if not state["activeObject"] and not id_props_type(state["activeObject"], 14): - return - - #Warning! assumes 1 BU == 10 meters. - #do origin - state["activeObject"].properties['FLT']['5d!ORIGX'] = state["activeObject"].getLocation('worldspace')[0]*10.0 - state["activeObject"].properties['FLT']['6d!ORIGY'] = state["activeObject"].getLocation('worldspace')[1]*10.0 - state["activeObject"].properties['FLT']['7d!ORIGZ'] = state["activeObject"].getLocation('worldspace')[2]*10.0 - #do X axis - x = Blender.Mathutils.Vector(1.0,0.0,0.0) - x = x * state["activeObject"].getMatrix('worldspace') - x = x * 10.0 - state["activeObject"].properties['FLT']['8d!XAXIS-X'] = x[0] - state["activeObject"].properties['FLT']['9d!XAXIS-Y'] = x[1] - state["activeObject"].properties['FLT']['10d!XAXIS-Z'] = x[2] - #do X/Y plane - x = Blender.Mathutils.Vector(0.0,1.0,0.0) - x.normalize() - x = x * state["activeObject"].getMatrix('worldspace') - x = x * 10.0 - state["activeObject"].properties['FLT']['11d!XYPLANE-X'] = x[0] - state["activeObject"].properties['FLT']['12d!XYPLANE-Y'] = x[1] - state["activeObject"].properties['FLT']['13d!XZPLANE-Z'] = x[2] - -def idprops_type(object, typecode): - if object.properties.has_key('FLT') and object.properties['FLT'].has_key('type') and object.properties['FLT']['type'] == typecode: - return True - return False - -#ui type code -def get_prop(typecode, prop): - - state = update_state() - if state["activeObject"] and idprops_type(state["activeObject"], typecode): - props = state["activeObject"].properties['FLT'] - else: - props = flt_properties.FLTDOF - - return props[prop] - -def set_prop(typecode, prop, value): - state = update_state() - if state["activeObject"] and idprops_type(state["activeObject"],typecode): - state["activeObject"].properties['FLT'][prop] = value - -lockxtrans = (1 << 31) -lockytrans = (1 << 30) -lockztrans = (1 << 29) -lockxrot = (1 << 28) -lockyrot = (1 << 27) -lockzrot = (1 << 26) -lockxscale = (1 << 25) -lockyscale = (1 << 24) -lockzscale = (1 << 23) - -def get_lockmask(mask): - state = 
update_state() - if state["activeObject"]: - flag = get_prop(14,'50I!FLAG') - if flag & mask: - return True - return False - -def set_lockmask(mask): - state = update_state() - if state["activeObject"] and idprops_type(state["activeObject"], 14): - oldvalue = state["activeObject"].properties['FLT']['50I!FLAG'] - oldvalue = struct.unpack('>I', struct.pack('>i', oldvalue))[0] - oldvalue |= mask - state["activeObject"].properties['FLT']['50I!FLAG'] = struct.unpack('>i', struct.pack(">I", oldvalue))[0] - -def clear_lockmask(mask): - state = update_state() - if state["activeObject"] and idprops_type(state["activeObject"], 14): - oldvalue = state["activeObject"].properties['FLT']['50I!FLAG'] - oldvalue = struct.unpack('>I', struct.pack('>i', oldvalue))[0] - oldvalue &= ~mask - state["activeObject"].properties['FLT']['50I!FLAG'] = struct.unpack('>i',struct.pack('>I',oldvalue))[0] - - -def create_dof(): - state = update_state() - actobj = state["activeObject"] - if actobj and not idprops_type(actobj, 14): - idprops_kill() - idprops_append(actobj,14, flt_properties.FLTDOF) - DOF_get_frame() - - -def event(evt,val): - if evt == Draw.ESCKEY: - Draw.Exit() - -def but_event(evt): - global DOF_MAKE - global DOF_UPDATE - global DOF_DELETE - - global DOF_TRANSX - global DOF_TRANSY - global DOF_TRANSZ - global DOF_ROTX - global DOF_ROTY - global DOF_ROTZ - global DOF_SCALEX - global DOF_SCALEY - global DOF_SCALEZ - - global DOF_MIN_TRANSX - global DOF_MIN_TRANSY - global DOF_MIN_TRANSZ - global DOF_MIN_ROTX - global DOF_MIN_ROTY - global DOF_MIN_ROTZ - global DOF_MIN_SCALEX - global DOF_MIN_SCALEY - global DOF_MIN_SCALEZ - - global DOF_MAX_TRANSX - global DOF_MAX_TRANSY - global DOF_MAX_TRANSZ - global DOF_MAX_ROTX - global DOF_MAX_ROTY - global DOF_MAX_ROTZ - global DOF_MAX_SCALEX - global DOF_MAX_SCALEY - global DOF_MAX_SCALEZ - - global DOF_STEP_TRANSX - global DOF_STEP_TRANSY - global DOF_STEP_TRANSZ - global DOF_STEP_ROTX - global DOF_STEP_ROTY - global DOF_STEP_ROTZ - global DOF_STEP_SCALEX - global DOF_STEP_SCALEY - global DOF_STEP_SCALEZ - - #labels - global DOF_ROTSTRING - global DOF_TRANSTRING - global DOF_SCALESTRING - - - #masks - global lockxtrans - global lockytrans - global lockztrans - global lockxrot - global lockyrot - global lockzrot - global lockxscale - global lockyscale - global lockzscale - - global zmin - global zmax - global zcur - global zstep - global ymin - global ymax - global ycur - global ystep - global xmin - global xmax - global xcur - global xstep - global pitchmin - global pitchmax - global pitchcur - global pitchstep - global rollmin - global rollmax - global rollcur - global rollstep - global yawmin - global yawmax - global yawcur - global yawstep - global zscalemin - global zscalemax - global zscalecur - global zscalestep - global yscalemin - global yscalemax - global yscalecur - global yscalestep - global xscalemin - global xscalemax - global xscalecur - global xscalestep - - - - #do "system" events - if evt == evcode["DOF_MAKE"]: - create_dof() - - if evt == evcode["DOF_UPDATE"]: - DOF_get_frame() - - if evt == evcode["DOF_DELETE"]: - idprops_kill() - #do translation lock events - if evt == evcode["DOF_TRANSX"]: - if DOF_TRANSX.val == True: - set_lockmask(lockxtrans) - else: - clear_lockmask(lockxtrans) - - if evt == evcode["DOF_TRANSY"]: - if DOF_TRANSY.val == True: - set_lockmask(lockytrans) - else: - clear_lockmask(lockytrans) - - if evt == evcode["DOF_TRANSZ"]: - if DOF_TRANSZ.val == True: - set_lockmask(lockztrans) - else: - clear_lockmask(lockztrans) - - - #do 
rotation lock events - if evt == evcode["DOF_ROTX"]: - if DOF_ROTX.val == True: - set_lockmask(lockxrot) - else: - clear_lockmask(lockxrot) - - if evt == evcode["DOF_ROTY"]: - if DOF_ROTY.val == True: - set_lockmask(lockyrot) - else: - clear_lockmask(lockyrot) - - if evt == evcode["DOF_ROTZ"]: - if DOF_ROTZ.val == True: - set_lockmask(lockzrot) - else: - clear_lockmask(lockzrot) - - #do scale lock events - if evt == evcode["DOF_SCALEX"]: - if DOF_SCALEX.val == True: - set_lockmask(lockxscale) - else: - clear_lockmask(lockxscale) - - if evt == evcode["DOF_SCALEY"]: - if DOF_SCALEY.val == True: - set_lockmask(lockyscale) - else: - clear_lockmask(lockyscale) - - if evt == evcode["DOF_SCALEZ"]: - if DOF_SCALEZ.val == True: - set_lockmask(lockzscale) - else: - clear_lockmask(lockzscale) - - - #do translation buttons - if evt == evcode["DOF_MIN_TRANSX"]: - set_prop(14, xmin, DOF_MIN_TRANSX.val) - if evt == evcode["DOF_MAX_TRANSX"]: - set_prop(14,xmax, DOF_MAX_TRANSX.val) - if evt == evcode["DOF_STEP_TRANSX"]: - set_prop(14,xstep, DOF_STEP_TRANSX.val) - - if evt == evcode["DOF_MIN_TRANSY"]: - set_prop(14, ymin, DOF_MIN_TRANSY.val) - if evt == evcode["DOF_MAX_TRANSY"]: - set_prop(14,ymax, DOF_MAX_TRANSY.val) - if evt == evcode["DOF_STEP_TRANSY"]: - set_prop(14,ystep, DOF_STEP_TRANSY.val) - - if evt == evcode["DOF_MIN_TRANSZ"]: - set_prop(14, zmin, DOF_MIN_TRANSZ.val) - if evt == evcode["DOF_MAX_TRANSZ"]: - set_prop(14, zmax, DOF_MAX_TRANSZ.val) - if evt == evcode["DOF_STEP_TRANSZ"]: - set_prop(14, zstep, DOF_STEP_TRANSZ.val) - - #do rotation buttons - if evt == evcode["DOF_MIN_ROTX"]: - set_prop(14, pitchmin, DOF_MIN_ROTX.val) - if evt == evcode["DOF_MAX_ROTX"]: - set_prop(14, pitchmax, DOF_MAX_ROTX.val) - if evt == evcode["DOF_STEP_ROTX"]: - set_prop(14, pitchstep, DOF_STEP_ROTX.val) - - if evt == evcode["DOF_MIN_ROTY"]: - set_prop(14, rollmin, DOF_MIN_ROTY.val) - if evt == evcode["DOF_MAX_ROTY"]: - set_prop(14, rollmax, DOF_MAX_ROTY.val) - if evt == evcode["DOF_STEP_ROTY"]: - set_prop(14, rollstep, DOF_STEP_ROTY.val) - - if evt == evcode["DOF_MIN_ROTZ"]: - set_prop(14, yawmin, DOF_MIN_ROTZ.val) - if evt == evcode["DOF_MAX_ROTZ"]: - set_prop(14, yawmax, DOF_MAX_ROTZ.val) - if evt == evcode["DOF_STEP_ROTZ"]: - set_prop(14, yawstep, DOF_STEP_ROTZ.val) - - #do scale buttons - if evt == evcode["DOF_MIN_SCALEX"]: - set_prop(14, xscalemin, DOF_MIN_SCALEX.val) - if evt == evcode["DOF_MAX_SCALEX"]: - set_prop(14, xscalemax, DOF_MAX_SCALEX.val) - if evt == evcode["DOF_STEP_SCALEX"]: - set_prop(14, xscalestep, DOF_STEP_SCALEX.val) - - if evt == evcode["DOF_MIN_SCALEY"]: - set_prop(14, yscalemin, DOF_MIN_SCALEY.val) - if evt == evcode["DOF_MAX_SCALEY"]: - set_prop(14, yscalemax, DOF_MAX_SCALEY.val) - if evt == evcode["DOF_STEP_SCALEY"]: - set_prop(14, yscalestep, DOF_STEP_SCALEY.val) - - if evt == evcode["DOF_MIN_SCALEZ"]: - set_prop(14, zscalemin, DOF_MIN_SCALEZ.val) - if evt == evcode["DOF_MAX_SCALEZ"]: - set_prop(14, zscalemax, DOF_MAX_SCALEZ.val) - if evt == evcode["DOF_STEP_SCALEZ"]: - set_prop(14, zscalestep, DOF_STEP_SCALEZ.val) - - - Draw.Redraw(1) - Blender.Window.RedrawAll() - -def draw_propsheet(x,y): - #UI buttons - global DOF_MAKE - global DOF_UPDATE - global DOF_DELETE - - global DOF_TRANSX - global DOF_TRANSY - global DOF_TRANSZ - global DOF_ROTX - global DOF_ROTY - global DOF_ROTZ - global DOF_SCALEX - global DOF_SCALEY - global DOF_SCALEZ - - global DOF_MIN_TRANSX - global DOF_MIN_TRANSY - global DOF_MIN_TRANSZ - global DOF_MIN_ROTX - global DOF_MIN_ROTY - global DOF_MIN_ROTZ - global 
DOF_MIN_SCALEX - global DOF_MIN_SCALEY - global DOF_MIN_SCALEZ - - global DOF_MAX_TRANSX - global DOF_MAX_TRANSY - global DOF_MAX_TRANSZ - global DOF_MAX_ROTX - global DOF_MAX_ROTY - global DOF_MAX_ROTZ - global DOF_MAX_SCALEX - global DOF_MAX_SCALEY - global DOF_MAX_SCALEZ - - global DOF_STEP_TRANSX - global DOF_STEP_TRANSY - global DOF_STEP_TRANSZ - global DOF_STEP_ROTX - global DOF_STEP_ROTY - global DOF_STEP_ROTZ - global DOF_STEP_SCALEX - global DOF_STEP_SCALEY - global DOF_STEP_SCALEZ - - #labels - global DOF_ROTSTRING - global DOF_TRANSTRING - global DOF_SCALESTRING - global DOF_EDITLABEL - - #masks - global lockxtrans - global lockytrans - global lockztrans - global lockxrot - global lockyrot - global lockzrot - global lockxscale - global lockyscale - global lockzscale - - global zmin - global zmax - global zcur - global zstep - global ymin - global ymax - global ycur - global ystep - global xmin - global xmax - global xcur - global xstep - global pitchmin - global pitchmax - global pitchcur - global pitchstep - global rollmin - global rollmax - global rollcur - global rollstep - global yawmin - global yawmax - global yawcur - global yawstep - global zscalemin - global zscalemax - global zscalecur - global zscalestep - global yscalemin - global yscalemax - global yscalecur - global yscalestep - global xscalemin - global xscalemax - global xscalecur - global xscalestep - - - global evcode - - state = update_state() - - row_height = 20 - toggle_width = 50 - input_width = 100 - pad = 10 - origx = x - origy = (row_height * 15) + (pad * 15) - - - #editor label - x = origx - y = origy - #y = y - (row_height + pad) - DOF_EDITLABEL = Blender.Draw.Label("FLT Degree of Freedom Editor", x, y, 200, row_height) - - - #draw Translation limits - x = origx - y = y- (row_height + pad) - DOF_TRANSTRING = Blender.Draw.Label("Translation Limits", x, y, input_width, row_height) - - - #X limits - x = origx - y = y- (row_height + pad) - DOF_TRANSX = Blender.Draw.Toggle("LimX", evcode["DOF_TRANSX"], x, y, toggle_width, row_height, get_lockmask(lockxtrans), "") - x = x + (toggle_width + pad) - DOF_MIN_TRANSX = Blender.Draw.Number("MinX", evcode["DOF_MIN_TRANSX"], x, y, input_width, row_height,get_prop(14,xmin), -1000000.0, 1000000.0, "") - x = x + (input_width + pad) - DOF_MAX_TRANSX = Blender.Draw.Number("MaxX", evcode["DOF_MAX_TRANSX"], x, y, input_width, row_height,get_prop(14,xmax), -1000000.0, 1000000.0, "") - x = x + (input_width + pad) - DOF_STEP_TRANSX = Blender.Draw.Number("StepX", evcode["DOF_STEP_TRANSX"], x, y, input_width, row_height,get_prop(14,xstep), -1000000.0, 1000000.0, "") - - #Y limits - x = origx - y = y- (row_height + pad) - DOF_TRANSY = Blender.Draw.Toggle("LimY", evcode["DOF_TRANSY"], x, y, toggle_width, row_height, get_lockmask(lockytrans), "") - x = x + (toggle_width + pad) - DOF_MIN_TRANSY = Blender.Draw.Number("MinY", evcode["DOF_MIN_TRANSY"], x, y, input_width, row_height, get_prop(14,ymin), -1000000.0, 1000000.0, "") - x = x + (input_width + pad) - DOF_MAX_TRANSY = Blender.Draw.Number("MaxY", evcode["DOF_MAX_TRANSY"], x, y, input_width, row_height, get_prop(14,ymax), -1000000.0, 1000000.0, "") - x = x + (input_width + pad) - DOF_STEP_TRANSY = Blender.Draw.Number("StepY", evcode["DOF_STEP_TRANSY"], x, y, input_width, row_height, get_prop(14,ystep), -1000000.0, 1000000.0, "") - - #Z limits - x = origx - y = y- (row_height + pad) - DOF_TRANSZ = Blender.Draw.Toggle("LimZ", evcode["DOF_TRANSZ"], x, y, toggle_width, row_height, get_lockmask(lockztrans), "") - x = x + (toggle_width 
+ pad) - DOF_MIN_TRANSZ = Blender.Draw.Number("MinZ", evcode["DOF_MIN_TRANSZ"], x, y, input_width, row_height, get_prop(14,zmin), -1000000.0, 1000000.0, "") - x = x + (input_width + pad) - DOF_MAX_TRANSZ = Blender.Draw.Number("MaxZ", evcode["DOF_MAX_TRANSZ"], x, y, input_width, row_height, get_prop(14,zmax), -1000000.0, 1000000.0, "") - x = x + (input_width + pad) - DOF_STEP_TRANSZ = Blender.Draw.Number("StepZ", evcode["DOF_STEP_TRANSZ"], x, y, input_width, row_height, get_prop(14,zstep), -1000000.0, 1000000.0, "") - - #draw Rotation limits - x = origx - y = y- (row_height + pad) - DOF_ROTSTRING = Blender.Draw.Label("Rotation Limits", x, y, input_width, row_height) - - #draw Rotation limits - #X limits - x = origx - y = y- (row_height + pad) - DOF_ROTX = Blender.Draw.Toggle("LimX", evcode["DOF_ROTX"], x, y, toggle_width, row_height, get_lockmask(lockxrot), "") - x = x + (toggle_width + pad) - DOF_MIN_ROTX = Blender.Draw.Number("MinX", evcode["DOF_MIN_ROTX"], x, y, input_width, row_height, get_prop(14,pitchmin), -1000000.0, 1000000.0, "") - x = x + (input_width + pad) - DOF_MAX_ROTX = Blender.Draw.Number("MaxX", evcode["DOF_MAX_ROTX"], x, y, input_width, row_height, get_prop(14,pitchmax), -1000000.0, 1000000.0, "") - x = x + (input_width + pad) - DOF_STEP_ROTX = Blender.Draw.Number("StepX", evcode["DOF_STEP_ROTX"], x, y, input_width, row_height, get_prop(14,pitchstep), -1000000.0, 1000000.0, "") - - #Y limits - x = origx - y = y- (row_height + pad) - DOF_ROTY = Blender.Draw.Toggle("LimY", evcode["DOF_ROTY"], x, y, toggle_width, row_height, get_lockmask(lockyrot), "") - x = x + (toggle_width + pad) - DOF_MIN_ROTY = Blender.Draw.Number("MinY", evcode["DOF_MIN_ROTY"], x, y, input_width, row_height, get_prop(14,rollmin), -1000000.0, 1000000.0, "") - x = x + (input_width + pad) - DOF_MAX_ROTY = Blender.Draw.Number("MaxY", evcode["DOF_MAX_ROTY"], x, y, input_width, row_height, get_prop(14,rollmax), -1000000.0, 1000000.0, "") - x = x + (input_width + pad) - DOF_STEP_ROTY = Blender.Draw.Number("StepY", evcode["DOF_STEP_ROTY"], x, y, input_width, row_height, get_prop(14,rollstep), -1000000.0, 1000000.0, "") - - #Z limits - x = origx - y = y- (row_height + pad) - DOF_ROTZ = Blender.Draw.Toggle("LimZ", evcode["DOF_ROTZ"], x, y, toggle_width, row_height, get_lockmask(lockzrot), "") - x = x + (toggle_width + pad) - DOF_MIN_ROTZ = Blender.Draw.Number("MinZ", evcode["DOF_MIN_ROTZ"], x, y, input_width, row_height, get_prop(14, yawmin), -1000000.0, 1000000.0, "") - x = x + (input_width + pad) - DOF_MAX_ROTZ = Blender.Draw.Number("MaxZ", evcode["DOF_MAX_ROTZ"], x, y, input_width, row_height, get_prop(14, yawmax), -1000000.0, 1000000.0, "") - x = x + (input_width + pad) - DOF_STEP_ROTZ = Blender.Draw.Number("StepZ", evcode["DOF_STEP_ROTZ"], x, y, input_width, row_height, get_prop(14, yawstep), -1000000.0, 1000000.0, "") - - - #draw Scale limits - x = origx - y = y- (row_height + pad) - DOF_SCALESTRING = Blender.Draw.Label("Scale Limits", x, y, input_width, row_height) - - #draw Scale limits - #X limits - x = origx - y = y- (row_height + pad) - DOF_SCALEX = Blender.Draw.Toggle("LimX", evcode["DOF_SCALEX"], x, y, toggle_width, row_height, get_lockmask(lockxscale), "") - x = x + (toggle_width + pad) - DOF_MIN_SCALEX = Blender.Draw.Number("MinX", evcode["DOF_MIN_SCALEX"], x, y, input_width, row_height, get_prop(14, xscalemin), -1000000.0, 1000000.0, "") - x = x + (input_width + pad) - DOF_MAX_SCALEX = Blender.Draw.Number("MaxX", evcode["DOF_MAX_SCALEX"], x, y, input_width, row_height, get_prop(14, xscalemax), 
-1000000.0, 1000000.0, "") - x = x + (input_width + pad) - DOF_STEP_SCALEX = Blender.Draw.Number("StepX", evcode["DOF_STEP_SCALEX"], x, y, input_width, row_height, get_prop(14, xscalestep), -1000000.0, 1000000.0, "") - - #Y limits - x = origx - y = y- (row_height + pad) - DOF_SCALEY = Blender.Draw.Toggle("LimY", evcode["DOF_SCALEY"], x, y, toggle_width, row_height, get_lockmask(lockyscale), "") - x = x + (toggle_width + pad) - DOF_MIN_SCALEY = Blender.Draw.Number("MinY", evcode["DOF_MIN_SCALEY"], x, y, input_width, row_height, get_prop(14, yscalemin), -1000000.0, 1000000.0, "") - x = x + (input_width + pad) - DOF_MAX_SCALEY = Blender.Draw.Number("MaxY", evcode["DOF_MAX_SCALEY"], x, y, input_width, row_height, get_prop(14, yscalemax), -1000000.0, 1000000.0, "") - x = x + (input_width + pad) - DOF_STEP_SCALEY = Blender.Draw.Number("StepY", evcode["DOF_STEP_SCALEY"], x, y, input_width, row_height, get_prop(14, yscalestep), -1000000.0, 1000000.0, "") - - #Z limits - x = origx - y = y- (row_height + pad) - DOF_SCALEZ = Blender.Draw.Toggle("LimZ", evcode["DOF_SCALEZ"], x, y, toggle_width, row_height, get_lockmask(lockzscale), "") - x = x + (toggle_width + pad) - DOF_MIN_SCALEZ = Blender.Draw.Number("MinZ", evcode["DOF_MIN_SCALEZ"], x, y, input_width, row_height, get_prop(14, zscalemin), -1000000.0, 1000000.0, "") - x = x + (input_width + pad) - DOF_MAX_SCALEZ = Blender.Draw.Number("MaxZ", evcode["DOF_MAX_SCALEZ"], x, y, input_width, row_height, get_prop(14, zscalemax), -1000000.0, 1000000.0, "") - x = x + (input_width + pad) - DOF_STEP_SCALEZ = Blender.Draw.Number("StepZ", evcode["DOF_STEP_SCALEZ"], x, y, input_width, row_height, get_prop(14, zscalestep), -1000000.0, 1000000.0, "") - - #System - x = origx - y = y - (row_height + (pad)*3) - DOF_MAKE = Blender.Draw.PushButton("Make DOF", evcode["DOF_MAKE"], x, y, input_width, row_height, "Make a Dof Node out of Active Object") - x = x + (input_width + pad) - DOF_UPDATE = Blender.Draw.PushButton("Grab Loc/Rot", evcode["DOF_UPDATE"], x, y, input_width, row_height, "Update the Dof Node position/orientation") - x = x + (input_width + pad) - DOF_DELETE = Blender.Draw.PushButton("Delete DOF", evcode["DOF_DELETE"], x, y, input_width, row_height, "Delete the Dof Node properties") - - - - -def gui(): - #draw the propsheet/toolbox. - psheety = 800 - #psheetx = psheety + 10 - draw_propsheet(20,psheety) - -Draw.Register(gui,event,but_event) - \ No newline at end of file diff --git a/release/scripts/flt_export.py b/release/scripts/flt_export.py deleted file mode 100644 index c099c8e62d1..00000000000 --- a/release/scripts/flt_export.py +++ /dev/null @@ -1,1697 +0,0 @@ -#!BPY -""" Registration info for Blender menus: -Name: 'OpenFlight (.flt)...' -Blender: 245 -Group: 'Export' -Tip: 'Export to OpenFlight v16.0 (.flt)' -""" - -__author__ = "Greg MacDonald, Geoffrey Bantle" -__version__ = "2.0 11/21/07" -__url__ = ("blender", "blenderartists.org", "Author's homepage, http://sourceforge.net/projects/blight/") -__bpydoc__ = """\ -This script exports v16.0 OpenFlight files. OpenFlight is a -registered trademark of MultiGen-Paradigm, Inc. - -Feature overview and more availible at: -http://wiki.blender.org/index.php/Scripts/Manual/Export/openflight_flt -""" - -# flt_export.py is an OpenFlight exporter for blender. -# -# Copyright (C) 2005 Greg MacDonald, 2007 Blender Foundation. 
-# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - -import Blender -from Blender import Modifier -import os.path -import flt_properties -import flt_defaultp as defaultp -from flt_filewalker import FltOut -from flt_filewalker import FileFinder -from flt_properties import * -import shutil -import trace -import sys - -FF = FileFinder() -records = process_recordDefs() - -class ExporterOptions: - - def read_state(self): - reg = Blender.Registry.GetKey('flt_export',1) - if reg: - for key in self.state: - if reg.has_key(key): - self.state[key] = reg[key] - - def write_state(self): - d = dict() - for key in self.state: - d[key] = self.state[key] - Blender.Registry.SetKey('flt_export', d, 1) - def __init__(self): - self.verbose = 1 - self.tolerance = 0.001 - self.writevcol = True - - self.state = {'export_shading' : 0, - 'shading_default' : 45, - 'basepath' : os.path.dirname(Blender.Get('filename')), - 'scale': 1.0, - 'doxrefs' : 1, - 'attrib' : 0, - 'copytex' : 0, - 'transform' : 0, - 'xapp' : 1} - - #default externals path - if(os.path.exists(os.path.join(self.state['basepath'],'externals'))): - self.state['externalspath'] = os.path.join(self.state['basepath'],'externals') - else: - self.state['externalspath'] = self.state['basepath'] - - if(os.path.exists(os.path.join(self.state['basepath'],'textures'))): - self.state['texturespath'] = os.path.join(self.state['basepath'],'textures') - else: - self.state['texturespath'] = self.state['basepath'] - - self.state['xappath'] = '' - self.read_state() #read from registry - - -options = ExporterOptions() -tex_files = dict() #a list of (possibly) modified texture path names - -tex_layers = ['Layer0', 'Layer1', 'Layer2', 'Layer3', 'Layer4', 'Layer5', 'Layer6', 'Layer7'] -mask = 2147483648 -mtexmasks = [] -for i in xrange(7): - mtexmasks.append(mask) - mask = mask / 2 - -FLOAT_TOLERANCE = options.tolerance - -#need to move all this stuff to flt_properties.py. -identity_matrix = [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]] -alltypes = [2,4,14,11,73,63,111] -childtypes = { - 2 : [111,2,73,4,14,63], - 4 : [111], - 73 : [111,2,73,4,14,63], - 63 : [], - 14 : [111,2,73,4,14,63], - 111 : [] -} -recordlen = { - 2: 44, - 4: 28, - 73: 80, - 63: 216, - 14: 384, - 111: 156 -} - -def is_identity(m): - for i in xrange(4): - for j in xrange(4): - if abs(m[i][j] - identity_matrix[i][j]) > FLOAT_TOLERANCE: - return False - return True - -class MaterialDesc: - def __init__(self): - self.name = 'Blender' - - # Colors, List of 3 floats. 
- self.diffuse = [1.0, 1.0, 1.0] - self.specular = [1.0, 1.0, 1.0] - - # Scalars - self.ambient = 0.1 # [0.0, 1.0] - self.emissive = 0.0 # [0.0, 1.0] - self.shininess = 32.0 # Range is [0.0, 128.0] - self.alpha = 1.0 # Range is [0.0, 1.0] - -class VertexDesc: - def __init__(self, co=None, no=None, uv=None, fltindex=None,cindex=None): - if co: self.x, self.y, self.z = tuple(co) - else: self.x = self.y = self.z = 0.0 - if no: self.nx, self.ny, self.nz = tuple(no) - else: self.nx = self.ny = self.nz = 0.0 - if uv: self.u, self.v = tuple(uv) - else: self.u = self.v = 0.0 - if cindex: self.cindex = cindex - else: self.cindex = 127 - self.fltindex = fltindex - self.accum = 0 - -class shadowVert: - def __init__(self,bvert,object,world,normal): - global options - - self.co = Blender.Mathutils.Vector(bvert.co[0],bvert.co[1],bvert.co[2]) - #if world: - # vec = self.co - # vec = Blender.Mathutils.Vector(vec[0] * options.scale, vec[1] * options.scale, vec[2] * options.scale) #scale - # self.co = Blender.Mathutils.TranslationMatrix(vec) * (self.co * object.getMatrix('worldspace')) - - if normal: - #if world: - # self.no = Blender.Mathutils.Vector(normal * object.getMatrix('worldspace')).normalize() - #else: - self.no = Blender.Mathutils.Vector(normal[0],normal[1],normal[2]) - - else: - #if world: - #self.no = Blender.Mathutils.Vector(bvert.no * object.getMatrix('worldspace')).normalize() - #else: - self.no = Blender.Mathutils.Vector(bvert.no[0],bvert.no[1],bvert.no[2]) - - #do scaling factor - #if options.scale != 1.0: - #self.co[0] = self.co[0] * options.scale - #self.co[1] = self.co[1] * options.scale - #self.co[2] = self.co[2] * options.scale - - self.index = bvert.index - -class GlobalResourceRepository: - def new_face_name(self): - self.face_name += 1 - return 'f%i' % (self.face_name-1) - - def vertex_count(self): - return len(self.vertex_lst) - - def request_vertex_desc(self, i): - return self.vertex_lst[i] - - def request_vertex_index(self, object, mesh, face, vfindex, uvok,cindex): - - flatShadeNorm = None - vno = None - - - if type(face) is list: - vertex = face[vfindex] - elif str(type(face)) == "": - vertex = face - vno = Blender.Mathutils.Vector(0.0,0.0,1.0) - elif str(type(face)) == "": - if vfindex == 1: - vertex = face.v1 - elif vfindex == 2: - vertex = face.v2 - elif str(type(face)) == "": - if not face.smooth: - flatShadeNorm = face.no - vertex = face.v[vfindex] - else: - return None - - if not self.namehash.has_key(object.name): - self.namehash[object.name] = dict() - indexhash = self.namehash[object.name] - - #export in global space? THIS HAS BEEN MADE REDUNDANT... REMOVE ME - if not options.state['transform']: - vertex = shadowVert(vertex,object,True,flatShadeNorm) - else: - vertex = shadowVert(vertex,object,False,flatShadeNorm) - - if vno: - vertex.no = vno - - - #Check to see if this vertex has been visited before. 
If not, add - if not indexhash.has_key(vertex.index): - if uvok: - newvdesc = VertexDesc(vertex.co, vertex.no, face.uv[vfindex], self.nextvindex,cindex=cindex) - else: - newvdesc = VertexDesc(co=vertex.co, no=vertex.no,fltindex=self.nextvindex,cindex=cindex) - - indexhash[vertex.index] = [newvdesc] - self.vertex_lst.append(newvdesc) - self.nextvindex = self.nextvindex + 1 - return newvdesc.fltindex - - else: - desclist = indexhash[vertex.index] - if uvok: - faceu = face.uv[vfindex][0] - facev = face.uv[vfindex][1] - else: - faceu = 0.0 - facev = 0.0 - for vdesc in desclist: - if\ - abs(vdesc.x - vertex.co[0]) > FLOAT_TOLERANCE or\ - abs(vdesc.y - vertex.co[1]) > FLOAT_TOLERANCE or\ - abs(vdesc.z - vertex.co[2]) > FLOAT_TOLERANCE or\ - abs(vdesc.nx - vertex.no[0]) > FLOAT_TOLERANCE or\ - abs(vdesc.ny - vertex.no[1]) > FLOAT_TOLERANCE or\ - abs(vdesc.nz - vertex.no[2]) > FLOAT_TOLERANCE or\ - vdesc.cindex != cindex or\ - abs(vdesc.u - faceu) > FLOAT_TOLERANCE or\ - abs(vdesc.v - facev) > FLOAT_TOLERANCE: - pass - else: - return vdesc.fltindex - - #if we get this far, we didnt find a match. Add a new one and return - if uvok: - newvdesc = VertexDesc(vertex.co, vertex.no, face.uv[vfindex], self.nextvindex,cindex=cindex) - else: - newvdesc = VertexDesc(co=vertex.co, no=vertex.no,fltindex=self.nextvindex,cindex=cindex) - indexhash[vertex.index].append(newvdesc) - self.vertex_lst.append(newvdesc) - self.nextvindex = self.nextvindex + 1 - return newvdesc.fltindex - - - def request_texture_index(self, image): - match = None - for i in xrange(len(self.texture_lst)): - if self.texture_lst[i] != image: - continue - match = i - break - if match != None: - return match - else: - self.texture_lst.append(image) - return len(self.texture_lst) - 1 - - def request_texture_filename(self, index): - return Blender.sys.expandpath(self.texture_lst[index].getFilename()) - - def texture_count(self): - return len(self.texture_lst) - - def request_material_index(self, desc): - match = None - for i in xrange(len(self.material_lst)): - if self.material_lst[i].diffuse != desc.diffuse: - continue - if self.material_lst[i].specular != desc.specular: - continue - if self.material_lst[i].ambient != desc.ambient: - continue - if self.material_lst[i].emissive != desc.emissive: - continue - if self.material_lst[i].shininess != desc.shininess: - continue - if self.material_lst[i].alpha != desc.alpha: - continue - match = i - break - - if match != None: - return i - else: - self.material_lst.append(desc) - return len(self.material_lst) - 1 - - def request_material_desc(self, index): - return self.material_lst[index] - - def material_count(self): - return len(self.material_lst) - - # Returns not actual index but one that includes intensity information. - # color_index = 127*intensity + 128*actual_index - def request_color_index(self, col): - r,g,b = tuple(col) - m = max(r, g, b) - if m > 0.0: - intensity = m / 1.0 - r = int(round(r/m * 255.0)) - g = int(round(g/m * 255.0)) - b = int(round(b/m * 255.0)) - brightest = [r, g, b] - else: - brightest = [255, 255, 255] - intensity = 0.0 - - match = None - for i in xrange(len(self.color_lst)): - if self.color_lst[i] != brightest: - continue - - match = i - break - - if match != None: - index = match - else: - length = len(self.color_lst) - if length <= 1024: - self.color_lst.append(brightest) - index = length - else: - if options.verbose >= 1: - print 'Warning: Exceeded max color limit.' 
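[Editor's note] The comment above request_color_index spells out the packing rule it implements: color_index = 127*intensity + 128*actual_index, where intensity is the brightest channel and the palette stores the normalized color. A minimal standalone sketch of that rule, using a plain list of RGB triples in place of the exporter's GlobalResourceRepository state (the helper names here are illustrative, not part of the script):

    # Sketch of the FLT color-index packing used by request_color_index:
    # the low 7 bits carry intensity (0..127), multiples of 128 select a palette slot.
    def pack_color_index(rgb, palette):
        r, g, b = rgb
        m = max(r, g, b)
        if m > 0.0:
            intensity = m                                   # brightest channel, 0.0..1.0
            brightest = [int(round(c / m * 255.0)) for c in (r, g, b)]
        else:
            intensity = 0.0
            brightest = [255, 255, 255]
        if brightest not in palette:
            palette.append(brightest)                       # the exporter stops adding past 1024 entries
        index = palette.index(brightest)
        return int(round(127.0 * intensity)) + 128 * index

    def unpack_color_index(color_index):
        # reverse lookup: palette slot and approximate intensity
        return color_index // 128, (color_index % 128) / 127.0

    palette = [[255, 255, 255]]                             # slot 0 is white, as in the exporter
    ci = pack_color_index((0.5, 0.25, 0.25), palette)       # a half-intensity red -> 192
    slot, intensity = unpack_color_index(ci)                # -> (1, ~0.5)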
- index = 0 - - color_index = int(round(127.0*intensity)) + 128*index - return color_index - - # Returns color from actual index. - def request_max_color(self, index): - return self.color_lst[index] - - def color_count(self): - return len(self.color_lst) - - def __init__(self): - #Vertex handling - self.vertex_lst = [] - self.nextvindex = 0 - self.namehash = dict() - - self.texture_lst = [] - self.material_lst = [] - self.color_lst = [[255, 255, 255]] - self.face_name = 0 - -class Node: - # Gathers info from blender needed for export. - # The =[0] is a trick to emulate c-like static function variables - # that are persistant between calls. - def blender_export(self, level=[0]): - if self.object: - if options.verbose >= 2: - print '\t' * level[0], self.name, self.object.type - level[0] += 1 - - self.children.reverse() - for child in self.children: - child.blender_export() - - level[0] -= 1 - - # Exports this node's info to file. - def write(self): - pass - - def write_matrix(self): - if self.matrix and not is_identity(self.matrix): - self.header.fw.write_short(49) # Matrix opcode - self.header.fw.write_ushort(68) # Length of record - for i in xrange(4): - for j in xrange(4): - self.header.fw.write_float(self.matrix[i][j]) - - def write_push(self): - self.header.fw.write_short(10) - self.header.fw.write_ushort(4) - - def write_pop(self): - self.header.fw.write_short(11) - self.header.fw.write_ushort(4) - - def write_push_extension(self): - self.header.fw.write_short(21) - self.header.fw.write_ushort(24) - self.header.fw.pad(18) - self.header.fw.write_ushort(0) - - def write_pop_extension(self): - self.header.fw.write_short(22) - self.header.fw.write_ushort(24) - self.header.fw.pad(18) - self.header.fw.write_ushort(0) - - def write_longid(self, name): - length = len(name) - if length >= 8: - self.header.fw.write_short(33) # Long ID opcode - self.header.fw.write_ushort(length+5) # Length of record - self.header.fw.write_string(name, length+1) # name + zero terminator - - def write_comment(self,comment): - length = len(comment) - if length >= 65535: - comment = comment[:65530] - length = len(comment) - - pad = (length % 4) - 1 - if pad < 0: - pad = None - reclength = length + 5 - else: - reclength = length + 5 + pad - - self.header.fw.write_short(31) # Comment Opcode - self.header.fw.write_ushort(reclength) # Length of record is 4 + comment length + null terminator + pad - self.header.fw.write_string(comment,length+1) # comment + zero terminator - if pad: - self.header.fw.pad(pad) # pad to multiple of 4 bytes - - # Initialization sets up basic tree structure. - def __init__(self, parent, header, object,props): - global options - - self.header = header - self.object = object - if object: - self.name = self.object.name - if not options.state['transform']: - oloc = Blender.Mathutils.Vector(object.getLocation('worldspace')) - vec = Blender.Mathutils.Vector(oloc[0] * options.state['scale'], oloc[1] * options.state['scale'], oloc[2] * options.state['scale']) #scale - self.matrix = self.object.getMatrix('worldspace') * Blender.Mathutils.TranslationMatrix(vec - oloc) - else: - self.matrix = self.object.getMatrix('localspace') #do matrix mult here. - self.props = props - self.child_objects = self.header.parenthash[object.name] - else: - self.name = 'no name' - self.matrix = None - self.props = None - self.child_objects = self.header.child_objects - - self.children = [] - self.parent = parent - if parent: - parent.children.append(self) - - # Spawn children. 
- for child in self.child_objects: - if(not child.restrictDisplay): - childprops = None - ftype = None - if not child.properties.has_key('FLT'): - if child.type == 'Empty': - if child.DupGroup: - childprops = FLTXRef.copy() - ftype = 63 - else: - childprops = FLTGroup.copy() - ftype = 2 - elif child.type == 'Mesh': - if self.header.childhash[child.name] or not child.parent: - childprops = FLTGroup.copy() - ftype = 2 - else: - childprops = FLTObject.copy() - ftype = 4 - - else: - childprops = dict() - for prop in child.properties['FLT']: - childprops[prop] = child.properties['FLT'][prop] - ftype = child.properties['FLT']['type'] - - if ftype in self.childtypes and ftype in alltypes: - Newnode = FLTNode(self,header,child,childprops,ftype) - if child.type == 'Mesh': - self.header.mnodes.append(Newnode) -class FaceDesc: - def __init__(self): - self.vertex_index_lst = [] - self.mface = None - self.texture_index = 65535 - self.material_index = 65535 - self.color_index = 127 - self.renderstyle = 0 - self.twoside = 0 - self.name = None #uses next FLT name if not set... fix resolution of conflicts! - self.billboard = 0 - - #Multi-Tex info. Dosn't include first UV Layer! - self.uvlayer = list() #list of list of tuples for UV coordinates. - self.images = list() #list of texture indices for seperate UV layers - self.mtex = list() - self.subface = None #can either be 'Push' or 'Pop' - -def edge_get_othervert(vert, edge): - if edge.v1 == vert: - return edge.v2 - elif edge.v2 == vert: - return edge.v1 - return None - -class FLTNode(Node): - def walkLoop(self, targetvert, startvert, startedge, edgelist, visited, vedges, closeloop): - loop = [targetvert] - - curvert = startvert - curedge = startedge - visited[curedge] = True - found = False - - while not found: - loop.append(curvert) - disk = vedges[curvert.index] - if not closeloop: - if len(disk) == 1: - visited[curedge] = True - break - else: - if len(disk) < 2: #what? 
- visited[curedge] = True - return None - - if disk[0] == curedge: - curedge = disk[1] - else: - curedge = disk[0] - if curedge.v1.index == curvert.index: - curvert = curedge.v2 - else: - curvert = curedge.v1 - - visited[curedge] = True - - if(curvert == targetvert): - found = True - - return loop - - def buildVertFaces(self,vertuse): - for vert in self.exportmesh.verts: - if vertuse[vert.index][0] == False and vertuse[vert.index][1] == 0: - face_desc = FaceDesc() - face_desc.vertex_index_lst.append(self.header.GRR.request_vertex_index(self.object, self.exportmesh, vert, 0,0,0)) - face_desc.renderstyle = 3 - face_desc.color_index = 227 - self.face_lst.append(face_desc) - - def buildEdgeFaces(self,vertuse): - for edge in self.exportmesh.edges: - v1 = vertuse[edge.v1.index] - v2 = vertuse[edge.v2.index] - if v1[0] == False and v2[0] == False: - if v1[1] == 1 and v2[1] == 1: - face_desc = FaceDesc() - face_desc.vertex_index_lst.append(self.header.GRR.request_vertex_index(self.object, self.exportmesh, edge, 1, 0,0)) - face_desc.vertex_index_lst.append(self.header.GRR.request_vertex_index(self.object, self.exportmesh, edge, 2, 0,0)) - face_desc.renderstyle = 3 - face_desc.color_index = 227 - self.face_lst.append(face_desc) - - - def vertwalk(self, startvert, loop, disk, visited): - visited[startvert] = True - for edge in disk[startvert]: - othervert = edge_get_othervert(startvert, edge) - if not visited[othervert]: - loop.append(othervert) - self.vertwalk(othervert,loop,disk,visited) - - def buildOpenFacesNew(self, vertuse): - wireverts = list() - wiredges = list() - visited = dict() - disk = dict() - loops = list() - - for edge in self.exportmesh.edges: - v1 = vertuse[edge.v1.index] - v2 = vertuse[edge.v2.index] - if v1[0] == False and v2[0] == False: - if v1[1] < 3 and v2[1] < 3: - wireverts.append(edge.v1) - wireverts.append(edge.v2) - wiredges.append(edge) - - #build disk data - for vert in wireverts: - visited[vert] = False - disk[vert] = list() - for edge in wiredges: - disk[edge.v1].append(edge) - disk[edge.v2].append(edge) - - #first pass: do open faces - for vert in wireverts: - if not visited[vert] and vertuse[vert.index][1] == 1: - loop = list() - done = 0 - startvert = vert - while not done: - done = 1 - visited[startvert] = True - loop.append(startvert) - for edge in disk[startvert]: - othervert = edge_get_othervert(startvert, edge) - if not visited[othervert]: - done = 0 - startvert = othervert - break - if len(loop) > 2: loops.append( ('Open', loop) ) - for vert in wireverts: - if not visited[vert]: - loop = list() - done = 0 - startvert = vert - while not done: - done = 1 - visited[startvert] = True - loop.append(startvert) - for edge in disk[startvert]: - othervert = edge_get_othervert(startvert,edge) - if not visited[othervert]: - done = 0 - startvert = othervert - break - if len(loop) > 2: loops.append( ('closed', loop) ) - - #now go through the loops and append. 
- for l in loops: - (ftype, loop) = l - face_desc = FaceDesc() - for i,vert in enumerate(loop): - face_desc.vertex_index_lst.append(self.header.GRR.request_vertex_index(self.object,self.exportmesh,loop,i,0,0)) - if ftype == 'closed': - face_desc.renderstyle = 2 - else: - face_desc.renderstyle = 3 - face_desc.color_index = 227 - self.face_lst.append(face_desc) - - - - def sortFLTFaces(self,a,b): - aindex = a.getProperty("FLT_ORIGINDEX") - bindex = b.getProperty("FLT_ORIGINDEX") - - if aindex > bindex: - return 1 - elif aindex < bindex: - return -1 - return 0 - - def buildNormFaces(self): - - global options - meshlayers = self.exportmesh.getUVLayerNames() - oldlayer = self.exportmesh.activeUVLayer - uvok = 0 - subfaceok = 0 - subfacelevel = 0 - - #special case - if self.exportmesh.faceUV and len(meshlayers) == 1: - uvok = 1 - elif self.exportmesh.faceUV and tex_layers[0] in meshlayers: - self.exportmesh.activeUVLayer = tex_layers[0] - uvok = 1 - - #Sort faces according to the subfaces/FLT indices - if "FLT_ORIGINDEX" in self.exportmesh.faces.properties and "FLT_SFLEVEL" in self.exportmesh.faces.properties: - exportfaces = list() - for face in self.exportmesh.faces: - exportfaces.append(face) - exportfaces.sort(self.sortFLTFaces) - subfaceok = 1 - else: - exportfaces = self.exportmesh.faces - - # Faces described as lists of indices into the GRR's vertex_lst. - for face in exportfaces: - descs = list() - #first we export the face as normal - index_lst = [] - face_v = face.verts - for i, v in enumerate(face_v): - index_lst.append(self.header.GRR.request_vertex_index(self.object,self.exportmesh,face,i,uvok,0)) - face_desc = FaceDesc() - face_desc.vertex_index_lst = index_lst - face_desc.mface = face - descs.append(face_desc) - - #deal with subfaces - if subfaceok: - fsflevel = face.getProperty("FLT_SFLEVEL") - for face_desc in descs: - if fsflevel > subfacelevel: - face_desc.subface = 'Push' - subfacelevel = fsflevel - elif fsflevel < subfacelevel: - face_desc.subface = 'Pop' - subfacelevel = fsflevel - - - if uvok and (face.mode & Blender.Mesh.FaceModes.TWOSIDE): - face_desc.renderstyle = 1 - for face_desc in descs: - if "FLT_COL" in self.exportmesh.faces.properties: - color_index = face.getProperty("FLT_COL") -# if(color_index < 127): -# color_index = 127 #sanity check for face color indices - if(color_index == 0): - color_index = 127 - face_desc.color_index = color_index - else: - face_desc.color_index = 127 - if "FLT_ID" in self.exportmesh.faces.properties: - face_desc.name = face.getProperty("FLT_ID") #need better solution than this. - - if uvok and face.mode & Blender.Mesh.FaceModes["BILLBOARD"]: - face_desc.billboard = 1 - - self.face_lst.append(face_desc) - if uvok: - self.exportmesh.activeUVLayer = oldlayer - - def buildTexData(self): - - meshlayers = self.exportmesh.getUVLayerNames() - oldlayer = self.exportmesh.activeUVLayer - uvok = 0 - - if self.exportmesh.faceUV and len(meshlayers) == 1: - uvok = 1 - if self.exportmesh.faceUV and tex_layers[0] in meshlayers: - self.exportmesh.activeUVLayer = tex_layers[0] - uvok = 1 - - if uvok: - #do base layer. UVs have been stored on vertices directly already. 
- for i, face in enumerate(self.face_lst): - if face.mface: - mface = face.mface - image = mface.image - if image != None and mface.mode & Blender.Mesh.FaceModes["TEX"]: - index = self.header.GRR.request_texture_index(image) - else: - index = -1 - face.texture_index = index - - for i, face in enumerate(self.face_lst): - if face.mface: - mface_v = face.mface.v - for v in mface_v: - face.uvlayer.append([]) - - for layername in tex_layers[1:]: - if layername in meshlayers: - self.exportmesh.activeUVLayer=layername - for i, face in enumerate(self.face_lst): - if face.mface: - - face.mtex.append(layername) - mface = face.mface - mface_v = mface.v - image = mface.image - - if image != None and mface.mode & Blender.Mesh.FaceModes["TEX"]: - index = self.header.GRR.request_texture_index(image) - face.images.append(index) - else: - face.images.append(-1) - - for j, v in enumerate(mface_v): - face.uvlayer[j].append(tuple(mface.uv[j])) - if uvok: - self.exportmesh.activeUVLayer = oldlayer - def blender_export(self): - global options - Node.blender_export(self) - if self.opcode == 111: - self.exportmesh = Blender.Mesh.New() - self.exportmesh.getFromObject(self.object.name) - - for vert in self.exportmesh.verts: - if not options.state['transform']: - vec = vert.co - vec = Blender.Mathutils.Vector(vec[0] * options.state['scale'], vec[1] * options.state['scale'], vec[2] * options.state['scale']) #scale - vert.co = Blender.Mathutils.TranslationMatrix(vec) * (vert.co * self.object.getMatrix('worldspace')) - - if options.state['scale'] != 1.0: - vert.co = vert.co * options.state['scale'] - - if("FLT_VCOL") in self.mesh.verts.properties: - for v in self.exportmesh.verts: - self.vert_lst.append(self.header.GRR.request_vertex_index(self.object,self.exportmesh,v,0,0,v.getProperty("FLT_VCOL"))) - else: - for v in self.mesh.verts: - self.vert_lst.append(self.header.GRR.request_vertex_index(self.object,self.mesh,v,0,0,127)) - - - - elif self.mesh: - orig_mesh = self.object.getData(mesh=True) - self.exportmesh = Blender.Mesh.New() - default = None - - - if options.state['export_shading']: - mods = self.object.modifiers - hasedsplit = False - for mod in mods: - if mod.type == Blender.Modifier.Types.EDGESPLIT: - hasedsplit = True - break - if not hasedsplit: - default = mods.append(Modifier.Types.EDGESPLIT) - default[Modifier.Settings.EDGESPLIT_ANGLE] = options.state['shading_default'] - default[Modifier.Settings.EDGESPLIT_FROM_ANGLE] = True - default[Modifier.Settings.EDGESPLIT_FROM_SHARP] = False - self.object.makeDisplayList() - - self.exportmesh.getFromObject(self.object.name) - - #recalculate vertex positions - for vert in self.exportmesh.verts: - if not options.state['transform']: - vec = vert.co - vec = Blender.Mathutils.Vector(vec[0] * options.state['scale'], vec[1] * options.state['scale'], vec[2] * options.state['scale']) #scale - vert.co = Blender.Mathutils.TranslationMatrix(vec) * (vert.co * self.object.getMatrix('worldspace')) - - if options.state['scale'] != 1.0: - vert.co = vert.co * options.state['scale'] - - flipped = self.object.getMatrix('worldspace').determinant() - - if not options.state['transform']: - self.exportmesh.calcNormals() - - - if default: - #remove modifier from list - mods.remove(default) - self.object.makeDisplayList() - - #build some adjacency data - vertuse = list() - wiredges = list() - openends = list() - for v in self.exportmesh.verts: - vertuse.append([False,0]) - - #build face incidence data - for face in self.exportmesh.faces: - for i, v in enumerate(face.verts): - 
vertuse[v.index][0] = True - - for edge in self.exportmesh.edges: #count valance - vertuse[edge.v1.index][1] = vertuse[edge.v1.index][1] + 1 - vertuse[edge.v2.index][1] = vertuse[edge.v2.index][1] + 1 - - #create all face types - self.buildVertFaces(vertuse) - self.buildEdgeFaces(vertuse) - self.buildOpenFacesNew(vertuse) - self.buildNormFaces() - self.buildTexData() - - if not options.state['transform']: - if flipped < 0: - for vdesc in self.header.GRR.vertex_lst: - vdesc.accum = 0 - for face in self.face_lst: - face.vertex_index_lst.reverse() - for vert in face.vertex_index_lst: - self.header.GRR.vertex_lst[vert].accum = 1 - - for vdesc in self.header.GRR.vertex_lst: - if vdesc.accum: - vdesc.nx = vdesc.nx * -1 - vdesc.ny = vdesc.ny * -1 - vdesc.nz = vdesc.nz * -1 - - - def write_faces(self): - sublevel = 0 - for face_desc in self.face_lst: - if face_desc.name: - face_name = face_desc.name - else: - face_name = self.header.GRR.new_face_name() - - #grab the alpha value. - alpha = 0 - if face_desc.texture_index > -1: - try: - typestring = os.path.splitext(self.header.GRR.texture_lst[face_desc.texture_index].getFilename())[1] - if typestring == '.inta' or typestring == '.rgba': - alpha = 1 - except: - pass - - if not alpha: - for index in face_desc.images: - try: - typestring = os.path.splitext(self.header.GRR.texture_lst[index].getFilename())[1] - if typestring == '.inta' or typestring == '.rgba': - alpha = 1 - except: - pass - - if face_desc.billboard: - alpha = 2 - - if face_desc.subface: - if face_desc.subface == 'Push': - self.header.fw.write_short(19) - self.header.fw.write_ushort(4) - sublevel += 1 - else: - self.header.fw.write_short(20) - self.header.fw.write_ushort(4) - sublevel -= 1 - self.header.fw.write_short(5) # Face opcode - self.header.fw.write_ushort(80) # Length of record - self.header.fw.write_string(face_name, 8) # ASCII ID - self.header.fw.write_int(-1) # IR color code - self.header.fw.write_short(0) # Relative priority - self.header.fw.write_char(face_desc.renderstyle) # Draw type - self.header.fw.write_char(0) # Draw textured white. 
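[Editor's note] Every record emitted in write_faces follows the same envelope the face record above opens with: a big-endian signed 16-bit opcode, an unsigned 16-bit record length, then a fixed-layout payload. A rough sketch of what the fw.write_* calls amount to (Python 2, matching the script; an illustration only, not the actual FltOut implementation from flt_filewalker.py):

    import struct

    def record_header(opcode, length):
        # every OpenFlight record starts with >h opcode, >H total record length
        return struct.pack('>hH', opcode, length)

    def write_short(v):   return struct.pack('>h', v)
    def write_ushort(v):  return struct.pack('>H', v)
    def write_int(v):     return struct.pack('>i', v)
    def write_string(s, length):
        return struct.pack('>%ds' % length, s)   # zero-padded fixed-width field

    # e.g. the opening fields of a Face record (opcode 5, 80 bytes total):
    face = record_header(5, 80) + write_string('f1', 8) + write_int(-1) + write_short(0)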
- self.header.fw.write_ushort(0) # Color name index - self.header.fw.write_ushort(0) # Alt color name index - self.header.fw.write_char(0) # Reserved - self.header.fw.write_char(alpha) # Template - self.header.fw.write_short(-1) # Detail tex pat index - if face_desc.texture_index == -1: - self.header.fw.write_ushort(65535) - else: - self.header.fw.write_ushort(face_desc.texture_index) # Tex pattern index - if face_desc.material_index == -1: - self.header.fw.write_ushort(65535) - else: - self.header.fw.write_ushort(face_desc.material_index) # material index - self.header.fw.write_short(0) # SMC code - self.header.fw.write_short(0) # Feature code - self.header.fw.write_int(0) # IR material code - self.header.fw.write_ushort(0) # transparency 0 = opaque - self.header.fw.write_uchar(0) # LOD generation control - self.header.fw.write_uchar(0) # line style index - self.header.fw.write_int(0) # Flags - self.header.fw.write_uchar(2) # Light mode - #self.header.fw.write_uchar(3) # Light mode - - self.header.fw.pad(7) # Reserved - self.header.fw.write_uint(0) # Packed color - self.header.fw.write_uint(0) # Packed alt color - self.header.fw.write_short(-1) # Tex map index - self.header.fw.write_short(0) # Reserved - self.header.fw.write_uint(face_desc.color_index) # Color index - self.header.fw.write_uint(127) # Alt color index - self.header.fw.write_short(0) # Reserved - self.header.fw.write_short(-1) # Shader index - - self.write_longid(face_name) - - - #Write Multitexture field if appropriate - mtex = len(face_desc.mtex) - if mtex: - uvmask = 0 - for layername in face_desc.mtex: - mask = mtexmasks[tex_layers.index(layername)-1] - uvmask |= mask - self.header.fw.write_ushort(52) # MultiTexture Opcode - self.header.fw.write_ushort(8 + (mtex * 8)) # Length - self.header.fw.write_uint(uvmask) # UV mask - for i in xrange(mtex): - if face_desc.images[i] == -1: - self.header.fw.write_ushort(65535) - else: - self.header.fw.write_ushort(face_desc.images[i]) # Tex pattern index - self.header.fw.write_ushort(0) # Tex effect - self.header.fw.write_ushort(0) # Tex Mapping index - self.header.fw.write_ushort(0) # Tex data. User defined - - self.write_push() - - # Vertex list record - self.header.fw.write_short(72) # Vertex list opcode - num_verts = len(face_desc.vertex_index_lst) - self.header.fw.write_ushort(4*num_verts+4) # Length of record - - for vert_index in face_desc.vertex_index_lst: - # Offset into vertex palette - self.header.fw.write_int(vert_index*64+8) - - #UV list record - if mtex: - #length = 8 + (numverts * multitex * 8) - self.header.fw.write_ushort(53) # UV List Ocode - self.header.fw.write_ushort(8 + (num_verts*mtex*8)) # Record Length - self.header.fw.write_uint(uvmask) # UV mask - for i, vert_index in enumerate(face_desc.vertex_index_lst): - for uv in face_desc.uvlayer[i]: - self.header.fw.write_float(uv[0]) #U coordinate - self.header.fw.write_float(uv[1]) #V coordinate - self.write_pop() - #clean up faces at the end of meshes.... 
- if sublevel: - self.header.fw.write_short(20) - self.header.fw.write_ushort(4) - - def write_lps(self): - # Vertex list record - self.write_push() - self.header.fw.write_short(72) # Vertex list opcode - num_verts = len(self.vert_lst) - self.header.fw.write_ushort(4*num_verts+4) # Length of record - - for vert_index in self.vert_lst: - # Offset into vertex palette - self.header.fw.write_int(vert_index*64+8) - self.write_pop() - def write(self): - self.header.fw.write_short(self.opcode) - self.header.fw.write_ushort(recordlen[self.opcode]) - exportdict = FLT_Records[self.opcode].copy() - if self.object: - self.props['3t8!id'] = self.object.name[:7] - for key in exportdict.keys(): - if self.props.has_key(key): - exportdict[key] = self.props[key] - - if self.opcode == 63 and options.state['externalspath']: - try: - exportdict['3t200!filename'] = os.path.join(options.state['externalspath'],self.object.DupGroup.name+'.flt').replace("\\", "/") - self.header.xrefnames.append(self.object.DupGroup.name) - except: - pass - - for key in records[self.opcode]: - (ftype,length,propname) = records[self.opcode][key] - write_prop(self.header.fw,ftype,exportdict[propname],length) - - if self.props.has_key('comment'): - self.write_comment(self.props['comment']) - - if self.object and self.object.properties.has_key('FLT') and self.object.properties['FLT'].has_key('EXT'): - datalen = len(self.object.properties['FLT']['EXT']['data']) - self.write_push_extension() - self.header.fw.write_short(100) - self.header.fw.write_ushort(24 + datalen) - for key in records[100]: - (ftype,length,propname) = records[100][key] - write_prop(self.header.fw,ftype,self.object.properties['FLT']['EXT'][propname],length) - #write extension data - for i in xrange(datalen): - self.header.fw.write_uchar(struct.unpack('>B', struct.pack('>B', self.object.properties['FLT']['EXT']['data'][i]))[0]) - self.write_pop_extension() - - - self.write_longid(self.name) #fix this! - - if options.state['transform'] or self.opcode == 63: - #writing transform matrix.... - self.write_matrix() - - if self.opcode == 111: - self.write_lps() - elif self.face_lst != [] or self.children: - self.write_push() - if self.face_lst != []: - #self.write_push() - self.write_faces() - #self.write_pop() - - if self.children: - #self.write_push() - for child in self.children: - child.write() - #self.write_pop() - self.write_pop() - - def __init__(self, parent, header, object,props,ftype): - self.opcode = ftype #both these next two lines need to be in the node class.... - self.childtypes = childtypes[self.opcode] - Node.__init__(self, parent, header, object,props) - self.face_lst = [] - self.vert_lst = [] #for light points. - self.mesh = None - self.uvlayer = 0 - self.flipx = False - self.flipy = False - self.flipz = False - - - if self.object.type == 'Mesh': - self.mesh = self.object.getData(mesh=True) - if(self.mesh.faceUV): - self.uvLayer = len(self.mesh.getUVLayerNames()) - -class Database(Node): - def write_header(self): - if options.verbose >= 2: - print 'Writing header.' - self.fw.write_short(1) # Header opcode - self.fw.write_ushort(324) # Length of record - self.fw.write_string('db', 8) # ASCII ID - self.fw.write_int(1600) # Revision Number - self.fw.pad(44) - self.fw.write_short(1) # Unit multiplier. 
- self.fw.write_char(0) # Units, 0 = meters - self.fw.write_char(0) # texwhite on new faces 0 = false - self.fw.write_uint(0x80000000) # misc flags set to saving vertex normals - self.fw.pad(24) - self.fw.write_int(0) # projection type, 0 = flat earth - self.fw.pad(30) - self.fw.write_short(1) # double precision - self.fw.write_int(100) # database origin type - self.fw.pad(88) - try: - self.fw.write_double(self.header.scene.properties['FLT']['origin lat']) #database origin lattitude - except: - self.fw.write_double(0) - try: - self.fw.write_double(self.header.scene.properties['FLT']['origin lon']) #database origin longitude - except: - self.fw.write_double(0) - self.fw.pad(32) - self.fw.write_int(0) # ellipsoid model, 0 = WSG 1984 - - self.fw.pad(52) - - def write_vert_pal(self): - if options.verbose >= 2: - print 'Writing vertex palette.' - # Write record for vertex palette - self.fw.write_short(67) # Vertex palette opcode. - self.fw.write_short(8) # Length of record - self.fw.write_int(self.GRR.vertex_count() * 64 + 8) # Length of everything. - # Write records for individual vertices. - for i in xrange(self.GRR.vertex_count()): - desc = self.GRR.request_vertex_desc(i) - self.fw.write_short(70) # Vertex with color normal and uv opcode. - self.fw.write_ushort(64) # Length of record - self.fw.write_ushort(0) # Color name index - self.fw.write_short(1 << 14) # Frozen Normal - self.fw.write_double(desc.x) - self.fw.write_double(desc.y) - self.fw.write_double(desc.z) - self.fw.write_float(desc.nx) - self.fw.write_float(desc.ny) - self.fw.write_float(desc.nz) - self.fw.write_float(desc.u) - self.fw.write_float(desc.v) - self.fw.pad(4) - self.fw.write_uint(desc.cindex) - self.fw.pad(4) - - def write_tex_pal(self): - if options.verbose >= 2: - print 'Writing texture palette.' - # Write record for texture palette - for i, img in enumerate(self.GRR.texture_lst): - filename = tex_files[img.name] - self.fw.write_short(64) # Texture palette opcode. - self.fw.write_short(216) # Length of record - self.fw.write_string(filename, 200) # Filename - self.fw.write_int(i) # Texture index - self.fw.write_int(0) # X - self.fw.write_int(0) # Y - - def write_mat_pal(self): - if options.verbose >= 2: - print 'Writing material palette.' - for i in xrange(self.GRR.material_count()): - desc = self.GRR.request_material_desc(i) - self.fw.write_short(113) # Material palette opcode. - self.fw.write_short(84) # Length of record - self.fw.write_int(i) # Material index - self.fw.write_string(desc.name, 12) # Material name - self.fw.write_uint(0x80000000) # Flags - self.fw.write_float(desc.ambient[0]) # Ambient color. - self.fw.write_float(desc.ambient[1]) # Ambient color. - self.fw.write_float(desc.ambient[2]) # Ambient color. - self.fw.write_float(desc.diffuse[0]) # Diffuse color. - self.fw.write_float(desc.diffuse[1]) # Diffuse color. - self.fw.write_float(desc.diffuse[2]) # Diffuse color. - self.fw.write_float(desc.specular[0]) # Specular color. - self.fw.write_float(desc.specular[1]) # Specular color. - self.fw.write_float(desc.specular[2]) # Specular color. - self.fw.write_float(desc.emissive[0]) # Emissive color. - self.fw.write_float(desc.emissive[1]) # Emissive color. - self.fw.write_float(desc.emissive[2]) # Emissive color. - self.fw.write_float(desc.shininess) - self.fw.write_float(desc.alpha) - self.fw.write_int(0) # Reserved - - def write_col_pal(self): - if options.verbose >= 2: - print 'Writing color palette.' - self.fw.write_short(32) # Color palette opcode. 
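[Editor's note] The vertex list records written earlier (in write_faces and write_lps) point into this vertex palette by byte offset rather than by index: the palette record itself is 8 bytes, and each "vertex with color, normal and uv" record is 64 bytes, hence the vert_index*64 + 8 expression. A small sketch of that addressing, with illustrative helper names:

    VERT_PAL_HEADER = 8    # opcode 67 record: short + short + int, as written above
    VERT_RECORD_LEN = 64   # opcode 70 record length, as written in write_vert_pal

    def vertex_palette_offset(vert_index):
        # byte offset of one vertex inside the palette block; this is the value
        # write_faces()/write_lps() store in each vertex list record
        return vert_index * VERT_RECORD_LEN + VERT_PAL_HEADER

    def vertex_palette_length(vertex_count):
        # the "length of everything" field written by write_vert_pal()
        return vertex_count * VERT_RECORD_LEN + VERT_PAL_HEADER

    assert vertex_palette_offset(0) == 8
    assert vertex_palette_length(10) == 648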
- self.fw.write_short(4228) # Length of record - self.fw.pad(128) - try: - cpalette = self.scene.properties['FLT']['Color Palette'] - except: - cpalette = defaultp.pal - count = len(cpalette) - for i in xrange(count): - color = struct.unpack('>BBBB',struct.pack('>i',cpalette[i])) - self.fw.write_uchar(color[3]) # alpha - self.fw.write_uchar(color[2]) # b - self.fw.write_uchar(color[1]) # g - self.fw.write_uchar(color[0]) # r - self.fw.pad(max(4096-count*4, 0)) - - def write(self): - self.write_header() - self.write_vert_pal() - self.write_tex_pal() - self.write_mat_pal() - self.write_col_pal() - - self.write_push() - - for child in self.children: - child.write() - self.write_pop() - - def export_textures(self,texturepath): - for i in xrange(self.GRR.texture_count()): - texture = self.GRR.texture_lst[i] - - if options.state['copytex']: - filename = os.path.normpath(os.path.join(options.state['texturespath'], os.path.basename(self.GRR.request_texture_filename(i)))) - else: - filename = os.path.normpath(self.GRR.request_texture_filename(i)) - - tex_files[texture.name] = filename - - def blender_export(self): - Node.blender_export(self) - self.export_textures(self) - return self.xrefnames - def __init__(self, scene, fw): - self.fw = fw - self.opcode = 1 - self.childtypes = [73,14,2,63] - self.scene = scene - self.childhash = dict() - self.parenthash = dict() - self.child_objects = list() - self.mnodes = list() - self.xrefnames = list() - for i in self.scene.objects: - self.parenthash[i.name] = list() - self.childhash[i.name] = False - for i in self.scene.objects: - if i.parent: - self.childhash[i.parent.name] = True - self.parenthash[i.parent.name].append(i) - else: - self.child_objects.append(i) - - self.GRR = GlobalResourceRepository() - Node.__init__(self, None, self, None,None) - -def write_attribute_files(): - for imgname in tex_files: - blentex = Blender.Image.Get(imgname) - exportdict = FLT_Records['Image'].copy() - - if blentex.properties.has_key('FLT'): - for key in exportdict.keys(): - if blentex.properties.has_key(key): - exportdict[key] = blentex.properties['FLT'][key] - - # ClampX/Y override - if blentex.clampX: - exportdict['11i!WrapU'] = 1 - if blentex.clampY: - exportdict['12i!WrapV'] = 1 - - exportdict['16i!Enviorment'] = 0 - - # File type - typecode = 0 - try: - typestring = os.path.splitext(blentex.getFilename())[1] - - if typestring == '.rgba': - typecode = 5 - elif typestring == '.rgb': - typecode = 4 - elif typestring == '.inta': - typecode = 3 - elif typestring == '.int': - typecode = 2 - except: - pass - - exportdict['7i!File Format'] = typecode - - fw = FltOut(tex_files[imgname] + '.attr') - size = blentex.getSize() - fw.write_int(size[0]) - fw.write_int(size[1]) - for key in records['Image']: - (ftype,length,propname) = records['Image'][key] - write_prop(fw,ftype,exportdict[propname],length) - fw.close_file() - -#globals used by the scene export function -exportlevel = None -xrefsdone = None - -def dbexport_internal(scene): - global exportlevel - global xrefsdone - global options - - if exportlevel == 0 or not options.state['externalspath']: - fname = os.path.join(options.state['basepath'],scene.name + '.flt') - else: - fname = os.path.join(options.state['externalspath'],scene.name + '.flt') - - fw = FltOut(fname) - db = Database(scene,fw) - - if options.verbose >= 1: - print 'Pass 1: Exporting ', scene.name,'.flt from Blender.\n' - - xreflist = db.blender_export() - if options.verbose >= 1: - print 'Pass 2: Writing %s\n' % fname - db.write() - fw.close_file() - - if 
options.state['doxrefs']: - for xname in xreflist: - try: - xrefscene = Blender.Scene.Get(xname) - except: - xrefscene = None - if xrefscene and xname not in xrefsdone: - xrefsdone.append(xname) - exportlevel+=1 - dbexport_internal(xrefscene) - exportlevel-=1 - return fname -#main database export function -def dbexport(): - global exportlevel - global xrefsdone - exportlevel = 0 - xrefsdone = list() - - Blender.Window.WaitCursor(True) - time1 = Blender.sys.time() # Start timing - - if options.verbose >= 1: - print '\nOpenFlight Exporter' - print 'Version:', __version__ - print 'Author: Greg MacDonald, Geoffrey Bantle' - print __url__[2] - print - - fname = dbexport_internal(Blender.Scene.GetCurrent()) - if options.verbose >=1: - print 'Done in %.4f sec.\n' % (Blender.sys.time() - time1) - Blender.Window.WaitCursor(False) - - #optional: Copy textures - if options.state['copytex']: - for imgname in tex_files: - #Check to see if texture exists in target directory - if not os.path.exists(tex_files[imgname]): - #Get original Blender file name - origpath = Blender.sys.expandpath(Blender.Image.Get(imgname).getFilename()) - #copy original to new - if os.path.exists(origpath): - shutil.copyfile(origpath,tex_files[imgname]) - - #optional: Write attribute files - if options.state['attrib']: - write_attribute_files() - - if options.state['xapp']: - cmd= options.state['xappath'] + " " + fname - status = os.system(cmd) - - -#Begin UI code -FLTExport = None -FLTClose = None -FLTLabel = None - -FLTBaseLabel = None -FLTTextureLabel = None -FLTXRefLabel = None - -FLTBaseString = None -FLTTextureString = None -FLTXRefString = None - -FLTBasePath = None -FLTTexturePath = None -FLTXRefPath = None - -FLTShadeExport = None -FLTShadeDefault = None - -FLTCopyTex = None -FLTDoXRef = None -FLTGlobal = None - -FLTScale = None - -FLTXAPP = None -FLTXAPPath = None -FLTXAPPString = None -FLTXAPPLabel = None -FLTXAPPChooser = None - -FLTAttrib = None - - -FLTWarn = None - -def setshadingangle(ID,val): - global options - options.state['shading_default'] = val -def setBpath(fname): - global options - options.state['basepath'] = os.path.dirname(fname) - #update xref and textures path too.... 
- if(os.path.exists(os.path.join(options.state['basepath'],'externals'))): - options.state['externalspath'] = os.path.join(options.state['basepath'],'externals') - if(os.path.exists(os.path.join(options.state['basepath'],'textures'))): - options.state['texturespath'] = os.path.join(options.state['basepath'],'textures') -def setexportscale(ID,val): - global options - options.state['scale'] = val - -def setTpath(fname): - global options - options.state['texturespath'] = os.path.dirname(fname) -def setXpath(fname): - global options - options.state['externalspath'] = os.path.dirname(fname) -def setXApath(fname): - global options - options.state['xappath'] = fname -def event(evt, val): - x = 1 -def but_event(evt): - global options - - global FLTExport - global FLTClose - global FLTLabel - - global FLTBaseLabel - global FLTTextureLabel - global FLTXRefLabel - - global FLTBaseString - global FLTTextureString - global FLTXRefString - - global FLTBasePath - global FLTTexturePath - global FLTXRefPath - - global FLTShadeExport - global FLTShadeDefault - - global FLTCopyTex - global FLTDoXRef - global FLTGlobal - - global FLTScale - - - global FLTXAPP - global FLTXAPPath - global FLTXAPPString - global FLTXAPPLabel - global FLTXAPPChooser - - global FLTAttrib - - global FLTWarn - - #choose base path for export - if evt == 4: - Blender.Window.FileSelector(setBpath, "DB Root", options.state['basepath']) - - #choose XREF path - if evt == 6: - Blender.Window.FileSelector(setXpath,"DB Externals",options.state['externalspath']) - - #choose texture path - if evt == 8: - Blender.Window.FileSelector(setTpath,"DB Textures",options.state['texturespath']) - - #export shading toggle - if evt == 9: - options.state['export_shading'] = FLTShadeExport.val - #export Textures - if evt == 11: - options.state['copytex']= FLTCopyTex.val - #export XRefs - if evt == 13: - options.state['doxrefs'] = FLTDoXRef.val - #export Transforms - if evt == 12: - options.state['transform'] = FLTGlobal.val - - if evt == 14: - options.state['xapp'] = FLTXAPP.val - if evt == 16: - Blender.Window.FileSelector(setXApath,"External Application",options.state['xappath']) - if evt == 20: - options.state['attrib'] = FLTAttrib.val - - #Export DB - if evt == 1: - try: - dbexport() - except Exception, inst: - import traceback - FLTWarn = Draw.PupBlock("Export Error", ["See console for output!"]) - traceback.print_exception(sys.exc_type, sys.exc_value, sys.exc_traceback) - - #exit - if evt == 2: - Draw.Exit() - - options.write_state() - -from Blender.BGL import * -from Blender import Draw -def gui(): - - global options - - global FLTExport - global FLTClose - global FLTLabel - - global FLTBaseLabel - global FLTTextureLabel - global FLTXRefLabel - - global FLTBaseString - global FLTTextureString - global FLTXRefString - - global FLTBasePath - global FLTTexturePath - global FLTXRefPath - - global FLTShadeExport - global FLTShadeDefault - - global FLTCopyTex - global FLTDoXRef - global FLTGlobal - - global FLTScale - - global FLTXAPP - global FLTXAPPath - global FLTXAPPString - global FLTXAPPLabel - global FLTXAPPChooser - - global FLTAttrib - - glClearColor(0.880,0.890,0.730,1.0 ) - glClear(GL_COLOR_BUFFER_BIT) - - areas = Blender.Window.GetScreenInfo() - curarea = Blender.Window.GetAreaID() - curRect = None - - for area in areas: - if area['id'] == curarea: - curRect = area['vertices'] - break - - width = curRect[2] - curRect[0] - height = curRect[3] - curRect[1] - #draw from top to bottom.... - cx = 50 - #Draw Title Bar... 
- #glRasterPos2d(cx, curRect[3]-100) - #FLTLabel = Draw.Text("FLT Exporter V2.0",'large') - cy = height - 80 - - FLTBaseLabel = Draw.Label("Base Path:",cx,cy,100,20) - FLTBaseString = Draw.String("",3,cx+100,cy,300,20,options.state['basepath'],255,"Folder to export to") - FLTBaseChooser = Draw.PushButton("...",4,cx+400,cy,20,20,"Choose Folder") - - cy = cy-40 - - #externals path - FLTXRefLabel = Draw.Label("XRefs:",cx,cy,100,20) - FLTXRefString = Draw.String("",5,cx+100,cy,300,20,options.state['externalspath'],255,"Folder for external references") - FLTXRefChooser = Draw.PushButton("...",6,cx+400,cy,20,20,"Choose Folder") - cy = cy-40 - #Textures path - FLTTextureLabel = Draw.Label("Textures:",cx,cy,100,20) - FLTTextureString = Draw.String("",7,cx+100,cy,300,20,options.state['texturespath'],255,"Folder for texture files") - FLTTextureChooser = Draw.PushButton("...",8,cx+400,cy,20,20,"Choose Folder") - cy=cy-40 - #External application path - FLTXAPPLabel = Draw.Label("XApp:",cx,cy,100,20) - FLTXAPPString = Draw.String("",15,cx+100,cy,300,20,options.state['xappath'],255,"External application to launch when done") - FLTXAPPChooser = Draw.PushButton("...",16,cx+400, cy,20,20,"Choose Folder") - - cy = cy-60 - #Shading Options - FLTShadeExport = Draw.Toggle("Default Shading",9,cx,cy,100,20,options.state['export_shading'],"Turn on export of custom shading") - FLTShadDefault = Draw.Number("",10,cx + 120,cy,100,20,options.state['shading_default'],0.0,180.0,"Default shading angle for objects with no custom shading assigned",setshadingangle) - - cy = cy-40 - FLTScale = Draw.Number("Export Scale",14,cx,cy,220,20,options.state['scale'],0.0,100.0,"Export scaling factor",setexportscale) - - cy = cy-40 - #misc Options - FLTCopyTex = Draw.Toggle("Copy Textures",11,cx,cy,220,20,options.state['copytex'],"Copy textures to folder indicated above") - cy = cy-40 - FLTGlobal = Draw.Toggle("Export Transforms",12,cx,cy,220,20,options.state['transform'],"If unchecked, Global coordinates are used (recommended)") - cy = cy-40 - FLTDoXRef = Draw.Toggle("Export XRefs", 13,cx,cy,220,20,options.state['doxrefs'],"Export External references (only those below current scene!)") - cy = cy-40 - FLTXAPP = Draw.Toggle("Launch External App", 14, cx,cy,220,20,options.state['xapp'],"Launch External Application on export") - cy = cy-40 - FLTAttrib = Draw.Toggle("Write Attribute Files", 20, cx, cy, 220,20,options.state['attrib'], "Write Texture Attribute files") - #FLTXAPPATH = Draw.String("",15,cx,cy,300,20,options.xappath,255,"External application path") - - - #Draw export/close buttons - FLTExport = Draw.PushButton("Export",1,cx,20,100,20,"Export to FLT") - FLTClose = Draw.PushButton("Close", 2, cx+120,20,100,20,"Close window") - - -Draw.Register(gui,event,but_event) \ No newline at end of file diff --git a/release/scripts/flt_filewalker.py b/release/scripts/flt_filewalker.py deleted file mode 100644 index 4a9b86c45d2..00000000000 --- a/release/scripts/flt_filewalker.py +++ /dev/null @@ -1,286 +0,0 @@ -#!BPY - -# flt_filewalker.py is an utility module for OpenFlight IO scripts for blender. -# Copyright (C) 2005 Greg MacDonald -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - -__bpydoc__ ="""\ -File read/write module used by OpenFlight I/O and tool scripts. OpenFlight is a -registered trademark of MultiGen-Paradigm, Inc. -""" - -import Blender -from struct import * -import re - -class FltIn: - def __init__(self, filename): - self.file = open(filename, 'rb') - self.position = 0 - self.next_position = 100000 - self.opcode = 0 - self.length = 0 - self.level = 0 - self.repeat = False # Repeat the last record. - - def begin_record(self): - if self.repeat == True: - self.repeat = False - else: - self.position += self.length - try: - self.file.seek(self.position) - input = self.file.read(4) - except: - print 'Parse Error!' - return False - - if not input: - self.close_file() - return False - - self.opcode = unpack('>h', input[:2])[0] - self.length = unpack('>H', input[-2:])[0] - - self.next_position = self.position + self.length - - return True - - def repeat_record(self): - self.repeat = True - - def get_opcode(self): - return self.opcode - - def get_level(self): - return self.level - - def up_level(self): - self.level += 1 - - def down_level(self): - self.level -= 1 - - def read_string(self, length): - s = '' - if self.file.tell() + length <= self.next_position: - start = self.file.tell() - for i in xrange(length): - char = self.file.read(1) - if char == '\x00': - break - s = s + char - - self.file.seek(start+length) -# else: -# print 'Warning: string truncated' - - return s - - def read_int(self): - if self.file.tell() + 4 <= self.next_position: - return unpack('>i', self.file.read(4))[0] - else: - #print 'Warning: int truncated' - return 0 - - def read_uint(self): - if self.file.tell() + 4 <= self.next_position: - return unpack('>I', self.file.read(4))[0] - else: - #print 'Warning: uint truncated' - return 0 - - def read_double(self): - if self.file.tell() + 8 <= self.next_position: - return unpack('>d', self.file.read(8))[0] - else: - #print 'Warning: double truncated' - return 0.0 - - def read_float(self): - if self.file.tell() + 4 <= self.next_position: - return unpack('>f', self.file.read(4))[0] - else: - #print 'Warning: float truncated' - return 0.0 - - def read_ushort(self): - if self.file.tell() + 2 <= self.next_position: - return unpack('>H', self.file.read(2))[0] - else: - #print 'Warning: ushort truncated' - return 0 - - def read_short(self): - if self.file.tell() + 2 <= self.next_position: - return unpack('>h', self.file.read(2))[0] - else: - #print 'Warning: short trunated' - return 0 - - def read_uchar(self): - if self.file.tell() + 1 <= self.next_position: - return unpack('>B', self.file.read(1))[0] - else: - #print 'Warning: uchar truncated' - return 0 - - def read_char(self): - if self.file.tell() + 1 <= self.next_position: - return unpack('>b', self.file.read(1))[0] - else: - #print 'Warning: char truncated' - return 0 - - def read_ahead(self, i): - if self.file.tell() + i <= self.next_position: - self.file.seek(i, 1) -# else: -# print 'Warning: attempt to seek past record' - - def get_length(self): - return self.length - - def close_file(self): - self.file.close() - -class FltOut: - # 
Length includes terminating null - def write_string(self, string, length): - if len(string) > length - 1: - str_len = length - 1 - else: - str_len = len(string) - - pad_len = length - str_len - - self.file.write(string[:str_len]) - - self.pad(pad_len) - - def write_int(self, a): - self.file.write( pack('>i', a) ) - - def write_uint(self, a): - self.file.write( pack('>I', a) ) - - def write_double(self, a): - self.file.write( pack('>d', a) ) - - def write_float(self, a): - self.file.write( pack('>f', a) ) - - def write_ushort(self, a): - self.file.write( pack('>H', a) ) - - def write_short(self, a): - self.file.write( pack('>h', a) ) - - def write_uchar(self, a): - self.file.write( pack('>B', a) ) - - def write_char(self, a): - self.file.write( pack('>b', a) ) - - def pad(self, reps): - for i in xrange(reps): - self.file.write('\x00') - - def close_file(self): - self.file.close() - - def __init__(self, filename): - self.file = open(filename, 'wb') - self.filename = filename - - -class FileFinder: - def add_file_to_search_path(self, filename): - dir = Blender.sys.dirname(filename) - if dir != None and dir != '': - self.search_dirs.append(dir) - - def strip_path(self, full_path): - # One of my flt files had a windows path with unix seperation. Basename - # returned the whole path + filename, which isn't expected. So my - # attempt to fix it is to replace all / or \ with the platform specific - # dir seperator. - # - # note: \\\\ is actually just one \ indirected twice, once for python - # then again for re.sub - if Blender.sys.sep == '\\': - full_path = re.sub('/', '\\\\', full_path) - elif Blender.sys.sep == '/': - full_path = re.sub('\\\\', '/', full_path) - - filename = Blender.sys.basename(full_path) - return filename - - def find(self, full_path): - if full_path == '': - return None - - # Seperate out the path. - dirname = Blender.sys.dirname(full_path) - - # Try it first. - if Blender.sys.exists(full_path): - if not dirname in self.search_dirs: - self.search_dirs.append(dirname) - return full_path - - # Maybe it's relative. - for path in self.search_dirs: - rel_full_path = Blender.sys.join(path, full_path) - if Blender.sys.exists(rel_full_path): - return rel_full_path - - # Search previous directories that have worked. - filename = self.strip_path(full_path) - for path in self.search_dirs: - t = Blender.sys.join(path, filename) - if Blender.sys.exists(t): - return t - - # Ask user where it is. - self.user_input = Blender.Draw.PupStrInput(filename + "? ", '', 100) - #self.user_input = None - if self.user_input != None: - t = Blender.sys.join(self.user_input, filename) - if Blender.sys.exists(t): - user_dirname = Blender.sys.dirname(t) - if not user_dirname in self.search_dirs: - self.search_dirs.append(user_dirname) - return t - - # Couldn't find it. - return None - - def __init__(self): - self.user_input = '' - self.current_file = '' - self.search_dirs = [] - - dir = Blender.Get('texturesdir') - if dir != None and dir != '': - self.search_dirs.append(dir) - - dir = Blender.sys.dirname(Blender.Get('filename')) - if dir != None and dir != '': - print dir - self.search_dirs.append(dir) - \ No newline at end of file diff --git a/release/scripts/flt_import.py b/release/scripts/flt_import.py deleted file mode 100644 index f8d31f7bb57..00000000000 --- a/release/scripts/flt_import.py +++ /dev/null @@ -1,2534 +0,0 @@ -#!BPY -""" Registration info for Blender menus: -Name: 'OpenFlight (.flt)...' 
-Blender: 245 -Group: 'Import' -Tip: 'Import OpenFlight (.flt)' -""" - - - -__author__ = "Greg MacDonald, Campbell Barton, Geoffrey Bantle" -__version__ = "2.0 11/21/07" -__url__ = ("blender", "blenderartists.org", "Author's homepage, http://sourceforge.net/projects/blight/") -__bpydoc__ = """\ -This script imports OpenFlight files into Blender. OpenFlight is a -registered trademark of MultiGen-Paradigm, Inc. - -Feature overview and more availible at: -http://wiki.blender.org/index.php/Scripts/Manual/Import/openflight_fltss - -Note: This file is a grab-bag of old and new code. It needs some cleanup still. -""" - -# flt_import.py is an OpenFlight importer for blender. -# Copyright (C) 2005 Greg MacDonald, 2007 Blender Foundation -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - -import Blender -import os -import BPyMesh -import BPyImage -import flt_filewalker -import flt_properties -import sys -reload(flt_properties) -from flt_properties import * - -#Globals. Should Clean these up and minimize their usage. - -typecodes = ['c','C','s','S','i','I','f','d','t'] -records = dict() - -FLTBaseLabel = None -FLTBaseString = None -FLTBaseChooser = None -FLTExport = None -FLTClose = None -FLTDoXRef = None -FLTScale = None -FLTShadeImport = None -FLTAttrib = None -FLTWarn = None - -Vector= Blender.Mathutils.Vector -FLOAT_TOLERANCE = 0.01 - -FF = flt_filewalker.FileFinder() -current_layer = 0x01 - -global_prefs = dict() -global_prefs['verbose']= 4 -global_prefs['get_texture'] = True -global_prefs['get_diffuse'] = True -global_prefs['get_specular'] = False -global_prefs['get_emissive'] = False -global_prefs['get_alpha'] = True -global_prefs['get_ambient'] = False -global_prefs['get_shininess'] = True -global_prefs['color_from_face'] = True -global_prefs['fltfile']= '' -global_prefs['smoothshading'] = 1 -global_prefs['doxrefs'] = 1 -global_prefs['scale'] = 1.0 -global_prefs['attrib'] = 0 -msg_once = False - -reg = Blender.Registry.GetKey('flt_import',1) -if reg: - for key in global_prefs: - if reg.has_key(key): - global_prefs[key] = reg[key] - - - -throw_back_opcodes = [2, 73, 4, 11, 96, 14, 91, 98, 63,111] # Opcodes that indicate its time to return control to parent. 
-do_not_report_opcodes = [76, 78, 79, 80, 81, 82, 94, 83, 33, 112, 101, 102, 97, 31, 103, 104, 117, 118, 120, 121, 124, 125] - -#Process FLT record definitions -for record in FLT_Records: - props = dict() - for prop in FLT_Records[record]: - position = '' - slice = 0 - (format,name) = prop.split('!') - for i in format: - if i not in typecodes: - position = position + i - slice = slice + 1 - else: - break - type = format[slice:] - length = type[1:] - if len(length) == 0: - length = 1 - else: - type = type[0] - length = int(length) - - props[int(position)] = (type,length,prop) - records[record] = props - -def col_to_gray(c): - return 0.3*c[0] + 0.59*c[1] + 0.11*c[2] -class MaterialDesc: - # Was going to use int(f*1000.0) instead of round(f,3), but for some reason - # round produces better results, as in less dups. - def make_key(self): - key = list() - if global_prefs['get_texture']: - if self.tex0: - key.append(self.tex0.getName()) - else: - key.append(None) - - if global_prefs['get_alpha']: - key.append(round(self.alpha, 3)) - else: - key.append(None) - - if global_prefs['get_shininess']: - key.append(round(self.shininess, 3)) - else: - key.append(None) - - if global_prefs['get_emissive']: - key.append(round(self.emissive, 3)) - else: - key.append(None) - - if global_prefs['get_ambient']: - key.append(round(self.ambient, 3)) - else: - key.append(None) - - if global_prefs['get_specular']: - for n in self.specular: - key.append(round(n, 3)) - else: - key.extend([None, None, None]) - - if global_prefs['get_diffuse']: - for n in self.diffuse: - key.append(round(n, 3)) - else: - key.extend([None, None, None]) - -# key.extend(self.face_props.values()) - - return tuple(key) - - def __init__(self): - self.name = 'Material' - # Colors, List of 3 floats. - self.diffuse = [1.0, 1.0, 1.0] - self.specular = [1.0, 1.0, 1.0] - - # Scalars - self.ambient = 0.0 # [0.0, 1.0] - self.emissive = 0.0 # [0.0, 1.0] - self.shininess = 0.5 # Range is [0.0, 2.0] - self.alpha = 1.0 # Range is [0.0, 1.0] - - self.tex0 = None - - # OpenFlight Face attributes - self.face_props = dict.fromkeys(['comment', 'ir color', 'priority', - 'draw type', 'texture white', 'template billboard', - 'smc', 'fid', 'ir material', 'lod generation control', - 'flags', 'light mode']) - -class VertexDesc: - def make_key(self): - return round(self.x, 6), round(self.y, 6), round(self.z, 6) - - def __init__(self): - - # Assign later, save memory, all verts have a loc - self.x = 0.0 - self.y = 0.0 - self.z = 0.0 - - - self.nx = 0.0 - self.ny = 0.0 - self.nz = 0.0 - - self.uv= Vector(0,0) - self.cindex = 127 #default/lowest - self.cnorm = False - -class LightPointAppDesc: - def make_key(self): - d = dict(self.props) - del d['id'] - del d['type'] - - if d['directionality'] != 0: # not omni - d['nx'] = 0.0 - d['ny'] = 0.0 - d['nz'] = 0.0 - - return tuple(d.values()) - - def __init__(self): - self.props = dict() - self.props.update({'type': 'LPA'}) - self.props.update({'id': 'ap'}) - # Attribs not found in inline lightpoint. - self.props.update({'visibility range': 0.0}) - self.props.update({'fade range ratio': 0.0}) - self.props.update({'fade in duration': 0.0}) - self.props.update({'fade out duration': 0.0}) - self.props.update({'LOD range ratio': 0.0}) - self.props.update({'LOD scale': 0.0}) - -class GlobalResourceRepository: - def request_lightpoint_app(self, desc, scene): - match = self.light_point_app.get(desc.make_key()) - - if match: - return match.getName() - else: - # Create empty and fill with properties. 
- name = desc.props['type'] + ': ' + desc.props['id'] - object = Blender.Object.New('Empty', name) - scene.objects.link(object) - object.Layers= current_layer - object.sel= 1 - - # Attach properties - for name, value in desc.props.iteritems(): - object.addProperty(name, value) - - self.light_point_app.update({desc.make_key(): object}) - - return object.getName() - - # Dont use request_vert - faster to make it from the vector direct. - """ - def request_vert(self, desc): - match = self.vert_dict.get(desc.make_key()) - - if match: - return match - else: - vert = Blender.Mathutils.Vector(desc.x, desc.y, desc.z) - ''' IGNORE_NORMALS - vert.no[0] = desc.nx - vert.no[1] = desc.ny - vert.no[2] = desc.nz - ''' - self.vert_dict.update({desc.make_key(): vert}) - return vert - """ - def request_mat(self, mat_desc): - match = self.mat_dict.get(mat_desc.make_key()) - if match: return match - - mat = Blender.Material.New(mat_desc.name) - - if mat_desc.tex0 != None: - mat.setTexture(0, mat_desc.tex0, Blender.Texture.TexCo.UV) - - mat.setAlpha(mat_desc.alpha) - mat.setSpec(mat_desc.shininess) - mat.setHardness(255) - mat.setEmit(mat_desc.emissive) - mat.setAmb(mat_desc.ambient) - mat.setSpecCol(mat_desc.specular) - mat.setRGBCol(mat_desc.diffuse) - - # Create a text object to store openflight face attribs until - # user properties can be set on materials. -# t = Blender.Text.New('FACE: ' + mat.getName()) -# -# for name, value in mat_desc.face_props.items(): -# t.write(name + '\n' + str(value) + '\n\n') - - self.mat_dict.update({mat_desc.make_key(): mat}) - - return mat - - def request_image(self, filename_with_path): - if not global_prefs['get_texture']: return None - return BPyImage.comprehensiveImageLoad(filename_with_path, global_prefs['fltfile']) # Use join in case of spaces - - def request_texture(self, image): - if not global_prefs['get_texture']: - return None - - tex = self.tex_dict.get(image.filename) - if tex: return tex - - tex = Blender.Texture.New(Blender.sys.basename(image.filename)) - tex.setImage(image) - tex.setType('Image') - self.tex_dict.update({image.filename: tex}) - return tex - - def __init__(self): - - #list of scenes xrefs belong to. - self.xrefs = dict() - # material - self.mat_dict = dict() - mat_lst = Blender.Material.Get() - for mat in mat_lst: - mat_desc = MaterialDesc() - mapto_lst = mat.getTextures() - if mapto_lst[0]: - mat_desc.tex0 = mapto_lst[0].tex - else: - mat_desc.tex0 = None - mat_desc.alpha = mat.getAlpha() - mat_desc.shininess = mat.getSpec() - mat_desc.emissive = mat.getEmit() - mat_desc.ambient = mat.getAmb() - mat_desc.specular = mat.getSpecCol() - mat_desc.diffuse = mat.getRGBCol() - - self.mat_dict.update({mat_desc.make_key(): mat}) - - # texture - self.tex_dict = dict() - tex_lst = Blender.Texture.Get() - - for tex in tex_lst: - img = tex.getImage() - # Only interested in textures with images. 
- if img: - self.tex_dict.update({img.filename: tex}) - - # vertex - # self.vert_dict = dict() - - # light point - self.light_point_app = dict() - -class Handler: - def in_throw_back_lst(self, opcode): - return opcode in self.throw_back_lst - - def handle(self, opcode): - return self.handler[opcode]() - - def handles(self, opcode): - return opcode in self.handler.iterkeys() - - def throws_back_all_unhandled(self): - return self.throw_back_unhandled - - def set_throw_back_lst(self, a): - self.throw_back_lst = a - - def set_throw_back_all_unhandled(self): - self.throw_back_unhandled = True - - def set_only_throw_back_specified(self): - self.throw_back_unhandled = False - - def set_handler(self, d): - self.handler = d - - def __init__(self): - # Dictionary of opcodes to handler methods. - self.handler = dict() - # Send all opcodes not handled to the parent node. - self.throw_back_unhandled = False - # If throw_back_unhandled is False then only throw back - # if the opcodes in throw_back are encountered. - self.throw_back_lst = list() - -class Node: - def blender_import(self): - if self.opcode in opcode_name and global_prefs['verbose'] >= 2: - for i in xrange(self.get_level()): - print ' ', - print opcode_name[self.opcode], - print '-', self.props['id'], - print '-', self.props['comment'], - - print - - for child in self.children: - child.blender_import() - -# Import comment. -# if self.props['comment'] != '': -# name = 'COMMENT: ' + self.props['id'] -# t = Blender.Text.New(name) -# t.write(self.props['comment']) -# self.props['comment'] = name - - # Always ignore extensions and anything in between them. - def parse_push_extension(self): - self.saved_handler = self.active_handler - self.active_handler = self.extension_handler - return True - - def parse_pop_extension(self): - self.active_handler = self.saved_handler - return True - - def parse_push(self): - self.header.fw.up_level() - # Ignore unknown children. - self.ignore_unhandled = True - # Don't do child records that might overwrite parent info. ex: longid - self.active_handler = self.child_handler - return True - - def parse_pop(self): - self.header.fw.down_level() - - if self.header.fw.get_level() == self.level: - return False - - return True - - def parse(self): - while self.header.fw.begin_record(): - opcode = self.header.fw.get_opcode() - - # Print out info on opcode and tree level. 
- if global_prefs['verbose'] >= 3: - p = '' - for i in xrange(self.header.fw.get_level()): - p = p + ' ' - if opcode in opcode_name: - p = p + opcode_name[opcode] - else: - if global_prefs['verbose'] >= 1: - print 'undocumented opcode', opcode - continue - - if self.global_handler.handles(opcode): - if global_prefs['verbose'] >= 3: - print p + ' handled globally' - if self.global_handler.handle(opcode) == False: - break - - elif self.active_handler.handles(opcode): - if global_prefs['verbose'] >= 4: - print p + ' handled' - if self.active_handler.handle(opcode) == False: - break - - else: - if self.active_handler.throws_back_all_unhandled(): - if global_prefs['verbose'] >= 3: - print p + ' handled elsewhere' - self.header.fw.repeat_record() - break - - elif self.active_handler.in_throw_back_lst(opcode): - if global_prefs['verbose'] >= 3: - print p + ' handled elsewhere' - self.header.fw.repeat_record() - break - - else: - if global_prefs['verbose'] >= 3: - print p + ' ignored' - elif global_prefs['verbose'] >= 1 and not opcode in do_not_report_opcodes and opcode in opcode_name: - print 'not handled' - - def get_level(self): - return self.level - - def parse_long_id(self): - self.props['id'] = self.header.fw.read_string(self.header.fw.get_length()-4) - return True - - def parse_comment(self): - self.props['comment'] = self.header.fw.read_string(self.header.fw.get_length()-4) - return True - - def parse_extension(self): - extension = dict() - props = records[100] - propkeys = props.keys() - propkeys.sort() - for position in propkeys: - (type,length,name) = props[position] - extension[name] = read_prop(self.header.fw,type,length) - #read extension data. - dstring = list() - for i in xrange(self.header.fw.get_length()-24): - dstring.append(self.header.fw.read_char()) - extension['data'] = dstring - self.extension = extension - def parse_record(self): - self.props['type'] = self.opcode - props = records[self.opcode] - propkeys = props.keys() - propkeys.sort() - for position in propkeys: - (type,length,name) = props[position] - self.props[name] = read_prop(self.header.fw,type,length) - try: #remove me! - self.props['id'] = self.props['3t8!id'] - except: - pass - def __init__(self, parent, header): - self.root_handler = Handler() - self.child_handler = Handler() - self.extension_handler = Handler() - self.global_handler = Handler() - - self.global_handler.set_handler({21: self.parse_push_extension}) - self.active_handler = self.root_handler - - # used by parse_*_extension - self.extension_handler.set_handler({22: self.parse_pop_extension}) - self.saved_handler = None - - self.header = header - self.children = list() - - self.parent = parent - - if parent: - parent.children.append(self) - - self.level = self.header.fw.get_level() - self.opcode = self.header.fw.get_opcode() - - self.props = {'id': 'unnamed', 'comment': '', 'type': 'untyped'} - -class VertexPalette(Node): - def __init__(self, parent): - Node.__init__(self, parent, parent.header) - self.root_handler.set_handler({68: self.parse_vertex_c, - 69: self.parse_vertex_cn, - 70: self.parse_vertex_cnuv, - 71: self.parse_vertex_cuv}) - self.root_handler.set_throw_back_all_unhandled() - - self.vert_desc_lst = list() - self.blender_verts = list() - self.offset = 8 - # Used to create a map from byte offset to vertex index. 
- self.index = dict() - - - def blender_import(self): - self.blender_verts.extend([Vector(vert_desc.x, vert_desc.y, vert_desc.z) for vert_desc in self.vert_desc_lst ]) - - def parse_vertex_common(self): - # Add this vertex to an offset to index dictionary. - #self.index_lst.append( (self.offset, self.next_index) ) - self.index[self.offset]= len(self.index) - - # Get ready for next record. - self.offset += self.header.fw.get_length() - - v = VertexDesc() - - self.header.fw.read_ahead(2) - v.flags = self.header.fw.read_short() - - v.x = self.header.fw.read_double() - v.y = self.header.fw.read_double() - v.z = self.header.fw.read_double() - - return v - - def parse_vertex_post_common(self, v): - #if not v.flags & 0x2000: # 0x2000 = no color - #if v.flags & 0x1000: # 0x1000 = packed color - # v.a = self.header.fw.read_uchar() - # v.b = self.header.fw.read_uchar() - # v.g = self.header.fw.read_uchar() - # v.r = self.header.fw.read_uchar() - #else: - self.header.fw.read_ahead(4) #skip packed color - v.cindex = self.header.fw.read_uint() - self.vert_desc_lst.append(v) - return True - - def parse_vertex_c(self): - v = self.parse_vertex_common() - - self.parse_vertex_post_common(v) - - return True - - def parse_vertex_cn(self): - v = self.parse_vertex_common() - v.cnorm = True - v.nx = self.header.fw.read_float() - v.ny = self.header.fw.read_float() - v.nz = self.header.fw.read_float() - - self.parse_vertex_post_common(v) - - return True - - def parse_vertex_cuv(self): - v = self.parse_vertex_common() - - v.uv[:] = self.header.fw.read_float(), self.header.fw.read_float() - - self.parse_vertex_post_common(v) - - return True - - def parse_vertex_cnuv(self): - v = self.parse_vertex_common() - v.cnorm = True - v.nx = self.header.fw.read_float() - v.ny = self.header.fw.read_float() - v.nz = self.header.fw.read_float() - - v.uv[:] = self.header.fw.read_float(), self.header.fw.read_float() - - self.parse_vertex_post_common(v) - - return True - - def parse(self): # Run once per import - Node.parse(self) - - -class InterNode(Node): - def __init__(self): - self.object = None - self.mesh = None - self.swapmesh = None - self.hasMesh = False - self.faceLs= [] - self.matrix = None - self.vis = True - self.hasmtex = False - self.uvlayers = dict() - self.blayernames = dict() - self.subfacelevel = 0 - self.extension = None - - mask = 2147483648 - for i in xrange(7): - self.uvlayers[mask] = False - mask = mask / 2 - - ####################################################### - ## Begin Remove Doubles Replacement ## - ####################################################### - def __xvertsort(self,__a,__b): - (__vert, __x1) = __a - (__vert2,__x2) = __b - - if __x1 > __x2: - return 1 - elif __x1 < __x2: - return -1 - return 0 - def __calcFaceNorm(self,__face): - if len(__face) == 3: - return Blender.Mathutils.TriangleNormal(__face[0].co, __face[1].co, __face[2].co) - elif len(__face) == 4: - return Blender.Mathutils.QuadNormal(__face[0].co, __face[1].co, __face[2].co, __face[3].co) - - def __replaceFaceVert(self,__weldface, __oldvert, __newvert): - __index = None - for __i, __v in enumerate(__weldface): - if __v == __oldvert: - __index = __i - break - __weldface[__index] = __newvert - - def __matchEdge(self,__weldmesh, __edge1, __edge2): - if __edge1[0] in __weldmesh['Vertex Disk'][__edge2[1]] and __edge1[1] in __weldmesh['Vertex Disk'][__edge2[0]]: - return True - return False - #have to compare original faces! 
- def __faceWinding(self, __weldmesh, __face1, __face2): - - __f1edges = list() - __f2edges = list() - - __f1edges.append((__face1.verts[0], __face1.verts[1])) - __f1edges.append((__face1.verts[1], __face1.verts[2])) - if len(__face1.verts) == 3: - __f1edges.append((__face1.verts[2], __face1.verts[0])) - else: - __f1edges.append((__face1.verts[2], __face1.verts[3])) - __f1edges.append((__face1.verts[3], __face1.verts[0])) - - __f2edges.append((__face2.verts[0], __face2.verts[1])) - __f2edges.append((__face2.verts[1], __face2.verts[2])) - if len(__face2.verts) == 3: - __f2edges.append((__face2.verts[2], __face2.verts[0])) - else: - __f2edges.append((__face2.verts[2], __face2.verts[3])) - __f2edges.append((__face2.verts[3], __face2.verts[0])) - - - #find a matching edge - for __edge1 in __f1edges: - for __edge2 in __f2edges: - if self.__matchEdge(__weldmesh, __edge1, __edge2): #no more tests nessecary - return True - - return False - - def __floatcompare(self, __f1, __f2): - epsilon = 0.1 - if ((__f1 + epsilon) > __f2) and ((__f1 - epsilon) < __f2): - return True - return False - def __testFace(self,__weldmesh,__v1face, __v2face, __v1bface, __v2bface): - limit = 0.01 - __matchvert = None - #frst test (for real this time!). Are the faces the same face? - if __v1face == __v2face: - return False - - #first test: Do the faces possibly geometrically share more than two vertices? we should be comparing original faces for this? - Yes..... - __match = 0 - for __vert in __v1bface.verts: - for __vert2 in __v2bface.verts: - #if (abs(__vert.co[0] - __vert2.co[0]) <= limit) and (abs(__vert.co[1] - __vert2.co[1]) <= limit) and (abs(__vert.co[2] - __vert2.co[2]) <= limit): #this needs to be fixed! - if __vert2 in __weldmesh['Vertex Disk'][__vert] or __vert == __vert2: - __match += 1 - __matchvert = __vert2 - #avoid faces sharing more than two verts - if __match > 2: - return False - - #consistent winding for face normals - if __match == 2: - if not self.__faceWinding(__weldmesh, __v1bface, __v2bface): - return False - - #second test: Compatible normals.Anything beyond almost exact opposite is 'ok' - __v1facenorm = self.__calcFaceNorm(__v1face) - __v2facenorm = self.__calcFaceNorm(__v2face) - - #dont even mess with zero length faces - if __v1facenorm.length < limit: - return False - if __v2facenorm.length < limit: - return False - - __v1facenorm.normalize() - __v2facenorm.normalize() - - if __match == 1: - #special case, look for comparison of normals angle - __angle = Blender.Mathutils.AngleBetweenVecs(__v1facenorm, __v2facenorm) - if __angle > 70.0: - return False - - - - __v2facenorm = __v2facenorm.negate() - - if self.__floatcompare(__v1facenorm[0], __v2facenorm[0]) and self.__floatcompare(__v1facenorm[1], __v2facenorm[1]) and self.__floatcompare(__v1facenorm[2], __v2facenorm[2]): - return False - - #next test: dont weld a subface to a non-subface! - if __v1bface.getProperty("FLT_SFLEVEL") != __v2bface.getProperty("FLT_SFLEVEL"): - return False - - #final test: edge test - We dont want to create a non-manifold edge through our weld operation - - return True - - def __copyFaceData(self, __source, __target): - #copy vcolor layers. - __actColLayer = self.mesh.activeColorLayer - for __colorlayer in self.mesh.getColorLayerNames(): - self.mesh.activeColorLayer = __colorlayer - for __i, __col in enumerate(__source.col): - __target.col[__i].r = __col.r - __target.col[__i].g = __col.g - __target.col[__i].b = __col.b - - self.mesh.activeColorLayer = __actColLayer - #copy uv layers. 
- __actUVLayer = self.mesh.activeUVLayer - for __uvlayer in self.mesh.getUVLayerNames(): - self.mesh.activeUVLayer = __uvlayer - __target.image = __source.image - __target.mode = __source.mode - __target.smooth = __source.smooth - __target.transp = __source.transp - for __i, __uv in enumerate(__source.uv): - __target.uv[__i][0] = __uv[0] - __target.uv[__i][1] = __uv[1] - - self.mesh.activeUVLayer = __actUVLayer - #copy property layers - for __property in self.mesh.faces.properties: - __target.setProperty(__property, __source.getProperty(__property)) - - def findDoubles(self): - limit = 0.01 - sortblock = list() - double = dict() - for vert in self.mesh.verts: - double[vert] = None - sortblock.append((vert, vert.co[0] + vert.co[1] + vert.co[2])) - sortblock.sort(self.__xvertsort) - - a = 0 - while a < len(self.mesh.verts): - (vert,xsort) = sortblock[a] - b = a+1 - if not double[vert]: - while b < len(self.mesh.verts): - (vert2, xsort2) = sortblock[b] - if not double[vert2]: - #first test, simple distance - if (xsort2 - xsort) > limit: - break - #second test, more expensive - if (abs(vert.co[0] - vert2.co[0]) <= limit) and (abs(vert.co[1] - vert2.co[1]) <= limit) and (abs(vert.co[2] - vert2.co[2]) <= limit): - double[vert2] = vert - b+=1 - a+=1 - - return double - - def buildWeldMesh(self): - - weldmesh = dict() - weldmesh['Vertex Disk'] = dict() #this is geometric adjacency - weldmesh['Vertex Faces'] = dict() #topological adjacency - - #find the doubles for this mesh - double = self.findDoubles() - - for vert in self.mesh.verts: - weldmesh['Vertex Faces'][vert] = list() - - #create weld faces - weldfaces = list() - originalfaces = list() - for face in self.mesh.faces: - weldface = list() - for vert in face.verts: - weldface.append(vert) - weldfaces.append(weldface) - originalfaces.append(face) - for i, weldface in enumerate(weldfaces): - for vert in weldface: - weldmesh['Vertex Faces'][vert].append(i) - weldmesh['Weld Faces'] = weldfaces - weldmesh['Original Faces'] = originalfaces - - #Now we need to build the vertex disk data. first we do just the 'target' vertices - for vert in self.mesh.verts: - if not double[vert]: #its a target - weldmesh['Vertex Disk'][vert] = list() - for vert in self.mesh.verts: - if double[vert]: #its a double - weldmesh['Vertex Disk'][double[vert]].append(vert) - - #Now we need to create the disk information for the remaining vertices - targets = weldmesh['Vertex Disk'].keys() - for target in targets: - for doublevert in weldmesh['Vertex Disk'][target]: - weldmesh['Vertex Disk'][doublevert] = [target] - for othervert in weldmesh['Vertex Disk'][target]: - if othervert != doublevert: - weldmesh['Vertex Disk'][doublevert].append(othervert) - - return weldmesh - - def weldFuseFaces(self,weldmesh): - - #retain original loose vertices - looseverts = dict() - for vert in self.mesh.verts: - looseverts[vert] = 0 - for edge in self.mesh.edges: - looseverts[edge.v1] += 1 - looseverts[edge.v2] += 1 - - - - #slight modification here: we need to walk around the mesh as many times as it takes to have no more matches - done = 0 - while not done: - done = 1 - for windex, weldface in enumerate(weldmesh['Weld Faces']): - for vertex in weldface: - #we walk around the faces of the doubles of this vertex and if possible, we weld them. 
- for doublevert in weldmesh['Vertex Disk'][vertex]: - removeFaces = list() #list of faces to remove from doubleverts face list - for doublefaceindex in weldmesh['Vertex Faces'][doublevert]: - doubleface = weldmesh['Weld Faces'][doublefaceindex] - oface1 = self.mesh.faces[windex] - oface2 = self.mesh.faces[doublefaceindex] - ok = self.__testFace(weldmesh, weldface, doubleface, oface1, oface2) - if ok: - done = 0 - removeFaces.append(doublefaceindex) - self.__replaceFaceVert(doubleface, doublevert, vertex) - for doublefaceindex in removeFaces: - weldmesh['Vertex Faces'][doublevert].remove(doublefaceindex) - #old faces first - oldindices = list() - for face in self.mesh.faces: - oldindices.append(face.index) - #make our new faces. - newfaces = list() - for weldface in weldmesh['Weld Faces']: - newfaces.append(weldface) - newindices = self.mesh.faces.extend(newfaces, indexList=True, ignoreDups=True) - #copy custom data over - for i, newindex in enumerate(newindices): - try: - self.__copyFaceData(self.mesh.faces[oldindices[i]], self.mesh.faces[newindex]) - except: - print "warning, could not copy face data!" - #delete the old faces - self.mesh.faces.delete(1, oldindices) - - #Clean up stray vertices - vertuse = dict() - for vert in self.mesh.verts: - vertuse[vert] = 0 - for face in self.mesh.faces: - for vert in face.verts: - vertuse[vert] += 1 - delverts = list() - for vert in self.mesh.verts: - if not vertuse[vert] and vert.index != 0 and looseverts[vert]: - delverts.append(vert) - - self.mesh.verts.delete(delverts) - - - ####################################################### - ## End Remove Doubles Replacement ## - ####################################################### - - def blender_import_my_faces(self): - - # Add the verts onto the mesh - blender_verts= self.header.vert_pal.blender_verts - vert_desc_lst= self.header.vert_pal.vert_desc_lst - - vert_list= [ i for flt_face in self.faceLs for i in flt_face.indices] #splitting faces apart. Is this a good thing? - face_edges= [] - face_verts= [] - self.mesh.verts.extend([blender_verts[i] for i in vert_list]) - - new_faces= [] - new_faces_props= [] - ngon= BPyMesh.ngon - vert_index= 1 - - #add vertex color layer for baked face colors. - self.mesh.addColorLayer("FLT_Fcol") - self.mesh.activeColorLayer = "FLT_Fcol" - - FLT_OrigIndex = 0 - for flt_face in self.faceLs: - if flt_face.tex_index != -1: - try: - image= self.header.tex_pal[flt_face.tex_index][1] - except KeyError: - image= None - else: - image= None - face_len= len(flt_face.indices) - - #create dummy uvert dicts - if len(flt_face.uverts) == 0: - for i in xrange(face_len): - flt_face.uverts.append(dict()) - #May need to patch up MTex info - if self.hasmtex: - #For every layer in mesh, there should be corresponding layer in the face - for mask in self.uvlayers.keys(): - if self.uvlayers[mask]: - if not flt_face.uvlayers.has_key(mask): #Does the face have this layer? - #Create Layer info for this face - flt_face.uvlayers[mask] = dict() - flt_face.uvlayers[mask]['texture index'] = -1 - flt_face.uvlayers[mask]['texture enviorment'] = 3 - flt_face.uvlayers[mask]['texture mapping'] = 0 - flt_face.uvlayers[mask]['texture data'] = 0 - - #now go through and create dummy uvs for this layer - for uvert in flt_face.uverts: - uv = Vector(0.0,0.0) - uvert[mask] = uv - - # Get the indicies in reference to the mesh. 
- uvs= [vert_desc_lst[j].uv for j in flt_face.indices] - if face_len == 1: - pass - elif face_len == 2: - face_edges.append((vert_index, vert_index+1)) - elif flt_face.props['draw type'] == 2 or flt_face.props['draw type'] == 3: - i = 0 - while i < (face_len-1): - face_edges.append((vert_index + i, vert_index + i + 1)) - i = i + 1 - if flt_face.props['draw type'] == 2: - face_edges.append((vert_index + i,vert_index)) - elif face_len == 3 or face_len == 4: # tri or quad - #if face_len == 1: - # pass - #if face_len == 2: - # face_edges.append((vert_index, vert_index+1)) - new_faces.append( [i+vert_index for i in xrange(face_len)] ) - new_faces_props.append((None, image, uvs, flt_face.uverts, flt_face.uvlayers, flt_face.color_index, flt_face.props,FLT_OrigIndex,0, flt_face.subfacelevel)) - - else: # fgon - mesh_face_indicies = [i+vert_index for i in xrange(face_len)] - tri_ngons= ngon(self.mesh, mesh_face_indicies) - if len(tri_ngons) != 1: - new_faces.extend([ [mesh_face_indicies[t] for t in tri] for tri in tri_ngons]) - new_faces_props.extend( [ (None, image, (uvs[tri[0]], uvs[tri[1]], uvs[tri[2]]), [flt_face.uverts[tri[0]], flt_face.uverts[tri[1]], flt_face.uverts[tri[2]]], flt_face.uvlayers, flt_face.color_index, flt_face.props,FLT_OrigIndex,1, flt_face.subfacelevel) for tri in tri_ngons ]) - - vert_index+= face_len - FLT_OrigIndex+=1 - - self.mesh.faces.extend(new_faces) - self.mesh.edges.extend(face_edges) - - #add in the FLT_ORIGINDEX layer - if len(self.mesh.faces): - try: self.mesh.faceUV= True - except: pass - - if self.mesh.faceUV == True: - self.mesh.renameUVLayer(self.mesh.activeUVLayer, 'Layer0') - - #create name layer for faces - self.mesh.faces.addPropertyLayer("FLT_ID",Blender.Mesh.PropertyTypes["STRING"]) - #create layer for face color indices - self.mesh.faces.addPropertyLayer("FLT_COL",Blender.Mesh.PropertyTypes["INT"]) - #create index layer for faces. This is needed by both FGONs and subfaces - self.mesh.faces.addPropertyLayer("FLT_ORIGINDEX",Blender.Mesh.PropertyTypes["INT"]) - #create temporary FGON flag layer. Delete after remove doubles - self.mesh.faces.addPropertyLayer("FLT_FGON",Blender.Mesh.PropertyTypes["INT"]) - self.mesh.faces.addPropertyLayer("FLT_SFLEVEL", Blender.Mesh.PropertyTypes["INT"]) - - for i, f in enumerate(self.mesh.faces): - props = new_faces_props[i] - if props[6]['template billboard'] > 0: - f.transp |= Blender.Mesh.FaceTranspModes["ALPHA"] - if props[6]['template billboard'] == 2: - f.mode |= Blender.Mesh.FaceModes["BILLBOARD"] - f.mode |= Blender.Mesh.FaceModes["LIGHT"] - if props[6]['draw type'] == 1: - f.mode |= Blender.Mesh.FaceModes["TWOSIDE"] - - #f.mat = props[0] - f.image = props[1] - f.uv = props[2] - #set vertex colors - color = self.header.get_color(props[5]) - if not color: - color = [255,255,255,255] - for mcol in f.col: - mcol.a = color[3] - mcol.r = color[0] - mcol.g = color[1] - mcol.b = color[2] - - f.setProperty("FLT_SFLEVEL", props[9]) - f.setProperty("FLT_ORIGINDEX",i) - f.setProperty("FLT_ID",props[6]['id']) - #if props[5] > 13199: - # print "Warning, invalid color index read in! Using default!" - # f.setProperty("FLT_COL",127) - #else: - if(1): #uh oh.... - value = struct.unpack('>i',struct.pack('>I',props[5]))[0] - f.setProperty("FLT_COL",value) - - #if props[8]: - # f.setProperty("FLT_FGON",1) - #else: - # f.setProperty("FLT_FGON",0) - - - #Create multitex layers, if present. 
- actuvlayer = self.mesh.activeUVLayer - if(self.hasmtex): - #For every multi-tex layer, we have to add a new UV layer to the mesh - for i,mask in enumerate(reversed(sorted(self.uvlayers))): - if self.uvlayers[mask]: - self.blayernames[mask] = "Layer" + str(i+1) - self.mesh.addUVLayer(self.blayernames[mask]) - - #Cycle through availible multi-tex layers and add face UVS - for mask in self.uvlayers: - if self.uvlayers[mask]: - self.mesh.activeUVLayer = self.blayernames[mask] - for j, f in enumerate(self.mesh.faces): - if props[6]['draw type'] == 1: - f.mode |= Blender.Mesh.FaceModes["TWOSIDE"] - f.transp |= Blender.Mesh.FaceTranspModes["ALPHA"] - f.mode |= Blender.Mesh.FaceModes["LIGHT"] - props = new_faces_props[j] - uvlayers = props[4] - if uvlayers.has_key(mask): #redundant - uverts = props[3] - for k, uv in enumerate(f.uv): - uv[0] = uverts[k][mask][0] - uv[1] = uverts[k][mask][1] - - uvlayer = uvlayers[mask] - tex_index = uvlayer['texture index'] - if tex_index != -1: - try: - f.image = self.header.tex_pal[tex_index][1] - except KeyError: - f.image = None - - if global_prefs['smoothshading'] == True and len(self.mesh.faces): - #We need to store per-face vertex normals in the faces as UV layers and delete them later. - self.mesh.addUVLayer("FLTNorm1") - self.mesh.addUVLayer("FLTNorm2") - self.mesh.activeUVLayer = "FLTNorm1" - for f in self.mesh.faces: - f.smooth = 1 - #grab the X and Y components of normal and store them in UV - for i, uv in enumerate(f.uv): - vert = f.v[i].index - vert_desc = vert_desc_lst[vert_list[vert-1]] - if vert_desc.cnorm: - uv[0] = vert_desc.nx - uv[1] = vert_desc.ny - else: - uv[0] = 0.0 - uv[1] = 0.0 - - #Now go through and populate the second UV Layer with the z component - self.mesh.activeUVLayer = "FLTNorm2" - for f in self.mesh.faces: - for i, uv in enumerate(f.uv): - vert = f.v[i].index - vert_desc = vert_desc_lst[vert_list[vert-1]] - if vert_desc.cnorm: - uv[0] = vert_desc.nz - uv[1] = 0.0 - else: - uv[0] = 0.0 - uv[1] = 0.0 - - - - #Finally, go through, remove dummy vertex, remove doubles and add edgesplit modifier. - Blender.Mesh.Mode(Blender.Mesh.SelectModes['VERTEX']) - self.mesh.sel= 1 - self.header.scene.update(1) #slow! - - #self.mesh.remDoubles(0.0001) - weldmesh = self.buildWeldMesh() - welded = self.weldFuseFaces(weldmesh) - self.mesh.verts.delete(0) # remove the dummy vert - - edgeHash = dict() - - for edge in self.mesh.edges: - edgeHash[edge.key] = edge.index - - - if global_prefs['smoothshading'] == True and len(self.mesh.faces): - - #rip out the custom vertex normals from the mesh and place them in a face aligned list. Easier to compare this way. - facenorms = [] - self.mesh.activeUVLayer = "FLTNorm1" - for face in self.mesh.faces: - facenorm = [] - for uv in face.uv: - facenorm.append(Vector(uv[0],uv[1],0.0)) - facenorms.append(facenorm) - self.mesh.activeUVLayer = "FLTNorm2" - for i, face in enumerate(self.mesh.faces): - facenorm = facenorms[i] - for j, uv in enumerate(face.uv): - facenorm[j][2] = uv[0] - self.mesh.removeUVLayer("FLTNorm1") - self.mesh.removeUVLayer("FLTNorm2") - - #find hard edges - #store edge data for lookup by faces - #edgeHash = dict() - #for edge in self.mesh.edges: - # edgeHash[edge.key] = edge.index - - edgeNormHash = dict() - #make sure to align the edgenormals to key value! 
- for i, face in enumerate(self.mesh.faces): - - facenorm = facenorms[i] - faceEdges = [] - faceEdges.append((face.v[0].index,face.v[1].index,facenorm[0],facenorm[1],face.edge_keys[0])) - faceEdges.append((face.v[1].index,face.v[2].index,facenorm[1],facenorm[2],face.edge_keys[1])) - if len(face.v) == 3: - faceEdges.append((face.v[2].index,face.v[0].index,facenorm[2],facenorm[0],face.edge_keys[2])) - elif len(face.v) == 4: - faceEdges.append((face.v[2].index,face.v[3].index,facenorm[2],facenorm[3],face.edge_keys[2])) - faceEdges.append((face.v[3].index,face.v[0].index,facenorm[3],facenorm[0],face.edge_keys[3])) - - #check to see if edgeNormal has been placed in the edgeNormHash yet - #this is a redundant test, and should be optimized to not be called as often as it is. - for j, faceEdge in enumerate(faceEdges): - #the value we are looking for is (faceEdge[2],faceEdge[3]) - hashvalue = (faceEdge[2],faceEdge[3]) - if (faceEdge[0],faceEdge[1]) != faceEdge[4]: - hashvalue = (hashvalue[1],hashvalue[0]) - assert (faceEdge[1],faceEdge[0]) == faceEdge[4] - if edgeNormHash.has_key(faceEdge[4]): - #compare value in the hash, if different, mark as sharp - edgeNorm = edgeNormHash[faceEdge[4]] - if\ - abs(hashvalue[0][0] - edgeNorm[0][0]) > FLOAT_TOLERANCE or\ - abs(hashvalue[0][1] - edgeNorm[0][1]) > FLOAT_TOLERANCE or\ - abs(hashvalue[0][2] - edgeNorm[0][2]) > FLOAT_TOLERANCE or\ - abs(hashvalue[1][0] - edgeNorm[1][0]) > FLOAT_TOLERANCE or\ - abs(hashvalue[1][1] - edgeNorm[1][1]) > FLOAT_TOLERANCE or\ - abs(hashvalue[1][2] - edgeNorm[1][2]) > FLOAT_TOLERANCE: - edge = self.mesh.edges[edgeHash[faceEdge[4]]] - edge.flag |= Blender.Mesh.EdgeFlags.SHARP - - else: - edgeNormHash[faceEdge[4]] = hashvalue - - #add in edgesplit modifier - mod = self.object.modifiers.append(Blender.Modifier.Types.EDGESPLIT) - mod[Blender.Modifier.Settings.EDGESPLIT_FROM_SHARP] = True - mod[Blender.Modifier.Settings.EDGESPLIT_FROM_ANGLE] = False - - if(actuvlayer): - self.mesh.activeUVLayer = actuvlayer - - def blender_import(self): - if self.vis and self.parent.object: - self.vis = self.parent.vis - name = self.props['id'] - - - if self.hasMesh: - self.mesh = Blender.Mesh.New() - self.mesh.name = 'FLT_FaceList' - self.mesh.fakeUser = True - self.mesh.verts.extend( Vector()) #DUMMYVERT - self.object = self.header.scene.objects.new(self.mesh) - else: - self.object = self.header.scene.objects.new('Empty') - - self.object.name = name - self.header.group.objects.link(self.object) - - #id props import - self.object.properties['FLT'] = dict() - for key in self.props: - try: - self.object.properties['FLT'][key] = self.props[key] - except: #horrible... 
- pass - - - if self.extension: - self.object.properties['FLT']['EXT'] = dict() - for key in self.extension: - self.object.properties['FLT']['EXT'][key] = self.extension[key] - - if self.parent and self.parent.object and (self.header.scene == self.parent.header.scene): - self.parent.object.makeParent([self.object],1) - - if self.matrix: - self.object.setMatrix(self.matrix) - - if self.vis == False: - self.object.restrictDisplay = True - self.object.restrictRender = True - - else: #check for LOD children and set the proper flags - lodlist = list() - for child in self.children: - if child.props.has_key('type') and child.props['type'] == 73: - if child.props['6d!switch out'] != 0.0: - child.vis = False - #lodlist.append(child) - - #def LODmin(a,b): - # if a.props['5d!switch in'] < b.props['5d!switch in']: - # return a - # return b - - #min= None - #if len(lodlist) > 1: - # for lod in lodlist: - # lod.vis = False - # min = lodlist[0] - # for i in xrange(len(lodlist)): - # min= LODmin(min,lodlist[i]) - # min.vis = True - - - Node.blender_import(self) # Attach faces to self.faceLs - - if self.hasMesh: - # Add all my faces into the mesh at once - self.blender_import_my_faces() - - def parse_face(self): - child = Face(self, self.subfacelevel) - child.parse() - return True - - def parse_group(self): - child = Group(self) - child.parse() - return True - - def move_to_next_layer(self): - global current_layer - current_layer = current_layer << 1 - if current_layer > 0x80000: - current_layer = 1 - - def parse_lod(self): - child = LOD(self) - child.parse() - return True - - def parse_unhandled(self): - child = Unhandled(self) - child.parse() - return True - - def parse_object(self): - child = Object(self) - child.parse() - return True - - def parse_xref(self): - child = XRef(self) - child.parse() - return True - - def parse_dof(self): - child = DOF(self) - child.parse() - return True - - def parse_indexed_light_point(self): - child = IndexedLightPoint(self) - child.parse() - return True - - def parse_inline_light_point(self): - child = InlineLightPoint(self) - child.parse() - return True - - def parse_matrix(self): - m = list() - for i in xrange(4): - m.append([]) - for j in xrange(4): - f = self.header.fw.read_float() - m[i].append(f) - self.matrix = Blender.Mathutils.Matrix(m[0], m[1], m[2], m[3]) - - def parse_subpush(self): - self.parse_push() - self.subfacelevel+= 1 - return True - def parse_subpop(self): - self.parse_pop() - self.subfacelevel -= 1 - return True - - - -class Face(Node): - def __init__(self, parent,subfacelevel): - Node.__init__(self, parent, parent.header) - self.root_handler.set_handler({31: self.parse_comment, - 10: self.parse_push, - 52: self.parse_multitex}) - self.root_handler.set_throw_back_lst(throw_back_opcodes) - - self.child_handler.set_handler({72: self.parse_vertex_list, - 10: self.parse_push, - 11: self.parse_pop, - 53: self.parse_uvlist}) - - if parent: - parent.hasMesh = True - - self.subfacelevel = subfacelevel - self.indices = list() # face verts here - self.uvlayers = dict() # MultiTexture layers keyed to layer bitmask. - self.uverts = list() # Vertex aligned list of dictionaries keyed to layer bitmask. - self.uvmask = 0 # Bitfield read from MTex record - - self.comment = '' - self.props = dict() - self.props['id'] = self.header.fw.read_string(8) - # Load face. 
- self.props['ir color'] = self.header.fw.read_int() - self.props['priority'] = self.header.fw.read_short() - self.props['draw type'] = self.header.fw.read_char() - self.props['texture white'] = self.header.fw.read_char() - self.header.fw.read_ahead(4) # color name indices - self.header.fw.read_ahead(1) # reserved - self.props['template billboard'] = self.header.fw.read_uchar() - self.detail_tex_index = self.header.fw.read_short() - self.tex_index = self.header.fw.read_short() - self.mat_index = self.header.fw.read_short() - self.props['smc'] = self.header.fw.read_short() - self.props['fid'] = self.header.fw.read_short() - self.props['ir material'] = self.header.fw.read_int() - self.alpha = 1.0 - float(self.header.fw.read_ushort()) / 65535.0 - self.props['lod generation control'] = self.header.fw.read_uchar() - self.header.fw.read_ahead(1) # line style index - self.props['flags'] = self.header.fw.read_int() - self.props['light mode'] = self.header.fw.read_uchar() - self.header.fw.read_ahead(7) - a = self.header.fw.read_uchar() - b = self.header.fw.read_uchar() - g = self.header.fw.read_uchar() - r = self.header.fw.read_uchar() - self.packed_color = [r, g, b, a] - a = self.header.fw.read_uchar() - b = self.header.fw.read_uchar() - g = self.header.fw.read_uchar() - r = self.header.fw.read_uchar() - self.alt_packed_color = [r, g, b, a] - self.tex_map_index = self.header.fw.read_short() - self.header.fw.read_ahead(2) - self.color_index = self.header.fw.read_uint() - self.alt_color_index = self.header.fw.read_uint() - #self.header.fw.read_ahead(2) - #self.shader_index = self.header.fw.read_short() - - def parse_comment(self): - self.comment = self.header.fw.read_string(self.header.fw.get_length()-4) - return True - - def blender_import(self): - vert_count = len(self.indices) - if vert_count < 1: - if global_prefs['verbose'] >= 2: - print 'Warning: Ignoring face with no vertices.' - return - - # Assign material and image - - self.parent.faceLs.append(self) - #need to store comment in mesh prop layer! - - # Store comment info in parent. - #if self.comment != '': - # if self.parent.props['comment'] != '': - # self.parent.props['comment'] += '\n\nFrom Face:\n' + self.comment - # else: - # self.parent.props['comment'] = self.comment - - if self.uvlayers: - #Make sure that the mesh knows about the layers that this face uses - self.parent.hasmtex = True - for mask in self.uvlayers.keys(): - self.parent.uvlayers[mask] = True - - def parse_vertex_list(self): - length = self.header.fw.get_length() - fw = self.header.fw - vert_pal = self.header.vert_pal - - count = (length-4)/4 - - # If this ever fails the chunk below does error checking - self.indices= [vert_pal.index[fw.read_int()] for i in xrange(count)] - ''' - for i in xrange(count): - byte_offset = fw.read_int() - if byte_offset in vert_pal.index: - index = vert_pal.index[byte_offset] - self.indices.append(index) - elif global_prefs['verbose'] >= 1: - print 'Warning: Unable to map byte offset %s' + \ - ' to vertex index.' % byte_offset - ''' - return True - - def parse_multitex(self): - #Parse MultiTex Record. - length = self.header.fw.get_length() - fw = self.header.fw - #num layers == (length - 8) / 4 - uvmask = fw.read_uint() - mask = 2147483648 - for i in xrange(7): - if mask & uvmask: - uvlayer = dict() - self.uvlayers[mask] = uvlayer - mask = mask / 2 - - #read in record for each individual layer. 
- for key in reversed(sorted(self.uvlayers)): - uvlayer = self.uvlayers[key] - uvlayer['texture index'] = fw.read_ushort() - uvlayer['texture enviorment'] = fw.read_ushort() - uvlayer['texture mapping'] = fw.read_ushort() - uvlayer['texture data'] = fw.read_ushort() - - self.uvmask = uvmask - - def parse_uvlist(self): - #for each uvlayer, add uv vertices - length = self.header.fw.get_length() - fw = self.header.fw - uvmask = fw.read_uint() - if uvmask != self.uvmask: #This should never happen! - fw.read_ahead(self.length - 4) #potentially unnessecary? - else: - #need to store in uvverts dictionary for each vertex. - totverts = len(self.indices) - for i in xrange(totverts): - uvert = dict() - for key in reversed(sorted(self.uvlayers)): - uv = Vector(0.0,0.0) - uv[0] = fw.read_float() - uv[1] = fw.read_float() - uvert[key] = uv - self.uverts.append(uvert) - -class Object(InterNode): - def __init__(self, parent): - Node.__init__(self, parent, parent.header) - InterNode.__init__(self) - - self.root_handler.set_handler({33: self.parse_long_id, - 21: self.parse_push_extension, - 31: self.parse_comment, - 10: self.parse_push, - 49: self.parse_matrix}) - self.root_handler.set_throw_back_lst(throw_back_opcodes) - - self.child_handler.set_handler({5: self.parse_face, - 19: self.parse_subpush, - 20: self.parse_subpop, - 111: self.parse_inline_light_point, - 10: self.parse_push, - 11: self.parse_pop}) - self.extension_handler.set_handler({22: self.parse_pop_extension, - 100: self.parse_extension}) - - self.extension = dict() - self.props = dict() - self.props['comment'] = '' - self.parse_record() - -class Group(InterNode): - def __init__(self, parent): - Node.__init__(self, parent, parent.header) - InterNode.__init__(self) - - self.root_handler.set_handler({33: self.parse_long_id, - 31: self.parse_comment, - 10: self.parse_push, - 49: self.parse_matrix, - 21: self.parse_push_extension}) - self.root_handler.set_throw_back_lst(throw_back_opcodes) - - self.child_handler.set_handler({5: self.parse_face, - 19: self.parse_subpush, - 20: self.parse_subpop, - 111: self.parse_inline_light_point, - 2: self.parse_group, - 73: self.parse_lod, - 4: self.parse_object, - 10: self.parse_push, - 11: self.parse_pop, - 96: self.parse_unhandled, - 14: self.parse_dof, - 91: self.parse_unhandled, - 98: self.parse_unhandled, - 63: self.parse_xref}) - - self.extension_handler.set_handler({22: self.parse_pop_extension, - 100: self.parse_extension}) - - self.props = dict.fromkeys(['type', 'id', 'comment', 'priority', 'flags', 'special1', - 'special2', 'significance', 'layer code', 'loop count', - 'loop duration', 'last frame duration']) - - self.props['comment'] = '' - self.parse_record() - - #self.props['type'] = str(self.opcode) + ':' + opcode_name[self.opcode] - #props = records[self.opcode] - #propkeys = props.keys() - #propkeys.sort() - #for position in propkeys: - # (type,length,name) = props[position] - # self.props[name] = read_prop(self.header.fw,type,length) - #self.props['id'] = self.props['3t8!id'] - -class DOF(InterNode): - def blender_import(self): - InterNode.blender_import(self) - - def __init__(self, parent): - Node.__init__(self, parent, parent.header) - InterNode.__init__(self) - - self.root_handler.set_handler({33: self.parse_long_id, - 31: self.parse_comment, - 10: self.parse_push, - 49: self.parse_matrix, - 21: self.parse_push_extension}) - self.root_handler.set_throw_back_lst(throw_back_opcodes) - - self.child_handler.set_handler({#130: self.parse_indexed_light_point, - 111: 
self.parse_inline_light_point, - 2: self.parse_group, - 73: self.parse_lod, - 4: self.parse_object, - 10: self.parse_push, - 11: self.parse_pop, - 96: self.parse_unhandled, - 14: self.parse_dof, - 91: self.parse_unhandled, - 98: self.parse_unhandled, - 63: self.parse_xref}) - self.extension_handler.set_handler({22: self.parse_pop_extension, - 100: self.parse_extension}) - self.props = dict() - self.props['comment'] = '' - self.parse_record() - - -class XRef(InterNode): - def parse(self): - if self.xref: - self.xref.parse() - Node.parse(self) - - def __init__(self, parent): - Node.__init__(self, parent, parent.header) - InterNode.__init__(self) - - self.root_handler.set_handler({49: self.parse_matrix}) - self.root_handler.set_throw_back_lst(throw_back_opcodes) - - self.props = dict() - self.props['comment'] = '' - self.parse_record() - - xref_filename = self.props['3t200!filename'] #I dont even think there is a reason to keep this around... - - if not os.path.isabs(xref_filename): - absname = os.path.join(os.path.dirname(self.header.filename), xref_filename) - else: - absname = xref_filename - - self.props['id'] = 'X: ' + Blender.sys.splitext(Blender.sys.basename(xref_filename))[0] #this is really wrong as well.... - - if global_prefs['doxrefs'] and os.path.exists(absname) and not self.header.grr.xrefs.has_key(xref_filename): - self.xref = Database(absname, self.header.grr, self) - self.header.grr.xrefs[xref_filename] = self.xref - else: - self.xref = None - - - def blender_import(self): - #name = self.props['type'] + ': ' + self.props['id'] - name = self.props['id'] - self.object = self.header.scene.objects.new('Empty') - self.object.name = name - self.object.enableDupGroup = True - self.header.group.objects.link(self.object) - - #for broken links its ok to leave this empty! they purely for visual purposes anyway..... - try: - self.object.DupGroup = self.header.grr.xrefs[self.props['3t200!filename']].group - except: - pass - - - - - if self.parent and self.parent.object: - self.parent.object.makeParent([self.object],1) - - if self.matrix: - self.object.setMatrix(self.matrix) - - - #id props import - self.object.properties['FLT'] = dict() - for key in self.props: - try: - self.object.properties['FLT'][key] = self.props[key] - except: #horrible... 
- pass - - self.object.Layer = current_layer - self.object.sel = 1 - - Node.blender_import(self) - - -class LOD(InterNode): - def blender_import(self): - #self.move_to_next_layer() - InterNode.blender_import(self) - #self.object.properties['FLT'] = self.props.copy() - - def __init__(self, parent): - Node.__init__(self, parent, parent.header) - InterNode.__init__(self) - - self.root_handler.set_handler({33: self.parse_long_id, - 31: self.parse_comment, - 10: self.parse_push, - 49: self.parse_matrix, - 21: self.parse_push_extension}) - self.root_handler.set_throw_back_lst(throw_back_opcodes) - - self.child_handler.set_handler({2: self.parse_group, - 111: self.parse_inline_light_point, - 73: self.parse_lod, - 4: self.parse_object, - 10: self.parse_push, - 11: self.parse_pop, - 96: self.parse_unhandled, # switch - 14: self.parse_dof, # DOF - 91: self.parse_unhandled, # sound - 98: self.parse_unhandled, # clip - 63: self.parse_xref}) - self.extension_handler.set_handler({22: self.parse_pop_extension, - 100: self.parse_extension}) - - - self.props = dict() - self.props['comment'] = '' - self.parse_record() - -class InlineLightPoint(InterNode): - def __init__(self, parent): - Node.__init__(self, parent, parent.header) - InterNode.__init__(self) - self.root_handler.set_handler({33: self.parse_long_id, - 31: self.parse_comment, - 10: self.parse_push, - 21: self.parse_push_extension, - 49: self.parse_matrix}) - self.root_handler.set_throw_back_lst(throw_back_opcodes) - - self.child_handler.set_handler({72: self.parse_vertex_list, - 10: self.parse_push, - 11: self.parse_pop}) - self.extension_handler.set_handler({22: self.parse_pop_extension, - 100: self.parse_extension}) - - self.indices = list() - self.props = dict() - self.props['comment'] = '' - self.parse_record() - - - def blender_import(self): - - - name = self.props['id'] - self.mesh= Blender.Mesh.New() - self.mesh.name = 'FLT_LP' - self.object = self.header.scene.objects.new(self.mesh) - self.object.name = name - #self.mesh.verts.extend(Vector() ) # DUMMYVERT - self.object.Layer = current_layer - self.object.sel= 1 - - self.object.properties['FLT'] = dict() - for key in self.props: - try: - self.object.properties['FLT'][key] = self.props[key] - except: #horrible... - pass - - if self.extension: - self.object.properties['FLT']['EXT'] = dict() - for key in self.extension: - self.object.properties['FLT']['EXT'][key] = self.extension[key] - - if self.parent and self.parent.object and self.header.scene == self.parent.header.scene: - self.parent.object.makeParent([self.object]) - - if self.matrix: - self.object.setMatrix(self.matrix) - - self.mesh.verts.extend([self.header.vert_pal.blender_verts[i] for i in self.indices]) - - #add color index information. 
- self.mesh.verts.addPropertyLayer("FLT_VCOL",Blender.Mesh.PropertyTypes["INT"]) - for i, vindex in enumerate(self.indices): - vdesc = self.header.vert_pal.vert_desc_lst[vindex] - v = self.mesh.verts[i] - v.setProperty("FLT_VCOL",vdesc.cindex) - #for i, v in enumerate(self.mesh.verts): - # vdesc = self.header.vert_pal.vert_desc_lst[i] - # v.setProperty("FLT_VCOL",vdesc.cindex) - self.mesh.update() - - def parse_vertex_list(self): - length = self.header.fw.get_length() - fw = self.header.fw - vert_pal = self.header.vert_pal - - count = (length-4)/4 - - # If this ever fails the chunk below does error checking - self.indices= [vert_pal.index[fw.read_int()] for i in xrange(count)] - - ''' - for i in xrange(count): - byte_offset = fw.read_int() - if byte_offset in vert_pal.index: - index = vert_pal.index[byte_offset] - self.indices.append(index) - elif global_prefs['verbose'] >= 1: - print 'Warning: Unable to map byte offset %s' + \ - ' to vertex index.' % byte_offset - ''' - - return True - - - -class IndexedLightPoint(InterNode): - # return dictionary: lp_app name => index list - def group_points(self, props): - - name_to_indices = {} - - for i in self.indices: - vert_desc = self.header.vert_pal.vert_desc_lst[i] - app_desc = LightPointAppDesc() - app_desc.props.update(props) - # add vertex normal and color - app_desc.props.update({'nx': vert_desc.nx}) - app_desc.props.update({'ny': vert_desc.ny}) - app_desc.props.update({'nz': vert_desc.nz}) - - app_desc.props.update({'r': vert_desc.r}) - app_desc.props.update({'g': vert_desc.g}) - app_desc.props.update({'b': vert_desc.b}) - app_desc.props.update({'a': vert_desc.a}) - - app_name = self.header.grr.request_lightpoint_app(app_desc, self.header.scene) - - if name_to_indices.get(app_name): - name_to_indices[app_name].append(i) - else: - name_to_indices.update({app_name: [i]}) - - return name_to_indices - - def blender_import(self): - name = self.props['type'] + ': ' + self.props['id'] - - name_to_indices = self.group_points(self.header.lightpoint_appearance_pal[self.index]) - - for app_name, indices in name_to_indices.iteritems(): - self.object = Blender.Object.New('Mesh', name) - self.mesh= Blender.Mesh.New() - self.mesh.verts.extend( Vector() ) # DUMMYVERT - self.object.link(self.mesh) - - if self.parent: - self.parent.object.makeParent([self.object]) - - for i in indices: - vert = self.header.vert_pal.blender_verts[i] - self.mesh.verts.append(vert) - - self.header.scene.objects.link(self.object) - - self.object.Layer = current_layer - - if self.matrix: - self.object.setMatrix(self.matrix) - - # Import comment. - if self.props['comment'] != '': - name = 'COMMENT: ' + self.props['id'] - t = Blender.Text.New(name) - t.write(self.props['comment']) - self.props['comment'] = name - - # Attach properties. - self.props.update({'appearance': app_name}) - for name, value in self.props.iteritems(): - self.object.addProperty(name, value) - - self.mesh.update() - - def parse_vertex_list(self): - length = self.header.fw.get_length() - fw = self.header.fw - vert_pal = self.header.vert_pal - - count = (length-4)/4 - - # If this ever fails the chunk below does error checking - self.indices= [vert_pal.index[fw.read_int()] for i in xrange(count)] - - ''' - for i in xrange(count): - byte_offset = fw.read_int() - if byte_offset in vert_pal.index: - index = vert_pal.index[byte_offset] - self.indices.append(index) - elif global_prefs['verbose'] >= 1: - print 'Warning: Unable to map byte offset %s' + \ - ' to vertex index.' 
% byte_offset - ''' - return True - - def __init__(self, parent): - Node.__init__(self, parent, parent.header) - InterNode.__init__(self) - self.root_handler.set_handler({33: self.parse_long_id, - 31: self.parse_comment, - 10: self.parse_push, - 49: self.parse_matrix}) - self.root_handler.set_throw_back_lst(throw_back_opcodes) - - self.child_handler.set_handler({72: self.parse_vertex_list, - 10: self.parse_push, - 11: self.parse_pop}) - - self.indices = list() - - self.props = dict.fromkeys(['id', 'type', 'comment', 'draw order', 'appearance']) - self.props['comment'] = '' - self.props['type'] = 'Light Point' - self.props['id'] = self.header.fw.read_string(8) - self.index = self.header.fw.read_int() - self.header.fw.read_ahead(4) # animation index - self.props['draw order'] = self.header.fw.read_int() - -class Unhandled(InterNode): - def __init__(self, parent): - Node.__init__(self, parent, parent.header) - InterNode.__init__(self) - - self.root_handler.set_handler({33: self.parse_long_id, - 31: self.parse_comment, - 10: self.parse_push, - 49: self.parse_matrix}) - self.root_handler.set_throw_back_lst(throw_back_opcodes) - - self.child_handler.set_handler({2: self.parse_group, - 73: self.parse_lod, - 4: self.parse_object, - 10: self.parse_push, - 11: self.parse_pop, - 96: self.parse_unhandled, # switch - 14: self.parse_dof, # DOF - 91: self.parse_unhandled, # sound - 98: self.parse_unhandled, # clip - 63: self.parse_xref}) - - self.props['id'] = self.header.fw.read_string(8) - -class Database(InterNode): - def blender_import(self): - for key in self.tex_pal.keys(): - path_filename= FF.find(self.tex_pal[key][0]) - if path_filename != None: - img = self.grr.request_image(path_filename) - if img: - self.tex_pal[key][1] = img - elif global_prefs['verbose'] >= 1: - print 'Warning: Unable to find', self.tex_pal[key][0] - - self.scene.properties['FLT'] = dict() - for key in self.props: - try: - self.scene.properties['FLT'][key] = self.props[key] - except: #horrible... 
- pass - - self.scene.properties['FLT']['Main'] = 0 - self.scene.properties['FLT']['Filename'] = self.bname - - for child in self.children: - if child.props.has_key('type') and child.props['type'] == 73: - if child.props['6d!switch out'] != 0.0: - child.vis = False - - #import color palette - carray = list() - for color in self.col_pal: - carray.append(struct.unpack('>i',struct.pack('>BBBB',color[0],color[1],color[2],color[3]))[0]) - self.scene.properties['FLT']['Color Palette'] = carray - Node.blender_import(self) - - def parse_appearance_palette(self): - props = dict() - self.fw.read_ahead(4) # reserved - props.update({'id': self.fw.read_string(256)}) - index = self.fw.read_int() - props.update({'smc': self.fw.read_short()}) - props.update({'fid': self.fw.read_short()}) - props.update({'back color: a': self.fw.read_uchar()}) - props.update({'back color: b': self.fw.read_uchar()}) - props.update({'back color: g': self.fw.read_uchar()}) - props.update({'back color: r': self.fw.read_uchar()}) - props.update({'display mode': self.fw.read_int()}) - props.update({'intensity': self.fw.read_float()}) - props.update({'back intensity': self.fw.read_float()}) - props.update({'minimum defocus': self.fw.read_float()}) - props.update({'maximum defocus': self.fw.read_float()}) - props.update({'fading mode': self.fw.read_int()}) - props.update({'fog punch mode': self.fw.read_int()}) - props.update({'directional mode': self.fw.read_int()}) - props.update({'range mode': self.fw.read_int()}) - props.update({'min pixel size': self.fw.read_float()}) - props.update({'max pixel size': self.fw.read_float()}) - props.update({'actual size': self.fw.read_float()}) - props.update({'trans falloff pixel size': self.fw.read_float()}) - props.update({'trans falloff exponent': self.fw.read_float()}) - props.update({'trans falloff scalar': self.fw.read_float()}) - props.update({'trans falloff clamp': self.fw.read_float()}) - props.update({'fog scalar': self.fw.read_float()}) - props.update({'fog intensity': self.fw.read_float()}) - props.update({'size threshold': self.fw.read_float()}) - props.update({'directionality': self.fw.read_int()}) - props.update({'horizontal lobe angle': self.fw.read_float()}) - props.update({'vertical lobe angle': self.fw.read_float()}) - props.update({'lobe roll angle': self.fw.read_float()}) - props.update({'dir falloff exponent': self.fw.read_float()}) - props.update({'dir ambient intensity': self.fw.read_float()}) - props.update({'significance': self.fw.read_float()}) - props.update({'flags': self.fw.read_int()}) - props.update({'visibility range': self.fw.read_float()}) - props.update({'fade range ratio': self.fw.read_float()}) - props.update({'fade in duration': self.fw.read_float()}) - props.update({'fade out duration': self.fw.read_float()}) - props.update({'LOD range ratio': self.fw.read_float()}) - props.update({'LOD scale': self.fw.read_float()}) - - self.lightpoint_appearance_pal.update({index: props}) - - def parse_header(self): - self.props['type'] = 'Header' - self.props['comment'] = '' - self.props['id'] = self.fw.read_string(8) - self.props['version'] = self.fw.read_int() - self.fw.read_ahead(46) - self.props['units'] = self.fw.read_char() - self.props['set white'] = bool(self.fw.read_char()) - self.props['flags'] = self.fw.read_int() - self.fw.read_ahead(24) - self.props['projection type'] = self.fw.read_int() - self.fw.read_ahead(36) - self.props['sw x'] = self.fw.read_double() - self.props['sw y'] = self.fw.read_double() - self.props['dx'] = self.fw.read_double() - 
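The palette and header parsers in this class read big-endian fields through fw, a flt_filewalker.FltIn instance that is not part of this hunk. A rough standalone equivalent of those read_* helpers, assuming the usual big-endian OpenFlight layout (all struct calls in these scripts use '>'); the class and its behaviour are an approximation, not the real flt_filewalker API:

import struct

class RecordReader(object):
    # Approximates fw.read_int / read_double / read_string / read_ahead
    # using struct on an in-memory byte string.
    def __init__(self, data):
        self.data = data
        self.pos = 0

    def _read(self, fmt):
        value = struct.unpack_from(fmt, self.data, self.pos)[0]
        self.pos += struct.calcsize(fmt)
        return value

    def read_short(self):
        return self._read('>h')

    def read_int(self):
        return self._read('>i')

    def read_double(self):
        return self._read('>d')

    def read_string(self, length):
        raw = self.data[self.pos:self.pos + length]
        self.pos += length
        return raw.split(b'\x00', 1)[0].decode('ascii', 'replace')

    def read_ahead(self, count):
        self.pos += count

# Example: an 8-byte id followed by a 32-bit int, like the start of the header record.
reader = RecordReader(b'db\x00\x00\x00\x00\x00\x00' + struct.pack('>i', 1640))
assert reader.read_string(8) == 'db'
assert reader.read_int() == 1640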
self.props['dy'] = self.fw.read_double() - self.fw.read_ahead(24) - self.props['sw lat'] = self.fw.read_double() - self.props['sw lon'] = self.fw.read_double() - self.props['ne lat'] = self.fw.read_double() - self.props['ne lon'] = self.fw.read_double() - self.props['origin lat'] = self.fw.read_double() - self.props['origin lon'] = self.fw.read_double() - self.props['lambert lat1'] = self.fw.read_double() - self.props['lambert lat2'] = self.fw.read_double() - self.fw.read_ahead(16) - self.props['ellipsoid model'] = self.fw.read_int() - self.fw.read_ahead(4) - self.props['utm zone'] = self.fw.read_short() - self.fw.read_ahead(6) - self.props['dz'] = self.fw.read_double() - self.props['radius'] = self.fw.read_double() - self.fw.read_ahead(8) - self.props['major axis'] = self.fw.read_double() - self.props['minor axis'] = self.fw.read_double() - - if global_prefs['verbose'] >= 1: - print 'OpenFlight Version:', float(self.props['version']) / 100.0 - print - - return True - - def parse_mat_palette(self): - mat_desc = MaterialDesc() - index = self.fw.read_int() - - name = self.fw.read_string(12) - if len(mat_desc.name) > 0: - mat_desc.name = name - - flag = self.fw.read_int() - # skip material if not used - if not flag & 0x80000000: - return True - - ambient_col = [self.fw.read_float(), self.fw.read_float(), self.fw.read_float()] - mat_desc.diffuse = [self.fw.read_float(), self.fw.read_float(), self.fw.read_float()] - mat_desc.specular = [self.fw.read_float(), self.fw.read_float(), self.fw.read_float()] - emissive_col = [self.fw.read_float(), self.fw.read_float(), self.fw.read_float()] - - mat_desc.shininess = self.fw.read_float() / 64.0 # [0.0, 128.0] => [0.0, 2.0] - mat_desc.alpha = self.fw.read_float() - - # Convert ambient and emissive colors into intensitities. - mat_desc.ambient = col_to_gray(ambient_col) - mat_desc.emissive = col_to_gray(emissive_col) - - self.mat_desc_pal_lst.append( (index, mat_desc) ) - - return True - - def get_color(self, color_index): - color = None - index = color_index / 128 - intensity = float(color_index - 128.0 * index) / 127.0 - - if index >= 0 and index <= 1023: - brightest = self.col_pal[index] - r = int(brightest[0] * intensity) - g = int(brightest[1] * intensity) - b = int(brightest[2] * intensity) - a = int(brightest[3]) - - color = [r, g, b, a] - - return color - - def parse_color_palette(self): - self.header.fw.read_ahead(128) - for i in xrange(1024): - a = self.header.fw.read_uchar() - b = self.header.fw.read_uchar() - g = self.header.fw.read_uchar() - r = self.header.fw.read_uchar() - self.col_pal.append((r, g, b, a)) - return True - - def parse_vertex_palette(self): - self.vert_pal = VertexPalette(self) - self.vert_pal.parse() - return True - - def parse_texture_palette(self): - name = self.fw.read_string(200) - index = self.fw.read_int() - self.tex_pal[index]= [name, None] - return True - - def read_attribute_files(self): - for tex in self.tex_pal.keys(): - [name,image] = self.tex_pal[tex] - basename = os.path.basename(name) - if(image): - basename = basename + ".attr" - dirname = os.path.dirname(Blender.sys.expandpath(image.getFilename())) #can't rely on original info stored in pallette since it might be relative link - newpath = os.path.join(dirname, basename) - if os.path.exists(newpath) and not image.properties.has_key('FLT'): - fw = flt_filewalker.FltIn(newpath) - fw.read_ahead(8) #We dont care what the attribute file says about x/y dimensions - image.properties['FLT']={} - - #need to steal code from parse records.... 
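The attribute keys read back just below ('10i!Wrap', '11i!WrapU', '12i!WrapV') follow the same convention as the FLT property dictionaries used throughout these scripts: record position, a one-character type code, an optional byte length, then '!' and the display name. The parser below is an inference from the keys visible in these files, not a helper the scripts actually define:

import re

_KEY_RE = re.compile(r'^(\d+)([cCsSiIfdt])(\d*)!(.+)$')

def parse_prop_key(key):
    # Split '<position><typecode>[<length>]!<name>' into its parts.
    match = _KEY_RE.match(key)
    if not match:
        raise ValueError('not a typed FLT property key: %r' % key)
    position, typecode, length, name = match.groups()
    return int(position), typecode, int(length) if length else None, name

assert parse_prop_key('3t8!id') == (3, 't', 8, 'id')
assert parse_prop_key('10i!Wrap') == (10, 'i', None, 'Wrap')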
- props = records['Image'] - propkeys = props.keys() - propkeys.sort() - for position in propkeys: - (type,length,name) = props[position] - image.properties['FLT'][name] = read_prop(fw,type,length) - fw.close_file() - - #copy clamp settings - wrap = image.properties['FLT']['10i!Wrap'] - wrapu = image.properties['FLT']['11i!WrapU'] - wrapv = image.properties['FLT']['12i!WrapV'] - - if wrapu == 3 or wrapv == 3: - wrapuv = (wrap,wrap) - else: - wrapuv = (wrapu, wrapv) - image.clampX = wrapuv[0] - image.clampY = wrapuv[1] - - elif not os.path.exists(newpath): - print "Cannot read attribute file:" + newpath - - def __init__(self, filename, grr, parent=None): - if global_prefs['verbose'] >= 1: - print 'Parsing:', filename - print - - #check to see if filename is a relative path - #filename = os.path.abspath(filename) - - self.fw = flt_filewalker.FltIn(filename) - self.filename = filename - self.bname = os.path.splitext(os.path.basename(filename))[0] - self.grr = grr - - Node.__init__(self, parent, self) - InterNode.__init__(self) - - self.root_handler.set_handler({1: self.parse_header, - 67: self.parse_vertex_palette, - 33: self.parse_long_id, - 31: self.parse_comment, - 64: self.parse_texture_palette, - 32: self.parse_color_palette, - 113: self.parse_mat_palette, - 128: self.parse_appearance_palette, - 10: self.parse_push}) - if parent: - self.root_handler.set_throw_back_lst(throw_back_opcodes) - - self.child_handler.set_handler({#130: self.parse_indexed_light_point, - 111: self.parse_inline_light_point, - 2: self.parse_group, - 73: self.parse_lod, - 4: self.parse_object, - 10: self.parse_push, - 11: self.parse_pop, - 96: self.parse_unhandled, - 14: self.parse_dof, - 91: self.parse_unhandled, - 98: self.parse_unhandled, - 63: self.parse_xref}) - - self.scene = Blender.Scene.New(self.bname) - self.group = Blender.Group.New(self.bname) - - self.vert_pal = None - self.lightpoint_appearance_pal = dict() - self.tex_pal = dict() - #self.tex_pal_lst = list() - #self.bl_tex_pal = dict() - self.col_pal = list() - self.mat_desc_pal_lst = list() - self.mat_desc_pal = dict() - self.props = dict.fromkeys(['id', 'type', 'comment', 'version', 'units', 'set white', - 'flags', 'projection type', 'sw x', 'sw y', 'dx', 'dy', 'dz', 'sw lat', - 'sw lon', 'ne lat', 'ne lon', 'origin lat', 'origin lon', 'lambert lat1', - 'lambert lat2', 'ellipsoid model', 'utm zone', 'radius', 'major axis', 'minor axis']) - - -def clearparent(root,childhash): - for child in childhash[root]: - clearparent(child,childhash) - root.clrParent(2,0) - -def fixscale(root,childhash): - for child in childhash[root]: - fixscale(child,childhash) - location = Blender.Mathutils.Vector(root.getLocation('worldspace')) - if location[0] != 0.0 or location[1] != 0.0 or location[2] != 0.0: - #direction = Blender.Mathutils.Vector(0-location[0],0-location[1],0-location[2]) #reverse vector - smat = Blender.Mathutils.ScaleMatrix(global_prefs['scale'],4) - root.setLocation(location * smat) - #if its a mesh, we need to scale all of its vertices too - if root.type == 'Mesh': - smat = Blender.Mathutils.ScaleMatrix(global_prefs['scale'],4) - rmesh = root.getData(mesh=True) - for v in rmesh.verts: - v.co = v.co * smat - -def reparent(root,childhash,sce): - for child in childhash[root]: - reparent(child,childhash,sce) - - root.makeParent(childhash[root]) - sce.update(1) - -def update_scene(root,sdone): - for object in root.objects: - if object.DupGroup: - try: - child = Blender.Scene.Get(object.DupGroup.name) - except: - child = None - if child and child not in 
sdone: - update_scene(child,sdone) - root.makeCurrent() - #create a list of children for each object - childhash = dict() - for object in root.objects: - childhash[object] = list() - - for object in root.objects: - if object.parent: - childhash[object.parent].append(object) - - for object in root.objects: - if not object.parent: - #recursivley go through and clear all the children of their transformation, starting at deepest level first. - clearparent(object,childhash) - #now fix the location of everything - fixscale(object,childhash) - #now fix the parenting - reparent(object,childhash,root) - - for object in root.objects: - object.makeDisplayList() - root.update(1) - sdone.append(root) - - -def select_file(filename, grr): - if not Blender.sys.exists(filename): - msg = 'Error: File ' + filename + ' does not exist.' - Blender.Draw.PupMenu(msg) - return - - if not filename.lower().endswith('.flt'): - msg = 'Error: Not a flight file.' - Blender.Draw.PupMenu(msg) - print msg - print - return - - global_prefs['fltfile']= filename - global_prefs['verbose']= 1 - global_prefs['get_texture'] = True - global_prefs['get_diffuse'] = True - global_prefs['get_specular'] = False - global_prefs['get_emissive'] = False - global_prefs['get_alpha'] = True - global_prefs['get_ambient'] = False - global_prefs['get_shininess'] = True - global_prefs['color_from_face'] = True - global_prefs['log to blender'] = True - - - - Blender.Window.WaitCursor(True) - Blender.Window.EditMode(0) - - - FF.add_file_to_search_path(filename) - - if global_prefs['verbose'] >= 1: - print 'Pass 1: Loading.' - print - - load_time = Blender.sys.time() - db = Database(filename,grr) - db.parse() - load_time = Blender.sys.time() - load_time - - if global_prefs['verbose'] >= 1: - print - print 'Pass 2: Importing to Blender.' - print - - import_time = Blender.sys.time() - db.blender_import() - - if global_prefs['attrib']: - print "reading attribute files" - db.read_attribute_files() - - Blender.Window.ViewLayer(range(1,21)) - - update_scene(db.scene,[]) - import_time = Blender.sys.time() - import_time - if global_prefs['verbose'] >= 1: - print 'Done.' 
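update_scene above builds a parent-to-children map once and then works children-first, so a child's transform is cleared and rescaled before its parent is touched. A standalone sketch of that traversal pattern (Node is a stand-in for a Blender object; all names here are illustrative):

class Node(object):
    def __init__(self, name, parent=None, location=(0.0, 0.0, 0.0)):
        self.name = name
        self.parent = parent
        self.location = location

def build_child_map(nodes):
    # One list of direct children per node, like the childhash dict above.
    children = dict((node, []) for node in nodes)
    for node in nodes:
        if node.parent is not None:
            children[node.parent].append(node)
    return children

def scale_depth_first(node, children, factor):
    # Recurse into children before handling the node itself (post-order).
    for child in children[node]:
        scale_depth_first(child, children, factor)
    node.location = tuple(c * factor for c in node.location)

root = Node('root', location=(1.0, 2.0, 3.0))
leaf = Node('leaf', parent=root, location=(4.0, 0.0, 0.0))
child_map = build_child_map([root, leaf])
scale_depth_first(root, child_map, 10.0)
assert leaf.location == (40.0, 0.0, 0.0)
assert root.location == (10.0, 20.0, 30.0)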
- print - print 'Time to parse file: %.3f seconds' % load_time - print 'Time to import to blender: %.3f seconds' % import_time - print 'Total time: %.3f seconds' % (load_time + import_time) - - Blender.Window.WaitCursor(False) - -def setimportscale(ID,val): - global global_prefs - global_prefs['scale'] = val -def setBpath(fname): - global_prefs['fltfile'] = fname - d = dict() - for key in global_prefs: - d[key] = global_prefs[key] - Blender.Registry.SetKey('flt_import', d, 1) - -def event(evt,val): - pass - -from Blender.BGL import * -from Blender import Draw - -def but_event(evt): - - global FLTBaseLabel - global FLTBaseString - global FLTBaseChooser - - global FLTExport - global FLTClose - - global FLTDoXRef - global FLTShadeImport - global FLTAttrib - - global FLTWarn - - #Import DB - if evt == 1: - if global_prefs['verbose'] >= 1: - print - print 'OpenFlight Importer' - print 'Version:', __version__ - print 'Author: Greg MacDonald, Campbell Barton, Geoffrey Bantle' - print __url__[2] - print - - GRR = GlobalResourceRepository() - - try: - select_file(global_prefs['fltfile'], GRR) - except: - import traceback - FLTWarn = Draw.PupBlock("Ixport Error", ["See console for output!"]) - traceback.print_exception(sys.exc_type, sys.exc_value, sys.exc_traceback) - - #choose base path for export - if evt == 4: - Blender.Window.FileSelector(setBpath, "DB Root", global_prefs['fltfile']) - #Import custom shading? - if evt == 9: - global_prefs['smoothshading'] = FLTShadeImport.val - #Import Image attribute files - if evt == 10: - global_prefs['attrib'] = FLTAttrib.val - #export XRefs - if evt == 13: - global_prefs['doxrefs'] = FLTDoXRef.val - - if evt == 2: - Draw.Exit() - - d = dict() - for key in global_prefs: - d[key] = global_prefs[key] - Blender.Registry.SetKey('flt_import', d, 1) - -def gui(): - - global FLTBaseLabel - global FLTBaseString - global FLTBaseChooser - - global FLTExport - global FLTClose - - global FLTDoXRef - global FLTShadeImport - - global FLTAttrib - - - glClearColor(0.772,0.832,0.847,1.0) - glClear(GL_COLOR_BUFFER_BIT) - - areas = Blender.Window.GetScreenInfo() - curarea = Blender.Window.GetAreaID() - curRect = None - - for area in areas: - if area['id'] == curarea: - curRect = area['vertices'] - break - - width = curRect[2] - curRect[0] - height = curRect[3] - curRect[1] - cx = 50 - cy = height - 80 - - FLTBaseLabel = Draw.Label("Base file:",cx,cy,100,20) - FLTBaseString = Draw.String("",3,cx+100,cy,300,20,global_prefs['fltfile'],255,"Root DB file") - FLTBaseChooser = Draw.PushButton("...",4,cx+400,cy,20,20,"Choose Folder") - - cy = cy-40 - FLTScale = Draw.Number("Import Scale",14,cx,cy,220,20,global_prefs['scale'],0.0,100.0,"Export scaleing factor",setimportscale) - - cy = cy-40 - FLTDoXRef = Draw.Toggle("Import XRefs", 13,cx,cy,220,20,global_prefs['doxrefs'],"Import External references") - - cy = cy-40 - FLTShadeImport = Draw.Toggle("Import Custom Shading",9,cx,cy,220,20,global_prefs['smoothshading'],"Import custom shading via edgesplit modifiers") - - cy = cy-40 - FLTAttrib = Draw.Toggle("Import Attribute Files", 10,cx,cy,220,20,global_prefs['attrib'],"Import Image Attribute files") - - cy = cy - 40 - FLTExport = Draw.PushButton("Import",1,cx,20,100,20,"Import FLT Database") - FLTClose = Draw.PushButton("Close",2,cx+120,20,100,20,"Close Window") - - - -Draw.Register(gui,event,but_event) \ No newline at end of file diff --git a/release/scripts/flt_lodedit.py b/release/scripts/flt_lodedit.py deleted file mode 100644 index 58319b9e525..00000000000 --- 
a/release/scripts/flt_lodedit.py +++ /dev/null @@ -1,502 +0,0 @@ -#!BPY - -""" -Name: 'FLT LOD Editor' -Blender: 240 -Group: 'Misc' -Tooltip: 'Level of Detail Edtior for FLT nodes' -""" - -__author__ = "Geoffrey Bantle" -__version__ = "1.0 11/21/07" -__email__ = ('scripts', 'Author, ') -__url__ = ('blender', 'blenderartists.org') - -__bpydoc__ ="""\ -This script provides tools for working with OpenFlight databases in Blender. OpenFlight is a -registered trademark of MultiGen-Paradigm, Inc. - -Feature overview and more availible at: -http://wiki.blender.org/index.php/Scripts/Manual/FLTools -""" - -# -------------------------------------------------------------------------- -# flt_palettemanager.py version 0.1 2005/04/08 -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Copyright (C) 2007: Blender Foundation -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -import Blender.Draw as Draw -from Blender.BGL import * -import Blender -import flt_properties -reload(flt_properties) -from flt_properties import * - -#event codes -evcode = { - "LOD_MAKE" : 100, - "LOD_DELETE" : 101, - "LOD_CALC_CENTER" : 102, - "LOD_GRAB_CENTER" : 103, - "LOD_X" : 104, - "LOD_Y" : 105, - "LOD_Z" : 106, - "LOD_FREEZE" : 107, - "LOD_SIG" : 108, - "LOD_IN" : 109, - "LOD_OUT" : 110, - "LOD_TRANS" : 111, - "LOD_PREVIOUS" : 112 -} - - -#system -LOD_MAKE = None #PushButton -LOD_DELETE = None #PushButton -LOD_CALC_CENTER = None #PushButton -LOD_GRAB_CENTER = None #Pushbutton -LOD_FREEZE = None #Toggle -LOD_PREVIOUS = None #Toggle - -LOD_X = None #Input -LOD_Y = None #Input -LOD_Z = None #Input - -LOD_SIG = None #Input -LOD_IN = None #Input -LOD_OUT = None #Input -LOD_TRANS = None #Input - -#labels -LOD_EDITLABEL = None -LOD_SWITCHLABEL = None -LOD_CENTERLABEL = None - -LOD_XLABEL = None -LOD_YLABEL = None -LOD_ZLABEL = None -LOD_SIGLABEL = None -LOD_INLABEL = None -LOD_OUTLABEL = None -LOD_TRANSLABEL = None - - -#ID Props -switch_in = '5d!switch in' -switch_out = '6d!switch out' -xco = '10d!X co' -yco = '11d!Y co' -zco = '12d!Z co' -trans = '13d!Transition' -sig_size = '14d!Sig Size' - -#Flags -lodflag = '9I!flags' -previous_mask = (1 << 31) -freeze_mask = (1 << 29) - -def update_state(): - state = dict() - state["activeScene"] = Blender.Scene.GetCurrent() - state["activeObject"] = state["activeScene"].objects.active - if state["activeObject"] and not state["activeObject"].sel: - state["activeObject"] = None - state["activeMesh"] = None - if state["activeObject"] and state["activeObject"].type == 'Mesh': - state["activeMesh"] = state["activeObject"].getData(mesh=True) - - state["activeFace"] = None - if state["activeMesh"]: - if state["activeMesh"].faceUV and 
state["activeMesh"].activeFace != None: - state["activeFace"] = state["activeMesh"].faces[state["activeMesh"].activeFace] - - - #update editmode - state["editmode"] = Blender.Window.EditMode() - - return state - -def idprops_append(object, typecode, props): - object.properties["FLT"] = dict() - object.properties["FLT"]['type'] = typecode - for prop in props: - object.properties["FLT"][prop] = props[prop] - object.properties["FLT"]['3t8!id'] = object.name - -def idprops_kill(): - state = update_state() - if state["activeObject"] and state["activeObject"].properties.has_key('FLT'): - state["activeObject"].properties.pop('FLT') - -def idprops_copy(source): - state = update_state() - if source.properties.has_key('FLT'): - for object in state["activeScene"].objects: - if object.sel and object != source and (state["activeScene"].Layers & object.Layers): - idprops_kill(object) - object.properties['FLT'] = dict() - for key in source.properties['FLT']: - object.properties['FLT'][key] = source.properties['FLT'][key] - -def select_by_typecode(typecode): - state = update_state() - - for object in state["activeScene"].objects: - if object.properties.has_key('FLT') and object.properties['FLT']['type'] == typecode and state["activeScene"].Layers & object.Layers: - object.select(1) - -def idprops_type(object, typecode): - if object.properties.has_key('FLT') and object.properties['FLT'].has_key('type') and object.properties['FLT']['type'] == typecode: - return True - return False - -#ui type code -def get_prop(typecode, prop): - - state = update_state() - if state["activeObject"] and idprops_type(state["activeObject"], typecode): - props = state["activeObject"].properties['FLT'] - else: - props = flt_properties.FLTLOD - - return props[prop] - -def set_prop(typecode, prop, value): - state = update_state() - if state["activeObject"] and idprops_type(state["activeObject"],typecode): - state["activeObject"].properties['FLT'][prop] = value - - - -def get_lockmask(mask): - global lodflag - state = update_state() - if state["activeObject"]: - flag = get_prop(73,lodflag) - if flag & mask: - return True - return False - -def set_lockmask(mask): - state = update_state() - if state["activeObject"] and idprops_type(state["activeObject"], 73): - oldvalue = state["activeObject"].properties['FLT'][lodflag] - oldvalue = struct.unpack('>I', struct.pack('>i', oldvalue))[0] - oldvalue |= mask - state["activeObject"].properties['FLT'][lodflag] = struct.unpack('>i', struct.pack(">I", oldvalue))[0] - -def clear_lockmask(mask): - state = update_state() - if state["activeObject"] and idprops_type(state["activeObject"], 73): - oldvalue = state["activeObject"].properties['FLT'][lodflag] - oldvalue = struct.unpack('>I', struct.pack('>i', oldvalue))[0] - oldvalue &= ~mask - state["activeObject"].properties['FLT'][lodflag] = struct.unpack('>i',struct.pack('>I',oldvalue))[0] - -def findchildren(object): - state = update_state() - children = list() - for candidate in state["activeScene"].objects: - if candidate.parent == object: - children.append(candidate) - retlist = list(children) - for child in children: - retlist = retlist + findchildren(child) - return retlist - -def get_object_center(object): - bbox = object.getBoundBox(1) - average = Blender.Mathutils.Vector(0.0, 0.0, 0.0) - - for point in bbox: - average[0] += point[0] - average[1] += point[1] - average[2] += point[2] - - average[0] = average[0] / 8.0 - average[1] = average[1] / 8.0 - average[2] = average[2] / 8.0 - - return average - - -def calc_center(): - - global xco - 
global yco - global zco - - state = update_state() - if state["activeObject"] and idprops_type(state["activeObject"], 73): - average = Blender.Mathutils.Vector(0.0, 0.0, 0.0) - children = findchildren(state["activeObject"]) #get children objects - if children: - for child in children: - center = get_object_center(child) - average[0] += center[0] - average[1] += center[1] - average[2] += center[2] - - average[0] = average[0] / len(children) - average[1] = average[1] / len(children) - average[2] = average[2] / len(children) - - set_prop(73, xco, average[0]) - set_prop(73, yco, average[1]) - set_prop(73, zco, average[2]) - - -def grab_center(): - - global xco - global yco - global zco - - state = update_state() - if state["activeObject"] and idprops_type(state["activeObject"], 73): - center = Blender.Window.GetCursorPos() - - set_prop(73, xco, center[0]) - set_prop(73, yco, center[1]) - set_prop(73, zco, center[2]) - - -def create_lod(): - state = update_state() - actobj = state["activeObject"] - if actobj and not idprops_type(actobj, 73): - idprops_kill() - idprops_append(actobj,73, flt_properties.FLTLOD) - calc_center() - - - -def event(evt,val): - if evt == Draw.ESCKEY: - Draw.Exit() - -def but_event(evt): - - global LOD_MAKE - global LOD_DELETE - global LOD_CALC_CENTER - global LOD_GRAB_CENTER - global LOD_FREEZE - global LOD_PREVIOUS - global LOD_X - global LOD_Y - global LOD_Z - global LOD_SIG - global LOD_IN - global LOD_OUT - global LOD_TRANS - - global switch_in - global switch_out - global xco - global yco - global zco - global trans - global sig_size - - global lodflag - global previous_mask - global freeze_mask - - global evcode - - #do "system" events - if evt == evcode["LOD_MAKE"]: - create_lod() - - if evt == evcode["LOD_CALC_CENTER"]: - calc_center() - - if evt == evcode["LOD_DELETE"]: - idprops_kill() - - if evt == evcode["LOD_GRAB_CENTER"]: - grab_center() - - #do mask events - if evt == evcode["LOD_FREEZE"]: - if LOD_FREEZE.val == True: - set_lockmask(freeze_mask) - else: - clear_lockmask(freeze_mask) - - if evt == evcode["LOD_PREVIOUS"]: - if LOD_PREVIOUS.val == True: - set_lockmask(previous_mask) - else: - clear_lockmask(previous_mask) - - #do input events - if evt == evcode["LOD_X"]: - set_prop(73, xco, LOD_X.val) - if evt == evcode["LOD_Y"]: - set_prop(73, yco, LOD_Y.val) - if evt == evcode["LOD_Z"]: - set_prop(73, zco, LOD_Z.val) - if evt == evcode["LOD_SIG"]: - set_prop(73, sig_size, LOD_SIG.val) - if evt == evcode["LOD_IN"]: - set_prop(73, switch_in, LOD_IN.val) - if evt == evcode["LOD_OUT"]: - set_prop(73, switch_out, LOD_OUT.val) - if evt == evcode["LOD_TRANS"]: - set_prop(73, trans, LOD_TRANS.val) - - - Draw.Redraw(1) - Blender.Window.RedrawAll() - -def draw_propsheet(x,y): - - global LOD_MAKE - global LOD_DELETE - global LOD_CALC_CENTER - global LOD_GRAB_CENTER - global LOD_FREEZE - global LOD_PREVIOUS - global LOD_X - global LOD_Y - global LOD_Z - global LOD_SIG - global LOD_IN - global LOD_OUT - global LOD_TRANS - - #labels - global LOD_EDITLABEL - global LOD_SWITCHLABEL - global LOD_CENTERLABEL - global LOD_XLABEL - global LOD_YLABEL - global LOD_ZLABEL - global LOD_SIGLABEL - global LOD_INLABEL - global LOD_OUTLABEL - global LOD_TRANSLABEL - - - global switch_in - global switch_out - global xco - global yco - global zco - global trans - global sig_size - - global lodflag - global previous_mask - global freeze_mask - - global evcode - - - global evcode - - state = update_state() - - label_width = 100 - row_height = 20 - toggle_width = 50 - input_width = 100 - 
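The freeze/previous toggle handling above stores its bits in the LOD flag word, which lives in an ID property as a signed 32-bit int; set_lockmask and clear_lockmask therefore reinterpret the value as unsigned before the bit operations and convert it back afterwards. A standalone sketch of that round trip (the function names are mine):

import struct

def set_flag(stored, mask):
    unsigned = struct.unpack('>I', struct.pack('>i', stored))[0]
    unsigned |= mask
    return struct.unpack('>i', struct.pack('>I', unsigned))[0]

def clear_flag(stored, mask):
    unsigned = struct.unpack('>I', struct.pack('>i', stored))[0]
    unsigned &= ~mask & 0xFFFFFFFF
    return struct.unpack('>i', struct.pack('>I', unsigned))[0]

previous_mask = 1 << 31
assert set_flag(0, previous_mask) == -2147483648   # bit 31 reads back negative
assert clear_flag(-2147483648, previous_mask) == 0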
pad = 10 - origx = x - origy = (row_height * 16) + (pad * 16) - - - #editor label - x = origx - y = origy - LOD_EDITLABEL = Blender.Draw.Label("FLT Level of Detail Editor", x, y, 250, row_height) - - - #Center inputs - x = origx - y = y- (row_height + pad) - LOD_CENTERLABEL = Blender.Draw.Label("LOD center", x, y, label_width, row_height) - y = y- (row_height + pad) - LOD_XLABEL = Blender.Draw.Label("X Coordinate", x, y, label_width, row_height) - x = origx + (label_width + pad) - LOD_X = Blender.Draw.Number("", evcode["LOD_X"], x, y, input_width, row_height,get_prop(73,xco), -1000000.0, 1000000.0, "") - x = origx - y = y- (row_height + pad) - LOD_YLABEL = Blender.Draw.Label("Y Coordinate", x, y, label_width, row_height) - x = origx + (label_width + pad) - LOD_Y = Blender.Draw.Number("", evcode["LOD_Y"], x, y, input_width, row_height,get_prop(73,yco), -1000000.0, 1000000.0, "") - x = origx - y = y- (row_height + pad) - LOD_ZLABEL = Blender.Draw.Label("Z Coordinate", x, y, label_width, row_height) - x = origx + (label_width + pad) - LOD_Z = Blender.Draw.Number("", evcode["LOD_Z"], x, y, input_width, row_height,get_prop(73,zco), -1000000.0, 1000000.0, "") - - - #Switch inputs - x = origx - y = y- (row_height + pad) - LOD_SWITCHLABEL = Blender.Draw.Label("Switch Settings", x, y, input_width, row_height) - y = y- (row_height + pad) - LOD_SIGLABEL = Blender.Draw.Label("Significant Size", x, y, label_width, row_height) - x = origx + (label_width + pad) - LOD_SIG = Blender.Draw.Number("", evcode["LOD_SIG"], x, y, input_width, row_height, get_prop(73,sig_size), -1000000.0, 1000000.0, "") - x = origx - y = y- (row_height + pad) - LOD_INLABEL = Blender.Draw.Label("Switch In", x, y, label_width, row_height) - x = origx + (label_width + pad) - LOD_IN = Blender.Draw.Number("", evcode["LOD_IN"], x, y, input_width, row_height, get_prop(73,switch_in), -1000000.0, 1000000.0, "") - x = origx - y = y- (row_height + pad) - LOD_OUTLABEL = Blender.Draw.Label("Switch Out", x, y, label_width, row_height) - x = origx + (label_width + pad) - LOD_OUT = Blender.Draw.Number("", evcode["LOD_OUT"], x, y, input_width, row_height, get_prop(73,switch_out), -1000000.0, 1000000.0, "") - x = origx - y = y- (row_height + pad) - LOD_TRANSLABEL = Blender.Draw.Label("Transition", x, y, label_width, row_height) - x = origx + (label_width + pad) - LOD_TRANS = Blender.Draw.Number("", evcode["LOD_TRANS"], x, y, input_width, row_height, get_prop(73,trans), -1000000.0, 1000000.0, "") - - - x = origx - y = y - (row_height + pad) - LOD_MAKE = Blender.Draw.PushButton("Make LOD", evcode["LOD_MAKE"], x, y, input_width + label_width + pad, row_height, "Make a LOD Node out of Active Object") - y = y - (row_height + pad) - LOD_DELETE = Blender.Draw.PushButton("Delete LOD", evcode["LOD_DELETE"], x, y, input_width + label_width + pad, row_height, "Delete the LOD Node properties") - y = y - (row_height + pad) - LOD_CALC_CENTER = Blender.Draw.PushButton("Calculate Center", evcode["LOD_CALC_CENTER"], x, y, input_width + label_width + pad, row_height, "Calculate the center of this LOD") - y = y - (row_height + pad) - LOD_GRAB_CENTER = Blender.Draw.PushButton("Grab Center", evcode["LOD_GRAB_CENTER"], x, y, input_width + label_width + pad, row_height, "Grab center from 3d cursor") - y = y - (row_height + pad) - LOD_FREEZE = Blender.Draw.Toggle("Freeze Center", evcode["LOD_FREEZE"], x, y, input_width + label_width + pad, row_height, get_lockmask(freeze_mask), "") - y = y - (row_height + pad) - LOD_PREVIOUS = Blender.Draw.Toggle("Previous Range", 
evcode["LOD_PREVIOUS"], x, y, input_width + label_width + pad, row_height, get_lockmask(previous_mask), "") - -def gui(): - #draw the propsheet/toolbox. - psheety = 800 - #psheetx = psheety + 10 - draw_propsheet(20,psheety) - -Draw.Register(gui,event,but_event) - \ No newline at end of file diff --git a/release/scripts/flt_palettemanager.py b/release/scripts/flt_palettemanager.py deleted file mode 100644 index c2f1380a6fa..00000000000 --- a/release/scripts/flt_palettemanager.py +++ /dev/null @@ -1,505 +0,0 @@ -#!BPY - -""" -Name: 'FLT Palette Manager' -Blender: 240 -Group: 'Misc' -Tooltip: 'Manage FLT colors' -""" - -__author__ = "Geoffrey Bantle" -__version__ = "1.0 11/21/2007" -__email__ = ('scripts', 'Author, ') -__url__ = ('blender', 'blenderartists.org') - -__bpydoc__ ="""\ - -This script manages colors in OpenFlight databases. OpenFlight is a -registered trademark of MultiGen-Paradigm, Inc. - -Todo: --Figure out whats causing the PC speaker to beep when initializing... - -Feature overview and more availible at: -http://wiki.blender.org/index.php/Scripts/Manual/FLTools -""" - -# -------------------------------------------------------------------------- -# flt_palettemanager.py version 1.0 2005/04/08 -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Copyright (C) 2007: Blender Foundation -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -import Blender.Draw as Draw -from Blender.BGL import * -import Blender -import flt_properties -import flt_defaultp as defaultp -from flt_properties import * - -def RGBtoHSV( r, g, b): - minc = min( r, g, b ) - maxc = max( r, g, b ) - v = maxc - - delta = maxc - minc - - if( max != 0 ): - s = delta / maxc - else: - s = 0 - h = -1 - return (h,s,v) - - if( r == maxc ): - h = ( g - b ) / delta - elif( g == maxc ): - h = 2 + ( b - r ) / delta - else: - h = 4 + ( r - g ) / delta - - h *= 60 - if( h < 0 ): - h += 360 - - return(h,s,v) - -def HSVtoRGB(h,s,v): - - if( s == 0 ): - return (v,v,v) - - - h /= 60 - i = math.floor( h) - f = h - i - p = v * ( 1 - s ) - q = v * ( 1 - s * f ) - t = v * ( 1 - s * ( 1 - f ) ) - - if i == 0: - r = v - g = t - b = p - elif i == 1: - r = q - g = v - b = p - - elif i== 2: - r = p - g = v - b = t - elif i==3: - r = p - g = q - b = v - elif i==4: - r = t - g = p - b = v - - else: - r = v - g = p - b = q - - return(r,g,b) - - -palette_size = 12 -palette_x = 0 -palette_y = 0 - -colors = list() -curint = 1.0 -curswatch = 0 -#make a default palette, not very useful. 
-cinc = 1.0 / 1024.0 -cstep = 0.0 -picker = None -ptt = "" - - -ts1=None -ts2=None -ts3=None -ts4=None -ts5=None - -for i in xrange(1024): - colors.append([cstep,cstep,cstep]) - cstep = cstep + cinc -def update_state(): - state = dict() - state["activeScene"] = Blender.Scene.getCurrent() - state["activeObject"] = state["activeScene"].getActiveObject() - state["activeMesh"] = None - if state["activeObject"] and state["activeObject"].type == 'Mesh': - state["activeMesh"] = state["activeObject"].getData(mesh=True) - - state["activeFace"] = None - if state["activeMesh"]: - if state["activeMesh"].faceUV and state["activeMesh"].activeFace != None: - state["activeFace"] = state["activeMesh"].faces[state["activeMesh"].activeFace] - - return state - -def pack_face_index(index, intensity): - return ((127*intensity)+(128*index)) -def unpack_face_index(face_index): - index = face_index / 128 - intensity = float(face_index - 128.0 * index) / 127.0 - return(index,intensity) - -def event(evt,val): - global palette_size - global palette_x - global palette_y - global colors - global curint - global curswatch - - areas = Blender.Window.GetScreenInfo() - curarea = Blender.Window.GetAreaID() - curRect = None - editmode = 0 - - for area in areas: - if area['id'] == curarea: - curRect = area['vertices'] - break - - if evt == Draw.LEFTMOUSE: - mval = Blender.Window.GetMouseCoords() - rastx = mval[0] - curRect[0] - rasty = mval[1] - curRect[1] - - swatchx = (rastx -palette_x) / palette_size #+state["palette_x"] - swatchy = (rasty -palette_y) / palette_size #+state["palette_y"] - if rastx > palette_x and rastx < (palette_x + palette_size * 32) and rasty > palette_y and rasty < (palette_y+ palette_size* 32): - if swatchx < 32 and swatchy < 32: - curswatch = (swatchx * 32) + swatchy - Draw.Redraw(1) - - elif swatchy < 34 and swatchx < 32: - curint = 1.0 - (float(rastx-palette_x)/(palette_size*32.0)) - Draw.Redraw(1) - - #copy current color and intensity to selected faces. 
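pack_face_index and unpack_face_index above use the same encoding as the importer's get_color: one integer holds 128 times the palette index plus a 0-127 intensity step. A standalone restatement (the explicit int(round(...)) is mine; the script keeps the float result and lets callers truncate):

def pack_face_index(index, intensity):
    # 1024 palette entries and 128 intensity steps keep the packed value below 2**17.
    return 128 * index + int(round(127 * intensity))

def unpack_face_index(packed):
    index = packed // 128
    intensity = (packed - 128 * index) / 127.0
    return index, intensity

index, intensity = unpack_face_index(pack_face_index(42, 0.5))
assert index == 42
assert abs(intensity - 0.5) < 0.01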
- elif evt == Draw.VKEY: - - if Blender.Window.EditMode(): - Blender.Window.EditMode(0) - editmode = 1 - state = update_state() - - #retrieve color from palette - color = struct.unpack('>BBBB',struct.pack('>i',colors[curswatch])) - actmesh = state["activeMesh"] - if actmesh: - if(Blender.Window.GetKeyQualifiers() != Blender.Window.Qual["CTRL"]): - selfaces = list() - for face in actmesh.faces: - if face.sel: - selfaces.append(face) - - if not "FLT_COL" in actmesh.faces.properties: - actmesh.faces.addPropertyLayer("FLT_COL",Blender.Mesh.PropertyTypes["INT"]) - for face in actmesh.faces: - face.setProperty("FLT_COL",127) #default - try: - actmesh.activeColorLayer = "FLT_Fcol" - except: - actmesh.addColorLayer("FLT_Fcol") - actmesh.activeColorLayer = "FLT_Fcol" - - - for face in selfaces: - #First append packed index + color and store in face property - face.setProperty("FLT_COL",int(pack_face_index(curswatch,curint))) - #Save baked color to face vertex colors - for col in face.col: - col.r = int(color[0] * curint) - col.g = int(color[1] * curint) - col.b = int(color[2] * curint) - col.a = int(color[3] * curint) - else: - if Blender.Mesh.Mode() == Blender.Mesh.SelectModes['VERTEX']: - if not 'FLT_VCOL' in actmesh.verts.properties: - actmesh.verts.addPropertyLayer("FLT_VCOL",Blender.Mesh.PropertyTypes["INT"]) - for vert in actmesh.verts: - vert.setProperty("FLT_VCOL",127) - else: - for vert in actmesh.verts: - if vert.sel: - vert.setProperty("FLT_VCOL",int(pack_face_index(curswatch,curint))) - - if editmode: - Blender.Window.EditMode(1) - - Blender.Window.RedrawAll() - - #grab color and intensity from active face - elif evt == Draw.CKEY: - if Blender.Window.EditMode(): - Blender.Window.EditMode(0) - editmode = 1 - state = update_state() - - actmesh = state["activeMesh"] - activeFace = state["activeFace"] - - - if activeFace: - if not "FLT_COL" in actmesh.faces.properties: - actmesh.faces.addPropertyLayer("FLT_COL",Blender.Mesh.PropertyTypes["INT"]) - for face in actmesh.faces: - face.setProperty("FLT_COL",127) #default - try: - actmesh.activeColorLayer = "FLT_Fcol" - except: - actmesh.addColorLayer("FLT_Fcol") - actmesh.activeColorLayer = "FLT_Fcol" - tcol = activeFace.getProperty("FLT_COL") - (index,intensity) = unpack_face_index(tcol) - curswatch = index - curint = intensity - - if editmode: - Blender.Window.EditMode(1) - - Blender.Window.RedrawAll() - - elif evt == Draw.GKEY: - if Blender.Window.EditMode(): - Blender.Window.EditMode(0) - editmode =1 - state = update_state() - - actmesh = state["activeMesh"] - activeFace = state["activeFace"] - - if activeFace and "FLT_COL" in actmesh.faces.properties: - (index,intensity) = unpack_face_index(activeFace.getProperty("FLT_COL")) - for face in actmesh.faces: - (index2, intensity2) = unpack_face_index(face.getProperty("FLT_COL")) - if index == index2: - face.sel = 1 - - - elif evt == Draw.ESCKEY: - Draw.Exit() - - if editmode: - Blender.Window.EditMode(1) - -def update_all(): - global colors - state = update_state() - #update the baked FLT colors for all meshes. 
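The update loop that follows, like the importer's 'Color Palette' handling, keeps each palette entry as a signed 32-bit int inside an ID property and packs or unpacks the four colour bytes with a struct round trip. A standalone sketch of that trick (function names are mine, byte order r, g, b, a as in those two call sites):

import struct

def pack_rgba(r, g, b, a):
    # Four unsigned bytes reinterpreted as one signed big-endian int.
    return struct.unpack('>i', struct.pack('>BBBB', r, g, b, a))[0]

def unpack_rgba(packed):
    return struct.unpack('>BBBB', struct.pack('>i', packed))

assert unpack_rgba(pack_rgba(255, 128, 64, 255)) == (255, 128, 64, 255)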
- for object in state["activeScene"].objects: - if object.type == "Mesh": - mesh = object.getData(mesh=True) - if 'FLT_COL' in mesh.faces.properties and "FLT_Fcol" in mesh.getColorLayerNames(): - mesh.activeColorLayer = "FLT_Fcol" - for face in mesh.faces: - (index,intensity) = unpack_face_index(face.getProperty('FLT_COL')) - color = struct.unpack('>BBBB',struct.pack('>i',colors[index])) - #update the vertex colors for this face - for col in face.col: - col.r = int(color[0] * intensity) - col.g = int(color[1] * intensity) - col.b = int(color[2] * intensity) - col.a = 255 - - -def but_event(evt): - global palette_size - global palette_x - global palette_y - global colors - global curint - global curswatch - global picker - state = update_state() - - if evt == 1: - if picker.val: - rval = (int(picker.val[0]*255),int(picker.val[1]*255),int(picker.val[2]*255),255) - rval = struct.pack('>BBBB',rval[0],rval[1],rval[2],rval[3]) - rval = struct.unpack('>i',rval) - colors[curswatch] = rval[0] - #go cd through all meshes and update their FLT colors - update_all() - - Draw.Redraw(1) -def init_pal(): - global palette_size - global palette_x - global palette_y - global colors - global curint - global curswatch - - state = update_state() - - if not state["activeScene"].properties.has_key('FLT'): - state["activeScene"].properties['FLT'] = dict() - - try: - colors = state["activeScene"].properties['FLT']['Color Palette'] - except: - state["activeScene"].properties['FLT']['Color Palette'] = defaultp.pal - colors = state["activeScene"].properties['FLT']['Color Palette'] - -def draw_palette(): - global palette_size - global palette_x - global palette_y - global colors - global curint - global curswatch - global picker - global ts1 - global ts2 - global ts3 - global ts4 - global ts5 - - state = update_state() - init_pal() - - ssize = palette_size - xpos = palette_x - cid = 0 - - highlight = [(palette_x,palette_y),(palette_x+palette_size,palette_y),(palette_x+palette_size,palette_y+palette_size),(palette_x,palette_y+palette_size)] - for x in xrange(32): - ypos = palette_y - for y in xrange(32): - color = struct.unpack('>BBBB',struct.pack('>i',colors[cid])) - glColor3f(color[0]/255.0,color[1]/255.0,color[2]/255.0) - glBegin(GL_POLYGON) - glVertex2i(xpos,ypos) - glVertex2i(xpos+ssize,ypos) - glVertex2i(xpos+ssize,ypos+ssize) - glVertex2i(xpos,ypos+ssize) - glEnd() - - if curswatch == cid: - highlight[0] = (xpos,ypos) - highlight[1] = (xpos+ssize,ypos) - highlight[2] = (xpos+ssize,ypos+ssize) - highlight[3] = (xpos,ypos+ssize) - - glColor3f(0.0,0.0,0.0) - glBegin(GL_LINE_LOOP) - glVertex2i(xpos,ypos) - glVertex2i(xpos+ssize,ypos) - glVertex2i(xpos+ssize,ypos+ssize) - glVertex2i(xpos,ypos+ssize) - glVertex2i(xpos,ypos) - glEnd() - - - cid = cid + 1 - ypos = ypos + ssize - - xpos = xpos + ssize - - #draw intensity gradient - color = struct.unpack('>BBBB',struct.pack('>i',colors[curswatch])) - color = [color[0]/255.0,color[1]/255.0,color[2]/255.0] - colsteps = [color[0]/255.0,color[1]/255.0,color[2]/255.0] - stripwidth = (palette_size * 32.0) / 256 - strippad = palette_size / 2.0 - - xpos = palette_x - grady = (palette_y + (palette_size * 32.0)) + strippad - for x in xrange(256): - color[0] = color[0] - colsteps[0] - color[1] = color[1] - colsteps[1] - color[2] = color[2] - colsteps[2] - - glColor3f(color[0], color[1] ,color[2]) - glBegin(GL_POLYGON) - glVertex2f(xpos,grady) - glVertex2f(xpos+stripwidth,grady) - glVertex2f(xpos+stripwidth,grady+palette_size) - glVertex2f(xpos,grady+palette_size) - glEnd() - xpos = 
xpos + stripwidth - - #draw intensity slider bar - #xposition == 512 - ((curint) * 512) - xpos = ((palette_size*32) * (1.0 - curint)) + palette_x - glColor3f(1.0,1.0,1.0) - glBegin(GL_LINE_LOOP) - glVertex2i(int(xpos-6),int(grady-1)) - glVertex2i(int(xpos+6),int(grady-1)) - glVertex2i(int(xpos+6),int(grady+palette_size+1)) - glVertex2i(int(xpos-6),int(grady+palette_size+1)) - #glVertex2i(xpos-6,grady+7) - glEnd() - - #draw color picker - color = struct.unpack('>BBBB',struct.pack('>i',colors[curswatch])) - pickcol = (color[0]/255.0,color[1]/255.0,color[2]/255.0) - picker = Blender.Draw.ColorPicker(1,highlight[0][0]+1,highlight[0][1]+1,ssize-2,ssize-2,pickcol,ptt) - - #draw highlight swatch - glColor3f(1.0,1.0,1.0) - glBegin(GL_LINE_LOOP) - glVertex2i(highlight[0][0],highlight[0][1]) - glVertex2i(highlight[1][0],highlight[1][1]) - glVertex2i(highlight[2][0],highlight[2][1]) - glVertex2i(highlight[3][0],highlight[3][1]) - glVertex2i(highlight[0][0],highlight[0][1]) - glEnd() - - #draw text string explanations - xpos = palette_size*32+20 - ypos = palette_size*32+10 - glRasterPos2d(xpos,ypos) - ts1 = Blender.Draw.Text("FLT Palette Manager V 1.0") - ypos = ypos - 20 - glRasterPos2d(xpos,ypos) - ts3 = Blender.Draw.Text("CKEY - Copy Active Face Color*") - ypos = ypos - 20 - glRasterPos2d(xpos,ypos) - ts2 = Blender.Draw.Text("VKEY - Paste Color to Selected Faces") - ypos = ypos - 20 - glRasterPos2d(xpos,ypos) - ts4 = Blender.Draw.Text("GKEY - Select Faces With Same Color") - ypos = ypos - 15 - glRasterPos2d(xpos,ypos) - ts5 = Blender.Draw.Text("(*Requires mesh with UV coordinates)", 'small') - -def gui(): - glClearColor(0.5,0.5,0.5,1.0) - glClear(GL_COLOR_BUFFER_BIT) - draw_palette() - - -init_pal() -Draw.Register(gui,event,but_event) - \ No newline at end of file diff --git a/release/scripts/flt_properties.py b/release/scripts/flt_properties.py deleted file mode 100644 index b9d93b5f52d..00000000000 --- a/release/scripts/flt_properties.py +++ /dev/null @@ -1,630 +0,0 @@ -# flt_properties.py. For setting default OpenFLight ID property types -# Copyright (C) 2007 Blender Foundation -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - -__bpydoc__ ="""\ -Utility functions and data defintions used by OpenFlight I/O and tool scripts. OpenFlight is a -registered trademark of MultiGen-Paradigm, Inc. 
-""" - - -import struct - -bitsLSB = [2147483648] -for i in xrange(31): - bitsLSB.append(bitsLSB[-1]/2) -bitsRSB = bitsLSB[:] -bitsRSB.reverse() - -def pack_color(col): - return struct.pack('>B',col[3]) + struct.pack('>B',col[2]) + struct.pack('>B',col[1]) + struct.pack('>B',col[0]) - -def unpack_color(col): - string = struct.pack('>I', col) - r = struct.unpack('>B',string[3:4]) - g = struct.unpack('>B',string[2:3]) - b = struct.unpack('>B',string[1:2]) - a = struct.unpack('>B',string[0:1]) - return [r,g,b,a] - -def reverse_bits(len,num): - bitbucket = list() - rval = 0 - - for i in xrange(len): - if num & bitsRSB[i]: - bitbucket.append(1) - else: - bitbucket.append(0) - - bitbucket.reverse() - - for i, bit in enumerate(bitbucket): - if bit: - rval |= bitsLSB[i] - - return rval - - -opcode_name = { 0: 'db', - 1: 'head', - 2: 'grp', - 4: 'obj', - 5: 'face', - 10: 'push', - 11: 'pop', - 14: 'dof', - 19: 'push sub', - 20: 'pop sub', - 21: 'push ext', - 22: 'pop ext', - 23: 'cont', - 31: 'comment', - 32: 'color pal', - 33: 'long id', - 49: 'matrix', - 50: 'vector', - 52: 'multi-tex', - 53: 'uv lst', - 55: 'bsp', - 60: 'rep', - 61: 'inst ref', - 62: 'inst def', - 63: 'ext ref', - 64: 'tex pal', - 67: 'vert pal', - 68: 'vert w col', - 69: 'vert w col & norm', - 70: 'vert w col, norm & uv', - 71: 'vert w col & uv', - 72: 'vert lst', - 73: 'lod', - 74: 'bndin box', - 76: 'rot edge', - 78: 'trans', - 79: 'scl', - 80: 'rot pnt', - 81: 'rot and/or scale pnt', - 82: 'put', - 83: 'eyepoint & trackplane pal', - 84: 'mesh', - 85: 'local vert pool', - 86: 'mesh prim', - 87: 'road seg', - 88: 'road zone', - 89: 'morph vert lst', - 90: 'link pal', - 91: 'snd', - 92: 'rd path', - 93: 'snd pal', - 94: 'gen matrix', - 95: 'txt', - 96: 'sw', - 97: 'line styl pal', - 98: 'clip reg', - 100: 'ext', - 101: 'light src', - 102: 'light src pal', - 103: 'reserved', - 104: 'reserved', - 105: 'bndin sph', - 106: 'bndin cyl', - 107: 'bndin hull', - 108: 'bndin vol cntr', - 109: 'bndin vol orient', - 110: 'rsrvd', - 111: 'light pnt', - 112: 'tex map pal', - 113: 'mat pal', - 114: 'name tab', - 115: 'cat', - 116: 'cat dat', - 117: 'rsrvd', - 118: 'rsrvd', - 119: 'bounding hist', - 120: 'rsrvd', - 121: 'rsrvd', - 122: 'push attrib', - 123: 'pop attrib', - 124: 'rsrvd', - 125: 'rsrvd', - 126: 'curv', - 127: 'road const', - 128: 'light pnt appear pal', - 129: 'light pnt anim pal', - 130: 'indexed lp', - 131: 'lp sys', - 132: 'indx str', - 133: 'shdr pal'} - - -typecodes = ['c','C','s','S','i','I','f','d','t'] - -FLT_GRP = 2 -FLT_OBJ = 4 -FLT_LOD = 73 -FLT_XRF = 63 -FLT_DOF = 14 -FLT_ILP = 111 -FLT_DB = 1 -FLT_FCE = 5 - -#not actual opcodes -FLT_NUL = 0 -FLT_EXP = -1 - -#valid childtypes for each FLT node type -FLT_CHILDTYPES = { - FLT_GRP : [111,2,73,4,14,63], - FLT_OBJ : [111], - FLT_LOD : [111,2,73,4,14,63], - FLT_XRF : [], - FLT_DOF : [111,2,73,4,14,63], - FLT_ILP : [] -} - -#List of nodes that can have faces as children -FLT_FACETYPES = [ - FLT_GRP, - FLT_OBJ, - FLT_LOD, - FLT_DOF -] - -def write_prop(fw,type,value,length): - if type == 'c': - fw.write_char(value) - elif type == 'C': - fw.write_uchar(value) - elif type == 's': - fw.write_short(value) - elif type == 'S': - fw.write_ushort(value) - elif type == 'i': - fw.write_int(value) - elif type == 'I': - #NOTE!: - #there is no unsigned int type in python, but we can only store signed ints in ID props - newvalue = struct.unpack('>I', struct.pack('>i', value))[0] - fw.write_uint(newvalue) - elif type == 'd': - fw.write_double(value) - elif type == 'f': - 
fw.write_float(value) - elif type == 't': - fw.write_string(value,length) - -def read_prop(fw,type,length): - rval = None - if type == 'c': - rval = fw.read_char() - elif type == 'C': - rval = fw.read_uchar() - elif type == 's': - rval = fw.read_short() - elif type == 'S': - rval = fw.read_ushort() - elif type == 'i': - rval = fw.read_int() - elif type == 'I': - rval = fw.read_uint() - elif type == 'd': - rval = fw.read_double() - elif type == 'f': - rval = fw.read_float() - elif type == 't': - rval = fw.read_string(length) - return rval - - -FLTExt = { - '3t8!id' : 'Ext', - '4t8!sid' : '', - '5c!reserved': 0, - '6c!revision' : 0, - '7S!recordcode' : 0 -} -FLTGroup = { - '3t8!id' : 'G', - '4s!priority' : 0, - '5s!reserved1' : 0, - '6i!flags' : 0, - '7s!special1' : 0, - '8s!special2' : 0, - '9s!significance' : 0, - '10c!layer code' : 0, - '11c!reserved2' : 0, - '12i!reserved3' : 0, - '13i!loop count' : 0, - '14f!loop duration' : 0, - '15f!last frame duration' : 0 -} -FLTGroupDisplay = [5,11,12] - -FLTObject = { - '3t8!id' : 'O', - '4I!flags' : 0, - '5s!priority' : 0, - '6S!transp' : 0, - '7s!SFX1' : 0, - '8s!SFX2' : 0, - '9s!significance' : 0, - '10s!reserved' : 0 -} -FLTObjectDisplay = [10] - -FLTLOD = { - '3t8!id' : 'L', - '4i!reserved' : 0, - '5d!switch in' : 0.0, - '6d!switch out' : 0.0, - '7s!sfx ID1' : 0, - '8s!sfx ID2' : 0, - '9I!flags' : 0, - '10d!X co' : 0.0, - '11d!Y co' : 0.0, - '12d!Z co' : 0.0, - '13d!Transition' : 0.0, - '14d!Sig Size' : 0.0 -} -FLTLODDisplay = [4] - -FLTInlineLP = { - '3t8!id' : 'Lp', - '4s!smc' : 0, - '5s!fid' : 0, - '6C!back color: a' : 255, - '7C!back color: b' : 255, - '8C!back color: g' : 255, - '9C!back color: r' : 255, - '10i!display mode' : 0, - '11f!intensity' : 1.0, - '12f!back intensity' : 0.0, - '13f!minimum defocus' : 0.0, - '14f!maximum defocus' : 1.0, - '15i!fading mode' : 0, - '16i!fog punch mode' : 0, - '17i!directional mode' : 1, - '18i!range mode' : 0, - '19f!min pixel size' : 1.0, - '20f!max pixel size' : 1024, - '21f!actual size' : 0.25, - '22f!trans falloff pixel size' : 0.25, - '23f!trans falloff exponent' : 1.0, - '24f!trans falloff scalar' : 1.0, - '25f!trans falloff clamp' : 1.0, - '26f!fog scalar' : 0.25, - '27f!fog intensity' : 1.0, - '28f!size threshold' : 0.1, - '29i!directionality' : 0, - '30f!horizontal lobe angle' : 180.0, - '31f!vertical lobe angle' : 180.0, - '32f!lobe roll angle' : 0.0, - '33f!dir falloff exponent' : 1.0, - '34f!dir ambient intensity' : 0.1, - '35f!anim period' : 2, - '36f!anim phase' : 0, - '37f!anim enabled' : 1.0, - '38f!significance' : 0.0, - '39i!draw order' : 0, - '40I!flags' : 277004288, - '41f!roti' : 0, - '42f!rotj' : 0, - '43f!rotk' : 1.0 -} - -FLTInlineLPDisplay = [35,36,37,41,42,43] - -FLTXRef = { - '3t200!filename' : '', #we dont actually use this value on export - '4i!reserved' : 0, - '5I!flag' : -478150656, - '6s!bbox' : 0, - '7s!reserved' : 0 -} - -FLTXRefDisplay = [4,7,3] - -FLTDOF = { - '3t8!id' : 'D', - '4i!reserved' : 0, - '5d!ORIGX' : 0.0, - '6d!ORIGY' : 0.0, - '7d!ORIGZ' : 0.0, - '8d!XAXIS-X' : 10.0, - '9d!XAXIS-Y' : 0.0, - '10d!XAXIS-Z' : 0.0, - '11d!XYPLANE-X' : 0.0, - '12d!XYPLANE-Y' : 10.0, - '13d!XZPLANE-Z' : 0.0, - '14d!ZMIN' : 0.0, - '15d!ZMAX' : 0.0, - '16d!ZCUR' : 0.0, - '17d!ZSTEP' : 0.0, - '18d!YMIN' : 0.0, - '19d!YMAX' : 0.0, - '20d!YCUR' : 0.0, - '21d!YSTEP' : 0.0, - '22d!XMIN' : 0.0, - '23d!XMAX' : 0.0, - '24d!XCUR' : 0.0, - '25d!XSTEP' : 0.0, - '26d!PITCH-MIN' : 0.0, - '27d!PITCH-MAX' : 0.0, - '28d!PITCH-CUR' : 0.0, - '29d!PITCH-STEP' : 0.0, - '30d!ROLL-MIN' : 0.0, - 
'31d!ROLL-MAX' : 0.0, - '32d!ROLL-CUR' : 0.0, - '33d!ROLL-STEP' : 0.0, - '34d!YAW-MIN' : 0.0, - '35d!YAW-MAX' : 0.0, - '36d!YAW-CUR' : 0.0, - '37d!YAW-STEP' : 0.0, - '38d!ZSIZE-MIN' : 0.0, - '39d!ZSIZE-MAX' : 0.0, - '40d!ZSIZE-CUR' : 1.0, - '41d!ZSIZE-STEP' : 0.0, - '42d!YSIZE-MIN' : 0.0, - '43d!YSIZE-MAX' : 0.0, - '44d!YSIZE-CUR' : 1.0, - '45d!YSIZE-STEP' : 0.0, - '46d!XSIZE-MIN' : 0.0, - '47d!XSIZE-MAX' : 0.0, - '48d!XSIZE-CUR' : 1.0, - '49d!XSIZE-STEP' : 0.0, - '50I!FLAG' : 1897582, - '51i!reserved2' : 0 -} - -FLTDOFDisplay = [4] - -FLTImage = { - '3i!RealU Direction' : 0, - '4i!RealV Direction' : 0, - '5i!UpX' : 0, - '6i!UpY' : 0, - '7i!File Format' : 0, - '8i!Min Filter' : 6, - '9i!Mag Filter' : 1, - '10i!Wrap' : 0, - '11i!WrapU' : 0, - '12i!WrapV' : 0, - '13i!Modified' : 0, - '14i!PivotX' : 0, - '15i!PivotY' : 0, - '16i!Enviorment' : 0, - '17i!WhiteAlpha' : 0, - '18i!reserved1' : 0, - '19i!reserved2' : 0, - '20i!reserved3' : 0, - '21i!reserved4' : 0, - '22i!reserved5' : 0, - '23i!reserved6' : 0, - '24i!reserved7' : 0, - '25i!reserved8' : 0, - '26i!reserved9' : 0, - '27d!RealU Direction' : 0, - '28d!RealV Direction' : 0, - '29i!Origin' : 0, - '30i!Kernel no.' : 0, - '31i!Internal Format' : 0, - '32i!External Format' : 0, - '33i!MipMap Filter?' : 0, - '34f!MMF1' : 0.0, - '35f!MMF2' : 0.0, - '36f!MMF3' : 0.0, - '37f!MMF4' : 0.0, - '38f!MMF5' : 0.0, - '39f!MMF6' : 0.0, - '40f!MMF7' : 0.0, - '41f!MMF8' : 0.0, - '42i!Tex CPs?' : 0, - '43f!LOD0 CP' : 0.0, - '44f!Scale0 CP' : 0.0, - '45f!LOD1 CP' : 0.0, - '46f!Scale1 CP' : 0.0, - '47f!LOD2 CP' : 0.0, - '48f!Scale2 CP' : 0.0, - '49f!LOD3 CP' : 0.0, - '50f!Scale3 CP' : 0.0, - '51f!LOD4 CP' : 0.0, - '52f!Scale4 CP' : 0.0, - '53f!LOD5 CP' : 0.0, - '54f!Scale5 CP' : 0.0, - '55f!LOD6 CP' : 0.0, - '56f!Scale6 CP' : 0.0, - '57f!LOD7 CP' : 0.0, - '58f!Scale7 CP' : 0.0, - '59f!Control Clamp' : 0.0, - '60i!Mag Alpha Filter' : 0, - '61i!Mag Color Filter' : 0, - '62f!reserved10' : 0, - '63f!reserved11' : 0, - '64f!reserved12' : 0, - '65f!reserved13' : 0, - '66f!reserved14' : 0, - '67f!reserved15' : 0, - '68f!reserved16' : 0, - '69f!reserved17' : 0, - '70f!reserved18' : 0, - '71d!Lambert Central' : 0.0, - '72d!Lambert Upper' : 0.0, - '73d!Lambert Lower' : 0.0, - '74d!reserved19' : 0, - '75f!reserved20' : 0, - '76f!reserved21' : 0, - '77f!reserved22' : 0, - '78f!reserved23' : 0, - '79f!reserved24' : 0, - '80i!Tex Detail?' : 0, - '81i!Tex J' : 0, - '82i!Tex K' : 0, - '83i!Tex M' : 0, - '84i!Tex N' : 0, - '85i!Tex Scramble' : 0, - '86i!Tex Tile?' 
: 0, - '87f!Tex Tile LLU' : 0.0, - '88f!Tex Tile LLV' : 0.0, - '89f!Tex Tile URU' : 0.0, - '90f!Tex Tile URV' : 0.0, - '91i!Projection' : 0, - '92i!Earth Model' : 0, - '93i!reserved25' : 0, - '94i!UTM Zone' : 0, - '95i!Image Origin' : 0, - '96i!GPU' : 0, - '97i!reserved26' : 0, - '98i!reserved27' : 0, - '99i!GPU Hemi' : 0, - '100i!reserved41' : 0, - '101i!reserved42' : 0, - '102i!reserved43' : 0, - '103i!Cubemap' : 0, - '104t588!reserved44' : '', - '105t512!Comments' : '', - '106i!reserved28' : 0, - '107i!reserved29' : 0, - '108i!reserved30' : 0, - '109i!reserved31' : 0, - '110i!reserved32' : 0, - '111i!reserved33' : 0, - '112i!reserved34' : 0, - '113i!reserved35' : 0, - '114i!reserved36' : 0, - '115i!reserved37' : 0, - '116i!reserved38' : 0, - '117i!reserved39' : 0, - '118i!reserved40' : 0, - '119i!reserved45' : 0, - '120i!Format Version' : 0, - '121i!GPU num' : 0, -} - -FLTImageDisplay = [18,19,29,21,22,23,24,25,26,62,63,64,65,66,67,68,69,70,74,75,76,77,78,79,93,97,98,102,114] - -FLTHeader = { - '3t8!id' : 'db', - '4i!version' : 1620, - '5i!editversion' : 0, - '6t32!date' : 0, - '7s!NGID' : 0, - '8s!NLID' : 0, - '9s!NOID' : 0, - '10s!NFID' : 0, - '11s!UMULT' : 1, - '12c!units' : 0, - '13c!set white' : 0, - '14I!flags' : 0x80000000, - '15i!reserved1' : 0, - '16i!reserved2' : 0, - '17i!reserved3' : 0, - '18i!reserved4' : 0, - '19i!reserved5' : 0, - '20i!reserved6' : 0, - '21i!projection type' : 0, - '22i!reserved7' : 0, - '23i!reserved8' : 0, - '24i!reserved9' : 0, - '25i!reserved10' : 0, - '26i!reserved11' : 0, - '27i!reserved12' : 0, - '28i!reserved13' : 0, - '29s!NDID' : 0, - '30s!vstore' : 1, - '31i!origin' : 0, - '32d!sw x' : 0, - '33d!sw y' : 0, - '34d!dx' : 0, - '35d!dy' : 0, - '36s!NSID' : 0, - '37s!NPID' : 0, - '38i!reserved14' : 0, - '39i!reserved15' : 0, - '40s!NCID' : 0, - '41s!NTID' : 0, - '42s!NBID' : 0, - '43s!NWID' : 0, - '44i!reserved14' : 0, - '45d!sw lat' : 0, - '46d!sw lon' : 0, - '47d!ne lat' : 0, - '48d!ne lon' : 0, - '49d!origin lat' : 0, - '50d!origin lon' : 0, - '51d!lambert lat1' : 0, - '52d!lambert lat2' : 0, - '53s!NLSID' : 0, - '54s!NLPID' : 0, - '55s!NRID' : 0, - '56s!NCATID' : 0, - '57s!reserved15' : 0, - '58s!reserved16' : 0, - '59s!reserved17' : 0, - '60s!reserved18' : 0, - '61i!ellipsoid model' : 1, - '62s!NAID' : 0, - '63s!NCVID' : 0, - '64s!utm zone' : 0, - '65t6!reserved19' : 0, - '66d!dz' : 0, - '67d!radius' : 0, - '68S!NMID' : 0, - '69S!NLPSID' : 0, - '70i!reserved20' : 0, - '71d!major axis' : 0, - '72d!minor axis' : 0, -} - -FLT_Records = { - 2 : FLTGroup, - 4 : FLTObject, - 73 : FLTLOD, - 63 : FLTXRef, - 14 : FLTDOF, - 1 : FLTHeader, - 111 : FLTInlineLP, - 100 : FLTExt, - 'Image' : FLTImage -} - -def process_recordDefs(): - records = dict() - for record in FLT_Records: - props = dict() - for prop in FLT_Records[record]: - position = '' - slice = 0 - (format,name) = prop.split('!') - for i in format: - if i not in typecodes: - position = position + i - slice = slice + 1 - else: - break - type = format[slice:] - length = type[1:] - if len(length) == 0: - length = 1 - else: - type = type[0] - length = int(length) - - props[int(position)] = (type,length,prop) - records[record] = props - return records - - diff --git a/release/scripts/flt_toolbar.py b/release/scripts/flt_toolbar.py deleted file mode 100644 index a707b87f846..00000000000 --- a/release/scripts/flt_toolbar.py +++ /dev/null @@ -1,809 +0,0 @@ -#!BPY - -""" -Name: 'FLT Toolbar' -Blender: 240 -Group: 'Misc' -Tooltip: 'Tools for working with FLT databases' -""" - -__author__ = "Geoffrey Bantle" 
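Before continuing with the rest of the flt_toolbar.py header below: the flt_properties.py module removed above (and the toolbar script that reuses it) encodes every OpenFlight record field as a key of the form '<position><typecode>[<length>]!<name>', which process_recordDefs() splits apart. A rough, self-contained sketch of that decoding, mirroring the removed function; the helper name decode_prop_key is illustrative and not part of either removed script:

    typecodes = ['c', 'C', 's', 'S', 'i', 'I', 'f', 'd', 't']

    def decode_prop_key(prop):
        # e.g. '3t8!id' -> field 3 of the record, text of length 8, named 'id'
        fmt, name = prop.split('!')
        cut = 0
        while fmt[cut] not in typecodes:   # leading digits give the field position
            cut += 1
        pos = int(fmt[:cut])
        ptype = fmt[cut:]                  # 'I', 'd', 't8', 't200', ...
        plen = 1
        if len(ptype) > 1:                 # only text fields carry an explicit length
            plen = int(ptype[1:])
            ptype = ptype[0]
        return pos, ptype, plen, name

    # decode_prop_key('3t8!id')         -> (3, 't', 8, 'id')
    # decode_prop_key('14I!flags')      -> (14, 'I', 1, 'flags')
    # decode_prop_key('3t200!filename') -> (3, 't', 200, 'filename')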
-__version__ = "1.0 11/21/07" -__email__ = ('scripts', 'Author, ') -__url__ = ('blender', 'blenderartists.org') - -__bpydoc__ ="""\ -This script provides tools for working with OpenFlight databases in Blender. OpenFlight is a -registered trademark of MultiGen-Paradigm, Inc. - -Feature overview and more availible at: -http://wiki.blender.org/index.php/Scripts/Manual/FLTools -""" - -# -------------------------------------------------------------------------- -# flt_palettemanager.py version 0.1 2005/04/08 -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Copyright (C) 2007: Blender Foundation -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -import Blender.Draw as Draw -from Blender.BGL import * -import Blender -import flt_properties -reload(flt_properties) -from flt_properties import * - -xrefprefix = "" -xrefstack = list() -vofsstack = list() -vquatstack = list() -prop_w = 256 -prop_h = 256 - - -#event codes -evcode = { - "XREF_MAKE" : 100, - "XREF_EDIT" : 101, - "XREF_FILE" : 102, - "XREF_PICK" : 103, - "XREF_SELECT" : 104, - "XREF_POP" : 105, - "XREF_PREFIX" : 106, - "FACE_NAME" : 200, - "FACE_MAKESUB" : 201, - "FACE_KILLSUB" : 202, - "FACE_SELSUB" : 203, - "SCENE_UPDATE" : 303, - "IDPROP_COPY" : 501, - "IDPROP_KILL" : 502, - "CLIGHT_MAKE" : 700, - "DFROMACT" : 701, - "FIXCOL" : 702 -} - -XREF_PREFIX = None -XREF_MAKE = None -XREF_EDIT = None -XREF_SELECT = None -XREF_POP = None -FACE_MAKESUB = None -FACE_SELSUB = None -FACE_KILLSUB = None -IDPROP_KILL = None -IDPROP_COPY = None -SCENE_UPDATE = None -CLIGHT_MAKE = None -DFROMACT = None -FIXCOL = None - - -def RGBtoHSV( r, g, b): - cmin = min( r, g, b ) - cmax = max( r, g, b ) - v = cmax - - if(cmax!=0.0): - s = (cmax-cmin)/cmax - else: - s = 0.0 - h = 0.0 - - if(s == 0.0): - h = -1.0 - else: - cdelta = cmax-cmin - rc = (cmax-r)/cdelta - gc = (cmax-g)/cdelta - bc = (cmax-b)/cdelta - if(r==cmax): - h = bc-gc - else: - if(g==cmax): - h = 2.0+rc-bc - else: - h = 4.0+gc-rc - h = h*60.0 - if(h<0.0): - h += 360.0 - - - h = h/360.0 - if(h < 0.0): - h = 0.0 - return (h,s,v) - - -def update_state(): - state = dict() - state["activeScene"] = Blender.Scene.GetCurrent() - state["activeObject"] = state["activeScene"].objects.active - if state["activeObject"] and not state["activeObject"].sel: - state["activeObject"] = None - state["activeMesh"] = None - if state["activeObject"] and state["activeObject"].type == 'Mesh': - state["activeMesh"] = state["activeObject"].getData(mesh=True) - - state["activeFace"] = None - if state["activeMesh"]: - if state["activeMesh"].faceUV and state["activeMesh"].activeFace != None: - state["activeFace"] = state["activeMesh"].faces[state["activeMesh"].activeFace] - - #update 
editmode - state["editmode"] = Blender.Window.EditMode() - - return state -def pack_face_index(index, intensity): - return ((127*intensity)+(128*index)) -def unpack_face_index(face_index): - index = face_index / 128 - intensity = float(face_index - 128.0 * index) / 127.0 - return(index,intensity) - -def idprops_append(object, typecode, props): - object.properties["FLT"] = dict() - object.properties["FLT"]['type'] = typecode - for prop in props: - object.properties["FLT"][prop] = props[prop] - object.properties["FLT"]['3t8!id'] = object.name - -def idprops_kill(object): - state = update_state() - if object and object.properties.has_key('FLT'): - object.properties.pop('FLT') - -def idprops_copy(source): - state = update_state() - if source.properties.has_key('FLT'): - for object in state["activeScene"].objects: - if object.sel and object != source and (state["activeScene"].Layers & object.Layers): - idprops_kill(object) - object.properties['FLT'] = dict() - for key in source.properties['FLT']: - object.properties['FLT'][key] = source.properties['FLT'][key] - -def unpack_color(color): - return struct.unpack('>BBBB',struct.pack('>I',color)) - - -def findColorKey(colordict, hsv): - hdelta = 0.001 - for key in colordict: - if not (((hsv[0] < (key[0] + hdelta)) and (hsv[0] > (key[0] - hdelta))) and ((hsv[1] < (key[1] + hdelta)) and (hsv[1] > (key[1] - hdelta)))): - return key - return None - -def hsvsort(a, b): - (index1, mag1) = a - (index2, mag2) = b - if mag1 > mag2: - return 1 - elif mag1 < mag2: - return -1 - return 0 - -def fix_colors(): - - editmode = 0 - if Blender.Window.EditMode(): - Blender.Window.EditMode(0) - editmode = 1 - state = update_state() - - scene = state["activeScene"] - colors = None - if state["activeScene"].properties.has_key('FLT'): - try: - colors = state["activeScene"].properties['FLT']['Color Palette'] - except: - pass - if not colors: - return - - #first build a HSV version of our palette - hsvpalette = list() - for swatch in colors: - color = unpack_color(swatch) - hsv = RGBtoHSV(color[0] / 255.0, color[1] / 255.0, color[2] / 255.0) - hsvpalette.append(hsv) - - #collect all of our meshes - meshes = list() - for object in scene.objects.context: - if object.sel and object.type == 'Mesh': - mesh = object.getData(mesh=True) - if "FLT_COL" in mesh.faces.properties: - meshes.append(mesh) - - - #Now go through our meshes, and build a dictionary of face lists keyed according to (hue,saturation) of the baked color - colordict = dict() - for mesh in meshes: - for face in mesh.faces: - hsv = RGBtoHSV(face.col[0].r/255.0, face.col[0].g/255.0, face.col[0].b/255.0) #retrieve baked color - if colordict.has_key((hsv[0],hsv[1])): - colordict[(hsv[0],hsv[1])].append(face) - else: - colordict[(hsv[0],hsv[1])] = [face] - - - #for each color key in the color dict, build a list of distances from it to the values in hsvpalette and then quicksort them for closest match - for key in colordict: - maglist = list() - for i, hsv in enumerate(hsvpalette): - norm = Blender.Mathutils.Vector(hsv[0], hsv[1]) - Blender.Mathutils.Vector(key[0],key[1]) - maglist.append((i,norm.length)) - maglist.sort(hsvsort) - print maglist[0] - for face in colordict[key]: - (index, intensity) = unpack_face_index(face.getProperty("FLT_COL")) - newfindex = pack_face_index(maglist[0][0],intensity) - face.setProperty("FLT_COL", int(newfindex)) - - for mesh in meshes: - update_mesh_colors(colors,mesh) - - if editmode: - Blender.Window.EditMode(1) - - -def update_mesh_colors(colors, mesh): - if 'FLT_COL' in 
mesh.faces.properties: - mesh.activeColorLayer = "FLT_Fcol" - for face in mesh.faces: - (index,intensity) = unpack_face_index(face.getProperty('FLT_COL')) - color = struct.unpack('>BBBB',struct.pack('>I',colors[index])) - - if index == 0 and intensity == 0: - color = (255,255,255) - intensity = 1.0 - #update the vertex colors for this face - for col in face.col: - col.r = int(color[0] * intensity) - col.g = int(color[1] * intensity) - col.b = int(color[2] * intensity) - col.a = 255 - - -def update_all(): - - editmode = 0 - if Blender.Window.EditMode(): - Blender.Window.EditMode(0) - editmode = 1 - state = update_state() - colors = None - if state["activeScene"].properties.has_key('FLT'): - try: - colors = state["activeScene"].properties['FLT']['Color Palette'] - except: - pass - if colors: - #update the baked FLT colors for all meshes. - for object in state["activeScene"].objects: - if object.type == "Mesh": - mesh = object.getData(mesh=True) - update_mesh_colors(colors,mesh) - if editmode: - Blender.Window.EditMode(1) - -#Change this to find the deep parent -def xref_create(): - global xrefprefix - global xrefstack - global vofsstack - global vquatstack - global prop_w - global prop_h - - state = update_state() - - def findchildren(object): - children = list() - for candidate in state["activeScene"].objects: - if candidate.parent == object: - children.append(candidate) - retlist = list(children) - for child in children: - retlist = retlist + findchildren(child) - return retlist - - actObject = state["activeObject"] - if actObject and xrefprefix: - scenenames = list() - for scene in Blender.Scene.Get(): - scenenames.append(scene.name) - - if xrefprefix in scenenames: - #build a unique name for the xref... - suffix = 1 - found = False - while not found: - candidate = xrefprefix + str(suffix) - if not candidate in scenenames: - xrefname = candidate - found = True - suffix+=1 - else: - xrefname = xrefprefix - #create our XRef node - xnode = state["activeScene"].objects.new('Empty') - xnode.name = 'X:' + xrefname - xnode.properties['FLT'] = dict() - for prop in FLTXRef: - xnode.properties['FLT'][prop] = FLTXRef[prop] - xnode.properties['FLT']['3t200!filename'] = xrefname + '.flt' - xnode.properties['FLT']['type'] = 63 - xnode.enableDupGroup = True - xnode.DupGroup = Blender.Group.New(xrefname) #this is dangerous... be careful! 
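Stepping back for a moment from xref_create(), which continues below: the FLT_COL face property used throughout fix_colors() and update_mesh_colors() packs a palette index and an intensity into a single integer via the pack_face_index()/unpack_face_index() helpers removed earlier in this file. A minimal round-trip sketch in the same Python 2 style, mirroring those helpers rather than adding anything new:

    def pack_face_index(index, intensity):
        # palette index in steps of 128, intensity quantized into the low 0..127 range
        return (127 * intensity) + (128 * index)

    def unpack_face_index(face_index):
        index = int(face_index) // 128
        intensity = float(face_index - 128.0 * index) / 127.0
        return index, intensity

    stored = int(pack_face_index(5, 0.5))   # 128*5 + 127*0.5 = 703.5, stored as int 703
    print unpack_face_index(stored)         # (5, 0.496...): intensity comes back in 1/127 steps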
- - #copy rot and loc of actObject - xnode.setLocation(actObject.getLocation()) - xnode.setEuler(actObject.getEuler()) - - #build the new scene - xrefscene = Blender.Scene.New(xrefname) - xrefscene.properties['FLT'] = dict() - xrefscene.properties['FLT']['Filename'] = xrefname - xrefscene.properties['FLT']['Main'] = 0 - - #find the children of actObject so that we can add them to the group - linkobjects = findchildren(actObject) - linkobjects.append(actObject) - for object in linkobjects: - xrefscene.objects.link(object) - state["activeScene"].objects.unlink(object) - xnode.DupGroup.objects.link(object) - #clear rotation of actObject and location - actObject.setLocation(0.0,0.0,0.0) - actObject.setEuler(0.0,0.0,0.0) - - xrefscene.update(1) - state["activeScene"].update(1) - -def xref_select(): - state = update_state() - candidates = list() - scenelist = [scene.name for scene in Blender.Scene.Get()] - for object in state["activeScene"].objects: - if object.type == 'Empty' and object.enableDupGroup == True and object.DupGroup: - candidates.append(object) - - for object in candidates: - if object.DupGroup.name in scenelist: - object.sel = 1 - -def xref_edit(): - global xrefprefix - global xrefstack - global vofsstack - global vquatstack - global prop_w - global prop_h - - state = update_state() - - actObject = state["activeObject"] - - if actObject and actObject.type == 'Empty' and actObject.DupGroup: -# if actObject.properties.has_key('FLT') and actObject.properties['FLT']['type'] == 63: - for FLTscene in Blender.Scene.Get(): - if FLTscene.properties.has_key('FLT') and FLTscene.name == actObject.DupGroup.name: - actObject.sel = 0 - xrefstack.append(state["activeScene"]) - vofsstack.append(Blender.Window.GetViewOffset()) - vquatstack.append(Blender.Window.GetViewQuat()) - FLTscene.makeCurrent() - Blender.Window.SetViewOffset(0.0,0.0,0.0) - -def xref_finish(): - global xrefprefix - global xrefstack - global vofsstack - global vquatstack - global prop_w - global prop_h - - state = update_state() - if xrefstack: - scene = xrefstack.pop() - Blender.Window.SetViewQuat(vquatstack.pop()) - Blender.Window.SetViewOffset(vofsstack.pop()) - scene.makeCurrent() - - -def sortSub(a,b): - aindex = a.getProperty("FLT_ORIGINDEX") - bindex = b.getProperty("FLT_ORIGINDEX") - - if aindex > bindex: - return 1 - elif aindex < bindex: - return -1 - return 0 - -def subface_make(): - global xrefprefix - global xrefstack - global vofsstack - global vquatstack - global prop_w - global prop_h - - editmode = 0 - if Blender.Window.EditMode(): - Blender.Window.EditMode(0) - editmode = 1 - - state = update_state() - - actmesh = state["activeMesh"] - activeFace = state["activeFace"] - if actmesh: - if not "FLT_ORIGINDEX" in actmesh.faces.properties: - actmesh.faces.addPropertyLayer("FLT_ORIGINDEX",Blender.Mesh.PropertyTypes["INT"]) - for i, face in enumerate(actmesh.faces): - face.setProperty("FLT_ORIGINDEX",i) - if not "FLT_SFLEVEL" in actmesh.faces.properties: - actmesh.faces.addPropertyLayer("FLT_SFLEVEL",Blender.Mesh.PropertyTypes["INT"]) - - #attach the subfaces to the active face. Note, this doesnt really work 100 percent properly yet, just enough for one level! - if activeFace: - #steps: - #remove actface and selected faces from the facelist - #quicksort facelist - #append actface and subfaces to end of facelist. 
- #generate new indices - facelist = list() - sublist = list() - for face in actmesh.faces: - facelist.append(face) - for face in facelist: - if face == activeFace: - face.setProperty("FLT_SFLEVEL",0) - sublist.insert(0,face) - elif face.sel: - face.setProperty("FLT_SFLEVEL",1) - sublist.append(face) - for face in sublist: - facelist.remove(face) - facelist.sort(sortSub) - for face in sublist: - facelist.append(face) - for i, face in enumerate(facelist): - face.setProperty("FLT_ORIGINDEX",i) - else: - pass - - if editmode: - Blender.Window.EditMode(1) - -def subface_kill(): - global xrefprefix - global xrefstack - global vofsstack - global vquatstack - global prop_w - global prop_h - - editmode = 0 - if Blender.Window.EditMode(): - Blender.Window.EditMode(0) - editmode = 1 - state = update_state() - - actmesh = state["activeMesh"] - if actmesh: - if "FLT_ORIGINDEX" in actmesh.faces.properties and "FLT_SFLEVEL" in actmesh.faces.properties: - for i,face in enumerate(actmesh.faces): - face.setProperty("FLT_ORIGINDEX",i) - face.setProperty("FLT_SFLEVEL",0) - if editmode: - Blender.Window.EditMode(1) - -def subface_select(): - global xrefprefix - global xrefstack - global vofsstack - global vquatstack - global prop_w - global prop_h - - editmode = 0 - if Blender.Window.EditMode(): - Blender.Window.EditMode(0) - editmode = 1 - state = update_state() - - actmesh = state["activeMesh"] - activeFace = state["activeFace"] - if actmesh and activeFace: - if "FLT_ORIGINDEX" in actmesh.faces.properties and "FLT_SFLEVEL" in actmesh.faces.properties: - facelist = list() - actIndex = None - sublevel = None - for face in actmesh.faces: - facelist.append(face) - facelist.sort(sortSub) - for i, face in enumerate(facelist): - if face == activeFace: - actIndex = i - sublevel = face.getProperty("FLT_SFLEVEL")+1 - break - leftover = facelist[actIndex+1:] - for face in leftover: - if face.getProperty("FLT_SFLEVEL") == sublevel: - face.sel = 1 - else: - break - if editmode: - Blender.Window.EditMode(1) - -def select_by_typecode(typecode): - global xrefprefix - global xrefstack - global vofsstack - global vquatstack - global prop_w - global prop_h - - state = update_state() - - for object in state["activeScene"].objects: - if object.properties.has_key('FLT') and object.properties['FLT']['type'] == typecode and state["activeScene"].Layers & object.Layers: - object.select(1) -def clight_make(): - state = update_state() - actmesh = state["activeMesh"] - actobj = state["activeObject"] - - if actobj and actmesh: - actobj.properties['FLT'] = dict() - actobj.properties['FLT']['type'] = 111 - for prop in FLTInlineLP: - actobj.properties['FLT'][prop] = FLTInlineLP[prop] - - actmesh.verts.addPropertyLayer("FLT_VCOL", Blender.Mesh.PropertyTypes["INT"]) - for v in actmesh.verts: - v.setProperty("FLT_VCOL", 83815) - -def dfromact(): - state = update_state() - actobj = state["activeObject"] - actscene = state["activeScene"] - dof = None - - for object in actscene.objects.context: - if object.sel and (object != actobj): - if not dof: - dof = object - else: - break - - if not dof: - return - - if 'FLT' not in dof.properties: - dof.properties['FLT'] = dict() - - #Warning! assumes 1 BU == 10 meters. 
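    # (Illustrative aside, not part of the removed script.) dfromact() assumes
    # 1 Blender Unit == 10 meters, so each worldspace coordinate below is simply
    # scaled by 10.0 before being written into the DOF record, e.g.
    #     origin_bu = (1.5, -2.0, 0.25)
    #     [c * 10.0 for c in origin_bu]   ->   [15.0, -20.0, 2.5]   (ORIGX, ORIGY, ORIGZ)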
- #do origin - dof.properties['FLT']['5d!ORIGX'] = actobj.getLocation('worldspace')[0]*10.0 - dof.properties['FLT']['6d!ORIGY'] = actobj.getLocation('worldspace')[1]*10.0 - dof.properties['FLT']['7d!ORIGZ'] = actobj.getLocation('worldspace')[2]*10.0 - #do X axis - x = Blender.Mathutils.Vector(1.0,0.0,0.0) - x = x * actobj.getMatrix('worldspace') - x = x * 10.0 - dof.properties['FLT']['8d!XAXIS-X'] = x[0] - dof.properties['FLT']['9d!XAXIS-Y'] = x[1] - dof.properties['FLT']['10d!XAXIS-Z'] = x[2] - #do X/Y plane - x = Blender.Mathutils.Vector(1.0,1.0,0.0) - x.normalize() - x = x * actobj.getMatrix('worldspace') - x = x * 10.0 - dof.properties['FLT']['11d!XYPLANE-X'] = x[0] - dof.properties['FLT']['12d!XYPLANE-Y'] = x[1] - dof.properties['FLT']['13d!XZPLANE-Z'] = x[2] - - - - - -def event(evt,val): - if evt == Draw.ESCKEY: - Draw.Exit() - -def but_event(evt): - global xrefprefix - global xrefstack - global vofsstack - global vquatstack - global prop_w - global prop_h - global evcode - - state = update_state() - - #do Xref buttons - if evt == evcode["XREF_PREFIX"]: - xrefprefix = XREF_PREFIX.val - if evt == evcode["XREF_EDIT"]: - xref_edit() - if evt == evcode["XREF_SELECT"]: - xref_select() - if evt == evcode["XREF_MAKE"]: - xref_create() - #do scene buttons - if evt == evcode["SCENE_UPDATE"]: - update_all() - #do face buttons - if evt == evcode["FACE_MAKESUB"]: - subface_make() - if evt== evcode["FACE_KILLSUB"]: - subface_kill() - if evt== evcode["FACE_SELSUB"]: - subface_select() - #common buttons - if evt == evcode["IDPROP_KILL"]: - if state["activeObject"]: - idprops_kill(state["activeObject"]) - if evt == evcode["IDPROP_COPY"]: - if state["activeObject"]: - idprops_copy(state["activeObject"]) - if evt == evcode["XREF_POP"]: - xref_finish() - if evt == evcode["CLIGHT_MAKE"]: - clight_make() - if evt == evcode["DFROMACT"]: - dfromact() - if evt == evcode["FIXCOL"]: - fix_colors() - Draw.Redraw(1) - Blender.Window.RedrawAll() - - -def box(x,y,w,h,c,mode): - glColor3f(c[0],c[1],c[2]) - if mode == "outline": - glBegin(GL_LINE_LOOP) - else: - glBegin(GL_POLYGON) - glVertex2i(x,y) - glVertex2i(x+w,y) - glVertex2i(x+w,y+h) - glVertex2i(x,y+h) - glEnd() - -def draw_postcommon(x,y,finaly): - global sheetlabel - global xrefprefix - global xrefstack - global vofsstack - global vquatstack - global prop_w - global prop_h - global evcode - - state = update_state() - - width = prop_w - height = prop_h - - #draw the header - glColor3f(0.15,0.15,0.15) - glBegin(GL_POLYGON) - glVertex2i(x-1,y) - glVertex2i(x+width+1,y) - glVertex2i(x+width+1,y-25) - glVertex2i(x-1,y-25) - glEnd() - glColor3f(1,1,1) - glRasterPos2i(x,y-20) - sheetlabel = Blender.Draw.Text("FLT Tools Panel") - #draw the box outline - glColor3f(0,0,0) - glBegin(GL_LINE_LOOP) - glVertex2i(x-1,y) - glVertex2i(x+1+width,y) - glVertex2i(x+1+width,finaly-1) - glVertex2i(x-1,finaly-1) - glEnd() - return finaly - - -def draw_propsheet(x,y): - global XREF_PREFIX - global XREF_MAKE - global XREF_EDIT - global XREF_SELECT - global XREF_POP - global FACE_MAKESUB - global FACE_SELSUB - global FACE_KILLSUB - global IDPROP_KILL - global IDPROP_COPY - global SCENE_UPDATE - global DFROMACT - global FIXCOL - - global CLIGHT_MAKE - global xrefprefix - global xrefstack - global vofsstack - global vquatstack - global prop_w - global prop_h - global evcode - - state = update_state() - - width = prop_w - height = prop_h - origx = x - origy = y - - #draw Xref tools - y = y-20 - XREF_PREFIX = Blender.Draw.String("XRef 
Name:",evcode["XREF_PREFIX"],x,y,width,20,xrefprefix,18,"Xref prefix name, Actual name is generated from this") - y = y-20 - XREF_MAKE = Blender.Draw.PushButton("Make XRef",evcode["XREF_MAKE"],x,y,width,20,"Make External Reference") - y = y-20 - XREF_EDIT = Blender.Draw.PushButton("Edit XRef",evcode["XREF_EDIT"],x,y,width,20,"Edit External Reference") - y = y-20 - XREF_SELECT = Blender.Draw.PushButton("Select XRefs",evcode["XREF_SELECT"],x,y,width,20,"Select External References") - y = y - 20 - XREF_POP = Blender.Draw.PushButton("Return to previous scene",evcode["XREF_POP"],x,y,width,20,"Go up one level in xref hierarchy") - - #Draw facetools - y = y-20 - FACE_MAKESUB = Blender.Draw.PushButton("Make Subfaces",evcode["FACE_MAKESUB"],x,y,width,20,"Make subfaces") - y = y-20 - FACE_SELSUB = Blender.Draw.PushButton("Select Subfaces",evcode["FACE_SELSUB"],x,y,width,20,"Select subfaces") - y = y-20 - FACE_KILLSUB = Blender.Draw.PushButton("Kill Subfaces",evcode["FACE_KILLSUB"],x,y,width,20,"Kill subfaces") - - #Draw ID Property tools - y = y - 20 - IDPROP_KILL = Blender.Draw.PushButton("Delete ID props",evcode["IDPROP_KILL"],x,y,width,20,"Delete ID props") - y = y - 20 - IDPROP_COPY = Blender.Draw.PushButton("Copy to selected",evcode["IDPROP_COPY"],x,y,width,20, "Copy from active to all selected") - - y= y - 20 - CLIGHT_MAKE = Blender.Draw.PushButton("Make Light Point", evcode["CLIGHT_MAKE"],x,y,width,20,"Create inline light points from current mesh") - #General tools - y = y-20 - SCENE_UPDATE = Blender.Draw.PushButton("Update All",evcode["SCENE_UPDATE"],x,y,width,20,"Update all vertex colors") - - y=y-20 - DFROMACT = Blender.Draw.PushButton("Dof from Active", evcode["DFROMACT"],x,y,width,20,"Get Dof origin from active object") - y=y-20 - FIXCOL = Blender.Draw.PushButton("Fix Colors", evcode["FIXCOL"],x,y,width,20,"Fix baked FLT colors of selected meshes") - draw_postcommon(origx, origy,y) - -def gui(): - #draw the propsheet/toolbox. - psheety = 300 - #psheetx = psheety + 10 - draw_propsheet(0,psheety) -Draw.Register(gui,event,but_event) - \ No newline at end of file diff --git a/release/scripts/help_bpy_api.py b/release/scripts/help_bpy_api.py deleted file mode 100644 index e8d77ed8452..00000000000 --- a/release/scripts/help_bpy_api.py +++ /dev/null @@ -1,47 +0,0 @@ -#!BPY -""" -Name: 'Blender/Python Scripting API' -Blender: 248 -Group: 'Help' -Tooltip: 'The Blender Python API reference manual' -""" - -__author__ = "Matt Ebb" -__url__ = ("blender", "blenderartist") -__version__ = "1.0.1" -__bpydoc__ = """\ -This script opens the user's default web browser at http://www.blender.org's -"Blender Python API Reference" page. -""" - -# -------------------------------------------------------------------------- -# Blender/Python Scripting Reference Help Menu Item -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -import Blender -try: import webbrowser -except: webbrowser = None - -if webbrowser: - version = str(int(Blender.Get('version'))) - webbrowser.open('http://www.blender.org/documentation/'+ version +'PythonDoc/') -else: - Blender.Draw.PupMenu("Error%t|This script requires a full python installation") diff --git a/release/scripts/help_browser.py b/release/scripts/help_browser.py deleted file mode 100644 index c207a12068f..00000000000 --- a/release/scripts/help_browser.py +++ /dev/null @@ -1,814 +0,0 @@ -#!BPY - -""" -Name: 'Scripts Help Browser' -Blender: 234 -Group: 'Help' -Tooltip: 'Show help information about a chosen installed script.' -""" - -__author__ = "Willian P. Germano" -__version__ = "0.3 01/21/09" -__email__ = ('scripts', 'Author, wgermano:ig*com*br') -__url__ = ('blender', 'blenderartists.org') - -__bpydoc__ ="""\ -This script shows help information for scripts registered in the menus. - -Usage: - -- Start Screen: - -To read any script's "user manual" select a script from one of the -available category menus. If the script has help information in the format -expected by this Help Browser, it will be displayed in the Script Help -Screen. Otherwise you'll be offered the possibility of loading the chosen -script's source file in Blender's Text Editor. The programmer(s) may have -written useful comments there for users. - -Hotkeys:
- ESC or Q: [Q]uit - -- Script Help Screen: - -This screen shows the user manual page for the chosen script. If the text -doesn't fit completely on the screen, you can scroll it up or down with -arrow keys or a mouse wheel. There may be link and email buttons that if -clicked should open your default web browser and email client programs for -further information or support. - -Hotkeys:
- ESC: back to Start Screen
- Q: [Q]uit
- S: view script's [S]ource code in Text Editor
- UP, DOWN Arrows and mouse wheel: scroll text up / down -""" - -# $Id$ -# -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Copyright (C) 2004: Willian P. Germano, wgermano _at_ ig.com.br -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- -# Thanks: Brendon Murphy (suggestion) and Kevin Morgan (implementation) -# for the "run" button; Jean-Michel Soler for pointing a parsing error -# with multilines using triple single quotes. - -import Blender -from Blender import sys as bsys, Draw, Window, Registry - -WEBBROWSER = True -try: - import webbrowser -except: - WEBBROWSER = False - -DEFAULT_EMAILS = { - 'scripts': ['Bf-scripts-dev', 'bf-scripts-dev@blender.org'] -} - -DEFAULT_LINKS = { - 'blender': ["blender.org\'s Python forum", "http://www.blender.org/modules.php?op=modload&name=phpBB2&file=viewforum&f=9"] -} - -PADDING = 15 -COLUMNS = 1 -TEXT_WRAP = 100 -WIN_W = WIN_H = 200 -SCROLL_DOWN = 0 - -def screen_was_resized(): - global WIN_W, WIN_H - - w, h = Window.GetAreaSize() - if WIN_W != w or WIN_H != h: - WIN_W = w - WIN_H = h - return True - return False - -def fit_on_screen(): - global TEXT_WRAP, PADDING, WIN_W, WIN_H, COLUMNS - - COLUMNS = 1 - WIN_W, WIN_H = Window.GetAreaSize() - TEXT_WRAP = int((WIN_W - PADDING) / 6) - if TEXT_WRAP < 40: - TEXT_WRAP = 40 - elif TEXT_WRAP > 100: - if TEXT_WRAP > 110: - COLUMNS = 2 - TEXT_WRAP /= 2 - else: TEXT_WRAP = 100 - -def cut_point(text, length): - "Returns position of the last space found before 'length' chars" - l = length - c = text[l] - while c != ' ': - l -= 1 - if l == 0: return length # no space found - c = text[l] - return l - -def text_wrap(text, length = None): - global TEXT_WRAP - - wrapped = [] - lines = text.split('
') - llen = len(lines) - if llen > 1: - if lines[-1] == '': llen -= 1 - for i in range(llen - 1): - lines[i] = lines[i].rstrip() + '
' - lines[llen-1] = lines[llen-1].rstrip() - - if not length: length = TEXT_WRAP - - for l in lines: - while len(l) > length: - cpt = cut_point(l, length) - line, l = l[:cpt], l[cpt + 1:] - wrapped.append(line) - wrapped.append(l) - return wrapped - -def load_script_text(script): - global PATHS, SCRIPT_INFO - - if script.userdir: - path = PATHS['uscripts'] - else: - path = PATHS['scripts'] - - fname = bsys.join(path, script.fname) - - source = Blender.Text.Load(fname) - if source: - Draw.PupMenu("File loaded%%t|Please check the file \"%s\" in the Text Editor window" % source.name) - - -# for theme colors: -def float_colors(cols): - return map(lambda x: x / 255.0, cols) - -# globals - -SCRIPT_INFO = None - -PATHS = { - 'home': Blender.Get('homedir'), - 'scripts': Blender.Get('scriptsdir'), - 'uscripts': Blender.Get('uscriptsdir') -} - -if not PATHS['home']: - errmsg = """ -Can't find Blender's home dir and so can't find the -Bpymenus file automatically stored inside it, which -is needed by this script. Please run the -Help -> System -> System Information script to get -information about how to fix this. -""" - raise SystemError, errmsg - -BPYMENUS_FILE = bsys.join(PATHS['home'], 'Bpymenus') - -f = file(BPYMENUS_FILE, 'r') -lines = f.readlines() -f.close() - -AllGroups = [] - -class Script: - - def __init__(self, data): - self.name = data[0] - self.version = data[1] - self.fname = data[2] - self.userdir = data[3] - self.tip = data[4] - -# End of class Script - - -class Group: - - def __init__(self, name): - self.name = name - self.scripts = [] - - def add_script(self, script): - self.scripts.append(script) - - def get_name(self): - return self.name - - def get_scripts(self): - return self.scripts - -# End of class Group - - -class BPy_Info: - - def __init__(self, script, dict): - - self.script = script - - self.d = dict - - self.header = [] - self.len_header = 0 - self.content = [] - self.len_content = 0 - self.spaces = 0 - self.fix_urls() - self.make_header() - self.wrap_lines() - - def make_header(self): - - sc = self.script - d = self.d - - header = self.header - - title = "Script: %s" % sc.name - version = "Version: %s for Blender %1.2f or newer" % (d['__version__'], - sc.version / 100.0) - - if len(d['__author__']) == 1: - asuffix = ':' - else: asuffix = 's:' - - authors = "%s%s %s" % ("Author", asuffix, ", ".join(d['__author__'])) - - header.append(title) - header.append(version) - header.append(authors) - self.len_header = len(header) - - - def fix_urls(self): - - emails = self.d['__email__'] - fixed = [] - for a in emails: - if a in DEFAULT_EMAILS.keys(): - fixed.append(DEFAULT_EMAILS[a]) - else: - a = a.replace('*','.').replace(':','@') - ltmp = a.split(',') - if len(ltmp) != 2: - ltmp = [ltmp[0], ltmp[0]] - fixed.append(ltmp) - - self.d['__email__'] = fixed - - links = self.d['__url__'] - fixed = [] - for a in links: - if a in DEFAULT_LINKS.keys(): - fixed.append(DEFAULT_LINKS[a]) - else: - ltmp = a.split(',') - if len(ltmp) != 2: - ltmp = [ltmp[0], ltmp[0]] - fixed.append([ltmp[0].strip(), ltmp[1].strip()]) - - self.d['__url__'] = fixed - - - def wrap_lines(self, reset = 0): - - lines = self.d['__bpydoc__'].split('\n') - self.content = [] - newlines = [] - newline = [] - - if reset: - self.len_content = 0 - self.spaces = 0 - - for l in lines: - if l == '' and newline: - newlines.append(newline) - newline = [] - newlines.append('') - else: newline.append(l) - if newline: newlines.append(newline) - - for lst in newlines: - wrapped = text_wrap(" ".join(lst)) - for l in wrapped: - 
self.content.append(l) - if l: self.len_content += 1 - else: self.spaces += 1 - - if not self.content[-1]: - self.len_content -= 1 - - -# End of class BPy_Info - -def parse_pyobj_close(closetag, lines, i): - i += 1 - l = lines[i] - while l.find(closetag) < 0: - i += 1 - l = "%s%s" % (l, lines[i]) - return [l, i] - -def parse_pyobj(var, lines, i): - "Bad code, was in a hurry for release" - - l = lines[i].replace(var, '').replace('=','',1).strip() - - i0 = i - 1 - - if l[0] == '"': - if l[1:3] == '""': # """ - if l.find('"""', 3) < 0: # multiline - l2, i = parse_pyobj_close('"""', lines, i) - if l[-1] == '\\': l = l[:-1] - l = "%s%s" % (l, l2) - elif l[-1] == '"' and l[-2] != '\\': # single line: "..." - pass - else: - l = "ERROR" - - elif l[0] == "'": - if l[1:3] == "''": # ''' - if l.find("'''", 3) < 0: # multiline - l2, i = parse_pyobj_close("'''", lines, i) - if l[-1] == '\\': l = l[:-1] - l = "%s%s" % (l, l2) - elif l[-1] == '\\': - l2, i = parse_pyobj_close("'", lines, i) - l = "%s%s" % (l, l2) - elif l[-1] == "'" and l[-2] != '\\': # single line: '...' - pass - else: - l = "ERROR" - - elif l[0] == '(': - if l[-1] != ')': - l2, i = parse_pyobj_close(')', lines, i) - l = "%s%s" % (l, l2) - - elif l[0] == '[': - if l[-1] != ']': - l2, i = parse_pyobj_close(']', lines, i) - l = "%s%s" % (l, l2) - - return [l, i - i0] - -# helper functions: - -def parse_help_info(script): - - global PATHS, SCRIPT_INFO - - if script.userdir: - path = PATHS['uscripts'] - else: - path = PATHS['scripts'] - - fname = bsys.join(path, script.fname) - - if not bsys.exists(fname): - Draw.PupMenu('IO Error: couldn\'t find script %s' % fname) - return None - - f = file(fname, 'r') - lines = f.readlines() - f.close() - - # fix line endings: - if lines[0].find('\r'): - unixlines = [] - for l in lines: - unixlines.append(l.replace('\r','')) - lines = unixlines - - llen = len(lines) - has_doc = 0 - - doc_data = { - '__author__': '', - '__version__': '', - '__url__': '', - '__email__': '', - '__bpydoc__': '', - '__doc__': '' - } - - i = 0 - while i < llen: - l = lines[i] - incr = 1 - for k in doc_data.keys(): - if l.find(k, 0, 20) == 0: - value, incr = parse_pyobj(k, lines, i) - exec("doc_data['%s'] = %s" % (k, value)) - has_doc = 1 - break - i += incr - - # fix these to seqs, simplifies coding elsewhere - for w in ['__author__', '__url__', '__email__']: - val = doc_data[w] - if val and type(val) == str: - doc_data[w] = [doc_data[w]] - - if not doc_data['__bpydoc__']: - if doc_data['__doc__']: - doc_data['__bpydoc__'] = doc_data['__doc__'] - - if has_doc: # any data, maybe should confirm at least doc/bpydoc - info = BPy_Info(script, doc_data) - SCRIPT_INFO = info - return True - - else: - return False - - -def parse_script_line(l): - - tip = 'No tooltip' - try: - pieces = l.split("'") - name = pieces[1].replace('...','') - data = pieces[2].strip().split() - version = data[0] - userdir = data[-1] - fname = data[1] - i = 1 - while not fname.endswith('.py'): - i += 1 - fname = '%s %s' % (fname, data[i]) - if len(pieces) > 3: tip = pieces[3] - except: - return None - - return [name, int(version), fname, int(userdir), tip] - - -def parse_bpymenus(lines): - - global AllGroups - - llen = len(lines) - - for i in range(llen): - l = lines[i].strip() - if not l: continue - if l[-1] == '{': - group = Group(l[:-2]) - AllGroups.append(group) - i += 1 - l = lines[i].strip() - while l != '}': - if l[0] != '|': - data = parse_script_line(l) - if data: - script = Script(data) - group.add_script(script) - i += 1 - l = lines[i].strip() - -# 
AllGroups.reverse() - - -def create_group_menus(): - - global AllGroups - menus = [] - - for group in AllGroups: - - name = group.get_name() - menu = [] - scripts = group.get_scripts() - for s in scripts: menu.append(s.name) - menu = "|".join(menu) - menu = "%s%%t|%s" % (name, menu) - menus.append([name, menu]) - - return menus - - -# Collecting data: -fit_on_screen() -parse_bpymenus(lines) -GROUP_MENUS = create_group_menus() - - -# GUI: - -from Blender import BGL -from Blender.Window import Theme - -# globals: - -START_SCREEN = 0 -SCRIPT_SCREEN = 1 - -SCREEN = START_SCREEN - -# gui buttons: -len_gmenus = len(GROUP_MENUS) - -BUT_GMENU = range(len_gmenus) -for i in range(len_gmenus): - BUT_GMENU[i] = Draw.Create(0) - -# events: -BEVT_LINK = None # range(len(SCRIPT_INFO.links)) -BEVT_EMAIL = None # range(len(SCRIPT_INFO.emails)) -BEVT_GMENU = range(100, len_gmenus + 100) -BEVT_VIEWSOURCE = 1 -BEVT_EXIT = 2 -BEVT_BACK = 3 -BEVT_EXEC = 4 # Executes Script - -# gui callbacks: - -def gui(): # drawing the screen - - global SCREEN, START_SCREEN, SCRIPT_SCREEN - global SCRIPT_INFO, AllGroups, GROUP_MENUS - global BEVT_EMAIL, BEVT_LINK - global BEVT_VIEWSOURCE, BEVT_EXIT, BEVT_BACK, BEVT_GMENU, BUT_GMENU, BEVT_EXEC - global PADDING, WIN_W, WIN_H, SCROLL_DOWN, COLUMNS, FMODE - - theme = Theme.Get()[0] - tui = theme.get('ui') - ttxt = theme.get('text') - - COL_BG = float_colors(ttxt.back) - COL_TXT = ttxt.text - COL_TXTHI = ttxt.text_hi - - BGL.glClearColor(COL_BG[0],COL_BG[1],COL_BG[2],COL_BG[3]) - BGL.glClear(BGL.GL_COLOR_BUFFER_BIT) - BGL.glColor3ub(COL_TXT[0],COL_TXT[1], COL_TXT[2]) - - resize = screen_was_resized() - if resize: fit_on_screen() - - if SCREEN == START_SCREEN: - x = PADDING - bw = 85 - bh = 25 - hincr = 50 - - butcolumns = (WIN_W - 2*x)/ bw - if butcolumns < 2: butcolumns = 2 - elif butcolumns > 7: butcolumns = 7 - - len_gm = len(GROUP_MENUS) - butlines = len_gm / butcolumns - if len_gm % butcolumns: butlines += 1 - - h = hincr * butlines + 20 - y = h + bh - - BGL.glColor3ub(COL_TXTHI[0],COL_TXTHI[1], COL_TXTHI[2]) - BGL.glRasterPos2i(x, y) - Draw.Text('Scripts Help Browser') - - y -= bh - - BGL.glColor3ub(COL_TXT[0],COL_TXT[1], COL_TXT[2]) - - i = 0 - j = 0 - for group_menu in GROUP_MENUS: - BGL.glRasterPos2i(x, y) - Draw.Text(group_menu[0]+':') - BUT_GMENU[j] = Draw.Menu(group_menu[1], BEVT_GMENU[j], - x, y-bh-5, bw, bh, 0, - 'Choose a script to read its help information') - if i == butcolumns - 1: - x = PADDING - i = 0 - y -= hincr - else: - i += 1 - x += bw + 3 - j += 1 - - x = PADDING - y = 10 - BGL.glRasterPos2i(x, y) - Draw.Text('Select script for its help. 
Press Q or ESC to leave.') - - elif SCREEN == SCRIPT_SCREEN: - if SCRIPT_INFO: - - if resize: - SCRIPT_INFO.wrap_lines(1) - SCROLL_DOWN = 0 - - h = 18 * SCRIPT_INFO.len_content + 12 * SCRIPT_INFO.spaces - x = PADDING - y = WIN_H - bw = 38 - bh = 16 - - BGL.glColor3ub(COL_TXTHI[0],COL_TXTHI[1], COL_TXTHI[2]) - for line in SCRIPT_INFO.header: - y -= 18 - BGL.glRasterPos2i(x, y) - size = Draw.Text(line) - - for line in text_wrap('Tooltip: %s' % SCRIPT_INFO.script.tip): - y -= 18 - BGL.glRasterPos2i(x, y) - size = Draw.Text(line) - - i = 0 - y -= 28 - for data in SCRIPT_INFO.d['__url__']: - Draw.PushButton('link %d' % (i + 1), BEVT_LINK[i], - x + i*bw, y, bw, bh, data[0]) - i += 1 - y -= bh + 1 - - i = 0 - for data in SCRIPT_INFO.d['__email__']: - Draw.PushButton('email', BEVT_EMAIL[i], x + i*bw, y, bw, bh, data[0]) - i += 1 - y -= 18 - - y0 = y - BGL.glColor3ub(COL_TXT[0],COL_TXT[1], COL_TXT[2]) - for line in SCRIPT_INFO.content[SCROLL_DOWN:]: - if line: - line = line.replace('
', '') - BGL.glRasterPos2i(x, y) - Draw.Text(line) - y -= 18 - else: y -= 12 - if y < PADDING + 20: # reached end, either stop or go to 2nd column - if COLUMNS == 1: break - elif x == PADDING: # make sure we're still in column 1 - x = 6*TEXT_WRAP + PADDING / 2 - y = y0 - - x = PADDING - Draw.PushButton('source', BEVT_VIEWSOURCE, x, 17, 45, bh, - 'View this script\'s source code in the Text Editor (hotkey: S)') - Draw.PushButton('exit', BEVT_EXIT, x + 45, 17, 45, bh, - 'Exit from Scripts Help Browser (hotkey: Q)') - if not FMODE: - Draw.PushButton('back', BEVT_BACK, x + 2*45, 17, 45, bh, - 'Back to scripts selection screen (hotkey: ESC)') - Draw.PushButton('run script', BEVT_EXEC, x + 3*45, 17, 60, bh, 'Run this script') - - BGL.glColor3ub(COL_TXTHI[0],COL_TXTHI[1], COL_TXTHI[2]) - BGL.glRasterPos2i(x, 5) - Draw.Text('use the arrow keys or the mouse wheel to scroll text', 'small') - -def fit_scroll(): - global SCROLL_DOWN - if not SCRIPT_INFO: - SCROLL_DOWN = 0 - return - max = SCRIPT_INFO.len_content + SCRIPT_INFO.spaces - 1 - if SCROLL_DOWN > max: SCROLL_DOWN = max - if SCROLL_DOWN < 0: SCROLL_DOWN = 0 - - -def event(evt, val): # input events - - global SCREEN, START_SCREEN, SCRIPT_SCREEN - global SCROLL_DOWN, FMODE - - if not val: return - - if evt == Draw.ESCKEY: - if SCREEN == START_SCREEN or FMODE: Draw.Exit() - else: - SCREEN = START_SCREEN - SCROLL_DOWN = 0 - Draw.Redraw() - return - elif evt == Draw.QKEY: - Draw.Exit() - return - elif evt in [Draw.DOWNARROWKEY, Draw.WHEELDOWNMOUSE] and SCREEN == SCRIPT_SCREEN: - SCROLL_DOWN += 1 - fit_scroll() - Draw.Redraw() - return - elif evt in [Draw.UPARROWKEY, Draw.WHEELUPMOUSE] and SCREEN == SCRIPT_SCREEN: - SCROLL_DOWN -= 1 - fit_scroll() - Draw.Redraw() - return - elif evt == Draw.SKEY: - if SCREEN == SCRIPT_SCREEN and SCRIPT_INFO: - load_script_text(SCRIPT_INFO.script) - return - -def button_event(evt): # gui button events - - global SCREEN, START_SCREEN, SCRIPT_SCREEN - global BEVT_LINK, BEVT_EMAIL, BEVT_GMENU, BUT_GMENU, SCRIPT_INFO - global SCROLL_DOWN, FMODE - - if evt >= 100: # group menus - for i in range(len(BUT_GMENU)): - if evt == BEVT_GMENU[i]: - group = AllGroups[i] - index = BUT_GMENU[i].val - 1 - if index < 0: return # user didn't pick a menu entry - script = group.get_scripts()[BUT_GMENU[i].val - 1] - if parse_help_info(script): - SCREEN = SCRIPT_SCREEN - BEVT_LINK = range(20, len(SCRIPT_INFO.d['__url__']) + 20) - BEVT_EMAIL = range(50, len(SCRIPT_INFO.d['__email__']) + 50) - Draw.Redraw() - else: - res = Draw.PupMenu("No help available%t|View Source|Cancel") - if res == 1: - load_script_text(script) - elif evt >= 20: - if not WEBBROWSER: - Draw.PupMenu('Missing standard Python module%t|You need module "webbrowser" to access the web') - return - - if evt >= 50: # script screen email buttons - email = SCRIPT_INFO.d['__email__'][evt - 50][1] - webbrowser.open("mailto:%s" % email) - else: # >= 20: script screen link buttons - link = SCRIPT_INFO.d['__url__'][evt - 20][1] - webbrowser.open(link) - elif evt == BEVT_VIEWSOURCE: - if SCREEN == SCRIPT_SCREEN: load_script_text(SCRIPT_INFO.script) - elif evt == BEVT_EXIT: - Draw.Exit() - return - elif evt == BEVT_BACK: - if SCREEN == SCRIPT_SCREEN and not FMODE: - SCREEN = START_SCREEN - SCRIPT_INFO = None - SCROLL_DOWN = 0 - Draw.Redraw() - elif evt == BEVT_EXEC: # Execute script - exec_line = '' - if SCRIPT_INFO.script.userdir: - exec_line = bsys.join(Blender.Get('uscriptsdir'), SCRIPT_INFO.script.fname) - else: - exec_line = bsys.join(Blender.Get('scriptsdir'), 
SCRIPT_INFO.script.fname) - - Blender.Run(exec_line) - -keepon = True -FMODE = False # called by Blender.ShowHelp(name) API function ? - -KEYNAME = '__help_browser' -rd = Registry.GetKey(KEYNAME) -if rd: - rdscript = rd['script'] - keepon = False - Registry.RemoveKey(KEYNAME) - for group in AllGroups: - for script in group.get_scripts(): - if rdscript == script.fname: - parseit = parse_help_info(script) - if parseit == True: - keepon = True - SCREEN = SCRIPT_SCREEN - BEVT_LINK = range(20, len(SCRIPT_INFO.d['__url__']) + 20) - BEVT_EMAIL = range(50, len(SCRIPT_INFO.d['__email__']) + 50) - FMODE = True - elif parseit == False: - Draw.PupMenu("ERROR: script doesn't have proper help data") - break - -if not keepon: - Draw.PupMenu("ERROR: couldn't find script") -else: - Draw.Register(gui, event, button_event) diff --git a/release/scripts/hotkeys.py b/release/scripts/hotkeys.py deleted file mode 100644 index 187cba964bc..00000000000 --- a/release/scripts/hotkeys.py +++ /dev/null @@ -1,944 +0,0 @@ -#!BPY -# coding: utf-8 -""" Registration info for Blender menus: -Name: 'HotKey and MouseAction Reference' -Blender: 242 -Group: 'Help' -Tip: 'All the hotkeys/short keys' -""" - -__author__ = "Jean-Michel Soler (jms)" -__url__ = ("blender", "blenderartist", -"Script's homepage, http://jmsoler.free.fr/didacticiel/blender/tutor/cpl_hotkeyscript.htm", -"Communicate problems and errors, http://www.zoo-logique.org/3D.Blender/newsportal/thread.php?group=3D.Blender") -__version__ = "21/01/2007" - -__bpydoc__ = """\ -This script is a reference about all hotkeys and mouse actions in Blender. - -Usage: - -Open the script from the Help menu and select group of keys to browse. - -Notes:
- Additional entries in the database (c) 2004 by Bart. - Additional entries in the database for blender 2.37 --> 2.43 (c) 2003-2007/01 by jms. - -""" - -#------------------------ -# Hotkeys script -# (c) jm soler (2003-->01/2007) -# ----------------------- -# Page officielle : -# http://jmsoler.free.fr/didacticiel/blender/tutor/cpl_hotkeyscript.htm -# Communiquer les problemes et les erreurs sur: -# http://www.zoo-logique.org/3D.Blender/newsportal/thread.php?group=3D.Blender -#--------------------------------------------- -# ce script est proposé sous licence GPL pour etre associe -# a la distribution de Blender 2.33 et suivant -# -------------------------------------------------------------------------- -# this script is released under GPL licence -# for the Blender 2.33 scripts package -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Script copyright (C) 2003, 2004: Jean-Michel Soler -# Additionnal entries in the original data base (c) 2004 by Bart (bart@neeneenee.de) -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -import Blender -from Blender.Draw import * -from Blender.BGL import * - -# INTERNATIONAL={0:'English','1':'French'} -# LANGUAGE=0 - -hotkeys={ -'Search ':[['', '']], -'Specials 1 ':[ -[',', 'Set Bounding Box rotation scaling pivot'], -['Ctrl-,', 'Set Median Point rotation scaling pivot'], -['.', 'Set 3D cursor as rotation scaling pivot'], -['.', 'Outliner : to get the current active data in center of view'], -['Ctrl-.', 'Set Individual Object Centers as rotation scaling pivot'], -['~', 'Display all layers (German keys: ö,french keyboard: ù)'], -['Shift-~', 'Display all/previous layers (German keys: Shift-ö, french keyboard: shift-ù)'], -['ENTER', 'Outliner : to open a subtree, works on entire item line. '], -['HOME', 'Outliner : to show the entire Object hierarchy. 
'], -['SHIFT+BACKSPACE',' Text edit mode: Clear text '], -['SPACE', 'Popup menu'], -['SPACE', '3D View: camera selected'], -['Ctrl-SPACE', 'Manipulator (transform widget) Menu'], -['TAB', 'Enter/exit Edit Mode'], -['TAB', 'Edit Mode and Numerical Edit (see N key) : move to next input value'], -['TAB', 'Sequencer: Edit meta strip'], -['TAB', 'IPO: Edit selected'], -['TAB', 'Text Editor : indent'], -['TAB', 'NODE window : Edit group'], #243 -['Shift-TAB', 'Text Editor : unindent'], -['Shift-TAB', 'Edit Mode: Toggle snaping'], -['Ctrl-TAB', 'ARMATURE : Enter/exit Pose Mode'], -['Ctrl-TAB','MESH : all views, enter exit weight paint mode.'], -['Shift-TAB', 'Edit Mode : Enter Object Mode'], -['Ctrl-Open menu /', ''], -['Ctrl-Load Image', 'Opens a thumbnail browser instead of file browser for images'], -['.', '...'] -], - -'Mouse ':[ -['Actions:', ''], -['LMB', '3D View: Set 3D Cursor'], -['LMB', '3D View: camera selected'], -['LMB drag', 'Border select circle: add to selection'], -['LMB hold down', 'Popup menu'], -['LMB hold down drag', 'Gesture'], -['Ctrl-LMB', 'IPO: Add key'], -['Ctrl-LMB', '3D View: OBJECT or EDIT mode, select with the Lasso tool'], -['Ctrl-LMB', '3D View: ARMATURE EDIT mode, add a new bone to the selected end '], -['Shift-LMB','MANIPULATOR (transform widget): select the axe to remove in the current'], -['Shift-LMB','MANIPULATOR transformation ( if there is a problem with small step adjustment'], -['Shift-LMB','MANIPULATOR first select the axe or axes with LBM alone)'], -['Shift-LMB', 'Outliner : Hold Shift while clicking on a triangle arrow to open/close the subtree below'], -['MMB', 'Rotate'], -['Ctrl-MMB', 'Zoom view'], -['Ctrl-LMB', 'Outliner : Hold CTRL while clicking on a name allows you to edit a name.'], -['Ctrl-LMB', 'Outliner : This works for all visualized data, including bones or vertex groups,'], -['Ctrl-LMB', 'Outliner : but not for \'nameless\' items that draw the links to Hooks, Deform '], -['Ctrl-LMB', 'Outliner : Groups or Constraints.'], -['Shift-MMB', 'Move view'], -['RMB', 'Select'], -['RMB drag', 'Border select circle: subtract from selection'], -['RMB hold down', 'Popup menu'], -['Alt-RMB', 'Object Mode :Select but in a displayed list of objects located under the mouse cursor'], -['Alt-RMB', 'Edit Mode: Select EDGES LOOP '], -['Alt-Ctrl-RMB', 'Edit Mode: Select FACES LOOP'], -['Alt-Ctrl-RMB', 'UV Image Editor: Select face'], -['Shift-RMB', 'Add/subtract to/from selection'], -['Wheel', 'Zoom view'], -['Transformations:', ''], -['Drag+Ctrl', 'Step adjustment'], -['Drag+Ctrl-Shift', 'Small step adjustment (Transform Widget : first select the axe or axes with LBM alone)'], -['Drag+Shift', 'Fine adjustment (Transform Widget : first select the axe or axes with LBM alone)'], -['LMB', 'Confirm transformation'], -['MMB', 'Toggle optional transform feature'], -['RMB', 'Abort transformation'], -['LMB', 'Grease Pencil: when "Draw Mode On", draw new stroke'], -['RMB', 'Grease Pencil: when "Draw Mode On", eraser tool for stroke segments'], -['Shift-LMB', 'Grease Pencil: draw new stroke'], -['Alt-RMB', 'Grease Pencil: eraser tool for stroke segments'], -['.', '...'] -], - -'F-Keys ':[ -['F1', 'Open File'], -['Shift-F1', 'Library Data Select'], -['F2', 'Save File'], -['Shift-F2', 'Export DXF'], -['Ctrl-F2', 'Save/export in vrml 1.0 format' ], -['F3', 'Save image'], -['Ctrl-F3', 'Save image : dump 3d view'], -['Ctrl-Shift-F3', 'Save image : dump screen'], -['F4', 'Logic Window (may change)'], -['Shift-F4', 'Object manager Data Select '], -['F5', 'Material Window'], 
-['Shift-F5', '3D Window'], -['F6', 'Texture Window'], -['Shift-F6', 'IPO Window'], -['F7', 'Object Window'], -['Shift-F7', 'Buttons Window'], -['F8', 'World Window'], -['Shift-F8', 'Video Sequencer Window'], -['F9', 'Edit Mode Window'], -['Shift-F9', 'OOP Window'], -['Alt-Shift-F9', 'OutLiner Window'], -['F10', 'Render Window'], -['Shift-F10', 'UV Image Editor'], -['F11', 'Recall the last rendered image'], -['Shift-F11', 'Text Editor'], -['ctrl-F11', 'replay the last rendered animation'], -['F12', 'Render current Scene'], -['Ctrl-F12', 'Render animation'], -['Ctrl-Shift-F12', 'NLA Editor'], -['Shift-F12', 'Action Editor'], -['Shift-F12', 'Action Editor'], -['.', '...'] -], - -'Numbers ':[ -['1..2..0-=', 'Show layer 1..2..12'], -['1..2..0-=', 'Edit Mode with Size, Grab, rotate tools : enter value'], -['Alt-1..2..0', 'Show layer 11..12..20'], -['Shift-1..2..0', 'Toggle layer 1..2..12'], -['Ctrl-1..4', 'Object/Edit Mode : change subsurf level to the selected value'], -['Shift-ALT-...', 'Toggle layer 11..12..20'], -['Crtl-Shift-ALT-3', 'Edit Mode & Face Mode : Triangle faces'], -['Crtl-Shift-ALT-4', 'Edit Mode & Face Mode : Quad faces'], -['Crtl-Shift-ALT-5', 'Edit Mode & Face Mode : Non quad or triangle faces'], -['.', '...'] -], - -'Numpad ':[ -['Numpad DEL', 'Zoom on object'], -['Numpad /', 'Local view on object (hide others)'], -['Numpad *', 'Rotate view to objects local axes'], -['Numpad +', 'Zoom in (works everywhere)'], -['Numpad -', 'OutLiner window, Collapse one level of the hierarchy'], -['Alt-Numpad +', 'Proportional vertex Edit Mode: Increase range of influence'], -['Ctrl-Numpad +', 'Edit Mode: Select More vertices'], -['Numpad -', 'Zoom out (works everywhere)'], -['Numpad +', 'OutLiner window, Expand one level of the hierarchy'], -['Alt-Numpad -', 'Proportional vertex Edit Mode: Decrease range of influence'], -['Ctrl-Numpad +', 'Edit Mode: Select Less vertices'], -['Numpad 0', 'Set Camera view'], -['Ctrl-Numpad 0', 'Set active object as camera'], -['Alt-Numbad 0', 'Restore old camera'], -['Ctrl-Alt-Numpad 0', 'Align active camera to view'], -['Numpad 1', 'Front view'], -['Ctrl-Numpad 1', 'Back view'], -['Numpad 3', 'Right view'], -['Ctrl-Numpad 3', 'Left view'], -['Numpad 7', 'Top view'], -['Ctrl-Numpad 7', 'Bottom view '], -['Numpad 5', 'Toggle orthogonal/perspective view'], -['Numpad 9', 'Redraw view'], -['Numpad 4', 'Rotate view left'], -['ctrl-Shift-Numpad 4', 'Previous Screen'], -['Numpad 6', 'Rotate view right'], -['ctrl-Shift-Numpad 6', 'Next Screen'], -['Numpad 8', 'Rotate view up'], -['Numpad 2', 'Rotate view down'], -['.', '...'] -], - -'Arrows ':[ -['Home/Pos1', 'View all',''], -['Home', 'OutLiner Windows, Show hierarchy'], -['PgUp', 'Edit Mode and Proportionnal Editing Tools, increase influence'], -['PgUp', 'Strip Editor, Move Down'], -['PgUp', 'TimeLine: Jump to next marker'], -['PgUp', 'IPO: Select next keyframe'], -['Ctrl-PgUp', 'IPO: Select and jump to next keyframe'], -['Ctrl-PgUn', 'TimeLine: Jump to next key'], -['PgDn', 'Edit Mode and Proportionnal Editing Tools, decrease influence'], -['PgDn', 'Strip Editor, Move Up'], -['PgDn', 'TimeLine: Jump to prev marker'], -['PgDn', 'IPO: Select previous keyframe'], -['Ctrl-PgDn', 'IPO: Select and jump to previous keyframe'], -['Ctrl-PgDn', 'TimeLine: Jump to prev key'], -['Left', 'One frame backwards'], -['Right', 'One frame forwards'], -['Down', '10 frames backwards'], -['Up', '10 frames forwards'], -['Alt-Down', 'Blender in Window mode'], -['Alt-Up', 'Blender in Fullscreen mode'], -['Ctrl-Left', 'Previous screen'], 
-['Ctrl-Right', 'Next screen'], -['Ctrl-Down', 'Maximize window toggle'], -['Ctrl-Up', 'Maximize window toggle'], -['Shift-Arrow', 'Toggle first frame/ last frame'], -['.', '...'] -], - -'Letters ':[ -{ -"A":[ -['A', 'Select all/Deselect all'], -['A', 'Outliner : Select all/Deselect all'], -['A', 'Ipo Editor : Object mode, Select all/Deselect all displayed Curves'], #243 -['A', 'Ipo Editor : Edit mode, Select all/Deselect all vertices'], #243 -['A', 'Render window (F12) : Display alpha plane'], -['Alt-A', 'Play animation in current window'], -['Ctrl-A', 'Apply objects size/rotation to object data'], -['Ctrl-A', 'Text Editor: Select all'], -['Ctrl-ALT-A', '3D-View: Armature Edit mode, align selected bones to active bone'], -['Shift-A', 'Sequencer: Add menu'], -['Shift-A', '3D-View: Add menu'], -['Shift-A', 'Sculpt Mode: Keep the brush center anchored to the initial location'], -['Shift-ALT-A', 'Play animation in all windows'], -['Shift-CTRL-A', 'Apply lattice / Make dupliverts real'], -['Shift-CTRL-A', 'Apply Deform '], -['.', '...'] -], - -"B":[ -['B', 'Border select'], -['BB', 'Circle select'], -['Alt-B', 'Object Mode: Select visible view section in 3D space'], -['Shift-B', 'Set render border (in active camera view)'], -['Ctrl-Alt-B', 'Object Mode: in 3D view, Bake (on an image in the uv editor window) the selected Meshes'], #243 -['Ctrl-Alt-B', 'Object Mode: in 3D view, Bake Full render of selected Meshes'], #243 -['Ctrl-Alt-B', 'Object Mode: in 3D view, Bake Ambient Occlusion of selected Meshes'], #243 -['Ctrl-Alt-B', 'Object Mode: in 3D view, Bake Normals of the selected Meshes'], #243 -['Ctrl-Alt-B', 'Object Mode: in 3D view, Bake Texture Only of selected Meshes'], #243 -['.', '...'] -], - -"C":[ -['C', 'Center view on cursor'], -['C', 'UV Image Editor: Active Face Select toggle'], -['C', 'Sequencer: Change content of the strip '], #243 -['C', 'IPO: Snap current frame to selected key'], -['C', 'TimeLine: Center View'], -['C', 'File Selector : Copy file'], -['C', 'NODE window : Show cyclic referencies'], #243 -['Alt-C', 'Object Mode: Convert menu'], -['Alt-C', 'Text Editor: Copy '], -['Ctrl-Alt-C', 'Object Mode : Add Constraint'], -['Ctrl-Shift-C', 'Text Editor: Copy selection to clipboard'], -['Ctrl-C', 'Copy menu (Copy properties of active to selected objects)'], -['Ctrl-C', 'UV Image Editor: Stick UVs to mesh vertex'], -['Ctrl-C','ARMATURE : posemode, Copy pose attributes'], -['Ctrl-Alt-C',' ARMATURE : posemode, add constraint to new empty object.'], -['Shift-C', 'Center and zoom view on selected objects'], -['Shift-C', 'UV Image Editor: Stick local UVs to mesh vertex'], -['.', '...'] -], - -"D":[ -['D', 'Set 3d draw mode'], -['Alt-D', 'Object Mode: Create new instance of object'], -['Ctrl-D', 'Display alpha of image texture as wire'], -['Ctrl-D', 'Text Editor : uncomment'], -['Shift-D', 'Create full copy of object'], -['Shift-D', 'NODE window : duplicate'], #243 -['CTRL-SHIFT-D', 'NLA editor : Duplicate markers'], -['CTRL-SHIFT-D', 'Action editor : Duplicate markers'], -['CTRL-SHIFT-D', 'IPO editor : Duplicate markers'], -['.', '...'] -], - -"E":[ -['E', 'Edit Mode: Extrude'], -['E', 'UV Image Editor: LSCM Unwrap'], -['E', 'TimeLine: Set current frame as End '], -['E', 'NODE window : Execute composite'], #243 -['ER', 'Edit Mode: Extrude Rotate'], -['ES', 'Edit Mode: Extrude Scale'], -['ESX', 'Edit Mode: Extrude Scale X axis'], -['ESY', 'Edit Mode: Extrude Scale Y axis'], -['ESZ', 'Edit Mode: Extrude Scale Z axis'], -['EX', 'Edit Mode: Extrude along X axis'], -['EY', 'Edit Mode: 
Extrude along Y axis'], -['EZ', 'Edit Mode: Extrude along Z axis'], -['Alt-E', 'Edit Mode: exit Edit Mode'], -['Ctrl-E', 'Edit Mode: Edge Specials menu'], -['Ctrl-E', 'Edit Mode: Edge Specials menu, Mark seams'], -['Ctrl-E', 'Edit Mode: Edge Specials menu, Clear seams'], -['Ctrl-E', 'Edit Mode: Edge Specials menu, Rotate Edge CW'], -['Ctrl-E', 'Edit Mode: Edge Specials menu, Rotate Edge CCW'], -['Ctrl-E', 'Edit Mode: Edge Specials menu, Loop Cut'], -['Ctrl-E', 'Edit Mode: Edge Specials menu, Edge Slide'], -['Shift-E', 'Edit Mode: SubSurf Edge Sharpness'], -['.', '...'] -], - -"F":[ -['F', 'Edit mode: Make edge/face'], -['F', 'Sequencer: Set Filter Y'], -['F', 'Object Mode: UV/Face Select mode'], -['Alt-F', 'Edit Mode: Beautify fill'], -['Alt-F,','Text editor : find again '], -['Alt-Ctrl-F,','Text editor : find '], -['Ctrl-F', 'Object Mode: Sort faces in Z direction'], -['Ctrl-F', 'Edit Mode: Flip triangle edges'], -['Shift-F', 'Edit Mode: Fill with triangles'], -['Shift-F', 'Object Mode: fly mode (see header for fly mode keys)'], -['.', '...'] -], - -"G":[ -['G', 'Grab (move)'], -['G', 'Timeline : Grab (move) Marker'], -['Alt-G', 'Clear location (this does only make sense in Object mode)'], -['Alt-G', 'NODE window : ungroup'], #243 -['Shift-ALT-G', 'Object mode: Remove selected objects from group'], -['Ctrl-G', 'NODE window : group'], #243 -['Ctrl-G', 'Add selected objects to group'], -['Ctrl-G', 'IPO editor, Grab/move marker'], -['Ctrl-Alt-G', 'MANIPULATOR (transform widget): set in Grab Mode'], -['Shift-G', 'Object mode: Selected Group menu'], -['Shift-G', 'Object mode: Selected Group menu 1, Children'], -['Shift-G', 'Object mode: Selected Group menu 2, Immediate Children'], -['Shift-G', 'Object mode: Selected Group menu 3, Parent'], -['Shift-G', 'Object mode: Selected Group menu 4, Sibling'], -['Shift-G', 'Object mode: Selected Group menu 5, Object of same type'], -['Shift-G', 'Object mode: Selected Group menu 6, Object in same shared layers'], -['Shift-G', 'Object mode: Selected Group menu 7, Objects in same group'], -['.', '...'] -], - -"H":[ -['H', 'Hide selected vertices/faces'], -['H', 'Curves: Set handle type'], -['H', 'Action editor: Handle type aligned'], -['H', 'Action editor: Handle type free'], -['H', 'NODE window : hide/unhide'], #243 -['Alt-H', 'Edit Mode : Show Hidden vertices/faces'], -['Shift-H', 'Curves: Automatic handle calculation'], -['Shift-H', 'Action editor: Handle type auto'], -['Shift-H', 'Edit Mode : Hide deselected vertices/faces'], -['Ctrl-H', 'Edit Mode : Add a hook on selected points or show the hook menu .'], -['.', '...'] -], - -"I":[ -['I', 'Insert Keyframe menu'], -['Alt-I','Delete Keyframe menu'], -['Ctrl-I','Select Inverse'], -['Shift-I','ARMATURE : add IK constraint'], -['Ctrl-Alt-I','ARMATURE : posemode, remove IK constraints.'], -['.', '...'] -], - -"J":[ -['J', 'IPO: Join menu'], -['J', 'Mesh: Join all adjacent triangles to quads'], -['J', 'Render Window: Swap render buffer'], -['Alt-J,','Text editor : Jump '], -['Ctrl-J', 'Join selected objects'], -['Ctrl-J', 'Nurbs: Add segment'], -['Ctrl-J', 'IPO: Join keyframes menu'], -['.', '...'] -], - -"K":[ -['K', '3d Window: Show keyframe positions'], -['K', 'Edit Mode: Loop/Cut menu'], -['K', 'IPO: Show keyframe positions'], -['K', 'Nurbs: Print knots'], -['K', 'VIDEO editor : cut at current frame'], #243 -['Ctrl-K', 'Make skeleton from armature'], -['Shift-K', 'Show and select all keyframes for object'], -['Shift-K', 'Edit Mode: Knife Mode select'], -['Shift-K', 'UV Face Select: Clear vertex colours'], 
-['Shift-K', 'Vertex Paint: All vertex colours are erased; they are changed to the current drawing colour.'], -['.', '...'] -], - -"L":[ -['L', 'Make local menu'], -['L', 'Edit Mode: Select linked vertices (near mouse pointer)'], -['L', 'NODE window: Select linked from '], #243 -['L', 'OOPS window: Select linked objects'], -['L', 'UV Face Select: Select linked faces'], -['Ctrl-L', 'Make links menu (for instance : to scene...)'], -['Shift-L', 'Select links menu'], -['Shift-L', 'NODE window: Select linked to '], #243 -['Ctrl-L', 'POSELIB: browse poses'], -['Shift-L', 'POSELIB: add/replace pose'], -['Ctrl-Shift-L', 'POSELIB: rename pose'], -['Alt-L', 'POSELIB: remove pose'], -['.', '...'] -], - -"M":[ -['M', 'Object mode : Move object to different layer'], -['M', 'Sequencer: Make meta strip (group) from selected strips'], -['M', 'Edit Mode: Mirros Axis menu'], -['M', 'File Selector: rename file'], -['M', 'Video Sequence Editor : Make Meta strip...'], -['M', 'NLA editor: Add marker'], -['M', 'Action editor: Add marker'], -['M', 'IPO editor: Add marker'], -['M', 'TimeLine: Add marker'], -['Alt-M', 'Edit Mode: Merge vertices menu'], -['Alt-M', 'Video Sequence Editor : Separate Meta strip...'], -['Ctrl-M', 'Object Mode: Mirros Axis menu'], -['Shift-M', 'TimeLine: Name marker'], -['Shift-M', 'IPO editor : Name marker'], -['Shift-M', 'NLA editor : Name marker'], -['Shift-M', 'Actions editor : Name marker'], -['.', '...'] -], - -"N":[ -['N', 'Transform Properties panel'] , -['N', 'OOPS window: Rename object'], -['N', 'VIDEO SEQUENCE editor : display strip properties '], #243 -['Alt-N', 'Text Editor : New text '], -['Ctrl-N', 'Armature: Recalculate bone roll angles'] , -['Ctrl-N', 'Edit Mode: Recalculate normals to outside'] , -['Ctrl-Shift-N', 'Edit Mode: Recalculate normals to inside'], -['.', '...'] -], - -"O":[ -['O', 'Edit Mode/UV Image Editor: Toggle proportional vertex editing'], -['O', 'IPO editor: Clean ipo curves (beware to the thresold needed value)'], #243 -['Alt-O', 'Clear object origin'], -['Alt-O', 'Edit mode, 3dview with prop-edit-mode, enables/disables connected'], -['Alt-O', 'Text Editor : Open file '], -['Ctrl-O', 'Open a panel with the ten most recent projets files'], #243 -['Shift-O', 'Proportional vertex Edit Mode: Toggle smooth/steep falloff'], -['Shift-O', 'Object Mode: Add a subsurf modifier to the selected mesh'], -['Shift-O', 'IPO editor: Smooth ipo curves'], #243 -['.', '...'] -], - -"P":[ -['P', 'Object Mode: Start realtime engine'], -['P', 'Edit mode: Seperate vertices to new object'], -['Shift-P', 'Edit mode: Push-Pull'], -['Shift-P', 'Object mode: Add a preview window in the D window'], -['P', 'UV Image Editor: Pin selected vertices. 
Pinned vertices will stay in place on the UV editor when executing an LSCM unwrap.'], -['Alt-P', 'Clear parent relationship'], -['Alt-P', 'UV Image Editor: Unpin UVs'], -['Alt-P', 'Text Editor : Run current script '], -['Ctrl-P', 'Make active object parent of selected object'], -['Ctrl-Shift-P', 'Make active object parent of selected object without inverse'], -['Ctrl-P', 'Edit mode: Make active vertex parent of selected object'], -['Ctrl-P', 'ARMATURE : editmode, make bone parent.'], -['Ctrl-Alt-P', 'ARMATURE: edimode, separate bones to new object'], -['.', '...'] -], - -"Q":[['Ctrl-Q', 'Quit'], - ['.', '...'] - ], - -"R":[ -['R', 'FileSelector : remove file'], -['R', 'Rotate'], -['R', 'IPO: Record mouse movement as IPO curve'], -['R', 'UV Face Select: Rotate menu uv coords or vertex colour'], -['R', 'NODE window : read saved render result'], #243 -['R', 'SEQUENCER window : re-assign entries to another strip '], #243 -['RX', 'Rotate around X axis'], -['RXX', "Rotate around object's local X axis"], -['RY', 'Rotate around Y axis'], -['RYY', "Rotate around object's local Y axis"], -['RZ', 'Rotate around Z axis'], -['RZZ', "Rotate around object's local Z axis"], -['Alt-R', 'Clear object rotation'], -['Alt-R', 'Text editor : reopen text.'], -['Ctrl-R', 'Edit Mode: Knife, cut selected edges, accept left mouse/ cancel right mouse'], -['Ctrl-Alt-R', 'MANIPULATOR (transform widget): set in Rotate Mode'], -['Shift-R', 'Edit Mode: select Face Loop'], -['Shift-R', 'Nurbs: Select row'], -['.', '...'] -], - -"S":[ -['S', 'Scale'] , -['S', 'TimeLine: Set Start'], -['SX', 'Flip around X axis'] , -['SY', 'Flip around Y axis'] , -['SZ', 'Flip around Z axis'] , -['SXX', 'Flip around X axis and show axis'] , -['SYY', 'Flip around Y axis and show axis'] , -['SZZ', 'Flip around Z axis and show axis'] , -['Alt-S', 'Edit mode: Shrink/fatten (Scale along vertex normals)'] , -['Alt-S', 'Text Editor : Save the current text to file '], -['Alt-S',' ARMATURE : posemode editmode: Scale envalope.'], -['Ctrl-Shift-S', 'Edit mode: To Sphere'] , -['Ctrl-Alt-Shift-S', 'Edit mode: Shear'] , -['Alt-S', 'Clear object size'] , -['Ctrl-S', 'Edit mode: Shear'] , -['Alt-Shift-S,','Text editor : Select the line '], -['Ctrl-Alt-G', 'MANIPULATOR (transform widget): set in Size Mode'], -['Shift-S', 'Cursor/Grid snap menu'], -['Shift-S', 'Sculpt Mode: Smooth Stroke.'], -['Shift-S+1', 'VIDEO SEQUENCE editor : jump to the current frame '], -['.', '...'] -], - -"T":[ -['T', 'Adjust texture space'], -['T', 'Edit mode: Flip 3d curve'], -['T', 'IPO: Menu Change IPO type, 1 Constant'], -['T', 'IPO: Menu Change IPO type, 2 Linear'], -['T', 'IPO: Menu Change IPO type, 3 Bezier'], -['T', 'TimeLine: Show second'], -['T', 'VIDEO SEQUENCE editor : toggle between show second andd show frame'], #243 -['Alt-T', 'Clear tracking of object'], -['Ctrl-T', 'Make selected object track active object'], -['Ctrl-T', 'Edit Mode: Convert to triangles'], -['Ctrl-Alt-T', 'Benchmark'], -['.', '...'] -], - -"U":[ -['U', 'Make single user menu (for import completly linked object to another scene for instance) '] , -['U', '3D View: Make Single user Menu'] , -['U', 'UV Face Select: Automatic UV calculation menu'] , -['U', 'Vertex-/Weightpaint mode: Undo'] , -['Ctrl-U', 'Save current state as user default'], -['Shift-U', 'Edit Mode: Redo Menu'], -['Alt-U', 'Edit Mode & Object Mode: Undo Menu'], -['.', '...'] -], - -"V":[ -['V', 'Curves/Nurbs: Vector handle'], -['V', 'Edit Mode : Rip selected vertices'], -['V', 'Vertexpaint mode'], -['V', 'UV Image Editor: Stitch UVs'], 
-['Ctrl-V',' UV Image Editor: maximize stretch.'], -['V', 'Action editor: Vector'], -['Alt-V', "Scale object to match image texture's aspect ratio"], -['Alt-V', 'Text Editor : Paste '], -['Alt-Shift-V', 'Text Editor : View menu'], -['Alt-Shift-V', 'Text Editor : View menu 1, Top of the file '], -['Alt-Shift-V', 'Text Editor : View menu 2, Bottom of the file '], -['Alt-Shift-V', 'Text Editor : View menu 3, PageUp'], -['Alt-Shift-V', 'Text Editor : View menu 4, PageDown'], -['Ctrl-Shift-V', 'Text Editor: Paste from clipboard'], -['Shift-V', 'Edit mode: Align view to selected vertices'], -['Shift-V', 'UV Image Editor: Limited Stitch UVs popup'], -['.', '...'] -], - -"W":[ -['W', 'Edit Mode: Specials menu'], -['W', 'Edit Mode: Specials menu, ARMATURE 1 Subdivide'], -['W', 'Edit Mode: Specials menu, ARMATURE 2 Subdivide Multi'], -['W', 'Edit Mode: Specials menu, ARMATURE 3 Switch Direction'], -['W', 'Edit Mode: Specials menu, ARMATURE 4 Flip Left-Right Name'], -['W', 'Edit Mode: Specials menu, ARMATURE 5 AutoName Left-Right'], -['W', 'Edit Mode: Specials menu, ARMATURE 6 AutoName Front-Back'], -['W', 'Edit Mode: Specials menu, ARMATURE 7 AutoName Top-Bottom'], -['W', 'Edit Mode: Specials menu, CURVE 1 Subdivide'], -['W', 'Edit Mode: Specials menu, CURVE 2 Swich Direction'], -['W', 'Edit Mode: Specials menu, CURVE 3 Set Goal Weight'], -['W', 'Edit Mode: Specials menu, CURVE 4 Set Radius'], -['W', 'Edit Mode: Specials menu, CURVE 5 Smooth'], -['W', 'Edit Mode: Specials menu, CURVE 6 Smooth Radius'], -['W', 'Edit Mode: Specials menu, MESH 1 Subdivide'], -['W', 'Edit Mode: Specials menu, MESH 2 Subdivide Multi'], -['W', 'Edit Mode: Specials menu, MESH 3 Subdivide Multi Fractal'], -['W', 'Edit Mode: Specials menu, MESH 4 Subdivide Smooth'], -['W', 'Edit Mode: Specials menu, MESH 5 Merge'], -['W', 'Edit Mode: Specials menu, MESH 6 Remove Double'], -['W', 'Edit Mode: Specials menu, MESH 7 Hide'], -['W', 'Edit Mode: Specials menu, MESH 8 Reveal'], -['W', 'Edit Mode: Specials menu, MESH 9 Select Swap'], -['W', 'Edit Mode: Specials menu, MESH 10 Flip Normal'], -['W', 'Edit Mode: Specials menu, MESH 11 Smooth'], -['W', 'Edit Mode: Specials menu, MESH 12 Bevel'], -['W', 'Edit Mode: Specials menu, MESH 13 Set Smooth'], -['W', 'Edit Mode : Specials menu, MESH 14 Set Solid'], -['W', 'Object Mode : on MESH objects, Boolean Tools menu'], -['W', 'Object Mode : on MESH objects, Boolean Tools 1 Intersect'], -['W', 'Object Mode : on MESH objects, Boolean Tools 2 union'], -['W', 'Object Mode : on MESH objects, Boolean Tools 3 difference'], -['W', 'Object Mode : on MESH objects, Boolean Tools 4 Add an intersect Modifier'], -['W', 'Object Mode : on MESH objects, Boolean Tools 5 Add an union Modifier'], -['W', 'Object Mode : on MESH objects, Boolean Tools 6 Add a difference Modifier'], -['W', 'Object mode : on TEXT object, Split characters, a new TEXT object by character in the selected string '], -['W', 'UV Image Editor: Weld/Align'], -['WX', 'UV Image Editor: Weld/Align X axis'], -['WY', 'UV Image Editor: Weld/Align Y axis'], -['Ctrl-W', 'Save current file'] , -['Shift-W', 'Warp/bend selected vertices around cursor'], -['.', '...'] - ], - -"X":[ -['X', 'Delete menu'] , -['X', 'TimeLine : Remove marker'], -['X', 'NLA : Remove marker'], -['X', 'IPO : Remove marker'], -['X', 'NODE window : delete'], #243 -['Alt-X', 'Text Editor : Cut '], -['Alt-X', 'Grease Pencil: Delete menu'], -['Ctrl-X', 'Restore default state (Erase all)'], -['.', '...'] - ], - -"Y":[ -['Y', 'Edit Mode & Mesh : Split selected vertices/faces from the 
rest'], -['Ctrl-Y', 'Object Mode : Redo'], -['.', '...'] -], - -"Z":[ -['Z', 'Render Window: 200% zoom from mouse position'], -['Z', 'Switch 3d draw type : solide/ wireframe (see also D)'], -['Alt-Z', 'Switch 3d draw type : solid / textured (see also D)'], -['Alt-Z,','Text editor : undo '], -['Ctrl-Z', 'Object Mode : Undo'], -['Ctrl-Z,','Text editor : undo '], -['Ctrl-Shift-Z,','Text editor : Redo '], -['Shift-Z', 'Switch 3d draw type : shaded / wireframe (see also D)'], -['.', '...'] -]}]} - -up=128 -down=129 -UP=0 -SEARCH=131 -OLDSEARCHLINE='' -SEARCHLINE=Create('') -LINE=130 -FINDED=[] -LEN=0 - -for k in hotkeys.keys(): - hotkeys[k].append(Create(0)) - -for k in hotkeys['Letters '][0]: - hotkeys['Letters '][0][k].append(Create(0)) - -hotL=hotkeys['Letters '][0].keys() -hotL.sort() - -hot=hotkeys.keys() -hot.sort() - -def searchfor(SEARCHLINE): - global hotkeys, hot - FINDLIST=[] - for k in hot: - if k not in ['Letters ', 'Search '] : - for l in hotkeys[k][:-1]: - #print 'k, l : ', k, l, l[1] - if l[1].upper().find(SEARCHLINE.upper())!=-1: - FINDLIST.append(l) - - elif k == 'Letters ': - for l in hotL : - for l0 in hotkeys['Letters '][0][l][:-1]: - #print 'k, l : ',l, k, l0 - if l0[1].upper().find(SEARCHLINE.upper())!=-1: - FINDLIST.append(l0) - #print 'FINDLIST',FINDLIST - FINDLIST.append(['Find list','Entry']) - return FINDLIST - - -glCr=glRasterPos2d -glCl3=glColor3f -glCl4=glColor4f -glRct=glRectf - -cf=[0.95,0.95,0.9,0.0] -c1=[0.95,0.95,0.9,0.0] -c=cf -r=[0,0,0,0] - -def trace_rectangle4(r,c): - glCl4(c[0],c[1],c[2],c[3]) - glRct(r[0],r[1],r[2],r[3]) - -def trace_rectangle3(r,c,c1): - glCl3(c[0],c[1],c[2]) - glRct(r[0],r[1],r[2],r[3]) - glCl3(c1[0],c1[1],c1[2]) - -def draw(): - global r,c,c1,hotkeys, hot, hotL, up, down, UP, SEARCH, SEARCHLINE,LINE - global OLDSEARCHLINE, FINDED, SCROLL, LEN - size=Buffer(GL_FLOAT, 4) - glGetFloatv(GL_SCISSOR_BOX, size) - size= size.list - - for s in [0,1,2,3]: size[s]=int(size[s]) - - c=[0.75,0.75,0.75,0] - c1=[0.6,0.6,0.6,0] - - r=[0,size[3],size[2],0] - trace_rectangle4(r,c) - - c=[0.64,0.64,0.64,0] - c1=[0.95,0.95,0.9,0.0] - - r=[0,size[3],size[2],size[3]-40] - trace_rectangle4(r,c) - - c1=[0.7,0.7,0.9,0.0] - c=[0.2,0.2,0.4,0.0] - c2=[0.71,0.71,0.71,0.0] - - glColor3f(1, 1, 1) - glRasterPos2f(42, size[3]-25) - - Text("HotKey and MouseAction Reference") - - l=0 - listed=0 - Llisted=0 - size[3]=size[3]-18 - - BeginAlign() - for i, k in enumerate(hot): - hotkeys[k][-1]=Toggle(k, i+10, 78*i, size[3]-(47), 78, 24, hotkeys[k][-1].val ) - l+=len(k) - if hotkeys[k][-1].val==1.0: - listed= i - EndAlign() - l=0 - size[3]=size[3]-4 - - if hot[listed]!='Letters ' and hot[listed]!='Search ' : - size[3]=size[3]-8 - SCROLL=size[3]/21 - END=-1 - if SCROLL < len(hotkeys[hot[listed]][:-1]): - BeginAlign() - Button('/\\',up,4,size[3]+8,20,14,'Scroll up') - Button('\\/',down,4,size[3]-8,20,14,'Scroll down') - EndAlign() - if (SCROLL+UP)0: - LEN=len(FINDED) - size[3]=size[3]-8 - SCROLL=size[3]/21 - END=-1 - - if SCROLL < len(FINDED): - BeginAlign() - Button('/\\',up,4,size[3]+8,20,14,'Scroll up') - Button('\\/',down,4,size[3]-8,20,14,'Scroll down') - EndAlign() - if (SCROLL+UP)4: - UP-=5 - elif (evt== UPARROWKEY): - if (UP+SCROLL)0: - UP-=1 - Redraw() - -def bevent(evt): - global hotkeysmhot, hotL, up,down,UP, FINDED - global SEARCH, SEARCHLINE,LINE, OLDSEARCHLINE - - if (evt== 1): - Exit() - - elif 9 < evt < 20: - for i, k in enumerate(hot): - if i+10!=evt: - hotkeys[k][-1].val=0 - UP=0 - Blender.Window.Redraw() - - elif 19 < evt < 46: - for i, k in 
enumerate(hotL): - if i+20!=evt: - hotkeys['Letters '][0][k][-1].val=0 - UP=0 - Blender.Window.Redraw() - - elif (evt==up): - UP+=1 - Blender.Window.Redraw() - - elif (evt==down): - if UP>0: UP-=1 - Blender.Window.Redraw() - - elif (evt==LINE): - if SEARCHLINE.val!='' and SEARCHLINE.val!=OLDSEARCHLINE: - OLDSEARCHLINE=SEARCHLINE.val - FINDED=searchfor(OLDSEARCHLINE) - Blender.Window.Redraw() - -if __name__ == '__main__': - Register(draw, event, bevent) diff --git a/release/scripts/image_2d_cutout.py b/release/scripts/image_2d_cutout.py deleted file mode 100644 index 16d0805256b..00000000000 --- a/release/scripts/image_2d_cutout.py +++ /dev/null @@ -1,559 +0,0 @@ -#!BPY - -""" -Name: '2D Cutout Image Importer' -Blender: 249 -Group: 'Image' -Tooltip: 'Batch UV Map images to Planes' -""" - -__author__ = "Kevin Morgan (forTe)" -__url__ = ("Home page, http://gamulabs.freepgs.com") -__version__ = "1.2.1" -__bpydoc__ = """\ -This Script will take an image and -UV map it to a plane sharing the same width to height ratio as the image. -Import options allow for the image to be a still or sequence type image -

-Imports can be single images or whole directories of images depending on the chosen -option. -""" - -#################################################### -#Copyright (C) 2008: Kevin Morgan -#################################################### -#-------------GPL LICENSE BLOCK------------- -#This program is free software: you can redistribute it and/or modify -#it under the terms of the GNU General Public License as published by -#the Free Software Foundation, either version 3 of the License, or -#(at your option) any later version. -# -#This program is distributed in the hopes that it will be useful, -#but WITHOUT ANY WARRANTY; without even the implied warranty of -#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -#GNU General Public License for more details. -# -#You should have received a copy of the GNU General Public License -#along with this program. If not, see . -#################################################### -#################################################### -#V1.0 -#Basic Functionality -#Published June 28, 2007 -#################################################### -#V1.1 -#Added Support for enabling viewport transparency -#Added more options to the UI for materials -#Added Proportionality code (Pixels per unit) -#Added GPL License Block -#Published June 29, 2007 -#################################################### -#V1.2 -#Added Support for Copying Existing Materials -#Import Images as Sequences -#Refreshed GUI - now with more clutter :( -#Miscellaneous and Housekeeping -#Published June 16, 2008 -#################################################### -#V1.2.1 -#Added Extend Texture Mode option at request of a user -#Published September 24, 2008 -#################################################### - -import Blender -from Blender import BGL, Draw, Image, Mesh, Material, Texture, Window -from Blender.Mathutils import * -import bpy - -# Global Constants -DIR = 0 -SINGLE = 1 -CUROFFS = 0 - -# GUI CONSTANTS -NO_EVT = 0 -SINGLE_IMG = 1 -DIRECTORY_IMG = 2 -CLR_PATH = 3 -CHG_EXT = 4 -EXIT = 5 -DO_SCRIPT = 6 - -VERSIONSTRING = '1.2.1' - -# Note the two parameter dicts could be combined, I just, liked them seperate... -# GUI Buttons Dict -GUIPARAMS = { - 'Path': Draw.Create(''), - 'ImageExt': Draw.Create(''), - 'Seq': Draw.Create(0), - 'PackImage': Draw.Create(0), - 'PPU': Draw.Create(50), - 'VPTransp': Draw.Create(1), - 'XOff': Draw.Create(0.0), - 'YOff': Draw.Create(0.0), - 'ZOff': Draw.Create(0.0), - 'CopyMat': Draw.Create(0), - 'MatId': Draw.Create(0), - 'MatCol': Draw.Create(1.0, 0.0, 0.0), - 'Ref': Draw.Create(0.8), - 'Spec': Draw.Create(0.5), - 'Hard': Draw.Create(50), - 'Alpha': Draw.Create(1.0), - 'ZTransp': Draw.Create(1), - 'Shadeless': Draw.Create(0), - 'TexChan': Draw.Create(1), - 'MPTCol': Draw.Create(1), - 'MPTAlpha': Draw.Create(1), - 'UseAlpha': Draw.Create(1), - 'CalcAlpha': Draw.Create(0), - 'ExtendMode': Draw.Create(0), - 'AutoRefresh': Draw.Create(0), - 'Cyclic': Draw.Create(0), - 'Frames': Draw.Create(100), - 'Offs': Draw.Create(0), - 'StartFr': Draw.Create(1), - 'RedrawImp': Draw.Create(0) -} - -# Script Execution Paramaters -PARAMS = { - 'ImagePaths': [], # Path to images to import - 'ImportType': SINGLE, # Import a Directory or a Single Image? - 'ImageProp': Image.Sources.STILL, # What sources for the image, still or sequence - 'PackImage': 0, # Pack the Image(s)? 
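- # Most of these defaults are placeholders: translateParams() copies the live GUIPARAMS
- # widget values over them just before doScript() begins importing.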
- 'PPU': 20, # Pixels Per Blender Unit - 'MakeTransp': 1, # Make face transparent in viewport - - 'NewMat': 1, # If true make a new material, otherwise duplicate an existing one, replacing appropriate attributes - 'MaterialId': 0, # ID to take from the Materials list upon copy - 'Materials': None, # Materials in Scene - 'MatProps': {'Col': [1.0, 0.0, 0.0], 'Shadeless': 1, 'Ref': 0.5, 'Spec': 0.5, 'Hard': 200, 'Alpha': 1.0, 'ZTransp': 1}, - - 'TexProps': {'UseAlpha': 1, 'CalcAlpha': 0, 'ExtendMode': 0}, # Texture Properties - 'TexChannel': 0, # Texture Channel - 'TexMapTo': {'Col': 1, 'Alpha': 1}, # Map to Col and/or Alpha - 'SeqProps': {'AutoRefresh': 0, 'Cyclic': 0, 'Frames': 100, 'Offs': 0, 'StartFr': 1}, - 'ObOffset': Vector(1, 0, 0) # Offset by this vector upon creation for multifile import -} - -# Get the Active Scene, of course -scn = bpy.data.scenes.active - -########################################## -# MAIN SCRIPT FUNCTIONS -########################################## - -def imgImport(imgPath): - global CUROFFS, PARAMS - ###################################### - # Load the image - ###################################### - try: - img = Image.Load(imgPath) - imgDimensions = img.getSize() # do this to ensure the data is available - except: - Blender.Draw.PupMenu('Error%t|Unsupported image format for "'+ imgPath.split('\\')[-1].split('/')[-1] +'"') - return - - if PARAMS['PackImage']: - img.pack() - name = Blender.sys.makename(imgPath, strip = 1) - - ###################################### - # Construct the mesh - ###################################### - - me = Mesh.New(name) - - # Calculate Dimensions from Image Size - dim = [float(i)/PARAMS['PPU'] for i in imgDimensions] - v = [[dim[0], dim[1], 0], [-dim[0], dim[1], 0], [-dim[0], -dim[1], 0], [dim[0], -dim[1], 0]] - me.verts.extend(v) - me.faces.extend([0, 1, 2, 3]) - - me.faces[0].image = img - me.faces[0].uv = [Vector(1.0, 1.0), Vector(0.0, 1.0), Vector(0.0, 0.0), Vector(1.0, 0.0)] - - if PARAMS['MakeTransp']: - me.faces[0].transp = Mesh.FaceTranspModes.ALPHA - - ###################################### - # Modify the Material - ###################################### - - mat = None - if not PARAMS['NewMat']: - mat = PARAMS['Materials'][PARAMS['MaterialId']].__copy__() - mat.setName(name) - else: - mat = Material.New(name) - properties = PARAMS['MatProps'] - mat.setRGBCol(properties['Col']) - mat.setRef(properties['Ref']) - mat.setSpec(properties['Spec']) - mat.setHardness(properties['Hard']) - mat.setAlpha(properties['Alpha']) - - if properties['Shadeless']: - mat.mode |= Material.Modes.SHADELESS - if properties['ZTransp']: - mat.mode |= Material.Modes.ZTRANSP - - properties = PARAMS['TexProps'] - - tex = Texture.New(name) - tex.setType('Image') - tex.setImage(img) - if properties['UseAlpha']: - tex.useAlpha = Texture.ImageFlags.USEALPHA - - if properties['CalcAlpha']: - tex.calcAlpha = Texture.ImageFlags.CALCALPHA - - if properties['ExtendMode']: - tex.setExtend('Extend') - - if PARAMS['ImageProp'] == Image.Sources.SEQUENCE: - properties = PARAMS['SeqProps'] - - img.source = PARAMS['ImageProp'] # Needs to be done here, otherwise an error with earlier getSize() - - tex.animStart = properties['StartFr'] - tex.animOffset = properties['Offs'] - tex.animFrames = properties['Frames'] - tex.autoRefresh = properties['AutoRefresh'] - tex.cyclic = properties['Cyclic'] - - texMapSetters = Texture.TexCo.UV - - # PARAMS['TexMapTo']['Col'] (and alpha) will either be 0 or 1 because its from a toggle, otherwise this line doesn't work - texChanSetters 
= Texture.MapTo.COL * PARAMS['TexMapTo']['Col'] | Texture.MapTo.ALPHA * PARAMS['TexMapTo']['Alpha'] - - mat.setTexture(PARAMS['TexChannel'], tex, texMapSetters, texChanSetters) - me.materials += [mat] - - ###################################### - # Object Construction - ###################################### - - ob = scn.objects.new(me, name) - p = Vector(ob.getLocation()) # Should be the origin, but just to be safe, get it - ob.setLocation((CUROFFS * PARAMS['ObOffset']) + p) - - return - -def translateParams(): - # Translates (or assigns for the most part) GUI values to those that can be read by the - # Import Function - - global GUIPARAMS, PARAMS - - if GUIPARAMS['Seq'].val and PARAMS['ImportType'] != DIR: - PARAMS['ImageProp'] = Image.Sources.SEQUENCE - - PARAMS['PackImage'] = GUIPARAMS['PackImage'].val - PARAMS['PPU'] = GUIPARAMS['PPU'].val - PARAMS['MakeTransp'] = GUIPARAMS['VPTransp'].val - PARAMS['ObOffset'] = Vector(GUIPARAMS['XOff'].val, GUIPARAMS['YOff'].val, GUIPARAMS['ZOff'].val) - - PARAMS['NewMat'] = not GUIPARAMS['CopyMat'].val - PARAMS['MaterialId'] = GUIPARAMS['MatId'].val - PARAMS['MatProps']['Col'] = list(GUIPARAMS['MatCol'].val) - PARAMS['MatProps']['Ref'] = GUIPARAMS['Ref'].val - PARAMS['MatProps']['Spec'] = GUIPARAMS['Spec'].val - PARAMS['MatProps']['Hard'] = GUIPARAMS['Hard'].val - PARAMS['MatProps']['Alpha'] = GUIPARAMS['Alpha'].val - PARAMS['MatProps']['ZTransp'] = GUIPARAMS['ZTransp'].val - PARAMS['MatProps']['Shadeless'] = GUIPARAMS['Shadeless'].val - - PARAMS['TexChannel'] = GUIPARAMS['TexChan'].val - 1 #Channels are 0-9, but GUI shows 1-10 - PARAMS['TexProps']['UseAlpha'] = GUIPARAMS['UseAlpha'].val - PARAMS['TexProps']['CalcAlpha'] = GUIPARAMS['CalcAlpha'].val - PARAMS['TexProps']['ExtendMode'] = GUIPARAMS['ExtendMode'].val - PARAMS['TexMapTo']['Col'] = GUIPARAMS['MPTCol'].val - PARAMS['TexMapTo']['Alpha'] = GUIPARAMS['MPTAlpha'].val - - PARAMS['SeqProps']['AutoRefresh'] = GUIPARAMS['AutoRefresh'].val - PARAMS['SeqProps']['Cyclic'] = GUIPARAMS['Cyclic'].val - PARAMS['SeqProps']['Frames'] = GUIPARAMS['Frames'].val - PARAMS['SeqProps']['Offs'] = GUIPARAMS['Offs'].val - PARAMS['SeqProps']['StartFr'] = GUIPARAMS['StartFr'].val - return - -def doScript(): - # Main script Function - # Consists of choosing between 2 loops, one with a redraw, one without, see comments for why - - global CUROFFS - - translateParams() - - total = len(PARAMS['ImagePaths']) - broken = 0 - - if GUIPARAMS['RedrawImp'].val: # Reduces the need to compare on every go through the loop - for i, path in enumerate(PARAMS['ImagePaths']): - CUROFFS = i # Could be passed to the import Function, but I chose a global instead - Window.DrawProgressBar(float(i)/total, "Importing %i of %i Images..." %(i+1, total)) - imgImport(path) - Blender.Redraw() - if Blender.Get('version') >= 246: - if Window.TestBreak(): - broken = 1 - break - else: - for i, path in enumerate(PARAMS['ImagePaths']): - CUROFFS = i - Window.DrawProgressBar(float(i)/total, "Importing %i of %i Images..." 
%(i+1, total)) - imgImport(path) - if Blender.Get('version') >= 246: - if Window.TestBreak(): - broken = 1 - break - - if broken: - Window.DrawProgressBar(1.0, "Script Execution Aborted") - else: - Window.DrawProgressBar(1.0, "Finished Importing") - - Blender.Redraw() # Force a refresh, since the user may have chosen to not refresh as they go along - - return - -########################################## -# PATH SETTERS AND CHANGERS -########################################## - -def setSinglePath(filename): - global GUIPARAMS, PARAMS - GUIPARAMS['Path'].val = filename - PARAMS['ImagePaths'] = [filename] - return - -def setDirPath(filename): - global GUIPARAMS, PARAMS - - try: - import os - except: - Draw.PupMenu('Full install of python required to be able to set Directory Paths') - Draw.Exit() - return - - path = os.path.dirname(filename) # Blender.sys.dirname fails on '/' - GUIPARAMS['Path'].val = path - - ext_lower = GUIPARAMS['ImageExt'].val.lower() - for f in os.listdir(path): - if f.lower().endswith(ext_lower): - PARAMS['ImagePaths'].append(os.path.join(path, f)) - - return - -def changeExtension(): - global GUIPARAMS, PARAMS - - if PARAMS['ImportType'] == SINGLE: - return - - try: - import os - except: - Draw.PupMenu('Full install of python required to be able to set Directory Paths') - Draw.Exit() - return - - PARAMS['ImagePaths'] = [] - - ext_lower = GUIPARAMS['ImageExt'].val.lower() - for f in os.listdir(GUIPARAMS['Path'].val): - if f.lower().endswith(ext_lower): - PARAMS['ImagePaths'].append(os.path.join(GUIPARAMS['Path'].val, f)) - - return - -########################################## -# INTERFACE FUNCTIONS -########################################## -def compileMaterialList(): - # Pretty straight forward, just grabs the materials in the blend file and constructs - # an appropriate string for use as a menu - - mats = [mat for mat in bpy.data.materials] - PARAMS['Materials'] = mats - title = 'Materials%t|' - menStrs = [mat.name + '%x' + str(i) + '|' for i, mat in enumerate(mats)] - return title + ''.join(menStrs) - -def event(evt, val): - # Disabled, since Esc is often used from the file browser - #if evt == Draw.ESCKEY: - # Draw.Exit() - - return - -def bevent(evt): - global GUIPARAMS, PARAMS - - if evt == NO_EVT: - Draw.Redraw() - - elif evt == SINGLE_IMG: - Window.FileSelector(setSinglePath, 'Image', Blender.sys.expandpath('//')) - Draw.Redraw() - PARAMS['ImportType'] = SINGLE - - elif evt == DIRECTORY_IMG: - Window.FileSelector(setDirPath, 'Directory', Blender.sys.expandpath('//')) - Draw.Redraw() - PARAMS['ImportType'] = DIR - - elif evt == CLR_PATH: - GUIPARAMS['Path'].val = '' - PARAMS['ImagePaths'] = [] - GUIPARAMS['ImageExt'].val = '' - Draw.Redraw() - - elif evt == CHG_EXT: - changeExtension() - Draw.Redraw() - - elif evt == EXIT: - Draw.Exit() - - elif evt == DO_SCRIPT: - doScript() - - else: - print "ERROR: UNEXPECTED BUTTON EVENT" - - return - -# GUI Colors ###### -ScreenColor = [0.7, 0.7, 0.7] -BackgroundColor = [0.8, 0.8, 0.8] -TitleBG = [0.6, 0.6, 0.6] -TitleCol = [1.0, 1.0, 1.0] -ErrCol = [1.0, 0.0, 0.0] -TextCol = [0.4, 0.4, 0.5] -################### - -def GUI(): - global GUIPARAMS, PARAMS - - BGL.glClearColor(*(ScreenColor + [1.0])) - BGL.glClear(BGL.GL_COLOR_BUFFER_BIT) - - minx = 5 - maxx = 500 - miny = 5 - maxy = 450 - - lineheight = 24 - buPad = 5 # Generic Button Padding, most buttons should have 24-19 (or 5) px space around them - - lP = 5 # Left Padding - rP = 5 # Right Padding - - # Draw Background Box - BGL.glColor3f(*BackgroundColor) - 
BGL.glRecti(minx, miny, maxx, maxy) - - # Draw Title - BGL.glColor3f(*TitleBG) - BGL.glRecti(minx, maxy - (lineheight), maxx, maxy) - BGL.glColor3f(*TitleCol) - - title = "2D Cutout Image Importer v" + VERSIONSTRING - BGL.glRasterPos2i(minx + lP, maxy - 15) - Draw.Text(title, 'large') - - Draw.PushButton('Exit', EXIT, maxx-50-rP, maxy - lineheight + 2, 50, 19, "Exit Script") - - # Path Buttons - if GUIPARAMS['Path'].val == '': - Draw.PushButton('Single Image', SINGLE_IMG, minx + lP, maxy - (2*lineheight), 150, 19, "Select a Single Image to Import") - Draw.PushButton('Directory', DIRECTORY_IMG, minx + lP + 150, maxy - (2*lineheight), 150, 19, "Select a Directory of Images to Import") - - else: - Draw.PushButton('Clear', CLR_PATH, minx+lP, maxy - (2*lineheight), 50, 19, "Clear Path and Change Import Options") - - GUIPARAMS['Path'] = Draw.String('Path: ', NO_EVT, minx + lP, maxy - (3*lineheight), (maxx-minx-lP-rP), 19, GUIPARAMS['Path'].val, 399, 'Path to Import From') - if PARAMS['ImportType'] == DIR: - GUIPARAMS['ImageExt'] = Draw.String('Image Ext: ', CHG_EXT, minx + lP, maxy - (4*lineheight), 110, 19, GUIPARAMS['ImageExt'].val, 6, 'Image extension for batch directory importing (case insensitive)') - GUIPARAMS['PackImage'] = Draw.Toggle('Pack', NO_EVT, maxx - rP - 50, maxy - (4*lineheight), 50, 19, GUIPARAMS['PackImage'].val, 'Pack Image(s) into .Blend File') - - # Geometry and Viewport Options - BGL.glColor3f(*TextCol) - BGL.glRecti(minx+lP, maxy - (5*lineheight), maxx-rP, maxy - (5*lineheight) + 1) - BGL.glRasterPos2i(minx + lP, maxy-(5*lineheight) + 3) - Draw.Text('Geometry and Display Options', 'small') - - GUIPARAMS['PPU'] = Draw.Slider('Pixels Per Unit: ', NO_EVT, minx + lP, maxy - (6*lineheight), (maxx-minx)/2 - lP, 19, GUIPARAMS['PPU'].val, 1, 5000, 0, 'Set the Number of Pixels Per Blender Unit to preserve Image Size Relations') - GUIPARAMS['VPTransp'] = Draw.Toggle('Viewport Transparency', NO_EVT, minx + lP, maxy - (8*lineheight), (maxx-minx)/2 - lP, 2*lineheight - buPad, GUIPARAMS['VPTransp'].val, 'Display Alpha Transparency in the Viewport') - - GUIPARAMS['XOff'] = Draw.Slider('Offs X: ', NO_EVT, minx + lP + (maxx-minx)/2, maxy - (6*lineheight), (maxx-minx)/2 - lP - rP, 19, GUIPARAMS['XOff'].val, 0, 5.0, 0, 'Amount to Offset Each Imported in the X-Direction if Importing Multiple Images') - GUIPARAMS['YOff'] = Draw.Slider('Offs Y: ', NO_EVT, minx + lP + (maxx-minx)/2, maxy - (7*lineheight), (maxx-minx)/2 - lP - rP, 19, GUIPARAMS['YOff'].val, 0, 5.0, 0, 'Amount to Offset Each Imported in the Y-Direction if Importing Multiple Images') - GUIPARAMS['ZOff'] = Draw.Slider('Offs Z: ', NO_EVT, minx + lP + (maxx-minx)/2, maxy - (8*lineheight), (maxx-minx)/2 - lP - rP, 19, GUIPARAMS['ZOff'].val, 0, 5.0, 0, 'Amount to Offset Each Imported in the Z-Direction if Importing Multiple Images') - - # Material and Texture Options - BGL.glColor3f(*TextCol) - BGL.glRecti(minx+lP, maxy - (9*lineheight), maxx-rP, maxy - (9*lineheight) + 1) - BGL.glRasterPos2i(minx + lP, maxy-(9*lineheight) + 3) - Draw.Text('Material and Texture Options', 'small') - - half = (maxx-minx-lP-rP)/2 - GUIPARAMS['CopyMat'] = Draw.Toggle('Copy Existing Material', NO_EVT, minx + lP, maxy-(10*lineheight), half, 19, GUIPARAMS['CopyMat'].val, 'Copy an Existing Material') - if GUIPARAMS['CopyMat'].val: - menStr = compileMaterialList() - GUIPARAMS['MatId'] = Draw.Menu(menStr, NO_EVT, minx + lP, maxy - (11*lineheight), half, 19, GUIPARAMS['MatId'].val, 'Material to Copy Settings From') - else: - GUIPARAMS['MatCol'] = 
Draw.ColorPicker(NO_EVT, minx+lP, maxy - (13*lineheight), 40, (3*lineheight) - buPad, GUIPARAMS['MatCol'].val, 'Color of Newly Created Material') - GUIPARAMS['Ref'] = Draw.Slider('Ref: ', NO_EVT, minx +lP+45, maxy - (11*lineheight), half-45, 19, GUIPARAMS['Ref'].val, 0.0, 1.0, 0, 'Set the Ref Value for Created Materials') - GUIPARAMS['Spec'] = Draw.Slider('Spec: ', NO_EVT, minx +lP+45, maxy - (12*lineheight), half-45, 19, GUIPARAMS['Spec'].val, 0.0, 2.0, 0, 'Set the Spec Value for Created Materials') - GUIPARAMS['Hard'] = Draw.Slider('Hard: ', NO_EVT, minx +lP+45, maxy - (13*lineheight), half-45, 19, GUIPARAMS['Hard'].val, 1, 500, 0, 'Set the Hardness Value for Created Materials') - GUIPARAMS['Alpha'] = Draw.Slider('A: ', NO_EVT, minx +lP, maxy - (14*lineheight), half, 19, GUIPARAMS['Alpha'].val, 0.0, 1.0, 0, 'Set the Alpha Value for Created Materials') - - GUIPARAMS['ZTransp'] = Draw.Toggle('ZTransparency', NO_EVT, minx + lP, maxy - (15*lineheight), half, 19, GUIPARAMS['ZTransp'].val, 'Enable ZTransparency') - GUIPARAMS['Shadeless'] = Draw.Toggle('Shadeless', NO_EVT, minx + lP, maxy - (16*lineheight), half, 19, GUIPARAMS['Shadeless'].val, 'Enable Shadeless') - - GUIPARAMS['TexChan'] = Draw.Number('Texture Channel: ', NO_EVT, minx + lP+ half + buPad, maxy - (10*lineheight), half-rP, 19, GUIPARAMS['TexChan'].val, 1, 10, 'Texture Channel for Image Texture') - - GUIPARAMS['MPTCol'] = Draw.Toggle('Color', NO_EVT, minx + lP + half + buPad, maxy - (11*lineheight), half/2, 19, GUIPARAMS['MPTCol'].val, 'Map To Color Channel') - GUIPARAMS['MPTAlpha'] = Draw.Toggle('Alpha', NO_EVT, minx + lP + int((1.5)*half) + buPad, maxy - (11*lineheight), half/2 - rP, 19, GUIPARAMS['MPTAlpha'].val, 'Map To Alpha Channel') - - third = int((maxx-minx-lP-rP)/6) - GUIPARAMS['UseAlpha'] = Draw.Toggle('Use Alpha', NO_EVT, minx + lP + half + buPad, maxy - (12*lineheight), third, 19, GUIPARAMS['UseAlpha'].val, "Use the Images' Alpha Values") - GUIPARAMS['CalcAlpha'] = Draw.Toggle('Calc Alpha', NO_EVT, minx + lP + half + third + buPad, maxy - (12*lineheight), third, 19, GUIPARAMS['CalcAlpha'].val, "Calculate Images' Alpha Values") - GUIPARAMS['ExtendMode'] = Draw.Toggle('Extend', NO_EVT, minx+lP+half+third+third+buPad, maxy - (12*lineheight), third-3, 19, GUIPARAMS['ExtendMode'].val, "Use Extend texture mode. 
If deselected, Repeat is used") - GUIPARAMS['Seq'] = Draw.Toggle('Sequence', NO_EVT, minx + lP + half + buPad, maxy - (13*lineheight), half-rP, 19, GUIPARAMS['Seq'].val, 'Set the Image(s) to use a Sequence instead of a Still') - - if GUIPARAMS['Seq'].val and not PARAMS['ImportType'] == DIR: - GUIPARAMS['AutoRefresh'] = Draw.Toggle('Auto Refresh', NO_EVT, minx + lP + half + buPad, maxy - (14*lineheight), half/2, 19, GUIPARAMS['AutoRefresh'].val, 'Use Auto Refresh') - GUIPARAMS['Cyclic'] = Draw.Toggle('Cyclic', NO_EVT, minx + lP + half + buPad + half/2, maxy - (14*lineheight), half/2 - rP, 19, GUIPARAMS['Cyclic'].val, 'Repeat Frames Cyclically`') - - GUIPARAMS['Frames'] = Draw.Number('Frames: ', NO_EVT, minx +lP + half + buPad, maxy - (15*lineheight), half - rP, 19, GUIPARAMS['Frames'].val, 1, 30000, 'Sets the Number of Images of a Movie to Use') - GUIPARAMS['Offs'] = Draw.Number('Offs: ', NO_EVT, minx +lP + half + buPad, maxy - (16*lineheight), half/2, 19, GUIPARAMS['Offs'].val, -30000, 30000, 'Offsets the Number of the Frame to use in the Animation') - GUIPARAMS['StartFr'] = Draw.Number('StartFr: ', NO_EVT, minx +lP + half + buPad + half/2, maxy - (16*lineheight), half/2 - rP, 19, GUIPARAMS['StartFr'].val, 1, 30000, 'Sets the Global Starting Frame of the Movie') - elif GUIPARAMS['Seq'].val and PARAMS['ImportType'] == DIR: - BGL.glColor3f(*ErrCol) - BGL.glRasterPos2i(minx + lP + half + buPad + 7, maxy-(14 * lineheight) + 5) - Draw.Text('Sequence only available for Single Image Import', 'small') - - # Import Options - BGL.glColor3f(*TextCol) - BGL.glRecti(minx+lP, maxy - (17*lineheight), maxx-rP, maxy - (17*lineheight) + 1) - BGL.glRasterPos2i(minx + lP, maxy-(17*lineheight) + 3) - Draw.Text('Import', 'small') - - if GUIPARAMS['Path'].val and GUIPARAMS['ImageExt'].val or GUIPARAMS['Path'].val and PARAMS['ImportType'] == SINGLE: - Draw.PushButton('Import', DO_SCRIPT, minx + lP, maxy - (18*lineheight), 75, 19, "Import Image(s)") - else: - BGL.glColor3f(*ErrCol) - BGL.glRasterPos2i(minx+lP, maxy - (18*lineheight) + 5) - Draw.Text('A path and image type must be specified to import images') - - GUIPARAMS['RedrawImp'] = Draw.Toggle('Redraw During Import', NO_EVT, maxx - rP - 150, maxy - (18*lineheight), 150, 19, GUIPARAMS['RedrawImp'].val, 'Redraw the View as Images Import') - -Draw.Register(GUI, event, bevent) \ No newline at end of file diff --git a/release/scripts/image_auto_layout.py b/release/scripts/image_auto_layout.py deleted file mode 100644 index d19ba1da662..00000000000 --- a/release/scripts/image_auto_layout.py +++ /dev/null @@ -1,455 +0,0 @@ -#!BPY - -""" -Name: 'Consolidate into one image' -Blender: 243 -Group: 'Image' -Tooltip: 'Pack all texture images into 1 image and remap faces.' -""" - -__author__ = "Campbell Barton" -__url__ = ("blender", "blenderartists.org") -__version__ = "1.1a 2009/04/01" - -__bpydoc__ = """\ -This script makes a new image from the used areas of all the images mapped to the selected mesh objects. -Image are packed into 1 new image that is assigned to the original faces. -This is usefull for game models where 1 image is faster then many, and saves the labour of manual texture layout in an image editor. 
- -""" -# -------------------------------------------------------------------------- -# Auto Texture Layout v1.0 by Campbell Barton (AKA Ideasman) -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - - -# Function to find all the images we use -import Blender as B -from Blender.Mathutils import Vector, RotationMatrix -from Blender.Scene import Render -import BPyMathutils -BIGNUM= 1<<30 -TEXMODE= B.Mesh.FaceModes.TEX - -def pointBounds(points): - ''' - Takes a list of points and returns the - area, center, bounds - ''' - ymax= xmax= -BIGNUM - ymin= xmin= BIGNUM - - for p in points: - x= p.x - y= p.y - - if x>xmax: xmax=x - if y>ymax: ymax=y - - if x 0.1: - mat_pos= RotationMatrix( rot_angle, 2) - mat_neg= RotationMatrix( -rot_angle, 2) - - new_points_pos= [v*mat_pos for v in current_points] - area_pos, cent_pos, bounds_pos= pointBounds(new_points_pos) - - # 45d rotations only need to be tested in 1 direction. - if rot_angle == 45: - area_neg= area_pos - else: - new_points_neg= [v*mat_neg for v in current_points] - area_neg, cent_neg, bounds_neg= pointBounds(new_points_neg) - - - # Works! - #print 'Testing angle', rot_angle, current_area, area_pos, area_neg - - best_area= min(area_pos, area_neg, current_area) - if area_pos == best_area: - current_area= area_pos - cent= cent_pos - bounds= bounds_pos - current_points= new_points_pos - total_rot_angle+= rot_angle - elif rot_angle != 45 and area_neg == best_area: - current_area= area_neg - cent= cent_neg - bounds= bounds_neg - current_points= new_points_neg - total_rot_angle-= rot_angle - - rot_angle *= 0.5 - - # Return the optimal rotation. - return total_rot_angle - - -class faceGroup(object): - ''' - A Group of faces that all use the same image, each group has its UVs packed into a square. - ''' - __slots__= 'xmax', 'ymax', 'xmin', 'ymin',\ - 'image', 'faces', 'box_pack', 'size', 'ang', 'rot_mat', 'cent'\ - - def __init__(self, mesh_list, image, size, PREF_IMAGE_MARGIN): - self.image= image - self.size= size - self.faces= [f for me in mesh_list for f in me.faces if f.mode & TEXMODE and f.image == image] - - # Find the best rotation. 
- all_points= [uv for f in self.faces for uv in f.uv] - bountry_indicies= BPyMathutils.convexHull(all_points) - bountry_points= [all_points[i] for i in bountry_indicies] - - # Pre Rotation bounds - self.cent= pointBounds(bountry_points)[1] - - # Get the optimal rotation angle - self.ang= bestBoundsRotation(bountry_points) - self.rot_mat= RotationMatrix(self.ang, 2), RotationMatrix(-self.ang, 2) - - # Post rotation bounds - bounds= pointBounds([\ - ((uv-self.cent) * self.rot_mat[0]) + self.cent\ - for uv in bountry_points])[2] - - # Break the bounds into useable values. - xmin, ymin, xmax, ymax= bounds - - # Store the bounds, include the margin. - # The bounds rect will need to be rotated to the rotation angle. - self.xmax= xmax + (PREF_IMAGE_MARGIN/size[0]) - self.xmin= xmin - (PREF_IMAGE_MARGIN/size[0]) - self.ymax= ymax + (PREF_IMAGE_MARGIN/size[1]) - self.ymin= ymin - (PREF_IMAGE_MARGIN/size[1]) - - self.box_pack=[\ - 0.0, 0.0,\ - size[0]*(self.xmax - self.xmin),\ - size[1]*(self.ymax - self.ymin),\ - image.name] - - ''' - # default. - self.scale= 1.0 - - def set_worldspace_scale(self): - scale_uv= 0.0 - scale_3d= 0.0 - for f in self.faces: - for i in xrange(len(f.v)): - scale_uv+= (f.uv[i]-f.uv[i-1]).length * 0.1 - scale_3d+= (f.v[i].co-f.v[i-1].co).length * 0.1 - self.scale= scale_3d/scale_uv - ''' - - - - def move2packed(self, width, height): - ''' - Moves the UV coords to their packed location - using self.box_pack as the offset, scaler. - box_pack must be set to its packed location. - width and weight are the w/h of the overall packed area's bounds. - ''' - # packedLs is a list of [(anyUniqueID, left, bottom, width, height)...] - # Width and height in float pixel space. - - # X Is flipped :/ - #offset_x= (1-(self.box_pack[1]/d)) - (((self.xmax-self.xmin) * self.image.size[0])/d) - offset_x= self.box_pack[0]/width - offset_y= self.box_pack[1]/height - - for f in self.faces: - for uv in f.uv: - uv_rot= ((uv-self.cent) * self.rot_mat[0]) + self.cent - uv.x= offset_x+ (((uv_rot.x-self.xmin) * self.size[0])/width) - uv.y= offset_y+ (((uv_rot.y-self.ymin) * self.size[1])/height) - -def consolidate_mesh_images(mesh_list, scn, PREF_IMAGE_PATH, PREF_IMAGE_SIZE, PREF_KEEP_ASPECT, PREF_IMAGE_MARGIN): #, PREF_SIZE_FROM_UV=True): - ''' - Main packing function - - All meshes from mesh_list must have faceUV else this function will fail. - ''' - face_groups= {} - - for me in mesh_list: - for f in me.faces: - if f.mode & TEXMODE: - image= f.image - if image: - try: - face_groups[image.name] # will fail if teh groups not added. - except: - try: - size= image.size - except: - B.Draw.PupMenu('Aborting: Image cold not be loaded|' + image.name) - return - - face_groups[image.name]= faceGroup(mesh_list, image, size, PREF_IMAGE_MARGIN) - - if not face_groups: - B.Draw.PupMenu('No Images found in mesh(es). Aborting!') - return - - if len(face_groups)<2: - B.Draw.PupMenu('Only 1 image found|Select a mesh(es) using 2 or more images.') - return - - ''' - if PREF_SIZE_FROM_UV: - for fg in face_groups.itervalues(): - fg.set_worldspace_scale() - ''' - - # RENDER THE FACES. - render_scn= B.Scene.New() - render_scn.makeCurrent() - render_context= render_scn.getRenderingContext() - render_context.setRenderPath('') # so we can ignore any existing path and save to the abs path. - - PREF_IMAGE_PATH_EXPAND= B.sys.expandpath(PREF_IMAGE_PATH) + '.png' - - # TEST THE FILE WRITING. - try: - # Can we write to this file??? 
- f= open(PREF_IMAGE_PATH_EXPAND, 'w') - f.close() - except: - B.Draw.PupMenu('Error%t|Could not write to path|' + PREF_IMAGE_PATH_EXPAND) - return - - render_context.imageSizeX(PREF_IMAGE_SIZE) - render_context.imageSizeY(PREF_IMAGE_SIZE) - render_context.enableOversampling(True) - render_context.setOversamplingLevel(16) - render_context.setRenderWinSize(100) - render_context.setImageType(Render.PNG) - render_context.enableExtensions(True) - render_context.enablePremultiply() # No alpha needed. - render_context.enableRGBAColor() - render_context.threads = 2 - - #Render.EnableDispView() # Broken?? - - # New Mesh and Object - render_mat= B.Material.New() - render_mat.mode |= \ - B.Material.Modes.SHADELESS | \ - B.Material.Modes.TEXFACE | \ - B.Material.Modes.TEXFACE_ALPHA | \ - B.Material.Modes.ZTRANSP - - render_mat.setAlpha(0.0) - - render_me= B.Mesh.New() - render_me.verts.extend([Vector(0,0,0)]) # Stupid, dummy vert, preverts errors. when assigning UV's/ - render_ob= B.Object.New('Mesh') - render_ob.link(render_me) - render_scn.link(render_ob) - render_me.materials= [render_mat] - - - # New camera and object - render_cam_data= B.Camera.New('ortho') - render_cam_ob= B.Object.New('Camera') - render_cam_ob.link(render_cam_data) - render_scn.link(render_cam_ob) - render_scn.objects.camera = render_cam_ob - - render_cam_data.type= 'ortho' - render_cam_data.scale= 1.0 - - - # Position the camera - render_cam_ob.LocZ= 1.0 - render_cam_ob.LocX= 0.5 - render_cam_ob.LocY= 0.5 - - # List to send to to boxpack function. - boxes2Pack= [ fg.box_pack for fg in face_groups.itervalues()] - packWidth, packHeight = B.Geometry.BoxPack2D(boxes2Pack) - - if PREF_KEEP_ASPECT: - packWidth= packHeight= max(packWidth, packHeight) - - - # packedLs is a list of [(anyUniqueID, left, bottom, width, height)...] - # Re assign the face groups boxes to the face_group. - for box in boxes2Pack: - face_groups[ box[4] ].box_pack= box # box[4] is the ID (image name) - - - # Add geometry to the mesh - for fg in face_groups.itervalues(): - # Add verts clockwise from the bottom left. - _x= fg.box_pack[0] / packWidth - _y= fg.box_pack[1] / packHeight - _w= fg.box_pack[2] / packWidth - _h= fg.box_pack[3] / packHeight - - render_me.verts.extend([\ - Vector(_x, _y, 0),\ - Vector(_x, _y +_h, 0),\ - Vector(_x + _w, _y +_h, 0),\ - Vector(_x + _w, _y, 0),\ - ]) - - render_me.faces.extend([\ - render_me.verts[-1],\ - render_me.verts[-2],\ - render_me.verts[-3],\ - render_me.verts[-4],\ - ]) - - target_face= render_me.faces[-1] - target_face.image= fg.image - target_face.mode |= TEXMODE - - # Set the UV's, we need to flip them HOZ? - target_face.uv[0].x= target_face.uv[1].x= fg.xmax - target_face.uv[2].x= target_face.uv[3].x= fg.xmin - - target_face.uv[0].y= target_face.uv[3].y= fg.ymin - target_face.uv[1].y= target_face.uv[2].y= fg.ymax - - for uv in target_face.uv: - uv_rot= ((uv-fg.cent) * fg.rot_mat[1]) + fg.cent - uv.x= uv_rot.x - uv.y= uv_rot.y - - render_context.render() - Render.CloseRenderWindow() - render_context.saveRenderedImage(PREF_IMAGE_PATH_EXPAND) - - #if not B.sys.exists(PREF_IMAGE_PATH_EXPAND): - # raise 'Error!!!' - - - # NOW APPLY THE SAVED IMAGE TO THE FACES! - #print PREF_IMAGE_PATH_EXPAND - try: - target_image= B.Image.Load(PREF_IMAGE_PATH_EXPAND) - except: - B.Draw.PupMenu('Error: Could not render or load the image at path|' + PREF_IMAGE_PATH_EXPAND) - return - - # Set to the 1 image. 
- for me in mesh_list: - for f in me.faces: - if f.mode & TEXMODE and f.image: - f.image= target_image - - for fg in face_groups.itervalues(): - fg.move2packed(packWidth, packHeight) - - scn.makeCurrent() - render_me.verts= None # free a tiny amount of memory. - B.Scene.Unlink(render_scn) - target_image.makeCurrent() - - -def main(): - scn= B.Scene.GetCurrent() - scn_objects = scn.objects - ob= scn_objects.active - - if not ob or ob.type != 'Mesh': - B.Draw.PupMenu('Error, no active mesh object, aborting.') - return - - # Create the variables. - # Filename without path or extension. - newpath= B.Get('filename').split('/')[-1].split('\\')[-1].replace('.blend', '') - - PREF_IMAGE_PATH = B.Draw.Create('//%s_grp' % newpath) - PREF_IMAGE_SIZE = B.Draw.Create(1024) - PREF_IMAGE_MARGIN = B.Draw.Create(6) - PREF_KEEP_ASPECT = B.Draw.Create(0) - PREF_ALL_SEL_OBS = B.Draw.Create(0) - - pup_block = [\ - 'Image Path: (no ext)',\ - ('', PREF_IMAGE_PATH, 3, 100, 'Path to new Image. "//" for curent blend dir.'),\ - 'Image Options', - ('Pixel Size:', PREF_IMAGE_SIZE, 64, 4096, 'Image Width and Height.'),\ - ('Pixel Margin:', PREF_IMAGE_MARGIN, 0, 64, 'Use a margin to stop mipmapping artifacts.'),\ - ('Keep Aspect', PREF_KEEP_ASPECT, 'If disabled, will stretch the images to the bounds of the texture'),\ - 'Texture Source',\ - ('All Sel Objects', PREF_ALL_SEL_OBS, 'Combine all selected objects into 1 texture, otherwise active object only.'),\ - ] - - if not B.Draw.PupBlock('Consolidate images...', pup_block): - return - - PREF_IMAGE_PATH= PREF_IMAGE_PATH.val - PREF_IMAGE_SIZE= PREF_IMAGE_SIZE.val - PREF_IMAGE_MARGIN= float(PREF_IMAGE_MARGIN.val) # important this is a float otherwise division wont work properly - PREF_KEEP_ASPECT= PREF_KEEP_ASPECT.val - PREF_ALL_SEL_OBS= PREF_ALL_SEL_OBS.val - - if PREF_ALL_SEL_OBS: - mesh_list= [ob.getData(mesh=1) for ob in scn_objects.context if ob.type=='Mesh'] - # Make sure we have no doubles- dict by name, then get the values back. - - for me in mesh_list: me.tag = False - - mesh_list_new = [] - for me in mesh_list: - if me.faceUV and me.tag==False: - me.tag = True - mesh_list_new.append(me) - - # replace list with possible doubles - mesh_list = mesh_list_new - - else: - mesh_list= [ob.getData(mesh=1)] - if not mesh_list[0].faceUV: - B.Draw.PupMenu('Error, active mesh has no images, Aborting!') - return - - consolidate_mesh_images(mesh_list, scn, PREF_IMAGE_PATH, PREF_IMAGE_SIZE, PREF_KEEP_ASPECT, PREF_IMAGE_MARGIN) - B.Window.RedrawAll() - -if __name__=='__main__': - main() diff --git a/release/scripts/image_billboard.py b/release/scripts/image_billboard.py deleted file mode 100644 index 54f0f7c5c55..00000000000 --- a/release/scripts/image_billboard.py +++ /dev/null @@ -1,269 +0,0 @@ -#!BPY -""" -Name: 'Billboard Render on Active' -Blender: 242 -Group: 'Image' -Tooltip: 'Selected objects and lamps to rendered faces on the act mesh' -""" -__author__= "Campbell Barton" -__url__= ["blender", "blenderartist"] -__version__= "1.0" - -__bpydoc__= """\ -Render Billboard Script -This can texture a simple billboard mesh from any number of selected objects. - -Renders objects in the selection to quad faces on the active mesh. - -Usage -* Light your model or enable the shadless flag so it is visible -* Make a low poly mesh out of quads with 90d corners. (this will be you billboard mesh) -* Select the model and any lamps that light it -* Select the billboard mesh so that it is active -* Run this script, Adjust settings such as image size or oversampling. 
-* Select a place to save the PNG image. -* Once the script has finished running return to the 3d view by pressing Shift+F5 -* To see the newly applied textures change the drawtype to 'Textured Solid' -""" -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Script copyright (C) Campbell J Barton 2006 -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -import Blender -from Blender import Mesh, Material, Draw -import BPyMathutils -import bpy -import BPyRender -from Blender.Scene import Render - -# reload(BPyRender) -# reload(BPyMathutils) - -import os -Vector= Blender.Mathutils.Vector - -def alpha_mat(image): - # returns a material useable for - mtl= bpy.data.materials.new() - mtl.mode |= (Material.Modes.SHADELESS | Material.Modes.ZTRANSP | Material.Modes.FULLOSA | Material.Modes.TEXFACE | Material.Modes.TEXFACE_ALPHA ) - return mtl - -# PupBlock Settings -GLOBALS= {} -PREF_RES= Draw.Create(512) -PREF_TILE_RES= Draw.Create(256) -PREF_AA = Draw.Create(1) -PREF_ALPHA= Draw.Create(1) -PREF_Z_OFFSET = Draw.Create(10.0) -PREF_IMG_PACK= Draw.Create(1) - - -def save_billboard(PREF_IMAGE_PATH): - Blender.Window.WaitCursor(1) - # remove png, add it later - PREF_IMAGE_PATH= PREF_IMAGE_PATH.replace('.png', '') - - ob_sel= GLOBALS['ob_sel'] - me_ob = GLOBALS['me_ob'] - me_data = GLOBALS['me_data'] - - time= Blender.sys.time() - - me_mat= me_ob.matrixWorld - - # Render images for all faces - face_data= [] # Store faces, images etc - boxes2Pack= [] - me_data.faceUV= True - - for i, f in enumerate(me_data.faces): - no= f.no - - # Offset the plane by the zoffset on the faces normal - plane= [v.co * me_mat for v in f] - - # Horizontal stacking, make sure 0,1 and 2,3 are the longest - if\ - (plane[0]-plane[1]).length + (plane[2]-plane[3]).length < \ - (plane[1]-plane[2]).length + (plane[3]-plane[0]).length: - plane.append(plane.pop(0)) - rot90= True - else: - rot90= False - - no= Blender.Mathutils.QuadNormal(*plane) - plane= [v + no*PREF_Z_OFFSET.val for v in plane] - - cent= (plane[0]+plane[1]+plane[2]+plane[3] ) /4.0 - camera_matrix= BPyMathutils.plane2mat(plane) - tmp_path= '%s_%d' % (PREF_IMAGE_PATH, i) - img= BPyRender.imageFromObjectsOrtho(ob_sel, tmp_path, PREF_TILE_RES.val, PREF_TILE_RES.val, PREF_AA.val, PREF_ALPHA.val, camera_matrix) - img.reload() - #img.pack() # se we can keep overwriting the path - #img.filename= "" - - if rot90: - f.uv=Vector(1,1), Vector(0,1), Vector(0,0), Vector(1,0) - else: - f.uv= Vector(0,1), Vector(0,0), Vector(1,0), Vector(1,1) - - if not PREF_IMG_PACK.val: - f.mode |= Mesh.FaceModes.TEX - f.image = img - - if PREF_ALPHA.val: - f.transp |= Mesh.FaceTranspModes.ALPHA - else: - w= ((plane[0]-plane[1]).length + (plane[2]-plane[3]).length)/2 - h= ((plane[1]-plane[2]).length + (plane[3]-plane[0]).length)/2 - - 
face_data.append( (f, img) ) - boxes2Pack.append( [0.0,0.0,h, w, i] ) - - if PREF_IMG_PACK.val: - # pack the quads into a square - packWidth, packHeight = Blender.Geometry.BoxPack2D(boxes2Pack) - - render_obs= [] - - render_mat= alpha_mat(img) - - # Add geometry to the mesh - for box in boxes2Pack: - i= box[4] - - orig_f, img= face_data[i] - - # New Mesh and Object - - render_me= bpy.data.meshes.new() - - render_ob= Blender.Object.New('Mesh') - render_me.materials= [render_mat] - render_ob.link(render_me) - - render_obs.append(render_ob) - - # Add verts clockwise from the bottom left. - _x= box[0] / packWidth - _y= box[1] / packHeight - _w= box[2] / packWidth - _h= box[3] / packHeight - - - render_me.verts.extend([\ - Vector(_x, _y, 0),\ - Vector(_x, _y +_h, 0),\ - Vector(_x + _w, _y +_h, 0),\ - Vector(_x + _w, _y, 0),\ - ]) - - render_me.faces.extend(list(render_me.verts)) - render_me.faceUV= True - - render_me.faces[0].uv = [Vector(0,0), Vector(0,1), Vector(1,1), Vector(1,0)] - render_me.faces[0].image = img - - # Set the UV's, we need to flip them HOZ? - for uv in orig_f.uv: - uv.x = _x + (uv.x * _w) - uv.y = _y + (uv.y * _h) - - target_image= BPyRender.imageFromObjectsOrtho(render_obs, PREF_IMAGE_PATH, PREF_RES.val, PREF_RES.val, PREF_AA.val, PREF_ALPHA.val, None) - target_image.reload() # incase your overwriting an existing image. - - # Set to the 1 image. - for f in me_data.faces: - f.image= target_image - if PREF_ALPHA.val: - f.transp |= Mesh.FaceTranspModes.ALPHA - - # Free the images data and remove - for data in face_data: - img= data[1] - os.remove(img.filename) - img.reload() - - # Finish pack - - me_data.update() - me_ob.makeDisplayList() - Blender.Window.WaitCursor(0) - print '%.2f secs taken' % (Blender.sys.time()-time) - - -def main(): - scn= bpy.data.scenes.active - ob_sel= list(scn.objects.context) - - PREF_KEEP_ASPECT= False - - # Error Checking - if len(ob_sel) < 2: - Draw.PupMenu("Error%t|Select 2 mesh objects") - return - - me_ob= scn.objects.active - - if not me_ob: - Draw.PupMenu("Error%t|No active mesh selected.") - - try: - ob_sel.remove(me_ob) - except: - pass - - if me_ob.type != 'Mesh': - Draw.PupMenu("Error%t|Active Object must be a mesh to write billboard images too") - return - - me_data= me_ob.getData(mesh=1) - - for f in me_data.faces: - if len(f) != 4: - Draw.PupMenu("Error%t|Active mesh must have only quads") - return - - - # Get user input - block = [\ - 'Image Pixel Size',\ - ("Packed Size: ", PREF_RES, 128, 2048, "Pixel width and height to render the billboard to"),\ - ("Tile Size: ", PREF_TILE_RES, 64, 1024, "Pixel width and height for each tile to render to"),\ - 'Render Settings',\ - ("Pack Final", PREF_IMG_PACK , "Pack the image for each face into images into a single image"),\ - ("Oversampling", PREF_AA , "Higher quality woth extra sampling"),\ - ("Alpha Clipping", PREF_ALPHA , "Render empty areas as transparent"),\ - ("Cam ZOffset: ", PREF_Z_OFFSET, 0.1, 100, "Distance to place the camera away from the quad when rendering")\ - ] - - if not Draw.PupBlock("Billboard Render", block): - return - - # Set globals - GLOBALS['ob_sel'] = ob_sel - GLOBALS['me_ob'] = me_ob - GLOBALS['me_data'] = me_data - - Blender.Window.FileSelector(save_billboard, 'SAVE BILLBOARD', Blender.sys.makename(ext='.png')) - # save_billboard('/tmp/test.png') - -if __name__=='__main__': - main() diff --git a/release/scripts/image_edit.py b/release/scripts/image_edit.py deleted file mode 100644 index cae40b74097..00000000000 --- a/release/scripts/image_edit.py +++ /dev/null @@ 
-1,158 +0,0 @@ -#!BPY -""" -Name: 'Edit Externally' -Blender: 242a -Group: 'Image' -Tooltip: 'Open in an application for editing. (hold Shift to configure)' -""" - -__author__ = "Campbell Barton" -__url__ = ["blender", "blenderartists.org"] -__version__ = "1.0" -__bpydoc__ = """\ -This script opens the current image in an external application for editing. - -Usage: -Choose an image for editing in the UV/Image view. - -To configure the application to open the image with, hold Shift as you -click on this menu item. - -For first time users try running the default application for your -operating system. If the application does not open you can type in -the full path. You can choose that the last entered application will -be saved as a default. - -* Note, default commants for opening an image are "start" for win32 -and "open" for macos. This will use the system default associated -application. -""" - -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Script copyright (C) Campbell J Barton 2006 -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -import Blender -from Blender import Image, sys, Draw, Registry - -try: - import subprocess - import sys as py_sys - platform = py_sys.platform -except: - Draw.PupMenu('Error: Recent version of Python not installed.') - subprocess=None - -def os_run(appstring, filename): - ''' - Run the app, take into account different python versions etc - looks like python 2.6 wants a list for - ''' - - # evil trick, temp replace spaces so we can allow spaces in filenames - # also allows multiple instances of %f - appstring = appstring.replace(' ', '\t') - appstring = appstring.replace('%f', filename) - appstring = appstring.split('\t') - - print ' '.join(appstring) - - try: # only python 2.6 wants a list? - p = subprocess.Popen(appstring) - except: - p = subprocess.Popen(' '.join(appstring)) - - -def edit_extern(image=None): - - if not image: - image = Image.GetCurrent() - - if not image: # Image is None - Draw.PupMenu('ERROR: Please select active Image.') - return - if image.packed: - Draw.PupMenu('ERROR: Image is packed, unpack before editing.') - return - - imageFileName = sys.expandpath( image.filename ) - - if not sys.exists(imageFileName): - Draw.PupMenu('ERROR: Image path does not exist.') - return - - pupblock = [imageFileName.split('/')[-1].split('\\')[-1]] - - new_text= False - try: - appstring = Registry.GetKey('ExternalImageEditor', True) - appstring = appstring['path'] - - # for ZanQdo if he removed the path from the textbox totaly. ;) - Cam - if not appstring or appstring.find('%f')==-1: - new_text= True - except: - new_text= True - - if new_text: - pupblock.append('first time, set path.') - if platform == 'win32': - # Example of path to popular image editor... 
;-) - # appstring = '"C:\\Program Files\\Adobe\\Photoshop CS\\photoshop.exe" "%f"' - # Have to add "cmd /c" to make sure we're using Windows shell. - appstring = 'cmd /c start "" /B "%f"' - elif platform == 'darwin': - appstring = 'open "%f"' - else: - appstring = 'gimp %f' - - appstring_but = Draw.Create(appstring) - save_default_but = Draw.Create(0) - - pupblock.append(('editor: ', appstring_but, 0, 99, 'Path to application, %f will be replaced with the image path.')) - pupblock.append(('Set Default', save_default_but, 'Store this path in the blender registry.')) - - # Only configure if Shift is held, - if Blender.Window.GetKeyQualifiers() & Blender.Window.Qual.SHIFT: - if not Draw.PupBlock('External Image Editor...', pupblock): - return - - appstring = appstring_but.val - save_default= save_default_but.val - - if save_default: - Registry.SetKey('ExternalImageEditor', {'path':appstring}, True) - - if appstring.find('%f') == -1: - Draw.PupMenu('ERROR: No filename specified! ("%f")') - return - - # ------------------------------- - - os_run(appstring, imageFileName) - - - -def main(): - edit_extern() - - -if __name__ == '__main__' and subprocess: - main() diff --git a/release/scripts/import_dxf.py b/release/scripts/import_dxf.py deleted file mode 100644 index b3bee11c464..00000000000 --- a/release/scripts/import_dxf.py +++ /dev/null @@ -1,6225 +0,0 @@ -#!BPY - -""" -Name: 'Autodesk DXF (.dxf .dwg)' -Blender: 249 -Group: 'Import' -Tooltip: 'Import for DWG/DXF geometry data.' -""" -__author__ = 'Kitsu(Ed Blake) & migius(Remigiusz Fiedler)' -__version__ = '1.12 - 2009.06.16 by migius' -__url__ = ["http://blenderartists.org/forum/showthread.php?t=84319", - "http://wiki.blender.org/index.php/Scripts/Manual/Import/DXF-3D"] -__email__ = ["migius(at)4d-vectors.de","Kitsune_e(at)yahoo.com"] -__bpydoc__ = """\ -This script imports objects from DWG/DXF (2d/3d) into Blender. - -This script imports 2d and 3d geometery from DXF files. -It supports DWG format too, with help of an external converter. -Supported DXF format versions: from (r2.5) r12 up to r2008. -Enhanced features are: -- configurable object filtering and geometry manipulation, -- configurable material pre-processing, -- DXF-code analyze and reporting. - -Supported DXF r12 objects: -LINE, -POINT, -SOLID, -TRACE, -TEXT, -INSERT (=block), -MINSERT (=array of blocks), -CIRCLE, -ARC, -3DFACE, -2d-POLYLINE (=in plane, incl. arc, variable-width, curve, spline), -3d-POLYLINE (=non-plane), -3d-POLYMESH, -3d-POLYFACE, -VIEW, VPORT -XREF (External Reference). - -Supported DXF>r12 objects: -ELLIPSE, -LWPOLYLINE (LightWeight Polyline), -SPLINE, -(todo v1.13) MLINE, -(todo v1.13) MTEXT - -Unsupported objects: -DXF r12: DIMENSION. -DXF>r12: GROUP, RAY/XLINE, LEADER, 3DSOLID, BODY, REGION, dynamic BLOCK - -Supported geometry: 2d and 3d DXF-objects. -Curves imported as Blender curves or meshes optionally. - -Supported layout modes: -"model space" is default, -"paper space" as option (= "layout views") - -Supported scene definition objects produced with AVE_RENDER: -scene: selection of lights assigned to the camera, -lights: DIRECT, OVERHEAD, SH_SPOT, -(wip v1.13 import of AVE_RENDER material definitions) - -Hierarchy: -Entire DXF BLOCK hierarchy is preserved after import into Blender -(BLOCKs as groups on layer19, INSERTs as dupliGroups on target layer). 
- -Supported properties: -visibility status, -frozen status, -thickness, -width, -color, -layer, -(todo v1.13: XDATA, grouped status) -It is recommended to use DXF-object properties for assign Blender materials. - -Notes: -- Recommend that you run 'RemoveDoubles' on each imported mesh after using this script -- Blocks are created on layer 19 then referenced at each insert point. -- support for DXF-files up to 160MB on systems with 1GB RAM -- DXF-files with over 1500 objects decrease import performance. -The problem is not the inefficiency of python-scripting but Blenders performance -in creating new objects in scene database - probably a database management problem. - -""" - -""" -History: - v1.0 - 2007/2008/2009 by migius - planned tasks: - -- (to see more, search for "--todo--" in script code) - -- command-line-mode/batch-mode - -- in-place-editing for dupliGroups - -- support for MLINE (is exported to r12 as BLOCK*Unnamed with LINEs) - -- support for MTEXT (is exported to r12 as TEXT???) - -- blender_object.properties['dxf_layer_name'] - -- better support for long dxf-layer-names - -- add configuration file.ini handles multiple material setups - -- added f_layerFilter - -- to-check: obj/mat/group/_mapping-idea from ideasman42 - -- curves: added "fill/non-fill" option for closed curves: CIRCLEs,ELLIPSEs,POLYLINEs - -- "normalize Z" option to correct non-planar figures - -- LINEs need "width" in 3d-space incl vGroups - -- support width_force for LINEs/ELLIPSEs = "solidify" - -- add better support for color_index BYLAYER=256, BYBLOCK=0 - -- bug: "oneMesh" produces irregularly errors - -- bug: Registry recall from hd_cache ?? only win32 bug?? - -- support DXF-definitions of autoshade: scene, lights and cameras - -- support ortho mode for VIEWs and VPORTs as cameras - - v1.12 - 2009.06.16 by migius - d7 fix for ignored BLOCKs (e.g. 
*X) which are members of other BLOCKs - v1.12 - 2009.05.27 by migius - d6 bugfix negative scaled INSERTs - isLeftHand(Matrix) check - v1.12 - 2009.05.26 by migius - d5 changed to the new 2.49 method Vector.cross() - d5 bugfix WORLDY(1,1,0) to (0,1,0) - v1.12 - 2009.04.11 by migius - d4 added DWG support, Stani Michiels idea for binding an extern DXF-DWG-converter - v1.12 - 2009.03.14 by migius - d3 removed all set()functions (problem with osx/python<2.4 reported by Blinkozo) - d3 code-cleaning - v1.12 - 2009.01.14 by migius - d2 temp patch for noname BLOCKS (*X,*U,*D) - v1.12 - 2008.11.16 by migius - d1 remove try_finally: cause not supported in python <2.5 - d1 add Bezier curves bevel radius support (default 1.0) - v1.12 - 2008.08.03 by migius - c2 warningfix: relocating of globals: layersmap, oblist - c2 modif UI: buttons newScene+targetLayer moved to start panel - v1.12 - 2008.07.04 by migius - c1 added control Curve's OrderU parameter - c1 modif UI: preset buttons X-2D-3D moved to start panel - b6 added handling exception of not registered LAYERs (Hammer-HL-editor DXF output) - b5 rebuild UI: global preset 2D for Curve-Import - b5 added UI-options: PL-MESH N+N plmesh_flip and normals_out - b5 added support for SPLINEs, added control OrderU parameter - b5 rewrote draw module for NURBS_curve and Bezier_curve - v1.12 - 2008.06.22 by migius - b4 change versioning system 1.0.12 -> 1.12 - b4 print at start version-info to console - b3 bugfix: ob.name conflict with existing meshes (different ob.name/mesh.name) - v1.0.12: 2008.05.24 by migius - b2 added support for LWPOLYLINEs - b2 added support for ProE in readerDXF.py - v1.0.12: 2008.02.08 by migius - b1 update: object = Object.Get(obname) -> f_getSceChild().getChildren() - a9 bugfix by non-existing tables views, vports, layers (Kai reported) - v1.0.12: 2008.01.17 by migius - a8 lately used INI-dir/filename persistently stored in Registry - a8 lately used DXF-dir/filename persistently stored in Registry - a7 fix missing layersmap{} for dxf-files without "section:layer" - a6 added support for XREF external referenced BLOCKs - a6 check for bug in AutoCAD2002:DXFr12export: ELLIPSE->POLYLINE_ARC fault angles - a6 support VIEWs and VPORTs as cameras: ortho and perspective mode - a6 save resources through ignoring unused BLOCKs (not-inserted or on frozen/blocked layers) - a6 added try_finally: f.close() for all IO-files - a6 added handling for TypeError raise - a5 bugfix f_getOCS for (0,0,z!=1.0) (ellipse in Kai's dxf) - a4 added to analyzeTool: report about VIEWs, VPORTs, unused/xref BLOCKs - a4 bugfix: individual support for 2D/3DPOLYLINE/POLYMESH - a4 added to UI: (*wip)BLOCK-(F): name filtering for BLOCKs - a4 added to UI: BLOCK-(n): filter noname/hatch BLOCKs *X... - a2 g_scale_as is no more GUI_A-variable - a2 bugfix "material": negative sign color_index - a2 added support for BLOCKs defined with origin !=(0,0,0) - a1 added 'global.reLocation-vector' option - - v1.0.11: 2007.11.24 by migius - c8 added 'curve_resolution_U' option - c8 added context_sensitivity for some UI-buttons - c8 bugfix ELLIPSE rotation, added closed_variant and caps - c7 rebuild UI: new layout, grouping and meta-buttons - c6 rewritten support for ELLIPSE mesh & curve representation - c6 restore selector-buttons for DXF-drawTypes: LINE & Co - c6 change header of INI/INF-files: # at begin - c6 apply scale(1,1,1) after glob.Scale for all mesh objects, not for curve objects. 
- c5 fixing 'material_on' option - c4 added "analyze DXF-file" UI-option: print LAYER/BLOCK-dependences into a textfile - c3 human-formating of data in INI-Files - c2 added "caps" for closed Bezier-curves - c2 added "set elevation" UI-option - c1 rewrite POLYLINE2d-arc-segments Bezier-interpreter - b9 many bugs fixed - b9 rewrite POLYLINE2d-arc-segments trimming (clean-trim) - b8 added "import from frozen layers" UI-option - b8 added "import from paper space" UI-option - b8 support Bezier curves for LINEs incl.thickness(0.0-10.0) - b8 added meshSmooth_on for circle/arc/polyline - b8 added vertexGroups for circle/arc - b7 added width_force for ARCs/CIRCLEs = "thin_box" option - b3 cleanup code, rename f_drawArc/Bulg->f_calcArc/Bulg - b2 fixing material assignment by LAYER+COLOR - b1 fixing Bezier curves representation of POLYLINEs-arc-segments - b0 added global_scale_presets: "yard/feet/inch to meter" - - v1.0.10: 2007.10.18 by migius - a6 bugfix CircleDrawCaps for OSX - a5 added two "curve_res" UI-buttons for Bezier curves representation - a5 improved Bezier curves representation of circles/arcs: correct handlers - a4 try to fix malformed endpoints of Blender curves of ARC/POLYLINE-arc segments. - a3 bugfix: open-POLYLINEs with end_point.loc==start_point.loc - a2 bugfix: f_transform for OCS=(0,0,-1) oriented objects - a1 added "fill_on=caps" option to draw top and bottom sides of CIRCLEs and ELLIPSEs - a1 rewrite f_CIRCLE.Draw: from Mesh.Primitive to Mesh - a1 bugfix "newScene"-mode: all Cylinders/Arcs were drawn at <0,0,0>location - - v1.0.beta09: 2007.09.02 by migius - g5 redesign UI: grouping of buttons - g3 update multi-import-mode: <*.*> button - g- added multi-import-mode: (path/*) for importing many dxf-files at once - g- added import into newScene - g- redesign UI: user presets, into newScene-import - f- cleanup code - f- bugfix: thickness for Bezier/Bsplines into Blender-curves - f- BlenderWiki documentation, on-line Manual - f- added import POLYLINE-Bsplines into Blender-NURBSCurves - f- added import POLYLINE-arc-segments into Blender-BezierCurves - f- added import POLYLINE-Bezier-curves into Blender-Curves - d5 rewrite: Optimization Levels, added 'directDrawing' - d4 added: f_set_thick(controlled by ini-parameters) - d4 bugfix: face-normals in objects with minus thickness - d4 added: placeholder'Empty'-size in f_Insert.draw - d3 rewrite f_Text.Draw: added support for all Text's parameters - d2 redesign: progressbar - e- tuning by ideasman42: better use of the Py API. 
- c- tuning by ideasman42 - b- rewrite f_Text.Draw rotation/transform - b- bugfix: POLYLINE-segment-intersection more reliable now - b- bugfix: circle:_thic, 'Empties':no material_assignment - b- added material assignment (from layer and/or color) - a- added empty, cylinder and UVsphere for POINTs - a- added support for 2d-POLYLINE: splines, fitted curves, fitted surfaces - a- redesign f_Drawer for block_definitions - a- rewrite import into Blender-Curve-Object - - v1.0.beta08 - 2007.07.27 by migius: "full 3d"-release - l- bugfix: solid_vgroups, clean:scene.objects.new() - l- redesign UI to standard Draw.Register+FileSelector, advanced_config_option - k- bugfix UI:fileSelect() for MacOSX os.listdir() - k- added reset/save/load for config-data - k- redesign keywords/drawTypes/Draw.Create_Buttons - j- new UI using UIBlock() with own FileSelector, cause problem Window.FileSelector() - i- rewritten Class:Settings for better config-parameter management - h- bugfix: face-normals in objects with minus thickness - h- added Vertex-Groups in POLYLINE and SOLID meshes, for easy material assignment - h- beautify code, whitespace->tabs - h- added settings.thic_force switch for forcing thickness - h- added "one Mesh" option for all entities from the same Layer, sorted in
- Vertex-Groups(color_name) (fewer objects = better import performance) - g- rewrote: insert-point-handle-object is a small tetrahedron - e- bugfix: closed-polymesh3d - - rewrote: UI, type_map.keys, f_drawer, all class_f_draw(added "settings" as attribut) - - added 2d/3d-support for Polyline_Width incl. angle intersection - beta07: 2007.06.19 by migius - - added 3d-support for LWPolylines - - added 2d/3d-support for Points - beta06: 2007.06.15 by migius - - cleanup code - - added 2d/3d-support for MINSERT=BlockArray in f_drawer, added f_rotXY_Vec - beta05: 2007.06.14 by migius - - added 2d/3d-support for 3d-PolyLine, PolyMesh and PolyFace - - added Global-Scale for size control of imported scenes - beta04: 2007.06.12 by migius - - rewrote the f_drawBulge for correct import the arc-segments of Polylines - beta03: 2007.06.10 by migius - - rewrote interface - beta02: 2007.06.09 by migius - - added 3d-support for Arcs and Circles - - added support for Object_Thickness(=height) - beta01: 2007.06.08 by migius - - added 3d-support for Blocks/Inserts within nested-structures - - rewrote f_transform for correct 3d-location/3d-rotation - - added 3d-support Lines, 3dFaces - - added 2d+3d-support for Solids and Traces - - v0.9 - 2007.01 by kitsu: (for 2.43) - - first draft of true POLYLINE import - - - - v0.8 - 2006.12 by kitsu: - - first draft of object space coordinates OCS import - - - - v0.5b - 2006.10 by kitsu: (for 2.42a) - - dxfReader.py - - color_map.py - -""" - -# -------------------------------------------------------------------------- -# DXF Import v1.0 by Ed Blake (AKA kitsu) and Remigiusz Fiedler (AKA migius) -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
-# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -import Blender -from Blender import Mathutils, BezTriple, Draw, Registry, sys,\ -Text3d, Window, Mesh, Material, Group, Curve -#from Blender.Mathutils import Vector, Matrix -#import bpy #not used yet -#import BPyMessages - -from dxfReader import readDXF -#from dxfReader import get_name, get_layer -from dxfReader import Object as dxfObject -from dxfColorMap import color_map -from math import log10, sqrt, radians, degrees, atan, cos, sin - -# osx-patch by Blinkozo -#todo: avoid additional modules, prefer Blender-build-in test routines -#import platform -#if platform.python_version() < '2.4': -# from sets import Set as set -#from sys import version_info -#ver = '%s.%s' % version_info[0:2] -# end osx-patch - -import subprocess -import os -if os.name != 'mac': - try: - import psyco - psyco.log(Blender.Get('tempdir')+"/blender.log-psyco") - #psyco.log() - psyco.full(memory=100) - psyco.profile(0.05, memory=100) - psyco.profile(0.2) - #print 'psyco imported' - except ImportError: - print 'psyco not imported' - -print '\n\n\n' -print 'DXF/DWG-Importer v%s *** start ***' %(__version__) #--------------------- - -SCENE = None -WORLDX = Mathutils.Vector((1,0,0)) -WORLDY = Mathutils.Vector((0,1,0)) -WORLDZ = Mathutils.Vector((0,0,1)) - -G_SCALE = 1.0 #(0.0001-1000) global scaling factor for all dxf data -G_ORIGIN_X = 0.0 #global translation-vector (x,y,z) in DXF units -G_ORIGIN_Y = 0.0 -G_ORIGIN_Z = 0.0 -MIN_DIST = 0.001 #cut-off value for sort out short-distance polyline-"duoble_vertex" -ARC_RESOLUTION = 64 #(4-500) arc/circle resolution - number of segments -ARC_RADIUS = 1.0 #(0.01-100) arc/circle radius for number of segments algorithm -CURV_RESOLUTION = 12 #(1-128) Bezier curves U-resolution -CURVARC_RESOLUTION = 4 #(3-32) resolution of circle represented as Bezier curve -THIN_RESOLUTION = 8 #(4-64) thin_cylinder arc_resolution - number of segments -MIN_THICK = MIN_DIST * 10.0 #minimal thickness by forced thickness -MIN_WIDTH = MIN_DIST * 10.0 #minimal width by forced width -TRIM_LIMIT = 3.0 #limit for triming of polylines-wide-segments (values:0.0 - 5.0) -ELEVATION = 0.0 #standard elevation = coordinate Z value - -BYBLOCK = 0 -BYLAYER = 256 -TARGET_LAYER = 3 #target blender_layer -GROUP_BYLAYER = 0 #(0/1) all entities from same layer import into one blender-group -LAYER_DEF_NAME = 'AAAA' #default layer name -LAYER_DEF_COLOR = 4 #default layer color -E_M = 0 -LAB = ". wip .. 
todo" #"*) parts under construction" -M_OBJ = 0 - -FILENAME_MAX = 180 #max length of path+file_name string (FILE_MAXDIR + FILE_MAXFILE) -MAX_NAMELENGTH = 17 #max_effective_obnamelength in blender =21=17+(.001) -INIFILE_DEFAULT_NAME = 'importDXF' -INIFILE_EXTENSION = '.ini' -INIFILE_HEADER = '#ImportDXF.py ver.1.0 config data' -INFFILE_HEADER = '#ImportDXF.py ver.1.0 analyze of DXF-data' - -AUTO = BezTriple.HandleTypes.AUTO -FREE = BezTriple.HandleTypes.FREE -VECT = BezTriple.HandleTypes.VECT -ALIGN = BezTriple.HandleTypes.ALIGN - -UI_MODE = True #activates UI-popup-print, if not multiple files imported - -#---- migration to 2.49------------------------------------------------- -if 'cross' in dir(Mathutils.Vector()): - #Draw.PupMenu('DXF exporter: Abort%t|This script version works for Blender up 2.49 only!') - def M_CrossVecs(v1,v2): - return v1.cross(v2) #for up2.49 - def M_DotVecs(v1,v2): - return v1.dot(v2) #for up2.49 -else: - def M_CrossVecs(v1,v2): - return Mathutils.CrossVecs(v1,v2) #for pre2.49 - def M_DotVecs(v1,v2): - return Mathutils.DotVecs(v1,v2) #for pre2.49 - - -#-------- DWG support ------------------------------------------ -extCONV_OK = True -extCONV = 'DConvertCon.exe' -extCONV_PATH = os.path.join(Blender.Get('scriptsdir'),extCONV) -if not os.path.isfile(extCONV_PATH): - extCONV_OK = False - extCONV_TEXT = 'DWG-Importer cant find external DWG-converter (%s) in Blender script directory.|\ -More details in online Help.' %extCONV -else: - if not os.sys.platform.startswith('win'): - # check if Wine installed: - if subprocess.Popen(('which', 'winepath'), stdout=subprocess.PIPE).stdout.read().strip(): - extCONV_PATH = 'wine %s'%extCONV_PATH - else: - extCONV_OK = False - extCONV_TEXT = 'The external DWG-converter (%s) needs Wine installed on your system.|\ -More details in online Help.' %extCONV -#print 'extCONV_PATH = ', extCONV_PATH - - - -class View: #----------------------------------------------------------------- - """Class for objects representing dxf VIEWs. - """ - def __init__(self, obj, active=None): - """Expects an object of type VIEW as input. - """ - if not obj.type == 'view': - raise TypeError, "Wrong type %s for VIEW object!" %obj.type - - self.type = obj.type - self.name = obj.get_type(2)[0] -# self.data = obj.data[:] - - - self.centerX = getit(obj, 10, 0.0) #view center pointX (in DCS) - self.centerY = getit(obj, 20, 0.0) #view center pointY (in DCS) - self.height = obj.get_type(40)[0] #view height (in DCS) - self.width = obj.get_type(41)[0] #view width (in DCS) - - self.dir = [0,0,0] - self.dir[0] = getit(obj, 11, 0.0) #view directionX from target (in WCS) - self.dir[1] = getit(obj, 21, 0.0) # - self.dir[2] = getit(obj, 31, 0.0) # - - self.target = [0,0,0] - self.target[0] = getit(obj, 12, 0.0) #target pointX(in WCS) - self.target[1] = getit(obj, 22, 0.0) # - self.target[2] = getit(obj, 32, 0.0) # - - self.length = obj.get_type(42)[0] #Lens length - self.clip_front = getit(obj, 43) #Front clipping plane (offset from target point) - self.clip_back = getit(obj, 44) #Back clipping plane (offset from target point) - self.twist = obj.get_type(50)[0] #view twist angle in degrees - - self.flags = getit(obj, 70, 0) - self.paperspace = self.flags & 1 # - - self.mode = obj.get_type(71)[0] #view mode (VIEWMODE system variable) - - def __repr__(self): - return "%s: name - %s, focus length - %s" %(self.__class__.__name__, self.name, self.length) - - - def draw(self, settings): - """for VIEW: generate Blender_camera. 
- """ - obname = 'vw_%s' %self.name # create camera object name - #obname = 'ca_%s' %self.name # create camera object name - obname = obname[:MAX_NAMELENGTH] - - if self.target == [0,0,0] and Mathutils.Vector(self.dir).length == 1.0: - cam= Camera.New('ortho', obname) - ob= SCENE.objects.new(cam) - cam.type = 'ortho' - cam.scale = 1.0 # for ortho cameras - else: - cam= Camera.New('persp', obname) - ob= SCENE.objects.new(cam) - cam.type = 'persp' - cam.angle = 60.0 # for persp cameras - if self.length: - #cam.angle = 2 * atan(17.5/self.length) * 180/pi - cam.lens = self.length #for persp cameras - # hack to update Camera>Lens setting (inaccurate as a focal length) - #curLens = cam.lens; cam.lens = curLens - # AutoCAD gets clip distance from target: - dist = Mathutils.Vector(self.dir).length - cam.clipEnd = dist - self.clip_back - cam.clipStart = dist - self.clip_front - - cam.drawLimits = 1 - cam.drawSize = 10 - - v = Mathutils.Vector(self.dir) -# print 'deb:view cam:', cam #------------ -# print 'deb:view self.target:', self.target #------------ -# print 'deb:view self.dir:', self.dir #------------ -# print 'deb:view self.twist:', self.twist #------------ -# print 'deb:view self.clip_front=%s, self.clip_back=%s, dist=%s' %(self.clip_front, self.clip_back, dist) #------------ - transform(v.normalize(), -self.twist, ob) - ob.loc = Mathutils.Vector(self.target) + Mathutils.Vector(self.dir) - return ob - - -class Vport: #----------------------------------------------------------------- - """Class for objects representing dxf VPORTs. - """ - def __init__(self, obj, active=None): - """Expects an object of type VPORT as input. - """ - if not obj.type == 'vport': - raise TypeError, "Wrong type %s for VPORT object!" %obj.type - - self.type = obj.type - self.name = obj.get_type(2)[0] -# self.data = obj.data[:] - #print 'deb:vport name, data:', self.name #------- - #print 'deb:vport data:', self.data #------- - - self.height = obj.get_type(40)[0] #vport height (in DCS) - self.centerX = getit(obj, 12, 0.0) #vport center pointX (in DCS) - self.centerY = getit(obj, 22, 0.0) #vport center pointY (in DCS) - self.width = self.height * obj.get_type(41)[0] #vport aspect ratio - width (in DCS) - - self.dir = [0,0,0] - self.dir[0] = getit(obj, 16, 0.0) #vport directionX from target (in WCS) - self.dir[1] = getit(obj, 26, 0.0) # - self.dir[2] = getit(obj, 36, 0.0) # - - self.target = [0,0,0] - self.target[0] = getit(obj, 17, 0.0) #target pointX(in WCS) - self.target[1] = getit(obj, 27, 0.0) # - self.target[2] = getit(obj, 37, 0.0) # - - self.length = obj.get_type(42)[0] #Lens length - self.clip_front = getit(obj, 43) #Front clipping plane (offset from target point) - self.clip_back = getit(obj, 44) #Back clipping plane (offset from target point) - self.twist = obj.get_type(51)[0] #view twist angle - - self.flags = getit(obj, 70, 0) - self.paperspace = self.flags & 1 # - - self.mode = obj.get_type(71)[0] #view mode (VIEWMODE system variable) - - def __repr__(self): - return "%s: name - %s, focus length - %s" %(self.__class__.__name__, self.name, self.length) - - def draw(self, settings): - """for VPORT: generate Blender_camera. 
- """ - obname = 'vp_%s' %self.name # create camera object name - #obname = 'ca_%s' %self.name # create camera object name - obname = obname[:MAX_NAMELENGTH] - - if self.target == [0,0,0] and Mathutils.Vector(self.dir).length == 1.0: - cam= Camera.New('ortho', obname) - ob= SCENE.objects.new(cam) - cam.type = 'ortho' - cam.scale = 1.0 # for ortho cameras - else: - cam= Camera.New('persp', obname) - ob= SCENE.objects.new(cam) - cam.type = 'persp' - cam.angle = 60.0 # for persp cameras - if self.length: - #cam.angle = 2 * atan(17.5/self.length) * 180/pi - cam.lens = self.length #for persp cameras - # hack to update Camera>Lens setting (inaccurate as a focal length) - #curLens = cam.lens; cam.lens = curLens - # AutoCAD gets clip distance from target: - dist = Mathutils.Vector(self.dir).length - cam.clipEnd = dist - self.clip_back - cam.clipStart = dist - self.clip_front - - cam.drawLimits = 1 - cam.drawSize = 10 - - v = Mathutils.Vector(self.dir) -# print 'deb:view cam:', cam #------------ -# print 'deb:view self.target:', self.target #------------ -# print 'deb:view self.dir:', self.dir #------------ -# print 'deb:view self.twist:', self.twist #------------ -# print 'deb:view self.clip_front=%s, self.clip_back=%s, dist=%s' %(self.clip_front, self.clip_back, dist) #------------ - transform(v.normalize(), -self.twist, ob) - ob.loc = Mathutils.Vector(self.target) + Mathutils.Vector(self.dir) - return ob - - - -class Layer: #----------------------------------------------------------------- - """Class for objects representing dxf LAYERs. - """ - def __init__(self, obj, name=None, color=None, frozen=None): - """Expects an dxfobject of type layer as input. - if no dxfobject - creates surogate layer with default parameters - """ - - if obj==None: - self.type = 'layer' - if name: self.name = name - else: self.name = LAYER_DEF_NAME - - if color: self.color = color - else: self.color = LAYER_DEF_COLOR - - if frozen!=None: self.frozen = frozen - else: self.frozen = 0 - else: - if obj.type=='layer': - self.type = obj.type - #self.data = obj.data[:] - if name: self.name = name - #self.bfname = name #--todo---see layernamesmap in f_getLayersmap --- - else: self.name = obj.get_type(2)[0] #layer name of object - - if color: self.color = color - else: self.color = obj.get_type(62)[0] #color of object - - if frozen!=None: self.frozen = frozen - else: - self.flags = obj.get_type(70)[0] - self.frozen = self.flags & 1 - - def __repr__(self): - return "%s: name - %s, color - %s" %(self.__class__.__name__, self.name, self.color) - - - -def getit(obj, typ, default=None): #------------------------------------------ - """Universal procedure for geting data from list/objects. - """ - it = default - if type(obj) == list: #if obj is a list, then searching in a list - for item in obj: - #print 'deb:getit item, type(item)', item, type(item) - try: - if item[0] == typ: - it = item[1] - break #as soon as the first found - except: - # --todo-- I found one case where item was a text instance - # that failed with no __getitem__ - pass - else: #else searching in Object with get_type-Methode - item = obj.get_type(typ) - if item: - it = item[0] - #print 'deb:getit:typ, it', typ, it #---------- - return it - - - -def get_extrusion(data): #------------------------------------------------- - """Find the axis of extrusion. - - Used to get from object_data the objects Object_Coordinate_System (ocs). 
- """ - #print 'deb:get_extrusion: data: \n', data #--------------- - vec = [0,0,1] - vec[0] = getit(data, 210, 0) # 210 = x - vec[1] = getit(data, 220, 0) # 220 = y - vec[2] = getit(data, 230, 1) # 230 = z - #print 'deb:get_extrusion: vec: ', vec #--------------- - return vec - - -#------------------------------------------ -def getSceneChild(name): - dudu = [i for i in SCENE.objects if i.name==name] -# dudu = [i for i in SCENE.getChildren() if i.name==name] - #print 'deb:getSceneChild %s -result: %s:' %(name,dudu) #----------------- - if dudu!=[]: return dudu[0] - return None - - -class Solid: #----------------------------------------------------------------- - """Class for objects representing dxf SOLID or TRACE. - """ - def __init__(self, obj): - """Expects an entity object of type solid or trace as input. - """ - if obj.type == 'trace': - obj.type = 'solid' - if not obj.type == 'solid': - raise TypeError, "Wrong type \'%s\' for solid/trace object!" %obj.type - - self.type = obj.type -# self.data = obj.data[:] - - self.space = getit(obj, 67, 0) - self.thic = getit(obj, 39, 0) - self.color_index = getit(obj, 62, BYLAYER) - - self.layer = getit(obj, 8, None) - self.extrusion = get_extrusion(obj) - self.points = self.get_points(obj) - - - - def get_points(self, data): - """Gets start and end points for a solid type object. - - Solids have 3 or 4 points and fixed codes for each value. - """ - - # start x, y, z and end x, y, z = 0 - a = [0, 0, 0] - b = [0, 0, 0] - c = [0, 0, 0] - d = [0, 0, 0] - a[0] = getit(data, 10, None) # 10 = x - a[1] = getit(data, 20, None) # 20 = y - a[2] = getit(data, 30, 0) # 30 = z - b[0] = getit(data, 11, None) - b[1] = getit(data, 21, None) - b[2] = getit(data, 31, 0) - c[0] = getit(data, 12, None) - c[1] = getit(data, 22, None) - c[2] = getit(data, 32, 0) - out = [a,b,c] - - d[0] = getit(data, 13, None) - if d[0] != None: - d[1] = getit(data, 23, None) - d[2] = getit(data, 33, 0) - out.append(d) - #print 'deb:solid.vertices:---------\n', out #----------------------- - return out - - - def __repr__(self): - return "%s: layer - %s, points - %s" %(self.__class__.__name__, self.layer, self.points) - - - def draw(self, settings): - """for SOLID: generate Blender_geometry. 
- """ - points = self.points - if not points: return - edges, faces = [], [] - l = len(self.points) - - obname = 'so_%s' %self.layer # create object name from layer name - obname = obname[:MAX_NAMELENGTH] - - vg_left, vg_right, vg_top, vg_bottom, vg_start, vg_end = [], [], [], [], [], [] - thic = set_thick(self.thic, settings) - if thic != 0: - thic_points = [[v[0], v[1], v[2] + thic] for v in points[:]] - if thic < 0.0: - thic_points.extend(points) - points = thic_points - else: - points.extend(thic_points) - - if l == 4: - faces = [[0,1,3,2], [4,6,7,5], [0,4,5,1], - [1,5,7,3], [3,7,6,2], [2,6,4,0]] - vg_left = [2,6,4,0] - vg_right = [1,5,7,3] - vg_top = [4,6,7,5] - vg_bottom = [0,1,3,2] - vg_start = [0,4,5,1] - vg_end = [3,7,6,2] - elif l == 3: - faces = [[0,1,2], [3,5,4], [0,3,4,1], [1,4,5,2], [2,5,3,0]] - vg_top = [3,4,5] - vg_bottom = [0,1,2] - vg_left = [2,5,3,0] - vg_right = [1,4,5,2] - vg_start = [0,3,4,1] - elif l == 2: faces = [[0,1,3,2]] - else: - if l == 4: faces = [[0,1,3,2]] - elif l == 3: faces = [[0,1,2]] - elif l == 2: edges = [[0,1]] - - if M_OBJ: obname, me, ob = makeNewObject() - else: - me = Mesh.New(obname) # create a new mesh - ob = SCENE.objects.new(me) # create a new mesh_object - me.verts.extend(points) # add vertices to mesh - if faces: me.faces.extend(faces) # add faces to the mesh - if edges: me.edges.extend(edges) # add faces to the mesh - - if settings.var['vGroup_on'] and not M_OBJ: - # each MeshSide becomes vertexGroup for easier material assignment --------------------- - replace = Mesh.AssignModes.ADD #or .AssignModes.ADD/REPLACE - if vg_left: me.addVertGroup('side.left') ; me.assignVertsToGroup('side.left', vg_left, 1.0, replace) - if vg_right:me.addVertGroup('side.right') ; me.assignVertsToGroup('side.right', vg_right, 1.0, replace) - if vg_top: me.addVertGroup('side.top') ; me.assignVertsToGroup('side.top', vg_top, 1.0, replace) - if vg_bottom:me.addVertGroup('side.bottom'); me.assignVertsToGroup('side.bottom',vg_bottom, 1.0, replace) - if vg_start:me.addVertGroup('side.start') ; me.assignVertsToGroup('side.start', vg_start, 1.0, replace) - if vg_end: me.addVertGroup('side.end') ; me.assignVertsToGroup('side.end', vg_end, 1.0, replace) - - transform(self.extrusion, 0, ob) - - return ob - -class Line: #----------------------------------------------------------------- - """Class for objects representing dxf LINEs. - """ - def __init__(self, obj): - """Expects an entity object of type line as input. - """ - if not obj.type == 'line': - raise TypeError, "Wrong type \'%s\' for line object!" %obj.type - self.type = obj.type -# self.data = obj.data[:] - - self.space = getit(obj, 67, 0) - self.thic = getit(obj, 39, 0) - #print 'deb:self.thic: ', self.thic #--------------------- - self.color_index = getit(obj, 62, BYLAYER) - - self.layer = getit(obj, 8, None) - self.extrusion = get_extrusion(obj) - self.points = self.get_points(obj) - - - def get_points(self, data): - """Gets start and end points for a line type object. - - Lines have a fixed number of points (two) and fixed codes for each value. 
- """ - # start x,y,z and end x,y,z = 0 - a = [0, 0, 0] - b = [0, 0, 0] - a[0] = getit(data, 10, None) # 10 = x - a[1] = getit(data, 20, None) # 20 = y - a[2] = getit(data, 30, 0) # 30 = z - b[0] = getit(data, 11, None) - b[1] = getit(data, 21, None) - b[2] = getit(data, 31, 0) - out = [a,b] - return out - - - def __repr__(self): - return "%s: layer - %s, points - %s" %(self.__class__.__name__, self.layer, self.points) - - - def draw(self, settings): - """for LINE: generate Blender_geometry. - """ - # Generate the geometery - #settings.var['curves_on']=False - - points = self.points - thic = set_thick(self.thic, settings) - width = 0.0 - if settings.var['lines_as'] == 4: # as thin_box - thic = settings.var['thick_min'] - width = settings.var['width_min'] - elif settings.var['lines_as'] == 3: # as thin cylinder - cyl_rad = 0.5 * settings.var['width_min'] - - elif settings.var['lines_as'] == 5: # LINE curve representation------------------------- - obname = 'li_%s' %self.layer # create object name from layer name - obname = obname[:MAX_NAMELENGTH] - - c = Curve.New(obname) # create new curve data - curve = c.appendNurb(BezTriple.New(points[0])) - curve.append(BezTriple.New(points[1])) - for point in curve: - point.handleTypes = [VECT, VECT] - point.radius = 1.0 - curve.flagU = 0 # 0 sets the curve not cyclic=open - c.setResolu(settings.var['curve_res']) - c.update() #important for handles calculation - - ob = SCENE.objects.new(c) # create a new curve_object - - #if False: # --todo-- better support for 210-group - if thic != 0.0: #hack: Blender2.45 curve-extrusion - t = thic * 0.5 - if abs(t) > 5.0: t = 5.0 * cmp(t,0) # Blender2.45 accepts only (0.0 - 5.0) - e = self.extrusion - c.setExt1(abs(t)) # curve-extrusion - ob.LocX += t * e[0] - ob.LocY += t * e[1] - ob.LocZ += t * e[2] - #c.setExt1(1.0) # curve-extrusion: Blender2.45 accepts only (0.0 - 5.0) - #ob.LocZ = t + self.loc[2] - #ob.SizeZ *= abs(t) - return ob - - else: # LINE mesh representation ------------------------------ - global activObjectLayer - global activObjectName - #print 'deb:draw:line.ob IN activObjectName: ', activObjectName #--------------------- - - if M_OBJ: obname, me, ob = makeNewObject() - else: - if activObjectLayer == self.layer and settings.var['one_mesh_on']: - obname = activObjectName - #print 'deb:line.draw obname from activObjectName: ', obname #--------------------- - ob = getSceneChild(obname) # open an existing mesh_object - #ob = SCENE.getChildren(obname) # open an existing mesh_object - #me = Mesh.Get(ob.name) # open objects mesh data - me = ob.getData(name_only=False, mesh=True) - else: - obname = 'li_%s' %self.layer # create object name from layer name - obname = obname[:MAX_NAMELENGTH] - me = Mesh.New(obname) # create a new mesh - ob = SCENE.objects.new(me) # create a new mesh_object - activObjectName = ob.name - activObjectLayer = self.layer - #print ('deb:line.draw new line.ob+mesh:"%s" created!' 
%ob.name) #--------------------- - - faces, edges = [], [] - n = len(me.verts) - - #if settings.var['width_force']: #--todo----------- - - if thic != 0: - t, e = thic, self.extrusion - #print 'deb:thic, extr: ', t, e #--------------------- - points.extend([[v[0]+t*e[0], v[1]+t*e[1], v[2]+t*e[2]] for v in points[:]]) - faces = [[0+n, 1+n, 3+n, 2+n]] - else: - edges = [[0+n, 1+n]] - - me.verts.extend(points) # adds vertices to global mesh - if faces: me.faces.extend(faces) # add faces to the mesh - if edges: me.edges.extend(edges) # add faces to the mesh - - if settings.var['vGroup_on'] and not M_OBJ: - # entities with the same color build one vertexGroup for easier material assignment ---- - ob.link(me) # link mesh to that object - vG_name = 'color_%s' %self.color_index - if edges: faces = edges - replace = Mesh.AssignModes.ADD #or .AssignModes.REPLACE or ADD - try: - me.assignVertsToGroup(vG_name, faces[0], 1.0, replace) - #print 'deb: existed vGroup:', vG_name #--------------------- - except: - me.addVertGroup(vG_name) - me.assignVertsToGroup(vG_name, faces[0], 1.0, replace) - #print 'deb: create new vGroup:', vG_name #--------------------- - - - #print 'deb:draw:line.ob OUT activObjectName: ', activObjectName #--------------------- - return ob - - - -class Point: #----------------------------------------------------------------- - """Class for objects representing dxf POINTs. - """ - def __init__(self, obj): - """Expects an entity object of type point as input. - """ - if not obj.type == 'point': - raise TypeError, "Wrong type %s for point object!" %obj.type - self.type = obj.type -# self.data = obj.data[:] - - self.space = getit(obj, 67, 0) - self.thic = getit(obj, 39, 0) - #print 'deb:self.thic: ', self.thic #--------------------- - self.color_index = getit(obj, 62, BYLAYER) - - self.layer = getit(obj, 8, None) - self.extrusion = get_extrusion(obj) - self.points = self.get_points(obj) - - - def get_points(self, data): - """Gets coordinates for a point type object. - - Points have fixed codes for each value. - """ - a = [0, 0, 0] - a[0] = getit(data, 10, None) # 10 = x - a[1] = getit(data, 20, None) # 20 = y - a[2] = getit(data, 30, 0) # 30 = z - out = [a] - return out - - - def __repr__(self): - return "%s: layer - %s, points - %s" %(self.__class__.__name__, self.layer, self.points) - - - def draw(self, settings): - """for POINT: generate Blender_geometry. 
- """ - points = self.points - obname = 'po_%s' %self.layer # create object name from layer name - obname = obname[:MAX_NAMELENGTH] - points_as = settings.var['points_as'] - thic = settings.var['thick_min'] - if thic < settings.var['dist_min']: thic = settings.var['dist_min'] - - if points_as in [1,3,4,5]: - if points_as in [1,5]: # as 'empty' - c = 'Empty' - elif points_as == 3: # as 'thin sphere' - res = settings.var['thin_res'] - c = Mesh.Primitives.UVsphere(res,res,thic) - elif points_as == 4: # as 'thin box' - c = Mesh.Primitives.Cube(thic) - ob = SCENE.objects.new(c, obname) # create a new object - transform(self.extrusion, 0, ob) - ob.loc = tuple(points[0]) - - elif points_as == 2: # as 'vertex' - global activObjectLayer - global activObjectName - #print 'deb:draw:point.ob IN activObjectName: ', activObjectName #--------------------- - if M_OBJ: obname, me, ob = makeNewObject() - else: - if activObjectLayer == self.layer and settings.var['one_mesh_on']: - obname = activObjectName - #print 'deb:draw:point.ob obname from activObjectName: ', obname #--------------------- - ob = getSceneChild(obname) # open an existing mesh_object - #ob = SCENE.getChildren(obname) # open an existing mesh_object - me = ob.getData(name_only=False, mesh=True) - #me = Mesh.Get(ob.name) # open objects mesh data - else: - me = Mesh.New(obname) # create a new mesh - ob = SCENE.objects.new(me) # create a new mesh_object - activObjectName = ob.name - activObjectLayer = self.layer - #print ('deb:draw:point new point.ob+mesh:"%s" created!' %ob.name) #--------------------- - me.verts.extend(points) # add vertices to mesh - - return ob - - - -class Polyline: #----------------------------------------------------------------- - """Class for objects representing dxf POLYLINEs. - """ - def __init__(self, obj): - """Expects an entity object of type polyline as input. - """ - #print 'deb:polyline.init.START:----------------' #------------------------ - if not obj.type == 'polyline': - raise TypeError, "Wrong type %s for polyline object!" 
%obj.type - self.type = obj.type -# self.data = obj.data[:] - - self.space = getit(obj, 67, 0) - self.elevation = getit(obj, 30, 0) - #print 'deb:elevation: ', self.elevation #--------------- - self.thic = getit(obj, 39, 0) - self.color_index = getit(obj, 62, BYLAYER) - - self.flags = getit(obj, 70, 0) - self.closed = self.flags & 1 # closed in the M direction - self.curved = self.flags & 2 # Bezier-curve-fit vertices have been added - self.spline = self.flags & 4 # NURBS-curve-fit vertices have been added - self.poly3d = self.flags & 8 # 3D-polyline - self.plmesh = self.flags & 16 # 3D-polygon mesh - self.closeN = self.flags & 32 # closed in the N direction - self.plface = self.flags & 64 # 3D-polyface mesh - self.contin = self.flags & 128 # the linetype pattern is generated continuously - - self.pltype='poly2d' # default is a 2D-polyline - if self.poly3d: self.pltype='poly3d' - elif self.plface: self.pltype='plface' - elif self.plmesh: self.pltype='plmesh' - - self.swidth = getit(obj, 40, 0) # default start width - self.ewidth = getit(obj, 41, 0) # default end width - #self.bulge = getit(obj, 42, None) # bulge of the segment - self.vectorsM = getit(obj, 71, None) # PolyMesh: expansion in M-direction / PolyFace: number of the vertices - self.vectorsN = getit(obj, 72, None) # PolyMesh: expansion in M-direction / PolyFace: number of faces - #self.resolM = getit(obj, 73, None) # resolution of surface in M direction - #self.resolN = getit(obj, 74, None) # resolution of surface in N direction - self.curvNoFitted = False - self.curvQuadrati = False - self.curvCubicBsp = False - self.curvBezier = False - curvetype = getit(obj, 75, 0) # type of curve/surface: 0=None/5=Quadric/6=Cubic/8=Bezier - if curvetype == 0: self.curvNoFitted = True - elif curvetype == 5: self.curvQuadrati = True - elif curvetype == 6: self.curvCubicBsp = True - elif curvetype == 8: self.curvBezier = True - - self.layer = getit(obj, 8, None) - self.extrusion = get_extrusion(obj) - - self.points = [] #list with vertices coordinats - self.faces = [] #list with vertices assigment to faces - #print 'deb:polyline.init.ENDinit:----------------' #------------ - - - - def __repr__(self): - return "%s: layer - %s, points - %s" %(self.__class__.__name__, self.layer, self.points) - - - - def doubles_out(self, settings, d_points): - """routine to sort out of double.vertices----------------------------- - """ - minimal_dist = settings.var['dist_min'] * 0.1 - dv_count = 0 - temp_points = [] - for i in xrange(len(d_points)-1): - point = d_points[i] - point2 = d_points[i+1] - #print 'deb:double.vertex p1,p2', point, point2 #------------------------ - delta = Mathutils.Vector(point2.loc) - Mathutils.Vector(point.loc) - if delta.length > minimal_dist: - temp_points.append(point) - else: - dv_count+=1 - #print 'deb:drawPoly2d double.vertex sort out! count=', dv_count #------------------------ - temp_points.append(d_points[-1]) #------ incl. 
last vertex ------------- - #if self.closed: temp_points.append(d_points[1]) #------ loop start vertex ------------- - d_points = temp_points #-----vertex.list without "double.vertices" - #print 'deb:drawPoly2d d_pointsList =after DV-outsorting=====:\n ', d_points #------------------------ - return d_points - - - def tribles_out(self, settings, d_points): - """routine to sort out of three_in_place.vertices----------------------------- - """ - minimal_dist = settings.var['dist_min'] * 0.1 - dv_count = 0 - temp_points = [] - for i in xrange(len(d_points)-2): - point1 = d_points[i] - point2 = d_points[i+1] - point3 = d_points[i+2] - #print 'deb:double.vertex p1,p2', point, point2 #------------------------ - delta12 = Mathutils.Vector(point2.loc) - Mathutils.Vector(point1.loc) - delta23 = Mathutils.Vector(point3.loc) - Mathutils.Vector(point2.loc) - if delta12.length < minimal_dist and delta23.length < minimal_dist: - dv_count+=1 - else: - temp_points.append(point1) - #print 'deb:drawPoly2d double.vertex sort out! count=', dv_count #------------------------ - point1 = d_points[-2] - point2 = d_points[-1] - delta12 = Mathutils.Vector(point2.loc) - Mathutils.Vector(point1.loc) - if delta12.length > minimal_dist: - temp_points.append(d_points[-2]) #------ incl. 2last vertex ------------- - temp_points.append(d_points[-1]) #------ incl. 1last vertex ------------- - #if self.closed: temp_points.append(d_points[1]) #------ loop start vertex ------------- - d_points = temp_points #-----vertex.list without "double.vertices" - #print 'deb:drawPoly2d d_pointsList =after DV-outsorting=====:\n ', d_points #------------------------ - return d_points - - - def draw(self, settings): #-------------%%%% DRAW POLYLINE %%%--------------- - """for POLYLINE: generate Blender_geometry. - """ - #print 'deb:drawPOLYLINE.START:----------------' #------------------------ - #print 'deb:POLYLINEdraw self.pltype:', self.pltype #------------------------ - #print 'deb:POLYLINEdraw self.points:\n', self.points #------------------------ - ob = [] - #---- 3dPolyFace - mesh with free topology - if self.pltype=='plface' and settings.drawTypes['plmesh']: - ob = self.drawPlFace(settings) - #---- 3dPolyMesh - mesh with ortogonal topology - elif self.pltype=='plmesh' and settings.drawTypes['plmesh']: - ob = self.drawPlMesh(settings) - - #---- 2dPolyline - plane polyline with arc/wide/thic segments - elif self.pltype=='poly2d' and settings.drawTypes['polyline']: - if settings.var['plines_as'] in [5,6]: # and self.spline: - ob = self.drawPolyCurve(settings) - else: - ob = self.drawPoly2d(settings) - - #---- 3dPolyline - non-plane polyline (thin segments = without arc/wide/thic) - elif self.pltype=='poly3d' and settings.drawTypes['pline3']: - if settings.var['plines3_as'] in [5,6]: # and self.spline: - ob = self.drawPolyCurve(settings) - else: - ob = self.drawPoly2d(settings) - - #---- Spline - curved polyline (thin segments = without arc/wide/thic) - elif self.pltype=='spline' and settings.drawTypes['spline']: - if settings.var['splines_as'] in [5,6]: - ob = self.drawPolyCurve(settings) - else: - ob = self.drawPoly2d(settings) - return ob - - - def drawPlFace(self, settings): #---- 3dPolyFace - mesh with free topology - """Generate the geometery of polyface. 
- """ - #print 'deb:drawPlFace.START:----------------' #------------------------ - points = [] - faces = [] - #print 'deb:len of pointsList ====== ', len(self.points) #------------------------ - for point in self.points: - if point.face: - faces.append(point.face) - else: - points.append(point.loc) - - if settings.var['plmesh_flip']: # ---------------------- - for face in faces: - face.reverse() - face = [face[-1]] + face[:-1] - - #print 'deb:drawPlFace: len of points_list:\n', len(points) #----------------------- - #print 'deb:drawPlFace: len of faces_list:\n', len(faces) #----------------------- - #print 'deb:drawPlFace: points_list:\n', points #----------------------- - #print 'deb:drawPlFace: faces_list:\n', faces #----------------------- - obname = 'pf_%s' %self.layer # create object name from layer name - obname = obname[:MAX_NAMELENGTH] - me = Mesh.New(obname) # create a new mesh - ob = SCENE.objects.new(me) # create a new mesh_object - me.verts.extend(points) # add vertices to mesh - me.faces.extend(faces) # add faces to the mesh - if settings.var['normals_out']: # ---------------------- - #me.flipNormals() - me.recalcNormals(0) - #me.update() - #print 'deb:drawPlFace: len of me.faces:\n', len(me.faces) #----------------------- - - if settings.var['meshSmooth_on']: # ---------------------- - for i in xrange(len(me.faces)): - me.faces[i].smooth = True - #me.Mode(AUTOSMOOTH) - transform(self.extrusion, 0, ob) - #print 'deb:drawPlFace.END:----------------' #------------------------ - return ob - - - - def drawPlMesh(self, settings): #---- 3dPolyMesh - mesh with orthogonal topology - """Generate the geometery of polymesh. - """ - #print 'deb:polymesh.draw.START:----------------' #------------------------ - #points = [] - #print 'deb:len of pointsList ====== ', len(self.points) #------------------------ - faces = [] - m = self.vectorsM - n = self.vectorsN - for j in xrange(m - 1): - for i in xrange(n - 1): - nn = j * n - faces.append([nn+i, nn+i+1, nn+n+i+1, nn+n+i]) - - if self.closed: #mesh closed in N-direction - nn = (m-1)*n - for i in xrange(n - 1): - faces.append([nn+i, nn+i+1, i+1, i]) - - if self.closeN: #mesh closed in M-direction - for j in xrange(m-1): - nn = j * n - faces.append([nn+n-1, nn, nn+n, nn+n-1+n]) - - if self.closed and self.closeN: #mesh closed in M/N-direction - faces.append([ (n*m)-1, (m-1)*n, 0, n-1]) - - #print 'deb:len of points_list:\n', len(points) #----------------------- - #print 'deb:faces_list:\n', faces #----------------------- - obname = 'pm_%s' %self.layer # create object name from layer name - obname = obname[:MAX_NAMELENGTH] - me = Mesh.New(obname) # create a new mesh - ob = SCENE.objects.new(me) # create a new mesh_object - me.verts.extend([point.loc for point in self.points]) # add vertices to mesh - me.faces.extend(faces) # add faces to the mesh - if settings.var['normals_out']: # ---------------------- - #me.flipNormals() - me.recalcNormals(0) - #me.update() - if settings.var['meshSmooth_on']: # ---------------------- - for i in xrange(len(faces)): - me.faces[i].smooth = True - #me.Mode(AUTOSMOOTH) - - transform(self.extrusion, 0, ob) - #print 'deb:polymesh.draw.END:----------------' #------------------------ - return ob - - - def drawPolyCurve(self, settings): #---- Polyline - draw as Blender-curve - """Generate the geometery of polyline as Blender-curve. 
- """ - #print 'deb:polyline2dCurve.draw.START:----------------' #--- - if len(self.points) < 2: - #print 'deb:drawPoly2d exit, cause POLYLINE has less than 2 vertices' #--------- - return - - if self.spline: pline_typ = 'ps' # Polyline-NURBSpline - elif self.curved: pline_typ = 'pc' # Polyline-BezierCurve - else: pline_typ = 'pl' # Polyline classic - obname = '%s_%s' %(pline_typ, self.layer) # create object_name from layer name - obname = obname[:MAX_NAMELENGTH] - d_points = [] - - if settings.var['Z_force_on']: - self.elevation = settings.var['Z_elev'] - for point in self.points: - point.loc[2] = self.elevation - d_points.append(point) - else: #for DXFr10-format: update all points[].loc[2] == None -> 0.0 - for point in self.points: - if point.loc[2] == None: - point.loc[2] = self.elevation - d_points.append(point) - - #d_points = self.tribles_out(settings, d_points) - #d_points = self.doubles_out(settings, d_points) - #print 'deb:drawPolyCurve d_pointsList =after DV-outsorting=====:\n ', d_points #------------------------ - - thic = set_thick(self.thic, settings) - if thic != 0.0: #hack: Blender<2.45 curve-extrusion - LocZ = d_points[0].loc[2] - temp_points = [] - for point in d_points: - point.loc[2] = 0.0 - temp_points.append(point) - d_points = temp_points - - #print 'deb:polyline2dCurve.draw d_points=', d_points #--------------- - pline = Curve.New(obname) # create new curve data - #pline.setResolu(24) #--todo----- - - if self.spline: # NURBSplines-----OK----- - #print 'deb:polyline2dCurve.draw self.spline!' #--------------- - nurbs_points = [] - for d in d_points: - pkt = d.loc - pkt.append(d.weight) - nurbs_points.append(pkt) - firstpoint = nurbs_points[0] - curve = pline.appendNurb(firstpoint) - curve.setType(4) # set curve_type NURBS - print 'deb: dir(curve):', dir(curve[-1]) #---------------- - for point in nurbs_points[1:]: - curve.append(point) - #TODO: what is the trick for bevel radius? curve[-1].radius = 1.0 - if self.closed: - curve.flagU = 1+0 # Set curve cyclic=close and uni - else: - curve.flagU = 0+2 # Set curve not cyclic=open - try: curve.orderU = 5 # works only with >2.46svn080625 - except AttributeError: pass - #print 'deb: dir(curve):', dir(curve) #---------------- - - elif self.curved: #--SPLINE as Bezier-curves---wip------ - #print 'deb:polyline2dCurve.draw self.curved!' 
#--------------- - begtangent, endtangent = None, None - if d_points[0].tangent: - begtangent = d_points[0] - d_points = d_points[1:] - if d_points[-1].tangent: - endtangent = d_points[-1] - d_points = d_points[:-1] - curve = pline.appendNurb(BezTriple.New(d_points[0])) - for p in d_points[1:]: - curve.append(BezTriple.New(p)) - for point in curve: - point.handleTypes = [AUTO, AUTO] - point.radius = 1.0 - #curve.setType(1) #Bezier curve - if self.closed: - curve.flagU = 5 #1 # Set curve cyclic=close - else: - curve.flagU = 4 #0 # Set curve not cyclic=open - if begtangent: - #print 'deb:polyline2dCurve.draw curve[0].vec:', curve[0].vec #----- - #print 'deb:polyline2dCurve.draw begtangent:', begtangent #----- - p0h1,p0,p0h2 = curve[0].vec - p0h1 = [p0h1[i]+begtangent[i] for i in range(3)] - curve.__setitem__(0,BezTriple.New(p0h1+p0+p0h2)) - curve[0].handleTypes = [FREE, ALIGN] #remi--todo----- - curve[0].radius = 1.0 - if endtangent: - #print 'deb:polyline2dCurve.draw curve[-1].vec:', curve[-1].vec #----- - #print 'deb:polyline2dCurve.draw endtangent:', endtangent #----- - p0h1,p0,p0h2 = curve[-1].vec - p0h2 = [p0h2[i]+endtangent[i] for i in range(3)] - #print 'deb:drawPlineCurve: p0h2:', p0h2 #---------- - curve.__setitem__(-1,BezTriple.New(p0h1+p0+p0h2)) - #print 'deb:polyline2dCurve.draw curve[-1].vec:', curve[-1].vec #----- - curve[-1].handleTypes = [ALIGN, FREE] #remi--todo----- - curve[-1].radius = 1.0 - - - - else: #-- only straight line- and arc-segments----OK------ - #print 'deb:polyline2dCurve.draw curve:', curve #----- - points = [] - arc_res = settings.var['curve_arc'] - prevHandleType = VECT - #d_points.append(d_points[0]) #------ first vertex added at the end of list -------- - #curve.setType(0) #polygon_type of Blender_curve - for i in xrange(len(d_points)): - point1 = d_points[i] - #point2 = d_points[i+1] - #----- optimised Bezier-Handles calculation -------------------------------- - #print 'deb:drawPlineCurve: i:', i #--------- - if point1.bulge and not (i == len(d_points)-1 and point1.bulge and not self.closed): - if i == len(d_points)-1: point2 = d_points[0] - else: point2 = d_points[i+1] - - - # calculate additional points for bulge - VectorTriples = calcBulge(point1, point2, arc_res, triples=True) - - if prevHandleType == FREE: - #print 'deb:drawPlineCurve: VectorTriples[0]:', VectorTriples[0] #--------- - VectorTriples[0][:3] = prevHandleVect - #print 'deb:drawPlineCurve: VectorTriples[0]:', VectorTriples[0] #--------- - - if i == 0: curve = pline.appendNurb(BezTriple.New(VectorTriples[0])) - else: curve.append(BezTriple.New(VectorTriples[0])) - curve[-1].handleTypes = [prevHandleType, FREE] - curve[-1].radius = 1.0 - - for p in VectorTriples[1:-1]: - curve.append(BezTriple.New(p)) - curve[-1].handleTypes = [FREE, FREE] - curve[-1].radius = 1.0 - - prevHandleVect = VectorTriples[-1][:3] - prevHandleType = FREE - #print 'deb:drawPlineCurve: prevHandleVect:', prevHandleVect #--------- - else: - #print 'deb:drawPlineCurve: else' #---------- - if prevHandleType == FREE: - VectorTriples = prevHandleVect + list(point1) + list(point1) - #print 'deb:drawPlineCurve: VectorTriples:', VectorTriples #--------- - curve.append(BezTriple.New(VectorTriples)) - curve[-1].handleTypes = [FREE, VECT] - prevHandleType = VECT - curve[-1].radius = 1.0 - else: - if i == 0: curve = pline.appendNurb(BezTriple.New(point1.loc)) - else: curve.append(BezTriple.New(point1.loc)) - curve[-1].handleTypes = [VECT, VECT] - curve[-1].radius = 1.0 - #print 'deb:drawPlineCurve: curve[-1].vec[0]', 
curve[-1].vec[0] #---------- - - if self.closed: - curve.flagU = 1 # Set curve cyclic=close - if prevHandleType == FREE: - #print 'deb:drawPlineCurve:closed curve[0].vec:', curve[0].vec #---------- - #print 'deb:drawPlineCurve:closed curve[0].handleTypes:', curve[0].handleTypes #---------- - prevHandleType2 = curve[0].handleTypes[1] - p0h1,p0,p0h2 = curve[0].vec - #print 'deb:drawPlineCurve:closed p0h1:', p0h1 #---------- - p0h1 = prevHandleVect - #p0h1 = [0,0,0] - #print 'deb:drawPlineCurve:closed p0h1:', p0h1 #---------- - #curve[0].vec = [p0h1,p0,p0h2] - curve.__setitem__(0,BezTriple.New(p0h1+p0+p0h2)) - - curve[0].handleTypes = [FREE,prevHandleType2] - curve[0].radius = 1.0 - #print 'deb:drawPlineCurve:closed curve[0].vec:', curve[0].vec #---------- - #print 'deb:drawPlineCurve:closed curve[0].handleTypes:', curve[0].handleTypes #---------- - else: - curve[0].handleTypes[0] = VECT - curve[0].radius = 1.0 - else: - curve.flagU = 0 # Set curve not cyclic=open - - if settings.var['fill_on']: - pline.setFlag(6) # 2+4 set top and button caps - else: - pline.setFlag(pline.getFlag() & ~6) # dont set top and button caps - - pline.setResolu(settings.var['curve_res']) - pline.update() - ob = SCENE.objects.new(pline) # create a new curve_object - - if thic != 0.0: #hack: Blender<2.45 curve-extrusion - thic = thic * 0.5 - pline.setExt1(1.0) # curve-extrusion accepts only (0.0 - 2.0) - ob.LocZ = thic + LocZ - - transform(self.extrusion, 0, ob) - if thic != 0.0: - ob.SizeZ *= abs(thic) - - #print 'deb:polyline2dCurve.draw.END:----------------' #----- - return ob - - - def drawPoly2d(self, settings): #---- 2dPolyline - plane lines/arcs with wide/thic - """Generate the geometery of regular polyline. - """ - #print 'deb:polyline2d.draw.START:----------------' #------------------------ - points = [] - d_points = [] - swidths = [] - ewidths = [] - swidth_default = self.swidth #default start width of POLYLINEs segments - ewidth_default = self.ewidth #default end width of POLYLINEs segments - #print 'deb:drawPoly2d self.swidth=', self.swidth #------------------------ - thic = set_thick(self.thic, settings) - if self.spline: pline_typ = 'ps' - elif self.curved: pline_typ = 'pc' - else: pline_typ = 'pl' - obname = '%s_%s' %(pline_typ, self.layer) # create object_name from layer name - obname = obname[:MAX_NAMELENGTH] - - if len(self.points) < 2: - #print 'deb:drawPoly2d exit, cause POLYLINE has less than 2 vertices' #--------- - return - - if settings.var['Z_force_on']: - self.elevation = settings.var['Z_elev'] - for point in self.points: - point.loc[2] = self.elevation - d_points.append(point) - else: #for DXFr10-format: update all non-existing LocZ points[].loc[2] == None -> 0.0 elevation - for point in self.points: - if point.loc[2] == None: - point.loc[2] = self.elevation - d_points.append(point) - #print 'deb:drawPoly2d len of d_pointsList ====== ', len(d_points) #------------------------ - #print 'deb:drawPoly2d d_pointsList ======:\n ', d_points #------------------------ - - - #if closed polyline, add duplic of the first vertex at the end of pointslist - if self.closed: #new_b8 - if d_points[-1].loc != d_points[0].loc: # if not equal, then set the first at the end of pointslist - d_points.append(d_points[0]) - else: - if d_points[-1].loc == d_points[0].loc: # if equal, then set to closed, and modify the last point - d_points[-1] = d_points[0] - self.closed = True - #print 'deb:drawPoly2d len of d_pointsList ====== ', len(d_points) #------------------------ - #print 'deb:drawPoly2d d_pointsList 
======:\n ', d_points #------------------------ - - d_points = self.doubles_out(settings, d_points) - #print 'deb:drawPolyCurve d_pointsList =after DV-outsorting=====:\n ', d_points #------------------------ - - #print 'deb:drawPoly2d len of d_pointsList ====== ', len(d_points) #------------------------ - if len(d_points) < 2: #if too few vertex, then return - #print 'deb:drawPoly2d corrupted Vertices' #--------- - return - - # analyze of straight- and bulge-segments - # generation of additional points for bulge segments - arc_res = settings.var['arc_res']/sqrt(settings.var['arc_rad']) - wide_segment_exist = False - bulg_points = [] # for each point set None (or center for arc-subPoints) - for i in xrange(len(d_points)-1): - point1 = d_points[i] - point2 = d_points[i+1] - #print 'deb:drawPoly2d_bulg tocalc.point1:', point1 #------------------------ - #print 'deb:drawPoly2d_bulg tocalc.point2:', point2 #------------------------ - - swidth = point1.swidth - ewidth = point1.ewidth - #print 'deb:drawPoly2d point1.swidth=', swidth #------------------------ - if swidth == None: swidth = swidth_default - if ewidth == None: ewidth = ewidth_default - if swidth != 0.0 or ewidth != 0.0: wide_segment_exist = True - #print 'deb:drawPoly2d vertex_swidth=', swidth #------------------------ - - if settings.var['width_force']: # force minimal width for thin segments - width_min = settings.var['width_min'] - if swidth < width_min: swidth = width_min - if ewidth < width_min: ewidth = width_min - if not settings.var['width_on']: # then force minimal width for all segments - swidth = width_min - ewidth = width_min - - #if point1.bulge and (i < (len(d_points)-1) or self.closed): - if point1.bulge and i < (len(d_points)-1): #10_b8 - verts, center = calcBulge(point1, point2, arc_res) #calculate additional points for bulge - points.extend(verts) - delta_width = (ewidth - swidth) / len(verts) - width_list = [swidth + (delta_width * ii) for ii in xrange(len(verts)+1)] - swidths.extend(width_list[:-1]) - ewidths.extend(width_list[1:]) - bulg_list = [center for ii in xrange(len(verts))] - #the last point in bulge has index False for better indexing of bulg_end! 
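
For orientation: calcBulge() used above is defined elsewhere in this script and expands a bulged segment into arc points. The DXF convention is that a vertex bulge equals the tangent of one quarter of the arc's included angle, with the sign giving the direction. A minimal standalone sketch of that relation, assuming a non-zero bulge (straight segments take the other branch above); bulge_to_arc is a hypothetical helper for illustration, not the script's calcBulge:

    from math import atan, sin, sqrt

    def bulge_to_arc(p1, p2, bulge):
        # DXF: bulge = tan(sweep/4); positive = counter-clockwise
        sweep = 4.0 * atan(bulge)                          # included angle of the arc
        chord = sqrt((p2[0]-p1[0])**2 + (p2[1]-p1[1])**2)  # straight distance p1 -> p2
        radius = chord / (2.0 * abs(sin(sweep * 0.5)))     # from chord = 2*R*sin(sweep/2)
        return sweep, radius
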
- bulg_list[-1] = None - bulg_points.extend(bulg_list) - - else: - points.append(point1.loc) - swidths.append(swidth) - ewidths.append(ewidth) - bulg_points.append(None) - points.append(d_points[-1].loc) - - - #--calculate width_vectors: left-side- and right-side-points ---------------- - # 1.level:IF width --------------------------------------- - if (settings.var['width_on'] and wide_segment_exist) or settings.var['width_force']: - #new_b8 points.append(d_points[0].loc) #temporarly add first vertex at the end (for better loop) - dist_min05 = 0.5 * settings.var['dist_min'] #minimal width for zero_witdh - - pointsLs = [] # list of left-start-points - pointsLe = [] # list of left-end-points - pointsRs = [] # list of right-start-points - pointsRe = [] # list of right-end-points - pointsW = [] # list of all border-points - #rotMatr90 = Mathutils.Matrix(rotate 90 degree around Z-axis) = normalvectorXY - rotMatr90 = Mathutils.Matrix([0, -1, 0], [1, 0, 0], [0, 0, 1]) - bulg_in = False - last_bulg_point = False - for i in xrange(len(points)-1): - point1 = points[i] - point2 = points[i+1] - point1vec = Mathutils.Vector(point1) - point2vec = Mathutils.Vector(point2) - swidth05 = swidths[i] * 0.5 - ewidth05 = ewidths[i] * 0.5 - if swidth05 == 0: swidth05 = dist_min05 - if ewidth05 == 0: ewidth05 = dist_min05 - normal_vector = rotMatr90 * (point2vec-point1vec).normalize() - if last_bulg_point: - last_bulg_point = False - bulg_in = True - elif bulg_points[i] != None: - centerVec = Mathutils.Vector(bulg_points[i]) - if bulg_points[i+1] == None: last_bulg_point = True - bulg_in = True - else: bulg_in = False - - if bulg_in: - #makes clean intersections for arc-segments - radius1vec = point1vec - centerVec - radius2vec = point2vec - centerVec - angle = Mathutils.AngleBetweenVecs(normal_vector, radius1vec) - if angle < 90.0: - normal_vector1 = radius1vec.normalize() - normal_vector2 = radius2vec.normalize() - else: - normal_vector1 = - radius1vec.normalize() - normal_vector2 = - radius2vec.normalize() - - swidth05vec = swidth05 * normal_vector1 - ewidth05vec = ewidth05 * normal_vector2 - pointsLs.append(point1vec + swidth05vec) #vertex left start - pointsRs.append(point1vec - swidth05vec) #vertex right start - pointsLe.append(point2vec + ewidth05vec) #vertex left end - pointsRe.append(point2vec - ewidth05vec) #vertex right end - - else: - swidth05vec = swidth05 * normal_vector - ewidth05vec = ewidth05 * normal_vector - pointsLs.append(point1vec + swidth05vec) #vertex left start - pointsRs.append(point1vec - swidth05vec) #vertex right start - pointsLe.append(point2vec + ewidth05vec) #vertex left end - pointsRe.append(point2vec - ewidth05vec) #vertex right end - - # additional last point is also calculated - #pointsLs.append(pointsLs[0]) - #pointsRs.append(pointsRs[0]) - #pointsLe.append(pointsLe[0]) - #pointsRe.append(pointsRe[0]) - - pointsLc, pointsRc = [], [] # lists Left/Right corners = intersection points - - # 2.level:IF width and corner-trim - if settings.var['pl_trim_on']: #optional clean corner-intersections - # loop preset - # set STARTpoints of the first point points[0] - if not self.closed: - pointsLc.append(pointsLs[0]) - pointsRc.append(pointsRs[0]) - else: - pointsLs.append(pointsLs[0]) - pointsRs.append(pointsRs[0]) - pointsLe.append(pointsLe[0]) - pointsRe.append(pointsRe[0]) - points.append(points[0]) - vecL3, vecL4 = pointsLs[0], pointsLe[0] - vecR3, vecR4 = pointsRs[0], pointsRe[0] - lenL = len(pointsLs)-1 - #print 'deb:drawPoly2d pointsLs():\n', pointsLs #---------------- - #print 
'deb:drawPoly2d lenL, len.pointsLs():', lenL,',', len(pointsLs) #---------------- - bulg_in = False - last_bulg_point = False - - # LOOP: makes (ENDpoints[i],STARTpoints[i+1]) - for i in xrange(lenL): - if bulg_points[i] != None: - if bulg_points[i+1] == None: #makes clean intersections for arc-segments - last_bulg_point = True - if not bulg_in: - bulg_in = True - #pointsLc.extend((points[i], pointsLs[i])) - #pointsRc.extend((points[i], pointsRs[i])) - vecL1, vecL2 = vecL3, vecL4 - vecR1, vecR2 = vecR3, vecR4 - vecL3, vecL4 = pointsLs[i+1], pointsLe[i+1] - vecR3, vecR4 = pointsRs[i+1], pointsRe[i+1] - #compute left- and right-cornerpoints - #cornerpointL = Geometry.LineIntersect2D(vec1, vec2, vec3, vec4) - cornerpointL = Mathutils.LineIntersect(vecL1, vecL2, vecL3, vecL4) - cornerpointR = Mathutils.LineIntersect(vecR1, vecR2, vecR3, vecR4) - #print 'deb:drawPoly2d cornerpointL: ', cornerpointL #------------- - #print 'deb:drawPoly2d cornerpointR: ', cornerpointR #------------- - - # IF not cornerpoint THEN check if identic start-endpoints (=collinear segments) - if cornerpointL == None or cornerpointR == None: - if vecL2 == vecL3 and vecR2 == vecR3: - #print 'deb:drawPoly2d pointVec: ####### identic ##########' #---------------- - pointsLc.append(pointsLe[i]) - pointsRc.append(pointsRe[i]) - else: - pointsLc.extend((pointsLe[i],points[i+1],pointsLs[i+1])) - pointsRc.extend((pointsRe[i],points[i+1],pointsRs[i+1])) - else: - cornerpointL = cornerpointL[0] # because Mathutils.LineIntersect() -> (pkt1,pkt2) - cornerpointR = cornerpointR[0] - #print 'deb:drawPoly2d cornerpointL: ', cornerpointL #------------- - #print 'deb:drawPoly2d cornerpointR: ', cornerpointR #------------- - pointVec0 = Mathutils.Vector(points[i]) - pointVec = Mathutils.Vector(points[i+1]) - pointVec2 = Mathutils.Vector(points[i+2]) - #print 'deb:drawPoly2d pointVec0: ', pointVec0 #------------- - #print 'deb:drawPoly2d pointVec: ', pointVec #------------- - #print 'deb:drawPoly2d pointVec2: ', pointVec2 #------------- - # if diststance(cornerL-center-cornerR) < limiter * (seg1_endWidth + seg2_startWidth) - max_cornerDist = (vecL2 - vecR2).length + (vecL3 - vecR3).length - is_cornerDist = (cornerpointL - pointVec).length + (cornerpointR - pointVec).length - #corner_angle = Mathutils.AngleBetweenVecs((pointVec0 - pointVec),(pointVec - pointVec2)) - #print 'deb:drawPoly2d corner_angle: ', corner_angle #------------- - #print 'deb:drawPoly2d max_cornerDist, is_cornerDist: ', max_cornerDist, is_cornerDist #------------- - #if abs(corner_angle) < 90.0: - # intersection --------- limited by TRIM_LIMIT (1.0 - 5.0) - if is_cornerDist < max_cornerDist * settings.var['pl_trim_max']: - # clean corner intersection - pointsLc.append(cornerpointL) - pointsRc.append(cornerpointR) - else: - pointsLc.extend((pointsLe[i],points[i+1],pointsLs[i+1])) - pointsRc.extend((pointsRe[i],points[i+1],pointsRs[i+1])) - if not self.closed: - pointsLc.append(pointsLe[-1]) - pointsRc.append(pointsRe[-1]) - - # 2.level:IF width but no-trim - else: - # loop preset - # set STARTpoints of the first point points[0] - if not self.closed: - pointsLc.append(pointsLs[0]) - pointsRc.append(pointsRs[0]) - else: - pointsLs.append(pointsLs[0]) - pointsRs.append(pointsRs[0]) - pointsLe.append(pointsLe[0]) - pointsRe.append(pointsRe[0]) - points.append(points[0]) - vecL3, vecL4 = pointsLs[0], pointsLe[0] - vecR3, vecR4 = pointsRs[0], pointsRe[0] - lenL = len(pointsLs)-1 - #print 'deb:drawPoly2d pointsLs():\n', pointsLs #---------------- - #print 'deb:drawPoly2d lenL, 
len.pointsLs():', lenL,',', len(pointsLs) #---------------- - bulg_in = False - last_bulg_point = False - - # LOOP: makes (ENDpoints[i],STARTpoints[i+1]) - for i in xrange(lenL): - vecL1, vecL2 = vecL3, vecL4 - vecR1, vecR2 = vecR3, vecR4 - vecL3, vecL4 = pointsLs[i+1], pointsLe[i+1] - vecR3, vecR4 = pointsRs[i+1], pointsRe[i+1] - if bulg_points[i] != None: - #compute left- and right-cornerpoints - cornerpointL = Mathutils.LineIntersect(vecL1, vecL2, vecL3, vecL4) - cornerpointR = Mathutils.LineIntersect(vecR1, vecR2, vecR3, vecR4) - pointsLc.append(cornerpointL[0]) - pointsRc.append(cornerpointR[0]) - else: # IF non-bulg - pointsLc.extend((pointsLe[i],points[i+1],pointsLs[i+1])) - pointsRc.extend((pointsRe[i],points[i+1],pointsRs[i+1])) - if not self.closed: - pointsLc.append(pointsLe[-1]) - pointsRc.append(pointsRe[-1]) - - len1 = len(pointsLc) - #print 'deb:drawPoly2d len1:', len1 #----------------------- - #print 'deb:drawPoly2d len1 len(pointsLc),len(pointsRc):', len(pointsLc),len(pointsRc) #----------------------- - pointsW = pointsLc + pointsRc # all_points_List = left_side + right_side - #print 'deb:drawPoly2d pointsW():\n', pointsW #---------------- - - # 2.level:IF width and thickness --------------------- - if thic != 0: - thic_pointsW = [] - thic_pointsW.extend([[point[0], point[1], point[2]+thic] for point in pointsW]) - if thic < 0.0: - thic_pointsW.extend(pointsW) - pointsW = thic_pointsW - else: - pointsW.extend(thic_pointsW) - faces = [] - f_start, f_end = [], [] - f_bottom = [[num, num+1, len1+num+1, len1+num] for num in xrange(len1-1)] - f_top = [[num, len1+num, len1+num+1, num+1] for num in xrange(len1+len1, len1+len1+len1-1)] - f_left = [[num, len1+len1+num, len1+len1+num+1, num+1] for num in xrange(len1-1)] - f_right = [[num, num+1, len1+len1+num+1, len1+len1+num] for num in xrange(len1, len1+len1-1)] - - if self.closed: - f_bottom.append([len1-1, 0, len1, len1+len1-1]) #bottom face - f_top.append( [len1+len1+len1-1, len1+len1+len1+len1-1, len1+len1+len1, len1+len1+0]) #top face - f_left.append( [0, len1-1, len1+len1+len1-1, len1+len1]) #left face - f_right.append( [len1, len1+len1+len1, len1+len1+len1+len1-1, len1+len1-1]) #right face - else: - f_start = [[0, len1, len1+len1+len1, len1+len1]] - f_end = [[len1+len1-1, 0+len1-1, len1+len1+len1-1, len1+len1+len1+len1-1]] - - faces = f_left + f_right + f_bottom + f_top + f_start + f_end - #faces = f_bottom + f_top - #faces = f_left + f_right + f_start + f_end - #print 'deb:faces_list:\n', faces #----------------------- - if M_OBJ: obname, me, ob = makeNewObject() - else: - me = Mesh.New(obname) # create a new mesh - ob = SCENE.objects.new(me) # create a new mesh_object - me.verts.extend(pointsW) # add vertices to mesh - me.faces.extend(faces) # add faces to the mesh - - # each MeshSide becomes vertexGroup for easier material assignment --------------------- - # The mesh must first be linked to an object so the method knows which object to update. - # This is because vertex groups in Blender are stored in the object -- not in the mesh, - # which may be linked to more than one object. 
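
As the comment above notes, vertex groups are stored on the object rather than on the mesh, so the mesh must be linked to an object before assignVertsToGroup() is called. A minimal sketch of that order, assuming the script's own Mesh and SCENE globals; the object and group names here are illustrative:

    me = Mesh.New('vg_demo')                       # new mesh data
    ob = SCENE.objects.new(me)                     # link it to an object first
    me.verts.extend([(0,0,0), (1,0,0), (1,1,0), (0,1,0)])
    me.faces.extend([[0, 1, 2, 3]])
    me.addVertGroup('side.top')                    # the group lives on the object
    me.assignVertsToGroup('side.top', [0, 1, 2, 3], 1.0, Mesh.AssignModes.REPLACE)
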
- if settings.var['vGroup_on'] and not M_OBJ: - # each MeshSide becomes vertexGroup for easier material assignment --------------------- - replace = Mesh.AssignModes.REPLACE #or .AssignModes.ADD - vg_left, vg_right, vg_top, vg_bottom = [], [], [], [] - for v in f_left: vg_left.extend(v) - for v in f_right: vg_right.extend(v) - for v in f_top: vg_top.extend(v) - for v in f_bottom: vg_bottom.extend(v) - me.addVertGroup('side.left') ; me.assignVertsToGroup('side.left', vg_left, 1.0, replace) - me.addVertGroup('side.right') ; me.assignVertsToGroup('side.right', vg_right, 1.0, replace) - me.addVertGroup('side.top') ; me.assignVertsToGroup('side.top', vg_top, 1.0, replace) - me.addVertGroup('side.bottom'); me.assignVertsToGroup('side.bottom',vg_bottom, 1.0, replace) - if not self.closed: - me.addVertGroup('side.start'); me.assignVertsToGroup('side.start', f_start[0], 1.0, replace) - me.addVertGroup('side.end') ; me.assignVertsToGroup('side.end', f_end[0], 1.0, replace) - - if settings.var['meshSmooth_on']: # left and right side become smooth ---------------------- - #if self.spline or self.curved: - smooth_len = len(f_left) + len(f_right) - for i in xrange(smooth_len): - me.faces[i].smooth = True - #me.Modes(AUTOSMOOTH) - - # 2.level:IF width, but no-thickness --------------------- - else: - faces = [] - faces = [[num, len1+num, len1+num+1, num+1] for num in xrange(len1 - 1)] - if self.closed: - faces.append([len1, 0, len1-1, len1+len1-1]) - if M_OBJ: obname, me, ob = makeNewObject() - else: - me = Mesh.New(obname) # create a new mesh - ob = SCENE.objects.new(me) # create a new mesh_object - me.verts.extend(pointsW) # add vertices to mesh - me.faces.extend(faces) # add faces to the mesh - - - # 1.level:IF no-width, but thickness --------------------- - elif thic != 0: - len1 = len(points) - thic_points = [] - thic_points.extend([[point[0], point[1], point[2]+thic] for point in points]) - if thic < 0.0: - thic_points.extend(points) - points = thic_points - else: - points.extend(thic_points) - faces = [] - faces = [[num, num+1, num+len1+1, num+len1] for num in xrange(len1 - 1)] - if self.closed: - faces.append([len1-1, 0, len1, 2*len1-1]) - if M_OBJ: obname, me, ob = makeNewObject() - else: - me = Mesh.New(obname) # create a new mesh - ob = SCENE.objects.new(me) # create a new mesh_object - me.verts.extend(points) # add vertices to mesh - me.faces.extend(faces) # add faces to the mesh - - if settings.var['meshSmooth_on']: # left and right side become smooth ---------------------- - #if self.spline or self.curved: - for i in xrange(len(faces)): - me.faces[i].smooth = True - #me.Modes(AUTOSMOOTH) - - # 1.level:IF no-width and no-thickness --------------------- - else: - edges = [[num, num+1] for num in xrange(len(points)-1)] - if self.closed: - edges.append([len(points)-1, 0]) - if M_OBJ: obname, me, ob = makeNewObject() - else: - me = Mesh.New(obname) # create a new mesh - ob = SCENE.objects.new(me) # create a new mesh_object - me.verts.extend(points) # add vertices to mesh - me.edges.extend(edges) # add edges to the mesh - - transform(self.extrusion, 0, ob) - #print 'deb:polyline.draw.END:----------------' #----------------------- - return ob - - - - -class Vertex(object): #----------------------------------------------------------------- - """Generic vertex object used by POLYLINEs, (and maybe others). - also used by class_LWPOLYLINEs but without obj-parameter - """ - - def __init__(self, obj=None): - """Initializes vertex data. - - The optional obj arg is an entity object of type vertex. 
- """ - #print 'deb:Vertex.init.START:----------------' #----------------------- - self.loc = [0,0,0] - self.face = [] - self.swidth = None #0 - self.ewidth = None #0 - self.bulge = 0 - self.tangent = False - self.weight = 1.0 - if obj is not None: - if not obj.type == 'vertex': - raise TypeError, "Wrong type %s for vertex object!" %obj.type - self.type = obj.type -# self.data = obj.data[:] - self.get_props(obj) - else: - pass - #print 'deb:Vertex.init.END:----------------' #------------------------ - - - def get_props(self, data): - """Gets coords for a VERTEX type object. - - Each vert can have a number of properties. - Verts should be coded as - 10:xvalue - 20:yvalue - 40:startwidth or 0 - 41:endwidth or 0 - 42:bulge or 0 - """ - self.x = getit(data, 10, None) - self.y = getit(data, 20, None) - self.z = getit(data, 30, None) - - self.flags = getit(data, 70, 0) # flags - self.curved = self.flags&1 # Bezier-curve-fit:additional-vertex - self.curved_t = self.flags&2 # Bezier-curve-fit:tangent exists - self.spline = self.flags&8 # NURBSpline-fit:additional-vertex - self.spline_c = self.flags&16 # NURBSpline-fit:control-vertex - self.poly3d = self.flags&32 # polyline3d:control-vertex - self.plmesh = self.flags&64 # polymesh3d:control-vertex - self.plface = self.flags&128 # polyface - - # if PolyFace.Vertex with Face_definition - if self.curved_t: - self.curve_tangent = getit(data, 50, None) # curve_tangent - if not self.curve_tangent==None: - self.tangent = True - #elif self.spline_c: # NURBSpline:control-vertex - # self.weight = getit(data, 41, 1.0) # weight od control point - - elif self.plface and not self.plmesh: - v1 = getit(data, 71, 0) # polyface:Face.vertex 1. - v2 = getit(data, 72, 0) # polyface:Face.vertex 2. - v3 = getit(data, 73, 0) # polyface:Face.vertex 3. - v4 = getit(data, 74, None) # polyface:Face.vertex 4. - self.face = [abs(v1)-1,abs(v2)-1,abs(v3)-1] - if v4 != None: - if abs(v4) != abs(v1): - self.face.append(abs(v4)-1) - else: #--parameter for polyline2d - self.swidth = getit(data, 40, None) # start width - self.ewidth = getit(data, 41, None) # end width - self.bulge = getit(data, 42, 0) # bulge of segment - - - def __len__(self): - return 3 - - - def __getitem__(self, key): - return self.loc[key] - - - def __setitem__(self, key, value): - if key in [0,1,2]: - self.loc[key] - - - def __iter__(self): - return self.loc.__iter__() - - - def __str__(self): - return str(self.loc) - - - def __repr__(self): - return "Vertex %s, swidth=%s, ewidth=%s, bulge=%s, face=%s" %(self.loc, self.swidth, self.ewidth, self.bulge, self.face) - - - def getx(self): - return self.loc[0] - def setx(self, value): - self.loc[0] = value - x = property(getx, setx) - - - def gety(self): - return self.loc[1] - def sety(self, value): - self.loc[1] = value - y = property(gety, sety) - - - def getz(self): - return self.loc[2] - def setz(self, value): - self.loc[2] = value - z = property(getz, setz) - - - -class Spline(Polyline): #----------------------------------------------------------------- - """Class for objects representing dxf SPLINEs. - """ - """Expects an entity object of type spline as input. 
-100 - Subclass marker (AcDbSpline) -210,220, 230 - Normal vector (omitted if the spline is nonplanar) X,Y,Z values of normal vector -70 - Spline flag (bit coded): - 1 = Closed spline - 2 = Periodic spline - 4 = Rational spline - 8 = Planar - 16 = Linear (planar bit is also set) -71 - Degree of the spline curve -72 - Number of knots -73 - Number of control points -74 - Number of fit points (if any) -42 - Knot tolerance (default = 0.0000001) -43 - Control-point tolerance (default = 0.0000001) -44 - Fit tolerance (default = 0.0000000001) -12,22,32 - Start tangent--may be omitted (in WCS). X,Y,Z values of start tangent--may be omitted (in WCS). -13,23, 33 - End tangent--may be omitted (in WCS). X,Y,Z values of end tangent--may be omitted (in WCS) -40 - Knot value (one entry per knot) -41 - Weight (if not 1); with multiple group pairs, are present if all are not 1 -10,20, 30 - Control points (in WCS) one entry per control point. -DXF: X value; APP: 3D point, Y and Z values of control points (in WCS) (one entry per control point) -11,21, 31 - Fit points (in WCS) one entry per fit point. - X,Y,Z values of fit points (in WCS) (one entry per fit point) - """ - def __init__(self, obj): - #print 'deb:Spline.START:----------------' #------------------------ - if not obj.type == 'spline': - raise TypeError, "Wrong type %s for spline object!" %obj.type - self.type = obj.type -# self.data = obj.data[:] - - # required data - self.num_points = obj.get_type(73)[0] - - # optional data (with defaults) - self.space = getit(obj, 67, 0) - - self.color_index = getit(obj, 62, BYLAYER) - - #self.elevation = getit(obj, 30, 0) - self.thic = 0 # getit(obj, 39, 0) - - width = 0 - self.swidth = width # default start width - self.ewidth = width # default end width - - self.flags = getit(obj, 70, 0) - self.closed = self.flags & 1 # closed spline - self.period = self.flags & 2 # Periodic spline - self.ration = self.flags & 4 # Rational spline - self.planar = self.flags & 8 # Planar - self.linear = self.flags & 16 # Linear (and Planar) - - self.curvNoFitted = False - self.curvQuadrati = False - self.curvCubicBsp = False - self.curvBezier = False - self.degree = getit(obj, 71, 0) # Degree of the spline curve - if self.degree == 0: self.curvNoFitted = True - elif self.degree == 1: self.curvQuadrati = True - elif self.degree == 2: self.curvCubicBsp = True - #elif self.degree == 3: self.curvBezier = True - #elif self.degree == 3: self.spline = True - - self.knotpk_len = getit(obj, 72, 0) # Number of knots - self.ctrlpk_len = getit(obj, 73, 0) # Number of control points - self.fit_pk_len = getit(obj, 74, 0) # Number of fit points (if any) - - #TODO: import SPLINE as Bezier curve directly, possible? 
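
The weighted control points collected by this class are consumed by drawPolyCurve() above, where each point is handed to Blender as [x, y, z, weight]. A minimal sketch of that hand-off using the same Curve calls as drawPolyCurve(), assuming the script's Curve import; the data values are illustrative:

    cu = Curve.New('nurbs_demo')
    ctrl = [[0,0,0, 1.0], [1,2,0, 1.0], [3,1,0, 0.5]]   # [x, y, z, weight] per control point
    nurb = cu.appendNurb(ctrl[0])
    nurb.setType(4)                                     # 4 = NURBS, as in drawPolyCurve()
    for pt in ctrl[1:]:
        nurb.append(pt)
    nurb.flagU = 0 + 2                                  # open curve; 1 + 0 would close it
    cu.update()
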
- #print 'deb:Spline self.fit_pk_len=', self.fit_pk_len #------------------------ - #self.fit_pk_len = 0 # temp for debug - if self.fit_pk_len and settings.var['splines_as']==5: - self.spline = False - self.curved = True - else: - self.spline = True - self.curved = False - - self.knotpk_tol = getit(obj, 42, 0.0000001) # Knot tolerance (default = 0.0000001) - self.ctrlpk_tol = getit(obj, 43, 0.0000001) # Control-point tolerance (default = 0.0000001) - self.fit_pk_tol = getit(obj, 44, 0.0000000001) # Fit tolerance (default = 0.0000000001) - - self.layer = getit(obj, 8, None) - self.extrusion = get_extrusion(obj) - - self.pltype = 'spline' # spline is a 2D- or 3D-polyline - - self.points = self.get_points(obj.data) - #self.knots_val = self.get_knots_val(obj.data) # 40 - Knot value (one entry per knot) - #self.knots_wgh = self.get_knots_wgh(obj.data) # 41 - Weight (default 1) - - #print 'deb:Spline obj.data:\n', obj.data #------------------------ - #print 'deb:Spline self.points:\n', self.points #------------------------ - #print 'deb:Spline.ENDinit:----------------' #------------------------ - - - def get_points(self, data): - """Gets points for a spline type object. - - Splines have fixed number of verts, and - each vert can have a number of properties. - Verts should be coded as - 10:xvalue - 20:yvalue - for each vert - """ - point = None - points = [] - pointend = None - #point = Vertex() - if self.spline: # NURBSpline definition - for item in data: - #print 'deb:Spline.get_points spilne_item:', item #------------------------ - if item[0] == 10: # control point - if point: points.append(point) - point = Vertex() - point.curved = True - point.x = item[1] - elif item[0] == 20: # 20 = y - point.y = item[1] - elif item[0] == 30: # 30 = z - point.z = item[1] - elif item[0] == 41: # 41 = weight - point.weight = item[1] - #print 'deb:Spline.get_points control point:', point #------------------------ - - elif self.curved: # Bezier definition - for item in data: - #print 'deb:Spline.get_points curved_item:', item #------------------------ - if item[0] == 11: # fit point - if point: points.append(point) - point = Vertex() - point.tangent = False - point.x = item[1] - elif item[0] == 21: # 20 = y - point.y = item[1] - elif item[0] == 31: # 30 = z - point.z = item[1] - #print 'deb:Spline.get_points fit point:', point #------------------------ - - elif item[0] == 12: # start tangent - if point: points.append(point) - point = Vertex() - point.tangent = True - point.x = item[1] - elif item[0] == 22: # = y - point.y = item[1] - elif item[0] == 32: # = z - point.z = item[1] - #print 'deb:Spline.get_points fit begtangent:', point #------------------------ - - elif item[0] == 13: # end tangent - if point: points.append(point) - pointend = Vertex() - pointend.tangent = True - pointend.x = item[1] - elif item[0] == 23: # 20 = y - pointend.y = item[1] - elif item[0] == 33: # 30 = z - pointend.z = item[1] - #print 'deb:Spline.get_points fit endtangent:', pointend #------------------------ - points.append(point) - if self.curved and pointend: - points.append(pointend) - #print 'deb:Spline points:\n', points #------------------------ - return points - - def __repr__(self): - return "%s: layer - %s, points - %s" %(self.__class__.__name__, self.layer, self.points) - - - -class LWpolyline(Polyline): #------------------------------------------------------------- - """Class for objects representing dxf LWPOLYLINEs. - """ - def __init__(self, obj): - """Expects an entity object of type lwpolyline as input. 
- """ - #print 'deb:LWpolyline.START:----------------' #------------------------ - if not obj.type == 'lwpolyline': - raise TypeError, "Wrong type %s for polyline object!" %obj.type - self.type = obj.type -# self.data = obj.data[:] - - # required data - self.num_points = obj.get_type(90)[0] - - # optional data (with defaults) - self.space = getit(obj, 67, 0) - self.elevation = getit(obj, 38, 0) - self.thic = getit(obj, 39, 0) - self.color_index = getit(obj, 62, BYLAYER) - width = getit(obj, 43, 0) - self.swidth = width # default start width - self.ewidth = width # default end width - #print 'deb:LWpolyline width=', width #------------------------ - #print 'deb:LWpolyline elevation=', self.elevation #------------------------ - - self.flags = getit(obj, 70, 0) - self.closed = self.flags&1 # byte coded, 1 = closed, 128 = plinegen - - self.layer = getit(obj, 8, None) - self.extrusion = get_extrusion(obj) - - self.points = self.get_points(obj.data) - - self.pltype = 'poly2d' # LW-polyline is a 2D-polyline - self.spline = False - self.curved = False - - #print 'deb:LWpolyline.obj.data:\n', obj.data #------------------------ - #print 'deb:LWpolyline.ENDinit:----------------' #------------------------ - - - def get_points(self, data): - """Gets points for a polyline type object. - - LW-Polylines have no fixed number of verts, and - each vert can have a number of properties. - Verts should be coded as - 10:xvalue - 20:yvalue - 40:startwidth or 0 - 41:endwidth or 0 - 42:bulge or 0 - for each vert - """ - num = self.num_points - point = None - points = [] - for item in data: - if item[0] == 10: # 10 = x - if point: - points.append(point) - point = Vertex() - point.x = item[1] - point.z = self.elevation - elif item[0] == 20: # 20 = y - point.y = item[1] - elif item[0] == 40: # 40 = start width - point.swidth = item[1] - elif item[0] == 41: # 41 = end width - point.ewidth = item[1] - elif item[0] == 42: # 42 = bulge - point.bulge = item[1] - points.append(point) - return points - - - def __repr__(self): - return "%s: layer - %s, points - %s" %(self.__class__.__name__, self.layer, self.points) - - -class Text: #----------------------------------------------------------------- - """Class for objects representing dxf TEXT. - """ - def __init__(self, obj): - """Expects an entity object of type text as input. - """ - if not obj.type == 'text': - raise TypeError, "Wrong type %s for text object!" 
%obj.type - self.type = obj.type -# self.data = obj.data[:] - - # required data - self.height = 1.7 * obj.get_type(40)[0] #text.height - self.value = obj.get_type(1)[0] #The text string value - - # optional data (with defaults) - self.space = getit(obj, 67, 0) - self.color_index = getit(obj, 62, BYLAYER) - self.thic = getit(obj, 39, 0) - - self.rotation = getit(obj, 50, 0) # radians - self.width_factor = getit(obj, 41, 1) # Scaling factor along local x axis - self.oblique = getit(obj, 51, 0) # oblique angle: skew in degrees -90 <= oblique <= 90 - - #self.style = getit(obj, 7, 'STANDARD') # --todo---- Text style name (optional, default = STANDARD) - - #Text generation flags (optional, default = 0): - #2 = backward (mirrored in X), - #4 = upside down (mirrored in Y) - self.flags = getit(obj, 71, 0) - self.mirrorX, self.mirrorY = 1.0, 1.0 - if self.flags&2: self.mirrorX = - 1.0 - if self.flags&4: self.mirrorY = - 1.0 - - # vertical.alignment: 0=baseline, 1=bottom, 2=middle, 3=top - self.valignment = getit(obj, 73, 0) - #Horizontal text justification type (optional, default = 0) integer codes (not bit-coded) - #0=left, 1=center, 2=right - #3=aligned, 4=middle, 5=fit - self.halignment = getit(obj, 72, 0) - - self.layer = getit(obj, 8, None) - self.loc1, self.loc2 = self.get_loc(obj) - if self.loc2[0] != None and self.halignment != 5: - self.loc = self.loc2 - else: - self.loc = self.loc1 - self.extrusion = get_extrusion(obj) - - - def get_loc(self, data): - """Gets adjusted location for text type objects. - - If group 72 and/or 73 values are nonzero then the first alignment point values - are ignored and AutoCAD calculates new values based on the second alignment - point and the length and height of the text string itself (after applying the - text style). If the 72 and 73 values are zero or missing, then the second - alignment point is meaningless. - I don't know how to calc text size... - """ - # bottom left x, y, z and justification x, y, z = 0 - #x, y, z, jx, jy, jz = 0, 0, 0, 0, 0, 0 - x = getit(data, 10, None) #First alignment point (in OCS). - y = getit(data, 20, None) - z = getit(data, 30, 0.0) - jx = getit(data, 11, None) #Second alignment point (in OCS). - jy = getit(data, 21, None) - jz = getit(data, 31, 0.0) - return [x, y, z],[jx, jy, jz] - - - def __repr__(self): - return "%s: layer - %s, value - %s" %(self.__class__.__name__, self.layer, self.value) - - - def draw(self, settings): - """for TEXTs: generate Blender_geometry. 
- """ - obname = 'tx_%s' %self.layer # create object name from layer name - obname = obname[:MAX_NAMELENGTH] - txt = Text3d.New(obname) - ob = SCENE.objects.new(txt) # create a new text_object - - txt.setText(self.value) - txt.setSize(1.0) #Blender<2.45 accepts only (0.0 - 5.0) - #txt.setSize(self.height) - #txt.setWidth(self.bold) - #setLineSeparation(sep) - txt.setShear(self.oblique/90) - - thic = set_thick(self.thic, settings) - if thic != 0.0: - thic = self.thic * 0.5 - self.loc[2] += thic - txt.setExtrudeDepth(1.0) #Blender<2.45 accepts only (0.1 - 10.0) - if self.halignment == 0: - align = Text3d.LEFT - elif self.halignment == 1: - align = Text3d.MIDDLE - elif self.halignment == 2: - align = Text3d.RIGHT - else: - align = Text3d.LEFT - txt.setAlignment(align) - - if self.valignment == 1: - txt.setYoffset(0.0) - elif self.valignment == 2: - txt.setYoffset(- self.height * 0.5) - elif self.valignment == 3: - txt.setYoffset(- self.height) - - # move the object center to the text location - ob.loc = tuple(self.loc) - transform(self.extrusion, self.rotation, ob) - - # flip it and scale it to the text width - ob.SizeX *= self.height * self.width_factor * self.mirrorX - ob.SizeY *= self.height * self.mirrorY - if thic != 0.0: ob.SizeZ *= abs(thic) - return ob - - - -def set_thick(thickness, settings): - """Set thickness relative to settings variables. - - Set thickness relative to settings variables: - 'thick_on','thick_force','thick_min'. - Accepted also minus values of thickness - python trick: sign(x)=cmp(x,0) - """ - if settings.var['thick_force']: - if settings.var['thick_on']: - if abs(thickness) < settings.var['thick_min']: - thic = settings.var['thick_min'] * cmp(thickness,0) - else: thic = thickness - else: thic = settings.var['thick_min'] - else: - if settings.var['thick_on']: thic = thickness - else: thic = 0.0 - return thic - - - - -class Mtext: #----------------------------------------------------------------- - """Class for objects representing dxf MTEXT. - """ - - def __init__(self, obj): - """Expects an entity object of type mtext as input. - """ - if not obj.type == 'mtext': - raise TypeError, "Wrong type %s for mtext object!" %obj.type - self.type = obj.type -# self.data = obj.data[:] - - # required data - self.height = obj.get_type(40)[0] - self.width = obj.get_type(41)[0] - self.alignment = obj.get_type(71)[0] # alignment 1=TL, 2=TC, 3=TR, 4=ML, 5=MC, 6=MR, 7=BL, 8=BC, 9=BR - self.value = self.get_text(obj) # The text string value - - # optional data (with defaults) - self.space = getit(obj, 67, 0) - self.color_index = getit(obj, 62, BYLAYER) - self.rotation = getit(obj, 50, 0) # radians - - self.width_factor = getit(obj, 42, 1) # Scaling factor along local x axis - self.line_space = getit(obj, 44, 1) # percentage of default - - self.layer = getit(obj, 8, None) - self.loc = self.get_loc(obj) - self.extrusion = get_extrusion(obj) - - - def get_text(self, data): - """Reconstructs mtext data from dxf codes. - """ - primary = '' - secondary = [] - for item in data: - if item[0] == 1: # There should be only one primary... - primary = item[1] - elif item[0] == 3: # There may be any number of extra strings (in order) - secondary.append(item[1]) - if not primary: - #raise ValueError, "Empty Mtext Object!" - string = "Empty Mtext Object!" - if not secondary: - string = primary.replace(r'\P', '\n') - else: - string = ''.join(secondary)+primary - string = string.replace(r'\P', '\n') - return string - - - def get_loc(self, data): - """Gets location for a mtext type objects. 
- - Mtext objects have only one point indicating - """ - loc = [0, 0, 0] - loc[0] = getit(data, 10, None) - loc[1] = getit(data, 20, None) - loc[2] = getit(data, 30, 0.0) - return loc - - - def __repr__(self): - return "%s: layer - %s, value - %s" %(self.__class__.__name__, self.layer, self.value) - - - def draw(self, settings): - """for MTEXTs: generate Blender_geometry. - """ - # Now Create an object - obname = 'tm_%s' %self.layer # create object name from layer name - obname = obname[:MAX_NAMELENGTH] - txt = Text3d.New(obname) - ob = SCENE.objects.new(txt) # create a new text_object - - txt.setSize(1) - # Blender doesn't give access to its text object width currently - # only to the text3d's curve width... - #txt.setWidth(text.width/10) - txt.setLineSeparation(self.line_space) - txt.setExtrudeDepth(0.5) - txt.setText(self.value) - - # scale it to the text size - ob.SizeX = self.height * self.width_factor - ob.SizeY = self.height - ob.SizeZ = self.height - - # move the object center to the text location - ob.loc = tuple(self.loc) - transform(self.extrusion, self.rotation, ob) - - return ob - - -class Circle: #----------------------------------------------------------------- - """Class for objects representing dxf CIRCLEs. - """ - - def __init__(self, obj): - """Expects an entity object of type circle as input. - """ - if not obj.type == 'circle': - raise TypeError, "Wrong type %s for circle object!" %obj.type - self.type = obj.type -# self.data = obj.data[:] - - # required data - self.radius = obj.get_type(40)[0] - - # optional data (with defaults) - self.space = getit(obj, 67, 0) - self.thic = getit(obj, 39, 0) - self.color_index = getit(obj, 62, BYLAYER) - - self.layer = getit(obj, 8, None) - self.loc = self.get_loc(obj) - self.extrusion = get_extrusion(obj) - - - - def get_loc(self, data): - """Gets the center location for circle type objects. - - Circles have a single coord location. - """ - loc = [0, 0, 0] - loc[0] = getit(data, 10, None) - loc[1] = getit(data, 20, None) - loc[2] = getit(data, 30, 0.0) - return loc - - - - def __repr__(self): - return "%s: layer - %s, radius - %s" %(self.__class__.__name__, self.layer, self.radius) - - - def draw(self, settings): - """for CIRCLE: generate Blender_geometry. 
- """ - obname = 'ci_%s' %self.layer # create object name from layer name - obname = obname[:MAX_NAMELENGTH] - radius = self.radius - - thic = set_thick(self.thic, settings) - width = 0.0 - if settings.var['lines_as'] == 4: # as thin_box - thic = settings.var['thick_min'] - width = settings.var['width_min'] - if settings.var['lines_as'] == 3: # as thin cylinder - cyl_rad = 0.5 * settings.var['width_min'] - - if settings.var['lines_as'] == 5: # draw CIRCLE as curve ------------- - arc_res = settings.var['curve_arc'] - #arc_res = 3 - start, end = 0.0, 360.0 - VectorTriples = calcArc(None, radius, start, end, arc_res, True) - c = Curve.New(obname) # create new curve data - curve = c.appendNurb(BezTriple.New(VectorTriples[0])) - for p in VectorTriples[1:-1]: - curve.append(BezTriple.New(p)) - for point in curve: - point.handleTypes = [FREE, FREE] - point.radius = 1.0 - curve.flagU = 1 # 1 sets the curve cyclic=closed - if settings.var['fill_on']: - c.setFlag(6) # 2+4 set top and button caps - else: - c.setFlag(c.getFlag() & ~6) # dont set top and button caps - - c.setResolu(settings.var['curve_res']) - c.update() - - #--todo-----to check--------------------------- - ob = SCENE.objects.new(c) # create a new curve_object - ob.loc = tuple(self.loc) - if thic != 0.0: #hack: Blender<2.45 curve-extrusion - thic = thic * 0.5 - c.setExt1(1.0) # curve-extrusion accepts only (0.0 - 2.0) - ob.LocZ = thic + self.loc[2] - transform(self.extrusion, 0, ob) - if thic != 0.0: - ob.SizeZ *= abs(thic) - return ob - - else: # draw CIRCLE as mesh ----------------------------------------------- - if M_OBJ: obname, me, ob = makeNewObject() - else: - me = Mesh.New(obname) # create a new mesh - ob = SCENE.objects.new(me) # create a new mesh_object - # set a number of segments in entire circle - arc_res = settings.var['arc_res'] * sqrt(radius) / sqrt(settings.var['arc_rad']) - start, end = 0.0 , 360.0 - verts = calcArc(None, radius, start, end, arc_res, False) - verts = verts[:-1] #list without last point/edge (cause by circle it is equal to the first point) - #print 'deb:circleDraw: verts:', verts #--------------- - - if thic != 0: - len1 = len(verts) - thic_verts = [] - thic_verts.extend([[point[0], point[1], point[2]+thic] for point in verts]) - if thic < 0.0: - thic_verts.extend(verts) - verts = thic_verts - else: - verts.extend(thic_verts) - faces = [] - f_band = [[num, num+1, num+len1+1, num+len1] for num in xrange(len1 - 1)] - #f_band = [[num, num+1, num+len1+1, num+len1] for num in xrange(len1)] - f_band.append([len1 - 1, 0, len1, len1 + len1 -1]) - faces = f_band - smooth_len = len(f_band) - if settings.var['fill_on']: - if thic < 0.0: - verts.append([0,0,thic]) #center of top side - verts.append([0,0,0]) #center of bottom side - else: - verts.append([0,0,0]) #center of bottom side - verts.append([0,0,thic]) #center of top side - center1 = len(verts)-2 - center2 = len(verts)-1 - f_bottom = [[num+1, num, center1] for num in xrange(len1 - 1)] - f_bottom.append([0, len1 - 1, center1]) - f_top = [[num+len1, num+1+len1, center2] for num in xrange(len1 - 1)] - f_top.append([len1-1+len1, 0+len1, center2]) - #print 'deb:circleDraw:verts:', verts #--------------- - faces = f_band + f_bottom + f_top - #print 'deb:circleDraw:faces:', faces #--------------- - me.verts.extend(verts) # add vertices to mesh - me.faces.extend(faces) # add faces to the mesh - - if settings.var['meshSmooth_on']: # left and right side become smooth ---------------------- - for i in xrange(smooth_len): - me.faces[i].smooth = True - # each 
MeshSide becomes vertexGroup for easier material assignment --------------------- - if settings.var['vGroup_on'] and not M_OBJ: - # each MeshSide becomes vertexGroup for easier material assignment --------------------- - replace = Mesh.AssignModes.REPLACE #or .AssignModes.ADD - vg_band, vg_top, vg_bottom = [], [], [] - for v in f_band: vg_band.extend(v) - me.addVertGroup('side.band') ; me.assignVertsToGroup('side.band', vg_band, 1.0, replace) - - if settings.var['fill_on']: - for v in f_top: vg_top.extend(v) - for v in f_bottom: vg_bottom.extend(v) - me.addVertGroup('side.top') ; me.assignVertsToGroup('side.top', vg_top, 1.0, replace) - me.addVertGroup('side.bottom'); me.assignVertsToGroup('side.bottom',vg_bottom, 1.0, replace) - - else: # if thic == 0 - if settings.var['fill_on']: - len1 = len(verts) - verts.append([0,0,0]) #center of circle - center1 = len1 - faces = [] - faces.extend([[num, num+1, center1] for num in xrange(len1)]) - faces.append([len1-1, 0, center1]) - #print 'deb:circleDraw:verts:', verts #--------------- - #print 'deb:circleDraw:faces:', faces #--------------- - me.verts.extend(verts) # add vertices to mesh - me.faces.extend(faces) # add faces to the mesh - else: - me.verts.extend(verts) # add vertices to mesh - edges = [[num, num+1] for num in xrange(len(verts))] - edges[-1][1] = 0 # it points the "new" last edge to the first vertex - me.edges.extend(edges) # add edges to the mesh - - ob.loc = tuple(self.loc) - transform(self.extrusion, 0, ob) - return ob - - -class Arc: #----------------------------------------------------------------- - """Class for objects representing dxf ARCs. - """ - - def __init__(self, obj): - """Expects an entity object of type arc as input. - """ - if not obj.type == 'arc': - raise TypeError, "Wrong type %s for arc object!" %obj.type - self.type = obj.type -# self.data = obj.data[:] - - # required data - self.radius = obj.get_type(40)[0] - self.start_angle = obj.get_type(50)[0] - self.end_angle = obj.get_type(51)[0] - - # optional data (with defaults) - self.space = getit(obj, 67, 0) - self.thic = getit(obj, 39, 0) - self.color_index = getit(obj, 62, BYLAYER) - - self.layer = getit(obj, 8, None) - self.loc = self.get_loc(obj) - self.extrusion = get_extrusion(obj) - #print 'deb:Arc__init__: center, radius, start, end:\n', self.loc, self.radius, self.start_angle, self.end_angle #--------- - - - - def get_loc(self, data): - """Gets the center location for arc type objects. - - Arcs have a single coord location. - """ - loc = [0, 0, 0] - loc[0] = getit(data, 10, None) - loc[1] = getit(data, 20, None) - loc[2] = getit(data, 30, 0.0) - return loc - - - - def __repr__(self): - return "%s: layer - %s, radius - %s" %(self.__class__.__name__, self.layer, self.radius) - - - def draw(self, settings): - """for ARC: generate Blender_geometry. 
- """ - obname = 'ar_%s' %self.layer # create object name from layer name - obname = obname[:MAX_NAMELENGTH] - - center = self.loc - radius = self.radius - start = self.start_angle - end = self.end_angle - #print 'deb:calcArcPoints:\n center, radius, start, end:\n', center, radius, start, end #--------- - thic = set_thick(self.thic, settings) - width = 0.0 - if settings.var['lines_as'] == 4: # as thin_box - thic = settings.var['thick_min'] - width = settings.var['width_min'] - if settings.var['lines_as'] == 3: # as thin cylinder - cyl_rad = 0.5 * settings.var['width_min'] - - if settings.var['lines_as'] == 5: # draw ARC as curve ------------- - arc_res = settings.var['curve_arc'] - triples = True - VectorTriples = calcArc(None, radius, start, end, arc_res, triples) - arc = Curve.New(obname) # create new curve data - curve = arc.appendNurb(BezTriple.New(VectorTriples[0])) - for p in VectorTriples[1:]: - curve.append(BezTriple.New(p)) - for point in curve: - point.handleTypes = [FREE, FREE] - point.radius = 1.0 - curve.flagU = 0 # 0 sets the curve not cyclic=open - arc.setResolu(settings.var['curve_res']) - - arc.update() #important for handles calculation - - ob = SCENE.objects.new(arc) # create a new curve_object - ob.loc = tuple(self.loc) - if thic != 0.0: #hack: Blender<2.45 curve-extrusion - thic = thic * 0.5 - arc.setExt1(1.0) # curve-extrusion: Blender2.45 accepts only (0.0 - 5.0) - ob.LocZ = thic + self.loc[2] - transform(self.extrusion, 0, ob) - if thic != 0.0: - ob.SizeZ *= abs(thic) - return ob - - else: # draw ARC as mesh -------------------- - if M_OBJ: obname, me, ob = makeNewObject() - else: - me = Mesh.New(obname) # create a new mesh - ob = SCENE.objects.new(me) # create a new mesh_object - # set a number of segments in entire circle - arc_res = settings.var['arc_res'] * sqrt(radius) / sqrt(settings.var['arc_rad']) - - verts = calcArc(None, radius, start, end, arc_res, False) - #verts = [list(point) for point in verts] - len1 = len(verts) - #print 'deb:len1:', len1 #----------------------- - if width != 0: - radius_out = radius + (0.5 * width) - radius_in = radius - (0.5 * width) - if radius_in <= 0.0: - radius_in = settings.var['dist_min'] - #radius_in = 0.0 - verts_in = [] - verts_out = [] - for point in verts: - pointVec = Mathutils.Vector(point) - pointVec = pointVec.normalize() - verts_in.append(list(radius_in * pointVec)) #vertex inside - verts_out.append(list(radius_out * pointVec)) #vertex outside - verts = verts_in + verts_out - - #print 'deb:verts:', verts #--------------------- - if thic != 0: - thic_verts = [] - thic_verts.extend([[point[0], point[1], point[2]+thic] for point in verts]) - if thic < 0.0: - thic_verts.extend(verts) - verts = thic_verts - else: - verts.extend(thic_verts) - f_bottom = [[num, num+1, len1+num+1, len1+num] for num in xrange(len1-1)] - f_top = [[num, len1+num, len1+num+1, num+1] for num in xrange(len1+len1, len1+len1+len1-1)] - f_left = [[num, len1+len1+num, len1+len1+num+1, num+1] for num in xrange(len1-1)] - f_right = [[num, num+1, len1+len1+num+1, len1+len1+num] for num in xrange(len1, len1+len1-1)] - f_start = [[0, len1, len1+len1+len1, len1+len1]] - f_end = [[len1+len1-1, 0+len1-1, len1+len1+len1-1, len1+len1+len1+len1-1]] - faces = f_left + f_right + f_bottom + f_top + f_start + f_end - - me.verts.extend(verts) # add vertices to mesh - me.faces.extend(faces) # add faces to the mesh - - if settings.var['meshSmooth_on']: # left and right side become smooth ---------------------- - smooth_len = len(f_left) + len(f_right) - for i in 
xrange(smooth_len): - me.faces[i].smooth = True - # each MeshSide becomes vertexGroup for easier material assignment --------------------- - if settings.var['vGroup_on'] and not M_OBJ: - # each MeshSide becomes vertexGroup for easier material assignment --------------------- - replace = Mesh.AssignModes.REPLACE #or .AssignModes.ADD - vg_left, vg_right, vg_top, vg_bottom = [], [], [], [] - for v in f_left: vg_left.extend(v) - for v in f_right: vg_right.extend(v) - for v in f_top: vg_top.extend(v) - for v in f_bottom: vg_bottom.extend(v) - me.addVertGroup('side.left') ; me.assignVertsToGroup('side.left', vg_left, 1.0, replace) - me.addVertGroup('side.right') ; me.assignVertsToGroup('side.right', vg_right, 1.0, replace) - me.addVertGroup('side.top') ; me.assignVertsToGroup('side.top', vg_top, 1.0, replace) - me.addVertGroup('side.bottom'); me.assignVertsToGroup('side.bottom',vg_bottom, 1.0, replace) - me.addVertGroup('side.start'); me.assignVertsToGroup('side.start', f_start[0], 1.0, replace) - me.addVertGroup('side.end') ; me.assignVertsToGroup('side.end', f_end[0], 1.0, replace) - - - else: # if thick=0 - draw only flat ring - faces = [[num, len1+num, len1+num+1, num+1] for num in xrange(len1 - 1)] - me.verts.extend(verts) # add vertices to mesh - me.faces.extend(faces) # add faces to the mesh - - elif thic != 0: - thic_verts = [] - thic_verts.extend([[point[0], point[1], point[2]+thic] for point in verts]) - if thic < 0.0: - thic_verts.extend(verts) - verts = thic_verts - else: - verts.extend(thic_verts) - faces = [] - #print 'deb:len1:', len1 #----------------------- - #print 'deb:verts:', verts #--------------------- - faces = [[num, num+1, num+len1+1, num+len1] for num in xrange(len1 - 1)] - - me.verts.extend(verts) # add vertices to mesh - me.faces.extend(faces) # add faces to the mesh - if settings.var['meshSmooth_on']: # left and right side become smooth ---------------------- - for i in xrange(len(faces)): - me.faces[i].smooth = True - - else: - edges = [[num, num+1] for num in xrange(len(verts)-1)] - me.verts.extend(verts) # add vertices to mesh - me.edges.extend(edges) # add edges to the mesh - - #me.update() - #ob = SCENE.objects.new(me) # create a new arc_object - #ob.link(me) - ob.loc = tuple(center) - #ob.loc = Mathutils.Vector(ob.loc) - transform(self.extrusion, 0, ob) - #ob.size = (1,1,1) - return ob - - -class BlockRecord: #----------------------------------------------------------------- - """Class for objects representing dxf block_records. - """ - - def __init__(self, obj): - """Expects an entity object of type block_record as input. - """ - if not obj.type == 'block_record': - raise TypeError, "Wrong type %s for block_record object!" 
%obj.type
-		self.type = obj.type
-#		self.data = obj.data[:]
-
-		# required data
-		self.name = getit(obj, 2, None)
-
-		# optional data (with defaults)
-		self.insertion_units = getit(obj, 70, None)
-		self.insert_units = getit(obj, 1070, None)
-		"""code 1070 insertion units:
-		0 = no units; 1 = inches; 2 = feet; 3 = miles; 4 = millimeters;
-		5 = centimeters; 6 = meters; 7 = kilometers; 8 = microinches;
-		9 = mils; 10 = yards; 11 = angstroms; 12 = nanometers;
-		13 = microns; 14 = decimeters; 15 = decameters;
-		16 = hectometers; 17 = gigameters; 18 = astronomical units;
-		19 = light years; 20 = parsecs
-		"""
-
-
-	def __repr__(self):
-		return "%s: name - %s, insert units - %s" %(self.__class__.__name__, self.name, self.insertion_units)
-
-
-
-
-class Block: #-----------------------------------------------------------------
-	"""Class for objects representing dxf BLOCKs.
-	"""
-
-	def __init__(self, obj):
-		"""Expects an entity object of type block as input.
-		"""
-		if not obj.type == 'block':
-			raise TypeError, "Wrong type %s for block object!" %obj.type
-
-		self.type = obj.type
-		self.name = obj.name
-		self.data = obj.data[:]
-
-		# required data
-		self.flags = getit(obj, 70, 0)
-		self.anonim = self.flags & 1 #anonymous block generated by hatching, associative dimensioning, other
-		self.atrib = self.flags & 2 # has attribute definitions
-		self.xref = self.flags & 4 # is an external reference (xref)
-		self.xref_lay = self.flags & 8 # is an xref overlay
-		self.dep_ext = self.flags & 16 # is externally dependent
-		self.dep_res = self.flags & 32 # resolved external reference
-		self.xref_ext = self.flags & 64 # is a referenced external reference xref
-		#--todo--- if self.flag > 4: self.xref = True
-
-		# optional data (with defaults)
-		self.path = getit(obj, 1, '') # Xref path name
-		self.discription = getit(obj, 4, '')
-
-		self.entities = dxfObject('block_contents') #creates empty entities_container for this block
-		self.entities.data = objectify([ent for ent in obj.data if type(ent) != list])
-
-		self.layer = getit(obj, 8, None)
-		self.loc = self.get_loc(obj)
-
-		#print 'deb:Block %s data:\n%s' %(self.name, self.data) #------------
-		#print 'deb:Block %s self.entities.data:\n%s' %(self.name, self.entities.data) #------------
-
-
-
-	def get_loc(self, data):
-		"""Gets the insert point of the block.
-		"""
-		loc = [0, 0, 0]
-		loc[0] = getit(data, 10, 0.0) # 10 = x
-		loc[1] = getit(data, 20, 0.0) # 20 = y
-		loc[2] = getit(data, 30, 0.0) # 30 = z
-		return loc
-
-
-	def __repr__(self):
-		return "%s: name - %s, description - %s, xref-path - %s" %(self.__class__.__name__, self.name, self.discription, self.path)
-
-
-
-
-class Insert: #-----------------------------------------------------------------
-	"""Class for objects representing dxf INSERTs.
-	"""
-
-	def __init__(self, obj):
-		"""Expects an entity object of type insert as input.
-		"""
-		if not obj.type == 'insert':
-			raise TypeError, "Wrong type %s for insert object!"
%obj.type - self.type = obj.type - self.data = obj.data[:] - #print 'deb:Insert_init_ self.data:\n', self.data #----------- - - # required data - self.name = obj.get_type(2)[0] - - # optional data (with defaults) - self.rotation = getit(obj, 50, 0) - self.space = getit(obj, 67, 0) - self.color_index = getit(obj, 62, BYLAYER) - - self.layer = getit(obj, 8, None) - self.loc = self.get_loc(obj) - self.scale = self.get_scale(obj) - self.rows, self.columns = self.get_array(obj) - self.extrusion = get_extrusion(obj) - - #self.flags = getit(obj.data, 66, 0) # - #self.attrib = self.flags & 1 - - - def get_loc(self, data): - """Gets the origin location of the insert. - """ - loc = [0, 0, 0] - loc[0] = getit(data, 10, 0.0) - loc[1] = getit(data, 20, 0.0) - loc[2] = getit(data, 30, 0.0) - return loc - - - def get_scale(self, data): - """Gets the x/y/z scale factors of the insert. - """ - scale = [1, 1, 1] - scale[0] = getit(data, 41, 1.0) - scale[1] = getit(data, 42, 1.0) - scale[2] = getit(data, 43, 1.0) - return scale - - - def get_array(self, data): - """Returns the pair (row number, row spacing), (column number, column spacing). - """ - columns = getit(data, 70, 1) - rows = getit(data, 71, 1) - cspace = getit(data, 44, 0.0) - rspace = getit(data, 45, 0.0) - return (rows, rspace), (columns, cspace) - - - def get_target(self, data): - """Gets the origin location of the insert. - """ - loc = [0, 0, 0] - loc[0] = getit(data, 1011, 0.0) - loc[1] = getit(data, 1021, 0.0) - loc[2] = getit(data, 1031, 0.0) - return loc - - - def get_color(self, data): - """Gets the origin location of the insert. - """ - loc = [0, 0, 0] - loc[0] = getit(data, 1010, 0.0) - loc[1] = getit(data, 1020, 0.0) - loc[2] = getit(data, 1030, 0.0) - return loc - - - def get_ave_render(self, data): - """Gets the origin location of the insert. - """ - loc = [0, 0, 0] - loc[0] = getit(data, 1010, 0.0) - loc[1] = getit(data, 1020, 0.0) - loc[2] = getit(data, 1030, 0.0) - return loc - - - def __repr__(self): - return "%s: layer - %s, name - %s" %(self.__class__.__name__, self.layer, self.name) - - - def draw(self, settings, deltaloc): - """for INSERT(block): draw empty-marker for duplicated Blender_Group. - - Blocks are made of three objects: - the block_record in the tables section - the block in the blocks section - the insert object (one or more) in the entities section - block_record gives the insert units, - block provides the objects drawn in the block, - insert object gives the location/scale/rotation of the block instances. 
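A minimal sketch of how one INSERT placement ends up in Blender, using only API calls this script already relies on (Group.New, DupGroup); the block name, location and scale values are placeholders:

    # Sketch only: one Group per BLOCK definition, one duplicating Empty per INSERT.
    block_group = Group.New('bl_MYBLOCK')           # filled once from the block's entities
    ob = SCENE.objects.new('Empty', 'in_MYBLOCK')   # marker object for a single INSERT
    ob.DupGroup = block_group                       # the empty instances the whole group
    ob.enableDupGroup = True
    ob.loc = (10.0, 5.0, 0.0)                       # insert point, group codes 10/20/30
    ob.size = (2.0, 2.0, 2.0)                       # insert scale, group codes 41/42/43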
- """ - - name = self.name.lower() - if name == 'ave_render': - if settings.var['lights_on']: #if lights support activated - a_data = get_ave_data(self.data) - # AVE_RENDER objects: - # 7:'Pref', 0:'Full Opt', 0:'Quick Opt', 1:'Scanl Opt', 2:'Raytr Opt', 0:'RFile Opt' - # 0:'Fog Opt', 0:'BG Opt', 0:'SCENE1','','','','','','','','','', - # '','','','','','','','','','','','', - - if a_data.key == 'SCENE': # define set of lights as blender group - scene_lights = 1 - return - elif name == 'ave_global': - if settings.var['lights_on']: #if lights support activated - return - elif name == 'sh_spot': - if settings.var['lights_on']: #if lights support activated - obname = settings.blocknamesmap[self.name] - obname = 'sp_%s' %obname # create object name from block name - #obname = obname[:MAX_NAMELENGTH] - # blender: 'Lamp', 'Sun', 'Spot', 'Hemi', 'Area', or 'Photon' - li = Lamp.New('Spot', obname) - ob = SCENE.objects.new(li) - intensity = 2.0 #--todo-- ----------- - li.setEnergy(intensity) - target = self.get_target(self.data) - color = self.get_color(self.data) - li.R = color[0] - li.G = color[1] - li.B = color[2] - - ob.loc = tuple(self.loc) - transform(self.extrusion, 0, ob) - return ob - - elif name == 'overhead': - if settings.var['lights_on']: #if lights support activated - obname = settings.blocknamesmap[self.name] - obname = 'la_%s' %obname # create object name from block name - #obname = obname[:MAX_NAMELENGTH] - # blender: 'Lamp', 'Sun', 'Spot', 'Hemi', 'Area', or 'Photon' - li = Lamp.New('Lamp', obname) - ob = SCENE.objects.new(li) - intensity = 2.0 #--todo-- ----------- - li.setEnergy(intensity) - target = self.get_target(self.data) - color = self.get_color(self.data) - li.R = color[0] - li.G = color[1] - li.B = color[2] - - ob.loc = tuple(self.loc) - transform(self.extrusion, 0, ob) - return ob - - elif name == 'direct': - if settings.var['lights_on']: #if lights support activated - obname = settings.blocknamesmap[self.name] - obname = 'su_%s' %obname # create object name from block name - #obname = obname[:MAX_NAMELENGTH] - # blender: 'Lamp', 'Sun', 'Spot', 'Hemi', 'Area', or 'Photon' - li = Lamp.New('Sun', obname) - ob = SCENE.objects.new(li) - intensity = 2.0 #--todo-- ----------- - li.setEnergy(intensity) - color = self.get_color(self.data) - li.R = color[0] - li.G = color[1] - li.B = color[2] - - ob.loc = tuple(self.loc) - transform(self.extrusion, 0, ob) - return ob - - elif settings.drawTypes['insert']: #if insert_drawType activated - #print 'deb:draw. 
settings.blocknamesmap:', settings.blocknamesmap #-------------------- - obname = settings.blocknamesmap[self.name] - obname = 'in_%s' %obname # create object name from block name - #obname = obname[:MAX_NAMELENGTH] - - # if material BYBLOCK def needed: use as placeholder a mesh-vertex instead of empty - ob = SCENE.objects.new('Empty', obname) # create a new empty_object - empty_size = 1.0 * settings.var['g_scale'] - if empty_size < 0.01: empty_size = 0.01 #Blender limits (0.01-10.0) - elif empty_size > 10.0: empty_size = 10.0 - ob.drawSize = empty_size - - # get our block_def-group - block = settings.blocks(self.name) - ob.DupGroup = block - ob.enableDupGroup = True - - if block.name.startswith('xr_'): - ob.name = 'xb_' + ob.name[3:] - - #print 'deb:draw.block.deltaloc:', deltaloc #-------------------- - ob.loc = tuple(self.loc) - if deltaloc: - deltaloc = rotXY_Vec(self.rotation, deltaloc) - #print 'deb:draw.block.loc:', deltaloc #-------------------- - ob.loc = [ob.loc[0]+deltaloc[0], ob.loc[1]+deltaloc[1], ob.loc[2]+deltaloc[2]] - transform(self.extrusion, self.rotation, ob) - ob.size = tuple(self.scale) - return ob - - - - -class Ellipse: #----------------------------------------------------------------- - """Class for objects representing dxf ELLIPSEs. - """ - - def __init__(self, obj): - """Expects an entity object of type ellipse as input. - """ - if not obj.type == 'ellipse': - raise TypeError, "Wrong type %s for ellipse object!" %obj.type - self.type = obj.type -# self.data = obj.data[:] - - # required data - self.ratio = obj.get_type(40)[0] # Ratio of minor axis to major axis - self.start_angle = obj.get_type(41)[0] # in radians - self.end_angle = obj.get_type(42)[0] - - # optional data (with defaults) - self.space = getit(obj, 67, 0) - self.thic = getit(obj, 39, 0.0) - self.color_index = getit(obj, 62, BYLAYER) - - self.layer = getit(obj, 8, None) - self.loc = self.get_loc(obj) - self.major = self.get_major(obj) - self.extrusion = get_extrusion(obj) - - - def get_loc(self, data): - """Gets the center location for arc type objects. - - Arcs have a single coord location. - """ - loc = [0.0, 0.0, 0.0] - loc[0] = getit(data, 10, 0.0) - loc[1] = getit(data, 20, 0.0) - loc[2] = getit(data, 30, 0.0) - return loc - - - def get_major(self, data): - """Gets the major axis for ellipse type objects. - - The ellipse major axis defines the rotation of the ellipse and its radius. - """ - loc = [0.0, 0.0, 0.0] - loc[0] = getit(data, 11, 0.0) - loc[1] = getit(data, 21, 0.0) - loc[2] = getit(data, 31, 0.0) - return loc - - - def __repr__(self): - return "%s: layer - %s, radius - %s" %(self.__class__.__name__, self.layer, self.radius) - - - def draw(self, settings): - """for ELLIPSE: generate Blender_geometry. 
- """ - obname = 'el_%s' %self.layer # create object name from layer name - obname = obname[:MAX_NAMELENGTH] - - center = self.loc - start = degrees(self.start_angle) - end = degrees(self.end_angle) - if abs(end - 360.0) < 0.00001: end = 360.0 - ellipse_closed = False - if end - start == 360.0: ellipse_closed = True - - # rotation = Angle between major and WORLDX - # doesnt work, couse produces always positive value: rotation = Mathutils.AngleBetweenVecs(major, WORLDX) - if self.major[0] == 0: - rotation = 90.0 - if self.major[1] < 0: rotation += 180 - else: - rotation = degrees(atan(self.major[1] / self.major[0])) - if self.major[0] < 0: - rotation += 180.0 - - major = Mathutils.Vector(self.major) - #radius = sqrt(self.major[0]**2 + self.major[1]**2 + self.major[2]**2) - radius = major.length - #print 'deb:calcEllipse:\n center, radius, start, end:\n', center, radius, start, end #--------- - - thic = set_thick(self.thic, settings) - width = 0.0 - if settings.var['lines_as'] == 4: # as thin_box - thic = settings.var['thick_min'] - width = settings.var['width_min'] - elif settings.var['lines_as'] == 3: # as thin cylinder - cyl_rad = 0.5 * settings.var['width_min'] - - elif settings.var['lines_as'] == 5: # draw ELLIPSE as curve ------------- - arc_res = settings.var['curve_arc'] - triples = True - VectorTriples = calcArc(None, radius, start, end, arc_res, triples) - arc = Curve.New(obname) # create new curve data - curve = arc.appendNurb(BezTriple.New(VectorTriples[0])) - if ellipse_closed: - for p in VectorTriples[1:-1]: - curve.append(BezTriple.New(p)) - for point in curve: - point.handleTypes = [FREE, FREE] - point.radius = 1.0 - curve.flagU = 1 # 0 sets the curve not cyclic=open - if settings.var['fill_on']: - arc.setFlag(6) # 2+4 set top and button caps - else: - arc.setFlag(arc.getFlag() & ~6) # dont set top and button caps - else: - for p in VectorTriples[1:]: - curve.append(BezTriple.New(p)) - for point in curve: - point.handleTypes = [FREE, FREE] - point.radius = 1.0 - curve.flagU = 0 # 0 sets the curve not cyclic=open - - arc.setResolu(settings.var['curve_res']) - arc.update() #important for handles calculation - - ob = SCENE.objects.new(arc) # create a new curve_object - ob.loc = tuple(self.loc) - if thic != 0.0: #hack: Blender<2.45 curve-extrusion - thic = thic * 0.5 - arc.setExt1(1.0) # curve-extrusion: Blender2.45 accepts only (0.0 - 5.0) - ob.LocZ = thic + self.loc[2] - transform(self.extrusion, rotation, ob) - ob.SizeY *= self.ratio - if thic != 0.0: - ob.SizeZ *= abs(thic) - return ob - - - else: # draw ELLIPSE as mesh -------------------------------------- - if M_OBJ: obname, me, ob = makeNewObject() - else: - me = Mesh.New(obname) # create a new mesh - ob = SCENE.objects.new(me) # create a new mesh_object - # set a number of segments in entire circle - arc_res = settings.var['arc_res'] * sqrt(radius) / sqrt(settings.var['arc_rad']) - - verts = calcArc(None, radius, start, end, arc_res, False) - #verts = [list(point) for point in verts] - len1 = len(verts) - #print 'deb:len1:', len1 #----------------------- - if width != 0: - radius_out = radius + (0.5 * width) - radius_in = radius - (0.5 * width) - if radius_in <= 0.0: - radius_in = settings.var['dist_min'] - #radius_in = 0.0 - verts_in = [] - verts_out = [] - for point in verts: - pointVec = Mathutils.Vector(point) - pointVec = pointVec.normalize() - verts_in.append(list(radius_in * pointVec)) #vertex inside - verts_out.append(list(radius_out * pointVec)) #vertex outside - verts = verts_in + verts_out - - #print 
'deb:verts:', verts #--------------------- - if thic != 0: - thic_verts = [] - thic_verts.extend([[point[0], point[1], point[2]+thic] for point in verts]) - if thic < 0.0: - thic_verts.extend(verts) - verts = thic_verts - else: - verts.extend(thic_verts) - f_bottom = [[num, num+1, len1+num+1, len1+num] for num in xrange(len1-1)] - f_top = [[num, len1+num, len1+num+1, num+1] for num in xrange(len1+len1, len1+len1+len1-1)] - f_left = [[num, len1+len1+num, len1+len1+num+1, num+1] for num in xrange(len1-1)] - f_right = [[num, num+1, len1+len1+num+1, len1+len1+num] for num in xrange(len1, len1+len1-1)] - f_start = [[0, len1, len1+len1+len1, len1+len1]] - f_end = [[len1+len1-1, 0+len1-1, len1+len1+len1-1, len1+len1+len1+len1-1]] - faces = f_left + f_right + f_bottom + f_top + f_start + f_end - - me.verts.extend(verts) # add vertices to mesh - me.faces.extend(faces) # add faces to the mesh - - if settings.var['meshSmooth_on']: # left and right side become smooth ---------------------- - smooth_len = len(f_left) + len(f_right) - for i in xrange(smooth_len): - me.faces[i].smooth = True - if settings.var['vGroup_on'] and not M_OBJ: - # each MeshSide becomes vertexGroup for easier material assignment --------------------- - replace = Mesh.AssignModes.REPLACE #or .AssignModes.ADD - vg_left, vg_right, vg_top, vg_bottom = [], [], [], [] - for v in f_left: vg_left.extend(v) - for v in f_right: vg_right.extend(v) - for v in f_top: vg_top.extend(v) - for v in f_bottom: vg_bottom.extend(v) - me.addVertGroup('side.left') ; me.assignVertsToGroup('side.left', vg_left, 1.0, replace) - me.addVertGroup('side.right') ; me.assignVertsToGroup('side.right', vg_right, 1.0, replace) - me.addVertGroup('side.top') ; me.assignVertsToGroup('side.top', vg_top, 1.0, replace) - me.addVertGroup('side.bottom'); me.assignVertsToGroup('side.bottom',vg_bottom, 1.0, replace) - me.addVertGroup('side.start'); me.assignVertsToGroup('side.start', f_start[0], 1.0, replace) - me.addVertGroup('side.end') ; me.assignVertsToGroup('side.end', f_end[0], 1.0, replace) - - - else: # if thick=0 - draw only flat ring - faces = [[num, len1+num, len1+num+1, num+1] for num in xrange(len1 - 1)] - me.verts.extend(verts) # add vertices to mesh - me.faces.extend(faces) # add faces to the mesh - - elif thic != 0: - thic_verts = [] - thic_verts.extend([[point[0], point[1], point[2]+thic] for point in verts]) - if thic < 0.0: - thic_verts.extend(verts) - verts = thic_verts - else: - verts.extend(thic_verts) - faces = [] - #print 'deb:len1:', len1 #----------------------- - #print 'deb:verts:', verts #--------------------- - faces = [[num, num+1, num+len1+1, num+len1] for num in xrange(len1 - 1)] - - me.verts.extend(verts) # add vertices to mesh - me.faces.extend(faces) # add faces to the mesh - if settings.var['meshSmooth_on']: # left and right side become smooth ---------------------- - for i in xrange(len(faces)): - me.faces[i].smooth = True - - else: - edges = [[num, num+1] for num in xrange(len(verts)-1)] - me.verts.extend(verts) # add vertices to mesh - me.edges.extend(edges) # add edges to the mesh - - #print 'deb:calcEllipse transform rotation: ', rotation #--------- - ob.loc = tuple(center) - #old ob.SizeY = self.ratio - transform(self.extrusion, rotation, ob) - #old transform(self.extrusion, 0, ob) - ob.SizeY *= self.ratio - - return ob - - - -class Face: #----------------------------------------------------------------- - """Class for objects representing dxf 3DFACEs. 
- """ - - def __init__(self, obj): - """Expects an entity object of type 3dfaceplot as input. - """ - if not obj.type == '3dface': - raise TypeError, "Wrong type %s for 3dface object!" %obj.type - self.type = obj.type -# self.data = obj.data[:] - - # optional data (with defaults) - self.space = getit(obj, 67, 0) - self.color_index = getit(obj, 62, BYLAYER) - - self.layer = getit(obj, 8, None) - self.points = self.get_points(obj) - - - def get_points(self, data): - """Gets 3-4 points for a 3d face type object. - - Faces have three or optionally four verts. - """ - a = [0, 0, 0] - b = [0, 0, 0] - c = [0, 0, 0] - d = [0, 0, 0] - a[0] = getit(data, 10, None) # 10 = x - a[1] = getit(data, 20, None) # 20 = y - a[2] = getit(data, 30, 0.0) # 30 = z - b[0] = getit(data, 11, None) - b[1] = getit(data, 21, None) - b[2] = getit(data, 31, 0.0) - c[0] = getit(data, 12, None) - c[1] = getit(data, 22, None) - c[2] = getit(data, 32, 0.0) - out = [a,b,c] - - d[0] = getit(data, 13, None) - if d[0] != None: - d[1] = getit(data, 23, None) - d[2] = getit(data, 33, 0.0) - out.append(d) - - #if len(out) < 4: print '3dface with only 3 vertices:\n',a,b,c,d #----------------- - return out - - - def __repr__(self): - return "%s: layer - %s, points - %s" %(self.__class__.__name__, self.layer, self.points) - - - def draw(self, settings): - """for 3DFACE: generate Blender_geometry. - """ - # Generate the geometery - points = self.points - - global activObjectLayer - global activObjectName - #print 'deb:draw:face.ob IN activObjectName: ', activObjectName #--------------------- - - if M_OBJ: obname, me, ob = makeNewObject() - else: - if activObjectLayer == self.layer and settings.var['one_mesh_on']: - obname = activObjectName - #print 'deb:face.draw obname from activObjectName: ', obname #--------------------- - ob = getSceneChild(obname) # open an existing mesh_object - #ob = SCENE.getChildren(obname) # open an existing mesh_object - me = ob.getData(name_only=False, mesh=True) - else: - obname = 'fa_%s' %self.layer # create object name from layer name - obname = obname[:MAX_NAMELENGTH] - me = Mesh.New(obname) # create a new mesh - ob = SCENE.objects.new(me) # create a new mesh_object - activObjectName = ob.name - activObjectLayer = self.layer - #print ('deb:except. new face.ob+mesh:"%s" created!' 
%ob.name) #--------------------- - - #me = Mesh.Get(ob.name) # open objects mesh data - faces, edges = [], [] - n = len(me.verts) - if len(self.points) == 4: - faces = [[0+n,1+n,2+n,3+n]] - elif len(self.points) == 3: - faces = [[0+n,1+n,2+n]] - elif len(self.points) == 2: - edges = [[0+n,1+n]] - - me.verts.extend(points) # add vertices to mesh - if faces: me.faces.extend(faces) # add faces to the mesh - if edges: me.edges.extend(edges) # add faces to the mesh - if settings.var['vGroup_on'] and not M_OBJ: - # entities with the same color build one vertexGroup for easier material assignment --------------------- - ob.link(me) # link mesh to that object - vG_name = 'color_%s' %self.color_index - if edges: faces = edges - replace = Mesh.AssignModes.ADD #or .AssignModes.REPLACE or ADD - try: - me.assignVertsToGroup(vG_name, faces[0], 1.0, replace) - #print 'deb: existed vGroup:', vG_name #--------------------- - except: - me.addVertGroup(vG_name) - me.assignVertsToGroup(vG_name, faces[0], 1.0, replace) - #print 'deb: create new vGroup:', vG_name #-------------------- - - #print 'deb:draw:face.ob OUT activObjectName: ', activObjectName #--------------------- - return ob - - -#--------------------------------------------------------------------------------------- -# type to object maping (sorted-dictionary for f_obiectify ONLY!, format={'key':Class} ) -type_map = { - 'vport':Vport, - 'view':View, - 'layer':Layer, - 'block_record':BlockRecord, - 'block':Block, - 'insert':Insert, - 'point':Point, - '3dface':Face, - 'line':Line, -# 'mline':MLine, - 'polyline':Polyline, - 'lwpolyline':LWpolyline, - 'spline':Spline, -# 'region':Region, - 'trace':Solid, - 'solid':Solid, - 'text':Text, - 'mtext':Mtext, - 'circle':Circle, - 'ellipse':Ellipse, - 'arc':Arc -} - - - -def objectify(data): #----------------------------------------------------------------- - """Expects a section type object's data as input. - - Maps object data to the correct object type. 
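Stripped of the polyline/vertex grouping and the block filters handled in objectify() below, the type_map lookup reduces to this pattern (sketch, hypothetical variable names):

    wrapper_cls = type_map.get(raw_entity.type)     # e.g. 'circle' -> Circle
    if wrapper_cls is not None:
        try:
            entity = wrapper_cls(raw_entity)        # wrap the raw dxf entity
        except TypeError:
            entity = None                           # malformed entity data is skipped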
- """ - #print 'deb:objectify start %%%%%%%%%%%' #--------------- - objects = [] # colector for finished objects - known_types = type_map.keys() # so we don't have to call foo.keys() every iteration - curves_on = GUI_A['curves_on'].val - index = 0 - while index < len(data): - item = data[index] - #print 'deb:objectify item: \n', item #------------ - if type(item) != list and item.type == 'table': - item.data = objectify(item.data) # tables have sub-objects - objects.append(item) - elif type(item) != list and item.type == 'polyline': #remi --todo----------- - #print 'deb:gosub Polyline\n' #------------- - pline = Polyline(item) - while 1: - index += 1 - item = data[index] - if item.type == 'vertex': - #print 'deb:objectify gosub Vertex--------' #------------- - v = Vertex(item) - if pline.spline: # if NURBSpline-curve - # then for Blender-mesh filter only additional_vertices - # OR - # then for Blender-curve filter only spline_control_vertices - if (v.spline and not curves_on) or (curves_on and v.spline_c): #correct for real NURBS-import - #if (v.spline and not curves_on) or (curves_on and not v.spline_c): #fake for Bezier-emulation of NURBS-import - pline.points.append(v) - elif pline.curved: # if Bezier-curve - # then for Blender-mesh filter only curve_additional_vertices - # OR - # then for Blender-curve filter curve_control_vertices - if not curves_on or (curves_on and not v.curved): - pline.points.append(v) - else: - pline.points.append(v) - elif item.type == 'seqend': - #print 'deb:objectify it is seqEND ---------\n' #------------- - break - else: - print "Error: non-vertex found before seqend!" - index -= 1 #so go back one step - break - objects.append(pline) - elif type(item) != list and item.type in ['block', 'insert']: - if not settings.var['block_nn'] and item.name.startswith('*X'): - #print 'deb:objectify item.type:"%s", item.name:"%s"' %(item.type, item.name) #------------ - pass - elif settings.var['blockFilter_on'] and not settings.accepted_block(item.name): - pass - else: - try: - objects.append(type_map[item.type](item)) - except TypeError: - pass - elif type(item) != list and item.type in known_types: - # proccess the object and append the resulting object - try: - objects.append(type_map[item.type](item)) - except TypeError: - pass - else: - #we will just let the data pass un-harrased - #objects.append(item) - pass - index += 1 - #print 'deb:objectify objects:\n', objects #------------ - #print 'deb:objectify END %%%%%%%%' #------------ - return objects - - - -class MatColors: #----------------------------------------------------------------- - """A smart container for dxf-color based materials. - - This class is a wrapper around a dictionary mapping dxf-color indicies to materials. - When called with a color_index - it returns a material corresponding to that index. - Behind the scenes it checks if that index is in its keys, and if not it creates - a new material. It then adds the new index:material pair to its dict and returns - the material. - """ - - def __init__(self): - """Expects a map - a dictionary mapping layer names to layers. - """ - #self.layersmap = layersmap # a dictionary of layername:layerobject - self.colMaterials = {} # a dictionary of color_index:blender_material - #print 'deb:init_MatColors argument.map: ', map #------------------ - - - def __call__(self, color=None): - """Return the material associated with color. - - If a layer name is provided, the color of that layer is used. 
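MatColors, MatLayers and Blocks below all use the same lazily filled cache idea: build on first request, then reuse. An API-free sketch of the pattern, with hypothetical names:

    class LazyCache:
        # Sketch of the container pattern shared by MatColors/MatLayers/Blocks.
        def __init__(self, factory):
            self.factory = factory   # e.g. a function building a Material for a color index
            self.items = {}
        def __call__(self, key):
            if key not in self.items:
                self.items[key] = self.factory(key)
            return self.items[key]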
- """ - if color == None: color = 256 # color 256=BYLAYER - if type(color) == str: # looking for color of LAYER named "color" - #--todo---bug with ARC from ARC-T0.DXF layer="T-3DARC-1"----- - #print 'deb:color is string:--------: ', color - #try: - #color = layersmap[color].color - #print 'deb:color=self.map[color].color:', color #------------------ - #except KeyError: - #layer = Layer(name=color, color=256, frozen=False) - #layersmap[color] = layer - #color = 0 - if color in layersmap.keys(): - color = layersmap[color].color - if color == 256: # color 256 = BYLAYER - #--todo-- should looking for color of LAYER - #if layersmap: color = layersmap[color].color - color = 3 - if color == 0: # color 0 = BYBLOCK - #--todo-- should looking for color of paret-BLOCK - #if layersmap: color = layersmap[color].color - color = 3 - color = abs(color) # cause the value could be nagative = means the layer is turned off - - if color not in self.colMaterials.keys(): - self.add(color) - return self.colMaterials[color] - - - def add(self, color): - """Create a new material 'ColorNr-N' using the provided color index-N. - """ - #global color_map #--todo-- has not to be global? - mat = Material.New('ColorNr-%s' %color) - mat.setRGBCol(color_map[color]) - #mat.mode |= Material.Modes.SHADELESS #--todo-- - #mat.mode |= Material.Modes.WIRE -# try: mat.setMode('Shadeless', 'Wire') #work-around for 2.45rc-bug -# except: pass - self.colMaterials[color] = mat - - - -class MatLayers: #----------------------------------------------------------------- - """A smart container for dxf-layer based materials. - - This class is a wrapper around a dictionary mapping dxf-layer names to materials. - When called with a layer name it returns a material corrisponding to that. - Behind the scenes it checks if that layername is in its keys, and if not it creates - a new material. It then adds the new layername:material pair to its dict and returns - the material. - """ - - def __init__(self): - """Expects a map - a dictionary mapping layer names to layers. - """ - #self.layersmap = layersmap # a dictionary of layername:layer - self.layMaterials = {} # a dictionary of layer_name:blender_material - #print 'deb:init_MatLayers argument.map: ', map #------------------ - - - def __call__(self, layername=None, color=None): - """Return the material associated with dxf-layer. - - If a dxf-layername is not provided, create a new material - """ - #global layernamesmap - layername_short = layername - if layername in layernamesmap.keys(): - layername_short = layernamesmap[layername] - colorlayername = layername_short - if color: colorlayername = str(color) + colorlayername - if colorlayername not in self.layMaterials.keys(): - self.add(layername, color, colorlayername) - return self.layMaterials[colorlayername] - - - def add(self, layername, color, colorlayername): - """Create a new material 'layername'. 
- """ - try: mat = Material.Get('L-%s' %colorlayername) - except: mat = Material.New('L-%s' %colorlayername) - #print 'deb:MatLayers material: ', mat #---------- - #global settings - #print 'deb:MatLayers material_from: ', settings.var['material_from'] #---------- - if settings.var['material_from'] == 3 and color: - if color == 0 or color == 256: mat_color = 3 - else: mat_color = color - elif layersmap and layername: - mat_color = layersmap[layername].color - else: mat_color = 3 - #print 'deb:MatLayers color: ', color #----------- - #print 'deb:MatLayers mat_color: ', mat_color #----------- - mat.setRGBCol(color_map[abs(mat_color)]) - #mat.mode |= Material.Modes.SHADELESS - #mat.mode |= Material.Modes.WIRE -# try: mat.setMode('Shadeless', 'Wire') #work-around for 2.45rc-bug -# except: pass - self.layMaterials[colorlayername] = mat - - - - -class Blocks: #----------------------------------------------------------------- - """A smart container for blocks. - - This class is a wrapper around a dictionary mapping block names to Blender data blocks. - When called with a name string it returns a block corresponding to that name. - Behind the scenes it checks if that name is in its keys, and if not it creates - a new data block. It then adds the new name:block_data pair to its dict and returns - the block. - """ - - def __init__(self, blocksmap, settings): - """Expects a dictionary mapping block_name:block_data. - """ - self.blocksmap = blocksmap #a dictionary mapping block_name:block_data - self.settings = settings - self.blocks = {} #container for blender groups representing blocks - - - def __call__(self, name=None): - """Return the data block associated with that block_name. - - If that name is not in its keys, it creates a new data block. - If no name is provided return entire self.blocks container. - """ - if name == None: - return self.blocks - if name not in self.blocks.keys(): - self.addBlock(name) - return self.blocks[name] - - - def addBlock(self, name): - """Create a new 'block group' for the block name. - """ - block = self.blocksmap[name] - prefix = 'bl' - if block.xref: prefix = 'xr' - blender_group = Group.New('%s_%s' %(prefix,name)) # Blender groupObject contains definition of BLOCK - block_def = [blender_group, block.loc] - self.settings.write("\nDrawing block:\'%s\' ..." % name) - - if block.xref: - obname = 'xr_%s' %name # create object name from xref block name - #obname = obname[:MAX_NAMELENGTH] - # if material BYBLOCK def needed: use as placeholder a mesh-vertex instead of empty - ob = SCENE.objects.new('Empty', obname) # create a new empty_object - empty_size = 1.0 * settings.var['g_scale'] - if empty_size < 0.01: empty_size = 0.01 #Blender limits (0.01-10.0) - elif empty_size > 10.0: empty_size = 10.0 - ob.drawSize = empty_size - ob.loc = tuple(block.loc) - ob.properties['xref_path'] = block.path - ob.layers = [19] - insertFlag=True; blockFlag=True - global oblist - oblist.append((ob, insertFlag, blockFlag)) - else: - if M_OBJ: - car_end() - car_start() - drawEntities(block.entities, self.settings, block_def) - if M_OBJ: car_end() - self.settings.write("Drawing block:\'%s\' done!" %name) - self.blocks[name] = blender_group - - - - - -class Settings: #----------------------------------------------------------------- - """A container for all the import settings and objects used by the draw functions. - - This is like a collection of globally accessable persistant properties and functions. 
- """ - # Optimization constants - MIN = 0 - MID = 1 - PRO = 2 - MAX = 3 - - def __init__(self, keywords, drawTypes): - """initialize all the important settings used by the draw functions. - """ - self.obj_number = 1 #global object_number for progress_bar - - self.var = dict(keywords) #a dictionary of (key_variable:Value) control parameter - self.drawTypes = dict(drawTypes) #a dictionary of (entity_type:True/False) = import on/off for this entity_type - - self.var['colorFilter_on'] = False #deb:remi------------ - self.acceptedColors = [0,2,3,4,5,6,7,8,9, - 10 ] - - self.var['layerFilter_on'] = False #deb:remi------------ - self.acceptedLayers = ['3', - '0' - ] - - self.var['groupFilter_on'] = False #deb:remi------------ - self.acceptedLayers = ['3', - '0' - ] - - #self.var['blockFilter_on'] = 0 #deb:remi------------ - self.acceptedBlocks = ['WALL_1871', - 'BOX02' - ] - self.unwantedBlocks = ['BOX05', - 'BOX04' - ] - - - def update(self, keywords, drawTypes): - """update all the important settings used by the draw functions. - mostly used after loading parameters from INI-file - """ - - for k, v in keywords.iteritems(): - self.var[k] = v - #print 'deb:settings_update var %s= %s' %(k, self.var[k]) #-------------- - for t, v in drawTypes.iteritems(): - self.drawTypes[t] = v - #print 'deb:settings_update drawType %s= %s' %(t, self.drawTypes[t]) #-------------- - - self.drawTypes['arc'] = self.drawTypes['line'] - self.drawTypes['circle'] = self.drawTypes['line'] - self.drawTypes['ellipse'] = self.drawTypes['line'] - self.drawTypes['trace'] = self.drawTypes['solid'] - self.drawTypes['insert'] = self.drawTypes['block'] - #self.drawTypes['vport'] = self.drawTypes['view'] - - #print 'deb:self.drawTypes', self.drawTypes #--------------- - - - def validate(self, drawing): - """Given the drawing, build dictionaries of Layers, Colors and Blocks. - """ - - global oblist - #adjust the distance parameter to globalScale - if self.var['g_scale'] != 1.0: - self.var['dist_min'] = self.var['dist_min'] / self.var['g_scale'] - self.var['thick_min'] = self.var['thick_min'] / self.var['g_scale'] - self.var['width_min'] = self.var['width_min'] / self.var['g_scale'] - self.var['arc_rad'] = self.var['arc_rad'] / self.var['g_scale'] - - self.g_origin = Mathutils.Vector(self.var['g_originX'], self.var['g_originY'], self.var['g_originZ']) - - # First sort out all the section_items - sections = dict([(item.name, item) for item in drawing.data]) - - # The section:header may be omited - if 'header' in sections.keys(): - self.write("found section:header") - else: - self.write("File contains no section:header!") - - if self.var['optimization'] == 0: self.var['one_mesh_on'] = 0 - # The section:tables may be partialy or completely missing. 
- self.layersTable = False - self.colMaterials = MatColors() #A container for dxf-color based materials - self.layMaterials = MatLayers() #A container for dxf-layer based materials - #self.collayMaterials = MatColLayers({}) #A container for dxf-color+layer based materials - global layersmap, layernamesmap - layersmap, layernamesmap = {}, {} - if 'tables' in sections.keys(): - self.write("found section:tables") - views, vports, layers = False, False, False - for table in drawing.tables.data: - if table.name == 'layer': - self.write("found table:layers") - layers = table - elif table.name == 'view': - print "found table:view" - views = table - elif table.name == 'vport': - print "found table:vport" - vports = table - if layers: #---------------------------------- - # Read the layers table and get the layer colors - layersmap, layernamesmap = getLayersmap(layers) - #self.colMaterials = MatColors() - #self.layMaterials = MatLayers() - else: - self.write("File contains no table:layers!") - - - if views: #---------------------------------- - if self.var['views_on']: - for item in views.data: - if type(item) != list and item.type == 'view': - #print 'deb:settings_valid views dir(item)=', dir(item) #------------- - #print 'deb:settings_valid views item=', item #------------- - ob = item.draw(self) - #viewsmap[item.name] = [item.length] - #--todo-- add to obj_list for global.Scaling - insertFlag, blockFlag = False, False - oblist.append((ob, insertFlag, blockFlag)) - - else: - self.write("File contains no table:views!") - - - if vports: #---------------------------------- - if self.var['views_on']: - for item in vports.data: - if type(item) != list and item.type == 'vport': - #print 'deb:settings_valid views dir(item)=', dir(item) #------------- - #print 'deb:settings_valid views item=', item #------------- - ob = item.draw(self) - #viewsmap[item.name] = [item.length] - #--todo-- add to obj_list for global.Scaling - insertFlag, blockFlag = False, False - oblist.append((ob, insertFlag, blockFlag)) - else: - self.write("File contains no table:vports!") - - - else: - self.write("File contains no section:tables!") - self.write("File contains no table:layers!") - - - # The section:blocks may be omited - if 'blocks' in sections.keys(): - self.write("found section:blocks") - # Read the block definitions and build our block object - if self.drawTypes['insert']: #if support for entity type 'Insert' is activated - #Build a dictionary of blockname:block_data pairs - blocksmap, obj_number = getBlocksmap(drawing, layersmap, self.var['layFrozen_on']) - self.obj_number += obj_number - self.blocknamesmap = getBlocknamesmap(blocksmap) - self.blocks = Blocks(blocksmap, self) # initiates container for blocks_data - self.usedBlocks = blocksmap.keys() - #print 'deb:settings_valid self.usedBlocks', self.usedBlocks #---------- - else: - self.write("ignored, because support for BLOCKs is turn off!") - #print 'deb:settings_valid self.obj_number', self.obj_number #---------- - else: - self.write("File contains no section:blocks!") - self.drawTypes['insert'] = False - - # The section:entities - if 'entities' in sections.keys(): - self.write("found section:entities") - self.obj_number += len(drawing.entities.data) - self.obj_number = 1.0 / self.obj_number - - - def accepted_block(self, name): - if name not in self.usedBlocks: return False - if name in self.unwantedBlocks: return False - elif name in self.acceptedBlocks: return True - #elif (name.find('*X')+1): return False - #elif name.startswith('3'): return True - #elif 
name.endswith('H'): return False - return True - - - def write(self, text, newline=True): - """Wraps the built-in print command in a optimization check. - """ - if self.var['optimization'] <= self.MID: - if newline: print text - else: print text, - - - def redraw(self): - """Update Blender if optimization level is low enough. - """ - if self.var['optimization'] <= self.MIN: - Blender.Redraw() - - - def progress(self, done, text): - """Wrapper for Blender.Window.DrawProgressBar. - """ - if self.var['optimization'] <= self.PRO: - progressbar = done * self.obj_number - Window.DrawProgressBar(progressbar, text) - #print 'deb:drawer done, progressbar: ', done, progressbar #----------------------- - - def layer_isOff(self, layername): # no more used ------- - """Given a layer name, and return its visible status. - """ - # if layer is off then color_index is negative - if layersmap and layersmap[layername].color < 0: return True - #print 'deb:layer_isOff: layer is ON' #--------------- - return False - - - def layer_isFrozen(self, layername): # no more used ------- - """Given a layer name, and return its frozen status. - """ - if layersmap and layersmap[layername].frozen: return True - #print 'deb:layer_isFrozen: layer is not FROZEN' #--------------- - return False - - - -def analyzeDXF(dxfFile): #--------------------------------------- - """list statistics about LAYER and BLOCK dependences into textfile.INF - - """ - Window.WaitCursor(True) # Let the user know we are thinking - print 'reading DXF file: %s.' % dxfFile - time1 = sys.time() #time marker1 - drawing = readDXF(dxfFile, objectify) - print 'finish reading in %.4f sec.' % (sys.time()-time1) - - # First sort out all the section_items - sections = dict([(item.name, item) for item in drawing.data]) - - # The section:header may be omited - if 'header' in sections.keys(): print "found section:header" - else: print "File contains no section:header!" - - # The section:tables may be partialy or completely missing. - layersTable = False - global layersmap - layersmap = {} - viewsmap = {} - vportsmap = {} - layersmap_str = '#File contains no table:layers!' - viewsmap_str = '#File contains no table:views!' - vportsmap_str = '#File contains no table:vports!' - if 'tables' in sections.keys(): - print "found section:tables" - views, vports, layers = False, False, False - for table in drawing.tables.data: - if table.name == 'layer': - print "found table:layers" - layers = table - elif table.name == 'view': - print "found table:view" - views = table - elif table.name == 'vport': - print "found table:vport" - vports = table - if layers: #---------------------------------- - for item in layers.data: - if type(item) != list and item.type == 'layer': - #print dir(item) - layersmap[item.name] = [item.color, item.frozen] - #print 'deb:analyzeDXF: layersmap=' , layersmap #------------- - layersmap_str = '#list of LAYERs: name, color, frozen_status ---------------------------\n' - key_list = layersmap.keys() - key_list.sort() - for key in key_list: - #for layer_name, layer_data in layersmap.iteritems(): - layer_name, layer_data = key, layersmap[key] - layer_str = '\'%s\': col=%s' %(layer_name,layer_data[0])#------------- - if layer_data[1]: layer_str += ', frozen' - layersmap_str += layer_str + '\n' - #print 'deb:analyzeDXF: layersmap_str=\n' , layersmap_str #------------- - else: - print "File contains no table:layers!" 
- - if views: #---------------------------------- - for item in views.data: - if type(item) != list and item.type == 'view': - #print dir(item) - viewsmap[item.name] = [item.length] - #print 'deb:analyzeDXF: viewsmap=' , viewsmap #------------- - viewsmap_str = '#list of VIEWs: name, focus_length ------------------------------------\n' - key_list = viewsmap.keys() - key_list.sort() - for key in key_list: - #for view_name, view_data in viewsmap.iteritems(): - view_name, view_data = key, viewsmap[key] - view_str = '\'%s\': length=%s' %(view_name,view_data[0])#------------- - #if view_data[1]: view_str += ', something' - viewsmap_str += view_str + '\n' - #print 'deb:analyzeDXF: layersmap_str=\n' , layersmap_str #------------- - else: - print "File contains no table:views!" - - if vports: #---------------------------------- - for item in vports.data: - if type(item) != list and item.type == 'vport': - #print dir(item) - vportsmap[item.name] = [item.length] - #print 'deb:analyzeDXF: vportsmap=' , vportsmap #------------- - vportsmap_str = '#list of VPORTs: name, focus_length -----------------------------------\n' - key_list = vportsmap.keys() - key_list.sort() - for key in key_list: - #for vport_name, vport_data in vportsmap.iteritems(): - vport_name, vport_data = key, vportsmap[key] - vport_str = '\'%s\': length=%s' %(vport_name,vport_data[0])#------------- - #if vport_data[1]: vport_str += ', something' - vportsmap_str += vport_str + '\n' - #print 'deb:analyzeDXF: vportsmap_str=\n' , vportsmap_str #------------- - else: - print "File contains no table:vports!" - - else: - print "File contains no section:tables!" - print "File contains no tables:layers,views,vports!" - - # The section:blocks may be omited - if 'blocks' in sections.keys(): - print "found section:blocks" - blocksmap = {} - for item in drawing.blocks.data: - #print 'deb:getBlocksmap item=' ,item #-------- - #print 'deb:getBlocksmap item.entities=' ,item.entities #-------- - #print 'deb:getBlocksmap item.entities.data=' ,item.entities.data #-------- - if type(item) != list and item.type == 'block': - xref = False - if item.xref: xref = True - childList = [] - used = False - for item2 in item.entities.data: - if type(item2) != list and item2.type == 'insert': - #print 'deb:getBlocksmap dir(item2)=', dir(item2) #---------- - item2str = [item2.name, item2.layer, item2.color_index, item2.scale, item2.space] - childList.append(item2str) - try: blocksmap[item.name] = [used, childList, xref] - except KeyError: print 'Cannot map "%s" - "%s" as Block!' 
%(item.name, item) - #print 'deb:analyzeDXF: blocksmap=' , blocksmap #------------- - - for item2 in drawing.entities.data: - if type(item2) != list and item2.type == 'insert': - if item2.name in blocksmap.keys(): - if not layersmap or (layersmap and not layersmap[item2.layer][1]): #if insert_layer is not frozen - blocksmap[item2.name][0] = True # marked as world used BLOCK - - key_list = blocksmap.keys() - key_list.reverse() - for key in key_list: - if blocksmap[key][0]: #if used - for child in blocksmap[key][1]: - if not layersmap or (layersmap and not layersmap[child[1]][1]): #if insert_layer is not frozen - blocksmap[child[0]][0] = True # marked as used BLOCK - - blocksmap_str = '#list of BLOCKs: name:(unused)(xref) -[child_name, layer, color, scale, space]-------\n' - key_list = blocksmap.keys() - key_list.sort() - for key in key_list: - #for block_name, block_data in blocksmap.iteritems(): - block_name, block_data = key, blocksmap[key] - block_str = '\'%s\': ' %(block_name) #------------- - used = '(unused)' - if block_data[0]: used = '' -# else: used = '(unused)' - xref = '' - if block_data[2]: xref = '(xref)' - blocksmap_str += block_str + used + xref +'\n' - if block_data: - for block_item in block_data[1]: - block_data_str = ' - %s\n' %block_item - blocksmap_str += block_data_str - #print 'deb:analyzeDXF: blocksmap_str=\n' , blocksmap_str #------------- - else: - blocksmap_str = '#File contains no section:blocks!' - print "File contains no section:blocks!" - - Window.WaitCursor(False) - output_str = '%s\n%s\n%s\n%s' %(viewsmap_str, vportsmap_str, layersmap_str, blocksmap_str) - infFile = dxfFile[:-4] + '_DXF.INF' # replace last char:'.dxf' with '_DXF.inf' - try: - f = file(infFile, 'w') - f.write(INFFILE_HEADER + '\n# this is a comment line\n\n') - f.write(output_str) - f.close() - Draw.PupMenu('DXF importer: report saved in INF-file:%t|' + '\'%s\'' %infFile) - except: - Draw.PupMenu('DXF importer: ERROR by writing report in INF-file:%t|' + '\'%s\'' %infFile) - #finally: f.close() - - - - -def main(dxfFile): #---------------#############################----------- - #print 'deb:filename:', filename #-------------- - global SCENE - global oblist - editmode = Window.EditMode() # are we in edit mode? If so ... - if editmode: - Window.EditMode(0) # leave edit mode before - - #SCENE = bpy.data.scenes.active - #SCENE.objects.selected = [] # deselect all - - global cur_COUNTER #counter for progress_bar - cur_COUNTER = 0 - - #try: - if 1: - #print "Getting settings..." - global GUI_A, GUI_B, g_scale_as - if not GUI_A['g_scale_on'].val: - GUI_A['g_scale'].val = 1.0 - - keywords = {} - drawTypes = {} - for k, v in GUI_A.iteritems(): - keywords[k] = v.val - for k, v in GUI_B.iteritems(): - drawTypes[k] = v.val - #print 'deb:startUInew keywords: ', keywords #-------------- - #print 'deb:startUInew drawTypes: ', drawTypes #-------------- - - # The settings object controls how dxf entities are drawn - settings.update(keywords, drawTypes) - #print 'deb:settings.var:\n', settings.var #----------------------- - - if not settings: - #Draw.PupMenu('DXF importer: EXIT!%t') - #print '\nDXF Import: terminated by user!' - print '\nDXF Import: terminated, cause settings failure!' 
- Window.WaitCursor(False) - if editmode: Window.EditMode(1) # and put things back how we fond them - return None - - #no more used dxfFile = dxfFileName.val - #print 'deb: dxfFile file: ', dxfFile #---------------------- - if dxfFile.lower().endswith('.dxf') and sys.exists(dxfFile): - Window.WaitCursor(True) # Let the user know we are thinking - print 'reading file: %s.' % dxfFile - time1 = sys.time() #time marker1 - drawing = readDXF(dxfFile, objectify) - print 'reading finished in %.4f sec.' % (sys.time()-time1) - Window.WaitCursor(False) - elif dxfFile.lower().endswith('.dwg') and sys.exists(dxfFile): - if not extCONV_OK: - Draw.PupMenu(extCONV_TEXT) - Window.WaitCursor(False) - if editmode: Window.EditMode(1) # and put things back how we fond them - return None - else: - Window.WaitCursor(True) # Let the user know we are thinking - #todo: issue: in DConvertCon.exe the output filename is fixed to dwg_name.dxf - - if 0: # works only for Windows - dwgTemp = 'temp_01.dwg' - dxfTemp = 'temp_01.dxf' - os.system('copy %s %s' %(dxfFile,dwgTemp)) - else: - dwgTemp = dxfFile - dxfTemp = dxfFile[:-3]+'dxf' - #print 'temp. converting: %s\n to: %s' %(dxfFile, dxfTemp) - #os.system('%s %s -acad11 -dxf' %(extCONV_PATH, dxfFile)) - os.system('%s %s -dxf' %(extCONV_PATH, dwgTemp)) - #os.system('%s %s -dxf' %(extCONV_PATH, dxfFile_temp)) - if sys.exists(dxfTemp): - print 'reading file: %s.' % dxfTemp - time1 = sys.time() #time marker1 - drawing = readDXF(dxfTemp, objectify) - #os.remove(dwgTemp) - os.remove(dxfTemp) # clean up - print 'reading finished in %.4f sec.' % (sys.time()-time1) - Window.WaitCursor(False) - else: - if UI_MODE: Draw.PupMenu('DWG importer: nothing imported!%t|No valid DXF-representation found!') - print 'DWG importer: nothing imported. No valid DXF-representation found.' - Window.WaitCursor(False) - if editmode: Window.EditMode(1) # and put things back how we fond them - return None - else: - if UI_MODE: Draw.PupMenu('DXF importer: Alert!%t| no valid DXF-file selected!') - print "DXF importer: Alert! - no valid DXF-file selected." - Window.WaitCursor(False) - if editmode: Window.EditMode(1) # and put things back how we fond them - return None - - # Draw all the know entity types in the current scene - oblist = [] # a list of all created AND linked objects for final f_globalScale - time2 = sys.time() #time marker2 - - Window.WaitCursor(True) # Let the user know we are thinking - settings.write("\n\nDrawing entities...") - - settings.validate(drawing) - - global activObjectLayer, activObjectName - activObjectLayer, activObjectName = None, None - - if M_OBJ: car_init() - - drawEntities(drawing.entities, settings) - - #print 'deb:drawEntities after: oblist:', oblist #----------------------- - if M_OBJ: car_end() - if oblist: # and settings.var['g_scale'] != 1: - globalScale(oblist, settings.var['g_scale']) - - # Set visibility for all layers on all View3d - #Window.ViewLayers([i+1 for i in range(18)]) # for 2.45 - SCENE.setLayers([i+1 for i in range(18)]) - SCENE.update(1) - SCENE.objects.selected = [i[0] for i in oblist] #select only the imported objects - #SCENE.objects.selected = SCENE.objects #select all objects in current scene - Blender.Redraw() - - time_text = sys.time() - time2 - Window.WaitCursor(False) - if settings.var['paper_space_on']: space = 'from paper space' - else: space = 'from model space' - ob_len = len(oblist) - message = ' %s objects imported %s in %.4f sec. 
-----DONE-----' % (ob_len, space, time_text) - settings.progress(1.0/settings.obj_number, 'DXF import done!') - print message - #settings.write(message) - if UI_MODE: Draw.PupMenu('DXF importer: Done!|finished in %.4f sec.' % time_text) - - #finally: - # restore state even if things didn't work - #print 'deb:drawEntities finally!' #----------------------- - Window.WaitCursor(False) - if editmode: Window.EditMode(1) # and put things back how we fond them - - - -def getOCS(az): #----------------------------------------------------------------- - """An implimentation of the Arbitrary Axis Algorithm. - """ - #decide if we need to transform our coords - #if az[0] == 0 and az[1] == 0: - if abs(az[0]) < 0.00001 and abs(az[1]) < 0.00001: - if az[2] > 0.0: - return False - elif az[2] < 0.0: - ax = Mathutils.Vector(-1.0, 0, 0) - ay = Mathutils.Vector(0, 1.0, 0) - az = Mathutils.Vector(0, 0, -1.0) - return ax, ay, az - - az = Mathutils.Vector(az) - - cap = 0.015625 # square polar cap value (1/64.0) - if abs(az.x) < cap and abs(az.y) < cap: - ax = M_CrossVecs(WORLDY,az) - else: - ax = M_CrossVecs(WORLDZ,az) - ax = ax.normalize() - ay = M_CrossVecs(az, ax) - ay = ay.normalize() - return ax, ay, az - - - -def transform(normal, rotation, obj): #-------------------------------------------- - """Use the calculated ocs to determine the objects location/orientation in space. - - Quote from dxf docs: - The elevation value stored with an entity and output in DXF files is a sum - of the Z-coordinate difference between the UCS XY plane and the OCS XY - plane, and the elevation value that the user specified at the time the entity - was drawn. - """ - ma = Mathutils.Matrix([1,0,0],[0,1,0],[0,0,1]) - o = Mathutils.Vector(obj.loc) - ocs = getOCS(normal) - if ocs: - ma = Mathutils.Matrix(ocs[0], ocs[1], ocs[2]) - o = ma.invert() * o - ma = Mathutils.Matrix(ocs[0], ocs[1], ocs[2]) - - if rotation != 0: - g = radians(-rotation) - rmat = Mathutils.Matrix([cos(g), -sin(g), 0], [sin(g), cos(g), 0], [0, 0, 1]) - ma = rmat * ma - - obj.setMatrix(ma) - obj.loc = o - #print 'deb:new obj.matrix:\n', obj.getMatrix() #-------------------- - - - -def rotXY_Vec(rotation, vec): #---------------------------------------------------- - """Rotate vector vec in XY-plane. 
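getOCS() above implements the Arbitrary Axis Algorithm from the DXF reference. A plain-tuple sketch of the same axis construction, without Mathutils and with illustrative helper names (the script additionally short-circuits the case where the extrusion normal is already +Z):

from math import sqrt

def _cross(a, b):
    return (a[1]*b[2] - a[2]*b[1],
            a[2]*b[0] - a[0]*b[2],
            a[0]*b[1] - a[1]*b[0])

def _normalize(v):
    l = sqrt(v[0]**2 + v[1]**2 + v[2]**2)
    return (v[0]/l, v[1]/l, v[2]/l)

def ocs_axes(n):
    # n is the entity's extrusion normal, assumed unit length
    if abs(n[0]) < 1.0/64 and abs(n[1]) < 1.0/64:
        ax = _cross((0.0, 1.0, 0.0), n)   # near world Z: start from world Y
    else:
        ax = _cross((0.0, 0.0, 1.0), n)   # otherwise: start from world Z
    ax = _normalize(ax)
    ay = _normalize(_cross(n, ax))
    return ax, ay, n

# a -Z extrusion yields the mirrored axes getOCS() hard-codes for that case:
print(ocs_axes((0.0, 0.0, -1.0)))
# -> ((-1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, -1.0))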
vec must be in radians - """ - if rotation != 0: - o = Mathutils.Vector(vec) - g = radians(-rotation) - vec = o * Mathutils.Matrix([cos(g), -sin(g), 0], [sin(g), cos(g), 0], [0, 0, 1]) - return vec - - - -def getLayersmap(dxflayers): #------------------------------------------------------ - """Build two dictionaries: 1.layername:layer object, and 2.layername:layername_short - gets set of layers from TABLES SECTION LAYERS - """ - layersmap = {} - layernamesmap = {} - for item in dxflayers.data: - if type(item) != list and item.type == 'layer': - layersmap[item.name] = item - layername_short = item.name[:MAX_NAMELENGTH-1] - i = 0 #sufix for layernames cause Blender-objectnames-limits - while layername_short in layernamesmap.keys(): - i += 1 - suffix = str(i) #--todo--set zero-leading number format - layername_short = layername_short[:-2] + suffix - layernamesmap[item.name] = layername_short - - #print 'deb:getLayersmap layersmap:\n', layersmap #------------ - #print 'deb:getLayersmap layernamesmap:\n', layernamesmap #------------ - return layersmap, layernamesmap - - - -def getBlocksmap(drawing, layersmap, layFrozen_on=False): #-------------------------------------------------------- - """Build a dictionary of blockname:block_data pairs - """ - usedblocks = {} - for item in drawing.blocks.data: - #print 'deb:getBlocksmap item=%s\n i.entities=%s\n i.data=%s' %(item,item.entities,item.entities.data) - if type(item) != list and item.type == 'block': - childList = [] - used = False - for item2 in item.entities.data: - if type(item2) != list and item2.type == 'insert': - #print 'deb:getBlocksmap dir(item2)=', dir(item2) #---------- - item2str = [item2.name, item2.layer] - childList.append(item2str) - try: usedblocks[item.name] = [used, childList] - except KeyError: print 'Cannot find "%s" Block!' %(item.name) - #print 'deb:getBlocksmap: usedblocks=' , usedblocks #------------- - #print 'deb:getBlocksmap: layersmap=' , layersmap #------------- - - for item in drawing.entities.data: - if type(item) != list and item.type == 'insert': - if not layersmap or (not layersmap[item.layer].frozen or layFrozen_on): #if insert_layer is not frozen - try: usedblocks[item.name][0] = True - except KeyError: print 'Cannot find "%s" Block!' %(item.name) - - key_list = usedblocks.keys() - key_list.reverse() - for key in key_list: - if usedblocks[key][0]: #if parent used, then set used also all child blocks - for child in usedblocks[key][1]: - if not layersmap or (layersmap and not layersmap[child[1]].frozen): #if insert_layer is not frozen - try: usedblocks[child[0]][0] = True # marked as used BLOCK - except KeyError: print 'Cannot find "%s" Block!' %(child[0]) - - usedblocks = [i for i in usedblocks.keys() if usedblocks[i][0]] - #print 'deb:getBlocksmap: usedblocks=' , usedblocks #------------- - obj_number = 0 - blocksmap = {} - for item in drawing.blocks.data: - if type(item) != list and item.type == 'block' and item.name in usedblocks: - #if item.name.startswith('*X'): #--todo-- - obj_number += len(item.entities.data) - try: blocksmap[item.name] = item - except KeyError: print 'Cannot map "%s" - "%s" as Block!' 
%(item.name, item) - - - #print 'deb:getBlocksmap: blocksmap:\n', blocksmap #------------ - return blocksmap, obj_number - - -def getBlocknamesmap(blocksmap): #-------------------------------------------------------- - """Build a dictionary of blockname:blockname_short pairs - """ - #print 'deb:getBlocknamesmap blocksmap:\n', blocksmap #------------ - blocknamesmap = {} - for n in blocksmap.keys(): - blockname_short = n[:MAX_NAMELENGTH-1] - i = 0 #sufix for blockname cause Blender-objectnamelength-limit - while blockname_short in blocknamesmap.keys(): - i += 1 - suffix = str(i) - blockname_short = blockname_short[:-2] + suffix - blocknamesmap[n] = blockname_short - #print 'deb:getBlocknamesmap blocknamesmap:\n', blocknamesmap #------------ - return blocknamesmap - - -def drawEntities(entities, settings, block_def=None): #---------------------------------------- - """Draw every kind of thing in the entity list. - - If provided 'block_def': the entities are to be added to the Blender 'group'. - """ - for _type in type_map.keys(): - #print 'deb:drawEntities_type:', _type #------------------ - # for each known type get a list of that type and call the associated draw function - entities_type = entities.get_type(_type) - if entities_type: drawer(_type, entities_type, settings, block_def) - - -def drawer(_type, entities, settings, block_def): #------------------------------------------ - """Call with a list of entities and a settings object to generate Blender geometry. - - If 'block_def': the entities are to be added to the Blender 'group'. - """ - global layersmap, layersmapshort - #print 'deb:drawer _type, entities:\n ', _type, entities #----------------------- - - if entities: - # Break out early if settings says we aren't drawing the current dxf-type - global cur_COUNTER #counter for progress_bar - group = None - #print 'deb:drawer.check:_type: ', _type #-------------------- - if _type == '3dface':_type = 'face' # hack, while python_variable_name can not beginn with a nummber - if not settings.drawTypes[_type] or _type == 'block_record': - message = 'Skipping dxf\'%ss\' entities' %_type - settings.write(message, True) - cur_COUNTER += len(entities) - settings.progress(cur_COUNTER, message) - return - #print 'deb:drawer.todo:_type:', _type #----------------------- - #print 'deb:drawer entities:\n ', entities #----------------------- - - len_temp = len(entities) - # filtering only model-space enitities (no paper-space enitities) - if settings.var['paper_space_on']: - entities = [entity for entity in entities if entity.space != 0] - else: - entities = [entity for entity in entities if entity.space == 0] - - # filtering only objects with color from acceptedColorsList - if settings.var['colorFilter_on']: - entities = [entity for entity in entities if entity.color in settings.acceptedColors] - - # filtering only objects on layers from acceptedLayersList - if settings.var['layerFilter_on']: - #entities = [entity for entity in entities if entity.layer[0] in ['M','3','0'] and not entity.layer.endswith('H')] - entities = [entity for entity in entities if entity.layer in settings.acceptedLayers] - - # patch for incomplete layer table in HL2-DXF-files - if layersmap: - for entity in entities: - oblayer = entity.layer - if oblayer not in layersmap.keys(): - layer_obj = Layer(None, name=oblayer) - layersmap[oblayer] = layer_obj - layername_short = oblayer[:MAX_NAMELENGTH-1] - i = 0 #sufix for layernames cause Blender-objectnames-limits - while layername_short in layernamesmap.keys(): - i += 1 - suffix = 
str(i) #--todo--set zero-leading number format - layername_short = layername_short[:-2] + suffix - layernamesmap[oblayer] = layername_short - - # filtering only objects on not-frozen layers - if layersmap and not settings.var['layFrozen_on']: - entities = [entity for entity in entities if not layersmap[entity.layer].frozen] - - global activObjectLayer, activObjectName - activObjectLayer = '' - activObjectName = '' - - message = "Drawing dxf \'%ss\'..." %_type - cur_COUNTER += len_temp - len(entities) - settings.write(message, False) - settings.progress(cur_COUNTER, message) - if len(entities) > 0.1 / settings.obj_number: - show_progress = int(0.03 / settings.obj_number) - else: show_progress = 0 - cur_temp = 0 - - #print 'deb:drawer cur_COUNTER: ', cur_COUNTER #----------------------- - - for entity in entities: #----loop------------------------------------- - settings.write('\b.', False) - cur_COUNTER += 1 - if show_progress: - cur_temp += 1 - if cur_temp == show_progress: - settings.progress(cur_COUNTER, message) - cur_temp = 0 - #print 'deb:drawer show_progress=',show_progress #---------------- - - # get the layer group (just to make things a little cleaner) - if settings.var['group_bylayer_on'] and not block_def: - group = getGroup('l:%s' % layernamesmap[entity.layer]) - - if _type == 'insert': #---- INSERT and MINSERT=array -------------------- - if not settings.var['block_nn']: #----turn off support for noname BLOCKs - prefix = entity.name[:2] - if prefix in ('*X', '*U', '*D'): - #print 'deb:drawer entity.name:', entity.name #------------ - continue - if settings.var['blockFilter_on'] and not settings.accepted_block(entity.name): - continue - - #print 'deb:insert entity.loc:', entity.loc #---------------- - insertFlag = True - columns = entity.columns[0] - coldist = entity.columns[1] - rows = entity.rows[0] - rowdist = entity.rows[1] - deltaloc = [0,0,0] - #print 'deb:insert columns, rows:', columns, rows #----------- - for col in xrange(columns): - deltaloc[0] = col * coldist - for row in xrange(rows): - deltaloc[1] = row * rowdist - #print 'deb:insert col=%s, row=%s,deltaloc=%s' %(col, row, deltaloc) #------ - ob = entity.draw(settings, deltaloc) #-----draw BLOCK---------- - if block_def: - blockFlag = True - bl_loc = block_def[1] - ob.loc = [ob.loc[0]-bl_loc[0],ob.loc[1]-bl_loc[1],ob.loc[2]-bl_loc[2]] - else: blockFlag = False - setObjectProperties(ob, group, entity, settings, block_def) - if ob: - if settings.var['optimization'] <= settings.MIN: - #if settings.var['g_origin_on'] and not block_def: ob.loc = Mathutils.Vector(ob.loc) + settings.g_origin - if settings.var['g_scale_on']: globalScaleOne(ob, insertFlag, blockFlag, settings.var['g_scale']) - settings.redraw() - else: oblist.append((ob, insertFlag, blockFlag)) - - else: #---draw entities except BLOCKs/INSERTs--------------------- - insertFlag = False - alt_obname = activObjectName - ob = entity.draw(settings) - if ob: - if M_OBJ and ob.type=='Mesh': #'Curve', 'Text' - if block_def: - blockFlag = True - bl_loc = block_def[1] - ob.loc = [ob.loc[0]-bl_loc[0],ob.loc[1]-bl_loc[1],ob.loc[2]-bl_loc[2]] - car_nr() - - elif ob.name != alt_obname: - if block_def: - blockFlag = True - bl_loc = block_def[1] - ob.loc = [ob.loc[0]-bl_loc[0],ob.loc[1]-bl_loc[1],ob.loc[2]-bl_loc[2]] - else: blockFlag = False - setObjectProperties(ob, group, entity, settings, block_def) - if settings.var['optimization'] <= settings.MIN: - #if settings.var['g_origin_on'] and not block_def: ob.loc = Mathutils.Vector(ob.loc) + settings.g_origin - if 
settings.var['g_scale_on']: globalScaleOne(ob, insertFlag, blockFlag, settings.var['g_scale']) - settings.redraw() - else: oblist.append((ob, insertFlag, blockFlag)) - - #print 'deb:Finished drawing:', entities[0].type #------------------------ - message = "\nDrawing dxf\'%ss\' done!" % _type - settings.write(message, True) - - - -def globalScale(oblist, SCALE): #--------------------------------------------------------- - """Global_scale for list of all imported objects. - - oblist is a list of pairs (ob, insertFlag), where insertFlag=True/False - """ - #print 'deb:globalScale.oblist: ---------%\n', oblist #--------------------- - for l in oblist: - ob, insertFlag, blockFlag = l[0], l[1], l[2] - globalScaleOne(ob, insertFlag, blockFlag, SCALE) - - -def globalScaleOne(ob, insertFlag, blockFlag, SCALE): #--------------------------------------------------------- - """Global_scale imported object. - """ - #print 'deb:globalScaleOne ob: ', ob #--------------------- - if settings.var['g_origin_on'] and not blockFlag: - ob.loc = Mathutils.Vector(ob.loc) + settings.g_origin - - SCALE_MAT= Mathutils.Matrix([SCALE,0,0,0],[0,SCALE,0,0],[0,0,SCALE,0],[0,0,0,1]) - if insertFlag: # by BLOCKs/INSERTs only insert-point coords must be scaled------------ - ob.loc = Mathutils.Vector(ob.loc) * SCALE_MAT - else: # entire scaling for all other imported objects ------------ - if ob.type == 'Mesh': - me = ob.getData(name_only=False, mesh=True) - #me = Mesh.Get(ob.name) - # set centers of all objects in (0,0,0) - #me.transform(ob.matrixWorld*SCALE_MAT) - #ob.loc = Mathutils.Vector([0,0,0]) - # preseve centers of all objects - me.transform(SCALE_MAT) - ob.loc = Mathutils.Vector(ob.loc) * SCALE_MAT - else: #--todo-- also for curves: neutral scale factor after import - ob.setMatrix(ob.matrixWorld*SCALE_MAT) - - -def setObjectProperties(ob, group, entity, settings, block_def): #----------------------- - """Link object to scene. - """ - - if not ob: #remi--todo----------------------- - message = "\nObject \'%s\' not found!" %entity - settings.write(message) - return - - if group: - setGroup(group, ob) # if object belongs to group - - if block_def: # if object belongs to BLOCK_def - Move it to layer nr19 - setGroup(block_def[0], ob) - #print 'deb:setObjectProperties \'%s\' set to block_def_group!' %ob.name #--------- - ob.layers = [19] - else: - #ob.layers = [i+1 for i in xrange(20)] #remi--todo------------ - ob.layers = [settings.var['target_layer']] - - # Set material for any objects except empties - if ob.type != 'Empty' and settings.var['material_on']: - setMaterial_from(entity, ob, settings, block_def) - - # Set the visibility - #if settings.layer_isOff(entity.layer): - if layersmap and layersmap[entity.layer].color < 0: # color is negative if layer is off - #ob.layers = [20] #remi--todo------------- - ob.restrictDisplay = True - ob.restrictRender = True - - #print 'deb:\n---------linking Object %s!' %ob.name #---------- - - - -def getGroup(name): #----------------------------------------------------------------- - """Returns a Blender group-object. - """ - try: - group = Group.Get(name) - except: # What is the exception? - group = Group.New(name) - return group - - -def setGroup(group, ob): #------------------------------------------------------------ - """Assigns object to Blender group. 
- """ - try: - group.objects.link(ob) - except: - group.objects.append(ob) #remi?--------------- - - - -def setMaterial_from(entity, ob, settings, block_def): #------------------------------------------------ - """ Set Blender-material for the object controled by item. - - Set Blender-material for the object - - controlled by settings.var['material_from'] - """ - if settings.var['material_from'] == 1: # 1= material from color - if entity.color_index == BYLAYER or entity.color_index == 256: - mat = settings.colMaterials(entity.layer) - elif entity.color_index == BYBLOCK or entity.color_index == 0: - #--todo-- looking for block.color_index - #mat = settings.colMaterials(block.color_index) - #if block_def: mat = settings.colMaterials(block_def[2]) - mat = settings.colMaterials(3) - else: - mat = settings.colMaterials(entity.color_index) - - elif settings.var['material_from'] == 2: # 2= material from layer_name - mat = settings.layMaterials(layername=entity.layer) - - elif settings.var['material_from'] == 3: # 3= material from layer+color - mat = settings.layMaterials(layername=entity.layer, color=entity.color_index) - -# elif settings.var['material_from'] == 4: # 4= material from block_name - -# elif settings.var['material_from'] == 5: # 5= material from XDATA - -# elif settings.var['material_from'] == 6: # 6= material from INI-file - - else: # set neutral material - try: - mat = Material.Get('dxf-neutral') - except: - mat = Material.New('dxf-neutral') - mat.setRGBCol(color_map[3]) - try:mat.setMode('Shadeless', 'Wire') #work-around for 2.45rc1-bug - except: - mat.mode |= Material.Modes.SHADELESS # - mat.mode |= Material.Modes.WIRE - try: - #print 'deb:material mat:', mat #----------- - ob.setMaterials([mat]) #assigns Blender-material to object - except ValueError: - settings.write("material error - \'%s\'!" %mat) - ob.colbits = 0x01 # Set OB materials. - - - -def calcBulge(p1, p2, arc_res, triples=False): #------------------------------------------------- - """given startpoint, endpoint and bulge of arc, returns points/segments of its representation. - - Needs to take into account bulge sign. - negative = clockwise - positive = counter-clockwise - - to find center given two points, and arc angle - calculate radius - Cord = sqrt(start^2 + end^2) - S = (bulge*Cord)/2 - radius = ((Cord/2)^2+S^2)/2*S - angle of arc = 4*atan( bulge ) - angle from p1 to center is (180-angle)/2 - get vector pointing from p1 to p2 (p2 - p1) - normalize it and multiply by radius - rotate around p1 by angle to center, point to center. 
- start angle = angle between (center - p1) and worldX - end angle = angle between (center - p2) and worldX - - calculate the center, radius, start angle, and end angle - returns points/segments of its mesh representation - incl.startpoint, without endpoint - """ - - bulge = p1.bulge - p1 = Mathutils.Vector(p1.loc) - p2 = Mathutils.Vector(p2.loc) - cord = p2 - p1 # vector from p1 to p2 - clength = cord.length - s = (bulge * clength)/2.0 # sagitta (height) - radius = abs(((clength/2.0)**2.0 + s**2.0)/(2.0*s)) # magic formula - angle = (degrees(4.0*atan(bulge))) # theta (included angle) - radial = cord.normalize() * radius # a radius length vector aligned with cord - delta = (180.0 - abs(angle))/2.0 # the angle from cord to center - if bulge < 0: delta = -delta - rmat = Mathutils.RotationMatrix(-delta, 3, 'Z') - center = p1 + (rmat * radial) # rotate radial by delta degrees, then add to p1 to find center - #length = radians(abs(angle)) * radius - #print 'deb:calcBulge:\n angle, delta: ', angle, delta #---------------- - #print 'deb:center, radius: ', center, radius #---------------------- - startpoint = p1 - center - endpoint = p2 - center - #print 'deb:calcBulg: startpoint:', startpoint #--------- - #print 'deb:calcBulg: endpoint:', endpoint #--------- - - if not triples: #IF mesh-representation ----------- - if arc_res > 1024: arc_res = 1024 - elif arc_res < 4: arc_res = 4 - pieces = int(abs(angle)/(360.0/arc_res)) # set a fixed step of ARC_RESOLUTION - if pieces < 3: pieces = 3 - else: #IF curve-representation ------------------------------- - if arc_res > 32: arc_res = 32 - elif arc_res < 3: arc_res = 3 - pieces = int(abs(angle)/(360.0/arc_res)) # set a fixed step of ARC_RESOLUTION - if pieces < 2: pieces = 2 - - step = angle/pieces # set step so pieces * step = degrees in arc - stepmatrix = Mathutils.RotationMatrix(-step, 3, "Z") - - if not triples: #IF mesh-representation ----------- - points = [startpoint] - point = startpoint - for i in xrange(int(pieces)-1): #fast (but not so acurate as: vector * RotMatrix(-step*i,3,"Z") - point = stepmatrix * point - points.append(point) - points = [ point+center for point in points] - # vector to point convertion: - points = [list(point) for point in points] - return points, list(center) - - else: #IF curve-representation ------------------------------- - # correct Bezier curves representation for free segmented circles/arcs - step2 = radians(step * 0.5) - bulg = radius * (1 - cos(step2)) - deltaY = 4.0 * bulg / (3.0 * sin(step2) ) - #print 'deb:calcArcCurve: bulg, deltaY:\n', bulg, deltaY #--------- - #print 'deb:calcArcCurve: step:\n', step #--------- - - #org handler0 = Mathutils.Vector(0.0, -deltaY, 0.0) - #handler = startmatrix * handler0 - #endhandler = endmatrix * handler0 - rotMatr90 = Mathutils.Matrix([0, -1, 0], [1, 0, 0], [0, 0, 1]) - handler = rotMatr90 * startpoint - handler = - deltaY * handler.normalize() - endhandler = rotMatr90 * endpoint - endhandler = - deltaY * endhandler.normalize() - - points = [startpoint] - handlers1 = [startpoint + handler] - handlers2 = [startpoint - handler] - point = Mathutils.Vector(startpoint) - for i in xrange(int(pieces)-1): - point = stepmatrix * point - handler = stepmatrix * handler - handler1 = point + handler - handler2 = point - handler - points.append(point) - handlers1.append(handler1) - handlers2.append(handler2) - points.append(endpoint) - handlers1.append(endpoint + endhandler) - handlers2.append(endpoint - endhandler) - - points = [point + center for point in points] - handlers1 = [point 
+ center for point in handlers1] - handlers2 = [point + center for point in handlers2] - - VectorTriples = [list(h1)+list(p)+list(h2) for h1,p,h2 in zip(handlers1, points, handlers2)] - #print 'deb:calcBulgCurve: handlers1:\n', handlers1 #--------- - #print 'deb:calcBulgCurve: points:\n', points #--------- - #print 'deb:calcBulgCurve: handlers2:\n', handlers2 #--------- - #print 'deb:calcBulgCurve: VectorTriples:\n', VectorTriples #--------- - return VectorTriples - - - - -def calcArc(center, radius, start, end, arc_res, triples): #----------------------------------------- - """calculate Points (or BezierTriples) for ARC/CIRCLEs representation. - - Given parameters of the ARC/CIRCLE, - returns points/segments (or BezierTriples) and centerPoint - """ - # center is currently set by object - # if start > end: start = start - 360 - if end > 360: end = end % 360.0 - - startmatrix = Mathutils.RotationMatrix(-start, 3, "Z") - startpoint = startmatrix * Mathutils.Vector(radius, 0, 0) - endmatrix = Mathutils.RotationMatrix(-end, 3, "Z") - endpoint = endmatrix * Mathutils.Vector(radius, 0, 0) - - if end < start: end +=360.0 - angle = end - start - #length = radians(angle) * radius - - if not triples: #IF mesh-representation ----------- - if arc_res > 1024: arc_res = 1024 - elif arc_res < 4: arc_res = 4 - pieces = int(abs(angle)/(360.0/arc_res)) # set a fixed step of ARC_RESOLUTION - if pieces < 3: pieces = 3 - step = angle/pieces # set step so pieces * step = degrees in arc - stepmatrix = Mathutils.RotationMatrix(-step, 3, "Z") - - points = [startpoint] - point = startpoint - for i in xrange(int(pieces)-1): - point = stepmatrix * point - points.append(point) - points.append(endpoint) - - if center: - centerVec = Mathutils.Vector(center) - #points = [point + centerVec for point in points()] - points = [point + centerVec for point in points] - # vector to point convertion: - points = [list(point) for point in points] - return points - - else: #IF curve-representation --------------- - if arc_res > 32: arc_res = 32 - elif arc_res < 3: arc_res = 3 - pieces = int(abs(angle)/(360.0/arc_res)) # set a fixed step of ARC_RESOLUTION - if pieces < 2: pieces = 2 - step = angle/pieces # set step so pieces * step = degrees in arc - stepmatrix = Mathutils.RotationMatrix(-step, 3, "Z") - - # correct Bezier curves representation for free segmented circles/arcs - step2 = radians(step * 0.5) - bulg = radius * (1 - cos(step2)) - deltaY = 4.0 * bulg / (3.0 * sin(step2) ) - #print 'deb:calcArcCurve: bulg, deltaY:\n', bulg, deltaY #--------- - #print 'deb:calcArcCurve: step:\n', step #--------- - handler0 = Mathutils.Vector(0.0, -deltaY, 0.0) - - points = [startpoint] - handler = startmatrix * handler0 - endhandler = endmatrix * handler0 - handlers1 = [startpoint + handler] - handlers2 = [startpoint - handler] - point = Mathutils.Vector(startpoint) - for i in xrange(int(pieces)-1): - point = stepmatrix * point - handler = stepmatrix * handler - handler1 = point + handler - handler2 = point - handler - points.append(point) - handlers1.append(handler1) - handlers2.append(handler2) - points.append(endpoint) - handlers1.append(endpoint + endhandler) - handlers2.append(endpoint - endhandler) - VectorTriples = [list(h1)+list(p)+list(h2) for h1,p,h2 in zip(handlers1, points, handlers2)] - #print 'deb:calcArcCurve: handlers1:\n', handlers1 #--------- - #print 'deb:calcArcCurve: points:\n', points #--------- - #print 'deb:calcArcCurve: handlers2:\n', handlers2 #--------- - #print 'deb:calcArcCurve: VectorTriples:\n', VectorTriples 
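The sagitta/radius arithmetic described in the calcBulge() docstring can be checked in isolation. A minimal sketch of the radius, included angle and mesh segment count, using the same formulas and the same clamping as the mesh branch above; the function name and the default arc_res value are illustrative, and the OCS transform and center/orientation handling are deliberately left out:

from math import atan, degrees, hypot

def bulge_arc_params(p1, p2, bulge, arc_res=16):
    chord = hypot(p2[0] - p1[0], p2[1] - p1[1])
    s = bulge * chord / 2.0                        # sagitta (arc "height")
    radius = abs(((chord / 2.0) ** 2 + s ** 2) / (2.0 * s))
    angle = degrees(4.0 * atan(bulge))             # included angle, signed
    arc_res = min(max(arc_res, 4), 1024)           # mesh-branch clamping
    pieces = max(int(abs(angle) / (360.0 / arc_res)), 3)
    return radius, angle, pieces

# bulge = 1.0 is the semicircle case: radius = half the chord, 180 degrees
print(bulge_arc_params((0.0, 0.0), (2.0, 0.0), 1.0))   # -> (1.0, 180.0, 8)

With bulge = 1.0 the formulas reduce to the reference case from the DXF spec: the radius equals half the chord and the included angle is 180 degrees, which a 16-segment full circle samples into 8 pieces.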
#--------- - return VectorTriples - - -def drawCurveCircle(circle): #--- no more used -------------------------------------------- - """Given a dxf circle object return a blender circle object using curves. - """ - c = Curve.New('circle') # create new curve data - center = circle.loc - radius = circle.radius - - p1 = (0, -radius, 0) - p2 = (radius, 0, 0) - p3 = (0, radius, 0) - p4 = (-radius, 0, 0) - - p1 = BezTriple.New(p1) - p2 = BezTriple.New(p2) - p3 = BezTriple.New(p3) - p4 = BezTriple.New(p4) - - curve = c.appendNurb(p1) - curve.append(p2) - curve.append(p3) - curve.append(p4) - for point in curve: - point.handleTypes = [AUTO, AUTO] - point.radius = 1.0 - curve.flagU = 1 # Set curve cyclic - c.update() - - ob = Object.New('Curve', 'circle') # make curve object - return ob - - -def drawCurveArc(self): #---- only for ELLIPSE ------------------------------------------------------------- - """Given a dxf ELLIPSE object return a blender_curve. - """ - center = self.loc - radius = self.radius - start = self.start_angle - end = self.end_angle - - if start > end: - start = start - 360.0 - startmatrix = Mathutils.RotationMatrix(start, 3, "Z") - startpoint = startmatrix * Mathutils.Vector((radius, 0, 0)) - endmatrix = Mathutils.RotationMatrix(end, 3, "Z") - endpoint = endmatrix * Mathutils.Vector((radius, 0, 0)) - # Note: handles must be tangent to arc and of correct length... - - a = Curve.New('arc') # create new curve data - - p1 = (0, -radius, 0) - p2 = (radius, 0, 0) - p3 = (0, radius, 0) - p4 = (-radius, 0, 0) - - p1 = BezTriple.New(p1) - p2 = BezTriple.New(p2) - p3 = BezTriple.New(p3) - p4 = BezTriple.New(p4) - - curve = a.appendNurb(p1) - curve.append(p2) - curve.append(p3) - curve.append(p4) - for point in curve: - point.handleTypes = [AUTO, AUTO] - point.radius = 1.0 - curve.flagU = 1 # Set curve cyclic - a.update() - - ob = Object.New('Curve', 'arc') # make curve object - return ob - - - - -# GUI STUFF -----#################################################----------------- -from Blender.BGL import glColor3f, glRecti, glClear, glRasterPos2d - -EVENT_NONE = 1 -EVENT_START = 2 -EVENT_REDRAW = 3 -EVENT_LOAD_INI = 4 -EVENT_SAVE_INI = 5 -EVENT_RESET = 6 -EVENT_CHOOSE_INI = 7 -EVENT_CHOOSE_DXF = 8 -EVENT_HELP = 9 -EVENT_PRESETCURV = 10 -EVENT_PRESETS = 11 -EVENT_DXF_DIR = 12 -# = 13 -EVENT_LIST = 14 -EVENT_ORIGIN = 15 -EVENT_SCALE = 16 -EVENT_PRESET2D = 20 -EVENT_PRESET3D = 21 -EVENT_EXIT = 100 -GUI_EVENT = EVENT_NONE - -GUI_A = {} # GUI-buttons dictionary for parameter -GUI_B = {} # GUI-buttons dictionary for drawingTypes - -# settings default, initialize ------------------------ - -points_as_menu = "convert to: %t|empty %x1|mesh.vertex %x2|thin sphere %x3|thin box %x4|..curve.vertex %x5" -lines_as_menu = "convert to: %t|..edge %x1|mesh %x2|..thin cylinder %x3|thin box %x4|Bezier-curve %x5|..NURBS-curve %x6" -mlines_as_menu = "convert to: %t|..edge %x1|..mesh %x2|..thin cylinder %x3|..thin box %x|..curve %x5" -plines_as_menu = "convert to: %t|..edge %x1|mesh %x2|..thin cylinder %x3|..thin box %x4|Bezier-curve %x5|NURBS-curve %x6" -splines_as_menu = "convert to: %t|mesh %x2|..thin cylinder %x3|..thin box %x4|Bezier-curve %x5|NURBS-curve %x6" -plines3_as_menu = "convert to: %t|..edge %x1|mesh %x2|..thin cylinder %x3|..thin box %x4|Bezier-curve %x5|NURBS-curve %x6" -plmesh_as_menu = "convert to: %t|..edge %x1|mesh %x2|..NURBS-surface %x6" -solids_as_menu = "convert to: %t|..edge %x1|mesh %x2" -blocks_as_menu = "convert to: %t|dupliGroup %x1|..real.Group %x2|..exploded %x3" -texts_as_menu = 
"convert to: %t|text %x1|..mesh %x2|..curve %x5" -material_from_menu= "material from: %t|..LINESTYLE %x7|COLOR %x1|LAYER %x2|..LAYER+COLOR %x3|..BLOCK %x4|..XDATA %x5|..INI-File %x6" -g_scale_list = ''.join(( - 'scale factor: %t', - '|user def. %x12', - '|yard to m %x8', - '|feet to m %x7', - '|inch to m %x6', - '| x 1000 %x3', - '| x 100 %x2', - '| x 10 %x1', - '| x 1 %x0', - '| x 0.1 %x-1', - '| x 0.01 %x-2', - '| x 0.001 %x-3', - '| x 0.0001 %x-4', - '| x 0.00001 %x-5')) - -#print 'deb: g_scale_list', g_scale_list #----------- - -dxfFileName = Draw.Create("") -iniFileName = Draw.Create(INIFILE_DEFAULT_NAME + INIFILE_EXTENSION) -user_preset = 0 -config_UI = Draw.Create(0) #switch_on/off extended config_UI -g_scale_as = Draw.Create(int(log10(G_SCALE))) - - -keywords_org = { - 'curves_on' : 0, - 'optimization': 2, - 'one_mesh_on': 1, - 'vGroup_on' : 1, - 'dummy_on' : 0, - 'views_on' : 0, - 'cams_on' : 0, - 'lights_on' : 0, - 'xref_on' : 1, - 'block_nn': 0, - 'blockFilter_on': 0, - 'layerFilter_on': 0, - 'colorFilter_on': 0, - 'groupFilter_on': 0, - 'newScene_on' : 1, - 'target_layer' : TARGET_LAYER, - 'group_bylayer_on' : GROUP_BYLAYER, - 'g_originX' : G_ORIGIN_X, - 'g_originY' : G_ORIGIN_Y, - 'g_originZ' : G_ORIGIN_Z, - 'g_origin_on': 0, - 'g_scale' : float(G_SCALE), -# 'g_scale_as': int(log10(G_SCALE)), # 0, - 'g_scale_on': 0, - 'thick_on' : 1, - 'thick_min' : float(MIN_THICK), - 'thick_force': 0, - 'width_on' : 1, - 'width_min' : float(MIN_WIDTH), - 'width_force': 0, - 'dist_on' : 1, - 'dist_min' : float(MIN_DIST), - 'dist_force': 0, - 'material_on': 1, - 'material_from': 2, - 'fill_on' : 1, - 'meshSmooth_on': 1, - 'curve_res' : CURV_RESOLUTION, - 'curve_arc' : CURVARC_RESOLUTION, - 'arc_res' : ARC_RESOLUTION, - 'arc_rad' : ARC_RADIUS, - 'thin_res' : THIN_RESOLUTION, - 'pl_trim_max' : TRIM_LIMIT, - 'pl_trim_on': 1, - 'plmesh_flip': 0, - 'normals_out': 0, - 'paper_space_on': 0, - 'layFrozen_on': 0, - 'Z_force_on': 0, - 'Z_elev': float(ELEVATION), - 'points_as' : 2, - 'lines_as' : 2, - 'mlines_as' : 2, - 'plines_as' : 2, - 'splines_as' : 5, - 'plines3_as': 2, - 'plmesh_as' : 2, - 'solids_as' : 2, - 'blocks_as' : 1, - 'texts_as' : 1 - } - -drawTypes_org = { - 'point' : 1, - 'line' : 1, - 'arc' : 1, - 'circle': 1, - 'ellipse': 1, - 'mline' : 0, - 'polyline': 1, - 'spline': 1, - 'plmesh': 1, - 'pline3': 1, - 'lwpolyline': 1, - 'text' : 1, - 'mtext' : 0, - 'block' : 1, - 'insert': 1, - 'solid' : 1, - 'trace' : 1, - 'face' : 1, -# 'view' : 0, - } - -# creating of GUI-buttons -# GUI_A - GUI-buttons dictionary for parameter -# GUI_B - GUI-buttons dictionary for drawingTypes -for k, v in keywords_org.iteritems(): - GUI_A[k] = Draw.Create(v) -for k, v in drawTypes_org.iteritems(): - GUI_B[k] = Draw.Create(v) -#print 'deb:init GUI_A: ', GUI_A #--------------- -#print 'deb:init GUI_B: ', GUI_B #--------------- - -model_space_on = Draw.Create(1) - -# initialize settings-object controls how dxf entities are drawn -settings = Settings(keywords_org, drawTypes_org) - - -def update_RegistryKey(key, item): # - """updates key in Blender.Registry - """ - cache = True # data is also saved to a file - rdict = Registry.GetKey('DXF_Importer', cache) - if not rdict: rdict = {} - if item: - rdict[key] = item - Registry.SetKey('DXF_Importer', rdict, cache) - #print 'deb:update_RegistryKey rdict', rdict #--------------- - - -def check_RegistryKey(key): - """ check if the key is already there (saved on a previous execution of this script) - """ - cache = True # data is also saved to a file - rdict = 
Registry.GetKey('DXF_Importer', cache) - #print 'deb:check_RegistryKey rdict:', rdict #---------------- - if rdict: # if found, get the values saved there - try: - item = rdict[key] - return item - except: - #update_RegistryKey() # if data isn't valid rewrite it - pass - -def saveConfig(): #--todo----------------------------------------------- - """Save settings/config/materials from GUI to INI-file. - - Write all config data to INI-file. - """ - global iniFileName - - iniFile = iniFileName.val - #print 'deb:saveConfig inifFile: ', inifFile #---------------------- - if iniFile.lower().endswith(INIFILE_EXTENSION): - - #--todo-- sort key.list for output - #key_list = GUI_A.keys().val - #key_list.sort() - #for key in key_list: - # l_name, l_data = key, GUI_A[key].val - # list_A - - output_str = '[%s,%s]' %(GUI_A, GUI_B) - if output_str =='None': - Draw.PupMenu('DXF importer: INI-file: Alert!%t|no config-data present to save!') - else: - #if BPyMessages.Warning_SaveOver(iniFile): #<- remi find it too abstarct - if sys.exists(iniFile): - f = file(iniFile, 'r') - header_str = f.readline() - f.close() - if header_str.startswith(INIFILE_HEADER[0:13]): - if Draw.PupMenu(' OK ? %t|SAVE OVER: ' + '\'%s\'' %iniFile) == 1: - save_ok = True - else: save_ok = False - elif Draw.PupMenu(' OK ? %t|SAVE OVER: ' + '\'%s\'' %iniFile + - '|Alert: this file has no valid ImportDXF-header| ! it may belong to another aplication !') == 1: - save_ok = True - else: save_ok = False - else: save_ok = True - - if save_ok: - # replace: ',' -> ',\n' - # replace: '{' -> '\n{\n' - # replace: '}' -> '\n}\n' - output_str = ',\n'.join(output_str.split(',')) - output_str = '\n}'.join(output_str.split('}')) - output_str = '{\n'.join(output_str.split('{')) - try: - f = file(iniFile, 'w') - f.write(INIFILE_HEADER + '\n# this is a comment line\n') - f.write(output_str) - f.close() - #Draw.PupMenu('DXF importer: INI-file: Done!%t|config-data saved in ' + '\'%s\'' %iniFile) - except: - Draw.PupMenu('DXF importer: INI-file: Error!%t|failure by writing to ' + '\'%s\'|no config-data saved!' %iniFile) - - else: - Draw.PupMenu('DXF importer: INI-file: Alert!%t|no valid name/extension for INI-file selected!') - print "DXF importer: Alert!: no valid INI-file selected." - if not iniFile: - if dxfFileName.val.lower().endswith('.dxf'): - iniFileName.val = dxfFileName.val[0:-4] + INIFILE_EXTENSION - - -def loadConfig(): #remi--todo----------------------------------------------- - """Load settings/config/materials from INI-file. - - Read material-assignements from config-file. 
- """ - #070724 buggy Window.FileSelector(loadConfigFile, 'Load config data from INI-file', inifilename) - global iniFileName, GUI_A, GUI_B - - iniFile = iniFileName.val - update_RegistryKey('iniFileName', iniFile) - #print 'deb:loadConfig iniFile: ', iniFile #---------------------- - if iniFile.lower().endswith(INIFILE_EXTENSION) and sys.exists(iniFile): - f = file(iniFile, 'r') - header_str = f.readline() - if header_str.startswith(INIFILE_HEADER): - data_str = f.read() - f.close() - #print 'deb:loadConfig data_str from %s: \n' %iniFile , data_str #----------------- - data = eval(data_str) - for k, v in data[0].iteritems(): - try: GUI_A[k].val = v - except: GUI_A[k] = Draw.Create(v) - for k, v in data[1].iteritems(): - try: GUI_B[k].val = v - except: GUI_B[k] = Draw.Create(v) - else: - f.close() - Draw.PupMenu('DXF importer: INI-file: Alert!%t|no valid header in INI-file: ' + '\'%s\'' %iniFile) - else: - Draw.PupMenu('DXF importer: INI-file: Alert!%t|no valid INI-file selected!') - print "DXF importer: Alert!: no valid INI-file selected." - if not iniFileName: - if dxfFileName.val.lower().endswith('.dxf'): - iniFileName.val = dxfFileName.val[0:-4] + INIFILE_EXTENSION - - - -def updateConfig(keywords, drawTypes): #----------------------------------------------- - """updates GUI_settings with given dictionaries - - """ - global GUI_A, GUI_B - #print 'deb:lresetDefaultConfig keywords_org: \n', keywords_org #--------- - for k, v in keywords.iteritems(): - GUI_A[k].val = v - for k, v in drawTypes.iteritems(): - GUI_B[k].val = v - -def resetDefaultConfig(): #----------------------------------------------- - """Resets settings/config/materials to defaults. - - """ - #print 'deb:lresetDefaultConfig keywords_org: \n', keywords_org #--------- - updateConfig(keywords_org, drawTypes_org) - - -def presetConfig_curv(activate): #----------------------------------------------- - """Sets settings/config/materials for curve representation. - - """ - global GUI_A - if activate: - GUI_A['curves_on'].val = 1 - GUI_A['points_as'].val = 5 - GUI_A['lines_as'].val = 5 - GUI_A['mlines_as'].val = 5 - GUI_A['plines_as'].val = 5 - GUI_A['splines_as'].val = 5 - GUI_A['plines3_as'].val = 5 - else: - GUI_A['curves_on'].val = 0 - GUI_A['points_as'].val = 2 - GUI_A['lines_as'].val = 2 - GUI_A['mlines_as'].val = 2 - GUI_A['plines_as'].val = 2 - GUI_A['splines_as'].val = 6 - GUI_A['plines3_as'].val = 2 - - -def resetDefaultConfig_2D(): #----------------------------------------------- - """Sets settings/config/materials to defaults 2D. - - """ - presetConfig_curv(1) - keywords2d = { - 'views_on' : 0, - 'cams_on' : 0, - 'lights_on' : 0, - 'vGroup_on' : 1, - 'thick_on' : 0, - 'thick_force': 0, - 'width_on' : 1, - 'width_force': 0, - 'dist_on' : 1, - 'dist_force': 0, - 'fill_on' : 0, - 'pl_trim_on': 1, - 'Z_force_on': 0, - 'meshSmooth_on': 0, - 'solids_as' : 2, - 'blocks_as' : 1, - 'texts_as' : 1 - } - - drawTypes2d = { - 'point' : 1, - 'line' : 1, - 'arc' : 1, - 'circle': 1, - 'ellipse': 1, - 'mline' : 0, - 'polyline': 1, - 'spline': 1, - 'plmesh': 0, - 'pline3': 1, - 'lwpolyline': 1, - 'text' : 1, - 'mtext' : 0, - 'block' : 1, - 'insert': 1, - 'solid' : 1, - 'trace' : 1, - 'face' : 0, -# 'view' : 0, - } - - updateConfig(keywords2d, drawTypes2d) - -def resetDefaultConfig_3D(): #----------------------------------------------- - """Sets settings/config/materials to defaults 3D. 
- - """ - presetConfig_curv(0) - keywords3d = { -# 'views_on' : 1, -# 'cams_on' : 1, -# 'lights_on' : 1, - 'vGroup_on' : 1, - 'thick_on' : 1, - 'thick_force': 0, - 'width_on' : 1, - 'width_force': 0, - 'dist_on' : 1, - 'dist_force': 0, - 'fill_on' : 1, - 'pl_trim_on': 1, - 'Z_force_on': 0, - 'meshSmooth_on': 1, - 'solids_as' : 2, - 'blocks_as' : 1, - 'texts_as' : 1 - } - - drawTypes3d = { - 'point' : 1, - 'line' : 1, - 'arc' : 1, - 'circle': 1, - 'ellipse': 1, - 'mline' : 0, - 'polyline': 1, - 'spline': 1, - 'plmesh': 1, - 'pline3': 1, - 'lwpolyline': 1, - 'text' : 0, - 'mtext' : 0, - 'block' : 1, - 'insert': 1, - 'solid' : 1, - 'trace' : 1, - 'face' : 1, -# 'view' : 0, - } - - updateConfig(keywords3d, drawTypes3d) - - -def inputGlobalScale(): - """Pop-up UI-Block for global scale factor - """ - global GUI_A - #print 'deb:inputGlobalScale ##########' #------------ - x_scale = Draw.Create(GUI_A['g_scale'].val) - block = [] - #block.append("global translation vector:") - block.append(("", x_scale, 0.0, 10000000.0)) - - retval = Draw.PupBlock("set global scale factor:", block) - - GUI_A['g_scale'].val = float(x_scale.val) - - -def inputOriginVector(): - """Pop-up UI-Block for global translation vector - """ - global GUI_A - #print 'deb:inputOriginVector ##########' #------------ - x_origin = Draw.Create(GUI_A['g_originX'].val) - y_origin = Draw.Create(GUI_A['g_originY'].val) - z_origin = Draw.Create(GUI_A['g_originZ'].val) - block = [] - #block.append("global translation vector:") - block.append(("X: ", x_origin, -100000000.0, 100000000.0)) - block.append(("Y: ", y_origin, -100000000.0, 100000000.0)) - block.append(("Z: ", z_origin, -100000000.0, 100000000.0)) - - retval = Draw.PupBlock("set global translation vector:", block) - - GUI_A['g_originX'].val = x_origin.val - GUI_A['g_originY'].val = y_origin.val - GUI_A['g_originZ'].val = z_origin.val - - -def draw_UI(): #----------------------------------------------------------------- - """ Draw startUI and setup Settings. 
- """ - global GUI_A, GUI_B #__version__ - global user_preset, iniFileName, dxfFileName, config_UI, g_scale_as - global model_space_on - - # This is for easy layout changes - but_0c = 70 #button 1.column width - but_1c = 70 #button 1.column width - but_2c = 70 #button 2.column - but_3c = 70 #button 3.column - menu_margin = 10 - butt_margin = 10 - menu_w = (3 * butt_margin) + but_0c + but_1c + but_2c + but_3c #menu width - - simple_menu_h = 100 - extend_menu_h = 350 - y = simple_menu_h # y is menu upper.y - if config_UI.val: y += extend_menu_h - x = 20 #menu left.x - but0c = x + menu_margin #buttons 0.column position.x - but1c = but0c + but_0c + butt_margin - but2c = but1c + but_1c + butt_margin - but3c = but2c + but_2c + butt_margin - but4c = but3c + but_3c - - # Here starts menu ----------------------------------------------------- - #glClear(GL_COLOR_BUFFER_BIT) - #glRasterPos2d(8, 125) - - y += 30 - colorbox(x, y+20, x+menu_w+menu_margin*2, menu_margin) - Draw.Label("DXF/DWG-Importer v" + __version__, but0c, y, menu_w, 20) - - if config_UI.val: - b0, b0_ = but0c, but_0c + butt_margin - b1, b1_ = but1c, but_1c - y_top = y - - y -= 10 - y -= 20 - Draw.BeginAlign() - GUI_B['point'] = Draw.Toggle('POINT', EVENT_REDRAW, b0, y, b0_, 20, GUI_B['point'].val, "support dxf-POINT on/off") - if GUI_B['point'].val: - GUI_A['points_as'] = Draw.Menu(points_as_menu, EVENT_NONE, b1, y, b1_, 20, GUI_A['points_as'].val, "select target Blender-object") -# Draw.Label('-->', but2c, y, but_2c, 20) - Draw.EndAlign() - - y -= 20 - Draw.BeginAlign() - GUI_B['line'] = Draw.Toggle('LINE...etc', EVENT_REDRAW, b0, y, b0_, 20, GUI_B['line'].val, "support dxf-LINE,ARC,CIRCLE,ELLIPSE on/off") - if GUI_B['line'].val: - GUI_A['lines_as'] = Draw.Menu(lines_as_menu, EVENT_NONE, but1c, y, but_1c, 20, GUI_A['lines_as'].val, "select target Blender-object") - Draw.EndAlign() - - y -= 20 - Draw.BeginAlign() - GUI_B['mline'] = Draw.Toggle('..MLINE', EVENT_REDRAW, b0, y, b0_, 20, GUI_B['mline'].val, "(*todo)support dxf-MLINE on/off") - if GUI_B['mline'].val: - GUI_A['mlines_as'] = Draw.Menu(mlines_as_menu, EVENT_NONE, but1c, y, but_1c, 20, GUI_A['mlines_as'].val, "select target Blender-object") - Draw.EndAlign() - - y -= 20 - Draw.BeginAlign() - GUI_B['spline'] = Draw.Toggle('SPLINE', EVENT_REDRAW, b0, y, b0_, 20, GUI_B['spline'].val, "support dxf-SPLINE on/off") - if GUI_B['spline'].val: - GUI_A['splines_as'] = Draw.Menu(splines_as_menu, EVENT_NONE, but1c, y, but_1c, 20, GUI_A['splines_as'].val, "select target Blender-object") - Draw.EndAlign() - - y -= 20 - Draw.BeginAlign() - GUI_B['polyline'] = Draw.Toggle('2D/LWPLINE', EVENT_REDRAW, b0, y, b0_, 20, GUI_B['polyline'].val, "support dxf-2D-POLYLINE on/off") - if GUI_B['polyline'].val: - GUI_A['plines_as'] = Draw.Menu(plines_as_menu, EVENT_NONE, but1c, y, but_1c, 20, GUI_A['plines_as'].val, "select target Blender-object") - Draw.EndAlign() - - y -= 20 - Draw.BeginAlign() - GUI_B['pline3'] = Draw.Toggle('3D-PLINE', EVENT_REDRAW, b0, y, b0_, 20, GUI_B['pline3'].val, "support dxf-3D-POLYLINE on/off") - if GUI_B['pline3'].val: - GUI_A['plines3_as'] = Draw.Menu(plines3_as_menu, EVENT_NONE, but1c, y, but_1c, 20, GUI_A['plines3_as'].val, "select target Blender-object") - Draw.EndAlign() - - y_down = y - # ----------------------------------------------- - - y = y_top - b0, b0_ = but2c, but_2c + butt_margin - b1, b1_ = but3c, but_3c - - y -= 10 - y -= 20 - Draw.BeginAlign() - GUI_B['plmesh'] = Draw.Toggle('PL-MESH/FACE', EVENT_NONE, b0, y, b0_+b1_-40, 20, GUI_B['plmesh'].val, "support 
dxf-POLYMESH/POLYFACE on/off") -# GUI_A['plmesh_as'] = Draw.Menu(plmesh_as_menu, EVENT_NONE, but1c, y, but_1c, 20, GUI_A['plmesh_as'].val, "select target Blender-object") - GUI_A['plmesh_flip'] = Draw.Toggle('N', EVENT_NONE, b1+b1_-40, y, 20, 20, GUI_A['plmesh_flip'].val, "flip DXF normals on/off") - GUI_A['normals_out'] = Draw.Toggle('N', EVENT_NONE, b1+b1_-20, y, 20, 20, GUI_A['normals_out'].val, "force Blender normals to outside on/off") - Draw.EndAlign() - - y -= 20 - GUI_B['solid'] = Draw.Toggle('SOLID', EVENT_NONE, b0, y, b0_, 20, GUI_B['solid'].val, "support dxf-SOLID and TRACE on/off") - GUI_B['face'] = Draw.Toggle('3DFACE', EVENT_NONE, b1, y, b1_, 20, GUI_B['face'].val, "support dxf-3DFACE on/off") -# GUI_A['solids_as'] = Draw.Menu(solids_as_menu, EVENT_NONE, but3c, y, but_3c, 20, GUI_A['solids_as'].val, "select target Blender-object") - #print 'deb:support solid, trace', GUI_B['trace'].val, GUI_B['solid'].val # ------------ - - - y -= 20 - GUI_B['text'] = Draw.Toggle('TEXT', EVENT_NONE, b0, y, b0_, 20, GUI_B['text'].val, "support dxf-TEXT on/off") - GUI_B['mtext'] = Draw.Toggle('..MTEXT', EVENT_NONE, b1, y, b1_, 20, GUI_B['mtext'].val, "(*todo)support dxf-MTEXT on/off") -# GUI_A['texts_as'] = Draw.Menu(texts_as_menu, EVENT_NONE, but3c, y, but_3c, 20, GUI_A['texts_as'].val, "select target Blender-object") - - y -= 20 - Draw.BeginAlign() - GUI_B['block'] = Draw.Toggle('BLOCK', EVENT_REDRAW, b0, y, b0_-30, 20, GUI_B['block'].val, "support dxf-BLOCK and ARRAY on/off") - GUI_B['insert'].val = GUI_B['block'].val - if GUI_B['block'].val: - GUI_A['block_nn'] = Draw.Toggle('n', EVENT_NONE, b1-30, y, 15, 20, GUI_A['block_nn'].val, "support hatch/noname BLOCKs *X... on/off") - GUI_A['xref_on'] = Draw.Toggle('Xref', EVENT_NONE, b1-15, y, 35, 20, GUI_A['xref_on'].val, "support for XREF-BLOCKs (place holders) on/off") - GUI_A['blocks_as'] = Draw.Menu(blocks_as_menu, EVENT_NONE, b1+20, y, b1_-20, 20, GUI_A['blocks_as'].val, "select target representation for imported BLOCKs") - Draw.EndAlign() - - - y -= 20 - y -= 20 - - Draw.BeginAlign() - GUI_A['views_on'] = Draw.Toggle('views', EVENT_NONE, b0, y, b0_-25, 20, GUI_A['views_on'].val, "imports VIEWs and VIEWPORTs as cameras on/off") - GUI_A['cams_on'] = Draw.Toggle('..cams', EVENT_NONE, b1-25, y, b1_-25, 20, GUI_A['cams_on'].val, "(*todo) support ASHADE cameras on/off") - GUI_A['lights_on'] = Draw.Toggle('..lights', EVENT_NONE, b1+25, y, b1_-25, 20, GUI_A['lights_on'].val, "(*todo) support AVE_RENDER lights on/off") - Draw.EndAlign() - - - if y < y_down: y_down = y - # -----end supported objects-------------------------------------- - - y_top = y_down - y = y_top - y -= 10 - y -= 20 - but_ = menu_w / 6 - b0 = but0c + (menu_w - but_*6)/2 - Draw.BeginAlign() - GUI_A['paper_space_on'] = Draw.Toggle('paper', EVENT_NONE, b0+but_*0, y, but_, 20, GUI_A['paper_space_on'].val, "import only from Paper-Space on/off") - GUI_A['layFrozen_on'] = Draw.Toggle ('frozen', EVENT_NONE, b0+but_*1, y, but_, 20, GUI_A['layFrozen_on'].val, "import also from frozen LAYERs on/off") - GUI_A['layerFilter_on'] = Draw.Toggle('..layer', EVENT_NONE, b0+but_*2, y, but_, 20, GUI_A['layerFilter_on'].val, "(*todo) LAYER filtering on/off") - GUI_A['colorFilter_on'] = Draw.Toggle('..color', EVENT_NONE, b0+but_*3, y, but_, 20, GUI_A['colorFilter_on'].val, "(*todo) COLOR filtering on/off") - GUI_A['groupFilter_on'] = Draw.Toggle('..group', EVENT_NONE, b0+but_*4, y, but_, 20, GUI_A['groupFilter_on'].val, "(*todo) GROUP filtering on/off") - GUI_A['blockFilter_on'] = 
Draw.Toggle('..block', EVENT_NONE, b0+but_*5, y, but_, 20, GUI_A['blockFilter_on'].val, "(*todo) BLOCK filtering on/off") - #GUI_A['dummy_on'] = Draw.Toggle('-', EVENT_NONE, but3c, y, but_3c, 20, GUI_A['dummy_on'].val, "dummy on/off") - Draw.EndAlign() - - # -----end filters-------------------------------------- - - b0, b0_ = but0c, but_0c + butt_margin - b1, b1_ = but1c, but_1c - - y -= 10 - y -= 20 - Draw.BeginAlign() - GUI_A['g_origin_on'] = Draw.Toggle('glob.reLoc', EVENT_REDRAW, b0, y, b0_, 20, GUI_A['g_origin_on'].val, "global relocate all DXF objects on/off") - if GUI_A['g_origin_on'].val: - tmp = Draw.PushButton('=', EVENT_ORIGIN, b1, y, 20, 20, "edit relocation-vector (x,y,z in DXF units)") - origin_str = '(%.4f, %.4f, %.4f)' % ( - GUI_A['g_originX'].val, - GUI_A['g_originY'].val, - GUI_A['g_originZ'].val - ) - tmp = Draw.Label(origin_str, b1+20, y, 300, 20) - #GUI_A['g_origin'] = Draw.String('', EVENT_ORIGIN, b1, y, b1_, 20, GUI_A['g_origin'].val, "global translation-vector (x,y,z) in DXF units") - Draw.EndAlign() - - y -= 20 - Draw.BeginAlign() - GUI_A['g_scale_on'] = Draw.Toggle('glob.Scale', EVENT_REDRAW, b0, y, b0_, 20, GUI_A['g_scale_on'].val, "global scale all DXF objects on/off") - if GUI_A['g_scale_on'].val: - g_scale_as = Draw.Menu(g_scale_list, EVENT_SCALE, b1, y, 45, 20, g_scale_as.val, "factor for scaling the DXFdata") - if g_scale_as.val == 12: - pass - else: - if g_scale_as.val == 6: #scale inches to meters - GUI_A['g_scale'].val = 0.0254000 - elif g_scale_as.val == 7: #scale feets to meters - GUI_A['g_scale'].val = 0.3048000 - elif g_scale_as.val == 8: #scale yards to meters - GUI_A['g_scale'].val = 0.9144000 - else: - GUI_A['g_scale'].val = 10.0 ** int(g_scale_as.val) - scale_float = GUI_A['g_scale'].val - if scale_float < 0.000001 or scale_float > 1000000: - scale_str = ' = %s' % GUI_A['g_scale'].val - else: - scale_str = ' = %.6f' % GUI_A['g_scale'].val - Draw.Label(scale_str, b1+45, y, 200, 20) - Draw.EndAlign() - - y_down = y - # -----end material,translate,scale------------------------------------------ - - b0, b0_ = but0c, but_0c + butt_margin - b1, b1_ = but1c, but_1c - - y_top = y_down - y = y_top - y -= 10 - y -= 20 - Draw.BeginAlign() - GUI_A['meshSmooth_on'] = Draw.Toggle('smooth', EVENT_NONE, b0, y, b0_-20, 20, GUI_A['meshSmooth_on'].val, "mesh smooth for circles/arc-segments on/off") - GUI_A['pl_trim_on'] = Draw.Toggle('trim', EVENT_NONE, b1-20, y, 32, 20, GUI_A['pl_trim_on'].val, "clean intersection of POLYLINE-wide-segments on/off") - GUI_A['pl_trim_max'] = Draw.Number('', EVENT_NONE, b1+12, y, b1_-12, 20, GUI_A['pl_trim_max'].val, 0, 5, "threshold intersection of POLYLINE-wide-segments: 0.0-5.0") - Draw.EndAlign() - - y -= 20 - Draw.BeginAlign() -# GUI_A['thin_res'] = Draw.Number('thin:', EVENT_NONE, but0c, y, but_0c, 20, GUI_A['thin_res'].val, 4, 64, "thin cylinder resolution - number of segments (4-64)") - GUI_A['arc_rad'] = Draw.Number('bR:', EVENT_NONE, b0, y, b0_, 20, GUI_A['arc_rad'].val, 0.01, 100, "basis radius for arc/circle resolution (0.01-100)") - GUI_A['arc_res'] = Draw.Number('', EVENT_NONE, b1, y, b1_/2, 20, GUI_A['arc_res'].val, 3, 500, "arc/circle resolution - number of segments (3-500)") - GUI_A['fill_on'] = Draw.Toggle('caps', EVENT_NONE, b1+b1_/2, y, b1_/2, 20, GUI_A['fill_on'].val, "draws top and bottom caps of CYLINDERs/closed curves on/off") - Draw.EndAlign() - - y -= 20 - Draw.BeginAlign() - GUI_A['curve_arc'] = Draw.Number('', EVENT_NONE, b0, y, b0_/2, 20, GUI_A['curve_arc'].val, 3, 32, "Bezier circle: amount of segments: 
3-32") - GUI_A['curve_res'] = Draw.Number('', EVENT_NONE, b0+b0_/2, y, b0_/2, 20, GUI_A['curve_res'].val, 1, 128, "Set the Curve's U-resolution value: 1-128") - GUI_A['curves_on'] = Draw.Toggle('to Curves', EVENT_PRESETCURV, b1, y, b1_, 20, GUI_A['curves_on'].val, "set Curve as target object type on/off") - Draw.EndAlign() - - y -= 20 - GUI_A['group_bylayer_on'] = Draw.Toggle('Layer', EVENT_NONE, b0, y, 30, 20, GUI_A['group_bylayer_on'].val, "DXF-entities group by layer on/off") - GUI_A['vGroup_on'] = Draw.Toggle('vGroups', EVENT_NONE, b0+30, y, b1_-10, 20, GUI_A['vGroup_on'].val, "sort faces into VertexGroups on/off") - GUI_A['one_mesh_on'] = Draw.Toggle('oneMesh', EVENT_NONE, b1+10, y, b1_-10, 20, GUI_A['one_mesh_on'].val, "draw DXF-entities into one mesh-object. Recommended for big DXF-files. on/off") - - y -= 30 - Draw.BeginAlign() - GUI_A['material_on'] = Draw.Toggle('material', EVENT_REDRAW, b0, y, b0_-20, 20, GUI_A['material_on'].val, "support for material assignment on/off") - if GUI_A['material_on'].val: - GUI_A['material_from'] = Draw.Menu(material_from_menu, EVENT_NONE, b1-20, y, b1_+20, 20, GUI_A['material_from'].val, "material assignment from?") - Draw.EndAlign() - - y_down = y - # ----------------------------------------------- - - b0, b0_ = but2c, but_2c + butt_margin - b1, b1_ = but3c, but_3c - - y = y_top - y -= 10 - y -= 20 - Draw.BeginAlign() - GUI_A['Z_force_on'] = Draw.Toggle('.elevation', EVENT_REDRAW, b0, y, b0_, 20, GUI_A['Z_force_on'].val, ".set objects Z-coordinates to elevation on/off") - if GUI_A['Z_force_on'].val: - GUI_A['Z_elev'] = Draw.Number('', EVENT_NONE, b1, y, b1_, 20, GUI_A['Z_elev'].val, -1000, 1000, "set default elevation(Z-coordinate)") - Draw.EndAlign() - - y -= 20 - Draw.BeginAlign() - GUI_A['dist_on'] = Draw.Toggle('dist.:', EVENT_NONE, b0, y, b0_-20, 20, GUI_A['dist_on'].val, "support distance on/off") - GUI_A['dist_force'] = Draw.Toggle('F', EVENT_NONE, b0+b0_-20, y, 20, 20, GUI_A['dist_force'].val, "force minimal distance on/off") - GUI_A['dist_min'] = Draw.Number('', EVENT_NONE, b1, y, b1_, 20, GUI_A['dist_min'].val, 0, 10, "minimal length/distance (double.vertex removing)") - Draw.EndAlign() - - y -= 20 - Draw.BeginAlign() - GUI_A['thick_on'] = Draw.Toggle('thick:', EVENT_NONE, b0, y, b0_-20, 20, GUI_A['thick_on'].val, "support thickness on/off") - GUI_A['thick_force'] = Draw.Toggle('F', EVENT_REDRAW, b0+b0_-20, y, 20, 20, GUI_A['thick_force'].val, "force for thickness at least limiter value on/off") - if GUI_A['thick_force'].val: - GUI_A['thick_min'] = Draw.Number('', EVENT_NONE, b1, y, b1_, 20, GUI_A['thick_min'].val, 0, 10, "minimal value for thickness") - Draw.EndAlign() - - y -= 20 - Draw.BeginAlign() - GUI_A['width_on'] = Draw.Toggle('width:', EVENT_NONE, b0, y, b0_-20, 20, GUI_A['width_on'].val, "support width on/off") - GUI_A['width_force'] = Draw.Toggle('F', EVENT_REDRAW, b0+b0_-20, y, 20, 20, GUI_A['width_force'].val, "force for width at least limiter value on/off") - if GUI_A['width_force'].val: - GUI_A['width_min'] = Draw.Number('', EVENT_NONE, b1, y, b1_, 20, GUI_A['width_min'].val, 0, 10, "minimal value for width") - Draw.EndAlign() - - y -= 30 - but, but_ = but2c, 25 - Draw.BeginAlign() - Draw.EndAlign() - - if y < y_down: y_down = y - # -----end options -------------------------------------- - - - #-------------------------------------- - y_top = y_down - y = y_top - #GUI_A['dummy_on'] = Draw.Toggle(' - ', EVENT_NONE, but0c, y, but_0c, 20, GUI_A['dummy_on'].val, "reserved") - y -= 30 - Draw.BeginAlign() - Draw.PushButton('INI 
file >', EVENT_CHOOSE_INI, but0c, y, but_0c, 20, 'Select INI-file from project directory') - iniFileName = Draw.String(' :', EVENT_NONE, but1c, y, menu_w-but_1c-60, 20, iniFileName.val, FILENAME_MAX, "write here the name of the INI-file") - but = but4c-60 - Draw.PushButton('#', EVENT_PRESETS, but, y, 20, 20, "toggle Preset-INI-files") - Draw.PushButton('L', EVENT_LOAD_INI, but+20, y, 20, 20, 'Loads configuration from ini-file: %s' % iniFileName.val) - Draw.PushButton('S', EVENT_SAVE_INI, but+40, y, 20, 20, 'Saves configuration to ini-file: %s' % iniFileName.val) - Draw.EndAlign() - - - b0, b0_ = but2c, but_2c + butt_margin - b1, b1_ = but3c, but_3c - - y = simple_menu_h - bm = butt_margin/2 - - #y -= 10 - Draw.BeginAlign() - Draw.PushButton('DXFfile >', EVENT_CHOOSE_DXF, but0c, y, but_0c, 20, 'Select DXF/DWG-file for import') - dxfFileName = Draw.String(' :', EVENT_NONE, but1c, y, but_1c+but_2c+but_3c-20, 20, dxfFileName.val, FILENAME_MAX, "type the name of DXF/DWG-file or type *.dxf/*.dwg for multiple files") - Draw.PushButton('*.*', EVENT_DXF_DIR, but3c+but_3c-20, y, 20, 20, 'set filter for import all files from this directory') - Draw.EndAlign() - - y -= 30 - config_UI = Draw.Toggle('CONFIG', EVENT_REDRAW, but0c, y, but_0c+bm, 20, config_UI.val, 'Advanced configuration on/off' ) - Draw.BeginAlign() - but, but_ = but1c, but_1c+bm - but_ /= 3 - Draw.PushButton('X', EVENT_RESET, but, y, 15, 20, "reset configuration to defaults") - Draw.PushButton('2D', EVENT_PRESET2D, but+but_, y, but_, 20, 'set configuration for 2D import') - Draw.PushButton('3D', EVENT_PRESET3D, but+(but_*2), y, but_, 20, 'set configuration for 3D import') - Draw.EndAlign() - - Draw.BeginAlign() - GUI_A['newScene_on'] = Draw.Toggle('newScene', EVENT_NONE, but2c, y, but_2c, 20, GUI_A['newScene_on'].val, "create new Scene for each imported dxf file on/off") - GUI_A['target_layer'] = Draw.Number('layer', EVENT_NONE, but3c, y, but_3c, 20, GUI_A['target_layer'].val, 1, 18, "target Blender-layer (<19> reserved for block_definitions)") - Draw.EndAlign() - - y -= 40 - Draw.PushButton('EXIT', EVENT_EXIT, but0c, y, but_0c+bm, 20, '' ) - Draw.PushButton('HELP', EVENT_HELP, but1c, y, but_1c+bm, 20, 'calls DXF-Importer Manual Page on Wiki.Blender.org') - Draw.BeginAlign() - GUI_A['optimization'] = Draw.Number('', EVENT_NONE, but2c, y+20, 40, 20, GUI_A['optimization'].val, 0, 3, "Optimization Level: 0=Debug/directDrawing, 1=Verbose, 2=ProgressBar, 3=SilentMode") - Draw.EndAlign() - Draw.BeginAlign() - Draw.PushButton('TEST', EVENT_LIST, but2c, y, 40, 20, 'DXF-Analyze-Tool: reads data from selected dxf file and writes report in project_directory/dxf_blendname.INF') - Draw.PushButton('START IMPORT', EVENT_START, but2c+40, y, but_2c-40+but_3c+butt_margin, 40, 'Start the import process. 
For Cancel go to console and hit Ctrl-C') - Draw.EndAlign() - - - - - y -= 20 - Draw.BeginAlign() - Draw.Label(' ', but0c-menu_margin, y, menu_margin, 20) - Draw.Label(LAB, but0c, y, menu_w, 20) - Draw.Label(' ', but0c+menu_w, y, menu_margin, 20) - Draw.EndAlign() - -#-- END GUI Stuf----------------------------------------------------- - -def colorbox(x,y,xright,bottom): - glColor3f(0.75, 0.75, 0.75) - glRecti(x + 1, y + 1, xright - 1, bottom - 1) - -def dxf_callback(input_filename): - global dxfFileName - if input_filename.lower()[-3:] in ('dwg','dxf'): - dxfFileName.val=input_filename -# dirname == sys.dirname(Blender.Get('filename')) -# update_RegistryKey('DirName', dirname) -# update_RegistryKey('dxfFileName', input_filename) - -def ini_callback(input_filename): - global iniFileName - iniFileName.val=input_filename - -def event(evt, val): - if evt in (Draw.QKEY, Draw.ESCKEY) and not val: - Draw.Exit() - -def bevent(evt): -# global EVENT_NONE,EVENT_LOAD_DXF,EVENT_LOAD_INI,EVENT_SAVE_INI,EVENT_EXIT - global config_UI, user_preset - global GUI_A, UI_MODE - - ######### Manages GUI events - if (evt==EVENT_EXIT): - Draw.Exit() - print 'DXF/DWG-Importer *** exit ***' #--------------------- - elif (evt==EVENT_CHOOSE_INI): - Window.FileSelector(ini_callback, "INI-file Selection", '*.ini') - elif (evt==EVENT_REDRAW): - Draw.Redraw() - elif (evt==EVENT_RESET): - resetDefaultConfig() - Draw.Redraw() - elif (evt==EVENT_PRESET2D): - resetDefaultConfig_2D() - Draw.Redraw() - elif (evt==EVENT_SCALE): - if g_scale_as.val == 12: - inputGlobalScale() - if GUI_A['g_scale'].val < 0.00000001: - GUI_A['g_scale'].val = 0.00000001 - Draw.Redraw() - elif (evt==EVENT_ORIGIN): - inputOriginVector() - Draw.Redraw() - elif (evt==EVENT_PRESET3D): - resetDefaultConfig_3D() - Draw.Redraw() - elif (evt==EVENT_PRESETCURV): - presetConfig_curv(GUI_A['curves_on'].val) - Draw.Redraw() - elif (evt==EVENT_PRESETS): - user_preset += 1 - index = str(user_preset) - if user_preset > 5: user_preset = 0; index = '' - iniFileName.val = INIFILE_DEFAULT_NAME + index + INIFILE_EXTENSION - Draw.Redraw() - elif (evt==EVENT_LIST): - dxfFile = dxfFileName.val - update_RegistryKey('dxfFileName', dxfFileName.val) - if dxfFile.lower().endswith('.dxf') and sys.exists(dxfFile): - analyzeDXF(dxfFile) - else: - Draw.PupMenu('DXF importer: Alert!%t|no valid DXF-file selected!') - print "DXF importer: error, no valid DXF-file selected! try again" - Draw.Redraw() - elif (evt==EVENT_HELP): - try: - import webbrowser - webbrowser.open('http://wiki.blender.org/index.php?title=Scripts/Manual/Import/DXF-3D') - except: - Draw.PupMenu('DXF importer: HELP Alert!%t|no connection to manual-page on Blender-Wiki! 
try:|\ -http://wiki.blender.org/index.php?title=Scripts/Manual/Import/DXF-3D') - Draw.Redraw() - elif (evt==EVENT_LOAD_INI): - loadConfig() - Draw.Redraw() - elif (evt==EVENT_SAVE_INI): - saveConfig() - Draw.Redraw() - elif (evt==EVENT_DXF_DIR): - dxfFile = dxfFileName.val - dxfFileExt = '*'+dxfFile.lower()[-4:] #can be .dxf or .dwg - dxfPathName = '' - if '/' in dxfFile: - dxfPathName = '/'.join(dxfFile.split('/')[:-1]) + '/' - elif '\\' in dxfFile: - dxfPathName = '\\'.join(dxfFile.split('\\')[:-1]) + '\\' - dxfFileName.val = dxfPathName + dxfFileExt -# dirname == sys.dirname(Blender.Get('filename')) -# update_RegistryKey('DirName', dirname) -# update_RegistryKey('dxfFileName', dxfFileName.val) - GUI_A['newScene_on'].val = 1 - Draw.Redraw() - elif (evt==EVENT_CHOOSE_DXF): - filename = '' # '*.dxf' - if dxfFileName.val: filename = dxfFileName.val - Window.FileSelector(dxf_callback, "DXF/DWG-file Selection", filename) - elif (evt==EVENT_START): - dxfFile = dxfFileName.val - #print 'deb: dxfFile file: ', dxfFile #---------------------- - if E_M: dxfFileName.val, dxfFile = e_mode(dxfFile) #evaluation mode - update_RegistryKey('dxfFileName', dxfFileName.val) - if dxfFile.lower().endswith('*.dxf'): - if Draw.PupMenu('DXF importer will import all DXF-files from:|%s|OK?' % dxfFile) != -1: - UI_MODE = False - multi_import(dxfFile) - UI_MODE = True - Draw.Redraw() - - elif dxfFile.lower().endswith('*.dwg'): - if not extCONV_OK: Draw.PupMenu(extCONV_TEXT) - elif Draw.PupMenu('DWG importer will import all DWG-files from:|%s|OK?' % dxfFile) != -1: - #elif Draw.PupMenu('DWG importer will import all DWG-files from:|%s|Caution! overwrites existing DXF-files!| OK?' % dxfFile) != -1: - UI_MODE = False - multi_import(dxfFile) - UI_MODE = True - Draw.Redraw() - - elif sys.exists(dxfFile) and dxfFile.lower()[-4:] in ('.dxf','.dwg'): - if dxfFile.lower().endswith('.dwg') and (not extCONV_OK): - Draw.PupMenu(extCONV_TEXT) - else: - #print '\nStandard Mode: active' - if GUI_A['newScene_on'].val: - _dxf_file = dxfFile.split('/')[-1].split('\\')[-1] - _dxf_file = _dxf_file[:-4] # cut last char:'.dxf' - _dxf_file = _dxf_file[:MAX_NAMELENGTH] #? [-MAX_NAMELENGTH:]) - global SCENE - SCENE = Blender.Scene.New(_dxf_file) - SCENE.makeCurrent() - Blender.Redraw() - #or so? Blender.Scene.makeCurrent(_dxf_file) - #sce = bpy.data.scenes.new(_dxf_file) - #bpy.data.scenes.active = sce - else: - SCENE = Blender.Scene.GetCurrent() - SCENE.objects.selected = [] # deselect all - main(dxfFile) - #SCENE.objects.selected = SCENE.objects - #Window.RedrawAll() - #Blender.Redraw() - #Draw.Redraw() - else: - Draw.PupMenu('DXF importer: nothing imported!%t|no valid DXF-file selected!') - print "DXF importer: nothing imported, no valid DXF-file selected! try again" - Draw.Redraw() - - - - -def multi_import(DIR): - """Imports all DXF-files from directory DIR. - - """ - global SCENE - batchTIME = sys.time() - #if #DIR == "": DIR = os.path.curdir - if DIR == "": - DIR = sys.dirname(Blender.Get('filename')) - EXT = '.dxf' - else: - EXT = DIR[-4:] # get last 4 characters '.dxf' - DIR = DIR[:-5] # cut last 5 characters '*.dxf' - print 'importing multiple %s files from %s' %(EXT,DIR) - files = \ - [sys.join(DIR, f) for f in os.listdir(DIR) if f.lower().endswith(EXT)] - if not files: - print '...None %s-files found. Abort!' 
%EXT - return - - i = 0 - for dxfFile in files: - i += 1 - print '\n%s-file' %EXT, i, 'of', len(files) #,'\nImporting', dxfFile - if GUI_A['newScene_on'].val: - _dxf_file = dxfFile.split('/')[-1].split('\\')[-1] - _dxf_file = _dxf_file[:-4] # cut last char:'.dxf' - _dxf_file = _dxf_file[:MAX_NAMELENGTH] #? [-MAX_NAMELENGTH:]) - SCENE = Blender.Scene.New(_dxf_file) - SCENE.makeCurrent() - #or so? Blender.Scene.makeCurrent(_dxf_file) - #sce = bpy.data.scenes.new(_dxf_file) - #bpy.data.scenes.active = sce - else: - SCENE = Blender.Scene.GetCurrent() - SCENE.objects.selected = [] # deselect all - main(dxfFile) - #Blender.Redraw() - - print 'TOTAL TIME: %.6f' % (sys.time() - batchTIME) - print '\a\r', # beep when done - Draw.PupMenu('DXF importer: Done!|finished in %.4f sec.' % (sys.time() - batchTIME)) - - -if __name__ == "__main__": - #Draw.PupMenu('DXF importer: Abort%t|This script version works for Blender up 2.49 only!') - UI_MODE = True - # recall last used DXF-file and INI-file names - dxffilename = check_RegistryKey('dxfFileName') - #print 'deb:start dxffilename:', dxffilename #---------------- - if dxffilename: dxfFileName.val = dxffilename - else: - dirname = sys.dirname(Blender.Get('filename')) - #print 'deb:start dirname:', dirname #---------------- - dxfFileName.val = sys.join(dirname, '') - inifilename = check_RegistryKey('iniFileName') - if inifilename: iniFileName.val = inifilename - - Draw.Register(draw_UI, event, bevent) - - -""" -if 1: - # DEBUG ONLY - UI_MODE = False - TIME= sys.time() - #DIR = '/dxf_r12_testfiles/' - DIR = '/metavr/' - import os - print 'Searching for files' - os.system('find %s -iname "*.dxf" > /tmp/tempdxf_list' % DIR) - # os.system('find /storage/ -iname "*.dxf" > /tmp/tempdxf_list') - print '...Done' - file= open('/tmp/tempdxf_list', 'r') - lines= file.readlines() - file.close() - # sort by filesize for faster testing - lines_size = [(os.path.getsize(f[:-1]), f[:-1]) for f in lines] - lines_size.sort() - lines = [f[1] for f in lines_size] - - for i, _dxf in enumerate(lines): - if i >= 70: - #if 1: - print 'Importing', _dxf, '\nNUMBER', i, 'of', len(lines) - if True: - _dxf_file= _dxf.split('/')[-1].split('\\')[-1] - _dxf_file = _dxf_file[:-4] # cut last char:'.dxf' - _dxf_file = _dxf_file[:MAX_NAMELENGTH] #? [-MAX_NAMELENGTH:]) - sce = bpy.data.scenes.new(_dxf_file) - bpy.data.scenes.active = sce - dxfFileName.val = _dxf - main(_dxf) - - print 'TOTAL TIME: %.6f' % (sys.time() - TIME) -""" \ No newline at end of file diff --git a/release/scripts/import_edl.py b/release/scripts/import_edl.py deleted file mode 100644 index 8c5d041b34c..00000000000 --- a/release/scripts/import_edl.py +++ /dev/null @@ -1,961 +0,0 @@ -#!BPY - -""" -Name: 'Video Sequence (.edl)...' -Blender: 248 -Group: 'Import' -Tooltip: 'Load a CMX formatted EDL into the sequencer' -""" - -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Copyright (C) 2009: Campbell Barton, ideasman42@gmail.com -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# -------------------------------------------------------------------------- - -class TimeCode(object): - ''' - Simple timecode class - also supports conversion from other time strings used by EDL - ''' - def __init__(self, data, fps): - self.fps= fps - if type(data)==str: - self.fromString(data) - frame = self.asFrame() - self.fromFrame(frame) - else: - self.fromFrame(data) - - def fromString(self, text): - # hh:mm:ss:ff - # No dropframe support yet - - if text.lower().endswith('mps'): # 5.2mps - return self.fromFrame( int( float(text[:-3]) * self.fps ) ) - elif text.lower().endswith('s'): # 5.2s - return self.fromFrame( int( float(text[:-1]) * self.fps ) ) - elif text.isdigit(): # 1234 - return self.fromFrame( int(text) ) - elif ':' in text: # hh:mm:ss:ff - text= text.replace(';', ':').replace(',', ':').replace('.', ':') - text= text.split(':') - - self.hours= int(text[0]) - self.minutes= int(text[1]) - self.seconds= int(text[2]) - self.frame= int(text[3]) - return self - else: - print 'ERROR: could not convert this into timecode "%s"' % test - return self - - - def fromFrame(self, frame): - - if frame < 0: - frame = -frame; - neg=True - else: - neg=False - - fpm = 60 * self.fps - fph = 60 * fpm - - if frame < fph: - self.hours= 0 - else: - self.hours= int(frame/fph) - frame = frame % fph - - if frame < fpm: - self.minutes= 0 - else: - self.minutes= int(frame/fpm) - frame = frame % fpm - - if frame < self.fps: - self.seconds= 0 - else: - self.seconds= int(frame/self.fps) - frame = frame % self.fps - - self.frame= frame - - if neg: - self.frame = -self.frame - self.seconds = -self.seconds - self.minutes = -self.minutes - self.hours = -self.hours - - return self - - def asFrame(self): - abs_frame= self.frame - abs_frame += self.seconds * self.fps - abs_frame += self.minutes * 60 * self.fps - abs_frame += self.hours * 60 * 60 * self.fps - - return abs_frame - - def asString(self): - self.fromFrame(int(self)) - return '%.2d:%.2d:%.2d:%.2d' % (self.hours, self.minutes, self.seconds, self.frame) - - def __repr__(self): - return self.asString() - - # Numeric stuff, may as well have this - def __neg__(self): return TimeCode(-int(self), self.fps) - def __int__(self): return self.asFrame() - def __sub__(self, other): return TimeCode(int(self)-int(other), self.fps) - def __add__(self, other): return TimeCode(int(self)+int(other), self.fps) - def __mul__(self, other): return TimeCode(int(self)*int(other), self.fps) - def __div__(self, other): return TimeCode(int(self)/int(other), self.fps) - def __abs__(self): return TimeCode(abs(int(self)), self.fps) - def __iadd__(self, other): return self.fromFrame(int(self)+int(other)) - def __imul__(self, other): return self.fromFrame(int(self)*int(other)) - def __idiv__(self, other): return self.fromFrame(int(self)/int(other)) -# end timecode - - -'''Comments -Comments can appear at the beginning of the EDL file (header) or between the edit lines in the EDL. The first block of comments in the file is defined to be the header comments and they are associated with the EDL as a whole. Subsequent comments in the EDL file are associated with the first edit line that appears after them. -Edit Entries - [num] [duration] [srcIn] [srcOut] [recIn] [recOut] - - * : Filename or tag value. Filename can be for an MPEG file, Image file, or Image file template. 
Image file templates use the same pattern matching as for command line glob, and can be used to specify images to encode into MPEG. i.e. /usr/data/images/image*.jpg - * : 'V' | 'A' | 'VA' | 'B' | 'v' | 'a' | 'va' | 'b' which equals Video, Audio, Video_Audio edits (note B or b can be used in place of VA or va). - * : 'C' | 'D' | 'E' | 'FI' | 'FO' | 'W' | 'c' | 'd' | 'e' | 'fi' | 'fo' | 'w'. which equals Cut, Dissolve, Effect, FadeIn, FadeOut, Wipe. - * [num]: if TransitionType = Wipe, then a wipe number must be given. At the moment only wipe 'W0' and 'W1' are supported. - * [duration]: if the TransitionType is not equal to Cut, then an effect duration must be given. Duration is in frames. - * [srcIn]: Src in. If no srcIn is given, then it defaults to the first frame of the video or the first frame in the image pattern. If srcIn isn't specified, then srcOut, recIn, recOut can't be specified. - * [srcOut]: Src out. If no srcOut is given, then it defaults to the last frame of the video - or last image in the image pattern. if srcOut isn't given, then recIn and recOut can't be specified. - * [recIn]: Rec in. If no recIn is given, then it is calculated based on its position in the EDL and the length of its input. - [recOut]: Rec out. If no recOut is given, then it is calculated based on its position in the EDL and the length of its input. first frame of the video. - -For srcIn, srcOut, recIn, recOut, the values can be specified as either timecode, frame number, seconds, or mps seconds. i.e. -[tcode | fnum | sec | mps], where: - - * tcode : SMPTE timecode in hh:mm:ss:ff - * fnum : frame number (the first decodable frame in the video is taken to be frame 0). - * sec : seconds with 's' suffix (e.g. 5.2s) - * mps : seconds with 'mps' suffix (e.g. 5.2mps). This corresponds to the 'seconds' value displayed by Windows MediaPlayer. 
- -More notes, -Key - -''' - -enum= 0 -TRANSITION_UNKNOWN= enum -TRANSITION_CUT= enum; enum+=1 -TRANSITION_DISSOLVE= enum; enum+=1 -TRANSITION_EFFECT= enum; enum+=1 -TRANSITION_FADEIN= enum; enum+=1 -TRANSITION_FADEOUT= enum; enum+=1 -TRANSITION_WIPE= enum; enum+=1 -TRANSITION_KEY= enum; enum+=1 - -TRANSITION_DICT={ \ - 'c':TRANSITION_CUT, - 'd':TRANSITION_DISSOLVE, - 'e':TRANSITION_EFFECT, - 'fi':TRANSITION_FADEIN, - 'fo':TRANSITION_FADEOUT, - 'w':TRANSITION_WIPE, - 'k':TRANSITION_KEY, - } - -enum= 0 -EDIT_UNKNOWN= 1<= 1.0: - mov.endStill = int(mov.length * (scale - 1.0)) - else: - speed.speedEffectGlobalSpeed = 1.0/scale - meta.endOffset = mov.length - int(mov.length*scale) - - speed.update() - meta.update() - return meta - -def apply_dissolve_ipo(mov, blendin): - len_disp = float(mov.endDisp - mov.startDisp) - - if len_disp <= 0.0: - print 'Error, strip is zero length' - return - - mov.ipo= ipo= bpy.data.ipos.new("fade", "Sequence") - icu= ipo.addCurve('Fac') - - icu.interpolation= Blender.IpoCurve.InterpTypes.LINEAR - icu.append((0, 0)) - icu.append(((int(blendin)/len_disp) * 100, 1)) - - if mov.type not in (SEQ_HD_SOUND, SEQ_RAM_SOUND): - mov.blendMode = Blender.Scene.Sequence.BlendModes.ALPHAOVER - - -def replace_ext(path, ext): - return path[:path.rfind('.')+1] + ext - -def load_edl(filename, reel_files, reel_offsets): - ''' - reel_files - key:reel <--> reel:filename - ''' - - # For test file - # frame_offset = -769 - - - sce= bpy.data.scenes.active - fps= sce.render.fps - - elist= EditList() - if not elist.parse(filename, fps): - return 'Unable to parse "%s"' % filename - # elist.clean() - - - seq= sce.sequence - - track= 0 - - edits = elist.edits[:] - # edits.sort(key = lambda edit: int(edit.recIn)) - - prev_edit = None - for edit in edits: - print edit - frame_offset = reel_offsets[edit.reel] - - - src_start= int(edit.srcIn) + frame_offset - src_end= int(edit.srcOut) + frame_offset - src_length= src_end-src_start - - rec_start= int(edit.recIn) + 1 - rec_end= int(edit.recOut) + 1 - rec_length= rec_end-rec_start - - # print src_length, rec_length, src_start - - if edit.m2 != None: scale = fps/float(edit.m2.fps) - else: scale = 1.0 - - unedited_start= rec_start - src_start - offset_start = src_start - int(src_start*scale) # works for scaling up AND down - - if edit.transition_type == TRANSITION_CUT and (not elist.testOverlap(edit)): - track = 1 - - strip= None - final_strips = [] - if edit.reel.lower()=='bw': - strip= seq.new((0,0,0), rec_start, track+1) - strip.length= rec_length # for color its simple - final_strips.append(strip) - else: - - path_full = reel_files[edit.reel] - path_fileonly= path_full.split('/')[-1].split('\\')[-1] # os.path.basename(full) - path_dironly= path_full[:-len(path_fileonly)] # os.path.dirname(full) - - if edit.edit_type & EDIT_VIDEO: #and edit.transition_type == TRANSITION_CUT: - - try: - strip= seq.new((path_fileonly, path_dironly, path_full, 'movie'), unedited_start + offset_start, track+1) - except: - return 'Invalid input for movie' - - # Apply scaled rec in bounds - if scale != 1.0: - meta = scale_meta_speed(seq, strip, scale) - final_strip = meta - else: - final_strip = strip - - - final_strip.update() - final_strip.startOffset= rec_start - final_strip.startDisp - final_strip.endOffset= rec_end- final_strip.endDisp - final_strip.update() - final_strip.endOffset += (final_strip.endDisp - rec_end) - final_strip.update() - - - if edit.transition_duration: - if not prev_edit: - print "Error no previous strip" - else: - new_end = rec_start + 
int(edit.transition_duration) - for other in prev_edit.custom_data: - if other.type != SEQ_HD_SOUND and other.type != SEQ_RAM_SOUND: - other.endOffset += (other.endDisp - new_end) - other.update() - - # Apply disolve - if edit.transition_type == TRANSITION_DISSOLVE: - apply_dissolve_ipo(final_strip, edit.transition_duration) - - if edit.transition_type == TRANSITION_WIPE: - other_track = track + 2 - for other in prev_edit.custom_data: - if other.type != SEQ_HD_SOUND and other.type != SEQ_RAM_SOUND: - - strip_wipe= seq.new((SEQ_WIPE, other, final_strip), 1, other_track) - - if edit.wipe_type == WIPE_0: - strip_wipe.wipeEffectAngle = 90 - else: - strip_wipe.wipeEffectAngle = -90 - - other_track += 1 - - - - # strip.endOffset= strip.length - int(edit.srcOut) - #end_offset= (unedited_start+strip.length) - end - # print start, end, end_offset - #strip.endOffset = end_offset - - # break - # print strip - - final_strips.append(final_strip) - - - if edit.edit_type & (EDIT_AUDIO | EDIT_AUDIO_STEREO | EDIT_VIDEO_AUDIO): - - if scale == 1.0: # TODO - scaled audio - - try: - strip= seq.new((path_fileonly, path_dironly, path_full, 'audio_hd'), unedited_start + offset_start, track+6) - except: - - # See if there is a wave file there - path_full_wav = replace_ext(path_full, 'wav') - path_fileonly_wav = replace_ext(path_fileonly, 'wav') - - #try: - strip= seq.new((path_fileonly_wav, path_dironly, path_full_wav, 'audio_hd'), unedited_start + offset_start, track+6) - #except: - # return 'Invalid input for audio' - - final_strip = strip - - # Copied from above - final_strip.update() - final_strip.startOffset= rec_start - final_strip.startDisp - final_strip.endOffset= rec_end- final_strip.endDisp - final_strip.update() - final_strip.endOffset += (final_strip.endDisp - rec_end) - final_strip.update() - - if edit.transition_type == TRANSITION_DISSOLVE: - apply_dissolve_ipo(final_strip, edit.transition_duration) - - final_strips.append(final_strip) - - # strip= seq.new((0.6, 0.6, 0.6), start, track+1) - - if final_strips: - for strip in final_strips: - # strip.length= length - final_strip.name = edit.asName() - edit.custom_data[:]= final_strips - # track = not track - prev_edit = edit - track += 1 - - #break - - - def recursive_update(s): - s.update(1) - for s_kid in s: - recursive_update(s_kid) - - - for s in seq: - recursive_update(s) - - return '' - - - -#load_edl('/fe/edl/EP30CMXtrk1.edl') # /tmp/test.edl -#load_edl('/fe/edl/EP30CMXtrk2.edl') # /tmp/test.edl -#load_edl('/fe/edl/EP30CMXtrk3.edl') # /tmp/test.edl -#load_edl('/root/vid/rush/blender_edl.edl', ['/root/vid/rush/rushes3.avi',]) # /tmp/test.edl - - - - -# ---------------------- Blender UI part -from Blender import Draw, Window -import BPyWindow - -if 0: - DEFAULT_FILE_EDL = '/root/vid/rush/blender_edl.edl' - DEFAULT_FILE_MEDIA = '/root/vid/rush/rushes3_wav.avi' - DEFAULT_FRAME_OFFSET = -769 -else: - DEFAULT_FILE_EDL = '' - DEFAULT_FILE_MEDIA = '' - DEFAULT_FRAME_OFFSET = 0 - -B_EVENT_IMPORT = 1 -B_EVENT_RELOAD = 2 -B_EVENT_FILESEL_EDL = 3 -B_EVENT_NOP = 4 - -B_EVENT_FILESEL = 100 # or greater - -class ReelItemUI(object): - __slots__ = 'filename_but', 'offset_but', 'ui_text' - def __init__(self): - self.filename_but = Draw.Create(DEFAULT_FILE_MEDIA) - self.offset_but = Draw.Create(DEFAULT_FRAME_OFFSET) - self.ui_text = '' - - - -REEL_UI = {} # reel:ui_string - - -#REEL_FILENAMES = {} # reel:filename -#REEL_OFFSETS = {} # reel:filename - -PREF = {} - -PREF['filename'] = Draw.Create(DEFAULT_FILE_EDL) -PREF['reel_act'] = '' - -def edl_reload(): - 
Window.WaitCursor(1) - filename = PREF['filename'].val - sce= bpy.data.scenes.active - fps= sce.render.fps - - elist= EditList() - - if filename: - if not elist.parse(filename, fps): - Draw.PupMenu('Error%t|Could not open the file "' + filename + '"') - reels = elist.getReels() - else: - reels = {} - - REEL_UI.clear() - for reel_key, edits in reels.iteritems(): - - if reel_key == 'bw': - continue - - flag = 0 - for edit in edits: - flag |= edit.edit_type - - reel_item = REEL_UI[reel_key] = ReelItemUI() - - reel_item.ui_text = '%s (%s): ' % (reel_key, editFlagsToText(flag)) - - Window.WaitCursor(0) - -def edl_set_path(filename): - PREF['filename'].val = filename - edl_reload() - Draw.Redraw() - -def edl_set_path_reel(filename): - REEL_UI[PREF['reel_act']].filename_but.val = filename - Draw.Redraw() - -def edl_reel_keys(): - reel_keys = REEL_UI.keys() - - if 'bw' in reel_keys: - reel_keys.remove('bw') - - reel_keys.sort() - return reel_keys - -def edl_draw(): - - MARGIN = 4 - rect = BPyWindow.spaceRect() - but_width = int((rect[2]-MARGIN*2)/4.0) # 72 - # Clamp - if but_width>100: but_width = 100 - but_height = 17 - - x=MARGIN - y=rect[3]-but_height-MARGIN - xtmp = x - - - - # ---------- ---------- ---------- ---------- - Blender.Draw.BeginAlign() - PREF['filename'] = Draw.String('edl path: ', B_EVENT_RELOAD, xtmp, y, (but_width*3)-20, but_height, PREF['filename'].val, 256, 'EDL Path'); xtmp += (but_width*3)-20; - Draw.PushButton('..', B_EVENT_FILESEL_EDL, xtmp, y, 20, but_height, 'Select an EDL file'); xtmp += 20; - Blender.Draw.EndAlign() - - Draw.PushButton('Reload', B_EVENT_RELOAD, xtmp + MARGIN, y, but_width - MARGIN, but_height, 'Read the ID Property settings from the active curve object'); xtmp += but_width; - y-=but_height + MARGIN - xtmp = x - # ---------- ---------- ---------- ---------- - - reel_keys = edl_reel_keys() - - - - if reel_keys: text = 'Reel file list...' - elif PREF['filename'].val == '': text = 'No EDL loaded.' - else: text = 'No reels found!' 
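The TimeCode class defined earlier in this script converts between SMPTE-style 'hh:mm:ss:ff' strings and absolute frame counts at a given fps. A minimal standalone sketch of the same arithmetic, independent of the Blender API (the function names here are illustrative only, not part of the script):

def timecode_to_frame(text, fps):
    # 'hh:mm:ss:ff' -> absolute frame count; no drop-frame handling,
    # matching the TimeCode.fromString()/asFrame() pair above
    hours, minutes, seconds, frames = [int(v) for v in text.replace(';', ':').split(':')]
    return ((hours * 60 + minutes) * 60 + seconds) * fps + frames

def frame_to_timecode(frame, fps):
    # absolute frame count -> 'hh:mm:ss:ff', the inverse of the above
    seconds, frames = divmod(frame, fps)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return '%02d:%02d:%02d:%02d' % (hours, minutes, seconds, frames)

# e.g. one second and five frames at 25 fps
assert timecode_to_frame('00:00:01:05', 25) == 30
assert frame_to_timecode(30, 25) == '00:00:01:05'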
- - Draw.Label(text, xtmp + MARGIN, y, but_width*4, but_height); xtmp += but_width*4; - - y-=but_height + MARGIN - xtmp = x - - # ---------- ---------- ---------- ---------- - - - for i, reel_key in enumerate(reel_keys): - reel_item = REEL_UI[reel_key] - - Blender.Draw.BeginAlign() - REEL_UI[reel_key].filename_but = Draw.String(reel_item.ui_text, B_EVENT_NOP, xtmp, y, (but_width*3)-20, but_height, REEL_UI[reel_key].filename_but.val, 256, 'Select the reel path'); xtmp += (but_width*3)-20; - Draw.PushButton('..', B_EVENT_FILESEL + i, xtmp, y, 20, but_height, 'Media path to use for this reel'); xtmp += 20; - Blender.Draw.EndAlign() - - reel_item.offset_but= Draw.Number('ofs:', B_EVENT_NOP, xtmp + MARGIN, y, but_width - MARGIN, but_height, reel_item.offset_but.val, -100000, 100000, 'Start offset in frames when applying timecode'); xtmp += but_width - MARGIN; - - y-=but_height + MARGIN - xtmp = x - - # ---------- ---------- ---------- ---------- - - Draw.PushButton('Import CMX-EDL Sequencer Strips', B_EVENT_IMPORT, xtmp + MARGIN, MARGIN, but_width*4 - MARGIN, but_height, 'Load the EDL file into the sequencer'); xtmp += but_width*4; - y-=but_height + MARGIN - xtmp = x - - -def edl_event(evt, val): - pass - -def edl_bevent(evt): - - if evt == B_EVENT_NOP: - pass - elif evt == B_EVENT_IMPORT: - ''' - Load the file into blender with UI settings - ''' - filename = PREF['filename'].val - - reel_files = {} - reel_offsets = {} - - for reel_key, reel_item in REEL_UI.iteritems(): - reel_files[reel_key] = reel_item.filename_but.val - reel_offsets[reel_key] = reel_item.offset_but.val - - error = load_edl(filename, reel_files, reel_offsets) - if error != '': - Draw.PupMenu('Error%t|' + error) - else: - Window.RedrawAll() - - elif evt == B_EVENT_RELOAD: - edl_reload() - Draw.Redraw() - - elif evt == B_EVENT_FILESEL_EDL: - filename = PREF['filename'].val - if not filename: filename = Blender.sys.join(Blender.sys.expandpath('//'), '*.edl') - - Window.FileSelector(edl_set_path, 'Select EDL', filename) - - elif evt >= B_EVENT_FILESEL: - reel_keys = edl_reel_keys() - reel_key = reel_keys[evt - B_EVENT_FILESEL] - - filename = REEL_UI[reel_key].filename_but.val - if not filename: filename = Blender.sys.expandpath('//') - - PREF['reel_act'] = reel_key # so file set path knows which one to set - Window.FileSelector(edl_set_path_reel, 'Reel Media', filename) - - - -if __name__ == '__main__': - Draw.Register(edl_draw, edl_event, edl_bevent) - edl_reload() - diff --git a/release/scripts/import_lightwave_motion.py b/release/scripts/import_lightwave_motion.py deleted file mode 100644 index 20c87dfd5c6..00000000000 --- a/release/scripts/import_lightwave_motion.py +++ /dev/null @@ -1,244 +0,0 @@ -#!BPY - -""" Registration info for Blender menus: <- these words are ignored -Name: 'Lightwave Motion (.mot)...' 
-Blender: 245 -Group: 'Import' -Tip: 'Import Loc Rot Size chanels from a Lightwave .mot file' -""" - -__author__ = "Daniel Salazar (ZanQdo)" -__url__ = ("blender", "blenderartists.org", -"e-mail: zanqdo@gmail.com") -__version__ = "16/04/08" - -__bpydoc__ = """\ -This script loads Lightwave motion files (.mot) -into the selected objects - -Usage: -Run the script with one or more objects selected (any kind) -Be sure to set the framerate correctly - -""" - -# $Id$ -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Copyright (C) 2003, 2004: A Vanpoucke -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -import math as M -import Blender as B -import bpy - - -def FuncionPrincipal (Dir): - B.Window.WaitCursor(1) - ObjSelect = B.Object.GetSelected() - - if not ObjSelect: - B.Draw.PupMenu('Select one or more objects, aborting.') - return - - - SC = B.Scene.GetCurrent() - SCR = SC.getRenderingContext() - FrameRate = float(SCR.framesPerSec()) - - - # Creating new IPO - - IPO = B.Ipo.New('Object', 'LW_Motion') - - - # Creating Curves in the IPO - - LocX = IPO.addCurve("LocX") - LocX.setInterpolation("Bezier") - - LocY = IPO.addCurve("LocY") - LocX.setInterpolation("Bezier") - - LocZ = IPO.addCurve("LocZ") - LocX.setInterpolation("Bezier") - - RotX = IPO.addCurve("RotX") - LocX.setInterpolation("Bezier") - - RotY = IPO.addCurve("RotY") - LocX.setInterpolation("Bezier") - - RotZ = IPO.addCurve("RotZ") - LocX.setInterpolation("Bezier") - - ScaleX = IPO.addCurve("ScaleX") - LocX.setInterpolation("Bezier") - - ScaleY = IPO.addCurve("ScaleY") - LocX.setInterpolation("Bezier") - - ScaleZ = IPO.addCurve("ScaleZ") - LocX.setInterpolation("Bezier") - - - # Opening the mot file - - File = open (Dir, 'rU') - - - # Init flags - - CurChannel = -1 - ScaleFlag = 0 - - # Main file reading cycle - - for Line in File: - - ''' - # Number of channels in the file - - if "NumChannels" in Line: - Line = Line.split (' ') - NumChannels = int(Line[1]) - ''' - - # Current Channel Flag - - if "Channel 0" in Line: - CurChannel = 0 - - elif "Channel 1" in Line: - CurChannel = 1 - - elif "Channel 2" in Line: - CurChannel = 2 - - elif "Channel 3" in Line: - CurChannel = 3 - - elif "Channel 4" in Line: - CurChannel = 4 - - elif "Channel 5" in Line: - CurChannel = 5 - - elif "Channel 6" in Line: - CurChannel = 6 - - elif "Channel 7" in Line: - CurChannel = 7 - - elif "Channel 8" in Line: - CurChannel = 8 - - - # Getting the data and writing to IPOs - - if CurChannel == 0: - if "Key" in Line: - Line = Line.split (' ') - ValCh_0 = float (Line [3]) - TimeCh_0 = float (Line [4]) * FrameRate - LocX.addBezier ((TimeCh_0, ValCh_0)) - - if CurChannel == 1: - if "Key" in Line: - Line = Line.split (' ') - 
ValCh_1 = float (Line [3]) - TimeCh_1 = float (Line [4]) * FrameRate - LocZ.addBezier ((TimeCh_1, ValCh_1)) - - if CurChannel == 2: - if "Key" in Line: - Line = Line.split (' ') - ValCh_2 = float (Line [3]) - TimeCh_2 = float (Line [4]) * FrameRate - LocY.addBezier ((TimeCh_2, ValCh_2)) - - if CurChannel == 3: - if "Key" in Line: - Line = Line.split (' ') - ValCh_3 = M.degrees ( - float (Line [3]) ) / 10 - TimeCh_3 = float (Line [4]) * FrameRate - RotZ.addBezier ((TimeCh_3, ValCh_3)) - - if CurChannel == 4: - if "Key" in Line: - Line = Line.split (' ') - ValCh_4 = M.degrees ( - float (Line [3]) ) / 10 - TimeCh_4 = float (Line [4]) * FrameRate - RotX.addBezier ((TimeCh_4, ValCh_4)) - - if CurChannel == 5: - if "Key" in Line: - Line = Line.split (' ') - ValCh_5 = M.degrees ( - float (Line [3]) ) / 10 - TimeCh_5 = float (Line [4]) * FrameRate - RotY.addBezier ((TimeCh_5, ValCh_5)) - - if CurChannel == 6: - if "Key" in Line: - Line = Line.split (' ') - ValCh_6 = float (Line [3]) - TimeCh_6 = float (Line [4]) * FrameRate - ScaleX.addBezier ((TimeCh_6, ValCh_6)) - elif ScaleFlag < 3: - ScaleFlag += 1 - ScaleX.addBezier ((0, 1)) - - if CurChannel == 7: - if "Key" in Line: - Line = Line.split (' ') - ValCh_7 = float (Line [3]) - TimeCh_7 = float (Line [4]) * FrameRate - ScaleZ.addBezier ((TimeCh_7, ValCh_7)) - elif ScaleFlag < 3: - ScaleFlag += 1 - ScaleZ.addBezier ((0, 1)) - - if CurChannel == 8: - if "Key" in Line: - Line = Line.split (' ') - ValCh_8 = float (Line [3]) - TimeCh_8 = float (Line [4]) * FrameRate - ScaleY.addBezier ((TimeCh_8, ValCh_8)) - elif ScaleFlag < 3: - ScaleFlag += 1 - ScaleY.addBezier ((0, 1)) - - - # Link the IPO to all selected objects - - for ob in ObjSelect: - ob.setIpo(IPO) - - File.close() - - print '\nDone, the following motion file has been loaded:\n\n%s' % Dir - B.Window.WaitCursor(0) - -def main(): - B.Window.FileSelector(FuncionPrincipal, "Load IPO from .mot File", B.sys.makename(ext='.mot')) - -if __name__=='__main__': - main() - diff --git a/release/scripts/import_mdd.py b/release/scripts/import_mdd.py deleted file mode 100644 index 1ee196ab67f..00000000000 --- a/release/scripts/import_mdd.py +++ /dev/null @@ -1,158 +0,0 @@ -#!BPY - -""" - Name: 'Load MDD to Mesh RVKs' - Blender: 242 - Group: 'Import' - Tooltip: 'baked vertex animation to active mesh object.' -""" -__author__ = "Bill L.Nieuwendorp" -__bpydoc__ = """\ -This script Imports Lightwaves MotionDesigner format. - -The .mdd format has become quite a popular Pipeline format
-for moving animations from package to package. -""" -# mdd importer -# -# Warning if the vertex order or vertex count differs from the -# origonal model the mdd was Baked out from their will be Strange -# behavior -# -# -#vertex animation to ShapeKeys with ipo and gives the frame a value of 1.0 -#A modifier to read mdd files would be Ideal but thats for another day :) -# -#Please send any fixes,updates,bugs to Slow67_at_Gmail.com -#Bill Niewuendorp - -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** - - - - -try: - from struct import unpack -except: - unpack = None - -import Blender -from Blender import Mesh, Object, Scene -import BPyMessages - -def mdd_import(filepath, ob, PREF_IPONAME, PREF_START_FRAME, PREF_JUMP): - - print '\n\nimporting mdd "%s"' % filepath - - Blender.Window.DrawProgressBar (0.0, "Importing mdd ...") - Blender.Window.EditMode(0) - Blender.Window.WaitCursor(1) - - file = open(filepath, 'rb') - frames, points = unpack(">2i", file.read(8)) - time = unpack((">%df" % frames), file.read(frames * 4)) - - print '\tpoints:%d frames:%d' % (points,frames) - - scn = Scene.GetCurrent() - ctx = scn.getRenderingContext() - Blender.Set("curframe", PREF_START_FRAME) - me = ob.getData(mesh=1) - - def UpdateMesh(me,fr): - for v in me.verts: - # 12 is the size of 3 floats - x,y,z= unpack('>3f', file.read(12)) - v.co[:] = x,z,y - me.update() - - Blender.Window.DrawProgressBar (0.4, "4 Importing mdd ...") - - - curfr = ctx.currentFrame() - print'\twriting mdd data...' - for i in xrange(frames): - Blender.Set("curframe", i+PREF_START_FRAME) - if len(me.verts) > 1 and (curfr >= PREF_START_FRAME) and (curfr <= PREF_START_FRAME+frames): - UpdateMesh(me, i) - ob.insertShapeKey() - - Blender.Window.DrawProgressBar (0.5, "5 Importing mdd ...") - - key= me.key - - # Add the key of its not there - if not key: - me.insertKey(1, 'relative') - key= me.key - - key.ipo = Blender.Ipo.New('Key', PREF_IPONAME) - ipo = key.ipo - # block = key.getBlocks() # not used. 
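As mdd_import() above reads it, an .mdd file is big-endian: two 32-bit ints (frame count, point count), then one float32 time value per frame, then frames x points x 3 float32 coordinates. A standalone reader sketch along those lines, with no Blender dependency (read_mdd is an illustrative name, not part of this script):

import struct

def read_mdd(filepath):
    # Returns (times, frames): times holds one float per frame,
    # frames[f][p] is an (x, y, z) tuple for point p at frame f.
    with open(filepath, 'rb') as f:
        frame_count, point_count = struct.unpack('>2i', f.read(8))
        times = struct.unpack('>%df' % frame_count, f.read(frame_count * 4))
        frames = []
        for _ in range(frame_count):
            frame = []
            for _ in range(point_count):
                x, y, z = struct.unpack('>3f', f.read(12))
                frame.append((x, z, y))  # same Y/Z swap the importer applies
            frames.append(frame)
    return times, frames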
- all_keys = ipo.curveConsts - - for i in xrange(PREF_JUMP+1, len(all_keys), PREF_JUMP): - curve = ipo.getCurve(i) - if curve == None: - curve = ipo.addCurve(all_keys[i]) - - curve.append((PREF_START_FRAME+i-1,1)) - curve.append((PREF_START_FRAME+i- PREF_JUMP -1,0)) - curve.append((PREF_START_FRAME+i+ PREF_JUMP-1,0)) - curve.setInterpolation('Linear') - curve.recalc() - - print 'done' - Blender.Window.WaitCursor(0) - Blender.Window.DrawProgressBar (1.0, '') - - -def mdd_import_ui(filepath): - - if BPyMessages.Error_NoFile(filepath): - return - - scn= Scene.GetCurrent() - ob_act= scn.objects.active - - if ob_act == None or ob_act.type != 'Mesh': - BPyMessages.Error_NoMeshActive() - return - - PREF_IPONAME = Blender.Draw.Create(filepath.split('/')[-1].split('\\')[-1].split('.')[0]) - PREF_START_FRAME = Blender.Draw.Create(1) - PREF_JUMP = Blender.Draw.Create(1) - - block = [\ - ("Ipo Name: ", PREF_IPONAME, 0, 30, "Ipo name for the new shape key"),\ - ("Start Frame: ", PREF_START_FRAME, 1, 3000, "Start frame for the animation"),\ - ("Key Skip: ", PREF_JUMP, 1, 100, "KeyReduction, Skip every Nth Frame")\ - ] - - if not Blender.Draw.PupBlock("Import MDD", block): - return - orig_frame = Blender.Get('curframe') - mdd_import(filepath, ob_act, PREF_IPONAME.val, PREF_START_FRAME.val, PREF_JUMP.val) - Blender.Set('curframe', orig_frame) - -if __name__ == '__main__': - if not unpack: - Draw.PupMenu('Error%t|This script requires a full python install') - - Blender.Window.FileSelector(mdd_import_ui, 'IMPORT MDD', '*.mdd') diff --git a/release/scripts/import_obj.py b/release/scripts/import_obj.py deleted file mode 100644 index 81230bfcf03..00000000000 --- a/release/scripts/import_obj.py +++ /dev/null @@ -1,1234 +0,0 @@ -#!BPY - -""" -Name: 'Wavefront (.obj)...' -Blender: 249 -Group: 'Import' -Tooltip: 'Load a Wavefront OBJ File, Shift: batch import all dir.' -""" - -__author__= "Campbell Barton", "Jiri Hnidek", "Paolo Ciccone" -__url__= ['http://wiki.blender.org/index.php/Scripts/Manual/Import/wavefront_obj', 'blender.org', 'blenderartists.org'] -__version__= "2.13" - -__bpydoc__= """\ -This script imports a Wavefront OBJ files to Blender. - -Usage: -Run this script from "File->Import" menu and then load the desired OBJ file. -Note, This loads mesh objects and materials only, nurbs and curves are not supported. -""" - -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Script copyright (C) Campbell J Barton 2007-2009 -# - V2.12- bspline import/export added (funded by PolyDimensions GmbH) -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
-# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -from Blender import Mesh, Draw, Window, Texture, Material, sys -import bpy -import BPyMesh -import BPyImage -import BPyMessages - -try: import os -except: os= False - -# Generic path functions -def stripFile(path): - '''Return directory, where the file is''' - lastSlash= max(path.rfind('\\'), path.rfind('/')) - if lastSlash != -1: - path= path[:lastSlash] - return '%s%s' % (path, sys.sep) - -def stripPath(path): - '''Strips the slashes from the back of a string''' - return path.split('/')[-1].split('\\')[-1] - -def stripExt(name): # name is a string - '''Strips the prefix off the name before writing''' - index= name.rfind('.') - if index != -1: - return name[ : index ] - else: - return name -# end path funcs - - - -def line_value(line_split): - ''' - Returns 1 string represneting the value for this line - None will be returned if theres only 1 word - ''' - length= len(line_split) - if length == 1: - return None - - elif length == 2: - return line_split[1] - - elif length > 2: - return ' '.join( line_split[1:] ) - -def obj_image_load(imagepath, DIR, IMAGE_SEARCH): - ''' - Mainly uses comprehensiveImageLoad - but tries to replace '_' with ' ' for Max's exporter replaces spaces with underscores. - ''' - - if '_' in imagepath: - image= BPyImage.comprehensiveImageLoad(imagepath, DIR, PLACE_HOLDER= False, RECURSIVE= IMAGE_SEARCH) - if image: return image - # Did the exporter rename the image? - image= BPyImage.comprehensiveImageLoad(imagepath.replace('_', ' '), DIR, PLACE_HOLDER= False, RECURSIVE= IMAGE_SEARCH) - if image: return image - - # Return an image, placeholder if it dosnt exist - image= BPyImage.comprehensiveImageLoad(imagepath, DIR, PLACE_HOLDER= True, RECURSIVE= IMAGE_SEARCH) - return image - - -def create_materials(filepath, material_libs, unique_materials, unique_material_images, IMAGE_SEARCH): - ''' - Create all the used materials in this obj, - assign colors and images to the materials from all referenced material libs - ''' - DIR= stripFile(filepath) - - #==================================================================================# - # This function sets textures defined in .mtl file # - #==================================================================================# - def load_material_image(blender_material, context_material_name, imagepath, type): - - texture= bpy.data.textures.new(type) - texture.setType('Image') - - # Absolute path - c:\.. etc would work here - image= obj_image_load(imagepath, DIR, IMAGE_SEARCH) - has_data = image.has_data - texture.image = image - - if not has_data: - try: - # first time using this image. We need to load it first - image.glLoad() - except: - # probably the image is crashed - pass - else: - has_data = image.has_data - - # Adds textures for materials (rendering) - if type == 'Kd': - if has_data and image.depth == 32: - # Image has alpha - blender_material.setTexture(0, texture, Texture.TexCo.UV, Texture.MapTo.COL | Texture.MapTo.ALPHA) - texture.setImageFlags('MipMap', 'InterPol', 'UseAlpha') - blender_material.mode |= Material.Modes.ZTRANSP - blender_material.alpha = 0.0 - else: - blender_material.setTexture(0, texture, Texture.TexCo.UV, Texture.MapTo.COL) - - # adds textures to faces (Textured/Alt-Z mode) - # Only apply the diffuse texture to the face if the image has not been set with the inline usemat func. 
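create_materials() in this importer walks each referenced .mtl library line by line, dispatching on keywords such as newmtl, Ka/Kd/Ks, Ns, Ni, d/Tr and the various map_* texture entries. A stripped-down, Blender-free sketch of that first pass (parse_mtl is an illustrative helper, not part of the script):

def parse_mtl(mtlpath):
    # Collect each material's settings into a plain dict, keyed by the
    # lower-cased keyword (kd, ks, ns, d, map_kd, ...), roughly the same
    # set of keywords the importer recognises.
    materials = {}
    current = None
    with open(mtlpath) as mtl:
        for line in mtl:
            words = line.split()
            if not words or words[0].startswith('#'):
                continue
            key = words[0].lower()
            if key == 'newmtl':
                current = materials.setdefault(' '.join(words[1:]), {})
            elif current is not None:
                current[key] = words[1:]
    return materials

Values stay as raw token lists; converting kd to three floats or prefixing a map_kd path with the .mtl directory is left to the caller, much as the importer does with its DIR prefix.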
- unique_material_images[context_material_name]= image, has_data # set the texface image - - elif type == 'Ka': - blender_material.setTexture(1, texture, Texture.TexCo.UV, Texture.MapTo.CMIR) # TODO- Add AMB to BPY API - - elif type == 'Ks': - blender_material.setTexture(2, texture, Texture.TexCo.UV, Texture.MapTo.SPEC) - - elif type == 'Bump': - blender_material.setTexture(3, texture, Texture.TexCo.UV, Texture.MapTo.NOR) - elif type == 'D': - blender_material.setTexture(4, texture, Texture.TexCo.UV, Texture.MapTo.ALPHA) - blender_material.mode |= Material.Modes.ZTRANSP - blender_material.alpha = 0.0 - # Todo, unset deffuse material alpha if it has an alpha channel - - elif type == 'refl': - blender_material.setTexture(5, texture, Texture.TexCo.UV, Texture.MapTo.REF) - - - # Add an MTL with the same name as the obj if no MTLs are spesified. - temp_mtl= stripExt(stripPath(filepath))+ '.mtl' - - if sys.exists(DIR + temp_mtl) and temp_mtl not in material_libs: - material_libs.append( temp_mtl ) - del temp_mtl - - #Create new materials - for name in unique_materials: # .keys() - if name != None: - unique_materials[name]= bpy.data.materials.new(name) - unique_material_images[name]= None, False # assign None to all material images to start with, add to later. - - unique_materials[None]= None - unique_material_images[None]= None, False - - for libname in material_libs: - mtlpath= DIR + libname - if not sys.exists(mtlpath): - #print '\tError Missing MTL: "%s"' % mtlpath - pass - else: - #print '\t\tloading mtl: "%s"' % mtlpath - context_material= None - mtl= open(mtlpath, 'rU') - for line in mtl: #.xreadlines(): - if line.startswith('newmtl'): - context_material_name= line_value(line.split()) - if unique_materials.has_key(context_material_name): - context_material = unique_materials[ context_material_name ] - else: - context_material = None - - elif context_material: - # we need to make a material to assign properties to it. 
- line_split= line.split() - line_lower= line.lower().lstrip() - if line_lower.startswith('ka'): - context_material.setMirCol((float(line_split[1]), float(line_split[2]), float(line_split[3]))) - elif line_lower.startswith('kd'): - context_material.setRGBCol((float(line_split[1]), float(line_split[2]), float(line_split[3]))) - elif line_lower.startswith('ks'): - context_material.setSpecCol((float(line_split[1]), float(line_split[2]), float(line_split[3]))) - elif line_lower.startswith('ns'): - context_material.setHardness( int((float(line_split[1])*0.51)) ) - elif line_lower.startswith('ni'): # Refraction index - context_material.setIOR( max(1, min(float(line_split[1]), 3))) # Between 1 and 3 - elif line_lower.startswith('d') or line_lower.startswith('tr'): - context_material.setAlpha(float(line_split[1])) - context_material.mode |= Material.Modes.ZTRANSP - elif line_lower.startswith('map_ka'): - img_filepath= line_value(line.split()) - if img_filepath: - load_material_image(context_material, context_material_name, img_filepath, 'Ka') - elif line_lower.startswith('map_ks'): - img_filepath= line_value(line.split()) - if img_filepath: - load_material_image(context_material, context_material_name, img_filepath, 'Ks') - elif line_lower.startswith('map_kd'): - img_filepath= line_value(line.split()) - if img_filepath: - load_material_image(context_material, context_material_name, img_filepath, 'Kd') - elif line_lower.startswith('map_bump'): - img_filepath= line_value(line.split()) - if img_filepath: - load_material_image(context_material, context_material_name, img_filepath, 'Bump') - elif line_lower.startswith('map_d') or line_lower.startswith('map_tr'): # Alpha map - Dissolve - img_filepath= line_value(line.split()) - if img_filepath: - load_material_image(context_material, context_material_name, img_filepath, 'D') - - elif line_lower.startswith('refl'): # Reflectionmap - img_filepath= line_value(line.split()) - if img_filepath: - load_material_image(context_material, context_material_name, img_filepath, 'refl') - mtl.close() - - - - -def split_mesh(verts_loc, faces, unique_materials, filepath, SPLIT_OB_OR_GROUP, SPLIT_MATERIALS): - ''' - Takes vert_loc and faces, and seperates into multiple sets of - (verts_loc, faces, unique_materials, dataname) - This is done so objects do not overload the 16 material limit. - ''' - - filename = stripExt(stripPath(filepath)) - - if not SPLIT_OB_OR_GROUP and not SPLIT_MATERIALS: - # use the filename for the object name since we arnt chopping up the mesh. - return [(verts_loc, faces, unique_materials, filename)] - - - def key_to_name(key): - # if the key is a tuple, join it to make a string - if type(key) == tuple: - return '%s_%s' % key - elif not key: - return filename # assume its a string. make sure this is true if the splitting code is changed - else: - return key - - # Return a key that makes the faces unique. - if SPLIT_OB_OR_GROUP and not SPLIT_MATERIALS: - def face_key(face): - return face[4] # object - - elif not SPLIT_OB_OR_GROUP and SPLIT_MATERIALS: - def face_key(face): - return face[2] # material - - else: # Both - def face_key(face): - return face[4], face[2] # object,material - - - face_split_dict= {} - - oldkey= -1 # initialize to a value that will never match the key - - for face in faces: - - key= face_key(face) - - if oldkey != key: - # Check the key has changed. 
- try: - verts_split, faces_split, unique_materials_split, vert_remap= face_split_dict[key] - except KeyError: - faces_split= [] - verts_split= [] - unique_materials_split= {} - vert_remap= [-1]*len(verts_loc) - - face_split_dict[key]= (verts_split, faces_split, unique_materials_split, vert_remap) - - oldkey= key - - face_vert_loc_indicies= face[0] - - # Remap verts to new vert list and add where needed - for enum, i in enumerate(face_vert_loc_indicies): - if vert_remap[i] == -1: - new_index= len(verts_split) - vert_remap[i]= new_index # set the new remapped index so we only add once and can reference next time. - face_vert_loc_indicies[enum] = new_index # remap to the local index - verts_split.append( verts_loc[i] ) # add the vert to the local verts - - else: - face_vert_loc_indicies[enum] = vert_remap[i] # remap to the local index - - matname= face[2] - if matname and not unique_materials_split.has_key(matname): - unique_materials_split[matname] = unique_materials[matname] - - faces_split.append(face) - - - # remove one of the itemas and reorder - return [(value[0], value[1], value[2], key_to_name(key)) for key, value in face_split_dict.iteritems()] - - -def create_mesh(scn, new_objects, has_ngons, CREATE_FGONS, CREATE_EDGES, verts_loc, verts_tex, faces, unique_materials, unique_material_images, unique_smooth_groups, vertex_groups, dataname): - ''' - Takes all the data gathered and generates a mesh, adding the new object to new_objects - deals with fgons, sharp edges and assigning materials - ''' - if not has_ngons: - CREATE_FGONS= False - - if unique_smooth_groups: - sharp_edges= {} - smooth_group_users= dict([ (context_smooth_group, {}) for context_smooth_group in unique_smooth_groups.iterkeys() ]) - context_smooth_group_old= -1 - - # Split fgons into tri's - fgon_edges= {} # Used for storing fgon keys - if CREATE_EDGES: - edges= [] - - context_object= None - - # reverse loop through face indicies - for f_idx in xrange(len(faces)-1, -1, -1): - - face_vert_loc_indicies,\ - face_vert_tex_indicies,\ - context_material,\ - context_smooth_group,\ - context_object= faces[f_idx] - - len_face_vert_loc_indicies = len(face_vert_loc_indicies) - - if len_face_vert_loc_indicies==1: - faces.pop(f_idx)# cant add single vert faces - - elif not face_vert_tex_indicies or len_face_vert_loc_indicies == 2: # faces that have no texture coords are lines - if CREATE_EDGES: - # generators are better in python 2.4+ but can't be used in 2.3 - # edges.extend( (face_vert_loc_indicies[i], face_vert_loc_indicies[i+1]) for i in xrange(len_face_vert_loc_indicies-1) ) - edges.extend( [(face_vert_loc_indicies[i], face_vert_loc_indicies[i+1]) for i in xrange(len_face_vert_loc_indicies-1)] ) - - faces.pop(f_idx) - else: - - # Smooth Group - if unique_smooth_groups and context_smooth_group: - # Is a part of of a smooth group and is a face - if context_smooth_group_old is not context_smooth_group: - edge_dict= smooth_group_users[context_smooth_group] - context_smooth_group_old= context_smooth_group - - for i in xrange(len_face_vert_loc_indicies): - i1= face_vert_loc_indicies[i] - i2= face_vert_loc_indicies[i-1] - if i1>i2: i1,i2= i2,i1 - - try: - edge_dict[i1,i2]+= 1 - except KeyError: - edge_dict[i1,i2]= 1 - - # FGons into triangles - if has_ngons and len_face_vert_loc_indicies > 4: - - ngon_face_indices= BPyMesh.ngon(verts_loc, face_vert_loc_indicies) - faces.extend(\ - [(\ - [face_vert_loc_indicies[ngon[0]], face_vert_loc_indicies[ngon[1]], face_vert_loc_indicies[ngon[2]] ],\ - [face_vert_tex_indicies[ngon[0]], 
face_vert_tex_indicies[ngon[1]], face_vert_tex_indicies[ngon[2]] ],\ - context_material,\ - context_smooth_group,\ - context_object)\ - for ngon in ngon_face_indices]\ - ) - - # edges to make fgons - if CREATE_FGONS: - edge_users= {} - for ngon in ngon_face_indices: - for i in (0,1,2): - i1= face_vert_loc_indicies[ngon[i ]] - i2= face_vert_loc_indicies[ngon[i-1]] - if i1>i2: i1,i2= i2,i1 - - try: - edge_users[i1,i2]+=1 - except KeyError: - edge_users[i1,i2]= 1 - - for key, users in edge_users.iteritems(): - if users>1: - fgon_edges[key]= None - - # remove all after 3, means we dont have to pop this one. - faces.pop(f_idx) - - - # Build sharp edges - if unique_smooth_groups: - for edge_dict in smooth_group_users.itervalues(): - for key, users in edge_dict.iteritems(): - if users==1: # This edge is on the boundry of a group - sharp_edges[key]= None - - - # map the material names to an index - material_mapping= dict([(name, i) for i, name in enumerate(unique_materials)]) # enumerate over unique_materials keys() - - materials= [None] * len(unique_materials) - - for name, index in material_mapping.iteritems(): - materials[index]= unique_materials[name] - - me= bpy.data.meshes.new(dataname) - - me.materials= materials[0:16] # make sure the list isnt too big. - #me.verts.extend([(0,0,0)]) # dummy vert - me.verts.extend(verts_loc) - - face_mapping= me.faces.extend([f[0] for f in faces], indexList=True) - - if verts_tex and me.faces: - me.faceUV= 1 - # TEXMODE= Mesh.FaceModes['TEX'] - - context_material_old= -1 # avoid a dict lookup - mat= 0 # rare case it may be un-initialized. - me_faces= me.faces - ALPHA= Mesh.FaceTranspModes.ALPHA - - for i, face in enumerate(faces): - if len(face[0]) < 2: - pass #raise "bad face" - elif len(face[0])==2: - if CREATE_EDGES: - edges.append(face[0]) - else: - face_index_map= face_mapping[i] - if face_index_map!=None: # None means the face wasnt added - blender_face= me_faces[face_index_map] - - face_vert_loc_indicies,\ - face_vert_tex_indicies,\ - context_material,\ - context_smooth_group,\ - context_object= face - - - - if context_smooth_group: - blender_face.smooth= True - - if context_material: - if context_material_old is not context_material: - mat= material_mapping[context_material] - if mat>15: - mat= 15 - context_material_old= context_material - - blender_face.mat= mat - - - if verts_tex: - if context_material: - image, has_data= unique_material_images[context_material] - if image: # Can be none if the material dosnt have an image. - blender_face.image= image - if has_data and image.depth == 32: - blender_face.transp |= ALPHA - - # BUG - Evil eekadoodle problem where faces that have vert index 0 location at 3 or 4 are shuffled. - if len(face_vert_loc_indicies)==4: - if face_vert_loc_indicies[2]==0 or face_vert_loc_indicies[3]==0: - face_vert_tex_indicies= face_vert_tex_indicies[2], face_vert_tex_indicies[3], face_vert_tex_indicies[0], face_vert_tex_indicies[1] - else: # length of 3 - if face_vert_loc_indicies[2]==0: - face_vert_tex_indicies= face_vert_tex_indicies[1], face_vert_tex_indicies[2], face_vert_tex_indicies[0] - # END EEEKADOODLE FIX - - # assign material, uv's and image - for ii, uv in enumerate(blender_face.uv): - uv.x, uv.y= verts_tex[face_vert_tex_indicies[ii]] - del me_faces - del ALPHA - - # Add edge faces. 
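The fgon and sharp-edge handling in create_mesh() above boils down to counting how many faces use each undirected edge: edges seen only once inside a smoothing group become sharp, while edges seen more than once among a triangulated ngon's triangles are flagged as fgon (interior) edges. A self-contained sketch of that counting step (edge_use_counts is an illustrative name):

def edge_use_counts(faces):
    # Count how many faces use each undirected edge, normalising the
    # (i1, i2) pair the same way the importer does.
    users = {}
    for face in faces:
        for i in range(len(face)):
            i1, i2 = face[i - 1], face[i]
            if i1 > i2:
                i1, i2 = i2, i1
            users[(i1, i2)] = users.get((i1, i2), 0) + 1
    return users

# Edges used once are boundary ("sharp") candidates; edges used more than
# once are interior, e.g. the shared edge of a triangulated quad.
counts = edge_use_counts([(0, 1, 2), (0, 2, 3)])
assert counts[(0, 2)] == 2 and counts[(0, 1)] == 1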
- me_edges= me.edges - if CREATE_FGONS and fgon_edges: - FGON= Mesh.EdgeFlags.FGON - for ed in me.findEdges( fgon_edges.keys() ): - if ed!=None: - me_edges[ed].flag |= FGON - del FGON - - if unique_smooth_groups and sharp_edges: - SHARP= Mesh.EdgeFlags.SHARP - for ed in me.findEdges( sharp_edges.keys() ): - if ed!=None: - me_edges[ed].flag |= SHARP - del SHARP - - if CREATE_EDGES: - me_edges.extend( edges ) - - del me_edges - - me.calcNormals() - - ob= scn.objects.new(me) - new_objects.append(ob) - - # Create the vertex groups. No need to have the flag passed here since we test for the - # content of the vertex_groups. If the user selects to NOT have vertex groups saved then - # the following test will never run - for group_name, group_indicies in vertex_groups.iteritems(): - me.addVertGroup(group_name) - me.assignVertsToGroup(group_name, group_indicies,1.00, Mesh.AssignModes.REPLACE) - - -def create_nurbs(scn, context_nurbs, vert_loc, new_objects): - ''' - Add nurbs object to blender, only support one type at the moment - ''' - deg = context_nurbs.get('deg', (3,)) - curv_range = context_nurbs.get('curv_range', None) - curv_idx = context_nurbs.get('curv_idx', []) - parm_u = context_nurbs.get('parm_u', []) - parm_v = context_nurbs.get('parm_v', []) - name = context_nurbs.get('name', 'ObjNurb') - cstype = context_nurbs.get('cstype', None) - - if cstype == None: - print '\tWarning, cstype not found' - return - if cstype != 'bspline': - print '\tWarning, cstype is not supported (only bspline)' - return - if not curv_idx: - print '\tWarning, curv argument empty or not set' - return - if len(deg) > 1 or parm_v: - print '\tWarning, surfaces not supported' - return - - cu = bpy.data.curves.new(name, 'Curve') - cu.flag |= 1 # 3D curve - - nu = None - for pt in curv_idx: - - pt = vert_loc[pt] - pt = (pt[0], pt[1], pt[2], 1.0) - - if nu == None: - nu = cu.appendNurb(pt) - else: - nu.append(pt) - - nu.orderU = deg[0]+1 - - # get for endpoint flag from the weighting - if curv_range and len(parm_u) > deg[0]+1: - do_endpoints = True - for i in xrange(deg[0]+1): - - if abs(parm_u[i]-curv_range[0]) > 0.0001: - do_endpoints = False - break - - if abs(parm_u[-(i+1)]-curv_range[1]) > 0.0001: - do_endpoints = False - break - - else: - do_endpoints = False - - if do_endpoints: - nu.flagU |= 2 - - - # close - ''' - do_closed = False - if len(parm_u) > deg[0]+1: - for i in xrange(deg[0]+1): - #print curv_idx[i], curv_idx[-(i+1)] - - if curv_idx[i]==curv_idx[-(i+1)]: - do_closed = True - break - - if do_closed: - nu.flagU |= 1 - ''' - - ob = scn.objects.new(cu) - new_objects.append(ob) - - -def strip_slash(line_split): - if line_split[-1][-1]== '\\': - if len(line_split[-1])==1: - line_split.pop() # remove the \ item - else: - line_split[-1]= line_split[-1][:-1] # remove the \ from the end last number - return True - return False - - - -def get_float_func(filepath): - ''' - find the float function for this obj file - - weather to replace commas or not - ''' - file= open(filepath, 'rU') - for line in file: #.xreadlines(): - line = line.lstrip() - if line.startswith('v'): # vn vt v - if ',' in line: - return lambda f: float(f.replace(',', '.')) - elif '.' 
in line: - return float - - # incase all vert values were ints - return float - -def load_obj(filepath, - CLAMP_SIZE= 0.0, - CREATE_FGONS= True, - CREATE_SMOOTH_GROUPS= True, - CREATE_EDGES= True, - SPLIT_OBJECTS= True, - SPLIT_GROUPS= True, - SPLIT_MATERIALS= True, - ROTATE_X90= True, - IMAGE_SEARCH=True, - POLYGROUPS=False): - ''' - Called by the user interface or another script. - load_obj(path) - should give acceptable results. - This function passes the file and sends the data off - to be split into objects and then converted into mesh objects - ''' - print '\nimporting obj "%s"' % filepath - - if SPLIT_OBJECTS or SPLIT_GROUPS or SPLIT_MATERIALS: - POLYGROUPS = False - - time_main= sys.time() - - verts_loc= [] - verts_tex= [] - faces= [] # tuples of the faces - material_libs= [] # filanems to material libs this uses - vertex_groups = {} # when POLYGROUPS is true - - # Get the string to float conversion func for this file- is 'float' for almost all files. - float_func= get_float_func(filepath) - - # Context variables - context_material= None - context_smooth_group= None - context_object= None - context_vgroup = None - - # Nurbs - context_nurbs = {} - nurbs = [] - context_parm = '' # used by nurbs too but could be used elsewhere - - has_ngons= False - # has_smoothgroups= False - is explicit with len(unique_smooth_groups) being > 0 - - # Until we can use sets - unique_materials= {} - unique_material_images= {} - unique_smooth_groups= {} - # unique_obects= {} - no use for this variable since the objects are stored in the face. - - # when there are faces that end with \ - # it means they are multiline- - # since we use xreadline we cant skip to the next line - # so we need to know weather - context_multi_line= '' - - print '\tparsing obj file "%s"...' 
% filepath, - time_sub= sys.time() - - file= open(filepath, 'rU') - for line in file: #.xreadlines(): - line = line.lstrip() # rare cases there is white space at the start of the line - - if line.startswith('v '): - line_split= line.split() - # rotate X90: (x,-z,y) - verts_loc.append( (float_func(line_split[1]), -float_func(line_split[3]), float_func(line_split[2])) ) - - elif line.startswith('vn '): - pass - - elif line.startswith('vt '): - line_split= line.split() - verts_tex.append( (float_func(line_split[1]), float_func(line_split[2])) ) - - # Handel faces lines (as faces) and the second+ lines of fa multiline face here - # use 'f' not 'f ' because some objs (very rare have 'fo ' for faces) - elif line.startswith('f') or context_multi_line == 'f': - - if context_multi_line: - # use face_vert_loc_indicies and face_vert_tex_indicies previously defined and used the obj_face - line_split= line.split() - - else: - line_split= line[2:].split() - face_vert_loc_indicies= [] - face_vert_tex_indicies= [] - - # Instance a face - faces.append((\ - face_vert_loc_indicies,\ - face_vert_tex_indicies,\ - context_material,\ - context_smooth_group,\ - context_object\ - )) - - if strip_slash(line_split): - context_multi_line = 'f' - else: - context_multi_line = '' - - for v in line_split: - obj_vert= v.split('/') - - vert_loc_index= int(obj_vert[0])-1 - # Add the vertex to the current group - # *warning*, this wont work for files that have groups defined around verts - if POLYGROUPS and context_vgroup: - vertex_groups[context_vgroup].append(vert_loc_index) - - # Make relative negative vert indicies absolute - if vert_loc_index < 0: - vert_loc_index= len(verts_loc) + vert_loc_index + 1 - - face_vert_loc_indicies.append(vert_loc_index) - - if len(obj_vert)>1 and obj_vert[1]: - # formatting for faces with normals and textures us - # loc_index/tex_index/nor_index - - vert_tex_index= int(obj_vert[1])-1 - # Make relative negative vert indicies absolute - if vert_tex_index < 0: - vert_tex_index= len(verts_tex) + vert_tex_index + 1 - - face_vert_tex_indicies.append(vert_tex_index) - else: - # dummy - face_vert_tex_indicies.append(0) - - if len(face_vert_loc_indicies) > 4: - has_ngons= True - - elif CREATE_EDGES and (line.startswith('l ') or context_multi_line == 'l'): - # very similar to the face load function above with some parts removed - - if context_multi_line: - # use face_vert_loc_indicies and face_vert_tex_indicies previously defined and used the obj_face - line_split= line.split() - - else: - line_split= line[2:].split() - face_vert_loc_indicies= [] - face_vert_tex_indicies= [] - - # Instance a face - faces.append((\ - face_vert_loc_indicies,\ - face_vert_tex_indicies,\ - context_material,\ - context_smooth_group,\ - context_object\ - )) - - if strip_slash(line_split): - context_multi_line = 'l' - else: - context_multi_line = '' - - isline= line.startswith('l') - - for v in line_split: - vert_loc_index= int(v)-1 - - # Make relative negative vert indicies absolute - if vert_loc_index < 0: - vert_loc_index= len(verts_loc) + vert_loc_index + 1 - - face_vert_loc_indicies.append(vert_loc_index) - - elif line.startswith('s'): - if CREATE_SMOOTH_GROUPS: - context_smooth_group= line_value(line.split()) - if context_smooth_group=='off': - context_smooth_group= None - elif context_smooth_group: # is not None - unique_smooth_groups[context_smooth_group]= None - - elif line.startswith('o'): - if SPLIT_OBJECTS: - context_object= line_value(line.split()) - # unique_obects[context_object]= None - - elif 
line.startswith('g'): - if SPLIT_GROUPS: - context_object= line_value(line.split()) - # print 'context_object', context_object - # unique_obects[context_object]= None - elif POLYGROUPS: - context_vgroup = line_value(line.split()) - if context_vgroup and context_vgroup != '(null)': - vertex_groups.setdefault(context_vgroup, []) - else: - context_vgroup = None # dont assign a vgroup - - elif line.startswith('usemtl'): - context_material= line_value(line.split()) - unique_materials[context_material]= None - elif line.startswith('mtllib'): # usemap or usemat - material_libs.extend( line.split()[1:] ) # can have multiple mtllib filenames per line - - - # Nurbs support - elif line.startswith('cstype '): - context_nurbs['cstype']= line_value(line.split()) # 'rat bspline' / 'bspline' - elif line.startswith('curv ') or context_multi_line == 'curv': - line_split= line.split() - - curv_idx = context_nurbs['curv_idx'] = context_nurbs.get('curv_idx', []) # incase were multiline - - if not context_multi_line: - context_nurbs['curv_range'] = float_func(line_split[1]), float_func(line_split[2]) - line_split[0:3] = [] # remove first 3 items - - if strip_slash(line_split): - context_multi_line = 'curv' - else: - context_multi_line = '' - - - for i in line_split: - vert_loc_index = int(i)-1 - - if vert_loc_index < 0: - vert_loc_index= len(verts_loc) + vert_loc_index + 1 - - curv_idx.append(vert_loc_index) - - elif line.startswith('parm') or context_multi_line == 'parm': - line_split= line.split() - - if context_multi_line: - context_multi_line = '' - else: - context_parm = line_split[1] - line_split[0:2] = [] # remove first 2 - - if strip_slash(line_split): - context_multi_line = 'parm' - else: - context_multi_line = '' - - if context_parm.lower() == 'u': - context_nurbs.setdefault('parm_u', []).extend( [float_func(f) for f in line_split] ) - elif context_parm.lower() == 'v': # surfaces not suported yet - context_nurbs.setdefault('parm_v', []).extend( [float_func(f) for f in line_split] ) - # else: # may want to support other parm's ? - - elif line.startswith('deg '): - context_nurbs['deg']= [int(i) for i in line.split()[1:]] - elif line.startswith('end'): - # Add the nurbs curve - if context_object: - context_nurbs['name'] = context_object - nurbs.append(context_nurbs) - context_nurbs = {} - context_parm = '' - - ''' # How to use usemap? depricated? - elif line.startswith('usema'): # usemap or usemat - context_image= line_value(line.split()) - ''' - - file.close() - time_new= sys.time() - print '%.4f sec' % (time_new-time_sub) - time_sub= time_new - - - print '\tloading materials and images...', - create_materials(filepath, material_libs, unique_materials, unique_material_images, IMAGE_SEARCH) - - time_new= sys.time() - print '%.4f sec' % (time_new-time_sub) - time_sub= time_new - - if not ROTATE_X90: - verts_loc[:] = [(v[0], v[2], -v[1]) for v in verts_loc] - - # deselect all - scn = bpy.data.scenes.active - scn.objects.selected = [] - new_objects= [] # put new objects here - - print '\tbuilding geometry...\n\tverts:%i faces:%i materials: %i smoothgroups:%i ...' 
% ( len(verts_loc), len(faces), len(unique_materials), len(unique_smooth_groups) ), - # Split the mesh by objects/materials, may - if SPLIT_OBJECTS or SPLIT_GROUPS: SPLIT_OB_OR_GROUP = True - else: SPLIT_OB_OR_GROUP = False - - for verts_loc_split, faces_split, unique_materials_split, dataname in split_mesh(verts_loc, faces, unique_materials, filepath, SPLIT_OB_OR_GROUP, SPLIT_MATERIALS): - # Create meshes from the data, warning 'vertex_groups' wont support splitting - create_mesh(scn, new_objects, has_ngons, CREATE_FGONS, CREATE_EDGES, verts_loc_split, verts_tex, faces_split, unique_materials_split, unique_material_images, unique_smooth_groups, vertex_groups, dataname) - - # nurbs support - for context_nurbs in nurbs: - create_nurbs(scn, context_nurbs, verts_loc, new_objects) - - - axis_min= [ 1000000000]*3 - axis_max= [-1000000000]*3 - - if CLAMP_SIZE: - # Get all object bounds - for ob in new_objects: - for v in ob.getBoundBox(): - for axis, value in enumerate(v): - if axis_min[axis] > value: axis_min[axis]= value - if axis_max[axis] < value: axis_max[axis]= value - - # Scale objects - max_axis= max(axis_max[0]-axis_min[0], axis_max[1]-axis_min[1], axis_max[2]-axis_min[2]) - scale= 1.0 - - while CLAMP_SIZE < max_axis * scale: - scale= scale/10.0 - - for ob in new_objects: - ob.setSize(scale, scale, scale) - - # Better rotate the vert locations - #if not ROTATE_X90: - # for ob in new_objects: - # ob.RotX = -1.570796326794896558 - - time_new= sys.time() - - print '%.4f sec' % (time_new-time_sub) - print 'finished importing: "%s" in %.4f sec.' % (filepath, (time_new-time_main)) - - -DEBUG= True - - -def load_obj_ui(filepath, BATCH_LOAD= False): - if BPyMessages.Error_NoFile(filepath): - return - - global CREATE_SMOOTH_GROUPS, CREATE_FGONS, CREATE_EDGES, SPLIT_OBJECTS, SPLIT_GROUPS, SPLIT_MATERIALS, CLAMP_SIZE, IMAGE_SEARCH, POLYGROUPS, KEEP_VERT_ORDER, ROTATE_X90 - - CREATE_SMOOTH_GROUPS= Draw.Create(0) - CREATE_FGONS= Draw.Create(1) - CREATE_EDGES= Draw.Create(1) - SPLIT_OBJECTS= Draw.Create(0) - SPLIT_GROUPS= Draw.Create(0) - SPLIT_MATERIALS= Draw.Create(0) - CLAMP_SIZE= Draw.Create(10.0) - IMAGE_SEARCH= Draw.Create(1) - POLYGROUPS= Draw.Create(0) - KEEP_VERT_ORDER= Draw.Create(1) - ROTATE_X90= Draw.Create(1) - - - # Get USER Options - # Note, Works but not pretty, instead use a more complicated GUI - ''' - pup_block= [\ - 'Import...',\ - ('Smooth Groups', CREATE_SMOOTH_GROUPS, 'Surround smooth groups by sharp edges'),\ - ('Create FGons', CREATE_FGONS, 'Import faces with more then 4 verts as fgons.'),\ - ('Lines', CREATE_EDGES, 'Import lines and faces with 2 verts as edges'),\ - 'Separate objects from obj...',\ - ('Object', SPLIT_OBJECTS, 'Import OBJ Objects into Blender Objects'),\ - ('Group', SPLIT_GROUPS, 'Import OBJ Groups into Blender Objects'),\ - ('Material', SPLIT_MATERIALS, 'Import each material into a seperate mesh (Avoids > 16 per mesh error)'),\ - 'Options...',\ - ('Keep Vert Order', KEEP_VERT_ORDER, 'Keep vert and face order, disables some other options.'),\ - ('Clamp Scale:', CLAMP_SIZE, 0.0, 1000.0, 'Clamp the size to this maximum (Zero to Disable)'),\ - ('Image Search', IMAGE_SEARCH, 'Search subdirs for any assosiated images (Warning, may be slow)'),\ - ] - - if not Draw.PupBlock('Import OBJ...', pup_block): - return - - if KEEP_VERT_ORDER.val: - SPLIT_OBJECTS.val = False - SPLIT_GROUPS.val = False - SPLIT_MATERIALS.val = False - ''' - - - - # BEGIN ALTERNATIVE UI ******************* - if True: - - EVENT_NONE = 0 - EVENT_EXIT = 1 - EVENT_REDRAW = 2 - EVENT_IMPORT = 3 - - 
GLOBALS = {} - GLOBALS['EVENT'] = EVENT_REDRAW - #GLOBALS['MOUSE'] = Window.GetMouseCoords() - GLOBALS['MOUSE'] = [i/2 for i in Window.GetScreenSize()] - - def obj_ui_set_event(e,v): - GLOBALS['EVENT'] = e - - def do_split(e,v): - global SPLIT_OBJECTS, SPLIT_GROUPS, SPLIT_MATERIALS, KEEP_VERT_ORDER, POLYGROUPS - if SPLIT_OBJECTS.val or SPLIT_GROUPS.val or SPLIT_MATERIALS.val: - KEEP_VERT_ORDER.val = 0 - POLYGROUPS.val = 0 - else: - KEEP_VERT_ORDER.val = 1 - - def do_vertorder(e,v): - global SPLIT_OBJECTS, SPLIT_GROUPS, SPLIT_MATERIALS, KEEP_VERT_ORDER - if KEEP_VERT_ORDER.val: - SPLIT_OBJECTS.val = SPLIT_GROUPS.val = SPLIT_MATERIALS.val = 0 - else: - if not (SPLIT_OBJECTS.val or SPLIT_GROUPS.val or SPLIT_MATERIALS.val): - KEEP_VERT_ORDER.val = 1 - - def do_polygroups(e,v): - global SPLIT_OBJECTS, SPLIT_GROUPS, SPLIT_MATERIALS, KEEP_VERT_ORDER, POLYGROUPS - if POLYGROUPS.val: - SPLIT_OBJECTS.val = SPLIT_GROUPS.val = SPLIT_MATERIALS.val = 0 - - def do_help(e,v): - url = __url__[0] - print 'Trying to open web browser with documentation at this address...' - print '\t' + url - - try: - import webbrowser - webbrowser.open(url) - except: - print '...could not open a browser window.' - - def obj_ui(): - ui_x, ui_y = GLOBALS['MOUSE'] - - # Center based on overall pup size - ui_x -= 165 - ui_y -= 90 - - global CREATE_SMOOTH_GROUPS, CREATE_FGONS, CREATE_EDGES, SPLIT_OBJECTS, SPLIT_GROUPS, SPLIT_MATERIALS, CLAMP_SIZE, IMAGE_SEARCH, POLYGROUPS, KEEP_VERT_ORDER, ROTATE_X90 - - Draw.Label('Import...', ui_x+9, ui_y+159, 220, 21) - Draw.BeginAlign() - CREATE_SMOOTH_GROUPS = Draw.Toggle('Smooth Groups', EVENT_NONE, ui_x+9, ui_y+139, 110, 20, CREATE_SMOOTH_GROUPS.val, 'Surround smooth groups by sharp edges') - CREATE_FGONS = Draw.Toggle('NGons as FGons', EVENT_NONE, ui_x+119, ui_y+139, 110, 20, CREATE_FGONS.val, 'Import faces with more then 4 verts as fgons') - CREATE_EDGES = Draw.Toggle('Lines as Edges', EVENT_NONE, ui_x+229, ui_y+139, 110, 20, CREATE_EDGES.val, 'Import lines and faces with 2 verts as edges') - Draw.EndAlign() - - Draw.Label('Separate objects by OBJ...', ui_x+9, ui_y+110, 220, 20) - Draw.BeginAlign() - SPLIT_OBJECTS = Draw.Toggle('Object', EVENT_REDRAW, ui_x+9, ui_y+89, 55, 21, SPLIT_OBJECTS.val, 'Import OBJ Objects into Blender Objects', do_split) - SPLIT_GROUPS = Draw.Toggle('Group', EVENT_REDRAW, ui_x+64, ui_y+89, 55, 21, SPLIT_GROUPS.val, 'Import OBJ Groups into Blender Objects', do_split) - SPLIT_MATERIALS = Draw.Toggle('Material', EVENT_REDRAW, ui_x+119, ui_y+89, 60, 21, SPLIT_MATERIALS.val, 'Import each material into a seperate mesh (Avoids > 16 per mesh error)', do_split) - Draw.EndAlign() - - # Only used for user feedback - KEEP_VERT_ORDER = Draw.Toggle('Keep Vert Order', EVENT_REDRAW, ui_x+184, ui_y+89, 113, 21, KEEP_VERT_ORDER.val, 'Keep vert and face order, disables split options, enable for morph targets', do_vertorder) - - ROTATE_X90 = Draw.Toggle('-X90', EVENT_REDRAW, ui_x+302, ui_y+89, 38, 21, ROTATE_X90.val, 'Rotate X 90.') - - Draw.Label('Options...', ui_x+9, ui_y+60, 211, 20) - CLAMP_SIZE = Draw.Number('Clamp Scale: ', EVENT_NONE, ui_x+9, ui_y+39, 130, 21, CLAMP_SIZE.val, 0.0, 1000.0, 'Clamp the size to this maximum (Zero to Disable)') - POLYGROUPS = Draw.Toggle('Poly Groups', EVENT_REDRAW, ui_x+144, ui_y+39, 90, 21, POLYGROUPS.val, 'Import OBJ groups as vertex groups.', do_polygroups) - IMAGE_SEARCH = Draw.Toggle('Image Search', EVENT_NONE, ui_x+239, ui_y+39, 100, 21, IMAGE_SEARCH.val, 'Search subdirs for any assosiated images (Warning, may be slow)') - 
Draw.BeginAlign() - Draw.PushButton('Online Help', EVENT_REDRAW, ui_x+9, ui_y+9, 110, 21, 'Load the wiki page for this script', do_help) - Draw.PushButton('Cancel', EVENT_EXIT, ui_x+119, ui_y+9, 110, 21, '', obj_ui_set_event) - Draw.PushButton('Import', EVENT_IMPORT, ui_x+229, ui_y+9, 110, 21, 'Import with these settings', obj_ui_set_event) - Draw.EndAlign() - - - # hack so the toggle buttons redraw. this is not nice at all - while GLOBALS['EVENT'] not in (EVENT_EXIT, EVENT_IMPORT): - Draw.UIBlock(obj_ui, 0) - - if GLOBALS['EVENT'] != EVENT_IMPORT: - return - - # END ALTERNATIVE UI ********************* - - - - - - - - Window.WaitCursor(1) - - if BATCH_LOAD: # load the dir - try: - files= [ f for f in os.listdir(filepath) if f.lower().endswith('.obj') ] - except: - Window.WaitCursor(0) - Draw.PupMenu('Error%t|Could not open path ' + filepath) - return - - if not files: - Window.WaitCursor(0) - Draw.PupMenu('Error%t|No files at path ' + filepath) - return - - for f in files: - scn= bpy.data.scenes.new( stripExt(f) ) - scn.makeCurrent() - - load_obj(sys.join(filepath, f),\ - CLAMP_SIZE.val,\ - CREATE_FGONS.val,\ - CREATE_SMOOTH_GROUPS.val,\ - CREATE_EDGES.val,\ - SPLIT_OBJECTS.val,\ - SPLIT_GROUPS.val,\ - SPLIT_MATERIALS.val,\ - ROTATE_X90.val,\ - IMAGE_SEARCH.val,\ - POLYGROUPS.val - ) - - else: # Normal load - load_obj(filepath,\ - CLAMP_SIZE.val,\ - CREATE_FGONS.val,\ - CREATE_SMOOTH_GROUPS.val,\ - CREATE_EDGES.val,\ - SPLIT_OBJECTS.val,\ - SPLIT_GROUPS.val,\ - SPLIT_MATERIALS.val,\ - ROTATE_X90.val,\ - IMAGE_SEARCH.val,\ - POLYGROUPS.val - ) - - Window.WaitCursor(0) - - -def load_obj_ui_batch(file): - load_obj_ui(file, True) - -DEBUG= False - -if __name__=='__main__' and not DEBUG: - if os and Window.GetKeyQualifiers() & Window.Qual.SHIFT: - Window.FileSelector(load_obj_ui_batch, 'Import OBJ Dir', '') - else: - Window.FileSelector(load_obj_ui, 'Import a Wavefront OBJ', '*.obj') - - # For testing compatibility -''' -else: - # DEBUG ONLY - TIME= sys.time() - DIR = '/fe/obj' - import os - print 'Searching for files' - def fileList(path): - for dirpath, dirnames, filenames in os.walk(path): - for filename in filenames: - yield os.path.join(dirpath, filename) - - files = [f for f in fileList(DIR) if f.lower().endswith('.obj')] - files.sort() - - for i, obj_file in enumerate(files): - if 0 < i < 20: - print 'Importing', obj_file, '\nNUMBER', i, 'of', len(files) - newScn= bpy.data.scenes.new(os.path.basename(obj_file)) - newScn.makeCurrent() - load_obj(obj_file, False, IMAGE_SEARCH=0) - - print 'TOTAL TIME: %.6f' % (sys.time() - TIME) -''' -#load_obj('/test.obj') -#load_obj('/fe/obj/mba1.obj') diff --git a/release/scripts/import_web3d.py b/release/scripts/import_web3d.py deleted file mode 100644 index a5547506dc7..00000000000 --- a/release/scripts/import_web3d.py +++ /dev/null @@ -1,2594 +0,0 @@ -#!BPY -""" -Name: 'X3D & VRML97 (.x3d / wrl)...' -Blender: 248 -Group: 'Import' -Tooltip: 'Load an X3D or VRML97 file' -""" - -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# (C) Copyright 2008 Paravizion -# Written by Campbell Barton aka Ideasman42 -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -__author__ = "Campbell Barton" -__url__ = ['www.blender.org', 'blenderartists.org', 'http://wiki.blender.org/index.php/Scripts/Manual/Import/X3D_VRML97'] -__version__ = "0.1" - -__bpydoc__ = """\ -This script is an importer for the X3D and VRML97 file formats. -""" - -DEBUG = False - -# This should work without a blender at all -try: - from Blender.sys import exists -except: - from os.path import exists - -def baseName(path): - return path.split('/')[-1].split('\\')[-1] - -def dirName(path): - return path[:-len(baseName(path))] - -def imageConvertCompat(path): - - try: import os - except: return path - if os.sep=='\\': return path # assime win32 has quicktime, dont convert - - if path.lower().endswith('.gif'): - path_to = path[:-3] + 'png' - - ''' - if exists(path_to): - return path_to - ''' - # print '\n'+path+'\n'+path_to+'\n' - os.system('convert "%s" "%s"' % (path, path_to)) # for now just hope we have image magick - - if exists(path_to): - return path_to - - return path - -# notes -# transform are relative -# order dosnt matter for loc/size/rot -# right handed rotation -# angles are in radians -# rotation first defines axis then ammount in radians - - - -# =============================== VRML Spesific - - -def vrmlFormat(data): - ''' - Keep this as a valid vrml file, but format in a way we can predict. - ''' - # Strip all commends - # not in strings - warning multiline strings are ignored. - def strip_comment(l): - #l = ' '.join(l.split()) - l = l.strip() - - if l.startswith('#'): - return '' - - i = l.find('#') - - if i==-1: - return l - - # Most cases accounted for! if we have a comment at the end of the line do this... 
- #j = l.find('url "') - j = l.find('"') - - if j == -1: # simple no strings - return l[:i].strip() - - q = False - for i,c in enumerate(l): - if c == '"': - q = not q # invert - - elif c == '#': - if q==False: - return l[:i-1] - - return l - - data = '\n'.join([strip_comment(l) for l in data.split('\n') ]) # remove all whitespace - - EXTRACT_STRINGS = True # only needed when strings or filesnames containe ,[]{} chars :/ - - if EXTRACT_STRINGS: - - # We need this so we can detect URL's - data = '\n'.join([' '.join(l.split()) for l in data.split('\n')]) # remove all whitespace - - string_ls = [] - - #search = 'url "' - search = '"' - - ok = True - last_i = 0 - while ok: - ok = False - i = data.find(search, last_i) - if i != -1: - - start = i + len(search) # first char after end of search - end = data.find('"', start) - if end != -1: - item = data[start:end] - string_ls.append( item ) - data = data[:start] + data[end:] - ok = True # keep looking - - last_i = (end - len(item)) + 1 - # print last_i, item, '|' + data[last_i] + '|' - - # done with messy extracting strings part - - - # Bad, dont take strings into account - ''' - data = data.replace('#', '\n#') - data = '\n'.join([ll for l in data.split('\n') for ll in (l.strip(),) if not ll.startswith('#')]) # remove all whitespace - ''' - data = data.replace('{', '\n{\n') - data = data.replace('}', '\n}\n') - data = data.replace('[', '\n[\n') - data = data.replace(']', '\n]\n') - data = data.replace(',', ' , ') # make sure comma's seperate - - if EXTRACT_STRINGS: - # add strings back in - - search = '"' # fill in these empty strings - - ok = True - last_i = 0 - while ok: - ok = False - i = data.find(search + '"', last_i) - # print i - if i != -1: - start = i + len(search) # first char after end of search - item = string_ls.pop(0) - # print item - data = data[:start] + item + data[start:] - - last_i = start + len(item) + 1 - - ok = True - - - # More annoying obscure cases where USE or DEF are placed on a newline - # data = data.replace('\nDEF ', ' DEF ') - # data = data.replace('\nUSE ', ' USE ') - - data = '\n'.join([' '.join(l.split()) for l in data.split('\n')]) # remove all whitespace - - # Better to parse the file accounting for multiline arrays - ''' - data = data.replace(',\n', ' , ') # remove line endings with commas - data = data.replace(']', '\n]\n') # very very annoying - but some comma's are at the end of the list, must run this again. - ''' - - return [l for l in data.split('\n') if l] - -NODE_NORMAL = 1 # {} -NODE_ARRAY = 2 # [] -NODE_REFERENCE = 3 # USE foobar -# NODE_PROTO = 4 # - -lines = [] - -def getNodePreText(i, words): - # print lines[i] - use_node = False - while len(words) < 5: - - if i>=len(lines): - break - ''' - elif lines[i].startswith('PROTO'): - return NODE_PROTO, i+1 - ''' - elif lines[i]=='{': - # words.append(lines[i]) # no need - # print "OK" - return NODE_NORMAL, i+1 - elif lines[i].count('"') % 2 != 0: # odd number of quotes? - part of a string. - # print 'ISSTRING' - break - else: - new_words = lines[i].split() - if 'USE' in new_words: - use_node = True - - words.extend(new_words) - i += 1 - - # Check for USE node - no { - # USE #id - should always be on the same line. 
- if use_node: - # print 'LINE', i, words[:words.index('USE')+2] - words[:] = words[:words.index('USE')+2] - if lines[i] == '{' and lines[i+1] == '}': - # USE sometimes has {} after it anyway - i+=2 - return NODE_REFERENCE, i - - # print "error value!!!", words - return 0, -1 - -def is_nodeline(i, words): - - if not lines[i][0].isalpha(): - return 0, 0 - - #if lines[i].startswith('field'): - # return 0, 0 - - # Is this a prototype?? - if lines[i].startswith('PROTO'): - words[:] = lines[i].split() - return NODE_NORMAL, i+1 # TODO - assumes the next line is a '[\n', skip that - if lines[i].startswith('EXTERNPROTO'): - words[:] = lines[i].split() - return NODE_ARRAY, i+1 # TODO - assumes the next line is a '[\n', skip that - - ''' - proto_type, new_i = is_protoline(i, words, proto_field_defs) - if new_i != -1: - return proto_type, new_i - ''' - - # Simple "var [" type - if lines[i+1] == '[': - if lines[i].count('"') % 2 == 0: - words[:] = lines[i].split() - return NODE_ARRAY, i+2 - - node_type, new_i = getNodePreText(i, words) - - if not node_type: - if DEBUG: print "not node_type", lines[i] - return 0, 0 - - # Ok, we have a { after some values - # Check the values are not fields - for i, val in enumerate(words): - if i != 0 and words[i-1] in ('DEF', 'USE'): - # ignore anything after DEF, it is a ID and can contain any chars. - pass - elif val[0].isalpha() and val not in ('TRUE', 'FALSE'): - pass - else: - # There is a number in one of the values, therefor we are not a node. - return 0, 0 - - #if node_type==NODE_REFERENCE: - # print words, "REF_!!!!!!!" - return node_type, new_i - -def is_numline(i): - ''' - Does this line start with a number? - ''' - - # Works but too slow. - ''' - l = lines[i] - for w in l.split(): - if w==',': - pass - else: - try: - float(w) - return True - - except: - return False - - return False - ''' - - l = lines[i] - - line_start = 0 - - if l.startswith(', '): - line_start += 2 - - line_end = len(l)-1 - line_end_new = l.find(' ', line_start) # comma's always have a space before them - - if line_end_new != -1: - line_end = line_end_new - - try: - float(l[line_start:line_end]) # works for a float or int - return True - except: - return False - - -class vrmlNode(object): - __slots__ = 'id', 'fields', 'proto_node', 'proto_field_defs', 'proto_fields', 'node_type', 'parent', 'children', 'parent', 'array_data', 'reference', 'lineno', 'filename', 'blendObject', 'DEF_NAMESPACE', 'ROUTE_IPO_NAMESPACE', 'PROTO_NAMESPACE', 'x3dNode' - def __init__(self, parent, node_type, lineno): - self.id = None - self.node_type = node_type - self.parent = parent - self.blendObject = None - self.x3dNode = None # for x3d import only - if parent: - parent.children.append(self) - - self.lineno = lineno - - # This is only set from the root nodes. 
- # Having a filename also denotes a root node - self.filename = None - self.proto_node = None # proto field definition eg: "field SFColor seatColor .6 .6 .1" - - # Store in the root node because each inline file needs its own root node and its own namespace - self.DEF_NAMESPACE = None - self.ROUTE_IPO_NAMESPACE = None - ''' - self.FIELD_NAMESPACE = None - ''' - - - self.PROTO_NAMESPACE = None - - self.reference = None - - if node_type==NODE_REFERENCE: - # For references, only the parent and ID are needed - # the reference its self is assigned on parsing - return - - self.fields = [] # fields have no order, in some cases rool level values are not unique so dont use a dict - - self.proto_field_defs = [] # proto field definition eg: "field SFColor seatColor .6 .6 .1" - self.proto_fields = [] # proto field usage "diffuseColor IS seatColor" - self.children = [] - self.array_data = [] # use for arrays of data - should only be for NODE_ARRAY types - - - # Only available from the root node - ''' - def getFieldDict(self): - if self.FIELD_NAMESPACE != None: - return self.FIELD_NAMESPACE - else: - return self.parent.getFieldDict() - ''' - def getProtoDict(self): - if self.PROTO_NAMESPACE != None: - return self.PROTO_NAMESPACE - else: - return self.parent.getProtoDict() - - def getDefDict(self): - if self.DEF_NAMESPACE != None: - return self.DEF_NAMESPACE - else: - return self.parent.getDefDict() - - def getRouteIpoDict(self): - if self.ROUTE_IPO_NAMESPACE != None: - return self.ROUTE_IPO_NAMESPACE - else: - return self.parent.getRouteIpoDict() - - def setRoot(self, filename): - self.filename = filename - # self.FIELD_NAMESPACE = {} - self.DEF_NAMESPACE = {} - self.ROUTE_IPO_NAMESPACE = {} - self.PROTO_NAMESPACE = {} - - def isRoot(self): - if self.filename == None: - return False - else: - return True - - def getFilename(self): - if self.filename: - return self.filename - elif self.parent: - return self.parent.getFilename() - else: - return None - - def getRealNode(self): - if self.reference: - return self.reference - else: - return self - - def getSpec(self): - self_real = self.getRealNode() - try: - return self_real.id[-1] # its possible this node has no spec - except: - return None - - def findSpecRecursive(self, spec): - self_real = self.getRealNode() - if spec == self_real.getSpec(): - return self - - for child in self_real.children: - if child.findSpecRecursive(spec): - return child - - return None - - def getPrefix(self): - if self.id: - return self.id[0] - return None - - def getSpecialTypeName(self, typename): - self_real = self.getRealNode() - try: return self_real.id[ list(self_real.id).index(typename)+1 ] - except: return None - - - def getDefName(self): - return self.getSpecialTypeName('DEF') - - def getProtoName(self): - return self.getSpecialTypeName('PROTO') - - def getExternprotoName(self): - return self.getSpecialTypeName('EXTERNPROTO') - - def getChildrenBySpec(self, node_spec): # spec could be Transform, Shape, Appearance - self_real = self.getRealNode() - # using getSpec functions allows us to use the spec of USE children that dont have their spec in their ID - if type(node_spec) == str: - return [child for child in self_real.children if child.getSpec()==node_spec] - else: - # Check inside a list of optional types - return [child for child in self_real.children if child.getSpec() in node_spec] - - def getChildBySpec(self, node_spec): # spec could be Transform, Shape, Appearance - # Use in cases where there is only ever 1 child of this type - ls = 
self.getChildrenBySpec(node_spec) - if ls: return ls[0] - else: return None - - def getChildrenByName(self, node_name): # type could be geometry, children, appearance - self_real = self.getRealNode() - return [child for child in self_real.children if child.id if child.id[0]==node_name] - - def getChildByName(self, node_name): - self_real = self.getRealNode() - for child in self_real.children: - if child.id and child.id[0]==node_name: # and child.id[-1]==node_spec: - return child - - def getSerialized(self, results, ancestry): - ''' Return this node and all its children in a flat list ''' - ancestry = ancestry[:] # always use a copy - - # self_real = self.getRealNode() - - results.append((self, tuple(ancestry))) - ancestry.append(self) - for child in self.getRealNode().children: - if child not in ancestry: - # We dont want to load proto's, they are only references - # We could enforce this elsewhere - - # Only add this in a very special case - # where the parent of this object is not the real parent - # - In this case we have added the proto as a child to a node instancing it. - # This is a bit arbitary, but its how Proto's are done with this importer. - if child.getProtoName() == None and child.getExternprotoName() == None: - child.getSerialized(results, ancestry) - else: - - if DEBUG: print 'getSerialized() is proto:', child.getProtoName(), child.getExternprotoName(), self.getSpec() - - self_spec = self.getSpec() - - if child.getProtoName() == self_spec or child.getExternprotoName() == self_spec: - if DEBUG: "FoundProto!" - child.getSerialized(results, ancestry) - - - - return results - - def searchNodeTypeID(self, node_spec, results): - self_real = self.getRealNode() - # print self.lineno, self.id - if self_real.id and self_real.id[-1]==node_spec: # use last element, could also be only element - results.append(self_real) - for child in self_real.children: - child.searchNodeTypeID(node_spec, results) - return results - - def getFieldName(self, field, ancestry, AS_CHILD=False): - self_real = self.getRealNode() # incase we're an instance - - for f in self_real.fields: - # print f - if f and f[0] == field: - # print '\tfound field', f - - if len(f)>=3 and f[1] == 'IS': # eg: 'diffuseColor IS legColor' - field_id = f[2] - - # print "\n\n\n\n\n\nFOND IS!!!" - f_proto_lookup = None - f_proto_child_lookup = None - i = len(ancestry) - while i: - i -= 1 - node = ancestry[i] - node = node.getRealNode() - - # proto settings are stored in "self.proto_node" - if node.proto_node: - # Get the default value from the proto, this can be overwridden by the proto instace - # 'field SFColor legColor .8 .4 .7' - if AS_CHILD: - for child in node.proto_node.children: - #if child.id and len(child.id) >= 3 and child.id[2]==field_id: - if child.id and ('point' in child.id or 'points' in child.id): - f_proto_child_lookup = child - - else: - for f_def in node.proto_node.proto_field_defs: - if len(f_def) >= 4: - if f_def[0]=='field' and f_def[2]==field_id: - f_proto_lookup = f_def[3:] - - # Node instance, Will be 1 up from the proto-node in the ancestry list. but NOT its parent. 
- # This is the setting as defined by the instance, including this setting is optional, - # and will override the default PROTO value - # eg: 'legColor 1 0 0' - if AS_CHILD: - for child in node.children: - if child.id and child.id[0]==field_id: - f_proto_child_lookup = child - else: - for f_def in node.fields: - if len(f_def) >= 2: - if f_def[0]==field_id: - if DEBUG: print "getFieldName(), found proto", f_def - f_proto_lookup = f_def[1:] - - - if AS_CHILD: - if f_proto_child_lookup: - if DEBUG: - print "getFieldName() - AS_CHILD=True, child found" - print f_proto_child_lookup - return f_proto_child_lookup - else: - return f_proto_lookup - else: - if AS_CHILD: - return None - else: - # Not using a proto - return f[1:] - - # print '\tfield not found', field - - - # See if this is a proto name - if AS_CHILD: - child_array = None - for child in self_real.children: - if child.id and len(child.id) == 1 and child.id[0] == field: - return child - - return None - - def getFieldAsInt(self, field, default, ancestry): - self_real = self.getRealNode() # incase we're an instance - - f = self_real.getFieldName(field, ancestry) - if f==None: return default - if ',' in f: f = f[:f.index(',')] # strip after the comma - - if len(f) != 1: - print '\t"%s" wrong length for int conversion for field "%s"' % (f, field) - return default - - try: - return int(f[0]) - except: - print '\tvalue "%s" could not be used as an int for field "%s"' % (f[0], field) - return default - - def getFieldAsFloat(self, field, default, ancestry): - self_real = self.getRealNode() # incase we're an instance - - f = self_real.getFieldName(field, ancestry) - if f==None: return default - if ',' in f: f = f[:f.index(',')] # strip after the comma - - if len(f) != 1: - print '\t"%s" wrong length for float conversion for field "%s"' % (f, field) - return default - - try: - return float(f[0]) - except: - print '\tvalue "%s" could not be used as a float for field "%s"' % (f[0], field) - return default - - def getFieldAsFloatTuple(self, field, default, ancestry): - self_real = self.getRealNode() # incase we're an instance - - f = self_real.getFieldName(field, ancestry) - if f==None: return default - # if ',' in f: f = f[:f.index(',')] # strip after the comma - - if len(f) < 1: - print '"%s" wrong length for float tuple conversion for field "%s"' % (f, field) - return default - - ret = [] - for v in f: - if v != ',': - try: ret.append(float(v)) - except: break # quit of first non float, perhaps its a new field name on the same line? 
- if so we are going to ignore it :/ TODO - # print ret - - if ret: - return ret - if not ret: - print '\tvalue "%s" could not be used as a float tuple for field "%s"' % (f, field) - return default - - def getFieldAsBool(self, field, default, ancestry): - self_real = self.getRealNode() # incase we're an instance - - f = self_real.getFieldName(field, ancestry) - if f==None: return default - if ',' in f: f = f[:f.index(',')] # strip after the comma - - if len(f) != 1: - print '\t"%s" wrong length for bool conversion for field "%s"' % (f, field) - return default - - if f[0].upper()=='"TRUE"' or f[0].upper()=='TRUE': - return True - elif f[0].upper()=='"FALSE"' or f[0].upper()=='FALSE': - return False - else: - print '\t"%s" could not be used as a bool for field "%s"' % (f[1], field) - return default - - def getFieldAsString(self, field, default, ancestry): - self_real = self.getRealNode() # incase we're an instance - - f = self_real.getFieldName(field, ancestry) - if f==None: return default - if len(f) < 1: - print '\t"%s" wrong length for string conversion for field "%s"' % (f, field) - return default - - if len(f) > 1: - # String may contain spaces - st = ' '.join(f) - else: - st = f[0] - - # X3D HACK - if self.x3dNode: - return st - - if st[0]=='"' and st[-1]=='"': - return st[1:-1] - else: - print '\tvalue "%s" could not be used as a string for field "%s"' % (f[0], field) - return default - - def getFieldAsArray(self, field, group, ancestry): - ''' - For this parser arrays are children - ''' - self_real = self.getRealNode() # incase we're an instance - - child_array = self_real.getFieldName(field, ancestry, True) - - #if type(child_array)==list: # happens occasionaly - # array_data = child_array - - if child_array==None: - - # For x3d, should work ok with vrml too - # for x3d arrays are fields, vrml they are nodes, annoying but not tooo bad. - data_split = self.getFieldName(field, ancestry) - if not data_split: - return [] - array_data = ' '.join(data_split) - if array_data == None: - return [] - - array_data = array_data.replace(',', ' ') - data_split = array_data.split() - try: - array_data = [int(val) for val in data_split] - except: - try: - array_data = [float(val) for val in data_split] - except: - print '\tWarning, could not parse array data from field' - array_data = [] - else: - # print child_array - # Normal vrml - array_data = child_array.array_data - - - # print 'array_data', array_data - - if group==-1 or len(array_data)==0: - return array_data - - # We want a flat list - flat = True - for item in array_data: - if type(item) == list: - flat = False - break - - # make a flat array - if flat: - flat_array = array_data # we are alredy flat. 
- else: - flat_array = [] - - def extend_flat(ls): - for item in ls: - if type(item)==list: extend_flat(item) - else: flat_array.append(item) - - extend_flat(array_data) - - - # We requested a flat array - if group == 0: - return flat_array - - new_array = [] - sub_array = [] - - for item in flat_array: - sub_array.append(item) - if len(sub_array)==group: - new_array.append(sub_array) - sub_array = [] - - if sub_array: - print '\twarning, array was not aligned to requested grouping', group, 'remaining value', sub_array - - return new_array - - def getFieldAsStringArray(self, field, ancestry): - ''' - Get a list of strings - ''' - self_real = self.getRealNode() # incase we're an instance - - child_array = None - for child in self_real.children: - if child.id and len(child.id) == 1 and child.id[0] == field: - child_array = child - break - if not child_array: - return [] - - # each string gets its own list, remove ""'s - try: - new_array = [f[0][1:-1] for f in child_array.fields] - except: - print '\twarning, string array could not be made' - new_array = [] - - return new_array - - - def getLevel(self): - # Ignore self_real - level = 0 - p = self.parent - while p: - level +=1 - p = p.parent - if not p: break - - return level - - def __repr__(self): - level = self.getLevel() - ind = ' ' * level - if self.node_type==NODE_REFERENCE: - brackets = '' - elif self.node_type==NODE_NORMAL: - brackets = '{}' - else: - brackets = '[]' - - if brackets: - text = ind + brackets[0] + '\n' - else: - text = '' - - text += ind + 'ID: ' + str(self.id) + ' ' + str(level) + (' lineno %d\n' % self.lineno) - - if self.node_type==NODE_REFERENCE: - text += ind + "(reference node)\n" - return text - - if self.proto_node: - text += ind + 'PROTO NODE...\n' - text += str(self.proto_node) - text += ind + 'PROTO NODE_DONE\n' - - text += ind + 'FIELDS:' + str(len(self.fields)) + '\n' - - for i,item in enumerate(self.fields): - text += ind + 'FIELD:\n' - text += ind + str(item) +'\n' - - text += ind + 'PROTO_FIELD_DEFS:' + str(len(self.proto_field_defs)) + '\n' - - for i,item in enumerate(self.proto_field_defs): - text += ind + 'PROTO_FIELD:\n' - text += ind + str(item) +'\n' - - text += ind + 'ARRAY: ' + str(len(self.array_data)) + ' ' + str(self.array_data) + '\n' - #text += ind + 'ARRAY: ' + str(len(self.array_data)) + '[...] \n' - - text += ind + 'CHILDREN: ' + str(len(self.children)) + '\n' - for i, child in enumerate(self.children): - text += ind + ('CHILD%d:\n' % i) - text += str(child) - - text += '\n' + ind + brackets[1] - - return text - - def parse(self, i, IS_PROTO_DATA=False): - new_i = self.__parse(i, IS_PROTO_DATA) - - # print self.id, self.getFilename() - - # Check if this node was an inline or externproto - - url_ls = [] - - if self.node_type == NODE_NORMAL and self.getSpec() == 'Inline': - ancestry = [] # Warning! - PROTO's using this wont work at all. 
- url = self.getFieldAsString('url', None, ancestry) - if url: - url_ls = [(url, None)] - del ancestry - - elif self.getExternprotoName(): - # externproto - url_ls = [] - for f in self.fields: - - if type(f)==str: - f = [f] - - for ff in f: - for f_split in ff.split('"'): - # print f_split - # "someextern.vrml#SomeID" - if '#' in f_split: - - f_split, f_split_id = f_split.split('#') # there should only be 1 # anyway - - url_ls.append( (f_split, f_split_id) ) - else: - url_ls.append( (f_split, None) ) - - - # Was either an Inline or an EXTERNPROTO - if url_ls: - - # print url_ls - - for url, extern_key in url_ls: - print url - urls = [] - urls.append( url ) - urls.append( BPySys.caseInsensitivePath(urls[-1]) ) - - urls.append( dirName(self.getFilename()) + url ) - urls.append( BPySys.caseInsensitivePath(urls[-1]) ) - - urls.append( dirName(self.getFilename()) + baseName(url) ) - urls.append( BPySys.caseInsensitivePath(urls[-1]) ) - - try: - url = [url for url in urls if exists(url)][0] - url_found = True - except: - url_found = False - - if not url_found: - print '\tWarning: Inline URL could not be found:', url - else: - if url==self.getFilename(): - print '\tWarning: cant Inline yourself recursively:', url - else: - - try: - data = gzipOpen(url) - except: - print '\tWarning: cant open the file:', url - data = None - - if data: - # Tricky - inline another VRML - print '\tLoading Inline:"%s"...' % url - - # Watch it! - backup lines - lines_old = lines[:] - - - lines[:] = vrmlFormat( data ) - - lines.insert(0, '{') - lines.insert(0, 'root_node____') - lines.append('}') - ''' - ff = open('/tmp/test.txt', 'w') - ff.writelines([l+'\n' for l in lines]) - ''' - - child = vrmlNode(self, NODE_NORMAL, -1) - child.setRoot(url) # initialized dicts - child.parse(0) - - # if self.getExternprotoName(): - if self.getExternprotoName(): - if not extern_key: # if none is spesified - use the name - extern_key = self.getSpec() - - if extern_key: - - self.children.remove(child) - child.parent = None - - extern_child = child.findSpecRecursive(extern_key) - - if extern_child: - self.children.append(extern_child) - extern_child.parent = self - - if DEBUG: print "\tEXTERNPROTO ID found!:", extern_key - else: - print "\tEXTERNPROTO ID not found!:", extern_key - - # Watch it! - restore lines - lines[:] = lines_old - - return new_i - - def __parse(self, i, IS_PROTO_DATA=False): - ''' - print 'parsing at', i, - print i, self.id, self.lineno - ''' - l = lines[i] - - if l=='[': - # An anonymous list - self.id = None - i+=1 - else: - words = [] - - node_type, new_i = is_nodeline(i, words) - if not node_type: # fail for parsing new node. - print "Failed to parse new node" - raise ValueError - - if self.node_type==NODE_REFERENCE: - # Only assign the reference and quit - key = words[words.index('USE')+1] - self.id = (words[0],) - - self.reference = self.getDefDict()[key] - return new_i - - self.id = tuple(words) - - # fill in DEF/USE - key = self.getDefName() - if key != None: - self.getDefDict()[ key ] = self - - key = self.getProtoName() - if not key: key = self.getExternprotoName() - - proto_dict = self.getProtoDict() - if key != None: - proto_dict[ key ] = self - - # Parse the proto nodes fields - self.proto_node = vrmlNode(self, NODE_ARRAY, new_i) - new_i = self.proto_node.parse(new_i) - - self.children.remove(self.proto_node) - - # print self.proto_node - - new_i += 1 # skip past the { - - - else: # If we're a proto instance, add the proto node as our child. 
- spec = self.getSpec() - try: - self.children.append( proto_dict[spec] ) - #pass - except: - pass - - del spec - - del proto_dict, key - - i = new_i - - # print self.id - ok = True - while ok: - if i>=len(lines): - return len(lines)-1 - - l = lines[i] - # print '\tDEBUG:', i, self.node_type, l - if l=='': - i+=1 - continue - - if l=='}': - if self.node_type != NODE_NORMAL: # also ends proto nodes, we may want a type for these too. - print 'wrong node ending, expected an } ' + str(i) + ' ' + str(self.node_type) - if DEBUG: - raise ValueError - ### print "returning", i - return i+1 - if l==']': - if self.node_type != NODE_ARRAY: - print 'wrong node ending, expected a ] ' + str(i) + ' ' + str(self.node_type) - if DEBUG: - raise ValueError - ### print "returning", i - return i+1 - - node_type, new_i = is_nodeline(i, []) - if node_type: # check text\n{ - child = vrmlNode(self, node_type, i) - i = child.parse(i) - - elif l=='[': # some files have these anonymous lists - child = vrmlNode(self, NODE_ARRAY, i) - i = child.parse(i) - - elif is_numline(i): - l_split = l.split(',') - - values = None - # See if each item is a float? - - for num_type in (int, float): - try: - values = [num_type(v) for v in l_split ] - break - except: - pass - - - try: - values = [[num_type(v) for v in segment.split()] for segment in l_split ] - break - except: - pass - - if values == None: # dont parse - values = l_split - - # This should not extend over multiple lines however it is possible - # print self.array_data - if values: - self.array_data.extend( values ) - i+=1 - else: - words = l.split() - if len(words) > 2 and words[1] == 'USE': - vrmlNode(self, NODE_REFERENCE, i) - else: - - # print "FIELD", i, l - # - #words = l.split() - ### print '\t\ttag', i - # this is a tag/ - # print words, i, l - value = l - # print i - # javastrips can exist as values. - quote_count = l.count('"') - if quote_count % 2: # odd number? - # print 'MULTILINE' - while 1: - i+=1 - l = lines[i] - quote_count = l.count('"') - if quote_count % 2: # odd number? - value += '\n'+ l[:l.rfind('"')] - break # assume - else: - value += '\n'+ l - - value_all = value.split() - - def iskey(k): - if k[0] != '"' and k[0].isalpha() and k.upper() not in ('TRUE', 'FALSE'): - return True - return False - - def split_fields(value): - ''' - key 0.0 otherkey 1,2,3 opt1 opt1 0.0 - -> [key 0.0], [otherkey 1,2,3], [opt1 opt1 0.0] - ''' - field_list = [] - field_context = [] - - for j in xrange(len(value)): - if iskey(value[j]): - if field_context: - # this IS a key but the previous value was not a key, ot it was a defined field. - if (not iskey(field_context[-1])) or ((len(field_context)==3 and field_context[1]=='IS')): - field_list.append(field_context) - - field_context = [value[j]] - else: - # The last item was not a value, multiple keys are needed in some cases. 
- field_context.append(value[j]) - else: - # Is empty, just add this on - field_context.append(value[j]) - else: - # Add a value to the list - field_context.append(value[j]) - - if field_context: - field_list.append(field_context) - - return field_list - - - for value in split_fields(value_all): - # Split - - if value[0]=='field': - # field SFFloat creaseAngle 4 - self.proto_field_defs.append(value) - else: - self.fields.append(value) - i+=1 - -def gzipOpen(path): - try: import gzip - except: gzip = None - - data = None - if gzip: - try: data = gzip.open(path, 'r').read() - except: pass - else: - print '\tNote, gzip module could not be imported, compressed files will fail to load' - - if data==None: - try: data = open(path, 'rU').read() - except: pass - - return data - -def vrml_parse(path): - ''' - Sets up the root node and returns it so load_web3d() can deal with the blender side of things. - Return root (vrmlNode, '') or (None, 'Error String') - ''' - data = gzipOpen(path) - - if data==None: - return None, 'Failed to open file: ' + path - - # Stripped above - lines[:] = vrmlFormat( data ) - - lines.insert(0, '{') - lines.insert(0, 'dymmy_node') - lines.append('}') - # Use for testing our parsed output, so we can check on line numbers. - - ''' - ff = open('/tmp/test.txt', 'w') - ff.writelines([l+'\n' for l in lines]) - ff.close() - ''' - - # Now evaluate it - node_type, new_i = is_nodeline(0, []) - if not node_type: - return None, 'Error: VRML file has no starting Node' - - # Trick to make sure we get all root nodes. - lines.insert(0, '{') - lines.insert(0, 'root_node____') # important the name starts with an ascii char - lines.append('}') - - root = vrmlNode(None, NODE_NORMAL, -1) - root.setRoot(path) # we need to set the root so we have a namespace and know the path incase of inlineing - - # Parse recursively - root.parse(0) - - # This prints a load of text - if DEBUG: - print root - - return root, '' - - -# ====================== END VRML - - - -# ====================== X3d Support - -# Sane as vrml but replace the parser -class x3dNode(vrmlNode): - def __init__(self, parent, node_type, x3dNode): - vrmlNode.__init__(self, parent, node_type, -1) - self.x3dNode = x3dNode - - def parse(self, IS_PROTO_DATA=False): - # print self.x3dNode.tagName - - define = self.x3dNode.getAttributeNode('DEF') - if define: - self.getDefDict()[define.value] = self - else: - use = self.x3dNode.getAttributeNode('USE') - if use: - try: - self.reference = self.getDefDict()[use.value] - self.node_type = NODE_REFERENCE - except: - print '\tWarning: reference', use.value, 'not found' - self.parent.children.remove(self) - - return - - for x3dChildNode in self.x3dNode.childNodes: - if x3dChildNode.nodeType in (x3dChildNode.TEXT_NODE, x3dChildNode.COMMENT_NODE, x3dChildNode.CDATA_SECTION_NODE): - continue - - node_type = NODE_NORMAL - # print x3dChildNode, dir(x3dChildNode) - if x3dChildNode.getAttributeNode('USE'): - node_type = NODE_REFERENCE - - child = x3dNode(self, node_type, x3dChildNode) - child.parse() - - # TODO - x3d Inline - - def getSpec(self): - return self.x3dNode.tagName # should match vrml spec - - def getDefName(self): - data = self.x3dNode.getAttributeNode('DEF') - if data: data.value - return None - - # Other funcs operate from vrml, but this means we can wrap XML fields, still use nice utility funcs - # getFieldAsArray getFieldAsBool etc - def getFieldName(self, field, ancestry, AS_CHILD=False): - # ancestry and AS_CHILD are ignored, only used for VRML now - - self_real = self.getRealNode() # 
incase we're an instance - field_xml = self.x3dNode.getAttributeNode(field) - if field_xml: - value = field_xml.value - - # We may want to edit. for x3d spesific stuff - # Sucks a bit to return the field name in the list but vrml excepts this :/ - return value.split() - else: - return None - -def x3d_parse(path): - ''' - Sets up the root node and returns it so load_web3d() can deal with the blender side of things. - Return root (x3dNode, '') or (None, 'Error String') - ''' - - try: - import xml.dom.minidom - except: - return None, 'Error, import XML parsing module (xml.dom.minidom) failed, install python' - - ''' - try: doc = xml.dom.minidom.parse(path) - except: return None, 'Could not parse this X3D file, XML error' - ''' - - # Could add a try/except here, but a console error is more useful. - data = gzipOpen(path) - - if data==None: - return None, 'Failed to open file: ' + path - - doc = xml.dom.minidom.parseString(data) - - - try: - x3dnode = doc.getElementsByTagName('X3D')[0] - except: - return None, 'Not a valid x3d document, cannot import' - - root = x3dNode(None, NODE_NORMAL, x3dnode) - root.setRoot(path) # so images and Inline's we load have a relative path - root.parse() - - return root, '' - - - -## f = open('/_Cylinder.wrl', 'r') -# f = open('/fe/wrl/Vrml/EGS/TOUCHSN.WRL', 'r') -# vrml_parse('/fe/wrl/Vrml/EGS/TOUCHSN.WRL') -#vrml_parse('/fe/wrl/Vrml/EGS/SCRIPT.WRL') -''' - -import os -files = os.popen('find /fe/wrl -iname "*.wrl"').readlines() -files.sort() -tot = len(files) -for i, f in enumerate(files): - #if i < 801: - # continue - - f = f.strip() - print f, i, tot - vrml_parse(f) -''' - -# NO BLENDER CODE ABOVE THIS LINE. -# ----------------------------------------------------------------------------------- -import bpy -import BPyImage -import BPySys -reload(BPySys) -reload(BPyImage) -import Blender -from Blender import Texture, Material, Mathutils, Mesh, Types, Window -from Blender.Mathutils import TranslationMatrix -from Blender.Mathutils import RotationMatrix -from Blender.Mathutils import Vector -from Blender.Mathutils import Matrix - -RAD_TO_DEG = 57.29578 - -GLOBALS = {'CIRCLE_DETAIL':16} - -def translateRotation(rot): - ''' axis, angle ''' - return RotationMatrix(rot[3]*RAD_TO_DEG, 4, 'r', Vector(rot[:3])) - -def translateScale(sca): - mat = Matrix() # 4x4 default - mat[0][0] = sca[0] - mat[1][1] = sca[1] - mat[2][2] = sca[2] - return mat - -def translateTransform(node, ancestry): - cent = node.getFieldAsFloatTuple('center', None, ancestry) # (0.0, 0.0, 0.0) - rot = node.getFieldAsFloatTuple('rotation', None, ancestry) # (0.0, 0.0, 1.0, 0.0) - sca = node.getFieldAsFloatTuple('scale', None, ancestry) # (1.0, 1.0, 1.0) - scaori = node.getFieldAsFloatTuple('scaleOrientation', None, ancestry) # (0.0, 0.0, 1.0, 0.0) - tx = node.getFieldAsFloatTuple('translation', None, ancestry) # (0.0, 0.0, 0.0) - - if cent: - cent_mat = TranslationMatrix(Vector(cent)).resize4x4() - cent_imat = cent_mat.copy().invert() - else: - cent_mat = cent_imat = None - - if rot: rot_mat = translateRotation(rot) - else: rot_mat = None - - if sca: sca_mat = translateScale(sca) - else: sca_mat = None - - if scaori: - scaori_mat = translateRotation(scaori) - scaori_imat = scaori_mat.copy().invert() - else: - scaori_mat = scaori_imat = None - - if tx: tx_mat = TranslationMatrix(Vector(tx)).resize4x4() - else: tx_mat = None - - new_mat = Matrix() - - mats = [tx_mat, cent_mat, rot_mat, scaori_mat, sca_mat, scaori_imat, cent_imat] - for mtx in mats: - if mtx: - new_mat = mtx * new_mat - - return new_mat - 
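For reference, translateTransform() above composes the VRML97 Transform node equation P' = T * C * R * SR * S * -SR * -C * P using Blender's row-vector Mathutils types. A minimal standalone sketch of the same composition, assuming numpy and the column-vector convention used in the spec (the helper names below are illustrative only and are not part of this script):

import numpy as np

def _translation(t):
    m = np.eye(4)
    m[:3, 3] = t
    return m

def _scale(s):
    return np.diag([s[0], s[1], s[2], 1.0])

def _rotation(axis_angle):
    # Rodrigues formula for a VRML axis/angle rotation (angle in radians).
    x, y, z, angle = axis_angle
    axis = np.array([x, y, z], dtype=float)
    axis = axis / np.linalg.norm(axis)
    ux, uy, uz = axis
    c, s = np.cos(angle), np.sin(angle)
    m = np.eye(4)
    m[:3, :3] = [[c + ux*ux*(1 - c),     ux*uy*(1 - c) - uz*s, ux*uz*(1 - c) + uy*s],
                 [uy*ux*(1 - c) + uz*s,  c + uy*uy*(1 - c),    uy*uz*(1 - c) - ux*s],
                 [uz*ux*(1 - c) - uy*s,  uz*uy*(1 - c) + ux*s, c + uz*uz*(1 - c)]]
    return m

def vrml_transform_matrix(translation=(0.0, 0.0, 0.0), center=(0.0, 0.0, 0.0),
                          rotation=(0.0, 0.0, 1.0, 0.0), scale=(1.0, 1.0, 1.0),
                          scale_orientation=(0.0, 0.0, 1.0, 0.0)):
    # Defaults match the Transform node fields read by translateTransform().
    T = _translation(translation)
    C = _translation(center)
    R = _rotation(rotation)
    SR = _rotation(scale_orientation)
    S = _scale(scale)
    # Composition order mandated by the VRML97 spec for the Transform node.
    return T.dot(C).dot(R).dot(SR).dot(S).dot(np.linalg.inv(SR)).dot(np.linalg.inv(C))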
-def translateTexTransform(node, ancestry): - cent = node.getFieldAsFloatTuple('center', None, ancestry) # (0.0, 0.0) - rot = node.getFieldAsFloat('rotation', None, ancestry) # 0.0 - sca = node.getFieldAsFloatTuple('scale', None, ancestry) # (1.0, 1.0) - tx = node.getFieldAsFloatTuple('translation', None, ancestry) # (0.0, 0.0) - - - if cent: - # cent is at a corner by default - cent_mat = TranslationMatrix(Vector(cent).resize3D()).resize4x4() - cent_imat = cent_mat.copy().invert() - else: - cent_mat = cent_imat = None - - if rot: rot_mat = RotationMatrix(rot*RAD_TO_DEG, 4, 'z') # translateRotation(rot) - else: rot_mat = None - - if sca: sca_mat = translateScale((sca[0], sca[1], 0.0)) - else: sca_mat = None - - if tx: tx_mat = TranslationMatrix(Vector(tx).resize3D()).resize4x4() - else: tx_mat = None - - new_mat = Matrix() - - # as specified in VRML97 docs - mats = [cent_imat, sca_mat, rot_mat, cent_mat, tx_mat] - - for mtx in mats: - if mtx: - new_mat = mtx * new_mat - - return new_mat - - - -def getFinalMatrix(node, mtx, ancestry): - - transform_nodes = [node_tx for node_tx in ancestry if node_tx.getSpec() == 'Transform'] - if node.getSpec()=='Transform': - transform_nodes.append(node) - transform_nodes.reverse() - - if mtx==None: - mtx = Matrix() - - for node_tx in transform_nodes: - mat = translateTransform(node_tx, ancestry) - mtx = mtx * mat - - return mtx - -def importMesh_IndexedFaceSet(geom, bpyima, ancestry): - # print geom.lineno, geom.id, vrmlNode.DEF_NAMESPACE.keys() - - ccw = geom.getFieldAsBool('ccw', True, ancestry) - ifs_colorPerVertex = geom.getFieldAsBool('colorPerVertex', True, ancestry) # per vertex or per face - ifs_normalPerVertex = geom.getFieldAsBool('normalPerVertex', True, ancestry) - - # This is odd how point is inside Coordinate - - # VRML not x3d - #coord = geom.getChildByName('coord') # 'Coordinate' - - coord = geom.getChildBySpec('Coordinate') # works for x3d and vrml - - if coord: ifs_points = coord.getFieldAsArray('point', 3, ancestry) - else: coord = [] - - if not coord: - print '\tWarnint: IndexedFaceSet has no points' - return None, ccw - - ifs_faces = geom.getFieldAsArray('coordIndex', 0, ancestry) - - coords_tex = None - if ifs_faces: # In rare cases this causes problems - no faces but UVs??? - - # WORKS - VRML ONLY - # coords_tex = geom.getChildByName('texCoord') - coords_tex = geom.getChildBySpec('TextureCoordinate') - - if coords_tex: - ifs_texpoints = coords_tex.getFieldAsArray('point', 2, ancestry) - ifs_texfaces = geom.getFieldAsArray('texCoordIndex', 0, ancestry) - - if not ifs_texpoints: - # IF we have no coords, then dont bother - coords_tex = None - - - # WORKS - VRML ONLY - # vcolor = geom.getChildByName('color') - vcolor = geom.getChildBySpec('Color') - vcolor_spot = None # spot color when we dont have an array of colors - if vcolor: - # float to char - ifs_vcol = [(0,0,0)] # EEKADOODLE - vertex start at 1 - ifs_vcol.extend([[int(c*256) for c in col] for col in vcolor.getFieldAsArray('color', 3, ancestry)]) - ifs_color_index = geom.getFieldAsArray('colorIndex', 0, ancestry) - - if not ifs_vcol: - vcolor_spot = [int(c*256) for c in vcolor.getFieldAsFloatTuple('color', [], ancestry)] - - # Convert faces into somthing blender can use - edges = [] - - # All lists are aligned! - faces = [] - faces_uv = [] # if ifs_texfaces is empty then the faces_uv will match faces exactly. 
- faces_orig_index = [] # for ngons, we need to know our original index - - if coords_tex and ifs_texfaces: - do_uvmap = True - else: - do_uvmap = False - - # current_face = [0] # pointer anyone - - def add_face(face, fuvs, orig_index): - l = len(face) - if l==3 or l==4: - faces.append(face) - # faces_orig_index.append(current_face[0]) - if do_uvmap: - faces_uv.append(fuvs) - - faces_orig_index.append(orig_index) - elif l==2: edges.append(face) - elif l>4: - for i in xrange(2, len(face)): - faces.append([face[0], face[i-1], face[i]]) - if do_uvmap: - faces_uv.append([fuvs[0], fuvs[i-1], fuvs[i]]) - faces_orig_index.append(orig_index) - else: - # faces with 1 verts? pfft! - # still will affect index ordering - pass - - face = [] - fuvs = [] - orig_index = 0 - for i, fi in enumerate(ifs_faces): - # ifs_texfaces and ifs_faces should be aligned - if fi != -1: - # face.append(int(fi)) # in rare cases this is a float - # EEKADOODLE!!! - # Annoyance where faces that have a zero index vert get rotated. This will then mess up UVs and VColors - face.append(int(fi)+1) # in rare cases this is a float, +1 because of stupid EEKADOODLE :/ - - if do_uvmap: - if i >= len(ifs_texfaces): - print '\tWarning: UV Texface index out of range' - fuvs.append(ifs_texfaces[0]) - else: - fuvs.append(ifs_texfaces[i]) - else: - add_face(face, fuvs, orig_index) - face = [] - if do_uvmap: - fuvs = [] - orig_index += 1 - - add_face(face, fuvs, orig_index) - del add_face # dont need this func anymore - - bpymesh = bpy.data.meshes.new() - - bpymesh.verts.extend([(0,0,0)]) # EEKADOODLE - bpymesh.verts.extend(ifs_points) - - # print len(ifs_points), faces, edges, ngons - - try: - bpymesh.faces.extend(faces, smooth=True, ignoreDups=True) - except KeyError: - print "one or more vert indicies out of range. corrupt file?" - #for f in faces: - # bpymesh.faces.extend(faces, smooth=True) - - bpymesh.calcNormals() - - if len(bpymesh.faces) != len(faces): - print '\tWarning: adding faces did not work! file is invalid, not adding UVs or vcolors' - return bpymesh, ccw - - # Apply UVs if we have them - if not do_uvmap: - faces_uv = faces # fallback, we didnt need a uvmap in the first place, fallback to the face/vert mapping. - if coords_tex: - #print ifs_texpoints - # print geom - bpymesh.faceUV = True - for i,f in enumerate(bpymesh.faces): - f.image = bpyima - fuv = faces_uv[i] # uv indicies - for j,uv in enumerate(f.uv): - # print fuv, j, len(ifs_texpoints) - try: - uv[:] = ifs_texpoints[fuv[j]] - except: - print '\tWarning: UV Index out of range' - uv[:] = ifs_texpoints[0] - - elif bpyima and len(bpymesh.faces): - # Oh Bugger! - we cant really use blenders ORCO for for texture space since texspace dosnt rotate. - # we have to create VRML's coords as UVs instead. - - # VRML docs - ''' - If the texCoord field is NULL, a default texture coordinate mapping is calculated using the local - coordinate system bounding box of the shape. The longest dimension of the bounding box defines the S coordinates, - and the next longest defines the T coordinates. If two or all three dimensions of the bounding box are equal, - ties shall be broken by choosing the X, Y, or Z dimension in that order of preference. - The value of the S coordinate ranges from 0 to 1, from one end of the bounding box to the other. - The T coordinate ranges between 0 and the ratio of the second greatest dimension of the bounding box to the greatest dimension. 
- ''' - - # Note, S,T == U,V - # U gets longest, V gets second longest - xmin, ymin, zmin = ifs_points[0] - xmax, ymax, zmax = ifs_points[0] - for co in ifs_points: - x,y,z = co - if x < xmin: xmin = x - if y < ymin: ymin = y - if z < zmin: zmin = z - - if x > xmax: xmax = x - if y > ymax: ymax = y - if z > zmax: zmax = z - - xlen = xmax - xmin - ylen = ymax - ymin - zlen = zmax - zmin - - depth_min = xmin, ymin, zmin - depth_list = [xlen, ylen, zlen] - depth_sort = depth_list[:] - depth_sort.sort() - - depth_idx = [depth_list.index(val) for val in depth_sort] - - axis_u = depth_idx[-1] - axis_v = depth_idx[-2] # second longest - - # Hack, swap these !!! TODO - Why swap??? - it seems to work correctly but should not. - # axis_u,axis_v = axis_v,axis_u - - min_u = depth_min[axis_u] - min_v = depth_min[axis_v] - depth_u = depth_list[axis_u] - depth_v = depth_list[axis_v] - - depth_list[axis_u] - - if axis_u == axis_v: - # This should be safe because when 2 axies have the same length, the lower index will be used. - axis_v += 1 - - bpymesh.faceUV = True - - # HACK !!! - seems to be compatible with Cosmo though. - depth_v = depth_u = max(depth_v, depth_u) - - for f in bpymesh.faces: - f.image = bpyima - fuv = f.uv - - for i,v in enumerate(f): - co = v.co - fuv[i][:] = (co[axis_u]-min_u) / depth_u, (co[axis_v]-min_v) / depth_v - - # Add vcote - if vcolor: - # print ifs_vcol - bpymesh.vertexColors = True - - for f in bpymesh.faces: - fcol = f.col - if ifs_colorPerVertex: - fv = f.verts - for i,c in enumerate(fcol): - color_index = fv[i].index # color index is vert index - if ifs_color_index: - try: - color_index = ifs_color_index[color_index] - except: - print '\tWarning: per vertex color index out of range' - continue - - if color_index < len(ifs_vcol): - c.r, c.g, c.b = ifs_vcol[color_index] - else: - #print '\tWarning: per face color index out of range' - pass - else: - if vcolor_spot: # use 1 color, when ifs_vcol is [] - for c in fcol: - c.r, c.g, c.b = vcolor_spot - else: - color_index = faces_orig_index[f.index] # color index is face index - #print color_index, ifs_color_index - if ifs_color_index: - if color_index <= len(ifs_color_index): - print '\tWarning: per face color index out of range' - color_index = 0 - else: - color_index = ifs_color_index[color_index] - - - col = ifs_vcol[color_index] - for i,c in enumerate(fcol): - try: - c.r, c.g, c.b = col - except: - pass # incase its not between 0 and 255 - - bpymesh.verts.delete([0,]) # EEKADOODLE - - return bpymesh, ccw - -def importMesh_IndexedLineSet(geom, ancestry): - # VRML not x3d - #coord = geom.getChildByName('coord') # 'Coordinate' - coord = geom.getChildBySpec('Coordinate') # works for x3d and vrml - if coord: points = coord.getFieldAsArray('point', 3, ancestry) - else: points = [] - - if not points: - print '\tWarning: IndexedLineSet had no points' - return None - - ils_lines = geom.getFieldAsArray('coordIndex', 0, ancestry) - - lines = [] - line = [] - - for il in ils_lines: - if il==-1: - lines.append(line) - line = [] - else: - line.append(int(il)) - lines.append(line) - - # vcolor = geom.getChildByName('color') # blender dosnt have per vertex color - - bpycurve = bpy.data.curves.new('IndexedCurve', 'Curve') - bpycurve.setFlag(1) - - w=t=1 - - curve_index = 0 - - for line in lines: - if not line: - continue - co = points[line[0]] - bpycurve.appendNurb([co[0], co[1], co[2], w, t]) - bpycurve[curve_index].type= 0 # Poly Line - - for il in line[1:]: - co = points[il] - bpycurve.appendPoint(curve_index, [co[0], co[1], co[2], w]) 
- - - curve_index += 1 - - return bpycurve - - -def importMesh_PointSet(geom, ancestry): - # VRML not x3d - #coord = geom.getChildByName('coord') # 'Coordinate' - coord = geom.getChildBySpec('Coordinate') # works for x3d and vrml - if coord: points = coord.getFieldAsArray('point', 3, ancestry) - else: points = [] - - # vcolor = geom.getChildByName('color') # blender dosnt have per vertex color - - bpymesh = bpy.data.meshes.new() - bpymesh.verts.extend(points) - bpymesh.calcNormals() # will just be dummy normals - return bpymesh - -GLOBALS['CIRCLE_DETAIL'] = 12 - -MATRIX_Z_TO_Y = RotationMatrix(90, 4, 'x') - -def importMesh_Sphere(geom, ancestry): - # bpymesh = bpy.data.meshes.new() - diameter = geom.getFieldAsFloat('radius', 0.5, ancestry) * 2 # * 2 for the diameter - bpymesh = Mesh.Primitives.UVsphere(GLOBALS['CIRCLE_DETAIL'], GLOBALS['CIRCLE_DETAIL'], diameter) - bpymesh.transform(MATRIX_Z_TO_Y) - return bpymesh - -def importMesh_Cylinder(geom, ancestry): - # bpymesh = bpy.data.meshes.new() - diameter = geom.getFieldAsFloat('radius', 1.0, ancestry) * 2 # * 2 for the diameter - height = geom.getFieldAsFloat('height', 2, ancestry) - bpymesh = Mesh.Primitives.Cylinder(GLOBALS['CIRCLE_DETAIL'], diameter, height) - bpymesh.transform(MATRIX_Z_TO_Y) - - # Warning - Rely in the order Blender adds verts - # not nice design but wont change soon. - - bottom = geom.getFieldAsBool('bottom', True, ancestry) - side = geom.getFieldAsBool('side', True, ancestry) - top = geom.getFieldAsBool('top', True, ancestry) - - if not top: # last vert is top center of tri fan. - bpymesh.verts.delete([(GLOBALS['CIRCLE_DETAIL']+GLOBALS['CIRCLE_DETAIL'])+1]) - - if not bottom: # second last vert is bottom of triangle fan - bpymesh.verts.delete([GLOBALS['CIRCLE_DETAIL']+GLOBALS['CIRCLE_DETAIL']]) - - if not side: - # remove all quads - bpymesh.faces.delete(1, [f for f in bpymesh.faces if len(f)==4]) - - return bpymesh - -def importMesh_Cone(geom, ancestry): - # bpymesh = bpy.data.meshes.new() - diameter = geom.getFieldAsFloat('bottomRadius', 1.0, ancestry) * 2 # * 2 for the diameter - height = geom.getFieldAsFloat('height', 2, ancestry) - bpymesh = Mesh.Primitives.Cone(GLOBALS['CIRCLE_DETAIL'], diameter, height) - bpymesh.transform(MATRIX_Z_TO_Y) - - # Warning - Rely in the order Blender adds verts - # not nice design but wont change soon. 
- - bottom = geom.getFieldAsBool('bottom', True, ancestry) - side = geom.getFieldAsBool('side', True, ancestry) - - if not bottom: # last vert is on the bottom - bpymesh.verts.delete([GLOBALS['CIRCLE_DETAIL']+1]) - if not side: # second last vert is on the pointy bit of the cone - bpymesh.verts.delete([GLOBALS['CIRCLE_DETAIL']]) - - return bpymesh - -def importMesh_Box(geom, ancestry): - # bpymesh = bpy.data.meshes.new() - - size = geom.getFieldAsFloatTuple('size', (2.0, 2.0, 2.0), ancestry) - bpymesh = Mesh.Primitives.Cube(1.0) - - # Scale the box to the size set - scale_mat = Matrix([size[0],0,0], [0, size[1], 0], [0, 0, size[2]]) - bpymesh.transform(scale_mat.resize4x4()) - - return bpymesh - -def importShape(node, ancestry): - vrmlname = node.getDefName() - if not vrmlname: vrmlname = 'Shape' - - # works 100% in vrml, but not x3d - #appr = node.getChildByName('appearance') # , 'Appearance' - #geom = node.getChildByName('geometry') # , 'IndexedFaceSet' - - # Works in vrml and x3d - appr = node.getChildBySpec('Appearance') - geom = node.getChildBySpec(['IndexedFaceSet', 'IndexedLineSet', 'PointSet', 'Sphere', 'Box', 'Cylinder', 'Cone']) - - # For now only import IndexedFaceSet's - if geom: - bpymat = None - bpyima = None - texmtx = None - - depth = 0 # so we can set alpha face flag later - - if appr: - - #mat = appr.getChildByName('material') # 'Material' - #ima = appr.getChildByName('texture') # , 'ImageTexture' - #if ima and ima.getSpec() != 'ImageTexture': - # print '\tWarning: texture type "%s" is not supported' % ima.getSpec() - # ima = None - # textx = appr.getChildByName('textureTransform') - - mat = appr.getChildBySpec('Material') - ima = appr.getChildBySpec('ImageTexture') - - textx = appr.getChildBySpec('TextureTransform') - - if textx: - texmtx = translateTexTransform(textx, ancestry) - - - - # print mat, ima - if mat or ima: - - if not mat: - mat = ima # This is a bit dumb, but just means we use default values for all - - # all values between 0.0 and 1.0, defaults from VRML docs - bpymat = bpy.data.materials.new() - bpymat.amb = mat.getFieldAsFloat('ambientIntensity', 0.2, ancestry) - bpymat.rgbCol = mat.getFieldAsFloatTuple('diffuseColor', [0.8, 0.8, 0.8], ancestry) - - # NOTE - blender dosnt support emmisive color - # Store in mirror color and approximate with emit. - emit = mat.getFieldAsFloatTuple('emissiveColor', [0.0, 0.0, 0.0], ancestry) - bpymat.mirCol = emit - bpymat.emit = (emit[0]+emit[1]+emit[2])/3.0 - - bpymat.hard = int(1+(510*mat.getFieldAsFloat('shininess', 0.2, ancestry))) # 0-1 -> 1-511 - bpymat.specCol = mat.getFieldAsFloatTuple('specularColor', [0.0, 0.0, 0.0], ancestry) - bpymat.alpha = 1.0 - mat.getFieldAsFloat('transparency', 0.0, ancestry) - if bpymat.alpha < 0.999: - bpymat.mode |= Material.Modes.ZTRANSP - - - if ima: - - ima_url = ima.getFieldAsString('url', None, ancestry) - - if ima_url==None: - try: ima_url = ima.getFieldAsStringArray('url', ancestry)[0] # in some cases we get a list of images. 
- except: ima_url = None - - if ima_url==None: - print "\twarning, image with no URL, this is odd" - else: - bpyima= BPyImage.comprehensiveImageLoad(ima_url, dirName(node.getFilename()), PLACE_HOLDER= False, RECURSIVE= False, CONVERT_CALLBACK= imageConvertCompat) - if bpyima: - texture= bpy.data.textures.new() - texture.setType('Image') - texture.image = bpyima - - # Adds textures for materials (rendering) - try: depth = bpyima.depth - except: depth = -1 - - if depth == 32: - # Image has alpha - bpymat.setTexture(0, texture, Texture.TexCo.UV, Texture.MapTo.COL | Texture.MapTo.ALPHA) - texture.setImageFlags('MipMap', 'InterPol', 'UseAlpha') - bpymat.mode |= Material.Modes.ZTRANSP - bpymat.alpha = 0.0 - else: - bpymat.setTexture(0, texture, Texture.TexCo.UV, Texture.MapTo.COL) - - ima_repS = ima.getFieldAsBool('repeatS', True, ancestry) - ima_repT = ima.getFieldAsBool('repeatT', True, ancestry) - - # To make this work properly we'd need to scale the UV's too, better to ignore th - # texture.repeat = max(1, ima_repS * 512), max(1, ima_repT * 512) - - if not ima_repS: bpyima.clampX = True - if not ima_repT: bpyima.clampY = True - - bpydata = None - geom_spec = geom.getSpec() - ccw = True - if geom_spec == 'IndexedFaceSet': - bpydata, ccw = importMesh_IndexedFaceSet(geom, bpyima, ancestry) - elif geom_spec == 'IndexedLineSet': - bpydata = importMesh_IndexedLineSet(geom, ancestry) - elif geom_spec == 'PointSet': - bpydata = importMesh_PointSet(geom, ancestry) - elif geom_spec == 'Sphere': - bpydata = importMesh_Sphere(geom, ancestry) - elif geom_spec == 'Box': - bpydata = importMesh_Box(geom, ancestry) - elif geom_spec == 'Cylinder': - bpydata = importMesh_Cylinder(geom, ancestry) - elif geom_spec == 'Cone': - bpydata = importMesh_Cone(geom, ancestry) - else: - print '\tWarning: unsupported type "%s"' % geom_spec - return - - if bpydata: - vrmlname = vrmlname + geom_spec - - bpydata.name = vrmlname - - bpyob = node.blendObject = bpy.data.scenes.active.objects.new(bpydata) - - if type(bpydata) == Types.MeshType: - is_solid = geom.getFieldAsBool('solid', True, ancestry) - creaseAngle = geom.getFieldAsFloat('creaseAngle', None, ancestry) - - if creaseAngle != None: - bpydata.maxSmoothAngle = 1+int(min(79, creaseAngle * RAD_TO_DEG)) - bpydata.mode |= Mesh.Modes.AUTOSMOOTH - - # Only ever 1 material per shape - if bpymat: bpydata.materials = [bpymat] - - if bpydata.faceUV: - - if depth==32: # set the faces alpha flag? - transp = Mesh.FaceTranspModes.ALPHA - for f in bpydata.faces: - f.transp = transp - - if texmtx: - # Apply texture transform? - uv_copy = Vector() - for f in bpydata.faces: - for uv in f.uv: - uv_copy.x = uv.x - uv_copy.y = uv.y - - uv.x, uv.y = (uv_copy * texmtx)[0:2] - # Done transforming the texture - - - # Must be here and not in IndexedFaceSet because it needs an object for the flip func. 
Messy :/ - if not ccw: bpydata.flipNormals() - - - # else could be a curve for example - - - - # Can transform data or object, better the object so we can instance the data - #bpymesh.transform(getFinalMatrix(node)) - bpyob.setMatrix( getFinalMatrix(node, None, ancestry) ) - - -def importLamp_PointLight(node, ancestry): - vrmlname = node.getDefName() - if not vrmlname: vrmlname = 'PointLight' - - # ambientIntensity = node.getFieldAsFloat('ambientIntensity', 0.0, ancestry) # TODO - # attenuation = node.getFieldAsFloatTuple('attenuation', (1.0, 0.0, 0.0), ancestry) # TODO - color = node.getFieldAsFloatTuple('color', (1.0, 1.0, 1.0), ancestry) - intensity = node.getFieldAsFloat('intensity', 1.0, ancestry) # max is documented to be 1.0 but some files have higher. - location = node.getFieldAsFloatTuple('location', (0.0, 0.0, 0.0), ancestry) - # is_on = node.getFieldAsBool('on', True, ancestry) # TODO - radius = node.getFieldAsFloat('radius', 100.0, ancestry) - - bpylamp = bpy.data.lamps.new() - bpylamp.setType('Lamp') - bpylamp.energy = intensity - bpylamp.dist = radius - bpylamp.col = color - - mtx = TranslationMatrix(Vector(location)) - - return bpylamp, mtx - -def importLamp_DirectionalLight(node, ancestry): - vrmlname = node.getDefName() - if not vrmlname: vrmlname = 'DirectLight' - - # ambientIntensity = node.getFieldAsFloat('ambientIntensity', 0.0) # TODO - color = node.getFieldAsFloatTuple('color', (1.0, 1.0, 1.0), ancestry) - direction = node.getFieldAsFloatTuple('direction', (0.0, 0.0, -1.0), ancestry) - intensity = node.getFieldAsFloat('intensity', 1.0, ancestry) # max is documented to be 1.0 but some files have higher. - # is_on = node.getFieldAsBool('on', True, ancestry) # TODO - - bpylamp = bpy.data.lamps.new(vrmlname) - bpylamp.setType('Sun') - bpylamp.energy = intensity - bpylamp.col = color - - # lamps have their direction as -z, yup - mtx = Vector(direction).toTrackQuat('-z', 'y').toMatrix().resize4x4() - - return bpylamp, mtx - -# looks like default values for beamWidth and cutOffAngle were swapped in VRML docs. - -def importLamp_SpotLight(node, ancestry): - vrmlname = node.getDefName() - if not vrmlname: vrmlname = 'SpotLight' - - # ambientIntensity = geom.getFieldAsFloat('ambientIntensity', 0.0, ancestry) # TODO - # attenuation = geom.getFieldAsFloatTuple('attenuation', (1.0, 0.0, 0.0), ancestry) # TODO - beamWidth = node.getFieldAsFloat('beamWidth', 1.570796, ancestry) * RAD_TO_DEG # max is documented to be 1.0 but some files have higher. - color = node.getFieldAsFloatTuple('color', (1.0, 1.0, 1.0), ancestry) - cutOffAngle = node.getFieldAsFloat('cutOffAngle', 0.785398, ancestry) * RAD_TO_DEG # max is documented to be 1.0 but some files have higher. - direction = node.getFieldAsFloatTuple('direction', (0.0, 0.0, -1.0), ancestry) - intensity = node.getFieldAsFloat('intensity', 1.0, ancestry) # max is documented to be 1.0 but some files have higher. - location = node.getFieldAsFloatTuple('location', (0.0, 0.0, 0.0), ancestry) - # is_on = node.getFieldAsBool('on', True, ancestry) # TODO - radius = node.getFieldAsFloat('radius', 100.0, ancestry) - - bpylamp = bpy.data.lamps.new(vrmlname) - bpylamp.setType('Spot') - bpylamp.energy = intensity - bpylamp.dist = radius - bpylamp.col = color - bpylamp.spotSize = cutOffAngle - if beamWidth > cutOffAngle: - bpylamp.spotBlend = 0.0 - else: - if cutOffAngle==0.0: #@#$%^&*(!!! 
- this should never happen - bpylamp.spotBlend = 0.5 - else: - bpylamp.spotBlend = beamWidth / cutOffAngle - - # Convert - - # lamps have their direction as -z, y==up - mtx = Vector(direction).toTrackQuat('-z', 'y').toMatrix().resize4x4() * TranslationMatrix(Vector(location)) - - return bpylamp, mtx - - -def importLamp(node, spec, ancestry): - if spec=='PointLight': - bpylamp,mtx = importLamp_PointLight(node, ancestry) - elif spec=='DirectionalLight': - bpylamp,mtx = importLamp_DirectionalLight(node, ancestry) - elif spec=='SpotLight': - bpylamp,mtx = importLamp_SpotLight(node, ancestry) - else: - print "Error, not a lamp" - raise ValueError - - bpyob = node.blendObject = bpy.data.scenes.active.objects.new(bpylamp) - bpyob.setMatrix( getFinalMatrix(node, mtx, ancestry) ) - - -def importViewpoint(node, ancestry): - name = node.getDefName() - if not name: name = 'Viewpoint' - - fieldOfView = node.getFieldAsFloat('fieldOfView', 0.785398, ancestry) * RAD_TO_DEG # max is documented to be 1.0 but some files have higher. - # jump = node.getFieldAsBool('jump', True, ancestry) - orientation = node.getFieldAsFloatTuple('orientation', (0.0, 0.0, 1.0, 0.0), ancestry) - position = node.getFieldAsFloatTuple('position', (0.0, 0.0, 0.0), ancestry) - description = node.getFieldAsString('description', '', ancestry) - - bpycam = bpy.data.cameras.new(name) - - bpycam.angle = fieldOfView - - mtx = translateRotation(orientation) * TranslationMatrix(Vector(position)) - - - bpyob = node.blendObject = bpy.data.scenes.active.objects.new(bpycam) - bpyob.setMatrix( getFinalMatrix(node, mtx, ancestry) ) - - -def importTransform(node, ancestry): - name = node.getDefName() - if not name: name = 'Transform' - - bpyob = node.blendObject = bpy.data.scenes.active.objects.new('Empty', name) # , name) - bpyob.setMatrix( getFinalMatrix(node, None, ancestry) ) - - # so they are not too annoying - bpyob.emptyShape= Blender.Object.EmptyShapes.AXES - bpyob.drawSize= 0.2 - - -#def importTimeSensor(node): - - -def translatePositionInterpolator(node, ipo, ancestry): - key = node.getFieldAsArray('key', 0, ancestry) - keyValue = node.getFieldAsArray('keyValue', 3, ancestry) - - try: - loc_x = ipo.addCurve('LocX') - loc_y = ipo.addCurve('LocY') - loc_z = ipo.addCurve('LocZ') - except ValueError: - return - - loc_x.interpolation = loc_y.interpolation = loc_z.interpolation = Blender.IpoCurve.InterpTypes.LINEAR - - for i, time in enumerate(key): - try: x,y,z = keyValue[i] - except: continue - - loc_x.append((time,x)) - loc_y.append((time,y)) - loc_z.append((time,z)) - -def translateOrientationInterpolator(node, ipo, ancestry): - key = node.getFieldAsArray('key', 0, ancestry) - keyValue = node.getFieldAsArray('keyValue', 4, ancestry) - - try: - rot_x = ipo.addCurve('RotX') - rot_y = ipo.addCurve('RotY') - rot_z = ipo.addCurve('RotZ') - except ValueError: - return - - rot_x.interpolation = rot_y.interpolation = rot_z.interpolation = Blender.IpoCurve.InterpTypes.LINEAR - - for i, time in enumerate(key): - try: x,y,z,w = keyValue[i] - except: continue - - mtx = translateRotation((x,y,z,w)) - eul = mtx.toEuler() - rot_x.append((time,eul.x/10.0)) - rot_y.append((time,eul.y/10.0)) - rot_z.append((time,eul.z/10.0)) - -# Untested! 
-def translateScalarInterpolator(node, ipo, ancestry): - key = node.getFieldAsArray('key', 0, ancestry) - keyValue = node.getFieldAsArray('keyValue', 4, ancestry) - - try: - sca_x = ipo.addCurve('ScaleX') - sca_y = ipo.addCurve('ScaleY') - sca_z = ipo.addCurve('ScaleZ') - except ValueError: - return - - sca_x.interpolation = sca_y.interpolation = sca_z.interpolation = Blender.IpoCurve.InterpTypes.LINEAR - - for i, time in enumerate(key): - try: x,y,z = keyValue[i] - except: continue - sca_x.append((time,x/10.0)) - sca_y.append((time,y/10.0)) - sca_z.append((time,z/10.0)) - -def translateTimeSensor(node, ipo, ancestry): - ''' - Apply a time sensor to an IPO, VRML has many combinations of loop/start/stop/cycle times - to give different results, for now just do the basics - ''' - - time_cu = ipo.addCurve('Time') - time_cu.interpolation = Blender.IpoCurve.InterpTypes.LINEAR - - cycleInterval = node.getFieldAsFloat('cycleInterval', None, ancestry) - - startTime = node.getFieldAsFloat('startTime', 0.0, ancestry) - stopTime = node.getFieldAsFloat('stopTime', 250.0, ancestry) - - if cycleInterval != None: - stopTime = startTime+cycleInterval - - loop = node.getFieldAsBool('loop', False, ancestry) - - time_cu.append((1+startTime, 0.0)) - time_cu.append((1+stopTime, 1.0/10.0))# anoying, the UI uses /10 - - - if loop: - time_cu.extend = Blender.IpoCurve.ExtendTypes.CYCLIC # or - EXTRAP, CYCLIC_EXTRAP, CONST, - - -def importRoute(node, ancestry): - ''' - Animation route only at the moment - ''' - - if not hasattr(node, 'fields'): - return - - routeIpoDict = node.getRouteIpoDict() - - def getIpo(id): - try: ipo = routeIpoDict[id] - except: ipo = routeIpoDict[id] = bpy.data.ipos.new('web3d_ipo', 'Object') - return ipo - - # for getting definitions - defDict = node.getDefDict() - ''' - Handles routing nodes to eachother - -ROUTE vpPI.value_changed TO champFly001.set_position -ROUTE vpOI.value_changed TO champFly001.set_orientation -ROUTE vpTs.fraction_changed TO vpPI.set_fraction -ROUTE vpTs.fraction_changed TO vpOI.set_fraction -ROUTE champFly001.bindTime TO vpTs.set_startTime - ''' - - #from_id, from_type = node.id[1].split('.') - #to_id, to_type = node.id[3].split('.') - - #value_changed - set_position_node = None - set_orientation_node = None - time_node = None - - for field in node.fields: - if field and field[0]=='ROUTE': - try: - from_id, from_type = field[1].split('.') - to_id, to_type = field[3].split('.') - except: - print "Warning, invalid ROUTE", field - continue - - if from_type == 'value_changed': - if to_type == 'set_position': - ipo = getIpo(to_id) - set_data_from_node = defDict[from_id] - translatePositionInterpolator(set_data_from_node, ipo, ancestry) - - if to_type in ('set_orientation', 'rotation'): - ipo = getIpo(to_id) - set_data_from_node = defDict[from_id] - translateOrientationInterpolator(set_data_from_node, ipo, ancestry) - - if to_type == 'set_scale': - ipo = getIpo(to_id) - set_data_from_node = defDict[from_id] - translateScalarInterpolator(set_data_from_node, ipo, ancestry) - - elif from_type =='bindTime': - ipo = getIpo(from_id) - time_node = defDict[to_id] - translateTimeSensor(time_node, ipo, ancestry) - - - - -def load_web3d(path, PREF_FLAT=False, PREF_CIRCLE_DIV=16, HELPER_FUNC = None): - - # Used when adding blender primitives - GLOBALS['CIRCLE_DETAIL'] = PREF_CIRCLE_DIV - - #root_node = vrml_parse('/_Cylinder.wrl') - if path.lower().endswith('.x3d'): - root_node, msg = x3d_parse(path) - else: - root_node, msg = vrml_parse(path) - - if not root_node: - if Blender.mode 
== 'background': - print msg - else: - Blender.Draw.PupMenu(msg) - return - - - # fill with tuples - (node, [parents-parent, parent]) - all_nodes = root_node.getSerialized([], []) - - for node, ancestry in all_nodes: - #if 'castle.wrl' not in node.getFilename(): - # continue - - spec = node.getSpec() - ''' - prefix = node.getPrefix() - if prefix=='PROTO': - pass - else - ''' - if HELPER_FUNC and HELPER_FUNC(node, ancestry): - # Note, include this function so the VRML/X3D importer can be extended - # by an external script. - gets first pick - pass - if spec=='Shape': - importShape(node, ancestry) - elif spec in ('PointLight', 'DirectionalLight', 'SpotLight'): - importLamp(node, spec, ancestry) - elif spec=='Viewpoint': - importViewpoint(node, ancestry) - elif spec=='Transform': - # Only use transform nodes when we are not importing a flat object hierarchy - if PREF_FLAT==False: - importTransform(node, ancestry) - ''' - # These are delt with later within importRoute - elif spec=='PositionInterpolator': - ipo = bpy.data.ipos.new('web3d_ipo', 'Object') - translatePositionInterpolator(node, ipo) - ''' - - - - # After we import all nodes, route events - anim paths - for node, ancestry in all_nodes: - importRoute(node, ancestry) - - for node, ancestry in all_nodes: - if node.isRoot(): - # we know that all nodes referenced from will be in - # routeIpoDict so no need to run node.getDefDict() for every node. - routeIpoDict = node.getRouteIpoDict() - defDict = node.getDefDict() - - for key, ipo in routeIpoDict.iteritems(): - - # Assign anim curves - node = defDict[key] - if node.blendObject==None: # Add an object if we need one for animation - node.blendObject = bpy.data.scenes.active.objects.new('Empty', 'AnimOb') # , name) - - node.blendObject.setIpo(ipo) - - - - # Add in hierarchy - if PREF_FLAT==False: - child_dict = {} - for node, ancestry in all_nodes: - if node.blendObject: - blendObject = None - - # Get the last parent - i = len(ancestry) - while i: - i-=1 - blendObject = ancestry[i].blendObject - if blendObject: - break - - if blendObject: - # Parent Slow, - 1 liner but works - # blendObject.makeParent([node.blendObject], 0, 1) - - # Parent FAST - try: child_dict[blendObject].append(node.blendObject) - except: child_dict[blendObject] = [node.blendObject] - - # Parent FAST - for parent, children in child_dict.iteritems(): - parent.makeParent(children, 0, 1) - - # update deps - bpy.data.scenes.active.update(1) - del child_dict - - -def load_ui(path): - Draw = Blender.Draw - PREF_HIERARCHY= Draw.Create(0) - PREF_CIRCLE_DIV= Draw.Create(16) - - # Get USER Options - pup_block= [\ - 'Import...',\ - ('Hierarchy', PREF_HIERARCHY, 'Import transform nodes as empties to create a parent/child hierarchy'),\ - ('Circle Div:', PREF_CIRCLE_DIV, 3, 128, 'Number of divisions to use for circular primitives') - ] - - if not Draw.PupBlock('Import X3D/VRML...', pup_block): - return - - Window.WaitCursor(1) - - load_web3d(path,\ - (not PREF_HIERARCHY.val),\ - PREF_CIRCLE_DIV.val,\ - ) - - Window.WaitCursor(0) - - -if __name__ == '__main__': - Window.FileSelector(load_ui, 'Import X3D/VRML97') - - -# Testing stuff - -# load_web3d('/test.x3d') -# load_web3d('/_Cylinder.x3d') - -# Testing below -# load_web3d('m:\\root\\Desktop\\_Cylinder.wrl') -# load_web3d('/_Cylinder.wrl') -# load_web3d('/fe/wrl/Vrml/EGS/BCKGD.WRL') - -# load_web3d('/fe/wrl/Vrml/EGS/GRNDPLNE.WRL') -# load_web3d('/fe/wrl/Vrml/EGS/INDEXFST.WRL') -# load_web3d('/fe/wrl/panel1c.wrl') -# load_web3d('/test.wrl') -# load_web3d('/fe/wrl/dulcimer.wrl') -# 
load_web3d('/fe/wrl/rccad/Ju-52.wrl') # Face index out of range -# load_web3d('/fe/wrl/16lat.wrl') # spotlight -# load_web3d('/fe/wrl/Vrml/EGS/FOG.WRL') # spotlight -# load_web3d('/fe/wrl/Vrml/EGS/LOD.WRL') # vcolor per face - -# load_web3d('/fe/wrl/new/daybreak_final.wrl') # no faces in mesh, face duplicate error -# load_web3d('/fe/wrl/new/earth.wrl') -# load_web3d('/fe/wrl/new/hendrix.ei.dtu.dk/vrml/talairach/fourd/TalaDruryRight.wrl') # define/use fields -# load_web3d('/fe/wrl/new/imac.wrl') # extrusion and define/use fields, face index is a float somehow -# load_web3d('/fe/wrl/new/www.igs.net/~mascott/vrml/vrml2/mcastle.wrl') -# load_web3d('/fe/wrl/new/www.igs.net/~mascott/vrml/vrml2/tower.wrl') -# load_web3d('/fe/wrl/new/www.igs.net/~mascott/vrml/vrml2/temple.wrl') -# load_web3d('/fe/wrl/brain.wrl') # field define test 'a IS b' -# load_web3d('/fe/wrl/new/coaster.wrl') # fields that are confusing to read. - -# X3D - -# load_web3d('/fe/x3d/www.web3d.org/x3d/content/examples/Basic/StudentProjects/PlayRoom.x3d') # invalid UVs - - - -def test(): - import os - - files = os.popen('find /fe/wrl -iname "*.wrl"').readlines() - # files = os.popen('find /fe/x3d -iname "*.x3d"').readlines() - # files = os.popen('find /fe/x3d/X3dExamplesSavage -iname "*.x3d"').readlines() - - files.sort() - tot = len(files) - for i, f in enumerate(files): - if i < 124 or i > 1000000: - continue - - #if i != 1068: - # continue - - #if i != 12686: - # continue - - f = f.strip() - print f, i, tot - sce = bpy.data.scenes.new(str(i) + '_' + f.split('/')[-1]) - bpy.data.scenes.active = sce - # Window. - load_web3d(f, PREF_FLAT=True) - -# test() diff --git a/release/scripts/io/engine_render_pov.py b/release/scripts/io/engine_render_pov.py new file mode 100644 index 00000000000..f0247ce532a --- /dev/null +++ b/release/scripts/io/engine_render_pov.py @@ -0,0 +1,912 @@ +import bpy + +from math import atan, pi, degrees +import subprocess +import os +import sys +import time + +import platform as pltfrm + +if pltfrm.architecture()[0] == '64bit': + bitness = 64 +else: + bitness = 32 + +def write_pov(filename, scene=None, info_callback = None): + file = open(filename, 'w') + + # Only for testing + if not scene: + scene = bpy.data.scenes[0] + + render = scene.render_data + world = scene.world + + # --- taken from fbx exporter + ## This was used to make V, but faster not to do all that + ##valid = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_,.()[]{}' + ##v = range(255) + ##for c in valid: v.remove(ord(c)) + v = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,42,43,46,47,58,59,60,61,62,63,64,92,94,96,124,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254] + invalid = ''.join([chr(i) for i in v]) + def cleanName(name): + for ch in invalid: name = name.replace(ch, '_') + return name + del v + + # --- done with clean name. 
+ + def uniqueName(name, nameSeq): + + if name not in nameSeq: + return name + + name_orig = name + i = 1 + while name in nameSeq: + name = '%s_%.3d' % (name_orig, i) + i+=1 + + return name + + + def writeMatrix(matrix): + file.write('\tmatrix <%.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f>\n' %\ + (matrix[0][0], matrix[0][1], matrix[0][2], matrix[1][0], matrix[1][1], matrix[1][2], matrix[2][0], matrix[2][1], matrix[2][2], matrix[3][0], matrix[3][1], matrix[3][2]) ) + + def writeObjectMaterial(material): + if material and material.transparency_method=='RAYTRACE': + file.write('\tinterior { ior %.6f }\n' % material.raytrace_transparency.ior) + + # Other interior args + # fade_distance 2 + # fade_power [Value] + # fade_color + + # dispersion + # dispersion_samples + + materialNames = {} + DEF_MAT_NAME = 'Default' + def writeMaterial(material): + # Assumes only called once on each material + + if material: + name_orig = material.name + else: + name_orig = DEF_MAT_NAME + + name = materialNames[name_orig] = uniqueName(cleanName(name_orig), materialNames) + + file.write('#declare %s = finish {\n' % name) + + if material: + file.write('\tdiffuse %.3g\n' % material.diffuse_intensity) + file.write('\tspecular %.3g\n' % material.specular_intensity) + + file.write('\tambient %.3g\n' % material.ambient) + #file.write('\tambient rgb <%.3g, %.3g, %.3g>\n' % tuple([c*material.ambient for c in world.ambient_color])) # povray blends the global value + + # map hardness between 0.0 and 1.0 + roughness = ((1.0 - ((material.specular_hardness-1.0)/510.0))) + # scale from 0.0 to 0.1 + roughness *= 0.1 + # add a small value because 0.0 is invalid + roughness += (1/511.0) + + file.write('\troughness %.3g\n' % roughness) + + # 'phong 70.0 ' + + if material.raytrace_mirror.enabled: + raytrace_mirror= material.raytrace_mirror + if raytrace_mirror.reflect_factor: + file.write('\treflection {\n') + file.write('\t\trgb <%.3g, %.3g, %.3g>' % tuple(material.mirror_color)) + file.write('\t\tfresnel 1 falloff %.3g exponent %.3g metallic %.3g} ' % (raytrace_mirror.fresnel, raytrace_mirror.fresnel_factor, raytrace_mirror.reflect_factor)) + + else: + file.write('\tdiffuse 0.8\n') + file.write('\tspecular 0.2\n') + + + # This is written into the object + ''' + if material and material.transparency_method=='RAYTRACE': + 'interior { ior %.3g} ' % material.raytrace_transparency.ior + ''' + + #file.write('\t\t\tcrand 1.0\n') # Sand granyness + #file.write('\t\t\tmetallic %.6f\n' % material.spec) + #file.write('\t\t\tphong %.6f\n' % material.spec) + #file.write('\t\t\tphong_size %.6f\n' % material.spec) + #file.write('\t\t\tbrilliance %.6f ' % (material.specular_hardness/256.0) # Like hardness + + file.write('}\n') + + def exportCamera(): + camera = scene.camera + matrix = camera.matrix + + # compute resolution + Qsize=float(render.resolution_x)/float(render.resolution_y) + + file.write('camera {\n') + file.write('\tlocation <0, 0, 0>\n') + file.write('\tlook_at <0, 0, -1>\n') + file.write('\tright <%s, 0, 0>\n' % -Qsize) + file.write('\tup <0, 1, 0>\n') + file.write('\tangle %f \n' % (360.0*atan(16.0/camera.data.lens)/pi)) + + file.write('\trotate <%.6f, %.6f, %.6f>\n' % tuple([degrees(e) for e in matrix.rotationPart().toEuler()])) + file.write('\ttranslate <%.6f, %.6f, %.6f>\n' % (matrix[3][0], matrix[3][1], matrix[3][2])) + file.write('}\n') + + def exportLamps(lamps): + # Get all lamps + for ob in lamps: + lamp = ob.data + + matrix = ob.matrix + + color = tuple([c * lamp.energy for c in lamp.color]) # 
Colour is modified by energy + + file.write('light_source {\n') + file.write('\t< 0,0,0 >\n') + file.write('\tcolor rgb<%.3g, %.3g, %.3g>\n' % color) + + if lamp.type == 'POINT': # Point Lamp + pass + elif lamp.type == 'SPOT': # Spot + file.write('\tspotlight\n') + + # Falloff is the main radius from the centre line + file.write('\tfalloff %.2f\n' % (lamp.spot_size/2.0) ) # 1 TO 179 FOR BOTH + file.write('\tradius %.6f\n' % ((lamp.spot_size/2.0) * (1-lamp.spot_blend)) ) + + # Blender does not have a tightness equivilent, 0 is most like blender default. + file.write('\ttightness 0\n') # 0:10f + + file.write('\tpoint_at <0, 0, -1>\n') + elif lamp.type == 'SUN': + file.write('\tparallel\n') + file.write('\tpoint_at <0, 0, -1>\n') # *must* be after 'parallel' + + elif lamp.type == 'AREA': + + size_x = lamp.size + samples_x = lamp.shadow_ray_samples_x + if lamp.shape == 'SQUARE': + size_y = size_x + samples_y = samples_x + else: + size_y = lamp.size_y + samples_y = lamp.shadow_ray_samples_y + + file.write('\tarea_light <%d,0,0>,<0,0,%d> %d, %d\n' % (size_x, size_y, samples_x, samples_y)) + if lamp.shadow_ray_sampling_method == 'CONSTANT_JITTERED': + if lamp.jitter: + file.write('\tjitter\n') + else: + file.write('\tadaptive 1\n') + file.write('\tjitter\n') + + if lamp.shadow_method == 'NOSHADOW': + file.write('\tshadowless\n') + + file.write('\tfade_distance %.6f\n' % lamp.distance) + file.write('\tfade_power %d\n' % 1) # Could use blenders lamp quad? + writeMatrix(matrix) + + file.write('}\n') + + def exportMeta(metas): + + # TODO - blenders 'motherball' naming is not supported. + + for ob in metas: + meta = ob.data + + file.write('blob {\n') + file.write('\t\tthreshold %.4g\n' % meta.threshold) + + try: + material= meta.materials[0] # lame! - blender cant do enything else. + except: + material= None + + for elem in meta.elements: + + if elem.type not in ('BALL', 'ELLIPSOID'): + continue # Not supported + + loc = elem.location + + stiffness= elem.stiffness + if elem.negative: + stiffness = -stiffness + + if elem.type == 'BALL': + + file.write('\tsphere { <%.6g, %.6g, %.6g>, %.4g, %.4g ' % (loc.x, loc.y, loc.z, elem.radius, stiffness)) + + # After this wecould do something simple like... + # "pigment {Blue} }" + # except we'll write the color + + elif elem.type == 'ELLIPSOID': + # location is modified by scale + file.write('\tsphere { <%.6g, %.6g, %.6g>, %.4g, %.4g ' % (loc.x/elem.size_x, loc.y/elem.size_y, loc.z/elem.size_z, elem.radius, stiffness)) + file.write( 'scale <%.6g, %.6g, %.6g> ' % (elem.size_x, elem.size_y, elem.size_z)) + + if material: + diffuse_color = material.diffuse_color + + if material.transparency and material.transparency_method=='RAYTRACE': trans = 1-material.raytrace_transparency.filter + else: trans = 0.0 + + file.write( + 'pigment {rgbft<%.3g, %.3g, %.3g, %.3g, %.3g>} finish {%s} }\n' % \ + (diffuse_color[0], diffuse_color[1], diffuse_color[2], 1-material.alpha, trans, materialNames[material.name]) + ) + + else: + file.write('pigment {rgb<1 1 1>} finish {%s} }\n' % DEF_MAT_NAME) # Write the finish last. 
+ + writeObjectMaterial(material) + + writeMatrix(ob.matrix) + + file.write('}\n') + + def exportMeshs(sel): + + ob_num = 0 + + for ob in sel: + ob_num+= 1 + + if ob.type in ('LAMP', 'CAMERA', 'EMPTY', 'META'): + continue + + me = ob.data + me_materials= me.materials + + me = ob.create_mesh(True, 'RENDER') + + if not me: + continue + + if info_callback: + info_callback('Object %2.d of %2.d (%s)' % (ob_num, len(sel), ob.name)) + + #if ob.type!='MESH': + # continue + # me = ob.data + + matrix = ob.matrix + try: uv_layer = me.active_uv_texture.data + except:uv_layer = None + + try: vcol_layer = me.active_vertex_color.data + except:vcol_layer = None + + faces_verts = [f.verts for f in me.faces] + faces_normals = [tuple(f.normal) for f in me.faces] + verts_normals = [tuple(v.normal) for v in me.verts] + + # quads incur an extra face + quadCount = len([f for f in faces_verts if len(f)==4]) + + file.write('mesh2 {\n') + file.write('\tvertex_vectors {\n') + file.write('\t\t%s' % (len(me.verts))) # vert count + for v in me.verts: + file.write(',\n\t\t<%.6f, %.6f, %.6f>' % tuple(v.co)) # vert count + file.write('\n }\n') + + + # Build unique Normal list + uniqueNormals = {} + for fi, f in enumerate(me.faces): + fv = faces_verts[fi] + # [-1] is a dummy index, use a list so we can modify in place + if f.smooth: # Use vertex normals + for v in fv: + key = verts_normals[v] + uniqueNormals[key] = [-1] + else: # Use face normal + key = faces_normals[fi] + uniqueNormals[key] = [-1] + + file.write('\tnormal_vectors {\n') + file.write('\t\t%d' % len(uniqueNormals)) # vert count + idx = 0 + for no, index in uniqueNormals.items(): + file.write(',\n\t\t<%.6f, %.6f, %.6f>' % no) # vert count + index[0] = idx + idx +=1 + file.write('\n }\n') + + + # Vertex colours + vertCols = {} # Use for material colours also. + + if uv_layer: + # Generate unique UV's + uniqueUVs = {} + + for fi, uv in enumerate(uv_layer): + + if len(faces_verts[fi])==4: + uvs = uv.uv1, uv.uv2, uv.uv3, uv.uv4 + else: + uvs = uv.uv1, uv.uv2, uv.uv3 + + for uv in uvs: + uniqueUVs[tuple(uv)] = [-1] + + file.write('\tuv_vectors {\n') + #print unique_uvs + file.write('\t\t%s' % (len(uniqueUVs))) # vert count + idx = 0 + for uv, index in uniqueUVs.items(): + file.write(',\n\t\t<%.6f, %.6f>' % uv) + index[0] = idx + idx +=1 + ''' + else: + # Just add 1 dummy vector, no real UV's + file.write('\t\t1') # vert count + file.write(',\n\t\t<0.0, 0.0>') + ''' + file.write('\n }\n') + + + if me.vertex_colors: + + for fi, f in enumerate(me.faces): + material_index = f.material_index + material = me_materials[material_index] + + if material and material.vertex_color_paint: + + col = vcol_layer[fi] + + if len(faces_verts[fi])==4: + cols = col.color1, col.color2, col.color3, col.color4 + else: + cols = col.color1, col.color2, col.color3 + + for col in cols: + key = col[0], col[1], col[2], material_index # Material index! 
+ vertCols[key] = [-1] + + else: + if material: + diffuse_color = tuple(material.diffuse_color) + key = diffuse_color[0], diffuse_color[1], diffuse_color[2], material_index + vertCols[key] = [-1] + + + else: + # No vertex colours, so write material colours as vertex colours + for i, material in enumerate(me_materials): + + if material: + diffuse_color = tuple(material.diffuse_color) + key = diffuse_color[0], diffuse_color[1], diffuse_color[2], i # i == f.mat + vertCols[key] = [-1] + + + # Vert Colours + file.write('\ttexture_list {\n') + file.write('\t\t%s' % (len(vertCols))) # vert count + idx=0 + for col, index in vertCols.items(): + + if me_materials: + material = me_materials[col[3]] + material_finish = materialNames[material.name] + + if material.transparency and material.transparency_method=='RAYTRACE': trans = 1-material.raytrace_transparency.filter + else: trans = 0.0 + + else: + material_finish = DEF_MAT_NAME # not working properly, + trans = 0.0 + + #print material.apl + file.write( ',\n\t\ttexture { pigment {rgbft<%.3g, %.3g, %.3g, %.3g, %.3g>} finish {%s}}' % + (col[0], col[1], col[2], 1-material.alpha, trans, material_finish) ) + + index[0] = idx + idx+=1 + + file.write( '\n }\n' ) + + # Face indicies + file.write('\tface_indices {\n') + file.write('\t\t%d' % (len(me.faces) + quadCount)) # faces count + for fi, f in enumerate(me.faces): + fv = faces_verts[fi] + material_index= f.material_index + if len(fv) == 4: indicies = (0,1,2), (0,2,3) + else: indicies = ((0,1,2),) + + if vcol_layer: + col = vcol_layer[fi] + + if len(fv) == 4: + cols = col.color1, col.color2, col.color3, col.color4 + else: + cols = col.color1, col.color2, col.color3 + + + if not me_materials or me_materials[material_index] == None: # No materials + for i1, i2, i3 in indicies: + file.write(',\n\t\t<%d,%d,%d>' % (fv[i1], fv[i2], fv[i3])) # vert count + else: + material = me_materials[material_index] + for i1, i2, i3 in indicies: + if me.vertex_colors and material.vertex_color_paint: + # Colour per vertex - vertex colour + + col1 = cols[i1] + col2 = cols[i2] + col3 = cols[i3] + + ci1 = vertCols[col1[0], col1[1], col1[2], material_index][0] + ci2 = vertCols[col2[0], col2[1], col2[2], material_index][0] + ci3 = vertCols[col3[0], col3[1], col3[2], material_index][0] + else: + # Colour per material - flat material colour + diffuse_color= material.diffuse_color + ci1 = ci2 = ci3 = vertCols[diffuse_color[0], diffuse_color[1], diffuse_color[2], f.material_index][0] + + file.write(',\n\t\t<%d,%d,%d>, %d,%d,%d' % (fv[i1], fv[i2], fv[i3], ci1, ci2, ci3)) # vert count + + + file.write('\n }\n') + + # normal_indices indicies + file.write('\tnormal_indices {\n') + file.write('\t\t%d' % (len(me.faces) + quadCount)) # faces count + for fi, fv in enumerate(faces_verts): + + if len(fv) == 4: indicies = (0,1,2), (0,2,3) + else: indicies = ((0,1,2),) + + for i1, i2, i3 in indicies: + if f.smooth: + file.write(',\n\t\t<%d,%d,%d>' %\ + (uniqueNormals[verts_normals[fv[i1]]][0],\ + uniqueNormals[verts_normals[fv[i2]]][0],\ + uniqueNormals[verts_normals[fv[i3]]][0])) # vert count + else: + idx = uniqueNormals[faces_normals[fi]][0] + file.write(',\n\t\t<%d,%d,%d>' % (idx, idx, idx)) # vert count + + file.write('\n }\n') + + if uv_layer: + file.write('\tuv_indices {\n') + file.write('\t\t%d' % (len(me.faces) + quadCount)) # faces count + for fi, fv in enumerate(faces_verts): + + if len(fv) == 4: indicies = (0,1,2), (0,2,3) + else: indicies = ((0,1,2),) + + uv = uv_layer[fi] + if len(faces_verts[fi])==4: + uvs = tuple(uv.uv1), 
tuple(uv.uv2), tuple(uv.uv3), tuple(uv.uv4) + else: + uvs = tuple(uv.uv1), tuple(uv.uv2), tuple(uv.uv3) + + for i1, i2, i3 in indicies: + file.write(',\n\t\t<%d,%d,%d>' %\ + (uniqueUVs[uvs[i1]][0],\ + uniqueUVs[uvs[i2]][0],\ + uniqueUVs[uvs[i2]][0])) # vert count + file.write('\n }\n') + + if me.materials: + material = me.materials[0] # dodgy + writeObjectMaterial(material) + + writeMatrix(matrix) + file.write('}\n') + + bpy.data.remove_mesh(me) + + def exportWorld(world): + if not world: + return + + mist = world.mist + + if mist.enabled: + file.write('fog {\n') + file.write('\tdistance %.6f\n' % mist.depth) + file.write('\tcolor rgbt<%.3g, %.3g, %.3g, %.3g>\n' % (tuple(world.horizon_color) + (1-mist.intensity,))) + #file.write('\tfog_offset %.6f\n' % mist.start) + #file.write('\tfog_alt 5\n') + #file.write('\tturbulence 0.2\n') + #file.write('\tturb_depth 0.3\n') + file.write('\tfog_type 1\n') + file.write('}\n') + + def exportGlobalSettings(scene): + + file.write('global_settings {\n') + + if scene.pov_radio_enable: + file.write('\tradiosity {\n') + file.write("\t\tadc_bailout %.4g\n" % scene.pov_radio_adc_bailout) + file.write("\t\talways_sample %d\n" % scene.pov_radio_always_sample) + file.write("\t\tbrightness %.4g\n" % scene.pov_radio_brightness) + file.write("\t\tcount %d\n" % scene.pov_radio_count) + file.write("\t\terror_bound %.4g\n" % scene.pov_radio_error_bound) + file.write("\t\tgray_threshold %.4g\n" % scene.pov_radio_gray_threshold) + file.write("\t\tlow_error_factor %.4g\n" % scene.pov_radio_low_error_factor) + file.write("\t\tmedia %d\n" % scene.pov_radio_media) + file.write("\t\tminimum_reuse %.4g\n" % scene.pov_radio_minimum_reuse) + file.write("\t\tnearest_count %d\n" % scene.pov_radio_nearest_count) + file.write("\t\tnormal %d\n" % scene.pov_radio_normal) + file.write("\t\trecursion_limit %d\n" % scene.pov_radio_recursion_limit) + file.write('\t}\n') + + if world: + file.write("\tambient_light rgb<%.3g, %.3g, %.3g>\n" % tuple(world.ambient_color)) + + file.write('}\n') + + + # Convert all materials to strings we can access directly per vertex. + writeMaterial(None) # default material + + for material in bpy.data.materials: + writeMaterial(material) + + exportCamera() + #exportMaterials() + sel = scene.objects + exportLamps([l for l in sel if l.type == 'LAMP']) + exportMeta([l for l in sel if l.type == 'META']) + exportMeshs(sel) + exportWorld(scene.world) + exportGlobalSettings(scene) + + file.close() + +def write_pov_ini(filename_ini, filename_pov, filename_image): + scene = bpy.data.scenes[0] + render = scene.render_data + + x= int(render.resolution_x*render.resolution_percentage*0.01) + y= int(render.resolution_y*render.resolution_percentage*0.01) + + file = open(filename_ini, 'w') + + file.write('Input_File_Name="%s"\n' % filename_pov) + file.write('Output_File_Name="%s"\n' % filename_image) + + file.write('Width=%d\n' % x) + file.write('Height=%d\n' % y) + + # Needed for border render. 
+ ''' + file.write('Start_Column=%d\n' % part.x) + file.write('End_Column=%d\n' % (part.x+part.w)) + + file.write('Start_Row=%d\n' % (part.y)) + file.write('End_Row=%d\n' % (part.y+part.h)) + ''' + + file.write('Display=0\n') + file.write('Pause_When_Done=0\n') + file.write('Output_File_Type=T\n') # TGA, best progressive loading + file.write('Output_Alpha=1\n') + + if render.antialiasing: + aa_mapping = {'OVERSAMPLE_5':2, 'OVERSAMPLE_8':3, 'OVERSAMPLE_11':4, 'OVERSAMPLE_16':5} # method 1 assumed + file.write('Antialias=1\n') + file.write('Antialias_Depth=%d\n' % aa_mapping[render.antialiasing_samples]) + else: + file.write('Antialias=0\n') + + file.close() + +# Radiosity panel, use in the scene for now. +FloatProperty= bpy.types.Scene.FloatProperty +IntProperty= bpy.types.Scene.IntProperty +BoolProperty= bpy.types.Scene.BoolProperty + +# Not a real pov option, just to know if we should write +BoolProperty( attr="pov_radio_enable", + name="Enable Radiosity", + description="Enable povrays radiosity calculation.", + default= False) +BoolProperty( attr="pov_radio_display_advanced", + name="Advanced Options", + description="Show advanced options.", + default= False) + +# Real pov options +FloatProperty( attr="pov_radio_adc_bailout", + name="ADC Bailout", + description="The adc_bailout for radiosity rays. Use adc_bailout = 0.01 / brightest_ambient_object for good results.", + min=0.0, max=1000.0, soft_min=0.0, soft_max=1.0, default= 0.01) + +BoolProperty( attr="pov_radio_always_sample", + name="Always Sample", + description="Only use the data from the pretrace step and not gather any new samples during the final radiosity pass..", + default= True) + +FloatProperty( attr="pov_radio_brightness", + name="Brightness", + description="Ammount objects are brightened before being returned upwards to the rest of the system.", + min=0.0, max=1000.0, soft_min=0.0, soft_max=10.0, default= 1.0) + +IntProperty( attr="pov_radio_count", + name="Ray Count", + description="number of rays that are sent out whenever a new radiosity value has to be calculated.", + min=1, max=1600, default= 35) + +FloatProperty( attr="pov_radio_error_bound", + name="Error Bound", + description="one of the two main speed/quality tuning values, lower values are more accurate.", + min=0.0, max=1000.0, soft_min=0.1, soft_max=10.0, default= 1.8) + +FloatProperty( attr="pov_radio_gray_threshold", + name="Gray Threshold", + description="one of the two main speed/quality tuning values, lower values are more accurate.", + min=0.0, max=1.0, soft_min=0, soft_max=1, default= 0.0) + +FloatProperty( attr="pov_radio_low_error_factor", + name="Low Error Factor", + description="If you calculate just enough samples, but no more, you will get an image which has slightly blotchy lighting.", + min=0.0, max=1.0, soft_min=0.0, soft_max=1.0, default= 0.5) + +# max_sample - not available yet +BoolProperty( attr="pov_radio_media", + name="Media", + description="Radiosity estimation can be affected by media.", + default= False) + +FloatProperty( attr="pov_radio_minimum_reuse", + name="Minimum Reuse", + description="Fraction of the screen width which sets the minimum radius of reuse for each sample point (At values higher than 2% expect errors).", + min=0.0, max=1.0, soft_min=0.1, soft_max=0.1, default= 0.015) + +IntProperty( attr="pov_radio_nearest_count", + name="Nearest Count", + description="Number of old ambient values blended together to create a new interpolated value.", + min=1, max=20, default= 5) + +BoolProperty( attr="pov_radio_normal", + 
name="Normals", + description="Radiosity estimation can be affected by normals.", + default= False) + +IntProperty( attr="pov_radio_recursion_limit", + name="Recursion Limit", + description="how many recursion levels are used to calculate the diffuse inter-reflection.", + min=1, max=20, default= 3) + + +class PovrayRender(bpy.types.RenderEngine): + __idname__ = 'POVRAY_RENDER' + __label__ = "Povray" + DELAY = 0.02 + + def _export(self, scene): + import tempfile + + self.temp_file_in = tempfile.mktemp(suffix='.pov') + self.temp_file_out = tempfile.mktemp(suffix='.tga') + self.temp_file_ini = tempfile.mktemp(suffix='.ini') + ''' + self.temp_file_in = '/test.pov' + self.temp_file_out = '/test.tga' + self.temp_file_ini = '/test.ini' + ''' + + def info_callback(txt): + self.update_stats("", "POVRAY: " + txt) + + write_pov(self.temp_file_in, scene, info_callback) + + def _render(self): + + try: os.remove(self.temp_file_out) # so as not to load the old file + except: pass + + write_pov_ini(self.temp_file_ini, self.temp_file_in, self.temp_file_out) + + print ("***-STARTING-***") + + pov_binary = "povray" + + if sys.platform=='win32': + import winreg + regKey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, 'Software\\POV-Ray\\v3.6\\Windows') + + if bitness == 64: + pov_binary = winreg.QueryValueEx(regKey, 'Home')[0] + '\\bin\\pvengine64' + else: + pov_binary = winreg.QueryValueEx(regKey, 'Home')[0] + '\\bin\\pvengine' + + if 1: + self.process = subprocess.Popen([pov_binary, self.temp_file_ini]) # stdout=subprocess.PIPE, stderr=subprocess.PIPE + else: + # This works too but means we have to wait until its done + os.system('%s %s' % (pov_binary, self.temp_file_ini)) + + print ("***-DONE-***") + + def _cleanup(self): + for f in (self.temp_file_in, self.temp_file_ini, self.temp_file_out): + try: os.remove(f) + except: pass + + self.update_stats("", "") + + def render(self, scene): + + self.update_stats("", "POVRAY: Exporting data from Blender") + self._export(scene) + self.update_stats("", "POVRAY: Parsing File") + self._render() + + r = scene.render_data + + # compute resolution + x= int(r.resolution_x*r.resolution_percentage*0.01) + y= int(r.resolution_y*r.resolution_percentage*0.01) + + # Wait for the file to be created + while not os.path.exists(self.temp_file_out): + if self.test_break(): + try: self.process.terminate() + except: pass + break + + if self.process.poll() != None: + self.update_stats("", "POVRAY: Failed") + break + + time.sleep(self.DELAY) + + if os.path.exists(self.temp_file_out): + + self.update_stats("", "POVRAY: Rendering") + + prev_size = -1 + + def update_image(): + result = self.begin_result(0, 0, x, y) + lay = result.layers[0] + # possible the image wont load early on. + try: lay.load_from_file(self.temp_file_out) + except: pass + self.end_result(result) + + # Update while povray renders + while True: + + # test if povray exists + if self.process.poll() != None: + update_image(); + break + + # user exit + if self.test_break(): + try: self.process.terminate() + except: pass + + break + + # Would be nice to redirect the output + # stdout_value, stderr_value = self.process.communicate() # locks + + + # check if the file updated + new_size = os.path.getsize(self.temp_file_out) + + if new_size != prev_size: + update_image() + prev_size = new_size + + time.sleep(self.DELAY) + + self._cleanup() + +bpy.types.register(PovrayRender) + +# Use some of the existing buttons. 
+import buttons_scene +buttons_scene.SCENE_PT_render.COMPAT_ENGINES.add('POVRAY_RENDER') +buttons_scene.SCENE_PT_dimensions.COMPAT_ENGINES.add('POVRAY_RENDER') +buttons_scene.SCENE_PT_antialiasing.COMPAT_ENGINES.add('POVRAY_RENDER') +buttons_scene.SCENE_PT_output.COMPAT_ENGINES.add('POVRAY_RENDER') +del buttons_scene + +# Use only a subset of the world panels +import buttons_world +buttons_world.WORLD_PT_preview.COMPAT_ENGINES.add('POVRAY_RENDER') +buttons_world.WORLD_PT_context_world.COMPAT_ENGINES.add('POVRAY_RENDER') +buttons_world.WORLD_PT_world.COMPAT_ENGINES.add('POVRAY_RENDER') +buttons_world.WORLD_PT_mist.COMPAT_ENGINES.add('POVRAY_RENDER') +del buttons_world + +# Example of wrapping every class 'as is' +import buttons_material +for member in dir(buttons_material): + subclass = getattr(buttons_material, member) + try: subclass.COMPAT_ENGINES.add('POVRAY_RENDER') + except: pass +del buttons_material + +class RenderButtonsPanel(bpy.types.Panel): + __space_type__ = 'PROPERTIES' + __region_type__ = 'WINDOW' + __context__ = "scene" + # COMPAT_ENGINES must be defined in each subclass, external engines can add themselves here + + def poll(self, context): + rd = context.scene.render_data + return (rd.use_game_engine==False) and (rd.engine in self.COMPAT_ENGINES) + +class SCENE_PT_povray_radiosity(RenderButtonsPanel): + __label__ = "Radiosity" + COMPAT_ENGINES = set(['POVRAY_RENDER']) + + def draw_header(self, context): + scene = context.scene + + self.layout.itemR(scene, "pov_radio_enable", text="") + + def draw(self, context): + layout = self.layout + + scene = context.scene + rd = scene.render_data + + layout.active = scene.pov_radio_enable + + split = layout.split() + + col = split.column() + col.itemR(scene, "pov_radio_count", text="Rays") + col.itemR(scene, "pov_radio_recursion_limit", text="Recursions") + col = split.column() + col.itemR(scene, "pov_radio_error_bound", text="Error") + + layout.itemR(scene, "pov_radio_display_advanced") + + if scene.pov_radio_display_advanced: + split = layout.split() + + col = split.column() + col.itemR(scene, "pov_radio_adc_bailout", slider=True) + col.itemR(scene, "pov_radio_gray_threshold", slider=True) + col.itemR(scene, "pov_radio_low_error_factor", slider=True) + + col = split.column() + col.itemR(scene, "pov_radio_brightness") + col.itemR(scene, "pov_radio_minimum_reuse", text="Min Reuse") + col.itemR(scene, "pov_radio_nearest_count") + + split = layout.split() + + col = split.column() + col.itemL(text="Estimation Influence:") + col.itemR(scene, "pov_radio_media") + col.itemR(scene, "pov_radio_normal") + + col = split.column() + col.itemR(scene, "pov_radio_always_sample") + +bpy.types.register(SCENE_PT_povray_radiosity) diff --git a/release/scripts/io/export_3ds.py b/release/scripts/io/export_3ds.py new file mode 100644 index 00000000000..2c1999c3d45 --- /dev/null +++ b/release/scripts/io/export_3ds.py @@ -0,0 +1,1130 @@ +#!BPY +# coding: utf-8 +""" +Name: '3D Studio (.3ds)...' +Blender: 243 +Group: 'Export' +Tooltip: 'Export to 3DS file format (.3ds).' +""" + +__author__ = ["Campbell Barton", "Bob Holcomb", "Richard Lärkäng", "Damien McGinnes", "Mark Stijnman"] +__url__ = ("blenderartists.org", "www.blender.org", "www.gametutorials.com", "lib3ds.sourceforge.net/") +__version__ = "0.90a" +__bpydoc__ = """\ + +3ds Exporter + +This script Exports a 3ds file. + +Exporting is based on 3ds loader from www.gametutorials.com(Thanks DigiBen) and using information +from the lib3ds project (http://lib3ds.sourceforge.net/) sourcecode. 
+""" + +# ***** BEGIN GPL LICENSE BLOCK ***** +# +# Script copyright (C) Bob Holcomb +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +# +# ***** END GPL LICENCE BLOCK ***** +# -------------------------------------------------------------------------- + + +###################################################### +# Importing modules +###################################################### + +import struct +import os +import time + +import bpy + +# import Blender +# from BPyMesh import getMeshFromObject +# from BPyObject import getDerivedObjects +# try: +# import struct +# except: +# struct = None + +# also used by X3D exporter +# return a tuple (free, object list), free is True if memory should be freed later with free_derived_objects() +def create_derived_objects(ob): + if ob.parent and ob.parent.dupli_type != 'NONE': + return False, None + + if ob.dupli_type != 'NONE': + ob.create_dupli_list() + return True, [(dob.object, dob.matrix) for dob in ob.dupli_list] + else: + return False, [(ob, ob.matrix)] + +# also used by X3D exporter +def free_derived_objects(ob): + ob.free_dupli_list() + +# So 3ds max can open files, limit names to 12 in length +# this is verry annoying for filenames! +name_unique = [] +name_mapping = {} +def sane_name(name): + name_fixed = name_mapping.get(name) + if name_fixed != None: + return name_fixed + + if len(name) > 12: + new_name = name[:12] + else: + new_name = name + + i = 0 + + while new_name in name_unique: + new_name = new_name[:-4] + '.%.3d' % i + i+=1 + + name_unique.append(new_name) + name_mapping[name] = new_name + return new_name + +###################################################### +# Data Structures +###################################################### + +#Some of the chunks that we will export +#----- Primary Chunk, at the beginning of each file +PRIMARY= int("0x4D4D",16) + +#------ Main Chunks +OBJECTINFO = int("0x3D3D",16); #This gives the version of the mesh and is found right before the material and object information +VERSION = int("0x0002",16); #This gives the version of the .3ds file +KFDATA = int("0xB000",16); #This is the header for all of the key frame info + +#------ sub defines of OBJECTINFO +MATERIAL=45055 #0xAFFF // This stored the texture info +OBJECT=16384 #0x4000 // This stores the faces, vertices, etc... + +#>------ sub defines of MATERIAL +MATNAME = int("0xA000",16); # This holds the material name +MATAMBIENT = int("0xA010",16); # Ambient color of the object/material +MATDIFFUSE = int("0xA020",16); # This holds the color of the object/material +MATSPECULAR = int("0xA030",16); # SPecular color of the object/material +MATSHINESS = int("0xA040",16); # ?? 
+MATMAP = int("0xA200",16); # This is a header for a new material +MATMAPFILE = int("0xA300",16); # This holds the file name of the texture + +RGB1= int("0x0011",16) +RGB2= int("0x0012",16) + +#>------ sub defines of OBJECT +OBJECT_MESH = int("0x4100",16); # This lets us know that we are reading a new object +OBJECT_LIGHT = int("0x4600",16); # This lets un know we are reading a light object +OBJECT_CAMERA= int("0x4700",16); # This lets un know we are reading a camera object + +#>------ sub defines of CAMERA +OBJECT_CAM_RANGES= int("0x4720",16); # The camera range values + +#>------ sub defines of OBJECT_MESH +OBJECT_VERTICES = int("0x4110",16); # The objects vertices +OBJECT_FACES = int("0x4120",16); # The objects faces +OBJECT_MATERIAL = int("0x4130",16); # This is found if the object has a material, either texture map or color +OBJECT_UV = int("0x4140",16); # The UV texture coordinates +OBJECT_TRANS_MATRIX = int("0x4160",16); # The Object Matrix + +#>------ sub defines of KFDATA +KFDATA_KFHDR = int("0xB00A",16); +KFDATA_KFSEG = int("0xB008",16); +KFDATA_KFCURTIME = int("0xB009",16); +KFDATA_OBJECT_NODE_TAG = int("0xB002",16); + +#>------ sub defines of OBJECT_NODE_TAG +OBJECT_NODE_ID = int("0xB030",16); +OBJECT_NODE_HDR = int("0xB010",16); +OBJECT_PIVOT = int("0xB013",16); +OBJECT_INSTANCE_NAME = int("0xB011",16); +POS_TRACK_TAG = int("0xB020",16); +ROT_TRACK_TAG = int("0xB021",16); +SCL_TRACK_TAG = int("0xB022",16); + +def uv_key(uv): + return round(uv[0], 6), round(uv[1], 6) +# return round(uv.x, 6), round(uv.y, 6) + +# size defines: +SZ_SHORT = 2 +SZ_INT = 4 +SZ_FLOAT = 4 + +class _3ds_short(object): + '''Class representing a short (2-byte integer) for a 3ds file. + *** This looks like an unsigned short H is unsigned from the struct docs - Cam***''' + __slots__ = 'value' + def __init__(self, val=0): + self.value=val + + def get_size(self): + return SZ_SHORT + + def write(self,file): + file.write(struct.pack("= mat_ls_len: + mat_index = f.mat = 0 + mat = mat_ls[mat_index] + if mat: mat_name = mat.name + else: mat_name = None + # else there alredy set to none + + img = uf.image +# img = f.image + if img: img_name = img.name + else: img_name = None + + materialDict.setdefault((mat_name, img_name), (mat, img) ) + + + else: + for mat in mat_ls: + if mat: # material may be None so check its not. + materialDict.setdefault((mat.name, None), (mat, None) ) + + # Why 0 Why! + for f in data.faces: + if f.material_index >= mat_ls_len: +# if f.mat >= mat_ls_len: + f.material_index = 0 + # f.mat = 0 + + if free: + free_derived_objects(ob) + + + # Make material chunks for all materials used in the meshes: + for mat_and_image in materialDict.values(): + object_info.add_subchunk(make_material_chunk(mat_and_image[0], mat_and_image[1])) + + # Give all objects a unique ID and build a dictionary from object name to object id: + """ + name_to_id = {} + for ob, data in mesh_objects: + name_to_id[ob.name]= len(name_to_id) + #for ob in empty_objects: + # name_to_id[ob.name]= len(name_to_id) + """ + + # Create object chunks for all meshes: + i = 0 + for ob, blender_mesh in mesh_objects: + # create a new object chunk + object_chunk = _3ds_chunk(OBJECT) + + # set the object name + object_chunk.add_variable("name", _3ds_string(sane_name(ob.name))) + + # make a mesh chunk out of the mesh: + object_chunk.add_subchunk(make_mesh_chunk(blender_mesh, materialDict)) + object_info.add_subchunk(object_chunk) + + ''' # COMMENTED OUT FOR 2.42 RELEASE!! 
CRASHES 3DS MAX + # make a kf object node for the object: + kfdata.add_subchunk(make_kf_obj_node(ob, name_to_id)) + ''' +# if not blender_mesh.users: + bpy.data.remove_mesh(blender_mesh) +# blender_mesh.verts = None + + i+=i + + # Create chunks for all empties: + ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX + for ob in empty_objects: + # Empties only require a kf object node: + kfdata.add_subchunk(make_kf_obj_node(ob, name_to_id)) + pass + ''' + + # Add main object info chunk to primary chunk: + primary.add_subchunk(object_info) + + ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX + # Add main keyframe data chunk to primary chunk: + primary.add_subchunk(kfdata) + ''' + + # At this point, the chunk hierarchy is completely built. + + # Check the size: + primary.get_size() + # Open the file for writing: + file = open( filename, 'wb' ) + + # Recursively write the chunks to file: + primary.write(file) + + # Close the file: + file.close() + + # Debugging only: report the exporting time: +# Blender.Window.WaitCursor(0) + print("3ds export time: %.2f" % (time.clock() - time1)) +# print("3ds export time: %.2f" % (Blender.sys.time() - time1)) + + # Debugging only: dump the chunk hierarchy: + #primary.dump() + + +# if __name__=='__main__': +# if struct: +# Blender.Window.FileSelector(save_3ds, "Export 3DS", Blender.sys.makename(ext='.3ds')) +# else: +# Blender.Draw.PupMenu("Error%t|This script requires a full python installation") +# # save_3ds('/test_b.3ds') + +class EXPORT_OT_3ds(bpy.types.Operator): + ''' + 3DS Exporter + ''' + __idname__ = "export.3ds" + __label__ = 'Export 3DS' + + # List of operator properties, the attributes will be assigned + # to the class instance from the operator settings before calling. + + __props__ = [ + # bpy.props.StringProperty(attr="filename", name="File Name", description="File name used for exporting the 3DS file", maxlen= 1024, default= ""), + bpy.props.StringProperty(attr="path", name="File Path", description="File path used for exporting the 3DS file", maxlen= 1024, default= ""), + ] + + def execute(self, context): + save_3ds(self.path, context) + return ('FINISHED',) + + def invoke(self, context, event): + wm = context.manager + wm.add_fileselect(self.__operator__) + return ('RUNNING_MODAL',) + + def poll(self, context): # Poll isnt working yet + print("Poll") + return context.active_object != None + +bpy.ops.add(EXPORT_OT_3ds) diff --git a/release/scripts/io/export_fbx.py b/release/scripts/io/export_fbx.py new file mode 100644 index 00000000000..21b1388ebfe --- /dev/null +++ b/release/scripts/io/export_fbx.py @@ -0,0 +1,3453 @@ +#!BPY +""" +Name: 'Autodesk FBX (.fbx)...' +Blender: 249 +Group: 'Export' +Tooltip: 'Selection to an ASCII Autodesk FBX ' +""" +__author__ = "Campbell Barton" +__url__ = ['www.blender.org', 'blenderartists.org'] +__version__ = "1.2" + +__bpydoc__ = """\ +This script is an exporter to the FBX file format. + +http://wiki.blender.org/index.php/Scripts/Manual/Export/autodesk_fbx +""" +# -------------------------------------------------------------------------- +# FBX Export v0.1 by Campbell Barton (AKA Ideasman) +# -------------------------------------------------------------------------- +# ***** BEGIN GPL LICENSE BLOCK ***** +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +# +# ***** END GPL LICENCE BLOCK ***** +# -------------------------------------------------------------------------- + +import os +import time +import math # math.pi +import shutil # for file copying + +# try: +# import time +# # import os # only needed for batch export, nbot used yet +# except: +# time = None # use this to check if they have python modules installed + +# for python 2.3 support +try: + set() +except: + try: + from sets import Set as set + except: + set = None # so it complains you dont have a ! + +# # os is only needed for batch 'own dir' option +# try: +# import os +# except: +# os = None + +# import Blender +import bpy +import Mathutils +# from Blender.Mathutils import Matrix, Vector, RotationMatrix + +# import BPyObject +# import BPyMesh +# import BPySys +# import BPyMessages + +## This was used to make V, but faster not to do all that +##valid = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_,.()[]{}' +##v = range(255) +##for c in valid: v.remove(ord(c)) +v = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,42,43,47,58,59,60,61,62,63,64,92,94,96,124,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254] +invalid = ''.join([chr(i) for i in v]) +def cleanName(name): + for ch in invalid: name = name.replace(ch, '_') + return name +# del v, i + + +def copy_file(source, dest): + file = open(source, 'rb') + data = file.read() + file.close() + + file = open(dest, 'wb') + file.write(data) + file.close() + + +# XXX not used anymore, images are copied one at a time +def copy_images(dest_dir, textures): + if not dest_dir.endswith(os.sep): + dest_dir += os.sep + + image_paths = set() + for tex in textures: + image_paths.add(Blender.sys.expandpath(tex.filename)) + + # Now copy images + copyCount = 0 + for image_path in image_paths: + if Blender.sys.exists(image_path): + # Make a name for the target path. + dest_image_path = dest_dir + image_path.split('\\')[-1].split('/')[-1] + if not Blender.sys.exists(dest_image_path): # Image isnt alredy there + print('\tCopying "%s" > "%s"' % (image_path, dest_image_path)) + try: + copy_file(image_path, dest_image_path) + copyCount+=1 + except: + print('\t\tWarning, file failed to copy, skipping.') + + print('\tCopied %d images' % copyCount) + +# I guess FBX uses degrees instead of radians (Arystan). +# Call this function just before writing to FBX. 
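# The conversion below is simply deg = rad * 180/pi, applied per axis, so the
# values written into the FBX "Lcl Rotation" property are in degrees (as the
# note above guesses). For a single component this is equivalent to
# math.degrees(), e.g.:
#
#   math.degrees(math.pi / 2) == 90.0 == 180 / math.pi * (math.pi / 2)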
+def eulerRadToDeg(eul): + ret = Mathutils.Euler() + + ret.x = 180 / math.pi * eul[0] + ret.y = 180 / math.pi * eul[1] + ret.z = 180 / math.pi * eul[2] + + return ret + +mtx4_identity = Mathutils.Matrix() + +# testing +mtx_x90 = Mathutils.RotationMatrix( math.pi/2, 3, 'x') # used +#mtx_x90n = RotationMatrix(-90, 3, 'x') +#mtx_y90 = RotationMatrix( 90, 3, 'y') +#mtx_y90n = RotationMatrix(-90, 3, 'y') +#mtx_z90 = RotationMatrix( 90, 3, 'z') +#mtx_z90n = RotationMatrix(-90, 3, 'z') + +#mtx4_x90 = RotationMatrix( 90, 4, 'x') +mtx4_x90n = Mathutils.RotationMatrix(-math.pi/2, 4, 'x') # used +#mtx4_y90 = RotationMatrix( 90, 4, 'y') +mtx4_y90n = Mathutils.RotationMatrix(-math.pi/2, 4, 'y') # used +mtx4_z90 = Mathutils.RotationMatrix( math.pi/2, 4, 'z') # used +mtx4_z90n = Mathutils.RotationMatrix(-math.pi/2, 4, 'z') # used + +# def strip_path(p): +# return p.split('\\')[-1].split('/')[-1] + +# Used to add the scene name into the filename without using odd chars +sane_name_mapping_ob = {} +sane_name_mapping_mat = {} +sane_name_mapping_tex = {} +sane_name_mapping_take = {} +sane_name_mapping_group = {} + +# Make sure reserved names are not used +sane_name_mapping_ob['Scene'] = 'Scene_' +sane_name_mapping_ob['blend_root'] = 'blend_root_' + +def increment_string(t): + name = t + num = '' + while name and name[-1].isdigit(): + num = name[-1] + num + name = name[:-1] + if num: return '%s%d' % (name, int(num)+1) + else: return name + '_0' + + + +# todo - Disallow the name 'Scene' and 'blend_root' - it will bugger things up. +def sane_name(data, dct): + #if not data: return None + + if type(data)==tuple: # materials are paired up with images + data, other = data + use_other = True + else: + other = None + use_other = False + + if data: name = data.name + else: name = None + orig_name = name + + if other: + orig_name_other = other.name + name = '%s #%s' % (name, orig_name_other) + else: + orig_name_other = None + + # dont cache, only ever call once for each data type now, + # so as to avoid namespace collision between types - like with objects <-> bones + #try: return dct[name] + #except: pass + + if not name: + name = 'unnamed' # blank string, ASKING FOR TROUBLE! + else: + #name = BPySys.cleanName(name) + name = cleanName(name) # use our own + + while name in iter(dct.values()): name = increment_string(name) + + if use_other: # even if other is None - orig_name_other will be a string or None + dct[orig_name, orig_name_other] = name + else: + dct[orig_name] = name + + return name + +def sane_obname(data): return sane_name(data, sane_name_mapping_ob) +def sane_matname(data): return sane_name(data, sane_name_mapping_mat) +def sane_texname(data): return sane_name(data, sane_name_mapping_tex) +def sane_takename(data): return sane_name(data, sane_name_mapping_take) +def sane_groupname(data): return sane_name(data, sane_name_mapping_group) + +# def derived_paths(fname_orig, basepath, FORCE_CWD=False): +# ''' +# fname_orig - blender path, can be relative +# basepath - fname_rel will be relative to this +# FORCE_CWD - dont use the basepath, just add a ./ to the filename. +# use when we know the file will be in the basepath. +# ''' +# fname = bpy.sys.expandpath(fname_orig) +# # fname = Blender.sys.expandpath(fname_orig) +# fname_strip = os.path.basename(fname) +# # fname_strip = strip_path(fname) +# if FORCE_CWD: +# fname_rel = '.' + os.sep + fname_strip +# else: +# fname_rel = bpy.sys.relpath(fname, basepath) +# # fname_rel = Blender.sys.relpath(fname, basepath) +# if fname_rel.startswith('//'): fname_rel = '.' 
+ os.sep + fname_rel[2:] +# return fname, fname_strip, fname_rel + + +def mat4x4str(mat): + return '%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f' % tuple([ f for v in mat for f in v ]) + +# XXX not used +# duplicated in OBJ exporter +def getVertsFromGroup(me, group_index): + ret = [] + + for i, v in enumerate(me.verts): + for g in v.groups: + if g.group == group_index: + ret.append((i, g.weight)) + + return ret + +# ob must be OB_MESH +def BPyMesh_meshWeight2List(ob): + ''' Takes a mesh and return its group names and a list of lists, one list per vertex. + aligning the each vert list with the group names, each list contains float value for the weight. + These 2 lists can be modified and then used with list2MeshWeight to apply the changes. + ''' + + me = ob.data + + # Clear the vert group. + groupNames= [g.name for g in ob.vertex_groups] + len_groupNames= len(groupNames) + + if not len_groupNames: + # no verts? return a vert aligned empty list + return [[] for i in range(len(me.verts))], [] + else: + vWeightList= [[0.0]*len_groupNames for i in range(len(me.verts))] + + for i, v in enumerate(me.verts): + for g in v.groups: + vWeightList[i][g.group] = g.weight + + return groupNames, vWeightList + +def meshNormalizedWeights(me): + try: # account for old bad BPyMesh + groupNames, vWeightList = BPyMesh_meshWeight2List(me) +# groupNames, vWeightList = BPyMesh.meshWeight2List(me) + except: + return [],[] + + if not groupNames: + return [],[] + + for i, vWeights in enumerate(vWeightList): + tot = 0.0 + for w in vWeights: + tot+=w + + if tot: + for j, w in enumerate(vWeights): + vWeights[j] = w/tot + + return groupNames, vWeightList + +header_comment = \ +'''; FBX 6.1.0 project file +; Created by Blender FBX Exporter +; for support mail: ideasman42@gmail.com +; ---------------------------------------------------- + +''' + +# This func can be called with just the filename +def write(filename, batch_objects = None, \ + context = None, + EXP_OBS_SELECTED = True, + EXP_MESH = True, + EXP_MESH_APPLY_MOD = True, +# EXP_MESH_HQ_NORMALS = False, + EXP_ARMATURE = True, + EXP_LAMP = True, + EXP_CAMERA = True, + EXP_EMPTY = True, + EXP_IMAGE_COPY = False, + GLOBAL_MATRIX = Mathutils.Matrix(), + ANIM_ENABLE = True, + ANIM_OPTIMIZE = True, + ANIM_OPTIMIZE_PRECISSION = 6, + ANIM_ACTION_ALL = False, + BATCH_ENABLE = False, + BATCH_GROUP = True, + BATCH_FILE_PREFIX = '', + BATCH_OWN_DIR = False + ): + + # ----------------- Batch support! 
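# When BATCH_ENABLE is set, `filename` is treated as an output directory and
# write() calls itself once per scene (or per group, when BATCH_GROUP is set),
# passing EXP_OBS_SELECTED=False and leaving the batch options at their
# defaults, so the recursive call falls through to the normal single-file
# export further below. A minimal single-file invocation, with an illustrative
# path only, might look like:
#
#   write('/tmp/untitled.fbx', context=bpy.context, EXP_OBS_SELECTED=False)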
+ if BATCH_ENABLE: + if os == None: BATCH_OWN_DIR = False + + fbxpath = filename + + # get the path component of filename + tmp_exists = bpy.sys.exists(fbxpath) +# tmp_exists = Blender.sys.exists(fbxpath) + + if tmp_exists != 2: # a file, we want a path + fbxpath = os.path.dirname(fbxpath) +# while fbxpath and fbxpath[-1] not in ('/', '\\'): +# fbxpath = fbxpath[:-1] + if not fbxpath: +# if not filename: + # XXX + print('Error%t|Directory does not exist!') +# Draw.PupMenu('Error%t|Directory does not exist!') + return + + tmp_exists = bpy.sys.exists(fbxpath) +# tmp_exists = Blender.sys.exists(fbxpath) + + if tmp_exists != 2: + # XXX + print('Error%t|Directory does not exist!') +# Draw.PupMenu('Error%t|Directory does not exist!') + return + + if not fbxpath.endswith(os.sep): + fbxpath += os.sep + del tmp_exists + + + if BATCH_GROUP: + data_seq = bpy.data.groups + else: + data_seq = bpy.data.scenes + + # call this function within a loop with BATCH_ENABLE == False + orig_sce = context.scene +# orig_sce = bpy.data.scenes.active + + + new_fbxpath = fbxpath # own dir option modifies, we need to keep an original + for data in data_seq: # scene or group + newname = BATCH_FILE_PREFIX + cleanName(data.name) +# newname = BATCH_FILE_PREFIX + BPySys.cleanName(data.name) + + + if BATCH_OWN_DIR: + new_fbxpath = fbxpath + newname + os.sep + # path may alredy exist + # TODO - might exist but be a file. unlikely but should probably account for it. + + if bpy.sys.exists(new_fbxpath) == 0: +# if Blender.sys.exists(new_fbxpath) == 0: + os.mkdir(new_fbxpath) + + + filename = new_fbxpath + newname + '.fbx' + + print('\nBatch exporting %s as...\n\t"%s"' % (data, filename)) + + # XXX don't know what to do with this, probably do the same? (Arystan) + if BATCH_GROUP: #group + # group, so objects update properly, add a dummy scene. + sce = bpy.data.scenes.new() + sce.Layers = (1<<20) -1 + bpy.data.scenes.active = sce + for ob_base in data.objects: + sce.objects.link(ob_base) + + sce.update(1) + + # TODO - BUMMER! Armatures not in the group wont animate the mesh + + else:# scene + + + data_seq.active = data + + + # Call self with modified args + # Dont pass batch options since we alredy usedt them + write(filename, data.objects, + context, + False, + EXP_MESH, + EXP_MESH_APPLY_MOD, +# EXP_MESH_HQ_NORMALS, + EXP_ARMATURE, + EXP_LAMP, + EXP_CAMERA, + EXP_EMPTY, + EXP_IMAGE_COPY, + GLOBAL_MATRIX, + ANIM_ENABLE, + ANIM_OPTIMIZE, + ANIM_OPTIMIZE_PRECISSION, + ANIM_ACTION_ALL + ) + + if BATCH_GROUP: + # remove temp group scene + bpy.data.remove_scene(sce) +# bpy.data.scenes.unlink(sce) + + bpy.data.scenes.active = orig_sce + + return # so the script wont run after we have batch exported. + + # end batch support + + # Use this for working out paths relative to the export location + basepath = os.path.dirname(filename) or '.' 
+ basepath += os.sep +# basepath = Blender.sys.dirname(filename) + + # ---------------------------------------------- + # storage classes + class my_bone_class: + __slots__ =(\ + 'blenName',\ + 'blenBone',\ + 'blenMeshes',\ + 'restMatrix',\ + 'parent',\ + 'blenName',\ + 'fbxName',\ + 'fbxArm',\ + '__pose_bone',\ + '__anim_poselist') + + def __init__(self, blenBone, fbxArm): + + # This is so 2 armatures dont have naming conflicts since FBX bones use object namespace + self.fbxName = sane_obname(blenBone) + + self.blenName = blenBone.name + self.blenBone = blenBone + self.blenMeshes = {} # fbxMeshObName : mesh + self.fbxArm = fbxArm + self.restMatrix = blenBone.armature_matrix +# self.restMatrix = blenBone.matrix['ARMATURESPACE'] + + # not used yet + # self.restMatrixInv = self.restMatrix.copy().invert() + # self.restMatrixLocal = None # set later, need parent matrix + + self.parent = None + + # not public + pose = fbxArm.blenObject.pose +# pose = fbxArm.blenObject.getPose() + self.__pose_bone = pose.pose_channels[self.blenName] +# self.__pose_bone = pose.bones[self.blenName] + + # store a list if matricies here, (poseMatrix, head, tail) + # {frame:posematrix, frame:posematrix, ...} + self.__anim_poselist = {} + + ''' + def calcRestMatrixLocal(self): + if self.parent: + self.restMatrixLocal = self.restMatrix * self.parent.restMatrix.copy().invert() + else: + self.restMatrixLocal = self.restMatrix.copy() + ''' + def setPoseFrame(self, f): + # cache pose info here, frame must be set beforehand + + # Didnt end up needing head or tail, if we do - here it is. + ''' + self.__anim_poselist[f] = (\ + self.__pose_bone.poseMatrix.copy(),\ + self.__pose_bone.head.copy(),\ + self.__pose_bone.tail.copy() ) + ''' + + self.__anim_poselist[f] = self.__pose_bone.pose_matrix.copy() +# self.__anim_poselist[f] = self.__pose_bone.poseMatrix.copy() + + # get pose from frame. + def getPoseMatrix(self, f):# ---------------------------------------------- + return self.__anim_poselist[f] + ''' + def getPoseHead(self, f): + #return self.__pose_bone.head.copy() + return self.__anim_poselist[f][1].copy() + def getPoseTail(self, f): + #return self.__pose_bone.tail.copy() + return self.__anim_poselist[f][2].copy() + ''' + # end + + def getAnimParRelMatrix(self, frame): + #arm_mat = self.fbxArm.matrixWorld + #arm_mat = self.fbxArm.parRelMatrix() + if not self.parent: + #return mtx4_z90 * (self.getPoseMatrix(frame) * arm_mat) # dont apply arm matrix anymore + return mtx4_z90 * self.getPoseMatrix(frame) + else: + #return (mtx4_z90 * ((self.getPoseMatrix(frame) * arm_mat))) * (mtx4_z90 * (self.parent.getPoseMatrix(frame) * arm_mat)).invert() + return (mtx4_z90 * (self.getPoseMatrix(frame))) * (mtx4_z90 * self.parent.getPoseMatrix(frame)).invert() + + # we need thes because cameras and lights modified rotations + def getAnimParRelMatrixRot(self, frame): + return self.getAnimParRelMatrix(frame) + + def flushAnimData(self): + self.__anim_poselist.clear() + + + class my_object_generic: + # Other settings can be applied for each type - mesh, armature etc. + def __init__(self, ob, matrixWorld = None): + self.fbxName = sane_obname(ob) + self.blenObject = ob + self.fbxGroupNames = [] + self.fbxParent = None # set later on IF the parent is in the selection. 
+ if matrixWorld: self.matrixWorld = matrixWorld * GLOBAL_MATRIX + else: self.matrixWorld = ob.matrix * GLOBAL_MATRIX +# else: self.matrixWorld = ob.matrixWorld * GLOBAL_MATRIX + self.__anim_poselist = {} # we should only access this + + def parRelMatrix(self): + if self.fbxParent: + return self.matrixWorld * self.fbxParent.matrixWorld.copy().invert() + else: + return self.matrixWorld + + def setPoseFrame(self, f): + self.__anim_poselist[f] = self.blenObject.matrix.copy() +# self.__anim_poselist[f] = self.blenObject.matrixWorld.copy() + + def getAnimParRelMatrix(self, frame): + if self.fbxParent: + #return (self.__anim_poselist[frame] * self.fbxParent.__anim_poselist[frame].copy().invert() ) * GLOBAL_MATRIX + return (self.__anim_poselist[frame] * GLOBAL_MATRIX) * (self.fbxParent.__anim_poselist[frame] * GLOBAL_MATRIX).invert() + else: + return self.__anim_poselist[frame] * GLOBAL_MATRIX + + def getAnimParRelMatrixRot(self, frame): + type = self.blenObject.type + if self.fbxParent: + matrix_rot = (((self.__anim_poselist[frame] * GLOBAL_MATRIX) * (self.fbxParent.__anim_poselist[frame] * GLOBAL_MATRIX).invert())).rotationPart() + else: + matrix_rot = (self.__anim_poselist[frame] * GLOBAL_MATRIX).rotationPart() + + # Lamps need to be rotated + if type =='LAMP': + matrix_rot = mtx_x90 * matrix_rot + elif type =='CAMERA': +# elif ob and type =='Camera': + y = Mathutils.Vector(0,1,0) * matrix_rot + matrix_rot = matrix_rot * Mathutils.RotationMatrix(math.pi/2, 3, 'r', y) + + return matrix_rot + + # ---------------------------------------------- + + + + + + print('\nFBX export starting...', filename) + start_time = time.clock() +# start_time = Blender.sys.time() + try: + file = open(filename, 'w') + except: + return False + + sce = context.scene +# sce = bpy.data.scenes.active + world = sce.world + + + # ---------------------------- Write the header first + file.write(header_comment) + if time: + curtime = time.localtime()[0:6] + else: + curtime = (0,0,0,0,0,0) + # + file.write(\ +'''FBXHeaderExtension: { + FBXHeaderVersion: 1003 + FBXVersion: 6100 + CreationTimeStamp: { + Version: 1000 + Year: %.4i + Month: %.2i + Day: %.2i + Hour: %.2i + Minute: %.2i + Second: %.2i + Millisecond: 0 + } + Creator: "FBX SDK/FBX Plugins build 20070228" + OtherFlags: { + FlagPLE: 0 + } +}''' % (curtime)) + + file.write('\nCreationTime: "%.4i-%.2i-%.2i %.2i:%.2i:%.2i:000"' % curtime) + file.write('\nCreator: "Blender3D version 2.5"') +# file.write('\nCreator: "Blender3D version %.2f"' % Blender.Get('version')) + + pose_items = [] # list of (fbxName, matrix) to write pose data for, easier to collect allong the way + + # --------------- funcs for exporting + def object_tx(ob, loc, matrix, matrix_mod = None): + ''' + Matrix mod is so armature objects can modify their bone matricies + ''' + if isinstance(ob, bpy.types.Bone): +# if isinstance(ob, Blender.Types.BoneType): + + # we know we have a matrix + # matrix = mtx4_z90 * (ob.matrix['ARMATURESPACE'] * matrix_mod) + matrix = mtx4_z90 * ob.armature_matrix # dont apply armature matrix anymore +# matrix = mtx4_z90 * ob.matrix['ARMATURESPACE'] # dont apply armature matrix anymore + + parent = ob.parent + if parent: + #par_matrix = mtx4_z90 * (parent.matrix['ARMATURESPACE'] * matrix_mod) + par_matrix = mtx4_z90 * parent.armature_matrix # dont apply armature matrix anymore +# par_matrix = mtx4_z90 * parent.matrix['ARMATURESPACE'] # dont apply armature matrix anymore + matrix = matrix * par_matrix.copy().invert() + + matrix_rot = matrix.rotationPart() + + loc = 
tuple(matrix.translationPart()) + scale = tuple(matrix.scalePart()) + rot = tuple(matrix_rot.toEuler()) + + else: + # This is bad because we need the parent relative matrix from the fbx parent (if we have one), dont use anymore + #if ob and not matrix: matrix = ob.matrixWorld * GLOBAL_MATRIX + if ob and not matrix: raise Exception("error: this should never happen!") + + matrix_rot = matrix + #if matrix: + # matrix = matrix_scale * matrix + + if matrix: + loc = tuple(matrix.translationPart()) + scale = tuple(matrix.scalePart()) + + matrix_rot = matrix.rotationPart() + # Lamps need to be rotated + if ob and ob.type =='Lamp': + matrix_rot = mtx_x90 * matrix_rot + rot = tuple(matrix_rot.toEuler()) + elif ob and ob.type =='Camera': + y = Mathutils.Vector(0,1,0) * matrix_rot + matrix_rot = matrix_rot * Mathutils.RotationMatrix(math.pi/2, 3, 'r', y) + rot = tuple(matrix_rot.toEuler()) + else: + rot = tuple(matrix_rot.toEuler()) + else: + if not loc: + loc = 0,0,0 + scale = 1,1,1 + rot = 0,0,0 + + return loc, rot, scale, matrix, matrix_rot + + def write_object_tx(ob, loc, matrix, matrix_mod= None): + ''' + We have loc to set the location if non blender objects that have a location + + matrix_mod is only used for bones at the moment + ''' + loc, rot, scale, matrix, matrix_rot = object_tx(ob, loc, matrix, matrix_mod) + + file.write('\n\t\t\tProperty: "Lcl Translation", "Lcl Translation", "A+",%.15f,%.15f,%.15f' % loc) + file.write('\n\t\t\tProperty: "Lcl Rotation", "Lcl Rotation", "A+",%.15f,%.15f,%.15f' % tuple(eulerRadToDeg(rot))) +# file.write('\n\t\t\tProperty: "Lcl Rotation", "Lcl Rotation", "A+",%.15f,%.15f,%.15f' % rot) + file.write('\n\t\t\tProperty: "Lcl Scaling", "Lcl Scaling", "A+",%.15f,%.15f,%.15f' % scale) + return loc, rot, scale, matrix, matrix_rot + + def write_object_props(ob=None, loc=None, matrix=None, matrix_mod=None): + # if the type is 0 its an empty otherwise its a mesh + # only difference at the moment is one has a color + file.write(''' + Properties60: { + Property: "QuaternionInterpolate", "bool", "",0 + Property: "Visibility", "Visibility", "A+",1''') + + loc, rot, scale, matrix, matrix_rot = write_object_tx(ob, loc, matrix, matrix_mod) + + # Rotation order, note, for FBX files Iv loaded normal order is 1 + # setting to zero. 
+ # eEULER_XYZ = 0 + # eEULER_XZY + # eEULER_YZX + # eEULER_YXZ + # eEULER_ZXY + # eEULER_ZYX + + file.write(''' + Property: "RotationOffset", "Vector3D", "",0,0,0 + Property: "RotationPivot", "Vector3D", "",0,0,0 + Property: "ScalingOffset", "Vector3D", "",0,0,0 + Property: "ScalingPivot", "Vector3D", "",0,0,0 + Property: "TranslationActive", "bool", "",0 + Property: "TranslationMin", "Vector3D", "",0,0,0 + Property: "TranslationMax", "Vector3D", "",0,0,0 + Property: "TranslationMinX", "bool", "",0 + Property: "TranslationMinY", "bool", "",0 + Property: "TranslationMinZ", "bool", "",0 + Property: "TranslationMaxX", "bool", "",0 + Property: "TranslationMaxY", "bool", "",0 + Property: "TranslationMaxZ", "bool", "",0 + Property: "RotationOrder", "enum", "",0 + Property: "RotationSpaceForLimitOnly", "bool", "",0 + Property: "AxisLen", "double", "",10 + Property: "PreRotation", "Vector3D", "",0,0,0 + Property: "PostRotation", "Vector3D", "",0,0,0 + Property: "RotationActive", "bool", "",0 + Property: "RotationMin", "Vector3D", "",0,0,0 + Property: "RotationMax", "Vector3D", "",0,0,0 + Property: "RotationMinX", "bool", "",0 + Property: "RotationMinY", "bool", "",0 + Property: "RotationMinZ", "bool", "",0 + Property: "RotationMaxX", "bool", "",0 + Property: "RotationMaxY", "bool", "",0 + Property: "RotationMaxZ", "bool", "",0 + Property: "RotationStiffnessX", "double", "",0 + Property: "RotationStiffnessY", "double", "",0 + Property: "RotationStiffnessZ", "double", "",0 + Property: "MinDampRangeX", "double", "",0 + Property: "MinDampRangeY", "double", "",0 + Property: "MinDampRangeZ", "double", "",0 + Property: "MaxDampRangeX", "double", "",0 + Property: "MaxDampRangeY", "double", "",0 + Property: "MaxDampRangeZ", "double", "",0 + Property: "MinDampStrengthX", "double", "",0 + Property: "MinDampStrengthY", "double", "",0 + Property: "MinDampStrengthZ", "double", "",0 + Property: "MaxDampStrengthX", "double", "",0 + Property: "MaxDampStrengthY", "double", "",0 + Property: "MaxDampStrengthZ", "double", "",0 + Property: "PreferedAngleX", "double", "",0 + Property: "PreferedAngleY", "double", "",0 + Property: "PreferedAngleZ", "double", "",0 + Property: "InheritType", "enum", "",0 + Property: "ScalingActive", "bool", "",0 + Property: "ScalingMin", "Vector3D", "",1,1,1 + Property: "ScalingMax", "Vector3D", "",1,1,1 + Property: "ScalingMinX", "bool", "",0 + Property: "ScalingMinY", "bool", "",0 + Property: "ScalingMinZ", "bool", "",0 + Property: "ScalingMaxX", "bool", "",0 + Property: "ScalingMaxY", "bool", "",0 + Property: "ScalingMaxZ", "bool", "",0 + Property: "GeometricTranslation", "Vector3D", "",0,0,0 + Property: "GeometricRotation", "Vector3D", "",0,0,0 + Property: "GeometricScaling", "Vector3D", "",1,1,1 + Property: "LookAtProperty", "object", "" + Property: "UpVectorProperty", "object", "" + Property: "Show", "bool", "",1 + Property: "NegativePercentShapeSupport", "bool", "",1 + Property: "DefaultAttributeIndex", "int", "",0''') + if ob and not isinstance(ob, bpy.types.Bone): +# if ob and type(ob) != Blender.Types.BoneType: + # Only mesh objects have color + file.write('\n\t\t\tProperty: "Color", "Color", "A",0.8,0.8,0.8') + file.write('\n\t\t\tProperty: "Size", "double", "",100') + file.write('\n\t\t\tProperty: "Look", "enum", "",1') + + return loc, rot, scale, matrix, matrix_rot + + + # -------------------------------------------- Armatures + #def write_bone(bone, name, matrix_mod): + def write_bone(my_bone): + file.write('\n\tModel: "Model::%s", "Limb" {' % my_bone.fbxName) + 
file.write('\n\t\tVersion: 232') + + #poseMatrix = write_object_props(my_bone.blenBone, None, None, my_bone.fbxArm.parRelMatrix())[3] + poseMatrix = write_object_props(my_bone.blenBone)[3] # dont apply bone matricies anymore + pose_items.append( (my_bone.fbxName, poseMatrix) ) + + + # file.write('\n\t\t\tProperty: "Size", "double", "",%.6f' % ((my_bone.blenData.head['ARMATURESPACE'] - my_bone.blenData.tail['ARMATURESPACE']) * my_bone.fbxArm.parRelMatrix()).length) + file.write('\n\t\t\tProperty: "Size", "double", "",1') + + #((my_bone.blenData.head['ARMATURESPACE'] * my_bone.fbxArm.matrixWorld) - (my_bone.blenData.tail['ARMATURESPACE'] * my_bone.fbxArm.parRelMatrix())).length) + + """ + file.write('\n\t\t\tProperty: "LimbLength", "double", "",%.6f' %\ + ((my_bone.blenBone.head['ARMATURESPACE'] - my_bone.blenBone.tail['ARMATURESPACE']) * my_bone.fbxArm.parRelMatrix()).length) + """ + + file.write('\n\t\t\tProperty: "LimbLength", "double", "",%.6f' % + (my_bone.blenBone.armature_head - my_bone.blenBone.armature_tail).length) +# (my_bone.blenBone.head['ARMATURESPACE'] - my_bone.blenBone.tail['ARMATURESPACE']).length) + + #file.write('\n\t\t\tProperty: "LimbLength", "double", "",1') + file.write('\n\t\t\tProperty: "Color", "ColorRGB", "",0.8,0.8,0.8') + file.write('\n\t\t\tProperty: "Color", "Color", "A",0.8,0.8,0.8') + file.write('\n\t\t}') + file.write('\n\t\tMultiLayer: 0') + file.write('\n\t\tMultiTake: 1') + file.write('\n\t\tShading: Y') + file.write('\n\t\tCulling: "CullingOff"') + file.write('\n\t\tTypeFlags: "Skeleton"') + file.write('\n\t}') + + def write_camera_switch(): + file.write(''' + Model: "Model::Camera Switcher", "CameraSwitcher" { + Version: 232''') + + write_object_props() + file.write(''' + Property: "Color", "Color", "A",0.8,0.8,0.8 + Property: "Camera Index", "Integer", "A+",100 + } + MultiLayer: 0 + MultiTake: 1 + Hidden: "True" + Shading: W + Culling: "CullingOff" + Version: 101 + Name: "Model::Camera Switcher" + CameraId: 0 + CameraName: 100 + CameraIndexName: + }''') + + def write_camera_dummy(name, loc, near, far, proj_type, up): + file.write('\n\tModel: "Model::%s", "Camera" {' % name ) + file.write('\n\t\tVersion: 232') + write_object_props(None, loc) + + file.write('\n\t\t\tProperty: "Color", "Color", "A",0.8,0.8,0.8') + file.write('\n\t\t\tProperty: "Roll", "Roll", "A+",0') + file.write('\n\t\t\tProperty: "FieldOfView", "FieldOfView", "A+",40') + file.write('\n\t\t\tProperty: "FieldOfViewX", "FieldOfView", "A+",1') + file.write('\n\t\t\tProperty: "FieldOfViewY", "FieldOfView", "A+",1') + file.write('\n\t\t\tProperty: "OpticalCenterX", "Real", "A+",0') + file.write('\n\t\t\tProperty: "OpticalCenterY", "Real", "A+",0') + file.write('\n\t\t\tProperty: "BackgroundColor", "Color", "A+",0.63,0.63,0.63') + file.write('\n\t\t\tProperty: "TurnTable", "Real", "A+",0') + file.write('\n\t\t\tProperty: "DisplayTurnTableIcon", "bool", "",1') + file.write('\n\t\t\tProperty: "Motion Blur Intensity", "Real", "A+",1') + file.write('\n\t\t\tProperty: "UseMotionBlur", "bool", "",0') + file.write('\n\t\t\tProperty: "UseRealTimeMotionBlur", "bool", "",1') + file.write('\n\t\t\tProperty: "ResolutionMode", "enum", "",0') + file.write('\n\t\t\tProperty: "ApertureMode", "enum", "",2') + file.write('\n\t\t\tProperty: "GateFit", "enum", "",0') + file.write('\n\t\t\tProperty: "FocalLength", "Real", "A+",21.3544940948486') + file.write('\n\t\t\tProperty: "CameraFormat", "enum", "",0') + file.write('\n\t\t\tProperty: "AspectW", "double", "",320') + file.write('\n\t\t\tProperty: "AspectH", 
"double", "",200') + file.write('\n\t\t\tProperty: "PixelAspectRatio", "double", "",1') + file.write('\n\t\t\tProperty: "UseFrameColor", "bool", "",0') + file.write('\n\t\t\tProperty: "FrameColor", "ColorRGB", "",0.3,0.3,0.3') + file.write('\n\t\t\tProperty: "ShowName", "bool", "",1') + file.write('\n\t\t\tProperty: "ShowGrid", "bool", "",1') + file.write('\n\t\t\tProperty: "ShowOpticalCenter", "bool", "",0') + file.write('\n\t\t\tProperty: "ShowAzimut", "bool", "",1') + file.write('\n\t\t\tProperty: "ShowTimeCode", "bool", "",0') + file.write('\n\t\t\tProperty: "NearPlane", "double", "",%.6f' % near) + file.write('\n\t\t\tProperty: "FarPlane", "double", "",%.6f' % far) + file.write('\n\t\t\tProperty: "FilmWidth", "double", "",0.816') + file.write('\n\t\t\tProperty: "FilmHeight", "double", "",0.612') + file.write('\n\t\t\tProperty: "FilmAspectRatio", "double", "",1.33333333333333') + file.write('\n\t\t\tProperty: "FilmSqueezeRatio", "double", "",1') + file.write('\n\t\t\tProperty: "FilmFormatIndex", "enum", "",4') + file.write('\n\t\t\tProperty: "ViewFrustum", "bool", "",1') + file.write('\n\t\t\tProperty: "ViewFrustumNearFarPlane", "bool", "",0') + file.write('\n\t\t\tProperty: "ViewFrustumBackPlaneMode", "enum", "",2') + file.write('\n\t\t\tProperty: "BackPlaneDistance", "double", "",100') + file.write('\n\t\t\tProperty: "BackPlaneDistanceMode", "enum", "",0') + file.write('\n\t\t\tProperty: "ViewCameraToLookAt", "bool", "",1') + file.write('\n\t\t\tProperty: "LockMode", "bool", "",0') + file.write('\n\t\t\tProperty: "LockInterestNavigation", "bool", "",0') + file.write('\n\t\t\tProperty: "FitImage", "bool", "",0') + file.write('\n\t\t\tProperty: "Crop", "bool", "",0') + file.write('\n\t\t\tProperty: "Center", "bool", "",1') + file.write('\n\t\t\tProperty: "KeepRatio", "bool", "",1') + file.write('\n\t\t\tProperty: "BackgroundMode", "enum", "",0') + file.write('\n\t\t\tProperty: "BackgroundAlphaTreshold", "double", "",0.5') + file.write('\n\t\t\tProperty: "ForegroundTransparent", "bool", "",1') + file.write('\n\t\t\tProperty: "DisplaySafeArea", "bool", "",0') + file.write('\n\t\t\tProperty: "SafeAreaDisplayStyle", "enum", "",1') + file.write('\n\t\t\tProperty: "SafeAreaAspectRatio", "double", "",1.33333333333333') + file.write('\n\t\t\tProperty: "Use2DMagnifierZoom", "bool", "",0') + file.write('\n\t\t\tProperty: "2D Magnifier Zoom", "Real", "A+",100') + file.write('\n\t\t\tProperty: "2D Magnifier X", "Real", "A+",50') + file.write('\n\t\t\tProperty: "2D Magnifier Y", "Real", "A+",50') + file.write('\n\t\t\tProperty: "CameraProjectionType", "enum", "",%i' % proj_type) + file.write('\n\t\t\tProperty: "UseRealTimeDOFAndAA", "bool", "",0') + file.write('\n\t\t\tProperty: "UseDepthOfField", "bool", "",0') + file.write('\n\t\t\tProperty: "FocusSource", "enum", "",0') + file.write('\n\t\t\tProperty: "FocusAngle", "double", "",3.5') + file.write('\n\t\t\tProperty: "FocusDistance", "double", "",200') + file.write('\n\t\t\tProperty: "UseAntialiasing", "bool", "",0') + file.write('\n\t\t\tProperty: "AntialiasingIntensity", "double", "",0.77777') + file.write('\n\t\t\tProperty: "UseAccumulationBuffer", "bool", "",0') + file.write('\n\t\t\tProperty: "FrameSamplingCount", "int", "",7') + file.write('\n\t\t}') + file.write('\n\t\tMultiLayer: 0') + file.write('\n\t\tMultiTake: 0') + file.write('\n\t\tHidden: "True"') + file.write('\n\t\tShading: Y') + file.write('\n\t\tCulling: "CullingOff"') + file.write('\n\t\tTypeFlags: "Camera"') + file.write('\n\t\tGeometryVersion: 124') + 
file.write('\n\t\tPosition: %.6f,%.6f,%.6f' % loc) + file.write('\n\t\tUp: %i,%i,%i' % up) + file.write('\n\t\tLookAt: 0,0,0') + file.write('\n\t\tShowInfoOnMoving: 1') + file.write('\n\t\tShowAudio: 0') + file.write('\n\t\tAudioColor: 0,1,0') + file.write('\n\t\tCameraOrthoZoom: 1') + file.write('\n\t}') + + def write_camera_default(): + # This sucks but to match FBX converter its easier to + # write the cameras though they are not needed. + write_camera_dummy('Producer Perspective', (0,71.3,287.5), 10, 4000, 0, (0,1,0)) + write_camera_dummy('Producer Top', (0,4000,0), 1, 30000, 1, (0,0,-1)) + write_camera_dummy('Producer Bottom', (0,-4000,0), 1, 30000, 1, (0,0,-1)) + write_camera_dummy('Producer Front', (0,0,4000), 1, 30000, 1, (0,1,0)) + write_camera_dummy('Producer Back', (0,0,-4000), 1, 30000, 1, (0,1,0)) + write_camera_dummy('Producer Right', (4000,0,0), 1, 30000, 1, (0,1,0)) + write_camera_dummy('Producer Left', (-4000,0,0), 1, 30000, 1, (0,1,0)) + + def write_camera(my_cam): + ''' + Write a blender camera + ''' + render = sce.render_data + width = render.resolution_x + height = render.resolution_y +# render = sce.render +# width = render.sizeX +# height = render.sizeY + aspect = float(width)/height + + data = my_cam.blenObject.data + + file.write('\n\tModel: "Model::%s", "Camera" {' % my_cam.fbxName ) + file.write('\n\t\tVersion: 232') + loc, rot, scale, matrix, matrix_rot = write_object_props(my_cam.blenObject, None, my_cam.parRelMatrix()) + + file.write('\n\t\t\tProperty: "Roll", "Roll", "A+",0') + file.write('\n\t\t\tProperty: "FieldOfView", "FieldOfView", "A+",%.6f' % data.angle) + file.write('\n\t\t\tProperty: "FieldOfViewX", "FieldOfView", "A+",1') + file.write('\n\t\t\tProperty: "FieldOfViewY", "FieldOfView", "A+",1') + file.write('\n\t\t\tProperty: "FocalLength", "Real", "A+",14.0323972702026') + file.write('\n\t\t\tProperty: "OpticalCenterX", "Real", "A+",%.6f' % data.shift_x) # not sure if this is in the correct units? +# file.write('\n\t\t\tProperty: "OpticalCenterX", "Real", "A+",%.6f' % data.shiftX) # not sure if this is in the correct units? + file.write('\n\t\t\tProperty: "OpticalCenterY", "Real", "A+",%.6f' % data.shift_y) # ditto +# file.write('\n\t\t\tProperty: "OpticalCenterY", "Real", "A+",%.6f' % data.shiftY) # ditto + file.write('\n\t\t\tProperty: "BackgroundColor", "Color", "A+",0,0,0') + file.write('\n\t\t\tProperty: "TurnTable", "Real", "A+",0') + file.write('\n\t\t\tProperty: "DisplayTurnTableIcon", "bool", "",1') + file.write('\n\t\t\tProperty: "Motion Blur Intensity", "Real", "A+",1') + file.write('\n\t\t\tProperty: "UseMotionBlur", "bool", "",0') + file.write('\n\t\t\tProperty: "UseRealTimeMotionBlur", "bool", "",1') + file.write('\n\t\t\tProperty: "ResolutionMode", "enum", "",0') + file.write('\n\t\t\tProperty: "ApertureMode", "enum", "",2') + file.write('\n\t\t\tProperty: "GateFit", "enum", "",0') + file.write('\n\t\t\tProperty: "CameraFormat", "enum", "",0') + file.write('\n\t\t\tProperty: "AspectW", "double", "",%i' % width) + file.write('\n\t\t\tProperty: "AspectH", "double", "",%i' % height) + + '''Camera aspect ratio modes. + 0 If the ratio mode is eWINDOW_SIZE, both width and height values aren't relevant. + 1 If the ratio mode is eFIXED_RATIO, the height value is set to 1.0 and the width value is relative to the height value. + 2 If the ratio mode is eFIXED_RESOLUTION, both width and height values are in pixels. + 3 If the ratio mode is eFIXED_WIDTH, the width value is in pixels and the height value is relative to the width value. 
+ 4 If the ratio mode is eFIXED_HEIGHT, the height value is in pixels and the width value is relative to the height value. + + Definition at line 234 of file kfbxcamera.h. ''' + + file.write('\n\t\t\tProperty: "PixelAspectRatio", "double", "",2') + + file.write('\n\t\t\tProperty: "UseFrameColor", "bool", "",0') + file.write('\n\t\t\tProperty: "FrameColor", "ColorRGB", "",0.3,0.3,0.3') + file.write('\n\t\t\tProperty: "ShowName", "bool", "",1') + file.write('\n\t\t\tProperty: "ShowGrid", "bool", "",1') + file.write('\n\t\t\tProperty: "ShowOpticalCenter", "bool", "",0') + file.write('\n\t\t\tProperty: "ShowAzimut", "bool", "",1') + file.write('\n\t\t\tProperty: "ShowTimeCode", "bool", "",0') + file.write('\n\t\t\tProperty: "NearPlane", "double", "",%.6f' % data.clip_start) +# file.write('\n\t\t\tProperty: "NearPlane", "double", "",%.6f' % data.clipStart) + file.write('\n\t\t\tProperty: "FarPlane", "double", "",%.6f' % data.clip_end) +# file.write('\n\t\t\tProperty: "FarPlane", "double", "",%.6f' % data.clipStart) + file.write('\n\t\t\tProperty: "FilmWidth", "double", "",1.0') + file.write('\n\t\t\tProperty: "FilmHeight", "double", "",1.0') + file.write('\n\t\t\tProperty: "FilmAspectRatio", "double", "",%.6f' % aspect) + file.write('\n\t\t\tProperty: "FilmSqueezeRatio", "double", "",1') + file.write('\n\t\t\tProperty: "FilmFormatIndex", "enum", "",0') + file.write('\n\t\t\tProperty: "ViewFrustum", "bool", "",1') + file.write('\n\t\t\tProperty: "ViewFrustumNearFarPlane", "bool", "",0') + file.write('\n\t\t\tProperty: "ViewFrustumBackPlaneMode", "enum", "",2') + file.write('\n\t\t\tProperty: "BackPlaneDistance", "double", "",100') + file.write('\n\t\t\tProperty: "BackPlaneDistanceMode", "enum", "",0') + file.write('\n\t\t\tProperty: "ViewCameraToLookAt", "bool", "",1') + file.write('\n\t\t\tProperty: "LockMode", "bool", "",0') + file.write('\n\t\t\tProperty: "LockInterestNavigation", "bool", "",0') + file.write('\n\t\t\tProperty: "FitImage", "bool", "",0') + file.write('\n\t\t\tProperty: "Crop", "bool", "",0') + file.write('\n\t\t\tProperty: "Center", "bool", "",1') + file.write('\n\t\t\tProperty: "KeepRatio", "bool", "",1') + file.write('\n\t\t\tProperty: "BackgroundMode", "enum", "",0') + file.write('\n\t\t\tProperty: "BackgroundAlphaTreshold", "double", "",0.5') + file.write('\n\t\t\tProperty: "ForegroundTransparent", "bool", "",1') + file.write('\n\t\t\tProperty: "DisplaySafeArea", "bool", "",0') + file.write('\n\t\t\tProperty: "SafeAreaDisplayStyle", "enum", "",1') + file.write('\n\t\t\tProperty: "SafeAreaAspectRatio", "double", "",%.6f' % aspect) + file.write('\n\t\t\tProperty: "Use2DMagnifierZoom", "bool", "",0') + file.write('\n\t\t\tProperty: "2D Magnifier Zoom", "Real", "A+",100') + file.write('\n\t\t\tProperty: "2D Magnifier X", "Real", "A+",50') + file.write('\n\t\t\tProperty: "2D Magnifier Y", "Real", "A+",50') + file.write('\n\t\t\tProperty: "CameraProjectionType", "enum", "",0') + file.write('\n\t\t\tProperty: "UseRealTimeDOFAndAA", "bool", "",0') + file.write('\n\t\t\tProperty: "UseDepthOfField", "bool", "",0') + file.write('\n\t\t\tProperty: "FocusSource", "enum", "",0') + file.write('\n\t\t\tProperty: "FocusAngle", "double", "",3.5') + file.write('\n\t\t\tProperty: "FocusDistance", "double", "",200') + file.write('\n\t\t\tProperty: "UseAntialiasing", "bool", "",0') + file.write('\n\t\t\tProperty: "AntialiasingIntensity", "double", "",0.77777') + file.write('\n\t\t\tProperty: "UseAccumulationBuffer", "bool", "",0') + file.write('\n\t\t\tProperty: "FrameSamplingCount", "int", 
"",7') + + file.write('\n\t\t}') + file.write('\n\t\tMultiLayer: 0') + file.write('\n\t\tMultiTake: 0') + file.write('\n\t\tShading: Y') + file.write('\n\t\tCulling: "CullingOff"') + file.write('\n\t\tTypeFlags: "Camera"') + file.write('\n\t\tGeometryVersion: 124') + file.write('\n\t\tPosition: %.6f,%.6f,%.6f' % loc) + file.write('\n\t\tUp: %.6f,%.6f,%.6f' % tuple(Mathutils.Vector(0,1,0) * matrix_rot) ) + file.write('\n\t\tLookAt: %.6f,%.6f,%.6f' % tuple(Mathutils.Vector(0,0,-1)*matrix_rot) ) + + #file.write('\n\t\tUp: 0,0,0' ) + #file.write('\n\t\tLookAt: 0,0,0' ) + + file.write('\n\t\tShowInfoOnMoving: 1') + file.write('\n\t\tShowAudio: 0') + file.write('\n\t\tAudioColor: 0,1,0') + file.write('\n\t\tCameraOrthoZoom: 1') + file.write('\n\t}') + + def write_light(my_light): + light = my_light.blenObject.data + file.write('\n\tModel: "Model::%s", "Light" {' % my_light.fbxName) + file.write('\n\t\tVersion: 232') + + write_object_props(my_light.blenObject, None, my_light.parRelMatrix()) + + # Why are these values here twice?????? - oh well, follow the holy sdk's output + + # Blender light types match FBX's, funny coincidence, we just need to + # be sure that all unsupported types are made into a point light + #ePOINT, + #eDIRECTIONAL + #eSPOT + light_type_items = {'POINT': 0, 'SUN': 1, 'SPOT': 2, 'HEMI': 3, 'AREA': 4} + light_type = light_type_items[light.type] +# light_type = light.type + if light_type > 2: light_type = 1 # hemi and area lights become directional + +# mode = light.mode + if light.shadow_method == 'RAY_SHADOW' or light.shadow_method == 'BUFFER_SHADOW': +# if mode & Blender.Lamp.Modes.RayShadow or mode & Blender.Lamp.Modes.Shadows: + do_shadow = 1 + else: + do_shadow = 0 + + if light.only_shadow or (not light.diffuse and not light.specular): +# if mode & Blender.Lamp.Modes.OnlyShadow or (mode & Blender.Lamp.Modes.NoDiffuse and mode & Blender.Lamp.Modes.NoSpecular): + do_light = 0 + else: + do_light = 1 + + scale = abs(GLOBAL_MATRIX.scalePart()[0]) # scale is always uniform in this case + + file.write('\n\t\t\tProperty: "LightType", "enum", "",%i' % light_type) + file.write('\n\t\t\tProperty: "CastLightOnObject", "bool", "",1') + file.write('\n\t\t\tProperty: "DrawVolumetricLight", "bool", "",1') + file.write('\n\t\t\tProperty: "DrawGroundProjection", "bool", "",1') + file.write('\n\t\t\tProperty: "DrawFrontFacingVolumetricLight", "bool", "",0') + file.write('\n\t\t\tProperty: "GoboProperty", "object", ""') + file.write('\n\t\t\tProperty: "Color", "Color", "A+",1,1,1') + file.write('\n\t\t\tProperty: "Intensity", "Intensity", "A+",%.2f' % (min(light.energy*100, 200))) # clamp below 200 + if light.type == 'SPOT': + file.write('\n\t\t\tProperty: "Cone angle", "Cone angle", "A+",%.2f' % (light.spot_size * scale)) +# file.write('\n\t\t\tProperty: "Cone angle", "Cone angle", "A+",%.2f' % (light.spotSize * scale)) + file.write('\n\t\t\tProperty: "Fog", "Fog", "A+",50') + file.write('\n\t\t\tProperty: "Color", "Color", "A",%.2f,%.2f,%.2f' % tuple(light.color)) +# file.write('\n\t\t\tProperty: "Color", "Color", "A",%.2f,%.2f,%.2f' % tuple(light.col)) + file.write('\n\t\t\tProperty: "Intensity", "Intensity", "A+",%.2f' % (min(light.energy*100, 200))) # clamp below 200 +# + # duplication? 
see ^ (Arystan) +# file.write('\n\t\t\tProperty: "Cone angle", "Cone angle", "A+",%.2f' % (light.spotSize * scale)) + file.write('\n\t\t\tProperty: "Fog", "Fog", "A+",50') + file.write('\n\t\t\tProperty: "LightType", "enum", "",%i' % light_type) + file.write('\n\t\t\tProperty: "CastLightOnObject", "bool", "",%i' % do_light) + file.write('\n\t\t\tProperty: "DrawGroundProjection", "bool", "",1') + file.write('\n\t\t\tProperty: "DrawFrontFacingVolumetricLight", "bool", "",0') + file.write('\n\t\t\tProperty: "DrawVolumetricLight", "bool", "",1') + file.write('\n\t\t\tProperty: "GoboProperty", "object", ""') + file.write('\n\t\t\tProperty: "DecayType", "enum", "",0') + file.write('\n\t\t\tProperty: "DecayStart", "double", "",%.2f' % light.distance) +# file.write('\n\t\t\tProperty: "DecayStart", "double", "",%.2f' % light.dist) + file.write('\n\t\t\tProperty: "EnableNearAttenuation", "bool", "",0') + file.write('\n\t\t\tProperty: "NearAttenuationStart", "double", "",0') + file.write('\n\t\t\tProperty: "NearAttenuationEnd", "double", "",0') + file.write('\n\t\t\tProperty: "EnableFarAttenuation", "bool", "",0') + file.write('\n\t\t\tProperty: "FarAttenuationStart", "double", "",0') + file.write('\n\t\t\tProperty: "FarAttenuationEnd", "double", "",0') + file.write('\n\t\t\tProperty: "CastShadows", "bool", "",%i' % do_shadow) + file.write('\n\t\t\tProperty: "ShadowColor", "ColorRGBA", "",0,0,0,1') + file.write('\n\t\t}') + file.write('\n\t\tMultiLayer: 0') + file.write('\n\t\tMultiTake: 0') + file.write('\n\t\tShading: Y') + file.write('\n\t\tCulling: "CullingOff"') + file.write('\n\t\tTypeFlags: "Light"') + file.write('\n\t\tGeometryVersion: 124') + file.write('\n\t}') + + # matrixOnly is not used at the moment + def write_null(my_null = None, fbxName = None, matrixOnly = None): + # ob can be null + if not fbxName: fbxName = my_null.fbxName + + file.write('\n\tModel: "Model::%s", "Null" {' % fbxName) + file.write('\n\t\tVersion: 232') + + # only use this for the root matrix at the moment + if matrixOnly: + poseMatrix = write_object_props(None, None, matrixOnly)[3] + + else: # all other Null's + if my_null: poseMatrix = write_object_props(my_null.blenObject, None, my_null.parRelMatrix())[3] + else: poseMatrix = write_object_props()[3] + + pose_items.append((fbxName, poseMatrix)) + + file.write(''' + } + MultiLayer: 0 + MultiTake: 1 + Shading: Y + Culling: "CullingOff" + TypeFlags: "Null" + }''') + + # Material Settings + if world: world_amb = tuple(world.ambient_color) +# if world: world_amb = world.getAmb() + else: world_amb = (0,0,0) # Default value + + def write_material(matname, mat): + file.write('\n\tMaterial: "Material::%s", "" {' % matname) + + # Todo, add more material Properties. 
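        # Summary of the Blender -> FBX value mapping used below (taken from the
        # conversions in this function):
        #
        #   DiffuseColor / EmissiveColor  <- mat.diffuse_color
        #   DiffuseFactor                 <- mat.diffuse_intensity
        #   SpecularColor                 <- mat.specular_color
        #   SpecularFactor                <- mat.specular_intensity / 2.0
        #   AmbientColor / AmbientFactor  <- world ambient colour / mat.ambient
        #   Shininess                     <- (mat.specular_hardness - 1) / 5.10
        #                                    e.g. hardness 50 -> 49 / 5.10 ~= 9.6
        #   TransparencyFactor            <- 1.0 - mat.alpha
        #   EmissiveFactor                <- mat.emit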
+ if mat: + mat_cold = tuple(mat.diffuse_color) +# mat_cold = tuple(mat.rgbCol) + mat_cols = tuple(mat.specular_color) +# mat_cols = tuple(mat.specCol) + #mat_colm = tuple(mat.mirCol) # we wont use the mirror color + mat_colamb = world_amb +# mat_colamb = tuple([c for c in world_amb]) + + mat_dif = mat.diffuse_intensity +# mat_dif = mat.ref + mat_amb = mat.ambient +# mat_amb = mat.amb + mat_hard = (float(mat.specular_hardness)-1)/5.10 +# mat_hard = (float(mat.hard)-1)/5.10 + mat_spec = mat.specular_intensity/2.0 +# mat_spec = mat.spec/2.0 + mat_alpha = mat.alpha + mat_emit = mat.emit + mat_shadeless = mat.shadeless +# mat_shadeless = mat.mode & Blender.Material.Modes.SHADELESS + if mat_shadeless: + mat_shader = 'Lambert' + else: + if mat.diffuse_shader == 'LAMBERT': +# if mat.diffuseShader == Blender.Material.Shaders.DIFFUSE_LAMBERT: + mat_shader = 'Lambert' + else: + mat_shader = 'Phong' + else: + mat_cols = mat_cold = 0.8, 0.8, 0.8 + mat_colamb = 0.0,0.0,0.0 + # mat_colm + mat_dif = 1.0 + mat_amb = 0.5 + mat_hard = 20.0 + mat_spec = 0.2 + mat_alpha = 1.0 + mat_emit = 0.0 + mat_shadeless = False + mat_shader = 'Phong' + + file.write('\n\t\tVersion: 102') + file.write('\n\t\tShadingModel: "%s"' % mat_shader.lower()) + file.write('\n\t\tMultiLayer: 0') + + file.write('\n\t\tProperties60: {') + file.write('\n\t\t\tProperty: "ShadingModel", "KString", "", "%s"' % mat_shader) + file.write('\n\t\t\tProperty: "MultiLayer", "bool", "",0') + file.write('\n\t\t\tProperty: "EmissiveColor", "ColorRGB", "",%.4f,%.4f,%.4f' % mat_cold) # emit and diffuse color are he same in blender + file.write('\n\t\t\tProperty: "EmissiveFactor", "double", "",%.4f' % mat_emit) + + file.write('\n\t\t\tProperty: "AmbientColor", "ColorRGB", "",%.4f,%.4f,%.4f' % mat_colamb) + file.write('\n\t\t\tProperty: "AmbientFactor", "double", "",%.4f' % mat_amb) + file.write('\n\t\t\tProperty: "DiffuseColor", "ColorRGB", "",%.4f,%.4f,%.4f' % mat_cold) + file.write('\n\t\t\tProperty: "DiffuseFactor", "double", "",%.4f' % mat_dif) + file.write('\n\t\t\tProperty: "Bump", "Vector3D", "",0,0,0') + file.write('\n\t\t\tProperty: "TransparentColor", "ColorRGB", "",1,1,1') + file.write('\n\t\t\tProperty: "TransparencyFactor", "double", "",%.4f' % (1.0 - mat_alpha)) + if not mat_shadeless: + file.write('\n\t\t\tProperty: "SpecularColor", "ColorRGB", "",%.4f,%.4f,%.4f' % mat_cols) + file.write('\n\t\t\tProperty: "SpecularFactor", "double", "",%.4f' % mat_spec) + file.write('\n\t\t\tProperty: "ShininessExponent", "double", "",80.0') + file.write('\n\t\t\tProperty: "ReflectionColor", "ColorRGB", "",0,0,0') + file.write('\n\t\t\tProperty: "ReflectionFactor", "double", "",1') + file.write('\n\t\t\tProperty: "Emissive", "ColorRGB", "",0,0,0') + file.write('\n\t\t\tProperty: "Ambient", "ColorRGB", "",%.1f,%.1f,%.1f' % mat_colamb) + file.write('\n\t\t\tProperty: "Diffuse", "ColorRGB", "",%.1f,%.1f,%.1f' % mat_cold) + if not mat_shadeless: + file.write('\n\t\t\tProperty: "Specular", "ColorRGB", "",%.1f,%.1f,%.1f' % mat_cols) + file.write('\n\t\t\tProperty: "Shininess", "double", "",%.1f' % mat_hard) + file.write('\n\t\t\tProperty: "Opacity", "double", "",%.1f' % mat_alpha) + if not mat_shadeless: + file.write('\n\t\t\tProperty: "Reflectivity", "double", "",0') + + file.write('\n\t\t}') + file.write('\n\t}') + + def copy_image(image): + + rel = image.get_export_path(basepath, True) + base = os.path.basename(rel) + + if EXP_IMAGE_COPY: + absp = image.get_export_path(basepath, False) + if not os.path.exists(absp): + shutil.copy(image.get_abs_filename(), 
absp) + + return (rel, base) + + # tex is an Image (Arystan) + def write_video(texname, tex): + # Same as texture really! + file.write('\n\tVideo: "Video::%s", "Clip" {' % texname) + + file.write(''' + Type: "Clip" + Properties60: { + Property: "FrameRate", "double", "",0 + Property: "LastFrame", "int", "",0 + Property: "Width", "int", "",0 + Property: "Height", "int", "",0''') + if tex: + fname_rel, fname_strip = copy_image(tex) +# fname, fname_strip, fname_rel = derived_paths(tex.filename, basepath, EXP_IMAGE_COPY) + else: + fname = fname_strip = fname_rel = '' + + file.write('\n\t\t\tProperty: "Path", "charptr", "", "%s"' % fname_strip) + + + file.write(''' + Property: "StartFrame", "int", "",0 + Property: "StopFrame", "int", "",0 + Property: "PlaySpeed", "double", "",1 + Property: "Offset", "KTime", "",0 + Property: "InterlaceMode", "enum", "",0 + Property: "FreeRunning", "bool", "",0 + Property: "Loop", "bool", "",0 + Property: "AccessMode", "enum", "",0 + } + UseMipMap: 0''') + + file.write('\n\t\tFilename: "%s"' % fname_strip) + if fname_strip: fname_strip = '/' + fname_strip + file.write('\n\t\tRelativeFilename: "%s"' % fname_rel) # make relative + file.write('\n\t}') + + + def write_texture(texname, tex, num): + # if tex == None then this is a dummy tex + file.write('\n\tTexture: "Texture::%s", "TextureVideoClip" {' % texname) + file.write('\n\t\tType: "TextureVideoClip"') + file.write('\n\t\tVersion: 202') + # TODO, rare case _empty_ exists as a name. + file.write('\n\t\tTextureName: "Texture::%s"' % texname) + + file.write(''' + Properties60: { + Property: "Translation", "Vector", "A+",0,0,0 + Property: "Rotation", "Vector", "A+",0,0,0 + Property: "Scaling", "Vector", "A+",1,1,1''') + file.write('\n\t\t\tProperty: "Texture alpha", "Number", "A+",%i' % num) + + + # WrapModeU/V 0==rep, 1==clamp, TODO add support + file.write(''' + Property: "TextureTypeUse", "enum", "",0 + Property: "CurrentTextureBlendMode", "enum", "",1 + Property: "UseMaterial", "bool", "",0 + Property: "UseMipMap", "bool", "",0 + Property: "CurrentMappingType", "enum", "",0 + Property: "UVSwap", "bool", "",0''') + + file.write('\n\t\t\tProperty: "WrapModeU", "enum", "",%i' % tex.clamp_x) +# file.write('\n\t\t\tProperty: "WrapModeU", "enum", "",%i' % tex.clampX) + file.write('\n\t\t\tProperty: "WrapModeV", "enum", "",%i' % tex.clamp_y) +# file.write('\n\t\t\tProperty: "WrapModeV", "enum", "",%i' % tex.clampY) + + file.write(''' + Property: "TextureRotationPivot", "Vector3D", "",0,0,0 + Property: "TextureScalingPivot", "Vector3D", "",0,0,0 + Property: "VideoProperty", "object", "" + }''') + + file.write('\n\t\tMedia: "Video::%s"' % texname) + + if tex: + fname_rel, fname_strip = copy_image(tex) +# fname, fname_strip, fname_rel = derived_paths(tex.filename, basepath, EXP_IMAGE_COPY) + else: + fname = fname_strip = fname_rel = '' + + file.write('\n\t\tFileName: "%s"' % fname_strip) + file.write('\n\t\tRelativeFilename: "%s"' % fname_rel) # need some make relative command + + file.write(''' + ModelUVTranslation: 0,0 + ModelUVScaling: 1,1 + Texture_Alpha_Source: "None" + Cropping: 0,0,0,0 + }''') + + def write_deformer_skin(obname): + ''' + Each mesh has its own deformer + ''' + file.write('\n\tDeformer: "Deformer::Skin %s", "Skin" {' % obname) + file.write(''' + Version: 100 + MultiLayer: 0 + Type: "Skin" + Properties60: { + } + Link_DeformAcuracy: 50 + }''') + + # in the example was 'Bip01 L Thigh_2' + def write_sub_deformer_skin(my_mesh, my_bone, weights): + + ''' + Each subdeformer is spesific to a mesh, but the 
bone it links to can be used by many sub-deformers + So the SubDeformer needs the mesh-object name as a prefix to make it unique + + Its possible that there is no matching vgroup in this mesh, in that case no verts are in the subdeformer, + a but silly but dosnt really matter + ''' + file.write('\n\tDeformer: "SubDeformer::Cluster %s %s", "Cluster" {' % (my_mesh.fbxName, my_bone.fbxName)) + + file.write(''' + Version: 100 + MultiLayer: 0 + Type: "Cluster" + Properties60: { + Property: "SrcModel", "object", "" + Property: "SrcModelReference", "object", "" + } + UserData: "", ""''') + + # Support for bone parents + if my_mesh.fbxBoneParent: + if my_mesh.fbxBoneParent == my_bone: + # TODO - this is a bit lazy, we could have a simple write loop + # for this case because all weights are 1.0 but for now this is ok + # Parent Bones arent used all that much anyway. + vgroup_data = [(j, 1.0) for j in range(len(my_mesh.blenData.verts))] + else: + # This bone is not a parent of this mesh object, no weights + vgroup_data = [] + + else: + # Normal weight painted mesh + if my_bone.blenName in weights[0]: + # Before we used normalized wright list + #vgroup_data = me.getVertsFromGroup(bone.name, 1) + group_index = weights[0].index(my_bone.blenName) + vgroup_data = [(j, weight[group_index]) for j, weight in enumerate(weights[1]) if weight[group_index]] + else: + vgroup_data = [] + + file.write('\n\t\tIndexes: ') + + i = -1 + for vg in vgroup_data: + if i == -1: + file.write('%i' % vg[0]) + i=0 + else: + if i==23: + file.write('\n\t\t') + i=0 + file.write(',%i' % vg[0]) + i+=1 + + file.write('\n\t\tWeights: ') + i = -1 + for vg in vgroup_data: + if i == -1: + file.write('%.8f' % vg[1]) + i=0 + else: + if i==38: + file.write('\n\t\t') + i=0 + file.write(',%.8f' % vg[1]) + i+=1 + + if my_mesh.fbxParent: + # TODO FIXME, this case is broken in some cases. skinned meshes just shouldnt have parents where possible! + m = mtx4_z90 * (my_bone.restMatrix * my_bone.fbxArm.matrixWorld.copy() * my_mesh.matrixWorld.copy().invert() ) + else: + # Yes! this is it... - but dosnt work when the mesh is a. 
+ m = mtx4_z90 * (my_bone.restMatrix * my_bone.fbxArm.matrixWorld.copy() * my_mesh.matrixWorld.copy().invert() ) + + #m = mtx4_z90 * my_bone.restMatrix + matstr = mat4x4str(m) + matstr_i = mat4x4str(m.invert()) + + file.write('\n\t\tTransform: %s' % matstr_i) # THIS IS __NOT__ THE GLOBAL MATRIX AS DOCUMENTED :/ + file.write('\n\t\tTransformLink: %s' % matstr) + file.write('\n\t}') + + def write_mesh(my_mesh): + + me = my_mesh.blenData + + # if there are non NULL materials on this mesh + if my_mesh.blenMaterials: do_materials = True + else: do_materials = False + + if my_mesh.blenTextures: do_textures = True + else: do_textures = False + + do_uvs = len(me.uv_textures) > 0 +# do_uvs = me.faceUV + + + file.write('\n\tModel: "Model::%s", "Mesh" {' % my_mesh.fbxName) + file.write('\n\t\tVersion: 232') # newline is added in write_object_props + + poseMatrix = write_object_props(my_mesh.blenObject, None, my_mesh.parRelMatrix())[3] + pose_items.append((my_mesh.fbxName, poseMatrix)) + + file.write('\n\t\t}') + file.write('\n\t\tMultiLayer: 0') + file.write('\n\t\tMultiTake: 1') + file.write('\n\t\tShading: Y') + file.write('\n\t\tCulling: "CullingOff"') + + + # Write the Real Mesh data here + file.write('\n\t\tVertices: ') + i=-1 + + for v in me.verts: + if i==-1: + file.write('%.6f,%.6f,%.6f' % tuple(v.co)); i=0 + else: + if i==7: + file.write('\n\t\t'); i=0 + file.write(',%.6f,%.6f,%.6f'% tuple(v.co)) + i+=1 + + file.write('\n\t\tPolygonVertexIndex: ') + i=-1 + for f in me.faces: + fi = f.verts + # fi = [v_index for j, v_index in enumerate(f.verts) if v_index != 0 or j != 3] +# fi = [v.index for v in f] + + # flip the last index, odd but it looks like + # this is how fbx tells one face from another + fi[-1] = -(fi[-1]+1) + fi = tuple(fi) + if i==-1: + if len(fi) == 3: file.write('%i,%i,%i' % fi ) +# if len(f) == 3: file.write('%i,%i,%i' % fi ) + else: file.write('%i,%i,%i,%i' % fi ) + i=0 + else: + if i==13: + file.write('\n\t\t') + i=0 + if len(fi) == 3: file.write(',%i,%i,%i' % fi ) +# if len(f) == 3: file.write(',%i,%i,%i' % fi ) + else: file.write(',%i,%i,%i,%i' % fi ) + i+=1 + + file.write('\n\t\tEdges: ') + i=-1 + for ed in me.edges: + if i==-1: + file.write('%i,%i' % (ed.verts[0], ed.verts[1])) +# file.write('%i,%i' % (ed.v1.index, ed.v2.index)) + i=0 + else: + if i==13: + file.write('\n\t\t') + i=0 + file.write(',%i,%i' % (ed.verts[0], ed.verts[1])) +# file.write(',%i,%i' % (ed.v1.index, ed.v2.index)) + i+=1 + + file.write('\n\t\tGeometryVersion: 124') + + file.write(''' + LayerElementNormal: 0 { + Version: 101 + Name: "" + MappingInformationType: "ByVertice" + ReferenceInformationType: "Direct" + Normals: ''') + + i=-1 + for v in me.verts: + if i==-1: + file.write('%.15f,%.15f,%.15f' % tuple(v.normal)); i=0 +# file.write('%.15f,%.15f,%.15f' % tuple(v.no)); i=0 + else: + if i==2: + file.write('\n '); i=0 + file.write(',%.15f,%.15f,%.15f' % tuple(v.normal)) +# file.write(',%.15f,%.15f,%.15f' % tuple(v.no)) + i+=1 + file.write('\n\t\t}') + + # Write Face Smoothing + file.write(''' + LayerElementSmoothing: 0 { + Version: 102 + Name: "" + MappingInformationType: "ByPolygon" + ReferenceInformationType: "Direct" + Smoothing: ''') + + i=-1 + for f in me.faces: + if i==-1: + file.write('%i' % f.smooth); i=0 + else: + if i==54: + file.write('\n '); i=0 + file.write(',%i' % f.smooth) + i+=1 + + file.write('\n\t\t}') + + # Write Edge Smoothing + file.write(''' + LayerElementSmoothing: 0 { + Version: 101 + Name: "" + MappingInformationType: "ByEdge" + ReferenceInformationType: "Direct" + Smoothing: 
''') + +# SHARP = Blender.Mesh.EdgeFlags.SHARP + i=-1 + for ed in me.edges: + if i==-1: + file.write('%i' % (ed.sharp)); i=0 +# file.write('%i' % ((ed.flag&SHARP)!=0)); i=0 + else: + if i==54: + file.write('\n '); i=0 + file.write(',%i' % (ed.sharp)) +# file.write(',%i' % ((ed.flag&SHARP)!=0)) + i+=1 + + file.write('\n\t\t}') +# del SHARP + + # small utility function + # returns a slice of data depending on number of face verts + # data is either a MeshTextureFace or MeshColor + def face_data(data, face): + totvert = len(f.verts) + + return data[:totvert] + + + # Write VertexColor Layers + # note, no programs seem to use this info :/ + collayers = [] + if len(me.vertex_colors): +# if me.vertexColors: + collayers = me.vertex_colors +# collayers = me.getColorLayerNames() + collayer_orig = me.active_vertex_color +# collayer_orig = me.activeColorLayer + for colindex, collayer in enumerate(collayers): +# me.activeColorLayer = collayer + file.write('\n\t\tLayerElementColor: %i {' % colindex) + file.write('\n\t\t\tVersion: 101') + file.write('\n\t\t\tName: "%s"' % collayer.name) +# file.write('\n\t\t\tName: "%s"' % collayer) + + file.write(''' + MappingInformationType: "ByPolygonVertex" + ReferenceInformationType: "IndexToDirect" + Colors: ''') + + i = -1 + ii = 0 # Count how many Colors we write + + for f, cf in zip(me.faces, collayer.data): + colors = [cf.color1, cf.color2, cf.color3, cf.color4] + + # determine number of verts + colors = face_data(colors, f) + + for col in colors: + if i==-1: + file.write('%.4f,%.4f,%.4f,1' % tuple(col)) + i=0 + else: + if i==7: + file.write('\n\t\t\t\t') + i=0 + file.write(',%.4f,%.4f,%.4f,1' % tuple(col)) + i+=1 + ii+=1 # One more Color + +# for f in me.faces: +# for col in f.col: +# if i==-1: +# file.write('%.4f,%.4f,%.4f,1' % (col[0]/255.0, col[1]/255.0, col[2]/255.0)) +# i=0 +# else: +# if i==7: +# file.write('\n\t\t\t\t') +# i=0 +# file.write(',%.4f,%.4f,%.4f,1' % (col[0]/255.0, col[1]/255.0, col[2]/255.0)) +# i+=1 +# ii+=1 # One more Color + + file.write('\n\t\t\tColorIndex: ') + i = -1 + for j in range(ii): + if i == -1: + file.write('%i' % j) + i=0 + else: + if i==55: + file.write('\n\t\t\t\t') + i=0 + file.write(',%i' % j) + i+=1 + + file.write('\n\t\t}') + + + + # Write UV and texture layers. 
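The `i == -1` / `i == N` counters used above for Vertices, PolygonVertexIndex, the color values and the index arrays are all the same idiom: a comma-separated writer that wraps the line after a fixed number of entries. A minimal standalone sketch of that idiom follows; the helper name and the list used in place of a file handle are illustrative only, not part of this patch.

    def write_wrapped(write, values, fmt, per_line, indent='\n\t\t'):
        # comma-separated "fmt % value" items, wrapping after per_line entries
        for i, val in enumerate(values):
            if i == 0:
                write(fmt % val)          # first entry carries no leading comma
            else:
                if i % per_line == 0:
                    write(indent)         # wrap, keeping the exporter's tab indent
                write(',' + fmt % val)

    out = []  # stand-in for file.write
    write_wrapped(out.append, [0.1, 0.2, 0.3, 0.4, 0.5], '%.6f', per_line=3)
    # ''.join(out) == '0.100000,0.200000,0.300000\n\t\t,0.400000,0.500000'
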
+ uvlayers = [] + if do_uvs: + uvlayers = me.uv_textures +# uvlayers = me.getUVLayerNames() + uvlayer_orig = me.active_uv_texture +# uvlayer_orig = me.activeUVLayer + for uvindex, uvlayer in enumerate(me.uv_textures): +# for uvindex, uvlayer in enumerate(uvlayers): +# me.activeUVLayer = uvlayer + file.write('\n\t\tLayerElementUV: %i {' % uvindex) + file.write('\n\t\t\tVersion: 101') + file.write('\n\t\t\tName: "%s"' % uvlayer.name) +# file.write('\n\t\t\tName: "%s"' % uvlayer) + + file.write(''' + MappingInformationType: "ByPolygonVertex" + ReferenceInformationType: "IndexToDirect" + UV: ''') + + i = -1 + ii = 0 # Count how many UVs we write + + for uf in uvlayer.data: +# for f in me.faces: + for uv in uf.uv: +# for uv in f.uv: + if i==-1: + file.write('%.6f,%.6f' % tuple(uv)) + i=0 + else: + if i==7: + file.write('\n ') + i=0 + file.write(',%.6f,%.6f' % tuple(uv)) + i+=1 + ii+=1 # One more UV + + file.write('\n\t\t\tUVIndex: ') + i = -1 + for j in range(ii): + if i == -1: + file.write('%i' % j) + i=0 + else: + if i==55: + file.write('\n\t\t\t\t') + i=0 + file.write(',%i' % j) + i+=1 + + file.write('\n\t\t}') + + if do_textures: + file.write('\n\t\tLayerElementTexture: %i {' % uvindex) + file.write('\n\t\t\tVersion: 101') + file.write('\n\t\t\tName: "%s"' % uvlayer.name) +# file.write('\n\t\t\tName: "%s"' % uvlayer) + + if len(my_mesh.blenTextures) == 1: + file.write('\n\t\t\tMappingInformationType: "AllSame"') + else: + file.write('\n\t\t\tMappingInformationType: "ByPolygon"') + + file.write('\n\t\t\tReferenceInformationType: "IndexToDirect"') + file.write('\n\t\t\tBlendMode: "Translucent"') + file.write('\n\t\t\tTextureAlpha: 1') + file.write('\n\t\t\tTextureId: ') + + if len(my_mesh.blenTextures) == 1: + file.write('0') + else: + texture_mapping_local = {None:-1} + + i = 0 # 1 for dummy + for tex in my_mesh.blenTextures: + if tex: # None is set above + texture_mapping_local[tex] = i + i+=1 + + i=-1 + for f in uvlayer.data: +# for f in me.faces: + img_key = f.image + + if i==-1: + i=0 + file.write( '%s' % texture_mapping_local[img_key]) + else: + if i==55: + file.write('\n ') + i=0 + + file.write(',%s' % texture_mapping_local[img_key]) + i+=1 + + else: + file.write(''' + LayerElementTexture: 0 { + Version: 101 + Name: "" + MappingInformationType: "NoMappingInformation" + ReferenceInformationType: "IndexToDirect" + BlendMode: "Translucent" + TextureAlpha: 1 + TextureId: ''') + file.write('\n\t\t}') + +# me.activeUVLayer = uvlayer_orig + + # Done with UV/textures. + + if do_materials: + file.write('\n\t\tLayerElementMaterial: 0 {') + file.write('\n\t\t\tVersion: 101') + file.write('\n\t\t\tName: ""') + + if len(my_mesh.blenMaterials) == 1: + file.write('\n\t\t\tMappingInformationType: "AllSame"') + else: + file.write('\n\t\t\tMappingInformationType: "ByPolygon"') + + file.write('\n\t\t\tReferenceInformationType: "IndexToDirect"') + file.write('\n\t\t\tMaterials: ') + + if len(my_mesh.blenMaterials) == 1: + file.write('0') + else: + # Build a material mapping for this + material_mapping_local = {} # local-mat & tex : global index. 
+ + for j, mat_tex_pair in enumerate(my_mesh.blenMaterials): + material_mapping_local[mat_tex_pair] = j + + len_material_mapping_local = len(material_mapping_local) + + mats = my_mesh.blenMaterialList + + if me.active_uv_texture: + uv_faces = me.active_uv_texture.data + else: + uv_faces = [None] * len(me.faces) + + i=-1 + for f, uf in zip(me.faces, uv_faces): +# for f in me.faces: + try: mat = mats[f.material_index] +# try: mat = mats[f.mat] + except:mat = None + + if do_uvs: tex = uf.image # WARNING - MULTI UV LAYER IMAGES NOT SUPPORTED :/ +# if do_uvs: tex = f.image # WARNING - MULTI UV LAYER IMAGES NOT SUPPORTED :/ + else: tex = None + + if i==-1: + i=0 + file.write( '%s' % (material_mapping_local[mat, tex])) # None for mat or tex is ok + else: + if i==55: + file.write('\n\t\t\t\t') + i=0 + + file.write(',%s' % (material_mapping_local[mat, tex])) + i+=1 + + file.write('\n\t\t}') + + file.write(''' + Layer: 0 { + Version: 100 + LayerElement: { + Type: "LayerElementNormal" + TypedIndex: 0 + }''') + + if do_materials: + file.write(''' + LayerElement: { + Type: "LayerElementMaterial" + TypedIndex: 0 + }''') + + # Always write this + if do_textures: + file.write(''' + LayerElement: { + Type: "LayerElementTexture" + TypedIndex: 0 + }''') + + if me.vertex_colors: +# if me.vertexColors: + file.write(''' + LayerElement: { + Type: "LayerElementColor" + TypedIndex: 0 + }''') + + if do_uvs: # same as me.faceUV + file.write(''' + LayerElement: { + Type: "LayerElementUV" + TypedIndex: 0 + }''') + + + file.write('\n\t\t}') + + if len(uvlayers) > 1: + for i in range(1, len(uvlayers)): + + file.write('\n\t\tLayer: %i {' % i) + file.write('\n\t\t\tVersion: 100') + + file.write(''' + LayerElement: { + Type: "LayerElementUV"''') + + file.write('\n\t\t\t\tTypedIndex: %i' % i) + file.write('\n\t\t\t}') + + if do_textures: + + file.write(''' + LayerElement: { + Type: "LayerElementTexture"''') + + file.write('\n\t\t\t\tTypedIndex: %i' % i) + file.write('\n\t\t\t}') + + file.write('\n\t\t}') + + if len(collayers) > 1: + # Take into account any UV layers + layer_offset = 0 + if uvlayers: layer_offset = len(uvlayers)-1 + + for i in range(layer_offset, len(collayers)+layer_offset): + file.write('\n\t\tLayer: %i {' % i) + file.write('\n\t\t\tVersion: 100') + + file.write(''' + LayerElement: { + Type: "LayerElementColor"''') + + file.write('\n\t\t\t\tTypedIndex: %i' % i) + file.write('\n\t\t\t}') + file.write('\n\t\t}') + file.write('\n\t}') + + def write_group(name): + file.write('\n\tGroupSelection: "GroupSelection::%s", "Default" {' % name) + + file.write(''' + Properties60: { + Property: "MultiLayer", "bool", "",0 + Property: "Pickable", "bool", "",1 + Property: "Transformable", "bool", "",1 + Property: "Show", "bool", "",1 + } + MultiLayer: 0 + }''') + + + # add meshes here to clear because they are not used anywhere. 
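The material layer above is keyed by (material, image) pairs rather than by material alone, because a face that reuses a material with a different UV image has to become a separate FBX material. A standalone sketch of that index mapping with made-up face data; build_material_indices is an illustrative name, not a function in this patch.

    def build_material_indices(face_pairs):
        # map each unique (material, image) pair to an FBX material index,
        # then resolve every face to its index
        mapping = {}
        for pair in face_pairs:
            if pair not in mapping:
                mapping[pair] = len(mapping)
        return mapping, [mapping[pair] for pair in face_pairs]

    # faces 0 and 2 share material and image, face 1 has no image assigned
    faces = [('MatA', 'grid.png'), ('MatA', None), ('MatA', 'grid.png')]
    mapping, per_face = build_material_indices(faces)
    # per_face == [0, 1, 0] -> written after "Materials: " as 0,1,0
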
+ meshes_to_clear = [] + + ob_meshes = [] + ob_lights = [] + ob_cameras = [] + # in fbx we export bones as children of the mesh + # armatures not a part of a mesh, will be added to ob_arms + ob_bones = [] + ob_arms = [] + ob_null = [] # emptys + + # List of types that have blender objects (not bones) + ob_all_typegroups = [ob_meshes, ob_lights, ob_cameras, ob_arms, ob_null] + + groups = [] # blender groups, only add ones that have objects in the selections + materials = {} # (mat, image) keys, should be a set() + textures = {} # should be a set() + + tmp_ob_type = ob_type = None # incase no objects are exported, so as not to raise an error + + # if EXP_OBS_SELECTED is false, use sceens objects + if not batch_objects: + if EXP_OBS_SELECTED: tmp_objects = context.selected_objects +# if EXP_OBS_SELECTED: tmp_objects = sce.objects.context + else: tmp_objects = sce.objects + else: + tmp_objects = batch_objects + + if EXP_ARMATURE: + # This is needed so applying modifiers dosnt apply the armature deformation, its also needed + # ...so mesh objects return their rest worldspace matrix when bone-parents are exported as weighted meshes. + # set every armature to its rest, backup the original values so we done mess up the scene + ob_arms_orig_rest = [arm.rest_position for arm in bpy.data.armatures] +# ob_arms_orig_rest = [arm.restPosition for arm in bpy.data.armatures] + + for arm in bpy.data.armatures: + arm.rest_position = True +# arm.restPosition = True + + if ob_arms_orig_rest: + for ob_base in bpy.data.objects: + #if ob_base.type == 'Armature': + ob_base.make_display_list() +# ob_base.makeDisplayList() + + # This causes the makeDisplayList command to effect the mesh + sce.set_frame(sce.current_frame) +# Blender.Set('curframe', Blender.Get('curframe')) + + + for ob_base in tmp_objects: + + # ignore dupli children + if ob_base.parent and ob_base.parent.dupli_type != 'NONE': + continue + + obs = [(ob_base, ob_base.matrix)] + if ob_base.dupli_type != 'NONE': + ob_base.create_dupli_list() + obs = [(dob.object, dob.matrix) for dob in ob_base.dupli_list] + + for ob, mtx in obs: +# for ob, mtx in BPyObject.getDerivedObjects(ob_base): + tmp_ob_type = ob.type + if tmp_ob_type == 'CAMERA': +# if tmp_ob_type == 'Camera': + if EXP_CAMERA: + ob_cameras.append(my_object_generic(ob, mtx)) + elif tmp_ob_type == 'LAMP': +# elif tmp_ob_type == 'Lamp': + if EXP_LAMP: + ob_lights.append(my_object_generic(ob, mtx)) + elif tmp_ob_type == 'ARMATURE': +# elif tmp_ob_type == 'Armature': + if EXP_ARMATURE: + # TODO - armatures dont work in dupligroups! + if ob not in ob_arms: ob_arms.append(ob) + # ob_arms.append(ob) # replace later. was "ob_arms.append(sane_obname(ob), ob)" + elif tmp_ob_type == 'EMPTY': +# elif tmp_ob_type == 'Empty': + if EXP_EMPTY: + ob_null.append(my_object_generic(ob, mtx)) + elif EXP_MESH: + origData = True + if tmp_ob_type != 'MESH': +# if tmp_ob_type != 'Mesh': +# me = bpy.data.meshes.new() + try: me = ob.create_mesh(True, 'PREVIEW') +# try: me.getFromObject(ob) + except: me = None + if me: + meshes_to_clear.append( me ) + mats = me.materials + origData = False + else: + # Mesh Type! 
+ if EXP_MESH_APPLY_MOD: +# me = bpy.data.meshes.new() + me = ob.create_mesh(True, 'PREVIEW') +# me.getFromObject(ob) + + # so we keep the vert groups +# if EXP_ARMATURE: +# orig_mesh = ob.getData(mesh=1) +# if orig_mesh.getVertGroupNames(): +# ob.copy().link(me) +# # If new mesh has no vgroups we can try add if verts are teh same +# if not me.getVertGroupNames(): # vgroups were not kept by the modifier +# if len(me.verts) == len(orig_mesh.verts): +# groupNames, vWeightDict = BPyMesh.meshWeight2Dict(orig_mesh) +# BPyMesh.dict2MeshWeight(me, groupNames, vWeightDict) + + # print ob, me, me.getVertGroupNames() + meshes_to_clear.append( me ) + origData = False + mats = me.materials + else: + me = ob.data +# me = ob.getData(mesh=1) + mats = me.materials + +# # Support object colors +# tmp_colbits = ob.colbits +# if tmp_colbits: +# tmp_ob_mats = ob.getMaterials(1) # 1 so we get None's too. +# for i in xrange(16): +# if tmp_colbits & (1< 0: +# if me.faceUV: + uvlayer_orig = me.active_uv_texture +# uvlayer_orig = me.activeUVLayer + for uvlayer in me.uv_textures: +# for uvlayer in me.getUVLayerNames(): +# me.activeUVLayer = uvlayer + for f, uf in zip(me.faces, uvlayer.data): +# for f in me.faces: + tex = uf.image +# tex = f.image + textures[tex] = texture_mapping_local[tex] = None + + try: mat = mats[f.material_index] +# try: mat = mats[f.mat] + except: mat = None + + materials[mat, tex] = material_mapping_local[mat, tex] = None # should use sets, wait for blender 2.5 + + +# me.activeUVLayer = uvlayer_orig + else: + for mat in mats: + # 2.44 use mat.lib too for uniqueness + materials[mat, None] = material_mapping_local[mat, None] = None + else: + materials[None, None] = None + + if EXP_ARMATURE: + armob = ob.find_armature() + blenParentBoneName = None + + # parent bone - special case + if (not armob) and ob.parent and ob.parent.type == 'ARMATURE' and \ + ob.parent_type == 'BONE': +# if (not armob) and ob.parent and ob.parent.type == 'Armature' and ob.parentType == Blender.Object.ParentTypes.BONE: + armob = ob.parent + blenParentBoneName = ob.parent_bone +# blenParentBoneName = ob.parentbonename + + + if armob and armob not in ob_arms: + ob_arms.append(armob) + + else: + blenParentBoneName = armob = None + + my_mesh = my_object_generic(ob, mtx) + my_mesh.blenData = me + my_mesh.origData = origData + my_mesh.blenMaterials = list(material_mapping_local.keys()) + my_mesh.blenMaterialList = mats + my_mesh.blenTextures = list(texture_mapping_local.keys()) + + # if only 1 null texture then empty the list + if len(my_mesh.blenTextures) == 1 and my_mesh.blenTextures[0] == None: + my_mesh.blenTextures = [] + + my_mesh.fbxArm = armob # replace with my_object_generic armature instance later + my_mesh.fbxBoneParent = blenParentBoneName # replace with my_bone instance later + + ob_meshes.append( my_mesh ) + + # not forgetting to free dupli_list + if ob_base.dupli_list: ob_base.free_dupli_list() + + + if EXP_ARMATURE: + # now we have the meshes, restore the rest arm position + for i, arm in enumerate(bpy.data.armatures): + arm.rest_position = ob_arms_orig_rest[i] +# arm.restPosition = ob_arms_orig_rest[i] + + if ob_arms_orig_rest: + for ob_base in bpy.data.objects: + if ob_base.type == 'ARMATURE': +# if ob_base.type == 'Armature': + ob_base.make_display_list() +# ob_base.makeDisplayList() + # This causes the makeDisplayList command to effect the mesh + sce.set_frame(sce.current_frame) +# Blender.Set('curframe', Blender.Get('curframe')) + + del tmp_ob_type, tmp_objects + + # now we have collected all armatures, 
add bones + for i, ob in enumerate(ob_arms): + + ob_arms[i] = my_arm = my_object_generic(ob) + + my_arm.fbxBones = [] + my_arm.blenData = ob.data + if ob.animation_data: + my_arm.blenAction = ob.animation_data.action + else: + my_arm.blenAction = None +# my_arm.blenAction = ob.action + my_arm.blenActionList = [] + + # fbxName, blenderObject, my_bones, blenderActions + #ob_arms[i] = fbxArmObName, ob, arm_my_bones, (ob.action, []) + + for bone in my_arm.blenData.bones: +# for bone in my_arm.blenData.bones.values(): + my_bone = my_bone_class(bone, my_arm) + my_arm.fbxBones.append( my_bone ) + ob_bones.append( my_bone ) + + # add the meshes to the bones and replace the meshes armature with own armature class + #for obname, ob, mtx, me, mats, arm, armname in ob_meshes: + for my_mesh in ob_meshes: + # Replace + # ...this could be sped up with dictionary mapping but its unlikely for + # it ever to be a bottleneck - (would need 100+ meshes using armatures) + if my_mesh.fbxArm: + for my_arm in ob_arms: + if my_arm.blenObject == my_mesh.fbxArm: + my_mesh.fbxArm = my_arm + break + + for my_bone in ob_bones: + + # The mesh uses this bones armature! + if my_bone.fbxArm == my_mesh.fbxArm: + my_bone.blenMeshes[my_mesh.fbxName] = me + + + # parent bone: replace bone names with our class instances + # my_mesh.fbxBoneParent is None or a blender bone name initialy, replacing if the names match. + if my_mesh.fbxBoneParent == my_bone.blenName: + my_mesh.fbxBoneParent = my_bone + + bone_deformer_count = 0 # count how many bones deform a mesh + my_bone_blenParent = None + for my_bone in ob_bones: + my_bone_blenParent = my_bone.blenBone.parent + if my_bone_blenParent: + for my_bone_parent in ob_bones: + # Note 2.45rc2 you can compare bones normally + if my_bone_blenParent.name == my_bone_parent.blenName and my_bone.fbxArm == my_bone_parent.fbxArm: + my_bone.parent = my_bone_parent + break + + # Not used at the moment + # my_bone.calcRestMatrixLocal() + bone_deformer_count += len(my_bone.blenMeshes) + + del my_bone_blenParent + + + # Build blenObject -> fbxObject mapping + # this is needed for groups as well as fbxParenting +# for ob in bpy.data.objects: ob.tag = False +# bpy.data.objects.tag = False + + # using a list of object names for tagging (Arystan) + tagged_objects = [] + + tmp_obmapping = {} + for ob_generic in ob_all_typegroups: + for ob_base in ob_generic: + tagged_objects.append(ob_base.blenObject.name) +# ob_base.blenObject.tag = True + tmp_obmapping[ob_base.blenObject] = ob_base + + # Build Groups from objects we export + for blenGroup in bpy.data.groups: + fbxGroupName = None + for ob in blenGroup.objects: + if ob.name in tagged_objects: +# if ob.tag: + if fbxGroupName == None: + fbxGroupName = sane_groupname(blenGroup) + groups.append((fbxGroupName, blenGroup)) + + tmp_obmapping[ob].fbxGroupNames.append(fbxGroupName) # also adds to the objects fbxGroupNames + + groups.sort() # not really needed + + # Assign parents using this mapping + for ob_generic in ob_all_typegroups: + for my_ob in ob_generic: + parent = my_ob.blenObject.parent + if parent and parent.name in tagged_objects: # does it exist and is it in the mapping +# if parent and parent.tag: # does it exist and is it in the mapping + my_ob.fbxParent = tmp_obmapping[parent] + + + del tmp_obmapping + # Finished finding groups we use + + + materials = [(sane_matname(mat_tex_pair), mat_tex_pair) for mat_tex_pair in materials.keys()] + textures = [(sane_texname(tex), tex) for tex in textures.keys() if tex] + materials.sort() # sort by name + 
textures.sort() + + camera_count = 8 + file.write(''' + +; Object definitions +;------------------------------------------------------------------ + +Definitions: { + Version: 100 + Count: %i''' % (\ + 1+1+camera_count+\ + len(ob_meshes)+\ + len(ob_lights)+\ + len(ob_cameras)+\ + len(ob_arms)+\ + len(ob_null)+\ + len(ob_bones)+\ + bone_deformer_count+\ + len(materials)+\ + (len(textures)*2))) # add 1 for the root model 1 for global settings + + del bone_deformer_count + + file.write(''' + ObjectType: "Model" { + Count: %i + }''' % (\ + 1+camera_count+\ + len(ob_meshes)+\ + len(ob_lights)+\ + len(ob_cameras)+\ + len(ob_arms)+\ + len(ob_null)+\ + len(ob_bones))) # add 1 for the root model + + file.write(''' + ObjectType: "Geometry" { + Count: %i + }''' % len(ob_meshes)) + + if materials: + file.write(''' + ObjectType: "Material" { + Count: %i + }''' % len(materials)) + + if textures: + file.write(''' + ObjectType: "Texture" { + Count: %i + }''' % len(textures)) # add 1 for an empty tex + file.write(''' + ObjectType: "Video" { + Count: %i + }''' % len(textures)) # add 1 for an empty tex + + tmp = 0 + # Add deformer nodes + for my_mesh in ob_meshes: + if my_mesh.fbxArm: + tmp+=1 + + # Add subdeformers + for my_bone in ob_bones: + tmp += len(my_bone.blenMeshes) + + if tmp: + file.write(''' + ObjectType: "Deformer" { + Count: %i + }''' % tmp) + del tmp + + # we could avoid writing this possibly but for now just write it + + file.write(''' + ObjectType: "Pose" { + Count: 1 + }''') + + if groups: + file.write(''' + ObjectType: "GroupSelection" { + Count: %i + }''' % len(groups)) + + file.write(''' + ObjectType: "GlobalSettings" { + Count: 1 + } +}''') + + file.write(''' + +; Object properties +;------------------------------------------------------------------ + +Objects: {''') + + # To comply with other FBX FILES + write_camera_switch() + + # Write the null object + write_null(None, 'blend_root')# , GLOBAL_MATRIX) + + for my_null in ob_null: + write_null(my_null) + + for my_arm in ob_arms: + write_null(my_arm) + + for my_cam in ob_cameras: + write_camera(my_cam) + + for my_light in ob_lights: + write_light(my_light) + + for my_mesh in ob_meshes: + write_mesh(my_mesh) + + #for bonename, bone, obname, me, armob in ob_bones: + for my_bone in ob_bones: + write_bone(my_bone) + + write_camera_default() + + for matname, (mat, tex) in materials: + write_material(matname, mat) # We only need to have a material per image pair, but no need to write any image info into the material (dumb fbx standard) + + # each texture uses a video, odd + for texname, tex in textures: + write_video(texname, tex) + i = 0 + for texname, tex in textures: + write_texture(texname, tex, i) + i+=1 + + for groupname, group in groups: + write_group(groupname) + + # NOTE - c4d and motionbuilder dont need normalized weights, but deep-exploration 5 does and (max?) do. + + # Write armature modifiers + # TODO - add another MODEL? - because of this skin definition. 
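The note above about normalized weights is why the deformer loop below calls meshNormalizedWeights(): each vertex's group weights are rescaled so they sum to 1.0 before being written as cluster weights. A minimal sketch of that rescaling, assuming a simple {vertex index: {group name: weight}} layout rather than the exporter's actual (group names, per-vertex weight list) pair.

    def normalize_vertex_weights(vert_weights):
        # scale each vertex's group weights so they sum to 1.0;
        # leave vertices with no weight untouched
        for groups in vert_weights.values():
            total = sum(groups.values())
            if total > 0.0:
                for name in groups:
                    groups[name] /= total
        return vert_weights

    weights = {0: {'Bone': 0.5, 'Bone.001': 1.5}, 1: {'Bone': 0.0}}
    normalize_vertex_weights(weights)
    # weights[0] == {'Bone': 0.25, 'Bone.001': 0.75}; weights[1] is unchanged
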
+ for my_mesh in ob_meshes: + if my_mesh.fbxArm: + write_deformer_skin(my_mesh.fbxName) + + # Get normalized weights for temorary use + if my_mesh.fbxBoneParent: + weights = None + else: + weights = meshNormalizedWeights(my_mesh.blenObject) +# weights = meshNormalizedWeights(my_mesh.blenData) + + #for bonename, bone, obname, bone_mesh, armob in ob_bones: + for my_bone in ob_bones: + if me in iter(my_bone.blenMeshes.values()): + write_sub_deformer_skin(my_mesh, my_bone, weights) + + # Write pose's really weired, only needed when an armature and mesh are used together + # each by themselves dont need pose data. for now only pose meshes and bones + + file.write(''' + Pose: "Pose::BIND_POSES", "BindPose" { + Type: "BindPose" + Version: 100 + Properties60: { + } + NbPoseNodes: ''') + file.write(str(len(pose_items))) + + + for fbxName, matrix in pose_items: + file.write('\n\t\tPoseNode: {') + file.write('\n\t\t\tNode: "Model::%s"' % fbxName ) + if matrix: file.write('\n\t\t\tMatrix: %s' % mat4x4str(matrix)) + else: file.write('\n\t\t\tMatrix: %s' % mat4x4str(mtx4_identity)) + file.write('\n\t\t}') + + file.write('\n\t}') + + + # Finish Writing Objects + # Write global settings + file.write(''' + GlobalSettings: { + Version: 1000 + Properties60: { + Property: "UpAxis", "int", "",1 + Property: "UpAxisSign", "int", "",1 + Property: "FrontAxis", "int", "",2 + Property: "FrontAxisSign", "int", "",1 + Property: "CoordAxis", "int", "",0 + Property: "CoordAxisSign", "int", "",1 + Property: "UnitScaleFactor", "double", "",100 + } + } +''') + file.write('}') + + file.write(''' + +; Object relations +;------------------------------------------------------------------ + +Relations: {''') + + file.write('\n\tModel: "Model::blend_root", "Null" {\n\t}') + + for my_null in ob_null: + file.write('\n\tModel: "Model::%s", "Null" {\n\t}' % my_null.fbxName) + + for my_arm in ob_arms: + file.write('\n\tModel: "Model::%s", "Null" {\n\t}' % my_arm.fbxName) + + for my_mesh in ob_meshes: + file.write('\n\tModel: "Model::%s", "Mesh" {\n\t}' % my_mesh.fbxName) + + # TODO - limbs can have the same name for multiple armatures, should prefix. 
+ #for bonename, bone, obname, me, armob in ob_bones: + for my_bone in ob_bones: + file.write('\n\tModel: "Model::%s", "Limb" {\n\t}' % my_bone.fbxName) + + for my_cam in ob_cameras: + file.write('\n\tModel: "Model::%s", "Camera" {\n\t}' % my_cam.fbxName) + + for my_light in ob_lights: + file.write('\n\tModel: "Model::%s", "Light" {\n\t}' % my_light.fbxName) + + file.write(''' + Model: "Model::Producer Perspective", "Camera" { + } + Model: "Model::Producer Top", "Camera" { + } + Model: "Model::Producer Bottom", "Camera" { + } + Model: "Model::Producer Front", "Camera" { + } + Model: "Model::Producer Back", "Camera" { + } + Model: "Model::Producer Right", "Camera" { + } + Model: "Model::Producer Left", "Camera" { + } + Model: "Model::Camera Switcher", "CameraSwitcher" { + }''') + + for matname, (mat, tex) in materials: + file.write('\n\tMaterial: "Material::%s", "" {\n\t}' % matname) + + if textures: + for texname, tex in textures: + file.write('\n\tTexture: "Texture::%s", "TextureVideoClip" {\n\t}' % texname) + for texname, tex in textures: + file.write('\n\tVideo: "Video::%s", "Clip" {\n\t}' % texname) + + # deformers - modifiers + for my_mesh in ob_meshes: + if my_mesh.fbxArm: + file.write('\n\tDeformer: "Deformer::Skin %s", "Skin" {\n\t}' % my_mesh.fbxName) + + #for bonename, bone, obname, me, armob in ob_bones: + for my_bone in ob_bones: + for fbxMeshObName in my_bone.blenMeshes: # .keys() - fbxMeshObName + # is this bone effecting a mesh? + file.write('\n\tDeformer: "SubDeformer::Cluster %s %s", "Cluster" {\n\t}' % (fbxMeshObName, my_bone.fbxName)) + + # This should be at the end + # file.write('\n\tPose: "Pose::BIND_POSES", "BindPose" {\n\t}') + + for groupname, group in groups: + file.write('\n\tGroupSelection: "GroupSelection::%s", "Default" {\n\t}' % groupname) + + file.write('\n}') + file.write(''' + +; Object connections +;------------------------------------------------------------------ + +Connections: {''') + + # NOTE - The FBX SDK dosnt care about the order but some importers DO! + # for instance, defining the material->mesh connection + # before the mesh->blend_root crashes cinema4d + + + # write the fake root node + file.write('\n\tConnect: "OO", "Model::blend_root", "Model::Scene"') + + for ob_generic in ob_all_typegroups: # all blender 'Object's we support + for my_ob in ob_generic: + if my_ob.fbxParent: + file.write('\n\tConnect: "OO", "Model::%s", "Model::%s"' % (my_ob.fbxName, my_ob.fbxParent.fbxName)) + else: + file.write('\n\tConnect: "OO", "Model::%s", "Model::blend_root"' % my_ob.fbxName) + + if materials: + for my_mesh in ob_meshes: + # Connect all materials to all objects, not good form but ok for now. 
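The ordering note above is the one real constraint in this Connections block: the Model->Model (parenting) connections are written before any Material->Model connection, because at least one importer (cinema4d, per the comment) crashes on the reverse order. Below is a sketch of one way to make that ordering explicit by buffering the links and sorting by kind; this is only an illustration, the exporter itself simply writes the groups in a safe sequence.

    # write Model->Model links before Material/Texture/Video links, since
    # some importers reject material connections that precede the model ones
    KIND_ORDER = {'Model': 0, 'Material': 1, 'Texture': 2, 'Video': 3}

    def write_connections(write, links):
        for src, dst in sorted(links, key=lambda l: KIND_ORDER[l[0].split('::')[0]]):
            write('\n\tConnect: "OO", "%s", "%s"' % (src, dst))

    out = []
    write_connections(out.append, [('Material::MatA', 'Model::Cube'),
                                   ('Model::Cube', 'Model::blend_root')])
    # out[0] connects Model::Cube to blend_root, out[1] connects the material
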
+ for mat, tex in my_mesh.blenMaterials: + if mat: mat_name = mat.name + else: mat_name = None + + if tex: tex_name = tex.name + else: tex_name = None + + file.write('\n\tConnect: "OO", "Material::%s", "Model::%s"' % (sane_name_mapping_mat[mat_name, tex_name], my_mesh.fbxName)) + + if textures: + for my_mesh in ob_meshes: + if my_mesh.blenTextures: + # file.write('\n\tConnect: "OO", "Texture::_empty_", "Model::%s"' % my_mesh.fbxName) + for tex in my_mesh.blenTextures: + if tex: + file.write('\n\tConnect: "OO", "Texture::%s", "Model::%s"' % (sane_name_mapping_tex[tex.name], my_mesh.fbxName)) + + for texname, tex in textures: + file.write('\n\tConnect: "OO", "Video::%s", "Texture::%s"' % (texname, texname)) + + for my_mesh in ob_meshes: + if my_mesh.fbxArm: + file.write('\n\tConnect: "OO", "Deformer::Skin %s", "Model::%s"' % (my_mesh.fbxName, my_mesh.fbxName)) + + #for bonename, bone, obname, me, armob in ob_bones: + for my_bone in ob_bones: + for fbxMeshObName in my_bone.blenMeshes: # .keys() + file.write('\n\tConnect: "OO", "SubDeformer::Cluster %s %s", "Deformer::Skin %s"' % (fbxMeshObName, my_bone.fbxName, fbxMeshObName)) + + # limbs -> deformers + # for bonename, bone, obname, me, armob in ob_bones: + for my_bone in ob_bones: + for fbxMeshObName in my_bone.blenMeshes: # .keys() + file.write('\n\tConnect: "OO", "Model::%s", "SubDeformer::Cluster %s %s"' % (my_bone.fbxName, fbxMeshObName, my_bone.fbxName)) + + + #for bonename, bone, obname, me, armob in ob_bones: + for my_bone in ob_bones: + # Always parent to armature now + if my_bone.parent: + file.write('\n\tConnect: "OO", "Model::%s", "Model::%s"' % (my_bone.fbxName, my_bone.parent.fbxName) ) + else: + # the armature object is written as an empty and all root level bones connect to it + file.write('\n\tConnect: "OO", "Model::%s", "Model::%s"' % (my_bone.fbxName, my_bone.fbxArm.fbxName) ) + + # groups + if groups: + for ob_generic in ob_all_typegroups: + for ob_base in ob_generic: + for fbxGroupName in ob_base.fbxGroupNames: + file.write('\n\tConnect: "OO", "Model::%s", "GroupSelection::%s"' % (ob_base.fbxName, fbxGroupName)) + + for my_arm in ob_arms: + file.write('\n\tConnect: "OO", "Model::%s", "Model::blend_root"' % my_arm.fbxName) + + file.write('\n}') + + + # Needed for scene footer as well as animation + render = sce.render_data +# render = sce.render + + # from the FBX sdk + #define KTIME_ONE_SECOND KTime (K_LONGLONG(46186158000)) + def fbx_time(t): + # 0.5 + val is the same as rounding. + return int(0.5 + ((t/fps) * 46186158000)) + + fps = float(render.fps) + start = sce.start_frame +# start = render.sFrame + end = sce.end_frame +# end = render.eFrame + if end < start: start, end = end, start + if start==end: ANIM_ENABLE = False + + # animations for these object types + ob_anim_lists = ob_bones, ob_meshes, ob_null, ob_cameras, ob_lights, ob_arms + + if ANIM_ENABLE and [tmp for tmp in ob_anim_lists if tmp]: + + frame_orig = sce.current_frame +# frame_orig = Blender.Get('curframe') + + if ANIM_OPTIMIZE: + ANIM_OPTIMIZE_PRECISSION_FLOAT = 0.1 ** ANIM_OPTIMIZE_PRECISSION + + # default action, when no actions are avaioable + tmp_actions = [None] # None is the default action + blenActionDefault = None + action_lastcompat = None + + # instead of tagging + tagged_actions = [] + + if ANIM_ACTION_ALL: +# bpy.data.actions.tag = False + tmp_actions = list(bpy.data.actions) + + + # find which actions are compatible with the armatures + # blenActions is not yet initialized so do it now. 
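fbx_time() above is the whole of the frame-to-FBX-time conversion: one second is 46,186,158,000 KTime ticks (the KTIME_ONE_SECOND constant quoted from the FBX SDK), and adding 0.5 before int() rounds to the nearest tick. The exporter generally passes frame-1, so Blender's frame 1 lands at time 0. A standalone version with example values at an assumed 25 fps:

    KTIME_ONE_SECOND = 46186158000  # from the FBX SDK, as quoted above

    def fbx_time(t, fps):
        # 0.5 + val is the same as rounding to the nearest KTime tick
        return int(0.5 + (t / fps) * KTIME_ONE_SECOND)

    print(fbx_time(0, 25.0))    # Blender frame 1  ->            0
    print(fbx_time(1, 25.0))    # Blender frame 2  ->  1847446320
    print(fbx_time(25, 25.0))   # one second later -> 46186158000
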
+ tmp_act_count = 0 + for my_arm in ob_arms: + + # get the default name + if not blenActionDefault: + blenActionDefault = my_arm.blenAction + + arm_bone_names = set([my_bone.blenName for my_bone in my_arm.fbxBones]) + + for action in tmp_actions: + + action_chan_names = arm_bone_names.intersection( set([g.name for g in action.groups]) ) +# action_chan_names = arm_bone_names.intersection( set(action.getChannelNames()) ) + + if action_chan_names: # at least one channel matches. + my_arm.blenActionList.append(action) + tagged_actions.append(action.name) +# action.tag = True + tmp_act_count += 1 + + # incase there is no actions applied to armatures + action_lastcompat = action + + if tmp_act_count: + # unlikely to ever happen but if no actions applied to armatures, just use the last compatible armature. + if not blenActionDefault: + blenActionDefault = action_lastcompat + + del action_lastcompat + + file.write(''' +;Takes and animation section +;---------------------------------------------------- + +Takes: {''') + + if blenActionDefault: + file.write('\n\tCurrent: "%s"' % sane_takename(blenActionDefault)) + else: + file.write('\n\tCurrent: "Default Take"') + + for blenAction in tmp_actions: + # we have tagged all actious that are used be selected armatures + if blenAction: + if blenAction.name in tagged_actions: +# if blenAction.tag: + print('\taction: "%s" exporting...' % blenAction.name) + else: + print('\taction: "%s" has no armature using it, skipping' % blenAction.name) + continue + + if blenAction == None: + # Warning, this only accounts for tmp_actions being [None] + file.write('\n\tTake: "Default Take" {') + act_start = start + act_end = end + else: + # use existing name + if blenAction == blenActionDefault: # have we alredy got the name + file.write('\n\tTake: "%s" {' % sane_name_mapping_take[blenAction.name]) + else: + file.write('\n\tTake: "%s" {' % sane_takename(blenAction)) + + act_start, act_end = blenAction.get_frame_range() +# tmp = blenAction.getFrameNumbers() +# if tmp: +# act_start = min(tmp) +# act_end = max(tmp) +# del tmp +# else: +# # Fallback on this, theres not much else we can do? :/ +# # when an action has no length +# act_start = start +# act_end = end + + # Set the action active + for my_bone in ob_arms: + if blenAction in my_bone.blenActionList: + ob.action = blenAction + # print '\t\tSetting Action!', blenAction + # sce.update(1) + + file.write('\n\t\tFileName: "Default_Take.tak"') # ??? - not sure why this is needed + file.write('\n\t\tLocalTime: %i,%i' % (fbx_time(act_start-1), fbx_time(act_end-1))) # ??? - not sure why this is needed + file.write('\n\t\tReferenceTime: %i,%i' % (fbx_time(act_start-1), fbx_time(act_end-1))) # ??? - not sure why this is needed + + file.write(''' + + ;Models animation + ;----------------------------------------------------''') + + + # set pose data for all bones + # do this here incase the action changes + ''' + for my_bone in ob_bones: + my_bone.flushAnimData() + ''' + i = act_start + while i <= act_end: + sce.set_frame(i) +# Blender.Set('curframe', i) + for ob_generic in ob_anim_lists: + for my_ob in ob_generic: + #Blender.Window.RedrawAll() + if ob_generic == ob_meshes and my_ob.fbxArm: + # We cant animate armature meshes! 
+ pass + else: + my_ob.setPoseFrame(i) + + i+=1 + + + #for bonename, bone, obname, me, armob in ob_bones: + for ob_generic in (ob_bones, ob_meshes, ob_null, ob_cameras, ob_lights, ob_arms): + + for my_ob in ob_generic: + + if ob_generic == ob_meshes and my_ob.fbxArm: + # do nothing, + pass + else: + + file.write('\n\t\tModel: "Model::%s" {' % my_ob.fbxName) # ??? - not sure why this is needed + file.write('\n\t\t\tVersion: 1.1') + file.write('\n\t\t\tChannel: "Transform" {') + + context_bone_anim_mats = [ (my_ob.getAnimParRelMatrix(frame), my_ob.getAnimParRelMatrixRot(frame)) for frame in range(act_start, act_end+1) ] + + # ---------------- + # ---------------- + for TX_LAYER, TX_CHAN in enumerate('TRS'): # transform, rotate, scale + + if TX_CHAN=='T': context_bone_anim_vecs = [mtx[0].translationPart() for mtx in context_bone_anim_mats] + elif TX_CHAN=='S': context_bone_anim_vecs = [mtx[0].scalePart() for mtx in context_bone_anim_mats] + elif TX_CHAN=='R': + # Was.... + # elif TX_CHAN=='R': context_bone_anim_vecs = [mtx[1].toEuler() for mtx in context_bone_anim_mats] + # + # ...but we need to use the previous euler for compatible conversion. + context_bone_anim_vecs = [] + prev_eul = None + for mtx in context_bone_anim_mats: + if prev_eul: prev_eul = mtx[1].toEuler(prev_eul) + else: prev_eul = mtx[1].toEuler() + context_bone_anim_vecs.append(eulerRadToDeg(prev_eul)) +# context_bone_anim_vecs.append(prev_eul) + + file.write('\n\t\t\t\tChannel: "%s" {' % TX_CHAN) # translation + + for i in range(3): + # Loop on each axis of the bone + file.write('\n\t\t\t\t\tChannel: "%s" {'% ('XYZ'[i])) # translation + file.write('\n\t\t\t\t\t\tDefault: %.15f' % context_bone_anim_vecs[0][i] ) + file.write('\n\t\t\t\t\t\tKeyVer: 4005') + + if not ANIM_OPTIMIZE: + # Just write all frames, simple but in-eficient + file.write('\n\t\t\t\t\t\tKeyCount: %i' % (1 + act_end - act_start)) + file.write('\n\t\t\t\t\t\tKey: ') + frame = act_start + while frame <= act_end: + if frame!=act_start: + file.write(',') + + # Curve types are 'C,n' for constant, 'L' for linear + # C,n is for bezier? - linear is best for now so we can do simple keyframe removal + file.write('\n\t\t\t\t\t\t\t%i,%.15f,L' % (fbx_time(frame-1), context_bone_anim_vecs[frame-act_start][i] )) + frame+=1 + else: + # remove unneeded keys, j is the frame, needed when some frames are removed. + context_bone_anim_keys = [ (vec[i], j) for j, vec in enumerate(context_bone_anim_vecs) ] + + # last frame to fisrt frame, missing 1 frame on either side. + # removeing in a backwards loop is faster + #for j in xrange( (act_end-act_start)-1, 0, -1 ): + # j = (act_end-act_start)-1 + j = len(context_bone_anim_keys)-2 + while j > 0 and len(context_bone_anim_keys) > 2: + # print j, len(context_bone_anim_keys) + # Is this key the same as the ones next to it? + + # co-linear horizontal... 
+ if abs(context_bone_anim_keys[j][0] - context_bone_anim_keys[j-1][0]) < ANIM_OPTIMIZE_PRECISSION_FLOAT and\ + abs(context_bone_anim_keys[j][0] - context_bone_anim_keys[j+1][0]) < ANIM_OPTIMIZE_PRECISSION_FLOAT: + + del context_bone_anim_keys[j] + + else: + frame_range = float(context_bone_anim_keys[j+1][1] - context_bone_anim_keys[j-1][1]) + frame_range_fac1 = (context_bone_anim_keys[j+1][1] - context_bone_anim_keys[j][1]) / frame_range + frame_range_fac2 = 1.0 - frame_range_fac1 + + if abs(((context_bone_anim_keys[j-1][0]*frame_range_fac1 + context_bone_anim_keys[j+1][0]*frame_range_fac2)) - context_bone_anim_keys[j][0]) < ANIM_OPTIMIZE_PRECISSION_FLOAT: + del context_bone_anim_keys[j] + else: + j-=1 + + # keep the index below the list length + if j > len(context_bone_anim_keys)-2: + j = len(context_bone_anim_keys)-2 + + if len(context_bone_anim_keys) == 2 and context_bone_anim_keys[0][0] == context_bone_anim_keys[1][0]: + # This axis has no moton, its okay to skip KeyCount and Keys in this case + pass + else: + # We only need to write these if there is at least one + file.write('\n\t\t\t\t\t\tKeyCount: %i' % len(context_bone_anim_keys)) + file.write('\n\t\t\t\t\t\tKey: ') + for val, frame in context_bone_anim_keys: + if frame != context_bone_anim_keys[0][1]: # not the first + file.write(',') + # frame is alredy one less then blenders frame + file.write('\n\t\t\t\t\t\t\t%i,%.15f,L' % (fbx_time(frame), val )) + + if i==0: file.write('\n\t\t\t\t\t\tColor: 1,0,0') + elif i==1: file.write('\n\t\t\t\t\t\tColor: 0,1,0') + elif i==2: file.write('\n\t\t\t\t\t\tColor: 0,0,1') + + file.write('\n\t\t\t\t\t}') + file.write('\n\t\t\t\t\tLayerType: %i' % (TX_LAYER+1) ) + file.write('\n\t\t\t\t}') + + # --------------- + + file.write('\n\t\t\t}') + file.write('\n\t\t}') + + # end the take + file.write('\n\t}') + + # end action loop. set original actions + # do this after every loop incase actions effect eachother. 
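The ANIM_OPTIMIZE branch above is a linear-key simplifier: a key is dropped when its value lies within ANIM_OPTIMIZE_PRECISSION_FLOAT of both of its neighbours, or of the straight line between them, which is safe because every key is written with the 'L' (linear) interpolation flag. A compact standalone sketch that keeps just the interpolation test (which also covers the flat case); the function name and the (frame, value) layout are illustrative only.

    def simplify_linear_keys(keys, tol=0.0001):   # 0.1 ** 4, the default precision above
        # drop (frame, value) keys that the line through their neighbours
        # already reproduces to within tol; first and last keys are kept
        keys = list(keys)
        j = len(keys) - 2
        while j > 0 and len(keys) > 2:
            (f0, v0), (f1, v1), (f2, v2) = keys[j - 1], keys[j], keys[j + 1]
            fac = (f2 - f1) / float(f2 - f0)      # weight of the previous key at f1
            if abs(v1 - (v0 * fac + v2 * (1.0 - fac))) < tol:
                del keys[j]
            else:
                j -= 1
            j = min(j, len(keys) - 2)             # keep the index inside the list
        return keys

    keys = [(0, 1.0), (1, 1.0), (2, 1.0), (3, 2.0), (4, 3.0)]
    # the flat run collapses and the interpolated ramp key is removed:
    # simplify_linear_keys(keys) == [(0, 1.0), (2, 1.0), (4, 3.0)]
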
+ for my_bone in ob_arms: + my_bone.blenObject.action = my_bone.blenAction + + file.write('\n}') + + sce.set_frame(frame_orig) +# Blender.Set('curframe', frame_orig) + + else: + # no animation + file.write('\n;Takes and animation section') + file.write('\n;----------------------------------------------------') + file.write('\n') + file.write('\nTakes: {') + file.write('\n\tCurrent: ""') + file.write('\n}') + + + # write meshes animation + #for obname, ob, mtx, me, mats, arm, armname in ob_meshes: + + + # Clear mesh data Only when writing with modifiers applied + for me in meshes_to_clear: + bpy.data.remove_mesh(me) +# me.verts = None + + # --------------------------- Footer + if world: + m = world.mist + has_mist = m.enabled +# has_mist = world.mode & 1 + mist_intense = m.intensity + mist_start = m.start + mist_end = m.depth + mist_height = m.height +# mist_intense, mist_start, mist_end, mist_height = world.mist + world_hor = world.horizon_color +# world_hor = world.hor + else: + has_mist = mist_intense = mist_start = mist_end = mist_height = 0 + world_hor = 0,0,0 + + file.write('\n;Version 5 settings') + file.write('\n;------------------------------------------------------------------') + file.write('\n') + file.write('\nVersion5: {') + file.write('\n\tAmbientRenderSettings: {') + file.write('\n\t\tVersion: 101') + file.write('\n\t\tAmbientLightColor: %.1f,%.1f,%.1f,0' % tuple(world_amb)) + file.write('\n\t}') + file.write('\n\tFogOptions: {') + file.write('\n\t\tFlogEnable: %i' % has_mist) + file.write('\n\t\tFogMode: 0') + file.write('\n\t\tFogDensity: %.3f' % mist_intense) + file.write('\n\t\tFogStart: %.3f' % mist_start) + file.write('\n\t\tFogEnd: %.3f' % mist_end) + file.write('\n\t\tFogColor: %.1f,%.1f,%.1f,1' % tuple(world_hor)) + file.write('\n\t}') + file.write('\n\tSettings: {') + file.write('\n\t\tFrameRate: "%i"' % int(fps)) + file.write('\n\t\tTimeFormat: 1') + file.write('\n\t\tSnapOnFrames: 0') + file.write('\n\t\tReferenceTimeIndex: -1') + file.write('\n\t\tTimeLineStartTime: %i' % fbx_time(start-1)) + file.write('\n\t\tTimeLineStopTime: %i' % fbx_time(end-1)) + file.write('\n\t}') + file.write('\n\tRendererSetting: {') + file.write('\n\t\tDefaultCamera: "Producer Perspective"') + file.write('\n\t\tDefaultViewingMode: 0') + file.write('\n\t}') + file.write('\n}') + file.write('\n') + + # Incase sombody imports this, clean up by clearing global dicts + sane_name_mapping_ob.clear() + sane_name_mapping_mat.clear() + sane_name_mapping_tex.clear() + + ob_arms[:] = [] + ob_bones[:] = [] + ob_cameras[:] = [] + ob_lights[:] = [] + ob_meshes[:] = [] + ob_null[:] = [] + + + # copy images if enabled +# if EXP_IMAGE_COPY: +# # copy_images( basepath, [ tex[1] for tex in textures if tex[1] != None ]) +# bpy.util.copy_images( [ tex[1] for tex in textures if tex[1] != None ], basepath) + + print('export finished in %.4f sec.' % (time.clock() - start_time)) +# print 'export finished in %.4f sec.' % (Blender.sys.time() - start_time) + return True + + +# -------------------------------------------- +# UI Function - not a part of the exporter. +# this is to seperate the user interface from the rest of the exporter. 
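The Version5 footer above copies Blender's world mist settings straight into the FBX FogOptions block and the horizon colour into FogColor. A tiny sketch of that field-for-field mapping with made-up world values; the dict names are illustrative, and 'FlogEnable' is kept exactly as the block above spells it.

    # stand-ins for world.mist and world.horizon_color
    mist = {'enabled': True, 'intensity': 0.4, 'start': 5.0, 'depth': 25.0}
    horizon_color = (0.05, 0.22, 0.40)

    fog_options = {
        'FlogEnable': int(mist['enabled']),   # spelled as in the block above
        'FogMode': 0,
        'FogDensity': mist['intensity'],
        'FogStart': mist['start'],
        'FogEnd': mist['depth'],              # fed from world.mist depth, as above
        'FogColor': horizon_color + (1,),     # RGB plus alpha 1, as written above
    }
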
+# from Blender import Draw, Window +EVENT_NONE = 0 +EVENT_EXIT = 1 +EVENT_REDRAW = 2 +EVENT_FILESEL = 3 + +GLOBALS = {} + +# export opts + +def do_redraw(e,v): GLOBALS['EVENT'] = e + +# toggle between these 2, only allow one on at once +def do_obs_sel(e,v): + GLOBALS['EVENT'] = e + GLOBALS['EXP_OBS_SCENE'].val = 0 + GLOBALS['EXP_OBS_SELECTED'].val = 1 + +def do_obs_sce(e,v): + GLOBALS['EVENT'] = e + GLOBALS['EXP_OBS_SCENE'].val = 1 + GLOBALS['EXP_OBS_SELECTED'].val = 0 + +def do_batch_type_grp(e,v): + GLOBALS['EVENT'] = e + GLOBALS['BATCH_GROUP'].val = 1 + GLOBALS['BATCH_SCENE'].val = 0 + +def do_batch_type_sce(e,v): + GLOBALS['EVENT'] = e + GLOBALS['BATCH_GROUP'].val = 0 + GLOBALS['BATCH_SCENE'].val = 1 + +def do_anim_act_all(e,v): + GLOBALS['EVENT'] = e + GLOBALS['ANIM_ACTION_ALL'][0].val = 1 + GLOBALS['ANIM_ACTION_ALL'][1].val = 0 + +def do_anim_act_cur(e,v): + if GLOBALS['BATCH_ENABLE'].val and GLOBALS['BATCH_GROUP'].val: + Draw.PupMenu('Warning%t|Cant use this with batch export group option') + else: + GLOBALS['EVENT'] = e + GLOBALS['ANIM_ACTION_ALL'][0].val = 0 + GLOBALS['ANIM_ACTION_ALL'][1].val = 1 + +def fbx_ui_exit(e,v): + GLOBALS['EVENT'] = e + +def do_help(e,v): + url = 'http://wiki.blender.org/index.php/Scripts/Manual/Export/autodesk_fbx' + print('Trying to open web browser with documentation at this address...') + print('\t' + url) + + try: + import webbrowser + webbrowser.open(url) + except: + Blender.Draw.PupMenu("Error%t|Opening a webbrowser requires a full python installation") + print('...could not open a browser window.') + + + +# run when export is pressed +#def fbx_ui_write(e,v): +def fbx_ui_write(filename, context): + + # Dont allow overwriting files when saving normally + if not GLOBALS['BATCH_ENABLE'].val: + if not BPyMessages.Warning_SaveOver(filename): + return + + GLOBALS['EVENT'] = EVENT_EXIT + + # Keep the order the same as above for simplicity + # the [] is a dummy arg used for objects + + Blender.Window.WaitCursor(1) + + # Make the matrix + GLOBAL_MATRIX = mtx4_identity + GLOBAL_MATRIX[0][0] = GLOBAL_MATRIX[1][1] = GLOBAL_MATRIX[2][2] = GLOBALS['_SCALE'].val + if GLOBALS['_XROT90'].val: GLOBAL_MATRIX = GLOBAL_MATRIX * mtx4_x90n + if GLOBALS['_YROT90'].val: GLOBAL_MATRIX = GLOBAL_MATRIX * mtx4_y90n + if GLOBALS['_ZROT90'].val: GLOBAL_MATRIX = GLOBAL_MATRIX * mtx4_z90n + + ret = write(\ + filename, None,\ + context, + GLOBALS['EXP_OBS_SELECTED'].val,\ + GLOBALS['EXP_MESH'].val,\ + GLOBALS['EXP_MESH_APPLY_MOD'].val,\ + GLOBALS['EXP_MESH_HQ_NORMALS'].val,\ + GLOBALS['EXP_ARMATURE'].val,\ + GLOBALS['EXP_LAMP'].val,\ + GLOBALS['EXP_CAMERA'].val,\ + GLOBALS['EXP_EMPTY'].val,\ + GLOBALS['EXP_IMAGE_COPY'].val,\ + GLOBAL_MATRIX,\ + GLOBALS['ANIM_ENABLE'].val,\ + GLOBALS['ANIM_OPTIMIZE'].val,\ + GLOBALS['ANIM_OPTIMIZE_PRECISSION'].val,\ + GLOBALS['ANIM_ACTION_ALL'][0].val,\ + GLOBALS['BATCH_ENABLE'].val,\ + GLOBALS['BATCH_GROUP'].val,\ + GLOBALS['BATCH_SCENE'].val,\ + GLOBALS['BATCH_FILE_PREFIX'].val,\ + GLOBALS['BATCH_OWN_DIR'].val,\ + ) + + Blender.Window.WaitCursor(0) + GLOBALS.clear() + + if ret == False: + Draw.PupMenu('Error%t|Path cannot be written to!') + + +def fbx_ui(): + # Only to center the UI + x,y = GLOBALS['MOUSE'] + x-=180; y-=0 # offset... 
just to get it centered + + Draw.Label('Export Objects...', x+20,y+165, 200, 20) + + if not GLOBALS['BATCH_ENABLE'].val: + Draw.BeginAlign() + GLOBALS['EXP_OBS_SELECTED'] = Draw.Toggle('Selected Objects', EVENT_REDRAW, x+20, y+145, 160, 20, GLOBALS['EXP_OBS_SELECTED'].val, 'Export selected objects on visible layers', do_obs_sel) + GLOBALS['EXP_OBS_SCENE'] = Draw.Toggle('Scene Objects', EVENT_REDRAW, x+180, y+145, 160, 20, GLOBALS['EXP_OBS_SCENE'].val, 'Export all objects in this scene', do_obs_sce) + Draw.EndAlign() + + Draw.BeginAlign() + GLOBALS['_SCALE'] = Draw.Number('Scale:', EVENT_NONE, x+20, y+120, 140, 20, GLOBALS['_SCALE'].val, 0.01, 1000.0, 'Scale all data, (Note! some imports dont support scaled armatures)') + GLOBALS['_XROT90'] = Draw.Toggle('Rot X90', EVENT_NONE, x+160, y+120, 60, 20, GLOBALS['_XROT90'].val, 'Rotate all objects 90 degrese about the X axis') + GLOBALS['_YROT90'] = Draw.Toggle('Rot Y90', EVENT_NONE, x+220, y+120, 60, 20, GLOBALS['_YROT90'].val, 'Rotate all objects 90 degrese about the Y axis') + GLOBALS['_ZROT90'] = Draw.Toggle('Rot Z90', EVENT_NONE, x+280, y+120, 60, 20, GLOBALS['_ZROT90'].val, 'Rotate all objects 90 degrese about the Z axis') + Draw.EndAlign() + + y -= 35 + + Draw.BeginAlign() + GLOBALS['EXP_EMPTY'] = Draw.Toggle('Empty', EVENT_NONE, x+20, y+120, 60, 20, GLOBALS['EXP_EMPTY'].val, 'Export empty objects') + GLOBALS['EXP_CAMERA'] = Draw.Toggle('Camera', EVENT_NONE, x+80, y+120, 60, 20, GLOBALS['EXP_CAMERA'].val, 'Export camera objects') + GLOBALS['EXP_LAMP'] = Draw.Toggle('Lamp', EVENT_NONE, x+140, y+120, 60, 20, GLOBALS['EXP_LAMP'].val, 'Export lamp objects') + GLOBALS['EXP_ARMATURE'] = Draw.Toggle('Armature', EVENT_NONE, x+200, y+120, 60, 20, GLOBALS['EXP_ARMATURE'].val, 'Export armature objects') + GLOBALS['EXP_MESH'] = Draw.Toggle('Mesh', EVENT_REDRAW, x+260, y+120, 80, 20, GLOBALS['EXP_MESH'].val, 'Export mesh objects', do_redraw) #, do_axis_z) + Draw.EndAlign() + + if GLOBALS['EXP_MESH'].val: + # below mesh but + Draw.BeginAlign() + GLOBALS['EXP_MESH_APPLY_MOD'] = Draw.Toggle('Modifiers', EVENT_NONE, x+260, y+100, 80, 20, GLOBALS['EXP_MESH_APPLY_MOD'].val, 'Apply modifiers to mesh objects') #, do_axis_z) + GLOBALS['EXP_MESH_HQ_NORMALS'] = Draw.Toggle('HQ Normals', EVENT_NONE, x+260, y+80, 80, 20, GLOBALS['EXP_MESH_HQ_NORMALS'].val, 'Generate high quality normals') #, do_axis_z) + Draw.EndAlign() + + GLOBALS['EXP_IMAGE_COPY'] = Draw.Toggle('Copy Image Files', EVENT_NONE, x+20, y+80, 160, 20, GLOBALS['EXP_IMAGE_COPY'].val, 'Copy image files to the destination path') #, do_axis_z) + + + Draw.Label('Export Armature Animation...', x+20,y+45, 300, 20) + + GLOBALS['ANIM_ENABLE'] = Draw.Toggle('Enable Animation', EVENT_REDRAW, x+20, y+25, 160, 20, GLOBALS['ANIM_ENABLE'].val, 'Export keyframe animation', do_redraw) + if GLOBALS['ANIM_ENABLE'].val: + Draw.BeginAlign() + GLOBALS['ANIM_OPTIMIZE'] = Draw.Toggle('Optimize Keyframes', EVENT_REDRAW, x+20, y+0, 160, 20, GLOBALS['ANIM_OPTIMIZE'].val, 'Remove double keyframes', do_redraw) + if GLOBALS['ANIM_OPTIMIZE'].val: + GLOBALS['ANIM_OPTIMIZE_PRECISSION'] = Draw.Number('Precission: ', EVENT_NONE, x+180, y+0, 160, 20, GLOBALS['ANIM_OPTIMIZE_PRECISSION'].val, 1, 16, 'Tolerence for comparing double keyframes (higher for greater accuracy)') + Draw.EndAlign() + + Draw.BeginAlign() + GLOBALS['ANIM_ACTION_ALL'][1] = Draw.Toggle('Current Action', EVENT_REDRAW, x+20, y-25, 160, 20, GLOBALS['ANIM_ACTION_ALL'][1].val, 'Use actions currently applied to the armatures (use scene start/end frame)', do_anim_act_cur) + 
GLOBALS['ANIM_ACTION_ALL'][0] = Draw.Toggle('All Actions', EVENT_REDRAW, x+180,y-25, 160, 20, GLOBALS['ANIM_ACTION_ALL'][0].val, 'Use all actions for armatures', do_anim_act_all) + Draw.EndAlign() + + + Draw.Label('Export Batch...', x+20,y-60, 300, 20) + GLOBALS['BATCH_ENABLE'] = Draw.Toggle('Enable Batch', EVENT_REDRAW, x+20, y-80, 160, 20, GLOBALS['BATCH_ENABLE'].val, 'Automate exporting multiple scenes or groups to files', do_redraw) + + if GLOBALS['BATCH_ENABLE'].val: + Draw.BeginAlign() + GLOBALS['BATCH_GROUP'] = Draw.Toggle('Group > File', EVENT_REDRAW, x+20, y-105, 160, 20, GLOBALS['BATCH_GROUP'].val, 'Export each group as an FBX file', do_batch_type_grp) + GLOBALS['BATCH_SCENE'] = Draw.Toggle('Scene > File', EVENT_REDRAW, x+180, y-105, 160, 20, GLOBALS['BATCH_SCENE'].val, 'Export each scene as an FBX file', do_batch_type_sce) + + # Own dir requires OS module + if os: + GLOBALS['BATCH_OWN_DIR'] = Draw.Toggle('Own Dir', EVENT_NONE, x+20, y-125, 80, 20, GLOBALS['BATCH_OWN_DIR'].val, 'Create a dir for each exported file') + GLOBALS['BATCH_FILE_PREFIX'] = Draw.String('Prefix: ', EVENT_NONE, x+100, y-125, 240, 20, GLOBALS['BATCH_FILE_PREFIX'].val, 64, 'Prefix each file with this name ') + else: + GLOBALS['BATCH_FILE_PREFIX'] = Draw.String('Prefix: ', EVENT_NONE, x+20, y-125, 320, 20, GLOBALS['BATCH_FILE_PREFIX'].val, 64, 'Prefix each file with this name ') + + + Draw.EndAlign() + + #y+=80 + + ''' + Draw.BeginAlign() + GLOBALS['FILENAME'] = Draw.String('path: ', EVENT_NONE, x+20, y-170, 300, 20, GLOBALS['FILENAME'].val, 64, 'Prefix each file with this name ') + Draw.PushButton('..', EVENT_FILESEL, x+320, y-170, 20, 20, 'Select the path', do_redraw) + ''' + # Until batch is added + # + + + #Draw.BeginAlign() + Draw.PushButton('Online Help', EVENT_REDRAW, x+20, y-160, 100, 20, 'Open online help in a browser window', do_help) + Draw.PushButton('Cancel', EVENT_EXIT, x+130, y-160, 100, 20, 'Exit the exporter', fbx_ui_exit) + Draw.PushButton('Export', EVENT_FILESEL, x+240, y-160, 100, 20, 'Export the fbx file', do_redraw) + + #Draw.PushButton('Export', EVENT_EXIT, x+180, y-160, 160, 20, 'Export the fbx file', fbx_ui_write) + #Draw.EndAlign() + + # exit when mouse out of the view? 
+ # GLOBALS['EVENT'] = EVENT_EXIT + +#def write_ui(filename): +def write_ui(): + + # globals + GLOBALS['EVENT'] = EVENT_REDRAW + #GLOBALS['MOUSE'] = Window.GetMouseCoords() + GLOBALS['MOUSE'] = [i/2 for i in Window.GetScreenSize()] + GLOBALS['FILENAME'] = '' + ''' + # IF called from the fileselector + if filename == None: + GLOBALS['FILENAME'] = filename # Draw.Create(Blender.sys.makename(ext='.fbx')) + else: + GLOBALS['FILENAME'].val = filename + ''' + GLOBALS['EXP_OBS_SELECTED'] = Draw.Create(1) # dont need 2 variables but just do this for clarity + GLOBALS['EXP_OBS_SCENE'] = Draw.Create(0) + + GLOBALS['EXP_MESH'] = Draw.Create(1) + GLOBALS['EXP_MESH_APPLY_MOD'] = Draw.Create(1) + GLOBALS['EXP_MESH_HQ_NORMALS'] = Draw.Create(0) + GLOBALS['EXP_ARMATURE'] = Draw.Create(1) + GLOBALS['EXP_LAMP'] = Draw.Create(1) + GLOBALS['EXP_CAMERA'] = Draw.Create(1) + GLOBALS['EXP_EMPTY'] = Draw.Create(1) + GLOBALS['EXP_IMAGE_COPY'] = Draw.Create(0) + # animation opts + GLOBALS['ANIM_ENABLE'] = Draw.Create(1) + GLOBALS['ANIM_OPTIMIZE'] = Draw.Create(1) + GLOBALS['ANIM_OPTIMIZE_PRECISSION'] = Draw.Create(4) # decimal places + GLOBALS['ANIM_ACTION_ALL'] = [Draw.Create(0), Draw.Create(1)] # not just the current action + + # batch export options + GLOBALS['BATCH_ENABLE'] = Draw.Create(0) + GLOBALS['BATCH_GROUP'] = Draw.Create(1) # cant have both of these enabled at once. + GLOBALS['BATCH_SCENE'] = Draw.Create(0) # see above + GLOBALS['BATCH_FILE_PREFIX'] = Draw.Create(Blender.sys.makename(ext='_').split('\\')[-1].split('/')[-1]) + GLOBALS['BATCH_OWN_DIR'] = Draw.Create(0) + # done setting globals + + # Used by the user interface + GLOBALS['_SCALE'] = Draw.Create(1.0) + GLOBALS['_XROT90'] = Draw.Create(True) + GLOBALS['_YROT90'] = Draw.Create(False) + GLOBALS['_ZROT90'] = Draw.Create(False) + + # best not do move the cursor + # Window.SetMouseCoords(*[i/2 for i in Window.GetScreenSize()]) + + # hack so the toggle buttons redraw. this is not nice at all + while GLOBALS['EVENT'] != EVENT_EXIT: + + if GLOBALS['BATCH_ENABLE'].val and GLOBALS['BATCH_GROUP'].val and GLOBALS['ANIM_ACTION_ALL'][1].val: + #Draw.PupMenu("Warning%t|Cant batch export groups with 'Current Action' ") + GLOBALS['ANIM_ACTION_ALL'][0].val = 1 + GLOBALS['ANIM_ACTION_ALL'][1].val = 0 + + if GLOBALS['EVENT'] == EVENT_FILESEL: + if GLOBALS['BATCH_ENABLE'].val: + txt = 'Batch FBX Dir' + name = Blender.sys.expandpath('//') + else: + txt = 'Export FBX' + name = Blender.sys.makename(ext='.fbx') + + Blender.Window.FileSelector(fbx_ui_write, txt, name) + #fbx_ui_write('/test.fbx') + break + + Draw.UIBlock(fbx_ui, 0) + + + # GLOBALS.clear() + +class EXPORT_OT_fbx(bpy.types.Operator): + ''' + Operator documentation text, will be used for the operator tooltip and python docs. + ''' + __idname__ = "export.fbx" + __label__ = "Export FBX" + + # List of operator properties, the attributes will be assigned + # to the class instance from the operator settings before calling. + + __props__ = [ + bpy.props.StringProperty(attr="path", name="File Path", description="File path used for exporting the FBX file", maxlen= 1024, default= ""), + + bpy.props.BoolProperty(attr="EXP_OBS_SELECTED", name="Selected Objects", description="Export selected objects on visible layers", default=True), +# bpy.props.BoolProperty(attr="EXP_OBS_SCENE", name="Scene Objects", description="Export all objects in this scene", default=True), + bpy.props.FloatProperty(attr="_SCALE", name="Scale", description="Scale all data, (Note! 
some imports dont support scaled armatures)", min=0.01, max=1000.0, soft_min=0.01, soft_max=1000.0, default=1.0), + bpy.props.BoolProperty(attr="_XROT90", name="Rot X90", description="Rotate all objects 90 degrese about the X axis", default=True), + bpy.props.BoolProperty(attr="_YROT90", name="Rot Y90", description="Rotate all objects 90 degrese about the Y axis", default=False), + bpy.props.BoolProperty(attr="_ZROT90", name="Rot Z90", description="Rotate all objects 90 degrese about the Z axis", default=False), + bpy.props.BoolProperty(attr="EXP_EMPTY", name="Empties", description="Export empty objects", default=True), + bpy.props.BoolProperty(attr="EXP_CAMERA", name="Cameras", description="Export camera objects", default=True), + bpy.props.BoolProperty(attr="EXP_LAMP", name="Lamps", description="Export lamp objects", default=True), + bpy.props.BoolProperty(attr="EXP_ARMATURE", name="Armatures", description="Export armature objects", default=True), + bpy.props.BoolProperty(attr="EXP_MESH", name="Meshes", description="Export mesh objects", default=True), + bpy.props.BoolProperty(attr="EXP_MESH_APPLY_MOD", name="Modifiers", description="Apply modifiers to mesh objects", default=True), + bpy.props.BoolProperty(attr="EXP_MESH_HQ_NORMALS", name="HQ Normals", description="Generate high quality normals", default=True), + bpy.props.BoolProperty(attr="EXP_IMAGE_COPY", name="Copy Image Files", description="Copy image files to the destination path", default=False), + # armature animation + bpy.props.BoolProperty(attr="ANIM_ENABLE", name="Enable Animation", description="Export keyframe animation", default=True), + bpy.props.BoolProperty(attr="ANIM_OPTIMIZE", name="Optimize Keyframes", description="Remove double keyframes", default=True), + bpy.props.FloatProperty(attr="ANIM_OPTIMIZE_PRECISSION", name="Precision", description="Tolerence for comparing double keyframes (higher for greater accuracy)", min=1, max=16, soft_min=1, soft_max=16, default=6.0), +# bpy.props.BoolProperty(attr="ANIM_ACTION_ALL", name="Current Action", description="Use actions currently applied to the armatures (use scene start/end frame)", default=True), + bpy.props.BoolProperty(attr="ANIM_ACTION_ALL", name="All Actions", description="Use all actions for armatures, if false, use current action", default=False), + # batch + bpy.props.BoolProperty(attr="BATCH_ENABLE", name="Enable Batch", description="Automate exporting multiple scenes or groups to files", default=False), + bpy.props.BoolProperty(attr="BATCH_GROUP", name="Group > File", description="Export each group as an FBX file, if false, export each scene as an FBX file", default=False), + bpy.props.BoolProperty(attr="BATCH_OWN_DIR", name="Own Dir", description="Create a dir for each exported file", default=True), + bpy.props.StringProperty(attr="BATCH_FILE_PREFIX", name="Prefix", description="Prefix each file with this name", maxlen= 1024, default=""), + ] + + def poll(self, context): + print("Poll") + return context.active_object != None + + def execute(self, context): + if not self.path: + raise Exception("path not set") + + GLOBAL_MATRIX = mtx4_identity + GLOBAL_MATRIX[0][0] = GLOBAL_MATRIX[1][1] = GLOBAL_MATRIX[2][2] = self._SCALE + if self._XROT90: GLOBAL_MATRIX = GLOBAL_MATRIX * mtx4_x90n + if self._YROT90: GLOBAL_MATRIX = GLOBAL_MATRIX * mtx4_y90n + if self._ZROT90: GLOBAL_MATRIX = GLOBAL_MATRIX * mtx4_z90n + + write(self.path, + None, # XXX + context, + self.EXP_OBS_SELECTED, + self.EXP_MESH, + self.EXP_MESH_APPLY_MOD, +# self.EXP_MESH_HQ_NORMALS, + self.EXP_ARMATURE, 
+ self.EXP_LAMP, + self.EXP_CAMERA, + self.EXP_EMPTY, + self.EXP_IMAGE_COPY, + GLOBAL_MATRIX, + self.ANIM_ENABLE, + self.ANIM_OPTIMIZE, + self.ANIM_OPTIMIZE_PRECISSION, + self.ANIM_ACTION_ALL, + self.BATCH_ENABLE, + self.BATCH_GROUP, + self.BATCH_FILE_PREFIX, + self.BATCH_OWN_DIR) + + return ('FINISHED',) + + def invoke(self, context, event): + wm = context.manager + wm.add_fileselect(self.__operator__) + return ('RUNNING_MODAL',) + + +bpy.ops.add(EXPORT_OT_fbx) + +# if __name__ == "__main__": +# bpy.ops.EXPORT_OT_ply(filename="/tmp/test.ply") + + +# NOTES (all line numbers correspond to original export_fbx.py (under release/scripts) +# - Draw.PupMenu alternative in 2.5?, temporarily replaced PupMenu with print +# - get rid of cleanName somehow +# + fixed: isinstance(inst, bpy.types.*) doesn't work on RNA objects: line 565 +# + get rid of BPyObject_getObjectArmature, move it in RNA? +# - BATCH_ENABLE and BATCH_GROUP options: line 327 +# - implement all BPyMesh_* used here with RNA +# - getDerivedObjects is not fully replicated with .dupli* funcs +# - talk to Campbell, this code won't work? lines 1867-1875 +# - don't know what those colbits are, do we need them? they're said to be deprecated in DNA_object_types.h: 1886-1893 +# - no hq normals: 1900-1901 + +# TODO + +# - bpy.data.remove_scene: line 366 +# - bpy.sys.time move to bpy.sys.util? +# - new scene creation, activation: lines 327-342, 368 +# - uses bpy.sys.expandpath, *.relpath - replace at least relpath + +# SMALL or COSMETICAL +# - find a way to get blender version, and put it in bpy.util?, old was Blender.Get('version') diff --git a/release/scripts/io/export_obj.py b/release/scripts/io/export_obj.py new file mode 100644 index 00000000000..83b400816e3 --- /dev/null +++ b/release/scripts/io/export_obj.py @@ -0,0 +1,996 @@ +#!BPY + +""" +Name: 'Wavefront (.obj)...' +Blender: 248 +Group: 'Export' +Tooltip: 'Save a Wavefront OBJ File' +""" + +__author__ = "Campbell Barton, Jiri Hnidek, Paolo Ciccone" +__url__ = ['http://wiki.blender.org/index.php/Scripts/Manual/Export/wavefront_obj', 'www.blender.org', 'blenderartists.org'] +__version__ = "1.21" + +__bpydoc__ = """\ +This script is an exporter to OBJ file format. + +Usage: + +Select the objects you wish to export and run this script from "File->Export" menu. +Selecting the default options from the popup box will be good in most cases. +All objects that can be represented as a mesh (mesh, curve, metaball, surface, text3d) +will be exported as mesh data. +""" + + +# -------------------------------------------------------------------------- +# OBJ Export v1.1 by Campbell Barton (AKA Ideasman) +# -------------------------------------------------------------------------- +# ***** BEGIN GPL LICENSE BLOCK ***** +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+# +# ***** END GPL LICENCE BLOCK ***** +# -------------------------------------------------------------------------- + +# import math and other in functions that use them for the sake of fast Blender startup +# import math +import os +import time + +import bpy +import Mathutils + + +# Returns a tuple - path,extension. +# 'hello.obj' > ('hello', '.obj') +def splitExt(path): + dotidx = path.rfind('.') + if dotidx == -1: + return path, '' + else: + return path[:dotidx], path[dotidx:] + +def fixName(name): + if name == None: + return 'None' + else: + return name.replace(' ', '_') + + +# this used to be in BPySys module +# frankly, I don't understand how it works +def BPySys_cleanName(name): + + v = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,58,59,60,61,62,63,64,91,92,93,94,96,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254] + + invalid = ''.join([chr(i) for i in v]) + + for ch in invalid: + name = name.replace(ch, '_') + return name + +# A Dict of Materials +# (material.name, image.name):matname_imagename # matname_imagename has gaps removed. +MTL_DICT = {} + +def write_mtl(scene, filename, copy_images): + + world = scene.world + worldAmb = world.ambient_color + + dest_dir = os.path.dirname(filename) + + def copy_image(image): + rel = image.get_export_path(dest_dir, True) + + if copy_images: + abspath = image.get_export_path(dest_dir, False) + if not os.path.exists(abs_path): + shutil.copy(image.get_abs_filename(), abs_path) + + return rel + + + file = open(filename, "w") + # XXX +# file.write('# Blender3D MTL File: %s\n' % Blender.Get('filename').split('\\')[-1].split('/')[-1]) + file.write('# Material Count: %i\n' % len(MTL_DICT)) + # Write material/image combinations we have used. + for key, (mtl_mat_name, mat, img) in MTL_DICT.items(): + + # Get the Blender data for the material and the image. + # Having an image named None will make a bug, dont do it :) + + file.write('newmtl %s\n' % mtl_mat_name) # Define a new material: matname_imgname + + if mat: + file.write('Ns %.6f\n' % ((mat.specular_hardness-1) * 1.9607843137254901) ) # Hardness, convert blenders 1-511 to MTL's + file.write('Ka %.6f %.6f %.6f\n' % tuple([c*mat.ambient for c in worldAmb]) ) # Ambient, uses mirror colour, + file.write('Kd %.6f %.6f %.6f\n' % tuple([c*mat.diffuse_intensity for c in mat.diffuse_color]) ) # Diffuse + file.write('Ks %.6f %.6f %.6f\n' % tuple([c*mat.specular_intensity for c in mat.specular_color]) ) # Specular + if hasattr(mat, "ior"): + file.write('Ni %.6f\n' % mat.ior) # Refraction index + else: + file.write('Ni %.6f\n' % 1.0) + file.write('d %.6f\n' % mat.alpha) # Alpha (obj uses 'd' for dissolve) + + # 0 to disable lighting, 1 for ambient & diffuse only (specular color set to black), 2 for full lighting. + if mat.shadeless: + file.write('illum 0\n') # ignore lighting + elif mat.specular_intensity == 0: + file.write('illum 1\n') # no specular. + else: + file.write('illum 2\n') # light normaly + + else: + #write a dummy material here? 
+ file.write('Ns 0\n') + file.write('Ka %.6f %.6f %.6f\n' % tuple([c for c in worldAmb]) ) # Ambient, uses mirror colour, + file.write('Kd 0.8 0.8 0.8\n') + file.write('Ks 0.8 0.8 0.8\n') + file.write('d 1\n') # No alpha + file.write('illum 2\n') # light normaly + + # Write images! + if img: # We have an image on the face! + # write relative image path + rel = copy_image(img) + file.write('map_Kd %s\n' % rel) # Diffuse mapping image +# file.write('map_Kd %s\n' % img.filename.split('\\')[-1].split('/')[-1]) # Diffuse mapping image + + elif mat: # No face image. if we havea material search for MTex image. + for mtex in mat.textures: + if mtex and mtex.texture.type == 'IMAGE': + try: + filename = copy_image(mtex.texture.image) +# filename = mtex.texture.image.filename.split('\\')[-1].split('/')[-1] + file.write('map_Kd %s\n' % filename) # Diffuse mapping image + break + except: + # Texture has no image though its an image type, best ignore. + pass + + file.write('\n\n') + + file.close() + +# XXX not used +def copy_file(source, dest): + file = open(source, 'rb') + data = file.read() + file.close() + + file = open(dest, 'wb') + file.write(data) + file.close() + + +# XXX not used +def copy_images(dest_dir): + if dest_dir[-1] != os.sep: + dest_dir += os.sep +# if dest_dir[-1] != sys.sep: +# dest_dir += sys.sep + + # Get unique image names + uniqueImages = {} + for matname, mat, image in MTL_DICT.values(): # Only use image name + # Get Texface images + if image: + uniqueImages[image] = image # Should use sets here. wait until Python 2.4 is default. + + # Get MTex images + if mat: + for mtex in mat.textures: + if mtex and mtex.texture.type == 'IMAGE': + image_tex = mtex.texture.image + if image_tex: + try: + uniqueImages[image_tex] = image_tex + except: + pass + + # Now copy images + copyCount = 0 + +# for bImage in uniqueImages.values(): +# image_path = bpy.sys.expandpath(bImage.filename) +# if bpy.sys.exists(image_path): +# # Make a name for the target path. 
+# dest_image_path = dest_dir + image_path.split('\\')[-1].split('/')[-1] +# if not bpy.sys.exists(dest_image_path): # Image isnt alredy there +# print('\tCopying "%s" > "%s"' % (image_path, dest_image_path)) +# copy_file(image_path, dest_image_path) +# copyCount+=1 + +# paths= bpy.util.copy_images(uniqueImages.values(), dest_dir) + + print('\tCopied %d images' % copyCount) +# print('\tCopied %d images' % copyCount) + +# XXX not converted +def test_nurbs_compat(ob): + if ob.type != 'Curve': + return False + + for nu in ob.data: + if (not nu.knotsV) and nu.type != 1: # not a surface and not bezier + return True + + return False + + +# XXX not converted +def write_nurb(file, ob, ob_mat): + tot_verts = 0 + cu = ob.data + + # use negative indices + Vector = Blender.Mathutils.Vector + for nu in cu: + + if nu.type==0: DEG_ORDER_U = 1 + else: DEG_ORDER_U = nu.orderU-1 # Tested to be correct + + if nu.type==1: + print("\tWarning, bezier curve:", ob.name, "only poly and nurbs curves supported") + continue + + if nu.knotsV: + print("\tWarning, surface:", ob.name, "only poly and nurbs curves supported") + continue + + if len(nu) <= DEG_ORDER_U: + print("\tWarning, orderU is lower then vert count, skipping:", ob.name) + continue + + pt_num = 0 + do_closed = (nu.flagU & 1) + do_endpoints = (do_closed==0) and (nu.flagU & 2) + + for pt in nu: + pt = Vector(pt[0], pt[1], pt[2]) * ob_mat + file.write('v %.6f %.6f %.6f\n' % (pt[0], pt[1], pt[2])) + pt_num += 1 + tot_verts += pt_num + + file.write('g %s\n' % (fixName(ob.name))) # fixName(ob.getData(1)) could use the data name too + file.write('cstype bspline\n') # not ideal, hard coded + file.write('deg %d\n' % DEG_ORDER_U) # not used for curves but most files have it still + + curve_ls = [-(i+1) for i in range(pt_num)] + + # 'curv' keyword + if do_closed: + if DEG_ORDER_U == 1: + pt_num += 1 + curve_ls.append(-1) + else: + pt_num += DEG_ORDER_U + curve_ls = curve_ls + curve_ls[0:DEG_ORDER_U] + + file.write('curv 0.0 1.0 %s\n' % (' '.join( [str(i) for i in curve_ls] ))) # Blender has no U and V values for the curve + + # 'parm' keyword + tot_parm = (DEG_ORDER_U + 1) + pt_num + tot_parm_div = float(tot_parm-1) + parm_ls = [(i/tot_parm_div) for i in range(tot_parm)] + + if do_endpoints: # end points, force param + for i in range(DEG_ORDER_U+1): + parm_ls[i] = 0.0 + parm_ls[-(1+i)] = 1.0 + + file.write('parm u %s\n' % ' '.join( [str(i) for i in parm_ls] )) + + file.write('end\n') + + return tot_verts + +def write(filename, objects, scene, + EXPORT_TRI=False, + EXPORT_EDGES=False, + EXPORT_NORMALS=False, + EXPORT_NORMALS_HQ=False, + EXPORT_UV=True, + EXPORT_MTL=True, + EXPORT_COPY_IMAGES=False, + EXPORT_APPLY_MODIFIERS=True, + EXPORT_ROTX90=True, + EXPORT_BLEN_OBS=True, + EXPORT_GROUP_BY_OB=False, + EXPORT_GROUP_BY_MAT=False, + EXPORT_KEEP_VERT_ORDER=False, + EXPORT_POLYGROUPS=False, + EXPORT_CURVE_AS_NURBS=True): + ''' + Basic write function. The context and options must be alredy set + This can be accessed externaly + eg. + write( 'c:\\test\\foobar.obj', Blender.Object.GetSelected() ) # Using default options. + ''' + + # XXX + import math + + def veckey3d(v): + return round(v.x, 6), round(v.y, 6), round(v.z, 6) + + def veckey2d(v): + return round(v[0], 6), round(v[1], 6) + # return round(v.x, 6), round(v.y, 6) + + def findVertexGroupName(face, vWeightMap): + """ + Searches the vertexDict to see what groups is assigned to a given face. 
+ We use a frequency system in order to sort out the name because a given vetex can + belong to two or more groups at the same time. To find the right name for the face + we list all the possible vertex group names with their frequency and then sort by + frequency in descend order. The top element is the one shared by the highest number + of vertices is the face's group + """ + weightDict = {} + for vert_index in face.verts: +# for vert in face: + vWeights = vWeightMap[vert_index] +# vWeights = vWeightMap[vert] + for vGroupName, weight in vWeights: + weightDict[vGroupName] = weightDict.get(vGroupName, 0) + weight + + if weightDict: + alist = [(weight,vGroupName) for vGroupName, weight in weightDict.items()] # sort least to greatest amount of weight + alist.sort() + return(alist[-1][1]) # highest value last + else: + return '(null)' + + # TODO: implement this in C? dunno how it should be called... + def getVertsFromGroup(me, group_index): + ret = [] + + for i, v in enumerate(me.verts): + for g in v.groups: + if g.group == group_index: + ret.append((i, g.weight)) + + return ret + + + print('OBJ Export path: "%s"' % filename) + temp_mesh_name = '~tmp-mesh' + + time1 = time.clock() +# time1 = sys.time() +# scn = Scene.GetCurrent() + + file = open(filename, "w") + + # Write Header + version = "2.5" + file.write('# Blender3D v%s OBJ File: %s\n' % (version, bpy.data.filename.split('/')[-1].split('\\')[-1] )) + file.write('# www.blender3d.org\n') + + # Tell the obj file what material file to use. + if EXPORT_MTL: + mtlfilename = '%s.mtl' % '.'.join(filename.split('.')[:-1]) + file.write('mtllib %s\n' % ( mtlfilename.split('\\')[-1].split('/')[-1] )) + + if EXPORT_ROTX90: + mat_xrot90= Mathutils.RotationMatrix(-math.pi/2, 4, 'x') + + # Initialize totals, these are updated each object + totverts = totuvco = totno = 1 + + face_vert_index = 1 + + globalNormals = {} + + # Get all meshes + for ob_main in objects: + + # ignore dupli children + if ob_main.parent and ob_main.parent.dupli_type != 'NONE': + # XXX + print(ob_main.name, 'is a dupli child - ignoring') + continue + + obs = [] + if ob_main.dupli_type != 'NONE': + # XXX + print('creating dupli_list on', ob_main.name) + ob_main.create_dupli_list() + + obs = [(dob.object, dob.matrix) for dob in ob_main.dupli_list] + + # XXX debug print + print(ob_main.name, 'has', len(obs), 'dupli children') + else: + obs = [(ob_main, ob_main.matrix)] + + for ob, ob_mat in obs: + + # XXX postponed +# # Nurbs curve support +# if EXPORT_CURVE_AS_NURBS and test_nurbs_compat(ob): +# if EXPORT_ROTX90: +# ob_mat = ob_mat * mat_xrot90 + +# totverts += write_nurb(file, ob, ob_mat) + +# continue +# end nurbs + + if ob.type != 'MESH': + continue + + me = ob.create_mesh(EXPORT_APPLY_MODIFIERS, 'PREVIEW') + + if EXPORT_ROTX90: + me.transform(ob_mat * mat_xrot90) + else: + me.transform(ob_mat) + +# # Will work for non meshes now! :) +# me= BPyMesh.getMeshFromObject(ob, containerMesh, EXPORT_APPLY_MODIFIERS, EXPORT_POLYGROUPS, scn) +# if not me: +# continue + + if EXPORT_UV: + faceuv = len(me.uv_textures) > 0 + else: + faceuv = False + + # XXX - todo, find a better way to do triangulation + # ...removed convert_to_triface because it relies on editmesh + ''' + # We have a valid mesh + if EXPORT_TRI and me.faces: + # Add a dummy object to it. 
+ has_quads = False + for f in me.faces: + if f.verts[3] != 0: + has_quads = True + break + + if has_quads: + newob = bpy.data.add_object('MESH', 'temp_object') + newob.data = me + # if we forget to set Object.data - crash + scene.add_object(newob) + newob.convert_to_triface(scene) + # mesh will still be there + scene.remove_object(newob) + ''' + + # Make our own list so it can be sorted to reduce context switching + face_index_pairs = [ (face, index) for index, face in enumerate(me.faces)] + # faces = [ f for f in me.faces ] + + if EXPORT_EDGES: + edges = me.edges + else: + edges = [] + + if not (len(face_index_pairs)+len(edges)+len(me.verts)): # Make sure there is somthing to write + + # clean up + bpy.data.remove_mesh(me) + + continue # dont bother with this mesh. + + # XXX + # High Quality Normals + if EXPORT_NORMALS and face_index_pairs: + me.calc_normals() +# if EXPORT_NORMALS_HQ: +# BPyMesh.meshCalcNormals(me) +# else: +# # transforming normals is incorrect +# # when the matrix is scaled, +# # better to recalculate them +# me.calcNormals() + + materials = me.materials + + materialNames = [] + materialItems = [m for m in materials] + if materials: + for mat in materials: + if mat: # !=None + materialNames.append(mat.name) + else: + materialNames.append(None) + # Cant use LC because some materials are None. + # materialNames = map(lambda mat: mat.name, materials) # Bug Blender, dosent account for null materials, still broken. + + # Possible there null materials, will mess up indicies + # but at least it will export, wait until Blender gets fixed. + materialNames.extend((16-len(materialNames)) * [None]) + materialItems.extend((16-len(materialItems)) * [None]) + + # Sort by Material, then images + # so we dont over context switch in the obj file. + if EXPORT_KEEP_VERT_ORDER: + pass + elif faceuv: + # XXX update + tface = me.active_uv_texture.data + + # exception only raised if Python 2.3 or lower... + try: + face_index_pairs.sort(key = lambda a: (a[0].material_index, tface[a[1]].image, a[0].smooth)) + except: + face_index_pairs.sort(lambda a,b: cmp((a[0].material_index, tface[a[1]].image, a[0].smooth), + (b[0].material_index, tface[b[1]].image, b[0].smooth))) + elif len(materials) > 1: + try: + face_index_pairs.sort(key = lambda a: (a[0].material_index, a[0].smooth)) + except: + face_index_pairs.sort(lambda a,b: cmp((a[0].material_index, a[0].smooth), + (b[0].material_index, b[0].smooth))) + else: + # no materials + try: + face_index_pairs.sort(key = lambda a: a[0].smooth) + except: + face_index_pairs.sort(lambda a,b: cmp(a[0].smooth, b[0].smooth)) +# if EXPORT_KEEP_VERT_ORDER: +# pass +# elif faceuv: +# try: faces.sort(key = lambda a: (a.mat, a.image, a.smooth)) +# except: faces.sort(lambda a,b: cmp((a.mat, a.image, a.smooth), (b.mat, b.image, b.smooth))) +# elif len(materials) > 1: +# try: faces.sort(key = lambda a: (a.mat, a.smooth)) +# except: faces.sort(lambda a,b: cmp((a.mat, a.smooth), (b.mat, b.smooth))) +# else: +# # no materials +# try: faces.sort(key = lambda a: a.smooth) +# except: faces.sort(lambda a,b: cmp(a.smooth, b.smooth)) + + faces = [pair[0] for pair in face_index_pairs] + + # Set the default mat to no material and no image. + contextMat = (0, 0) # Can never be this, so we will label a new material teh first chance we get. + contextSmooth = None # Will either be true or false, set bad to force initialization switch. 
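            # Illustration (assumed demo data, not exporter state): sorting by
            # (material_index, image, smooth) makes faces that share a render state
            # adjacent, so the writer below emits the fewest 'usemtl' / 's' switches:
            #
            #   demo = [(1, 'b.png', True), (0, 'a.png', False), (0, 'a.png', True)]
            #   demo.sort(key=lambda t: (t[0], t[1], t[2]))
            #   # -> the two 'a.png' faces are now consecutive and share one 'usemtl' line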
+ + if EXPORT_BLEN_OBS or EXPORT_GROUP_BY_OB: + name1 = ob.name + name2 = ob.data.name + if name1 == name2: + obnamestring = fixName(name1) + else: + obnamestring = '%s_%s' % (fixName(name1), fixName(name2)) + + if EXPORT_BLEN_OBS: + file.write('o %s\n' % obnamestring) # Write Object name + else: # if EXPORT_GROUP_BY_OB: + file.write('g %s\n' % obnamestring) + + + # Vert + for v in me.verts: + file.write('v %.6f %.6f %.6f\n' % tuple(v.co)) + + # UV + if faceuv: + uv_face_mapping = [[0,0,0,0] for f in faces] # a bit of a waste for tri's :/ + + uv_dict = {} # could use a set() here + uv_layer = me.active_uv_texture + for f, f_index in face_index_pairs: + + tface = uv_layer.data[f_index] + + uvs = tface.uv + # uvs = [tface.uv1, tface.uv2, tface.uv3] + + # # add another UV if it's a quad + # if len(f.verts) == 4: + # uvs.append(tface.uv4) + + for uv_index, uv in enumerate(uvs): + uvkey = veckey2d(uv) + try: + uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] + except: + uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] = len(uv_dict) + file.write('vt %.6f %.6f\n' % tuple(uv)) + +# uv_dict = {} # could use a set() here +# for f_index, f in enumerate(faces): + +# for uv_index, uv in enumerate(f.uv): +# uvkey = veckey2d(uv) +# try: +# uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] +# except: +# uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] = len(uv_dict) +# file.write('vt %.6f %.6f\n' % tuple(uv)) + + uv_unique_count = len(uv_dict) +# del uv, uvkey, uv_dict, f_index, uv_index + # Only need uv_unique_count and uv_face_mapping + + # NORMAL, Smooth/Non smoothed. + if EXPORT_NORMALS: + for f in faces: + if f.smooth: + for v in f: + noKey = veckey3d(v.normal) + if noKey not in globalNormals: + globalNormals[noKey] = totno + totno +=1 + file.write('vn %.6f %.6f %.6f\n' % noKey) + else: + # Hard, 1 normal from the face. + noKey = veckey3d(f.normal) + if noKey not in globalNormals: + globalNormals[noKey] = totno + totno +=1 + file.write('vn %.6f %.6f %.6f\n' % noKey) + + if not faceuv: + f_image = None + + # XXX + if EXPORT_POLYGROUPS: + # Retrieve the list of vertex groups +# vertGroupNames = me.getVertGroupNames() + + currentVGroup = '' + # Create a dictionary keyed by face id and listing, for each vertex, the vertex groups it belongs to + vgroupsMap = [[] for _i in range(len(me.verts))] +# vgroupsMap = [[] for _i in xrange(len(me.verts))] + for g in ob.vertex_groups: +# for vertexGroupName in vertGroupNames: + for vIdx, vWeight in getVertsFromGroup(me, g.index): +# for vIdx, vWeight in me.getVertsFromGroup(vertexGroupName, 1): + vgroupsMap[vIdx].append((g.name, vWeight)) + + for f_index, f in enumerate(faces): + f_v = [{"index": index, "vertex": me.verts[index]} for index in f.verts] + + # if f.verts[3] == 0: + # f_v.pop() + +# f_v= f.v + f_smooth= f.smooth + f_mat = min(f.material_index, len(materialNames)-1) +# f_mat = min(f.mat, len(materialNames)-1) + if faceuv: + + tface = me.active_uv_texture.data[face_index_pairs[f_index][1]] + + f_image = tface.image + f_uv = tface.uv + # f_uv= [tface.uv1, tface.uv2, tface.uv3] + # if len(f.verts) == 4: + # f_uv.append(tface.uv4) +# f_image = f.image +# f_uv= f.uv + + # MAKE KEY + if faceuv and f_image: # Object is always true. + key = materialNames[f_mat], f_image.name + else: + key = materialNames[f_mat], None # No image, use None instead. 
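                # Note: this (material name, image name) pair is also the MTL_DICT key,
                # so each distinct material/face-image combination ends up as exactly one
                # 'newmtl' entry when write_mtl() runs at the end of the export.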
+ + # Write the vertex group + if EXPORT_POLYGROUPS: + if len(ob.vertex_groups): + # find what vertext group the face belongs to + theVGroup = findVertexGroupName(f,vgroupsMap) + if theVGroup != currentVGroup: + currentVGroup = theVGroup + file.write('g %s\n' % theVGroup) +# # Write the vertex group +# if EXPORT_POLYGROUPS: +# if vertGroupNames: +# # find what vertext group the face belongs to +# theVGroup = findVertexGroupName(f,vgroupsMap) +# if theVGroup != currentVGroup: +# currentVGroup = theVGroup +# file.write('g %s\n' % theVGroup) + + # CHECK FOR CONTEXT SWITCH + if key == contextMat: + pass # Context alredy switched, dont do anything + else: + if key[0] == None and key[1] == None: + # Write a null material, since we know the context has changed. + if EXPORT_GROUP_BY_MAT: + # can be mat_image or (null) + file.write('g %s_%s\n' % (fixName(ob.name), fixName(ob.data.name)) ) # can be mat_image or (null) + file.write('usemtl (null)\n') # mat, image + + else: + mat_data= MTL_DICT.get(key) + if not mat_data: + # First add to global dict so we can export to mtl + # Then write mtl + + # Make a new names from the mat and image name, + # converting any spaces to underscores with fixName. + + # If none image dont bother adding it to the name + if key[1] == None: + mat_data = MTL_DICT[key] = ('%s'%fixName(key[0])), materialItems[f_mat], f_image + else: + mat_data = MTL_DICT[key] = ('%s_%s' % (fixName(key[0]), fixName(key[1]))), materialItems[f_mat], f_image + + if EXPORT_GROUP_BY_MAT: + file.write('g %s_%s_%s\n' % (fixName(ob.name), fixName(ob.data.name), mat_data[0]) ) # can be mat_image or (null) + + file.write('usemtl %s\n' % mat_data[0]) # can be mat_image or (null) + + contextMat = key + if f_smooth != contextSmooth: + if f_smooth: # on now off + file.write('s 1\n') + contextSmooth = f_smooth + else: # was off now on + file.write('s off\n') + contextSmooth = f_smooth + + file.write('f') + if faceuv: + if EXPORT_NORMALS: + if f_smooth: # Smoothed, use vertex normals + for vi, v in enumerate(f_v): + file.write( ' %d/%d/%d' % \ + (v["index"] + totverts, + totuvco + uv_face_mapping[f_index][vi], + globalNormals[ veckey3d(v["vertex"].normal) ]) ) # vert, uv, normal + + else: # No smoothing, face normals + no = globalNormals[ veckey3d(f.normal) ] + for vi, v in enumerate(f_v): + file.write( ' %d/%d/%d' % \ + (v["index"] + totverts, + totuvco + uv_face_mapping[f_index][vi], + no) ) # vert, uv, normal + else: # No Normals + for vi, v in enumerate(f_v): + file.write( ' %d/%d' % (\ + v["index"] + totverts,\ + totuvco + uv_face_mapping[f_index][vi])) # vert, uv + + face_vert_index += len(f_v) + + else: # No UV's + if EXPORT_NORMALS: + if f_smooth: # Smoothed, use vertex normals + for v in f_v: + file.write( ' %d//%d' % + (v["index"] + totverts, globalNormals[ veckey3d(v["vertex"].normal) ]) ) + else: # No smoothing, face normals + no = globalNormals[ veckey3d(f.normal) ] + for v in f_v: + file.write( ' %d//%d' % (v["index"] + totverts, no) ) + else: # No Normals + for v in f_v: + file.write( ' %d' % (v["index"] + totverts) ) + + file.write('\n') + + # Write edges. 
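            # (only loose edges are written, as two-vertex 'f' records offset by totverts,
            # matching the vertex indexing used for the faces above)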
+ if EXPORT_EDGES: + for ed in edges: + if ed.loose: + file.write('f %d %d\n' % (ed.verts[0] + totverts, ed.verts[1] + totverts)) + + # Make the indicies global rather then per mesh + totverts += len(me.verts) + if faceuv: + totuvco += uv_unique_count + + # clean up + bpy.data.remove_mesh(me) + + if ob_main.dupli_type != 'NONE': + ob_main.free_dupli_list() + + file.close() + + + # Now we have all our materials, save them + if EXPORT_MTL: + write_mtl(scene, mtlfilename, EXPORT_COPY_IMAGES) +# if EXPORT_COPY_IMAGES: +# dest_dir = os.path.basename(filename) +# # dest_dir = filename +# # # Remove chars until we are just the path. +# # while dest_dir and dest_dir[-1] not in '\\/': +# # dest_dir = dest_dir[:-1] +# if dest_dir: +# copy_images(dest_dir) +# else: +# print('\tError: "%s" could not be used as a base for an image path.' % filename) + + print("OBJ Export time: %.2f" % (time.clock() - time1)) +# print "OBJ Export time: %.2f" % (sys.time() - time1) + +def do_export(filename, context, + EXPORT_APPLY_MODIFIERS = True, # not used + EXPORT_ROTX90 = True, # wrong + EXPORT_TRI = False, # ok + EXPORT_EDGES = False, + EXPORT_NORMALS = False, # not yet + EXPORT_NORMALS_HQ = False, # not yet + EXPORT_UV = True, # ok + EXPORT_MTL = True, + EXPORT_SEL_ONLY = True, # ok + EXPORT_ALL_SCENES = False, # XXX not working atm + EXPORT_ANIMATION = False, + EXPORT_COPY_IMAGES = False, + EXPORT_BLEN_OBS = True, + EXPORT_GROUP_BY_OB = False, + EXPORT_GROUP_BY_MAT = False, + EXPORT_KEEP_VERT_ORDER = False, + EXPORT_POLYGROUPS = False, + EXPORT_CURVE_AS_NURBS = True): + # Window.EditMode(0) + # Window.WaitCursor(1) + + base_name, ext = splitExt(filename) + context_name = [base_name, '', '', ext] # Base name, scene name, frame number, extension + + orig_scene = context.scene + +# if EXPORT_ALL_SCENES: +# export_scenes = bpy.data.scenes +# else: +# export_scenes = [orig_scene] + + # XXX only exporting one scene atm since changing + # current scene is not possible. + # Brecht says that ideally in 2.5 we won't need such a function, + # allowing multiple scenes open at once. + export_scenes = [orig_scene] + + # Export all scenes. + for scn in export_scenes: + # scn.makeCurrent() # If already current, this is not slow. + # context = scn.getRenderingContext() + orig_frame = scn.current_frame + + if EXPORT_ALL_SCENES: # Add scene name into the context_name + context_name[1] = '_%s' % BPySys_cleanName(scn.name) # WARNING, its possible that this could cause a collision. we could fix if were feeling parranoied. + + # Export an animation? + if EXPORT_ANIMATION: + scene_frames = range(scn.start_frame, context.end_frame+1) # Up to and including the end frame. + else: + scene_frames = [orig_frame] # Dont export an animation. + + # Loop through all frames in the scene and export. + for frame in scene_frames: + if EXPORT_ANIMATION: # Add frame to the filename. + context_name[2] = '_%.6d' % frame + + scn.current_frame = frame + if EXPORT_SEL_ONLY: + export_objects = context.selected_objects + else: + export_objects = scn.objects + + full_path= ''.join(context_name) + + # erm... bit of a problem here, this can overwrite files when exporting frames. not too bad. + # EXPORT THE FILE. 
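            # e.g. context_name == ['/tmp/scene', '_MyScene', '_000012', '.obj'] joins to
            # '/tmp/scene_MyScene_000012.obj' (illustrative values; the scene-name part is
            # only filled in for EXPORT_ALL_SCENES, the frame part only for EXPORT_ANIMATION).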
+ write(full_path, export_objects, scn, + EXPORT_TRI, EXPORT_EDGES, EXPORT_NORMALS, + EXPORT_NORMALS_HQ, EXPORT_UV, EXPORT_MTL, + EXPORT_COPY_IMAGES, EXPORT_APPLY_MODIFIERS, + EXPORT_ROTX90, EXPORT_BLEN_OBS, + EXPORT_GROUP_BY_OB, EXPORT_GROUP_BY_MAT, EXPORT_KEEP_VERT_ORDER, + EXPORT_POLYGROUPS, EXPORT_CURVE_AS_NURBS) + + + scn.current_frame = orig_frame + + # Restore old active scene. +# orig_scene.makeCurrent() +# Window.WaitCursor(0) + + +class EXPORT_OT_obj(bpy.types.Operator): + ''' + Currently the exporter lacks these features: + * nurbs + * multiple scene export (only active scene is written) + * particles + ''' + __idname__ = "export.obj" + __label__ = 'Export OBJ' + + # List of operator properties, the attributes will be assigned + # to the class instance from the operator settings before calling. + + __props__ = [ + bpy.props.StringProperty(attr="path", name="File Path", description="File path used for exporting the OBJ file", maxlen= 1024, default= ""), + + # context group + bpy.props.BoolProperty(attr="use_selection", name="Selection Only", description="", default= False), + bpy.props.BoolProperty(attr="use_all_scenes", name="All Scenes", description="", default= False), + bpy.props.BoolProperty(attr="use_animation", name="All Animation", description="", default= False), + + # object group + bpy.props.BoolProperty(attr="use_modifiers", name="Apply Modifiers", description="", default= True), + bpy.props.BoolProperty(attr="use_rotate90", name="Rotate X90", description="", default= True), + + # extra data group + bpy.props.BoolProperty(attr="use_edges", name="Edges", description="", default= True), + bpy.props.BoolProperty(attr="use_normals", name="Normals", description="", default= False), + bpy.props.BoolProperty(attr="use_hq_normals", name="High Quality Normals", description="", default= True), + bpy.props.BoolProperty(attr="use_uvs", name="UVs", description="", default= True), + bpy.props.BoolProperty(attr="use_materials", name="Materials", description="", default= True), + bpy.props.BoolProperty(attr="copy_images", name="Copy Images", description="", default= False), + bpy.props.BoolProperty(attr="use_triangles", name="Triangulate", description="", default= False), + bpy.props.BoolProperty(attr="use_vertex_groups", name="Polygroups", description="", default= False), + bpy.props.BoolProperty(attr="use_nurbs", name="Nurbs", description="", default= False), + + # grouping group + bpy.props.BoolProperty(attr="use_blen_objects", name="Objects as OBJ Objects", description="", default= True), + bpy.props.BoolProperty(attr="group_by_object", name="Objects as OBJ Groups ", description="", default= False), + bpy.props.BoolProperty(attr="group_by_material", name="Material Groups", description="", default= False), + bpy.props.BoolProperty(attr="keep_vertex_order", name="Keep Vertex Order", description="", default= False) + ] + + def execute(self, context): + + do_export(self.path, context, + EXPORT_TRI=self.use_triangles, + EXPORT_EDGES=self.use_edges, + EXPORT_NORMALS=self.use_normals, + EXPORT_NORMALS_HQ=self.use_hq_normals, + EXPORT_UV=self.use_uvs, + EXPORT_MTL=self.use_materials, + EXPORT_COPY_IMAGES=self.copy_images, + EXPORT_APPLY_MODIFIERS=self.use_modifiers, + EXPORT_ROTX90=self.use_rotate90, + EXPORT_BLEN_OBS=self.use_blen_objects, + EXPORT_GROUP_BY_OB=self.group_by_object, + EXPORT_GROUP_BY_MAT=self.group_by_material, + EXPORT_KEEP_VERT_ORDER=self.keep_vertex_order, + EXPORT_POLYGROUPS=self.use_vertex_groups, + EXPORT_CURVE_AS_NURBS=self.use_nurbs, + 
EXPORT_SEL_ONLY=self.use_selection, + EXPORT_ALL_SCENES=self.use_all_scenes) + + return ('FINISHED',) + + def invoke(self, context, event): + wm = context.manager + wm.add_fileselect(self.__operator__) + return ('RUNNING_MODAL',) + + def poll(self, context): # Poll isnt working yet + print("Poll") + return context.active_object != None + +bpy.ops.add(EXPORT_OT_obj) + +if __name__ == "__main__": + bpy.ops.EXPORT_OT_obj(filename="/tmp/test.obj") + +# CONVERSION ISSUES +# - matrix problem +# - duplis - only tested dupliverts +# - NURBS - needs API additions +# - all scenes export +# + normals calculation +# - get rid of cleanName somehow diff --git a/release/scripts/io/export_ply.py b/release/scripts/io/export_ply.py new file mode 100644 index 00000000000..8e79c3741bb --- /dev/null +++ b/release/scripts/io/export_ply.py @@ -0,0 +1,279 @@ +import bpy + +__author__ = "Bruce Merry" +__version__ = "0.93" +__bpydoc__ = """\ +This script exports Stanford PLY files from Blender. It supports normals, +colours, and texture coordinates per face or per vertex. +Only one mesh can be exported at a time. +""" + +# Copyright (C) 2004, 2005: Bruce Merry, bmerry@cs.uct.ac.za +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +# Vector rounding se we can use as keys +# +# Updated on Aug 11, 2008 by Campbell Barton +# - added 'comment' prefix to comments - Needed to comply with the PLY spec. +# +# Updated on Jan 1, 2007 by Gabe Ghearing +# - fixed normals so they are correctly smooth/flat +# - fixed crash when the model doesn't have uv coords or vertex colors +# - fixed crash when the model has vertex colors but doesn't have uv coords +# - changed float32 to float and uint8 to uchar for compatibility +# Errata/Notes as of Jan 1, 2007 +# - script exports texture coords if they exist even if TexFace isn't selected (not a big deal to me) +# - ST(R) should probably be renamed UV(T) like in most PLY files (importer needs to be updated to take either) +# +# Updated on Jan 3, 2007 by Gabe Ghearing +# - fixed "sticky" vertex UV exporting +# - added pupmenu to enable/disable exporting normals, uv coords, and colors +# Errata/Notes as of Jan 3, 2007 +# - ST(R) coords should probably be renamed UV(T) like in most PLY files (importer needs to be updated to take either) +# - edges should be exported since PLY files support them +# - code is getting spaghettish, it should be refactored... 
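# A minimal sketch (illustrative only, mirroring rvec3d/rvec2d below) of why vectors are
# rounded before being used as dict keys: nearly-equal floats collapse to one key, so a
# vertex reused with the same normal/uv/colour is only written to the PLY file once.
#
#     def _rkey(v):
#         return round(v[0], 6), round(v[1], 6), round(v[2], 6)
#
#     seen = {}
#     for co in ((0.1 + 0.2, 0.0, 1.0), (0.3, 0.0, 1.0)):   # 0.1 + 0.2 != 0.3 exactly
#         seen.setdefault(_rkey(co), len(seen))
#     assert len(seen) == 1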
+# + + +def rvec3d(v): return round(v[0], 6), round(v[1], 6), round(v[2], 6) +def rvec2d(v): return round(v[0], 6), round(v[1], 6) + +def write(filename, scene, ob, \ + EXPORT_APPLY_MODIFIERS= True,\ + EXPORT_NORMALS= True,\ + EXPORT_UV= True,\ + EXPORT_COLORS= True\ + ): + + if not filename.lower().endswith('.ply'): + filename += '.ply' + + if not ob: + raise Exception("Error, Select 1 active object") + return + + file = open(filename, 'w') + + + #EXPORT_EDGES = Draw.Create(0) + """ + is_editmode = Blender.Window.EditMode() + if is_editmode: + Blender.Window.EditMode(0, '', 0) + + Window.WaitCursor(1) + """ + + #mesh = BPyMesh.getMeshFromObject(ob, None, EXPORT_APPLY_MODIFIERS, False, scn) # XXX + if EXPORT_APPLY_MODIFIERS: + mesh = ob.create_mesh(True, 'PREVIEW') + else: + mesh = ob.data + + if not mesh: + raise ("Error, could not get mesh data from active object") + return + + # mesh.transform(ob.matrixWorld) # XXX + + faceUV = len(mesh.uv_textures) > 0 + vertexUV = len(mesh.sticky) > 0 + vertexColors = len(mesh.vertex_colors) > 0 + + if (not faceUV) and (not vertexUV): EXPORT_UV = False + if not vertexColors: EXPORT_COLORS = False + + if not EXPORT_UV: faceUV = vertexUV = False + if not EXPORT_COLORS: vertexColors = False + + if faceUV: + active_uv_layer = None + for lay in mesh.uv_textures: + if lay.active: + active_uv_layer= lay.data + break + if not active_uv_layer: + EXPORT_UV = False + faceUV = None + + if vertexColors: + active_col_layer = None + for lay in mesh.vertex_colors: + if lay.active: + active_col_layer= lay.data + if not active_col_layer: + EXPORT_COLORS = False + vertexColors = None + + # incase + color = uvcoord = uvcoord_key = normal = normal_key = None + + mesh_verts = mesh.verts # save a lookup + ply_verts = [] # list of dictionaries + # vdict = {} # (index, normal, uv) -> new index + vdict = [{} for i in range(len(mesh_verts))] + ply_faces = [[] for f in range(len(mesh.faces))] + vert_count = 0 + for i, f in enumerate(mesh.faces): + + + smooth = f.smooth + if not smooth: + normal = tuple(f.normal) + normal_key = rvec3d(normal) + + if faceUV: + uv = active_uv_layer[i] + uv = uv.uv1, uv.uv2, uv.uv3, uv.uv4 # XXX - crufty :/ + if vertexColors: + col = active_col_layer[i] + col = col.color1, col.color2, col.color3, col.color4 + + f_verts= f.verts + + pf= ply_faces[i] + for j, vidx in enumerate(f_verts): + v = mesh_verts[vidx] + + if smooth: + normal= tuple(v.normal) + normal_key = rvec3d(normal) + + if faceUV: + uvcoord= uv[j][0], 1.0-uv[j][1] + uvcoord_key = rvec2d(uvcoord) + elif vertexUV: + uvcoord= v.uvco[0], 1.0-v.uvco[1] + uvcoord_key = rvec2d(uvcoord) + + if vertexColors: + color= col[j] + color= int(color[0]*255.0), int(color[1]*255.0), int(color[2]*255.0) + + + key = normal_key, uvcoord_key, color + + vdict_local = vdict[vidx] + pf_vidx = vdict_local.get(key) # Will be None initially + + if pf_vidx == None: # same as vdict_local.has_key(key) + pf_vidx = vdict_local[key] = vert_count; + ply_verts.append((vidx, normal, uvcoord, color)) + vert_count += 1 + + pf.append(pf_vidx) + + file.write('ply\n') + file.write('format ascii 1.0\n') + version = "2.5" # Blender.Get('version') + file.write('comment Created by Blender3D %s - www.blender.org, source file: %s\n' % (version, bpy.data.filename.split('/')[-1].split('\\')[-1] )) + + file.write('element vertex %d\n' % len(ply_verts)) + + file.write('property float x\n') + file.write('property float y\n') + file.write('property float z\n') + + # XXX + """ + if EXPORT_NORMALS: + file.write('property float nx\n') + 
file.write('property float ny\n') + file.write('property float nz\n') + """ + if EXPORT_UV: + file.write('property float s\n') + file.write('property float t\n') + if EXPORT_COLORS: + file.write('property uchar red\n') + file.write('property uchar green\n') + file.write('property uchar blue\n') + + file.write('element face %d\n' % len(mesh.faces)) + file.write('property list uchar uint vertex_indices\n') + file.write('end_header\n') + + for i, v in enumerate(ply_verts): + file.write('%.6f %.6f %.6f ' % tuple(mesh_verts[v[0]].co)) # co + """ + if EXPORT_NORMALS: + file.write('%.6f %.6f %.6f ' % v[1]) # no + """ + if EXPORT_UV: file.write('%.6f %.6f ' % v[2]) # uv + if EXPORT_COLORS: file.write('%u %u %u' % v[3]) # col + file.write('\n') + + for pf in ply_faces: + if len(pf)==3: file.write('3 %d %d %d\n' % tuple(pf)) + else: file.write('4 %d %d %d %d\n' % tuple(pf)) + + file.close() + print("writing", filename, "done") + + if EXPORT_APPLY_MODIFIERS: + bpy.data.remove_mesh(mesh) + + # XXX + """ + if is_editmode: + Blender.Window.EditMode(1, '', 0) + """ + +class EXPORT_OT_ply(bpy.types.Operator): + '''Export a single object as a stanford PLY with normals, colours and texture coordinates.''' + __idname__ = "export.ply" + __label__ = "Export PLY" + + # List of operator properties, the attributes will be assigned + # to the class instance from the operator settings before calling. + + __props__ = [ + bpy.props.StringProperty(attr="path", name="File Path", description="File path used for exporting the PLY file", maxlen= 1024, default= ""), + bpy.props.BoolProperty(attr="use_modifiers", name="Apply Modifiers", description="Apply Modifiers to the exported mesh", default= True), + bpy.props.BoolProperty(attr="use_normals", name="Export Normals", description="Export Normals for smooth and hard shaded faces", default= True), + bpy.props.BoolProperty(attr="use_uvs", name="Export UVs", description="Exort the active UV layer", default= True), + bpy.props.BoolProperty(attr="use_colors", name="Export Vertex Colors", description="Exort the active vertex color layer", default= True) + ] + + def poll(self, context): + return context.active_object != None + + def execute(self, context): + # print("Selected: " + context.active_object.name) + + if not self.path: + raise Exception("filename not set") + + write(self.path, context.scene, context.active_object,\ + EXPORT_APPLY_MODIFIERS = self.use_modifiers, + EXPORT_NORMALS = self.use_normals, + EXPORT_UV = self.use_uvs, + EXPORT_COLORS = self.use_colors, + ) + + return ('FINISHED',) + + def invoke(self, context, event): + wm = context.manager + wm.add_fileselect(self.__operator__) + return ('RUNNING_MODAL',) + + +bpy.ops.add(EXPORT_OT_ply) + +if __name__ == "__main__": + bpy.ops.EXPORT_OT_ply(path="/tmp/test.ply") + + diff --git a/release/scripts/io/export_x3d.py b/release/scripts/io/export_x3d.py new file mode 100644 index 00000000000..db29afc7d6d --- /dev/null +++ b/release/scripts/io/export_x3d.py @@ -0,0 +1,1240 @@ +#!BPY +""" Registration info for Blender menus: +Name: 'X3D Extensible 3D (.x3d)...' +Blender: 245 +Group: 'Export' +Tooltip: 'Export selection to Extensible 3D file (.x3d)' +""" + +__author__ = ("Bart", "Campbell Barton") +__email__ = ["Bart, bart:neeneenee*de"] +__url__ = ["Author's (Bart) homepage, http://www.neeneenee.de/vrml"] +__version__ = "2006/01/17" +__bpydoc__ = """\ +This script exports to X3D format. + +Usage: + +Run this script from "File->Export" menu. 
A pop-up will ask whether you +want to export only selected or all relevant objects. + +Known issues:
+ Doesn't handle multiple materials (don't use material indices);
+ Doesn't handle multiple UV textures on a single mesh (create a mesh for each texture);
+ Can't get the texture array associated with material * not the UV ones; +""" + + +# $Id$ +# +#------------------------------------------------------------------------ +# X3D exporter for blender 2.36 or above +# +# ***** BEGIN GPL LICENSE BLOCK ***** +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +# +# ***** END GPL LICENCE BLOCK ***** +# + +#################################### +# Library dependancies +#################################### + +import math +import os + +import bpy +import Mathutils + +from export_3ds import create_derived_objects, free_derived_objects + +# import Blender +# from Blender import Object, Lamp, Draw, Image, Text, sys, Mesh +# from Blender.Scene import Render +# import BPyObject +# import BPyMesh + +# +DEG2RAD=0.017453292519943295 +MATWORLD= Mathutils.RotationMatrix(-90, 4, 'x') + +#################################### +# Global Variables +#################################### + +filename = "" +# filename = Blender.Get('filename') +_safeOverwrite = True + +extension = '' + +########################################################## +# Functions for writing output file +########################################################## + +class x3d_class: + + def __init__(self, filename): + #--- public you can change these --- + self.writingcolor = 0 + self.writingtexture = 0 + self.writingcoords = 0 + self.proto = 1 + self.matonly = 0 + self.share = 0 + self.billnode = 0 + self.halonode = 0 + self.collnode = 0 + self.tilenode = 0 + self.verbose=2 # level of verbosity in console 0-none, 1-some, 2-most + self.cp=3 # decimals for material color values 0.000 - 1.000 + self.vp=3 # decimals for vertex coordinate values 0.000 - n.000 + self.tp=3 # decimals for texture coordinate values 0.000 - 1.000 + self.it=3 + + #--- class private don't touch --- + self.texNames={} # dictionary of textureNames + self.matNames={} # dictionary of materiaNames + self.meshNames={} # dictionary of meshNames + self.indentLevel=0 # keeps track of current indenting + self.filename=filename + self.file = None + if filename.lower().endswith('.x3dz'): + try: + import gzip + self.file = gzip.open(filename, "w") + except: + print("failed to import compression modules, exporting uncompressed") + self.filename = filename[:-1] # remove trailing z + + if self.file == None: + self.file = open(self.filename, "w") + + self.bNav=0 + self.nodeID=0 + self.namesReserved=[ "Anchor","Appearance","Arc2D","ArcClose2D","AudioClip","Background","Billboard", + "BooleanFilter","BooleanSequencer","BooleanToggle","BooleanTrigger","Box","Circle2D", + "Collision","Color","ColorInterpolator","ColorRGBA","component","Cone","connect", + "Contour2D","ContourPolyline2D","Coordinate","CoordinateDouble","CoordinateInterpolator", + "CoordinateInterpolator2D","Cylinder","CylinderSensor","DirectionalLight","Disk2D", + 
"ElevationGrid","EspduTransform","EXPORT","ExternProtoDeclare","Extrusion","field", + "fieldValue","FillProperties","Fog","FontStyle","GeoCoordinate","GeoElevationGrid", + "GeoLocationLocation","GeoLOD","GeoMetadata","GeoOrigin","GeoPositionInterpolator", + "GeoTouchSensor","GeoViewpoint","Group","HAnimDisplacer","HAnimHumanoid","HAnimJoint", + "HAnimSegment","HAnimSite","head","ImageTexture","IMPORT","IndexedFaceSet", + "IndexedLineSet","IndexedTriangleFanSet","IndexedTriangleSet","IndexedTriangleStripSet", + "Inline","IntegerSequencer","IntegerTrigger","IS","KeySensor","LineProperties","LineSet", + "LoadSensor","LOD","Material","meta","MetadataDouble","MetadataFloat","MetadataInteger", + "MetadataSet","MetadataString","MovieTexture","MultiTexture","MultiTextureCoordinate", + "MultiTextureTransform","NavigationInfo","Normal","NormalInterpolator","NurbsCurve", + "NurbsCurve2D","NurbsOrientationInterpolator","NurbsPatchSurface", + "NurbsPositionInterpolator","NurbsSet","NurbsSurfaceInterpolator","NurbsSweptSurface", + "NurbsSwungSurface","NurbsTextureCoordinate","NurbsTrimmedSurface","OrientationInterpolator", + "PixelTexture","PlaneSensor","PointLight","PointSet","Polyline2D","Polypoint2D", + "PositionInterpolator","PositionInterpolator2D","ProtoBody","ProtoDeclare","ProtoInstance", + "ProtoInterface","ProximitySensor","ReceiverPdu","Rectangle2D","ROUTE","ScalarInterpolator", + "Scene","Script","Shape","SignalPdu","Sound","Sphere","SphereSensor","SpotLight","StaticGroup", + "StringSensor","Switch","Text","TextureBackground","TextureCoordinate","TextureCoordinateGenerator", + "TextureTransform","TimeSensor","TimeTrigger","TouchSensor","Transform","TransmitterPdu", + "TriangleFanSet","TriangleSet","TriangleSet2D","TriangleStripSet","Viewpoint","VisibilitySensor", + "WorldInfo","X3D","XvlShell","VertexShader","FragmentShader","MultiShaderAppearance","ShaderAppearance" ] + self.namesStandard=[ "Empty","Empty.000","Empty.001","Empty.002","Empty.003","Empty.004","Empty.005", + "Empty.006","Empty.007","Empty.008","Empty.009","Empty.010","Empty.011","Empty.012", + "Scene.001","Scene.002","Scene.003","Scene.004","Scene.005","Scene.06","Scene.013", + "Scene.006","Scene.007","Scene.008","Scene.009","Scene.010","Scene.011","Scene.012", + "World","World.000","World.001","World.002","World.003","World.004","World.005" ] + self.namesFog=[ "","LINEAR","EXPONENTIAL","" ] + +########################################################## +# Writing nodes routines +########################################################## + + def writeHeader(self): + #bfile = sys.expandpath( Blender.Get('filename') ).replace('<', '<').replace('>', '>') + bfile = self.filename.replace('<', '<').replace('>', '>') # use outfile name + self.file.write("\n") + self.file.write("\n") + self.file.write("\n") + self.file.write("\n") + self.file.write("\t\n" % os.path.basename(bfile)) + # self.file.write("\t\n" % sys.basename(bfile)) + self.file.write("\t\n" % '2.5') + # self.file.write("\t\n" % Blender.Get('version')) + self.file.write("\t\n") + self.file.write("\n") + self.file.write("\n") + + # This functionality is poorly defined, disabling for now - campbell + ''' + def writeInline(self): + inlines = Blender.Scene.Get() + allinlines = len(inlines) + if scene != inlines[0]: + return + else: + for i in xrange(allinlines): + nameinline=inlines[i].name + if (nameinline not in self.namesStandard) and (i > 0): + self.file.write("" % nameinline) + self.file.write("\n\n") + + + def writeScript(self): + textEditor = Blender.Text.Get() + 
alltext = len(textEditor) + for i in xrange(alltext): + nametext = textEditor[i].name + nlines = textEditor[i].getNLines() + if (self.proto == 1): + if (nametext == "proto" or nametext == "proto.js" or nametext == "proto.txt") and (nlines != None): + nalllines = len(textEditor[i].asLines()) + alllines = textEditor[i].asLines() + for j in xrange(nalllines): + self.writeIndented(alllines[j] + "\n") + elif (self.proto == 0): + if (nametext == "route" or nametext == "route.js" or nametext == "route.txt") and (nlines != None): + nalllines = len(textEditor[i].asLines()) + alllines = textEditor[i].asLines() + for j in xrange(nalllines): + self.writeIndented(alllines[j] + "\n") + self.writeIndented("\n") + ''' + + def writeViewpoint(self, ob, mat, scene): + context = scene.render_data + # context = scene.render + ratio = float(context.resolution_x)/float(context.resolution_y) + # ratio = float(context.imageSizeY())/float(context.imageSizeX()) + lens = (360* (math.atan(ratio *16 / ob.data.lens) / math.pi))*(math.pi/180) + # lens = (360* (math.atan(ratio *16 / ob.data.getLens()) / math.pi))*(math.pi/180) + lens = min(lens, math.pi) + + # get the camera location, subtract 90 degress from X to orient like X3D does + # mat = ob.matrixWorld - mat is now passed! + + loc = self.rotatePointForVRML(mat.translationPart()) + rot = mat.toEuler() + rot = (((rot[0]-90)), rot[1], rot[2]) + # rot = (((rot[0]-90)*DEG2RAD), rot[1]*DEG2RAD, rot[2]*DEG2RAD) + nRot = self.rotatePointForVRML( rot ) + # convert to Quaternion and to Angle Axis + Q = self.eulerToQuaternions(nRot[0], nRot[1], nRot[2]) + Q1 = self.multiplyQuaternions(Q[0], Q[1]) + Qf = self.multiplyQuaternions(Q1, Q[2]) + angleAxis = self.quaternionToAngleAxis(Qf) + self.file.write("\n\n" % (lens)) + + def writeFog(self, world): + if world: + mtype = world.mist.falloff + # mtype = world.getMistype() + mparam = world.mist + # mparam = world.getMist() + grd = world.horizon_color + # grd = world.getHor() + grd0, grd1, grd2 = grd[0], grd[1], grd[2] + else: + return + if (mtype == 'LINEAR' or mtype == 'INVERSE_QUADRATIC'): + mtype = 1 if mtype == 'LINEAR' else 2 + # if (mtype == 1 or mtype == 2): + self.file.write("\n\n" % round(mparam[2],self.cp)) + else: + return + + def writeNavigationInfo(self, scene): + self.file.write('\n') + + def writeSpotLight(self, ob, mtx, lamp, world): + safeName = self.cleanStr(ob.name) + if world: + ambi = world.ambient_color + # ambi = world.amb + ambientIntensity = ((float(ambi[0] + ambi[1] + ambi[2]))/3)/2.5 + else: + ambi = 0 + ambientIntensity = 0 + + # compute cutoff and beamwidth + intensity=min(lamp.energy/1.75,1.0) + beamWidth=((lamp.spot_size*math.pi)/180.0)*.37; + # beamWidth=((lamp.spotSize*math.pi)/180.0)*.37; + cutOffAngle=beamWidth*1.3 + + dx,dy,dz=self.computeDirection(mtx) + # note -dx seems to equal om[3][0] + # note -dz seems to equal om[3][1] + # note dy seems to equal om[3][2] + + #location=(ob.matrixWorld*MATWORLD).translationPart() # now passed + location=(mtx*MATWORLD).translationPart() + + radius = lamp.distance*math.cos(beamWidth) + # radius = lamp.dist*math.cos(beamWidth) + self.file.write("\n\n" % (round(location[0],3), round(location[1],3), round(location[2],3))) + + + def writeDirectionalLight(self, ob, mtx, lamp, world): + safeName = self.cleanStr(ob.name) + if world: + ambi = world.ambient_color + # ambi = world.amb + ambientIntensity = ((float(ambi[0] + ambi[1] + ambi[2]))/3)/2.5 + else: + ambi = 0 + ambientIntensity = 0 + + intensity=min(lamp.energy/1.75,1.0) + (dx,dy,dz)=self.computeDirection(mtx) 
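# (dx,dy,dz) is the default "down" vector (0,-1,0) rotated by the object's world
# rotation; see computeDirection() further down, which also folds in the -90 degree
# X rotation (MATWORLD) used to bridge the Blender/VRML axis difference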
+ self.file.write("\n\n" % (round(dx,4),round(dy,4),round(dz,4))) + + def writePointLight(self, ob, mtx, lamp, world): + safeName = self.cleanStr(ob.name) + if world: + ambi = world.ambient_color + # ambi = world.amb + ambientIntensity = ((float(ambi[0] + ambi[1] + ambi[2]))/3)/2.5 + else: + ambi = 0 + ambientIntensity = 0 + + # location=(ob.matrixWorld*MATWORLD).translationPart() # now passed + location= (mtx*MATWORLD).translationPart() + + self.file.write("\n\n" % (round(location[0],3), round(location[1],3), round(location[2],3))) + ''' + def writeNode(self, ob, mtx): + obname=str(ob.name) + if obname in self.namesStandard: + return + else: + dx,dy,dz = self.computeDirection(mtx) + # location=(ob.matrixWorld*MATWORLD).translationPart() + location=(mtx*MATWORLD).translationPart() + self.writeIndented("<%s\n" % obname,1) + self.writeIndented("direction=\"%s %s %s\"\n" % (round(dx,3),round(dy,3),round(dz,3))) + self.writeIndented("location=\"%s %s %s\"\n" % (round(location[0],3), round(location[1],3), round(location[2],3))) + self.writeIndented("/>\n",-1) + self.writeIndented("\n") + ''' + def secureName(self, name): + name = name + str(self.nodeID) + self.nodeID=self.nodeID+1 + if len(name) <= 3: + newname = "_" + str(self.nodeID) + return "%s" % (newname) + else: + for bad in ['"','#',"'",',','.','[','\\',']','{','}']: + name=name.replace(bad,'_') + if name in self.namesReserved: + newname = name[0:3] + "_" + str(self.nodeID) + return "%s" % (newname) + elif name[0].isdigit(): + newname = "_" + name + str(self.nodeID) + return "%s" % (newname) + else: + newname = name + return "%s" % (newname) + + def writeIndexedFaceSet(self, ob, mesh, mtx, world, EXPORT_TRI = False): + imageMap={} # set of used images + sided={} # 'one':cnt , 'two':cnt + vColors={} # 'multi':1 + meshName = self.cleanStr(ob.name) + + meshME = self.cleanStr(ob.data.name) # We dont care if its the mesh name or not + # meshME = self.cleanStr(ob.getData(mesh=1).name) # We dont care if its the mesh name or not + if len(mesh.faces) == 0: return + mode = [] + # mode = 0 + if mesh.active_uv_texture: + # if mesh.faceUV: + for face in mesh.active_uv_texture.data: + # for face in mesh.faces: + if face.halo and 'HALO' not in mode: + mode += ['HALO'] + if face.billboard and 'BILLBOARD' not in mode: + mode += ['BILLBOARD'] + if face.object_color and 'OBJECT_COLOR' not in mode: + mode += ['OBJECT_COLOR'] + if face.collision and 'COLLISION' not in mode: + mode += ['COLLISION'] + # mode |= face.mode + + if 'HALO' in mode and self.halonode == 0: + # if mode & Mesh.FaceModes.HALO and self.halonode == 0: + self.writeIndented("\n",1) + self.halonode = 1 + elif 'BILLBOARD' in mode and self.billnode == 0: + # elif mode & Mesh.FaceModes.BILLBOARD and self.billnode == 0: + self.writeIndented("\n",1) + self.billnode = 1 + elif 'OBJECT_COLOR' in mode and self.matonly == 0: + # elif mode & Mesh.FaceModes.OBCOL and self.matonly == 0: + self.matonly = 1 + # TF_TILES is marked as deprecated in DNA_meshdata_types.h + # elif mode & Mesh.FaceModes.TILES and self.tilenode == 0: + # self.tilenode = 1 + elif 'COLLISION' not in mode and self.collnode == 0: + # elif not mode & Mesh.FaceModes.DYNAMIC and self.collnode == 0: + self.writeIndented("\n",1) + self.collnode = 1 + + nIFSCnt=self.countIFSSetsNeeded(mesh, imageMap, sided, vColors) + + if nIFSCnt > 1: + self.writeIndented("\n" % ("G_", meshName),1) + + if 'two' in sided and sided['two'] > 0: + bTwoSided=1 + else: + bTwoSided=0 + + # mtx = ob.matrixWorld * MATWORLD # mtx is now passed + mtx = mtx * 
MATWORLD + + loc= mtx.translationPart() + sca= mtx.scalePart() + quat = mtx.toQuat() + rot= quat.axis + + self.writeIndented('\n' % \ + (meshName, loc[0], loc[1], loc[2], sca[0], sca[1], sca[2], rot[0], rot[1], rot[2], quat.angle) ) + # self.writeIndented('\n' % \ + # (meshName, loc[0], loc[1], loc[2], sca[0], sca[1], sca[2], rot[0], rot[1], rot[2], quat.angle*DEG2RAD) ) + + self.writeIndented("\n",1) + maters=mesh.materials + hasImageTexture=0 + issmooth=0 + + if len(maters) > 0 or mesh.active_uv_texture: + # if len(maters) > 0 or mesh.faceUV: + self.writeIndented("\n", 1) + # right now this script can only handle a single material per mesh. + if len(maters) >= 1: + mat=maters[0] + # matFlags = mat.getMode() + if not mat.face_texture: + # if not matFlags & Blender.Material.Modes['TEXFACE']: + self.writeMaterial(mat, self.cleanStr(mat.name,''), world) + # self.writeMaterial(mat, self.cleanStr(maters[0].name,''), world) + if len(maters) > 1: + print("Warning: mesh named %s has multiple materials" % meshName) + print("Warning: only one material per object handled") + + #-- textures + face = None + if mesh.active_uv_texture: + # if mesh.faceUV: + for face in mesh.active_uv_texture.data: + # for face in mesh.faces: + if face.image: + # if (hasImageTexture == 0) and (face.image): + self.writeImageTexture(face.image) + # hasImageTexture=1 # keep track of face texture + break + if self.tilenode == 1 and face and face.image: + # if self.tilenode == 1: + self.writeIndented("\n" % (face.image.xrep, face.image.yrep)) + self.tilenode = 0 + self.writeIndented("\n", -1) + + #-- IndexedFaceSet or IndexedLineSet + + # user selected BOUNDS=1, SOLID=3, SHARED=4, or TEXTURE=5 + ifStyle="IndexedFaceSet" + # look up mesh name, use it if available + if meshME in self.meshNames: + self.writeIndented("<%s USE=\"ME_%s\">" % (ifStyle, meshME), 1) + self.meshNames[meshME]+=1 + else: + if int(mesh.users) > 1: + self.writeIndented("<%s DEF=\"ME_%s\" " % (ifStyle, meshME), 1) + self.meshNames[meshME]=1 + else: + self.writeIndented("<%s " % ifStyle, 1) + + if bTwoSided == 1: + self.file.write("solid=\"false\" ") + else: + self.file.write("solid=\"true\" ") + + for face in mesh.faces: + if face.smooth: + issmooth=1 + break + if issmooth==1: + creaseAngle=(mesh.autosmooth_angle)*(math.pi/180.0) + # creaseAngle=(mesh.degr)*(math.pi/180.0) + self.file.write("creaseAngle=\"%s\" " % (round(creaseAngle,self.cp))) + + #--- output textureCoordinates if UV texture used + if mesh.active_uv_texture: + # if mesh.faceUV: + if self.matonly == 1 and self.share == 1: + self.writeFaceColors(mesh) + elif hasImageTexture == 1: + self.writeTextureCoordinates(mesh) + #--- output coordinates + self.writeCoordinates(ob, mesh, meshName, EXPORT_TRI) + + self.writingcoords = 1 + self.writingtexture = 1 + self.writingcolor = 1 + self.writeCoordinates(ob, mesh, meshName, EXPORT_TRI) + + #--- output textureCoordinates if UV texture used + if mesh.active_uv_texture: + # if mesh.faceUV: + if hasImageTexture == 1: + self.writeTextureCoordinates(mesh) + elif self.matonly == 1 and self.share == 1: + self.writeFaceColors(mesh) + #--- output vertexColors + self.matonly = 0 + self.share = 0 + + self.writingcoords = 0 + self.writingtexture = 0 + self.writingcolor = 0 + #--- output closing braces + self.writeIndented("\n" % ifStyle, -1) + self.writeIndented("\n", -1) + self.writeIndented("\n", -1) + + if self.halonode == 1: + self.writeIndented("\n", -1) + self.halonode = 0 + + if self.billnode == 1: + self.writeIndented("\n", -1) + self.billnode = 0 + + if 
self.collnode == 1: + self.writeIndented("\n", -1) + self.collnode = 0 + + if nIFSCnt > 1: + self.writeIndented("\n", -1) + + self.file.write("\n") + + def writeCoordinates(self, ob, mesh, meshName, EXPORT_TRI = False): + # create vertex list and pre rotate -90 degrees X for VRML + + if self.writingcoords == 0: + self.file.write('coordIndex="') + for face in mesh.faces: + fv = face.verts + # fv = face.v + + if len(fv)==3: + # if len(face)==3: + self.file.write("%i %i %i -1, " % (fv[0], fv[1], fv[2])) + # self.file.write("%i %i %i -1, " % (fv[0].index, fv[1].index, fv[2].index)) + else: + if EXPORT_TRI: + self.file.write("%i %i %i -1, " % (fv[0], fv[1], fv[2])) + # self.file.write("%i %i %i -1, " % (fv[0].index, fv[1].index, fv[2].index)) + self.file.write("%i %i %i -1, " % (fv[0], fv[2], fv[3])) + # self.file.write("%i %i %i -1, " % (fv[0].index, fv[2].index, fv[3].index)) + else: + self.file.write("%i %i %i %i -1, " % (fv[0], fv[1], fv[2], fv[3])) + # self.file.write("%i %i %i %i -1, " % (fv[0].index, fv[1].index, fv[2].index, fv[3].index)) + + self.file.write("\">\n") + else: + #-- vertices + # mesh.transform(ob.matrixWorld) + self.writeIndented("") + self.writeIndented("\n", -1) + + def writeTextureCoordinates(self, mesh): + texCoordList=[] + texIndexList=[] + j=0 + + for face in mesh.active_uv_texture.data: + # for face in mesh.faces: + uvs = face.uv + # uvs = [face.uv1, face.uv2, face.uv3, face.uv4] if face.verts[3] else [face.uv1, face.uv2, face.uv3] + + for uv in uvs: + # for uv in face.uv: + texIndexList.append(j) + texCoordList.append(uv) + j=j+1 + texIndexList.append(-1) + if self.writingtexture == 0: + self.file.write("\n\t\t\ttexCoordIndex=\"") + texIndxStr="" + for i in range(len(texIndexList)): + texIndxStr = texIndxStr + "%d, " % texIndexList[i] + if texIndexList[i]==-1: + self.file.write(texIndxStr) + texIndxStr="" + self.file.write("\"\n\t\t\t") + else: + self.writeIndented("") + self.writeIndented("\n", -1) + + def writeFaceColors(self, mesh): + if self.writingcolor == 0: + self.file.write("colorPerVertex=\"false\" ") + elif mesh.active_vertex_color: + # else: + self.writeIndented(" 2: + print("Debug: face.col r=%d g=%d b=%d" % (c[0], c[1], c[2])) + # print("Debug: face.col r=%d g=%d b=%d" % (c.r, c.g, c.b)) + aColor = self.rgbToFS(c) + self.file.write("%s, " % aColor) + + # for face in mesh.faces: + # if face.col: + # c=face.col[0] + # if self.verbose > 2: + # print("Debug: face.col r=%d g=%d b=%d" % (c.r, c.g, c.b)) + # aColor = self.rgbToFS(c) + # self.file.write("%s, " % aColor) + self.file.write("\" />") + self.writeIndented("\n",-1) + + def writeMaterial(self, mat, matName, world): + # look up material name, use it if available + if matName in self.matNames: + self.writeIndented("\n" % matName) + self.matNames[matName]+=1 + return; + + self.matNames[matName]=1 + + ambient = mat.ambient/3 + # ambient = mat.amb/3 + diffuseR, diffuseG, diffuseB = tuple(mat.diffuse_color) + # diffuseR, diffuseG, diffuseB = mat.rgbCol[0], mat.rgbCol[1],mat.rgbCol[2] + if world: + ambi = world.ambient_color + # ambi = world.getAmb() + ambi0, ambi1, ambi2 = (ambi[0]*mat.ambient)*2, (ambi[1]*mat.ambient)*2, (ambi[2]*mat.ambient)*2 + # ambi0, ambi1, ambi2 = (ambi[0]*mat.amb)*2, (ambi[1]*mat.amb)*2, (ambi[2]*mat.amb)*2 + else: + ambi0, ambi1, ambi2 = 0, 0, 0 + emisR, emisG, emisB = (diffuseR*mat.emit+ambi0)/2, (diffuseG*mat.emit+ambi1)/2, (diffuseB*mat.emit+ambi2)/2 + + shininess = mat.specular_hardness/512.0 + # shininess = mat.hard/512.0 + specR = 
(mat.specular_color[0]+0.001)/(1.25/(mat.specular_intensity+0.001)) + # specR = (mat.specCol[0]+0.001)/(1.25/(mat.spec+0.001)) + specG = (mat.specular_color[1]+0.001)/(1.25/(mat.specular_intensity+0.001)) + # specG = (mat.specCol[1]+0.001)/(1.25/(mat.spec+0.001)) + specB = (mat.specular_color[2]+0.001)/(1.25/(mat.specular_intensity+0.001)) + # specB = (mat.specCol[2]+0.001)/(1.25/(mat.spec+0.001)) + transp = 1-mat.alpha + # matFlags = mat.getMode() + if mat.shadeless: + # if matFlags & Blender.Material.Modes['SHADELESS']: + ambient = 1 + shine = 1 + specR = emitR = diffuseR + specG = emitG = diffuseG + specB = emitB = diffuseB + self.writeIndented("" % (round(transp,self.cp))) + self.writeIndented("\n",-1) + + def writeImageTexture(self, image): + name = image.name + filename = image.filename.split('/')[-1].split('\\')[-1] + if name in self.texNames: + self.writeIndented("\n" % self.cleanStr(name)) + self.texNames[name] += 1 + return + else: + self.writeIndented("" % name) + self.writeIndented("\n",-1) + self.texNames[name] = 1 + + def writeBackground(self, world, alltextures): + if world: worldname = world.name + else: return + blending = (world.blend_sky, world.paper_sky, world.real_sky) + # blending = world.getSkytype() + grd = world.horizon_color + # grd = world.getHor() + grd0, grd1, grd2 = grd[0], grd[1], grd[2] + sky = world.zenith_color + # sky = world.getZen() + sky0, sky1, sky2 = sky[0], sky[1], sky[2] + mix0, mix1, mix2 = grd[0]+sky[0], grd[1]+sky[1], grd[2]+sky[2] + mix0, mix1, mix2 = mix0/2, mix1/2, mix2/2 + self.file.write("\n\n") + +########################################################## +# export routine +########################################################## + + def export(self, scene, world, alltextures,\ + EXPORT_APPLY_MODIFIERS = False,\ + EXPORT_TRI= False,\ + ): + + print("Info: starting X3D export to " + self.filename + "...") + self.writeHeader() + # self.writeScript() + self.writeNavigationInfo(scene) + self.writeBackground(world, alltextures) + self.writeFog(world) + self.proto = 0 + + + # # COPIED FROM OBJ EXPORTER + # if EXPORT_APPLY_MODIFIERS: + # temp_mesh_name = '~tmp-mesh' + + # # Get the container mesh. - used for applying modifiers and non mesh objects. 
+ # containerMesh = meshName = tempMesh = None + # for meshName in Blender.NMesh.GetNames(): + # if meshName.startswith(temp_mesh_name): + # tempMesh = Mesh.Get(meshName) + # if not tempMesh.users: + # containerMesh = tempMesh + # if not containerMesh: + # containerMesh = Mesh.New(temp_mesh_name) + # -------------------------- + + + for ob_main in [o for o in scene.objects if o.is_visible()]: + # for ob_main in scene.objects.context: + + free, derived = create_derived_objects(ob_main) + + if derived == None: continue + + for ob, ob_mat in derived: + # for ob, ob_mat in BPyObject.getDerivedObjects(ob_main): + objType=ob.type + objName=ob.name + self.matonly = 0 + if objType == "CAMERA": + # if objType == "Camera": + self.writeViewpoint(ob, ob_mat, scene) + elif objType in ("MESH", "CURVE", "SURF", "TEXT") : + # elif objType in ("Mesh", "Curve", "Surf", "Text") : + if EXPORT_APPLY_MODIFIERS or objType != 'MESH': + # if EXPORT_APPLY_MODIFIERS or objType != 'Mesh': + me = ob.create_mesh(EXPORT_APPLY_MODIFIERS, 'PREVIEW') + # me= BPyMesh.getMeshFromObject(ob, containerMesh, EXPORT_APPLY_MODIFIERS, False, scene) + else: + me = ob.data + # me = ob.getData(mesh=1) + + self.writeIndexedFaceSet(ob, me, ob_mat, world, EXPORT_TRI = EXPORT_TRI) + + # free mesh created with create_mesh() + if me != ob.data: + bpy.data.remove_mesh(me) + + elif objType == "LAMP": + # elif objType == "Lamp": + data= ob.data + datatype=data.type + if datatype == 'POINT': + # if datatype == Lamp.Types.Lamp: + self.writePointLight(ob, ob_mat, data, world) + elif datatype == 'SPOT': + # elif datatype == Lamp.Types.Spot: + self.writeSpotLight(ob, ob_mat, data, world) + elif datatype == 'SUN': + # elif datatype == Lamp.Types.Sun: + self.writeDirectionalLight(ob, ob_mat, data, world) + else: + self.writeDirectionalLight(ob, ob_mat, data, world) + # do you think x3d could document what to do with dummy objects? + #elif objType == "Empty" and objName != "Empty": + # self.writeNode(ob, ob_mat) + else: + #print "Info: Ignoring [%s], object type [%s] not handle yet" % (object.name,object.getType) + pass + + if free: + free_derived_objects(ob_main) + + self.file.write("\n\n") + + # if EXPORT_APPLY_MODIFIERS: + # if containerMesh: + # containerMesh.verts = None + + self.cleanup() + +########################################################## +# Utility methods +########################################################## + + def cleanup(self): + self.file.close() + self.texNames={} + self.matNames={} + self.indentLevel=0 + print("Info: finished X3D export to %s\n" % self.filename) + + def cleanStr(self, name, prefix='rsvd_'): + """cleanStr(name,prefix) - try to create a valid VRML DEF name from object name""" + + newName=name[:] + if len(newName) == 0: + self.nNodeID+=1 + return "%s%d" % (prefix, self.nNodeID) + + if newName in self.namesReserved: + newName='%s%s' % (prefix,newName) + + if newName[0].isdigit(): + newName='%s%s' % ('_',newName) + + for bad in [' ','"','#',"'",',','.','[','\\',']','{','}']: + newName=newName.replace(bad,'_') + return newName + + def countIFSSetsNeeded(self, mesh, imageMap, sided, vColors): + """ + countIFFSetsNeeded() - should look at a blender mesh to determine + how many VRML IndexFaceSets or IndexLineSets are needed. 
A + new mesh created under the following conditions: + + o - split by UV Textures / one per mesh + o - split by face, one sided and two sided + o - split by smooth and flat faces + o - split when faces only have 2 vertices * needs to be an IndexLineSet + """ + + imageNameMap={} + faceMap={} + nFaceIndx=0 + + if mesh.active_uv_texture: + # if mesh.faceUV: + for face in mesh.active_uv_texture.data: + # for face in mesh.faces: + sidename=''; + if face.twoside: + # if face.mode & Mesh.FaceModes.TWOSIDE: + sidename='two' + else: + sidename='one' + + if sidename in sided: + sided[sidename]+=1 + else: + sided[sidename]=1 + + image = face.image + if image: + faceName="%s_%s" % (face.image.name, sidename); + try: + imageMap[faceName].append(face) + except: + imageMap[faceName]=[face.image.name,sidename,face] + + if self.verbose > 2: + for faceName in imageMap.keys(): + ifs=imageMap[faceName] + print("Debug: faceName=%s image=%s, solid=%s facecnt=%d" % \ + (faceName, ifs[0], ifs[1], len(ifs)-2)) + + return len(imageMap) + + def faceToString(self,face): + + print("Debug: face.flag=0x%x (bitflags)" % face.flag) + if face.sel: + print("Debug: face.sel=true") + + print("Debug: face.mode=0x%x (bitflags)" % face.mode) + if face.mode & Mesh.FaceModes.TWOSIDE: + print("Debug: face.mode twosided") + + print("Debug: face.transp=0x%x (enum)" % face.transp) + if face.transp == Mesh.FaceTranspModes.SOLID: + print("Debug: face.transp.SOLID") + + if face.image: + print("Debug: face.image=%s" % face.image.name) + print("Debug: face.materialIndex=%d" % face.materialIndex) + + # XXX not used + # def getVertexColorByIndx(self, mesh, indx): + # c = None + # for face in mesh.faces: + # j=0 + # for vertex in face.v: + # if vertex.index == indx: + # c=face.col[j] + # break + # j=j+1 + # if c: break + # return c + + def meshToString(self,mesh): + # print("Debug: mesh.hasVertexUV=%d" % mesh.vertexColors) + print("Debug: mesh.faceUV=%d" % (len(mesh.uv_textures) > 0)) + # print("Debug: mesh.faceUV=%d" % mesh.faceUV) + print("Debug: mesh.hasVertexColours=%d" % (len(mesh.vertex_colors) > 0)) + # print("Debug: mesh.hasVertexColours=%d" % mesh.hasVertexColours()) + print("Debug: mesh.verts=%d" % len(mesh.verts)) + print("Debug: mesh.faces=%d" % len(mesh.faces)) + print("Debug: mesh.materials=%d" % len(mesh.materials)) + + def rgbToFS(self, c): + s="%s %s %s" % (round(c[0]/255.0,self.cp), + round(c[1]/255.0,self.cp), + round(c[2]/255.0,self.cp)) + + # s="%s %s %s" % ( + # round(c.r/255.0,self.cp), + # round(c.g/255.0,self.cp), + # round(c.b/255.0,self.cp)) + return s + + def computeDirection(self, mtx): + x,y,z=(0,-1.0,0) # point down + + ax,ay,az = (mtx*MATWORLD).toEuler() + + # ax *= DEG2RAD + # ay *= DEG2RAD + # az *= DEG2RAD + + # rot X + x1=x + y1=y*math.cos(ax)-z*math.sin(ax) + z1=y*math.sin(ax)+z*math.cos(ax) + + # rot Y + x2=x1*math.cos(ay)+z1*math.sin(ay) + y2=y1 + z2=z1*math.cos(ay)-x1*math.sin(ay) + + # rot Z + x3=x2*math.cos(az)-y2*math.sin(az) + y3=x2*math.sin(az)+y2*math.cos(az) + z3=z2 + + return [x3,y3,z3] + + + # swap Y and Z to handle axis difference between Blender and VRML + #------------------------------------------------------------------------ + def rotatePointForVRML(self, v): + x = v[0] + y = v[2] + z = -v[1] + + vrmlPoint=[x, y, z] + return vrmlPoint + + # For writing well formed VRML code + #------------------------------------------------------------------------ + def writeIndented(self, s, inc=0): + if inc < 1: + self.indentLevel = self.indentLevel + inc + + spaces="" + for x in 
range(self.indentLevel): + spaces = spaces + "\t" + self.file.write(spaces + s) + + if inc > 0: + self.indentLevel = self.indentLevel + inc + + # Converts a Euler to three new Quaternions + # Angles of Euler are passed in as radians + #------------------------------------------------------------------------ + def eulerToQuaternions(self, x, y, z): + Qx = [math.cos(x/2), math.sin(x/2), 0, 0] + Qy = [math.cos(y/2), 0, math.sin(y/2), 0] + Qz = [math.cos(z/2), 0, 0, math.sin(z/2)] + + quaternionVec=[Qx,Qy,Qz] + return quaternionVec + + # Multiply two Quaternions together to get a new Quaternion + #------------------------------------------------------------------------ + def multiplyQuaternions(self, Q1, Q2): + result = [((Q1[0] * Q2[0]) - (Q1[1] * Q2[1]) - (Q1[2] * Q2[2]) - (Q1[3] * Q2[3])), + ((Q1[0] * Q2[1]) + (Q1[1] * Q2[0]) + (Q1[2] * Q2[3]) - (Q1[3] * Q2[2])), + ((Q1[0] * Q2[2]) + (Q1[2] * Q2[0]) + (Q1[3] * Q2[1]) - (Q1[1] * Q2[3])), + ((Q1[0] * Q2[3]) + (Q1[3] * Q2[0]) + (Q1[1] * Q2[2]) - (Q1[2] * Q2[1]))] + + return result + + # Convert a Quaternion to an Angle Axis (ax, ay, az, angle) + # angle is in radians + #------------------------------------------------------------------------ + def quaternionToAngleAxis(self, Qf): + scale = math.pow(Qf[1],2) + math.pow(Qf[2],2) + math.pow(Qf[3],2) + ax = Qf[1] + ay = Qf[2] + az = Qf[3] + + if scale > .0001: + ax/=scale + ay/=scale + az/=scale + + angle = 2 * math.acos(Qf[0]) + + result = [ax, ay, az, angle] + return result + +########################################################## +# Callbacks, needed before Main +########################################################## + +def x3d_export(filename, + context, + EXPORT_APPLY_MODIFIERS=False, + EXPORT_TRI=False, + EXPORT_GZIP=False): + + if EXPORT_GZIP: + if not filename.lower().endswith('.x3dz'): + filename = '.'.join(filename.split('.')[:-1]) + '.x3dz' + else: + if not filename.lower().endswith('.x3d'): + filename = '.'.join(filename.split('.')[:-1]) + '.x3d' + + + scene = context.scene + # scene = Blender.Scene.GetCurrent() + world = scene.world + + # XXX these are global textures while .Get() returned only scene's? 
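# bpy.data.textures holds every texture datablock in the file; here it is only
# passed through export() to writeBackground(), presumably to pick up world/sky textures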
+ alltextures = bpy.data.textures + # alltextures = Blender.Texture.Get() + + wrlexport=x3d_class(filename) + wrlexport.export(\ + scene,\ + world,\ + alltextures,\ + \ + EXPORT_APPLY_MODIFIERS = EXPORT_APPLY_MODIFIERS,\ + EXPORT_TRI = EXPORT_TRI,\ + ) + + +def x3d_export_ui(filename): + if not filename.endswith(extension): + filename += extension + #if _safeOverwrite and sys.exists(filename): + # result = Draw.PupMenu("File Already Exists, Overwrite?%t|Yes%x1|No%x0") + #if(result != 1): + # return + + # Get user options + EXPORT_APPLY_MODIFIERS = Draw.Create(1) + EXPORT_TRI = Draw.Create(0) + EXPORT_GZIP = Draw.Create( filename.lower().endswith('.x3dz') ) + + # Get USER Options + pup_block = [\ + ('Apply Modifiers', EXPORT_APPLY_MODIFIERS, 'Use transformed mesh data from each object.'),\ + ('Triangulate', EXPORT_TRI, 'Triangulate quads.'),\ + ('Compress', EXPORT_GZIP, 'GZip the resulting file, requires a full python install'),\ + ] + + if not Draw.PupBlock('Export...', pup_block): + return + + Blender.Window.EditMode(0) + Blender.Window.WaitCursor(1) + + x3d_export(filename,\ + EXPORT_APPLY_MODIFIERS = EXPORT_APPLY_MODIFIERS.val,\ + EXPORT_TRI = EXPORT_TRI.val,\ + EXPORT_GZIP = EXPORT_GZIP.val\ + ) + + Blender.Window.WaitCursor(0) + + + +######################################################### +# main routine +######################################################### + + +# if __name__ == '__main__': +# Blender.Window.FileSelector(x3d_export_ui,"Export X3D", Blender.Get('filename').replace('.blend', '.x3d')) + +class EXPORT_OT_x3d(bpy.types.Operator): + ''' + X3D Exporter + ''' + __idname__ = "export.x3d" + __label__ = 'Export X3D' + + # List of operator properties, the attributes will be assigned + # to the class instance from the operator settings before calling. + + __props__ = [ + bpy.props.StringProperty(attr="path", name="File Path", description="File path used for exporting the X3D file", maxlen= 1024, default= ""), + + bpy.props.BoolProperty(attr="apply_modifiers", name="Apply Modifiers", description="Use transformed mesh data from each object.", default=True), + bpy.props.BoolProperty(attr="triangulate", name="Triangulate", description="Triangulate quads.", default=False), + bpy.props.BoolProperty(attr="compress", name="Compress", description="GZip the resulting file, requires a full python install.", default=False), + ] + + def execute(self, context): + x3d_export(self.path, context, self.apply_modifiers, self.triangulate, self.compress) + return ('FINISHED',) + + def invoke(self, context, event): + wm = context.manager + wm.add_fileselect(self.__operator__) + return ('RUNNING_MODAL',) + + def poll(self, context): # Poll isnt working yet + print("Poll") + return context.active_object != None + +bpy.ops.add(EXPORT_OT_x3d) + +# NOTES +# - blender version is hardcoded diff --git a/release/scripts/io/import_3ds.py b/release/scripts/io/import_3ds.py new file mode 100644 index 00000000000..339fac839ea --- /dev/null +++ b/release/scripts/io/import_3ds.py @@ -0,0 +1,1167 @@ +#!BPY +""" +Name: '3D Studio (.3ds)...' +Blender: 244 +Group: 'Import' +Tooltip: 'Import from 3DS file format (.3ds)' +""" + +__author__= ['Bob Holcomb', 'Richard L?rk?ng', 'Damien McGinnes', 'Campbell Barton', 'Mario Lapin'] +__url__ = ("blenderartists.org", "www.blender.org", "www.gametutorials.com", "lib3ds.sourceforge.net/") +__version__= '0.996' +__bpydoc__= '''\ + +3ds Importer + +This script imports a 3ds file and the materials into Blender for editing. 
+ +Loader is based on the 3ds loader from www.gametutorials.com (thanks, DigiBen). + +0.996 by Mario Lapin (mario.lapin@gmail.com) 13/04/200
+ - Implemented workaround to correct the association between name, geometry and materials of + imported meshes. + + Without this patch, version 0.995 of this importer would associate each mesh object with the + geometry and materials of the previously parsed mesh object. As a result, the name of the + first mesh object would be thrown away, and the name of the last mesh object would be + automatically given a '.001' suffix. No object would disappear, but object + names and materials would be completely jumbled. + +0.995 by Campbell Barton
+- workaround for buggy mesh vert delete +- minor tweaks + +0.99 by Bob Holcomb
+- added support for floating point color values that previously broke on import. + +0.98 by Campbell Barton
+- import faces and verts to lists instead of a mesh, convert to a mesh later +- use new index mapping feature of mesh to re-map faces that were not added. + +0.97 by Campbell Barton
+- Strip material names of spaces +- Added import as instance to import the 3ds into its own + scene and add a group instance to the current scene +- New option to scale down imported objects so they are within a limited bounding area. + +0.96 by Campbell Barton
+- Added workaround for a bug in setting UVs on faces that use vertex index zero. +- Removed unique name function, let Blender make the names unique. + +0.95 by Campbell Barton
+- Removed workarounds for Blender 2.41 +- Mesh objects split by material - many 3ds objects used more than 16 materials per mesh. +- Removed a lot of unneeded variable creation. + +0.94 by Campbell Barton
+- Face import tested at roughly a 16x overall speedup over 0.93. +- Material importing speedup. +- Tested with more models. +- Support for some corrupt models. + +0.93 by Campbell Barton
+- Tested with 400 3ds files from turbosquid and samples. +- Tactfully ignore faces that use the same verts twice. +- Rolled back to the sloppy, un-reorganized 0.83 code (the reorganization had broken UV coord loading). +- Converted from NMesh to Mesh. +- Faster and cleaner new names. +- Use external comprehensive image loader. +- Re-integrated the 0.92 and 0.9 changes. +- Fixes for 2.41 compat. +- Non-textured faces do not use a texture flag. + +0.92
+- Added support for diffuse, alpha, spec, bump maps in a single material + +0.9
+- Reorganized code into object/material block functions
+- Use of Matrix() to copy matrix data
+- Added support for material transparency
+ +0.83 2005-08-07: Campell Barton +- Aggressive image finding and case insensitivy for posisx systems. + +0.82a 2005-07-22 +- image texture loading (both for face uv and renderer) + +0.82 - image texture loading (for face uv) + +0.81a (fork- not 0.9) Campbell Barton 2005-06-08 +- Simplified import code +- Never overwrite data +- Faster list handling +- Leaves import selected + +0.81 Damien McGinnes 2005-01-09 +- handle missing images better + +0.8 Damien McGinnes 2005-01-08 +- copies sticky UV coords to face ones +- handles images better +- Recommend that you run 'RemoveDoubles' on each imported mesh after using this script + +''' + +# ***** BEGIN GPL LICENSE BLOCK ***** +# +# Script copyright (C) Bob Holcomb +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. +# +# ***** END GPL LICENCE BLOCK ***** +# -------------------------------------------------------------------------- + +# Importing modules + +import os +import time +import struct + +from import_obj import unpack_face_list, load_image + +import bpy +import Mathutils + +# import Blender +# from Blender import Mesh, Object, Material, Image, Texture, Lamp, Mathutils +# from Blender.Mathutils import Vector +# import BPyImage + +# import BPyMessages + +# try: +# from struct import calcsize, unpack +# except: +# calcsize= unpack= None + + + +# # If python version is less than 2.4, try to get set stuff from module +# try: +# set +# except: +# from sets import Set as set + +BOUNDS_3DS = [] + + +#this script imports uvcoords as sticky vertex coords +#this parameter enables copying these to face uv coords +#which shold be more useful. + +def createBlenderTexture(material, name, image): + texture = bpy.data.textures.new(name) + texture.setType('Image') + texture.image = image + material.setTexture(0, texture, Texture.TexCo.UV, Texture.MapTo.COL) + + + +###################################################### +# Data Structures +###################################################### + +#Some of the chunks that we will see +#----- Primary Chunk, at the beginning of each file +PRIMARY = int('0x4D4D',16) + +#------ Main Chunks +OBJECTINFO = int('0x3D3D',16); #This gives the version of the mesh and is found right before the material and object information +VERSION = int('0x0002',16); #This gives the version of the .3ds file +EDITKEYFRAME= int('0xB000',16); #This is the header for all of the key frame info + +#------ sub defines of OBJECTINFO +MATERIAL = 45055 #0xAFFF // This stored the texture info +OBJECT = 16384 #0x4000 // This stores the faces, vertices, etc... 
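The constants above and the MAT_*/OBJECT_* groups that follow are chunk IDs: a .3ds file is a tree of chunks, each introduced by a 2-byte ID and a 4-byte little-endian length that counts the whole chunk, 6-byte header included. The chunk class and the read_chunk()/skip_to_end() helpers used below operate on exactly that header; a minimal standalone sketch of the same idea (helper names here are illustrative, not part of this patch):

import struct

CHUNK_HEADER = struct.Struct('<HI')    # 2-byte chunk ID, 4-byte total chunk length

def read_chunk_header(f):
    # returns (chunk_id, chunk_length, bytes_read) for the chunk at the current offset
    data = f.read(CHUNK_HEADER.size)
    if len(data) < CHUNK_HEADER.size:
        return None                    # end of file
    chunk_id, chunk_length = CHUNK_HEADER.unpack(data)
    return chunk_id, chunk_length, CHUNK_HEADER.size

def skip_rest_of_chunk(f, chunk_length, bytes_read):
    # the stored length includes the 6-byte header, so seek past whatever is left
    f.seek(chunk_length - bytes_read, 1)

A parser can then recurse into the container chunks it knows (PRIMARY, OBJECTINFO, OBJECT, MATERIAL) and skip everything else, which appears to be the pattern process_next_chunk() follows.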
+ +#>------ sub defines of MATERIAL +#------ sub defines of MATERIAL_BLOCK +MAT_NAME = int('0xA000',16) # This holds the material name +MAT_AMBIENT = int('0xA010',16) # Ambient color of the object/material +MAT_DIFFUSE = int('0xA020',16) # This holds the color of the object/material +MAT_SPECULAR = int('0xA030',16) # SPecular color of the object/material +MAT_SHINESS = int('0xA040',16) # ?? +MAT_TRANSPARENCY= int('0xA050',16) # Transparency value of material +MAT_SELF_ILLUM = int('0xA080',16) # Self Illumination value of material +MAT_WIRE = int('0xA085',16) # Only render's wireframe + +MAT_TEXTURE_MAP = int('0xA200',16) # This is a header for a new texture map +MAT_SPECULAR_MAP= int('0xA204',16) # This is a header for a new specular map +MAT_OPACITY_MAP = int('0xA210',16) # This is a header for a new opacity map +MAT_REFLECTION_MAP= int('0xA220',16) # This is a header for a new reflection map +MAT_BUMP_MAP = int('0xA230',16) # This is a header for a new bump map +MAT_MAP_FILENAME = int('0xA300',16) # This holds the file name of the texture + +MAT_FLOAT_COLOR = int ('0x0010', 16) #color defined as 3 floats +MAT_24BIT_COLOR = int ('0x0011', 16) #color defined as 3 bytes + +#>------ sub defines of OBJECT +OBJECT_MESH = int('0x4100',16); # This lets us know that we are reading a new object +OBJECT_LAMP = int('0x4600',16); # This lets un know we are reading a light object +OBJECT_LAMP_SPOT = int('0x4610',16); # The light is a spotloght. +OBJECT_LAMP_OFF = int('0x4620',16); # The light off. +OBJECT_LAMP_ATTENUATE = int('0x4625',16); +OBJECT_LAMP_RAYSHADE = int('0x4627',16); +OBJECT_LAMP_SHADOWED = int('0x4630',16); +OBJECT_LAMP_LOCAL_SHADOW = int('0x4640',16); +OBJECT_LAMP_LOCAL_SHADOW2 = int('0x4641',16); +OBJECT_LAMP_SEE_CONE = int('0x4650',16); +OBJECT_LAMP_SPOT_RECTANGULAR = int('0x4651',16); +OBJECT_LAMP_SPOT_OVERSHOOT = int('0x4652',16); +OBJECT_LAMP_SPOT_PROJECTOR = int('0x4653',16); +OBJECT_LAMP_EXCLUDE = int('0x4654',16); +OBJECT_LAMP_RANGE = int('0x4655',16); +OBJECT_LAMP_ROLL = int('0x4656',16); +OBJECT_LAMP_SPOT_ASPECT = int('0x4657',16); +OBJECT_LAMP_RAY_BIAS = int('0x4658',16); +OBJECT_LAMP_INNER_RANGE = int('0x4659',16); +OBJECT_LAMP_OUTER_RANGE = int('0x465A',16); +OBJECT_LAMP_MULTIPLIER = int('0x465B',16); +OBJECT_LAMP_AMBIENT_LIGHT = int('0x4680',16); + + + +OBJECT_CAMERA= int('0x4700',16); # This lets un know we are reading a camera object + +#>------ sub defines of CAMERA +OBJECT_CAM_RANGES= int('0x4720',16); # The camera range values + +#>------ sub defines of OBJECT_MESH +OBJECT_VERTICES = int('0x4110',16); # The objects vertices +OBJECT_FACES = int('0x4120',16); # The objects faces +OBJECT_MATERIAL = int('0x4130',16); # This is found if the object has a material, either texture map or color +OBJECT_UV = int('0x4140',16); # The UV texture coordinates +OBJECT_TRANS_MATRIX = int('0x4160',16); # The Object Matrix + +global scn +scn = None + +#the chunk class +class chunk: + ID = 0 + length = 0 + bytes_read = 0 + + #we don't read in the bytes_read, we compute that + binary_format=' 3): + print('\tNon-Fatal Error: Version greater than 3, may not load correctly: ', version) + + #is it an object info chunk? + elif (new_chunk.ID == OBJECTINFO): + #print 'elif (new_chunk.ID == OBJECTINFO):' + # print 'found an OBJECTINFO chunk' + process_next_chunk(file, new_chunk, importedObjects, IMAGE_SEARCH) + + #keep track of how much we read in the main chunk + new_chunk.bytes_read += temp_chunk.bytes_read + + #is it an object chunk? 
+ elif (new_chunk.ID == OBJECT): + + if CreateBlenderObject: + putContextMesh(contextMesh_vertls, contextMesh_facels, contextMeshMaterials) + contextMesh_vertls = []; contextMesh_facels = [] + + ## preparando para receber o proximo objeto + contextMeshMaterials = {} # matname:[face_idxs] + contextMeshUV = None + #contextMesh.vertexUV = 1 # Make sticky coords. + # Reset matrix + contextMatrix_rot = None + #contextMatrix_tx = None + + CreateBlenderObject = True + tempName = read_string(file) + contextObName = tempName + new_chunk.bytes_read += len(tempName)+1 + + #is it a material chunk? + elif (new_chunk.ID == MATERIAL): + +# print("read material") + + #print 'elif (new_chunk.ID == MATERIAL):' + contextMaterial = bpy.data.add_material('Material') +# contextMaterial = bpy.data.materials.new('Material') + + elif (new_chunk.ID == MAT_NAME): + #print 'elif (new_chunk.ID == MAT_NAME):' + material_name = read_string(file) + +# print("material name", material_name) + + #plus one for the null character that ended the string + new_chunk.bytes_read += len(material_name)+1 + + contextMaterial.name = material_name.rstrip() # remove trailing whitespace + MATDICT[material_name]= (contextMaterial.name, contextMaterial) + + elif (new_chunk.ID == MAT_AMBIENT): + #print 'elif (new_chunk.ID == MAT_AMBIENT):' + read_chunk(file, temp_chunk) + if (temp_chunk.ID == MAT_FLOAT_COLOR): + contextMaterial.mirror_color = read_float_color(temp_chunk) +# temp_data = file.read(struct.calcsize('3f')) +# temp_chunk.bytes_read += 12 +# contextMaterial.mirCol = [float(col) for col in struct.unpack('<3f', temp_data)] + elif (temp_chunk.ID == MAT_24BIT_COLOR): + contextMaterial.mirror_color = read_byte_color(temp_chunk) +# temp_data = file.read(struct.calcsize('3B')) +# temp_chunk.bytes_read += 3 +# contextMaterial.mirCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb + else: + skip_to_end(file, temp_chunk) + new_chunk.bytes_read += temp_chunk.bytes_read + + elif (new_chunk.ID == MAT_DIFFUSE): + #print 'elif (new_chunk.ID == MAT_DIFFUSE):' + read_chunk(file, temp_chunk) + if (temp_chunk.ID == MAT_FLOAT_COLOR): + contextMaterial.diffuse_color = read_float_color(temp_chunk) +# temp_data = file.read(struct.calcsize('3f')) +# temp_chunk.bytes_read += 12 +# contextMaterial.rgbCol = [float(col) for col in struct.unpack('<3f', temp_data)] + elif (temp_chunk.ID == MAT_24BIT_COLOR): + contextMaterial.diffuse_color = read_byte_color(temp_chunk) +# temp_data = file.read(struct.calcsize('3B')) +# temp_chunk.bytes_read += 3 +# contextMaterial.rgbCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb + else: + skip_to_end(file, temp_chunk) + +# print("read material diffuse color", contextMaterial.diffuse_color) + + new_chunk.bytes_read += temp_chunk.bytes_read + + elif (new_chunk.ID == MAT_SPECULAR): + #print 'elif (new_chunk.ID == MAT_SPECULAR):' + read_chunk(file, temp_chunk) + if (temp_chunk.ID == MAT_FLOAT_COLOR): + contextMaterial.specular_color = read_float_color(temp_chunk) +# temp_data = file.read(struct.calcsize('3f')) +# temp_chunk.bytes_read += 12 +# contextMaterial.mirCol = [float(col) for col in struct.unpack('<3f', temp_data)] + elif (temp_chunk.ID == MAT_24BIT_COLOR): + contextMaterial.specular_color = read_byte_color(temp_chunk) +# temp_data = file.read(struct.calcsize('3B')) +# temp_chunk.bytes_read += 3 +# contextMaterial.mirCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb + else: + skip_to_end(file, temp_chunk) + 
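# whatever the sub-chunk read (or skipped) has to be added to the parent
# chunk's bytes_read, otherwise the loop over this chunk's contents would not
# know when the chunk has been fully consumed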
new_chunk.bytes_read += temp_chunk.bytes_read + + elif (new_chunk.ID == MAT_TEXTURE_MAP): + read_texture(new_chunk, temp_chunk, "Diffuse", "COLOR") +# #print 'elif (new_chunk.ID==MAT_TEXTURE_MAP):' +# new_texture= bpy.data.textures.new('Diffuse') +# new_texture.setType('Image') +# img = None +# while (new_chunk.bytes_read BOUNDS_3DS[i + 3]: + BOUNDS_3DS[i + 3]= v[i] # min + + # Get the max axis x/y/z + max_axis = max(BOUNDS_3DS[3]-BOUNDS_3DS[0], BOUNDS_3DS[4]-BOUNDS_3DS[1], BOUNDS_3DS[5]-BOUNDS_3DS[2]) + # print max_axis + if max_axis < 1 << 30: # Should never be false but just make sure. + + # Get a new scale factor if set as an option + SCALE = 1.0 + while (max_axis * SCALE) > IMPORT_CONSTRAIN_BOUNDS: + SCALE/=10 + + # SCALE Matrix + SCALE_MAT = Mathutils.Matrix([SCALE,0,0,0],[0,SCALE,0,0],[0,0,SCALE,0],[0,0,0,1]) +# SCALE_MAT = Blender.Mathutils.Matrix([SCALE,0,0,0],[0,SCALE,0,0],[0,0,SCALE,0],[0,0,0,1]) + + for ob in importedObjects: + ob.setMatrix(ob.matrixWorld * SCALE_MAT) + + # Done constraining to bounds. + + # Select all new objects. + print('finished importing: "%s" in %.4f sec.' % (filename, (time.clock()-time1))) +# print('finished importing: "%s" in %.4f sec.' % (filename, (Blender.sys.time()-time1))) + file.close() +# Blender.Window.WaitCursor(0) + + +DEBUG = False +# if __name__=='__main__' and not DEBUG: +# if calcsize == None: +# Blender.Draw.PupMenu('Error%t|a full python installation not found') +# else: +# Blender.Window.FileSelector(load_3ds, 'Import 3DS', '*.3ds') + +# For testing compatibility +#load_3ds('/metavr/convert/vehicle/truck_002/TruckTanker1.3DS', False) +#load_3ds('/metavr/archive/convert/old/arranged_3ds_to_hpx-2/only-need-engine-trains/Engine2.3DS', False) +''' + +else: + import os + # DEBUG ONLY + TIME = Blender.sys.time() + import os + print 'Searching for files' + os.system('find /metavr/ -iname "*.3ds" > /tmp/temp3ds_list') + # os.system('find /storage/ -iname "*.3ds" > /tmp/temp3ds_list') + print '...Done' + file = open('/tmp/temp3ds_list', 'r') + lines = file.readlines() + file.close() + # sort by filesize for faster testing + lines_size = [(os.path.getsize(f[:-1]), f[:-1]) for f in lines] + lines_size.sort() + lines = [f[1] for f in lines_size] + + + def between(v,a,b): + if v <= max(a,b) and v >= min(a,b): + return True + return False + + for i, _3ds in enumerate(lines): + if between(i, 650,800): + #_3ds= _3ds[:-1] + print 'Importing', _3ds, '\nNUMBER', i, 'of', len(lines) + _3ds_file= _3ds.split('/')[-1].split('\\')[-1] + newScn = Blender.Scene.New(_3ds_file) + newScn.makeCurrent() + load_3ds(_3ds, False) + + print 'TOTAL TIME: %.6f' % (Blender.sys.time() - TIME) + +''' + +class IMPORT_OT_3ds(bpy.types.Operator): + ''' + 3DS Importer + ''' + __idname__ = "import.3ds" + __label__ = 'Import 3DS' + + # List of operator properties, the attributes will be assigned + # to the class instance from the operator settings before calling. + + __props__ = [ + bpy.props.StringProperty(attr="path", name="File Path", description="File path used for importing the 3DS file", maxlen= 1024, default= ""), + +# bpy.props.FloatProperty(attr="size_constraint", name="Size Constraint", description="Scale the model by 10 until it reacehs the size constraint. 
Zero Disables.", min=0.0, max=1000.0, soft_min=0.0, soft_max=1000.0, default=10.0), +# bpy.props.BoolProperty(attr="search_images", name="Image Search", description="Search subdirectories for any assosiated images (Warning, may be slow)", default=True), +# bpy.props.BoolProperty(attr="apply_matrix", name="Transform Fix", description="Workaround for object transformations importing incorrectly", default=False), + ] + + def execute(self, context): + load_3ds(self.path, context, 0.0, False, False) + return ('FINISHED',) + + def invoke(self, context, event): + wm = context.manager + wm.add_fileselect(self.__operator__) + return ('RUNNING_MODAL',) + ''' + def poll(self, context): + print("Poll") + return context.active_object != None''' + +bpy.ops.add(IMPORT_OT_3ds) + +# NOTES: +# why add 1 extra vertex? and remove it when done? +# disabled scaling to size, this requires exposing bb (easy) and understanding how it works (needs some time) diff --git a/release/scripts/io/import_obj.py b/release/scripts/io/import_obj.py new file mode 100644 index 00000000000..a762005ae7d --- /dev/null +++ b/release/scripts/io/import_obj.py @@ -0,0 +1,1638 @@ +#!BPY + +""" +Name: 'Wavefront (.obj)...' +Blender: 249 +Group: 'Import' +Tooltip: 'Load a Wavefront OBJ File, Shift: batch import all dir.' +""" + +__author__= "Campbell Barton", "Jiri Hnidek", "Paolo Ciccone" +__url__= ['http://wiki.blender.org/index.php/Scripts/Manual/Import/wavefront_obj', 'blender.org', 'blenderartists.org'] +__version__= "2.11" + +__bpydoc__= """\ +This script imports a Wavefront OBJ files to Blender. + +Usage: +Run this script from "File->Import" menu and then load the desired OBJ file. +Note, This loads mesh objects and materials only, nurbs and curves are not supported. +""" + +# ***** BEGIN GPL LICENSE BLOCK ***** +# +# Script copyright (C) Campbell J Barton 2007 +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+# +# ***** END GPL LICENCE BLOCK ***** +# -------------------------------------------------------------------------- + +import os +import time +import bpy +import Mathutils +import Geometry + +# from Blender import Mesh, Draw, Window, Texture, Material, sys +# # import BPyMesh +# import BPyImage +# import BPyMessages + +# try: import os +# except: os= False + +# Generic path functions +def stripFile(path): + '''Return directory, where the file is''' + lastSlash= max(path.rfind('\\'), path.rfind('/')) + if lastSlash != -1: + path= path[:lastSlash] + return '%s%s' % (path, os.sep) +# return '%s%s' % (path, sys.sep) + +def stripPath(path): + '''Strips the slashes from the back of a string''' + return path.split('/')[-1].split('\\')[-1] + +def stripExt(name): # name is a string + '''Strips the prefix off the name before writing''' + index= name.rfind('.') + if index != -1: + return name[ : index ] + else: + return name +# end path funcs + +def unpack_list(list_of_tuples): + l = [] + for t in list_of_tuples: + l.extend(t) + return l + +# same as above except that it adds 0 for triangle faces +def unpack_face_list(list_of_tuples): + l = [] + for t in list_of_tuples: + face = [i for i in t] + + if len(face) != 3 and len(face) != 4: + raise RuntimeError("{0} vertices in face.".format(len(face))) + + # rotate indices if the 4th is 0 + if len(face) == 4 and face[3] == 0: + face = [face[3], face[0], face[1], face[2]] + + if len(face) == 3: + face.append(0) + + l.extend(face) + + return l + +def BPyMesh_ngon(from_data, indices, PREF_FIX_LOOPS= True): + ''' + Takes a polyline of indices (fgon) + and returns a list of face indicie lists. + Designed to be used for importers that need indices for an fgon to create from existing verts. + + from_data: either a mesh, or a list/tuple of vectors. + indices: a list of indicies to use this list is the ordered closed polyline to fill, and can be a subset of the data given. + PREF_FIX_LOOPS: If this is enabled polylines that use loops to make multiple polylines are delt with correctly. + ''' + + if not set: # Need sets for this, otherwise do a normal fill. 
+ PREF_FIX_LOOPS= False + + Vector= Mathutils.Vector + if not indices: + return [] + + # return [] + def rvec(co): return round(co.x, 6), round(co.y, 6), round(co.z, 6) + def mlen(co): return abs(co[0])+abs(co[1])+abs(co[2]) # manhatten length of a vector, faster then length + + def vert_treplet(v, i): + return v, rvec(v), i, mlen(v) + + def ed_key_mlen(v1, v2): + if v1[3] > v2[3]: + return v2[1], v1[1] + else: + return v1[1], v2[1] + + + if not PREF_FIX_LOOPS: + ''' + Normal single concave loop filling + ''' + if type(from_data) in (tuple, list): + verts= [Vector(from_data[i]) for ii, i in enumerate(indices)] + else: + verts= [from_data.verts[i].co for ii, i in enumerate(indices)] + + for i in range(len(verts)-1, 0, -1): # same as reversed(xrange(1, len(verts))): + if verts[i][1]==verts[i-1][0]: + verts.pop(i-1) + + fill= Geometry.PolyFill([verts]) + + else: + ''' + Seperate this loop into multiple loops be finding edges that are used twice + This is used by lightwave LWO files a lot + ''' + + if type(from_data) in (tuple, list): + verts= [vert_treplet(Vector(from_data[i]), ii) for ii, i in enumerate(indices)] + else: + verts= [vert_treplet(from_data.verts[i].co, ii) for ii, i in enumerate(indices)] + + edges= [(i, i-1) for i in range(len(verts))] + if edges: + edges[0]= (0,len(verts)-1) + + if not verts: + return [] + + + edges_used= set() + edges_doubles= set() + # We need to check if any edges are used twice location based. + for ed in edges: + edkey= ed_key_mlen(verts[ed[0]], verts[ed[1]]) + if edkey in edges_used: + edges_doubles.add(edkey) + else: + edges_used.add(edkey) + + # Store a list of unconnected loop segments split by double edges. + # will join later + loop_segments= [] + + v_prev= verts[0] + context_loop= [v_prev] + loop_segments= [context_loop] + + for v in verts: + if v!=v_prev: + # Are we crossing an edge we removed? + if ed_key_mlen(v, v_prev) in edges_doubles: + context_loop= [v] + loop_segments.append(context_loop) + else: + if context_loop and context_loop[-1][1]==v[1]: + #raise "as" + pass + else: + context_loop.append(v) + + v_prev= v + # Now join loop segments + + def join_seg(s1,s2): + if s2[-1][1]==s1[0][1]: # + s1,s2= s2,s1 + elif s1[-1][1]==s2[0][1]: + pass + else: + return False + + # If were stuill here s1 and s2 are 2 segments in the same polyline + s1.pop() # remove the last vert from s1 + s1.extend(s2) # add segment 2 to segment 1 + + if s1[0][1]==s1[-1][1]: # remove endpoints double + s1.pop() + + s2[:]= [] # Empty this segment s2 so we dont use it again. 
+ return True + + joining_segments= True + while joining_segments: + joining_segments= False + segcount= len(loop_segments) + + for j in range(segcount-1, -1, -1): #reversed(range(segcount)): + seg_j= loop_segments[j] + if seg_j: + for k in range(j-1, -1, -1): # reversed(range(j)): + if not seg_j: + break + seg_k= loop_segments[k] + + if seg_k and join_seg(seg_j, seg_k): + joining_segments= True + + loop_list= loop_segments + + for verts in loop_list: + while verts and verts[0][1]==verts[-1][1]: + verts.pop() + + loop_list= [verts for verts in loop_list if len(verts)>2] + # DONE DEALING WITH LOOP FIXING + + + # vert mapping + vert_map= [None]*len(indices) + ii=0 + for verts in loop_list: + if len(verts)>2: + for i, vert in enumerate(verts): + vert_map[i+ii]= vert[2] + ii+=len(verts) + + fill= Geometry.PolyFill([ [v[0] for v in loop] for loop in loop_list ]) + #draw_loops(loop_list) + #raise 'done loop' + # map to original indicies + fill= [[vert_map[i] for i in reversed(f)] for f in fill] + + + if not fill: + print('Warning Cannot scanfill, fallback on a triangle fan.') + fill= [ [0, i-1, i] for i in range(2, len(indices)) ] + else: + # Use real scanfill. + # See if its flipped the wrong way. + flip= None + for fi in fill: + if flip != None: + break + for i, vi in enumerate(fi): + if vi==0 and fi[i-1]==1: + flip= False + break + elif vi==1 and fi[i-1]==0: + flip= True + break + + if not flip: + for i, fi in enumerate(fill): + fill[i]= tuple([ii for ii in reversed(fi)]) + + return fill + +def line_value(line_split): + ''' + Returns 1 string represneting the value for this line + None will be returned if theres only 1 word + ''' + length= len(line_split) + if length == 1: + return None + + elif length == 2: + return line_split[1] + + elif length > 2: + return ' '.join( line_split[1:] ) + +# limited replacement for BPyImage.comprehensiveImageLoad +def load_image(imagepath, dirname): + + if os.path.exists(imagepath): + return bpy.data.add_image(imagepath) + + variants = [os.path.join(dirname, imagepath), os.path.join(dirname, os.path.basename(imagepath))] + + for path in variants: + if os.path.exists(path): + return bpy.data.add_image(path) + else: + print(path, "doesn't exist") + + # TODO comprehensiveImageLoad also searched in bpy.config.textureDir + return None + +def obj_image_load(imagepath, DIR, IMAGE_SEARCH): + + if '_' in imagepath: + image= load_image(imagepath.replace('_', ' '), DIR) + if image: return image + + return load_image(imagepath, DIR) + +# def obj_image_load(imagepath, DIR, IMAGE_SEARCH): +# ''' +# Mainly uses comprehensiveImageLoad +# but tries to replace '_' with ' ' for Max's exporter replaces spaces with underscores. +# ''' + +# if '_' in imagepath: +# image= BPyImage.comprehensiveImageLoad(imagepath, DIR, PLACE_HOLDER= False, RECURSIVE= IMAGE_SEARCH) +# if image: return image +# # Did the exporter rename the image? 
+# image= BPyImage.comprehensiveImageLoad(imagepath.replace('_', ' '), DIR, PLACE_HOLDER= False, RECURSIVE= IMAGE_SEARCH) +# if image: return image + +# # Return an image, placeholder if it dosnt exist +# image= BPyImage.comprehensiveImageLoad(imagepath, DIR, PLACE_HOLDER= True, RECURSIVE= IMAGE_SEARCH) +# return image + + +def create_materials(filepath, material_libs, unique_materials, unique_material_images, IMAGE_SEARCH): + ''' + Create all the used materials in this obj, + assign colors and images to the materials from all referenced material libs + ''' + DIR= stripFile(filepath) + + #==================================================================================# + # This function sets textures defined in .mtl file # + #==================================================================================# + def load_material_image(blender_material, context_material_name, imagepath, type): + + texture= bpy.data.add_texture(type) + texture.type= 'IMAGE' +# texture= bpy.data.textures.new(type) +# texture.setType('Image') + + # Absolute path - c:\.. etc would work here + image= obj_image_load(imagepath, DIR, IMAGE_SEARCH) + has_data = image.has_data if image else False + + if image: + texture.image = image + + # Adds textures for materials (rendering) + if type == 'Kd': + if has_data and image.depth == 32: + # Image has alpha + + # XXX bitmask won't work? + blender_material.add_texture(texture, "UV", ("COLOR", "ALPHA")) + texture.mipmap = True + texture.interpolation = True + texture.use_alpha = True + blender_material.z_transparency = True + blender_material.alpha = 0.0 + +# blender_material.setTexture(0, texture, Texture.TexCo.UV, Texture.MapTo.COL | Texture.MapTo.ALPHA) +# texture.setImageFlags('MipMap', 'InterPol', 'UseAlpha') +# blender_material.mode |= Material.Modes.ZTRANSP +# blender_material.alpha = 0.0 + else: + blender_material.add_texture(texture, "UV", "COLOR") +# blender_material.setTexture(0, texture, Texture.TexCo.UV, Texture.MapTo.COL) + + # adds textures to faces (Textured/Alt-Z mode) + # Only apply the diffuse texture to the face if the image has not been set with the inline usemat func. + unique_material_images[context_material_name]= image, has_data # set the texface image + + elif type == 'Ka': + blender_material.add_texture(texture, "UV", "AMBIENT") +# blender_material.setTexture(1, texture, Texture.TexCo.UV, Texture.MapTo.CMIR) # TODO- Add AMB to BPY API + + elif type == 'Ks': + blender_material.add_texture(texture, "UV", "SPECULARITY") +# blender_material.setTexture(2, texture, Texture.TexCo.UV, Texture.MapTo.SPEC) + + elif type == 'Bump': + blender_material.add_texture(texture, "UV", "NORMAL") +# blender_material.setTexture(3, texture, Texture.TexCo.UV, Texture.MapTo.NOR) + elif type == 'D': + blender_material.add_texture(texture, "UV", "ALPHA") + blender_material.z_transparency = True + blender_material.alpha = 0.0 +# blender_material.setTexture(4, texture, Texture.TexCo.UV, Texture.MapTo.ALPHA) +# blender_material.mode |= Material.Modes.ZTRANSP +# blender_material.alpha = 0.0 + # Todo, unset deffuse material alpha if it has an alpha channel + + elif type == 'refl': + blender_material.add_texture(texture, "UV", "REFLECTION") +# blender_material.setTexture(5, texture, Texture.TexCo.UV, Texture.MapTo.REF) + + + # Add an MTL with the same name as the obj if no MTLs are spesified. 
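# For reference, the .mtl parsing loop below handles plain statements such as
# (illustrative snippet, not part of this patch):
#     newmtl Material
#     Kd 0.800 0.800 0.800      (diffuse colour)
#     Ns 96.0                   (specular hardness, scaled by 0.51)
#     d 1.0                     (alpha / dissolve)
#     map_Kd diffuse.png        (diffuse texture, via load_material_image())
# with one material block per 'newmtl' line.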
+ temp_mtl= stripExt(stripPath(filepath))+ '.mtl' + + if os.path.exists(DIR + temp_mtl) and temp_mtl not in material_libs: +# if sys.exists(DIR + temp_mtl) and temp_mtl not in material_libs: + material_libs.append( temp_mtl ) + del temp_mtl + + #Create new materials + for name in unique_materials: # .keys() + if name != None: + unique_materials[name]= bpy.data.add_material(name) +# unique_materials[name]= bpy.data.materials.new(name) + unique_material_images[name]= None, False # assign None to all material images to start with, add to later. + + unique_materials[None]= None + unique_material_images[None]= None, False + + for libname in material_libs: + mtlpath= DIR + libname + if not os.path.exists(mtlpath): +# if not sys.exists(mtlpath): + #print '\tError Missing MTL: "%s"' % mtlpath + pass + else: + #print '\t\tloading mtl: "%s"' % mtlpath + context_material= None + mtl= open(mtlpath, 'rU') + for line in mtl: #.xreadlines(): + if line.startswith('newmtl'): + context_material_name= line_value(line.split()) + if context_material_name in unique_materials: + context_material = unique_materials[ context_material_name ] + else: + context_material = None + + elif context_material: + # we need to make a material to assign properties to it. + line_split= line.split() + line_lower= line.lower().lstrip() + if line_lower.startswith('ka'): + context_material.mirror_color = (float(line_split[1]), float(line_split[2]), float(line_split[3])) +# context_material.setMirCol((float(line_split[1]), float(line_split[2]), float(line_split[3]))) + elif line_lower.startswith('kd'): + context_material.diffuse_color = (float(line_split[1]), float(line_split[2]), float(line_split[3])) +# context_material.setRGBCol((float(line_split[1]), float(line_split[2]), float(line_split[3]))) + elif line_lower.startswith('ks'): + context_material.specular_color = (float(line_split[1]), float(line_split[2]), float(line_split[3])) +# context_material.setSpecCol((float(line_split[1]), float(line_split[2]), float(line_split[3]))) + elif line_lower.startswith('ns'): + context_material.specular_hardness = int((float(line_split[1])*0.51)) +# context_material.setHardness( int((float(line_split[1])*0.51)) ) + elif line_lower.startswith('ni'): # Refraction index + context_material.ior = max(1, min(float(line_split[1]), 3)) +# context_material.setIOR( max(1, min(float(line_split[1]), 3))) # Between 1 and 3 + elif line_lower.startswith('d') or line_lower.startswith('tr'): + context_material.alpha = float(line_split[1]) +# context_material.setAlpha(float(line_split[1])) + elif line_lower.startswith('map_ka'): + img_filepath= line_value(line.split()) + if img_filepath: + load_material_image(context_material, context_material_name, img_filepath, 'Ka') + elif line_lower.startswith('map_ks'): + img_filepath= line_value(line.split()) + if img_filepath: + load_material_image(context_material, context_material_name, img_filepath, 'Ks') + elif line_lower.startswith('map_kd'): + img_filepath= line_value(line.split()) + if img_filepath: + load_material_image(context_material, context_material_name, img_filepath, 'Kd') + elif line_lower.startswith('map_bump'): + img_filepath= line_value(line.split()) + if img_filepath: + load_material_image(context_material, context_material_name, img_filepath, 'Bump') + elif line_lower.startswith('map_d') or line_lower.startswith('map_tr'): # Alpha map - Dissolve + img_filepath= line_value(line.split()) + if img_filepath: + load_material_image(context_material, context_material_name, img_filepath, 'D') + + elif 
line_lower.startswith('refl'): # Reflectionmap + img_filepath= line_value(line.split()) + if img_filepath: + load_material_image(context_material, context_material_name, img_filepath, 'refl') + mtl.close() + + + + +def split_mesh(verts_loc, faces, unique_materials, filepath, SPLIT_OB_OR_GROUP, SPLIT_MATERIALS): + ''' + Takes vert_loc and faces, and seperates into multiple sets of + (verts_loc, faces, unique_materials, dataname) + This is done so objects do not overload the 16 material limit. + ''' + + filename = stripExt(stripPath(filepath)) + + if not SPLIT_OB_OR_GROUP and not SPLIT_MATERIALS: + # use the filename for the object name since we arnt chopping up the mesh. + return [(verts_loc, faces, unique_materials, filename)] + + + def key_to_name(key): + # if the key is a tuple, join it to make a string + if type(key) == tuple: + return '%s_%s' % key + elif not key: + return filename # assume its a string. make sure this is true if the splitting code is changed + else: + return key + + # Return a key that makes the faces unique. + if SPLIT_OB_OR_GROUP and not SPLIT_MATERIALS: + def face_key(face): + return face[4] # object + + elif not SPLIT_OB_OR_GROUP and SPLIT_MATERIALS: + def face_key(face): + return face[2] # material + + else: # Both + def face_key(face): + return face[4], face[2] # object,material + + + face_split_dict= {} + + oldkey= -1 # initialize to a value that will never match the key + + for face in faces: + + key= face_key(face) + + if oldkey != key: + # Check the key has changed. + try: + verts_split, faces_split, unique_materials_split, vert_remap= face_split_dict[key] + except KeyError: + faces_split= [] + verts_split= [] + unique_materials_split= {} + vert_remap= [-1]*len(verts_loc) + + face_split_dict[key]= (verts_split, faces_split, unique_materials_split, vert_remap) + + oldkey= key + + face_vert_loc_indicies= face[0] + + # Remap verts to new vert list and add where needed + for enum, i in enumerate(face_vert_loc_indicies): + if vert_remap[i] == -1: + new_index= len(verts_split) + vert_remap[i]= new_index # set the new remapped index so we only add once and can reference next time. 
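+ # vert_remap maps an index into the global verts_loc list to its index in this split's verts_split list (-1 = not copied into this split yet)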
+ face_vert_loc_indicies[enum] = new_index # remap to the local index + verts_split.append( verts_loc[i] ) # add the vert to the local verts + + else: + face_vert_loc_indicies[enum] = vert_remap[i] # remap to the local index + + matname= face[2] + if matname and matname not in unique_materials_split: + unique_materials_split[matname] = unique_materials[matname] + + faces_split.append(face) + + + # remove one of the itemas and reorder + return [(value[0], value[1], value[2], key_to_name(key)) for key, value in list(face_split_dict.items())] + + +def create_mesh(scn, new_objects, has_ngons, CREATE_FGONS, CREATE_EDGES, verts_loc, verts_tex, faces, unique_materials, unique_material_images, unique_smooth_groups, vertex_groups, dataname): + ''' + Takes all the data gathered and generates a mesh, adding the new object to new_objects + deals with fgons, sharp edges and assigning materials + ''' + if not has_ngons: + CREATE_FGONS= False + + if unique_smooth_groups: + sharp_edges= {} + smooth_group_users= dict([ (context_smooth_group, {}) for context_smooth_group in list(unique_smooth_groups.keys()) ]) + context_smooth_group_old= -1 + + # Split fgons into tri's + fgon_edges= {} # Used for storing fgon keys + if CREATE_EDGES: + edges= [] + + context_object= None + + # reverse loop through face indicies + for f_idx in range(len(faces)-1, -1, -1): + + face_vert_loc_indicies,\ + face_vert_tex_indicies,\ + context_material,\ + context_smooth_group,\ + context_object= faces[f_idx] + + len_face_vert_loc_indicies = len(face_vert_loc_indicies) + + if len_face_vert_loc_indicies==1: + faces.pop(f_idx)# cant add single vert faces + + elif not face_vert_tex_indicies or len_face_vert_loc_indicies == 2: # faces that have no texture coords are lines + if CREATE_EDGES: + # generators are better in python 2.4+ but can't be used in 2.3 + # edges.extend( (face_vert_loc_indicies[i], face_vert_loc_indicies[i+1]) for i in xrange(len_face_vert_loc_indicies-1) ) + edges.extend( [(face_vert_loc_indicies[i], face_vert_loc_indicies[i+1]) for i in range(len_face_vert_loc_indicies-1)] ) + + faces.pop(f_idx) + else: + + # Smooth Group + if unique_smooth_groups and context_smooth_group: + # Is a part of of a smooth group and is a face + if context_smooth_group_old is not context_smooth_group: + edge_dict= smooth_group_users[context_smooth_group] + context_smooth_group_old= context_smooth_group + + for i in range(len_face_vert_loc_indicies): + i1= face_vert_loc_indicies[i] + i2= face_vert_loc_indicies[i-1] + if i1>i2: i1,i2= i2,i1 + + try: + edge_dict[i1,i2]+= 1 + except KeyError: + edge_dict[i1,i2]= 1 + + # FGons into triangles + if has_ngons and len_face_vert_loc_indicies > 4: + + ngon_face_indices= BPyMesh_ngon(verts_loc, face_vert_loc_indicies) + faces.extend(\ + [(\ + [face_vert_loc_indicies[ngon[0]], face_vert_loc_indicies[ngon[1]], face_vert_loc_indicies[ngon[2]] ],\ + [face_vert_tex_indicies[ngon[0]], face_vert_tex_indicies[ngon[1]], face_vert_tex_indicies[ngon[2]] ],\ + context_material,\ + context_smooth_group,\ + context_object)\ + for ngon in ngon_face_indices]\ + ) + + # edges to make fgons + if CREATE_FGONS: + edge_users= {} + for ngon in ngon_face_indices: + for i in (0,1,2): + i1= face_vert_loc_indicies[ngon[i ]] + i2= face_vert_loc_indicies[ngon[i-1]] + if i1>i2: i1,i2= i2,i1 + + try: + edge_users[i1,i2]+=1 + except KeyError: + edge_users[i1,i2]= 1 + + for key, users in edge_users.items(): + if users>1: + fgon_edges[key]= None + + # remove all after 3, means we dont have to pop this one. 
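+ # the original ngon face at f_idx is popped below; it has been fully replaced by the triangles appended above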
+ faces.pop(f_idx) + + + # Build sharp edges + if unique_smooth_groups: + for edge_dict in list(smooth_group_users.values()): + for key, users in list(edge_dict.items()): + if users==1: # This edge is on the boundry of a group + sharp_edges[key]= None + + + # map the material names to an index + material_mapping= dict([(name, i) for i, name in enumerate(unique_materials)]) # enumerate over unique_materials keys() + + materials= [None] * len(unique_materials) + + for name, index in list(material_mapping.items()): + materials[index]= unique_materials[name] + + me= bpy.data.add_mesh(dataname) +# me= bpy.data.meshes.new(dataname) + + # make sure the list isnt too big + for material in materials[0:16]: + me.add_material(material) +# me.materials= materials[0:16] # make sure the list isnt too big. + #me.verts.extend([(0,0,0)]) # dummy vert + + me.add_geometry(len(verts_loc), 0, len(faces)) + + # verts_loc is a list of (x, y, z) tuples + me.verts.foreach_set("co", unpack_list(verts_loc)) +# me.verts.extend(verts_loc) + + # faces is a list of (vert_indices, texco_indices, ...) tuples + # XXX faces should contain either 3 or 4 verts + # XXX no check for valid face indices + me.faces.foreach_set("verts_raw", unpack_face_list([f[0] for f in faces])) +# face_mapping= me.faces.extend([f[0] for f in faces], indexList=True) + + if verts_tex and me.faces: + me.add_uv_texture() +# me.faceUV= 1 + # TEXMODE= Mesh.FaceModes['TEX'] + + context_material_old= -1 # avoid a dict lookup + mat= 0 # rare case it may be un-initialized. + me_faces= me.faces +# ALPHA= Mesh.FaceTranspModes.ALPHA + + for i, face in enumerate(faces): + if len(face[0]) < 2: + pass #raise "bad face" + elif len(face[0])==2: + if CREATE_EDGES: + edges.append(face[0]) + else: +# face_index_map= face_mapping[i] + + # since we use foreach_set to add faces, all of them are added + if 1: +# if face_index_map!=None: # None means the face wasnt added + + blender_face = me.faces[i] +# blender_face= me_faces[face_index_map] + + face_vert_loc_indicies,\ + face_vert_tex_indicies,\ + context_material,\ + context_smooth_group,\ + context_object= face + + + + if context_smooth_group: + blender_face.smooth= True + + if context_material: + if context_material_old is not context_material: + mat= material_mapping[context_material] + if mat>15: + mat= 15 + context_material_old= context_material + + blender_face.material_index= mat +# blender_face.mat= mat + + + if verts_tex: + + blender_tface= me.uv_textures[0].data[i] + + if context_material: + image, has_data= unique_material_images[context_material] + if image: # Can be none if the material dosnt have an image. + blender_tface.image= image +# blender_face.image= image + if has_data: +# if has_data and image.depth == 32: + blender_tface.transp = 'ALPHA' +# blender_face.transp |= ALPHA + + # BUG - Evil eekadoodle problem where faces that have vert index 0 location at 3 or 4 are shuffled. 
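+ # the UV index order is pre-rotated below so it stays matched with the vertex reordering Blender applies to such faces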
+ if len(face_vert_loc_indicies)==4: + if face_vert_loc_indicies[2]==0 or face_vert_loc_indicies[3]==0: + face_vert_tex_indicies= face_vert_tex_indicies[2], face_vert_tex_indicies[3], face_vert_tex_indicies[0], face_vert_tex_indicies[1] + else: # length of 3 + if face_vert_loc_indicies[2]==0: + face_vert_tex_indicies= face_vert_tex_indicies[1], face_vert_tex_indicies[2], face_vert_tex_indicies[0] + # END EEEKADOODLE FIX + + # assign material, uv's and image + blender_tface.uv1= verts_tex[face_vert_tex_indicies[0]] + blender_tface.uv2= verts_tex[face_vert_tex_indicies[1]] + blender_tface.uv3= verts_tex[face_vert_tex_indicies[2]] + + if blender_face.verts[3] != 0: + blender_tface.uv4= verts_tex[face_vert_tex_indicies[3]] + +# for ii, uv in enumerate(blender_face.uv): +# uv.x, uv.y= verts_tex[face_vert_tex_indicies[ii]] + del me_faces +# del ALPHA + + if CREATE_EDGES: + + me.add_geometry(0, len(edges), 0) + + # edges should be a list of (a, b) tuples + me.edges.foreach_set("verts", unpack_list(edges)) +# me_edges.extend( edges ) + +# del me_edges + + # Add edge faces. +# me_edges= me.edges + + def edges_match(e1, e2): + return (e1[0] == e2[0] and e1[1] == e2[1]) or (e1[0] == e2[1] and e1[1] == e2[0]) + + # XXX slow +# if CREATE_FGONS and fgon_edges: +# for fgon_edge in fgon_edges.keys(): +# for ed in me.edges: +# if edges_match(fgon_edge, ed.verts): +# ed.fgon = True + +# if CREATE_FGONS and fgon_edges: +# FGON= Mesh.EdgeFlags.FGON +# for ed in me.findEdges( fgon_edges.keys() ): +# if ed!=None: +# me_edges[ed].flag |= FGON +# del FGON + + # XXX slow +# if unique_smooth_groups and sharp_edges: +# for sharp_edge in sharp_edges.keys(): +# for ed in me.edges: +# if edges_match(sharp_edge, ed.verts): +# ed.sharp = True + +# if unique_smooth_groups and sharp_edges: +# SHARP= Mesh.EdgeFlags.SHARP +# for ed in me.findEdges( sharp_edges.keys() ): +# if ed!=None: +# me_edges[ed].flag |= SHARP +# del SHARP + + me.update() +# me.calcNormals() + + ob= bpy.data.add_object("MESH", "Mesh") + ob.data= me + scn.add_object(ob) +# ob= scn.objects.new(me) + new_objects.append(ob) + + # Create the vertex groups. No need to have the flag passed here since we test for the + # content of the vertex_groups. 
If the user selects to NOT have vertex groups saved then + # the following test will never run + for group_name, group_indicies in vertex_groups.items(): + group= ob.add_vertex_group(group_name) +# me.addVertGroup(group_name) + for vertex_index in group_indicies: + ob.add_vertex_to_group(vertex_index, group, 1.0, 'REPLACE') +# me.assignVertsToGroup(group_name, group_indicies, 1.00, Mesh.AssignModes.REPLACE) + + +def create_nurbs(scn, context_nurbs, vert_loc, new_objects): + ''' + Add nurbs object to blender, only support one type at the moment + ''' + deg = context_nurbs.get('deg', (3,)) + curv_range = context_nurbs.get('curv_range', None) + curv_idx = context_nurbs.get('curv_idx', []) + parm_u = context_nurbs.get('parm_u', []) + parm_v = context_nurbs.get('parm_v', []) + name = context_nurbs.get('name', 'ObjNurb') + cstype = context_nurbs.get('cstype', None) + + if cstype == None: + print('\tWarning, cstype not found') + return + if cstype != 'bspline': + print('\tWarning, cstype is not supported (only bspline)') + return + if not curv_idx: + print('\tWarning, curv argument empty or not set') + return + if len(deg) > 1 or parm_v: + print('\tWarning, surfaces not supported') + return + + cu = bpy.data.curves.new(name, 'Curve') + cu.flag |= 1 # 3D curve + + nu = None + for pt in curv_idx: + + pt = vert_loc[pt] + pt = (pt[0], pt[1], pt[2], 1.0) + + if nu == None: + nu = cu.appendNurb(pt) + else: + nu.append(pt) + + nu.orderU = deg[0]+1 + + # get for endpoint flag from the weighting + if curv_range and len(parm_u) > deg[0]+1: + do_endpoints = True + for i in range(deg[0]+1): + + if abs(parm_u[i]-curv_range[0]) > 0.0001: + do_endpoints = False + break + + if abs(parm_u[-(i+1)]-curv_range[1]) > 0.0001: + do_endpoints = False + break + + else: + do_endpoints = False + + if do_endpoints: + nu.flagU |= 2 + + + # close + ''' + do_closed = False + if len(parm_u) > deg[0]+1: + for i in xrange(deg[0]+1): + #print curv_idx[i], curv_idx[-(i+1)] + + if curv_idx[i]==curv_idx[-(i+1)]: + do_closed = True + break + + if do_closed: + nu.flagU |= 1 + ''' + + ob = scn.objects.new(cu) + new_objects.append(ob) + + +def strip_slash(line_split): + if line_split[-1][-1]== '\\': + if len(line_split[-1])==1: + line_split.pop() # remove the \ item + else: + line_split[-1]= line_split[-1][:-1] # remove the \ from the end last number + return True + return False + + + +def get_float_func(filepath): + ''' + find the float function for this obj file + - weather to replace commas or not + ''' + file= open(filepath, 'rU') + for line in file: #.xreadlines(): + line = line.lstrip() + if line.startswith('v'): # vn vt v + if ',' in line: + return lambda f: float(f.replace(',', '.')) + elif '.' in line: + return float + + # incase all vert values were ints + return float + +def load_obj(filepath, + context, + CLAMP_SIZE= 0.0, + CREATE_FGONS= True, + CREATE_SMOOTH_GROUPS= True, + CREATE_EDGES= True, + SPLIT_OBJECTS= True, + SPLIT_GROUPS= True, + SPLIT_MATERIALS= True, + ROTATE_X90= True, + IMAGE_SEARCH=True, + POLYGROUPS=False): + ''' + Called by the user interface or another script. + load_obj(path) - should give acceptable results. 
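+ For example (path shown is illustrative only): load_obj('/tmp/suzanne.obj', bpy.context)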
+ This function passes the file and sends the data off + to be split into objects and then converted into mesh objects + ''' + print('\nimporting obj "%s"' % filepath) + + if SPLIT_OBJECTS or SPLIT_GROUPS or SPLIT_MATERIALS: + POLYGROUPS = False + + time_main= time.time() +# time_main= sys.time() + + verts_loc= [] + verts_tex= [] + faces= [] # tuples of the faces + material_libs= [] # filanems to material libs this uses + vertex_groups = {} # when POLYGROUPS is true + + # Get the string to float conversion func for this file- is 'float' for almost all files. + float_func= get_float_func(filepath) + + # Context variables + context_material= None + context_smooth_group= None + context_object= None + context_vgroup = None + + # Nurbs + context_nurbs = {} + nurbs = [] + context_parm = '' # used by nurbs too but could be used elsewhere + + has_ngons= False + # has_smoothgroups= False - is explicit with len(unique_smooth_groups) being > 0 + + # Until we can use sets + unique_materials= {} + unique_material_images= {} + unique_smooth_groups= {} + # unique_obects= {} - no use for this variable since the objects are stored in the face. + + # when there are faces that end with \ + # it means they are multiline- + # since we use xreadline we cant skip to the next line + # so we need to know weather + context_multi_line= '' + + print('\tparsing obj file "%s"...' % filepath) + time_sub= time.time() +# time_sub= sys.time() + + file= open(filepath, 'rU') + for line in file: #.xreadlines(): + line = line.lstrip() # rare cases there is white space at the start of the line + + if line.startswith('v '): + line_split= line.split() + # rotate X90: (x,-z,y) + verts_loc.append( (float_func(line_split[1]), -float_func(line_split[3]), float_func(line_split[2])) ) + + elif line.startswith('vn '): + pass + + elif line.startswith('vt '): + line_split= line.split() + verts_tex.append( (float_func(line_split[1]), float_func(line_split[2])) ) + + # Handel faces lines (as faces) and the second+ lines of fa multiline face here + # use 'f' not 'f ' because some objs (very rare have 'fo ' for faces) + elif line.startswith('f') or context_multi_line == 'f': + + if context_multi_line: + # use face_vert_loc_indicies and face_vert_tex_indicies previously defined and used the obj_face + line_split= line.split() + + else: + line_split= line[2:].split() + face_vert_loc_indicies= [] + face_vert_tex_indicies= [] + + # Instance a face + faces.append((\ + face_vert_loc_indicies,\ + face_vert_tex_indicies,\ + context_material,\ + context_smooth_group,\ + context_object\ + )) + + if strip_slash(line_split): + context_multi_line = 'f' + else: + context_multi_line = '' + + for v in line_split: + obj_vert= v.split('/') + + vert_loc_index= int(obj_vert[0])-1 + # Add the vertex to the current group + # *warning*, this wont work for files that have groups defined around verts + if POLYGROUPS and context_vgroup: + vertex_groups[context_vgroup].append(vert_loc_index) + + # Make relative negative vert indicies absolute + if vert_loc_index < 0: + vert_loc_index= len(verts_loc) + vert_loc_index + 1 + + face_vert_loc_indicies.append(vert_loc_index) + + if len(obj_vert)>1 and obj_vert[1]: + # formatting for faces with normals and textures us + # loc_index/tex_index/nor_index + + vert_tex_index= int(obj_vert[1])-1 + # Make relative negative vert indicies absolute + if vert_tex_index < 0: + vert_tex_index= len(verts_tex) + vert_tex_index + 1 + + face_vert_tex_indicies.append(vert_tex_index) + else: + # dummy + face_vert_tex_indicies.append(0) + + if 
len(face_vert_loc_indicies) > 4: + has_ngons= True + + elif CREATE_EDGES and (line.startswith('l ') or context_multi_line == 'l'): + # very similar to the face load function above with some parts removed + + if context_multi_line: + # use face_vert_loc_indicies and face_vert_tex_indicies previously defined and used the obj_face + line_split= line.split() + + else: + line_split= line[2:].split() + face_vert_loc_indicies= [] + face_vert_tex_indicies= [] + + # Instance a face + faces.append((\ + face_vert_loc_indicies,\ + face_vert_tex_indicies,\ + context_material,\ + context_smooth_group,\ + context_object\ + )) + + if strip_slash(line_split): + context_multi_line = 'l' + else: + context_multi_line = '' + + isline= line.startswith('l') + + for v in line_split: + vert_loc_index= int(v)-1 + + # Make relative negative vert indicies absolute + if vert_loc_index < 0: + vert_loc_index= len(verts_loc) + vert_loc_index + 1 + + face_vert_loc_indicies.append(vert_loc_index) + + elif line.startswith('s'): + if CREATE_SMOOTH_GROUPS: + context_smooth_group= line_value(line.split()) + if context_smooth_group=='off': + context_smooth_group= None + elif context_smooth_group: # is not None + unique_smooth_groups[context_smooth_group]= None + + elif line.startswith('o'): + if SPLIT_OBJECTS: + context_object= line_value(line.split()) + # unique_obects[context_object]= None + + elif line.startswith('g'): + if SPLIT_GROUPS: + context_object= line_value(line.split()) + # print 'context_object', context_object + # unique_obects[context_object]= None + elif POLYGROUPS: + context_vgroup = line_value(line.split()) + if context_vgroup and context_vgroup != '(null)': + vertex_groups.setdefault(context_vgroup, []) + else: + context_vgroup = None # dont assign a vgroup + + elif line.startswith('usemtl'): + context_material= line_value(line.split()) + unique_materials[context_material]= None + elif line.startswith('mtllib'): # usemap or usemat + material_libs.extend( line.split()[1:] ) # can have multiple mtllib filenames per line + + + # Nurbs support + elif line.startswith('cstype '): + context_nurbs['cstype']= line_value(line.split()) # 'rat bspline' / 'bspline' + elif line.startswith('curv ') or context_multi_line == 'curv': + line_split= line.split() + + curv_idx = context_nurbs['curv_idx'] = context_nurbs.get('curv_idx', []) # incase were multiline + + if not context_multi_line: + context_nurbs['curv_range'] = float_func(line_split[1]), float_func(line_split[2]) + line_split[0:3] = [] # remove first 3 items + + if strip_slash(line_split): + context_multi_line = 'curv' + else: + context_multi_line = '' + + + for i in line_split: + vert_loc_index = int(i)-1 + + if vert_loc_index < 0: + vert_loc_index= len(verts_loc) + vert_loc_index + 1 + + curv_idx.append(vert_loc_index) + + elif line.startswith('parm') or context_multi_line == 'parm': + line_split= line.split() + + if context_multi_line: + context_multi_line = '' + else: + context_parm = line_split[1] + line_split[0:2] = [] # remove first 2 + + if strip_slash(line_split): + context_multi_line = 'parm' + else: + context_multi_line = '' + + if context_parm.lower() == 'u': + context_nurbs.setdefault('parm_u', []).extend( [float_func(f) for f in line_split] ) + elif context_parm.lower() == 'v': # surfaces not suported yet + context_nurbs.setdefault('parm_v', []).extend( [float_func(f) for f in line_split] ) + # else: # may want to support other parm's ? 
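+ # the remaining nurbs statements handled below are 'deg' (curve degree) and 'end' (store the finished curve in nurbs[])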
+ + elif line.startswith('deg '): + context_nurbs['deg']= [int(i) for i in line.split()[1:]] + elif line.startswith('end'): + # Add the nurbs curve + if context_object: + context_nurbs['name'] = context_object + nurbs.append(context_nurbs) + context_nurbs = {} + context_parm = '' + + ''' # How to use usemap? depricated? + elif line.startswith('usema'): # usemap or usemat + context_image= line_value(line.split()) + ''' + + file.close() + time_new= time.time() +# time_new= sys.time() + print('%.4f sec' % (time_new-time_sub)) + time_sub= time_new + + + print('\tloading materials and images...') + create_materials(filepath, material_libs, unique_materials, unique_material_images, IMAGE_SEARCH) + + time_new= time.time() +# time_new= sys.time() + print('%.4f sec' % (time_new-time_sub)) + time_sub= time_new + + if not ROTATE_X90: + verts_loc[:] = [(v[0], v[2], -v[1]) for v in verts_loc] + + # deselect all +# if context.selected_objects: +# bpy.ops.OBJECT_OT_select_all_toggle() + + scene = context.scene +# scn = bpy.data.scenes.active +# scn.objects.selected = [] + new_objects= [] # put new objects here + + print('\tbuilding geometry...\n\tverts:%i faces:%i materials: %i smoothgroups:%i ...' % ( len(verts_loc), len(faces), len(unique_materials), len(unique_smooth_groups) )) + # Split the mesh by objects/materials, may + if SPLIT_OBJECTS or SPLIT_GROUPS: SPLIT_OB_OR_GROUP = True + else: SPLIT_OB_OR_GROUP = False + + for verts_loc_split, faces_split, unique_materials_split, dataname in split_mesh(verts_loc, faces, unique_materials, filepath, SPLIT_OB_OR_GROUP, SPLIT_MATERIALS): + # Create meshes from the data, warning 'vertex_groups' wont support splitting + create_mesh(scene, new_objects, has_ngons, CREATE_FGONS, CREATE_EDGES, verts_loc_split, verts_tex, faces_split, unique_materials_split, unique_material_images, unique_smooth_groups, vertex_groups, dataname) + + # nurbs support +# for context_nurbs in nurbs: +# create_nurbs(scn, context_nurbs, verts_loc, new_objects) + + + axis_min= [ 1000000000]*3 + axis_max= [-1000000000]*3 + +# if CLAMP_SIZE: +# # Get all object bounds +# for ob in new_objects: +# for v in ob.getBoundBox(): +# for axis, value in enumerate(v): +# if axis_min[axis] > value: axis_min[axis]= value +# if axis_max[axis] < value: axis_max[axis]= value + +# # Scale objects +# max_axis= max(axis_max[0]-axis_min[0], axis_max[1]-axis_min[1], axis_max[2]-axis_min[2]) +# scale= 1.0 + +# while CLAMP_SIZE < max_axis * scale: +# scale= scale/10.0 + +# for ob in new_objects: +# ob.setSize(scale, scale, scale) + + # Better rotate the vert locations + #if not ROTATE_X90: + # for ob in new_objects: + # ob.RotX = -1.570796326794896558 + + time_new= time.time() +# time_new= sys.time() + + print('%.4f sec' % (time_new-time_sub)) + print('finished importing: "%s" in %.4f sec.' 
% (filepath, (time_new-time_main))) + + +DEBUG= True + + +def load_obj_ui(filepath, BATCH_LOAD= False): + if BPyMessages.Error_NoFile(filepath): + return + + global CREATE_SMOOTH_GROUPS, CREATE_FGONS, CREATE_EDGES, SPLIT_OBJECTS, SPLIT_GROUPS, SPLIT_MATERIALS, CLAMP_SIZE, IMAGE_SEARCH, POLYGROUPS, KEEP_VERT_ORDER, ROTATE_X90 + + CREATE_SMOOTH_GROUPS= Draw.Create(0) + CREATE_FGONS= Draw.Create(1) + CREATE_EDGES= Draw.Create(1) + SPLIT_OBJECTS= Draw.Create(0) + SPLIT_GROUPS= Draw.Create(0) + SPLIT_MATERIALS= Draw.Create(0) + CLAMP_SIZE= Draw.Create(10.0) + IMAGE_SEARCH= Draw.Create(1) + POLYGROUPS= Draw.Create(0) + KEEP_VERT_ORDER= Draw.Create(1) + ROTATE_X90= Draw.Create(1) + + + # Get USER Options + # Note, Works but not pretty, instead use a more complicated GUI + ''' + pup_block= [\ + 'Import...',\ + ('Smooth Groups', CREATE_SMOOTH_GROUPS, 'Surround smooth groups by sharp edges'),\ + ('Create FGons', CREATE_FGONS, 'Import faces with more then 4 verts as fgons.'),\ + ('Lines', CREATE_EDGES, 'Import lines and faces with 2 verts as edges'),\ + 'Separate objects from obj...',\ + ('Object', SPLIT_OBJECTS, 'Import OBJ Objects into Blender Objects'),\ + ('Group', SPLIT_GROUPS, 'Import OBJ Groups into Blender Objects'),\ + ('Material', SPLIT_MATERIALS, 'Import each material into a seperate mesh (Avoids > 16 per mesh error)'),\ + 'Options...',\ + ('Keep Vert Order', KEEP_VERT_ORDER, 'Keep vert and face order, disables some other options.'),\ + ('Clamp Scale:', CLAMP_SIZE, 0.0, 1000.0, 'Clamp the size to this maximum (Zero to Disable)'),\ + ('Image Search', IMAGE_SEARCH, 'Search subdirs for any assosiated images (Warning, may be slow)'),\ + ] + + if not Draw.PupBlock('Import OBJ...', pup_block): + return + + if KEEP_VERT_ORDER.val: + SPLIT_OBJECTS.val = False + SPLIT_GROUPS.val = False + SPLIT_MATERIALS.val = False + ''' + + + + # BEGIN ALTERNATIVE UI ******************* + if True: + + EVENT_NONE = 0 + EVENT_EXIT = 1 + EVENT_REDRAW = 2 + EVENT_IMPORT = 3 + + GLOBALS = {} + GLOBALS['EVENT'] = EVENT_REDRAW + #GLOBALS['MOUSE'] = Window.GetMouseCoords() + GLOBALS['MOUSE'] = [i/2 for i in Window.GetScreenSize()] + + def obj_ui_set_event(e,v): + GLOBALS['EVENT'] = e + + def do_split(e,v): + global SPLIT_OBJECTS, SPLIT_GROUPS, SPLIT_MATERIALS, KEEP_VERT_ORDER, POLYGROUPS + if SPLIT_OBJECTS.val or SPLIT_GROUPS.val or SPLIT_MATERIALS.val: + KEEP_VERT_ORDER.val = 0 + POLYGROUPS.val = 0 + else: + KEEP_VERT_ORDER.val = 1 + + def do_vertorder(e,v): + global SPLIT_OBJECTS, SPLIT_GROUPS, SPLIT_MATERIALS, KEEP_VERT_ORDER + if KEEP_VERT_ORDER.val: + SPLIT_OBJECTS.val = SPLIT_GROUPS.val = SPLIT_MATERIALS.val = 0 + else: + if not (SPLIT_OBJECTS.val or SPLIT_GROUPS.val or SPLIT_MATERIALS.val): + KEEP_VERT_ORDER.val = 1 + + def do_polygroups(e,v): + global SPLIT_OBJECTS, SPLIT_GROUPS, SPLIT_MATERIALS, KEEP_VERT_ORDER, POLYGROUPS + if POLYGROUPS.val: + SPLIT_OBJECTS.val = SPLIT_GROUPS.val = SPLIT_MATERIALS.val = 0 + + def do_help(e,v): + url = __url__[0] + print('Trying to open web browser with documentation at this address...') + print('\t' + url) + + try: + import webbrowser + webbrowser.open(url) + except: + print('...could not open a browser window.') + + def obj_ui(): + ui_x, ui_y = GLOBALS['MOUSE'] + + # Center based on overall pup size + ui_x -= 165 + ui_y -= 90 + + global CREATE_SMOOTH_GROUPS, CREATE_FGONS, CREATE_EDGES, SPLIT_OBJECTS, SPLIT_GROUPS, SPLIT_MATERIALS, CLAMP_SIZE, IMAGE_SEARCH, POLYGROUPS, KEEP_VERT_ORDER, ROTATE_X90 + + Draw.Label('Import...', ui_x+9, ui_y+159, 220, 21) + Draw.BeginAlign() + 
CREATE_SMOOTH_GROUPS = Draw.Toggle('Smooth Groups', EVENT_NONE, ui_x+9, ui_y+139, 110, 20, CREATE_SMOOTH_GROUPS.val, 'Surround smooth groups by sharp edges') + CREATE_FGONS = Draw.Toggle('NGons as FGons', EVENT_NONE, ui_x+119, ui_y+139, 110, 20, CREATE_FGONS.val, 'Import faces with more then 4 verts as fgons') + CREATE_EDGES = Draw.Toggle('Lines as Edges', EVENT_NONE, ui_x+229, ui_y+139, 110, 20, CREATE_EDGES.val, 'Import lines and faces with 2 verts as edges') + Draw.EndAlign() + + Draw.Label('Separate objects by OBJ...', ui_x+9, ui_y+110, 220, 20) + Draw.BeginAlign() + SPLIT_OBJECTS = Draw.Toggle('Object', EVENT_REDRAW, ui_x+9, ui_y+89, 55, 21, SPLIT_OBJECTS.val, 'Import OBJ Objects into Blender Objects', do_split) + SPLIT_GROUPS = Draw.Toggle('Group', EVENT_REDRAW, ui_x+64, ui_y+89, 55, 21, SPLIT_GROUPS.val, 'Import OBJ Groups into Blender Objects', do_split) + SPLIT_MATERIALS = Draw.Toggle('Material', EVENT_REDRAW, ui_x+119, ui_y+89, 60, 21, SPLIT_MATERIALS.val, 'Import each material into a seperate mesh (Avoids > 16 per mesh error)', do_split) + Draw.EndAlign() + + # Only used for user feedback + KEEP_VERT_ORDER = Draw.Toggle('Keep Vert Order', EVENT_REDRAW, ui_x+184, ui_y+89, 113, 21, KEEP_VERT_ORDER.val, 'Keep vert and face order, disables split options, enable for morph targets', do_vertorder) + + ROTATE_X90 = Draw.Toggle('-X90', EVENT_REDRAW, ui_x+302, ui_y+89, 38, 21, ROTATE_X90.val, 'Rotate X 90.') + + Draw.Label('Options...', ui_x+9, ui_y+60, 211, 20) + CLAMP_SIZE = Draw.Number('Clamp Scale: ', EVENT_NONE, ui_x+9, ui_y+39, 130, 21, CLAMP_SIZE.val, 0.0, 1000.0, 'Clamp the size to this maximum (Zero to Disable)') + POLYGROUPS = Draw.Toggle('Poly Groups', EVENT_REDRAW, ui_x+144, ui_y+39, 90, 21, POLYGROUPS.val, 'Import OBJ groups as vertex groups.', do_polygroups) + IMAGE_SEARCH = Draw.Toggle('Image Search', EVENT_NONE, ui_x+239, ui_y+39, 100, 21, IMAGE_SEARCH.val, 'Search subdirs for any assosiated images (Warning, may be slow)') + Draw.BeginAlign() + Draw.PushButton('Online Help', EVENT_REDRAW, ui_x+9, ui_y+9, 110, 21, 'Load the wiki page for this script', do_help) + Draw.PushButton('Cancel', EVENT_EXIT, ui_x+119, ui_y+9, 110, 21, '', obj_ui_set_event) + Draw.PushButton('Import', EVENT_IMPORT, ui_x+229, ui_y+9, 110, 21, 'Import with these settings', obj_ui_set_event) + Draw.EndAlign() + + + # hack so the toggle buttons redraw. 
this is not nice at all + while GLOBALS['EVENT'] not in (EVENT_EXIT, EVENT_IMPORT): + Draw.UIBlock(obj_ui, 0) + + if GLOBALS['EVENT'] != EVENT_IMPORT: + return + + # END ALTERNATIVE UI ********************* + + + + + + + + Window.WaitCursor(1) + + if BATCH_LOAD: # load the dir + try: + files= [ f for f in os.listdir(filepath) if f.lower().endswith('.obj') ] + except: + Window.WaitCursor(0) + Draw.PupMenu('Error%t|Could not open path ' + filepath) + return + + if not files: + Window.WaitCursor(0) + Draw.PupMenu('Error%t|No files at path ' + filepath) + return + + for f in files: + scn= bpy.data.scenes.new( stripExt(f) ) + scn.makeCurrent() + + load_obj(sys.join(filepath, f),\ + CLAMP_SIZE.val,\ + CREATE_FGONS.val,\ + CREATE_SMOOTH_GROUPS.val,\ + CREATE_EDGES.val,\ + SPLIT_OBJECTS.val,\ + SPLIT_GROUPS.val,\ + SPLIT_MATERIALS.val,\ + ROTATE_X90.val,\ + IMAGE_SEARCH.val,\ + POLYGROUPS.val + ) + + else: # Normal load + load_obj(filepath,\ + CLAMP_SIZE.val,\ + CREATE_FGONS.val,\ + CREATE_SMOOTH_GROUPS.val,\ + CREATE_EDGES.val,\ + SPLIT_OBJECTS.val,\ + SPLIT_GROUPS.val,\ + SPLIT_MATERIALS.val,\ + ROTATE_X90.val,\ + IMAGE_SEARCH.val,\ + POLYGROUPS.val + ) + + Window.WaitCursor(0) + + +def load_obj_ui_batch(file): + load_obj_ui(file, True) + +DEBUG= False + +# if __name__=='__main__' and not DEBUG: +# if os and Window.GetKeyQualifiers() & Window.Qual.SHIFT: +# Window.FileSelector(load_obj_ui_batch, 'Import OBJ Dir', '') +# else: +# Window.FileSelector(load_obj_ui, 'Import a Wavefront OBJ', '*.obj') + + # For testing compatibility +''' +else: + # DEBUG ONLY + TIME= sys.time() + DIR = '/fe/obj' + import os + print 'Searching for files' + def fileList(path): + for dirpath, dirnames, filenames in os.walk(path): + for filename in filenames: + yield os.path.join(dirpath, filename) + + files = [f for f in fileList(DIR) if f.lower().endswith('.obj')] + files.sort() + + for i, obj_file in enumerate(files): + if 0 < i < 20: + print 'Importing', obj_file, '\nNUMBER', i, 'of', len(files) + newScn= bpy.data.scenes.new(os.path.basename(obj_file)) + newScn.makeCurrent() + load_obj(obj_file, False, IMAGE_SEARCH=0) + + print 'TOTAL TIME: %.6f' % (sys.time() - TIME) +''' +#load_obj('/test.obj') +#load_obj('/fe/obj/mba1.obj') + + + +class IMPORT_OT_obj(bpy.types.Operator): + ''' + Operator documentation text, will be used for the operator tooltip and python docs. + ''' + __idname__ = "import.obj" + __label__ = "Import OBJ" + + # List of operator properties, the attributes will be assigned + # to the class instance from the operator settings before calling. 
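+ # e.g. the StringProperty with attr="path" is read back in execute() as self.path, and the toggles as self.CREATE_FGONS, self.SPLIT_OBJECTS, etc.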
+ + __props__ = [ + bpy.props.StringProperty(attr="path", name="File Path", description="File path used for importing the OBJ file", maxlen= 1024, default= ""), + + bpy.props.BoolProperty(attr="CREATE_SMOOTH_GROUPS", name="Smooth Groups", description="Surround smooth groups by sharp edges", default= True), + bpy.props.BoolProperty(attr="CREATE_FGONS", name="NGons as FGons", description="Import faces with more then 4 verts as fgons", default= True), + bpy.props.BoolProperty(attr="CREATE_EDGES", name="Lines as Edges", description="Import lines and faces with 2 verts as edge", default= True), + bpy.props.BoolProperty(attr="SPLIT_OBJECTS", name="Object", description="Import OBJ Objects into Blender Objects", default= True), + bpy.props.BoolProperty(attr="SPLIT_GROUPS", name="Group", description="Import OBJ Groups into Blender Objects", default= True), + bpy.props.BoolProperty(attr="SPLIT_MATERIALS", name="Material", description="Import each material into a seperate mesh (Avoids > 16 per mesh error)", default= True), + # old comment: only used for user feedback + # disabled this option because in old code a handler for it disabled SPLIT* params, it's not passed to load_obj + # bpy.props.BoolProperty(attr="KEEP_VERT_ORDER", name="Keep Vert Order", description="Keep vert and face order, disables split options, enable for morph targets", default= True), + bpy.props.BoolProperty(attr="ROTATE_X90", name="-X90", description="Rotate X 90.", default= True), + bpy.props.FloatProperty(attr="CLAMP_SIZE", name="Clamp Scale", description="Clamp the size to this maximum (Zero to Disable)", min=0.01, max=1000.0, soft_min=0.0, soft_max=1000.0, default=0.0), + bpy.props.BoolProperty(attr="POLYGROUPS", name="Poly Groups", description="Import OBJ groups as vertex groups.", default= True), + bpy.props.BoolProperty(attr="IMAGE_SEARCH", name="Image Search", description="Search subdirs for any assosiated images (Warning, may be slow)", default= True), + ] + + ''' + def poll(self, context): + return True ''' + + def execute(self, context): + # print("Selected: " + context.active_object.name) + + load_obj(self.path, + context, + self.CLAMP_SIZE, + self.CREATE_FGONS, + self.CREATE_SMOOTH_GROUPS, + self.CREATE_EDGES, + self.SPLIT_OBJECTS, + self.SPLIT_GROUPS, + self.SPLIT_MATERIALS, + self.ROTATE_X90, + self.IMAGE_SEARCH, + self.POLYGROUPS) + + return ('FINISHED',) + + def invoke(self, context, event): + wm = context.manager + wm.add_fileselect(self.__operator__) + return ('RUNNING_MODAL',) + + +bpy.ops.add(IMPORT_OT_obj) + + +# NOTES (all line numbers refer to 2.4x import_obj.py, not this file) +# check later: line 489 +# can convert now: edge flags, edges: lines 508-528 +# ngon (uses python module BPyMesh): 384-414 +# nurbs: 947- +# NEXT clamp size: get bound box with RNA +# get back to l 140 (here) +# search image in bpy.config.textureDir - load_image +# replaced BPyImage.comprehensiveImageLoad with a simplified version that only checks additional directory specified, but doesn't search dirs recursively (obj_image_load) +# bitmask won't work? - 132 +# uses operator bpy.ops.OBJECT_OT_select_all_toggle() to deselect all (not necessary?) +# uses bpy.sys.time() diff --git a/release/scripts/io/netrender/__init__.py b/release/scripts/io/netrender/__init__.py new file mode 100644 index 00000000000..4a1dd2238e3 --- /dev/null +++ b/release/scripts/io/netrender/__init__.py @@ -0,0 +1,19 @@ +# This directory is a Python package. 
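+# note: these bare submodule imports (model, operators, ...) only resolve when this package directory itself is on the module search path; as regular package imports they would read 'from netrender import model', etc.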
+ +import model +import operators +import client +import slave +import master +import master_html +import utils +import balancing +import ui + +# store temp data in bpy module + +import bpy + +bpy.data.netrender_jobs = [] +bpy.data.netrender_slaves = [] +bpy.data.netrender_blacklist = [] \ No newline at end of file diff --git a/release/scripts/io/netrender/balancing.py b/release/scripts/io/netrender/balancing.py new file mode 100644 index 00000000000..637dd5ff92e --- /dev/null +++ b/release/scripts/io/netrender/balancing.py @@ -0,0 +1,94 @@ +import time + +from netrender.utils import * +import netrender.model + +class RatingRule: + def rate(self, job): + return 0 + +class ExclusionRule: + def test(self, job): + return False + +class PriorityRule: + def test(self, job): + return False + +class Balancer: + def __init__(self): + self.rules = [] + self.priorities = [] + self.exceptions = [] + + def addRule(self, rule): + self.rules.append(rule) + + def addPriority(self, priority): + self.priorities.append(priority) + + def addException(self, exception): + self.exceptions.append(exception) + + def applyRules(self, job): + return sum((rule.rate(job) for rule in self.rules)) + + def applyPriorities(self, job): + for priority in self.priorities: + if priority.test(job): + return True # priorities are first + + return False + + def applyExceptions(self, job): + for exception in self.exceptions: + if exception.test(job): + return True # exceptions are last + + return False + + def sortKey(self, job): + return (1 if self.applyExceptions(job) else 0, # exceptions after + 0 if self.applyPriorities(job) else 1, # priorities first + self.applyRules(job)) + + def balance(self, jobs): + if jobs: + jobs.sort(key=self.sortKey) + return jobs[0] + else: + return None + +# ========================== + +class RatingUsage(RatingRule): + def rate(self, job): + # less usage is better + return job.usage / job.priority + +class NewJobPriority(PriorityRule): + def __init__(self, limit = 1): + self.limit = limit + + def test(self, job): + return job.countFrames(status = DONE) < self.limit + +class MinimumTimeBetweenDispatchPriority(PriorityRule): + def __init__(self, limit = 10): + self.limit = limit + + def test(self, job): + return job.countFrames(status = DISPATCHED) == 0 and (time.time() - job.last_dispatched) / 60 > self.limit + +class ExcludeQueuedEmptyJob(ExclusionRule): + def test(self, job): + return job.status != JOB_QUEUED or job.countFrames(status = QUEUED) == 0 + +class ExcludeSlavesLimit(ExclusionRule): + def __init__(self, count_jobs, count_slaves, limit = 0.75): + self.count_jobs = count_jobs + self.count_slaves = count_slaves + self.limit = limit + + def test(self, job): + return not ( self.count_jobs() == 1 or self.count_slaves() <= 1 or float(job.countSlaves() + 1) / self.count_slaves() <= self.limit ) diff --git a/release/scripts/io/netrender/client.py b/release/scripts/io/netrender/client.py new file mode 100644 index 00000000000..65b2937867f --- /dev/null +++ b/release/scripts/io/netrender/client.py @@ -0,0 +1,203 @@ +import bpy +import sys, os, re +import http, http.client, http.server, urllib +import subprocess, shutil, time, hashlib + +import netrender.slave as slave +import netrender.master as master +from netrender.utils import * + + +def clientSendJob(conn, scene, anim = False, chunks = 5): + netsettings = scene.network_render + job = netrender.model.RenderJob() + + if anim: + for f in range(scene.start_frame, scene.end_frame + 1): + job.addFrame(f) + else: + 
job.addFrame(scene.current_frame) + + filename = bpy.data.filename + job.addFile(filename) + + job_name = netsettings.job_name + path, name = os.path.split(filename) + if job_name == "[default]": + job_name = name + + ########################### + # LIBRARIES + ########################### + for lib in bpy.data.libraries: + lib_path = lib.filename + + if lib_path.startswith("//"): + lib_path = path + os.sep + lib_path[2:] + + job.addFile(lib_path) + + ########################### + # POINT CACHES + ########################### + + root, ext = os.path.splitext(name) + cache_path = path + os.sep + "blendcache_" + root + os.sep # need an API call for that + + if os.path.exists(cache_path): + caches = {} + pattern = re.compile("([a-zA-Z0-9]+)_([0-9]+)_[0-9]+\.bphys") + for cache_file in sorted(os.listdir(cache_path)): + match = pattern.match(cache_file) + + if match: + cache_id = match.groups()[0] + cache_frame = int(match.groups()[1]) + + cache_files = caches.get(cache_id, []) + cache_files.append((cache_frame, cache_file)) + caches[cache_id] = cache_files + + for cache in caches.values(): + cache.sort() + + if len(cache) == 1: + cache_frame, cache_file = cache[0] + job.addFile(cache_path + cache_file, cache_frame, cache_frame) + else: + for i in range(len(cache)): + current_item = cache[i] + next_item = cache[i+1] if i + 1 < len(cache) else None + previous_item = cache[i - 1] if i > 0 else None + + current_frame, current_file = current_item + + if not next_item and not previous_item: + job.addFile(cache_path + current_file, current_frame, current_frame) + elif next_item and not previous_item: + next_frame = next_item[0] + job.addFile(cache_path + current_file, current_frame, next_frame - 1) + elif not next_item and previous_item: + previous_frame = previous_item[0] + job.addFile(cache_path + current_file, previous_frame + 1, current_frame) + else: + next_frame = next_item[0] + previous_frame = previous_item[0] + job.addFile(cache_path + current_file, previous_frame + 1, next_frame - 1) + + ########################### + # IMAGES + ########################### + for image in bpy.data.images: + if image.source == "FILE" and not image.packed_file: + job.addFile(image.filename) + + # print(job.files) + + job.name = job_name + + for slave in scene.network_render.slaves_blacklist: + job.blacklist.append(slave.id) + + job.chunks = netsettings.chunks + job.priority = netsettings.priority + + # try to send path first + conn.request("POST", "/job", repr(job.serialize())) + response = conn.getresponse() + + job_id = response.getheader("job-id") + + # if not ACCEPTED (but not processed), send files + if response.status == http.client.ACCEPTED: + for filepath, start, end in job.files: + f = open(filepath, "rb") + conn.request("PUT", "/file", f, headers={"job-id": job_id, "job-file": filepath}) + f.close() + response = conn.getresponse() + + # server will reply with NOT_FOUD until all files are found + + return job_id + +def requestResult(conn, job_id, frame): + conn.request("GET", "/render", headers={"job-id": job_id, "job-frame":str(frame)}) + +@rnaType +class NetworkRenderEngine(bpy.types.RenderEngine): + __idname__ = 'NET_RENDER' + __label__ = "Network Render" + def render(self, scene): + if scene.network_render.mode == "RENDER_CLIENT": + self.render_client(scene) + elif scene.network_render.mode == "RENDER_SLAVE": + self.render_slave(scene) + elif scene.network_render.mode == "RENDER_MASTER": + self.render_master(scene) + else: + print("UNKNOWN OPERATION MODE") + + def render_master(self, scene): + 
netsettings = scene.network_render + + address = "" if netsettings.server_address == "[default]" else netsettings.server_address + + master.runMaster((address, netsettings.server_port), netsettings.server_broadcast, netsettings.path, self.update_stats, self.test_break) + + + def render_slave(self, scene): + slave.render_slave(self, scene) + + def render_client(self, scene): + netsettings = scene.network_render + self.update_stats("", "Network render client initiation") + + + conn = clientConnection(scene) + + if conn: + # Sending file + + self.update_stats("", "Network render exporting") + + job_id = netsettings.job_id + + # reading back result + + self.update_stats("", "Network render waiting for results") + + requestResult(conn, job_id, scene.current_frame) + response = conn.getresponse() + + if response.status == http.client.NO_CONTENT: + netsettings.job_id = clientSendJob(conn, scene) + requestResult(conn, job_id, scene.current_frame) + + while response.status == http.client.ACCEPTED and not self.test_break(): + time.sleep(1) + requestResult(conn, job_id, scene.current_frame) + response = conn.getresponse() + + if response.status != http.client.OK: + conn.close() + return + + r = scene.render_data + x= int(r.resolution_x*r.resolution_percentage*0.01) + y= int(r.resolution_y*r.resolution_percentage*0.01) + + f = open(netsettings.path + "output.exr", "wb") + buf = response.read(1024) + + while buf: + f.write(buf) + buf = response.read(1024) + + f.close() + + result = self.begin_result(0, 0, x, y) + result.load_from_file(netsettings.path + "output.exr", 0, 0) + self.end_result(result) + + conn.close() + diff --git a/release/scripts/io/netrender/master.py b/release/scripts/io/netrender/master.py new file mode 100644 index 00000000000..a3e186a9cfd --- /dev/null +++ b/release/scripts/io/netrender/master.py @@ -0,0 +1,752 @@ +import sys, os +import http, http.client, http.server, urllib, socket +import subprocess, shutil, time, hashlib + +from netrender.utils import * +import netrender.model +import netrender.balancing +import netrender.master_html + +class MRenderFile: + def __init__(self, filepath, start, end): + self.filepath = filepath + self.start = start + self.end = end + self.found = False + + def test(self): + self.found = os.path.exists(self.filepath) + return self.found + + +class MRenderSlave(netrender.model.RenderSlave): + def __init__(self, name, address, stats): + super().__init__() + self.id = hashlib.md5(bytes(repr(name) + repr(address), encoding='utf8')).hexdigest() + self.name = name + self.address = address + self.stats = stats + self.last_seen = time.time() + + self.job = None + self.job_frames = [] + + netrender.model.RenderSlave._slave_map[self.id] = self + + def seen(self): + self.last_seen = time.time() + + def finishedFrame(self, frame_number): + self.job_frames.remove(frame_number) + if not self.job_frames: + self.job = None + +class MRenderJob(netrender.model.RenderJob): + def __init__(self, job_id, name, files, chunks = 1, priority = 1, blacklist = []): + super().__init__() + self.id = job_id + self.name = name + self.files = files + self.frames = [] + self.chunks = chunks + self.priority = priority + self.usage = 0.0 + self.blacklist = blacklist + self.last_dispatched = time.time() + + # special server properties + self.last_update = 0 + self.save_path = "" + self.files_map = {path: MRenderFile(path, start, end) for path, start, end in files} + self.status = JOB_WAITING + + def save(self): + if self.save_path: + f = open(self.save_path + "job.txt", "w") + 
f.write(repr(self.serialize())) + f.close() + + def testStart(self): + for f in self.files_map.values(): + if not f.test(): + return False + + self.start() + return True + + def testFinished(self): + for f in self.frames: + if f.status == QUEUED or f.status == DISPATCHED: + break + else: + self.status = JOB_FINISHED + + def start(self): + self.status = JOB_QUEUED + + def addLog(self, frames): + log_name = "_".join(("%04d" % f for f in frames)) + ".log" + log_path = self.save_path + log_name + + for number in frames: + frame = self[number] + if frame: + frame.log_path = log_path + + def addFrame(self, frame_number): + frame = MRenderFrame(frame_number) + self.frames.append(frame) + return frame + + def reset(self, all): + for f in self.frames: + f.reset(all) + + def getFrames(self): + frames = [] + for f in self.frames: + if f.status == QUEUED: + self.last_dispatched = time.time() + frames.append(f) + if len(frames) >= self.chunks: + break + + return frames + +class MRenderFrame(netrender.model.RenderFrame): + def __init__(self, frame): + super().__init__() + self.number = frame + self.slave = None + self.time = 0 + self.status = QUEUED + self.log_path = None + + def reset(self, all): + if all or self.status == ERROR: + self.slave = None + self.time = 0 + self.status = QUEUED + + +# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- +# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- + +class RenderHandler(http.server.BaseHTTPRequestHandler): + def send_head(self, code = http.client.OK, headers = {}, content = "application/octet-stream"): + self.send_response(code) + self.send_header("Content-type", content) + + for key, value in headers.items(): + self.send_header(key, value) + + self.end_headers() + + def do_HEAD(self): + + if self.path == "/status": + job_id = self.headers.get('job-id', "") + job_frame = int(self.headers.get('job-frame', -1)) + + job = self.server.getJobID(job_id) + if job: + frame = job[job_frame] + + + if frame: + self.send_head(http.client.OK) + else: + # no such frame + self.send_head(http.client.NO_CONTENT) + else: + # no such job id + self.send_head(http.client.NO_CONTENT) + + # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- + # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= + # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- + # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= + # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- + + def do_GET(self): + + if self.path == "/version": + self.send_head() + self.server.stats("", "Version check") + self.wfile.write(VERSION) + # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- + elif self.path == "/render": + job_id = self.headers['job-id'] + job_frame = int(self.headers['job-frame']) + + job = self.server.getJobID(job_id) + + if job: + frame = job[job_frame] + + if frame: + if frame.status in (QUEUED, DISPATCHED): + self.send_head(http.client.ACCEPTED) + elif 
frame.status == DONE: + self.server.stats("", "Sending result to client") + f = open(job.save_path + "%04d" % job_frame + ".exr", 'rb') + + self.send_head() + + shutil.copyfileobj(f, self.wfile) + + f.close() + elif frame.status == ERROR: + self.send_head(http.client.PARTIAL_CONTENT) + else: + # no such frame + self.send_head(http.client.NO_CONTENT) + else: + # no such job id + self.send_head(http.client.NO_CONTENT) + # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- + elif self.path == "/log": + job_id = self.headers['job-id'] + job_frame = int(self.headers['job-frame']) + + job = self.server.getJobID(job_id) + + if job: + frame = job[job_frame] + + if frame: + if not frame.log_path or frame.status in (QUEUED, DISPATCHED): + self.send_head(http.client.PROCESSING) + else: + self.server.stats("", "Sending log to client") + f = open(frame.log_path, 'rb') + + self.send_head() + + shutil.copyfileobj(f, self.wfile) + + f.close() + else: + # no such frame + self.send_head(http.client.NO_CONTENT) + else: + # no such job id + self.send_head(http.client.NO_CONTENT) + # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- + elif self.path == "/status": + job_id = self.headers.get('job-id', "") + job_frame = int(self.headers.get('job-frame', -1)) + + if job_id: + + job = self.server.getJobID(job_id) + if job: + if job_frame != -1: + frame = job[frame] + + if frame: + message = frame.serialize() + else: + # no such frame + self.send_heat(http.client.NO_CONTENT) + return + else: + message = job.serialize() + else: + # no such job id + self.send_head(http.client.NO_CONTENT) + return + else: # status of all jobs + message = [] + + for job in self.server: + message.append(job.serialize()) + + + self.server.stats("", "Sending status") + self.send_head() + self.wfile.write(bytes(repr(message), encoding='utf8')) + + # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- + elif self.path == "/job": + self.server.balance() + + slave_id = self.headers['slave-id'] + + slave = self.server.getSeenSlave(slave_id) + + if slave: # only if slave id is valid + job, frames = self.server.newDispatch(slave_id) + + if job and frames: + for f in frames: + print("dispatch", f.number) + f.status = DISPATCHED + f.slave = slave + + slave.job = job + slave.job_frames = [f.number for f in frames] + + self.send_head(headers={"job-id": job.id}) + + message = job.serialize(frames) + + self.wfile.write(bytes(repr(message), encoding='utf8')) + + self.server.stats("", "Sending job to slave") + else: + # no job available, return error code + slave.job = None + slave.job_frames = [] + + self.send_head(http.client.ACCEPTED) + else: # invalid slave id + self.send_head(http.client.NO_CONTENT) + # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- + elif self.path == "/file": + slave_id = self.headers['slave-id'] + + slave = self.server.getSeenSlave(slave_id) + + if slave: # only if slave id is valid + job_id = self.headers['job-id'] + job_file = self.headers['job-file'] + + job = self.server.getJobID(job_id) + + if job: + render_file = job.files_map.get(job_file, None) + + if render_file: + self.server.stats("", "Sending file to slave") + f = open(render_file.filepath, 'rb') + + self.send_head() + shutil.copyfileobj(f, self.wfile) + + f.close() + else: + # no such file + self.send_head(http.client.NO_CONTENT) + else: + # no such 
job id + self.send_head(http.client.NO_CONTENT) + else: # invalid slave id + self.send_head(http.client.NO_CONTENT) + # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- + elif self.path == "/slaves": + message = [] + + self.server.stats("", "Sending slaves status") + + for slave in self.server.slaves: + message.append(slave.serialize()) + + self.send_head() + + self.wfile.write(bytes(repr(message), encoding='utf8')) + # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- + else: + # hand over the rest to the html section + netrender.master_html.get(self) + + # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- + # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= + # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- + # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= + # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- + def do_POST(self): + + # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- + if self.path == "/job": + + length = int(self.headers['content-length']) + + job_info = netrender.model.RenderJob.materialize(eval(str(self.rfile.read(length), encoding='utf8'))) + + job_id = self.server.nextJobID() + + job = MRenderJob(job_id, job_info.name, job_info.files, chunks = job_info.chunks, priority = job_info.priority, blacklist = job_info.blacklist) + + for frame in job_info.frames: + frame = job.addFrame(frame.number) + + self.server.addJob(job) + + headers={"job-id": job_id} + + if job.testStart(): + self.server.stats("", "New job, missing files") + self.send_head(headers=headers) + else: + self.server.stats("", "New job, started") + self.send_head(http.client.ACCEPTED, headers=headers) + # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- + elif self.path == "/cancel": + job_id = self.headers.get('job-id', "") + + job = self.server.getJobID(job_id) + + if job: + self.server.stats("", "Cancelling job") + self.server.removeJob(job) + self.send_head() + else: + # no such job id + self.send_head(http.client.NO_CONTENT) + + # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- + elif self.path == "/clear": + # cancel all jobs + self.server.stats("", "Clearing jobs") + self.server.clear() + + self.send_head() + # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- + elif self.path == "/reset": + job_id = self.headers.get('job-id', "") + job_frame = int(self.headers.get('job-frame', "-1")) + all = bool(self.headers.get('reset-all', "False")) + + job = self.server.getJobID(job_id) + + if job: + if job_frame != -1: + + frame = job[job_frame] + if frame: + self.server.stats("", "Reset job frame") + frame.reset(all) + self.send_head() + else: + # no such frame + self.send_head(http.client.NO_CONTENT) + + else: + self.server.stats("", "Reset job") + job.reset(all) + self.send_head() + + else: # job not found + self.send_head(http.client.NO_CONTENT) + # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- + elif self.path == "/slave": + length = 
int(self.headers['content-length']) + job_frame_string = self.headers['job-frame'] + + self.server.stats("", "New slave connected") + + slave_info = netrender.model.RenderSlave.materialize(eval(str(self.rfile.read(length), encoding='utf8'))) + + slave_id = self.server.addSlave(slave_info.name, self.client_address, slave_info.stats) + + self.send_head(headers = {"slave-id": slave_id}) + # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- + elif self.path == "/log": + slave_id = self.headers['slave-id'] + + slave = self.server.getSeenSlave(slave_id) + + if slave: # only if slave id is valid + length = int(self.headers['content-length']) + + log_info = netrender.model.LogFile.materialize(eval(str(self.rfile.read(length), encoding='utf8'))) + + job = self.server.getJobID(log_info.job_id) + + if job: + self.server.stats("", "Log announcement") + job.addLog(log_info.frames) + self.send_head(http.client.OK) + else: + # no such job id + self.send_head(http.client.NO_CONTENT) + else: # invalid slave id + self.send_head(http.client.NO_CONTENT) + # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- + # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= + # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- + # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= + # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- + def do_PUT(self): + + # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- + if self.path == "/file": + self.server.stats("", "Receiving job") + + length = int(self.headers['content-length']) + job_id = self.headers['job-id'] + job_file = self.headers['job-file'] + + job = self.server.getJobID(job_id) + + if job: + + render_file = job.files_map.get(job_file, None) + + if render_file: + main_file = job.files[0][0] # filename of the first file + + main_path, main_name = os.path.split(main_file) + + if job_file != main_file: + file_path = prefixPath(job.save_path, job_file, main_path) + else: + file_path = job.save_path + main_name + + buf = self.rfile.read(length) + + # add same temp file + renames as slave + + f = open(file_path, "wb") + f.write(buf) + f.close() + del buf + + render_file.filepath = file_path # set the new path + + if job.testStart(): + self.server.stats("", "File upload, starting job") + self.send_head(http.client.OK) + else: + self.server.stats("", "File upload, file missings") + self.send_head(http.client.ACCEPTED) + else: # invalid file + self.send_head(http.client.NO_CONTENT) + else: # job not found + self.send_head(http.client.NO_CONTENT) + # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- + elif self.path == "/render": + self.server.stats("", "Receiving render result") + + slave_id = self.headers['slave-id'] + + slave = self.server.getSeenSlave(slave_id) + + if slave: # only if slave id is valid + job_id = self.headers['job-id'] + + job = self.server.getJobID(job_id) + + if job: + job_frame = int(self.headers['job-frame']) + job_result = int(self.headers['job-result']) + job_time = float(self.headers['job-time']) + + frame = job[job_frame] + + if frame: + if job_result == DONE: + length = int(self.headers['content-length']) + buf = 
self.rfile.read(length) + f = open(job.save_path + "%04d" % job_frame + ".exr", 'wb') + f.write(buf) + f.close() + + del buf + elif job_result == ERROR: + # blacklist slave on this job on error + job.blacklist.append(slave.id) + + self.server.stats("", "Receiving result") + + slave.finishedFrame(job_frame) + + frame.status = job_result + frame.time = job_time + + job.testFinished() + + self.send_head() + else: # frame not found + self.send_head(http.client.NO_CONTENT) + else: # job not found + self.send_head(http.client.NO_CONTENT) + else: # invalid slave id + self.send_head(http.client.NO_CONTENT) + # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- + elif self.path == "/log": + self.server.stats("", "Receiving log file") + + job_id = self.headers['job-id'] + + job = self.server.getJobID(job_id) + + if job: + job_frame = int(self.headers['job-frame']) + + frame = job[job_frame] + + if frame and frame.log_path: + length = int(self.headers['content-length']) + buf = self.rfile.read(length) + f = open(frame.log_path, 'ab') + f.write(buf) + f.close() + + del buf + + self.server.getSeenSlave(self.headers['slave-id']) + + self.send_head() + else: # frame not found + self.send_head(http.client.NO_CONTENT) + else: # job not found + self.send_head(http.client.NO_CONTENT) + +class RenderMasterServer(http.server.HTTPServer): + def __init__(self, address, handler_class, path): + super().__init__(address, handler_class) + self.jobs = [] + self.jobs_map = {} + self.slaves = [] + self.slaves_map = {} + self.job_id = 0 + self.path = path + "master_" + str(os.getpid()) + os.sep + + self.slave_timeout = 2 + + self.balancer = netrender.balancing.Balancer() + self.balancer.addRule(netrender.balancing.RatingUsage()) + self.balancer.addException(netrender.balancing.ExcludeQueuedEmptyJob()) + self.balancer.addException(netrender.balancing.ExcludeSlavesLimit(self.countJobs, self.countSlaves, limit = 0.9)) + self.balancer.addPriority(netrender.balancing.NewJobPriority()) + self.balancer.addPriority(netrender.balancing.MinimumTimeBetweenDispatchPriority(limit = 2)) + + if not os.path.exists(self.path): + os.mkdir(self.path) + + def nextJobID(self): + self.job_id += 1 + return str(self.job_id) + + def addSlave(self, name, address, stats): + slave = MRenderSlave(name, address, stats) + self.slaves.append(slave) + self.slaves_map[slave.id] = slave + + return slave.id + + def removeSlave(self, slave): + self.slaves.remove(slave) + self.slaves_map.pop(slave.id) + + def getSlave(self, slave_id): + return self.slaves_map.get(slave_id, None) + + def getSeenSlave(self, slave_id): + slave = self.getSlave(slave_id) + if slave: + slave.seen() + + return slave + + def timeoutSlaves(self): + removed = [] + + t = time.time() + + for slave in self.slaves: + if (t - slave.last_seen) / 60 > self.slave_timeout: + removed.append(slave) + + if slave.job: + for f in slave.job_frames: + slave.job[f].status = ERROR + + for slave in removed: + self.removeSlave(slave) + + def updateUsage(self): + blend = 0.5 + for job in self.jobs: + job.usage *= (1 - blend) + + if self.slaves: + slave_usage = blend / self.countSlaves() + + for slave in self.slaves: + if slave.job: + slave.job.usage += slave_usage + + + def clear(self): + removed = self.jobs[:] + + for job in removed: + self.removeJob(job) + + def balance(self): + self.balancer.balance(self.jobs) + + def countJobs(self, status = JOB_QUEUED): + total = 0 + for j in self.jobs: + if j.status == status: + total += 1 + + return total + + 
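The GET/POST/PUT handlers above define the master's wire protocol: parameters travel in HTTP headers (job-id, job-frame, slave-id), file payloads go in the message body, and structured replies are written as repr() of plain dicts and lists encoded as UTF-8. A minimal client-side status query under those conventions might look like the sketch below; the host and port are illustrative, and eval() is used only because that is the decoding convention this module itself relies on.

import http.client

conn = http.client.HTTPConnection("localhost", 8000)  # illustrative address

# Without a job-id header, /status answers with the serialized list of all jobs.
conn.request("GET", "/status")
response = conn.getresponse()

if response.status == http.client.OK:
    # The master writes repr() of plain dicts/lists, UTF-8 encoded.
    jobs = eval(str(response.read(), encoding='utf8'))
    for job in jobs:
        print(job["id"], job["name"], "frames:", len(job["frames"]))

conn.close()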
    def countSlaves(self):
+        return len(self.slaves)
+
+    def removeJob(self, job):
+        self.jobs.remove(job)
+        self.jobs_map.pop(job.id)
+
+        for slave in self.slaves:
+            if slave.job == job:
+                slave.job = None
+                slave.job_frames = []
+
+    def addJob(self, job):
+        self.jobs.append(job)
+        self.jobs_map[job.id] = job
+
+        # create job directory
+        job.save_path = self.path + "job_" + job.id + os.sep
+        if not os.path.exists(job.save_path):
+            os.mkdir(job.save_path)
+
+        job.save()
+
+    def getJobID(self, id):
+        return self.jobs_map.get(id, None)
+
+    def __iter__(self):
+        for job in self.jobs:
+            yield job
+
+    def newDispatch(self, slave_id):
+        if self.jobs:
+            for job in self.jobs:
+                if not self.balancer.applyExceptions(job) and slave_id not in job.blacklist:
+                    return job, job.getFrames()
+
+        return None, None
+
+def runMaster(address, broadcast, path, update_stats, test_break):
+    httpd = RenderMasterServer(address, RenderHandler, path)
+    httpd.timeout = 1
+    httpd.stats = update_stats
+
+    if broadcast:
+        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+        s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
+
+    start_time = time.time()
+
+    while not test_break():
+        httpd.handle_request()
+
+        if time.time() - start_time >= 10: # need constant here
+            httpd.timeoutSlaves()
+
+            httpd.updateUsage()
+
+            if broadcast:
+                print("broadcasting address")
+                s.sendto(bytes("%i" % address[1], encoding='utf8'), 0, ('<broadcast>', 8000))
+            start_time = time.time()
diff --git a/release/scripts/io/netrender/master_html.py b/release/scripts/io/netrender/master_html.py
new file mode 100644
index 00000000000..6a956a70e9f
--- /dev/null
+++ b/release/scripts/io/netrender/master_html.py
@@ -0,0 +1,142 @@
+import re
+
+from netrender.utils import *
+
+
+def get(handler):
+    def output(text):
+        handler.wfile.write(bytes(text, encoding='utf8'))
+
+    def link(text, url):
+        return "<a href='%s'>%s</a>" % (url, text)
+
+    def startTable(border=1):
+        output("<table border='%i'>" % border)
+
+    def headerTable(*headers):
+        output("<thead><tr>")
+
+        for c in headers:
+            output("<td>" + c + "</td>")
+
+        output("</tr></thead>")
+
+    def rowTable(*data):
+        output("<tr>")
+
+        for c in data:
+            output("<td>" + str(c) + "</td>")
+
+        output("</tr>")
+
+    def endTable():
+        output("</table>")
+
+    handler.send_head(content = "text/html")
+
+    if handler.path == "/html" or handler.path == "/":
+        output("<html><head><title>NetRender</title></head><body>")
+
+        output("<h2>Master</h2>")
+
+        output("<h2>Slaves</h2>")
+
+        startTable()
+        headerTable("name", "address", "last seen", "stats", "job")
+
+        for slave in handler.server.slaves:
+            rowTable(slave.name, slave.address[0], time.ctime(slave.last_seen), slave.stats, link(slave.job.name, "/html/job" + slave.job.id) if slave.job else "None")
+
+        endTable()
+
+        output("<h2>Jobs</h2>")
+
+        startTable()
+        headerTable(
+                    "name",
+                    "priority",
+                    "usage",
+                    "wait",
+                    "length",
+                    "done",
+                    "dispatched",
+                    "error",
+                    "first",
+                    "exception"
+                    )
+
+        handler.server.balance()
+
+        for job in handler.server.jobs:
+            results = job.framesStatus()
+            rowTable(
+                     link(job.name, "/html/job" + job.id),
+                     job.priority,
+                     "%0.1f%%" % (job.usage * 100),
+                     "%is" % int(time.time() - job.last_dispatched),
+                     len(job),
+                     results[DONE],
+                     results[DISPATCHED],
+                     results[ERROR],
+                     handler.server.balancer.applyPriorities(job), handler.server.balancer.applyExceptions(job)
+                     )
+
+        endTable()
+
+        output("</body></html>")
+
+    elif handler.path.startswith("/html/job"):
+        job_id = handler.path[9:]
+
+        output("<html><head><title>NetRender</title></head><body>")
+
+        job = handler.server.getJobID(job_id)
+
+        if job:
+            output("<h2>Frames</h2>")
+
+            startTable()
+            headerTable("no", "status", "render time", "slave", "log")
+
+            for frame in job.frames:
+                rowTable(frame.number, frame.statusText(), "%.1fs" % frame.time, frame.slave.name if frame.slave else " ", link("view log", "/html/log%s_%i" % (job_id, frame.number)) if frame.log_path else " ")
+
+            endTable()
+        else:
+            output("no such job")
+
+        output("</body></html>")
+
+    elif handler.path.startswith("/html/log"):
+        pattern = re.compile("([a-zA-Z0-9]+)_([0-9]+)")
+
+        output("<html><head><title>NetRender</title></head><body>")
+
+        match = pattern.match(handler.path[9:])
+        if match:
+            job_id = match.groups()[0]
+            frame_number = int(match.groups()[1])
+
+            job = handler.server.getJobID(job_id)
+
+            if job:
+                frame = job[frame_number]
+
+                if frame:
+                    f = open(frame.log_path, 'rb')
+
+                    output("<pre>")
+
+                    shutil.copyfileobj(f, handler.wfile)
+
+                    output("</pre>
") + + f.close() + else: + output("no such frame") + else: + output("no such job") + else: + output("malformed url") + + output("") diff --git a/release/scripts/io/netrender/model.py b/release/scripts/io/netrender/model.py new file mode 100644 index 00000000000..be97f8d0a81 --- /dev/null +++ b/release/scripts/io/netrender/model.py @@ -0,0 +1,198 @@ +import sys, os +import http, http.client, http.server, urllib +import subprocess, shutil, time, hashlib + +from netrender.utils import * + +class LogFile: + def __init__(self, job_id = 0, frames = []): + self.job_id = job_id + self.frames = frames + + def serialize(self): + return { + "job_id": self.job_id, + "frames": self.frames + } + + @staticmethod + def materialize(data): + if not data: + return None + + logfile = LogFile() + logfile.job_id = data["job_id"] + logfile.frames = data["frames"] + + return logfile + +class RenderSlave: + _slave_map = {} + + def __init__(self): + self.id = "" + self.name = "" + self.address = ("",0) + self.stats = "" + self.total_done = 0 + self.total_error = 0 + self.last_seen = 0.0 + + def serialize(self): + return { + "id": self.id, + "name": self.name, + "address": self.address, + "stats": self.stats, + "total_done": self.total_done, + "total_error": self.total_error, + "last_seen": self.last_seen + } + + @staticmethod + def materialize(data): + if not data: + return None + + slave_id = data["id"] + + if slave_id in RenderSlave._slave_map: + return RenderSlave._slave_map[slave_id] + else: + slave = RenderSlave() + slave.id = slave_id + slave.name = data["name"] + slave.address = data["address"] + slave.stats = data["stats"] + slave.total_done = data["total_done"] + slave.total_error = data["total_error"] + slave.last_seen = data["last_seen"] + + RenderSlave._slave_map[slave_id] = slave + + return slave + +class RenderJob: + def __init__(self): + self.id = "" + self.name = "" + self.files = [] + self.frames = [] + self.chunks = 0 + self.priority = 0 + self.usage = 0.0 + self.blacklist = [] + self.last_dispatched = 0.0 + + def addFile(self, file_path, start=-1, end=-1): + self.files.append((file_path, start, end)) + + def addFrame(self, frame_number): + frame = RenderFrame(frame_number) + self.frames.append(frame) + return frame + + def __len__(self): + return len(self.frames) + + def countFrames(self, status=QUEUED): + total = 0 + for f in self.frames: + if f.status == status: + total += 1 + + return total + + def countSlaves(self): + return len(set((frame.slave for frame in self.frames if frame.status == DISPATCHED))) + + def framesStatus(self): + results = { + QUEUED: 0, + DISPATCHED: 0, + DONE: 0, + ERROR: 0 + } + + for frame in self.frames: + results[frame.status] += 1 + + return results + + def __contains__(self, frame_number): + for f in self.frames: + if f.number == frame_number: + return True + else: + return False + + def __getitem__(self, frame_number): + for f in self.frames: + if f.number == frame_number: + return f + else: + return None + + def serialize(self, frames = None): + min_frame = min((f.number for f in frames)) if frames else -1 + max_frame = max((f.number for f in frames)) if frames else -1 + return { + "id": self.id, + "name": self.name, + "files": [f for f in self.files if f[1] == -1 or not frames or (f[1] <= min_frame <= f[2] or f[1] <= max_frame <= f[2])], + "frames": [f.serialize() for f in self.frames if not frames or f in frames], + "chunks": self.chunks, + "priority": self.priority, + "usage": self.usage, + "blacklist": self.blacklist, + "last_dispatched": self.last_dispatched 
+ } + + @staticmethod + def materialize(data): + if not data: + return None + + job = RenderJob() + job.id = data["id"] + job.name = data["name"] + job.files = data["files"] + job.frames = [RenderFrame.materialize(f) for f in data["frames"]] + job.chunks = data["chunks"] + job.priority = data["priority"] + job.usage = data["usage"] + job.blacklist = data["blacklist"] + job.last_dispatched = data["last_dispatched"] + + return job + +class RenderFrame: + def __init__(self, number = 0): + self.number = number + self.time = 0 + self.status = QUEUED + self.slave = None + + def statusText(self): + return STATUS_TEXT[self.status] + + def serialize(self): + return { + "number": self.number, + "time": self.time, + "status": self.status, + "slave": None if not self.slave else self.slave.serialize() + } + + @staticmethod + def materialize(data): + if not data: + return None + + frame = RenderFrame() + frame.number = data["number"] + frame.time = data["time"] + frame.status = data["status"] + frame.slave = RenderSlave.materialize(data["slave"]) + + return frame diff --git a/release/scripts/io/netrender/operators.py b/release/scripts/io/netrender/operators.py new file mode 100644 index 00000000000..42d1f6a0b86 --- /dev/null +++ b/release/scripts/io/netrender/operators.py @@ -0,0 +1,423 @@ +import bpy +import sys, os +import http, http.client, http.server, urllib, socket +import webbrowser + +from netrender.utils import * +import netrender.client as client +import netrender.model + +@rnaOperator +class RENDER_OT_netclientanim(bpy.types.Operator): + ''' + Operator documentation text, will be used for the operator tooltip and python docs. + ''' + __idname__ = "render.netclientanim" + __label__ = "Net Render Client Anim" + + # List of operator properties, the attributes will be assigned + # to the class instance from the operator settings before calling. + + __props__ = [] + + def poll(self, context): + return True + + def execute(self, context): + scene = context.scene + + conn = clientConnection(scene) + + if conn: + # Sending file + scene.network_render.job_id = client.clientSendJob(conn, scene, True) + conn.close() + + bpy.ops.screen.render('INVOKE_AREA', animation=True) + + return ('FINISHED',) + + def invoke(self, context, event): + return self.execute(context) + +@rnaOperator +class RENDER_OT_netclientsend(bpy.types.Operator): + ''' + Operator documentation text, will be used for the operator tooltip and python docs. + ''' + __idname__ = "render.netclientsend" + __label__ = "Net Render Client Send" + + # List of operator properties, the attributes will be assigned + # to the class instance from the operator settings before calling. + + __props__ = [] + + def poll(self, context): + return True + + def execute(self, context): + scene = context.scene + + conn = clientConnection(scene) + + if conn: + # Sending file + scene.network_render.job_id = client.clientSendJob(conn, scene, True) + conn.close() + + return ('FINISHED',) + + def invoke(self, context, event): + return self.execute(context) + +@rnaOperator +class RENDER_OT_netclientstatus(bpy.types.Operator): + '''Operator documentation text, will be used for the operator tooltip and python docs.''' + __idname__ = "render.netclientstatus" + __label__ = "Net Render Client Status" + + # List of operator properties, the attributes will be assigned + # to the class instance from the operator settings before calling. 
+ + __props__ = [] + + def poll(self, context): + return True + + def execute(self, context): + netsettings = context.scene.network_render + conn = clientConnection(context.scene) + + if conn: + conn.request("GET", "/status") + + response = conn.getresponse() + print( response.status, response.reason ) + + jobs = (netrender.model.RenderJob.materialize(j) for j in eval(str(response.read(), encoding='utf8'))) + + while(len(netsettings.jobs) > 0): + netsettings.jobs.remove(0) + + bpy.data.netrender_jobs = [] + + for j in jobs: + bpy.data.netrender_jobs.append(j) + netsettings.jobs.add() + job = netsettings.jobs[-1] + + j.results = j.framesStatus() # cache frame status + + job.name = j.name + + return ('FINISHED',) + + def invoke(self, context, event): + return self.execute(context) + +@rnaOperator +class RENDER_OT_netclientblacklistslave(bpy.types.Operator): + '''Operator documentation text, will be used for the operator tooltip and python docs.''' + __idname__ = "render.netclientblacklistslave" + __label__ = "Net Render Client Blacklist Slave" + + # List of operator properties, the attributes will be assigned + # to the class instance from the operator settings before calling. + + __props__ = [] + + def poll(self, context): + return True + + def execute(self, context): + netsettings = context.scene.network_render + + if netsettings.active_slave_index >= 0: + + # deal with data + slave = bpy.data.netrender_slaves.pop(netsettings.active_slave_index) + bpy.data.netrender_blacklist.append(slave) + + # deal with rna + netsettings.slaves_blacklist.add() + netsettings.slaves_blacklist[-1].name = slave.name + + netsettings.slaves.remove(netsettings.active_slave_index) + netsettings.active_slave_index = -1 + + return ('FINISHED',) + + def invoke(self, context, event): + return self.execute(context) + +@rnaOperator +class RENDER_OT_netclientwhitelistslave(bpy.types.Operator): + '''Operator documentation text, will be used for the operator tooltip and python docs.''' + __idname__ = "render.netclientwhitelistslave" + __label__ = "Net Render Client Whitelist Slave" + + # List of operator properties, the attributes will be assigned + # to the class instance from the operator settings before calling. + + __props__ = [] + + def poll(self, context): + return True + + def execute(self, context): + netsettings = context.scene.network_render + + if netsettings.active_blacklisted_slave_index >= 0: + + # deal with data + slave = bpy.data.netrender_blacklist.pop(netsettings.active_blacklisted_slave_index) + bpy.data.netrender_slaves.append(slave) + + # deal with rna + netsettings.slaves.add() + netsettings.slaves[-1].name = slave.name + + netsettings.slaves_blacklist.remove(netsettings.active_blacklisted_slave_index) + netsettings.active_blacklisted_slave_index = -1 + + return ('FINISHED',) + + def invoke(self, context, event): + return self.execute(context) + + +@rnaOperator +class RENDER_OT_netclientslaves(bpy.types.Operator): + '''Operator documentation text, will be used for the operator tooltip and python docs.''' + __idname__ = "render.netclientslaves" + __label__ = "Net Render Client Slaves" + + # List of operator properties, the attributes will be assigned + # to the class instance from the operator settings before calling. 
+ + __props__ = [] + + def poll(self, context): + return True + + def execute(self, context): + netsettings = context.scene.network_render + conn = clientConnection(context.scene) + + if conn: + conn.request("GET", "/slaves") + + response = conn.getresponse() + print( response.status, response.reason ) + + slaves = (netrender.model.RenderSlave.materialize(s) for s in eval(str(response.read(), encoding='utf8'))) + + while(len(netsettings.slaves) > 0): + netsettings.slaves.remove(0) + + bpy.data.netrender_slaves = [] + + for s in slaves: + for i in range(len(bpy.data.netrender_blacklist)): + slave = bpy.data.netrender_blacklist[i] + if slave.id == s.id: + bpy.data.netrender_blacklist[i] = s + netsettings.slaves_blacklist[i].name = s.name + break + else: + bpy.data.netrender_slaves.append(s) + + netsettings.slaves.add() + slave = netsettings.slaves[-1] + slave.name = s.name + + return ('FINISHED',) + + def invoke(self, context, event): + return self.execute(context) + +@rnaOperator +class RENDER_OT_netclientcancel(bpy.types.Operator): + '''Operator documentation text, will be used for the operator tooltip and python docs.''' + __idname__ = "render.netclientcancel" + __label__ = "Net Render Client Cancel" + + # List of operator properties, the attributes will be assigned + # to the class instance from the operator settings before calling. + + __props__ = [] + + def poll(self, context): + netsettings = context.scene.network_render + return netsettings.active_job_index >= 0 and len(netsettings.jobs) > 0 + + def execute(self, context): + netsettings = context.scene.network_render + conn = clientConnection(context.scene) + + if conn: + job = bpy.data.netrender_jobs[netsettings.active_job_index] + + conn.request("POST", "/cancel", headers={"job-id":job.id}) + + response = conn.getresponse() + print( response.status, response.reason ) + + netsettings.jobs.remove(netsettings.active_job_index) + + return ('FINISHED',) + + def invoke(self, context, event): + return self.execute(context) + +@rnaOperator +class RENDER_OT_netclientcancelall(bpy.types.Operator): + '''Operator documentation text, will be used for the operator tooltip and python docs.''' + __idname__ = "render.netclientcancelall" + __label__ = "Net Render Client Cancel All" + + # List of operator properties, the attributes will be assigned + # to the class instance from the operator settings before calling. + + __props__ = [] + + def poll(self, context): + return True + + def execute(self, context): + netsettings = context.scene.network_render + conn = clientConnection(context.scene) + + if conn: + conn.request("POST", "/clear") + + response = conn.getresponse() + print( response.status, response.reason ) + + while(len(netsettings.jobs) > 0): + netsettings.jobs.remove(0) + + return ('FINISHED',) + + def invoke(self, context, event): + return self.execute(context) + +@rnaOperator +class netclientdownload(bpy.types.Operator): + '''Operator documentation text, will be used for the operator tooltip and python docs.''' + __idname__ = "render.netclientdownload" + __label__ = "Net Render Client Download" + + # List of operator properties, the attributes will be assigned + # to the class instance from the operator settings before calling. 
+ + __props__ = [] + + def poll(self, context): + netsettings = context.scene.network_render + return netsettings.active_job_index >= 0 and len(netsettings.jobs) > 0 + + def execute(self, context): + netsettings = context.scene.network_render + rd = context.scene.render_data + + conn = clientConnection(context.scene) + + if conn: + job = bpy.data.netrender_jobs[netsettings.active_job_index] + + for frame in job.frames: + client.requestResult(conn, job.id, frame.number) + response = conn.getresponse() + + if response.status != http.client.OK: + print("missing", frame.number) + continue + + print("got back", frame.number) + + f = open(netsettings.path + "%06d" % frame.number + ".exr", "wb") + buf = response.read(1024) + + while buf: + f.write(buf) + buf = response.read(1024) + + f.close() + + conn.close() + + return ('FINISHED',) + + def invoke(self, context, event): + return self.execute(context) + +@rnaOperator +class netclientscan(bpy.types.Operator): + '''Operator documentation text, will be used for the operator tooltip and python docs.''' + __idname__ = "render.netclientscan" + __label__ = "Net Render Client Scan" + + # List of operator properties, the attributes will be assigned + # to the class instance from the operator settings before calling. + + __props__ = [] + + def poll(self, context): + return True + + def execute(self, context): + netsettings = context.scene.network_render + + try: + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) + s.settimeout(30) + + s.bind(('', 8000)) + + buf, address = s.recvfrom(64) + + print("received:", buf) + + netsettings.server_address = address[0] + netsettings.server_port = int(str(buf, encoding='utf8')) + except socket.timeout: + print("no server info") + + return ('FINISHED',) + + def invoke(self, context, event): + return self.execute(context) + +@rnaOperator +class netclientweb(bpy.types.Operator): + '''Operator documentation text, will be used for the operator tooltip and python docs.''' + __idname__ = "render.netclientweb" + __label__ = "Net Render Client Web" + + # List of operator properties, the attributes will be assigned + # to the class instance from the operator settings before calling. 
+ + __props__ = [] + + def poll(self, context): + return True + + def execute(self, context): + netsettings = context.scene.network_render + + + # open connection to make sure server exists + conn = clientConnection(context.scene) + + if conn: + conn.close() + + webbrowser.open("http://%s:%i" % (netsettings.server_address, netsettings.server_port)) + + return ('FINISHED',) + + def invoke(self, context, event): + return self.execute(context) diff --git a/release/scripts/io/netrender/slave.py b/release/scripts/io/netrender/slave.py new file mode 100644 index 00000000000..657e31001e0 --- /dev/null +++ b/release/scripts/io/netrender/slave.py @@ -0,0 +1,207 @@ +import sys, os, platform +import http, http.client, http.server, urllib +import subprocess, time + +from netrender.utils import * +import netrender.model + +CANCEL_POLL_SPEED = 2 +MAX_TIMEOUT = 10 +INCREMENT_TIMEOUT = 1 + +if platform.system() == 'Windows' and platform.version() >= '5': # Error mode is only available on Win2k or higher, that's version 5 + import ctypes + def SetErrorMode(): + val = ctypes.windll.kernel32.SetErrorMode(0x0002) + ctypes.windll.kernel32.SetErrorMode(val | 0x0002) + return val + + def RestoreErrorMode(val): + ctypes.windll.kernel32.SetErrorMode(val) +else: + def SetErrorMode(): + return 0 + + def RestoreErrorMode(val): + pass + +def slave_Info(): + sysname, nodename, release, version, machine, processor = platform.uname() + slave = netrender.model.RenderSlave() + slave.name = nodename + slave.stats = sysname + " " + release + " " + machine + " " + processor + return slave + +def testCancel(conn, job_id, frame_number): + conn.request("HEAD", "/status", headers={"job-id":job_id, "job-frame": str(frame_number)}) + response = conn.getresponse() + + # cancelled if job isn't found anymore + if response.status == http.client.NO_CONTENT: + return True + else: + return False + +def testFile(conn, job_id, slave_id, JOB_PREFIX, file_path, main_path = None): + job_full_path = prefixPath(JOB_PREFIX, file_path, main_path) + + if not os.path.exists(job_full_path): + temp_path = JOB_PREFIX + "slave.temp.blend" + conn.request("GET", "/file", headers={"job-id": job_id, "slave-id":slave_id, "job-file":file_path}) + response = conn.getresponse() + + if response.status != http.client.OK: + return None # file for job not returned by server, need to return an error code to server + + f = open(temp_path, "wb") + buf = response.read(1024) + + while buf: + f.write(buf) + buf = response.read(1024) + + f.close() + + os.renames(temp_path, job_full_path) + + return job_full_path + + +def render_slave(engine, scene): + netsettings = scene.network_render + timeout = 1 + + engine.update_stats("", "Network render node initiation") + + conn = clientConnection(scene) + + if conn: + conn.request("POST", "/slave", repr(slave_Info().serialize())) + response = conn.getresponse() + + slave_id = response.getheader("slave-id") + + NODE_PREFIX = netsettings.path + "slave_" + slave_id + os.sep + if not os.path.exists(NODE_PREFIX): + os.mkdir(NODE_PREFIX) + + while not engine.test_break(): + + conn.request("GET", "/job", headers={"slave-id":slave_id}) + response = conn.getresponse() + + if response.status == http.client.OK: + timeout = 1 # reset timeout on new job + + job = netrender.model.RenderJob.materialize(eval(str(response.read(), encoding='utf8'))) + + JOB_PREFIX = NODE_PREFIX + "job_" + job.id + os.sep + if not os.path.exists(JOB_PREFIX): + os.mkdir(JOB_PREFIX) + + job_path = job.files[0][0] # data in files have format (path, start, end) + 
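# The entries in job.files are (filepath, start, end) tuples: the first entry
# is the main file to render, and start/end of -1 mark files needed for every
# frame (RenderJob.serialize() above only ships entries whose frame range
# overlaps the dispatched frames). testFile() downloads each entry that is not
# already present under JOB_PREFIX, and prefixPath() rebuilds the original
# directory layout relative to the main file's directory.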
main_path, main_file = os.path.split(job_path) + + job_full_path = testFile(conn, job.id, slave_id, JOB_PREFIX, job_path) + print("Fullpath", job_full_path) + print("File:", main_file, "and %i other files" % (len(job.files) - 1,)) + engine.update_stats("", "Render File", main_file, "for job", job.id) + + for file_path, start, end in job.files[1:]: + print("\t", file_path) + testFile(conn, job.id, slave_id, JOB_PREFIX, file_path, main_path) + + frame_args = [] + + for frame in job.frames: + print("frame", frame.number) + frame_args += ["-f", str(frame.number)] + + # announce log to master + logfile = netrender.model.LogFile(job.id, [frame.number for frame in job.frames]) + conn.request("POST", "/log", bytes(repr(logfile.serialize()), encoding='utf8'), headers={"slave-id":slave_id}) + response = conn.getresponse() + + first_frame = job.frames[0].number + + # start render + start_t = time.time() + + val = SetErrorMode() + process = subprocess.Popen([sys.argv[0], "-b", job_full_path, "-o", JOB_PREFIX + "######", "-E", "BLENDER_RENDER", "-F", "MULTILAYER"] + frame_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + RestoreErrorMode(val) + + headers = {"job-id":job.id, "slave-id":slave_id} + + cancelled = False + stdout = bytes() + run_t = time.time() + while process.poll() == None and not cancelled: + stdout += process.stdout.read(32) + current_t = time.time() + cancelled = engine.test_break() + if current_t - run_t > CANCEL_POLL_SPEED: + + # update logs if needed + if stdout: + # (only need to update on one frame, they are linked + headers["job-frame"] = str(first_frame) + conn.request("PUT", "/log", stdout, headers=headers) + response = conn.getresponse() + + stdout = bytes() + + run_t = current_t + if testCancel(conn, job.id, first_frame): + cancelled = True + + if cancelled: + # kill process if needed + if process.poll() == None: + process.terminate() + continue # to next frame + + total_t = time.time() - start_t + + avg_t = total_t / len(job.frames) + + status = process.returncode + + print("status", status) + + # flush the rest of the logs + if stdout: + # (only need to update on one frame, they are linked + headers["job-frame"] = str(first_frame) + conn.request("PUT", "/log", stdout, headers=headers) + response = conn.getresponse() + + headers = {"job-id":job.id, "slave-id":slave_id, "job-time":str(avg_t)} + + if status == 0: # non zero status is error + headers["job-result"] = str(DONE) + for frame in job.frames: + headers["job-frame"] = str(frame.number) + # send result back to server + f = open(JOB_PREFIX + "%06d" % frame.number + ".exr", 'rb') + conn.request("PUT", "/render", f, headers=headers) + f.close() + response = conn.getresponse() + else: + headers["job-result"] = str(ERROR) + for frame in job.frames: + headers["job-frame"] = str(frame.number) + # send error result back to server + conn.request("PUT", "/render", headers=headers) + response = conn.getresponse() + else: + if timeout < MAX_TIMEOUT: + timeout += INCREMENT_TIMEOUT + + for i in range(timeout): + time.sleep(1) + if engine.test_break(): + conn.close() + return + + conn.close() diff --git a/release/scripts/io/netrender/ui.py b/release/scripts/io/netrender/ui.py new file mode 100644 index 00000000000..7681d4865e9 --- /dev/null +++ b/release/scripts/io/netrender/ui.py @@ -0,0 +1,321 @@ +import bpy +import sys, os +import http, http.client, http.server, urllib +import subprocess, shutil, time, hashlib + +import netrender.slave as slave +import netrender.master as master + +from netrender.utils import * + +VERSION = 
b"0.3" + +PATH_PREFIX = "/tmp/" + +QUEUED = 0 +DISPATCHED = 1 +DONE = 2 +ERROR = 3 + +class RenderButtonsPanel(bpy.types.Panel): + __space_type__ = "PROPERTIES" + __region_type__ = "WINDOW" + __context__ = "scene" + # COMPAT_ENGINES must be defined in each subclass, external engines can add themselves here + + def poll(self, context): + rd = context.scene.render_data + return (rd.use_game_engine==False) and (rd.engine in self.COMPAT_ENGINES) + +# Setting panel, use in the scene for now. +@rnaType +class SCENE_PT_network_settings(RenderButtonsPanel): + __label__ = "Network Settings" + COMPAT_ENGINES = set(['NET_RENDER']) + + def draw_header(self, context): + layout = self.layout + scene = context.scene + + def draw(self, context): + layout = self.layout + + scene = context.scene + rd = scene.render_data + + layout.active = True + + split = layout.split() + + col = split.column() + + col.itemR(scene.network_render, "mode") + col.itemR(scene.network_render, "path") + col.itemR(scene.network_render, "server_address") + col.itemR(scene.network_render, "server_port") + + if scene.network_render.mode == "RENDER_MASTER": + col.itemR(scene.network_render, "server_broadcast") + else: + col.itemO("render.netclientscan", icon="ICON_FILE_REFRESH", text="") + +@rnaType +class SCENE_PT_network_job(RenderButtonsPanel): + __label__ = "Job Settings" + COMPAT_ENGINES = set(['NET_RENDER']) + + def poll(self, context): + scene = context.scene + return super().poll(context) and scene.network_render.mode == "RENDER_CLIENT" + + def draw(self, context): + layout = self.layout + + scene = context.scene + rd = scene.render_data + + layout.active = True + + split = layout.split() + + col = split.column() + + col.itemO("render.netclientanim", icon='ICON_RENDER_ANIMATION', text="Animaton on network") + col.itemO("render.netclientsend", icon="ICON_FILE_BLEND", text="Send job") + col.itemO("render.netclientweb", icon="ICON_QUESTION", text="Open Master Monitor") + col.itemR(scene.network_render, "job_name") + col.itemR(scene.network_render, "priority") + col.itemR(scene.network_render, "chunks") + +@rnaType +class SCENE_PT_network_slaves(RenderButtonsPanel): + __label__ = "Slaves Status" + COMPAT_ENGINES = set(['NET_RENDER']) + + def poll(self, context): + scene = context.scene + return super().poll(context) and scene.network_render.mode == "RENDER_CLIENT" + + def draw(self, context): + layout = self.layout + + scene = context.scene + netsettings = scene.network_render + + row = layout.row() + row.template_list(netsettings, "slaves", netsettings, "active_slave_index", rows=2) + + col = row.column() + + subcol = col.column(align=True) + subcol.itemO("render.netclientslaves", icon="ICON_FILE_REFRESH", text="") + subcol.itemO("render.netclientblacklistslave", icon="ICON_ZOOMOUT", text="") + + if len(bpy.data.netrender_slaves) == 0 and len(netsettings.slaves) > 0: + while(len(netsettings.slaves) > 0): + netsettings.slaves.remove(0) + + if netsettings.active_slave_index >= 0 and len(netsettings.slaves) > 0: + layout.itemS() + + slave = bpy.data.netrender_slaves[netsettings.active_slave_index] + + layout.itemL(text="Name: " + slave.name) + layout.itemL(text="Address: " + slave.address[0]) + layout.itemL(text="Seen: " + time.ctime(slave.last_seen)) + layout.itemL(text="Stats: " + slave.stats) + +@rnaType +class SCENE_PT_network_slaves_blacklist(RenderButtonsPanel): + __label__ = "Slaves Blacklist" + COMPAT_ENGINES = set(['NET_RENDER']) + + def poll(self, context): + scene = context.scene + return super().poll(context) and 
scene.network_render.mode == "RENDER_CLIENT" + + def draw(self, context): + layout = self.layout + + scene = context.scene + netsettings = scene.network_render + + row = layout.row() + row.template_list(netsettings, "slaves_blacklist", netsettings, "active_blacklisted_slave_index", rows=2) + + col = row.column() + + subcol = col.column(align=True) + subcol.itemO("render.netclientwhitelistslave", icon="ICON_ZOOMOUT", text="") + + if len(bpy.data.netrender_blacklist) == 0 and len(netsettings.slaves_blacklist) > 0: + while(len(netsettings.slaves_blacklist) > 0): + netsettings.slaves_blacklist.remove(0) + + if netsettings.active_blacklisted_slave_index >= 0 and len(netsettings.slaves_blacklist) > 0: + layout.itemS() + + slave = bpy.data.netrender_blacklist[netsettings.active_blacklisted_slave_index] + + layout.itemL(text="Name: " + slave.name) + layout.itemL(text="Address: " + slave.address[0]) + layout.itemL(text="Seen: " + slave.last_seen) + layout.itemL(text="Stats: " + time.ctime(slave.stats)) + +@rnaType +class SCENE_PT_network_jobs(RenderButtonsPanel): + __label__ = "Jobs" + COMPAT_ENGINES = set(['NET_RENDER']) + + def poll(self, context): + scene = context.scene + return super().poll(context) and scene.network_render.mode == "RENDER_CLIENT" + + def draw(self, context): + layout = self.layout + + scene = context.scene + netsettings = scene.network_render + + row = layout.row() + row.template_list(netsettings, "jobs", netsettings, "active_job_index", rows=2) + + col = row.column() + + subcol = col.column(align=True) + subcol.itemO("render.netclientstatus", icon="ICON_FILE_REFRESH", text="") + subcol.itemO("render.netclientcancel", icon="ICON_ZOOMOUT", text="") + subcol.itemO("render.netclientcancelall", icon="ICON_PANEL_CLOSE", text="") + subcol.itemO("render.netclientdownload", icon='ICON_RENDER_ANIMATION', text="") + + if len(bpy.data.netrender_jobs) == 0 and len(netsettings.jobs) > 0: + while(len(netsettings.jobs) > 0): + netsettings.jobs.remove(0) + + if netsettings.active_job_index >= 0 and len(netsettings.jobs) > 0: + layout.itemS() + + job = bpy.data.netrender_jobs[netsettings.active_job_index] + + layout.itemL(text="Name: %s" % job.name) + layout.itemL(text="Length: %04i" % len(job)) + layout.itemL(text="Done: %04i" % job.results[DONE]) + layout.itemL(text="Error: %04i" % job.results[ERROR]) + +@rnaType +class NetRenderSettings(bpy.types.IDPropertyGroup): + pass + +@rnaType +class NetRenderSlave(bpy.types.IDPropertyGroup): + pass + +@rnaType +class NetRenderJob(bpy.types.IDPropertyGroup): + pass + +bpy.types.Scene.PointerProperty(attr="network_render", type=NetRenderSettings, name="Network Render", description="Network Render Settings") + +NetRenderSettings.StringProperty( attr="server_address", + name="Server address", + description="IP or name of the master render server", + maxlen = 128, + default = "[default]") + +NetRenderSettings.IntProperty( attr="server_port", + name="Server port", + description="port of the master render server", + default = 8000, + min=1, + max=65535) + +NetRenderSettings.BoolProperty( attr="server_broadcast", + name="Broadcast server address", + description="broadcast server address on local network", + default = True) + +if os.name == 'nt': + NetRenderSettings.StringProperty( attr="path", + name="Path", + description="Path for temporary files", + maxlen = 128, + default = "C:/tmp/") +else: + NetRenderSettings.StringProperty( attr="path", + name="Path", + description="Path for temporary files", + maxlen = 128, + default = "/tmp/") + 
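These module-level calls hang a NetRenderSettings group off every Scene; the panels and operators above read the values back through scene.network_render. A minimal sketch of that access pattern, assuming the add-on has been registered as in this patch:

import bpy

scene = bpy.context.scene
netsettings = scene.network_render  # PointerProperty registered above

# The attr= names used in the registrations become attributes on the group.
print(netsettings.server_address, netsettings.server_port, netsettings.path)

# Switching the mode enum decides which of the panels above pass their poll().
netsettings.mode = "RENDER_MASTER"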
+NetRenderSettings.StringProperty( attr="job_name", + name="Job name", + description="Name of the job", + maxlen = 128, + default = "[default]") + +NetRenderSettings.IntProperty( attr="chunks", + name="Chunks", + description="Number of frame to dispatch to each slave in one chunk", + default = 5, + min=1, + max=65535) + +NetRenderSettings.IntProperty( attr="priority", + name="Priority", + description="Priority of the job", + default = 1, + min=1, + max=10) + +NetRenderSettings.StringProperty( attr="job_id", + name="Network job id", + description="id of the last sent render job", + maxlen = 64, + default = "") + +NetRenderSettings.IntProperty( attr="active_slave_index", + name="Index of the active slave", + description="", + default = -1, + min= -1, + max=65535) + +NetRenderSettings.IntProperty( attr="active_blacklisted_slave_index", + name="Index of the active slave", + description="", + default = -1, + min= -1, + max=65535) + +NetRenderSettings.IntProperty( attr="active_job_index", + name="Index of the active job", + description="", + default = -1, + min= -1, + max=65535) + +NetRenderSettings.EnumProperty(attr="mode", + items=( + ("RENDER_CLIENT", "Client", "Act as render client"), + ("RENDER_MASTER", "Master", "Act as render master"), + ("RENDER_SLAVE", "Slave", "Act as render slave"), + ), + name="network mode", + description="mode of operation of this instance", + default="RENDER_CLIENT") + +NetRenderSettings.CollectionProperty(attr="slaves", type=NetRenderSlave, name="Slaves", description="") +NetRenderSettings.CollectionProperty(attr="slaves_blacklist", type=NetRenderSlave, name="Slaves Blacklist", description="") +NetRenderSettings.CollectionProperty(attr="jobs", type=NetRenderJob, name="Job List", description="") + +NetRenderSlave.StringProperty( attr="name", + name="Name of the slave", + description="", + maxlen = 64, + default = "") + +NetRenderJob.StringProperty( attr="name", + name="Name of the job", + description="", + maxlen = 128, + default = "") diff --git a/release/scripts/io/netrender/utils.py b/release/scripts/io/netrender/utils.py new file mode 100644 index 00000000000..06393a738a0 --- /dev/null +++ b/release/scripts/io/netrender/utils.py @@ -0,0 +1,86 @@ +import bpy +import sys, os +import re +import http, http.client, http.server, urllib +import subprocess, shutil, time, hashlib + +import netrender.model + +VERSION = b"0.5" + +# Jobs status +JOB_WAITING = 0 # before all data has been entered +JOB_PAUSED = 1 # paused by user +JOB_FINISHED = 2 # finished rendering +JOB_QUEUED = 3 # ready to be dispatched + +# Frames status +QUEUED = 0 +DISPATCHED = 1 +DONE = 2 +ERROR = 3 + +STATUS_TEXT = { + QUEUED: "Queued", + DISPATCHED: "Dispatched", + DONE: "Done", + ERROR: "Error" + } + +def rnaType(rna_type): + bpy.types.register(rna_type) + return rna_type + +def rnaOperator(rna_op): + bpy.ops.add(rna_op) + return rna_op + +def clientConnection(scene): + netsettings = scene.network_render + + if netsettings.server_address == "[default]": + bpy.ops.render.netclientscan() + + conn = http.client.HTTPConnection(netsettings.server_address, netsettings.server_port) + + if clientVerifyVersion(conn): + return conn + else: + conn.close() + return None + +def clientVerifyVersion(conn): + conn.request("GET", "/version") + response = conn.getresponse() + + if response.status != http.client.OK: + conn.close() + return False + + server_version = response.read() + + if server_version != VERSION: + print("Incorrect server version!") + print("expected", VERSION, "received", server_version) + 
return False + + return True + +def prefixPath(prefix_directory, file_path, prefix_path): + if os.path.isabs(file_path): + # if an absolute path, make sure path exists, if it doesn't, use relative local path + full_path = file_path + if not os.path.exists(full_path): + p, n = os.path.split(full_path) + + if prefix_path and p.startswith(prefix_path): + directory = prefix_directory + p[len(prefix_path):] + full_path = directory + n + if not os.path.exists(directory): + os.mkdir(directory) + else: + full_path = prefix_directory + n + else: + full_path = prefix_directory + file_path + + return full_path diff --git a/release/scripts/lightwave_export.py b/release/scripts/lightwave_export.py deleted file mode 100644 index bbfb9649c69..00000000000 --- a/release/scripts/lightwave_export.py +++ /dev/null @@ -1,707 +0,0 @@ -#!BPY - -""" -Name: 'LightWave (.lwo)...' -Blender: 243 -Group: 'Export' -Tooltip: 'Export selected meshes to LightWave File Format (.lwo)' -""" - -__author__ = "Anthony D'Agostino (Scorpius)" -__url__ = ("blender", "blenderartists.org", -"Author's homepage, http://www.redrival.com/scorpius") -__version__ = "Part of IOSuite 0.5" - -__bpydoc__ = """\ -This script exports meshes to LightWave file format. - -LightWave is a full-featured commercial modeling and rendering -application. The lwo file format is composed of 'chunks,' is well -defined, and easy to read and write. It is similar in structure to the -trueSpace cob format. - -Usage:
- Select meshes to be exported and run this script from "File->Export" menu. - -Supported:
- UV Coordinates, Meshes, Materials, Material Indices, Specular -Highlights, and Vertex Colors. For added functionality, each object is -placed on its own layer. Someone added the CLIP chunk and imagename support. - -Missing:
- Not too much, I hope! :). - -Known issues:
- Empty objects crash has been fixed. - -Notes:
- For compatibility reasons, it also reads lwo files in the old LW -v5.5 format. -""" - -# $Id$ -# -# +---------------------------------------------------------+ -# | Copyright (c) 2002 Anthony D'Agostino | -# | http://www.redrival.com/scorpius | -# | scorpius@netzero.com | -# | April 21, 2002 | -# | Read and write LightWave Object File Format (*.lwo) | -# +---------------------------------------------------------+ - -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** - -import Blender -import BPyMesh -try: import struct -except: struct = None -try: import cStringIO -except: cStringIO = None -try: import operator -except: operator = None - -VCOL_NAME = "\251 Per-Face Vertex Colors" -DEFAULT_NAME = "\251 Blender Default" -# ============================== -# === Write LightWave Format === -# ============================== -def write(filename): - start = Blender.sys.time() - file = open(filename, "wb") - - scn = Blender.Scene.GetCurrent() - objects = list(scn.objects.context) - - if not objects: - Blender.Draw.PupMenu('Error%t|No Objects selected') - return - - try: objects.sort( key = lambda a: a.name ) - except: objects.sort(lambda a,b: cmp(a.name, b.name)) - - text = generate_text() - desc = generate_desc() - icon = "" #generate_icon() - - meshes = [] - mesh_object_name_lookup = {} # for name lookups only - - for obj in objects: - mesh = BPyMesh.getMeshFromObject(obj, None, True, False, scn) - if mesh: - mesh.transform(obj.matrixWorld) - meshes.append(mesh) - mesh_object_name_lookup[mesh] = obj.name - del obj - - material_names = get_used_material_names(meshes) - tags = generate_tags(material_names) - surfs = generate_surfs(material_names) - chunks = [text, desc, icon, tags] - - meshdata = cStringIO.StringIO() - - layer_index = 0 - - for mesh in meshes: - layr = generate_layr(mesh_object_name_lookup[mesh], layer_index) - pnts = generate_pnts(mesh) - bbox = generate_bbox(mesh) - pols = generate_pols(mesh) - ptag = generate_ptag(mesh, material_names) - clip = generate_clip(mesh, material_names) - - if mesh.faceUV: - vmad_uv = generate_vmad_uv(mesh) # per face - - if mesh.vertexColors: - #if meshtools.average_vcols: - # vmap_vc = generate_vmap_vc(mesh) # per vert - #else: - vmad_vc = generate_vmad_vc(mesh) # per face - - write_chunk(meshdata, "LAYR", layr); chunks.append(layr) - write_chunk(meshdata, "PNTS", pnts); chunks.append(pnts) - write_chunk(meshdata, "BBOX", bbox); chunks.append(bbox) - write_chunk(meshdata, "POLS", pols); chunks.append(pols) - write_chunk(meshdata, "PTAG", ptag); chunks.append(ptag) - - if mesh.vertexColors: - #if meshtools.average_vcols: - # write_chunk(meshdata, "VMAP", vmap_vc) - # chunks.append(vmap_vc) - #else: - write_chunk(meshdata, "VMAD", vmad_vc) - chunks.append(vmad_vc) - - if mesh.faceUV: - write_chunk(meshdata, "VMAD", 
vmad_uv) - chunks.append(vmad_uv) - write_chunk(meshdata, "CLIP", clip) - chunks.append(clip) - - layer_index += 1 - mesh.verts = None # save some ram - - del mesh_object_name_lookup - - for surf in surfs: - chunks.append(surf) - - write_header(file, chunks) - write_chunk(file, "ICON", icon) - write_chunk(file, "TEXT", text) - write_chunk(file, "DESC", desc) - write_chunk(file, "TAGS", tags) - file.write(meshdata.getvalue()); meshdata.close() - for surf in surfs: - write_chunk(file, "SURF", surf) - write_chunk(file, "DATE", "August 19, 2005") - - Blender.Window.DrawProgressBar(1.0, "") # clear progressbar - file.close() - print '\a\r', - print "Successfully exported %s in %.3f seconds" % (filename.split('\\')[-1].split('/')[-1], Blender.sys.time() - start) - - -# ======================================= -# === Generate Null-Terminated String === -# ======================================= -def generate_nstring(string): - if len(string)%2 == 0: # even - string += "\0\0" - else: # odd - string += "\0" - return string - -# =============================== -# === Get Used Material Names === -# =============================== -def get_used_material_names(meshes): - matnames = {} - for mesh in meshes: - if (not mesh.materials) and mesh.vertexColors: - # vcols only - matnames[VCOL_NAME] = None - - elif mesh.materials and (not mesh.vertexColors): - # materials only - for material in mesh.materials: - if material: - matnames[material.name] = None - elif (not mesh.materials) and (not mesh.vertexColors): - # neither - matnames[DEFAULT_NAME] = None - else: - # both - for material in mesh.materials: - if material: - matnames[material.name] = None - return matnames.keys() - -# ========================================= -# === Generate Tag Strings (TAGS Chunk) === -# ========================================= -def generate_tags(material_names): - if material_names: - material_names = map(generate_nstring, material_names) - tags_data = reduce(operator.add, material_names) - else: - tags_data = generate_nstring(''); - return tags_data - -# ======================== -# === Generate Surface === -# ======================== -def generate_surface(name): - #if name.find("\251 Per-") == 0: - # return generate_vcol_surf(mesh) - if name == DEFAULT_NAME: - return generate_default_surf() - else: - return generate_surf(name) - -# ====================== -# === Generate Surfs === -# ====================== -def generate_surfs(material_names): - return map(generate_surface, material_names) - -# =================================== -# === Generate Layer (LAYR Chunk) === -# =================================== -def generate_layr(name, idx): - data = cStringIO.StringIO() - data.write(struct.pack(">h", idx)) # layer number - data.write(struct.pack(">h", 0)) # flags - data.write(struct.pack(">fff", 0, 0, 0)) # pivot - data.write(generate_nstring(name)) # name - return data.getvalue() - -# =================================== -# === Generate Verts (PNTS Chunk) === -# =================================== -def generate_pnts(mesh): - data = cStringIO.StringIO() - for i, v in enumerate(mesh.verts): - if not i%100: - Blender.Window.DrawProgressBar(float(i)/len(mesh.verts), "Writing Verts") - x, y, z = v.co - data.write(struct.pack(">fff", x, z, y)) - return data.getvalue() - -# ========================================== -# === Generate Bounding Box (BBOX Chunk) === -# ========================================== -def generate_bbox(mesh): - data = cStringIO.StringIO() - # need to transform verts here - if mesh.verts: - nv = [v.co for v in 
mesh.verts] - xx = [ co[0] for co in nv ] - yy = [ co[1] for co in nv ] - zz = [ co[2] for co in nv ] - else: - xx = yy = zz = [0.0,] - - data.write(struct.pack(">6f", min(xx), min(zz), min(yy), max(xx), max(zz), max(yy))) - return data.getvalue() - -# ======================================== -# === Average All Vertex Colors (Fast) === -# ======================================== -''' -def average_vertexcolors(mesh): - vertexcolors = {} - vcolor_add = lambda u, v: [u[0]+v[0], u[1]+v[1], u[2]+v[2], u[3]+v[3]] - vcolor_div = lambda u, s: [u[0]/s, u[1]/s, u[2]/s, u[3]/s] - for i, f in enumerate(mesh.faces): # get all vcolors that share this vertex - if not i%100: - Blender.Window.DrawProgressBar(float(i)/len(mesh.verts), "Finding Shared VColors") - col = f.col - for j in xrange(len(f)): - index = f[j].index - color = col[j] - r,g,b = color.r, color.g, color.b - vertexcolors.setdefault(index, []).append([r,g,b,255]) - i = 0 - for index, value in vertexcolors.iteritems(): # average them - if not i%100: - Blender.Window.DrawProgressBar(float(i)/len(mesh.verts), "Averaging Vertex Colors") - vcolor = [0,0,0,0] # rgba - for v in value: - vcolor = vcolor_add(vcolor, v) - shared = len(value) - value[:] = vcolor_div(vcolor, shared) - i+=1 - return vertexcolors -''' - -# ==================================================== -# === Generate Per-Vert Vertex Colors (VMAP Chunk) === -# ==================================================== -# Blender now has all vcols per face -""" -def generate_vmap_vc(mesh): - data = cStringIO.StringIO() - data.write("RGB ") # type - data.write(struct.pack(">H", 3)) # dimension - data.write(generate_nstring("Blender's Vertex Colors")) # name - vertexcolors = average_vertexcolors(mesh) - for i in xrange(len(vertexcolors)): - try: r, g, b, a = vertexcolors[i] # has a face user - except: r, g, b, a = 255,255,255,255 - data.write(struct.pack(">H", i)) # vertex index - data.write(struct.pack(">fff", r/255.0, g/255.0, b/255.0)) - return data.getvalue() -""" - -# ==================================================== -# === Generate Per-Face Vertex Colors (VMAD Chunk) === -# ==================================================== -def generate_vmad_vc(mesh): - data = cStringIO.StringIO() - data.write("RGB ") # type - data.write(struct.pack(">H", 3)) # dimension - data.write(generate_nstring("Blender's Vertex Colors")) # name - for i, f in enumerate(mesh.faces): - if not i%100: - Blender.Window.DrawProgressBar(float(i)/len(mesh.faces), "Writing Vertex Colors") - col = f.col - f_v = f.v - for j in xrange(len(f)-1, -1, -1): # Reverse order - r,g,b, dummy = tuple(col[j]) - data.write(struct.pack(">H", f_v[j].index)) # vertex index - data.write(struct.pack(">H", i)) # face index - data.write(struct.pack(">fff", r/255.0, g/255.0, b/255.0)) - return data.getvalue() - -# ================================================ -# === Generate Per-Face UV Coords (VMAD Chunk) === -# ================================================ -def generate_vmad_uv(mesh): - layers = mesh.getUVLayerNames() - org_uv = mesh.activeUVLayer - for l in layers: - mesh.activeUVLayer = l - data = cStringIO.StringIO() - data.write("TXUV") # type - data.write(struct.pack(">H", 2)) # dimension - data.write(generate_nstring(l)) # name - for i, f in enumerate(mesh.faces): - if not i%100: - Blender.Window.DrawProgressBar(float(i)/len(mesh.faces), "Writing UV Coordinates") - - uv = f.uv - f_v = f.v - for j in xrange(len(f)-1, -1, -1): # Reverse order - U,V = uv[j] - v = f_v[j].index - data.write(struct.pack(">H", v)) # vertex index - 
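
For reference, a minimal standalone sketch of the coordinate handling used by generate_pnts and generate_bbox, assuming verts is a plain list of (x, y, z) tuples rather than Blender mesh data (pack_pnts and pack_bbox are illustrative names, not functions from this script): LightWave is Y-up while Blender is Z-up, so points are packed as (x, z, y) and the bounding box stores the swapped minimum corner followed by the maximum corner.

    import struct

    def pack_pnts(verts):
        # big-endian floats written as (x, z, y): Blender Z-up to LightWave Y-up
        return "".join([struct.pack(">fff", x, z, y) for (x, y, z) in verts])

    def pack_bbox(verts):
        xx = [v[0] for v in verts]
        yy = [v[1] for v in verts]
        zz = [v[2] for v in verts]
        # minimum corner then maximum corner, with the same y/z swap as PNTS
        return struct.pack(">6f", min(xx), min(zz), min(yy),
                                  max(xx), max(zz), max(yy))
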
data.write(struct.pack(">H", i)) # face index - data.write(struct.pack(">ff", U, V)) - - mesh.activeUVLayer = org_uv - return data.getvalue() - -# ====================================== -# === Generate Variable-Length Index === -# ====================================== -def generate_vx(index): - if index < 0xFF00: - value = struct.pack(">H", index) # 2-byte index - else: - value = struct.pack(">L", index | 0xFF000000) # 4-byte index - return value - -# =================================== -# === Generate Faces (POLS Chunk) === -# =================================== -def generate_pols(mesh): - data = cStringIO.StringIO() - data.write("FACE") # polygon type - for i,f in enumerate(mesh.faces): - if not i%100: - Blender.Window.DrawProgressBar(float(i)/len(mesh.faces), "Writing Faces") - data.write(struct.pack(">H", len(f))) # numfaceverts - numfaceverts = len(f) - f_v = f.v - for j in xrange(numfaceverts-1, -1, -1): # Reverse order - data.write(generate_vx(f_v[j].index)) - return data.getvalue() - -# ================================================= -# === Generate Polygon Tag Mapping (PTAG Chunk) === -# ================================================= -def generate_ptag(mesh, material_names): - - def surf_indicies(mat): - try: - if mat: - return material_names.index(mat.name) - except: - pass - - return 0 - - - data = cStringIO.StringIO() - data.write("SURF") # polygon tag type - mesh_materials = mesh.materials - mesh_surfindicies = [surf_indicies(mat) for mat in mesh_materials] - - try: VCOL_NAME_SURF_INDEX = material_names.index(VCOL_NAME) - except: VCOL_NAME_SURF_INDEX = 0 - - try: DEFAULT_NAME_SURF_INDEX = material_names.index(DEFAULT_NAME) - except: DEFAULT_NAME_SURF_INDEX = 0 - len_mat = len(mesh_materials) - for i, f in enumerate(mesh.faces): # numfaces - f_mat = f.mat - if f_mat >= len_mat: f_mat = 0 # Rare annoying eror - - - if not i%100: - Blender.Window.DrawProgressBar(float(i)/len(mesh.faces), "Writing Surface Indices") - - data.write(generate_vx(i)) - if (not mesh_materials) and mesh.vertexColors: # vcols only - surfidx = VCOL_NAME_SURF_INDEX - elif mesh_materials and not mesh.vertexColors: # materials only - surfidx = mesh_surfindicies[f_mat] - elif (not mesh_materials) and (not mesh.vertexColors): # neither - surfidx = DEFAULT_NAME_SURF_INDEX - else: # both - surfidx = mesh_surfindicies[f_mat] - - data.write(struct.pack(">H", surfidx)) # surface index - return data.getvalue() - -# =================================================== -# === Generate VC Surface Definition (SURF Chunk) === -# =================================================== -def generate_vcol_surf(mesh): - data = cStringIO.StringIO() - if mesh.vertexColors: - surface_name = generate_nstring(VCOL_NAME) - data.write(surface_name) - data.write("\0\0") - - data.write("COLR") - data.write(struct.pack(">H", 14)) - data.write(struct.pack(">fffH", 1, 1, 1, 0)) - - data.write("DIFF") - data.write(struct.pack(">H", 6)) - data.write(struct.pack(">fH", 0.0, 0)) - - data.write("LUMI") - data.write(struct.pack(">H", 6)) - data.write(struct.pack(">fH", 1.0, 0)) - - data.write("VCOL") - data.write(struct.pack(">H", 34)) - data.write(struct.pack(">fH4s", 1.0, 0, "RGB ")) # intensity, envelope, type - data.write(generate_nstring("Blender's Vertex Colors")) # name - - data.write("CMNT") # material comment - comment = "Vertex Colors: Exported from Blender\256 243" - comment = generate_nstring(comment) - data.write(struct.pack(">H", len(comment))) - data.write(comment) - return data.getvalue() - -# 
================================================ -# === Generate Surface Definition (SURF Chunk) === -# ================================================ -def generate_surf(material_name): - data = cStringIO.StringIO() - data.write(generate_nstring(material_name)) - data.write("\0\0") - - try: - material = Blender.Material.Get(material_name) - R,G,B = material.R, material.G, material.B - ref = material.ref - emit = material.emit - spec = material.spec - hard = material.hard - - except: - material = None - - R=G=B = 1.0 - ref = 1.0 - emit = 0.0 - spec = 0.2 - hard = 0.0 - - - data.write("COLR") - data.write(struct.pack(">H", 14)) - data.write(struct.pack(">fffH", R, G, B, 0)) - - data.write("DIFF") - data.write(struct.pack(">H", 6)) - data.write(struct.pack(">fH", ref, 0)) - - data.write("LUMI") - data.write(struct.pack(">H", 6)) - data.write(struct.pack(">fH", emit, 0)) - - data.write("SPEC") - data.write(struct.pack(">H", 6)) - data.write(struct.pack(">fH", spec, 0)) - - data.write("GLOS") - data.write(struct.pack(">H", 6)) - gloss = hard / (255/2.0) - gloss = round(gloss, 1) - data.write(struct.pack(">fH", gloss, 0)) - - data.write("CMNT") # material comment - comment = material_name + ": Exported from Blender\256 243" - comment = generate_nstring(comment) - data.write(struct.pack(">H", len(comment))) - data.write(comment) - - # Check if the material contains any image maps - if material: - mtextures = material.getTextures() # Get a list of textures linked to the material - for mtex in mtextures: - if (mtex) and (mtex.tex.type == Blender.Texture.Types.IMAGE): # Check if the texture is of type "IMAGE" - data.write("BLOK") # Surface BLOK header - data.write(struct.pack(">H", 104)) # Hardcoded and ugly! Will only handle 1 image per material - - # IMAP subchunk (image map sub header) - data.write("IMAP") - data_tmp = cStringIO.StringIO() - data_tmp.write(struct.pack(">H", 0)) # Hardcoded - not sure what it represents - data_tmp.write("CHAN") - data_tmp.write(struct.pack(">H", 4)) - data_tmp.write("COLR") - data_tmp.write("OPAC") # Hardcoded texture layer opacity - data_tmp.write(struct.pack(">H", 8)) - data_tmp.write(struct.pack(">H", 0)) - data_tmp.write(struct.pack(">f", 1.0)) - data_tmp.write(struct.pack(">H", 0)) - data_tmp.write("ENAB") - data_tmp.write(struct.pack(">HH", 2, 1)) # 1 = texture layer enabled - data_tmp.write("NEGA") - data_tmp.write(struct.pack(">HH", 2, 0)) # Disable negative image (1 = invert RGB values) - data_tmp.write("AXIS") - data_tmp.write(struct.pack(">HH", 2, 1)) - data.write(struct.pack(">H", len(data_tmp.getvalue()))) - data.write(data_tmp.getvalue()) - - # IMAG subchunk - data.write("IMAG") - data.write(struct.pack(">HH", 2, 1)) - data.write("PROJ") - data.write(struct.pack(">HH", 2, 5)) # UV projection - - data.write("VMAP") - uvname = generate_nstring("Blender's UV Coordinates") - data.write(struct.pack(">H", len(uvname))) - data.write(uvname) - - return data.getvalue() - -# ============================================= -# === Generate Default Surface (SURF Chunk) === -# ============================================= -def generate_default_surf(): - data = cStringIO.StringIO() - material_name = DEFAULT_NAME - data.write(generate_nstring(material_name)) - data.write("\0\0") - - data.write("COLR") - data.write(struct.pack(">H", 14)) - data.write(struct.pack(">fffH", 1, 1, 1, 0)) - - data.write("DIFF") - data.write(struct.pack(">H", 6)) - data.write(struct.pack(">fH", 0.8, 0)) - - data.write("LUMI") - data.write(struct.pack(">H", 6)) - 
data.write(struct.pack(">fH", 0, 0)) - - data.write("SPEC") - data.write(struct.pack(">H", 6)) - data.write(struct.pack(">fH", 0.5, 0)) - - data.write("GLOS") - data.write(struct.pack(">H", 6)) - gloss = 50 / (255/2.0) - gloss = round(gloss, 1) - data.write(struct.pack(">fH", gloss, 0)) - - data.write("CMNT") # material comment - comment = material_name + ": Exported from Blender\256 243" - - # vals = map(chr, xrange(164,255,1)) - # keys = xrange(164,255,1) - # keys = map(lambda x: `x`, keys) - # comment = map(None, keys, vals) - # comment = reduce(operator.add, comment) - # comment = reduce(operator.add, comment) - - comment = generate_nstring(comment) - data.write(struct.pack(">H", len(comment))) - data.write(comment) - return data.getvalue() - -# ============================================ -# === Generate Object Comment (TEXT Chunk) === -# ============================================ -def generate_text(): - comment = "Lightwave Export Script for Blender by Anthony D'Agostino" - return generate_nstring(comment) - -# ============================================== -# === Generate Description Line (DESC Chunk) === -# ============================================== -def generate_desc(): - comment = "Copyright 2002 Scorpius Entertainment" - return generate_nstring(comment) - -# ================================================== -# === Generate Thumbnail Icon Image (ICON Chunk) === -# ================================================== -def generate_icon(): - data = cStringIO.StringIO() - file = open("f:/obj/radiosity/lwo2_icon.tga", "rb") # 60x60 uncompressed TGA - file.read(18) - icon_data = file.read(3600) # ? - file.close() - data.write(struct.pack(">HH", 0, 60)) - data.write(icon_data) - #print len(icon_data) - return data.getvalue() - -# =============================================== -# === Generate CLIP chunk with STIL subchunks === -# =============================================== -def generate_clip(mesh, material_names): - data = cStringIO.StringIO() - clipid = 1 - for i, material in enumerate(mesh.materials): # Run through list of materials used by mesh - if material: - mtextures = material.getTextures() # Get a list of textures linked to the material - for mtex in mtextures: - if (mtex) and (mtex.tex.type == Blender.Texture.Types.IMAGE): # Check if the texture is of type "IMAGE" - pathname = mtex.tex.image.filename # If full path is needed use filename in place of name - pathname = pathname[0:2] + pathname.replace("\\", "/")[3:] # Convert to Modo standard path - imagename = generate_nstring(pathname) - data.write(struct.pack(">L", clipid)) # CLIP sequence/id - data.write("STIL") # STIL image - data.write(struct.pack(">H", len(imagename))) # Size of image name - data.write(imagename) - clipid += 1 - return data.getvalue() - -# =================== -# === Write Chunk === -# =================== -def write_chunk(file, name, data): - file.write(name) - file.write(struct.pack(">L", len(data))) - file.write(data) - -# ============================= -# === Write LWO File Header === -# ============================= -def write_header(file, chunks): - chunk_sizes = map(len, chunks) - chunk_sizes = reduce(operator.add, chunk_sizes) - form_size = chunk_sizes + len(chunks)*8 + len("FORM") - file.write("FORM") - file.write(struct.pack(">L", form_size)) - file.write("LWO2") - -def fs_callback(filename): - if not filename.lower().endswith('.lwo'): filename += '.lwo' - write(filename) - -if struct and cStringIO and operator: - Blender.Window.FileSelector(fs_callback, "Export LWO", 
Blender.sys.makename(ext='.lwo')) -else: - Blender.Draw.PupMenu("Error%t|This script requires a full python installation") diff --git a/release/scripts/lightwave_import.py b/release/scripts/lightwave_import.py deleted file mode 100644 index 6d02467cef8..00000000000 --- a/release/scripts/lightwave_import.py +++ /dev/null @@ -1,1705 +0,0 @@ -#!BPY -""" -Name: 'LightWave (.lwo)...' -Blender: 239 -Group: 'Import' -Tooltip: 'Import LightWave Object File Format' -""" - -__author__ = ["Alessandro Pirovano, Anthony D'Agostino (Scorpius)", "Campbell Barton (ideasman42)", "ZanQdo"] -__url__ = ("www.blender.org", "blenderartist.org", -"Anthony's homepage, http://www.redrival.com/scorpius", "Alessandro's homepage, http://uaraus.altervista.org") - -importername = "lwo_import 0.4.0" - -# +---------------------------------------------------------+ -# | Save your work before and after use. | -# | Please report any useful comment to: | -# | uaraus-dem@yahoo.it | -# | Thanks | -# +---------------------------------------------------------+ -# +---------------------------------------------------------+ -# | Copyright (c) 2002 Anthony D'Agostino | -# | http://www.redrival.com/scorpius | -# | scorpius@netzero.com | -# | April 21, 2002 | -# | Import Export Suite v0.5 | -# +---------------------------------------------------------+ -# | Read and write LightWave Object File Format (*.lwo) | -# +---------------------------------------------------------+ -# +---------------------------------------------------------+ -# | Alessandro Pirovano tweaked starting on March 2005 | -# | http://uaraus.altervista.org | -# +---------------------------------------------------------+ -# +---------------------------------------------------------- -# | GPL license block -# | -# | This program is free software; you can redistribute it and/or modify -# | it under the terms of the GNU General Public License as published by -# | the Free Software Foundation; either version 2 of the License, or -# | (at your option) any later version. -# | -# | This program is distributed in the hope that it will be useful, -# | but WITHOUT ANY WARRANTY; without even the implied warranty of -# | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# | GNU General Public License for more details. 
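
As an aside on the container format that write_chunk and write_header produce: every chunk is a four-character tag, a big-endian uint32 payload length and the payload, and the FORM size counts everything after the size field itself, i.e. the "LWO2" type plus each embedded chunk's 8-byte header and data. A minimal sketch over hypothetical (tag, data) pairs, not the script's own chunk list:

    import struct

    def chunk_bytes(tag, data):
        # 4-char tag + big-endian length + payload (payloads are already padded
        # to even length by generate_nstring and friends)
        return tag + struct.pack(">L", len(data)) + data

    def form_bytes(chunks):
        # chunks is a list of (tag, data) pairs
        body = "LWO2" + "".join([chunk_bytes(t, d) for (t, d) in chunks])
        return "FORM" + struct.pack(">L", len(body)) + body
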
-# | -# | You should have received a copy of the GNU General Public License -# | along with this program; if not, write to the Free Software -# | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -# +---------------------------------------------------------- -# +---------------------------------------------------------+ -# | Release log: | -# | 0.4.0 : Updated for blender 2.44 | -# | ZanQdo - made the mesh import the right way up | -# | Ideasman42 - Updated functions for the bew API | -# | as well as removing the text object class | -# | 0.2.2 : This code works with Blender 2.42 RC3 | -# | Added a new PolyFill function for BPYMesh's | -# | ngon() to use, checked compatibility | -# | lightwaves ngons are imported as fgons | -# | Checked compatibility against 1711 lwo files | -# | 0.2.1 : This code works with Blender 2.40 RC1 | -# | modified material mode assignment to deal with | -# | Python API modification | -# | Changed script license to GNU GPL | -# | 0.2.0: This code works with Blender 2.40a2 or up | -# | Major rewrite to deal with large meshes | -# | - 2 pass file parsing | -# | - lower memory foot###if DEBUG: print | -# | (as long as python gc allows) | -# | 2.40a2 - Removed subsurf settings patches=poly | -# | 2.40a2 - Edge generation instead of 2vert faces | -# | 0.1.16: fixed (try 2) texture offset calculations | -# | added hint on axis mapping | -# | added hint on texture blending mode | -# | added hint on texture transparency setting | -# | search images in original directory first | -# | fixed texture order application | -# | 0.1.15: added release log | -# | fixed texture offset calculations (non-UV) | -# | fixed reverting vertex order in face generation | -# | associate texture on game-engine settings | -# | vector math definitely based on mathutils | -# | search images in "Images" and "../Images" dir | -# | revised logging facility | -# | fixed subsurf texture and material mappings | -# | 0.1.14: patched missing mod_vector (not definitive) | -# | 0.1.13: first public release | -# +---------------------------------------------------------+ - -#blender related import -import Blender -import bpy - -# use for comprehensiveImageLoad -import BPyImage - -# Use this ngon function -import BPyMesh - -import BPyMessages - -#python specific modules import -try: - import struct, chunk, cStringIO -except: - struct= chunk= cStringIO= None - -# python 2.3 has no reversed() iterator. this will only work on lists and tuples -try: - reversed -except: - def reversed(l): return l[::-1] - -### # Debuggin disabled in release. -### # do a search replace to enabe debug prints -### DEBUG = False - -# =========================================================== -# === Utility Preamble ====================================== -# =========================================================== - -textname = None -#uncomment the following line to enable logging facility to the named text object -#textname = "lwo_log" - -TXMTX = Blender.Mathutils.Matrix(\ -[1, 0, 0, 0],\ -[0, 0, 1, 0],\ -[0, 1, 0, 0],\ -[0, 0, 0, 1]) - -# =========================================================== -# === Make sure it is a string ... 
deal with strange chars == -# =========================================================== -def safestring(st): - myst = "" - for ll in xrange(len(st)): - if st[ll] < " ": - myst += "#" - else: - myst += st[ll] - return myst - -# =========================================================== -# === Main read functions =================================== -# =========================================================== - -# ============================= -# === Read LightWave Format === -# ============================= -def read(filename): - if BPyMessages.Error_NoFile(filename): - return - - print "This is: %s" % importername - print "Importing file:", filename - bpy.data.scenes.active.objects.selected = [] - - start = Blender.sys.time() - file = open(filename, "rb") - - editmode = Blender.Window.EditMode() # are we in edit mode? If so ... - if editmode: Blender.Window.EditMode(0) # leave edit mode before getting the mesh # === LWO header === - - try: - form_id, form_size, form_type = struct.unpack(">4s1L4s", file.read(12)) - except: - Blender.Draw.PupMenu('Error%t|This is not a lightwave file') - return - - if (form_type == "LWOB"): - read_lwob(file, filename) - elif (form_type == "LWO2"): - read_lwo2(file, filename) - else: - print "Can't read a file with the form_type: %s" % form_type - return - - Blender.Window.DrawProgressBar(1.0, "") # clear progressbar - file.close() - end = Blender.sys.time() - seconds = " in %.2f %s" % (end-start, "seconds") - if form_type == "LWO2": fmt = " (v6.0 Format)" - if form_type == "LWOB": fmt = " (v5.5 Format)" - print "Successfully imported " + filename.split('\\')[-1].split('/')[-1] + fmt + seconds - - if editmode: Blender.Window.EditMode(1) # optional, just being nice - Blender.Redraw() - -# enddef read - - -# ================================= -# === Read LightWave 5.5 format === -# ================================= -def read_lwob(file, filename): - #This function is directly derived from the LWO2 import routine - #dropping all the material analysis parts - - ###if DEBUG: print "LightWave 5.5 format" - - dir_part = Blender.sys.dirname(filename) - fname_part = Blender.sys.basename(filename) - #ask_weird = 1 - - #first initialization of data structures - defaultname = Blender.sys.splitext(fname_part)[0] - tag_list = [] #tag list: global for the whole file? - surf_list = [] #surf list: global for the whole file? - clip_list = [] #clip list: global for the whole file? - object_index = 0 - object_list = None - objspec_list = None - - #add default material for orphaned faces, if any - surf_list.append({'NAME': "_Orphans", 'g_MAT': bpy.data.materials.new("_Orphans")}) - - #pass 2: effectively generate objects - ###if DEBUG: print "Pass 1: dry import" - file.seek(0) - objspec_list = ["imported", {}, [], [], {}, {}, 0, {}, {}] - # === LWO header === - form_id, form_size, form_type = struct.unpack(">4s1L4s", file.read(12)) - if (form_type != "LWOB"): - ###if DEBUG: print "??? 
Inconsistent file type: %s" % form_type - return - while 1: - try: - lwochunk = chunk.Chunk(file) - except EOFError: - break - ###if DEBUG: print ' ', - if lwochunk.chunkname == "LAYR": - ###if DEBUG: print "---- LAYR", - objname = read_layr(lwochunk) - ###if DEBUG: print objname - if objspec_list != None: #create the object - create_objects(clip_list, objspec_list, surf_list) - update_material(clip_list, objspec_list, surf_list) #give it all the object - objspec_list = [objname, {}, [], [], {}, {}, 0, {}, {}] - object_index += 1 - elif lwochunk.chunkname == "PNTS": # Verts - ###if DEBUG: print "---- PNTS", - verts = read_verts(lwochunk) - objspec_list[2] = verts - elif lwochunk.chunkname == "POLS": # Faces v5.5 - ###if DEBUG: print "-------- POLS(5.5)" - faces = read_faces_5(lwochunk) - flag = 0 - #flag is 0 for regular polygon, 1 for patches (= subsurf), 2 for anything else to be ignored - if flag<2: - if objspec_list[3] != []: - #create immediately the object - create_objects(clip_list, objspec_list, surf_list) - update_material(clip_list, objspec_list, surf_list) #give it all the object - #update with new data - objspec_list = [objspec_list[0], #update name - {}, #init - objspec_list[2], #same vertexes - faces, #give it the new faces - {}, #no need to copy - filled at runtime - {}, #polygon tagging will follow - flag, #patch flag - objspec_list[7], #same uvcoords - {}] #no vmad mapping - object_index += 1 - #end if already has a face list - objspec_list[3] = faces - objname = objspec_list[0] - if objname == None: - objname = defaultname - #end if processing a valid poly type - else: # Misc Chunks - ###if DEBUG: print "---- %s: skipping (definitely!)" % lwochunk.chunkname - lwochunk.skip() - #uncomment here to log data structure as it is built - # ###if DEBUG: print object_list - #last object read - create_objects(clip_list, objspec_list, surf_list) - update_material(clip_list, objspec_list, surf_list) #give it all the object - objspec_list = None - surf_list = None - clip_list = None - - - ###if DEBUG: print "\nFound %d objects:" % object_index - -# enddef read_lwob - - -# ============================= -# === Read LightWave Format === -# ============================= -def read_lwo2(file, filename, typ="LWO2"): - - ###if DEBUG: print "LightWave 6 (and above) format" - - dir_part = Blender.sys.dirname(filename) - fname_part = Blender.sys.basename(filename) - ask_weird = 1 - - #first initialization of data structures - defaultname = Blender.sys.splitext(fname_part)[0] - tag_list = [] #tag list: global for the whole file? - surf_list = [] #surf list: global for the whole file? - clip_list = [] #clip list: global for the whole file? 
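
A condensed sketch of the file-level parsing that read() and both pass loops rely on, stated without the Blender calls (read_form_header and iter_chunks are illustrative helpers, not functions from this script): the first 12 bytes are unpacked as the FORM header, after which the file is a flat run of IFF chunks that the stdlib chunk module can walk until it raises EOFError.

    import struct, chunk

    def read_form_header(f):
        # "FORM", a big-endian uint32 size, then the type: "LWOB" or "LWO2"
        form_id, form_size, form_type = struct.unpack(">4sL4s", f.read(12))
        if form_id != "FORM":
            raise ValueError("not an IFF FORM file")
        return form_type, form_size

    def iter_chunks(f):
        # each chunk.Chunk reads a 4-byte tag and a 4-byte big-endian size
        while 1:
            try:
                c = chunk.Chunk(f)
            except EOFError:
                break
            yield c.getname(), c      # the caller reads or skips the payload
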
- object_index = 0 - object_list = None - objspec_list = None - # init value is: object_list = [[None, {}, [], [], {}, {}, 0, {}, {}]] - #0 - objname #original name - #1 - obj_dict = {TAG} #objects created - #2 - verts = [] #object vertexes - #3 - faces = [] #object faces (associations poly -> vertexes) - #4 - obj_dim_dict = {TAG} #tuples size and pos in local object coords - used for NON-UV mappings - #5 - polytag_dict = {TAG} #tag to polygons mapping - #6 - patch_flag #0 = surf; 1 = patch (subdivision surface) - it was the image list - #7 - uvcoords_dict = {name} #uvmap coordinates (mixed mode per vertex/per face) - #8 - facesuv_dict = {name} #vmad only coordinates associations poly & vertex -> uv tuples - - #pass 1: look in advance for materials - ###if DEBUG: print "Starting Pass 1: hold on tight" - while 1: - try: - lwochunk = chunk.Chunk(file) - except EOFError: - break - ###if DEBUG: print ' ', - if lwochunk.chunkname == "TAGS": # Tags - ###if DEBUG: print "---- TAGS" - tag_list.extend(read_tags(lwochunk)) - elif lwochunk.chunkname == "SURF": # surfaces - ###if DEBUG: print "---- SURF" - surf_list.append(read_surfs(lwochunk, surf_list, tag_list)) - elif lwochunk.chunkname == "CLIP": # texture images - ###if DEBUG: print "---- CLIP" - clip_list.append(read_clip(lwochunk, dir_part)) - ###if DEBUG: print "read total %s clips up to now" % len(clip_list) - else: # Misc Chunks - if ask_weird: - ckname = safestring(lwochunk.chunkname) - if "#" in ckname: - choice = Blender.Draw.PupMenu("WARNING: file could be corrupted.%t|Import anyway|Give up") - if choice != 1: - ###if DEBUG: print "---- %s: Maybe file corrupted. Terminated by user" % lwochunk.chunkname - return - ask_weird = 0 - ###if DEBUG: print "---- %s: skipping (maybe later)" % lwochunk.chunkname - lwochunk.skip() - - #add default material for orphaned faces, if any - surf_list.append({'NAME': "_Orphans", 'g_MAT': bpy.data.materials.new("_Orphans")}) - - #pass 2: effectively generate objects - ###if DEBUG: print "Pass 2: now for the hard part" - file.seek(0) - # === LWO header === - form_id, form_size, form_type = struct.unpack(">4s1L4s", file.read(12)) - if (form_type != "LWO2"): - ###if DEBUG: print "??? 
Inconsistent file type: %s" % form_type - return - while 1: - try: - lwochunk = chunk.Chunk(file) - except EOFError: - break - ###if DEBUG: print ' ', - if lwochunk.chunkname == "LAYR": - ###if DEBUG: print "---- LAYR" - objname = read_layr(lwochunk) - ###if DEBUG: print objname - if objspec_list != None: #create the object - create_objects(clip_list, objspec_list, surf_list) - update_material(clip_list, objspec_list, surf_list) #give it all the object - objspec_list = [objname, {}, [], [], {}, {}, 0, {}, {}] - object_index += 1 - elif lwochunk.chunkname == "PNTS": # Verts - ###if DEBUG: print "---- PNTS" - verts = read_verts(lwochunk) - objspec_list[2] = verts - elif lwochunk.chunkname == "VMAP": # MAPS (UV) - ###if DEBUG: print "---- VMAP" - #objspec_list[7] = read_vmap(objspec_list[7], len(objspec_list[2]), lwochunk) - read_vmap(objspec_list[7], len(objspec_list[2]), lwochunk) - elif lwochunk.chunkname == "VMAD": # MAPS (UV) per-face - ###if DEBUG: print "---- VMAD" - #objspec_list[7], objspec_list[8] = read_vmad(objspec_list[7], objspec_list[8], len(objspec_list[3]), len(objspec_list[2]), lwochunk) - read_vmad(objspec_list[7], objspec_list[8], len(objspec_list[3]), len(objspec_list[2]), lwochunk) - elif lwochunk.chunkname == "POLS": # Faces v6.0 - ###if DEBUG: print "-------- POLS(6)" - faces, flag = read_faces_6(lwochunk) - #flag is 0 for regular polygon, 1 for patches (= subsurf), 2 for anything else to be ignored - if flag<2: - if objspec_list[3] != []: - #create immediately the object - create_objects(clip_list, objspec_list, surf_list) - update_material(clip_list, objspec_list, surf_list) #give it all the object - #update with new data - objspec_list = [objspec_list[0], #update name - {}, #init - objspec_list[2], #same vertexes - faces, #give it the new faces - {}, #no need to copy - filled at runtime - {}, #polygon tagging will follow - flag, #patch flag - objspec_list[7], #same uvcoords - {}] #no vmad mapping - object_index += 1 - #end if already has a face list - objspec_list[3] = faces - objname = objspec_list[0] - if objname == None: - objname = defaultname - #end if processing a valid poly type - elif lwochunk.chunkname == "PTAG": # PTags - ###if DEBUG: print "---- PTAG" - polytag_dict = read_ptags(lwochunk, tag_list) - for kk, polytag_dict_val in polytag_dict.iteritems(): objspec_list[5][kk] = polytag_dict_val - else: # Misc Chunks - ###if DEBUG: print "---- %s: skipping (definitely!)" % lwochunk.chunkname - lwochunk.skip() - #uncomment here to log data structure as it is built - - #last object read - create_objects(clip_list, objspec_list, surf_list) - update_material(clip_list, objspec_list, surf_list) #give it all the object - objspec_list = None - surf_list = None - clip_list = None - - ###if DEBUG: print "\nFound %d objects:" % object_index -# enddef read_lwo2 - - - - - - -# =========================================================== -# === File reading routines ================================= -# =========================================================== -# ================== -# === Read Verts === -# ================== -def read_verts(lwochunk): - #data = cStringIO.StringIO(lwochunk.read()) - numverts = lwochunk.chunksize/12 - return [struct.unpack(">fff", lwochunk.read(12)) for i in xrange(numverts)] -# enddef read_verts - - -# ================= -# === Read Name === -# ================= -# modified to deal with odd lenght strings -def read_name(file): - name = "" - while 1: - char = file.read(1) - if char == "\0": break - else: name += char - len_name = len(name) + 
1 #count the trailing zero - if len_name%2==1: - char = file.read(1) #remove zero padding to even lenght - len_name += 1 - return name, len_name - - -# ================== -# === Read Layer === -# ================== -def read_layr(lwochunk): - data = cStringIO.StringIO(lwochunk.read()) - idx, flags = struct.unpack(">hh", data.read(4)) - pivot = struct.unpack(">fff", data.read(12)) - layer_name, discard = read_name(data) - if not layer_name: layer_name = "NoName" - return layer_name -# enddef read_layr - - -# ====================== -# === Read Faces 5.5 === -# ====================== -def read_faces_5(lwochunk): - data = cStringIO.StringIO(lwochunk.read()) - faces = [] - i = 0 - while i < lwochunk.chunksize: - #if not i%1000 and my_meshtools.show_progress: - # Blender.Window.DrawProgressBar(float(i)/lwochunk.chunksize, "Reading Faces") - - numfaceverts, = struct.unpack(">H", data.read(2)) - facev = [struct.unpack(">H", data.read(2))[0] for j in xrange(numfaceverts)] - facev.reverse() - faces.append(facev) - surfaceindex, = struct.unpack(">H", data.read(2)) - if surfaceindex < 0: - ###if DEBUG: print "***Error. Referencing uncorrect surface index" - return - i += (4+numfaceverts*2) - return faces - - -# ================================== -# === Read Variable-Length Index === -# ================================== -def read_vx(data): - byte1, = struct.unpack(">B", data.read(1)) - if byte1 != 0xFF: # 2-byte index - byte2, = struct.unpack(">B", data.read(1)) - index = byte1*256 + byte2 - index_size = 2 - else: # 4-byte index - byte2, byte3, byte4 = struct.unpack(">3B", data.read(3)) - index = byte2*65536 + byte3*256 + byte4 - index_size = 4 - return index, index_size - - -# ====================== -# === Read uvmapping === -# ====================== -def read_vmap(uvcoords_dict, maxvertnum, lwochunk): - - if maxvertnum == 0: - ###if DEBUG: print "Found VMAP but no vertexes to map!" - return uvcoords_dict - data = cStringIO.StringIO(lwochunk.read()) - map_type = data.read(4) - if map_type != "TXUV": - ###if DEBUG: print "Reading VMAP: No Texture UV map Were Found. Map Type: %s" % map_type - return uvcoords_dict - dimension, = struct.unpack(">H", data.read(2)) - name, i = read_name(data) #i initialized with string lenght + zeros - ###if DEBUG: print "TXUV %d %s" % (dimension, name) - #note if there is already a VMAD it will be lost - #it is assumed that VMAD will follow the corresponding VMAP - Vector = Blender.Mathutils.Vector - try: #if uvcoords_dict.has_key(name): - my_uv_dict = uvcoords_dict[name] #update existing - except: #else: - my_uv_dict = {} #start a brand new: this could be made more smart - while (i < lwochunk.chunksize - 6): #4+2 header bytes already read - vertnum, vnum_size = read_vx(data) - uv = struct.unpack(">ff", data.read(8)) - if vertnum >= maxvertnum: - ###if DEBUG: print "Hem: more uvmap than vertexes? ignoring uv data for vertex %d" % vertnum - pass - else: - my_uv_dict[vertnum] = Vector(uv) - i += 8 + vnum_size - #end loop on uv pairs - uvcoords_dict[name] = my_uv_dict - #this is a per-vertex mapping AND the uv tuple is vertex-ordered, so faces_uv is the same as faces - #return uvcoords_dict - return - -# ======================== -# === Read uvmapping 2 === -# ======================== -def read_vmad(uvcoords_dict, facesuv_dict, maxfacenum, maxvertnum, lwochunk): - if maxvertnum == 0 or maxfacenum == 0: - ###if DEBUG: print "Found VMAD but no vertexes to map!" 
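
The variable-length index (VX) used throughout POLS, PTAG, VMAP and VMAD data deserves a standalone round trip: values below 0xFF00 are stored in two bytes, larger ones in four bytes whose top byte is 0xFF. A small sketch mirroring generate_vx from the exporter and read_vx above (the sample indices are made up):

    import struct, cStringIO

    def encode_vx(index):
        if index < 0xFF00:
            return struct.pack(">H", index)              # 2-byte form
        return struct.pack(">L", index | 0xFF000000)     # 4-byte form, 0xFF marker

    def decode_vx(data):
        b1, = struct.unpack(">B", data.read(1))
        if b1 != 0xFF:                                   # 2-byte form
            b2, = struct.unpack(">B", data.read(1))
            return b1 * 256 + b2, 2
        b2, b3, b4 = struct.unpack(">3B", data.read(3))  # 4-byte form
        return b2 * 65536 + b3 * 256 + b4, 4

    for i in (7, 0xFEFF, 0x123456):
        value, size = decode_vx(cStringIO.StringIO(encode_vx(i)))
        assert value == i
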
- return uvcoords_dict, facesuv_dict - data = cStringIO.StringIO(lwochunk.read()) - map_type = data.read(4) - if map_type != "TXUV": - ###if DEBUG: print "Reading VMAD: No Texture UV map Were Found. Map Type: %s" % map_type - return uvcoords_dict, facesuv_dict - dimension, = struct.unpack(">H", data.read(2)) - name, i = read_name(data) #i initialized with string lenght + zeros - ###if DEBUG: print "TXUV %d %s" % (dimension, name) - try: #if uvcoords_dict.has_key(name): - my_uv_dict = uvcoords_dict[name] #update existing - except: #else: - my_uv_dict = {} #start a brand new: this could be made more smart - my_facesuv_list = [] - newindex = maxvertnum + 10 #why +10? Why not? - #end variable initialization - Vector = Blender.Mathutils.Vector - while (i < lwochunk.chunksize - 6): #4+2 header bytes already read - vertnum, vnum_size = read_vx(data) - i += vnum_size - polynum, vnum_size = read_vx(data) - i += vnum_size - uv = struct.unpack(">ff", data.read(8)) - if polynum >= maxfacenum or vertnum >= maxvertnum: - ###if DEBUG: print "Hem: more uvmap than vertexes? ignorig uv data for vertex %d" % vertnum - pass - else: - my_uv_dict[newindex] = Vector(uv) - my_facesuv_list.append([polynum, vertnum, newindex]) - newindex += 1 - i += 8 - #end loop on uv pairs - uvcoords_dict[name] = my_uv_dict - facesuv_dict[name] = my_facesuv_list - ###if DEBUG: print "updated %d vertexes data" % (newindex-maxvertnum-10) - return - - -# ================= -# === Read tags === -# ================= -def read_tags(lwochunk): - data = cStringIO.StringIO(lwochunk.read()) - tag_list = [] - current_tag = "" - i = 0 - while i < lwochunk.chunksize: - char = data.read(1) - if char == "\0": - tag_list.append(current_tag) - if (len(current_tag) % 2 == 0): char = data.read(1) - current_tag = "" - else: - current_tag += char - i += 1 - ###if DEBUG: print "read %d tags, list follows: %s" % (len(tag_list), tag_list) - return tag_list - - -# ================== -# === Read Ptags === -# ================== -def read_ptags(lwochunk, tag_list): - data = cStringIO.StringIO(lwochunk.read()) - polygon_type = data.read(4) - if polygon_type != "SURF": - ###if DEBUG: print "No Surf Were Found. Polygon Type: %s" % polygon_type - return {} - ptag_dict = {} - i = 0 - while(i < lwochunk.chunksize-4): #4 bytes polygon type already read - #if not i%1000 and my_meshtools.show_progress: - # Blender.Window.DrawProgressBar(float(i)/lwochunk.chunksize, "Reading PTAGS") - poln, poln_size = read_vx(data) - i += poln_size - tag_index, = struct.unpack(">H", data.read(2)) - if tag_index > (len(tag_list)): - ###if DEBUG: print "Reading PTAG: Surf belonging to undefined TAG: %d. Skipping" % tag_index - return {} - i += 2 - tag_key = tag_list[tag_index] - try: - ptag_dict[tag_list[tag_index]].append(poln) - except: #if not(ptag_dict.has_key(tag_key)): - ptag_dict[tag_list[tag_index]] = [poln] - - ###if DEBUG: for i, ptag_dict_val in ptag_dict.iteritems(): print "read %d polygons belonging to TAG %s" % (len(ptag_dict_val ), i) - return ptag_dict - - - -# ================== -# === Read Clips === -# ================== -def read_clip(lwochunk, dir_part): -# img, IMG, g_IMG refers to blender image objects -# ima, IMAG, g_IMAG refers to clip dictionary 'ID' entries: refer to blok and surf - clip_dict = {} - data = cStringIO.StringIO(lwochunk.read()) - data_str = data.read(4) - if len(data_str) < 4: # can be zero also??? 
:/ - # Should not happen but lw can import so we should too - return - - image_index, = struct.unpack(">L", data_str) - clip_dict['ID'] = image_index - i = 4 - while(i < lwochunk.chunksize): - subchunkname, = struct.unpack("4s", data.read(4)) - subchunklen, = struct.unpack(">H", data.read(2)) - if subchunkname == "STIL": - ###if DEBUG: print "-------- STIL" - clip_name, k = read_name(data) - #now split text independently from platform - #depend on the system where image was saved. NOT the one where the script is run - no_sep = "\\" - if Blender.sys.sep == no_sep: no_sep ="/" - if (no_sep in clip_name): - clip_name = clip_name.replace(no_sep, Blender.sys.sep) - short_name = Blender.sys.basename(clip_name) - if clip_name == "" or short_name == "": - ###if DEBUG: print "Reading CLIP: Empty clip name not allowed. Skipping" - discard = data.read(subchunklen-k) - clip_dict['NAME'] = clip_name - clip_dict['BASENAME'] = short_name - elif subchunkname == "XREF": #cross reference another image - ###if DEBUG: print "-------- XREF" - image_index, = struct.unpack(">L", data.read(4)) - clip_name, k = read_name(data) - clip_dict['NAME'] = clip_name - clip_dict['XREF'] = image_index - elif subchunkname == "NEGA": #negate texture effect - ###if DEBUG: print "-------- NEGA" - n, = struct.unpack(">H", data.read(2)) - clip_dict['NEGA'] = n - else: # Misc Chunks - ###if DEBUG: print "-------- CLIP:%s: skipping" % subchunkname - discard = data.read(subchunklen) - i = i + 6 + subchunklen - #end loop on surf chunks - ###if DEBUG: print "read image:%s" % clip_dict - if 'XREF' in clip_dict: # has_key - ###if DEBUG: print "Cross-reference: no image pre-allocated." - return clip_dict - #look for images - #img = load_image("",clip_dict['NAME']) - NAME= BASENAME= None - - try: - NAME= clip_dict['NAME'] - BASENAME= clip_dict['BASENAME'] - except: - clip_dict['g_IMG'] = None - return - # ###if DEBUG: print 'test', NAME, BASENAME - img = BPyImage.comprehensiveImageLoad(NAME, dir_part, PLACE_HOLDER= False, RECURSIVE=False) - if not img: - ###if DEBUG: print "***No image %s found: trying LWO file subdir" % NAME - img = BPyImage.comprehensiveImageLoad(BASENAME, dir_part, PLACE_HOLDER= False, RECURSIVE=False) - - ###if DEBUG: if not img: print "***No image %s found: giving up" % BASENAME - #lucky we are: we have an image - ###if DEBUG: print "Image pre-allocated." 
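
CLIP and SURF payloads are themselves sequences of subchunks with a shorter header: a four-byte tag followed by a big-endian uint16 length. read_clip (and read_surfs further down) unrolls this by hand; a generic walker might look like the sketch below (iter_subchunks is an illustrative name, not part of the script):

    import struct

    def iter_subchunks(data, payload_size):
        # data is a file-like object positioned just past the parent chunk's
        # fixed fields; payload_size is how many subchunk bytes remain
        i = 0
        while i < payload_size:
            tag = data.read(4)
            size, = struct.unpack(">H", data.read(2))
            yield tag, data.read(size)
            i += 6 + size
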
- clip_dict['g_IMG'] = img - - return clip_dict - - -# =========================== -# === Read Surfaces Block === -# =========================== -def read_surfblok(subchunkdata): - lenght = len(subchunkdata) - my_dict = {} - my_uvname = "" - data = cStringIO.StringIO(subchunkdata) - ############################################################## - # blok header sub-chunk - ############################################################## - subchunkname, = struct.unpack("4s", data.read(4)) - subchunklen, = struct.unpack(">h", data.read(2)) - accumulate_i = subchunklen + 6 - if subchunkname != 'IMAP': - ###if DEBUG: print "---------- SURF: BLOK: %s: block aborting" % subchunkname - return {}, "" - ###if DEBUG: print "---------- IMAP" - ordinal, i = read_name(data) - my_dict['ORD'] = ordinal - #my_dict['g_ORD'] = -1 - my_dict['ENAB'] = True - while(i < subchunklen): # ---------left 6------------------------- loop on header parameters - sub2chunkname, = struct.unpack("4s", data.read(4)) - sub2chunklen, = struct.unpack(">h", data.read(2)) - i = i + 6 + sub2chunklen - if sub2chunkname == "CHAN": - ###if DEBUG: print "------------ CHAN" - sub2chunkname, = struct.unpack("4s", data.read(4)) - my_dict['CHAN'] = sub2chunkname - sub2chunklen -= 4 - elif sub2chunkname == "ENAB": #only present if is to be disabled - ###if DEBUG: print "------------ ENAB" - ena, = struct.unpack(">h", data.read(2)) - my_dict['ENAB'] = ena - sub2chunklen -= 2 - elif sub2chunkname == "NEGA": #only present if is to be enabled - ###if DEBUG: print "------------ NEGA" - ena, = struct.unpack(">h", data.read(2)) - if ena == 1: - my_dict['NEGA'] = ena - sub2chunklen -= 2 - elif sub2chunkname == "OPAC": #only present if is to be disabled - ###if DEBUG: print "------------ OPAC" - opa, = struct.unpack(">h", data.read(2)) - s, = struct.unpack(">f", data.read(4)) - envelope, env_size = read_vx(data) - my_dict['OPAC'] = opa - my_dict['OPACVAL'] = s - sub2chunklen -= 6 - elif sub2chunkname == "AXIS": - ###if DEBUG: print "------------ AXIS" - ena, = struct.unpack(">h", data.read(2)) - my_dict['DISPLAXIS'] = ena - sub2chunklen -= 2 - else: # Misc Chunks - ###if DEBUG: print "------------ SURF: BLOK: IMAP: %s: skipping" % sub2chunkname - discard = data.read(sub2chunklen) - #end loop on blok header subchunks - ############################################################## - # blok attributes sub-chunk - ############################################################## - subchunkname, = struct.unpack("4s", data.read(4)) - subchunklen, = struct.unpack(">h", data.read(2)) - accumulate_i += subchunklen + 6 - if subchunkname != 'TMAP': - ###if DEBUG: print "---------- SURF: BLOK: %s: block aborting" % subchunkname - return {}, "" - ###if DEBUG: print "---------- TMAP" - i = 0 - while(i < subchunklen): # -----------left 6----------------------- loop on header parameters - sub2chunkname, = struct.unpack("4s", data.read(4)) - sub2chunklen, = struct.unpack(">h", data.read(2)) - i = i + 6 + sub2chunklen - if sub2chunkname == "CNTR": - ###if DEBUG: print "------------ CNTR" - x, y, z = struct.unpack(">fff", data.read(12)) - envelope, env_size = read_vx(data) - my_dict['CNTR'] = [x, y, z] - sub2chunklen -= (12+env_size) - elif sub2chunkname == "SIZE": - ###if DEBUG: print "------------ SIZE" - x, y, z = struct.unpack(">fff", data.read(12)) - envelope, env_size = read_vx(data) - my_dict['SIZE'] = [x, y, z] - sub2chunklen -= (12+env_size) - elif sub2chunkname == "ROTA": - ###if DEBUG: print "------------ ROTA" - x, y, z = struct.unpack(">fff", data.read(12)) 
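
One recurring detail in read_surfblok here and read_surfs below: most channel values are followed by an envelope reference stored as a VX index, so the bytes consumed are the fixed value size plus whatever read_vx reports. A hedged sketch for a scalar channel such as DIFF or SPEC, taking the script's read_vx as a parameter to stay self-contained (read_scalar_channel is an illustrative helper):

    import struct

    def read_scalar_channel(data, read_vx):
        # one big-endian float, then an envelope reference encoded as a VX index;
        # returns the value and the number of payload bytes consumed
        value, = struct.unpack(">f", data.read(4))
        envelope, env_size = read_vx(data)
        return value, 4 + env_size
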
- envelope, env_size = read_vx(data) - my_dict['ROTA'] = [x, y, z] - sub2chunklen -= (12+env_size) - elif sub2chunkname == "CSYS": - ###if DEBUG: print "------------ CSYS" - ena, = struct.unpack(">h", data.read(2)) - my_dict['CSYS'] = ena - sub2chunklen -= 2 - else: # Misc Chunks - ###if DEBUG: print "------------ SURF: BLOK: TMAP: %s: skipping" % sub2chunkname - pass - if sub2chunklen > 0: - discard = data.read(sub2chunklen) - #end loop on blok attributes subchunks - ############################################################## - # ok, now other attributes without sub_chunks - ############################################################## - while(accumulate_i < lenght): # ---------------------------------- loop on header parameters: lenght has already stripped the 6 bypes header - subchunkname, = struct.unpack("4s", data.read(4)) - subchunklen, = struct.unpack(">H", data.read(2)) - accumulate_i = accumulate_i + 6 + subchunklen - if subchunkname == "PROJ": - ###if DEBUG: print "---------- PROJ" - p, = struct.unpack(">h", data.read(2)) - my_dict['PROJ'] = p - subchunklen -= 2 - elif subchunkname == "AXIS": - ###if DEBUG: print "---------- AXIS" - a, = struct.unpack(">h", data.read(2)) - my_dict['MAJAXIS'] = a - subchunklen -= 2 - elif subchunkname == "IMAG": - ###if DEBUG: print "---------- IMAG" - i, i_size = read_vx(data) - my_dict['IMAG'] = i - subchunklen -= i_size - elif subchunkname == "WRAP": - ###if DEBUG: print "---------- WRAP" - ww, wh = struct.unpack(">hh", data.read(4)) - #reduce width and height to just 1 parameter for both - my_dict['WRAP'] = max([ww,wh]) - #my_dict['WRAPWIDTH'] = ww - #my_dict['WRAPHEIGHT'] = wh - subchunklen -= 4 - elif subchunkname == "WRPW": - ###if DEBUG: print "---------- WRPW" - w, = struct.unpack(">f", data.read(4)) - my_dict['WRPW'] = w - envelope, env_size = read_vx(data) - subchunklen -= (env_size+4) - elif subchunkname == "WRPH": - ###if DEBUG: print "---------- WRPH" - w, = struct.unpack(">f", data.read(4)) - my_dict['WRPH'] = w - envelope, env_size = read_vx(data) - subchunklen -= (env_size+4) - elif subchunkname == "VMAP": - ###if DEBUG: print "---------- VMAP" - vmp, i = read_name(data) - my_dict['VMAP'] = vmp - my_uvname = vmp - subchunklen -= i - else: # Misc Chunks - ###if DEBUG: print "---------- SURF: BLOK: %s: skipping" % subchunkname - pass - if subchunklen > 0: - discard = data.read(subchunklen) - #end loop on blok subchunks - return my_dict, my_uvname - - -# ===================== -# === Read Surfaces === -# ===================== -def read_surfs(lwochunk, surf_list, tag_list): - my_dict = {} - data = cStringIO.StringIO(lwochunk.read()) - surf_name, i = read_name(data) - parent_name, j = read_name(data) - i += j - if (surf_name == "") or not(surf_name in tag_list): - ###if DEBUG: print "Reading SURF: Actually empty surf name not allowed. 
Skipping" - return {} - if (parent_name != ""): - parent_index = [x['NAME'] for x in surf_list].count(parent_name) - if parent_index >0: - my_dict = surf_list[parent_index-1] - my_dict['NAME'] = surf_name - ###if DEBUG: print "Surface data for TAG %s" % surf_name - while(i < lwochunk.chunksize): - subchunkname, = struct.unpack("4s", data.read(4)) - subchunklen, = struct.unpack(">H", data.read(2)) - i = i + 6 + subchunklen #6 bytes subchunk header - if subchunkname == "COLR": #color: mapped on color - ###if DEBUG: print "-------- COLR" - r, g, b = struct.unpack(">fff", data.read(12)) - envelope, env_size = read_vx(data) - my_dict['COLR'] = [r, g, b] - subchunklen -= (12+env_size) - elif subchunkname == "DIFF": #diffusion: mapped on reflection (diffuse shader) - ###if DEBUG: print "-------- DIFF" - s, = struct.unpack(">f", data.read(4)) - envelope, env_size = read_vx(data) - my_dict['DIFF'] = s - subchunklen -= (4+env_size) - elif subchunkname == "SPEC": #specularity: mapped to specularity (spec shader) - ###if DEBUG: print "-------- SPEC" - s, = struct.unpack(">f", data.read(4)) - envelope, env_size = read_vx(data) - my_dict['SPEC'] = s - subchunklen -= (4+env_size) - elif subchunkname == "REFL": #reflection: mapped on raymirror - ###if DEBUG: print "-------- REFL" - s, = struct.unpack(">f", data.read(4)) - envelope, env_size = read_vx(data) - my_dict['REFL'] = s - subchunklen -= (4+env_size) - elif subchunkname == "TRNL": #translucency: mapped on same param - ###if DEBUG: print "-------- TRNL" - s, = struct.unpack(">f", data.read(4)) - envelope, env_size = read_vx(data) - my_dict['TRNL'] = s - subchunklen -= (4+env_size) - elif subchunkname == "GLOS": #glossiness: mapped on specularity hardness (spec shader) - ###if DEBUG: print "-------- GLOS" - s, = struct.unpack(">f", data.read(4)) - envelope, env_size = read_vx(data) - my_dict['GLOS'] = s - subchunklen -= (4+env_size) - elif subchunkname == "TRAN": #transparency: inverted and mapped on alpha channel - ###if DEBUG: print "-------- TRAN" - s, = struct.unpack(">f", data.read(4)) - envelope, env_size = read_vx(data) - my_dict['TRAN'] = s - subchunklen -= (4+env_size) - elif subchunkname == "LUMI": #luminosity: mapped on emit channel - ###if DEBUG: print "-------- LUMI" - s, = struct.unpack(">f", data.read(4)) - envelope, env_size = read_vx(data) - my_dict['LUMI'] = s - subchunklen -= (4+env_size) - elif subchunkname == "GVAL": #glow: mapped on add channel - ###if DEBUG: print "-------- GVAL" - s, = struct.unpack(">f", data.read(4)) - envelope, env_size = read_vx(data) - my_dict['GVAL'] = s - subchunklen -= (4+env_size) - elif subchunkname == "SMAN": #smoothing angle - ###if DEBUG: print "-------- SMAN" - s, = struct.unpack(">f", data.read(4)) - my_dict['SMAN'] = s - subchunklen -= 4 - elif subchunkname == "SIDE": #double sided? 
- ###if DEBUG: print "-------- SIDE" #if 1 side do not define key - s, = struct.unpack(">H", data.read(2)) - if s == 3: - my_dict['SIDE'] = s - subchunklen -= 2 - elif subchunkname == "RIND": #Refraction: mapped on IOR - ###if DEBUG: print "-------- RIND" - s, = struct.unpack(">f", data.read(4)) - envelope, env_size = read_vx(data) - my_dict['RIND'] = s - subchunklen -= (4+env_size) - elif subchunkname == "BLOK": #blocks - ###if DEBUG: print "-------- BLOK" - rr, uvname = read_surfblok(data.read(subchunklen)) - #paranoia setting: preventing adding an empty dict - if rr: # != {} - try: - my_dict['BLOK'].append(rr) - except: - my_dict['BLOK'] = [rr] - - if uvname: # != "": - my_dict['UVNAME'] = uvname #theoretically there could be a number of them: only one used per surf - # all are dictionaries - so testing keys - if not('g_IMAG' in my_dict) and ('CHAN' in rr) and ('OPAC' in rr) and ('IMAG' in rr): - if (rr['CHAN'] == 'COLR') and (rr['OPAC'] == 0): - my_dict['g_IMAG'] = rr['IMAG'] #do not set anything, just save image object for later assignment - subchunklen = 0 #force ending - else: # Misc Chunks - pass - ###if DEBUG: print "-------- SURF:%s: skipping" % subchunkname - if subchunklen > 0: - discard = data.read(subchunklen) - #end loop on surf chunks - try:#if my_dict.has_key('BLOK'): - my_dict['BLOK'].reverse() #texture applied in reverse order with respect to reading from lwo - except: - pass - - #uncomment this if material pre-allocated by read_surf - my_dict['g_MAT'] = bpy.data.materials.new(my_dict['NAME']) - ###if DEBUG: print "-> Material pre-allocated." - return my_dict - -# ========================= -# === Recalculate Faces === -# ========================= - -def get_uvface(complete_list, facenum): - # extract from the complete list only vertexes of the desired polygon - ''' - my_facelist = [] - for elem in complete_list: - if elem[0] == facenum: - my_facelist.append(elem) - return my_facelist - ''' - return [elem for elem in complete_list if elem[0] == facenum] - -def get_newindex(polygon_list, vertnum): - # extract from the polygon list the new index associated to a vertex - if not polygon_list: # == [] - return -1 - for elem in polygon_list: - if elem[1] == vertnum: - return elem[2] - # ###if DEBUG: print "WARNING: expected vertex %s for polygon %s. 
Polygon_list dump follows" % (vertnum, polygon_list[0][0]) - # ###if DEBUG: print polygon_list - return -1 - -def get_surf(surf_list, cur_tag): - for elem in surf_list: # elem can be None - if elem and elem['NAME'] == cur_tag: - return elem - return {} - - - -# ==================================== -# === Modified Create Blender Mesh === -# ==================================== -def my_create_mesh(clip_list, surf, objspec_list, current_facelist, objname, not_used_faces): - #take the needed faces and update the not-used face list - complete_vertlist = objspec_list[2] - complete_facelist = objspec_list[3] - uvcoords_dict = objspec_list[7] - facesuv_dict = objspec_list[8] - vertex_map = {} #implementation as dict - cur_ptag_faces = [] - cur_ptag_faces_indexes = [] - maxface = len(complete_facelist) - for ff in current_facelist: - if ff >= maxface: - ###if DEBUG: print "Non existent face addressed: Giving up with this object" - return None, not_used_faces #return the created object - cur_face = complete_facelist[ff] - cur_ptag_faces_indexes.append(ff) - if not_used_faces: # != [] - not_used_faces[ff] = -1 - for vv in cur_face: vertex_map[vv] = 1 - #end loop on faces - store_edge = 0 - - scn= bpy.data.scenes.active - msh = bpy.data.meshes.new() - obj = scn.objects.new(msh) - - mat = None - try: - msh.materials = [surf['g_MAT']] - except: - pass - - msh.mode |= Blender.Mesh.Modes.AUTOSMOOTH #smooth it anyway - if 'SMAN' in surf: # has_key - #not allowed mixed mode mesh (all the mesh is smoothed and all with the same angle) - #only one smoothing angle will be active! => take the max one - msh.degr = min(80, int(surf['SMAN']/3.1415926535897932384626433832795*180.0)) #lwo in radians - blender in degrees - - try: - img= lookup_imag(clip_list, surf['g_IMAG'])['g_IMG'] - except: - img= None - - #uv_flag = ((surf.has_key('UVNAME')) and (uvcoords_dict.has_key(surf['UVNAME'])) and (img != None)) - uv_flag = (('UVNAME' in surf) and (surf['UVNAME'] in uvcoords_dict)) - - ###if DEBUG: print "\n#===================================================================#" - ###if DEBUG: print "Processing Object: %s" % objname - ###if DEBUG: print "#===================================================================#" - - if uv_flag: - msh.verts.extend([(0.0,0.0,0.0),]) - j = 1 - else: - j = 0 - - def tmp_get_vert(k, i): - vertex_map[k] = i+j # j is the dummy vert - # ###if DEBUG: print complete_vertlist[i] - return complete_vertlist[k] - - - - msh.verts.extend([tmp_get_vert(k, i) for i, k in enumerate(vertex_map.iterkeys())]) - msh.transform(TXMTX) # faster then applying while reading. 
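
Two conversions in my_create_mesh are easy to get wrong, so here they are in isolation: the TXMTX transform swaps the Y and Z axes back (mirroring the exporter's (x, z, y) packing), and the LWO SMAN smoothing angle arrives in radians while the 2.4x Mesh.degr field takes degrees capped at 80. A standalone sketch with made-up angles (sman_to_degr is an illustrative helper, not part of the script):

    import math

    def sman_to_degr(sman):
        # LWO SMAN is in radians; Blender 2.4x Mesh.degr wants degrees, max 80
        return min(80, int(sman / math.pi * 180.0))

    assert sman_to_degr(0.5236) == 30      # a 30-degree smoothing angle
    assert sman_to_degr(2.0) == 80         # anything past 80 degrees is clamped
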
- #end sweep over vertexes - - #append faces - FACE_TEX= Blender.Mesh.FaceModes.TEX - FACE_ALPHA= Blender.Mesh.FaceTranspModes.ALPHA - EDGE_DRAW_FLAG= Blender.Mesh.EdgeFlags.EDGEDRAW | Blender.Mesh.EdgeFlags.EDGERENDER - - - edges = [] - face_data = [] # [(indicies, material, uvs, image), ] - face_uvs = [] - edges_fgon = [] - - if uv_flag: - uvcoords_dict_context = uvcoords_dict[surf['UVNAME']] - try: current_uvdict = facesuv_dict[surf['UVNAME']] - except: current_uvdict = None - - default_uv = Blender.Mathutils.Vector(0,0) - def tmp_get_face_uvs(cur_face, i): - uvs = [] - if current_uvdict: - uvface = get_uvface(current_uvdict,i) - for vi in cur_face: - ni = get_newindex(uvface, vi) - if ni == -1: ni = vi - - try: - uvs.append(uvcoords_dict_context[ ni ]) - except: - ###if DEBUG: print '\tWarning, Corrupt UVs' - uvs.append(default_uv) - else: - for vi in cur_face: - try: - uvs.append(uvcoords_dict_context[ vi ]) - except: - ###if DEBUG: print '\tWarning, Corrupt UVs' - uvs.append(default_uv) - - return uvs - cur_face - for i in cur_ptag_faces_indexes: - cur_face = complete_facelist[i] - numfaceverts = len(cur_face) - - if numfaceverts == 2: edges.append((vertex_map[cur_face[0]], vertex_map[cur_face[1]])) - elif numfaceverts == 3 or numfaceverts == 4: - rev_face = [__i for __i in reversed(cur_face)] - face_data.append( [vertex_map[j] for j in rev_face] ) - if uv_flag: face_uvs.append(tmp_get_face_uvs(rev_face, i)) - elif numfaceverts > 4: - meta_faces= BPyMesh.ngon(complete_vertlist, cur_face, PREF_FIX_LOOPS= True) - edge_face_count = {} - for mf in meta_faces: - # These will always be tri's since they are scanfill faces - mf = cur_face[mf[2]], cur_face[mf[1]], cur_face[mf[0]] - face_data.append( [vertex_map[j] for j in mf] ) - - if uv_flag: face_uvs.append(tmp_get_face_uvs(mf, i)) - - #if USE_FGON: - if len(meta_faces) > 1: - mf = face_data[-1] # reuse mf - for j in xrange(3): - v1= mf[j] - v2= mf[j-1] - if v1!=v2: - if v1>v2: - v2,v1= v1,v2 - try: - edge_face_count[v1,v2]+= 1 - except: - edge_face_count[v1,v2]= 0 - - - - if edge_face_count: - edges_fgon.extend( [vert_key for vert_key, count in edge_face_count.iteritems() if count] ) - - if edges: - msh.edges.extend(edges) - - face_mapping_removed = msh.faces.extend(face_data, indexList=True) - if 'TRAN' in surf or (mat and mat.alpha<1.0): # incase mat is null - transp_flag = True - else: - transp_flag = False - - if uv_flag: - msh.faceUV = True - msh_faces= msh.faces - for i, uvs in enumerate(face_uvs): - i_mapped = face_mapping_removed[i] - if i_mapped != None: - f = msh_faces[i_mapped] - f.uv = uvs - if img: - f.image = img - - if transp_flag: f.transp |= FACE_ALPHA - - if edges_fgon: - msh_edges = msh.edges - FGON= Blender.Mesh.EdgeFlags.FGON - edges_fgon = msh.findEdges( edges_fgon ) - if type(edges_fgon) != list: edges_fgon = [edges_fgon] - for ed in edges_fgon: - if ed!=None: - msh_edges[ed].flag |= FGON - - if not(uv_flag): #clear eventual UV data - msh.faceUV = False - - if uv_flag: - msh.verts.delete([0,]) - - return obj, not_used_faces #return the created object - - -# ============================================ -# === Set Subsurf attributes on given mesh === -# ============================================ -def set_subsurf(obj): - mods = obj.modifiers # get the object's modifiers - mod = mods.append(Blender.Modifier.Type.SUBSURF) # add a new subsurf modifier - mod[Blender.Modifier.Settings.LEVELS] = 2 # set subsurf subdivision levels to 2 - mod[Blender.Modifier.Settings.RENDLEVELS] = 2 # set subsurf rendertime subdivision 
levels to 2 - obj.makeDisplayList() - - -# ================================= -# === object size and dimension === -# ================================= -def obj_size_pos(obj): - bbox = obj.getBoundBox() - bbox_min = map(lambda *row: min(row), *bbox) #transpose & get min - bbox_max = map(lambda *row: max(row), *bbox) #transpose & get max - obj_size = (bbox_max[0]-bbox_min[0], bbox_max[1]-bbox_min[1], bbox_max[2]-bbox_min[2]) - obj_pos = ( (bbox_max[0]+bbox_min[0]) / 2, (bbox_max[1]+bbox_min[1]) / 2, (bbox_max[2]+bbox_min[2]) / 2) - return (obj_size, obj_pos) - - -# ========================= -# === Create the object === -# ========================= -def create_objects(clip_list, objspec_list, surf_list): - nf = len(objspec_list[3]) - not_used_faces = range(nf) - ptag_dict = objspec_list[5] - obj_dict = {} #links tag names to object, used for material assignments - obj_dim_dict = {} - obj_list = [] #have it handy for parent association - middlechar = "+" - endchar = "" - if (objspec_list[6] == 1): - middlechar = endchar = "#" - for cur_tag, ptag_dict_val in ptag_dict.iteritems(): - if ptag_dict_val != []: - cur_surf = get_surf(surf_list, cur_tag) - cur_obj, not_used_faces= my_create_mesh(clip_list, cur_surf, objspec_list, ptag_dict_val, objspec_list[0][:9]+middlechar+cur_tag[:9], not_used_faces) - # Works now with new modifiers - if objspec_list[6] == 1: - set_subsurf(cur_obj) - if cur_obj: # != None - obj_dict[cur_tag] = cur_obj - obj_dim_dict[cur_tag] = obj_size_pos(cur_obj) - obj_list.append(cur_obj) - #end loop on current group - #and what if some faces not used in any named PTAG? get rid of unused faces - orphans = [] - for tt in not_used_faces: - if tt > -1: orphans.append(tt) - #end sweep on unused face list - not_used_faces = None - if orphans: # != [] - cur_surf = get_surf(surf_list, "_Orphans") - cur_obj, not_used_faces = my_create_mesh(clip_list, cur_surf, objspec_list, orphans, objspec_list[0][:9]+middlechar+"Orphans", []) - if cur_obj: # != None - if objspec_list[6] == 1: - set_subsurf(cur_obj) - obj_dict["_Orphans"] = cur_obj - obj_dim_dict["_Orphans"] = obj_size_pos(cur_obj) - obj_list.append(cur_obj) - objspec_list[1]= obj_dict - objspec_list[4]= obj_dim_dict - - return - - - -# =========================================== -# === Lookup for image index in clip_list === -# =========================================== -def lookup_imag(clip_list, ima_id): - for ii in clip_list: - if ii and ii['ID'] == ima_id: - if 'XREF' in ii: # has_key - #cross reference - recursively look for images - return lookup_imag(clip_list, ii['XREF']) - else: - return ii - return None - - -# =================================================== -# === Create and assign image mapping to material === -# =================================================== -def create_blok(surf, mat, clip_list, obj_size, obj_pos): - - def output_size_ofs(size, pos, blok): - #just automate repetitive task - # 0 == X, 1 == Y, 2 == Z - size_default = [1.0] * 3 - size2 = [1.0] * 3 - ofs_default = [0.0] * 3 - offset = [1.0] * 3 - axis_default = [Blender.Texture.Proj.X, Blender.Texture.Proj.Y, Blender.Texture.Proj.Z] - axis = [1.0] * 3 - c_map_txt = [" X--", " -Y-", " --Z"] - c_map = [0,1,2] # standard, good for Z axis projection - if blok['MAJAXIS'] == 0: - c_map = [1,2,0] # X axis projection - if blok['MAJAXIS'] == 2: - c_map = [0,2,1] # Y axis projection - - ###if DEBUG: print "!!!axis mapping:" - #this is the smart way - ###if DEBUG: for mp in c_map: print c_map_txt[mp] - - if blok['SIZE'][0] != 0.0: #paranoia controls - 
size_default[0] = (size[0]/blok['SIZE'][0]) - ofs_default[0] = ((blok['CNTR'][0]-pos[0])/blok['SIZE'][0]) - if blok['SIZE'][1] != 0.0: - size_default[2] = (size[2]/blok['SIZE'][1]) - ofs_default[2] = ((blok['CNTR'][1]-pos[2])/blok['SIZE'][1]) - if blok['SIZE'][2] != 0.0: - size_default[1] = (size[1]/blok['SIZE'][2]) - ofs_default[1] = ((blok['CNTR'][2]-pos[1])/blok['SIZE'][2]) - - for mp in xrange(3): - axis[mp] = axis_default[c_map[mp]] - size2[mp] = size_default[c_map[mp]] - offset[mp] = ofs_default[c_map[mp]] - if offset[mp]>10.0: offset[mp]-10.0 - if offset[mp]<-10.0: offset[mp]+10.0 -# size = [size_default[mp] for mp in c_map] - - ###if DEBUG: print "!!!texture size and offsets:" - ###if DEBUG: print " sizeX = %.5f; sizeY = %.5f; sizeZ = %.5f" % (size[0],size[1],size[2]) - ###if DEBUG: print " ofsX = %.5f; ofsY = %.5f; ofsZ = %.5f" % (offset[0],offset[1],offset[2]) - return axis, size2, offset - - ti = 0 - alphaflag = 0 #switched to 1 if some tex in this block is using alpha - lastimag = 0 #experimental .... - for blok in surf['BLOK']: - ###if DEBUG: print "#...................................................................#" - ###if DEBUG: print "# Processing texture block no.%s for surf %s" % (ti,surf['NAME']) - ###if DEBUG: print "#...................................................................#" - # tobj.pdict (blok) - if ti > 9: break #only 8 channels 0..7 allowed for texture mapping - #if not blok['ENAB']: - # ###if DEBUG: print "***Image is not ENABled! Quitting this block" - # break - if not('IMAG' in blok): # has_key - ###if DEBUG: print "***No IMAGE for this block? Quitting" - break #extract out the image index within the clip_list - if blok['IMAG'] == 0: blok['IMAG'] = lastimag #experimental .... - ###if DEBUG: print "looking for image number %d" % blok['IMAG'] - ima = lookup_imag(clip_list, blok['IMAG']) - if ima == None: - ###if DEBUG: print "***Block index image not within CLIP list? Quitting Block" - break #safety check (paranoia setting) - img = ima['g_IMG'] - lastimag = blok['IMAG'] #experimental .... 
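output_size_ofs() above turns a texture block's LWO-space SIZE and CNTR into the relative size and offset Blender expects, on top of swapping the Y and Z axes and permuting them according to MAJAXIS. Below is a standalone sketch of just the size/offset arithmetic plus the clamping create_blok() applies before assigning the texture; the axis permutation is left out, and the names and sample values are illustrative.

def blok_size_offset(obj_size, obj_pos, blok_size, blok_cntr):
    size = [1.0, 1.0, 1.0]
    offset = [0.0, 0.0, 0.0]
    for i in range(3):
        if blok_size[i] != 0.0:  # same "paranoia" zero check as the script
            size[i] = obj_size[i] / blok_size[i]
            offset[i] = (blok_cntr[i] - obj_pos[i]) / blok_size[i]
        # Blender raises an error outside these ranges, so clamp like create_blok() does
        offset[i] = min(10.0, max(-10.0, offset[i]))
        size[i] = min(100.0, max(-100.0, size[i]))
    return size, offset

# a texture block half the object's size, centred a quarter of the way along one axis
print(blok_size_offset((2.0, 2.0, 2.0), (0.0, 0.0, 0.0),
                       (1.0, 1.0, 1.0), (0.5, 0.0, 0.0)))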
- if img == None: - ###if DEBUG: print "***Failed to pre-allocate image %s found: giving up" % ima['BASENAME'] - break - tname = str(ima['ID']) - if blok['ENAB']: - tname += "+" - else: - tname += "x" #let's signal when should not be enabled - if 'CHAN' in blok: # has_key - tname += blok['CHAN'] - newtex = bpy.data.textures.new(tname) - newtex.setType('Image') # make it anu image texture - newtex.image = img - #how does it extends beyond borders - if 'WRAP' in blok: # has_key - if (blok['WRAP'] == 3) or (blok['WRAP'] == 2): - newtex.setExtend('Extend') - elif (blok['WRAP'] == 1): - newtex.setExtend('Repeat') - elif (blok['WRAP'] == 0): - newtex.setExtend('Clip') - ###if DEBUG: print "generated texture %s" % tname - - #MapTo is determined by CHAN parameter - #assign some defaults - colfac = 1.0 - dvar = 1.0 - norfac = 0.5 - nega = False - mapflag = Blender.Texture.MapTo.COL #default to color - maptype = Blender.Texture.Mappings.FLAT - if 'CHAN' in blok: # has_key - if blok['CHAN'] == 'COLR' and 'OPACVAL' in blok: # has_key - colfac = blok['OPACVAL'] - # Blender needs this to be clamped - colfac = max(0.0, min(1.0, colfac)) - ###if DEBUG: print "!!!Set Texture -> MapTo -> Col = %.3f" % colfac - if blok['CHAN'] == 'BUMP': - mapflag = Blender.Texture.MapTo.NOR - if 'OPACVAL' in blok: norfac = blok['OPACVAL'] # has_key - ###if DEBUG: print "!!!Set Texture -> MapTo -> Nor = %.3f" % norfac - if blok['CHAN'] == 'LUMI': - mapflag = Blender.Texture.MapTo.EMIT - if 'OPACVAL' in blok: dvar = blok['OPACVAL'] # has_key - ###if DEBUG: print "!!!Set Texture -> MapTo -> DVar = %.3f" % dvar - if blok['CHAN'] == 'DIFF': - mapflag = Blender.Texture.MapTo.REF - if 'OPACVAL' in blok: dvar = blok['OPACVAL'] # has_key - ###if DEBUG: print "!!!Set Texture -> MapTo -> DVar = %.3f" % dvar - if blok['CHAN'] == 'SPEC': - mapflag = Blender.Texture.MapTo.SPEC - if 'OPACVAL' in blok: dvar = blok['OPACVAL'] # has_key - ###if DEBUG: print "!!!Set Texture -> MapTo -> DVar = %.3f" % dvar - if blok['CHAN'] == 'TRAN': - mapflag = Blender.Texture.MapTo.ALPHA - if 'OPACVAL' in blok: dvar = blok['OPACVAL'] # has_key - ###if DEBUG: print "!!!Set Texture -> MapTo -> DVar = %.3f" % dvar - alphaflag = 1 - nega = True - if 'NEGA' in blok: # has_key - ###if DEBUG: print "!!!Watch-out: effect of this texture channel must be INVERTED!" 
- nega = not nega - - blendmode_list = ['Mix', - 'Subtractive', - 'Difference', - 'Multiply', - 'Divide', - 'Mix with calculated alpha layer and stencil flag', - 'Texture Displacement', - 'Additive'] - set_blendmode = 7 #default additive - if 'OPAC' in blok: # has_key - set_blendmode = blok['OPAC'] - if set_blendmode == 5: #transparency - newtex.imageFlags |= Blender.Texture.ImageFlags.CALCALPHA - if nega: newtex.flags |= Blender.Texture.Flags.NEGALPHA - ###if DEBUG: print "!!!Set Texture -> MapTo -> Blending Mode = %s" % blendmode_list[set_blendmode] - - #the TexCo flag is determined by PROJ parameter - axis = [Blender.Texture.Proj.X, Blender.Texture.Proj.Y, Blender.Texture.Proj.Z] - size = [1.0] * 3 - ofs = [0.0] * 3 - if 'PROJ' in blok: # has_key - if blok['PROJ'] == 0: #0 - Planar - ###if DEBUG: print "!!!Flat projection" - coordflag = Blender.Texture.TexCo.ORCO - maptype = Blender.Texture.Mappings.FLAT - elif blok['PROJ'] == 1: #1 - Cylindrical - ###if DEBUG: print "!!!Cylindrical projection" - coordflag = Blender.Texture.TexCo.ORCO - maptype = Blender.Texture.Mappings.TUBE - elif blok['PROJ'] == 2: #2 - Spherical - ###if DEBUG: print "!!!Spherical projection" - coordflag = Blender.Texture.TexCo.ORCO - maptype = Blender.Texture.Mappings.SPHERE - elif blok['PROJ'] == 3: #3 - Cubic - ###if DEBUG: print "!!!Cubic projection" - coordflag = Blender.Texture.TexCo.ORCO - maptype = Blender.Texture.Mappings.CUBE - elif blok['PROJ'] == 4: #4 - Front Projection - ###if DEBUG: print "!!!Front projection" - coordflag = Blender.Texture.TexCo.ORCO - maptype = Blender.Texture.Mappings.FLAT # ??? could it be a FLAT with some other TexCo type? - elif blok['PROJ'] == 5: #5 - UV - ###if DEBUG: print "UVMapped" - coordflag = Blender.Texture.TexCo.UV - maptype = Blender.Texture.Mappings.FLAT #in case of UV default to FLAT mapping => effectively not used - if blok['PROJ'] != 5: #This holds for any projection map except UV - axis, size, ofs = output_size_ofs(obj_size, obj_pos, blok) - - # Clamp ofs and size else blender will raise an error - for ii in xrange(3): - ofs[ii]= min(10.0, max(-10, ofs[ii])) - size[ii]= min(100, max(-100, size[ii])) - - mat.setTexture(ti, newtex, coordflag, mapflag) - current_mtex = mat.getTextures()[ti] - current_mtex.mapping = maptype - current_mtex.colfac = colfac - current_mtex.dvar = dvar - current_mtex.norfac = norfac - current_mtex.neg = nega - current_mtex.xproj = axis[0] - current_mtex.yproj = axis[1] - current_mtex.zproj = axis[2] - current_mtex.size = tuple(size) - current_mtex.ofs = tuple(ofs) - if (set_blendmode == 5): #transparency - current_mtex.stencil = not (nega) - - ti += 1 - #end loop over bloks - return alphaflag - - -# ======================================== -# === Create and assign a new material === -# ======================================== -#def update_material(surf_list, ptag_dict, obj, clip_list, uv_dict, dir_part): -def update_material(clip_list, objspec, surf_list): - if (surf_list == []) or (objspec[5] == {}) or (objspec[1] == {}): - ###if DEBUG: print "something getting wrong in update_material: dump follows ..." 
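Three LWO block parameters drive the texture setup in create_blok() above: WRAP selects the extend mode, CHAN selects what the texture maps to, and PROJ selects the mapping shape. The same correspondences can be written as plain lookup tables; the sketch below only restates the branches used above, with strings standing in for the Blender.Texture constants.

# LWO WRAP -> Blender extend mode (3 and 2 both collapse to Extend above)
WRAP_MODE = {0: 'Clip', 1: 'Repeat', 2: 'Extend', 3: 'Extend'}

# LWO CHAN -> Blender MapTo target
CHAN_MAPTO = {'COLR': 'COL', 'BUMP': 'NOR', 'LUMI': 'EMIT',
              'DIFF': 'REF', 'SPEC': 'SPEC', 'TRAN': 'ALPHA'}

# LWO PROJ -> Blender mapping type (4, Front Projection, falls back to FLAT;
# 5 means UV coordinates are used instead of a generated projection)
PROJ_MAPPING = {0: 'FLAT', 1: 'TUBE', 2: 'SPHERE', 3: 'CUBE', 4: 'FLAT', 5: 'FLAT'}

blok = {'WRAP': 1, 'CHAN': 'BUMP', 'PROJ': 2}
print("%s %s %s" % (WRAP_MODE[blok['WRAP']],
                    CHAN_MAPTO[blok['CHAN']],
                    PROJ_MAPPING[blok['PROJ']]))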
- ###if DEBUG: print surf_list - ###if DEBUG: print objspec[5] - ###if DEBUG: print objspec[1] - return - obj_dict = objspec[1] - all_faces = objspec[3] - obj_dim_dict = objspec[4] - ptag_dict = objspec[5] - uvcoords_dict = objspec[7] - facesuv_dict = objspec[8] - for surf in surf_list: - if surf and surf['NAME'] in ptag_dict: # in ptag_dict.keys() - ###if DEBUG: print "#-------------------------------------------------------------------#" - ###if DEBUG: print "Processing surface (material): %s" % surf['NAME'] - ###if DEBUG: print "#-------------------------------------------------------------------#" - #material set up - facelist = ptag_dict[surf['NAME']] - #bounding box and position - cur_obj = obj_dict[surf['NAME']] - obj_size = obj_dim_dict[surf['NAME']][0] - obj_pos = obj_dim_dict[surf['NAME']][1] - ###if DEBUG: print surf - #uncomment this if material pre-allocated by read_surf - mat = surf['g_MAT'] - if mat == None: - ###if DEBUG: print "Sorry, no pre-allocated material to update. Giving up for %s." % surf['NAME'] - break - #mat = Blender.Material.New(surf['NAME']) - #surf['g_MAT'] = mat - if 'COLR' in surf: # has_key - mat.rgbCol = surf['COLR'] - if 'LUMI' in surf: - mat.setEmit(surf['LUMI']) - if 'GVAL' in surf: # has_key - mat.setAdd(surf['GVAL']) - if 'SPEC' in surf: # has_key - mat.setSpec(surf['SPEC']) #it should be * 2 but seems to be a bit higher lwo [0.0, 1.0] - blender [0.0, 2.0] - if 'DIFF' in surf: # has_key - mat.setRef(surf['DIFF']) #lwo [0.0, 1.0] - blender [0.0, 1.0] - if 'GLOS' in surf: # has_key #lwo [0.0, 1.0] - blender [0, 255] - glo = int(371.67 * surf['GLOS'] - 42.334) #linear mapping - seems to work better than exp mapping - if glo <32: glo = 32 #clamped to 32-255 - if glo >255: glo = 255 - mat.setHardness(glo) - if 'TRNL' in surf: # has_key - mat.setTranslucency(surf['TRNL']) #NOT SURE ABOUT THIS lwo [0.0, 1.0] - blender [0.0, 1.0] - - mm = mat.mode - mm |= Blender.Material.Modes.TRANSPSHADOW - if 'REFL' in surf: # has_key - mat.setRayMirr(surf['REFL']) #lwo [0.0, 1.0] - blender [0.0, 1.0] - mm |= Blender.Material.Modes.RAYMIRROR - if 'TRAN' in surf: # has_key - mat.setAlpha(1.0-surf['TRAN']) #lwo [0.0, 1.0] - blender [1.0, 0.0] - mm |= Blender.Material.Modes.RAYTRANSP - if 'RIND' in surf: # has_key - s = surf['RIND'] - if s < 1.0: s = 1.0 - if s > 3.0: s = 3.0 - mat.setIOR(s) #clipped to blender [1.0, 3.0] - mm |= Blender.Material.Modes.RAYTRANSP - if 'BLOK' in surf and surf['BLOK'] != []: - #update the material according to texture. - alphaflag = create_blok(surf, mat, clip_list, obj_size, obj_pos) - if alphaflag: - mm |= Blender.Material.Modes.RAYTRANSP - mat.mode = mm - #finished setting up the material - #end if exist SURF - #end loop on materials (SURFs) - return - - -# ====================== -# === Read Faces 6.0 === -# ====================== -def read_faces_6(lwochunk): - data = cStringIO.StringIO(lwochunk.read()) - faces = [] - polygon_type = data.read(4) - subsurf = 0 - if polygon_type != "FACE" and polygon_type != "PTCH": - ###if DEBUG: print "No FACE/PATCH Were Found. 
Polygon Type: %s" % polygon_type - return "", 2 - if polygon_type == 'PTCH': subsurf = 1 - i = 0 - while(i < lwochunk.chunksize-4): - #if not i%1000 and my_meshtools.show_progress: - # Blender.Window.DrawProgressBar(float(i)/lwochunk.chunksize, "Reading Faces") - facev = [] - numfaceverts, = struct.unpack(">H", data.read(2)) - i += 2 - - for j in xrange(numfaceverts): - index, index_size = read_vx(data) - i += index_size - facev.append(index) - faces.append(facev) - ###if DEBUG: print "read %s faces; type of block %d (0=FACE; 1=PATCH)" % (len(faces), subsurf) - return faces, subsurf - -def main(): - if not struct: - Blender.Draw.PupMenu('This importer requires a full python install') - return - - Blender.Window.FileSelector(read, "Import LWO", '*.lwo') - -if __name__=='__main__': - main() - - -# Cams debugging lwo loader -""" -TIME= Blender.sys.time() -import os -print 'Searching for files' -os.system('find /fe/lwo/Objects/ -follow -iname "*.lwo" > /tmp/templwo_list') -# os.system('find /storage/ -iname "*.lwo" > /tmp/templwo_list') -print '...Done' -file= open('/tmp/templwo_list', 'r') -lines= file.readlines() - -# sort by filesize for faster testing -lines_size = [(os.path.getsize(f[:-1]), f[:-1]) for f in lines] -lines_size.sort() -lines = [f[1] for f in lines_size] - -file.close() - -def between(v,a,b): - if v <= max(a,b) and v >= min(a,b): - return True - - return False -size= 0.0 -for i, _lwo in enumerate(lines): - #if i==425: # SCANFILL - #if 1: - #if i==520: # SCANFILL CRASH - #if i==47: # SCANFILL CRASH - #if between(i, 525, 550): - #if i > 1635: - #if i != 1519: # 730 - if i>141: - #if 1: - # _lwo= _lwo[:-1] - print 'Importing', _lwo, '\nNUMBER', i, 'of', len(lines) - _lwo_file= _lwo.split('/')[-1].split('\\')[-1] - newScn= bpy.data.scenes.new(_lwo_file) - bpy.data.scenes.active = newScn - size += ((os.path.getsize(_lwo)/1024.0))/ 1024.0 - read(_lwo) - # Remove objects to save memory? - ''' - for ob in newScn.objects: - if ob.type=='Mesh': - me= ob.getData(mesh=1) - me.verts= None - newScn.unlink(ob) - ''' - print 'mb size so far', size - -print 'TOTAL TIME: %.6f' % (Blender.sys.time() - TIME) -""" \ No newline at end of file diff --git a/release/scripts/md2_export.py b/release/scripts/md2_export.py deleted file mode 100644 index f0fe6b9af40..00000000000 --- a/release/scripts/md2_export.py +++ /dev/null @@ -1,1271 +0,0 @@ -#!BPY - -""" -Name: 'MD2 (.md2)' -Blender: 243 -Group: 'Export' -Tooltip: 'Export to Quake file format (.md2).' -""" - -__author__ = 'Bob Holcomb' -__version__ = '0.18.1 patch 1' -__url__ = ["Bob's site, http://bane.servebeer.com", - "Support forum, http://bane.servebeer.com", "blender", "blenderartists.org"] -__email__ = ["Bob Holcomb, bob_holcomb:hotmail*com", "scripts"] -__bpydoc__ = """\ -This script Exports a Quake 2 file (MD2). - - Additional help from: Shadwolf, Skandal, Rojo, Cambo
- Thanks Guys! -""" - -# This is a PATCHED VERSION, fixing the bug due to which animations would -# (almost) never work. It is now also possible to output a MD2 model without -# texture. -# On: 23 january 2008 -# By: Boris van Schooten (schooten@cs.utwente.nl) - -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Script copyright (C): Bob Holcomb -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - - -import Blender -from Blender import * -from Blender.Draw import * -from Blender.BGL import * -from Blender.Window import * - -import struct, string -from types import * - - - -###################################################### -# GUI Loader -###################################################### - -# Export globals -g_filename=Create("tris.md2") -g_frame_filename=Create("default") - -g_filename_search=Create("") -g_frame_search=Create("default") - -g_texture_path=Create("") - -user_frame_list=[] - -#Globals -g_scale=Create(1.0) - -# Events -EVENT_NOEVENT=1 -EVENT_SAVE_MD2=2 -EVENT_CHOOSE_FILENAME=3 -EVENT_CHOOSE_FRAME=4 -EVENT_EXIT=100 - -###################################################### -# Callbacks for Window functions -###################################################### -def filename_callback(input_filename): - global g_filename - g_filename.val=input_filename - -def frame_callback(input_frame): - global g_frame_filename - g_frame_filename.val=input_frame - -def draw_gui(): - global g_scale - global g_filename - global g_frame_filename - global EVENT_NOEVENT,EVENT_SAVE_MD2,EVENT_CHOOSE_FILENAME,EVENT_CHOOSE_FRAME,EVENT_EXIT - global g_texture_path - - ########## Titles - glClear(GL_COLOR_BUFFER_BIT) - glRasterPos2d(10, 120) - Text("MD2 Export") - - ######### Parameters GUI Buttons - ######### MD2 Filename text entry - g_filename = String("MD2 file to save: ", EVENT_NOEVENT, 10, 75, 210, 18, - g_filename.val, 255, "MD2 file to save") - ########## MD2 File Search Button - Button("Browse",EVENT_CHOOSE_FILENAME,220,75,80,18) - - ########## MD2 Frame List Text entry - g_frame_filename = String("Frame List file to load: ", EVENT_NOEVENT, 10, 55, 210, 18, - g_frame_filename.val, 255, "Frame List to load-overrides MD2 defaults") - ########## Frame List Search Button - Button("Browse",EVENT_CHOOSE_FRAME,220,55,80,18) - - ########## Texture path to append - g_texture_path=String("Texture Path: ", EVENT_NOEVENT, 10,35,210,18, - g_texture_path.val,255, "Texture path to prepend") - - - ########## Scale slider-default is 1/8 which is a good scale for md2->blender - g_scale= Slider("Scale Factor: ", EVENT_NOEVENT, 10, 95, 210, 18, - 1.0, 0.001, 10.0, 1, "Scale factor for object Model"); - - ######### Draw and Exit Buttons - Button("Export",EVENT_SAVE_MD2 , 10, 10, 80, 18) - Button("Exit",EVENT_EXIT , 170, 10, 80, 18) - -def 
event(evt, val): - if (evt == QKEY and not val): - Exit() - -def bevent(evt): - global g_filename - global g_frame_filename - global EVENT_NOEVENT,EVENT_SAVE_MD2,EVENT_EXIT - - ######### Manages GUI events - if (evt==EVENT_EXIT): - Blender.Draw.Exit() - elif (evt==EVENT_CHOOSE_FILENAME): - FileSelector(filename_callback, "MD2 File Selection") - elif (evt==EVENT_CHOOSE_FRAME): - FileSelector(frame_callback, "Frame Selection") - elif (evt==EVENT_SAVE_MD2): - save_md2(g_filename.val) - Blender.Draw.Exit() - return - -Register(draw_gui, event, bevent) - -###################################################### -# MD2 Model Constants -###################################################### -MD2_MAX_TRIANGLES=4096 -MD2_MAX_VERTICES=2048 -MD2_MAX_TEXCOORDS=2048 -MD2_MAX_FRAMES=512 -MD2_MAX_SKINS=32 -MD2_MAX_FRAMESIZE=(MD2_MAX_VERTICES * 4 + 128) - -MD2_FRAME_NAME_LIST=(("stand",1,40), - ("run",41,46), - ("attack",47,54), - ("pain1",55,58), - ("pain2",59,62), - ("pain3",63,66), - ("jump",67,72), - ("flip",73,84), - ("salute", 85,95), - ("taunt",96,112), - ("wave",113,123), - ("point",124,135), - ("crstnd",136,154), - ("crwalk",155,160), - ("crattack",161,169), - ("crpain",170,173), - ("crdeath",174,178), - ("death1",179,184), - ("death2",185,190), - ("death3",191,198)) - #198 frames - -MD2_NORMALS=((-0.525731, 0.000000, 0.850651), - (-0.442863, 0.238856, 0.864188), - (-0.295242, 0.000000, 0.955423), - (-0.309017, 0.500000, 0.809017), - (-0.162460, 0.262866, 0.951056), - (0.000000, 0.000000, 1.000000), - (0.000000, 0.850651, 0.525731), - (-0.147621, 0.716567, 0.681718), - (0.147621, 0.716567, 0.681718), - (0.000000, 0.525731, 0.850651), - (0.309017, 0.500000, 0.809017), - (0.525731, 0.000000, 0.850651), - (0.295242, 0.000000, 0.955423), - (0.442863, 0.238856, 0.864188), - (0.162460, 0.262866, 0.951056), - (-0.681718, 0.147621, 0.716567), - (-0.809017, 0.309017, 0.500000), - (-0.587785, 0.425325, 0.688191), - (-0.850651, 0.525731, 0.000000), - (-0.864188, 0.442863, 0.238856), - (-0.716567, 0.681718, 0.147621), - (-0.688191, 0.587785, 0.425325), - (-0.500000, 0.809017, 0.309017), - (-0.238856, 0.864188, 0.442863), - (-0.425325, 0.688191, 0.587785), - (-0.716567, 0.681718, -0.147621), - (-0.500000, 0.809017, -0.309017), - (-0.525731, 0.850651, 0.000000), - (0.000000, 0.850651, -0.525731), - (-0.238856, 0.864188, -0.442863), - (0.000000, 0.955423, -0.295242), - (-0.262866, 0.951056, -0.162460), - (0.000000, 1.000000, 0.000000), - (0.000000, 0.955423, 0.295242), - (-0.262866, 0.951056, 0.162460), - (0.238856, 0.864188, 0.442863), - (0.262866, 0.951056, 0.162460), - (0.500000, 0.809017, 0.309017), - (0.238856, 0.864188, -0.442863), - (0.262866, 0.951056, -0.162460), - (0.500000, 0.809017, -0.309017), - (0.850651, 0.525731, 0.000000), - (0.716567, 0.681718, 0.147621), - (0.716567, 0.681718, -0.147621), - (0.525731, 0.850651, 0.000000), - (0.425325, 0.688191, 0.587785), - (0.864188, 0.442863, 0.238856), - (0.688191, 0.587785, 0.425325), - (0.809017, 0.309017, 0.500000), - (0.681718, 0.147621, 0.716567), - (0.587785, 0.425325, 0.688191), - (0.955423, 0.295242, 0.000000), - (1.000000, 0.000000, 0.000000), - (0.951056, 0.162460, 0.262866), - (0.850651, -0.525731, 0.000000), - (0.955423, -0.295242, 0.000000), - (0.864188, -0.442863, 0.238856), - (0.951056, -0.162460, 0.262866), - (0.809017, -0.309017, 0.500000), - (0.681718, -0.147621, 0.716567), - (0.850651, 0.000000, 0.525731), - (0.864188, 0.442863, -0.238856), - (0.809017, 0.309017, -0.500000), - (0.951056, 0.162460, -0.262866), - (0.525731, 0.000000, 
-0.850651), - (0.681718, 0.147621, -0.716567), - (0.681718, -0.147621, -0.716567), - (0.850651, 0.000000, -0.525731), - (0.809017, -0.309017, -0.500000), - (0.864188, -0.442863, -0.238856), - (0.951056, -0.162460, -0.262866), - (0.147621, 0.716567, -0.681718), - (0.309017, 0.500000, -0.809017), - (0.425325, 0.688191, -0.587785), - (0.442863, 0.238856, -0.864188), - (0.587785, 0.425325, -0.688191), - (0.688191, 0.587785, -0.425325), - (-0.147621, 0.716567, -0.681718), - (-0.309017, 0.500000, -0.809017), - (0.000000, 0.525731, -0.850651), - (-0.525731, 0.000000, -0.850651), - (-0.442863, 0.238856, -0.864188), - (-0.295242, 0.000000, -0.955423), - (-0.162460, 0.262866, -0.951056), - (0.000000, 0.000000, -1.000000), - (0.295242, 0.000000, -0.955423), - (0.162460, 0.262866, -0.951056), - (-0.442863, -0.238856, -0.864188), - (-0.309017, -0.500000, -0.809017), - (-0.162460, -0.262866, -0.951056), - (0.000000, -0.850651, -0.525731), - (-0.147621, -0.716567, -0.681718), - (0.147621, -0.716567, -0.681718), - (0.000000, -0.525731, -0.850651), - (0.309017, -0.500000, -0.809017), - (0.442863, -0.238856, -0.864188), - (0.162460, -0.262866, -0.951056), - (0.238856, -0.864188, -0.442863), - (0.500000, -0.809017, -0.309017), - (0.425325, -0.688191, -0.587785), - (0.716567, -0.681718, -0.147621), - (0.688191, -0.587785, -0.425325), - (0.587785, -0.425325, -0.688191), - (0.000000, -0.955423, -0.295242), - (0.000000, -1.000000, 0.000000), - (0.262866, -0.951056, -0.162460), - (0.000000, -0.850651, 0.525731), - (0.000000, -0.955423, 0.295242), - (0.238856, -0.864188, 0.442863), - (0.262866, -0.951056, 0.162460), - (0.500000, -0.809017, 0.309017), - (0.716567, -0.681718, 0.147621), - (0.525731, -0.850651, 0.000000), - (-0.238856, -0.864188, -0.442863), - (-0.500000, -0.809017, -0.309017), - (-0.262866, -0.951056, -0.162460), - (-0.850651, -0.525731, 0.000000), - (-0.716567, -0.681718, -0.147621), - (-0.716567, -0.681718, 0.147621), - (-0.525731, -0.850651, 0.000000), - (-0.500000, -0.809017, 0.309017), - (-0.238856, -0.864188, 0.442863), - (-0.262866, -0.951056, 0.162460), - (-0.864188, -0.442863, 0.238856), - (-0.809017, -0.309017, 0.500000), - (-0.688191, -0.587785, 0.425325), - (-0.681718, -0.147621, 0.716567), - (-0.442863, -0.238856, 0.864188), - (-0.587785, -0.425325, 0.688191), - (-0.309017, -0.500000, 0.809017), - (-0.147621, -0.716567, 0.681718), - (-0.425325, -0.688191, 0.587785), - (-0.162460, -0.262866, 0.951056), - (0.442863, -0.238856, 0.864188), - (0.162460, -0.262866, 0.951056), - (0.309017, -0.500000, 0.809017), - (0.147621, -0.716567, 0.681718), - (0.000000, -0.525731, 0.850651), - (0.425325, -0.688191, 0.587785), - (0.587785, -0.425325, 0.688191), - (0.688191, -0.587785, 0.425325), - (-0.955423, 0.295242, 0.000000), - (-0.951056, 0.162460, 0.262866), - (-1.000000, 0.000000, 0.000000), - (-0.850651, 0.000000, 0.525731), - (-0.955423, -0.295242, 0.000000), - (-0.951056, -0.162460, 0.262866), - (-0.864188, 0.442863, -0.238856), - (-0.951056, 0.162460, -0.262866), - (-0.809017, 0.309017, -0.500000), - (-0.864188, -0.442863, -0.238856), - (-0.951056, -0.162460, -0.262866), - (-0.809017, -0.309017, -0.500000), - (-0.681718, 0.147621, -0.716567), - (-0.681718, -0.147621, -0.716567), - (-0.850651, 0.000000, -0.525731), - (-0.688191, 0.587785, -0.425325), - (-0.587785, 0.425325, -0.688191), - (-0.425325, 0.688191, -0.587785), - (-0.425325, -0.688191, -0.587785), - (-0.587785, -0.425325, -0.688191), - (-0.688191, -0.587785, -0.425325)) - - -###################################################### -# MD2 
data structures -###################################################### -class md2_point: - vertices=[] - lightnormalindex=0 - binary_format="<3BB" - def __init__(self): - self.vertices=[0]*3 - self.lightnormalindex=0 - def save(self, file): - temp_data=[0]*4 - temp_data[0]=self.vertices[0] - temp_data[1]=self.vertices[1] - temp_data[2]=self.vertices[2] - temp_data[3]=self.lightnormalindex - data=struct.pack(self.binary_format, temp_data[0], temp_data[1], temp_data[2], temp_data[3]) - file.write(data) - def dump(self): - print "MD2 Point Structure" - print "vertex X: ", self.vertices[0] - print "vertex Y: ", self.vertices[1] - print "vertex Z: ", self.vertices[2] - print "lightnormalindex: ",self.lightnormalindex - print "" - -class md2_face: - vertex_index=[] - texture_index=[] - binary_format="<3h3h" - def __init__(self): - self.vertex_index = [ 0, 0, 0 ] - self.texture_index = [ 0, 0, 0] - def save(self, file): - temp_data=[0]*6 - #swap vertices around so they draw right - temp_data[0]=self.vertex_index[0] - temp_data[1]=self.vertex_index[2] - temp_data[2]=self.vertex_index[1] - #swap texture vertices around so they draw right - temp_data[3]=self.texture_index[0] - temp_data[4]=self.texture_index[2] - temp_data[5]=self.texture_index[1] - data=struct.pack(self.binary_format,temp_data[0],temp_data[1],temp_data[2],temp_data[3],temp_data[4],temp_data[5]) - file.write(data) - def dump (self): - print "MD2 Face Structure" - print "vertex 1 index: ", self.vertex_index[0] - print "vertex 2 index: ", self.vertex_index[1] - print "vertex 3 index: ", self.vertex_index[2] - print "texture 1 index: ", self.texture_index[0] - print "texture 2 index: ", self.texture_index[1] - print "texture 3 index: ", self.texture_index[2] - print "" - -class md2_tex_coord: - u=0 - v=0 - binary_format="<2h" - def __init__(self): - self.u=0 - self.v=0 - def save(self, file): - temp_data=[0]*2 - temp_data[0]=self.u - temp_data[1]=self.v - data=struct.pack(self.binary_format, temp_data[0], temp_data[1]) - file.write(data) - def dump (self): - print "MD2 Texture Coordinate Structure" - print "texture coordinate u: ",self.u - print "texture coordinate v: ",self.v - print "" - -class md2_GL_command: - s=0.0 - t=0.0 - vert_index=0 - binary_format="<2fi" - - def __init__(self): - self.s=0.0 - self.t=0.0 - vert_index=0 - def save(self,file): - temp_data=[0]*3 - temp_data[0]=float(self.s) - temp_data[1]=float(self.t) - temp_data[2]=self.vert_index - data=struct.pack(self.binary_format, temp_data[0],temp_data[1],temp_data[2]) - file.write(data) - def dump (self): - print "MD2 OpenGL Command" - print "s: ", self.s - print "t: ", self.t - print "Vertex Index: ", self.vert_index - print "" - -class md2_GL_cmd_list: - num=0 - cmd_list=[] - binary_format="MD2_MAX_TRIANGLES: - print "Number of triangles exceeds MD2 standard: ", face_count,">",MD2_MAX_TRIANGLES - result=Blender.Draw.PupMenu("Number of triangles exceeds MD2 standard: Continue?%t|YES|NO") - if(result==2): - return False - if vert_count>MD2_MAX_VERTICES: - print "Number of verticies exceeds MD2 standard",vert_count,">",MD2_MAX_VERTICES - result=Blender.Draw.PupMenu("Number of verticies exceeds MD2 standard: Continue?%t|YES|NO") - if(result==2): - return False - if frame_count>MD2_MAX_FRAMES: - print "Number of frames exceeds MD2 standard of",frame_count,">",MD2_MAX_FRAMES - result=Blender.Draw.PupMenu("Number of frames exceeds MD2 standard: Continue?%t|YES|NO") - if(result==2): - return False - #model is OK - return True - 
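The md2_point and md2_face classes above each serialize themselves with a fixed little-endian struct format: "<3BB" packs a compressed vertex as three unsigned bytes plus a light-normal index, and "<3h3h" packs three vertex indices and three texture-coordinate indices as signed shorts (with save() swapping the second and third entries to fix the winding order). A minimal sketch of the same packing outside the class wrappers, with made-up values:

import struct

# one compressed MD2 vertex: 3 unsigned bytes + light-normal index, "<3BB"
point = struct.pack("<3BB", 12, 200, 37, 5)

# one triangle record: 3 vertex indices + 3 texture-coordinate indices, "<3h3h"
face = struct.pack("<3h3h", 0, 2, 1, 0, 2, 1)

print("%d %d" % (len(point), len(face)))  # 4 and 12 bytes per record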
-###################################################### -# Fill MD2 data structure -###################################################### -def fill_md2(md2, object): - #global defines - global user_frame_list - global g_texture_path - - Blender.Window.DrawProgressBar(0.25,"Filling MD2 Data") - - #get a Mesh, not NMesh - mesh=object.getData(False, True) - #don't forget to copy the data! -- Boris van Schooten - mesh=mesh.__copy__(); - #load up some intermediate data structures - tex_list={} - tex_count=0 - #create the vertex list from the first frame - Blender.Set("curframe", 1) - - has_uvs = mesh.faceUV - - #header information - md2.ident=844121161 - md2.version=8 - md2.num_vertices=len(mesh.verts) - md2.num_faces=len(mesh.faces) - - #get the skin information - #use the first faces' image for the texture information - if has_uvs: - mesh_image=mesh.faces[0].image - try: size=mesh_image.getSize() - except: size= 256,256 - - md2.skin_width=size[0] - md2.skin_height=size[1] - md2.num_skins=1 - #add a skin node to the md2 data structure - md2.skins.append(md2_skin()) - md2.skins[0].name=g_texture_path.val+Blender.sys.basename(mesh_image.getFilename()) - if len(md2.skins[0].name)>64: - print "Texture Path and name is more than 64 characters" - result=Blender.Draw.PupMenu("Texture path and name is more than 64 characters-Quitting") - return False - - #put texture information in the md2 structure - #build UV coord dictionary (prevents double entries-saves space) - if not has_uvs: - t=(0,0) - - for face in mesh.faces: - for i in xrange(0,3): - if has_uvs: - t=(face.uv[i]) - - tex_key=(t[0],t[1]) - if not tex_list.has_key(tex_key): - tex_list[tex_key]=tex_count - tex_count+=1 - md2.num_tex_coords=tex_count #each vert has its own UV coord - - for this_tex in xrange (0, md2.num_tex_coords): - md2.tex_coords.append(md2_tex_coord()) - for coord, index in tex_list.iteritems(): - #md2.tex_coords.append(md2_tex_coord()) - md2.tex_coords[index].u=int(coord[0]*md2.skin_width) - md2.tex_coords[index].v=int((1-coord[1])*md2.skin_height) - - #put faces in the md2 structure - #for each face in the model - - if not has_uvs: - uv_coords=[(0,0)]*3 - - for this_face in xrange(0, md2.num_faces): - md2.faces.append(md2_face()) - mf = mesh.faces[this_face] - mf_v = mf.v - if has_uvs: - uv_coords = mf.uv - - for i in xrange(0,3): - #blender uses indexed vertexes so this works very well - md2.faces[this_face].vertex_index[i] = mf_v[i].index - #lookup texture index in dictionary - if has_uvs: - uv_coord = uv_coords[i] - # otherwise we set it before - - tex_key=(uv_coord[0],uv_coord[1]) - tex_index=tex_list[tex_key] - md2.faces[this_face].texture_index[i]=tex_index - - Blender.Window.DrawProgressBar(0.5, "Computing GL Commands") - - #compute GL commands - md2.num_GL_commands=build_GL_commands(md2, mesh) - - #get the frame data - #calculate 1 frame size + (1 vert size*num_verts) - md2.frame_size=40+(md2.num_vertices*4) #in bytes - - #get the frame list - user_frame_list=get_frame_list() - if user_frame_list=="default": - md2.num_frames=198 - else: - temp=user_frame_list[len(user_frame_list)-1] #last item - md2.num_frames=temp[2] #last frame number - - - progress=0.5 - progressIncrement=0.25/md2.num_frames - - #fill in each frame with frame info and all the vertex data for that frame - for frame_counter in xrange(0,md2.num_frames): - - progress+=progressIncrement - Blender.Window.DrawProgressBar(progress, "Calculating Frame: %d of %d" % (frame_counter, md2.num_frames)) - - #add a frame - md2.frames.append(md2_frame()) - 
#update the mesh objects vertex positions for the animation - Blender.Set("curframe", frame_counter) #set blender to the correct frame - - - - - mesh.getFromObject(object) #update the mesh to make verts current - mesh.transform(object.matrixWorld) - -#each frame has a scale and transform value that gets the vertex value between 0-255 -#since the scale and transform are the same for the all the verts in the frame, we only need -#to figure this out once per frame - - #we need to start with the bounding box - #bounding_box=object.getBoundBox() #uses the object, not the mesh data - #initialize with the first vertex for both min and max. X and Y are swapped for MD2 format - - #initialize - frame_min_x=100000.0 - frame_max_x=-100000.0 - frame_min_y=100000.0 - frame_max_y=-100000.0 - frame_min_z=100000.0 - frame_max_z=-100000.0 - - for face in mesh.faces: - for vert in face: - co = vert.co - if frame_min_x>co[1]: frame_min_x=co[1] - if frame_max_xco[0]: frame_min_y=co[0] - if frame_max_yco[2]: frame_min_z=co[2] - if frame_max_z maxdot): - maxdot = dot; - maxdotindex = j; - - # See patch [#19206], gives good info on this line below. - md2.frames[frame_counter].vertices[vert_counter].lightnormalindex=maxdotindex - - del maxdot, maxdotindex - del new_x, new_y, new_z - del frame_max_x, frame_max_y, frame_max_z, frame_min_x, frame_min_y, frame_min_z - del frame_scale_x, frame_scale_y, frame_scale_z, frame_trans_x, frame_trans_y, frame_trans_z - - - #output all the frame names-user_frame_list is loaded during the validation - for frame_set in user_frame_list: - for counter in xrange(frame_set[1]-1, frame_set[2]): - md2.frames[counter].name=frame_set[0]+"_"+str(counter-frame_set[1]+2) - - #compute these after everthing is loaded into a md2 structure - header_size=17*4 #17 integers, and each integer is 4 bytes - skin_size=64*md2.num_skins #64 char per skin * number of skins - tex_coord_size=4*md2.num_tex_coords #2 short * number of texture coords - face_size=12*md2.num_faces #3 shorts for vertex index, 3 shorts for tex index - frames_size=(((12+12+16)+(4*md2.num_vertices)) * md2.num_frames) #frame info+verts per frame*num frames - GL_command_size=md2.num_GL_commands*4 #each is an int or float, so 4 bytes per - - #fill in the info about offsets - md2.offset_skins=0+header_size - md2.offset_tex_coords=md2.offset_skins+skin_size - md2.offset_faces=md2.offset_tex_coords+tex_coord_size - md2.offset_frames=md2.offset_faces+face_size - md2.offset_GL_commands=md2.offset_frames+frames_size - md2.offset_end=md2.offset_GL_commands+GL_command_size - -###################################################### -# Get Frame List -###################################################### -def get_frame_list(): - global g_frame_filename - frame_list=[] - - if g_frame_filename.val=="default": - return MD2_FRAME_NAME_LIST - - else: - #check for file - if (Blender.sys.exists(g_frame_filename.val)==1): - #open file and read it in - file=open(g_frame_filename.val,"r") - lines=file.readlines() - file.close() - - #check header (first line) - if lines[0].strip() != "# MD2 Frame Name List": - print "its not a valid file" - result=Blender.Draw.PupMenu("This is not a valid frame definition file-using default%t|OK") - return MD2_FRAME_NAME_LIST - else: - #read in the data - num_frames=0 - for counter in xrange(1, len(lines)): - current_line=lines[counter].strip() - if current_line[0]=="#": - #found a comment - pass - else: - data=current_line.split() - frame_list.append([data[0],num_frames+1, num_frames+int(data[1])]) - 
num_frames+=int(data[1]) - return frame_list - else: - print "Cannot find file" - result=Blender.Draw.PupMenu("Cannot find frame definion file-using default%t|OK") - return MD2_FRAME_NAME_LIST - -###################################################### -# Globals for GL command list calculations -###################################################### -used_tris=[] -edge_dict={} -strip_verts=[] -strip_st=[] -strip_tris=[] -strip_first_run=True -odd=False - -###################################################### -# Find Strip length function -###################################################### -def find_strip_length(mesh, start_tri, edge_key): - #print "Finding strip length" - - global used_tris - global edge_dict - global strip_tris - global strip_st - global strip_verts - global strip_first_run - global odd - - used_tris[start_tri]=2 - - strip_tris.append(start_tri) #add this tri to the potential list of tri-strip - - #print "I am face: ", start_tri - #print "Using edge Key: ", edge_key - - faces=edge_dict[edge_key] #get list of face indexes that share this edge - if (len(faces)==0): - #print "Cant find edge with key: ", edge_key - pass - - #print "Faces sharing this edge: ", faces - for face_index in faces: - face=mesh.faces[face_index] - if face_index==start_tri: #don't want to check myself - #print "I found myself, continuing" - pass - else: - if used_tris[face_index]!=0: #found a used tri-move along - #print "Found a used tri: ", face_index - pass - else: - #find non-shared vert - for vert_counter in xrange(0,3): - if (face.verts[vert_counter].index!=edge_key[0] and face.verts[vert_counter].index!=edge_key[1]): - next_vert=vert_counter - - if(odd==False): - #print "Found a suitable even connecting tri: ", face_index - used_tris[face_index]=2 #mark as dirty for this rum - odd=True - - #find the new edge - if(face.verts[next_vert].index < face.verts[(next_vert+2)%3].index): - temp_key=(face.verts[next_vert].index,face.verts[(next_vert+2)%3].index) - else: - temp_key=(face.verts[(next_vert+2)%3].index, face.verts[next_vert].index) - - #print "temp key: ", temp_key - temp_faces=edge_dict[temp_key] - - if(len(temp_faces)==0): - print "Can't find any other faces with key: ", temp_key - else: - #search the new edge - #print "found other faces, searching them" - find_strip_length(mesh, face_index, temp_key) #recursive greedy-takes first tri it finds as best - break; - else: - #print "Found a suitable odd connecting tri: ", face_index - used_tris[face_index]=2 #mark as dirty for this rum - odd=False - - #find the new edge - if(face.verts[next_vert].index < face.verts[(next_vert+1)%3].index): - temp_key=(face.verts[next_vert].index,face.verts[(next_vert+1)%3].index) - else: - temp_key=(face.verts[(next_vert+1)%3].index, face.verts[next_vert].index) - #print "temp key: ", temp_key - temp_faces=edge_dict[temp_key] - if(len(temp_faces)==0): - print "Can't find any other faces with key: ", temp_key - else: - #search the new edge - #print "found other faces, searching them" - find_strip_length(mesh, face_index, temp_key) #recursive greedy-takes first tri it finds as best - break; - - return len(strip_tris) - - -###################################################### -# Tri-Stripify function -###################################################### -def stripify_tri_list(mesh, edge_key): - global edge_dict - global strip_tris - global strip_st - global strip_verts - - shared_edge=[] - key=[] - - #print "*****Stripify the triangle list*******" - #print "strip tris: ", strip_tris - #print "strip_tri 
length: ", len(strip_tris) - - for tri_counter in xrange(0, len(strip_tris)): - face=mesh.faces[strip_tris[tri_counter]] - if (tri_counter==0): #first one only - #find non-edge vert - for vert_counter in xrange(0,3): - if (face.verts[vert_counter].index!=edge_key[0] and face.verts[vert_counter].index!=edge_key[1]): - start_vert=vert_counter - strip_verts.append(face.verts[start_vert].index) - strip_st.append(face.uv[start_vert]) - - strip_verts.append(face.verts[(start_vert+2)%3].index) - strip_st.append(face.uv[(start_vert+2)%3]) - - strip_verts.append(face.verts[(start_vert+1)%3].index) - strip_st.append(face.uv[(start_vert+1)%3]) - else: - for vert_counter in xrange(0,3): - if(face.verts[vert_counter].index!=strip_verts[-1] and face.verts[vert_counter].index!=strip_verts[-2]): - strip_verts.append(face.verts[vert_counter].index) - strip_st.append(face.uv[vert_counter]) - break - - - -###################################################### -# Build GL command List -###################################################### -def build_GL_commands(md2, mesh): - # we can't output gl command structure without uv - if not mesh.faceUV: - print "No UV: not building GL Commands" - return 0 - - print "Building GL Commands" - - global used_tris - global edge_dict - global strip_verts - global strip_tris - global strip_st - - #globals initialization - used_tris=[0]*len(mesh.faces) - #print "Used: ", used_tris - num_commands=0 - - #edge dictionary generation - edge_dict=dict([(ed.key,[]) for ed in mesh.edges]) - for face in (mesh.faces): - for key in face.edge_keys: - edge_dict[key].append(face.index) - - #print "edge Dict: ", edge_dict - - for tri_counter in xrange(0,len(mesh.faces)): - if used_tris[tri_counter]!=0: - #print "Found a used triangle: ", tri_counter - pass - else: - #print "Found an unused triangle: ", tri_counter - - #intialization - strip_tris=[0]*0 - strip_verts=[0]*0 - strip_st=[0]*0 - strip_first_run=True - odd=True - - #find the strip length - strip_length=find_strip_length(mesh, tri_counter, mesh.faces[tri_counter].edge_keys[0]) - - #mark tris as used - for used_counter in xrange(0,strip_length): - used_tris[strip_tris[used_counter]]=1 - - stripify_tri_list(mesh, mesh.faces[tri_counter].edge_keys[0]) - - #create command list - cmd_list=md2_GL_cmd_list() - #number of commands in this list - print "strip length: ", strip_length - cmd_list.num=(len(strip_tris)+2) #positive for strips, fans would be negative, but not supported yet - num_commands+=1 - - #add s,t,vert for this command list - for command_counter in xrange(0, len(strip_tris)+2): - cmd=md2_GL_command() - cmd.s=strip_st[command_counter][0] - cmd.t=1.0-strip_st[command_counter][1] #flip upside down - cmd.vert_index=strip_verts[command_counter] - num_commands+=3 - cmd_list.cmd_list.append(cmd) - print "Cmd List length: ", len(cmd_list.cmd_list) - print "Cmd list num: ", cmd_list.num - print "Cmd List: ", cmd_list.dump() - md2.GL_commands.append(cmd_list) - - #add the null command at the end - temp_cmdlist=md2_GL_cmd_list() - temp_cmdlist.num=0 - md2.GL_commands.append(temp_cmdlist) - num_commands+=1 - - #cleanup and return - used=strip_vert=strip_st=strip_tris=0 - return num_commands - - - - -###################################################### -# Save MD2 Format -###################################################### -def save_md2(filename): - print "" - print "***********************************" - print "MD2 Export" - print "***********************************" - print "" - - Blender.Window.DrawProgressBar(0.0,"Begining 
MD2 Export") - - md2=md2_obj() #blank md2 object to save - - #get the object - mesh_objs = Blender.Object.GetSelected() - - #check there is a blender object selected - if len(mesh_objs)==0: - print "Fatal Error: Must select a mesh to output as MD2" - print "Found nothing" - result=Blender.Draw.PupMenu("Must select an object to export%t|OK") - return - - mesh_obj=mesh_objs[0] #this gets the first object (should be only one) - - #check if it's a mesh object - if mesh_obj.getType()!="Mesh": - print "Fatal Error: Must select a mesh to output as MD2" - print "Found: ", mesh_obj.getType() - result=Blender.Draw.PupMenu("Selected Object must be a mesh to output as MD2%t|OK") - return - - ok=validation(mesh_obj) - if ok==False: - return - - fill_md2(md2, mesh_obj) - md2.dump() - - Blender.Window.DrawProgressBar(1.0, "Writing to Disk") - - #actually write it to disk - file=open(filename,"wb") - md2.save(file) - file.close() - - #cleanup - md2=0 - - print "Closed the file" - diff --git a/release/scripts/md2_import.py b/release/scripts/md2_import.py deleted file mode 100644 index f52746259a6..00000000000 --- a/release/scripts/md2_import.py +++ /dev/null @@ -1,600 +0,0 @@ -#!BPY - -""" -Name: 'MD2 (.md2)' -Blender: 239 -Group: 'Import' -Tooltip: 'Import from Quake file format (.md2).' -""" - -__author__ = 'Bob Holcomb' -__version__ = '0.16' -__url__ = ["Bob's site, http://bane.servebeer.com", - "Support forum, http://scourage.servebeer.com/phpbb/", "blender", "blenderartists.org"] -__email__ = ["Bob Holcomb, bob_holcomb:hotmail*com", "scripts"] -__bpydoc__ = """\ -This script imports a Quake 2 file (MD2), textures, -and animations into blender for editing. Loader is based on MD2 loader from www.gametutorials.com-Thanks DigiBen! and the md3 blender loader by PhaethonH
- - Additional help from: Shadwolf, Skandal, Rojo and Campbell Barton
- Thanks Guys! -""" - -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Script copyright (C) Bob Holcomb -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - - -import Blender -from Blender import Mesh, Object, sys -from Blender.BGL import * -from Blender.Draw import * -from Blender.Window import * -from Blender.Mathutils import Vector -import struct -from types import * - - -###################################################### -# Main Body -###################################################### - -#returns the string from a null terminated string -def asciiz (s): - n = 0 - while (ord(s[n]) != 0): - n = n + 1 - return s[0:n] - - -###################################################### -# MD2 Model Constants -###################################################### -MD2_MAX_TRIANGLES=4096 -MD2_MAX_VERTICES=2048 -MD2_MAX_TEXCOORDS=2048 -MD2_MAX_FRAMES=512 -MD2_MAX_SKINS=32 -MD2_MAX_FRAMESIZE=(MD2_MAX_VERTICES * 4 + 128) - -###################################################### -# MD2 data structures -###################################################### -class md2_alias_triangle(object): - __slots__ = 'vertices', 'lightnormalindex' - binary_format="<3BB" #little-endian (<), 3 Unsigned char - - def __init__(self): - self.vertices=[0]*3 - self.lightnormalindex=0 - - def load(self, file): - temp_data = file.read(struct.calcsize(self.binary_format)) - data = struct.unpack(self.binary_format, temp_data) - self.vertices[0]=data[0] - self.vertices[1]=data[1] - self.vertices[2]=data[2] - self.lightnormalindex=data[3] - return self - - def dump(self): - print "MD2 Alias_Triangle Structure" - print "vertex: ", self.vertices[0] - print "vertex: ", self.vertices[1] - print "vertex: ", self.vertices[2] - print "lightnormalindex: ",self.lightnormalindex - print "" - -class md2_face(object): - - binary_format="<3h3h" #little-endian (<), 3 short, 3 short - - __slots__ = 'vertex_index', 'texture_index' - - def __init__(self): - self.vertex_index = [ 0, 0, 0 ] - self.texture_index = [ 0, 0, 0] - - def load (self, file): - temp_data=file.read(struct.calcsize(self.binary_format)) - data=struct.unpack(self.binary_format, temp_data) - self.vertex_index[0]=data[0] - self.vertex_index[1]=data[1] - self.vertex_index[2]=data[2] - self.texture_index[0]=data[3] - self.texture_index[1]=data[4] - self.texture_index[2]=data[5] - return self - - def dump (self): - print "MD2 Face Structure" - print "vertex index: ", self.vertex_index[0] - print "vertex index: ", self.vertex_index[1] - print "vertex index: ", self.vertex_index[2] - print "texture index: ", self.texture_index[0] - print "texture index: ", self.texture_index[1] - print "texture index: ", self.texture_index[2] - print "" - -class md2_tex_coord(object): - __slots__ = 'u', 'v' - binary_format="<2h" 
#little-endian (<), 2 unsigned short - - def __init__(self): - self.u=0 - self.v=0 - - def load (self, file): - temp_data=file.read(struct.calcsize(self.binary_format)) - data=struct.unpack(self.binary_format, temp_data) - self.u=data[0] - self.v=data[1] - return self - - def dump (self): - print "MD2 Texture Coordinate Structure" - print "texture coordinate u: ",self.u - print "texture coordinate v: ",self.v - print "" - - -class md2_skin(object): - __slots__ = 'name' - binary_format="<64s" #little-endian (<), char[64] - - def __init__(self): - self.name="" - - def load (self, file): - temp_data=file.read(struct.calcsize(self.binary_format)) - data=struct.unpack(self.binary_format, temp_data) - self.name=asciiz(data[0]) - return self - - def dump (self): - print "MD2 Skin" - print "skin name: ",self.name - print "" - -class md2_alias_frame(object): - __slots__ = 'scale', 'translate', 'name', 'vertices' - binary_format="<3f3f16s" #little-endian (<), 3 float, 3 float char[16] - #did not add the "3bb" to the end of the binary format - #because the alias_vertices will be read in through - #thier own loader - - def __init__(self): - self.scale=[0.0]*3 - self.translate=[0.0]*3 - self.name="" - self.vertices=[] - - - def load (self, file): - temp_data=file.read(struct.calcsize(self.binary_format)) - data=struct.unpack(self.binary_format, temp_data) - self.scale[0]=data[0] - self.scale[1]=data[1] - self.scale[2]=data[2] - self.translate[0]=data[3] - self.translate[1]=data[4] - self.translate[2]=data[5] - self.name=asciiz(data[6]) - return self - - def dump (self): - print "MD2 Alias Frame" - print "scale x: ",self.scale[0] - print "scale y: ",self.scale[1] - print "scale z: ",self.scale[2] - print "translate x: ",self.translate[0] - print "translate y: ",self.translate[1] - print "translate z: ",self.translate[2] - print "name: ",self.name - print "" - -class md2_obj(object): - __slots__ =\ - 'tex_coords', 'faces', 'frames',\ - 'skins', 'ident', 'version',\ - 'skin_width', 'skin_height',\ - 'frame_size', 'num_skins', 'num_vertices',\ - 'num_tex_coords', 'num_faces', 'num_GL_commands',\ - 'num_frames', 'offset_skins', 'offset_tex_coords',\ - 'offset_faces', 'offset_frames', 'offset_GL_commands' - - ''' - #Header Structure - ident=0 #int 0 This is used to identify the file - version=0 #int 1 The version number of the file (Must be 8) - skin_width=0 #int 2 The skin width in pixels - skin_height=0 #int 3 The skin height in pixels - frame_size=0 #int 4 The size in bytes the frames are - num_skins=0 #int 5 The number of skins associated with the model - num_vertices=0 #int 6 The number of vertices (constant for each frame) - num_tex_coords=0 #int 7 The number of texture coordinates - num_faces=0 #int 8 The number of faces (polygons) - num_GL_commands=0 #int 9 The number of gl commands - num_frames=0 #int 10 The number of animation frames - offset_skins=0 #int 11 The offset in the file for the skin data - offset_tex_coords=0 #int 12 The offset in the file for the texture data - offset_faces=0 #int 13 The offset in the file for the face data - offset_frames=0 #int 14 The offset in the file for the frames data - offset_GL_commands=0#int 15 The offset in the file for the gl commands data - offset_end=0 #int 16 The end of the file offset - ''' - binary_format="<17i" #little-endian (<), 17 integers (17i) - - #md2 data objects - - def __init__ (self): - self.tex_coords=[] - self.faces=[] - self.frames=[] - self.skins=[] - - - def load (self, file): - temp_data = file.read(struct.calcsize(self.binary_format)) 
- data = struct.unpack(self.binary_format, temp_data) - - self.ident=data[0] - self.version=data[1] - - if (self.ident!=844121161 or self.version!=8): - print "Not a valid MD2 file" - Exit() - - self.skin_width=data[2] - self.skin_height=data[3] - self.frame_size=data[4] - - #make the # of skin objects for model - self.num_skins=data[5] - for i in xrange(0,self.num_skins): - self.skins.append(md2_skin()) - - self.num_vertices=data[6] - - #make the # of texture coordinates for model - self.num_tex_coords=data[7] - for i in xrange(0,self.num_tex_coords): - self.tex_coords.append(md2_tex_coord()) - - #make the # of triangle faces for model - self.num_faces=data[8] - for i in xrange(0,self.num_faces): - self.faces.append(md2_face()) - - self.num_GL_commands=data[9] - - #make the # of frames for the model - self.num_frames=data[10] - for i in xrange(0,self.num_frames): - self.frames.append(md2_alias_frame()) - #make the # of vertices for each frame - for j in xrange(0,self.num_vertices): - self.frames[i].vertices.append(md2_alias_triangle()) - - self.offset_skins=data[11] - self.offset_tex_coords=data[12] - self.offset_faces=data[13] - self.offset_frames=data[14] - self.offset_GL_commands=data[15] - - #load the skin info - file.seek(self.offset_skins,0) - for i in xrange(0, self.num_skins): - self.skins[i].load(file) - #self.skins[i].dump() - - #load the texture coordinates - file.seek(self.offset_tex_coords,0) - for i in xrange(0, self.num_tex_coords): - self.tex_coords[i].load(file) - #self.tex_coords[i].dump() - - #load the face info - file.seek(self.offset_faces,0) - for i in xrange(0, self.num_faces): - self.faces[i].load(file) - #self.faces[i].dump() - - #load the frames - file.seek(self.offset_frames,0) - for i in xrange(0, self.num_frames): - self.frames[i].load(file) - #self.frames[i].dump() - for j in xrange(0,self.num_vertices): - self.frames[i].vertices[j].load(file) - #self.frames[i].vertices[j].dump() - return self - - def dump (self): - print "Header Information" - print "ident: ", self.ident - print "version: ", self.version - print "skin width: ", self.skin_width - print "skin height: ", self.skin_height - print "frame size: ", self.frame_size - print "number of skins: ", self.num_skins - print "number of texture coordinates: ", self.num_tex_coords - print "number of faces: ", self.num_faces - print "number of frames: ", self.num_frames - print "number of vertices: ", self.num_vertices - print "offset skins: ", self.offset_skins - print "offset texture coordinates: ", self.offset_tex_coords - print "offset faces: ", self.offset_faces - print "offset frames: ",self.offset_frames - print "" - -###################################################### -# Import functions -###################################################### -def load_textures(md2, texture_filename): - #did the user specify a texture they wanted to use? - if texture_filename: - if (Blender.sys.exists(texture_filename)): - try: return Blender.Image.Load(texture_filename) - except: return -1 # could not load? - - #does the model have textures specified with it? 
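md2_obj.load() above unpacks the 17-integer header ("<17i"), rejects any file whose ident is not 844121161 or whose version is not 8, and then seeks to each data block through the stored offsets. That magic number is simply the string "IDP2" read as a little-endian integer. Below is a minimal standalone sketch of the header check, following the field order documented in the commented header structure above; the helper name and example path are hypothetical.

import struct

def read_md2_header(path):
    f = open(path, "rb")
    try:
        fields = struct.unpack("<17i", f.read(struct.calcsize("<17i")))
    finally:
        f.close()
    if fields[0] != 844121161 or fields[1] != 8:  # 844121161 == little-endian "IDP2"
        raise ValueError("not a valid MD2 file")
    names = ("ident", "version", "skin_width", "skin_height", "frame_size",
             "num_skins", "num_vertices", "num_tex_coords", "num_faces",
             "num_GL_commands", "num_frames", "offset_skins",
             "offset_tex_coords", "offset_faces", "offset_frames",
             "offset_GL_commands", "offset_end")
    return dict(zip(names, fields))

# header = read_md2_header("tris.md2")  # hypothetical path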
- if int(md2.num_skins) > 0: - for i in xrange(0,md2.num_skins): - #md2.skins[i].dump() - if (Blender.sys.exists(md2.skins[i].name)): - try: return Blender.Image.Load(md2.skins[i].name) - except: return -1 - - -def animate_md2(md2, mesh): - ######### Animate the verts through keyframe animation - - # Fast access to the meshes vertex coords - verts = [v.co for v in mesh.verts] - scale = g_scale.val - - for i in xrange(1, md2.num_frames): - frame = md2.frames[i] - #update the vertices - for j in xrange(md2.num_vertices): - x=(frame.scale[0] * frame.vertices[j].vertices[0] + frame.translate[0]) * scale - y=(frame.scale[1] * frame.vertices[j].vertices[1] + frame.translate[1]) * scale - z=(frame.scale[2] * frame.vertices[j].vertices[2] + frame.translate[2]) * scale - - #put the vertex in the right spot - verts[j][:] = y,-x,z - - mesh.insertKey(i,"absolute") - # mesh.insertKey(i) - - #not really necissary, but I like playing with the frame counter - Blender.Set("curframe", i) - - - # Make the keys animate in the 3d view. - key = mesh.key - key.relative = False - - # Add an IPO to teh Key - ipo = Blender.Ipo.New('Key', 'md2') - key.ipo = ipo - # Add a curve to the IPO - curve = ipo.addCurve('Basis') - - # Add 2 points to cycle through the frames. - curve.append((1, 0)) - curve.append((md2.num_frames, (md2.num_frames-1)/10.0)) - curve.interpolation = Blender.IpoCurve.InterpTypes.LINEAR - - - -def load_md2(md2_filename, texture_filename): - #read the file in - file=open(md2_filename,"rb") - WaitCursor(1) - DrawProgressBar(0.0, 'Loading MD2') - md2=md2_obj() - md2.load(file) - #md2.dump() - file.close() - - ######### Creates a new mesh - mesh = Mesh.New() - - uv_coord=[] - #uv_list=[] - verts_extend = [] - #load the textures to use later - #-1 if there is no texture to load - mesh_image=load_textures(md2, texture_filename) - if mesh_image == -1 and texture_filename: - print 'MD2 Import, Warning, texture "%s" could not load' - - ######### Make the verts - DrawProgressBar(0.25,"Loading Vertex Data") - frame = md2.frames[0] - scale = g_scale.val - - def tmp_get_vertex(i): - #use the first frame for the mesh vertices - x=(frame.scale[0]*frame.vertices[i].vertices[0]+frame.translate[0])*scale - y=(frame.scale[1]*frame.vertices[i].vertices[1]+frame.translate[1])*scale - z=(frame.scale[2]*frame.vertices[i].vertices[2]+frame.translate[2])*scale - return y,-x,z - - mesh.verts.extend( [tmp_get_vertex(i) for i in xrange(0,md2.num_vertices)] ) - del tmp_get_vertex - - ######## Make the UV list - DrawProgressBar(0.50,"Loading UV Data") - - w = float(md2.skin_width) - h = float(md2.skin_height) - if w <= 0.0: w = 1.0 - if h <= 0.0: h = 1.0 - #for some reason quake2 texture maps are upside down, flip that - uv_list = [Vector(co.u/w, 1-(co.v/h)) for co in md2.tex_coords] - del w, h - - ######### Make the faces - DrawProgressBar(0.75,"Loading Face Data") - faces = [] - face_uvs = [] - for md2_face in md2.faces: - f = md2_face.vertex_index[0], md2_face.vertex_index[2], md2_face.vertex_index[1] - uv = uv_list[md2_face.texture_index[0]], uv_list[md2_face.texture_index[2]], uv_list[md2_face.texture_index[1]] - - if f[2] == 0: - # EEKADOODLE :/ - f= f[1], f[2], f[0] - uv= uv[1], uv[2], uv[0] - - #ditto in reverse order with the texture verts - faces.append(f) - face_uvs.append(uv) - - - face_mapping = mesh.faces.extend(faces, indexList=True) - print len(faces) - print len(mesh.faces) - mesh.faceUV= True #turn on face UV coordinates for this mesh - mesh_faces = mesh.faces - for i, uv in enumerate(face_uvs): - if 
face_mapping[i] != None: - f = mesh_faces[face_mapping[i]] - f.uv = uv - if (mesh_image!=-1): - f.image=mesh_image - - scn= Blender.Scene.GetCurrent() - mesh_obj= scn.objects.new(mesh) - animate_md2(md2, mesh) - DrawProgressBar(0.98,"Loading Animation Data") - - #locate the Object containing the mesh at the cursor location - cursor_pos=Blender.Window.GetCursorPos() - mesh_obj.setLocation(float(cursor_pos[0]),float(cursor_pos[1]),float(cursor_pos[2])) - DrawProgressBar (1.0, "") - WaitCursor(0) - -#*********************************************** -# MAIN -#*********************************************** - -# Import globals -g_md2_filename=Create("*.md2") -#g_md2_filename=Create("/d/warvet/tris.md2") -g_texture_filename=Create('') -# g_texture_filename=Create("/d/warvet/warvet.jpg") - -g_filename_search=Create("*.md2") -g_texture_search=Create('') -# g_texture_search=Create("/d/warvet/warvet.jpg") - -#Globals -g_scale=Create(1.0) - -# Events -EVENT_NOEVENT=1 -EVENT_LOAD_MD2=2 -EVENT_CHOOSE_FILENAME=3 -EVENT_CHOOSE_TEXTURE=4 -EVENT_SAVE_MD2=5 -EVENT_EXIT=100 - -###################################################### -# Callbacks for Window functions -###################################################### -def filename_callback(input_filename): - global g_md2_filename - g_md2_filename.val=input_filename - -def texture_callback(input_texture): - global g_texture_filename - g_texture_filename.val=input_texture - -###################################################### -# GUI Loader -###################################################### - - -def draw_gui(): - global g_scale - global g_md2_filename - global g_texture_filename - global EVENT_NOEVENT,EVENT_LOAD_MD2,EVENT_CHOOSE_FILENAME,EVENT_CHOOSE_TEXTURE,EVENT_EXIT - - ########## Titles - glClear(GL_COLOR_BUFFER_BIT) - glRasterPos2d(8, 125) - Text("MD2 loader") - - ######### Parameters GUI Buttons - BeginAlign() - g_md2_filename = String("MD2 file to load: ", EVENT_NOEVENT, 10, 55, 210, 18, - g_md2_filename.val, 255, "MD2 file to load") - ########## MD2 File Search Button - Button("Browse",EVENT_CHOOSE_FILENAME,220,55,80,18) - EndAlign() - - BeginAlign() - g_texture_filename = String("Texture file to load: ", EVENT_NOEVENT, 10, 35, 210, 18, - g_texture_filename.val, 255, "Texture file to load-overrides MD2 file") - ########## Texture Search Button - Button("Browse",EVENT_CHOOSE_TEXTURE,220,35,80,18) - EndAlign() - - ########## Scale slider-default is 1/8 which is a good scale for md2->blender - g_scale= Slider("Scale Factor: ", EVENT_NOEVENT, 10, 75, 210, 18, - 1.0, 0.001, 10.0, 1, "Scale factor for obj Model"); - - ######### Draw and Exit Buttons - Button("Load",EVENT_LOAD_MD2 , 10, 10, 80, 18) - Button("Exit",EVENT_EXIT , 170, 10, 80, 18) - -def event(evt, val): - if (evt == QKEY and not val): - Blender.Draw.Exit() - -def bevent(evt): - global g_md2_filename - global g_texture_filename - global EVENT_NOEVENT,EVENT_LOAD_MD2,EVENT_SAVE_MD2,EVENT_EXIT - - ######### Manages GUI events - if (evt==EVENT_EXIT): - Blender.Draw.Exit() - elif (evt==EVENT_CHOOSE_FILENAME): - FileSelector(filename_callback, "MD2 File Selection") - elif (evt==EVENT_CHOOSE_TEXTURE): - FileSelector(texture_callback, "Texture Selection") - elif (evt==EVENT_LOAD_MD2): - if not Blender.sys.exists(g_md2_filename.val): - PupMenu('Model file does not exist') - return - else: - load_md2(g_md2_filename.val, g_texture_filename.val) - Blender.Redraw() - Blender.Draw.Exit() - return - -if __name__ == '__main__': - Register(draw_gui, event, bevent) diff --git 
a/release/scripts/mesh_boneweight_copy.py b/release/scripts/mesh_boneweight_copy.py deleted file mode 100644 index d2a477fbc0b..00000000000 --- a/release/scripts/mesh_boneweight_copy.py +++ /dev/null @@ -1,287 +0,0 @@ -#!BPY -""" -Name: 'Bone Weight Copy' -Blender: 245 -Group: 'Object' -Tooltip: 'Copy Bone Weights from 1 mesh, to all other selected meshes.' -""" - -__author__ = "Campbell Barton aka ideasman42" -__url__ = ["www.blender.org", "blenderartists.org", "www.python.org"] -__version__ = "0.1" -__bpydoc__ = """\ - -Bone Weight Copy - -This script is used to copy bone weights from 1 mesh with weights (the source mesh) to many (the target meshes). -Weights are copied from 1 mesh to another based on how close they are together. - -For normal operation, select 1 source mesh with vertex weights and any number of unweighted meshes that overlap the source mesh. -Then run this script using default options and check the new weigh. - - -A differnt way to use this script is to update the weights an an alredy weighted mesh. -this is done using the "Copy to Selected" option enabled and works a bit differently, -With the target mesh, select the verts you want to update. -since all meshes have weights we cant just use the weighted mesh as the source, -so the Active Object is used for the source mesh. -Run the script and the selected verts on all non active meshes will be updated. -""" - -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Script copyright (C) Campbell J Barton -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -import Blender -from Blender import Armature, Object, Mathutils, Window, Mesh -Vector= Mathutils.Vector -SMALL_NUM= 0.000001 -def copy_bone_influences(_from, _to, PREF_SEL_ONLY, PREF_NO_XCROSS): - ob_from, me_from, world_verts_from, from_groups= _from - ob_to, me_to, world_verts_to, dummy= _to - del dummy - - def getSnapIdx(seek_vec, vecs): - ''' - Returns the closest vec to snap_points - ''' - - # First seek the closest Z axis vert idx/v - seek_vec_x,seek_vec_y,seek_vec_z= seek_vec - - from_vec_idx= 0 - - len_vecs= len(vecs) - - upidx= len_vecs-1 - loidx= 0 - - while from_vec_idx < len_vecs and vecs[from_vec_idx][1].z < seek_vec_z: - from_vec_idx+=1 - - # Clamp if we overstepped. - if from_vec_idx >= len_vecs: - from_vec_idx-=1 - - close_dist= (vecs[from_vec_idx][1]-seek_vec).length - close_idx= vecs[from_vec_idx][0] - - upidx= from_vec_idx+1 - loidx= from_vec_idx-1 - - # Set uselo/useup. This means we can keep seeking up/down. - if upidx >= len_vecs: useup= False - else: useup= True - - if loidx < 0: uselo= False - else: uselo= True - - # Seek up/down to find the closest v to seek vec. 
- while uselo or useup: - if useup: - if upidx >= len_vecs: - useup= False - else: - i,v= vecs[upidx] - if (not PREF_NO_XCROSS) or ((v.x >= -SMALL_NUM and seek_vec_x >= -SMALL_NUM) or (v.x <= SMALL_NUM and seek_vec_x <= SMALL_NUM)): # enfoce xcrossing - if v.z-seek_vec_z > close_dist: - # the verticle distance is greater then the best distance sofar. we can stop looking up. - useup= False - elif abs(seek_vec_y-v.y) < close_dist and abs(seek_vec_x-v.x) < close_dist: - # This is in the limit measure it. - l= (seek_vec-v).length - if l= -SMALL_NUM and seek_vec_x >= -SMALL_NUM) or (v.x <= SMALL_NUM and seek_vec_x <= SMALL_NUM)): # enfoce xcrossing - if seek_vec_z-v.z > close_dist: - # the verticle distance is greater then the best distance sofar. we can stop looking up. - uselo= False - elif abs(seek_vec_y-v.y) < close_dist and abs(seek_vec_x-v.x) < close_dist: - # This is in the limit measure it. - l= (seek_vec-v).length - if l "%s" ' % (ob_from.name, ob_to.name)) - - from_idx= getSnapIdx(co, world_verts_from) - from_infs= me_from.getVertexInfluences(from_idx) - - for group, weight in from_infs: - - # Add where needed. - if PREF_SEL_ONLY and group not in to_groups: - me_to.addVertGroup(group) - to_groups.append(group) - - me_to.assignVertsToGroup(group, [i], weight, add_) - - me_to.update() - -# ZSORT return (i/co) tuples, used for fast seeking of the snapvert. -def worldspace_verts_idx(me, ob): - mat= ob.matrixWorld - verts_zsort= [ (i, v.co*mat) for i, v in enumerate(me.verts) ] - - # Sorts along the Z Axis so we can optimize the getsnap. - try: verts_zsort.sort(key = lambda a: a[1].z) - except: verts_zsort.sort(lambda a,b: cmp(a[1].z, b[1].z,)) - - return verts_zsort - - -def worldspace_verts(me, ob): - mat= ob.matrixWorld - return [ v.co*mat for v in me.verts ] - -def subdivMesh(me, subdivs): - oldmode = Mesh.Mode() - Mesh.Mode(Mesh.SelectModes['FACE']) - me.sel= 1 - for i in xrange(subdivs): - me.subdivide(0) - Mesh.Mode(oldmode) - - -def main(): - print '\nStarting BoneWeight Copy...' - scn= Blender.Scene.GetCurrent() - contextSel= Object.GetSelected() - if not contextSel: - Blender.Draw.PupMenu('Error%t|2 or more mesh objects need to be selected.|aborting.') - return - - PREF_QUALITY= Blender.Draw.Create(0) - PREF_NO_XCROSS= Blender.Draw.Create(0) - PREF_SEL_ONLY= Blender.Draw.Create(0) - - pup_block = [\ - ('Quality:', PREF_QUALITY, 0, 4, 'Generate interpolated verts for a higher quality result.'),\ - ('No X Crossing', PREF_NO_XCROSS, 'Do not snap across the zero X axis'),\ - '',\ - '"Update Selected" copies',\ - 'active object weights to',\ - 'selected verts on the other',\ - 'selected mesh objects.',\ - ('Update Selected', PREF_SEL_ONLY, 'Only copy new weights to selected verts on the target mesh. (use active object as source)'),\ - ] - - - if not Blender.Draw.PupBlock("Copy Weights for %i Meshs" % len(contextSel), pup_block): - return - - PREF_SEL_ONLY= PREF_SEL_ONLY.val - PREF_NO_XCROSS= PREF_NO_XCROSS.val - quality= PREF_QUALITY.val - - act_ob= scn.objects.active - if PREF_SEL_ONLY and act_ob==None: - Blender.Draw.PupMenu('Error%t|When dealing with 2 or more meshes with vgroups|There must be an active object|to be used as a source|aborting.') - return - - sel=[] - from_data= None - - for ob in contextSel: - if ob.type=='Mesh': - me= ob.getData(mesh=1) - groups= me.getVertGroupNames() - - # If this is the only mesh with a group OR if its one of many, but its active. 
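# A standalone sketch of the idea behind getSnapIdx/worldspace_verts_idx:
# keep (index, co) pairs sorted by Z, start at the bisection point for the
# query's Z, then walk outwards until the Z gap alone exceeds the best
# distance found so far.  Plain (x, y, z) tuples stand in for Blender
# vectors; the X-crossing and edge-user options are left out.
import bisect
import math

def zsorted(points):
    """Return (original_index, point) pairs sorted by the Z coordinate."""
    return sorted(enumerate(points), key=lambda item: item[1][2])

def closest_index(seek, vecs_zsorted):
    """vecs_zsorted is the output of zsorted(); returns the original index
    of the point closest to 'seek'."""
    zs = [co[2] for _, co in vecs_zsorted]
    mid = min(bisect.bisect_left(zs, seek[2]), len(zs) - 1)

    def dist(a, b):
        return math.sqrt((a[0]-b[0])**2 + (a[1]-b[1])**2 + (a[2]-b[2])**2)

    best_idx, best_d = vecs_zsorted[mid][0], dist(seek, vecs_zsorted[mid][1])
    # Walk up and down; a candidate further away in Z than best_d cannot win.
    for step in (1, -1):
        i = mid + step
        while 0 <= i < len(zs) and abs(zs[i] - seek[2]) < best_d:
            d = dist(seek, vecs_zsorted[i][1])
            if d < best_d:
                best_idx, best_d = vecs_zsorted[i][0], d
            i += step
    return best_idx

pts = [(0.0, 0.0, 0.0), (1.0, 0.0, 2.0), (0.1, 0.0, 0.1)]
assert closest_index((0.0, 0.0, 0.15), zsorted(pts)) == 2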
- if groups and ((ob==act_ob and PREF_SEL_ONLY) or (not PREF_SEL_ONLY)): - if from_data: - Blender.Draw.PupMenu('More then 1 mesh has vertex weights, only select 1 mesh with weights. aborting.') - return - else: - # This uses worldspace_verts_idx which gets (idx,co) pairs, then zsorts. - if quality: - for _ob in contextSel: - _ob.sel=0 - ob.sel=1 - Object.Duplicate(mesh=1) - ob= scn.objects.active - me= ob.getData(mesh=1) - # groups will be the same - print '\tGenerating higher %ix quality weights.' % quality - subdivMesh(me, quality) - scn.unlink(ob) - from_data= (ob, me, worldspace_verts_idx(me, ob), groups) - - else: - data= (ob, me, worldspace_verts(me, ob), groups) - sel.append(data) - - if not from_data: - Blender.Draw.PupMenu('Error%t|No mesh with vertex groups found.') - return - - if not sel: - Blender.Draw.PupMenu('Error%t|Select 2 or more mesh objects, aborting.') - if quality: from_data[1].verts= None - return - - t= Blender.sys.time() - Window.WaitCursor(1) - - # Now do the copy. - print '\tCopying from "%s" to %i other mesh(es).' % (from_data[0].name, len(sel)) - for data in sel: - copy_bone_influences(from_data, data, PREF_SEL_ONLY, PREF_NO_XCROSS) - - # We cant unlink the mesh, but at least remove its data. - if quality: - from_data[1].verts= None - - print 'Copy Complete in %.6f sec' % (Blender.sys.time()-t) - Window.DrawProgressBar(1.0, '') - Window.WaitCursor(0) - -if __name__ == '__main__': - main() \ No newline at end of file diff --git a/release/scripts/mesh_cleanup.py b/release/scripts/mesh_cleanup.py deleted file mode 100644 index 27adca335cb..00000000000 --- a/release/scripts/mesh_cleanup.py +++ /dev/null @@ -1,456 +0,0 @@ -#!BPY -""" -Name: 'Clean Meshes' -Blender: 245 -Group: 'Mesh' -Tooltip: 'Clean unused data from all selected mesh objects.' -""" - -__author__ = "Campbell Barton aka ideasman42" -__url__ = ["www.blender.org", "blenderartists.org", "www.python.org"] -__version__ = "0.1" -__bpydoc__ = """\ -Clean Meshes - -Cleans unused data from selected meshes -""" - -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Script copyright (C) Campbell J Barton -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
-# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - - -from Blender import * -import bpy -from Blender.Mathutils import TriangleArea - -import Blender -import BPyMesh -dict2MeshWeight= BPyMesh.dict2MeshWeight -meshWeight2Dict= BPyMesh.meshWeight2Dict - -def rem_free_verts(me): - vert_users= [0] * len(me.verts) - for f in me.faces: - for v in f: - vert_users[v.index]+=1 - - for e in me.edges: - for v in e: # loop on edge verts - vert_users[v.index]+=1 - - verts_free= [i for i, users in enumerate(vert_users) if not users] - - if verts_free: - pass - me.verts.delete(verts_free) - return len(verts_free) - -def rem_free_edges(me, limit=None): - ''' Only remove based on limit if a limit is set, else remove all ''' - - edgeDict= {} # will use a set when python 2.4 is standard. - - for f in me.faces: - for edkey in f.edge_keys: - edgeDict[edkey] = None - - edges_free= [] - for e in me.edges: - if not edgeDict.has_key(e.key): - edges_free.append(e) - - if limit != None: - edges_free= [e for e in edges_free if e.length <= limit] - - me.edges.delete(edges_free) - return len(edges_free) - -def rem_area_faces(me, limit=0.001): - ''' Faces that have an area below the limit ''' - rem_faces= [f for f in me.faces if f.area <= limit] - if rem_faces: - me.faces.delete( 0, rem_faces ) - return len(rem_faces) - -def rem_perimeter_faces(me, limit=0.001): - ''' Faces whos combine edge length is below the limit ''' - def faceEdLen(f): - v= f.v - if len(v) == 3: - return\ - (v[0].co-v[1].co).length +\ - (v[1].co-v[2].co).length +\ - (v[2].co-v[0].co).length - else: # 4 - return\ - (v[0].co-v[1].co).length +\ - (v[1].co-v[2].co).length +\ - (v[2].co-v[3].co).length +\ - (v[3].co-v[0].co).length - rem_faces= [f for f in me.faces if faceEdLen(f) <= limit] - if rem_faces: - me.faces.delete( 0, rem_faces ) - return len(rem_faces) - -def rem_unused_materials(me): - materials= me.materials - len_materials= len(materials) - if len_materials < 2: - return 0 - - rem_materials= 0 - - material_users= dict( [(i,0) for i in xrange(len_materials)] ) - - for f in me.faces: - f_mat = f.mat - # Make sure the face index isnt too big. this happens sometimes. 
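# A small standalone sketch of the counting pattern shared by the cleanup
# functions above (rem_free_verts / rem_unused_materials): count how many
# users each slot has, drop the slots with zero users, and build a remapping
# table for the indices that survive.  Plain Python lists stand in for
# Blender's mesh data here.
def unused_and_remap(num_slots, used_indices):
    """Return (unused_slots, remap) where remap[old_index] == new_index for
    every slot that still has at least one user."""
    users = [0] * num_slots
    for i in used_indices:
        users[i] += 1
    unused = [i for i, n in enumerate(users) if n == 0]
    remap, new_index = {}, 0
    for i, n in enumerate(users):
        if n:
            remap[i] = new_index
            new_index += 1
    return unused, remap

# Example: faces referencing materials 0 and 2 out of 3 slots.
face_mats = [0, 2, 2, 0]
unused, remap = unused_and_remap(3, face_mats)
assert unused == [1]
assert [remap[m] for m in face_mats] == [0, 1, 1, 0]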
- if f_mat >= len_materials: - f_mat = f.mat = 0 - material_users[f_mat] += 1 - - # mat_idx_subtract= 0 - # reindex_mapping= dict( [(i,0) for i in xrange(len_materials)] ) - - reindex_mapping_ls = range(len_materials) - for i in range(len_materials-1, -1, -1): - if material_users[i] == 0: - del reindex_mapping_ls[i] - del materials[i] - rem_materials+=1 - - reindex_mapping= {} - - for i, mat in enumerate(reindex_mapping_ls): - reindex_mapping[mat] = i - - for f in me.faces: - f.mat= reindex_mapping[f.mat] - - me.materials= materials - return rem_materials - - -def rem_free_groups(me, groupNames, vWeightDict): - ''' cound how many vert users a group has and remove unused groups ''' - rem_groups = 0 - groupUserDict= dict([(group,0) for group in groupNames]) - - for vertexWeight in vWeightDict: - for group, weight in vertexWeight.iteritems(): - groupUserDict[group] += 1 - - i=len(groupNames) - while i: - i-=1 - group= groupNames[i] - if groupUserDict[group] == 0: - del groupNames[i] - print '\tremoving, vgroup', group - rem_groups+=1 - return rem_groups - -def rem_zero_weights(me, limit, groupNames, vWeightDict): - ''' remove verts from a group when their weight is zero.''' - rem_vweight_count= 0 - for vertexWeight in vWeightDict: - items= vertexWeight.items() - for group, weight in items: - if weight < limit: - del vertexWeight[group] - rem_vweight_count+= 1 - - return rem_vweight_count - - -def normalize_vweight(me, groupNames, vWeightDict): - for vertexWeight in vWeightDict: - unit= 0.0 - for group, weight in vertexWeight.iteritems(): - unit+= weight - - if unit != 1.0 and unit != 0.0: - for group, weight in vertexWeight.iteritems(): - vertexWeight[group]= weight/unit - -def isnan(f): - fstring = str(f).lower() - if 'nan' in fstring: - return True - if 'inf' in fstring: - return True - - return False - -def fix_nan_verts__internal(me): - rem_nan = 0 - for v in me.verts: - co = v.co - for i in (0,1,2): - if isnan(co[i]): - co[i] = 0.0 - rem_nan += 1 - return rem_nan - -def fix_nan_verts(me): - rem_nan = 0 - key = me.key - if key: - # Find the object, and get a mesh thats thinked to the oblink. - # this is a bit crap but needed to set the active key. 
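# The isnan() helper above tests the string form of the float; this is a
# standalone alternative sketch using math.isnan/math.isinf from the standard
# library (a swapped-in check, not what the original script does), applied to
# plain coordinate triples instead of Blender verts.
import math

def is_bad(value):
    """True for NaN or +/-inf."""
    return math.isnan(value) or math.isinf(value)

def fix_bad_coords(coords):
    """Replace NaN/inf components with 0.0; return the number of fixes."""
    fixed = 0
    for co in coords:
        for axis in (0, 1, 2):
            if is_bad(co[axis]):
                co[axis] = 0.0
                fixed += 1
    return fixed

verts = [[0.0, float('nan'), 1.0], [float('inf'), 2.0, 3.0]]
assert fix_bad_coords(verts) == 2
assert verts[0][1] == 0.0 and verts[1][0] == 0.0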
- me_oblink = None - for ob in bpy.data.objects: - me_oblink = ob.getData(mesh=1) - if me_oblink == me: - me = me_oblink - break - if not me_oblink: - ob = None - - if key and ob: - blocks = key.blocks - # print blocks - orig_pin = ob.pinShape - orig_shape = ob.activeShape - orig_relative = key.relative - ob.pinShape = True - for i, block in enumerate(blocks): - ob.activeShape = i+1 - ob.makeDisplayList() - rem_nan += fix_nan_verts__internal(me) - me.update(block.name) # get the new verts - ob.pinShape = orig_pin - ob.activeShape = orig_shape - key.relative = orig_relative - - else: # No keys, simple operation - rem_nan = fix_nan_verts__internal(me) - - return rem_nan - -def fix_nan_uvs(me): - rem_nan = 0 - if me.faceUV: - orig_uvlayer = me.activeUVLayer - for uvlayer in me.getUVLayerNames(): - me.activeUVLayer = uvlayer - for f in me.faces: - for uv in f.uv: - for i in (0,1): - if isnan(uv[i]): - uv[i] = 0.0 - rem_nan += 1 - me.activeUVLayer = orig_uvlayer - return rem_nan - - -def has_vcol(me): - for f in me.faces: - for col in f.col: - if not (255 == col.r == col.g == col.b): - return True - return False - -def rem_white_vcol_layers(me): - vcols_removed = 0 - if me.vertexColors: - for col in me.getColorLayerNames(): - me.activeColorLayer = col - if not has_vcol(me): - me.removeColorLayer(col) - vcols_removed += 1 - - return vcols_removed - - -def main(): - sce= bpy.data.scenes.active - obsel= list(sce.objects.context) - actob= sce.objects.active - - is_editmode= Window.EditMode() - - # Edit mode object is not active, add it to the list. - if is_editmode and (not actob.sel): - obsel.append(actob) - - - #====================================# - # Popup menu to select the functions # - #====================================# - - CLEAN_ALL_DATA= Draw.Create(0) - CLEAN_VERTS_FREE= Draw.Create(1) - CLEAN_EDGE_NOFACE= Draw.Create(0) - CLEAN_EDGE_SMALL= Draw.Create(0) - CLEAN_FACE_PERIMETER= Draw.Create(0) - CLEAN_FACE_SMALL= Draw.Create(0) - - CLEAN_MATERIALS= Draw.Create(0) - CLEAN_WHITE_VCOL_LAYERS= Draw.Create(0) - CLEAN_GROUP= Draw.Create(0) - CLEAN_VWEIGHT= Draw.Create(0) - CLEAN_WEIGHT_NORMALIZE= Draw.Create(0) - limit= Draw.Create(0.01) - - CLEAN_NAN_VERTS= Draw.Create(0) - CLEAN_NAN_UVS= Draw.Create(0) - - # Get USER Options - - pup_block= [\ - ('Verts: free', CLEAN_VERTS_FREE, 'Remove verts that are not used by an edge or a face.'),\ - ('Edges: free', CLEAN_EDGE_NOFACE, 'Remove edges that are not in a face.'),\ - ('Edges: short', CLEAN_EDGE_SMALL, 'Remove edges that are below the length limit.'),\ - ('Faces: small perimeter', CLEAN_FACE_PERIMETER, 'Remove faces below the perimeter limit.'),\ - ('Faces: small area', CLEAN_FACE_SMALL, 'Remove faces below the area limit (may remove faces stopping T-face artifacts).'),\ - ('limit: ', limit, 0.001, 1.0, 'Limit for the area and length tests above (a higher limit will remove more data).'),\ - ('Material Clean', CLEAN_MATERIALS, 'Remove unused materials.'),\ - ('Color Layers', CLEAN_WHITE_VCOL_LAYERS, 'Remove vertex color layers that are totaly white'),\ - ('VGroup Clean', CLEAN_GROUP, 'Remove vertex groups that have no verts using them.'),\ - ('Weight Clean', CLEAN_VWEIGHT, 'Remove zero weighted verts from groups (limit is zero threshold).'),\ - ('WeightNormalize', CLEAN_WEIGHT_NORMALIZE, 'Make the sum total of vertex weights accross vgroups 1.0 for each vertex.'),\ - 'Clean NAN values',\ - ('NAN Verts', CLEAN_NAN_VERTS, 'Make NAN or INF verts (0,0,0)'),\ - ('NAN UVs', CLEAN_NAN_UVS, 'Make NAN or INF UVs (0,0)'),\ - '',\ - ('All Mesh Data', 
CLEAN_ALL_DATA, 'Warning! Operate on ALL mesh objects in your Blend file. Use with care'),\ - ] - - if not Draw.PupBlock('Clean Selected Meshes...', pup_block): - return - - CLEAN_VERTS_FREE= CLEAN_VERTS_FREE.val - CLEAN_EDGE_NOFACE= CLEAN_EDGE_NOFACE.val - CLEAN_EDGE_SMALL= CLEAN_EDGE_SMALL.val - CLEAN_FACE_PERIMETER= CLEAN_FACE_PERIMETER.val - CLEAN_FACE_SMALL= CLEAN_FACE_SMALL.val - CLEAN_MATERIALS= CLEAN_MATERIALS.val - CLEAN_WHITE_VCOL_LAYERS= CLEAN_WHITE_VCOL_LAYERS.val - CLEAN_GROUP= CLEAN_GROUP.val - CLEAN_VWEIGHT= CLEAN_VWEIGHT.val - CLEAN_WEIGHT_NORMALIZE= CLEAN_WEIGHT_NORMALIZE.val - limit= limit.val - CLEAN_ALL_DATA= CLEAN_ALL_DATA.val - CLEAN_NAN_VERTS= CLEAN_NAN_VERTS.val - CLEAN_NAN_UVS= CLEAN_NAN_UVS.val - - if is_editmode: Window.EditMode(0) - - if CLEAN_ALL_DATA: - if CLEAN_GROUP or CLEAN_VWEIGHT or CLEAN_WEIGHT_NORMALIZE: - # For groups we need the objects linked to the mesh - meshes= [ob.getData(mesh=1) for ob in bpy.data.objects if ob.type == 'Mesh' if not ob.lib] - else: - meshes= bpy.data.meshes - else: - meshes= [ob.getData(mesh=1) for ob in obsel if ob.type == 'Mesh'] - - tot_meshes = len(meshes) # so we can decrement libdata - rem_face_count= rem_edge_count= rem_vert_count= rem_material_count= rem_vcol_layer_count= rem_group_count= rem_vweight_count= fix_nan_vcount= fix_nan_uvcount= 0 - if not meshes: - if is_editmode: Window.EditMode(1) - Draw.PupMenu('No meshes to clean') - - Blender.Window.WaitCursor(1) - bpy.data.meshes.tag = False - for me in meshes: - - # Dont touch the same data twice - if me.tag: - tot_meshes -= 1 - continue - me.tag = True - - if me.lib: - tot_meshes -= 1 - continue - - if me.multires: - multires_level_orig = me.multiresDrawLevel - me.multiresDrawLevel = 1 - print 'Warning, cannot perform destructive operations on multires mesh:', me.name - else: - if CLEAN_FACE_SMALL: - rem_face_count += rem_area_faces(me, limit) - - if CLEAN_FACE_PERIMETER: - rem_face_count += rem_perimeter_faces(me, limit) - - if CLEAN_EDGE_SMALL: # for all use 2- remove all edges. - rem_edge_count += rem_free_edges(me, limit) - - if CLEAN_EDGE_NOFACE: - rem_edge_count += rem_free_edges(me) - - if CLEAN_VERTS_FREE: - rem_vert_count += rem_free_verts(me) - - if CLEAN_MATERIALS: - rem_material_count += rem_unused_materials(me) - - if CLEAN_WHITE_VCOL_LAYERS: - rem_vcol_layer_count += rem_white_vcol_layers(me) - - if CLEAN_VWEIGHT or CLEAN_GROUP or CLEAN_WEIGHT_NORMALIZE: - groupNames, vWeightDict= meshWeight2Dict(me) - - if CLEAN_VWEIGHT: - rem_vweight_count += rem_zero_weights(me, limit, groupNames, vWeightDict) - - if CLEAN_GROUP: - rem_group_count += rem_free_groups(me, groupNames, vWeightDict) - pass - - if CLEAN_WEIGHT_NORMALIZE: - normalize_vweight(me, groupNames, vWeightDict) - - # Copy back to mesh vertex groups. - dict2MeshWeight(me, groupNames, vWeightDict) - - if CLEAN_NAN_VERTS: - fix_nan_vcount = fix_nan_verts(me) - - if CLEAN_NAN_UVS: - fix_nan_uvcount = fix_nan_uvs(me) - - # restore multires. 
- if me.multires: - me.multiresDrawLevel = multires_level_orig - - Blender.Window.WaitCursor(0) - if is_editmode: Window.EditMode(0) - stat_string= 'Removed from ' + str(tot_meshes) + ' Mesh(es)%t|' - - if CLEAN_VERTS_FREE: stat_string+= 'Verts: %i|' % rem_vert_count - if CLEAN_EDGE_SMALL or CLEAN_EDGE_NOFACE: stat_string+= 'Edges: %i|' % rem_edge_count - if CLEAN_FACE_SMALL or CLEAN_FACE_PERIMETER: stat_string+= 'Faces: %i|' % rem_face_count - if CLEAN_MATERIALS: stat_string+= 'Materials: %i|' % rem_material_count - if CLEAN_WHITE_VCOL_LAYERS: stat_string+= 'Color Layers: %i|' % rem_vcol_layer_count - if CLEAN_VWEIGHT: stat_string+= 'VWeights: %i|' % rem_vweight_count - if CLEAN_GROUP: stat_string+= 'VGroups: %i|' % rem_group_count - if CLEAN_NAN_VERTS: stat_string+= 'Vert Nan Fix: %i|' % fix_nan_vcount - if CLEAN_NAN_UVS: stat_string+= 'UV Nan Fix: %i|' % fix_nan_uvcount - Draw.PupMenu(stat_string) - - -if __name__ == '__main__': - main() \ No newline at end of file diff --git a/release/scripts/mesh_edges2curves.py b/release/scripts/mesh_edges2curves.py deleted file mode 100644 index 670165dda51..00000000000 --- a/release/scripts/mesh_edges2curves.py +++ /dev/null @@ -1,166 +0,0 @@ -#!BPY -""" Registration info for Blender menus: -Name: 'Edges to Curve' -Blender: 241 -Group: 'Mesh' -Tip: 'Edges not used by a face are converted into polyline(s)' -""" -__author__ = ("Campbell Barton") -__url__ = ("blender", "blenderartists.org") -__version__ = "1.0 2006/02/08" - -__bpydoc__ = """\ -Edges to Curves - -This script converts open and closed edge loops into curve polylines - -Supported:
- Polylines where each vert has no more then 2 edges attached to it. -""" - -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Script copyright (C) Campbell J Barton -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -from Blender import * - -def polysFromMesh(me): - # a polyline is 2 - #polylines are a list - polyLines = [] - - # Get edges not used by a face - edgeDict= dict([ (ed.key, ed) for ed in me.edges ]) - for f in me.faces: - for key in f.edge_keys: - try: - del edgeDict[key] - except: - pass - - edges= edgeDict.values() - - - while edges: - currentEdge= edges.pop() - startVert= currentEdge.v2 - endVert= currentEdge.v1 - polyLine= [startVert, endVert] - ok= 1 - while ok: - ok= 0 - #for i, ed in enumerate(edges): - i=len(edges) - while i: - i-=1 - ed= edges[i] - if ed.v1 == endVert: - polyLine.append(ed.v2) - endVert= polyLine[-1] - ok=1 - del edges[i] - #break - elif ed.v2 == endVert: - polyLine.append(ed.v1) - endVert= polyLine[-1] - ok=1 - del edges[i] - #break - elif ed.v1 == startVert: - polyLine.insert(0, ed.v2) - startVert= polyLine[0] - ok=1 - del edges[i] - #break - elif ed.v2 == startVert: - polyLine.insert(0, ed.v1) - startVert= polyLine[0] - ok=1 - del edges[i] - #break - polyLines.append((polyLine, polyLine[0]==polyLine[-1])) - # print len(edges), len(polyLines) - return polyLines - - -def mesh2polys(): - scn= Scene.GetCurrent() - scn.objects.selected = [] - - meshOb= scn.objects.active - if meshOb==None or meshOb.type != 'Mesh': - Draw.PupMenu( 'ERROR: No Active Mesh Selected, Aborting' ) - return - Window.WaitCursor(1) - Window.EditMode(0) - me = meshOb.getData(mesh=1) - polygons= polysFromMesh(me) - w = 1.0 - cu= Curve.New() - cu.name = me.name - cu.setFlag(1) - - ob = scn.objects.active = scn.objects.new(cu) - ob.setMatrix(meshOb.matrixWorld) - - i=0 - for poly, closed in polygons: - if closed: - vIdx= 1 - else: - vIdx= 0 - - v= poly[vIdx] - cu.appendNurb((v.co.x, v.co.y, v.co.z, w)) - vIdx += 1 - cu[i].type= 0 # Poly Line - - # Close the polyline if its closed. - if closed: - cu[i].setFlagU(1) - - # Add all the points in the polyline. - while vIdx 0] - - else: - # Use a small margin verts must be outside before we mirror them. - neg_vts = [v for v in me.verts if v.sel if v.co.x < -PREF_XZERO_THRESH] - pos_vts = [v for v in me.verts if v.sel if v.co.x > PREF_XZERO_THRESH] - - - - #*Mirror Location*********************************************************# - if PREF_MIRROR_LOCATION: - mirror_pairs= [] - # allign the negative with the positive. - flipvec= Mathutils.Vector() - len_neg_vts= float(len(neg_vts)) - for i1, nv in enumerate(neg_vts): - if nv.sel: # we may alredy be mirrored, if so well be deselected - nv_co= nv.co - for i2, pv in enumerate(pos_vts): - if pv.sel: - # Enforce edge users. 
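# A condensed standalone sketch of this mirror-matching step: flip each
# candidate on +X across the X axis, record (distance, neg, pos) pairs that
# fall under a distance limit, then consume them smallest-first so every
# vert is paired at most once.  Plain (x, y, z) tuples; weights, edge-user
# filtering and the left/right/middle snap modes are omitted.
import math

def mirror_pairs(neg_pts, pos_pts, max_dist):
    """Return a list of (neg_index, pos_index) pairs, best matches first."""
    candidates = []
    for i, n in enumerate(neg_pts):
        for j, p in enumerate(pos_pts):
            flipped = (-p[0], p[1], p[2])
            d = math.sqrt(sum((a - b) ** 2 for a, b in zip(n, flipped)))
            if d <= max_dist:
                candidates.append((d, i, j))
    candidates.sort()
    used_n, used_p, pairs = set(), set(), []
    for d, i, j in candidates:
        if i not in used_n and j not in used_p:
            pairs.append((i, j))
            used_n.add(i)
            used_p.add(j)
    return pairs

neg = [(-1.0, 0.0, 0.0), (-2.0, 1.0, 0.0)]
pos = [(2.05, 1.0, 0.0), (1.1, 0.0, 0.0)]
assert mirror_pairs(neg, pos, 0.2) == [(1, 0), (0, 1)]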
- if not PREF_EDGE_USERS or edge_users[i1]==edge_users[i2]: - flipvec[:]= pv.co - flipvec.x= -flipvec.x - l= (nv_co-flipvec).length - - if l==0.0: # Both are alredy mirrored so we dont need to think about them. - # De-Select so we dont use again/ - pv.sel= nv.sel= 0 - - # Record a match. - elif l<=PREF_MAX_DIST: - - # We can adjust the length by the normal, now we know the length is under the limit. - # DISABLED, WASNT VERY USEFULL - ''' - if PREF_NOR_WEIGHT>0: - # Get the normal and flipm reuse flipvec - flipvec[:]= pv.no - flipvec.x= -flipvec.x - try: - ang= Mathutils.AngleBetweenVecs(nv.no, flipvec)/180.0 - except: # on rare occasions angle between vecs will fail.- zero length vec. - ang= 0 - - l=l*(1+(ang*PREF_NOR_WEIGHT)) - ''' - # Record the pairs for sorting to see who will get joined - mirror_pairs.append((l, nv, pv)) - - # Update every 20 loops - if i1 % 10 == 0: - Window.DrawProgressBar(0.8 * (i1/len_neg_vts), 'Mirror verts %i of %i' % (i1, len_neg_vts)) - - Window.DrawProgressBar(0.9, 'Mirror verts: Updating locations') - - # Now we have a list of the pairs we might use, lets find the best and do them first. - # de-selecting as we go. so we can makke sure not to mess it up. - try: mirror_pairs.sort(key = lambda a: a[0]) - except: mirror_pairs.sort(lambda a,b: cmp(a[0], b[0])) - - for dist, v1,v2 in mirror_pairs: # dist, neg, pos - if v1.sel and v2.sel: - if PREF_MODE==0: # Middle - flipvec[:]= v2.co # positive - flipvec.x= -flipvec.x # negatve - v2.co= v1.co= (flipvec+v1.co)*0.5 # midway - v2.co.x= -v2.co.x - elif PREF_MODE==2: # Left - v2.co= v1.co - v2.co.x= -v2.co.x - elif PREF_MODE==1: # Right - v1.co= v2.co - v1.co.x= -v1.co.x - v1.sel= v2.sel= 0 - - - #*Mirror Weights**********************************************************# - if PREF_MIRROR_WEIGHTS: - - groupNames, vWeightDict= BPyMesh.meshWeight2Dict(me) - mirror_pairs_l2r= [] # Stor a list of matches for these verts. - mirror_pairs_r2l= [] # Stor a list of matches for these verts. - - # allign the negative with the positive. - flipvec= Mathutils.Vector() - len_neg_vts= float(len(neg_vts)) - - # Here we make a tuple to look through, if were middle well need to look through both. - if PREF_MODE==0: # Middle - find_set= ((neg_vts, pos_vts, mirror_pairs_l2r), (pos_vts, neg_vts, mirror_pairs_r2l)) - elif PREF_MODE==1: # Left - find_set= ((neg_vts, pos_vts, mirror_pairs_l2r), ) - elif PREF_MODE==2: # Right - find_set= ((pos_vts, neg_vts, mirror_pairs_r2l), ) - - - # Do a locational lookup again :/ - This isnt that good form but if we havnt mirrored weights well need to do it anyway. - # The Difference with this is that we dont need to have 1:1 match for each vert- just get each vert to find another mirrored vert - # and use its weight. - # Use "find_set" so we can do a flipped search L>R and R>L without duplicate code. - for vtls_A, vtls_B, pair_ls in find_set: - for i1, vA in enumerate(vtls_A): - best_len=1<<30 # BIGNUM - best_idx=-1 - - # Find the BEST match - vA_co= vA.co - for i2, vB in enumerate(vtls_B): - # Enforce edge users. - if not PREF_EDGE_USERS or edge_users[i1]==edge_users[i2]: - flipvec[:]= vB.co - flipvec.x= -flipvec.x - l= (vA_co-flipvec).length - - if l Right', PREF_MODE_L2R, 'Copy from the Left to Right of the mesh. Enable Both for a mid loc/weight.'),\ - ('Right > Left', PREF_MODE_R2L, 'Copy from the Right to Left of the mesh. 
Enable Both for a mid loc/weight.'),\ - '',\ - ('MaxDist:', PREF_MAX_DIST, 0.0, 1.0, 'Generate interpolated verts so closer vert weights can be copied.'),\ - ('XZero limit:', PREF_XZERO_THRESH, 0.0, 1.0, 'Mirror verts above this distance from the middle, else lock to X/zero.'),\ - ('Sel Verts Only', PREF_SEL_ONLY, 'Only mirror selected verts. Else try and mirror all'),\ - ('Edge Users', PREF_EDGE_USERS, 'Only match up verts that have the same number of edge users.'),\ - 'Location Prefs',\ - ('Mirror Location', PREF_MIRROR_LOCATION, 'Mirror vertex locations.'),\ - ('XMidSnap Verts', PREF_XMID_SNAP, 'Snap middle verts to X Zero (uses XZero limit)'),\ - 'Weight Prefs',\ - ('Mirror Weights', PREF_MIRROR_WEIGHTS, 'Mirror vertex locations.'),\ - ('Flip Groups', PREF_FLIP_NAMES, 'Mirror flip names.'),\ - ('New Flip Groups', PREF_CREATE_FLIP_NAMES, 'Make new groups for flipped names.'),\ - ] - - if not Draw.PupBlock("X Mirror mesh tool", pup_block): - return - - # WORK OUT THE MODE 0 - # PREF_MODE, 0:middle, 1: Left. 2:Right. - PREF_MODE_R2L= PREF_MODE_R2L.val - PREF_MODE_L2R= PREF_MODE_L2R.val - - if PREF_MODE_R2L and PREF_MODE_L2R: - PREF_MODE= 0 # Middle - elif not PREF_MODE_R2L and PREF_MODE_L2R: - PREF_MODE= 1 # Left to Right - elif PREF_MODE_R2L and not PREF_MODE_L2R: - PREF_MODE= 2 # Right to Left - else: # Neither Selected. Do middle anyway - PREF_MODE= 0 - - - PREF_EDITMESH_ONLY= PREF_EDITMESH_ONLY.val - PREF_MIRROR_LOCATION= PREF_MIRROR_LOCATION.val - PREF_XMID_SNAP= PREF_XMID_SNAP.val - PREF_MAX_DIST= PREF_MAX_DIST.val - PREF_XZERO_THRESH= PREF_XZERO_THRESH.val - PREF_SEL_ONLY= PREF_SEL_ONLY.val - PREF_EDGE_USERS= PREF_EDGE_USERS.val - # weights - PREF_MIRROR_WEIGHTS= PREF_MIRROR_WEIGHTS.val - PREF_FLIP_NAMES= PREF_FLIP_NAMES.val - PREF_CREATE_FLIP_NAMES= PREF_CREATE_FLIP_NAMES.val - - t= sys.time() - - is_editmode = Window.EditMode() # Exit Editmode. - if is_editmode: Window.EditMode(0) - Mesh.Mode(Mesh.SelectModes['VERTEX']) - Window.WaitCursor(1) - - if act_ob: - mesh_mirror(act_ob.getData(mesh=1), PREF_MIRROR_LOCATION, PREF_XMID_SNAP, PREF_MAX_DIST, PREF_XZERO_THRESH, PREF_MODE, PREF_SEL_ONLY, PREF_EDGE_USERS, PREF_MIRROR_WEIGHTS, PREF_FLIP_NAMES, PREF_CREATE_FLIP_NAMES) - if (not PREF_EDITMESH_ONLY) and sel: - for ob in sel: - mesh_mirror(ob.getData(mesh=1), PREF_MIRROR_LOCATION, PREF_XMID_SNAP, PREF_MAX_DIST, PREF_XZERO_THRESH, PREF_MODE, PREF_SEL_ONLY, PREF_EDGE_USERS, PREF_MIRROR_WEIGHTS, PREF_FLIP_NAMES, PREF_CREATE_FLIP_NAMES) - - if is_editmode: Window.EditMode(1) - Window.WaitCursor(0) - Window.DrawProgressBar(1.0, '') - Window.RedrawAll() - - print 'Mirror done in %.6f sec.' % (sys.time()-t) - -if __name__ == '__main__': - main() diff --git a/release/scripts/mesh_poly_reduce.py b/release/scripts/mesh_poly_reduce.py deleted file mode 100644 index 6dfd7a90efc..00000000000 --- a/release/scripts/mesh_poly_reduce.py +++ /dev/null @@ -1,143 +0,0 @@ -#!BPY -""" -Name: 'Poly Reducer' -Blender: 243 -Group: 'Mesh' -Tooltip: 'Removed polygons from a mesh while maintaining the shape, textures and weights.' -""" - -__author__ = "Campbell Barton" -__url__ = ("blender", "blenderartists.org") -__version__ = "1.0 2006/02/07" - -__bpydoc__ = """\ -This script simplifies the mesh by removing faces, keeping the overall shape of the mesh. 
-""" - -from Blender import Draw, Window, Scene, Mesh, Mathutils, sys, Object -import BPyMesh -# reload(BPyMesh) -import BPyMessages - -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Script copyright (C) Campbell J Barton -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - - -def main(): - scn = Scene.GetCurrent() - act_ob= scn.objects.active - if not act_ob or act_ob.type != 'Mesh': - BPyMessages.Error_NoMeshActive() - return - - act_me= act_ob.getData(mesh=1) - - if act_me.multires: - BPyMessages.Error_NoMeshMultiresEdit() - return - - act_group= act_me.activeGroup - if not act_group: act_group= '' - - - # Defaults - PREF_REDUX= Draw.Create(0.5) - PREF_BOUNDRY_WEIGHT= Draw.Create(5.0) - PREF_REM_DOUBLES= Draw.Create(1) - PREF_FACE_AREA_WEIGHT= Draw.Create(1.0) - PREF_FACE_TRIANGULATE= Draw.Create(1) - - VGROUP_INF_ENABLE= Draw.Create(0) - VGROUP_INF_REDUX= Draw.Create(act_group) - VGROUP_INF_WEIGHT= Draw.Create(10.0) - - PREF_DO_UV= Draw.Create(1) - PREF_DO_VCOL= Draw.Create(1) - PREF_DO_WEIGHTS= Draw.Create(1) - PREF_OTHER_SEL_OBS= Draw.Create(0) - - pup_block = [\ - ('Poly Reduce:', PREF_REDUX, 0.05, 0.95, 'Scale the meshes poly count by this value.'),\ - ('Boundry Weight:', PREF_BOUNDRY_WEIGHT, 0.0, 20.0, 'Weight boundry verts by this scale, 0.0 for no boundry weighting.'),\ - ('Area Weight:', PREF_FACE_AREA_WEIGHT, 0.0, 20.0, 'Collapse edges effecting lower area faces first.'),\ - ('Triangulate', PREF_FACE_TRIANGULATE, 'Convert quads to tris before reduction, for more choices of edges to collapse.'),\ - '',\ - ('VGroup Weighting', VGROUP_INF_ENABLE, 'Use a vertex group to influence the reduction, higher weights for higher quality '),\ - ('vgroup name: ', VGROUP_INF_REDUX, 0, 32, 'The name of the vertex group to use for the weight map'),\ - ('vgroup mult: ', VGROUP_INF_WEIGHT, 0.0, 100.0, 'How much to make the weight effect the reduction'),\ - ('Other Selected Obs', PREF_OTHER_SEL_OBS, 'reduce other selected objects.'),\ - '',\ - '',\ - '',\ - ('UV Coords', PREF_DO_UV, 'Interpolate UV Coords.'),\ - ('Vert Colors', PREF_DO_VCOL, 'Interpolate Vertex Colors'),\ - ('Vert Weights', PREF_DO_WEIGHTS, 'Interpolate Vertex Weights'),\ - ('Remove Doubles', PREF_REM_DOUBLES, 'Remove doubles before reducing to avoid boundry tearing.'),\ - ] - - if not Draw.PupBlock("Poly Reducer", pup_block): - return - - PREF_REDUX= PREF_REDUX.val - PREF_BOUNDRY_WEIGHT= PREF_BOUNDRY_WEIGHT.val - PREF_REM_DOUBLES= PREF_REM_DOUBLES.val - PREF_FACE_AREA_WEIGHT= PREF_FACE_AREA_WEIGHT.val - PREF_FACE_TRIANGULATE= PREF_FACE_TRIANGULATE.val - - VGROUP_INF_ENABLE= VGROUP_INF_ENABLE.val - VGROUP_INF_WEIGHT= VGROUP_INF_WEIGHT.val - - if VGROUP_INF_ENABLE and VGROUP_INF_WEIGHT: - VGROUP_INF_REDUX= VGROUP_INF_REDUX.val - else: - VGROUP_INF_WEIGHT= 0.0 - 
VGROUP_INF_REDUX= None - - - PREF_DO_UV= PREF_DO_UV.val - PREF_DO_VCOL= PREF_DO_VCOL.val - PREF_DO_WEIGHTS= PREF_DO_WEIGHTS.val - PREF_OTHER_SEL_OBS= PREF_OTHER_SEL_OBS.val - - - t= sys.time() - - is_editmode = Window.EditMode() # Exit Editmode. - if is_editmode: Window.EditMode(0) - Window.WaitCursor(1) - print 'reducing:', act_ob.name, act_ob.getData(1) - BPyMesh.redux(act_ob, PREF_REDUX, PREF_BOUNDRY_WEIGHT, PREF_REM_DOUBLES, PREF_FACE_AREA_WEIGHT, PREF_FACE_TRIANGULATE, PREF_DO_UV, PREF_DO_VCOL, PREF_DO_WEIGHTS, VGROUP_INF_REDUX, VGROUP_INF_WEIGHT) - - if PREF_OTHER_SEL_OBS: - for ob in scn.objects.context: - if ob.type == 'Mesh' and ob != act_ob: - print 'reducing:', ob.name, ob.getData(1) - BPyMesh.redux(ob, PREF_REDUX, PREF_BOUNDRY_WEIGHT, PREF_REM_DOUBLES, PREF_FACE_AREA_WEIGHT, PREF_FACE_TRIANGULATE, PREF_DO_UV, PREF_DO_VCOL, PREF_DO_WEIGHTS, VGROUP_INF_REDUX, VGROUP_INF_WEIGHT) - Window.RedrawAll() - - if is_editmode: Window.EditMode(1) - Window.WaitCursor(0) - Window.RedrawAll() - - print 'Reduction done in %.6f sec.' % (sys.time()-t) - -if __name__ == '__main__': - main() \ No newline at end of file diff --git a/release/scripts/mesh_poly_reduce_grid.py b/release/scripts/mesh_poly_reduce_grid.py deleted file mode 100644 index 2903909027a..00000000000 --- a/release/scripts/mesh_poly_reduce_grid.py +++ /dev/null @@ -1,351 +0,0 @@ -#!BPY -""" -Name: 'Poly Reduce Selection (Unsubsurf)' -Blender: 245 -Group: 'Mesh' -Tooltip: 'predictable mesh simplifaction maintaining face loops' -""" - -from Blender import Scene, Mesh, Window, sys -import BPyMessages -import bpy - -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Script copyright (C) Campbell J Barton -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - - -def my_mesh_util(me): - me_verts = me.verts - - vert_faces = [ [] for v in me_verts] - vert_faces_corner = [ [] for v in me_verts] - - - # Ignore topology where there are not 2 faces connected to an edge. - edge_count = {} - for f in me.faces: - for edkey in f.edge_keys: - try: - edge_count[edkey] += 1 - except: - edge_count[edkey] = 1 - - for edkey, count in edge_count.iteritems(): - - # Ignore verts that connect to edges with more than 2 faces. 
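# A standalone sketch of the edge_count bookkeeping used above: an "edge key"
# is the sorted pair of vertex indices, every face contributes one key per
# side, and any key used by a number of faces other than 2 marks topology the
# unsubsurf pass should skip.  Faces are plain index tuples here rather than
# Blender MFace objects.
def face_edge_keys(face):
    """Sorted vertex-index pairs for each edge of a tri/quad index tuple."""
    return [tuple(sorted((face[i - 1], face[i]))) for i in range(len(face))]

def non_manifold_edge_keys(faces):
    """Edge keys not shared by exactly two faces."""
    edge_count = {}
    for f in faces:
        for key in face_edge_keys(f):
            edge_count[key] = edge_count.get(key, 0) + 1
    return sorted(k for k, n in edge_count.items() if n != 2)

# Two quads sharing the edge (1, 2); every other edge is a boundary.
faces = [(0, 1, 2, 3), (1, 4, 5, 2)]
assert (1, 2) not in non_manifold_edge_keys(faces)
assert (0, 1) in non_manifold_edge_keys(faces)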
- if count != 2: - vert_faces[edkey[0]] = None - vert_faces[edkey[1]] = None - # Done - - - - def faces_set_verts(face_ls): - unique_verts = set() - for f in face_ls: - for v in f: - unique_verts.add(v.index) - return unique_verts - - for f in me.faces: - for corner, v in enumerate(f): - i = v.index - if vert_faces[i] != None: - vert_faces[i].append(f) - vert_faces_corner[i].append( corner ) - - grid_data_ls = [] - - for vi, face_ls in enumerate(vert_faces): - if face_ls != None: - if len(face_ls) == 4: - if face_ls[0].sel and face_ls[1].sel and face_ls[2].sel and face_ls[3].sel: - # Support triangles also - unique_vert_count = len(faces_set_verts(face_ls)) - quads = 0 - for f in face_ls: - if len(f) ==4: - quads += 1 - if unique_vert_count==5+quads: # yay we have a grid - grid_data_ls.append( (vi, face_ls) ) - - elif len(face_ls) == 3: - if face_ls[0].sel and face_ls[1].sel and face_ls[2].sel: - unique_vert_count = len(faces_set_verts(face_ls)) - if unique_vert_count==4: # yay we have 3 triangles to make into a bigger triangle - grid_data_ls.append( (vi, face_ls) ) - - - - # Now sort out which grid faces to use - - - # This list will be used for items we can convert, vertex is key, faces are values - grid_data_dict = {} - - if not grid_data_ls: - print "doing nothing" - return - - # quick lookup for the opposing corner of a qiad - quad_diag_mapping = 2,3,0,1 - - verts_used = [0] * len(me_verts) # 0 == untouched, 1==should touch, 2==touched - verts_used[grid_data_ls[0][0]] = 1 # start touching 1! - - # From the corner vert, get the 2 edges that are not the corner or its opposing vert, this edge will make a new face - quad_edge_mapping = (1,3), (2,0), (1,3), (0,2) # hi-low, low-hi order is intended - tri_edge_mapping = (1,2), (0,2), (0,1) - - done_somthing = True - while done_somthing: - done_somthing = False - grid_data_ls_index = -1 - - for vi, face_ls in grid_data_ls: - grid_data_ls_index += 1 - if len(face_ls) == 3: - grid_data_dict[vi] = face_ls - grid_data_ls.pop( grid_data_ls_index ) - break - elif len(face_ls) == 4: - # print vi - if verts_used[vi] == 1: - verts_used[vi] = 2 # dont look at this again. - done_somthing = True - - grid_data_dict[vi] = face_ls - - # Tag all faces verts as used - - for i, f in enumerate(face_ls): - # i == face index on vert, needed to recall which corner were on. - v_corner = vert_faces_corner[vi][i] - fv =f.v - - if len(f) == 4: - v_other = quad_diag_mapping[v_corner] - # get the 2 other corners - corner1, corner2 = quad_edge_mapping[v_corner] - if verts_used[fv[v_other].index] == 0: - verts_used[fv[v_other].index] = 1 # TAG for touching! - else: - corner1, corner2 = tri_edge_mapping[v_corner] - - verts_used[fv[corner1].index] = 2 # Dont use these, they are - verts_used[fv[corner2].index] = 2 - - - # remove this since we have used it. - grid_data_ls.pop( grid_data_ls_index ) - - break - - if done_somthing == False: - # See if there are any that have not even been tagged, (probably on a different island), then tag them. - - for vi, face_ls in grid_data_ls: - if verts_used[vi] == 0: - verts_used[vi] = 1 - done_somthing = True - break - - - # Now we have all the areas we will fill, calculate corner triangles we need to fill in. - new_faces = [] - quad_del_vt_map = (1,2,3), (0,2,3), (0,1,3), (0,1,2) - for vi, face_ls in grid_data_dict.iteritems(): - for i, f in enumerate(face_ls): - if len(f) == 4: - # i == face index on vert, needed to recall which corner were on. 
- v_corner = vert_faces_corner[vi][i] - v_other = quad_diag_mapping[v_corner] - fv =f.v - - #print verts_used[fv[v_other].index] - #if verts_used[fv[v_other].index] != 2: # DOSNT WORK ALWAYS - - if 1: # THIS IS LAzY - some of these faces will be removed after adding. - # Ok we are removing half of this face, add the other half - - # This is probably slower - # new_faces.append( [fv[ii].index for ii in (0,1,2,3) if ii != v_corner ] ) - - # do this instead - new_faces.append( (fv[quad_del_vt_map[v_corner][0]], fv[quad_del_vt_map[v_corner][1]], fv[quad_del_vt_map[v_corner][2]]) ) - - del grid_data_ls - - - # me.sel = 0 - def faceCombine4(vi, face_ls): - edges = [] - - for i, f in enumerate(face_ls): - fv = f.v - v_corner = vert_faces_corner[vi][i] - if len(f)==4: ed = quad_edge_mapping[v_corner] - else: ed = tri_edge_mapping[v_corner] - - edges.append( [fv[ed[0]].index, fv[ed[1]].index] ) - - # get the face from the edges - face = edges.pop() - while len(face) != 4: - # print len(edges), edges, face - for ed_idx, ed in enumerate(edges): - if face[-1] == ed[0] and (ed[1] != face[0]): - face.append(ed[1]) - elif face[-1] == ed[1] and (ed[0] != face[0]): - face.append(ed[0]) - else: - continue - - edges.pop(ed_idx) # we used the edge alredy - break - - return face - - for vi, face_ls in grid_data_dict.iteritems(): - if len(face_ls) == 4: - new_faces.append( faceCombine4(vi, face_ls) ) - #pass - if len(face_ls) == 3: # 3 triangles - face = list(faces_set_verts(face_ls)) - face.remove(vi) - new_faces.append( face ) - - - # Now remove verts surounded by 3 triangles - - - - # print new_edges - # me.faces.extend(new_faces, ignoreDups=True) - - ''' - faces_remove = [] - for vi, face_ls in grid_data_dict.iteritems(): - faces_remove.extend(face_ls) - ''' - - orig_facelen = len(me.faces) - - orig_faces = list(me.faces) - me.faces.extend(new_faces, ignoreDups=True) - new_faces = list(me.faces)[len(orig_faces):] - - - - - - if me.faceUV: - uvnames = me.getUVLayerNames() - act_uvlay = me.activeUVLayer - - vert_faces_uvs = [] - vert_faces_images = [] - - - act_uvlay = me.activeUVLayer - - for uvlay in uvnames: - me.activeUVLayer = uvlay - vert_faces_uvs[:] = [None] * len(me.verts) - vert_faces_images[:] = vert_faces_uvs[:] - - for i,f in enumerate(orig_faces): - img = f.image - fv = f.v - uv = f.uv - mat = f.mat - for i,v in enumerate(fv): - vi = v.index - vert_faces_uvs[vi] = uv[i] # no nice averaging - vert_faces_images[vi] = img - - - # Now copy UVs across - for f in new_faces: - fi = [v.index for v in f.v] - f.image = vert_faces_images[fi[0]] - uv = f.uv - for i,vi in enumerate(fi): - uv[i][:] = vert_faces_uvs[vi] - - if len(me.materials) > 1: - vert_faces_mats = [None] * len(me.verts) - for i,f in enumerate(orig_faces): - mat = f.mat - for i,v in enumerate(f.v): - vi = v.index - vert_faces_mats[vi] = mat - - # Now copy UVs across - for f in new_faces: - print vert_faces_mats[f.v[0].index] - f.mat = vert_faces_mats[f.v[0].index] - - - me.verts.delete(grid_data_dict.keys()) - - # me.faces.delete(1, faces_remove) - - if me.faceUV: - me.activeUVLayer = act_uvlay - - me.calcNormals() - -def main(): - - # Gets the current scene, there can be many scenes in 1 blend file. - sce = bpy.data.scenes.active - - # Get the active object, there can only ever be 1 - # and the active object is always the editmode object. 
- ob_act = sce.objects.active - - if not ob_act or ob_act.type != 'Mesh': - BPyMessages.Error_NoMeshActive() - return - - is_editmode = Window.EditMode() - if is_editmode: Window.EditMode(0) - - Window.WaitCursor(1) - me = ob_act.getData(mesh=1) # old NMesh api is default - t = sys.time() - - # Run the mesh editing function - my_mesh_util(me) - - # Restore editmode if it was enabled - if is_editmode: Window.EditMode(1) - - # Timing the script is a good way to be aware on any speed hits when scripting - print 'My Script finished in %.2f seconds' % (sys.time()-t) - Window.WaitCursor(0) - - -# This lets you can import the script without running it -if __name__ == '__main__': - main() - diff --git a/release/scripts/mesh_skin.py b/release/scripts/mesh_skin.py deleted file mode 100644 index 4a330a516fb..00000000000 --- a/release/scripts/mesh_skin.py +++ /dev/null @@ -1,639 +0,0 @@ -#!BPY - -""" -Name: 'Skin Faces/Edge-Loops' -Blender: 243 -Group: 'MeshFaceKey' -Tooltip: 'Select 2 vert loops, then run this script.' -""" - -__author__ = "Campbell Barton AKA Ideasman" -__url__ = ["blenderartists.org", "www.blender.org"] -__version__ = "1.1 2006/12/26" - -__bpydoc__ = """\ -With this script vertex loops can be skinned: faces are created to connect the -selected loops of vertices. - -Usage: - -In mesh Edit mode select the vertices of the loops (closed paths / curves of -vertices: circles, for example) that should be skinned, then run this script. -A pop-up will provide further options, if the results of a method are not adequate try one of the others. -""" - - -# $Id$ -# -# -------------------------------------------------------------------------- -# Skin Selected edges 1.0 By Campbell Barton (AKA Ideasman) -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
-# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -# Made by Ideasman/Campbell 2005/06/15 - cbarton@metavr.com - -import Blender -import bpy -from Blender import Window -from Blender.Mathutils import MidpointVecs, Vector -from Blender.Mathutils import AngleBetweenVecs as _AngleBetweenVecs_ -import BPyMessages - -from Blender.Draw import PupMenu - -BIG_NUM = 1<<30 - -global CULL_METHOD -CULL_METHOD = 0 - -def AngleBetweenVecs(a1,a2): - try: - return _AngleBetweenVecs_(a1,a2) - except: - return 180.0 - -class edge(object): - __slots__ = 'v1', 'v2', 'co1', 'co2', 'length', 'removed', 'match', 'cent', 'angle', 'next', 'prev', 'normal', 'fake' - def __init__(self, v1,v2): - self.v1 = v1 - self.v2 = v2 - co1, co2= v1.co, v2.co - self.co1= co1 - self.co2= co2 - - # uv1 uv2 vcol1 vcol2 # Add later - self.length = (co1 - co2).length - self.removed = 0 # Have we been culled from the eloop - self.match = None # The other edge were making a face with - - self.cent= MidpointVecs(co1, co2) - self.angle= 0.0 - self.fake= False - -class edgeLoop(object): - __slots__ = 'centre', 'edges', 'normal', 'closed', 'backup_edges' - def __init__(self, loop, me, closed): # Vert loop - # Use next and prev, nextDist, prevDist - - # Get Loops centre. - fac= len(loop) - verts = me.verts - self.centre= reduce(lambda a,b: a+verts[b].co/fac, loop, Vector()) - - # Convert Vert loop to Edges. - self.edges = [edge(verts[loop[vIdx-1]], verts[loop[vIdx]]) for vIdx in xrange(len(loop))] - - if not closed: - self.edges[0].fake = True # fake edge option - - self.closed = closed - - - # Assign linked list - for eIdx in xrange(len(self.edges)-1): - self.edges[eIdx].next = self.edges[eIdx+1] - self.edges[eIdx].prev = self.edges[eIdx-1] - # Now last - self.edges[-1].next = self.edges[0] - self.edges[-1].prev = self.edges[-2] - - - - # GENERATE AN AVERAGE NORMAL FOR THE WHOLE LOOP. - self.normal = Vector() - for e in self.edges: - n = (self.centre-e.co1).cross(self.centre-e.co2) - # Do we realy need tot normalize? - n.normalize() - self.normal += n - - # Generate the angle - va= e.cent - e.prev.cent - vb= e.next.cent - e.cent - - e.angle= AngleBetweenVecs(va, vb) - - # Blur the angles - #for e in self.edges: - # e.angle= (e.angle+e.next.angle)/2 - - # Blur the angles - #for e in self.edges: - # e.angle= (e.angle+e.prev.angle)/2 - - self.normal.normalize() - - # Generate a normal for each edge. - for e in self.edges: - - n1 = e.co1 - n2 = e.co2 - n3 = e.prev.co1 - - a = n1-n2 - b = n1-n3 - normal1 = a.cross(b) - normal1.normalize() - - n1 = e.co2 - n3 = e.next.co2 - n2 = e.co1 - - a = n1-n2 - b = n1-n3 - - normal2 = a.cross(b) - normal2.normalize() - - # Reuse normal1 var - normal1 += normal1 + normal2 - normal1.normalize() - - e.normal = normal1 - #print e.normal - - - - def backup(self): - # Keep a backup of the edges - self.backup_edges = self.edges[:] - - def restore(self): - self.edges = self.backup_edges[:] - for e in self.edges: - e.removed = 0 - - def reverse(self): - self.edges.reverse() - self.normal.negate() - - for e in self.edges: - e.normal.negate() - e.v1, e.v2 = e.v2, e.v1 - e.co1, e.co2 = e.co2, e.co1 - e.next, e.prev = e.prev, e.next - - - def removeSmallest(self, cullNum, otherLoopLen): - ''' - Removes N Smallest edges and backs up the loop, - this is so we can loop between 2 loops as if they are the same length, - backing up and restoring incase the loop needs to be skinned with another loop of a different length. 
- ''' - global CULL_METHOD - if CULL_METHOD == 1: # Shortest edge - eloopCopy = self.edges[:] - - # Length sort, smallest first - try: eloopCopy.sort(key = lambda e1: e1.length) - except: eloopCopy.sort(lambda e1, e2: cmp(e1.length, e2.length )) - - # Dont use atm - #eloopCopy.sort(lambda e1, e2: cmp(e1.angle*e1.length, e2.angle*e2.length)) # Length sort, smallest first - #eloopCopy.sort(lambda e1, e2: cmp(e1.angle, e2.angle)) # Length sort, smallest first - - remNum = 0 - for i, e in enumerate(eloopCopy): - if not e.fake: - e.removed = 1 - self.edges.remove( e ) # Remove from own list, still in linked list. - remNum += 1 - - if not remNum < cullNum: - break - - else: # CULL METHOD is even - - culled = 0 - - step = int(otherLoopLen / float(cullNum)) * 2 - - currentEdge = self.edges[0] - while culled < cullNum: - - # Get the shortest face in the next STEP - step_count= 0 - bestAng= 360.0 - smallestEdge= None - while step_count<=step or smallestEdge==None: - step_count+=1 - if not currentEdge.removed: # 0 or -1 will not be accepted - if currentEdge.angle 2: - return None - - vert_used[i] = True - - # do an edgeloop seek - if len(sbl) == 2: - contextVertLoop= [sbl[0], i, sbl[1]] # start the vert loop - vert_used[contextVertLoop[ 0]] = True - vert_used[contextVertLoop[-1]] = True - else: - contextVertLoop= [i, sbl[0]] - vert_used[contextVertLoop[ 1]] = True - - # Always seek up - ok = True - while ok: - ok = False - closed = False - sbl = vert_siblings[contextVertLoop[-1]] - if len(sbl) == 2: - next = sbl[not sbl.index( contextVertLoop[-2] )] - if vert_used[next]: - closed = True - # break - else: - contextVertLoop.append( next ) # get the vert that isnt the second last - vert_used[next] = True - ok = True - - # Seek down as long as the starting vert was not at the edge. - if not closed and len(vert_siblings[i]) == 2: - - ok = True - while ok: - ok = False - sbl = vert_siblings[contextVertLoop[0]] - if len(sbl) == 2: - next = sbl[not sbl.index( contextVertLoop[1] )] - if vert_used[next]: - closed = True - else: - contextVertLoop.insert(0, next) # get the vert that isnt the second last - vert_used[next] = True - ok = True - - mainVertLoops.append((contextVertLoop, closed)) - - - verts = me.verts - # convert from indicies to verts - # mainVertLoops = [([verts[i] for i in contextVertLoop], closed) for contextVertLoop, closed in mainVertLoops] - # print len(mainVertLoops) - return mainVertLoops - - - -def skin2EdgeLoops(eloop1, eloop2, me, ob, MODE): - - new_faces= [] # - - # Make sure e1 loops is bigger then e2 - if len(eloop1.edges) != len(eloop2.edges): - if len(eloop1.edges) < len(eloop2.edges): - eloop1, eloop2 = eloop2, eloop1 - - eloop1.backup() # were about to cull faces - CULL_FACES = len(eloop1.edges) - len(eloop2.edges) - eloop1.removeSmallest(CULL_FACES, len(eloop1.edges)) - else: - CULL_FACES = 0 - # First make sure poly vert loops are in sync with eachother. - - # The vector allong which we are skinning. - skinVector = eloop1.centre - eloop2.centre - - loopDist = skinVector.length - - # IS THE LOOP FLIPPED, IF SO FLIP BACK. 
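# Illustrative standalone sketch (not from the original script): getVertLoops()
# above turns the selected edges into ordered vertex loops by mapping every
# vertex to its neighbours and then walking that map in both directions.  The
# sketch below assumes plain integer vertex indices and no Blender API.
def build_vert_loops(edges):
    """edges: iterable of (v1, v2) index pairs -> [(loop_indices, closed), ...]"""
    siblings = {}
    for v1, v2 in edges:
        siblings.setdefault(v1, []).append(v2)
        siblings.setdefault(v2, []).append(v1)
    if any(len(s) > 2 for s in siblings.values()):
        return None  # a vertex on more than 2 selected edges is ambiguous
    used, loops = set(), []
    for start in siblings:
        if start in used:
            continue
        loop, closed = [start], False
        used.add(start)
        for grow_at_end in (True, False):  # walk forward, then backward
            if closed:
                break
            while True:
                cur = loop[-1] if grow_at_end else loop[0]
                free = [v for v in siblings[cur] if v not in used]
                if not free:
                    closed = loop[0] in siblings[cur] and len(loop) > 2
                    break
                used.add(free[0])
                if grow_at_end:
                    loop.append(free[0])
                else:
                    loop.insert(0, free[0])
        loops.append((loop, closed))
    return loops
# build_vert_loops([(0, 1), (1, 2), (2, 0), (10, 11), (11, 12)])
# -> [([0, 1, 2], True), ([10, 11, 12], False)]  (ordering may vary)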
we keep it flipped, its ok, - if eloop1.closed or eloop2.closed: - angleBetweenLoopNormals = AngleBetweenVecs(eloop1.normal, eloop2.normal) - if angleBetweenLoopNormals > 90: - eloop2.reverse() - - - DIR= eloop1.centre - eloop2.centre - - # if eloop2.closed: - bestEloopDist = BIG_NUM - bestOffset = 0 - # Loop rotation offset to test.1 - eLoopIdxs = range(len(eloop1.edges)) - for offset in xrange(len(eloop1.edges)): - totEloopDist = 0 # Measure this total distance for thsi loop. - - offsetIndexLs = eLoopIdxs[offset:] + eLoopIdxs[:offset] # Make offset index list - - - # e1Idx is always from 0uu to N, e2Idx is offset. - for e1Idx, e2Idx in enumerate(offsetIndexLs): - e1= eloop1.edges[e1Idx] - e2= eloop2.edges[e2Idx] - - - # Include fan connections in the measurement. - OK= True - while OK or e1.removed: - OK= False - - # Measure the vloop distance =============== - diff= ((e1.cent - e2.cent).length) #/ nangle1 - - ed_dir= e1.cent-e2.cent - a_diff= AngleBetweenVecs(DIR, ed_dir)/18 # 0 t0 18 - - totEloopDist += (diff * (1+a_diff)) / (1+loopDist) - - # Premeture break if where no better off - if totEloopDist > bestEloopDist: - break - - e1=e1.next - - if totEloopDist < bestEloopDist: - bestOffset = offset - bestEloopDist = totEloopDist - - # Modify V2 LS for Best offset - eloop2.edges = eloop2.edges[bestOffset:] + eloop2.edges[:bestOffset] - - else: - # Both are open loops, easier to calculate. - - - # Make sure the fake edges are at the start. - for i, edloop in enumerate((eloop1, eloop2)): - # print "LOOPO" - if edloop.edges[0].fake: - # alredy at the start - #print "A" - pass - elif edloop.edges[-1].fake: - # put the end at the start - edloop.edges.insert(0, edloop.edges.pop()) - #print "B" - - else: - for j, ed in enumerate(edloop.edges): - if ed.fake: - #print "C" - edloop.edges = edloop.edges = edloop.edges[j:] + edloop.edges[:j] - break - # print "DONE" - ed1, ed2 = eloop1.edges[0], eloop2.edges[0] - - if not ed1.fake or not ed2.fake: - raise "Error" - - # Find the join that isnt flipped (juts like detecting a bow-tie face) - a1 = (ed1.co1 - ed2.co1).length + (ed1.co2 - ed2.co2).length - a2 = (ed1.co1 - ed2.co2).length + (ed1.co2 - ed2.co1).length - - if a1 > a2: - eloop2.reverse() - # make the first edge the start edge still - eloop2.edges.insert(0, eloop2.edges.pop()) - - - - - for loopIdx in xrange(len(eloop2.edges)): - e1 = eloop1.edges[loopIdx] - e2 = eloop2.edges[loopIdx] - - # Remember the pairs for fan filling culled edges. - e1.match = e2; e2.match = e1 - - if not (e1.fake or e2.fake): - new_faces.append([e1.v1, e1.v2, e2.v2, e2.v1]) - - # FAN FILL MISSING FACES. - if CULL_FACES: - # Culled edges will be in eloop1. - FAN_FILLED_FACES = 0 - - contextEdge = eloop1.edges[0] # The larger of teh 2 - while FAN_FILLED_FACES < CULL_FACES: - while contextEdge.next.removed == 0: - contextEdge = contextEdge.next - - vertFanPivot = contextEdge.match.v2 - - while contextEdge.next.removed == 1: - #if not contextEdge.next.fake: - new_faces.append([contextEdge.next.v1, contextEdge.next.v2, vertFanPivot]) - - # Should we use another var?, this will work for now. - contextEdge.next.removed = 1 - - contextEdge = contextEdge.next - FAN_FILLED_FACES += 1 - - # may need to fan fill backwards 1 for non closed loops. - - eloop1.restore() # Add culled back into the list. 
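# Illustrative standalone sketch (not from the original script): for two closed
# loops of equal length, the search above tries every rotational offset of the
# second loop and keeps the one whose paired edge midpoints are closest
# overall, so the bridging faces do not twist.  Plain 3-tuples, no Mathutils.
import math

def best_offset(mids_a, mids_b):
    """Return the rotation of mids_b that lines up best with mids_a."""
    assert len(mids_a) == len(mids_b)
    n = len(mids_a)
    def dist(p, q):
        return math.sqrt(sum((pi - qi) ** 2 for pi, qi in zip(p, q)))
    best, best_total = 0, float("inf")
    for offset in range(n):
        total = 0.0
        for i in range(n):
            total += dist(mids_a[i], mids_b[(i + offset) % n])
            if total >= best_total:
                break  # early out, as the script does
        if total < best_total:
            best, best_total = offset, total
    return best
# a = [(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0)]
# best_offset(a, a[2:] + a[:2]) -> 2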
- - return new_faces - -def main(): - global CULL_METHOD - - is_editmode = Window.EditMode() - if is_editmode: Window.EditMode(0) - ob = bpy.data.scenes.active.objects.active - if ob == None or ob.type != 'Mesh': - BPyMessages.Error_NoMeshActive() - return - - me = ob.getData(mesh=1) - - if me.multires: - BPyMessages.Error_NoMeshMultiresEdit() - return - - time1 = Blender.sys.time() - selEdges = getSelectedEdges(me, ob) - vertLoops = getVertLoops(selEdges, me) # list of lists of edges. - if vertLoops == None: - PupMenu('Error%t|Selection includes verts that are a part of more then 1 loop') - if is_editmode: Window.EditMode(1) - return - # print len(vertLoops) - - - if len(vertLoops) > 2: - choice = PupMenu('Loft '+str(len(vertLoops))+' edge loops%t|loop|segment') - if choice == -1: - if is_editmode: Window.EditMode(1) - return - elif len(vertLoops) < 2: - PupMenu('Error%t|No Vertloops found!') - if is_editmode: Window.EditMode(1) - return - else: - choice = 2 - - - # The line below checks if any of the vert loops are differenyt in length. - if False in [len(v[0]) == len(vertLoops[0][0]) for v in vertLoops]: - CULL_METHOD = PupMenu('Small to large edge loop distrobution method%t|remove edges evenly|remove smallest edges') - if CULL_METHOD == -1: - if is_editmode: Window.EditMode(1) - return - - if CULL_METHOD ==1: # RESET CULL_METHOD - CULL_METHOD = 0 # shortest - else: - CULL_METHOD = 1 # even - - - time1 = Blender.sys.time() - # Convert to special edge data. - edgeLoops = [] - for vloop, closed in vertLoops: - edgeLoops.append(edgeLoop(vloop, me, closed)) - - - # VERT LOOP ORDERING CODE - # "Build a worm" list - grow from Both ends - edgeOrderedList = [edgeLoops.pop()] - - # Find the closest. - bestSoFar = BIG_NUM - bestIdxSoFar = None - for edLoopIdx, edLoop in enumerate(edgeLoops): - l =(edgeOrderedList[-1].centre - edLoop.centre).length - if l < bestSoFar: - bestIdxSoFar = edLoopIdx - bestSoFar = l - - edgeOrderedList.append( edgeLoops.pop(bestIdxSoFar) ) - - # Now we have the 2 closest, append to either end- - # Find the closest. - while edgeLoops: - bestSoFar = BIG_NUM - bestIdxSoFar = None - first_or_last = 0 # Zero is first - for edLoopIdx, edLoop in enumerate(edgeLoops): - l1 =(edgeOrderedList[-1].centre - edLoop.centre).length - - if l1 < bestSoFar: - bestIdxSoFar = edLoopIdx - bestSoFar = l1 - first_or_last = 1 # last - - l2 =(edgeOrderedList[0].centre - edLoop.centre).length - if l2 < bestSoFar: - bestIdxSoFar = edLoopIdx - bestSoFar = l2 - first_or_last = 0 # last - - if first_or_last: # add closest Last - edgeOrderedList.append( edgeLoops.pop(bestIdxSoFar) ) - else: # Add closest First - edgeOrderedList.insert(0, edgeLoops.pop(bestIdxSoFar) ) # First - - faces = [] - - for i in xrange(len(edgeOrderedList)-1): - faces.extend( skin2EdgeLoops(edgeOrderedList[i], edgeOrderedList[i+1], me, ob, 0) ) - if choice == 1 and len(edgeOrderedList) > 2: # Loop - faces.extend( skin2EdgeLoops(edgeOrderedList[0], edgeOrderedList[-1], me, ob, 0) ) - - # REMOVE SELECTED FACES. - MESH_MODE= Blender.Mesh.Mode() - if MESH_MODE & Blender.Mesh.SelectModes.EDGE or MESH_MODE & Blender.Mesh.SelectModes.VERTEX: pass - elif MESH_MODE & Blender.Mesh.SelectModes.FACE: - try: me.faces.delete(1, [ f for f in me.faces if f.sel ]) - except: pass - - me.faces.extend(faces, smooth = True) - - print '\nSkin done in %.4f sec.' 
% (Blender.sys.time()-time1) - - - if is_editmode: Window.EditMode(1) - -if __name__ == '__main__': - main() diff --git a/release/scripts/mesh_solidify.py b/release/scripts/mesh_solidify.py deleted file mode 100644 index 9e11ed68c63..00000000000 --- a/release/scripts/mesh_solidify.py +++ /dev/null @@ -1,345 +0,0 @@ -#!BPY -""" -Name: 'Solidify Selection' -Blender: 243 -Group: 'Mesh' -Tooltip: 'Makes the mesh solid by creating a second skin.' -""" - -__author__ = "Campbell Barton" -__url__ = ("www.blender.org", "blenderartists.org") -__version__ = "1.1" - -__bpydoc__ = """\ -This script makes a skin from the selected faces. -Optionaly you can skin between the original and new faces to make a watertight solid object -""" - -# -------------------------------------------------------------------------- -# Solidify Selection 1.0 by Campbell Barton (AKA Ideasman42) -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -from Blender import * -import bpy -import BPyMesh -# reload(BPyMesh) -import BPyMessages -# reload(BPyMessages) - -from BPyMathutils import angleToLength - -# python 2.3 has no reversed() iterator. this will only work on lists and tuples -try: - reversed -except: - def reversed(l): return l[::-1] - -def copy_facedata_multilayer(me, from_faces, to_faces): - ''' - Tkes 2 lists of faces and copies multilayer data from 1 to another - make sure they are aligned, cant copy from a quad to a tri, used for solidify selection. - ''' - - def copy_default_face(data): - face_from, face_to = data - face_to.mat = face_from.mat - face_to.smooth = face_from.smooth - face_to.sel = True - face_from.sel = False - - def copy_tex_face(data): - face_from, face_to = data - face_to.uv = [c for c in reversed(face_from.uv)] - face_to.mode = face_from.mode - face_to.flag = face_from.flag - face_to.image = face_from.image - - def copy_col_face(data): - face_from, face_to = data - face_to.col = [c for c in reversed(face_from.col)] - - # make a list of face_from, face_to pairs - #face_pairs = zip(faces_sel, [me_faces[len_faces + i] for i in xrange(len(faces_sel))]) - face_pairs = zip(from_faces, to_faces) - - # Copy properties from 1 set of faces to another. 
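# Illustrative standalone sketch (not from the original script): the copies
# made above reverse per-corner data (UVs, vertex colours) because the new
# inner faces are built with flipped winding.  Hypothetical Face record, no
# Blender API assumed.
class Face(object):
    def __init__(self, mat=0, smooth=False, uv=None, col=None):
        self.mat, self.smooth = mat, smooth
        self.uv, self.col = list(uv or []), list(col or [])

def copy_face_data(src, dst):
    dst.mat = src.mat
    dst.smooth = src.smooth
    dst.uv = list(reversed(src.uv))    # follow the destination's reversed winding
    dst.col = list(reversed(src.col))
# f_new = Face()
# copy_face_data(Face(1, True, [(0, 0), (1, 0), (1, 1)], "rgb"), f_new)
# f_new.uv == [(1, 1), (1, 0), (0, 0)]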
- map(copy_default_face, face_pairs) - - for uvlayer in me.getUVLayerNames(): - me.activeUVLayer = uvlayer - map(copy_tex_face, face_pairs) - - for collayer in me.getColorLayerNames(): - me.activeColorLayer = collayer - map(copy_col_face, face_pairs) - - # Now add quads between if we wants - - -Ang= Mathutils.AngleBetweenVecs -SMALL_NUM=0.00001 - -def solidify(me, PREF_THICK, PREF_SKIN_SIDES=True, PREF_REM_ORIG=False, PREF_COLLAPSE_SIDES=False): - - # Main code function - me_faces = me.faces - faces_sel= [f for f in me_faces if f.sel] - - BPyMesh.meshCalcNormals(me) - normals= [v.no for v in me.verts] - vertFaces= [[] for i in xrange(len(me.verts))] - for f in me_faces: - no=f.no - for v in f: - vertFaces[v.index].append(no) - - # Scale the normals by the face angles from the vertex Normals. - for i in xrange(len(me.verts)): - length=0.0 - if vertFaces[i]: - for fno in vertFaces[i]: - try: - a= Ang(fno, normals[i]) - except: - a= 0 - if a>=90: - length+=1 - elif a < SMALL_NUM: - length+= 1 - else: - length+= angleToLength(a) - - length= length/len(vertFaces[i]) - #print 'LENGTH %.6f' % length - # normals[i]= (normals[i] * length) * PREF_THICK - normals[i] *= length * PREF_THICK - - - - len_verts = len( me.verts ) - len_faces = len( me_faces ) - - vert_mapping= [-1] * len(me.verts) - verts= [] - for f in faces_sel: - for v in f: - i= v.index - if vert_mapping[i]==-1: - vert_mapping[i]= len_verts + len(verts) - verts.append(v.co + normals[i]) - - #verts= [v.co + normals[v.index] for v in me.verts] - - me.verts.extend( verts ) - #faces= [tuple([ me.verts[v.index+len_verts] for v in reversed(f.v)]) for f in me_faces ] - faces= [ tuple([vert_mapping[v.index] for v in reversed(f.v)]) for f in faces_sel ] - me_faces.extend( faces ) - - - - - # Old method before multi UVs - """ - has_uv = me.faceUV - has_vcol = me.vertexColors - for i, orig_f in enumerate(faces_sel): - new_f= me_faces[len_faces + i] - new_f.mat = orig_f.mat - new_f.smooth = orig_f.smooth - orig_f.sel=False - new_f.sel= True - new_f = me_faces[i+len_faces] - if has_uv: - new_f.uv = [c for c in reversed(orig_f.uv)] - new_f.mode = orig_f.mode - new_f.flag = orig_f.flag - if orig_f.image: - new_f.image = orig_f.image - if has_vcol: - new_f.col = [c for c in reversed(orig_f.col)] - """ - copy_facedata_multilayer(me, faces_sel, [me_faces[len_faces + i] for i in xrange(len(faces_sel))]) - - if PREF_SKIN_SIDES or PREF_COLLAPSE_SIDES: - skin_side_faces= [] - skin_side_faces_orig= [] - # Get edges of faces that only have 1 user - so we can make walls - edges = {} - - # So we can reference indicies that wrap back to the start. - ROT_TRI_INDEX = 0,1,2,0 - ROT_QUAD_INDEX = 0,1,2,3,0 - - for f in faces_sel: - f_v= f.v - for i, edgekey in enumerate(f.edge_keys): - if edges.has_key(edgekey): - edges[edgekey]= None - else: - if len(f_v) == 3: - edges[edgekey] = f, f_v, i, ROT_TRI_INDEX[i+1] - else: - edges[edgekey] = f, f_v, i, ROT_QUAD_INDEX[i+1] - del ROT_QUAD_INDEX, ROT_TRI_INDEX - - # So we can remove doubles with edges only. - if PREF_COLLAPSE_SIDES: - me.sel = False - - # Edges are done. extrude the single user edges. 
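# Illustrative standalone sketch (not from the original script): the edge
# dictionary built above keeps only edges used by exactly one selected face,
# i.e. the open border where the side walls will be built.  Faces given as
# index tuples, no Blender API.
def boundary_edges(faces):
    """faces: iterable of vertex-index tuples -> edges used by only one face."""
    seen = {}
    for face in faces:
        for i, v1 in enumerate(face):
            v2 = face[(i + 1) % len(face)]
            key = (min(v1, v2), max(v1, v2))      # order-independent edge key
            seen[key] = None if key in seen else (v1, v2)
    return [e for e in seen.values() if e is not None]
# Two quads sharing edge (1, 2): every edge is returned except the shared one.
# boundary_edges([(0, 1, 2, 3), (1, 4, 5, 2)])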
- for edge_face_data in edges.itervalues(): - if edge_face_data: # != None - f, f_v, i1, i2 = edge_face_data - v1i,v2i= f_v[i1].index, f_v[i2].index - - if PREF_COLLAPSE_SIDES: - # Collapse - cv1 = me.verts[v1i] - cv2 = me.verts[vert_mapping[v1i]] - - cv3 = me.verts[v2i] - cv4 = me.verts[vert_mapping[v2i]] - - cv1.co = cv2.co = (cv1.co+cv2.co)/2 - cv3.co = cv4.co = (cv3.co+cv4.co)/2 - - cv1.sel=cv2.sel=cv3.sel=cv4.sel=True - - - - else: - # Now make a new Face - # skin_side_faces.append( (v1i, v2i, vert_mapping[v2i], vert_mapping[v1i]) ) - skin_side_faces.append( (v2i, v1i, vert_mapping[v1i], vert_mapping[v2i]) ) - skin_side_faces_orig.append((f, len(me_faces) + len(skin_side_faces_orig), i1, i2)) - - if PREF_COLLAPSE_SIDES: - me.remDoubles(0.0001) - else: - me_faces.extend(skin_side_faces) - # Now assign properties. - """ - # Before MultiUVs - for i, origfData in enumerate(skin_side_faces_orig): - orig_f, new_f_idx, i1, i2 = origfData - new_f= me_faces[new_f_idx] - - new_f.mat= orig_f.mat - new_f.smooth= orig_f.smooth - if has_uv: - new_f.mode= orig_f.mode - new_f.flag= orig_f.flag - if orig_f.image: - new_f.image= orig_f.image - - uv1= orig_f.uv[i1] - uv2= orig_f.uv[i2] - new_f.uv= (uv1, uv2, uv2, uv1) - - if has_vcol: - col1= orig_f.col[i1] - col2= orig_f.col[i2] - new_f.col= (col1, col2, col2, col1) - """ - - for i, origfData in enumerate(skin_side_faces_orig): - orig_f, new_f_idx, i2, i1 = origfData - new_f= me_faces[new_f_idx] - - new_f.mat= orig_f.mat - new_f.smooth= orig_f.smooth - - for uvlayer in me.getUVLayerNames(): - me.activeUVLayer = uvlayer - for i, origfData in enumerate(skin_side_faces_orig): - orig_f, new_f_idx, i2, i1 = origfData - new_f= me_faces[new_f_idx] - - new_f.mode= orig_f.mode - new_f.flag= orig_f.flag - new_f.image= orig_f.image - - uv1= orig_f.uv[i1] - uv2= orig_f.uv[i2] - new_f.uv= (uv1, uv2, uv2, uv1) - - for collayer in me.getColorLayerNames(): - me.activeColorLayer = collayer - for i, origfData in enumerate(skin_side_faces_orig): - orig_f, new_f_idx, i2, i1 = origfData - new_f= me_faces[new_f_idx] - - col1= orig_f.col[i1] - col2= orig_f.col[i2] - new_f.col= (col1, col2, col2, col1) - - - if PREF_REM_ORIG: - me_faces.delete(0, faces_sel) - - - - -def main(): - scn = bpy.data.scenes.active - ob = scn.objects.active - - if not ob or ob.type != 'Mesh': - BPyMessages.Error_NoMeshActive() - return - - me = ob.getData(mesh=1) - if me.multires: - BPyMessages.Error_NoMeshMultiresEdit() - return - - # Create the variables. 
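# Illustrative standalone sketch (not from the original script): each surviving
# border edge (v1, v2) above becomes one quad joining it to its offset copy,
# with the corners ordered (v2, v1, v1', v2') so the wall's winding matches the
# inner shell.  offset_of is a hypothetical original-to-copy index map.
def side_wall_quads(border_edges, offset_of):
    return [(v2, v1, offset_of[v1], offset_of[v2]) for v1, v2 in border_edges]
# side_wall_quads([(0, 1)], {0: 10, 1: 11}) -> [(1, 0, 10, 11)]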
- PREF_THICK = Draw.Create(-0.1) - PREF_SKIN_SIDES= Draw.Create(1) - PREF_COLLAPSE_SIDES= Draw.Create(0) - PREF_REM_ORIG= Draw.Create(0) - - pup_block = [\ - ('Thick:', PREF_THICK, -10, 10, 'Skin thickness in mesh space.'),\ - ('Skin Sides', PREF_SKIN_SIDES, 'Skin between the original and new faces.'),\ - ('Collapse Sides', PREF_COLLAPSE_SIDES, 'Skin between the original and new faces.'),\ - ('Remove Original', PREF_REM_ORIG, 'Remove the selected faces after skinning.'),\ - ] - - if not Draw.PupBlock('Solid Skin Selection', pup_block): - return - - is_editmode = Window.EditMode() - if is_editmode: Window.EditMode(0) - - Window.WaitCursor(1) - - me = ob.getData(mesh=1) - solidify(me, PREF_THICK.val, PREF_SKIN_SIDES.val, PREF_REM_ORIG.val, PREF_COLLAPSE_SIDES.val) - - - Window.WaitCursor(0) - if is_editmode: Window.EditMode(1) - - Window.RedrawAll() - -if __name__ == '__main__': - main() \ No newline at end of file diff --git a/release/scripts/mesh_unfolder.py b/release/scripts/mesh_unfolder.py deleted file mode 100644 index f5c19a92bd0..00000000000 --- a/release/scripts/mesh_unfolder.py +++ /dev/null @@ -1,1582 +0,0 @@ -#!BPY -""" -Name: 'Unfold' -Blender: 245 -Group: 'Mesh' -Tip: 'Unfold meshes to create nets' -Version: v2.5 -Author: Matthew Chadwick -""" -import Blender -from Blender import * -from Blender.Mathutils import * -try: - import sys - import traceback - import math - import re - from math import * - import sys - import random - import xml.sax, xml.sax.handler, xml.sax.saxutils - - # annoying but need so classes dont raise errors - xml_sax_handler_ContentHandler = xml.sax.handler.ContentHandler - -except: - Draw.PupMenu('Error%t|A full python installation is required to run this script.') - xml = None - xml_sax_handler_ContentHandler = type(0) - -__author__ = 'Matthew Chadwick' -__version__ = '2.5 06102007' -__url__ = ["http://celeriac.net/unfolder/", "blender", "blenderartist"] -__email__ = ["post at cele[remove this text]riac.net", "scripts"] -__bpydoc__ = """\ - -Mesh Unfolder - -Unfolds the selected mesh onto a plane to form a net - -Not all meshes can be unfolded - -Meshes must be free of holes, -isolated edges (not part of a face), twisted quads and other rubbish. -Nice clean triangulated meshes unfold best - -This program is free software; you can distribute it and/or modify it under the terms -of the GNU General Public License as published by the Free Software Foundation; version 2 -or later, currently at http://www.gnu.org/copyleft/gpl.html - -The idea came while I was riding a bike. -""" - -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
-# -# ***** END GPL LICENCE BLOCK ***** - -# Face lookup -class FacesAndEdges: - def __init__(self, mesh): - self.nfaces = 0 - # straight from the documentation - self.edgeFaces = dict([(edge.key, []) for edge in mesh.edges]) - for face in mesh.faces: - face.sel = False - for key in face.edge_keys: - self.edgeFaces[key].append(face) - def findTakenAdjacentFace(self, bface, edge): - return self.findAdjacentFace(bface, edge) - # find the first untaken (non-selected) adjacent face in the list of adjacent faces for the given edge (allows for manifold meshes too) - def findAdjacentFace(self, bface, edge): - faces = self.edgeFaces[edge.key()] - for i in xrange(len(faces)): - if faces[i] == bface: - j = (i+1) % len(faces) - while(faces[j]!=bface): - if faces[j].sel == False: - return faces[j] - j = (j+1) % len(faces) - return None - def returnFace(self, face): - face.sel = False - self.nfaces-=1 - def facesTaken(self): - return self.nfaces - def takeAdjacentFace(self, bface, edge): - if (edge==None): - return None - face = self.findAdjacentFace(bface, edge) - if(face!=None): - face.sel = True - self.nfaces+=1 - return face - def takeFace(self, bface): - if(bface!=None): - bface.sel= True - self.nfaces+=1 - - -# A fold between two faces with a common edge -class Fold: - ids = -1 - def __init__(self, parent, refPoly, poly, edge, angle=None): - Fold.ids+=1 - self.id = Fold.ids - self.refPoly = refPoly - self.poly = poly - self.srcFace = None - self.desFace = None - self.edge = edge - self.foldedEdge = edge - self.rm = None - self.parent = parent - self.tree = None - if(refPoly!=None): - self.refPolyNormal = refPoly.normal() - self.polyNormal = poly.normal() - if(angle==None): - self.angle = self.calculateAngle() - self.foldingPoly = poly.rotated(edge, self.angle) - else: - self.angle = angle - self.foldingPoly = poly - self.unfoldedEdge = self.edge - self.unfoldedNormal = None - self.animAngle = self.angle - self.cr = None - self.nancestors = None - def reset(self): - self.foldingPoly = self.poly.rotated(self.edge, self.dihedralAngle()) - def getID(self): - return self.id - def getParent(self): - return self.parent - def ancestors(self): - if(self.nancestors==None): - self.nancestors = self.computeAncestors() - return self.nancestors - def computeAncestors(self): - if(self.parent==None): - return 0 - else: - return self.parent.ancestors()+1 - def dihedralAngle(self): - return self.angle - def unfoldTo(self, f): - self.animAngle = self.angle*f - self.foldingPoly = self.poly.rotated(self.edge, self.animAngle) - def calculateAngle(self): - sangle = Mathutils.AngleBetweenVecs(self.refPolyNormal, self.polyNormal) - if(sangle!=sangle): - sangle=0.0 - ncp = self.refPolyNormal.cross(self.polyNormal) - dp = ncp.dot(self.edge.vector) - if(dp>0.0): - return +sangle - else: - return -sangle - def alignWithParent(self): - pass - def unfoldedNormal(self): - return self.unfoldedNormal - def getEdge(self): - return self.edge - def getFace(self): - return self.poly - def testFace(self): - return Poly.fromVectors([self.edge.v1, self.edge.v2, Vector([0,0,0])]) - def unfoldedFace(self): - return self.foldingPoly - def unfold(self): - if(self.parent!=None): - self.parent.foldFace(self) - def foldFace(self, child): - child.foldingPoly.rotate(self.edge, self.animAngle) - if(self.parent!=None): - self.parent.foldFace(child) - -class Cut(Fold): - pass - -# Trees build folds by traversing the mesh according to a local measure -class Tree: - def __init__(self, net, parent,fold,otherConstructor=None): - self.net = net - 
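# Illustrative standalone sketch (not from the original script):
# Fold.calculateAngle() above computes the fold as the unsigned angle between
# the two face normals and takes its sign from cross(n_ref, n_poly) relative to
# the shared edge vector.  Plain 3-tuples, no Mathutils assumed.
import math

def cross(a, b):
    return (a[1]*b[2] - a[2]*b[1], a[2]*b[0] - a[0]*b[2], a[0]*b[1] - a[1]*b[0])

def dot(a, b):
    return a[0]*b[0] + a[1]*b[1] + a[2]*b[2]

def signed_fold_angle(n_ref, n_poly, edge_vec):
    """Signed dihedral angle (degrees) between two faces sharing edge_vec."""
    la = math.sqrt(dot(n_ref, n_ref))
    lb = math.sqrt(dot(n_poly, n_poly))
    c = max(-1.0, min(1.0, dot(n_ref, n_poly) / (la * lb)))
    angle = math.degrees(math.acos(c))
    return angle if dot(cross(n_ref, n_poly), edge_vec) > 0.0 else -angle
# Two faces meeting at 90 degrees along the X axis:
# signed_fold_angle((0, 0, 1), (0, -1, 0), (1, 0, 0)) -> 90.0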
self.fold = fold - self.face = fold.srcFace - self.poly = Poly.fromBlenderFace(self.face) - self.generations = net.generations - self.growing = True - self.tooLong = False - self.parent = parent - self.grown = False - if not(otherConstructor): - self.edges = net.edgeIteratorClass(self) - def goodness(self): - return self.edges.goodness() - def compare(self, other): - if(self.goodness() > other.goodness()): - return +1 - else: - return -1 - def isGrowing(self): - return self.growing - def beGrowing(self): - self.growing = True - def grow(self): - self.tooLong = self.fold.ancestors()>self.generations - if(self.edges.hasNext() and self.growing): - edge = self.edges.next() - tface = self.net.facesAndEdges.takeAdjacentFace(self.face, edge) - if(tface!=None): - self.branch(tface, edge) - if(self.parent==None): - self.grow() - else: - self.grown = True - def isGrown(self): - return self.grown - def canGrow(self): - return (self.parent!=None and self.parent.grown) - def getNet(self): - return self.net - def getFold(self): - return self.fold - def getFace(self): - return self.face - def branch(self, tface, edge): - fold = Fold(self.fold, self.poly, Poly.fromBlenderFace(tface), edge) - fold.srcFace = tface - self.net.myFacesVisited+=1 - tree = Tree(self.net, self, fold) - fold.tree = tree - fold.unfold() - overlaps = self.net.checkOverlaps(fold) - nc = len(overlaps) - self.net.overlaps+=nc - if(nc>0 and self.net.avoidsOverlaps): - self.handleOverlap(fold, overlaps) - else: - self.addFace(fold) - def handleOverlap(self, fold, overlaps): - self.net.facesAndEdges.returnFace(fold.srcFace) - self.net.myFacesVisited-=1 - for cfold in overlaps: - ttree = cfold.tree - ttree.growing = True - ttree.grow() - def addFace(self, fold): - ff = fold.unfoldedFace() - fold.desFace = self.net.addFace(ff, fold.srcFace) - self.net.folds.append(fold) - self.net.addBranch(fold.tree) - fold.tree.growing = not(self.tooLong) - if(self.net.diffuse==False): - fold.tree.grow() - -# A Net is the result of the traversal of the mesh by Trees -class Net: - def __init__(self, src, des): - self.src = src - self.des = des - self.firstFace = None - self.firstPoly = None - self.refFold = None - self.edgeIteratorClass = RandomEdgeIterator - if(src!=None): - self.srcFaces = src.faces - self.facesAndEdges = FacesAndEdges(self.src) - self.myFacesVisited = 0 - self.facesAdded = 0 - self.folds = [] - self.cuts = [] - self.branches = [] - self.overlaps = 0 - self.avoidsOverlaps = True - self.frame = 1 - self.ff = 180.0 - self.firstFaceIndex = None - self.trees = 0 - self.foldIPO = None - self.perFoldIPO = None - self.IPOCurves = {} - self.generations = 128 - self.diffuse = True - self.noise = 0.0 - self.grownBranches = 0 - self.assignsUV = True - self.animates = False - self.showProgress = False - self.feedback = None - def setSelectedFaces(self, faces): - self.srcFaces = faces - self.facesAndEdges = FacesAndEdges(self.srcFaces) - def setShowProgress(self, show): - self.showProgress = show - # this method really needs work - def unfold(self): - selectedFaces = [face for face in self.src.faces if (self.src.faceUV and face.sel)] - if(self.avoidsOverlaps): - print "unfolding with overlap detection" - if(self.firstFaceIndex==None): - self.firstFaceIndex = random.randint(0, len(self.src.faces)-1) - else: - print "Using user-selected seed face ", self.firstFaceIndex - self.firstFace = self.src.faces[self.firstFaceIndex] - z = min([v.co.z for v in self.src.verts])-0.1 - ff = Poly.fromBlenderFace(self.firstFace) - if(len(ff.v)<3): - raise 
Exception("This mesh contains an isolated edge - it must consist only of faces") - testFace = Poly.fromVectors( [ Vector([0.0,0.0,0.0]), Vector([0.0,1.0,0.0]), Vector([1.0,1.0,0.0]) ] ) - # hmmm. I honestly can't remember why this needs to be done, but it does. - u=0 - v=1 - w=2 - if ff.v[u].x==ff.v[u+1].x and ff.v[u].y==ff.v[u+1].y: - u=1 - v=2 - w=0 - # here we make a couple of folds, not part of the net, which serve to get the net into the xy plane - xyFace = Poly.fromList( [ [ff.v[u].x,ff.v[u].y, z] , [ff.v[v].x,ff.v[v].y, z] , [ff.v[w].x+0.1,ff.v[w].y+0.1, z] ] ) - refFace = Poly.fromVectors([ ff.v[u], ff.v[v], xyFace.v[1], xyFace.v[0] ] ) - xyFold = Fold(None, xyFace, refFace, Edge(xyFace.v[0], xyFace.v[1] )) - self.refFold = Fold(xyFold, refFace, ff, Edge(refFace.v[0], refFace.v[1] )) - self.refFold.srcFace = self.firstFace - # prepare to grow the trees - trunk = Tree(self, None, self.refFold) - trunk.generations = self.generations - self.firstPoly = ff - self.facesAndEdges.takeFace(self.firstFace) - self.myFacesVisited+=1 - self.refFold.unfold() - self.refFold.tree = trunk - self.refFold.desFace = self.addFace(self.refFold.unfoldedFace(), self.refFold.srcFace) - self.folds.append(self.refFold) - trunk.grow() - i = 0 - # keep the trees growing while they can - while(self.myFacesVisited 0): - if self.edgeIteratorClass==RandomEdgeIterator: - i = random.randint(0,len(self.branches)-1) - tree = self.branches[i] - if(tree.isGrown()): - self.branches.pop(i) - else: - tree.beGrowing() - if(tree.canGrow()): - tree.grow() - i = 0 - else: - i = (i + 1) % len(self.branches) - if self.src.faceUV: - for face in self.src.faces: - face.sel = False - for face in selectedFaces: - face.sel = True - self.src.update() - Window.RedrawAll() - def assignUVs(self): - for fold in self.folds: - self.assignUV(fold.srcFace, fold.unfoldedFace()) - print " assigned uv to ", len(self.folds), len(self.src.faces) - self.src.update() - def checkOverlaps(self, fold): - #return self.getOverlapsBetween(fold, self.folds) - return self.getOverlapsBetweenGL(fold, self.folds) - def getOverlapsBetween(self, fold, folds): - if(fold.parent==None): - return [] - mf = fold.unfoldedFace() - c = [] - for afold in folds: - mdf = afold.unfoldedFace() - if(afold!=fold): - # currently need to get agreement from both polys because - # a touch by a vertex of one the other's edge is acceptable & - # they disagree on that - intersects = mf.intersects2D(mdf) and mdf.intersects2D(mf) - inside = ( mdf.containsAnyOf(mf) or mf.containsAnyOf(mdf) ) - if( intersects or inside or mdf.overlays(mf)): - c.append(afold) - return c - def getOverlapsBetweenGL(self, fold, folds): - b = fold.unfoldedFace().bounds() - polys = len(folds)*4+16 # the buffer is nhits, mindepth, maxdepth, name - buffer = BGL.Buffer(BGL.GL_INT, polys) - BGL.glSelectBuffer(polys, buffer) - BGL.glRenderMode(BGL.GL_SELECT) - BGL.glInitNames() - BGL.glPushName(0) - BGL.glPushMatrix() - BGL.glMatrixMode(BGL.GL_PROJECTION) - BGL.glLoadIdentity() - BGL.glOrtho(b[0].x, b[1].x, b[1].y, b[0].y, 0.0, 10.0) - #clip = BGL.Buffer(BGL.GL_FLOAT, 4) - #clip.list = [0,0,0,0] - #BGL.glClipPlane(BGL.GL_CLIP_PLANE1, clip) - # could use clipping planes here too - BGL.glMatrixMode(BGL.GL_MODELVIEW) - BGL.glLoadIdentity() - bx = (b[1].x - b[0].x) - by = (b[1].y - b[0].y) - cx = bx / 2.0 - cy = by / 2.0 - for f in xrange(len(folds)): - afold = folds[f] - if(fold!=afold): - BGL.glLoadName(f) - BGL.glBegin(BGL.GL_LINE_LOOP) - for v in afold.unfoldedFace().v: - BGL.glVertex2f(v.x, v.y) - BGL.glEnd() - 
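# Illustrative standalone sketch (not from the original script): the overlap
# test above reduces to 2D segment/segment intersection between the edges of
# two candidate polygons (Geometry.LineIntersect2D in the script).  This
# version only reports proper crossings and ignores touching endpoints, a
# simplification of what the script accepts.
def segments_intersect_2d(p1, p2, q1, q2):
    """True if segment p1-p2 properly crosses segment q1-q2 (2D points)."""
    def orient(a, b, c):
        # > 0 counter-clockwise, < 0 clockwise, 0 collinear
        return (b[0]-a[0])*(c[1]-a[1]) - (b[1]-a[1])*(c[0]-a[0])
    d1, d2 = orient(q1, q2, p1), orient(q1, q2, p2)
    d3, d4 = orient(p1, p2, q1), orient(p1, p2, q2)
    return d1 * d2 < 0 and d3 * d4 < 0
# segments_intersect_2d((0, 0), (2, 2), (0, 2), (2, 0)) -> True
# segments_intersect_2d((0, 0), (1, 0), (0, 1), (1, 1)) -> False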
BGL.glPopMatrix() - BGL.glFlush() - hits = BGL.glRenderMode(BGL.GL_RENDER) - buffer = [buffer[i] for i in xrange(3, 4*hits, 4)] - o = [folds[buffer[i]] for i in xrange(len(buffer))] - return self.getOverlapsBetween(fold, o) - def colourFace(self, face, cr): - for c in face.col: - c.r = int(cr[0]) - c.g = int(cr[1]) - c.b = int(cr[2]) - c.a = int(cr[3]) - self.src.update() - def setAvoidsOverlaps(self, avoids): - self.avoidsOverlaps = avoids - def addBranch(self, branch): - self.branches.append(branch) - if self.edgeIteratorClass!=RandomEdgeIterator: - self.branches.sort(lambda b1, b2: b1.compare(b2)) - def srcSize(self): - return len(self.src.faces) - def nBranches(self): - return len(self.branches) - def facesCreated(self): - return len(self.des.faces) - def facesVisited(self): - return self.myFacesVisited - def getOverlaps(self): - return self.overlaps - def sortOutIPOSource(self): - print "Sorting out IPO" - if self.foldIPO!=None: - return - o = None - try: - o = Blender.Object.Get("FoldRate") - except: - o = Blender.Object.New("Empty", "FoldRate") - Blender.Scene.GetCurrent().objects.link(o) - if(o.getIpo()==None): - ipo = Blender.Ipo.New("Object", "FoldRateIPO") - z = ipo.addCurve("RotZ") - print " added RotZ IPO curve" - z.addBezier((1,0)) - # again, why is this 10x out ? - z.addBezier((180, self.ff/10.0)) - z.addBezier((361, 0.0)) - o.setIpo(ipo) - z.recalc() - z.setInterpolation("Bezier") - z.setExtrapolation("Cyclic") - self.setIPOSource(o) - print " added IPO source" - def setIPOSource(self, object): - try: - self.foldIPO = object - for i in xrange(self.foldIPO.getIpo().getNcurves()): - self.IPOCurves[self.foldIPO.getIpo().getCurves()[i].getName()] = i - print " added ", self.foldIPO.getIpo().getCurves()[i].getName() - except: - print "Problem setting IPO object" - print sys.exc_info()[1] - traceback.print_exc(file=sys.stdout) - def setFoldFactor(self, ff): - self.ff = ff - def sayTree(self): - for fold in self.folds: - if(fold.getParent()!=None): - print fold.getID(), fold.dihedralAngle(), fold.getParent().getID() - def report(self): - p = int(float(self.myFacesVisited)/float(len(self.src.faces)) * 100) - print str(p) + "% unfolded" - print "faces created:", self.facesCreated() - print "faces visited:", self.facesVisited() - print "originalfaces:", len(self.src.faces) - n=0 - if(self.avoidsOverlaps): - print "net avoided at least ", self.getOverlaps(), " overlaps ", - n = len(self.src.faces) - self.facesCreated() - if(n>0): - print "but was unable to avoid ", n, " overlaps. Incomplete net." - else: - print "- A complete net." 
- else: - print "net has at least ", self.getOverlaps(), " collision(s)" - return n - # fold all my folds to a fraction of their total fold angle - def unfoldToCurrentFrame(self): - self.unfoldTo(Blender.Scene.GetCurrent().getRenderingContext().currentFrame()) - def unfoldTo(self, frame): - frames = Blender.Scene.GetCurrent().getRenderingContext().endFrame() - if(self.foldIPO!=None and self.foldIPO.getIpo()!=None): - f = self.foldIPO.getIpo().EvaluateCurveOn(self.IPOCurves["RotZ"],frame) - # err, this number seems to be 10x less than it ought to be - fff = 1.0 - (f*10.0 / self.ff) - else: - fff = 1.0-((frame)/(frames*1.0)) - for fold in self.folds: - fold.unfoldTo(fff) - for fold in self.folds: - fold.unfold() - tface = fold.unfoldedFace() - bface = fold.desFace - i = 0 - for v in bface.verts: - v.co.x = tface.v[i].x - v.co.y = tface.v[i].y - v.co.z = tface.v[i].z - i+=1 - Window.Redraw(Window.Types.VIEW3D) - return None - def addFace(self, poly, originalFace=None): - originalLength = len(self.des.verts) - self.des.verts.extend([Vector(vv.x, vv.y, vv.z) for vv in poly.v]) - self.des.faces.extend([ range(originalLength, originalLength + poly.size()) ]) - newFace = self.des.faces[len(self.des.faces)-1] - newFace.uv = [vv for vv in poly.v] - if(originalFace!=None and self.src.vertexColors): - newFace.col = [c for c in originalFace.col] - if(self.feedback!=None): - pu = str(int(self.fractionUnfolded() * 100))+"% unfolded" - howMuchDone = str(self.myFacesVisited)+" of "+str(len(self.src.faces))+" "+pu - self.feedback.say(howMuchDone) - #Window.DrawProgressBar (p, pu) - if(self.showProgress): - Window.Redraw(Window.Types.VIEW3D) - return newFace - def fractionUnfolded(self): - return float(self.myFacesVisited)/float(len(self.src.faces)) - def assignUV(self, face, uv): - face.uv = [Vector(v.x, v.y) for v in uv.v] - def unfoldAll(feedback=None): - objects = Blender.Object.Get() - for object in objects: - if(object.getType()=='Mesh' and not(object.getName().endswith("_net")) and len(object.getData(False, True).faces)>1): - net = Net.createNet(object, feedback) - net.searchForUnfolding() - svg = SVGExporter(net, object.getName()+".svg") - svg.export() - unfoldAll = staticmethod(unfoldAll) - def searchForUnfolding(self, limit=-1): - overlaps = 1 - attempts = 0 - while(overlaps > 0 or attempts=0 and (mesh.faces[mesh.activeFace].sel): - net.firstFaceIndex = mesh.activeFace - net.object = ob - net.feedback = feedback - return net - createNet = staticmethod(createNet) - def importNet(filename): - netName = filename.rstrip(".svg").replace("\\","/") - netName = netName[netName.rfind("/")+1:] - try: - netObject = Blender.Object.Get(netName) - except: - netObject = Blender.Object.New("Mesh", netName) - netObject.getData(mesh=1).name = netName - try: - Blender.Scene.GetCurrent().objects.link(netObject) - except: - pass - net = Net(None, netObject.getData(mesh=1)) - handler = NetHandler(net) - xml.sax.parse(filename, handler) - Window.Redraw(Window.Types.VIEW3D) - return net - importNet = staticmethod(importNet) - def getSourceMesh(self): - return self.src - -# determines the order in which to visit faces according to a local measure -class EdgeIterator: - def __init__(self, branch, otherConstructor=None): - self.branch = branch - self.bface = branch.getFace() - self.edge = branch.getFold().getEdge() - self.net = branch.getNet() - self.n = len(self.bface) - self.edges = [] - self.i = 0 - self.gooodness = 0 - self.createEdges() - self.computeGoodness() - if(otherConstructor==None): - self.sequenceEdges() - def 
createEdges(self): - edge = None - e = Edge.edgesOfBlenderFace(self.net.getSourceMesh(), self.bface) - for edge in e: - if not(edge.isBlenderSeam() and edge!=self.edge): - self.edges.append(edge) - def sequenceEdges(self): - pass - def next(self): - edge = self.edges[self.i] - self.i+=1 - return edge - def size(self): - return len(self.edges) - def reset(self): - self.i = 0 - def hasNext(self): - return (self.ilen(bface)-1): - return None - if(i==len(bface)-1): - j = 0 - else: - j = i+1 - edge = Edge( bface.v[i].co.copy(), bface.v[j].co.copy() ) - edge.bEdge = mesh.findEdge(bface.v[i], bface.v[j]) - edge.idx = i - return edge - fromBlenderFace=staticmethod(fromBlenderFace) - def edgesOfBlenderFace(mesh, bmFace): - edges = [mesh.edges[mesh.findEdges(edge[0], edge[1])] for edge in bmFace.edge_keys] - v = bmFace.verts - e = [] - vi = v[0] - i=0 - for j in xrange(1, len(bmFace)+1): - vj = v[j%len(bmFace)] - for ee in edges: - if((ee.v1.index==vi.index and ee.v2.index==vj.index) or (ee.v2.index==vi.index and ee.v1.index==vj.index)): - e.append(Edge(vi.co, vj.co, ee, i)) - i+=1 - vi = vj - return e - edgesOfBlenderFace=staticmethod(edgesOfBlenderFace) - def isBlenderSeam(self): - return (self.bmEdge.flag & Mesh.EdgeFlags.SEAM) - def isInFGon(self): - return (self.bmEdge.flag & Mesh.EdgeFlags.FGON) - def mapTo(self, poly): - if(self.idx==len(poly.v)-1): - j = 0 - else: - j = self.idx+1 - return Edge(poly.v[self.idx], poly.v[j]) - def isDegenerate(self): - return self.vector.length==0 - def vertices(s): - return [ [s.v1.x, s.v1.y, s.v1.z], [s.v2.x, s.v2.y,s.v2.z] ] - def key(self): - return self.bmEdge.key - def goodness(self): - return self.gooodness - def setGoodness(self, g): - self.gooodness = g - def compare(self, other): - if(self.goodness() > other.goodness()): - return +1 - else: - return -1 - # Does the given segment intersect this, for overlap detection. - # endpoints are allowed to touch the line segment - def intersects2D(self, s): - if(self.matches(s)): - return False - else: - i = Geometry.LineIntersect2D(self.v1, self.v2, s.v1, s.v2) - if(i!=None): - i.resize4D() - i.z = self.v1.z # hack to put the point on the same plane as this edge for comparison - return(i!=None and not(self.endsWith(i))) - def matches(self, s): - return ( (self.v1==s.v1 and self.v2==s.v2) or (self.v2==s.v1 and self.v1==s.v2) ) - # Is the given point on the end of this segment ? 
10-5 seems to an acceptable limit for closeness in Blender - def endsWith(self, aPoint, e=0.0001): - return ( (self.v1-aPoint).length < e or (self.v2-aPoint).length < e ) - - -class Poly: - ids = -1 - def __init__(self): - Poly.ids+=1 - self.v = [] - self.id = Poly.ids - self.boundz = None - self.edges = None - def getID(self): - return self.id - def normal(self): - a =self.v[0] - b=self.v[1] - c=self.v[2] - p = b-a - p.resize3D() - q = a-c - q.resize3D() - return p.cross(q) - def makeEdges(self): - self.edges = [] - for i in xrange(self.nPoints()): - self.edges.append(Edge( self.v[i % self.nPoints()], self.v[ (i+1) % self.nPoints()] )) - def edgeAt(self, i): - if(self.edges==None): - self.makeEdges() - return self.edges[i] - def intersects2D(self, poly): - for i in xrange(self.nPoints()): - edge = self.edgeAt(i) - for j in xrange(poly.nPoints()): - if edge.intersects2D(poly.edgeAt(j)): - return True - return False - def isBad(self): - badness = 0 - for vv in self.v: - if(vv.x!=vv.x or vv.y!=vv.y or vv.z!=vv.z): # Nan check - badness+=1 - return (badness>0) - def midpoint(self): - x=y=z = 0.0 - n = 0 - for vv in self.v: - x+=vv.x - y+=vv.y - z+=vv.z - n+=1 - return [ x/n, y/n, z/n ] - def centerAtOrigin(self): - mp = self.midpoint() - mp = -mp - toOrigin = TranslationMatrix(mp) - self.v = [(vv * toOrigin) for vv in self.v] - def move(self, tv): - mv = TranslationMatrix(tv) - self.v = [(vv * mv) for vv in self.v] - def scale(self, s): - mp = Vector(self.midpoint()) - fromOrigin = TranslationMatrix(mp) - mp = -mp - toOrigin = TranslationMatrix(mp) - sm = ScaleMatrix(s, 4) - # Todo, the 3 lines below in 1 LC - self.v = [(vv * toOrigin) for vv in self.v] - self.v = [(sm * vv) for vv in self.v] - self.v = [(vv * fromOrigin) for vv in self.v] - def nPoints(self): - return len(self.v) - def size(self): - return len(self.v) - def rotated(self, axis, angle): - p = self.clone() - p.rotate(axis, angle) - return p - def rotate(self, axis, angle): - rotation = RotationMatrix(angle, 4, "r", axis.vector) - toOrigin = TranslationMatrix(axis.v1n) - fromOrigin = TranslationMatrix(axis.v1) - # Todo, the 3 lines below in 1 LC - self.v = [(vv * toOrigin) for vv in self.v] - self.v = [(rotation * vv) for vv in self.v] - self.v = [(vv * fromOrigin) for vv in self.v] - def moveAlong(self, vector, distance): - t = TranslationMatrix(vector) - s = ScaleMatrix(distance, 4) - ts = t*s - self.v = [(vv * ts) for vv in self.v] - def bounds(self): - if(self.boundz == None): - vv = [vv for vv in self.v] - vv.sort(key=lambda v: v.x) - minx = vv[0].x - maxx = vv[len(vv)-1].x - vv.sort(key=lambda v: v.y) - miny = vv[0].y - maxy = vv[len(vv)-1].y - self.boundz = [Vector(minx, miny, 0), Vector(maxx, maxy, 0)] - return self.boundz - def fromBlenderFace(bface): - p = Poly() - for vv in bface.v: - vec = Vector([vv.co[0], vv.co[1], vv.co[2] , 1.0]) - p.v.append(vec) - return p - fromBlenderFace = staticmethod(fromBlenderFace) - def fromList(list): - p = Poly() - for vv in list: - vec = Vector( [vvv for vvv in vv] ) - vec.resize4D() - p.v.append(vec) - return p - fromList = staticmethod(fromList) - def fromVectors(vectors): - p = Poly() - p.v.extend([v.copy().resize4D() for v in vectors]) - return p - fromVectors = staticmethod(fromVectors) - def clone(self): - p = Poly() - p.v.extend(self.v) - return p - def hasVertex(self, ttv): - v = Mathutils.Vector(ttv) - v.normalize() - for tv in self.v: - vv = Mathutils.Vector(tv) - vv.normalize() - t = 0.00001 - if abs(vv.x-v.x)0): j=i-1 - cv = self.v[i] - nv = self.v[j] - if ((((cv.y<=tp.y) 
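# Illustrative standalone sketch (not from the original script): the
# point-in-polygon test that is partly garbled above appears to be the classic
# even-odd ray cast: count how many polygon edges a horizontal ray from the
# point crosses; an odd count means the point is inside.  2D tuples only.
def contains_point(poly, pt):
    """poly: [(x, y), ...] in order; pt: (x, y).  Even-odd rule."""
    x, y = pt
    inside = False
    j = len(poly) - 1
    for i in range(len(poly)):
        xi, yi = poly[i]
        xj, yj = poly[j]
        if (yi <= y) != (yj <= y):                     # edge straddles the ray
            x_cross = xj + (y - yj) * (xi - xj) / (yi - yj)
            if x < x_cross:                            # crossing is to the right
                inside = not inside
        j = i
    return inside
# square = [(0, 0), (1, 0), (1, 1), (0, 1)]
# contains_point(square, (0.5, 0.5)) -> True
# contains_point(square, (1.5, 0.5)) -> False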
and (tp.y") - self.e.endElement("style") - self.e.endElement("defs") - #self.addClipPath() - self.addMeta() - def addMeta(self): - self.e.startElement("metadata", xml.sax.xmlreader.AttributesImpl({})) - self.e.startElement("nets:net", xml.sax.xmlreader.AttributesImpl({})) - for i in xrange(1, len(self.net.folds)): - fold = self.net.folds[i] - # AttributesNSImpl - documentation is rubbish. using this hack. - atts = {} - atts["nets:id"] = "fold"+str(fold.getID()) - if(fold.parent!=None): - atts["nets:parent"] = "fold"+str(fold.parent.getID()) - else: - atts["nets:parent"] = "null" - atts["nets:da"] = str(fold.dihedralAngle()) - if(fold.parent!=None): - atts["nets:ofPoly"] = "poly"+str(fold.parent.foldingPoly.getID()) - else: - atts["nets:ofPoly"] = "" - atts["nets:toPoly"] = "poly"+str(fold.foldingPoly.getID()) - a = xml.sax.xmlreader.AttributesImpl(atts) - self.e.startElement("nets:fold", a) - self.e.endElement("nets:fold") - self.e.endElement("nets:net") - self.e.endElement("metadata") - def end(self): - self.e.endElement("svg") - self.e.endDocument() - print "grown." - def export(self): - self.net.unfoldTo(1) - bb = self.object.getBoundBox() - print bb - self.vxmin = bb[0][0] - self.vymin = bb[0][1] - self.vxmax = bb[7][0] - self.vymax = bb[7][1] - self.start() - atts = {} - atts["id"] = self.object.getName() - a = xml.sax.xmlreader.AttributesImpl(atts) - self.e.startElement("g", a) - #self.addUVImage() - self.addPolys() - self.addFoldLines() - #self.addCutLines() - self.e.endElement("g") - self.end() - def addClipPath(self): - atts = {} - atts["id"] = "netClip" - atts["clipPathUnits"] = "userSpaceOnUse" - atts["x"] = str(self.vxmin) - atts["y"] = str(self.vymin) - atts["width"] = "100%" - atts["height"] = "100%" - self.e.startElement("clipPath", atts) - self.addPolys() - self.e.endElement("clipPath") - def addUVImage(self): - image = Blender.Image.GetCurrent() #hmm - how to determine the desired image ? 
- if image==None: - return - ifn = image.getFilename() - ifn = self.filename.replace(".svg", ".jpg") - image.setFilename(ifn) - ifn = ifn[ifn.rfind("/")+1:] - image.save() - atts = {} - atts["clip-path"] = "url(#netClip)" - atts["xlink:href"] = ifn - self.e.startElement("image", atts) - self.e.endElement("image") - def addPolys(self): - atts = {} - atts["id"] = "polys" - a = xml.sax.xmlreader.AttributesImpl(atts) - self.e.startElement("g", a) - for i in xrange(len(self.net.folds)): - self.addPoly(self.net.folds[i]) - self.e.endElement("g") - def addFoldLines(self): - atts = {} - atts["id"] = "foldLines" - a = xml.sax.xmlreader.AttributesImpl(atts) - self.e.startElement("g", a) - for i in xrange( 1, len(self.net.folds)): - self.addFoldLine(self.net.folds[i]) - self.e.endElement("g") - def addFoldLine(self, fold): - edge = fold.edge.mapTo(fold.parent.foldingPoly) - if fold.dihedralAngle()>0: - foldType="valley" - else: - foldType="mountain" - atts={} - atts["x1"] = str(edge.v1.x) - atts["y1"] = str(edge.v1.y) - atts["x2"] = str(edge.v2.x) - atts["y2"] = str(edge.v2.y) - atts["id"] = "fold"+str(fold.getID()) - atts["class"] = foldType - a = xml.sax.xmlreader.AttributesImpl(atts) - self.e.startElement("line", a) - self.e.endElement("line") - def addCutLines(self): - atts = {} - atts["id"] = "cutLines" - a = xml.sax.xmlreader.AttributesImpl(atts) - self.e.startElement("g", a) - for i in xrange( 1, len(self.net.cuts)): - self.addCutLine(self.net.cuts[i]) - self.e.endElement("g") - def addCutLine(self, cut): - edge = cut.edge.mapTo(cut.parent.foldingPoly) - if cut.dihedralAngle()>0: - foldType="valley" - else: - foldType="mountain" - atts={} - atts["x1"] = str(edge.v1.x) - atts["y1"] = str(edge.v1.y) - atts["x2"] = str(edge.v2.x) - atts["y2"] = str(edge.v2.y) - atts["id"] = "cut"+str(cut.getID()) - atts["class"] = foldType - a = xml.sax.xmlreader.AttributesImpl(atts) - self.e.startElement("line", a) - self.e.endElement("line") - def addPoly(self, fold): - face = fold.foldingPoly - atts = {} - if fold.desFace.col: - col = fold.desFace.col[0] - rgb = "rgb("+str(col.r)+","+str(col.g)+","+str(col.b)+")" - atts["fill"] = rgb - atts["class"] = "poly" - atts["id"] = "poly"+str(face.getID()) - points = "" - first = True - for vv in face.v: - if(not(first)): - points+=',' - first = False - points+=str(vv[0]) - points+=' ' - points+=str(vv[1]) - atts["points"] = points - a = xml.sax.xmlreader.AttributesImpl(atts) - self.e.startElement("polygon", a) - self.e.endElement("polygon") - def fileSelected(filename): - try: - net = Registry.GetKey('unfolder')['net'] - exporter = SVGExporter(net, filename) - exporter.export() - except: - print "Problem exporting SVG" - traceback.print_exc(file=sys.stdout) - fileSelected = staticmethod(fileSelected) - -# for importing nets saved by the above exporter -class NetHandler(xml.sax.handler.ContentHandler): - def __init__(self, net): - self.net = net - self.first = (41==41) - self.currentElement = None - self.chars = None - self.currentAction = None - self.foldsPending = {} - self.polys = {} - self.actions = {} - self.actions["nets:fold"] = self.foldInfo - self.actions["line"] = self.cutOrFold - self.actions["polygon"] = self.createPoly - def setDocumentLocator(self, locator): - pass - def startDocument(self): - pass - def endDocument(self): - for fold in self.foldsPending.values(): - face = self.net.addFace(fold.unfoldedFace()) - fold.desFace = face - self.net.folds.append(fold) - self.net.addFace(self.first) - self.foldsPending = None - self.polys = None - def 
startPrefixMapping(self, prefix, uri): - pass - def endPrefixMapping(self, prefix): - pass - def startElement(self, name, attributes): - self.currentAction = None - try: - self.currentAction = self.actions[name] - except: - pass - if(self.currentAction!=None): - self.currentAction(attributes) - def endElement(self, name): - pass - def startElementNS(self, name, qname, attrs): - self.currentAction = self.actions[name] - if(self.currentAction!=None): - self.currentAction(attributes) - def endElementNS(self, name, qname): - pass - def characters(self, content): - pass - def ignorableWhitespace(self): - pass - def processingInstruction(self, target, data): - pass - def skippedEntity(self, name): - pass - def foldInfo(self, atts): - self.foldsPending[atts["nets:id"]] = atts - def createPoly(self, atts): - xy = re.split('[, ]' , atts["points"]) - vectors = [] - for i in xrange(0, len(xy)-1, 2): - v = Vector([float(xy[i]), float(xy[i+1]), 0.0]) - vectors.append(v) - poly = Poly.fromVectors(vectors) - if(self.first==True): - self.first = poly - self.polys[atts["id"]] = poly - def cutOrFold(self, atts): - fid = atts["id"] - try: - fi = self.foldsPending[fid] - except: - pass - p1 = Vector([float(atts["x1"]), float(atts["y1"]), 0.0]) - p2 = Vector([float(atts["x2"]), float(atts["y2"]), 0.0]) - edge = Edge(p1, p2) - parent = None - ofPoly = None - toPoly = None - try: - parent = self.foldsPending[fi["nets:parent"]] - except: - pass - try: - ofPoly = self.polys[fi["nets:ofPoly"]] - except: - pass - try: - toPoly = self.polys[fi["nets:toPoly"]] - except: - pass - fold = Fold(parent, ofPoly , toPoly, edge, float(fi["nets:da"])) - self.foldsPending[fid] = fold - def fileSelected(filename): - try: - net = Net.importNet(filename) - try: - Registry.GetKey('unfolder')['net'] = net - except: - Registry.SetKey('unfolder', {}) - Registry.GetKey('unfolder')['net'] = net - Registry.GetKey('unfolder')['lastpath'] = filename - except: - print "Problem importing SVG" - traceback.print_exc(file=sys.stdout) - fileSelected = staticmethod(fileSelected) - - -class GUI: - def __init__(self): - self.overlaps = Draw.Create(0) - self.ani = Draw.Create(0) - self.selectedFaces =0 - self.search = Draw.Create(0) - self.diffuse = True - self.ancestors = Draw.Create(0) - self.noise = Draw.Create(0.0) - self.shape = Draw.Create(0) - self.nOverlaps = 1==2 - self.iterators = [RandomEdgeIterator,Brightest,Curvature,EdgeIterator,OddEven,Largest] - self.iterator = RandomEdgeIterator - self.overlapsText = "*" - self.message = " " - def makePopupGUI(self): - useRandom = Draw.Create(0) - pub = [] - pub.append(("Search", self.search, "Search for non-overlapping net (maybe forever)")) - pub.append(("Random", useRandom, "Random style net")) - ok = True - while ok: - ok = Blender.Draw.PupBlock("Unfold", pub) - if ok: - if useRandom.val: - self.iterator = RandomEdgeIterator - else: - self.iterator = Curvature - self.unfold() - def makeStandardGUI(self): - Draw.Register(self.draw, self.keyOrMouseEvent, self.buttonEvent) - def installScriptLink(self): - print "Adding script link for animation" - s = Blender.Scene.GetCurrent().getScriptLinks("FrameChanged") - if(s!=None and s.count("frameChanged.py")>0): - return - try: - script = Blender.Text.Get("frameChanged.py") - except: - script = Blender.Text.New("frameChanged.py") - script.write("import Blender\n") - script.write("import mesh_unfolder as Unfolder\n") - script.write("u = Blender.Registry.GetKey('unfolder')\n") - script.write("if u!=None:\n") - script.write("\tn = u['net']\n") - 
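# Illustrative standalone sketch (not from the original script): importNet()
# and the NetHandler above read a saved net back with a SAX ContentHandler,
# splitting each polygon's "points" attribute on commas and spaces.  The sketch
# below only collects polygon outlines; "net.svg" is a made-up file name.
import re
import xml.sax
import xml.sax.handler

class PolygonCollector(xml.sax.handler.ContentHandler):
    """Collect the points of every <polygon> element as (x, y) float pairs."""
    def __init__(self):
        xml.sax.handler.ContentHandler.__init__(self)
        self.polygons = []
    def startElement(self, name, attrs):
        if name == "polygon":
            xy = [t for t in re.split("[, ]+", attrs["points"]) if t]
            self.polygons.append(
                [(float(xy[i]), float(xy[i + 1])) for i in range(0, len(xy) - 1, 2)])
# handler = PolygonCollector()
# xml.sax.parse("net.svg", handler)
# handler.polygons -> [[(x1, y1), (x2, y2), ...], ...]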
script.write("\tif(n!=None and n.animates):\n") - script.write("\t\tn.unfoldToCurrentFrame()\n") - Blender.Scene.GetCurrent().addScriptLink("frameChanged.py", "FrameChanged") - def unfold(self): - anc = self.ancestors.val - n = 0.0 - s = True - self.nOverlaps = 0 - searchLimit = 10 - search = 1 - Draw.Redraw(1) - net = None - name = None - try: - self.say("Unfolding...") - Draw.Redraw(1) - while(s):# and search < searchLimit): - if(net!=None): - name = net.des.name - net = Net.fromSelected(self, name) - net.setAvoidsOverlaps(not(self.overlaps.val)) - print - print "Unfolding selected object" - net.edgeIteratorClass = self.iterator - print "Using ", net.edgeIteratorClass - net.animates = self.ani.val - self.diffuse = (self.ancestors.val==0) - net.diffuse = self.diffuse - net.generations = self.ancestors.val - net.noise = self.noise.val - print "even:", net.diffuse, " depth:", net.generations - net.unfold() - n = net.report() - t = "." - if(n<1.0): - t = "Overlaps>="+str(n) - else: - t = "A complete net." - self.nOverlaps = (n>=1) - if(self.nOverlaps): - self.say(self.message+" - unfolding failed - try again ") - elif(not(self.overlaps.val)): - self.say("Success. Complete net - no overlaps ") - else: - self.say("Unfolding complete") - self.ancestors.val = anc - s = (self.search.val and n>=1.0) - dict = Registry.GetKey('unfolder') - if(not(dict)): - dict = {} - dict['net'] = net - Registry.SetKey('unfolder', dict) - if(s): - net = net.clone() - search += 1 - except(IndexError): - self.say("Please select an object to unfold") - except: - self.say("Problem unfolding selected object - see console for details") - print "Problem unfolding selected object:" - print sys.exc_info()[1] - traceback.print_exc(file=sys.stdout) - if(self.ani): - if Registry.GetKey('unfolder')==None: - print "no net!" 
- return - Registry.GetKey('unfolder')['net'].sortOutIPOSource() - self.installScriptLink() - Draw.Redraw(1) - def keyOrMouseEvent(self, evt, val): - if (evt == Draw.ESCKEY and not val): - Draw.Exit() - def buttonEvent(self, evt): - if (evt == 1): - self.unfold() - if (evt == 5): - try: - Registry.GetKey('unfolder')['net'].setAvoidsOverlaps(self.overlaps.val) - except: - pass - if (evt == 2): - print "Trying to set IPO curve" - try: - s = Blender.Object.GetSelected() - if(s!=None): - Registry.GetKey('unfolder')['net'].setIPOSource( s[0] ) - print "Set IPO curve" - else: - print "Please select an object to use the IPO of" - except: - print "Problem setting IPO source" - Draw.Redraw(1) - if (evt == 6): - Draw.Exit() - if (evt == 7): - try: - if (Registry.GetKey('unfolder')['net']!=None): - Registry.GetKey('unfolder')['net'].animates = self.ani.val - if(self.ani): - Registry.GetKey('unfolder')['net'].sortOutIPOSource() - self.installScriptLink() - except: - print sys.exc_info()[1] - traceback.print_exc(file=sys.stdout) - Draw.Redraw(1) - if (evt == 19): - pass - if (evt == 87): - try: - if (Registry.GetKey('unfolder')['net']!=None): - Registry.GetKey('unfolder')['net'].assignUVs() - self.say("Assigned UVs") - except: - print sys.exc_info()[1] - traceback.print_exc(file=sys.stdout) - Draw.Redraw(1) - if(evt==91): - if( testOverlap() == True): - self.nOverlaps = 1 - else: - self.nOverlaps = 0 - Draw.Redraw(1) - if(evt==233): - f1 = Poly.fromBlenderFace(Blender.Object.GetSelected()[0].getData().faces[0]) - f2 = Poly.fromBlenderFace(Blender.Object.GetSelected()[1].getData().faces[0]) - print - print Blender.Object.GetSelected()[0].getName() - print Blender.Object.GetSelected()[1].getName() - print f1.intersects2D(f2) - print f2.intersects2D(f1) - if(evt==714): - Net.unfoldAll(self) - Draw.Redraw(1) - if(evt==713): - self.iterator = self.iterators[self.shape.val] - Draw.Redraw(1) - if(evt==92): - if( testContains() == True): - self.nOverlaps = 1 - else: - self.nOverlaps = 0 - Draw.Redraw(1) - if(evt==104): - try: - filename = "net.svg" - s = Blender.Object.GetSelected() - if(s!=None and len(s)>0): - filename = s[0].getName()+".svg" - else: - if (Registry.GetKey('unfolder')['net']!=None): - filename = Registry.GetKey('unfolder')['net'].des.name - if(filename==None): - filename="net.svg" - else: - filename=filename+".svg" - Window.FileSelector(SVGExporter.fileSelected, "Select filename", filename) - except: - print "Problem exporting SVG" - traceback.print_exc(file=sys.stdout) - if(evt==107): - try: - Window.FileSelector(NetHandler.fileSelected, "Select file") - except: - print "Problem importing SVG" - traceback.print_exc(file=sys.stdout) - def say(self, m): - self.message = m - Draw.Redraw(1) - Window.Redraw(Window.Types.SCRIPT) - def draw(self): - cw = 64 - ch = 16 - l = FlowLayout(32, cw, ch, 350, 64) - l.y = 70 - self.search = Draw.Toggle("search", 19, l.nx(), l.ny(), l.cw, l.ch, self.search.val, "Search for non-overlapping mesh (potentially indefinitely)") - self.overlaps = Draw.Toggle("overlaps", 5, l.nx(), l.ny(), l.cw, l.ch, self.overlaps.val, "Allow overlaps / avoid overlaps - if off, will not place overlapping faces") - self.ani = Draw.Toggle("ani", 7, l.nx(), l.ny(), l.cw, l.ch, self.ani.val, "Animate net") - Draw.Button("uv", 87, l.nx(), l.ny(), l.cw, l.ch, "Assign net as UV to source mesh (overwriting existing UV)") - Draw.Button("Unfold", 1, l.nx(), l.ny(), l.cw, l.ch, "Unfold selected mesh to net") - Draw.Button("save", 104, l.nx(), l.ny(), l.cw, l.ch, "Save net as SVG") - 
Draw.Button("load", 107, l.nx(), l.ny(), l.cw, l.ch, "Load net from SVG") - #Draw.Button("test", 233, l.nx(), l.ny(), l.cw, l.ch, "test") - # unfolding enthusiasts - try uncommenting this - self.ancestors = Draw.Number("depth", 654, l.nx(), l.ny(), cw, ch, self.ancestors.val, 0, 9999, "depth of branching 0=diffuse") - #self.noise = Draw.Number("noise", 631, l.nx(), l.ny(), cw, ch, self.noise.val, 0.0, 1.0, "noisyness of branching") - #Draw.Button("UnfoldAll", 714, l.nx(), l.ny(), l.cw, l.ch, "Unfold all meshes and save their nets") - options = "order %t|random %x0|brightest %x1|curvature %x2|winding %x3| 1010 %x4|largest %x5" - self.shape = Draw.Menu(options, 713, l.nx(), l.ny(), cw, ch, self.shape.val, "shape of net") - Draw.Button("exit", 6, l.nx(), l.ny(), l.cw, l.ch, "exit") - BGL.glClearColor(0.3, 0.3, 0.3, 1) - BGL.glColor3f(0.3,0.3,0.3) - l.newLine() - BGL.glRasterPos2i(32, 100) - Draw.Text(self.message) - -class FlowLayout: - def __init__(self, margin, cw, ch, w, h): - self.x = margin-cw-4 - self.y = margin - self.cw = cw - self.ch = ch - self.width = w - self.height = h - self.margin = margin - def nx(self): - self.x+=(self.cw+4) - if(self.x>self.width): - self.x = self.margin - self.y-=self.ch+4 - return self.x - def ny(self): - return self.y - def newLine(self): - self.y-=self.ch+self.margin - self.x = self.margin - -# if xml is None, then dont bother running the script -if xml: - try: - sys.setrecursionlimit(10000) - gui = GUI() - gui.makeStandardGUI() - #gui.makePopupGUI() - except: - traceback.print_exc(file=sys.stdout) diff --git a/release/scripts/mesh_wire.py b/release/scripts/mesh_wire.py deleted file mode 100644 index bd38c47a9b9..00000000000 --- a/release/scripts/mesh_wire.py +++ /dev/null @@ -1,290 +0,0 @@ -#!BPY -""" -Name: 'Solid Wireframe' -Blender: 243 -Group: 'Mesh' -Tooltip: 'Make a solid wireframe copy of this mesh' -""" - -# -------------------------------------------------------------------------- -# Solid Wireframe1.0 by Campbell Barton (AKA Ideasman42) -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
-# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- -import Blender -from Blender import Scene, Mesh, Window, sys -from Blender.Mathutils import AngleBetweenVecs, TriangleNormal -from BPyMesh import faceAngles # get angles for face cornders -#import BPyMesh -#reload(BPyMesh) -#faceAngles = BPyMesh.faceAngles - -# works out the distanbce to inset the corners based on angles -from BPyMathutils import angleToLength -#import BPyMathutils -#reload(BPyMathutils) -#angleToLength = BPyMathutils.angleToLength - -import mesh_solidify - -import BPyMessages -reload(BPyMessages) -import bpy - - -def solid_wire(ob_orig, me_orig, sce, PREF_THICKNESS, PREF_SOLID, PREF_SHARP, PREF_XSHARP): - if not PREF_SHARP and PREF_XSHARP: - PREF_XSHARP = False - - # This function runs out of editmode with a mesh - # error cases are alredy checked for - - inset_half = PREF_THICKNESS / 2 - del PREF_THICKNESS - - ob = ob_orig.copy() - me = me_orig.copy() - ob.link(me) - sce.objects.selected = [] - sce.objects.link(ob) - ob.sel = True - sce.objects.active = ob - - # Modify the object, should be a set - FGON= Mesh.EdgeFlags.FGON - edges_fgon = dict([(ed.key,None) for ed in me.edges if ed.flag & FGON]) - # edges_fgon.fromkeys([ed.key for ed in me.edges if ed.flag & FGON]) - - del FGON - - - - # each face needs its own verts - # orig_vert_count =len(me.verts) - new_vert_count = len(me.faces) * 4 - for f in me.faces: - if len(f) == 3: - new_vert_count -= 1 - - if PREF_SHARP == 0: - new_faces_edge= {} - - def add_edge(i1,i2, ni1, ni2): - - if i1>i2: - i1,i2 = i2,i1 - flip = True - else: - flip = False - new_faces_edge.setdefault((i1,i2), []).append((ni1, ni2, flip)) - - - new_verts = [] - new_faces = [] - vert_index = len(me.verts) - - for f in me.faces: - f_v_co = [v.co for v in f] - angles = faceAngles(f_v_co) - f_v_idx = [v.index for v in f] - - def new_vert(fi): - co = f_v_co[fi] - a = angles[fi] - if a > 180: - vert_inset = 1 * inset_half - else: - vert_inset = inset_half * angleToLength( abs((180-a) / 2) ) - - # Calculate the inset direction - co1 = f_v_co[fi-1] - co2 = fi+1 # Wrap this index back to the start - if co2 == len(f_v_co): co2 = 0 - co2 = f_v_co[co2] - - co1 = co1 - co - co2 = co2 - co - co1.normalize() - co2.normalize() - d = co1+co2 - # Done with inset direction - - d.length = vert_inset - return co+d - - new_verts.extend([new_vert(i) for i in xrange(len(f_v_co))]) - - if len(f_v_idx) == 4: - faces = [\ - (f_v_idx[1], f_v_idx[0], vert_index, vert_index+1),\ - (f_v_idx[2], f_v_idx[1], vert_index+1, vert_index+2),\ - (f_v_idx[3], f_v_idx[2], vert_index+2, vert_index+3),\ - (f_v_idx[0], f_v_idx[3], vert_index+3, vert_index),\ - ] - else: - faces = [\ - (f_v_idx[1], f_v_idx[0], vert_index, vert_index+1),\ - (f_v_idx[2], f_v_idx[1], vert_index+1, vert_index+2),\ - (f_v_idx[0], f_v_idx[2], vert_index+2, vert_index),\ - ] - - - if PREF_SHARP == 1: - if not edges_fgon: - new_faces.extend(faces) - else: - for nf in faces: - i1,i2 = nf[0], nf[1] - if i1>i2: i1,i2 = i2,i1 - - if edges_fgon and (i1,i2) not in edges_fgon: - new_faces.append(nf) - - - - elif PREF_SHARP == 0: - for nf in faces: - add_edge(*nf) - - vert_index += len(f_v_co) - - me.verts.extend(new_verts) - - if PREF_SHARP == 0: - def add_tri_flipped(i1,i2,i3): - try: - if AngleBetweenVecs(me.verts[i1].no, TriangleNormal(me.verts[i1].co, me.verts[i2].co, me.verts[i3].co)) < 90: - return i3,i2,i1 - else: - return i1,i2,i3 - except: - return i1,i2,i3 - - # This stores new verts that use this 
vert - # used for re-averaging this verts location - # based on surrounding verts. looks better but not needed. - vert_users = [set() for i in xrange(vert_index)] - - for (i1,i2), nf in new_faces_edge.iteritems(): - - if len(nf) == 2: - # Add the main face - if edges_fgon and (i1,i2) not in edges_fgon: - new_faces.append((nf[0][0], nf[0][1], nf[1][0], nf[1][1])) - - - if nf[0][2]: key1 = nf[0][1],nf[0][0] - else: key1 = nf[0][0],nf[0][1] - if nf[1][2]: key2 = nf[1][1],nf[1][0] - else: key2 = nf[1][0],nf[1][1] - - # CRAP, cont work out which way to flip so make it oppisite the verts normal. - - ###new_faces.append((i2, key1[0], key2[0])) # NO FLIPPING, WORKS THOUGH - ###new_faces.append((i1, key1[1], key2[1])) - new_faces.append(add_tri_flipped(i2, key1[0], key2[0])) - new_faces.append(add_tri_flipped(i1, key1[1], key2[1])) - - # Average vert loction so its not tooo pointy - # not realy needed but looks better - vert_users[i2].update((key1[0], key2[0])) - vert_users[i1].update((key1[1], key2[1])) - - if len(nf) == 1: - if nf[0][2]: new_faces.append((nf[0][0], nf[0][1], i2, i1)) # flipped - else: new_faces.append((i1,i2, nf[0][0], nf[0][1])) - - - # average points now. - for i, vusers in enumerate(vert_users): - if vusers: - co = me.verts[i].co - co.zero() - - for ii in vusers: - co += me.verts[ii].co - co /= len(vusers) - - me.faces.delete(1, range(len(me.faces))) - - me.faces.extend(new_faces) - - # External function, solidify - me.sel = True - if PREF_SOLID: - mesh_solidify.solidify(me, -inset_half*2, True, False, PREF_XSHARP) - - -def main(): - - # Gets the current scene, there can be many scenes in 1 blend file. - sce = bpy.data.scenes.active - - # Get the active object, there can only ever be 1 - # and the active object is always the editmode object. - ob_act = sce.objects.active - - if not ob_act or ob_act.type != 'Mesh': - BPyMessages.Error_NoMeshActive() - return - - # Saves the editmode state and go's out of - # editmode if its enabled, we cant make - # changes to the mesh data while in editmode. - is_editmode = Window.EditMode() - Window.EditMode(0) - - me = ob_act.getData(mesh=1) # old NMesh api is default - if len(me.faces)==0: - BPyMessages.Error_NoMeshFaces() - if is_editmode: Window.EditMode(1) - return - - # Create the variables. 
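The new_vert() helper earlier in this hunk insets each face corner along the bisector of its two edges, scaling the offset by the corner angle (via angleToLength) so the wire keeps a constant thickness. A rough 2D sketch of that idea follows; the 1/sin(half-angle) rule used here is the standard constant-offset formula and only an assumption about what angleToLength achieves, not that module's actual code:

import math

def inset_corner_2d(prev_pt, corner, next_pt, inset):
    # Offset 'corner' along the bisector of its two edges so the new point stays
    # 'inset' away from both edges. Assumes the corner is not degenerate
    # (the two edges are not collinear).
    ax, ay = prev_pt[0] - corner[0], prev_pt[1] - corner[1]
    bx, by = next_pt[0] - corner[0], next_pt[1] - corner[1]
    la, lb = math.hypot(ax, ay), math.hypot(bx, by)
    ax, ay = ax / la, ay / la
    bx, by = bx / lb, by / lb
    # corner angle between the two edge directions
    cos_t = max(-1.0, min(1.0, ax * bx + ay * by))
    theta = math.acos(cos_t)
    # normalized bisector direction, as in new_vert()'s co1+co2
    dx, dy = ax + bx, ay + by
    dl = math.hypot(dx, dy)
    dx, dy = dx / dl, dy / dl
    # distance along the bisector that keeps 'inset' clearance from both edges
    dist = inset / math.sin(theta / 2.0)
    return corner[0] + dx * dist, corner[1] + dy * dist

if __name__ == '__main__':
    # right-angled corner of a unit square; the inset point should land at (0.1, 0.1)
    print(inset_corner_2d((1.0, 0.0), (0.0, 0.0), (0.0, 1.0), 0.1))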
- PREF_THICK = Blender.Draw.Create(0.005) - PREF_SOLID = Blender.Draw.Create(1) - PREF_SHARP = Blender.Draw.Create(1) - PREF_XSHARP = Blender.Draw.Create(0) - - pup_block = [\ - ('Thick:', PREF_THICK, 0.0001, 2.0, 'Skin thickness in mesh space.'),\ - ('Solid Wire', PREF_SOLID, 'If Disabled, will use 6 sided wire segments'),\ - ('Sharp Wire', PREF_SHARP, 'Use the original mesh topology for more accurate sharp wire.'),\ - ('Extra Sharp', PREF_XSHARP, 'Use less geometry to create a sharper looking wire'),\ - ] - - if not Blender.Draw.PupBlock('Solid Wireframe', pup_block): - if is_editmode: Window.EditMode(1) - return - - Window.WaitCursor(1) - t = sys.time() - - # Run the mesh editing function - solid_wire(ob_act, me, sce, PREF_THICK.val, PREF_SOLID.val, PREF_SHARP.val, PREF_XSHARP.val) - - # Timing the script is a good way to be aware on any speed hits when scripting - print 'Solid Wireframe finished in %.2f seconds' % (sys.time()-t) - Window.WaitCursor(0) - if is_editmode: Window.EditMode(1) - - -# This lets you can import the script without running it -if __name__ == '__main__': - main() diff --git a/release/scripts/modules/autocomplete.py b/release/scripts/modules/autocomplete.py new file mode 100644 index 00000000000..9dd489a178e --- /dev/null +++ b/release/scripts/modules/autocomplete.py @@ -0,0 +1,211 @@ + + +def execute(bcon): + ''' + This function has been taken from a BGE console autocomp I wrote a while ago + the dictionaty bcon is not needed but it means I can copy and paste from the old func + which works ok for now. + + 'bcon' dictionary keys, set by the caller + * 'cursor' - index of the editing character (int) + * 'edit_text' - text string for editing (string) + * 'scrollback' - text to add to the scrollback, options are added here. (text) + * 'namespace' - namespace, (dictionary) + + ''' + + + def is_delimiter(ch): + ''' + For skipping words + ''' + if ch == '_': + return False + if ch.isalnum(): + return False + + return True + + def is_delimiter_autocomp(ch): + ''' + When autocompleteing will earch back and + ''' + if ch in '._[] "\'': + return False + if ch.isalnum(): + return False + + return True + + + def do_autocomp(autocomp_prefix, autocomp_members): + ''' + return text to insert and a list of options + ''' + autocomp_members = [v for v in autocomp_members if v.startswith(autocomp_prefix)] + + print("AUTO: '%s'" % autocomp_prefix) + print("MEMBERS: '%s'" % str(autocomp_members)) + + if not autocomp_prefix: + return '', autocomp_members + elif len(autocomp_members) > 1: + # find a common string between all members after the prefix + # 'ge' [getA, getB, getC] --> 'get' + + # get the shortest member + min_len = min([len(v) for v in autocomp_members]) + + autocomp_prefix_ret = '' + + for i in range(len(autocomp_prefix), min_len): + char_soup = set() + for v in autocomp_members: + char_soup.add(v[i]) + + if len(char_soup) > 1: + break + else: + autocomp_prefix_ret += char_soup.pop() + + return autocomp_prefix_ret, autocomp_members + elif len(autocomp_members) == 1: + if autocomp_prefix == autocomp_members[0]: + # the variable matched the prefix exactly + # add a '.' so you can quickly continue. + # Could try add [] or other possible extensions rather then '.' too if we had the variable. 
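do_autocomp() above filters the candidates by the typed prefix and then grows the prefix one character at a time while all candidates still agree (the 'ge' -> 'get' case in its comment). A self-contained sketch of that step, with no console state attached, is:

def complete_prefix(prefix, names):
    # Return (text_to_insert, remaining_candidates), growing the prefix while
    # every candidate still agrees on the next character - the same idea as
    # do_autocomp(), minus the console bookkeeping.
    names = [n for n in names if n.startswith(prefix)]
    if not names:
        return '', []
    if len(names) == 1:
        # single match: either step past it with '.' or finish the word
        return ('.', []) if names[0] == prefix else (names[0][len(prefix):], [])
    insert = ''
    shortest = min(len(n) for n in names)
    for i in range(len(prefix), shortest):
        chars = {n[i] for n in names}
        if len(chars) > 1:
            break
        insert += chars.pop()
    return insert, names

if __name__ == '__main__':
    print(complete_prefix('ge', ['getA', 'getB', 'getC']))    # ('t', ['getA', 'getB', 'getC'])
    print(complete_prefix('getA', ['getA', 'getB', 'getC']))  # ('.', [])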
+ return '.', [] + else: + # finish off the of the word word + return autocomp_members[0][len(autocomp_prefix):], [] + else: + return '', [] + + + def BCon_PrevChar(bcon): + cursor = bcon['cursor']-1 + if cursor<0: + return None + + try: + return bcon['edit_text'][cursor] + except: + return None + + + def BCon_NextChar(bcon): + try: + return bcon['edit_text'][bcon['cursor']] + except: + return None + + def BCon_cursorLeft(bcon): + bcon['cursor'] -= 1 + if bcon['cursor'] < 0: + bcon['cursor'] = 0 + + def BCon_cursorRight(bcon): + bcon['cursor'] += 1 + if bcon['cursor'] > len(bcon['edit_text']): + bcon['cursor'] = len(bcon['edit_text']) + + def BCon_AddScrollback(bcon, text): + + bcon['scrollback'] = bcon['scrollback'] + text + + + def BCon_cursorInsertChar(bcon, ch): + if bcon['cursor']==0: + bcon['edit_text'] = ch + bcon['edit_text'] + elif bcon['cursor']==len(bcon['edit_text']): + bcon['edit_text'] = bcon['edit_text'] + ch + else: + bcon['edit_text'] = bcon['edit_text'][:bcon['cursor']] + ch + bcon['edit_text'][bcon['cursor']:] + + bcon['cursor'] + if bcon['cursor'] > len(bcon['edit_text']): + bcon['cursor'] = len(bcon['edit_text']) + BCon_cursorRight(bcon) + + + TEMP_NAME = '___tempname___' + + cursor_orig = bcon['cursor'] + + ch = BCon_PrevChar(bcon) + while ch != None and (not is_delimiter(ch)): + ch = BCon_PrevChar(bcon) + BCon_cursorLeft(bcon) + + if ch != None: + BCon_cursorRight(bcon) + + #print (cursor_orig, bcon['cursor']) + + cursor_base = bcon['cursor'] + + autocomp_prefix = bcon['edit_text'][cursor_base:cursor_orig] + + print("PREFIX:'%s'" % autocomp_prefix) + + # Get the previous word + if BCon_PrevChar(bcon)=='.': + BCon_cursorLeft(bcon) + ch = BCon_PrevChar(bcon) + while ch != None and is_delimiter_autocomp(ch)==False: + ch = BCon_PrevChar(bcon) + BCon_cursorLeft(bcon) + + cursor_new = bcon['cursor'] + + if ch != None: + cursor_new+=1 + + pytxt = bcon['edit_text'][cursor_new:cursor_base-1].strip() + print("AUTOCOMP EVAL: '%s'" % pytxt) + #try: + if pytxt: + bcon['console'].runsource(TEMP_NAME + '=' + pytxt, '', 'single') + # print val + else: ##except: + val = None + + try: + val = bcon['namespace'][TEMP_NAME] + del bcon['namespace'][TEMP_NAME] + except: + val = None + + if val: + autocomp_members = dir(val) + + autocomp_prefix_ret, autocomp_members = do_autocomp(autocomp_prefix, autocomp_members) + + bcon['cursor'] = cursor_orig + for v in autocomp_prefix_ret: + BCon_cursorInsertChar(bcon, v) + cursor_orig = bcon['cursor'] + + if autocomp_members: + BCon_AddScrollback(bcon, ', '.join(autocomp_members)) + + del val + + else: + # Autocomp global namespace + autocomp_members = bcon['namespace'].keys() + + if autocomp_prefix: + autocomp_members = [v for v in autocomp_members if v.startswith(autocomp_prefix)] + + autocomp_prefix_ret, autocomp_members = do_autocomp(autocomp_prefix, autocomp_members) + + bcon['cursor'] = cursor_orig + for v in autocomp_prefix_ret: + BCon_cursorInsertChar(bcon, v) + cursor_orig = bcon['cursor'] + + if autocomp_members: + BCon_AddScrollback(bcon, ', '.join(autocomp_members)) + + bcon['cursor'] = cursor_orig \ No newline at end of file diff --git a/release/scripts/modules/bpy_ops.py b/release/scripts/modules/bpy_ops.py new file mode 100644 index 00000000000..83c2e82bf6c --- /dev/null +++ b/release/scripts/modules/bpy_ops.py @@ -0,0 +1,141 @@ +# for slightly faster access +from bpy.__ops__ import add as op_add +from bpy.__ops__ import remove as op_remove +from bpy.__ops__ import dir as op_dir +from bpy.__ops__ import call as op_call +from bpy.__ops__ 
import as_string as op_as_string +from bpy.__ops__ import get_rna as op_get_rna + +# Keep in sync with WM_types.h +context_dict = { + 'INVOKE_DEFAULT':0, + 'INVOKE_REGION_WIN':1, + 'INVOKE_AREA':2, + 'INVOKE_SCREEN':3, + 'EXEC_DEFAULT':4, + 'EXEC_REGION_WIN':5, + 'EXEC_AREA':6, + 'EXEC_SCREEN':7, +} + +class bpy_ops(object): + ''' + Fake module like class. + + bpy.ops + ''' + def add(self, pyop): + op_add(pyop) + + def remove(self, pyop): + op_remove(pyop) + + def __getattr__(self, module): + ''' + gets a bpy.ops submodule + ''' + return bpy_ops_submodule(module) + + def __dir__(self): + + submodules = set() + + # add this classes functions + for id_name in dir(self.__class__): + if not id_name.startswith('__'): + submodules.add(id_name) + + for id_name in op_dir(): + id_split = id_name.split('_OT_', 1) + + if len(id_split) == 2: + submodules.add(id_split[0].lower()) + else: + submodules.add(id_split[0]) + + return list(submodules) + + def __repr__(self): + return "<module like class 'bpy.ops'>" + + +class bpy_ops_submodule(object): + ''' + Utility class to fake submodules. + + eg. bpy.ops.object + ''' + __keys__ = ('module',) + + def __init__(self, module): + self.module = module + + def __getattr__(self, func): + ''' + gets a bpy.ops.submodule function + ''' + return bpy_ops_submodule_op(self.module, func) + + def __dir__(self): + + functions = set() + + module_upper = self.module.upper() + + for id_name in op_dir(): + id_split = id_name.split('_OT_', 1) + if len(id_split) == 2 and module_upper == id_split[0]: + functions.add(id_split[1]) + + return list(functions) + + def __repr__(self): + return "<module like class 'bpy.ops.%s'>" % self.module + +class bpy_ops_submodule_op(object): + ''' + Utility class to fake submodule operators. + + eg. bpy.ops.object.somefunc + ''' + __keys__ = ('module', 'func') + def __init__(self, module, func): + self.module = module + self.func = func + + def idname(self): + # submod.foo -> SUBMOD_OT_foo + return self.module.upper() + '_OT_' + self.func + + def __call__(self, *args, **kw): + + # Get the operator from blender + if len(args) > 1: + raise ValueError("only one argument for the execution context is supported ") + + if args: + try: + context = context_dict[args[0]] + except: + raise ValueError("Expected a single context argument in: " + str(list(context_dict.keys()))) + + return op_call(self.idname(), kw, context) + + else: + return op_call(self.idname(), kw) + + def get_rna(self): + ''' + currently only used for '__rna__' + ''' + return op_get_rna(self.idname()) + + + def __repr__(self): # useful display, repr(op) + return op_as_string(self.idname()) + + def __str__(self): # used for print(...) + return "<function bpy.ops.%s.%s at 0x%x>" % (self.module, self.func, id(self)) + +import bpy +bpy.ops = bpy_ops() diff --git a/release/scripts/modules/bpy_sys.py b/release/scripts/modules/bpy_sys.py new file mode 100644 index 00000000000..e60e8b01d09 --- /dev/null +++ b/release/scripts/modules/bpy_sys.py @@ -0,0 +1,12 @@ +import bpy +import os + +def expandpath(path): + if path.startswith("//"): + return os.path.join(os.path.dirname(bpy.data.filename), path[2:]) + + return path + +import types +bpy.sys = types.ModuleType("bpy.sys") +bpy.sys.expandpath = expandpath diff --git a/release/scripts/ms3d_import.py b/release/scripts/ms3d_import.py deleted file mode 100644 index c1438cbfc97..00000000000 --- a/release/scripts/ms3d_import.py +++ /dev/null @@ -1,487 +0,0 @@ -#!BPY -""" -Name: 'MilkShape3D (.ms3d)...'
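bpy_ops and its submodule classes above fake a package hierarchy with __getattr__, so that bpy.ops.object.delete(...) resolves to the idname OBJECT_OT_delete and is dispatched through op_call. A Blender-free sketch of that chaining pattern follows; the registry dict and the sample operator are invented for the demo and are not part of the module above:

# Blender-free sketch of the __getattr__ chaining used by bpy_ops;
# REGISTRY and the sample operator name are made up for illustration.
REGISTRY = {}

def register(idname, func):
    REGISTRY[idname] = func

class FakeOps:
    def __getattr__(self, module):
        return FakeSubmodule(module)

class FakeSubmodule:
    def __init__(self, module):
        self.module = module
    def __getattr__(self, func):
        return FakeOperator(self.module, func)

class FakeOperator:
    def __init__(self, module, func):
        self.module = module
        self.func = func
    def idname(self):
        # submod.foo -> SUBMOD_OT_foo, the same naming rule as bpy_ops
        return self.module.upper() + '_OT_' + self.func
    def __call__(self, **kw):
        return REGISTRY[self.idname()](**kw)

if __name__ == '__main__':
    register('OBJECT_OT_delete', lambda **kw: 'deleted %r' % (kw,))
    ops = FakeOps()
    print(ops.object.delete(confirm=True))   # looked up as OBJECT_OT_delete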
-Blender: 245 -Group: 'Import' -Tooltip: 'Import from MilkShape3D file format (.ms3d)' -""" -# -# Author: Markus Ilmola -# Email: markus.ilmola@pp.inet.fi -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# - -# import needed stuff -import os.path -import math -from math import * -import struct -import Blender -from Blender import Mathutils -from Blender.Mathutils import * - - -# trims a string by removing ending 0 and everything after it -def uku(s): - try: - return s[:s.index('\0')] - except: - return s - - -# Converts ms3d euler angles to a rotation matrix -def RM(a): - sy = sin(a[2]) - cy = cos(a[2]) - sp = sin(a[1]) - cp = cos(a[1]) - sr = sin(a[0]) - cr = cos(a[0]) - return Matrix([cp*cy, cp*sy, -sp], [sr*sp*cy+cr*-sy, sr*sp*sy+cr*cy, sr*cp],[cr*sp*cy+-sr*-sy, cr*sp*sy+-sr*cy, cr*cp]) - - -# Converts ms3d euler angles to a quaternion -def RQ(a): - angle = a[2] * 0.5; - sy = sin(angle); - cy = cos(angle); - angle = a[1] * 0.5; - sp = sin(angle); - cp = cos(angle); - angle = a[0] * 0.5; - sr = sin(angle); - cr = cos(angle); - return Quaternion(cr*cp*cy+sr*sp*sy, sr*cp*cy-cr*sp*sy, cr*sp*cy+sr*cp*sy, cr*cp*sy-sr*sp*cy) - - -# takes a texture filename and tries to load it -def loadImage(path, filename): - image = None - try: - image = Blender.Image.Load(os.path.abspath(filename)) - except IOError: - print "Warning: Failed to load image: " + filename + ". Trying short path instead...\n" - try: - image = Blender.Image.Load(os.path.dirname(path) + "/" + os.path.basename(filename)) - except IOError: - print "Warning: Failed to load image: " + os.path.basename(filename) + "!\n" - return image - - -# imports a ms3d file to the current scene -def import_ms3d(path): - # get scene - scn = Blender.Scene.GetCurrent() - if scn == None: - return "No scene to import to!" - - # open the file - try: - file = open(path, 'rb') - except IOError: - return "Failed to open the file!" - - # get the file size - file.seek(0, os.SEEK_END); - fileSize = file.tell(); - file.seek(0, os.SEEK_SET); - - # read id to check if the file is a MilkShape3D file - id = file.read(10) - if id!="MS3D000000": - return "The file is not a MS3D file!" - - # read version - version = struct.unpack("i", file.read(4))[0] - if version!=4: - return "The file has invalid version!" 
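import_ms3d() above reads the binary layout directly with struct.unpack: a 10-byte magic id, an int version, then counted records such as the vertex block that follows in this hunk. A small Python 3 sketch of the same header and vertex reads against a fabricated in-memory buffer (the original is Python 2 and reads the real file) is:

import struct
from io import BytesIO

def trim_at_nul(s):
    # same job as the script's uku(): drop everything from the first NUL on
    i = s.find(b'\0')
    return s if i == -1 else s[:i]

# fabricated miniature payload: magic, version 4, then a two-vertex block
buf = BytesIO()
buf.write(b'MS3D000000')
buf.write(struct.pack('i', 4))
buf.write(struct.pack('H', 2))                      # vertex count
for co in ((0.0, 0.0, 0.0), (1.0, 2.0, 3.0)):
    buf.write(struct.pack('B', 0))                  # flags (skipped by the importer)
    buf.write(struct.pack('fff', *co))              # position
    buf.write(struct.pack('b', -1))                 # bone id
    buf.write(struct.pack('B', 0))                  # reference count
buf.seek(0)

assert buf.read(10) == b'MS3D000000', 'not an MS3D file'
version = struct.unpack('i', buf.read(4))[0]
assert version == 4, 'unsupported version'
num_verts = struct.unpack('H', buf.read(2))[0]
for _ in range(num_verts):
    buf.read(1)                                     # flags
    print(struct.unpack('fff', buf.read(12)))       # coords
    print('bone id:', struct.unpack('b', buf.read(1))[0])
    buf.read(1)                                     # refcount
print(trim_at_nul(b'diffuse.png\0garbage'))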
- - # Create the mesh - scn.objects.selected = [] - mesh = Blender.Mesh.New("MilkShape3D Mesh") - meshOb = scn.objects.new(mesh) - - # read the number of vertices - numVertices = struct.unpack("H", file.read(2))[0] - - # read vertices - coords = [] - boneIds = [] - for i in xrange(numVertices): - # skip flags - file.read(1) - - # read coords - coords.append(struct.unpack("fff", file.read(3*4))) - - # read bone ids - boneIds.append(struct.unpack("b", file.read(1))[0]) - - # skip refcount - file.read(1) - - # add the vertices to the mesh - mesh.verts.extend(coords) - - # read number of triangles - numTriangles = struct.unpack("H", file.read(2))[0] - - # read triangles - faces = [] - uvs = [] - for i in xrange(numTriangles): - # skip flags - file.read(2) - - # read indices (faces) - faces.append(struct.unpack("HHH", file.read(3*2))) - - # read normals - normals = struct.unpack("fffffffff", file.read(3*3*4)) - - # read texture coordinates - s = struct.unpack("fff", file.read(3*4)) - t = struct.unpack("fff", file.read(3*4)) - - # store texture coordinates - uvs.append([[s[0], 1-t[0]], [s[1], 1-t[1]], [s[2], 1-t[2]]]) - - if faces[-1][2] == 0: # Cant have zero at the third index - faces[-1] = faces[-1][1], faces[-1][2], faces[-1][0] - uvs[-1] = uvs[-1][1], uvs[-1][2], uvs[-1][0] - - # skip smooth group - file.read(1) - - # skip group - file.read(1) - - # add the faces to the mesh - mesh.faces.extend(faces) - - # set texture coordinates - for i in xrange(numTriangles): - mesh.faces[i].uv = [Vector(uvs[i][0]), Vector(uvs[i][1]), Vector(uvs[i][2])] - - # read number of groups - numGroups = struct.unpack("H", file.read(2))[0] - - # read groups - for i in xrange(numGroups): - # skip flags - file.read(1) - - # skip name - file.read(32) - - # read the number of triangles in the group - numGroupTriangles = struct.unpack("H", file.read(2))[0] - - # read the group triangles - if numGroupTriangles > 0: - triangleIndices = struct.unpack(str(numGroupTriangles) + "H", file.read(2*numGroupTriangles)); - - # read material - material = struct.unpack("b", file.read(1))[0] - if material>=0: - for j in xrange(numGroupTriangles): - mesh.faces[triangleIndices[j]].mat = material - - # read the number of materials - numMaterials = struct.unpack("H", file.read(2))[0] - - # read materials - for i in xrange(numMaterials): - # read name - name = uku(file.read(32)) - - # create the material - mat = Blender.Material.New(name) - mesh.materials += [mat] - - # read ambient color - ambient = struct.unpack("ffff", file.read(4*4))[0:3] - mat.setAmb((ambient[0]+ambient[1]+ambient[2])/3) - - # read diffuse color - diffuse = struct.unpack("ffff", file.read(4*4))[0:3] - mat.setRGBCol(diffuse) - - # read specular color - specular = struct.unpack("ffff", file.read(4*4))[0:3] - mat.setSpecCol(specular) - - # read emissive color - emissive = struct.unpack("ffff", file.read(4*4))[0:3] - mat.setEmit((emissive[0]+emissive[1]+emissive[2])/3) - - # read shininess - shininess = struct.unpack("f", file.read(4))[0] - - # read transparency - transparency = struct.unpack("f", file.read(4))[0] - mat.setAlpha(transparency) - if transparency < 1: - mat.mode |= Blender.Material.Modes.ZTRANSP - - # read mode - mode = struct.unpack("B", file.read(1))[0] - - # read texturemap - texturemap = uku(file.read(128)) - if len(texturemap)>0: - colorTexture = Blender.Texture.New(name + "_texture") - colorTexture.setType('Image') - colorTexture.setImage(loadImage(path, texturemap)) - mat.setTexture(0, colorTexture, Blender.Texture.TexCo.UV, 
Blender.Texture.MapTo.COL) - - # read alphamap - alphamap = uku(file.read(128)) - if len(alphamap)>0: - alphaTexture = Blender.Texture.New(name + "_alpha") - alphaTexture.setType('Image') - alphaTexture.setImage(loadImage(path, alphamap)) - mat.setTexture(1, alphaTexture, Blender.Texture.TexCo.UV, Blender.Texture.MapTo.ALPHA) - - # read animation - fps = struct.unpack("f", file.read(4))[0] - time = struct.unpack("f", file.read(4))[0] - frames = struct.unpack("i", file.read(4))[0] - - # read the number of joints - numJoints = struct.unpack("H", file.read(2))[0] - - # create the armature - armature = 0 - armOb = 0 - if numJoints > 0: - armOb = Blender.Object.New('Armature', "MilkShape3D Skeleton") - armature = Blender.Armature.New("MilkShape3D Skeleton") - armature.drawType = Blender.Armature.STICK - armOb.link(armature) - scn.objects.link(armOb) - armOb.makeParentDeform([meshOb]) - armature.makeEditable() - - # read joints - joints = [] - rotKeys = {} - posKeys = {} - for i in xrange(numJoints): - # skip flags - file.read(1) - - # read name - name = uku(file.read(32)) - joints.append(name) - - # create the bone - bone = Blender.Armature.Editbone() - armature.bones[name] = bone - - # read parent - parent = uku(file.read(32)) - if len(parent)>0: - bone.parent = armature.bones[parent] - - # read orientation - rot = struct.unpack("fff", file.read(3*4)) - - # read position - pos = struct.unpack("fff", file.read(3*4)) - - # set head - if bone.hasParent(): - bone.head = Vector(pos) * bone.parent.matrix + bone.parent.head - tempM = RM(rot) * bone.parent.matrix - tempM.transpose; - bone.matrix = tempM - else: - bone.head = Vector(pos) - bone.matrix = RM(rot) - - # set tail - bvec = bone.tail - bone.head - bvec.normalize() - bone.tail = bone.head + 0.01 * bvec - - # Create vertex group for this bone - mesh.addVertGroup(name) - vgroup = [] - for index, v in enumerate(boneIds): - if v==i: - vgroup.append(index) - mesh.assignVertsToGroup(name, vgroup, 1.0, 1) - - # read the number of rotation keys - numKeyFramesRot = struct.unpack("H", file.read(2))[0] - - # read the number of postions keys - numKeyFramesPos = struct.unpack("H", file.read(2))[0] - - # read rotation keys - rotKeys[name] = [] - for j in xrange(numKeyFramesRot): - # read time - time = fps * struct.unpack("f", file.read(4))[0] - # read data - rotKeys[name].append([time, struct.unpack("fff", file.read(3*4))]) - - # read position keys - posKeys[name] = [] - for j in xrange(numKeyFramesPos): - # read time - time = fps * struct.unpack("f", file.read(4))[0] - # read data - posKeys[name].append([time, struct.unpack("fff", file.read(3*4))]) - - # create action and pose - action = 0 - pose = 0 - if armature!=0: - armature.update() - pose = armOb.getPose() - action = armOb.getAction() - if not action: - action = Blender.Armature.NLA.NewAction() - action.setActive(armOb) - - # create animation key frames - for name, pbone in pose.bones.items(): - # create position keys - for key in posKeys[name]: - pbone.loc = Vector(key[1]) - pbone.insertKey(armOb, int(key[0]+0.5), Blender.Object.Pose.LOC, True) - - # create rotation keys - for key in rotKeys[name]: - pbone.quat = RQ(key[1]) - pbone.insertKey(armOb, int(key[0]+0.5), Blender.Object.Pose.ROT, True) - - # The old format ends here. 
If there is more data then the file is newer version - - # check to see if there are any comments - if file.tell()0: - print "Group comment: " + file.read(size) - - # Material comments - numComments = struct.unpack("i", file.read(4))[0] - for i in range(numComments): - file.read(4) # index - size = struct.unpack("i", file.read(4))[0] # comment size - if size>0: - print "Material comment: " + file.read(size) - - # Joint comments - numComments = struct.unpack("i", file.read(4))[0] - for i in range(numComments): - file.read(4) # index - size = struct.unpack("i", file.read(4))[0] # comment size - if size>0: - print "Joint comment: " + file.read(size) - - # Model comments - numComments = struct.unpack("i", file.read(4))[0] - for i in range(numComments): - file.read(4) # index - size = struct.unpack("i", file.read(4))[0] # comment size - if size>0: - print "Model comment: " + file.read(size) - - # Unknown version give a warning - else: - print "Warning: Unknown version!" - - - # check to see if there is any extra vertex data - if file.tell()=0 or ids[1]>=0 or ids[2]>=0: - mesh.assignVertsToGroup(joints[boneIds[i]], [i], 0.01*weights[0], 1) - if ids[0]>=0: - mesh.assignVertsToGroup(joints[ids[0]], [i], 0.01*weights[1], 1) - if ids[1]>=0: - mesh.assignVertsToGroup(joints[ids[1]], [i], 0.01*weights[2], 1) - if ids[2]>=0: - mesh.assignVertsToGroup(joints[ids[2]], [i], 0.01*(100-(weights[0]+weights[1]+weights[2])), 1) - - elif subVersion==1: - # read extra data for each vertex - for i in xrange(numVertices): - # bone ids - ids = struct.unpack("bbb", file.read(3)) - # weights - weights = struct.unpack("BBB", file.read(3)) - # add extra vertices with weights to deform groups - if ids[0]>=0 or ids[1]>=0 or ids[2]>=0: - mesh.assignVertsToGroup(joints[boneIds[i]], [i], 0.01*weights[0], 1) - if ids[0]>=0: - mesh.assignVertsToGroup(joints[ids[0]], [i], 0.01*weights[1], 1) - if ids[1]>=0: - mesh.assignVertsToGroup(joints[ids[1]], [i], 0.01*weights[2], 1) - if ids[2]>=0: - mesh.assignVertsToGroup(joints[ids[2]], [i], 0.01*(100-(weights[0]+weights[1]+weights[2])), 1) - - # non supported subversion give a warning - else: - print "Warning: Unknown subversion!" - - # rest of the extra data in the file is not imported/used - - # refresh the view - Blender.Redraw() - - # close the file - file.close() - - # succes return empty error string - return "" - - -# load the model -def fileCallback(filename): - error = import_ms3d(filename) - if error!="": - Blender.Draw.PupMenu("An error occured during import: " + error + "|Not all data might have been imported succesfully.", 2) - -Blender.Window.FileSelector(fileCallback, 'Import') diff --git a/release/scripts/ms3d_import_ascii.py b/release/scripts/ms3d_import_ascii.py deleted file mode 100644 index d8c22a1ec99..00000000000 --- a/release/scripts/ms3d_import_ascii.py +++ /dev/null @@ -1,479 +0,0 @@ -#!BPY -""" -Name: 'MilkShape3D ASCII (.txt)...' -Blender: 245 -Group: 'Import' -Tooltip: 'Import from a MilkShape3D ASCII file format (.txt)' -""" -# -# Author: Markus Ilmola -# Email: markus.ilmola@pp.inet.fi -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# - -# import needed stuff -import os.path -import re -import math -from math import * -import Blender -from Blender import Mathutils -from Blender.Mathutils import * - - - -# Converts ms3d euler angles to a rotation matrix -def RM(a): - sy = sin(a[2]) - cy = cos(a[2]) - sp = sin(a[1]) - cp = cos(a[1]) - sr = sin(a[0]) - cr = cos(a[0]) - return Matrix([cp*cy, cp*sy, -sp], [sr*sp*cy+cr*-sy, sr*sp*sy+cr*cy, sr*cp],[cr*sp*cy+-sr*-sy, cr*sp*sy+-sr*cy, cr*cp]) - - -# Converts ms3d euler angles to a quaternion -def RQ(a): - angle = a[2] * 0.5; - sy = sin(angle); - cy = cos(angle); - angle = a[1] * 0.5; - sp = sin(angle); - cp = cos(angle); - angle = a[0] * 0.5; - sr = sin(angle); - cr = cos(angle); - return Quaternion(cr*cp*cy+sr*sp*sy, sr*cp*cy-cr*sp*sy, cr*sp*cy+sr*cp*sy, cr*cp*sy-sr*sp*cy) - - -# takes a texture filename and tries to load it -def loadImage(path, filename): - image = None - try: - image = Blender.Image.Load(os.path.abspath(filename)) - except IOError: - print "Warning: Failed to load image: " + filename + ". Trying short path instead...\n" - try: - image = Blender.Image.Load(os.path.dirname(path) + "/" + os.path.basename(filename)) - except IOError: - print "Warning: Failed to load image: " + os.path.basename(filename) + "!\n" - return image - - - -# returns the next non-empty, non-comment line from the file -def getNextLine(file): - ready = False - while ready==False: - line = file.readline() - if len(line)==0: - print "Warning: End of file reached." - return line - ready = True - line = line.strip() - if len(line)==0 or line.isspace(): - ready = False - if len(line)>=2 and line[0]=='/' and line[1]=='/': - ready = False - return line - - - -# imports a MilkShape3D ascii file to the current scene -def import_ms3d_ascii(path): - # limits - MAX_NUMMESHES = 1000 - MAX_NUMVERTS = 100000 - MAX_NUMNORMALS = 100000 - MAX_NUMTRIS = 100000 - MAX_NUMMATS = 16 - MAX_NUMBONES = 100 - MAX_NUMPOSKEYS = 1000 - MAX_NUMROTKEYS = 1000 - - # get scene - scn = Blender.Scene.GetCurrent() - if scn==None: - return "No scene to import to!" - - # open the file - try: - file = open(path, 'r') - except IOError: - return "Failed to open the file!" - - # Read frame info - try: - lines = getNextLine(file).split() - if len(lines) != 2 or lines[0] != "Frames:": - raise ValueError - lines = getNextLine(file).split() - if len(lines) != 2 or lines[0] != "Frame:": - raise ValueError - except ValueError: - return "Frame information is invalid!" - - # Create the mesh - meshOb = Blender.Object.New('Mesh', "MilkShape3D Object") - mesh = Blender.Mesh.New("MilkShape3D Mesh") - meshOb.link(mesh) - scn.objects.link(meshOb) - - # read the number of meshes - try: - lines = getNextLine(file).split() - if len(lines)!=2 or lines[0]!="Meshes:": - raise ValueError - numMeshes = int(lines[1]) - if numMeshes < 0 or numMeshes > MAX_NUMMESHES: - raise ValueError - except ValueError: - return "Number of meshes is invalid!" - - # read meshes - vertBase = 0 - faceBase = 0 - boneIds = [] - for i in range(numMeshes): - # read name, flags and material - try: - lines = re.findall(r'\".*\"|[^ ]+', getNextLine(file)) - if len(lines)!=3: - raise ValueError - material = int(lines[2]) - except ValueError: - return "Name, flags or material in mesh " + str(i+1) + " are invalid!" 
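getNextLine() above is the ASCII parser's little reader: it skips blank lines and '//' comments and returns '' at end of file. An equivalent standalone reader, exercised on an in-memory snippet, is:

from io import StringIO

def next_payload_line(f):
    # Return the next non-blank, non-'//' line stripped, or '' at EOF -
    # the same contract as the script's getNextLine().
    while True:
        line = f.readline()
        if not line:          # end of file
            return ''
        line = line.strip()
        if not line or line.startswith('//'):
            continue
        return line

if __name__ == '__main__':
    sample = StringIO("// MilkShape 3D ASCII\n\nFrames: 30\nFrame: 1\n")
    print(next_payload_line(sample))        # Frames: 30
    print(next_payload_line(sample))        # Frame: 1
    print(repr(next_payload_line(sample)))  # ''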
- - # read the number of vertices - try: - numVerts = int(getNextLine(file)) - if numVerts < 0 or numVerts > MAX_NUMVERTS: - raise ValueError - except ValueError: - return "Number of vertices in mesh " + str(i+1) + " is invalid!" - - # read vertices - coords = [] - uvs = [] - for j in xrange(numVerts): - try: - lines = getNextLine(file).split() - if len(lines)!=7: - raise ValueError - coords.append([float(lines[1]), float(lines[2]), float(lines[3])]) - uvs.append([float(lines[4]), 1-float(lines[5])]) - boneIds.append(int(lines[6])) - except ValueError: - return "Vertex " + str(j+1) + " in mesh " + str(i+1) + " is invalid!" - mesh.verts.extend(coords) - - # read number of normals - try: - numNormals = int(getNextLine(file)) - if numNormals < 0 or numNormals > MAX_NUMNORMALS: - raise ValueError - except ValueError: - return "Number of normals in mesh " + str(i+1) + " is invalid!" - - # read normals - normals = [] - for j in xrange(numNormals): - try: - lines = getNextLine(file).split() - if len(lines)!=3: - raise ValueError - normals.append([float(lines[0]), float(lines[1]), float(lines[2])]) - except ValueError: - return "Normal " + str(j+1) + " in mesh " + str(i+1) + " is invalid!" - - # read the number of triangles - try: - numTris = int(getNextLine(file)) - if numTris < 0 or numTris > MAX_NUMTRIS: - raise ValueError - except ValueError: - return "Number of triangles in mesh " + str(i+1) + " is invalid!" - - # read triangles - faces = [] - for j in xrange(numTris): - # read the triangle - try: - lines = getNextLine(file).split() - if len(lines)!=8: - raise ValueError - v1 = int(lines[1]) - v2 = int(lines[2]) - v3 = int(lines[3]) - faces.append([v1+vertBase, v2+vertBase, v3+vertBase]) - except ValueError: - return "Triangle " + str(j+1) + " in mesh " + str(i+1) + " is invalid!" - mesh.faces.extend(faces) - - # set texture coordinates and material - for j in xrange(faceBase, len(mesh.faces)): - face = mesh.faces[j] - face.uv = [Vector(uvs[face.verts[0].index-vertBase]), Vector(uvs[face.verts[1].index-vertBase]), Vector(uvs[face.verts[2].index-vertBase])] - if material>=0: - face.mat = material - - # increase vertex and face base - vertBase = len(mesh.verts) - faceBase = len(mesh.faces) - - # read the number of materials - try: - lines = getNextLine(file).split() - if len(lines)!=2 or lines[0]!="Materials:": - raise ValueError - numMats = int(lines[1]) - if numMats < 0 or numMats > MAX_NUMMATS: - raise ValueError - except ValueError: - return "Number of materials is invalid!" - - # read the materials - for i in range(numMats): - # read name - name = getNextLine(file)[1:-1] - - # create the material - mat = Blender.Material.New(name) - mesh.materials += [mat] - - # read ambient color - try: - lines = getNextLine(file).split() - if len(lines)!=4: - raise ValueError - amb = (float(lines[0])+float(lines[1])+float(lines[2]))/3 - mat.setAmb(amb) - except ValueError: - return "Ambient color in material " + str(i+1) + " is invalid!" - - # read diffuse color - try: - lines = getNextLine(file).split() - if len(lines)!=4: - raise ValueError - mat.setRGBCol([float(lines[0]), float(lines[1]), float(lines[2])]) - except ValueError: - return "Diffuse color in material " + str(i+1) + " is invalid!" - - # read specular color - try: - lines = getNextLine(file).split() - if len(lines)!=4: - raise ValueError - mat.setSpecCol([float(lines[0]), float(lines[1]), float(lines[2])]) - except ValueError: - return "Specular color in material " + str(i+1) + " is invalid!" 
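Each color line above is handled with the same pattern: read a line, split it, check the field count, convert to float, and turn any failure into a per-material error message. A compact helper expressing that pattern is sketched below; it is a refactoring illustration, not a function the script defines:

def parse_floats(line, expected, what):
    # Split 'line', require 'expected' fields, and return them as floats,
    # raising ValueError with a readable message - mirroring the script's
    # per-field try/except blocks.
    parts = line.split()
    if len(parts) != expected:
        raise ValueError('%s: expected %d values, got %d' % (what, expected, len(parts)))
    try:
        return [float(p) for p in parts]
    except ValueError:
        raise ValueError('%s: non-numeric value in %r' % (what, line))

if __name__ == '__main__':
    r, g, b, a = parse_floats('0.2 0.2 0.2 1.0', 4, 'ambient color')
    print((r + g + b) / 3)   # the script stores this average via setAmb()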
- - # read emissive color - try: - lines = getNextLine(file).split() - if len(lines)!=4: - raise ValueError - emit = (float(lines[0])+float(lines[1])+float(lines[2]))/3 - mat.setEmit(emit) - except ValueError: - return "Emissive color in material " + str(i+1) + " is invalid!" - - # read shininess - try: - shi = float(getNextLine(file)) - #mat.setHardness(int(shi)) - except ValueError: - return "Shininess in material " + str(i+1) + " is invalid!" - - # read transparency - try: - alpha = float(getNextLine(file)) - mat.setAlpha(alpha) - if alpha < 1: - mat.mode |= Blender.Material.Modes.ZTRANSP - except ValueError: - return "Transparency in material " + str(i+1) + " is invalid!" - - # read texturemap - texturemap = getNextLine(file)[1:-1] - if len(texturemap)>0: - colorTexture = Blender.Texture.New(name + "_texture") - colorTexture.setType('Image') - colorTexture.setImage(loadImage(path, texturemap)) - mat.setTexture(0, colorTexture, Blender.Texture.TexCo.UV, Blender.Texture.MapTo.COL) - - # read alphamap - alphamap = getNextLine(file)[1:-1] - if len(alphamap)>0: - alphaTexture = Blender.Texture.New(name + "_alpha") - alphaTexture.setType('Image') - alphaTexture.setImage(loadImage(path, alphamap)) - mat.setTexture(1, alphaTexture, Blender.Texture.TexCo.UV, Blender.Texture.MapTo.ALPHA) - - # read the number of bones - try: - lines = getNextLine(file).split() - if len(lines)!=2 or lines[0]!="Bones:": - raise ValueError - numBones = int(lines[1]) - if numBones < 0 or numBones > MAX_NUMBONES: - raise ValueError - except: - return "Number of bones is invalid!" - - # create the armature - armature = None - armOb = None - if numBones > 0: - armOb = Blender.Object.New('Armature', "MilkShape3D Skeleton") - armature = Blender.Armature.New("MilkShape3D Skeleton") - armature.drawType = Blender.Armature.STICK - armOb.link(armature) - scn.objects.link(armOb) - armOb.makeParentDeform([meshOb]) - armature.makeEditable() - - # read bones - posKeys = {} - rotKeys = {} - for i in range(numBones): - # read name - name = getNextLine(file)[1:-1] - - # create the bone - bone = Blender.Armature.Editbone() - armature.bones[name] = bone - - # read parent - parent = getNextLine(file)[1:-1] - if len(parent)>0: - bone.parent = armature.bones[parent] - - # read position and rotation - try: - lines = getNextLine(file).split() - if len(lines) != 7: - raise ValueError - pos = [float(lines[1]), float(lines[2]), float(lines[3])] - rot = [float(lines[4]), float(lines[5]), float(lines[6])] - except ValueError: - return "Invalid position or orientation in a bone!" - - # set position and orientation - if bone.hasParent(): - bone.head = Vector(pos) * bone.parent.matrix + bone.parent.head - bone.tail = bone.head + Vector([1,0,0]) - tempM = RM(rot) * bone.parent.matrix - tempM.transpose; - bone.matrix = tempM - else: - bone.head = Vector(pos) - bone.tail = bone.head + Vector([1,0,0]) - bone.matrix = RM(rot) - - # Create vertex group for this bone - mesh.addVertGroup(name) - vgroup = [] - for index, v in enumerate(boneIds): - if v==i: - vgroup.append(index) - mesh.assignVertsToGroup(name, vgroup, 1.0, 1) - - # read the number of position key frames - try: - numPosKeys = int(getNextLine(file)) - if numPosKeys < 0 or numPosKeys > MAX_NUMPOSKEYS: - raise ValueError - except ValueError: - return "Invalid number of position key frames!" 
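The joint loop above rebuilds each vertex group by scanning the whole boneIds list once per bone. The same assignment can be collected in a single pass; a sketch (using the usual -1 = 'no bone' convention from the format) is:

def group_vertices_by_bone(bone_ids):
    # Map bone index -> list of vertex indices, built in one pass instead of
    # one full scan per bone.
    groups = {}
    for vert_index, bone_index in enumerate(bone_ids):
        if bone_index >= 0:                 # -1 means the vertex has no bone
            groups.setdefault(bone_index, []).append(vert_index)
    return groups

if __name__ == '__main__':
    print(group_vertices_by_bone([0, 0, 1, -1, 2, 1]))
    # {0: [0, 1], 1: [2, 5], 2: [4]}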
- - # read position key frames - posKeys[name] = [] - for j in range(numPosKeys): - # read time and position - try: - lines = getNextLine(file).split() - if len(lines) != 4: - raise ValueError - time = float(lines[0]) - pos = [float(lines[1]), float(lines[2]), float(lines[3])] - posKeys[name].append([time, pos]) - except ValueError: - return "Invalid position key frame!" - - # read the number of rotation key frames - try: - numRotKeys = int(getNextLine(file)) - if numRotKeys < 0 or numRotKeys > MAX_NUMROTKEYS: - raise ValueError - except ValueError: - return "Invalid number of rotation key frames!" - - # read rotation key frames - rotKeys[name] = [] - for j in range(numRotKeys): - # read time and rotation - try: - lines = getNextLine(file).split() - if len(lines) != 4: - raise ValueError - time = float(lines[0]) - rot = [float(lines[1]), float(lines[2]), float(lines[3])] - rotKeys[name].append([time, rot]) - except ValueError: - return "Invalid rotation key frame!" - - # create action and pose - action = None - pose = None - if armature != None: - armature.update() - pose = armOb.getPose() - action = armOb.getAction() - if not action: - action = Blender.Armature.NLA.NewAction() - action.setActive(armOb) - - # create animation key frames - for name, pbone in pose.bones.items(): - # create position keys - for key in posKeys[name]: - pbone.loc = Vector(key[1]) - pbone.insertKey(armOb, int(key[0]+0.5), Blender.Object.Pose.LOC, True) - - # create rotation keys - for key in rotKeys[name]: - pbone.quat = RQ(key[1]) - pbone.insertKey(armOb, int(key[0]+0.5), Blender.Object.Pose.ROT, True) - - # set the imported object to be the selected one - scn.objects.selected = [] - meshOb.sel= 1 - Blender.Redraw() - - # The import was a succes! - return "" - - -# load the model -def fileCallback(filename): - error = import_ms3d_ascii(filename) - if error!="": - Blender.Draw.PupMenu("An error occured during import: " + error + "|Not all data might have been imported succesfully.", 2) - -Blender.Window.FileSelector(fileCallback, 'Import') diff --git a/release/scripts/obdatacopier.py b/release/scripts/obdatacopier.py deleted file mode 100644 index 2f5617951de..00000000000 --- a/release/scripts/obdatacopier.py +++ /dev/null @@ -1,215 +0,0 @@ -#!BPY - -""" Registration info for Blender menus: <- these words are ignored -Name: 'Data Copier' -Blender: 232 -Group: 'Object' -Tip: 'Copy data from active object to other selected ones.' -""" - -__author__ = "Jean-Michel Soler (jms), Campbell Barton (Ideasman42)" -__url__ = ("blender", "blenderartists.org", -"Script's homepage, http://jmsoler.free.fr/didacticiel/blender/tutor/cpl_lampdatacopier.htm", -"Communicate problems and errors, http://www.zoo-logique.org/3D.Blender/newsportal/thread.php?group=3D.Blender") -__version__ = "0.1.2" - -__bpydoc__ = """\ -Use "Data Copier" to copy attributes from the active object to other selected ones of -its same type. - -This script is still in an early version but is already useful for copying -attributes for some types of objects like lamps and cameras. - -Usage: - -Select the objects that will be updated, select the object whose data will -be copied (they must all be of the same type, of course), then run this script. -Toggle the buttons representing the attributes to be copied and press "Copy". 
-""" - -# ---------------------------------------------------------- -# Object DATA copier 0.1.2 -# (c) 2004 jean-michel soler -# ----------------------------------------------------------- -#---------------------------------------------- -# Page officielle/official page du blender python Object DATA copier: -# http://jmsoler.free.fr/didacticiel/blender/tutor/cpl_lampdatacopier.htm -# Communiquer les problemes et erreurs sur: -# To Communicate problems and errors on: -# http://www.zoo-logique.org/3D.Blender/newsportal/thread.php?group=3D.Blender -#--------------------------------------------- -# Blender Artistic License -# http://download.blender.org/documentation/html/x21254.html -#--------------------------------------------- - -import Blender -from Blender import * -from Blender.Draw import * -from Blender.BGL import * - - -scn= Blender.Scene.GetCurrent() - -type_func_method= type(dir) -type_func= type(lambda:i) -type_dict= type({}) -# type_list= type([]) - -IGNORE_VARS = 'users', 'fakeUser', 'edges', 'faces', 'verts', 'elements' - -def renew(): - scn= Blender.Scene.GetCurrent() - act_ob= scn.objects.active - if act_ob==None: - return {} - - act_ob_type= act_ob.getType() - act_ob_data= act_ob.getData(mesh=1) - - if act_ob_data==None: # Surf? - return {} - - PARAM={} - evt=4 - doc='doc' - - for prop_name in dir(act_ob_data): - if not prop_name.startswith('__') and prop_name not in IGNORE_VARS: - # Get the type - try: exec 'prop_type= type(act_ob_data.%s)' % prop_name - except: prop_type= None - - if prop_type != None and prop_type not in (type_func_method, type_func, type_dict): - - # Now we know that the attribute can be listed in the UI Create a button and tooltip. - - # Tooltip - try: - if prop_name=='mode': - try: - exec "doc=str(%s.Modes)+' ; value : %s'"%( act_ob_type, str(act_ob_data.mode) ) - except: - exec """doc= '%s'+' value = '+ str(act_ob.getData(mesh=1).%s)"""%(prop_name, prop_name) - elif prop_name=='type': - try: - exec "doc=str(%s.Types)+' ; value : %s'"%( act_ob_type, str(act_ob_data.type) ) - except: - exec """doc= '%s'+' value = '+ str(act_ob.getData(mesh=1).%s)"""%(prop_name, prop_name) - else: - exec """doc= '%s'+' value = '+ str(act_ob_data.%s)"""%(prop_name, prop_name) - if doc.find('built-in')!=-1: - exec """doc= 'This is a function ! Doc = '+ str(act_ob_data.%s.__doc__)"""% prop_name - except: - doc='Doc...' 
- - # Button - PARAM[prop_name]= [Create(0), evt, doc] - evt+=1 - - return PARAM - -def copy(): - global PARAM - - scn= Blender.Scene.GetCurrent() - act_ob= scn.getActiveObject() - if act_ob==None: - Blender.Draw.PupMenu('Error|No Active Object.') - return - - act_ob_type= act_ob.getType() - - if act_ob_type in ('Empty', 'Surf'): - Blender.Draw.PupMenu('Error|Copying Empty or Surf object data isnt supported.') - return - - act_ob_data= act_ob.getData(mesh=1) - - print '\n\nStarting copy for object "%s"' % act_ob.name - some_errors= False - for ob in scn.objects.context: - if ob != act_ob and ob.getType() == act_ob_type: - ob_data= None - for prop_name, value in PARAM.iteritems(): - if value[0].val==1: - - # Init the object data if we havnt alredy - if ob_data==None: - ob_data= ob.getData(mesh=1) - - try: - exec "ob_data.%s = act_ob_data.%s"%(prop_name, prop_name) - except: - some_errors= True - print 'Cant copy property "%s" for type "%s"' % (prop_name, act_ob_type) - if some_errors: - Blender.Draw.PupMenu('Some attributes could not be copied, see console for details.') - -PARAM= renew() - -def EVENT(evt,val): - pass - -def BUTTON(evt): - global PARAM - if (evt==1): - Exit() - - if (evt==2): - copy() - Blender.Redraw() - - if (evt==3): - PARAM= renew() - Blender.Redraw() - -def DRAW(): - global PARAM - - scn= Blender.Scene.GetCurrent() - act_ob= scn.objects.active - - glColor3f(0.7, 0.7, 0.7) - glClear(GL_COLOR_BUFFER_BIT) - glColor3f(0.1, 0.1, 0.15) - - size=Buffer(GL_FLOAT, 4) - glGetFloatv(GL_SCISSOR_BOX, size) - size= size.list - for s in [0,1,2,3]: size[s]=int(size[s]) - ligne=20 - - Button("Exit",1,20,4,80,ligne) - Button("Copy",2,102,4,80,ligne) - Button("Renew",3,184,4,80,ligne) - - glRasterPos2f(20, ligne*2-8) - if act_ob: - Text(act_ob.getType()+" DATA copier") - else: - Text("Please select an object") - - - max=size[3] / 22 -2 - pos = 0 - decal = 20 - key=PARAM.keys() - key.sort() - for p in key: - if pos==max: - decal+=102 - pos=1 - else: - pos+=1 - - PARAM[p][0]=Toggle(p, - PARAM[p][1], - decal, - pos*22+22, - 100, - 20, - PARAM[p][0].val, - str(PARAM[p][2])) - - -Register(DRAW,EVENT,BUTTON) diff --git a/release/scripts/object_active_to_other.py b/release/scripts/object_active_to_other.py deleted file mode 100644 index 68aa6a3a039..00000000000 --- a/release/scripts/object_active_to_other.py +++ /dev/null @@ -1,58 +0,0 @@ -#!BPY -""" -Name: 'Copy Active to Selected' -Blender: 249 -Group: 'Object' -Tooltip: 'For every selected object, copy the active to their loc/size/rot' -""" - -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Script copyright (C) Campbell Barton -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
-# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - - -from Blender import Window, sys, Draw -import bpy - -def my_object_util(sce): - ob_act = sce.objects.active - - if not ob_act: - Draw.PupMenu('Error%t|No active object selected') - return - - mats = [(ob, ob.matrixWorld) for ob in sce.objects.context if ob != ob_act] - - for ob, m in mats: - ob_copy = ob_act.copy() - sce.objects.link(ob_copy) - ob_copy.setMatrix(m) - ob_copy.Layers = ob.Layers & (1<<20)-1 - - -def main(): - sce = bpy.data.scenes.active - - Window.WaitCursor(1) - my_object_util(sce) - Window.WaitCursor(0) - -if __name__ == '__main__': - main() diff --git a/release/scripts/object_apply_def.py b/release/scripts/object_apply_def.py deleted file mode 100644 index 006e97463d8..00000000000 --- a/release/scripts/object_apply_def.py +++ /dev/null @@ -1,178 +0,0 @@ -#!BPY - -""" -Name: 'Apply Deformation' -Blender: 242 -Group: 'Object' -Tooltip: 'Make copys of all the selected objects with modifiers, softbodies and fluid baked into a mesh' -""" - -__author__ = "Martin Poirier (theeth), Jean-Michel Soler (jms), Campbell Barton (ideasman)" -# This script is the result of merging the functionalities of two other: -# Martin Poirier's Apply_Def.py and -# Jean-Michel Soler's Fix From Everything - -__url__ = ("http://www.blender.org", "http://blenderartists.org", "http://jmsoler.free.fr") -__version__ = "1.6 07/07/2006" - -__bpydoc__ = """\ -This script creates "raw" copies of deformed meshes. - -Usage: - -Select any number of Objects and run this script. A fixed copy of each selected object -will be created, with the word "_def" appended to its name. If an object with -the same name already exists, it appends a number at the end as Blender itself does. - -Objects in Blender can be deformed by armatures, lattices, curve objects and subdivision, -but this will only change its appearance on screen and rendered -images -- the actual mesh data is still simpler, with vertices in an original -"rest" position and less vertices than the subdivided version. - -Use this script if you want a "real" version of the deformed mesh, so you can -directly manipulate or export its data. - -This script will work with object types: Mesh, Metaballs, Text3d, Curves and Nurbs Surface. -""" - - -# $Id$ -# -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Copyright (C) 2003: Martin Poirier, theeth@yahoo.com -# -# Thanks to Jonathan Hudson for help with the vertex groups part -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
-# -# ***** END GPL LICENCE BLOCK ***** - - -import Blender -import bpy -import BPyMesh - -def copy_vgroups(source_ob, target_ob): - - source_me = source_ob.getData(mesh=1) - - vgroups= source_me.getVertGroupNames() - if vgroups: - ADD= Blender.Mesh.AssignModes.ADD - target_me = target_ob.getData(mesh=1) - for vgroupname in vgroups: - target_me.addVertGroup(vgroupname) - if len(target_me.verts) == len(source_me.verts): - try: # in rare cases this can raise an 'no deform groups assigned to mesh' error - vlist = source_me.getVertsFromGroup(vgroupname, True) - except: - vlist = [] - - try: - for vpair in vlist: - target_me.assignVertsToGroup(vgroupname, [vpair[0]], vpair[1], ADD) - except: - pass - - -def apply_deform(): - scn= bpy.data.scenes.active - #Blender.Window.EditMode(0) - - NAME_LENGTH = 19 - SUFFIX = "_def" - SUFFIX_LENGTH = len(SUFFIX) - # Get all object and mesh names - - - ob_list = list(scn.objects.context) - ob_act = scn.objects.active - - # Assume no soft body - has_sb= False - - # reverse loop so we can remove objects (metaballs in this case) - for ob_idx in xrange(len(ob_list)-1, -1, -1): - ob= ob_list[ob_idx] - - ob.sel = 0 # deselect while where checking the metaballs - - # Test for a softbody - if not has_sb and ob.isSB(): - has_sb= True - - # Remove all numbered metaballs because their disp list is only on the main metaball (un numbered) - if ob.type == 'MBall': - name= ob.name - # is this metaball numbered? - dot_idx= name.rfind('.') + 1 - if name[dot_idx:].isdigit(): - # Not the motherball, ignore it. - del ob_list[ob_idx] - - - if not ob_list: - Blender.Draw.PupMenu('No objects selected, nothing to do.') - return - - - if has_sb: - curframe=Blender.Get('curframe') - for f in xrange(curframe): - Blender.Set('curframe',f+1) - Blender.Window.RedrawAll() - - used_names = [ob.name for ob in Blender.Object.Get()] - used_names.extend(Blender.NMesh.GetNames()) - - - deformedList = [] - for ob in ob_list: - - # Get the mesh data - new_me= BPyMesh.getMeshFromObject(ob, vgroups=False) - - if not new_me: - continue # Object has no display list - - - name = ob.name - new_name = "%s_def" % name[:NAME_LENGTH-SUFFIX_LENGTH] - num = 0 - - while new_name in used_names: - new_name = "%s_def.%.3i" % (name[:NAME_LENGTH-(SUFFIX_LENGTH+SUFFIX_LENGTH)], num) - num += 1 - used_names.append(new_name) - - new_me.name= new_name - - new_ob= scn.objects.new(new_me) - new_ob.setMatrix(ob.matrixWorld) - - # Make the active duplicate also active - if ob == ob_act: - scn.objects.active = new_ob - - # Original object was a mesh? see if we can copy any vert groups. - if ob.type =='Mesh': - copy_vgroups(ob, new_ob) - - Blender.Window.RedrawAll() - -if __name__=='__main__': - apply_deform() diff --git a/release/scripts/object_batch_name_edit.py b/release/scripts/object_batch_name_edit.py deleted file mode 100644 index 4db3a6210db..00000000000 --- a/release/scripts/object_batch_name_edit.py +++ /dev/null @@ -1,274 +0,0 @@ -#!BPY -""" -Name: 'Batch Object Name Edit' -Blender: 240 -Group: 'Object' -Tooltip: 'Apply the chosen rule to rename all selected objects at once.' -""" -__author__ = "Campbell Barton" -__url__ = ("blender", "blenderartists.org") -__version__ = "1.0" - -__bpydoc__ = """\ -"Batch Object Name Edit" allows you to change multiple names of Blender -objects at once. It provides options to define if you want to: replace text -in the current names, truncate their beginnings or endings or prepend / append -strings to them. 
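apply_deform() above names each copy by truncating to the 2.4x name-length limit, appending "_def", and counting up with a ".%.3i" suffix until the name is free. That collision loop is easy to exercise on its own; the helper below mirrors it and is only an illustration:

NAME_LENGTH = 19      # Blender 2.4x name-length limit, as in the script
SUFFIX = '_def'

def unique_def_name(name, used_names):
    # name -> name_def, then name_def.000, name_def.001, ... until unused,
    # truncating so the result stays within NAME_LENGTH as the script does.
    new_name = name[:NAME_LENGTH - len(SUFFIX)] + SUFFIX
    num = 0
    while new_name in used_names:
        new_name = '%s.%.3i' % (name[:NAME_LENGTH - len(SUFFIX) * 2] + SUFFIX, num)
        num += 1
    return new_name

if __name__ == '__main__':
    used = {'Cube_def', 'Cube_def.000'}
    print(unique_def_name('Cube', used))   # Cube_def.001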
- -Usage: -Select the objects to be renamed and run this script from the Object->Scripts -menu of the 3d View. -""" -# $Id$ -# -# -------------------------------------------------------------------------- -# Batch Name Edit by Campbell Barton (AKA Ideasman) -# -------------------------------------------------------------------------- -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- -from Blender import * -import bpy - -global renameCount -renameCount = 0 -obsel = [ob for ob in bpy.data.scenes.active.objects.context if not ob.lib] - -def setDataNameWrapper(ob, newname): - if ob.getData(name_only=1) == newname: - return False - - data= ob.getData(mesh=1) - - if data and not data.lib: - data.name= newname - return True - return False - -def main(): - global renameCount - # Rename the datablocks that are used by the object. - def renameLinkedDataFromObject(): - - # Result 1, we want to rename data - for ob in obsel: - if ob.name == ob.getData(name_only=1): - return # Alredy the same name, dont bother. - - data = ob.getData(mesh=1) # use mesh so we dont have to update the nmesh. 
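`setDataNameWrapper()` above renames an object's datablock to the object's name, but only when the datablock exists, is local (not linked from a library), and is not already named correctly. Translated to the current bpy API the same guard might look like the sketch below (not part of this patch; `rename_data_to_objects` is an illustrative name):

    import bpy

    def rename_data_to_objects(objects):
        """Give each object's datablock its object's name, skipping linked libraries."""
        renamed = 0
        for ob in objects:
            data = ob.data
            if data is None or data.library is not None:
                continue                  # no datablock (empties) or linked from a library
            if data.name != ob.name:
                data.name = ob.name
                renamed += 1
        return renamed

    print("renamed: %d datablocks" % rename_data_to_objects(bpy.context.selected_objects))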
- if data and not data.lib: - data.name = ob.name - - - def new(): - global renameCount - NEW_NAME_STRING = Draw.Create('') - RENAME_LINKED = Draw.Create(0) - pup_block = [\ - ('New Name: ', NEW_NAME_STRING, 19, 19, 'New Name'),\ - ('Rename ObData', RENAME_LINKED, 'Renames objects data to match the obname'),\ - ] - - if not Draw.PupBlock('Replace in name...', pup_block): - return 0 - - NEW_NAME_STRING= NEW_NAME_STRING.val - - Window.WaitCursor(1) - for ob in obsel: - if ob.name != NEW_NAME_STRING: - ob.name = NEW_NAME_STRING - renameCount+=1 - - return RENAME_LINKED.val - - def replace(): - global renameCount - REPLACE_STRING = Draw.Create('') - WITH_STRING = Draw.Create('') - RENAME_LINKED = Draw.Create(0) - - pup_block = [\ - ('Replace: ', REPLACE_STRING, 19, 19, 'Text to find'),\ - ('With:', WITH_STRING, 19, 19, 'Text to replace with'),\ - ('Rename ObData', RENAME_LINKED, 'Renames objects data to match the obname'),\ - ] - - if not Draw.PupBlock('Replace in name...', pup_block) or\ - ((not REPLACE_STRING.val) and (not WITH_STRING)): - return 0 - - REPLACE_STRING = REPLACE_STRING.val - WITH_STRING = WITH_STRING.val - - Window.WaitCursor(1) - for ob in obsel: - newname = ob.name.replace(REPLACE_STRING, WITH_STRING) - if ob.name != newname: - ob.name = newname - renameCount+=1 - return RENAME_LINKED.val - - - def prefix(): - global renameCount - PREFIX_STRING = Draw.Create('') - RENAME_LINKED = Draw.Create(0) - - pup_block = [\ - ('Prefix: ', PREFIX_STRING, 19, 19, 'Name prefix'),\ - ('Rename ObData', RENAME_LINKED, 'Renames objects data to match the obname'),\ - ] - - if not Draw.PupBlock('Prefix...', pup_block) or\ - not PREFIX_STRING.val: - return 0 - - PREFIX_STRING = PREFIX_STRING.val - - Window.WaitCursor(1) - for ob in obsel: - ob.name = PREFIX_STRING + ob.name - renameCount+=1 # we knows these are different. - return RENAME_LINKED.val - - def suffix(): - global renameCount - SUFFIX_STRING = Draw.Create('') - RENAME_LINKED = Draw.Create(0) - - pup_block = [\ - ('Suffix: ', SUFFIX_STRING, 19, 19, 'Name suffix'),\ - ('Rename ObData', RENAME_LINKED, 'Renames objects data to match the obname'),\ - ] - - if not Draw.PupBlock('Suffix...', pup_block) or\ - not SUFFIX_STRING.val: - return 0 - - SUFFIX_STRING = SUFFIX_STRING.val - - Window.WaitCursor(1) - for ob in obsel: - ob.name = ob.name + SUFFIX_STRING - renameCount+=1 # we knows these are different. 
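Each of the popups above boils down to a pure string rule applied to every selected object's name; only the dialog differs. The sketch below expresses those rules as small factories, which also makes them easy to test outside Blender (plain Python; the factory names are illustrative):

    def make_replace(find, repl):   return lambda name: name.replace(find, repl)
    def make_prefix(prefix):        return lambda name: prefix + name
    def make_suffix(suffix):        return lambda name: name + suffix
    def make_trunc_start(chars):    return lambda name: name[chars:]
    def make_trunc_end(chars):      return lambda name: name[:-chars] if chars else name

    rule = make_replace("Cube", "Crate")
    print([rule(n) for n in ["Cube", "Cube.001", "Lamp"]])   # ['Crate', 'Crate.001', 'Lamp']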
- return RENAME_LINKED.val - - def truncate_start(): - global renameCount - TRUNCATE_START = Draw.Create(0) - RENAME_LINKED = Draw.Create(0) - - pup_block = [\ - ('Truncate Start: ', TRUNCATE_START, 0, 19, 'Truncate chars from the start of the name'),\ - ('Rename ObData', RENAME_LINKED, 'Renames objects data to match the obname'),\ - ] - - if not Draw.PupBlock('Truncate Start...', pup_block) or\ - not TRUNCATE_START.val: - return 0 - - Window.WaitCursor(1) - TRUNCATE_START = TRUNCATE_START.val - for ob in obsel: - newname = ob.name[TRUNCATE_START: ] - ob.name = newname - renameCount+=1 - - return RENAME_LINKED.val - - def truncate_end(): - global renameCount - TRUNCATE_END = Draw.Create(0) - RENAME_LINKED = Draw.Create(0) - - pup_block = [\ - ('Truncate End: ', TRUNCATE_END, 0, 19, 'Truncate chars from the end of the name'),\ - ('Rename ObData', RENAME_LINKED, 'Renames objects data to match the obname'),\ - ] - - if not Draw.PupBlock('Truncate End...', pup_block) or\ - not TRUNCATE_END.val: - return 0 - - Window.WaitCursor(1) - TRUNCATE_END = TRUNCATE_END.val - for ob in obsel: - newname = ob.name[: -TRUNCATE_END] - ob.name = newname - renameCount+=1 - - return RENAME_LINKED.val - - def renameObjectFromLinkedData(): - global renameCount - Window.WaitCursor(1) - - for ob in obsel: - newname = ob.getData(name_only=1) - if newname != None and ob.name != newname: - ob.name = newname - renameCount+=1 - return 0 - - def renameObjectFromDupGroup(): - global renameCount - Window.WaitCursor(1) - - for ob in obsel: - group= ob.DupGroup - if group != None: - newname= group.name - if newname != ob.name: - ob.name = newname - renameCount+=1 - return 0 - - def renameLinkedDataFromObject(): - global renameCount - Window.WaitCursor(1) - - for ob in obsel: - if setDataNameWrapper(ob, ob.name): - renameCount+=1 - return 0 - - name = "Selected Object Names%t|New Name|Replace Text|Add Prefix|Add Suffix|Truncate Start|Truncate End|Rename Objects to Data Names|Rename Objects to DupGroup Names|Rename Data to Object Names" - result = Draw.PupMenu(name) - renLinked = 0 # Rename linked data to the object name? - if result == -1: - return - elif result == 1: renLinked= new() - elif result == 2: renLinked= replace() - elif result == 3: renLinked= prefix() - elif result == 4: renLinked= suffix() - elif result == 5: renLinked= truncate_start() - elif result == 6: renLinked= truncate_end() - elif result == 7: renameObjectFromLinkedData() - elif result == 8: renameObjectFromDupGroup() - elif result == 9: renameLinkedDataFromObject() - - if renLinked: - renameLinkedDataFromObject() - - Window.WaitCursor(0) - - Draw.PupMenu('renamed: %d objects.' % renameCount) - -if __name__=='__main__': - main() diff --git a/release/scripts/object_cookie_cutter.py b/release/scripts/object_cookie_cutter.py deleted file mode 100644 index 4950c18c0f4..00000000000 --- a/release/scripts/object_cookie_cutter.py +++ /dev/null @@ -1,667 +0,0 @@ -#!BPY -""" -Name: 'Cookie Cut from View' -Blender: 234 -Group: 'Object' -Tooltip: 'Cut from the view axis, (Sel 3d Curves and Meshes (only edges) into other meshes with faces)' -""" -__author__= "Campbell Barton" -__url__= ["blender", "blenderartist"] -__version__= "1.0" - -__bpydoc__= """\ -This script takes the selected mesh objects, divides them into 2 groups -Cutters and The objects to be cut. - -Cutters are meshes with no faces, just edge loops. and any meshes with faces will be cut. - -Usage: - -Select 2 or more meshes, one with no faces (a closed polyline) and one with faces to cut. 
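The `elif` chain in the batch-rename script above maps the popup result to a handler and then optionally syncs datablock names afterwards. A table-driven dispatch expresses the same control flow; the sketch below is self-contained, so stub handlers stand in for the script's real `new()`, `replace()` and `renameLinkedDataFromObject()`:

    def new():          print("new name");     return 1   # 1 means: also rename linked data
    def replace():      print("replace text"); return 0
    def rename_data():  print("rename data");  return 0

    HANDLERS = {1: new, 2: replace, 9: rename_data}

    def dispatch(result):
        handler = HANDLERS.get(result)
        ren_linked = handler() if handler else 0
        if ren_linked:
            rename_data()

    dispatch(1)   # runs new(), then rename_data() because new() returned 1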
- -Align the view on the axis you want to cut. -For shapes that have overlapping faces (from the view), hide any backfacing faces so they will be ignored during the cut. -Run the script. - -You can choose to make the cut verts lie on the face that they were cut from or on the edge that cut them. -This script supports UV coordinates and images. -""" - -# ***** BEGIN GPL LICENSE BLOCK ***** -# -# Script copyright (C) Campbell Barton -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software Foundation, -# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. -# -# ***** END GPL LICENCE BLOCK ***** -# -------------------------------------------------------------------------- - -import Blender -from math import sqrt -import BPyMesh -Vector= Blender.Mathutils.Vector -LineIntersect2D= Blender.Geometry.LineIntersect2D -PointInTriangle2D= Blender.Geometry.PointInTriangle2D - -# Auto class -def auto_class(slots): - exec('class container_class(object): __slots__=%s' % slots) - return container_class - - -bignum= 1<<30 -def bounds_xy(iter_item): - ''' - Works with types - MMesh.verts - MFace - MEdge - ''' - xmin= ymin= bignum - xmax= ymax= -bignum - for v in iter_item: - x= v.co.x - y= v.co.y - if x<xmin: xmin= x - if y<ymin: ymin= y - if x>xmax: xmax= x - if y>ymax: ymax= y - - return xmin, ymin, xmax, ymax - -def bounds_intersect(a,b): - ''' - each tuple is - xmin, ymin, xmax, ymax - ''' - if\ - a[0]>b[2] or\ - a[1]>b[3] or\ - a[2]<b[0] or\ - a[3]<b[1]: - return False - return True - -def sorted_edge_indicies(ed): - i1= ed.v1.index - i2= ed.v2.index - if i1>i2: - i1,i2= i2,i1 - return i1, i2 - -def sorted_indicies(i1, i2): - if i1>i2: - i1,i2= i2,i1 - return i1, i2 - -def fake_length2d(pt1, pt2): - ''' - Only used for comparison so don't sqrt - ''' - #return math.sqrt(abs(pow(x1-x2, 2)+ pow(y1-y2, 2))) - return pow(pt1[0]-pt2[0], 2) + pow(pt1[1]- pt2[1], 2) - -def length2d(pt1, pt2): - ''' - Only used for comparison so don't sqrt - ''' - #return math.sqrt(abs(pow(x1-x2, 2)+ pow(y1-y2, 2))) - return sqrt(pow(pt1[0]-pt2[0], 2) + pow(pt1[1]- pt2[1], 2)) - - - -def tri_area_2d(v1, v2, v3): - e1 = length2d(v1, v2) - e2 = length2d(v2, v3) - e3 = length2d(v3, v1) - p = e1+e2+e3 - return 0.25 * sqrt(abs(p*(p-2*e1)*(p-2*e2)*(p-2*e3))) - -def tri_pt_find_z_2d(pt, tri): - """ Takes a face and 3d vector and assigns the vectors Z to its on the face""" - - l1= tri_area_2d(tri[1], tri[2], pt) - l2= tri_area_2d(tri[0], tri[2], pt) - l3= tri_area_2d(tri[0], tri[1], pt) - - tot= l1+l2+l3 - # Normalize - l1=l1/tot - l2=l2/tot - l3=l3/tot - - z1= tri[0].z*l1 - z2= tri[1].z*l2 - z3= tri[2].z*l3 - - return z1+z2+z3 - - -def tri_pt_find_uv_2d(pt, tri, uvs): - """ Takes a face and 3d vector and assigns the vectors Z to its on the face""" - - l1= tri_area_2d(tri[1], tri[2], pt) - l2= tri_area_2d(tri[0], tri[2], pt) - l3= tri_area_2d(tri[0], tri[1], pt) - - tot= l1+l2+l3 - if not tot: # No area, just return the first uv - return Vector(uvs[0]) - - # Normalize - l1=l1/tot - l2=l2/tot - l3=l3/tot - - uv1= uvs[0]*l1 - uv2= uvs[1]*l2 - uv3= uvs[2]*l3 - - return uv1+uv2+uv3 - - - - -def mesh_edge_dict(me): - ed_dict= {}
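`tri_pt_find_z_2d()` above interpolates a Z value with barycentric weights obtained from sub-triangle areas, where `tri_area_2d()` is Heron's formula written in terms of the full perimeter. A self-contained check of the same idea on plain tuples follows (the deleted code does the equivalent on Blender `Vector`s):

    from math import sqrt

    def tri_area_2d(v1, v2, v3):
        # Heron's formula, as in the removed helper (p is the full perimeter)
        e1 = sqrt((v1[0] - v2[0]) ** 2 + (v1[1] - v2[1]) ** 2)
        e2 = sqrt((v2[0] - v3[0]) ** 2 + (v2[1] - v3[1]) ** 2)
        e3 = sqrt((v3[0] - v1[0]) ** 2 + (v3[1] - v1[1]) ** 2)
        p = e1 + e2 + e3
        return 0.25 * sqrt(abs(p * (p - 2 * e1) * (p - 2 * e2) * (p - 2 * e3)))

    def tri_pt_find_z_2d(pt, tri):
        # weight each corner's Z by the area of the sub-triangle opposite to it
        l1 = tri_area_2d(tri[1], tri[2], pt)
        l2 = tri_area_2d(tri[0], tri[2], pt)
        l3 = tri_area_2d(tri[0], tri[1], pt)
        tot = l1 + l2 + l3
        return (tri[0][2] * l1 + tri[1][2] * l2 + tri[2][2] * l3) / tot

    tri = ((0.0, 0.0, 0.0), (2.0, 0.0, 2.0), (0.0, 2.0, 4.0))
    print(tri_pt_find_z_2d((0.5, 0.5), tri))   # ~1.5, since z = x + 2*y on this plane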
- for f in me.faces: - if not f.hide: - for edkey in f.edge_keys: - ed_dict.setdefault(edkey, []).append(f) - - return ed_dict - - - -def terrain_cut_2d(t, c, PREF_Z_LOC): - ''' - t is the terrain - c is the cutter - - PREF_Z_LOC: 0 - from terrain face - 1 - from cutter edge - - returns nothing - ''' - - # do we have a 2d intersection - if not bounds_intersect(t.bounds, c.bounds): - return - - # Local vars - me_t= t.mesh - me_c= c.mesh - - has_uv= me_t.faceUV - - Blender.Mesh.Mode(Blender.Mesh.SelectModes['VERTEX']) - ''' - first assign a face terrain face for each cutter verticie - ''' - cut_verts_temp= list(me_c.verts) - cut_vert_terrain_faces= [None] * len(me_c.verts) - vert_z_level= [-10.0] * len(me_c.verts) - - for v in me_c.verts: - v_index= v.index - v_co= v.co - for fidx, f in enumerate(me_t.faces): - if not f.hide: - if point_in_bounds(v_co, t.face_bounds[fidx]): - f_v= [vv.co for vv in f] - if point_in_poly2d(v_co, f_v): - - - if PREF_Z_LOC==0: - ''' - Get the z location from the face. - ''' - - if len(f_v)==3: - vert_z_level[v_index]= tri_pt_find_z_2d(v_co, (f_v[0], f_v[1], f_v[2]) ) - else: - # Quad, which side are we on? - a1= tri_area_2d(f_v[0], f_v[1], v_co) - a2= tri_area_2d(f_v[1], f_v[2], v_co) - - a3= tri_area_2d(f_v[0], f_v[1], f_v[2]) - - if a1+a2