Welcome to mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender-addons.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorNathan Letwory <nathan@letworyinteractive.com>2011-07-09 20:32:38 +0400
committerNathan Letwory <nathan@letworyinteractive.com>2011-07-09 20:32:38 +0400
commitd9275e4abffc6060dc07bb87382995cc27f68598 (patch)
treea0eb4d62558593929d40e91a1c0d2df21ddb89d2
Tag for 2.58av2.58a
-rw-r--r--add_curve_aceous_galore.py1147
-rw-r--r--add_curve_torus_knots.py247
-rw-r--r--add_mesh_BoltFactory/Boltfactory.py311
-rw-r--r--add_mesh_BoltFactory/__init__.py61
-rw-r--r--add_mesh_BoltFactory/createMesh.py2195
-rw-r--r--add_mesh_BoltFactory/preset_utils.py53
-rw-r--r--add_mesh_BoltFactory/presets/M10.py22
-rw-r--r--add_mesh_BoltFactory/presets/M12.py22
-rw-r--r--add_mesh_BoltFactory/presets/M3.py22
-rw-r--r--add_mesh_BoltFactory/presets/M4.py22
-rw-r--r--add_mesh_BoltFactory/presets/M5.py22
-rw-r--r--add_mesh_BoltFactory/presets/M6.py22
-rw-r--r--add_mesh_BoltFactory/presets/M8.py22
-rw-r--r--add_mesh_ant_landscape.py822
-rw-r--r--add_mesh_extra_objects/__init__.py134
-rw-r--r--add_mesh_extra_objects/add_mesh_3d_function_surface.py617
-rw-r--r--add_mesh_extra_objects/add_mesh_extra_objects.py492
-rw-r--r--add_mesh_extra_objects/add_mesh_gears.py802
-rw-r--r--add_mesh_extra_objects/add_mesh_gemstones.py333
-rw-r--r--add_mesh_extra_objects/add_mesh_twisted_torus.py253
-rw-r--r--add_mesh_pipe_joint.py1150
-rw-r--r--add_mesh_solid.py607
-rw-r--r--animation_add_corrective_shape_key.py504
-rw-r--r--animation_animall.py290
-rw-r--r--animation_rotobezier.py382
-rw-r--r--curve_simplify.py597
-rw-r--r--development_api_navigator.py715
-rw-r--r--development_icon_get.py243
-rw-r--r--game_engine_save_as_runtime.py227
-rw-r--r--io_anim_bvh/__init__.py154
-rw-r--r--io_anim_bvh/export_bvh.py281
-rw-r--r--io_anim_bvh/import_bvh.py552
-rw-r--r--io_anim_camera.py170
-rw-r--r--io_coat3D/__init__.py244
-rw-r--r--io_coat3D/coat.py650
-rw-r--r--io_coat3D/tex.py409
-rw-r--r--io_convert_image_to_mesh_img/__init__.py132
-rw-r--r--io_convert_image_to_mesh_img/import_img.py786
-rw-r--r--io_curve_svg/__init__.py84
-rw-r--r--io_curve_svg/import_svg.py1831
-rw-r--r--io_curve_svg/svg_colors.py172
-rw-r--r--io_export_directx_x.py1251
-rw-r--r--io_export_pc2.py203
-rw-r--r--io_export_unreal_psk_psa.py2331
-rw-r--r--io_import_gimp_image_to_scene.py683
-rw-r--r--io_import_images_as_planes.py352
-rw-r--r--io_import_scene_dxf.py2526
-rw-r--r--io_import_scene_lwo.py1256
-rw-r--r--io_import_scene_mhx.py2718
-rw-r--r--io_import_scene_unreal_psk.py794
-rw-r--r--io_mesh_ply/__init__.py134
-rw-r--r--io_mesh_ply/export_ply.py203
-rw-r--r--io_mesh_ply/import_ply.py337
-rw-r--r--io_mesh_raw/__init__.py68
-rw-r--r--io_mesh_raw/export_raw.py112
-rw-r--r--io_mesh_raw/import_raw.py145
-rw-r--r--io_mesh_stl/__init__.py163
-rw-r--r--io_mesh_stl/blender_utils.py80
-rw-r--r--io_mesh_stl/stl_utils.py264
-rw-r--r--io_mesh_uv_layout/__init__.py198
-rw-r--r--io_mesh_uv_layout/export_uv_eps.py84
-rw-r--r--io_mesh_uv_layout/export_uv_png.py149
-rw-r--r--io_mesh_uv_layout/export_uv_svg.py64
-rw-r--r--io_scene_3ds/__init__.py167
-rw-r--r--io_scene_3ds/export_3ds.py1062
-rw-r--r--io_scene_3ds/import_3ds.py935
-rw-r--r--io_scene_fbx/__init__.py168
-rw-r--r--io_scene_fbx/export_fbx.py2865
-rw-r--r--io_scene_m3/__init__.py97
-rw-r--r--io_scene_m3/import_m3.py367
-rw-r--r--io_scene_map/__init__.py93
-rw-r--r--io_scene_map/export_map.py472
-rw-r--r--io_scene_obj/__init__.py259
-rw-r--r--io_scene_obj/export_obj.py791
-rw-r--r--io_scene_obj/import_obj.py1148
-rw-r--r--io_scene_x3d/__init__.py167
-rw-r--r--io_scene_x3d/export_x3d.py1346
-rw-r--r--io_scene_x3d/import_x3d.py2656
-rw-r--r--io_shape_mdd/__init__.py138
-rw-r--r--io_shape_mdd/export_mdd.py130
-rw-r--r--io_shape_mdd/import_mdd.py102
-rw-r--r--light_field_tools/__init__.py119
-rw-r--r--light_field_tools/light_field_tools.py432
-rw-r--r--mesh_bsurfaces.py857
-rw-r--r--mesh_inset/__init__.py203
-rw-r--r--mesh_inset/geom.py719
-rw-r--r--mesh_inset/model.py575
-rw-r--r--mesh_inset/offset.py755
-rw-r--r--mesh_inset/triquad.py1172
-rw-r--r--mesh_looptools.py3710
-rw-r--r--mesh_relax.py132
-rw-r--r--modules/add_utils.py141
-rw-r--r--modules/constants_utils.py34
-rw-r--r--modules/cursor_utils.py61
-rw-r--r--modules/extensions_framework/__init__.py371
-rw-r--r--modules/extensions_framework/ui.py337
-rw-r--r--modules/extensions_framework/util.py232
-rw-r--r--modules/extensions_framework/validate.py213
-rw-r--r--modules/geometry_utils.py215
-rw-r--r--modules/misc_utils.py77
-rw-r--r--modules/ui_utils.py51
-rw-r--r--netrender/__init__.py81
-rw-r--r--netrender/balancing.py195
-rw-r--r--netrender/client.py376
-rw-r--r--netrender/master.py1064
-rw-r--r--netrender/master_html.py316
-rw-r--r--netrender/model.py360
-rw-r--r--netrender/netrender.css88
-rw-r--r--netrender/netrender.js146
-rw-r--r--netrender/operators.py570
-rw-r--r--netrender/repath.py150
-rw-r--r--netrender/slave.py359
-rw-r--r--netrender/thumbnail.py81
-rw-r--r--netrender/ui.py553
-rw-r--r--netrender/utils.py316
-rw-r--r--netrender/versioning.py108
-rw-r--r--object_add_chain.py154
-rw-r--r--object_animrenderbake.py189
-rw-r--r--object_cloud_gen.py753
-rw-r--r--object_fracture/__init__.py81
-rw-r--r--object_fracture/data.blendbin0 -> 253684 bytes
-rw-r--r--object_fracture/fracture_ops.py496
-rw-r--r--object_fracture/fracture_setup.py74
-rw-r--r--paint_palette.py700
-rw-r--r--render_povray/__init__.py531
-rw-r--r--render_povray/render.py2328
-rw-r--r--render_povray/ui.py639
-rw-r--r--render_povray/update_files.py595
-rw-r--r--render_renderfarmfi.py999
-rw-r--r--rigify/CREDITS17
-rw-r--r--rigify/README252
-rw-r--r--rigify/__init__.py166
-rw-r--r--rigify/generate.py428
-rw-r--r--rigify/metarig_menu.py58
-rw-r--r--rigify/metarigs/__init__.py0
-rw-r--r--rigify/metarigs/human.py1149
-rw-r--r--rigify/rig_ui_template.py570
-rw-r--r--rigify/rigs/__init__.py0
-rw-r--r--rigify/rigs/basic/__init__.py0
-rw-r--r--rigify/rigs/basic/copy.py142
-rw-r--r--rigify/rigs/basic/copy_chain.py210
-rw-r--r--rigify/rigs/biped/__init__.py0
-rw-r--r--rigify/rigs/biped/arm/__init__.py235
-rw-r--r--rigify/rigs/biped/arm/deform.py230
-rw-r--r--rigify/rigs/biped/arm/fk.py217
-rw-r--r--rigify/rigs/biped/arm/ik.py339
-rw-r--r--rigify/rigs/biped/leg/__init__.py272
-rw-r--r--rigify/rigs/biped/leg/deform.py263
-rw-r--r--rigify/rigs/biped/leg/fk.py255
-rw-r--r--rigify/rigs/biped/leg/ik.py608
-rw-r--r--rigify/rigs/finger.py412
-rw-r--r--rigify/rigs/misc/__init__.py0
-rw-r--r--rigify/rigs/misc/delta.py161
-rw-r--r--rigify/rigs/neck_short.py392
-rw-r--r--rigify/rigs/palm.py273
-rw-r--r--rigify/rigs/spine.py617
-rw-r--r--rigify/ui.py300
-rw-r--r--rigify/utils.py552
-rw-r--r--space_view3d_3d_navigation.py101
-rw-r--r--space_view3d_align_tools.py342
-rw-r--r--space_view3d_copy_attributes.py817
-rw-r--r--space_view3d_materials_utils.py713
-rw-r--r--space_view3d_math_vis/__init__.py108
-rw-r--r--space_view3d_math_vis/draw.py232
-rw-r--r--space_view3d_math_vis/utils.py68
-rw-r--r--space_view3d_panel_measure.py1136
-rw-r--r--space_view3d_spacebar_menu.py1528
-rw-r--r--system_blend_info.py211
-rw-r--r--system_demo_mode/__init__.py206
-rw-r--r--system_demo_mode/config.py74
-rw-r--r--system_demo_mode/demo_mode.py510
-rw-r--r--system_property_chart.py248
-rw-r--r--texture_paint_layer_manager.py645
173 files changed, 82644 insertions, 0 deletions
diff --git a/add_curve_aceous_galore.py b/add_curve_aceous_galore.py
new file mode 100644
index 00000000..b4df5408
--- /dev/null
+++ b/add_curve_aceous_galore.py
@@ -0,0 +1,1147 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
# Add-on metadata read by Blender's add-on manager.
bl_info = {
    "name": "Curveaceous Galore!",
    "author": "Jimmy Hazevoet, testscreenings",
    "version": (0, 2),
    "blender": (2, 5, 7),
    "api": 35853,
    "location": "View3D > Add > Curve",
    "description": "Adds many different types of Curves",
    "warning": "",  # used for warning icon and text in addons panel
    "wiki_url": ("http://wiki.blender.org/index.php/Extensions:2.5/Py/"
                 "Scripts/Curve/Curves_Galore"),
    "tracker_url": ("https://projects.blender.org/tracker/index.php?"
                    "func=detail&aid=22404"),
    "category": "Add Curve"}
+
+
+##------------------------------------------------------------
+#### import modules
+import bpy
+from bpy.props import *
+from mathutils import *
+from math import *
+import noise as Noise
+###------------------------------------------------------------
+#### Some functions to use with others:
+###------------------------------------------------------------
+
+#------------------------------------------------------------
+# Generate random number:
def randnum(low=0.0, high=1.0, seed=0):
    """
    randnum( low=0.0, high=1.0, seed=0 )

    Create a random number in [low, high).

    Parameters:
        low - lower range
            (type=float)
        high - higher range
            (type=float)
        seed - the random seed number; if seed is 0 the current time is
               used instead (behaviour of Noise.seed_set -- confirm against
               the mathutils.noise documentation)
            (type=int)
    Returns:
        a random number
        (type=float)
    """
    # Seed the noise generator, then scale a [0, 1) sample into [low, high).
    # (Original code bound seed_set's return value to an unused local.)
    Noise.seed_set(seed)
    return low + Noise.random() * (high - low)
+
+
+#------------------------------------------------------------
+# Make some noise:
def vTurbNoise(x, y, z, iScale=0.25, Size=1.0, Depth=6, Hard=0, Basis=0, Seed=0):
    """
    vTurbNoise( x, y, z, iScale=0.25, Size=1.0, Depth=6, Hard=0, Basis=0, Seed=0 )

    Create randomised vTurbulence noise.

    Parameters:
        x, y, z - coordinates to sample the noise field at
            (type=float)
        iScale - noise intensity scale
            (type=float)
        Size - noise size
            (type=float)
        Depth - number of noise values added
            (type=int)
        Hard - noise hardness: 0 - soft noise; 1 - hard noise
            (type=int)
        Basis - type of noise used for turbulence
            (type=int)
        Seed - the random seed number; if 0 the current time is used instead
            (type=int)
    Returns:
        the generated turbulence vector
        (type=3-float tuple)
    """
    # Seed-dependent offset so different seeds sample different regions.
    rand = randnum(-100, 100, Seed)
    # NOTE(review): basis 9 is remapped to 14 -- presumably a gap in the
    # noise-basis enumeration; confirm against the mathutils.noise docs.
    if Basis == 9:
        Basis = 14
    # (Original code also defined an unused local `sn = 0.001`.)
    vTurb = Noise.turbulence_vector((x / Size + rand, y / Size + rand,
                                     z / Size + rand), Depth, Hard, Basis)
    return vTurb[0] * iScale, vTurb[1] * iScale, vTurb[2] * iScale
+
+
+#------------------------------------------------------------
+# Axis: ( used in 3DCurve Turbulence )
def AxisFlip(x, y, z, x_axis=1, y_axis=1, z_axis=1, flip=0):
    """Mirror a point across the selected axes.

    Each coordinate is scaled by its axis factor; a nonzero ``flip``
    additionally negates (and scales by) the whole result.
    Returns an (x, y, z) tuple.
    """
    sign = -flip if flip != 0 else 1
    return (x * x_axis * sign,
            y * y_axis * sign,
            z * z_axis * sign)
+
+
+###-------------------------------------------------------------------
+#### 2D Curve shape functions:
+###-------------------------------------------------------------------
+
+##------------------------------------------------------------
+# 2DCurve: Profile: L, H, T, U, Z
def ProfileCurve(type=0, a=0.25, b=0.25):
    """
    ProfileCurve( type=0, a=0.25, b=0.25 )

    Create a letter-profile curve outline (L, H, T, U or Z).

    Parameters:
        type - profile selector: 1 H, 2 T, 3 U, 4 Z, anything else L
            (type=int)
        a - a scaling parameter
            (type=float)
        b - b scaling parameter
            (type=float)
    Returns:
        a list with lists of x,y,z coordinates for curve points,
        [[x,y,z],[x,y,z],...n]
        (type=list)
    """
    if type == 1:
        # H profile: both parameters are half-scaled.
        a *= 0.5
        b *= 0.5
        coords = [(-1.0, 1.0), (-1.0 + a, 1.0),
                  (-1.0 + a, b), (1.0 - a, b), (1.0 - a, 1.0),
                  (1.0, 1.0), (1.0, -1.0), (1.0 - a, -1.0),
                  (1.0 - a, -b), (-1.0 + a, -b), (-1.0 + a, -1.0),
                  (-1.0, -1.0)]
    elif type == 2:
        # T profile
        a *= 0.5
        coords = [(-1.0, 1.0), (1.0, 1.0),
                  (1.0, 1.0 - b), (a, 1.0 - b), (a, -1.0),
                  (-a, -1.0), (-a, 1.0 - b), (-1.0, 1.0 - b)]
    elif type == 3:
        # U profile
        a *= 0.5
        coords = [(-1.0, 1.0), (-1.0 + a, 1.0),
                  (-1.0 + a, -1.0 + b), (1.0 - a, -1.0 + b), (1.0 - a, 1.0),
                  (1.0, 1.0), (1.0, -1.0), (-1.0, -1.0)]
    elif type == 4:
        # Z profile
        a *= 0.5
        coords = [(-0.5, 1.0), (a, 1.0),
                  (a, -1.0 + b), (1.0, -1.0 + b), (1.0, -1.0),
                  (-a, -1.0), (-a, 1.0 - b), (-1.0, 1.0 - b),
                  (-1.0, 1.0)]
    else:
        # L profile (default)
        coords = [(-1.0, 1.0), (-1.0 + a, 1.0),
                  (-1.0 + a, -1.0 + b), (1.0, -1.0 + b),
                  (1.0, -1.0), (-1.0, -1.0)]
    return [[x, y, 0.0] for x, y in coords]
+
+##------------------------------------------------------------
+# 2DCurve: Miscellaneous.: Diamond, Arrow1, Arrow2, Square, ....
def MiscCurve(type=1, a=1.0, b=0.5, c=1.0):
    """
    MiscCurve( type=1, a=1.0, b=0.5, c=1.0 )

    Create a miscellaneous 2D curve outline.

    Parameters:
        type - shape selector: 1 diamond, 2 arrow, 3 arrow II,
               4 rounded square, 5 rounded rectangle II, other: square
            (type=int)
        a - a (width) scaling parameter
            (type=float)
        b - b (height) scaling parameter
            (type=float)
        c - c scaling parameter; only type 5 uses it (corner radius)
            (type=float)
    Returns:
        a list with lists of x,y,z coordinates for curve points,
        [[x,y,z],[x,y,z],...n]
        (type=list)
    """
    a *= 0.5
    b *= 0.5
    if type == 1:
        # Diamond
        pts = [[0.0, b, 0.0], [a, 0.0, 0.0], [0.0, -b, 0.0], [-a, 0.0, 0.0]]
    elif type == 2:
        # Arrow 1
        pts = [[-a, b, 0.0], [a, 0.0, 0.0], [-a, -b, 0.0], [0.0, 0.0, 0.0]]
    elif type == 3:
        # Arrow 2
        pts = [[-1.0, b, 0.0], [-1.0 + a, b, 0.0],
               [-1.0 + a, 1.0, 0.0], [1.0, 0.0, 0.0],
               [-1.0 + a, -1.0, 0.0], [-1.0 + a, -b, 0.0],
               [-1.0, -b, 0.0]]
    elif type == 4:
        # Rounded square
        pts = [[-a, b - b * 0.2, 0.0], [-a + a * 0.05, b - b * 0.05, 0.0], [-a + a * 0.2, b, 0.0],
               [a - a * 0.2, b, 0.0], [a - a * 0.05, b - b * 0.05, 0.0], [a, b - b * 0.2, 0.0],
               [a, -b + b * 0.2, 0.0], [a - a * 0.05, -b + b * 0.05, 0.0], [a - a * 0.2, -b, 0.0],
               [-a + a * 0.2, -b, 0.0], [-a + a * 0.05, -b + b * 0.05, 0.0], [-a, -b + b * 0.2, 0.0]]
    elif type == 5:
        # Rounded rectangle II: corner radius clamped just inside the
        # half-extents (the two clamps are applied in sequence on purpose).
        x = a / 2
        y = b / 2
        r = c / 2
        if r > x:
            r = x - 0.0001
        if r > y:
            r = y - 0.0001
        if r > 0:
            pts = [[-x + r, y, 0], [x - r, y, 0],
                   [x, y - r, 0], [x, -y + r, 0],
                   [x - r, -y, 0], [-x + r, -y, 0],
                   [-x, -y + r, 0], [-x, y - r, 0]]
        else:
            # Degenerate radius: plain rectangle.
            pts = [[-x, y, 0], [x, y, 0], [x, -y, 0], [-x, -y, 0]]
    else:
        # Square (default)
        pts = [[-a, b, 0.0], [a, b, 0.0], [a, -b, 0.0], [-a, -b, 0.0]]
    return pts
+
+##------------------------------------------------------------
+# 2DCurve: Star:
def StarCurve(starpoints=8, innerradius=0.5, outerradius=1.0, twist=0.0):
    """
    StarCurve( starpoints=8, innerradius=0.5, outerradius=1.0, twist=0.0 )

    Create a star shaped curve.

    Parameters:
        starpoints - the number of points
            (type=int)
        innerradius - inner radius
            (type=float)
        outerradius - outer radius
            (type=float)
        twist - twist amount
            (type=float)
    Returns:
        a list with lists of x,y,z coordinates for curve points,
        [[x,y,z],[x,y,z],...n]
        (type=list)
    """
    step = 2.0 / starpoints
    # The inner vertex sits halfway between two outer points, plus twist.
    inner_offset = pi / starpoints + twist
    pts = []
    for k in range(starpoints):
        t = k * step * pi
        pts.append([cos(t) * outerradius, sin(t) * outerradius, 0])
        pts.append([cos(t + inner_offset) * innerradius,
                    sin(t + inner_offset) * innerradius, 0])
    return pts
+
+##------------------------------------------------------------
+# 2DCurve: Flower:
def FlowerCurve(petals=8, innerradius=0.5, outerradius=1.0, petalwidth=2.0):
    """
    FlowerCurve( petals=8, innerradius=0.5, outerradius=1.0, petalwidth=2.0 )

    Create a flower shaped curve.

    Parameters:
        petals - the number of petals
            (type=int)
        innerradius - inner radius
            (type=float)
        outerradius - outer radius
            (type=float)
        petalwidth - width of petals
            (type=float)
    Returns:
        a list with lists of x,y,z coordinates for curve points,
        [[x,y,z],[x,y,z],...n]
        (type=list)
    """
    step = 2.0 / petals
    # Angular half-width of one petal, scaled by the user parameter.
    pet = (step / pi * 2) * petalwidth
    pts = []
    for k in range(petals):
        t = k * step
        # Notch between petals (inner radius), then the petal's two
        # outer shoulders.
        pts.append([cos(t * pi - (pi / petals)) * innerradius,
                    sin(t * pi - (pi / petals)) * innerradius, 0])
        pts.append([cos(t * pi - pet) * outerradius,
                    sin(t * pi - pet) * outerradius, 0])
        pts.append([cos(t * pi + pet) * outerradius,
                    sin(t * pi + pet) * outerradius, 0])
    return pts
+
+##------------------------------------------------------------
+# 2DCurve: Arc,Sector,Segment,Ring:
def ArcCurve(sides=6, startangle=0.0, endangle=90.0, innerradius=0.5, outerradius=1.0, type=3):
    """
    ArcCurve( sides=6, startangle=0.0, endangle=90.0, innerradius=0.5, outerradius=1.0, type=3 )

    Create an arc shaped curve (Arc, Sector, Segment or Ring).

    Parameters:
        sides - number of sides
            (type=int)
        startangle - start angle in degrees
            (type=float)
        endangle - end angle in degrees
            (type=float)
        innerradius - inner radius (Ring only)
            (type=float)
        outerradius - outer radius
            (type=float)
        type - 0 Arc (caller should leave the curve open),
               2 Segment (closes through the center), 3 Ring
            (type=int)
    Returns:
        a list with lists of x,y,z coordinates for curve points,
        [[x,y,z],[x,y,z],...n]
        (type=list)
    """
    segments = sides + 1
    angle = 2.0 * (1.0 / 360.0)   # degrees -> the unit used with pi below
    sweep = endangle - startangle
    step = (angle * sweep) / (segments - 1)
    pts = []
    for k in range(segments):
        t = (k * step) + angle * startangle
        pts.append([sin(t * pi) * outerradius, cos(t * pi) * outerradius, 0])
    if type == 2:
        # Segment: close through the center point.
        pts.append([0, 0, 0])
    elif type == 3:
        # Ring: trace back along the inner radius in reverse order.
        for j in range(segments - 1, -1, -1):
            t = (j * step) + angle * startangle
            pts.append([sin(t * pi) * innerradius,
                        cos(t * pi) * innerradius, 0])
    return pts
+
+##------------------------------------------------------------
+# 2DCurve: Cog wheel:
def CogCurve(theeth=8, innerradius=0.8, middleradius=0.95, outerradius=1.0, bevel=0.5):
    """
    CogCurve( theeth=8, innerradius=0.8, middleradius=0.95, outerradius=1.0, bevel=0.5 )

    Create a cog wheel shaped curve.

    Parameters:
        theeth - number of teeth (parameter name kept, typo and all,
                 for caller compatibility)
            (type=int)
        innerradius - inner radius
            (type=float)
        middleradius - middle radius
            (type=float)
        outerradius - outer radius
            (type=float)
        bevel - bevel amount
            (type=float)
    Returns:
        a list with lists of x,y,z coordinates for curve points,
        [[x,y,z],[x,y,z],...n]
        (type=list)
    """
    step = 2.0 / theeth
    pet = step / pi * 2
    bevel = 1.0 - bevel
    pts = []
    for k in range(theeth):
        base = (k * step) * pi
        # Six points per tooth: two at the root (inner), then the rising
        # flank, the bevelled tip pair (outer), and the falling flank.
        for ang, rad in ((base - (pi / theeth) - pet, innerradius),
                         (base - (pi / theeth) + pet, innerradius),
                         (base - pet, middleradius),
                         (base - (pet * bevel), outerradius),
                         (base + (pet * bevel), outerradius),
                         (base + pet, middleradius)):
            pts.append([cos(ang) * rad, sin(ang) * rad, 0])
    return pts
+
+##------------------------------------------------------------
+# 2DCurve: nSide:
def nSideCurve(sides=6, radius=1.0):
    """
    nSideCurve( sides=6, radius=1.0 )

    Create a regular n-sided polygon curve.

    Parameters:
        sides - number of sides
            (type=int)
        radius - radius
            (type=float)
    Returns:
        a list with lists of x,y,z coordinates for curve points,
        [[x,y,z],[x,y,z],...n]
        (type=list)
    """
    step = 2.0 / sides
    return [[sin(k * step * pi) * radius, cos(k * step * pi) * radius, 0]
            for k in range(sides)]
+
+
+##------------------------------------------------------------
+# 2DCurve: Splat:
def SplatCurve(sides=24, scale=1.0, seed=0, basis=0, radius=1.0):
    """
    SplatCurve( sides=24, scale=1.0, seed=0, basis=0, radius=1.0 )

    Create a splat curve: a circle whose radius is modulated by turbulence.

    Parameters:
        sides - number of sides
            (type=int)
        scale - noise size
            (type=float)
        seed - noise random seed
            (type=int)
        basis - noise basis
            (type=int)
        radius - radius
            (type=float)
    Returns:
        a list with lists of x,y,z coordinates for curve points,
        [[x,y,z],[x,y,z],...n]
        (type=list)
    """
    step = 2.0 / sides
    pts = []
    for k in range(sides):
        t = k * step
        # Only the z component of the turbulence vector modulates the
        # radius, remapped from [-1, 1] into [0, 1].
        amp = vTurbNoise(t, t, t, 1.0, scale, 6, 0, basis, seed)[2] * 0.5 + 0.5
        pts.append([sin(t * pi) * radius * amp,
                    cos(t * pi) * radius * amp, 0])
    return pts
+
+###-----------------------------------------------------------
+#### 3D curve shape functions:
+###-----------------------------------------------------------
+
+###------------------------------------------------------------
+# 3DCurve: Helix:
def HelixCurve(number=100, height=2.0, startangle=0.0, endangle=360.0, width=1.0, a=0.0, b=0.0):
    """
    HelixCurve( number=100, height=2.0, startangle=0.0, endangle=360.0, width=1.0, a=0.0, b=0.0 )

    Create a helix curve.

    Parameters:
        number - the number of points
            (type=int)
        height - height
            (type=float)
        startangle - start angle in degrees
            (type=float)
        endangle - end angle in degrees
            (type=float)
        width - width
            (type=float)
        a - radius-modulation frequency
            (type=float)
        b - radius-modulation phase
            (type=float)
    Returns:
        a list with lists of x,y,z coordinates for curve points,
        [[x,y,z],[x,y,z],...n]
        (type=list)
    """
    angle = (2.0 / 360.0) * (endangle - startangle)
    step = angle / (number - 1)
    h = height / angle          # z gain per unit of parameter t
    start = startangle * 2.0 / 360.0
    a /= angle
    pts = []
    for k in range(number):
        t = k * step + start
        x = sin(t * pi) * (1.0 + cos(t * pi * a - (b * pi))) * (0.25 * width)
        y = cos(t * pi) * (1.0 + cos(t * pi * a - (b * pi))) * (0.25 * width)
        z = (t * h) - h * start
        pts.append([x, y, z])
    return pts
+
+###------------------------------------------------------------ ?
+# 3DCurve: Cycloid: Cycloid, Epicycloid, Hypocycloid
def CycloidCurve(number=24, length=2.0, type=0, a=1.0, b=1.0, startangle=0.0, endangle=360.0):
    """
    CycloidCurve( number=24, length=2.0, type=0, a=1.0, b=1.0, startangle=0.0, endangle=360.0 )

    Create a cycloid-family curve.

    Parameters:
        number - the number of points
            (type=int)
        length - length of curve
            (type=float)
        type - curve variant; only 0 (an epitrochoid-style curve) is
               implemented, any other value returns a square placeholder
            (type=int)
        a - rolling-circle parameter (scaled by the angular sweep)
            (type=float)
        b - rolling-circle parameter; must be nonzero for type 0
            (type=float)
        startangle - start angle in degrees
            (type=float)
        endangle - end angle in degrees
            (type=float)
    Returns:
        a list with lists of x,y,z coordinates for curve points,
        [[x,y,z],[x,y,z],...n]
        (type=list)
    """
    if type != 0:
        # Placeholder shape for the not-yet-implemented variants.
        return [[-1, -1, 0], [-1, 1, 0], [1, 1, 0], [1, -1, 0]]
    angle = (2.0 / 360.0) * (endangle - startangle)
    step = angle / (number - 1)
    d = length
    start = startangle * 2.0 / 360.0
    a /= angle
    pts = []
    for k in range(number):
        t = k * step + start
        x = ((a + b) * cos(t * pi)) - (d * cos(((a + b) / b) * t * pi))
        y = ((a + b) * sin(t * pi)) - (d * sin(((a + b) / b) * t * pi))
        pts.append([x, y, 0])
    return pts
+
+##------------------------------------------------------------
+# calculates the matrix for the new object
+# depending on user pref
def align_matrix(context):
    """Return the world matrix for a newly added object: translation to
    the 3D cursor, rotated to the view when the user preference
    'Align Object To' is set to VIEW in a 3D viewport."""
    loc = Matrix.Translation(context.scene.cursor_location)
    obj_align = context.user_preferences.edit.object_align
    in_view3d = context.space_data.type == 'VIEW_3D'
    if in_view3d and obj_align == 'VIEW':
        rot = context.space_data.region_3d.view_matrix.to_3x3().inverted().to_4x4()
    else:
        rot = Matrix()
    return loc * rot
+
+##------------------------------------------------------------
+#### Curve creation functions
+# sets bezierhandles to auto
def setBezierHandles(obj, mode='AUTOMATIC'):
    """Set every bezier handle of a curve object to *mode* by toggling
    into edit mode, selecting all points, and applying the handle type.
    Non-curve objects are ignored."""
    if obj.type != 'CURVE':
        return
    bpy.context.scene.objects.active = obj
    bpy.ops.object.mode_set(mode='EDIT', toggle=True)
    bpy.ops.curve.select_all(action='SELECT')
    bpy.ops.curve.handle_type_set(type=mode)
    bpy.ops.object.mode_set(mode='OBJECT', toggle=True)
+
# Flatten vert coordinates according to the spline type.
def vertsToPoints(Verts, splineType):
    """Flatten a list of [x, y, z] verts into the flat coordinate array
    expected by ``foreach_set('co', ...)``.

    BEZIER points carry 3 floats each; POLY and NURBS points carry a
    fourth weight component (w=1 for NURBS, w=0 for POLY).
    """
    if splineType == 'BEZIER':
        return [coord for vert in Verts for coord in vert]
    weight = 1 if splineType == 'NURBS' else 0
    flat = []
    for vert in Verts:
        flat.extend(vert)
        flat.append(weight)
    return flat
+
# create new CurveObject from vertarray and splineType
def createCurve(vertArray, self, align_matrix):
    """Build a curve datablock and object from a flat coordinate array
    and link it into the active scene.

    vertArray    - flat coords as produced by vertsToPoints()
    self         - the operator; its properties supply all curve options
    align_matrix - world matrix for placing the new object
    """
    # options to vars
    splineType = self.outputType # output splineType 'POLY' 'NURBS' 'BEZIER'
    name = self.GalloreType # GalloreType as name

    # create curve
    scene = bpy.context.scene
    newCurve = bpy.data.curves.new(name, type = 'CURVE') # curvedatablock
    newSpline = newCurve.splines.new(type = splineType) # spline

    # create spline from vertarray
    if splineType == 'BEZIER':
        # Bezier points are 3 floats each; the spline starts with one
        # point, so add count = len/3 - 1 (0.33 approximates 1/3 here).
        newSpline.bezier_points.add(int(len(vertArray)*0.33))
        newSpline.bezier_points.foreach_set('co', vertArray)
    else:
        # POLY/NURBS points are 4 floats each (x, y, z, w).
        newSpline.points.add(int(len(vertArray)*0.25 - 1))
        newSpline.points.foreach_set('co', vertArray)
        # NOTE(review): overwritten below by self.endp_u -- this
        # assignment looks redundant.
        newSpline.use_endpoint_u = True

    # set curveOptions
    newCurve.dimensions = self.shape
    newSpline.use_cyclic_u = self.use_cyclic_u
    newSpline.use_endpoint_u = self.endp_u
    newSpline.order_u = self.order_u

    # create object with newCurve
    new_obj = bpy.data.objects.new(name, newCurve) # object
    scene.objects.link(new_obj) # place in active scene
    new_obj.select = True # set as selected
    scene.objects.active = new_obj # set as active
    new_obj.matrix_world = align_matrix # apply matrix

    # set bezierhandles (must happen after the object is linked and
    # active, since it toggles edit mode)
    if splineType == 'BEZIER':
        setBezierHandles(new_obj, self.handleType)

    return
+
+##------------------------------------------------------------
+# Main Function
def main(context, self, align_matrix):
    """Entry point of the operator: build the vert list for the selected
    curve type, flatten it for the chosen spline type, and create the
    curve object in the scene."""
    # Start from a clean selection so only the new curve ends up selected.
    bpy.ops.object.select_all(action='DESELECT')

    galType = self.GalloreType
    r_inner = self.innerRadius
    r_middle = self.middleRadius
    r_outer = self.outerRadius

    # Dispatch on the curve type; each branch forwards the matching
    # operator properties to its generator function.
    if galType == 'Profile':
        verts = ProfileCurve(self.ProfileCurveType,
                             self.ProfileCurvevar1,
                             self.ProfileCurvevar2)
    elif galType == 'Miscellaneous':
        verts = MiscCurve(self.MiscCurveType,
                          self.MiscCurvevar1,
                          self.MiscCurvevar2,
                          self.MiscCurvevar3)
    elif galType == 'Flower':
        verts = FlowerCurve(self.petals, r_inner, r_outer, self.petalWidth)
    elif galType == 'Star':
        verts = StarCurve(self.starPoints, r_inner, r_outer, self.starTwist)
    elif galType == 'Arc':
        verts = ArcCurve(self.arcSides, self.startAngle, self.endAngle,
                         r_inner, r_outer, self.arcType)
    elif galType == 'Cogwheel':
        verts = CogCurve(self.teeth, r_inner, r_middle, r_outer, self.bevel)
    elif galType == 'Nsided':
        verts = nSideCurve(self.Nsides, r_outer)
    elif galType == 'Splat':
        verts = SplatCurve(self.splatSides, self.splatScale,
                           self.seed, self.basis, r_outer)
    elif galType == 'Helix':
        verts = HelixCurve(self.helixPoints, self.helixHeight,
                           self.helixStart, self.helixEnd,
                           self.helixWidth, self.helix_a, self.helix_b)
    elif galType == 'Cycloid':
        verts = CycloidCurve(self.cycloPoints, self.cyclo_d, self.cycloType,
                             self.cyclo_a, self.cyclo_b,
                             self.cycloStart, self.cycloEnd)

    vertArray = vertsToPoints(verts, self.outputType)
    createCurve(vertArray, self, align_matrix)
+
+class Curveaceous_galore(bpy.types.Operator):
+ ''''''
+ bl_idname = "mesh.curveaceous_galore"
+ bl_label = "Curveaceous galore"
+ bl_options = {'REGISTER', 'UNDO'}
+ bl_description = "adds many types of curves"
+
+ # align_matrix for the invoke
+ align_matrix = Matrix()
+
+ #### general properties
+ GalloreTypes = [
+ ('Profile', 'Profile', 'Profile'),
+ ('Miscellaneous', 'Miscellaneous', 'Miscellaneous'),
+ ('Flower', 'Flower', 'Flower'),
+ ('Star', 'Star', 'Star'),
+ ('Arc', 'Arc', 'Arc'),
+ ('Cogwheel', 'Cogwheel', 'Cogwheel'),
+ ('Nsided', 'Nsided', 'Nsided'),
+ ('Splat', 'Splat', 'Splat'),
+ ('Cycloid', 'Cycloid', 'Cycloid'),
+ ('Helix', 'Helix (3D)', 'Helix')]
+ GalloreType = EnumProperty(name="Type",
+ description="Form of Curve to create",
+ items=GalloreTypes)
+ SplineTypes = [
+ ('POLY', 'Poly', 'POLY'),
+ ('NURBS', 'Nurbs', 'NURBS'),
+ ('BEZIER', 'Bezier', 'BEZIER')]
+ outputType = EnumProperty(name="Output splines",
+ description="Type of splines to output",
+ items=SplineTypes)
+
+ #### Curve Options
+ shapeItems = [
+ ('2D', '2D', '2D'),
+ ('3D', '3D', '3D')]
+ shape = EnumProperty(name="2D / 3D",
+ items=shapeItems,
+ description="2D or 3D Curve")
+ use_cyclic_u = BoolProperty(name="Cyclic",
+ default=True,
+ description="make curve closed")
+ endp_u = BoolProperty(name="use_endpoint_u",
+ default=True,
+ description="stretch to endpoints")
+ order_u = IntProperty(name="order_u",
+ default=4,
+ min=2, soft_min=2,
+ max=6, soft_max=6,
+ description="Order of nurbs spline")
+ bezHandles = [
+ ('VECTOR', 'Vector', 'VECTOR'),
+ ('AUTOMATIC', 'Auto', 'AUTOMATIC')]
+ handleType = EnumProperty(name="Handle type",
+ description="bezier handles type",
+ items=bezHandles)
+
+ #### ProfileCurve properties
+ ProfileCurveType = IntProperty(name="Type",
+ min=1, soft_min=1,
+ max=5, soft_max=5,
+ default=1,
+ description="Type of ProfileCurve")
+ ProfileCurvevar1 = FloatProperty(name="var_1",
+ default=0.25,
+ description="var1 of ProfileCurve")
+ ProfileCurvevar2 = FloatProperty(name="var_2",
+ default=0.25,
+ description="var2 of ProfileCurve")
+
+ #### MiscCurve properties
+ MiscCurveType = IntProperty(name="Type",
+ min=1, soft_min=1,
+ max=6, soft_max=6,
+ default=1,
+ description="Type of MiscCurve")
+ MiscCurvevar1 = FloatProperty(name="var_1",
+ default=1.0,
+ description="var1 of MiscCurve")
+ MiscCurvevar2 = FloatProperty(name="var_2",
+ default=0.5,
+ description="var2 of MiscCurve")
+ MiscCurvevar3 = FloatProperty(name="var_3",
+ default=0.1,
+ min=0, soft_min=0,
+ description="var3 of MiscCurve")
+
+ #### Common properties
+ innerRadius = FloatProperty(name="Inner radius",
+ default=0.5,
+ min=0, soft_min=0,
+ description="Inner radius")
+ middleRadius = FloatProperty(name="Middle radius",
+ default=0.95,
+ min=0, soft_min=0,
+ description="Middle radius")
+ outerRadius = FloatProperty(name="Outer radius",
+ default=1.0,
+ min=0, soft_min=0,
+ description="Outer radius")
+
+ #### Flower properties
+ petals = IntProperty(name="Petals",
+ default=8,
+ min=2, soft_min=2,
+ description="Number of petals")
+ petalWidth = FloatProperty(name="Petal width",
+ default=2.0,
+ min=0.01, soft_min=0.01,
+ description="Petal width")
+
+ #### Star properties
+ starPoints = IntProperty(name="Star points",
+ default=8,
+ min=2, soft_min=2,
+ description="Number of star points")
+ starTwist = FloatProperty(name="Twist",
+ default=0.0,
+ description="Twist")
+
+ #### Arc properties
+ arcSides = IntProperty(name="Arc sides",
+ default=6,
+ min=1, soft_min=1,
+ description="Sides of arc")
+ startAngle = FloatProperty(name="Start angle",
+ default=0.0,
+ description="Start angle")
+ endAngle = FloatProperty(name="End angle",
+ default=90.0,
+ description="End angle")
+ arcType = IntProperty(name="Arc type",
+ default=3,
+ min=1, soft_min=1,
+ max=3, soft_max=3,
+ description="Sides of arc")
+
+ #### Cogwheel properties
+ teeth = IntProperty(name="Teeth",
+ default=8,
+ min=2, soft_min=2,
+ description="number of teeth")
+ bevel = FloatProperty(name="Bevel",
+ default=0.5,
+ min=0, soft_min=0,
+ max=1, soft_max=1,
+ description="Bevel")
+
+ #### Nsided property
+ Nsides = IntProperty(name="Sides",
+ default=8,
+ min=3, soft_min=3,
+ description="Number of sides")
+
+ #### Splat properties
+ splatSides = IntProperty(name="Splat sides",
+ default=24,
+ min=3, soft_min=3,
+ description="Splat sides")
+ splatScale = FloatProperty(name="Splat scale",
+ default=1.0,
+ min=0.0001, soft_min=0.0001,
+ description="Splat scale")
+ seed = IntProperty(name="Seed",
+ default=0,
+ min=0, soft_min=0,
+ description="Seed")
+ basis = IntProperty(name="Basis",
+ default=0,
+ min=0, soft_min=0,
+ max=14, soft_max=14,
+ description="Basis")
+
+ #### Helix properties
+ helixPoints = IntProperty(name="resolution",
+ default=100,
+ min=3, soft_min=3,
+ description="resolution")
+ helixHeight = FloatProperty(name="Height",
+ default=2.0,
+ min=0, soft_min=0,
+ description="Helix height")
+ helixStart = FloatProperty(name="Start angle",
+ default=0.0,
+ description="Helix start angle")
+ helixEnd = FloatProperty(name="Endangle",
+ default=360.0,
+ description="Helix end angle")
+ helixWidth = FloatProperty(name="Width",
+ default=1.0,
+ description="Helix width")
+ helix_a = FloatProperty(name="var_1",
+ default=0.0,
+ description="Helix var1")
+ helix_b = FloatProperty(name="var_2",
+ default=0.0,
+ description="Helix var2")
+
+ #### Cycloid properties
+ cycloPoints = IntProperty(name="Resolution",
+ default=100,
+ min=3, soft_min=3,
+ description="Resolution")
+ cyclo_d = FloatProperty(name="var_3",
+ default=1.5,
+ description="Cycloid var3")
+ cycloType = IntProperty(name="Type",
+ default=0,
+ min=0, soft_min=0,
+ max=0, soft_max=0,
+ description="resolution")
+ cyclo_a = FloatProperty(name="var_1",
+ default=5.0,
+ min=0.01, soft_min=0.01,
+ description="Cycloid var1")
+ cyclo_b = FloatProperty(name="var_2",
+ default=0.5,
+ min=0.01, soft_min=0.01,
+ description="Cycloid var2")
+ cycloStart = FloatProperty(name="Start angle",
+ default=0.0,
+ description="Cycloid start angle")
+ cycloEnd = FloatProperty(name="End angle",
+ default=360.0,
+ description="Cycloid end angle")
+
+ ##### DRAW #####
+ def draw(self, context):
+ layout = self.layout
+
+ # general options
+ col = layout.column()
+ col.prop(self, 'GalloreType')
+ col.label(text=self.GalloreType + " Options")
+
+ # options per GalloreType
+ box = layout.box()
+ if self.GalloreType == 'Profile':
+ box.prop(self, 'ProfileCurveType')
+ box.prop(self, 'ProfileCurvevar1')
+ box.prop(self, 'ProfileCurvevar2')
+ if self.GalloreType == 'Miscellaneous':
+ box.prop(self, 'MiscCurveType')
+ box.prop(self, 'MiscCurvevar1', text='Width')
+ box.prop(self, 'MiscCurvevar2', text='Height')
+ if self.MiscCurveType == 5:
+ box.prop(self, 'MiscCurvevar3', text='Rounded')
+ if self.GalloreType == 'Flower':
+ box.prop(self, 'petals')
+ box.prop(self, 'petalWidth')
+ box.prop(self, 'innerRadius')
+ box.prop(self, 'outerRadius')
+ if self.GalloreType == 'Star':
+ box.prop(self, 'starPoints')
+ box.prop(self, 'starTwist')
+ box.prop(self, 'innerRadius')
+ box.prop(self, 'outerRadius')
+ if self.GalloreType == 'Arc':
+ box.prop(self, 'arcSides')
+ box.prop(self, 'arcType') # has only one Type?
+ box.prop(self, 'startAngle')
+ box.prop(self, 'endAngle')
+ box.prop(self, 'innerRadius') # doesn't seem to do anything
+ box.prop(self, 'outerRadius')
+ if self.GalloreType == 'Cogwheel':
+ box.prop(self, 'teeth')
+ box.prop(self, 'bevel')
+ box.prop(self, 'innerRadius')
+ box.prop(self, 'middleRadius')
+ box.prop(self, 'outerRadius')
+ if self.GalloreType == 'Nsided':
+ box.prop(self, 'Nsides')
+ box.prop(self, 'outerRadius', text='Radius')
+
+ if self.GalloreType == 'Splat':
+ box.prop(self, 'splatSides')
+ box.prop(self, 'outerRadius')
+ box.prop(self, 'splatScale')
+ box.prop(self, 'seed')
+ box.prop(self, 'basis')
+
+ if self.GalloreType == 'Helix':
+ box.prop(self, 'helixPoints')
+ box.prop(self, 'helixHeight')
+ box.prop(self, 'helixWidth')
+ box.prop(self, 'helixStart')
+ box.prop(self, 'helixEnd')
+ box.prop(self, 'helix_a')
+ box.prop(self, 'helix_b')
+ if self.GalloreType == 'Cycloid':
+ box.prop(self, 'cycloPoints')
+ #box.prop(self, 'cycloType') # needs the other types first
+ box.prop(self, 'cycloStart')
+ box.prop(self, 'cycloEnd')
+ box.prop(self, 'cyclo_a')
+ box.prop(self, 'cyclo_b')
+ box.prop(self, 'cyclo_d')
+
+ col = layout.column()
+ col.label(text="Output Curve Type")
+ row = layout.row()
+ row.prop(self, 'outputType', expand=True)
+ col = layout.column()
+ col.label(text="Curve Options")
+
+ # output options
+ box = layout.box()
+ if self.outputType == 'NURBS':
+ box.row().prop(self, 'shape', expand=True)
+ #box.prop(self, 'use_cyclic_u')
+ #box.prop(self, 'endp_u')
+ box.prop(self, 'order_u')
+
+ if self.outputType == 'POLY':
+ box.row().prop(self, 'shape', expand=True)
+ #box.prop(self, 'use_cyclic_u')
+
+ if self.outputType == 'BEZIER':
+ box.row().prop(self, 'shape', expand=True)
+ box.row().prop(self, 'handleType', expand=True)
+ #box.prop(self, 'use_cyclic_u')
+
+
+ ##### POLL #####
+ @classmethod
+ def poll(cls, context):
+ return context.scene != None
+
+ ##### EXECUTE #####
+ def execute(self, context):
+ # turn off undo
+ undo = bpy.context.user_preferences.edit.use_global_undo
+ bpy.context.user_preferences.edit.use_global_undo = False
+
+ # deal with 2D - 3D curve differences
+ if self.GalloreType in ['Helix', 'Cycloid']:
+ self.shape = '3D'
+ #else:
+ #self.shape = '2D' # someone decide if we want this
+
+ if self.GalloreType in ['Helix']:
+ self.use_cyclic_u = False
+ else:
+ self.use_cyclic_u = True
+
+
+ # main function
+ main(context, self, self.align_matrix)
+
+ # restore pre operator undo state
+ bpy.context.user_preferences.edit.use_global_undo = undo
+
+ return {'FINISHED'}
+
+ ##### INVOKE #####
+ def invoke(self, context, event):
+ # store creation_matrix
+ self.align_matrix = align_matrix(context)
+ self.execute(context)
+
+ return {'FINISHED'}
+
+################################################################################
+##### REGISTER #####
+
+def Curveaceous_galore_button(self, context):
+ self.layout.operator(Curveaceous_galore.bl_idname, text="curvatures gallore", icon="PLUGIN")
+
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ bpy.types.INFO_MT_curve_add.append(Curveaceous_galore_button)
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+ bpy.types.INFO_MT_curve_add.remove(Curveaceous_galore_button)
+
+if __name__ == "__main__":
+ register()
diff --git a/add_curve_torus_knots.py b/add_curve_torus_knots.py
new file mode 100644
index 00000000..c832e03e
--- /dev/null
+++ b/add_curve_torus_knots.py
@@ -0,0 +1,247 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+
+bl_info = {
+ "name": "Torus Knots",
+ "author": "testscreenings",
+ "version": (0,1),
+ "blender": (2, 5, 7),
+ "api": 35853,
+ "location": "View3D > Add > Curve",
+ "description": "Adds many types of (torus) knots",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/Curve/Torus_Knot",
+ "tracker_url": "https://projects.blender.org/tracker/index.php?"\
+ "func=detail&aid=22403",
+ "category": "Add Curve"}
+
+
+##------------------------------------------------------------
+#### import modules
+import bpy
+from bpy.props import *
+from math import sin, cos, pi
+from add_utils import *
+
+
+########################################################################
+####################### Knot Definitions ###############################
+########################################################################
+def Torus_Knot(self):
+ p = self.torus_p
+ q = self.torus_q
+ w = self.torus_w
+ res = self.torus_res
+ h = self.torus_h
+ u = self.torus_u
+ v = self.torus_v
+ rounds = self.torus_rounds
+
+ newPoints = []
+ angle = 2*rounds
+ step = angle/(res-1)
+ scale = h
+ height = w
+
+ for i in range(res-1):
+ t = ( i*step*pi)
+
+ x = (2 * scale + cos((q*t)/p*v)) * cos(t * u)
+ y = (2 * scale + cos((q*t)/p*v)) * sin(t * u)
+ z = sin(q*t/p) * height
+
+ newPoints.extend([x,y,z,1])
+
+ return newPoints
+
+
+##------------------------------------------------------------
+# Main Function
+def create_torus_knot(self, context):
+ verts = Torus_Knot(self)
+
+ curve_data = bpy.data.curves.new(name='Torus Knot', type='CURVE')
+ spline = curve_data.splines.new(type='NURBS')
+ spline.points.add(int(len(verts)*0.25 - 1))
+ spline.points.foreach_set('co', verts)
+ spline.use_endpoint_u = True
+ spline.use_cyclic_u = True
+ spline.order_u = 4
+ curve_data.dimensions = '3D'
+
+ if self.geo_surf:
+ curve_data.bevel_depth = self.geo_bDepth
+ curve_data.bevel_resolution = self.geo_bRes
+ curve_data.use_fill_front = False
+ curve_data.use_fill_back = False
+ curve_data.extrude = self.geo_extrude
+ #curve_data.offset = self.geo_width # removed, somehow screws things up all of a sudden
+ curve_data.resolution_u = self.geo_res
+
+ new_obj = add_object_data(context, curve_data, operator=self)
+
+
+class torus_knot_plus(bpy.types.Operator, AddObjectHelper):
+ ''''''
+ bl_idname = "curve.torus_knot_plus"
+ bl_label = "Torus Knot +"
+ bl_options = {'REGISTER', 'UNDO'}
+ bl_description = "adds many types of knots"
+
+ #### general options
+ options_plus = BoolProperty(name="plus options",
+ default=False,
+ description="Show more options (the plus part).")
+
+ #### GEO Options
+ geo_surf = BoolProperty(name="Surface",
+ default=True)
+ geo_bDepth = FloatProperty(name="bevel",
+ default=0.08,
+ min=0, soft_min=0)
+ geo_bRes = IntProperty(name="bevel res",
+ default=2,
+ min=0, soft_min=0,
+ max=4, soft_max=4)
+ geo_extrude = FloatProperty(name="extrude",
+ default=0.0,
+ min=0, soft_min=0)
+ geo_res = IntProperty(name="resolution",
+ default=12,
+ min=1, soft_min=1)
+
+
+ #### Parameters
+ torus_res = IntProperty(name="Resoulution",
+ default=100,
+ min=3, soft_min=3,
+ description='Resolution, Number of controlverticies.')
+ torus_p = IntProperty(name="p",
+ default=2,
+ min=1, soft_min=1,
+ #max=1, soft_max=1,
+ description="p")
+ torus_q = IntProperty(name="q",
+ default=3,
+ min=1, soft_min=1,
+ #max=1, soft_max=1,
+ description="q")
+ torus_w = FloatProperty(name="Height",
+ default=1,
+ #min=0, soft_min=0,
+ #max=1, soft_max=1,
+ description="Height in Z")
+ torus_h = FloatProperty(name="Scale",
+ default=1,
+ #min=0, soft_min=0,
+ #max=1, soft_max=1,
+ description="Scale, in XY")
+ torus_u = IntProperty(name="u",
+ default=1,
+ min=1, soft_min=1,
+ #max=1, soft_max=1,
+ description="u")
+ torus_v = IntProperty(name="v",
+ default=1,
+ min=1, soft_min=1,
+ #max=1, soft_max=1,
+ description="v")
+ torus_rounds = IntProperty(name="Rounds",
+ default=2,
+ min=1, soft_min=1,
+ #max=1, soft_max=1,
+ description="Rounds")
+
+ ##### DRAW #####
+ def draw(self, context):
+ layout = self.layout
+
+ # general options
+ col = layout.column()
+ col.label(text="Torus Knot Parameters")
+
+ # Parameters
+ box = layout.box()
+ box.prop(self, 'torus_res')
+ box.prop(self, 'torus_w')
+ box.prop(self, 'torus_h')
+ box.prop(self, 'torus_p')
+ box.prop(self, 'torus_q')
+ box.prop(self, 'options_plus')
+ if self.options_plus:
+ box.prop(self, 'torus_u')
+ box.prop(self, 'torus_v')
+ box.prop(self, 'torus_rounds')
+
+ # surface Options
+ col = layout.column()
+ col.label(text="Geometry Options")
+ box = layout.box()
+ box.prop(self, 'geo_surf')
+ if self.geo_surf:
+ box.prop(self, 'geo_bDepth')
+ box.prop(self, 'geo_bRes')
+ box.prop(self, 'geo_extrude')
+ box.prop(self, 'geo_res')
+ col = layout.column()
+ col.prop(self, 'location')
+ col.prop(self, 'rotation')
+
+ ##### POLL #####
+ @classmethod
+ def poll(cls, context):
+ return context.scene != None
+
+ ##### EXECUTE #####
+ def execute(self, context):
+ # turn off undo
+ undo = bpy.context.user_preferences.edit.use_global_undo
+ bpy.context.user_preferences.edit.use_global_undo = False
+
+ if not self.options_plus:
+ self.torus_rounds = self.torus_p
+
+ #recoded for add_utils
+ create_torus_knot(self, context)
+
+ # restore pre operator undo state
+ bpy.context.user_preferences.edit.use_global_undo = undo
+
+ return {'FINISHED'}
+
+################################################################################
+##### REGISTER #####
+
+def torus_knot_plus_button(self, context):
+ self.layout.operator(torus_knot_plus.bl_idname, text="Torus Knot +", icon="PLUGIN")
+
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ bpy.types.INFO_MT_curve_add.append(torus_knot_plus_button)
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+ bpy.types.INFO_MT_curve_add.remove(torus_knot_plus_button)
+
+if __name__ == "__main__":
+ register()
diff --git a/add_mesh_BoltFactory/Boltfactory.py b/add_mesh_BoltFactory/Boltfactory.py
new file mode 100644
index 00000000..97a489b0
--- /dev/null
+++ b/add_mesh_BoltFactory/Boltfactory.py
@@ -0,0 +1,311 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+
+import bpy
+import mathutils
+from bpy.props import *
+from add_mesh_BoltFactory.createMesh import *
+from add_mesh_BoltFactory.preset_utils import *
+
+
+
+##------------------------------------------------------------
+# calculates the matrix for the new object
+# depending on user pref
+def align_matrix(context):
+ loc = mathutils.Matrix.Translation(context.scene.cursor_location)
+ obj_align = context.user_preferences.edit.object_align
+ if (context.space_data.type == 'VIEW_3D'
+ and obj_align == 'VIEW'):
+ rot = context.space_data.region_3d.view_matrix.to_3x3().inverted().to_4x4()
+ else:
+ rot = mathutils.Matrix()
+ align_matrix = loc * rot
+ return align_matrix
+
+
+
+class add_mesh_bolt(bpy.types.Operator):
+ ''''''
+ bl_idname = "mesh.bolt_add"
+ bl_label = "Add Bolt"
+ bl_options = {'REGISTER', 'UNDO'}
+ bl_description = "adds many types of Bolts"
+
+ align_matrix = mathutils.Matrix()
+ MAX_INPUT_NUMBER = 50
+
+ # edit - Whether to add or update.
+ edit = BoolProperty(name="",
+ description="",
+ default=False,
+ options={'HIDDEN'})
+
+
+ #Model Types
+ Model_Type_List = [('bf_Model_Bolt','BOLT','Bolt Model'),
+ ('bf_Model_Nut','NUT','Nut Model')]
+ bf_Model_Type = EnumProperty( attr='bf_Model_Type',
+ name='Model',
+ description='Choose the type off model you would like',
+ items = Model_Type_List, default = 'bf_Model_Bolt')
+
+ #Head Types
+ Model_Type_List = [('bf_Head_Hex','HEX','Hex Head'),
+ ('bf_Head_Cap','CAP','Cap Head'),
+ ('bf_Head_Dome','DOME','Dome Head'),
+ ('bf_Head_Pan','PAN','Pan Head'),
+ ('bf_Head_CounterSink','COUNTER SINK','Counter Sink Head')]
+ bf_Head_Type = EnumProperty( attr='bf_Head_Type',
+ name='Head',
+ description='Choose the type off Head you would like',
+ items = Model_Type_List, default = 'bf_Head_Hex')
+
+ #Bit Types
+ Bit_Type_List = [('bf_Bit_None','NONE','No Bit Type'),
+ ('bf_Bit_Allen','ALLEN','Allen Bit Type'),
+ ('bf_Bit_Philips','PHILLIPS','Phillips Bit Type')]
+ bf_Bit_Type = EnumProperty( attr='bf_Bit_Type',
+ name='Bit Type',
+ description='Choose the type of bit to you would like',
+ items = Bit_Type_List, default = 'bf_Bit_None')
+
+ #Nut Types
+ Nut_Type_List = [('bf_Nut_Hex','HEX','Hex Nut'),
+ ('bf_Nut_Lock','LOCK','Lock Nut')]
+ bf_Nut_Type = EnumProperty( attr='bf_Nut_Type',
+ name='Nut Type',
+ description='Choose the type of nut you would like',
+ items = Nut_Type_List, default = 'bf_Nut_Hex')
+
+ #Shank Types
+ bf_Shank_Length = FloatProperty(attr='bf_Shank_Length',
+ name='Shank Length', default = 0,
+ min = 0, soft_min = 0, max = MAX_INPUT_NUMBER,
+ description='Length of the unthreaded shank')
+
+ bf_Shank_Dia = FloatProperty(attr='bf_Shank_Dia',
+ name='Shank Dia', default = 3,
+ min = 0, soft_min = 0,max = MAX_INPUT_NUMBER,
+ description='Diameter of the shank')
+
+ bf_Phillips_Bit_Depth = FloatProperty(attr='bf_Phillips_Bit_Depth',
+ name='Bit Depth', default = 0, #set in execute
+ options = {'HIDDEN'}, #gets calculated in execute
+ min = 0, soft_min = 0,max = MAX_INPUT_NUMBER,
+ description='Depth of the Phillips Bit')
+
+ bf_Allen_Bit_Depth = FloatProperty(attr='bf_Allen_Bit_Depth',
+ name='Bit Depth', default = 1.5,
+ min = 0, soft_min = 0,max = MAX_INPUT_NUMBER,
+ description='Depth of the Allen Bit')
+
+ bf_Allen_Bit_Flat_Distance = FloatProperty( attr='bf_Allen_Bit_Flat_Distance',
+ name='Flat Dist', default = 2.5,
+ min = 0, soft_min = 0, max = MAX_INPUT_NUMBER,
+ description='Flat Distance of the Allen Bit')
+
+ bf_Hex_Head_Height = FloatProperty( attr='bf_Hex_Head_Height',
+ name='Head Height', default = 2,
+ min = 0, soft_min = 0, max = MAX_INPUT_NUMBER,
+ description='Height of the Hex Head')
+
+ bf_Hex_Head_Flat_Distance = FloatProperty( attr='bf_Hex_Head_Flat_Distance',
+ name='Flat Dist', default = 5.5,
+ min = 0, soft_min = 0, max = MAX_INPUT_NUMBER,
+ description='Flat Distance of the Hex Head')
+
+ bf_CounterSink_Head_Dia = FloatProperty( attr='bf_CounterSink_Head_Dia',
+ name='Head Dia', default = 5.5,
+ min = 0, soft_min = 0, max = MAX_INPUT_NUMBER,
+ description='Diameter of the Counter Sink Head')
+
+ bf_Cap_Head_Height = FloatProperty( attr='bf_Cap_Head_Height',
+ name='Head Height', default = 5.5,
+ min = 0, soft_min = 0, max = MAX_INPUT_NUMBER,
+ description='Height of the Cap Head')
+
+ bf_Cap_Head_Dia = FloatProperty( attr='bf_Cap_Head_Dia',
+ name='Head Dia', default = 3,
+ min = 0, soft_min = 0, max = MAX_INPUT_NUMBER,
+ description='Diameter of the Cap Head')
+
+ bf_Dome_Head_Dia = FloatProperty( attr='bf_Dome_Head_Dia',
+ name='Dome Head Dia', default = 5.6,
+ min = 0, soft_min = 0, max = MAX_INPUT_NUMBER,
+ description='Length of the unthreaded shank')
+
+ bf_Pan_Head_Dia = FloatProperty( attr='bf_Pan_Head_Dia',
+ name='Pan Head Dia', default = 5.6,
+ min = 0, soft_min = 0, max = MAX_INPUT_NUMBER,
+ description='Diameter of the Pan Head')
+
+ bf_Philips_Bit_Dia = FloatProperty(attr='bf_Philips_Bit_Dia',
+ name='Bit Dia', default = 0, #set in execute
+ options = {'HIDDEN'}, #gets calculated in execute
+ min = 0, soft_min = 0,max = MAX_INPUT_NUMBER,
+ description='Diameter of the Philips Bit')
+
+ bf_Thread_Length = FloatProperty( attr='bf_Thread_Length',
+ name='Thread Length', default = 6,
+ min = 0, soft_min = 0, max = MAX_INPUT_NUMBER,
+ description='Length of the Thread')
+
+ bf_Major_Dia = FloatProperty( attr='bf_Major_Dia',
+ name='Major Dia', default = 3,
+ min = 0, soft_min = 0, max = MAX_INPUT_NUMBER,
+ description='Outside diameter of the Thread')
+
+ bf_Pitch = FloatProperty( attr='bf_Pitch',
+ name='Pitch', default = 0.35,
+ min = 0.1, soft_min = 0.1, max = 7.0,
+ description='Pitch if the thread')
+
+ bf_Minor_Dia = FloatProperty( attr='bf_Minor_Dia',
+ name='Minor Dia', default = 0, #set in execute
+ options = {'HIDDEN'}, #gets calculated in execute
+ min = 0, soft_min = 0, max = MAX_INPUT_NUMBER,
+ description='Inside diameter of the Thread')
+
+ bf_Crest_Percent = IntProperty( attr='bf_Crest_Percent',
+ name='Crest Percent', default = 10,
+ min = 1, soft_min = 1, max = 90,
+ description='Percent of the pitch that makes up the Crest')
+
+ bf_Root_Percent = IntProperty( attr='bf_Root_Percent',
+ name='Root Percent', default = 10,
+ min = 1, soft_min = 1, max = 90,
+ description='Percent of the pitch that makes up the Root')
+
+ bf_Hex_Nut_Height = FloatProperty( attr='bf_Hex_Nut_Height',
+ name='Hex Nut Height', default = 2.4,
+ min = 0, soft_min = 0, max = MAX_INPUT_NUMBER,
+ description='Height of the Hex Nut')
+
+ bf_Hex_Nut_Flat_Distance = FloatProperty( attr='bf_Hex_Nut_Flat_Distance',
+ name='Hex Nut Flat Dist', default = 5.5,
+ min = 0, soft_min = 0, max = MAX_INPUT_NUMBER,
+ description='Flat distance of the Hex Nut')
+
+ presets, presetsPath = getPresets()
+
+ bf_presets = EnumProperty(attr='bf_presets',
+ name='Preset',
+ description="Use Preset from File",
+ default='M3.py',
+ items=presets)
+
+ last_preset = None
+
+
+ def draw(self, context):
+ layout = self.layout
+ col = layout.column()
+
+ #ENUMS
+ col.prop(self, 'bf_Model_Type')
+ col.prop(self, 'bf_presets')
+ col.separator()
+
+ #Bit
+ if self.bf_Model_Type == 'bf_Model_Bolt':
+ col.prop(self, 'bf_Bit_Type')
+ if self.bf_Bit_Type == 'bf_Bit_None':
+ DoNothing = 1;
+ elif self.bf_Bit_Type == 'bf_Bit_Allen':
+ col.prop(self, 'bf_Allen_Bit_Depth')
+ col.prop(self, 'bf_Allen_Bit_Flat_Distance')
+ elif self.bf_Bit_Type == 'bf_Bit_Philips':
+ col.prop(self, 'bf_Phillips_Bit_Depth')
+ col.prop(self, 'bf_Philips_Bit_Dia')
+ col.separator()
+
+ #Head
+ if self.bf_Model_Type == 'bf_Model_Bolt':
+ col.prop(self, 'bf_Head_Type')
+ if self.bf_Head_Type == 'bf_Head_Hex':
+ col.prop(self, 'bf_Hex_Head_Height')
+ col.prop(self, 'bf_Hex_Head_Flat_Distance')
+ elif self.bf_Head_Type == 'bf_Head_Cap':
+ col.prop(self, 'bf_Cap_Head_Height')
+ col.prop(self, 'bf_Cap_Head_Dia')
+ elif self.bf_Head_Type == 'bf_Head_Dome':
+ col.prop(self, 'bf_Dome_Head_Dia')
+ elif self.bf_Head_Type == 'bf_Head_Pan':
+ col.prop(self, 'bf_Pan_Head_Dia')
+ elif self.bf_Head_Type == 'bf_Head_CounterSink':
+ col.prop(self, 'bf_CounterSink_Head_Dia')
+ col.separator()
+ #Shank
+ if self.bf_Model_Type == 'bf_Model_Bolt':
+ col.label(text='Shank')
+ col.prop(self, 'bf_Shank_Length')
+ col.prop(self, 'bf_Shank_Dia')
+ col.separator()
+ #Nut
+ if self.bf_Model_Type == 'bf_Model_Nut':
+ col.prop(self, 'bf_Nut_Type')
+ col.prop(self, 'bf_Hex_Nut_Height')
+ col.prop(self, 'bf_Hex_Nut_Flat_Distance')
+ #Thread
+ col.label(text='Thread')
+ if self.bf_Model_Type == 'bf_Model_Bolt':
+ col.prop(self, 'bf_Thread_Length')
+ col.prop(self, 'bf_Major_Dia')
+ col.prop(self, 'bf_Minor_Dia')
+ col.prop(self, 'bf_Pitch')
+ col.prop(self, 'bf_Crest_Percent')
+ col.prop(self, 'bf_Root_Percent')
+
+
+
+ ##### POLL #####
+ @classmethod
+ def poll(cls, context):
+ return context.scene != None
+
+ ##### EXECUTE #####
+ def execute(self, context):
+
+ #print('EXECUTING...')
+
+ if not self.last_preset or self.bf_presets != self.last_preset:
+ #print('setting Preset', self.bf_presets)
+ setProps(self, self.bf_presets, self.presetsPath)
+ self.bf_Phillips_Bit_Depth = float(Get_Phillips_Bit_Height(self.bf_Philips_Bit_Dia))
+
+ self.last_preset = self.bf_presets
+
+
+ #self.bf_Phillips_Bit_Depth = float(Get_Phillips_Bit_Height(self.bf_Philips_Bit_Dia))
+ #self.bf_Philips_Bit_Dia = self.bf_Pan_Head_Dia*(1.82/5.6)
+ #self.bf_Minor_Dia = self.bf_Major_Dia - (1.082532 * self.bf_Pitch)
+
+ Create_New_Mesh(self, context, self.align_matrix)
+
+ return {'FINISHED'}
+
+ ##### INVOKE #####
+ def invoke(self, context, event):
+ #print('\n___________START_____________')
+ # store creation_matrix
+ self.align_matrix = align_matrix(context)
+ self.execute(context)
+
+ return {'FINISHED'}
diff --git a/add_mesh_BoltFactory/__init__.py b/add_mesh_BoltFactory/__init__.py
new file mode 100644
index 00000000..ef3f319e
--- /dev/null
+++ b/add_mesh_BoltFactory/__init__.py
@@ -0,0 +1,61 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+bl_info = {
+ "name": "BoltFactory",
+ "author": "Aaron Keith",
+ "version": (3, 9),
+ "blender": (2, 5, 7),
+ "api": 35853,
+ "location": "View3D > Add > Mesh",
+ "description": "Adds a Bolt or Nut",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/Add_Mesh/BoltFactory",
+ "tracker_url": "https://projects.blender.org/tracker/index.php?"\
+ "func=detail&aid=22842",
+ "category": "Add Mesh"}
+
+if "bpy" in locals():
+ import imp
+ imp.reload(Boltfactory)
+else:
+ from add_mesh_BoltFactory import Boltfactory
+
+import bpy
+
+################################################################################
+##### REGISTER #####
+
+def add_mesh_bolt_button(self, context):
+ self.layout.operator(Boltfactory.add_mesh_bolt.bl_idname, text="Bolt", icon="PLUGIN")
+
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ bpy.types.INFO_MT_mesh_add.append(add_mesh_bolt_button)
+ #bpy.types.VIEW3D_PT_tools_objectmode.prepend(add_mesh_bolt_button) #just for testing
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+ bpy.types.INFO_MT_mesh_add.remove(add_mesh_bolt_button)
+ #bpy.types.VIEW3D_PT_tools_objectmode.remove(add_mesh_bolt_button) #just for testing
+
+if __name__ == "__main__":
+ register()
diff --git a/add_mesh_BoltFactory/createMesh.py b/add_mesh_BoltFactory/createMesh.py
new file mode 100644
index 00000000..1ff82229
--- /dev/null
+++ b/add_mesh_BoltFactory/createMesh.py
@@ -0,0 +1,2195 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+
import os #remove this  (NOTE(review): 'os' appears unused in this module)
import bpy

# The math module was renamed between Blender releases; try the 2.5x name
# first and fall back to the old 2.4x one.
try:
    import mathutils
    MATHUTILS = mathutils
except:
    import Mathutils
    MATHUTILS = Mathutils

from math import *
from bpy.props import IntProperty, FloatProperty, EnumProperty
from itertools import *

# UI width (pixels) below which the operator panel uses its narrow layout.
NARROW_UI = 180
# Upper bound for the operator's numeric input properties.
MAX_INPUT_NUMBER = 50

#Global_Scale = 0.001 #1 blender unit = X mm
GLOBAL_SCALE = 0.1 #1 blender unit = X mm
#Global_Scale = 1.0 #1 blender unit = X mm
+
+
+
+
+# next two utility functions are stolen from import_obj.py
+
def unpack_list(list_of_tuples):
    """Flatten a sequence of sequences into one flat list."""
    return [item for group in list_of_tuples for item in group]
+
def unpack_face_list(list_of_tuples):
    """Flatten face index tuples into Blender's legacy flat face layout.

    Quads whose 4th index is 0 are rotated (a zero in the last slot is not
    allowed by the legacy mesh API), triangles are padded with a trailing 0,
    and anything that is not a tri or quad raises RuntimeError.
    """
    flat = []
    for t in list_of_tuples:
        face = list(t)

        if len(face) not in (3, 4):
            raise RuntimeError("{0} vertices in face.".format(len(face)))

        # rotate indices if the 4th is 0
        if len(face) == 4 and face[3] == 0:
            face = [face[3], face[0], face[1], face[2]]

        if len(face) == 3:
            face.append(0)

        flat.extend(face)

    return flat
+
'''
Remove Doubles takes a list of Verts and a list of Faces and
removes the doubles, much like Blender does in edit mode.
It doesn't have the range function but it will round the coordinates
and remove verts that are very close together. The function
is useful because you can perform a "Remove Doubles" without
having to enter Edit Mode. Having to enter edit mode has the
disadvantage of not being able to interactively change the properties.
'''
+
+
def RemoveDoubles(verts, faces, Decimal_Places=4):
    """Merge vertices whose coordinates agree after rounding.

    Rounds every coordinate to Decimal_Places, maps each distinct rounded
    position to a single new vertex (keeping the first real coordinate
    seen), remaps the faces, and drops faces that collapse below 3 unique
    vertices. Returns (new_verts, new_faces).
    """
    rounded = [tuple(round(c, Decimal_Places) for c in v) for v in verts]

    index_of = {}       # rounded coordinate -> new vertex index
    new_verts = []
    new_faces = []

    for face in faces:
        remapped = []
        for old_index in face:
            key = rounded[old_index]
            if key not in index_of:
                index_of[key] = len(index_of)
                new_verts.append(tuple(verts[old_index]))
            new_index = index_of[key]
            if new_index not in remapped:
                remapped.append(new_index)
        # keep only faces that are still a valid tri or quad
        if len(remapped) in (3, 4):
            new_faces.append(remapped)

    return new_verts, new_faces
+
+
+
+
def Scale_Mesh_Verts(verts, scale_factor):
    """Return a copy of *verts* with every coordinate multiplied by *scale_factor*."""
    return [[x * scale_factor, y * scale_factor, z * scale_factor]
            for x, y, z in verts]
+
+
+
+
+
#Create a 4x4 matrix representing a rotation.
#
#Parameters:
#
#  * angle (float) - The angle of rotation desired, in degrees.
#  * matSize (int) - The size of the rotation matrix to construct.
#                    Only 4x4 matrices are supported.
#  * axisFlag (string) - Possible values:
#       o "x" - x-axis rotation
#       o "y" - y-axis rotation
#       o "z" - z-axis rotation
#
#Returns: Matrix object.
#    A new rotation matrix.
def Simple_RotationMatrix(angle, matSize, axisFlag):
    """Return a 4x4 matrix rotating *angle* degrees (clockwise) about a
    principal axis.

    angle    -- rotation angle in degrees
    matSize  -- must be 4; other sizes only print a warning and a 4x4
                matrix is still returned (kept for compatibility)
    axisFlag -- 'x', 'y' or 'z'

    Raises ValueError for an unknown axisFlag. (The original fell through
    to ``return matrix`` with ``matrix`` unbound, crashing with NameError.)
    """
    if matSize != 4:
        print("Simple_RotationMatrix can only do 4x4")

    q = radians(angle)  # make the rotation go clockwise

    if axisFlag == 'x':
        return MATHUTILS.Matrix(((1,0,0,0), (0,cos(q),sin(q),0), (0,-sin(q),cos(q),0), (0,0,0,1)))
    if axisFlag == 'y':
        return MATHUTILS.Matrix(((cos(q),0,-sin(q),0), (0,1,0,0), (sin(q),0,cos(q),0), (0,0,0,1)))
    if axisFlag == 'z':
        return MATHUTILS.Matrix(((cos(q),sin(q),0,0), (-sin(q),cos(q),0,0), (0,0,1,0), (0,0,0,1)))
    raise ValueError("Simple_RotationMatrix can only do x y z axis")
+
+
+##########################################################################################
+##########################################################################################
+## Converter Functions For Bolt Factory
+##########################################################################################
+##########################################################################################
+
+
def Flat_To_Radius(FLAT):
    """Convert a hexagon's across-flats distance to its circumscribed radius."""
    half_flat = float(FLAT) / 2
    return half_flat / cos(radians(30))
+
def Get_Phillips_Bit_Height(Bit_Dia):
    """Depth of a Phillips recess for the given bit diameter (60-degree walls)."""
    bit_radius = Bit_Dia / 2.0
    flat_half_width = (Bit_Dia * (0.5 / 1.82)) / 2.0
    slope_run = bit_radius - flat_half_width
    return float(tan(radians(60)) * slope_run)
+
+
+##########################################################################################
+##########################################################################################
+## Miscellaneous Utilities
+##########################################################################################
+##########################################################################################
+
+# Returns a list of verts rotated by the given matrix. Used by SpinDup
def Rot_Mesh(verts, matrix):
    """Return copies of *verts* transformed by *matrix* (used by SpinDup)."""
    rotated = (MATHUTILS.Vector(v) * matrix for v in verts)
    return [[vec.x, vec.y, vec.z] for vec in rotated]
+
+# Returns a list of faces that has there index incremented by offset
def Copy_Faces(faces, offset):
    """Return a copy of *faces* with every vertex index shifted by *offset*."""
    return [[index + offset for index in face] for face in faces]
+
+
+# Much like Blenders built in SpinDup.
+def SpinDup(VERTS,FACES,DEGREE,DIVISIONS,AXIS):
+ verts=[]
+ faces=[]
+
+ if DIVISIONS == 0:
+ DIVISIONS = 1
+
+ step = DEGREE/DIVISIONS # set step so pieces * step = degrees in arc
+
+ for i in range(int(DIVISIONS)):
+ rotmat = Simple_RotationMatrix(step*i, 4, AXIS) # 4x4 rotation matrix, 30d about the x axis.
+ Rot = Rot_Mesh(VERTS,rotmat)
+ faces.extend(Copy_Faces(FACES,len(verts)))
+ verts.extend(Rot)
+ return verts,faces
+
+
+
+# Returns a list of verts that have been moved up the z axis by DISTANCE
def Move_Verts_Up_Z(VERTS, DISTANCE):
    """Return a copy of *VERTS* translated along +Z by *DISTANCE*."""
    return [[x, y, z + DISTANCE] for x, y, z in VERTS]
+
+
+# Returns a list of verts and faces that has been mirrored in the AXIS
def Mirror_Verts_Faces(VERTS, FACES, AXIS, FLIP_POINT=0):
    """Mirror a mesh about FLIP_POINT and return (mirrored_verts, mirrored_faces).

    Historical axis naming is preserved: AXIS 'y' reflects the X
    coordinate, 'x' reflects Y, and 'z' reflects Z. Face indices are
    offset by len(VERTS) (the mirrored verts are expected to be appended
    after the originals) and their winding is reversed so normals still
    point outward.
    """
    def _flip(coord):
        # reflect one coordinate about FLIP_POINT (same arithmetic as the
        # original delta form, so float results match exactly)
        return FLIP_POINT - (coord - FLIP_POINT)

    ret_vert = []
    if AXIS == 'y':
        ret_vert = [[_flip(v[0]), v[1], v[2]] for v in VERTS]
    if AXIS == 'x':
        ret_vert = [[v[0], _flip(v[1]), v[2]] for v in VERTS]
    if AXIS == 'z':
        ret_vert = [[v[0], v[1], _flip(v[2])] for v in VERTS]

    offset = len(VERTS)
    ret_face = [[index + offset for index in reversed(face)] for face in FACES]

    return ret_vert, ret_face
+
+
+
+# Returns a list of faces that
+# make up an array of 4 point polygon.
def Build_Face_List_Quads(OFFSET, COLUM, ROW, FLIP=0):
    """Build quad faces over a grid of vertices laid out row by row with
    COLUM+1 vertices per row, starting at index OFFSET.

    FLIP selects the winding order of every quad.
    """
    faces = []
    row_start = 0
    for _ in range(ROW):
        for i in range(COLUM):
            a = OFFSET + row_start + i
            b = OFFSET + row_start + i + (COLUM + 1)
            c = OFFSET + row_start + i + (COLUM + 1) + 1
            d = OFFSET + row_start + i + 1
            faces.append([a, b, c, d] if FLIP else [d, c, b, a])
        row_start += COLUM + 1
    return faces
+
+
+# Returns a list of faces that makes up a fill pattern for a
+# circle
def Fill_Ring_Face(OFFSET, NUM, FACE_DOWN=0):
    """Triangulate a ring of NUM vertices (indices OFFSET..OFFSET+NUM-1)
    into NUM-2 triangles, alternately stepping forward and backward
    around the ring so the fan stays balanced.

    FACE_DOWN reverses the winding of every triangle.
    Returns the face list, or None when NUM < 3.
    """
    if NUM < 3:
        return None

    faces = []
    a, b, c = 1, 2, 0
    for i in range(NUM - 2):
        if i % 2:
            # odd step: advance forward around the ring
            nxt = (c, c + 1, b)
            winding = [c, b, a] if FACE_DOWN else [a, b, c]
        else:
            # even step: advance backward, wrapping from 0 to NUM-1
            nxt = (c, NUM - 1 if c == 0 else c - 1, b)
            winding = [a, b, c] if FACE_DOWN else [c, b, a]
        faces.append([OFFSET + index for index in winding])
        a, b, c = nxt
    return faces
+
+######################################################################################
+##########################################################################################
+##########################################################################################
+## Create Allen Bit
+##########################################################################################
+##########################################################################################
+
+
def Allen_Fill(OFFSET, FLIP=0):
    """Triangles bridging the 36-gon outer rim of the Allen recess
    (indices 0-18 of the half ring) to the half hexagon (indices 19-22).

    OFFSET shifts every index; FLIP reverses each triangle's winding.
    """
    Lookup = [[19,1,0],
              [19,2,1],
              [19,3,2],
              [19,20,3],
              [20,4,3],
              [20,5,4],
              [20,6,5],
              [20,7,6],
              [20,8,7],
              [20,9,8],

              [20,21,9],

              [21,10,9],
              [21,11,10],
              [21,12,11],
              [21,13,12],
              [21,14,13],
              [21,15,14],

              [21,22,15],
              [22,16,15],
              [22,17,16],
              [22,18,17]
              ]
    faces = []
    for tri in Lookup:
        ordered = tri[::-1] if FLIP else tri
        faces.append([OFFSET + index for index in ordered])
    return faces
+
def Allen_Bit_Dia(FLAT_DISTANCE):
    """Outer bit diameter for an Allen recess with the given across-flats
    distance (hex circumradius plus a 5% rim)."""
    flat_radius = (float(FLAT_DISTANCE) / 2.0) / cos(radians(30))
    return (flat_radius * 1.05) * 2.0
+
def Allen_Bit_Dia_To_Flat(DIA):
    """Inverse of Allen_Bit_Dia: across-flats distance for a bit diameter."""
    flat_radius = (DIA / 2.0) / 1.05
    return (flat_radius * cos(radians(30))) * 2.0
+
+
+
def Create_Allen_Bit(FLAT_DISTANCE, HEIGHT):
    # Build the hex (Allen) recess for a bolt head. Half of the geometry
    # is modelled and then mirrored about Y.
    # Returns (verts, faces, outer_bit_diameter).
    Div = 36
    verts = []
    faces = []

    # Hex circumradius, plus a 5% larger rim ring that blends the recess
    # into the head; the rim sits slightly above the hexagon.
    Flat_Radius = (float(FLAT_DISTANCE)/2.0)/cos(radians(30))
    OUTTER_RADIUS = Flat_Radius * 1.05
    Outter_Radius_Height = Flat_Radius * (0.1/5.77)
    FaceStart_Outside = len(verts)
    Deg_Step = 360.0 /float(Div)

    # Half ring of the 36-gon rim.
    for i in range(int(Div/2)+1):    # only do half and mirror later
        x = sin(radians(i*Deg_Step))*OUTTER_RADIUS
        y = cos(radians(i*Deg_Step))*OUTTER_RADIUS
        verts.append([x,y,0])

    FaceStart_Inside = len(verts)

    # Half hexagon (4 verts), sunk below the rim.
    Deg_Step = 360.0 /float(6)
    for i in range(int(6/2)+1):
        x = sin(radians(i*Deg_Step))* Flat_Radius
        y = cos(radians(i*Deg_Step))* Flat_Radius
        verts.append([x,y,0-Outter_Radius_Height])

    faces.extend(Allen_Fill(FaceStart_Outside,0))

    FaceStart_Bottom = len(verts)

    # Bottom of the recess: same half hexagon at depth HEIGHT.
    Deg_Step = 360.0 /float(6)
    for i in range(int(6/2)+1):
        x = sin(radians(i*Deg_Step))* Flat_Radius
        y = cos(radians(i*Deg_Step))* Flat_Radius
        verts.append([x,y,0-HEIGHT])

    # Walls of the recess, then its floor fan.
    faces.extend(Build_Face_List_Quads(FaceStart_Inside,3,1,True))
    faces.extend(Fill_Ring_Face(FaceStart_Bottom,4))

    # Mirror the half we built to complete the recess.
    M_Verts,M_Faces = Mirror_Verts_Faces(verts,faces,'y')
    verts.extend(M_Verts)
    faces.extend(M_Faces)

    return verts,faces,OUTTER_RADIUS * 2.0
+
+
+##########################################################################################
+##########################################################################################
+## Create Phillips Bit
+##########################################################################################
+##########################################################################################
+
+
def Phillips_Fill(OFFSET, FLIP=0):
    """Faces (tris and quads) bridging the outer rim of the Phillips
    recess (indices 0-9 of one quadrant) to the cross profile (10-14)
    and its floor (15-18).

    OFFSET shifts every index; FLIP reverses each face's winding.
    """
    Lookup = [[0,1,10],
              [1,11,10],
              [1,2,11],
              [2,12,11],

              [2,3,12],
              [3,4,12],
              [4,5,12],
              [5,6,12],
              [6,7,12],

              [7,13,12],
              [7,8,13],
              [8,14,13],
              [8,9,14],

              [10,11,16,15],
              [11,12,16],
              [12,13,16],
              [13,14,17,16],
              [15,16,17,18]
              ]
    faces = []
    for entry in Lookup:
        ordered = entry[::-1] if FLIP else entry
        faces.append([OFFSET + index for index in ordered])
    return faces
+
+
+
def Create_Phillips_Bit(FLAT_DIA, FLAT_WIDTH, HEIGHT):
    # Build the Phillips (cross) recess for a bolt head by modelling one
    # quadrant and spinning it 4 times around Z with SpinDup.
    # Returns (verts, faces, outer_bit_diameter).
    Div = 36
    verts = []
    faces = []

    FLAT_RADIUS = FLAT_DIA * 0.5
    OUTTER_RADIUS = FLAT_RADIUS * 1.05    # 5% rim around the recess

    Flat_Half = float(FLAT_WIDTH)/2.0

    # Quarter ring of the 36-gon rim.
    FaceStart_Outside = len(verts)
    Deg_Step = 360.0 /float(Div)
    for i in range(int(Div/4)+1):    # only do half and mirror later
        x = sin(radians(i*Deg_Step))*OUTTER_RADIUS
        y = cos(radians(i*Deg_Step))*OUTTER_RADIUS
        verts.append([x,y,0])

    # One quadrant of the cross; the trailing numbers match the indices
    # used by the Phillips_Fill lookup table.
    # NOTE(review): FaceStart_Inside is assigned but never used.
    FaceStart_Inside = len(verts)
    verts.append([0,FLAT_RADIUS,0]) #10
    verts.append([Flat_Half,FLAT_RADIUS,0]) #11
    verts.append([Flat_Half,Flat_Half,0]) #12
    verts.append([FLAT_RADIUS,Flat_Half,0]) #13
    verts.append([FLAT_RADIUS,0,0]) #14

    # Floor of the recess at depth HEIGHT.
    verts.append([0,Flat_Half,0-HEIGHT]) #15
    verts.append([Flat_Half,Flat_Half,0-HEIGHT]) #16
    verts.append([Flat_Half,0,0-HEIGHT]) #17

    verts.append([0,0,0-HEIGHT]) #18

    faces.extend(Phillips_Fill(FaceStart_Outside,True))

    # Spin the quadrant 4 times to complete the cross.
    Spin_Verts,Spin_Face = SpinDup(verts,faces,360,4,'z')

    return Spin_Verts,Spin_Face,OUTTER_RADIUS * 2
+
+
+##########################################################################################
+##########################################################################################
+## Create Head Types
+##########################################################################################
+##########################################################################################
+
def Max_Pan_Bit_Dia(HEAD_DIA):
    """Largest recess (bit) diameter that fits a pan head of HEAD_DIA,
    derived from the dome X radius used by Create_Pan_Head."""
    x_rad = (HEAD_DIA * 0.5) * 1.976
    return (sin(radians(10)) * x_rad) * 2.0
+
+
def Create_Pan_Head(HOLE_DIA, HEAD_DIA, SHANK_DIA, HEIGHT, RAD1, RAD2, FACE_OFFSET):
    # Build a pan-shaped bolt head as a surface of revolution: one radial
    # profile is created in the X/Z plane, then spun around Z by SpinDup.
    # Returns (verts, faces, head_height).
    # NOTE(review): RAD1/RAD2 are unused and the incoming HEIGHT is
    # overwritten below — the head height is derived from HEAD_DIA.

    DIV = 36
    HOLE_RADIUS = HOLE_DIA * 0.5
    HEAD_RADIUS = HEAD_DIA * 0.5
    SHANK_RADIUS = SHANK_DIA * 0.5

    verts = []
    faces = []
    Row = 0                      # number of profile rings generated
    BEVEL = HEIGHT * 0.01        # NOTE(review): unused
    #Dome_Rad = HEAD_RADIUS * (1.0/1.75)

    # Empirical pan-head proportions, all relative to the head radius
    # (the commented block below shows the original absolute values).
    Dome_Rad = HEAD_RADIUS * 1.12
    RAD_Offset = HEAD_RADIUS * 0.96
    OtherRad = HEAD_RADIUS * 0.16
    OtherRad_X_Offset = HEAD_RADIUS * 0.84
    OtherRad_Z_Offset = HEAD_RADIUS * 0.504
    XRad = HEAD_RADIUS * 1.976
    ZRad = HEAD_RADIUS * 1.768
    EndRad = HEAD_RADIUS * 0.284
    EndZOffset = HEAD_RADIUS * 0.432
    HEIGHT = HEAD_RADIUS * 0.59

    # Dome_Rad = 5.6
    # RAD_Offset = 4.9
    # OtherRad = 0.8
    # OtherRad_X_Offset = 4.2
    # OtherRad_Z_Offset = 2.52
    # XRad = 9.88
    # ZRad = 8.84
    # EndRad = 1.42
    # EndZOffset = 2.16
    # HEIGHT = 2.95

    FaceStart = FACE_OFFSET

    # Start the profile at the recess-hole radius on the dome arc.
    z = cos(radians(10))*ZRad
    verts.append([HOLE_RADIUS,0.0,(0.0-ZRad)+z])
    Start_Height = 0 - ((0.0-ZRad)+z)
    Row += 1

    # Shallow elliptical dome.
    #for i in range(0,30,10): was 0 to 30 more work needed to make this look good.
    for i in range(10,30,10):
        x = sin(radians(i))*XRad
        z = cos(radians(i))*ZRad
        verts.append([x,0.0,(0.0-ZRad)+z])
        Row += 1

    # Rounded outer edge, clamped so it never dips below the head height.
    for i in range(20,140,10):
        x = sin(radians(i))*EndRad
        z = cos(radians(i))*EndRad
        if ((0.0 - EndZOffset)+z) < (0.0-HEIGHT):
            verts.append([(HEAD_RADIUS -EndRad)+x,0.0,0.0 - HEIGHT])
        else:
            verts.append([(HEAD_RADIUS -EndRad)+x,0.0,(0.0 - EndZOffset)+z])
        Row += 1

    # Flat underside in to the shank, then a duplicate ring that lines up
    # with the shank after the whole head is shifted up by Start_Height.
    verts.append([SHANK_RADIUS,0.0,(0.0-HEIGHT)])
    Row += 1

    verts.append([SHANK_RADIUS,0.0,(0.0-HEIGHT)-Start_Height])
    Row += 1

    sVerts,sFaces = SpinDup(verts,faces,360,DIV,'z')
    sVerts.extend(verts)    #add the start verts to the Spin verts to complete the loop

    faces.extend(Build_Face_List_Quads(FaceStart,Row-1,DIV))

    Global_Head_Height = HEIGHT ;    # NOTE(review): unused local

    # Shift everything up so the top of the head sits at Z = 0.
    return Move_Verts_Up_Z(sVerts,Start_Height),faces,HEIGHT
+
+
+
def Create_Dome_Head(HOLE_DIA, HEAD_DIA, SHANK_DIA, HEIGHT, RAD1, RAD2, FACE_OFFSET):
    # Build a dome-shaped bolt head as a surface of revolution.
    # Returns (verts, faces, dome_height).
    # NOTE(review): RAD1/RAD2 are unused; HEIGHT is only used for the
    # unused BEVEL — the profile is derived entirely from HEAD_DIA.
    DIV = 36
    HOLE_RADIUS = HOLE_DIA * 0.5
    HEAD_RADIUS = HEAD_DIA * 0.5
    SHANK_RADIUS = SHANK_DIA * 0.5

    verts = []
    faces = []
    Row = 0                      # number of profile rings generated
    BEVEL = HEIGHT * 0.01        # NOTE(review): unused
    #Dome_Rad = HEAD_RADIUS * (1.0/1.75)

    # Empirical dome proportions, relative to the head radius (the
    # commented block below shows the original absolute values).
    Dome_Rad = HEAD_RADIUS * 1.12
    #Head_Height = HEAD_RADIUS * 0.78
    RAD_Offset = HEAD_RADIUS * 0.98
    Dome_Height = HEAD_RADIUS * 0.64
    OtherRad = HEAD_RADIUS * 0.16
    OtherRad_X_Offset = HEAD_RADIUS * 0.84
    OtherRad_Z_Offset = HEAD_RADIUS * 0.504

    # Dome_Rad = 5.6
    # RAD_Offset = 4.9
    # Dome_Height = 3.2
    # OtherRad = 0.8
    # OtherRad_X_Offset = 4.2
    # OtherRad_Z_Offset = 2.52
    #

    FaceStart = FACE_OFFSET

    verts.append([HOLE_RADIUS,0.0,0.0])
    Row += 1

    # Main dome arc; only keep points at or below Z = 0.
    for i in range(0,60,10):
        x = sin(radians(i))*Dome_Rad
        z = cos(radians(i))*Dome_Rad
        if ((0.0-RAD_Offset)+z) <= 0:
            verts.append([x,0.0,(0.0-RAD_Offset)+z])
            Row += 1

    # Rounded lip blending the dome into the shank, clamped at the dome
    # height.
    for i in range(60,160,10):
        x = sin(radians(i))*OtherRad
        z = cos(radians(i))*OtherRad
        z = (0.0-OtherRad_Z_Offset)+z
        if z < (0.0-Dome_Height):
            z = (0.0-Dome_Height)
        verts.append([OtherRad_X_Offset+x,0.0,z])
        Row += 1

    verts.append([SHANK_RADIUS,0.0,(0.0-Dome_Height)])
    Row += 1

    sVerts,sFaces = SpinDup(verts,faces,360,DIV,'z')
    sVerts.extend(verts)    #add the start verts to the Spin verts to complete the loop

    faces.extend(Build_Face_List_Quads(FaceStart,Row-1,DIV))

    return sVerts,faces,Dome_Height
+
+
+
def Create_CounterSink_Head(HOLE_DIA, HEAD_DIA, SHANK_DIA, HEIGHT, RAD1):
    # Build a countersunk (flat-top, 60-degree conical) bolt head as a
    # surface of revolution. Returns (verts, faces, head_height).
    # The incoming HEIGHT is recomputed from the countersink angle below.
    DIV = 36

    HOLE_RADIUS = HOLE_DIA * 0.5
    HEAD_RADIUS = HEAD_DIA * 0.5
    SHANK_RADIUS = SHANK_DIA * 0.5

    verts = []
    faces = []
    Row = 0
    BEVEL = HEIGHT * 0.01    # NOTE(review): unused

    # Height that a 60-degree cone needs to taper from the head radius
    # down to the shank radius.
# HEAD_RADIUS = (HEIGHT/tan(radians(60))) + SHANK_RADIUS
    HEIGHT = tan(radians(60)) * (HEAD_RADIUS - SHANK_RADIUS)
    #print (RAD1)

    FaceStart = len(verts)

    verts.append([HOLE_RADIUS,0.0,0.0])
    Row += 1

    #rad — small rounding (radius RAD1) between the top face and the cone
    for i in range(0,100,10):
        x = sin(radians(i))*RAD1
        z = cos(radians(i))*RAD1
        verts.append([(HEAD_RADIUS-RAD1)+x,0.0,(0.0-RAD1)+z])
        Row += 1

    verts.append([SHANK_RADIUS,0.0,0.0-HEIGHT])
    Row += 1

    sVerts,sFaces = SpinDup(verts,faces,360,DIV,'z')
    sVerts.extend(verts)    #add the start verts to the Spin verts to complete the loop

    faces.extend(Build_Face_List_Quads(FaceStart,Row-1,DIV,1))

    return sVerts,faces,HEIGHT
+
+
+
+
def Create_Cap_Head(HOLE_DIA, HEAD_DIA, SHANK_DIA, HEIGHT, RAD1, RAD2):
    # Build a cylindrical cap (socket-head) bolt head as a surface of
    # revolution. Returns (verts, faces, total_height); the total height
    # includes the RAD2 fillet under the head.
    DIV = 36

    HOLE_RADIUS = HOLE_DIA * 0.5
    HEAD_RADIUS = HEAD_DIA * 0.5
    SHANK_RADIUS = SHANK_DIA * 0.5

    verts = []
    faces = []
    Row = 0
    BEVEL = HEIGHT * 0.01    # small chamfer on the bottom edge of the cap

    FaceStart = len(verts)

    verts.append([HOLE_RADIUS,0.0,0.0])
    Row += 1

    #rad — rounding (radius RAD1) between the top face and the side wall
    for i in range(0,100,10):
        x = sin(radians(i))*RAD1
        z = cos(radians(i))*RAD1
        verts.append([(HEAD_RADIUS-RAD1)+x,0.0,(0.0-RAD1)+z])
        Row += 1

    # Straight side wall down to the chamfered bottom edge.
    verts.append([HEAD_RADIUS,0.0,0.0-HEIGHT+BEVEL])
    Row += 1

    verts.append([HEAD_RADIUS-BEVEL,0.0,0.0-HEIGHT])
    Row += 1

    #rad2 — fillet from the head underside into the shank
    for i in range(0,100,10):
        x = sin(radians(i))*RAD2
        z = cos(radians(i))*RAD2
        verts.append([(SHANK_RADIUS+RAD2)-x,0.0,(0.0-HEIGHT-RAD2)+z])
        Row += 1

    sVerts,sFaces = SpinDup(verts,faces,360,DIV,'z')
    sVerts.extend(verts)    #add the start verts to the Spin verts to complete the loop

    faces.extend(Build_Face_List_Quads(FaceStart,Row-1,DIV))

    return sVerts,faces,HEIGHT+RAD2
+
+
def Create_Hex_Head(FLAT, HOLE_DIA, SHANK_DIA, HEIGHT):
    # Build a hexagonal bolt head. One 30-degree wedge (4 vertex columns
    # per ring) is modelled, mirrored about Y, then the 60-degree segment
    # is spun 6 times around Z by SpinDup.
    # Returns (verts, faces, head_height).

    verts = []
    faces = []
    HOLE_RADIUS = HOLE_DIA * 0.5
    Half_Flat = FLAT/2
    TopBevelRadius = Half_Flat - (Half_Flat* (0.05/8))
    Undercut_Height = (Half_Flat* (0.05/8))
    Shank_Bevel = (Half_Flat* (0.05/8))
    Flat_Height = HEIGHT - Undercut_Height - Shank_Bevel
    #Undercut_Height = 5
    SHANK_RADIUS = SHANK_DIA/2
    Row = 0;

    verts.append([0.0,0.0,0.0])    # NOTE(review): placeholder vertex, not referenced by any face

    FaceStart = len(verts)
    #inner hole — quarter arc of 4 verts at the recess radius
    x = sin(radians(0))*HOLE_RADIUS
    y = cos(radians(0))*HOLE_RADIUS
    verts.append([x,y,0.0])

    x = sin(radians(60/6))*HOLE_RADIUS
    y = cos(radians(60/6))*HOLE_RADIUS
    verts.append([x,y,0.0])

    x = sin(radians(60/3))*HOLE_RADIUS
    y = cos(radians(60/3))*HOLE_RADIUS
    verts.append([x,y,0.0])

    x = sin(radians(60/2))*HOLE_RADIUS
    y = cos(radians(60/2))*HOLE_RADIUS
    verts.append([x,y,0.0])
    Row += 1

    #bevel — top chamfer ring; vec1..vec4 are reused below to size how
    # far the flats drop so the chamfer stays even.
    x = sin(radians(0))*TopBevelRadius
    y = cos(radians(0))*TopBevelRadius
    vec1 = MATHUTILS.Vector([x,y,0.0])
    verts.append([x,y,0.0])

    x = sin(radians(60/6))*TopBevelRadius
    y = cos(radians(60/6))*TopBevelRadius
    vec2 = MATHUTILS.Vector([x,y,0.0])
    verts.append([x,y,0.0])

    x = sin(radians(60/3))*TopBevelRadius
    y = cos(radians(60/3))*TopBevelRadius
    vec3 = MATHUTILS.Vector([x,y,0.0])
    verts.append([x,y,0.0])

    x = sin(radians(60/2))*TopBevelRadius
    y = cos(radians(60/2))*TopBevelRadius
    vec4 = MATHUTILS.Vector([x,y,0.0])
    verts.append([x,y,0.0])
    Row += 1

    #Flats — the hexagon side at y == Half_Flat; Z drops by the distance
    # from the matching bevel vertex.
    x = tan(radians(0))*Half_Flat
    dvec = vec1 - MATHUTILS.Vector([x,Half_Flat,0.0])
    verts.append([x,Half_Flat,-dvec.length])

    x = tan(radians(60/6))*Half_Flat
    dvec = vec2 - MATHUTILS.Vector([x,Half_Flat,0.0])
    verts.append([x,Half_Flat,-dvec.length])

    x = tan(radians(60/3))*Half_Flat
    dvec = vec3 - MATHUTILS.Vector([x,Half_Flat,0.0])
    Lowest_Point = -dvec.length
    verts.append([x,Half_Flat,-dvec.length])

    x = tan(radians(60/2))*Half_Flat
    dvec = vec4 - MATHUTILS.Vector([x,Half_Flat,0.0])
    Lowest_Point = -dvec.length
    verts.append([x,Half_Flat,-dvec.length])
    Row += 1

    #down Bits Tri — flatten the chamfer at the lowest flat point
    x = tan(radians(0))*Half_Flat
    verts.append([x,Half_Flat,Lowest_Point])

    x = tan(radians(60/6))*Half_Flat
    verts.append([x,Half_Flat,Lowest_Point])

    x = tan(radians(60/3))*Half_Flat
    verts.append([x,Half_Flat,Lowest_Point])

    x = tan(radians(60/2))*Half_Flat
    verts.append([x,Half_Flat,Lowest_Point])
    Row += 1

    #down Bits — straight hexagon side down to the bottom of the flats
    x = tan(radians(0))*Half_Flat
    verts.append([x,Half_Flat,-Flat_Height])

    x = tan(radians(60/6))*Half_Flat
    verts.append([x,Half_Flat,-Flat_Height])

    x = tan(radians(60/3))*Half_Flat
    verts.append([x,Half_Flat,-Flat_Height])

    x = tan(radians(60/2))*Half_Flat
    verts.append([x,Half_Flat,-Flat_Height])
    Row += 1

    #under cut — circular ring under the head.
    # NOTE(review): from here on the vecN assignments are never read, and
    # vec3 is assigned where vec4 was presumably intended.
    x = sin(radians(0))*Half_Flat
    y = cos(radians(0))*Half_Flat
    vec1 = MATHUTILS.Vector([x,y,0.0])
    verts.append([x,y,-Flat_Height])

    x = sin(radians(60/6))*Half_Flat
    y = cos(radians(60/6))*Half_Flat
    vec2 = MATHUTILS.Vector([x,y,0.0])
    verts.append([x,y,-Flat_Height])

    x = sin(radians(60/3))*Half_Flat
    y = cos(radians(60/3))*Half_Flat
    vec3 = MATHUTILS.Vector([x,y,0.0])
    verts.append([x,y,-Flat_Height])

    x = sin(radians(60/2))*Half_Flat
    y = cos(radians(60/2))*Half_Flat
    vec3 = MATHUTILS.Vector([x,y,0.0])
    verts.append([x,y,-Flat_Height])
    Row += 1

    #under cut down bit
    x = sin(radians(0))*Half_Flat
    y = cos(radians(0))*Half_Flat
    vec1 = MATHUTILS.Vector([x,y,0.0])
    verts.append([x,y,-Flat_Height-Undercut_Height])

    x = sin(radians(60/6))*Half_Flat
    y = cos(radians(60/6))*Half_Flat
    vec2 = MATHUTILS.Vector([x,y,0.0])
    verts.append([x,y,-Flat_Height-Undercut_Height])

    x = sin(radians(60/3))*Half_Flat
    y = cos(radians(60/3))*Half_Flat
    vec3 = MATHUTILS.Vector([x,y,0.0])
    verts.append([x,y,-Flat_Height-Undercut_Height])

    x = sin(radians(60/2))*Half_Flat
    y = cos(radians(60/2))*Half_Flat
    vec3 = MATHUTILS.Vector([x,y,0.0])
    verts.append([x,y,-Flat_Height-Undercut_Height])
    Row += 1

    #under cut to Shank BEVEAL
    x = sin(radians(0))*(SHANK_RADIUS+Shank_Bevel)
    y = cos(radians(0))*(SHANK_RADIUS+Shank_Bevel)
    vec1 = MATHUTILS.Vector([x,y,0.0])
    verts.append([x,y,-Flat_Height-Undercut_Height])

    x = sin(radians(60/6))*(SHANK_RADIUS+Shank_Bevel)
    y = cos(radians(60/6))*(SHANK_RADIUS+Shank_Bevel)
    vec2 = MATHUTILS.Vector([x,y,0.0])
    verts.append([x,y,-Flat_Height-Undercut_Height])

    x = sin(radians(60/3))*(SHANK_RADIUS+Shank_Bevel)
    y = cos(radians(60/3))*(SHANK_RADIUS+Shank_Bevel)
    vec3 = MATHUTILS.Vector([x,y,0.0])
    verts.append([x,y,-Flat_Height-Undercut_Height])

    x = sin(radians(60/2))*(SHANK_RADIUS+Shank_Bevel)
    y = cos(radians(60/2))*(SHANK_RADIUS+Shank_Bevel)
    vec3 = MATHUTILS.Vector([x,y,0.0])
    verts.append([x,y,-Flat_Height-Undercut_Height])
    Row += 1

    #under cut to Shank BEVEAL — final ring at the shank radius
    x = sin(radians(0))*SHANK_RADIUS
    y = cos(radians(0))*SHANK_RADIUS
    vec1 = MATHUTILS.Vector([x,y,0.0])
    verts.append([x,y,-Flat_Height-Undercut_Height-Shank_Bevel])

    x = sin(radians(60/6))*SHANK_RADIUS
    y = cos(radians(60/6))*SHANK_RADIUS
    vec2 = MATHUTILS.Vector([x,y,0.0])
    verts.append([x,y,-Flat_Height-Undercut_Height-Shank_Bevel])

    x = sin(radians(60/3))*SHANK_RADIUS
    y = cos(radians(60/3))*SHANK_RADIUS
    vec3 = MATHUTILS.Vector([x,y,0.0])
    verts.append([x,y,-Flat_Height-Undercut_Height-Shank_Bevel])

    x = sin(radians(60/2))*SHANK_RADIUS
    y = cos(radians(60/2))*SHANK_RADIUS
    vec3 = MATHUTILS.Vector([x,y,0.0])
    verts.append([x,y,-Flat_Height-Undercut_Height-Shank_Bevel])
    Row += 1

    #Global_Head_Height = 0 - (-HEIGHT-0.1)
    faces.extend(Build_Face_List_Quads(FaceStart,3,Row - 1))

    # Mirror the wedge about Y, then spin the 60-degree segment 6 times.
    Mirror_Verts,Mirror_Faces = Mirror_Verts_Faces(verts,faces,'y')
    verts.extend(Mirror_Verts)
    faces.extend(Mirror_Faces)

    Spin_Verts,Spin_Faces = SpinDup(verts,faces,360,6,'z')

    return Spin_Verts,Spin_Faces,0 - (-HEIGHT)
+
+
+##########################################################################################
+##########################################################################################
+## Create External Thread
+##########################################################################################
+##########################################################################################
+
+
+
def Thread_Start3(verts, INNER_RADIUS, OUTTER_RADIUS, PITCH, DIV, CREST_PERCENT, ROOT_PERCENT, Height_Offset):
    # Append the run-in (tapered start) of an external thread to *verts*,
    # mutating the list in place. Four helical rings (crest, crest->root,
    # root, root->crest) are appended per pitch, for 4 pitches.
    # Returns (rows_added, new_height_offset).

    Ret_Row = 0;

    Half_Pitch = float(PITCH)/2    # NOTE(review): unused
    Height_Start = Height_Offset - PITCH
    Height_Step = float(PITCH)/float(DIV)
    Deg_Step = 360.0 /float(DIV)

    # Axial extents of the thread profile segments per pitch.
    Crest_Height = float(PITCH) * float(CREST_PERCENT)/float(100)
    Root_Height = float(PITCH) * float(ROOT_PERCENT)/float(100)
    Root_to_Crest_Height = Crest_to_Root_Height = (float(PITCH) - (Crest_Height + Root_Height))/2.0

#theard start
    # Rank ramps the radius across the first turn (j == 0) so the thread
    # fades in from the outer radius toward the root radius.
    Rank = float(OUTTER_RADIUS - INNER_RADIUS)/float(DIV)
    for j in range(4):

        # crest ring (Z clamped to Height_Start so the first turn is flat)
        for i in range(DIV+1):
            z = Height_Offset - (Height_Step*i)
            if z > Height_Start:
                z = Height_Start
            x = sin(radians(i*Deg_Step))*OUTTER_RADIUS
            y = cos(radians(i*Deg_Step))*OUTTER_RADIUS
            verts.append([x,y,z])
        Height_Offset -= Crest_Height
        Ret_Row += 1

        # crest -> root transition ring
        for i in range(DIV+1):
            z = Height_Offset - (Height_Step*i)
            if z > Height_Start:
                z = Height_Start

            x = sin(radians(i*Deg_Step))*OUTTER_RADIUS
            y = cos(radians(i*Deg_Step))*OUTTER_RADIUS
            verts.append([x,y,z ])
        Height_Offset -= Crest_to_Root_Height
        Ret_Row += 1

        # root ring (first turn tapered via Rank)
        for i in range(DIV+1):
            z = Height_Offset - (Height_Step*i)
            if z > Height_Start:
                z = Height_Start

            x = sin(radians(i*Deg_Step))*INNER_RADIUS
            y = cos(radians(i*Deg_Step))*INNER_RADIUS
            if j == 0:
                x = sin(radians(i*Deg_Step))*(OUTTER_RADIUS - (i*Rank))
                y = cos(radians(i*Deg_Step))*(OUTTER_RADIUS - (i*Rank))
            verts.append([x,y,z ])
        Height_Offset -= Root_Height
        Ret_Row += 1

        # root -> crest transition ring (also tapered on the first turn)
        for i in range(DIV+1):
            z = Height_Offset - (Height_Step*i)
            if z > Height_Start:
                z = Height_Start

            x = sin(radians(i*Deg_Step))*INNER_RADIUS
            y = cos(radians(i*Deg_Step))*INNER_RADIUS

            if j == 0:
                x = sin(radians(i*Deg_Step))*(OUTTER_RADIUS - (i*Rank))
                y = cos(radians(i*Deg_Step))*(OUTTER_RADIUS - (i*Rank))
            verts.append([x,y,z ])
        Height_Offset -= Root_to_Crest_Height
        Ret_Row += 1

    return Ret_Row,Height_Offset
+
+
def Create_Shank_Verts(START_DIA, OUTTER_DIA, LENGTH, Z_LOCATION=0):
    """Build the vertex rings of the unthreaded shank.

    Emits two 37-vertex rings at START_DIA/2: the top of the straight
    section at Z_LOCATION and the bottom where a 31-degree taper toward
    OUTTER_DIA/2 begins (the taper is skipped if it would not fit within
    LENGTH). Returns (verts, row_count, final_height_offset).
    """
    DIV = 36

    start_radius = START_DIA / 2
    outter_radius = OUTTER_DIA / 2

    # axial length consumed by a 31-degree taper between the two radii
    taper_length = abs(start_radius - outter_radius) / tan(radians(31))
    if taper_length > LENGTH:
        taper_length = 0
    straight_length = LENGTH - taper_length

    deg_step = 360.0 / float(DIV)

    verts = []
    row = 0
    height_offset = Z_LOCATION

    # one ring at the top of the straight section, one at its bottom
    for drop in (straight_length, taper_length):
        for i in range(DIV + 1):
            angle = radians(i * deg_step)
            verts.append([sin(angle) * start_radius,
                          cos(angle) * start_radius,
                          height_offset - 0])
        height_offset -= drop
        row += 1

    return verts, row, height_offset
+
+
def Create_Thread_Start_Verts(INNER_DIA, OUTTER_DIA, PITCH, CREST_PERCENT, ROOT_PERCENT, Z_LOCATION = 0):
    # Build the blunt start of an external thread: one full turn clamped
    # flat at Z_LOCATION, followed by two turns whose first pass tapers
    # out from the outer radius to the full crest/root profile.
    # Returns (verts, row_count, final_height_offset).

    verts = []
    DIV = 36

    INNER_RADIUS = INNER_DIA/2
    OUTTER_RADIUS = OUTTER_DIA/2

    Half_Pitch = float(PITCH)/2    # NOTE(review): unused
    Deg_Step = 360.0 /float(DIV)
    Height_Step = float(PITCH)/float(DIV)

    Row = 0

    # Deepest Z generated so far (tracked but not returned).
    Lowest_Z_Vert = 0;

    Height_Offset = Z_LOCATION

    Height_Start = Height_Offset

    # Axial extents of the thread profile segments per pitch.
    Crest_Height = float(PITCH) * float(CREST_PERCENT)/float(100)
    Root_Height = float(PITCH) * float(ROOT_PERCENT)/float(100)
    Root_to_Crest_Height = Crest_to_Root_Height = (float(PITCH) - (Crest_Height + Root_Height))/2.0

    # Rank ramps the radius across one turn for the fade-in pass below.
    Rank = float(OUTTER_RADIUS - INNER_RADIUS)/float(DIV)

    Height_Offset = Z_LOCATION + PITCH
    Cut_off = Z_LOCATION

    # First turn: all four rings at the outer radius, clamped at Cut_off
    # so the top of the thread is flat.
    for j in range(1):

        for i in range(DIV+1):
            x = sin(radians(i*Deg_Step))*OUTTER_RADIUS
            y = cos(radians(i*Deg_Step))*OUTTER_RADIUS
            z = Height_Offset - (Height_Step*i)
            if z > Cut_off : z = Cut_off
            verts.append([x,y,z])
            Lowest_Z_Vert = min(Lowest_Z_Vert,z)
        Height_Offset -= Crest_Height
        Row += 1

        for i in range(DIV+1):
            x = sin(radians(i*Deg_Step))*OUTTER_RADIUS
            y = cos(radians(i*Deg_Step))*OUTTER_RADIUS
            z = Height_Offset - (Height_Step*i)
            if z > Cut_off : z = Cut_off
            verts.append([x,y,z])
            Lowest_Z_Vert = min(Lowest_Z_Vert,z)
        Height_Offset -= Crest_to_Root_Height
        Row += 1

        for i in range(DIV+1):
            x = sin(radians(i*Deg_Step))*OUTTER_RADIUS
            y = cos(radians(i*Deg_Step))*OUTTER_RADIUS
            z = Height_Offset - (Height_Step*i)
            if z > Cut_off : z = Cut_off
            verts.append([x,y,z])
            Lowest_Z_Vert = min(Lowest_Z_Vert,z)
        Height_Offset -= Root_Height
        Row += 1

        for i in range(DIV+1):
            x = sin(radians(i*Deg_Step))*OUTTER_RADIUS
            y = cos(radians(i*Deg_Step))*OUTTER_RADIUS
            z = Height_Offset - (Height_Step*i)
            if z > Cut_off : z = Cut_off
            verts.append([x,y,z])
            Lowest_Z_Vert = min(Lowest_Z_Vert,z)
        Height_Offset -= Root_to_Crest_Height
        Row += 1

    # Next two turns: normal thread profile; the j == 0 pass tapers the
    # root rings via Rank so the thread fades in smoothly.
    for j in range(2):
        # crest ring (clamped at Height_Start)
        for i in range(DIV+1):
            z = Height_Offset - (Height_Step*i)
            if z > Height_Start:
                z = Height_Start
            x = sin(radians(i*Deg_Step))*OUTTER_RADIUS
            y = cos(radians(i*Deg_Step))*OUTTER_RADIUS
            verts.append([x,y,z])
            Lowest_Z_Vert = min(Lowest_Z_Vert,z)
        Height_Offset -= Crest_Height
        Row += 1

        # crest -> root transition ring
        for i in range(DIV+1):
            z = Height_Offset - (Height_Step*i)
            if z > Height_Start:
                z = Height_Start

            x = sin(radians(i*Deg_Step))*OUTTER_RADIUS
            y = cos(radians(i*Deg_Step))*OUTTER_RADIUS
            verts.append([x,y,z ])
            Lowest_Z_Vert = min(Lowest_Z_Vert,z)
        Height_Offset -= Crest_to_Root_Height
        Row += 1

        # root ring (tapered on the first pass)
        for i in range(DIV+1):
            z = Height_Offset - (Height_Step*i)
            if z > Height_Start:
                z = Height_Start

            x = sin(radians(i*Deg_Step))*INNER_RADIUS
            y = cos(radians(i*Deg_Step))*INNER_RADIUS
            if j == 0:
                x = sin(radians(i*Deg_Step))*(OUTTER_RADIUS - (i*Rank))
                y = cos(radians(i*Deg_Step))*(OUTTER_RADIUS - (i*Rank))
            verts.append([x,y,z ])
            Lowest_Z_Vert = min(Lowest_Z_Vert,z)
        Height_Offset -= Root_Height
        Row += 1

        # root -> crest transition ring (tapered on the first pass)
        for i in range(DIV+1):
            z = Height_Offset - (Height_Step*i)
            if z > Height_Start:
                z = Height_Start

            x = sin(radians(i*Deg_Step))*INNER_RADIUS
            y = cos(radians(i*Deg_Step))*INNER_RADIUS

            if j == 0:
                x = sin(radians(i*Deg_Step))*(OUTTER_RADIUS - (i*Rank))
                y = cos(radians(i*Deg_Step))*(OUTTER_RADIUS - (i*Rank))
            verts.append([x,y,z ])
            Lowest_Z_Vert = min(Lowest_Z_Vert,z)
        Height_Offset -= Root_to_Crest_Height
        Row += 1

    return verts,Row,Height_Offset
+
+
+
def Create_Thread_Verts(INNER_DIA, OUTTER_DIA, PITCH, HEIGHT, CREST_PERCENT, ROOT_PERCENT, Z_LOCATION = 0):
    # Build the main helical body of an external thread: four rings
    # (crest, crest->root, root, root->crest) per full turn.
    # Returns (verts, row_count, final_height_offset).
    verts = []

    DIV = 36

    INNER_RADIUS = INNER_DIA/2
    OUTTER_RADIUS = OUTTER_DIA/2

    Half_Pitch = float(PITCH)/2    # NOTE(review): unused
    Deg_Step = 360.0 /float(DIV)
    Height_Step = float(PITCH)/float(DIV)

    # Number of full turns left after reserving room for the start and
    # end sections built by the other thread helpers.
    NUM_OF_START_THREADS = 4.0
    NUM_OF_END_THREADS = 3.0
    Num = int((HEIGHT- ((NUM_OF_START_THREADS*PITCH) + (NUM_OF_END_THREADS*PITCH) ))/PITCH)
    Row = 0

    # Axial extents of the thread profile segments per pitch.
    Crest_Height = float(PITCH) * float(CREST_PERCENT)/float(100)
    Root_Height = float(PITCH) * float(ROOT_PERCENT)/float(100)
    Root_to_Crest_Height = Crest_to_Root_Height = (float(PITCH) - (Crest_Height + Root_Height))/2.0

    Height_Offset = Z_LOCATION

    Lowest_Z_Vert = 0;    # NOTE(review): tracked but never used
    FaceStart = len(verts)    # NOTE(review): unused

    for j in range(Num):

        # crest ring
        for i in range(DIV+1):
            x = sin(radians(i*Deg_Step))*OUTTER_RADIUS
            y = cos(radians(i*Deg_Step))*OUTTER_RADIUS
            z = Height_Offset - (Height_Step*i)
            verts.append([x,y,z])
            Lowest_Z_Vert = min(Lowest_Z_Vert,z)
        Height_Offset -= Crest_Height
        Row += 1

        # crest -> root transition ring
        for i in range(DIV+1):
            x = sin(radians(i*Deg_Step))*OUTTER_RADIUS
            y = cos(radians(i*Deg_Step))*OUTTER_RADIUS
            z = Height_Offset - (Height_Step*i)
            verts.append([x,y,z])
            Lowest_Z_Vert = min(Lowest_Z_Vert,z)
        Height_Offset -= Crest_to_Root_Height
        Row += 1

        # root ring
        for i in range(DIV+1):
            x = sin(radians(i*Deg_Step))*INNER_RADIUS
            y = cos(radians(i*Deg_Step))*INNER_RADIUS
            z = Height_Offset - (Height_Step*i)
            verts.append([x,y,z])
            Lowest_Z_Vert = min(Lowest_Z_Vert,z)
        Height_Offset -= Root_Height
        Row += 1

        # root -> crest transition ring
        for i in range(DIV+1):
            x = sin(radians(i*Deg_Step))*INNER_RADIUS
            y = cos(radians(i*Deg_Step))*INNER_RADIUS
            z = Height_Offset - (Height_Step*i)
            verts.append([x,y,z])
            Lowest_Z_Vert = min(Lowest_Z_Vert,z)
        Height_Offset -= Root_to_Crest_Height
        Row += 1

    return verts,Row,Height_Offset
+
+
+
def Create_Thread_End_Verts(INNER_DIA,OUTTER_DIA,PITCH,CREST_PERCENT,ROOT_PERCENT,Z_LOCATION = 0):
    """Create the tapered run-out rings at the tip of an external thread.

    Four turns are generated; below Tapper_Height_Start the radius shrinks
    linearly with depth and every z is clamped at Max_Height, closing the
    thread into a rounded-off tip.

    Returns (verts, row_count, next_z_offset, lowest_z) where lowest_z is
    the z of the lowest vertex created.
    """
    verts = []

    DIV = 36  # segments per revolution

    INNER_RADIUS = INNER_DIA/2
    OUTTER_RADIUS = OUTTER_DIA/2

    Deg_Step = 360.0 /float(DIV)
    Height_Step = float(PITCH)/float(DIV)

    Crest_Height = float(PITCH) * float(CREST_PERCENT)/float(100)
    Root_Height = float(PITCH) * float(ROOT_PERCENT)/float(100)
    Root_to_Crest_Height = Crest_to_Root_Height = (float(PITCH) - (Crest_Height + Root_Height))/2.0

    Row = 0
    Height_Offset = Z_LOCATION

    # The taper begins two pitches below the section start; the tip is cut
    # flat one further pitch down.
    Tapper_Height_Start = Height_Offset - PITCH - PITCH
    Max_Height = Tapper_Height_Start - PITCH

    Lowest_Z_Vert = 0

    # (radius cap, ring height): crest rings are capped by the major radius,
    # root rings by the minor radius; inside the taper both shrink with z.
    Profile = [(OUTTER_RADIUS, Crest_Height),
               (OUTTER_RADIUS, Crest_to_Root_Height),
               (INNER_RADIUS, Root_Height),
               (INNER_RADIUS, Root_to_Crest_Height)]

    for j in range(4):
        for Radius_Cap, Ring_Height in Profile:
            for i in range(DIV+1):
                z = Height_Offset - (Height_Step*i)
                z = max(z,Max_Height)
                Tapper_Radius = min(Radius_Cap, OUTTER_RADIUS - (Tapper_Height_Start - z))

                x = sin(radians(i*Deg_Step))*(Tapper_Radius)
                y = cos(radians(i*Deg_Step))*(Tapper_Radius)
                verts.append([x,y,z])
                Lowest_Z_Vert = min(Lowest_Z_Vert,z)
            Height_Offset -= Ring_Height
            Row += 1

    return verts,Row,Height_Offset,Lowest_Z_Vert
+
+
+
+
def Create_External_Thread(SHANK_DIA,SHANK_LENGTH,INNER_DIA,OUTTER_DIA,PITCH,LENGTH,CREST_PERCENT,ROOT_PERCENT):
    """Build the complete external thread mesh: unthreaded shank, thread
    run-in, main thread and tapered run-out, stacked from top to bottom.

    Returns (verts, faces, height_above_origin); the last value is the
    distance from the lowest vertex up to z=0, used by the caller to move
    the bolt so it rests on the origin.
    """
    verts = []
    faces = []

    DIV = 36  # segments per revolution; must match the helper builders

    Total_Row = 0
    Face_Start = len(verts)
    Offset = 0.0

    # Each helper returns its verts, the number of rings it added and the
    # z offset at which the next section has to start.
    Shank_Verts,Shank_Row,Offset = Create_Shank_Verts(SHANK_DIA,OUTTER_DIA,SHANK_LENGTH,Offset)
    Total_Row += Shank_Row

    Thread_Start_Verts,Thread_Start_Row,Offset = Create_Thread_Start_Verts(INNER_DIA,OUTTER_DIA,PITCH,CREST_PERCENT,ROOT_PERCENT,Offset)
    Total_Row += Thread_Start_Row

    Thread_Verts,Thread_Row,Offset = Create_Thread_Verts(INNER_DIA,OUTTER_DIA,PITCH,LENGTH,CREST_PERCENT,ROOT_PERCENT,Offset)
    Total_Row += Thread_Row

    Thread_End_Verts,Thread_End_Row,Offset,Lowest_Z_Vert = Create_Thread_End_Verts(INNER_DIA,OUTTER_DIA,PITCH,CREST_PERCENT,ROOT_PERCENT,Offset)
    Total_Row += Thread_End_Row

    verts.extend(Shank_Verts)
    verts.extend(Thread_Start_Verts)
    verts.extend(Thread_Verts)
    verts.extend(Thread_End_Verts)

    # Skin the stacked rings with quads, then close the bottom opening.
    faces.extend(Build_Face_List_Quads(Face_Start,DIV,Total_Row -1,0))
    faces.extend(Fill_Ring_Face(len(verts)-DIV,DIV,1))

    return verts,faces,0.0 - Lowest_Z_Vert
+
+
+##########################################################################################
+##########################################################################################
+## Create Nut
+##########################################################################################
+##########################################################################################
+
def add_Hex_Nut(FLAT,HOLE_DIA,HEIGHT):
    """Create the outer body of a hex nut (the internal thread is built
    separately).

    A small wedge of the top half is modelled — hole rim, top bevel, flat,
    corner drop and side — then mirrored down across its lowest point,
    mirrored across y, and spun six times into the full nut.

    FLAT ...... distance across the nut's flats
    HOLE_DIA .. diameter of the threaded hole
    HEIGHT .... overall nut height

    Returns (verts, faces, TopBevelRadius).  Side effect: stores HEIGHT and
    the bevel radius in the module globals used by the other builders.
    """
    global Global_Head_Height
    global Global_NutRad

    verts = []
    faces = []
    HOLE_RADIUS = HOLE_DIA * 0.5
    Half_Flat = FLAT/2
    Half_Height = HEIGHT/2
    TopBevelRadius = Half_Flat - 0.05

    Global_NutRad = TopBevelRadius

    Row = 0;
    Lowest_Z_Vert = 0.0;

    # Center vertex (kept out of the quad strip; FaceStart skips it).
    verts.append([0.0,0.0,0.0])


    FaceStart = len(verts)
    # Ring 1: inner hole rim, four verts at 0, 10, 20 and 30 degrees.
    # (60/6, 60/3, 60/2 evaluate to 10, 20, 30 — one half hex corner.)

    x = sin(radians(0))*HOLE_RADIUS
    y = cos(radians(0))*HOLE_RADIUS
    verts.append([x,y,0.0])


    x = sin(radians(60/6))*HOLE_RADIUS
    y = cos(radians(60/6))*HOLE_RADIUS
    verts.append([x,y,0.0])


    x = sin(radians(60/3))*HOLE_RADIUS
    y = cos(radians(60/3))*HOLE_RADIUS
    verts.append([x,y,0.0])


    x = sin(radians(60/2))*HOLE_RADIUS
    y = cos(radians(60/2))*HOLE_RADIUS
    verts.append([x,y,0.0])
    Row += 1


    # Ring 2: start of the top bevel; the vectors are kept so the flats
    # below can measure their drop from the bevel edge.

    x = sin(radians(0))*TopBevelRadius
    y = cos(radians(0))*TopBevelRadius
    vec1 = MATHUTILS.Vector([x,y,0.0])
    verts.append([x,y,0.0])


    x = sin(radians(60/6))*TopBevelRadius
    y = cos(radians(60/6))*TopBevelRadius
    vec2 = MATHUTILS.Vector([x,y,0.0])
    verts.append([x,y,0.0])


    x = sin(radians(60/3))*TopBevelRadius
    y = cos(radians(60/3))*TopBevelRadius
    vec3 = MATHUTILS.Vector([x,y,0.0])
    verts.append([x,y,0.0])


    x = sin(radians(60/2))*TopBevelRadius
    y = cos(radians(60/2))*TopBevelRadius
    vec4 = MATHUTILS.Vector([x,y,0.0])
    verts.append([x,y,0.0])
    Row += 1

    # Ring 3: the flat side of the nut at y = Half_Flat; each vert drops by
    # its distance from the matching bevel vert, forming the chamfer.

    x = tan(radians(0))*Half_Flat
    dvec = vec1 - MATHUTILS.Vector([x,Half_Flat,0.0])
    verts.append([x,Half_Flat,-dvec.length])
    Lowest_Z_Vert = min(Lowest_Z_Vert,-dvec.length)


    x = tan(radians(60/6))*Half_Flat
    dvec = vec2 - MATHUTILS.Vector([x,Half_Flat,0.0])
    verts.append([x,Half_Flat,-dvec.length])
    Lowest_Z_Vert = min(Lowest_Z_Vert,-dvec.length)


    x = tan(radians(60/3))*Half_Flat
    dvec = vec3 - MATHUTILS.Vector([x,Half_Flat,0.0])
    Lowest_Point = -dvec.length
    verts.append([x,Half_Flat,-dvec.length])
    Lowest_Z_Vert = min(Lowest_Z_Vert,-dvec.length)

    x = tan(radians(60/2))*Half_Flat
    dvec = vec4 - MATHUTILS.Vector([x,Half_Flat,0.0])
    # NOTE: Lowest_Point keeps the last (corner) drop; it levels ring 4.
    Lowest_Point = -dvec.length
    verts.append([x,Half_Flat,-dvec.length])
    Lowest_Z_Vert = min(Lowest_Z_Vert,-dvec.length)
    Row += 1

    # Ring 4: same x positions, all levelled at Lowest_Point to finish the
    # chamfer triangle.
    x = tan(radians(0))*Half_Flat
    verts.append([x,Half_Flat,Lowest_Point])


    x = tan(radians(60/6))*Half_Flat
    verts.append([x,Half_Flat,Lowest_Point])
    x = tan(radians(60/3))*Half_Flat
    verts.append([x,Half_Flat,Lowest_Point])

    x = tan(radians(60/2))*Half_Flat
    verts.append([x,Half_Flat,Lowest_Point])
    Lowest_Z_Vert = min(Lowest_Z_Vert,Lowest_Point)
    Row += 1

    # Ring 5: straight side of the nut down to the mid-height plane.

    x = tan(radians(0))*Half_Flat
    verts.append([x,Half_Flat,-Half_Height])

    x = tan(radians(60/6))*Half_Flat
    verts.append([x,Half_Flat,-Half_Height])

    x = tan(radians(60/3))*Half_Flat
    verts.append([x,Half_Flat,-Half_Height])

    x = tan(radians(60/2))*Half_Flat
    verts.append([x,Half_Flat,-Half_Height])
    Lowest_Z_Vert = min(Lowest_Z_Vert,-Half_Height)
    Row += 1

    faces.extend(Build_Face_List_Quads(FaceStart,3,Row - 1))

    Global_Head_Height = HEIGHT

    # Mirror the top half downward about the lowest z, then across y, and
    # finally spin six copies around z to complete the hexagon.
    Tvert,tface = Mirror_Verts_Faces(verts,faces,'z',Lowest_Z_Vert)
    verts.extend(Tvert)
    faces.extend(tface)


    Tvert,tface = Mirror_Verts_Faces(verts,faces,'y')
    verts.extend(Tvert)
    faces.extend(tface)

    S_verts,S_faces = SpinDup(verts,faces,360,6,'z')

    return S_verts,S_faces,TopBevelRadius
+
+
+
def add_Nylon_Head(OUTSIDE_RADIUS,Z_LOCATION = 0):
    """Create the domed cap of a nylon lock nut and spin the profile into a
    full ring of faces.

    OUTSIDE_RADIUS .. outer radius of the nut body the cap sits under
    Z_LOCATION ...... z height of the cap's top rim

    Returns (verts, faces, lowest_z); lowest_z is the z of the lowest
    vertex created, used by the caller to position the whole nut.
    """
    DIV = 36
    verts = []
    faces = []
    Row = 0

    # Proportions taken from a 4.75-unit-radius reference model.
    INNER_HOLE = OUTSIDE_RADIUS - (OUTSIDE_RADIUS * (1.25/4.75))
    EDGE_THICKNESS = (OUTSIDE_RADIUS * (0.4/4.75))
    RAD1 = (OUTSIDE_RADIUS * (0.5/4.75))
    OVER_ALL_HEIGTH = (OUTSIDE_RADIUS * (2.0/4.75))

    FaceStart = len(verts)

    Height_Offset = Z_LOCATION
    Lowest_Z_Vert = 0

    # Inner lip, stepped up from the bottom by the edge thickness.
    x = INNER_HOLE
    z = (Height_Offset - OVER_ALL_HEIGTH) + EDGE_THICKNESS
    verts.append([x,0.0,z])
    Lowest_Z_Vert = min(Lowest_Z_Vert,z)
    Row += 1

    # Bottom edge of the cap.
    x = INNER_HOLE
    z = (Height_Offset - OVER_ALL_HEIGTH)
    verts.append([x,0.0,z])
    Lowest_Z_Vert = min(Lowest_Z_Vert,z)
    Row += 1

    # Rounded shoulder: arc from the bottom edge up toward the side.
    for i in range(180,80,-10):
        x = (OUTSIDE_RADIUS-RAD1) + sin(radians(i))*RAD1
        z = ((Height_Offset - OVER_ALL_HEIGTH)+RAD1) + cos(radians(i))*RAD1
        verts.append([x,0.0,z])
        # FIX: track the actual vertex height; the original compared the raw
        # arc offset (cos(i)*RAD1) instead of the appended z coordinate.
        Lowest_Z_Vert = min(Lowest_Z_Vert,z)
        Row += 1

    # Top rim at the nut's outside radius.
    x = OUTSIDE_RADIUS
    z = Height_Offset
    verts.append([x,0.0,z])
    Lowest_Z_Vert = min(Lowest_Z_Vert,z)
    Row += 1

    sVerts,sFaces = SpinDup(verts,faces,360,DIV,'z')
    sVerts.extend(verts)  # re-append the profile to close the revolve loop

    faces.extend(Build_Face_List_Quads(FaceStart,Row-1,DIV))

    return Move_Verts_Up_Z(sVerts,0),faces,Lowest_Z_Vert
+
+
+
def add_Nylon_Part(OUTSIDE_RADIUS,Z_LOCATION = 0):
    """Create the nylon insert ring inside a lock nut's cap.

    OUTSIDE_RADIUS .. outer radius of the nut body
    Z_LOCATION ...... z height of the insert's top face

    Returns (verts, faces, depth_below_origin) where the last value is how
    far the lowest vertex sits below z=0.
    """
    DIV = 36
    verts = []
    faces = []
    Row = 0

    # Proportions relative to a 4.75-unit-radius reference model.
    INNER_HOLE = OUTSIDE_RADIUS - (OUTSIDE_RADIUS * (1.5/4.75))
    EDGE_THICKNESS = (OUTSIDE_RADIUS * (0.4/4.75))
    OVER_ALL_HEIGTH = (OUTSIDE_RADIUS * (2.0/4.75))
    PART_THICKNESS = OVER_ALL_HEIGTH - EDGE_THICKNESS
    PART_INNER_HOLE = (OUTSIDE_RADIUS * (2.5/4.75))

    FaceStart = len(verts)

    Height_Offset = Z_LOCATION
    Lowest_Z_Vert = 0

    # Rectangular cross-section: top-inner, top-outer, bottom-outer,
    # bottom-inner.
    Profile = [(INNER_HOLE + EDGE_THICKNESS, Height_Offset),
               (PART_INNER_HOLE,             Height_Offset),
               (PART_INNER_HOLE,             Height_Offset - PART_THICKNESS),
               (INNER_HOLE + EDGE_THICKNESS, Height_Offset - PART_THICKNESS)]

    for x,z in Profile:
        verts.append([x,0.0,z])
        Lowest_Z_Vert = min(Lowest_Z_Vert,z)
        Row += 1

    sVerts,sFaces = SpinDup(verts,faces,360,DIV,'z')
    sVerts.extend(verts)  # re-append the profile to close the revolve loop

    faces.extend(Build_Face_List_Quads(FaceStart,Row-1,DIV,1))

    return sVerts,faces,0 - Lowest_Z_Vert
+
+
+##########################################################################################
+##########################################################################################
+## Create Internal Thread
+##########################################################################################
+##########################################################################################
+
+
def Create_Internal_Thread_Start_Verts(verts,INNER_RADIUS,OUTTER_RADIUS,PITCH,DIV,CREST_PERCENT,ROOT_PERCENT,Height_Offset):
    """Append the lead-in rings of an internal (nut) thread to *verts*.

    One turn is generated in which the root-side rings ramp the radius from
    the major radius down to the minor radius, so the thread 'fades in' at
    the top of the nut.  Any z above Height_Start is clamped flat so the
    nut's top face stays level.

    Returns (rows_added, next_height_offset); *verts* is modified in place.
    """
    Ret_Row = 0

    # Shift up one pitch so the clamped verts start exactly at Height_Start.
    Height_Offset = Height_Offset + PITCH
    Height_Start = Height_Offset - PITCH
    Height_Step = float(PITCH)/float(DIV)
    Deg_Step = 360.0 /float(DIV)

    Crest_Height = float(PITCH) * float(CREST_PERCENT)/float(100)
    Root_Height = float(PITCH) * float(ROOT_PERCENT)/float(100)
    Root_to_Crest_Height = Crest_to_Root_Height = (float(PITCH) - (Crest_Height + Root_Height))/2.0

    # Radius shrink per segment used to blend outer -> inner radius.
    Rank = float(OUTTER_RADIUS - INNER_RADIUS)/float(DIV)

    # (ramped ring?, ring height) -- four rings make one turn.
    Profile = [(False, Crest_Height),
               (False, Crest_to_Root_Height),
               (True,  Root_Height),
               (True,  Root_to_Crest_Height)]

    for Ramped, Ring_Height in Profile:
        for i in range(DIV+1):
            z = Height_Offset - (Height_Step*i)
            if z > Height_Start:
                z = Height_Start

            if Ramped:
                Radius = OUTTER_RADIUS - (i*Rank)  # blend toward the minor radius
            else:
                Radius = OUTTER_RADIUS

            x = sin(radians(i*Deg_Step))*Radius
            y = cos(radians(i*Deg_Step))*Radius
            verts.append([x,y,z])
        Height_Offset -= Ring_Height
        Ret_Row += 1

    return Ret_Row,Height_Offset
+
+
def Create_Internal_Thread_End_Verts(verts,INNER_RADIUS,OUTTER_RADIUS,PITCH,DIV,CREST_PERCENT,ROOT_PERCENT,Height_Offset):
    """Append the run-out rings of an internal (nut) thread to *verts*.

    Two turns are generated: in the first the root-side rings ramp the
    radius from the minor radius back out to the major radius, in the
    second they stay at the major radius, so the thread 'fades out' at the
    bottom of the nut.  Any z below Height_End is clamped flat.

    Returns (rows_added, Height_End); Height_End is the lowest point of the
    thread and *verts* is modified in place.
    """
    Ret_Row = 0

    Height_End = Height_Offset - PITCH
    Height_Step = float(PITCH)/float(DIV)
    Deg_Step = 360.0 /float(DIV)

    Crest_Height = float(PITCH) * float(CREST_PERCENT)/float(100)
    Root_Height = float(PITCH) * float(ROOT_PERCENT)/float(100)
    Root_to_Crest_Height = Crest_to_Root_Height = (float(PITCH) - (Crest_Height + Root_Height))/2.0

    # Radius growth per segment used to blend inner -> outer radius.
    Rank = float(OUTTER_RADIUS - INNER_RADIUS)/float(DIV)

    # (root-side ring?, ring height) -- four rings make one turn.
    Profile = [(False, Crest_Height),
               (False, Crest_to_Root_Height),
               (True,  Root_Height),
               (True,  Root_to_Crest_Height)]

    for j in range(2):
        for Root_Side, Ring_Height in Profile:
            for i in range(DIV+1):
                z = Height_Offset - (Height_Step*i)
                if z < Height_End:
                    z = Height_End

                if not Root_Side:
                    Radius = OUTTER_RADIUS
                elif j == 0:
                    Radius = INNER_RADIUS + (i*Rank)  # first turn: ramp outward
                else:
                    Radius = OUTTER_RADIUS            # second turn: fully ramped

                x = sin(radians(i*Deg_Step))*Radius
                y = cos(radians(i*Deg_Step))*Radius
                verts.append([x,y,z])
            Height_Offset -= Ring_Height
            Ret_Row += 1

    return Ret_Row,Height_End  # Height_End is the lowest point
+
+
def Create_Internal_Thread(INNER_DIA,OUTTER_DIA,PITCH,HEIGHT,CREST_PERCENT,ROOT_PERCENT,INTERNAL = 1):
    """Build the internal (nut) thread mesh: lead-in turn, constant-profile
    middle turns and run-out turns, skinned with quads.

    INNER_DIA/OUTTER_DIA ........ minor/major thread diameters
    PITCH/HEIGHT ................ thread pitch and total thread depth
    CREST_PERCENT/ROOT_PERCENT .. flat widths as a percentage of the pitch
    INTERNAL .................... face-flip flag forwarded to the quad builder

    Returns (verts, faces, thread_depth) with thread_depth positive.
    """
    verts = []
    faces = []

    DIV = 36  # segments per revolution

    INNER_RADIUS = INNER_DIA/2
    OUTTER_RADIUS = OUTTER_DIA/2

    Deg_Step = 360.0 /float(DIV)
    Height_Step = float(PITCH)/float(DIV)

    # Whole turns, less one pitch for the start and end that are each
    # half a pitch high.
    Num = int(round((HEIGHT- PITCH)/PITCH))

    Row = 0

    Crest_Height = float(PITCH) * float(CREST_PERCENT)/float(100)
    Root_Height = float(PITCH) * float(ROOT_PERCENT)/float(100)
    Root_to_Crest_Height = Crest_to_Root_Height = (float(PITCH) - (Crest_Height + Root_Height))/2.0

    Height_Offset = 0
    FaceStart = len(verts)

    Row_Inc,Height_Offset = Create_Internal_Thread_Start_Verts(verts,INNER_RADIUS,OUTTER_RADIUS,PITCH,DIV,CREST_PERCENT,ROOT_PERCENT,Height_Offset)
    Row += Row_Inc

    # Main thread: four rings per turn (crest flat, flank, root flat, flank).
    Profile = [(OUTTER_RADIUS, Crest_Height),
               (OUTTER_RADIUS, Crest_to_Root_Height),
               (INNER_RADIUS, Root_Height),
               (INNER_RADIUS, Root_to_Crest_Height)]

    for j in range(Num):
        for Radius, Ring_Height in Profile:
            for i in range(DIV+1):
                x = sin(radians(i*Deg_Step))*Radius
                y = cos(radians(i*Deg_Step))*Radius
                verts.append([x,y,Height_Offset - (Height_Step*i) ])
            Height_Offset -= Ring_Height
            Row += 1

    Row_Inc,Height_Offset = Create_Internal_Thread_End_Verts(verts,INNER_RADIUS,OUTTER_RADIUS,PITCH,DIV,CREST_PERCENT,ROOT_PERCENT,Height_Offset)
    Row += Row_Inc

    faces.extend(Build_Face_List_Quads(FaceStart,DIV,Row -1,INTERNAL))

    return verts,faces,0 - Height_Offset
+
+
def Nut_Mesh(props, context):
    """Build the complete nut mesh from the operator properties: internal
    thread plus hex body, and for lock nuts the nylon cap and insert.

    Returns (verts, faces) with the nut moved up so it rests on z=0.
    """
    verts = []
    faces = []

    Face_Start = len(verts)
    # New_Nut_Height is the actual thread depth; the hex body is sized to it.
    Thread_Verts,Thread_Faces,New_Nut_Height = Create_Internal_Thread(props.bf_Minor_Dia,props.bf_Major_Dia,props.bf_Pitch,props.bf_Hex_Nut_Height,props.bf_Crest_Percent,props.bf_Root_Percent,1)
    verts.extend(Thread_Verts)
    faces.extend(Copy_Faces(Thread_Faces,Face_Start))

    Face_Start = len(verts)
    Head_Verts,Head_Faces,Lock_Nut_Rad = add_Hex_Nut(props.bf_Hex_Nut_Flat_Distance,props.bf_Major_Dia,New_Nut_Height)
    verts.extend(Head_Verts)
    faces.extend(Copy_Faces(Head_Faces,Face_Start))

    LowZ = 0 - New_Nut_Height

    if props.bf_Nut_Type == 'bf_Nut_Lock':
        # Lock nut: hang the nylon cap below the body; its lowest vertex
        # replaces LowZ so the whole nut still rests on the origin.
        Face_Start = len(verts)
        Nylon_Head_Verts,Nylon_Head_faces,LowZ = add_Nylon_Head(Lock_Nut_Rad,0-New_Nut_Height)
        verts.extend(Nylon_Head_Verts)
        faces.extend(Copy_Faces(Nylon_Head_faces,Face_Start))

        Face_Start = len(verts)
        Nylon_Verts,Nylon_faces,Temp_LowZ = add_Nylon_Part(Lock_Nut_Rad,0-New_Nut_Height)
        verts.extend(Nylon_Verts)
        faces.extend(Copy_Faces(Nylon_faces,Face_Start))

    return Move_Verts_Up_Z(verts,0 - LowZ),faces
+
+
+
+##########################################################################################
+##########################################################################################
+##########################################################################################
+## Create Bolt
+##########################################################################################
+##########################################################################################
+
+
+
def Bolt_Mesh(props, context):
    """Build the complete bolt mesh from the operator properties.

    Assembles three parts: the driver bit cut into the head (Allen or
    Phillips), the head (hex/cap/dome/pan/countersink) and the shank plus
    external thread, then stacks them so the lowest thread vertex ends up
    at z=0.

    Returns (verts, faces).
    """

    verts = []
    faces = []
    # Defaults used when the selected bit/head type matches no branch below.
    Bit_Verts = []
    Bit_Faces = []
    Bit_Dia = 0.001
    Head_Verts = []
    Head_Faces= []
    Head_Height = 0.0

    ReSized_Allen_Bit_Flat_Distance = props.bf_Allen_Bit_Flat_Distance # set default

    Head_Height = props.bf_Hex_Head_Height # will be changed by the Head Functions

    if props.bf_Bit_Type == 'bf_Bit_Allen' and props.bf_Head_Type == 'bf_Head_Pan':
        # An Allen bit can be wider than a pan head allows; shrink it to fit
        # (5% over the maximum so the resize is visible to the user).
        if Allen_Bit_Dia(props.bf_Allen_Bit_Flat_Distance) > Max_Pan_Bit_Dia(props.bf_Pan_Head_Dia):
            ReSized_Allen_Bit_Flat_Distance = Allen_Bit_Dia_To_Flat(Max_Pan_Bit_Dia(props.bf_Pan_Head_Dia)) * 1.05

    # Bit mesh; Bit_Dia tells the head builders how big a hole to leave.
    if props.bf_Bit_Type == 'bf_Bit_Allen':
        Bit_Verts,Bit_Faces,Bit_Dia = Create_Allen_Bit(ReSized_Allen_Bit_Flat_Distance,props.bf_Allen_Bit_Depth)

    if props.bf_Bit_Type == 'bf_Bit_Philips':
        Bit_Verts,Bit_Faces,Bit_Dia = Create_Phillips_Bit(props.bf_Philips_Bit_Dia,props.bf_Philips_Bit_Dia*(0.5/1.82),props.bf_Phillips_Bit_Depth)

    # Head mesh; each builder returns the real head height used for stacking.

    if props.bf_Head_Type =='bf_Head_Hex':
        Head_Verts,Head_Faces,Head_Height = Create_Hex_Head(props.bf_Hex_Head_Flat_Distance,Bit_Dia,props.bf_Shank_Dia,props.bf_Hex_Head_Height)

    elif props.bf_Head_Type == 'bf_Head_Cap':
        Head_Verts,Head_Faces,Head_Height = Create_Cap_Head(Bit_Dia,props.bf_Cap_Head_Dia,props.bf_Shank_Dia,props.bf_Cap_Head_Height,props.bf_Cap_Head_Dia*(1.0/19.0),props.bf_Cap_Head_Dia*(1.0/19.0))

    elif props.bf_Head_Type =='bf_Head_Dome':
        Head_Verts,Head_Faces,Head_Height = Create_Dome_Head(Bit_Dia,props.bf_Dome_Head_Dia,props.bf_Shank_Dia,props.bf_Hex_Head_Height,1,1,0)

    elif props.bf_Head_Type == 'bf_Head_Pan':
        Head_Verts,Head_Faces,Head_Height = Create_Pan_Head(Bit_Dia,props.bf_Pan_Head_Dia,props.bf_Shank_Dia,props.bf_Hex_Head_Height,1,1,0)

    elif props.bf_Head_Type == 'bf_Head_CounterSink':
        Head_Verts,Head_Faces,Head_Height = Create_CounterSink_Head(Bit_Dia,props.bf_CounterSink_Head_Dia,props.bf_Shank_Dia,props.bf_CounterSink_Head_Dia,props.bf_CounterSink_Head_Dia*(0.09/6.31))

    # Bit and head are modelled around z=0: lift both by the head height,
    # append the thread (built downward from z=0), then raise everything by
    # the thread height so the lowest vertex lands on the origin.
    Face_Start = len(verts)
    verts.extend(Move_Verts_Up_Z(Bit_Verts,Head_Height))
    faces.extend(Copy_Faces(Bit_Faces,Face_Start))

    Face_Start = len(verts)
    verts.extend(Move_Verts_Up_Z(Head_Verts,Head_Height))
    faces.extend(Copy_Faces(Head_Faces,Face_Start))

    Face_Start = len(verts)
    Thread_Verts,Thread_Faces,Thread_Height = Create_External_Thread(props.bf_Shank_Dia,props.bf_Shank_Length,props.bf_Minor_Dia,props.bf_Major_Dia,props.bf_Pitch,props.bf_Thread_Length,props.bf_Crest_Percent,props.bf_Root_Percent)

    verts.extend(Move_Verts_Up_Z(Thread_Verts,00))
    faces.extend(Copy_Faces(Thread_Faces,Face_Start))

    return Move_Verts_Up_Z(verts,Thread_Height),faces
+
def align_matrix(context):
    """Compute the placement matrix for a newly added object.

    The matrix translates to the 3D cursor; when the user preference
    'Align Object To' is 'VIEW' and we are in a 3D viewport, the current
    view rotation is applied as well.
    """
    loc = Matrix.Translation(context.scene.cursor_location)
    in_view_3d = (context.space_data.type == 'VIEW_3D'
                  and context.user_preferences.edit.object_align == 'VIEW')
    if in_view_3d:
        rot = context.space_data.region_3d.view_matrix.to_3x3().inverted().to_4x4()
    else:
        rot = Matrix()
    return loc * rot
+
+
def create_mesh_object(context, verts, edges, faces, name, edit, align_matrix):
    """Create a new mesh object from verts/edges/faces, or replace the
    geometry of the active object.

    verts/edges/faces .. lists as accepted by Mesh.from_pydata()
    name ............... name of the new mesh (and object)
    edit ............... True: replace the active object's mesh data
                         (destroys/deletes the existing mesh datablock)
    align_matrix ....... world matrix used to place a newly created object

    Returns the object that received the geometry, or None when *edit* is
    requested without an active object.
    """
    scene = context.scene
    obj_act = scene.objects.active

    # Can't edit anything, unless we have an active obj.
    if edit and not obj_act:
        return None

    # Create new mesh
    mesh = bpy.data.meshes.new(name)

    # Make a mesh from a list of verts/edges/faces.
    mesh.from_pydata(verts, edges, faces)

    # Update mesh geometry after adding stuff.
    mesh.update()

    # Deselect all objects when in object mode
    if bpy.ops.object.select_all.poll():
        bpy.ops.object.select_all(action='DESELECT')

    if edit:
        # Replace geometry of existing object

        # Use the active obj and select it.
        ob_new = obj_act
        ob_new.select = True

        if obj_act.mode == 'OBJECT':
            # Get existing mesh datablock.
            old_mesh = ob_new.data

            # Set object data to nothing so the old datablock can be freed.
            ob_new.data = None

            # Clear users of existing mesh datablock.
            old_mesh.user_clear()

            # Remove old mesh datablock if no users are left.
            if (old_mesh.users == 0):
                bpy.data.meshes.remove(old_mesh)

            # Assign new mesh datablock.
            ob_new.data = mesh

    else:
        # Create new object
        ob_new = bpy.data.objects.new(name, mesh)

        # Link new object to the given scene and select it.
        scene.objects.link(ob_new)
        ob_new.select = True

        # Place the object at the 3D cursor location.
        # apply viewRotaion
        ob_new.matrix_world = align_matrix

    if obj_act and obj_act.mode == 'EDIT':
        if not edit:
            # We are in EditMode, switch to ObjectMode.
            bpy.ops.object.mode_set(mode='OBJECT')

            # Select the active object as well.
            obj_act.select = True

            # Apply location of new object.
            scene.update()

            # Join new object into the active.
            bpy.ops.object.join()

            # Switching back to EditMode.
            bpy.ops.object.mode_set(mode='EDIT')

            ob_new = obj_act

    else:
        # We are in ObjectMode.
        # Make the new object the active one.
        scene.objects.active = ob_new

    return ob_new
+
+
def Create_New_Mesh(props, context, align_matrix):
    """Generate the bolt or nut mesh selected in *props*, clean it up and
    create (or, when props.edit is set, replace) the Blender object.

    Returns None.
    """
    verts = []
    faces = []
    sObjName = ''

    if props.bf_Model_Type == 'bf_Model_Bolt':
        verts, faces = Bolt_Mesh(props, context)
        sObjName = 'Bolt'
    elif props.bf_Model_Type == 'bf_Model_Nut':
        verts, faces = Nut_Mesh(props, context)
        sObjName = 'Nut'

    # Merge the coincident verts left over from the mirrored/spun
    # construction, then apply the global unit scale.
    verts, faces = RemoveDoubles(verts, faces)
    verts = Scale_Mesh_Verts(verts,GLOBAL_SCALE)

    create_mesh_object(context, verts, [], faces, sObjName,
                       props.edit, align_matrix)

    return
+
+
diff --git a/add_mesh_BoltFactory/preset_utils.py b/add_mesh_BoltFactory/preset_utils.py
new file mode 100644
index 00000000..4d31accf
--- /dev/null
+++ b/add_mesh_BoltFactory/preset_utils.py
@@ -0,0 +1,53 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+
+import bpy
+import os, sys
+
+
def getPresets():
    """Collect the bolt/nut preset files shipped with the add-on.

    Returns (presets, presetPath): *presets* is a list of
    (filename, label, filename) tuples — one per ``*.py`` file in the
    add-on's ``presets`` directory — and *presetPath* is that directory's
    absolute path.
    """
    scriptPath = os.path.dirname(__file__)
    presetPath = os.path.join(scriptPath, "presets")

    # os.listdir order is platform dependent; sort for a stable preset menu.
    presetFiles = sorted(os.listdir(presetPath))

    presets = [(presetFile, presetFile.rpartition(".")[0], presetFile)
               for presetFile in presetFiles if presetFile.endswith(".py")]

    return presets, presetPath
+
+
+#presets = getPresets()
+
+
+
def setProps(props, preset, presetsPath):
    """Apply a preset file to the operator properties.

    Each preset is a small Python file of ``props.<name> = value``
    statements; it is executed with *props* exposed in its namespace.

    NOTE(security): presets are executed as code — only files shipped in
    the add-on's own ``presets`` directory should ever be passed here.

    props ......... object whose attributes the preset assigns
    preset ........ preset file name (e.g. "M10.py")
    presetsPath ... directory containing the preset files
    """
    path = os.path.join(presetsPath, preset)

    # Read and execute the file as a whole (the previous line-by-line
    # exec broke on any statement spanning multiple lines) and close the
    # file deterministically.
    with open(path) as presetFile:
        source = presetFile.read()

    exec(compile(source, path, 'exec'), {"props": props})

    return
diff --git a/add_mesh_BoltFactory/presets/M10.py b/add_mesh_BoltFactory/presets/M10.py
new file mode 100644
index 00000000..03adab07
--- /dev/null
+++ b/add_mesh_BoltFactory/presets/M10.py
@@ -0,0 +1,22 @@
# Bolt Factory preset: M10 (ISO metric, fine pitch; the coarse pitch is
# kept commented out).  Executed by preset_utils.setProps() with the
# operator's `props` object in scope; units are millimetres.
props.bf_Shank_Dia = 10.0
#props.bf_Pitch = 1.5 # Coarse
props.bf_Pitch = 1.25 # Fine
props.bf_Crest_Percent = 10
props.bf_Root_Percent = 10
props.bf_Major_Dia = 10.0
# Minor diameter derived from the standard metric thread depth factor.
props.bf_Minor_Dia = props.bf_Major_Dia - (1.082532 * props.bf_Pitch)
props.bf_Hex_Head_Flat_Distance = 17.0
props.bf_Hex_Head_Height = 6.4
props.bf_Cap_Head_Dia = 16.0
props.bf_Cap_Head_Height = 10.0
props.bf_CounterSink_Head_Dia = 20.0
props.bf_Allen_Bit_Flat_Distance = 8.0
props.bf_Allen_Bit_Depth = 5.0
props.bf_Pan_Head_Dia = 20.0
props.bf_Dome_Head_Dia = 20.0
props.bf_Philips_Bit_Dia = props.bf_Pan_Head_Dia * (1.82 / 5.6)
#props.bf_Phillips_Bit_Depth = Get_Phillips_Bit_Height(props.bf_Philips_Bit_Dia)
props.bf_Hex_Nut_Height = 8.0
props.bf_Hex_Nut_Flat_Distance = 17.0
props.bf_Thread_Length = 20
props.bf_Shank_Length = 0.0
diff --git a/add_mesh_BoltFactory/presets/M12.py b/add_mesh_BoltFactory/presets/M12.py
new file mode 100644
index 00000000..58674852
--- /dev/null
+++ b/add_mesh_BoltFactory/presets/M12.py
@@ -0,0 +1,22 @@
# Bolt Factory preset: M12 (ISO metric, fine pitch; the coarse pitch is
# kept commented out).  Executed by preset_utils.setProps() with the
# operator's `props` object in scope; units are millimetres.
#props.bf_Pitch = 1.75 # Coarse
props.bf_Pitch = 1.50 # Fine
props.bf_Crest_Percent = 10
props.bf_Root_Percent = 10
props.bf_Major_Dia = 12.0
# Minor diameter derived from the standard metric thread depth factor.
props.bf_Minor_Dia = props.bf_Major_Dia - (1.082532 * props.bf_Pitch)
props.bf_Hex_Head_Flat_Distance = 19.0
props.bf_Hex_Head_Height = 7.5
props.bf_Cap_Head_Dia = 18.5
props.bf_Cap_Head_Height = 12.0
props.bf_CounterSink_Head_Dia = 22.0
props.bf_Allen_Bit_Flat_Distance = 10.0
props.bf_Allen_Bit_Depth = 6.0
props.bf_Pan_Head_Dia = 24.0
props.bf_Dome_Head_Dia = 24.0
props.bf_Philips_Bit_Dia = props.bf_Pan_Head_Dia * (1.82 / 5.6)
#props.bf_Phillips_Bit_Depth = Get_Phillips_Bit_Height(props.bf_Philips_Bit_Dia)
props.bf_Hex_Nut_Height = 10.0
props.bf_Hex_Nut_Flat_Distance = 19.0
props.bf_Shank_Dia = 12.0
props.bf_Shank_Length = 33.0
props.bf_Thread_Length = 32.0
diff --git a/add_mesh_BoltFactory/presets/M3.py b/add_mesh_BoltFactory/presets/M3.py
new file mode 100644
index 00000000..584bcd35
--- /dev/null
+++ b/add_mesh_BoltFactory/presets/M3.py
@@ -0,0 +1,22 @@
# Bolt Factory preset: M3 (ISO metric, fine pitch; the coarse pitch is
# kept commented out).  Executed by preset_utils.setProps() with the
# operator's `props` object in scope; units are millimetres.
props.bf_Shank_Dia = 3.0
#props.bf_Pitch = 0.5 # Coarse
props.bf_Pitch = 0.35 # Fine
props.bf_Crest_Percent = 10
props.bf_Root_Percent = 10
props.bf_Major_Dia = 3.0
# Minor diameter derived from the standard metric thread depth factor.
props.bf_Minor_Dia = props.bf_Major_Dia - (1.082532 * props.bf_Pitch)
props.bf_Hex_Head_Flat_Distance = 5.5
props.bf_Hex_Head_Height = 2.0
props.bf_Cap_Head_Dia = 5.5
props.bf_Cap_Head_Height = 3.0
props.bf_CounterSink_Head_Dia = 6.3
props.bf_Allen_Bit_Flat_Distance = 2.5
props.bf_Allen_Bit_Depth = 1.5
props.bf_Pan_Head_Dia = 5.6
props.bf_Dome_Head_Dia = 5.6
props.bf_Philips_Bit_Dia = props.bf_Pan_Head_Dia * (1.82 / 5.6)
#props.bf_Phillips_Bit_Depth = Get_Phillips_Bit_Height(props.bf_Philips_Bit_Dia)
props.bf_Hex_Nut_Height = 2.4
props.bf_Hex_Nut_Flat_Distance = 5.5
props.bf_Thread_Length = 6
props.bf_Shank_Length = 0.0
diff --git a/add_mesh_BoltFactory/presets/M4.py b/add_mesh_BoltFactory/presets/M4.py
new file mode 100644
index 00000000..686fbf56
--- /dev/null
+++ b/add_mesh_BoltFactory/presets/M4.py
@@ -0,0 +1,22 @@
+props.bf_Shank_Dia = 4.0
+#props.bf_Pitch = 0.7 # Coarse
+props.bf_Pitch = 0.5 # Fine
+props.bf_Crest_Percent = 10
+props.bf_Root_Percent = 10
+props.bf_Major_Dia = 4.0
+props.bf_Minor_Dia = props.bf_Major_Dia - (1.082532 * props.bf_Pitch)
+props.bf_Hex_Head_Flat_Distance = 7.0
+props.bf_Hex_Head_Height = 2.8
+props.bf_Cap_Head_Dia = 7.0
+props.bf_Cap_Head_Height = 4.0
+props.bf_CounterSink_Head_Dia = 9.4
+props.bf_Allen_Bit_Flat_Distance = 3.0
+props.bf_Allen_Bit_Depth = 2.0
+props.bf_Pan_Head_Dia = 8.0
+props.bf_Dome_Head_Dia = 8.0
+props.bf_Philips_Bit_Dia = props.bf_Pan_Head_Dia * (1.82 / 5.6)
+#props.bf_Phillips_Bit_Depth = Get_Phillips_Bit_Height(props.bf_Philips_Bit_Dia)
+props.bf_Hex_Nut_Height = 3.2
+props.bf_Hex_Nut_Flat_Distance = 7.0
+props.bf_Thread_Length = 8
+props.bf_Shank_Length = 0.0
diff --git a/add_mesh_BoltFactory/presets/M5.py b/add_mesh_BoltFactory/presets/M5.py
new file mode 100644
index 00000000..6249cf49
--- /dev/null
+++ b/add_mesh_BoltFactory/presets/M5.py
@@ -0,0 +1,22 @@
+props.bf_Shank_Dia = 5.0
+#props.bf_Pitch = 0.8 # Coarse
+props.bf_Pitch = 0.5 # Fine
+props.bf_Crest_Percent = 10
+props.bf_Root_Percent = 10
+props.bf_Major_Dia = 5.0
+props.bf_Minor_Dia = props.bf_Major_Dia - (1.082532 * props.bf_Pitch)
+props.bf_Hex_Head_Flat_Distance = 8.0
+props.bf_Hex_Head_Height = 3.5
+props.bf_Cap_Head_Dia = 8.5
+props.bf_Cap_Head_Height = 5.0
+props.bf_CounterSink_Head_Dia = 10.4
+props.bf_Allen_Bit_Flat_Distance = 4.0
+props.bf_Allen_Bit_Depth = 2.5
+props.bf_Pan_Head_Dia = 9.5
+props.bf_Dome_Head_Dia = 9.5
+props.bf_Philips_Bit_Dia = props.bf_Pan_Head_Dia * (1.82 / 5.6)
+#props.bf_Phillips_Bit_Depth = Get_Phillips_Bit_Height(props.bf_Philips_Bit_Dia)
+props.bf_Hex_Nut_Height = 4.0
+props.bf_Hex_Nut_Flat_Distance = 8.0
+props.bf_Thread_Length = 10
+props.bf_Shank_Length = 0.0
diff --git a/add_mesh_BoltFactory/presets/M6.py b/add_mesh_BoltFactory/presets/M6.py
new file mode 100644
index 00000000..20c09851
--- /dev/null
+++ b/add_mesh_BoltFactory/presets/M6.py
@@ -0,0 +1,22 @@
+props.bf_Shank_Dia = 6.0
+#bf_Pitch = 1.0 # Coarse
+props.bf_Pitch = 0.75 # Fine
+props.bf_Crest_Percent = 10
+props.bf_Root_Percent = 10
+props.bf_Major_Dia = 6.0
+props.bf_Minor_Dia = props.bf_Major_Dia - (1.082532 * props.bf_Pitch)
+props.bf_Hex_Head_Flat_Distance = 10.0
+props.bf_Hex_Head_Height = 4.0
+props.bf_Cap_Head_Dia = 10.0
+props.bf_Cap_Head_Height = 6.0
+props.bf_CounterSink_Head_Dia = 12.6
+props.bf_Allen_Bit_Flat_Distance = 5.0
+props.bf_Allen_Bit_Depth = 3.0
+props.bf_Pan_Head_Dia = 12.0
+props.bf_Dome_Head_Dia = 12.0
+props.bf_Philips_Bit_Dia = props.bf_Pan_Head_Dia * (1.82 / 5.6)
+#props.bf_Phillips_Bit_Depth = Get_Phillips_Bit_Height(props.bf_Philips_Bit_Dia)
+props.bf_Hex_Nut_Height = 5.0
+props.bf_Hex_Nut_Flat_Distance = 10.0
+props.bf_Thread_Length = 12
+props.bf_Shank_Length = 0.0
diff --git a/add_mesh_BoltFactory/presets/M8.py b/add_mesh_BoltFactory/presets/M8.py
new file mode 100644
index 00000000..7dcffb62
--- /dev/null
+++ b/add_mesh_BoltFactory/presets/M8.py
@@ -0,0 +1,22 @@
+props.bf_Shank_Dia = 8.0
+#props.bf_Pitch = 1.25 # Coarse
+props.bf_Pitch = 1.00 # Fine
+props.bf_Crest_Percent = 10
+props.bf_Root_Percent = 10
+props.bf_Major_Dia = 8.0
+props.bf_Minor_Dia = props.bf_Major_Dia - (1.082532 * props.bf_Pitch)
+props.bf_Hex_Head_Flat_Distance = 13.0
+props.bf_Hex_Head_Height = 5.3
+props.bf_Cap_Head_Dia = 13.5
+props.bf_Cap_Head_Height = 8.0
+props.bf_CounterSink_Head_Dia = 17.3
+props.bf_Allen_Bit_Flat_Distance = 6.0
+props.bf_Allen_Bit_Depth = 4.0
+props.bf_Pan_Head_Dia = 16.0
+props.bf_Dome_Head_Dia = 16.0
+props.bf_Philips_Bit_Dia = props.bf_Pan_Head_Dia * (1.82 / 5.6)
+#props.bf_Phillips_Bit_Depth = Get_Phillips_Bit_Height(props.bf_Philips_Bit_Dia)
+props.bf_Hex_Nut_Height = 6.5
+props.bf_Hex_Nut_Flat_Distance = 13.0
+props.bf_Thread_Length = 16
+props.bf_Shank_Length = 0.0
diff --git a/add_mesh_ant_landscape.py b/add_mesh_ant_landscape.py
new file mode 100644
index 00000000..19e680bb
--- /dev/null
+++ b/add_mesh_ant_landscape.py
@@ -0,0 +1,822 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+bl_info = {
+ "name": "ANT Landscape",
+ "author": "Jimmy Hazevoet",
+ "version": (0,1,1),
+ "blender": (2, 5, 7),
+ "api": 35853,
+ "location": "View3D > Add > Mesh",
+ "description": "Adds a Landscape Primitive",
+ "warning": "", # used for warning icon and text in addons panel
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/Add_Mesh/ANT_Landscape",
+ "tracker_url": "https://projects.blender.org/tracker/index.php?"\
+ "func=detail&aid=23130",
+ "category": "Add Mesh"}
+
+'''
+Another Noise Tool: Landscape mesh generator
+
+MESH OPTIONS:
+Mesh update: Turn this on for interactive mesh update.
+Sphere: Generate sphere or a grid mesh. (Turn height falloff off for sphere mesh)
+Smooth: Generate smooth shaded mesh.
+Subdivision: Number of mesh subdivisions, higher numbers gives more detail but also slows down the script.
+Mesh size: X,Y size of the grid mesh (in blender units).
+
+NOISE OPTIONS: ( Most of these options are the same as in blender textures. )
+Random seed: Use this to randomise the origin of the noise function.
+Noise size: Size of the noise.
+Noise type: Available noise types: multiFractal, ridgedMFractal, hybridMFractal, heteroTerrain, Turbulence, Distorted Noise, Cellnoise, Shattered_hTerrain, Marble
+Noise basis: Blender, Perlin, NewPerlin, Voronoi_F1, Voronoi_F2, Voronoi_F3, Voronoi_F4, Voronoi_F2-F1, Voronoi Crackle, Cellnoise
+VLNoise basis: Blender, Perlin, NewPerlin, Voronoi_F1, Voronoi_F2, Voronoi_F3, Voronoi_F4, Voronoi_F2-F1, Voronoi Crackle, Cellnoise
+Distortion: Distortion amount.
+Hard: Hard/Soft turbulence noise.
+Depth: Noise depth, number of frequencies in the fBm.
+Dimension: Musgrave: Fractal dimension of the roughest areas.
+Lacunarity: Musgrave: Gap between successive frequencies.
+Offset: Musgrave: Raises the terrain from sea level.
+Gain: Musgrave: Scale factor.
+Marble Bias: Sin, Tri, Saw
+Marble Sharpnes: Soft, Sharp, Sharper
+Marble Shape: Shape of the marble function: Default, Ring, Swirl, X, Y
+
+HEIGHT OPTIONS:
+Invert: Invert terrain height.
+Height: Scale terrain height.
+Offset: Terrain height offset.
+Falloff: Terrain height falloff: Type 1, Type 2, X, Y
+Sealevel: Flattens terrain below sealevel.
+Platlevel: Flattens terrain above plateau level.
+Strata: Strata amount, number of strata/terrace layers.
+Strata type: Strata types, Smooth, Sharp-sub, Sharp-add
+'''
+
+# import modules
+import bpy
+from bpy.props import *
+from mathutils import *
+from noise import *
+from math import *
+
+
+# Create a new mesh (object) from verts/edges/faces.
+# verts/edges/faces ... List of vertices/edges/faces for the
+# new mesh (as used in from_pydata).
+# name ... Name of the new mesh (& object).
+def create_mesh_object(context, verts, edges, faces, name):
+ scene = context.scene
+ obj_act = scene.objects.active
+
+ # Create new mesh
+ mesh = bpy.data.meshes.new(name)
+
+ # Make a mesh from a list of verts/edges/faces.
+ mesh.from_pydata(verts, edges, faces)
+
+ # Update mesh geometry after adding stuff.
+ mesh.update()
+
+ from bpy_extras import object_utils
+ return object_utils.object_data_add(context, mesh, operator=None)
+
+# A very simple "bridge" tool.
+# Connects two equally long vertex rows with faces.
+# Returns a list of the new faces (list of lists)
+#
+# vertIdx1 ... First vertex list (list of vertex indices).
+# vertIdx2 ... Second vertex list (list of vertex indices).
+# closed ... Creates a loop (first & last are closed).
+# flipped ... Invert the normal of the face(s).
+#
+# Note: You can set vertIdx1 to a single vertex index to create
+# a fan/star of faces.
+# Note: If both vertex idx list are the same length they have
+# to have at least 2 vertices.
+def createFaces(vertIdx1, vertIdx2, closed=False, flipped=False):
+ faces = []
+
+ if not vertIdx1 or not vertIdx2:
+ return None
+
+ if len(vertIdx1) < 2 and len(vertIdx2) < 2:
+ return None
+
+ fan = False
+ if (len(vertIdx1) != len(vertIdx2)):
+ if (len(vertIdx1) == 1 and len(vertIdx2) > 1):
+ fan = True
+ else:
+ return None
+
+ total = len(vertIdx2)
+
+ if closed:
+ # Bridge the start with the end.
+ if flipped:
+ face = [
+ vertIdx1[0],
+ vertIdx2[0],
+ vertIdx2[total - 1]]
+ if not fan:
+ face.append(vertIdx1[total - 1])
+ faces.append(face)
+
+ else:
+ face = [vertIdx2[0], vertIdx1[0]]
+ if not fan:
+ face.append(vertIdx1[total - 1])
+ face.append(vertIdx2[total - 1])
+ faces.append(face)
+
+ # Bridge the rest of the faces.
+ for num in range(total - 1):
+ if flipped:
+ if fan:
+ face = [vertIdx2[num], vertIdx1[0], vertIdx2[num + 1]]
+ else:
+ face = [vertIdx2[num], vertIdx1[num],
+ vertIdx1[num + 1], vertIdx2[num + 1]]
+ faces.append(face)
+ else:
+ if fan:
+ face = [vertIdx1[0], vertIdx2[num], vertIdx2[num + 1]]
+ else:
+ face = [vertIdx1[num], vertIdx2[num],
+ vertIdx2[num + 1], vertIdx1[num + 1]]
+ faces.append(face)
+
+ return faces
+
+
+###------------------------------------------------------------
+###------------------------------------------------------------
+# some functions for marble_noise
+def sin_bias(a):
+ return 0.5 + 0.5 * sin(a)
+
+def tri_bias(a):
+ b = 2 * pi
+ a = 1 - 2 * abs(floor((a * (1/b))+0.5) - (a*(1/b)))
+ return a
+
+def saw_bias(a):
+ b = 2 * pi
+ n = int(a/b)
+ a -= n * b
+ if a < 0: a += b
+ return a / b
+
+def soft(a):
+ return a
+
+def sharp(a):
+ return a**0.5
+
+def sharper(a):
+ return sharp(sharp(a))
+
+def shapes(x,y,shape=0):
+ if shape == 1:
+ # ring
+ x = x*2
+ y = y*2
+ s = (-cos(x**2+y**2)/(x**2+y**2+0.5))
+ elif shape == 2:
+ # swirl
+ x = x*2
+ y = y*2
+ s = (( x*sin( x*x+y*y ) + y*cos( x*x+y*y ) ) / (x**2+y**2+0.5))
+ elif shape == 3:
+ # bumps
+ x = x*2
+ y = y*2
+ s = ((cos( x*pi ) + cos( y*pi ))-0.5)
+ elif shape == 4:
+ # y grad.
+ s = (y*pi)
+ elif shape == 5:
+ # x grad.
+ s = (x*pi)
+ else:
+ # marble
+ s = ((x+y)*5)
+ return s
+
+# marble_noise
+def marble_noise(x,y,z, origin, size, shape, bias, sharpnes, turb, depth, hard, basis ):
+ x = x / size
+ y = y / size
+ z = z / size
+ s = shapes(x,y,shape)
+
+ x += origin[0]
+ y += origin[1]
+ z += origin[2]
+ value = s + turb * turbulence_vector((x,y,z), depth, hard, basis )[0]
+
+ if bias == 1:
+ value = tri_bias( value )
+ elif bias == 2:
+ value = saw_bias( value )
+ else:
+ value = sin_bias( value )
+
+ if sharpnes == 1:
+ value = sharp( value )
+ elif sharpnes == 2:
+ value = sharper( value )
+ else:
+ value = soft( value )
+
+ return value
+
+###------------------------------------------------------------
+# custom noise types
+
+# shattered_hterrain:
+def shattered_hterrain( x,y,z, H, lacunarity, octaves, offset, distort, basis ):
+ d = ( turbulence_vector( ( x, y, z ), 6, 0, 0 )[0] * 0.5 + 0.5 )*distort*0.5
+ t1 = ( turbulence_vector( ( x+d, y+d, z ), 0, 0, 7 )[0] + 0.5 )
+ t2 = ( hetero_terrain(( x*2, y*2, z*2 ), H, lacunarity, octaves, offset, basis )*0.5 )
+ return (( t1*t2 )+t2*0.5) * 0.5
+
+# strata_hterrain
+def strata_hterrain( x,y,z, H, lacunarity, octaves, offset, distort, basis ):
+ value = hetero_terrain(( x, y, z ), H, lacunarity, octaves, offset, basis )*0.5
+ steps = ( sin( value*(distort*5)*pi ) * ( 0.1/(distort*5)*pi ) )
+ return ( value * (1.0-0.5) + steps*0.5 )
+
+###------------------------------------------------------------
+# landscape_gen
+def landscape_gen(x,y,z,falloffsize,options=[0,1.0,1, 0,0,1.0,0,6,1.0,2.0,1.0,2.0,0,0,0, 1.0,0.0,1,0.0,1.0,0,0,0]):
+
+ # options
+ rseed = options[0]
+ nsize = options[1]
+ ntype = int( options[2][0] )
+ nbasis = int( options[3][0] )
+ vlbasis = int( options[4][0] )
+ distortion = options[5]
+ hardnoise = options[6]
+ depth = options[7]
+ dimension = options[8]
+ lacunarity = options[9]
+ offset = options[10]
+ gain = options[11]
+ marblebias = int( options[12][0] )
+ marblesharpnes = int( options[13][0] )
+ marbleshape = int( options[14][0] )
+ invert = options[15]
+ height = options[16]
+ heightoffset = options[17]
+ falloff = int( options[18][0] )
+ sealevel = options[19]
+ platlevel = options[20]
+ strata = options[21]
+ stratatype = options[22]
+ sphere = options[23]
+
+ # origin
+ if rseed == 0:
+ origin = 0.0,0.0,0.0
+ origin_x = 0.0
+ origin_y = 0.0
+ origin_z = 0.0
+ else:
+ # randomise origin
+ seed_set( rseed )
+ origin = random_unit_vector()
+ origin_x = ( 0.5 - origin[0] ) * 1000.0
+ origin_y = ( 0.5 - origin[1] ) * 1000.0
+ origin_z = ( 0.5 - origin[2] ) * 1000.0
+
+ # adjust noise size and origin
+ ncoords = ( x / nsize + origin_x, y / nsize + origin_y, z / nsize + origin_z )
+
+ # noise basis type's
+ if nbasis == 9: nbasis = 14 # to get cellnoise basis you must set 14 instead of 9
+ if vlbasis ==9: vlbasis = 14
+ # noise type's
+ if ntype == 0: value = multi_fractal( ncoords, dimension, lacunarity, depth, nbasis ) * 0.5
+ elif ntype == 1: value = ridged_multi_fractal( ncoords, dimension, lacunarity, depth, offset, gain, nbasis ) * 0.5
+ elif ntype == 2: value = hybrid_multi_fractal( ncoords, dimension, lacunarity, depth, offset, gain, nbasis ) * 0.5
+ elif ntype == 3: value = hetero_terrain( ncoords, dimension, lacunarity, depth, offset, nbasis ) * 0.25
+ elif ntype == 4: value = fractal( ncoords, dimension, lacunarity, depth, nbasis )
+ elif ntype == 5: value = turbulence_vector( ncoords, depth, hardnoise, nbasis )[0]
+ elif ntype == 6: value = vl_vector( ncoords, distortion, nbasis, vlbasis ) + 0.5
+ elif ntype == 7: value = marble_noise( x*2.0/falloffsize,y*2.0/falloffsize,z*2/falloffsize, origin, nsize, marbleshape, marblebias, marblesharpnes, distortion, depth, hardnoise, nbasis )
+ elif ntype == 8: value = shattered_hterrain( ncoords[0], ncoords[1], ncoords[2], dimension, lacunarity, depth, offset, distortion, nbasis )
+ elif ntype == 9: value = strata_hterrain( ncoords[0], ncoords[1], ncoords[2], dimension, lacunarity, depth, offset, distortion, nbasis )
+ else:
+ value = 0.0
+
+ # adjust height
+ if invert !=0:
+ value = (1-value) * height + heightoffset
+ else:
+ value = value * height + heightoffset
+
+ # edge falloff
+ if sphere == 0: # no edge falloff if spherical
+ if falloff != 0:
+ fallofftypes = [ 0, sqrt((x*x)**2+(y*y)**2), sqrt(x*x+y*y), sqrt(y*y), sqrt(x*x) ]
+ dist = fallofftypes[ falloff]
+ if falloff ==1:
+ radius = (falloffsize/2)**2
+ else:
+ radius = falloffsize/2
+ value = value - sealevel
+ if( dist < radius ):
+ dist = dist / radius
+ dist = ( (dist) * (dist) * ( 3-2*(dist) ) )
+ value = ( value - value * dist ) + sealevel
+ else:
+ value = sealevel
+
+ # strata / terrace / layered
+ if stratatype !='0':
+ strata = strata / height
+ if stratatype == '1':
+ strata *= 2
+ steps = ( sin( value*strata*pi ) * ( 0.1/strata*pi ) )
+ value = ( value * (1.0-0.5) + steps*0.5 ) * 2.0
+ elif stratatype == '2':
+ steps = -abs( sin( value*(strata)*pi ) * ( 0.1/(strata)*pi ) )
+ value =( value * (1.0-0.5) + steps*0.5 ) * 2.0
+ elif stratatype == '3':
+ steps = abs( sin( value*(strata)*pi ) * ( 0.1/(strata)*pi ) )
+ value =( value * (1.0-0.5) + steps*0.5 ) * 2.0
+ else:
+ value = value
+
+ # clamp height
+ if ( value < sealevel ): value = sealevel
+ if ( value > platlevel ): value = platlevel
+
+ return value
+
+
+# generate grid
+def grid_gen( sub_d, size_me, options ):
+
+ verts = []
+ faces = []
+ edgeloop_prev = []
+
+ delta = size_me / float(sub_d - 1)
+ start = -(size_me / 2.0)
+
+ for row_x in range(sub_d):
+ edgeloop_cur = []
+ x = start + row_x * delta
+ for row_y in range(sub_d):
+ y = start + row_y * delta
+ z = landscape_gen(x,y,0.0,size_me,options)
+
+ edgeloop_cur.append(len(verts))
+ verts.append((x,y,z))
+
+ if len(edgeloop_prev) > 0:
+ faces_row = createFaces(edgeloop_prev, edgeloop_cur)
+ faces.extend(faces_row)
+
+ edgeloop_prev = edgeloop_cur
+
+ return verts, faces
+
+
+# generate sphere
+def sphere_gen( sub_d, size_me, options ):
+
+ verts = []
+ faces = []
+ edgeloop_prev = []
+
+ for row_x in range(sub_d):
+ edgeloop_cur = []
+ for row_y in range(sub_d):
+ u = sin(row_y*pi*2/(sub_d-1)) * cos(-pi/2+row_x*pi/(sub_d-1)) * size_me/2
+ v = cos(row_y*pi*2/(sub_d-1)) * cos(-pi/2+row_x*pi/(sub_d-1)) * size_me/2
+ w = sin(-pi/2+row_x*pi/(sub_d-1)) * size_me/2
+ h = landscape_gen(u,v,w,size_me,options) / size_me
+ u,v,w = u+u*h, v+v*h, w+w*h
+
+ edgeloop_cur.append(len(verts))
+ verts.append((u, v, w))
+
+ if len(edgeloop_prev) > 0:
+ faces_row = createFaces(edgeloop_prev, edgeloop_cur)
+ faces.extend(faces_row)
+
+ edgeloop_prev = edgeloop_cur
+
+ return verts, faces
+
+
+###------------------------------------------------------------
+# Add landscape
+class landscape_add(bpy.types.Operator):
+ '''Add a landscape mesh'''
+ bl_idname = "mesh.landscape_add"
+ bl_label = "Landscape"
+ bl_options = {'REGISTER', 'UNDO'}
+ bl_description = "Add landscape mesh"
+
+ # properties
+ AutoUpdate = BoolProperty(name="Mesh update",
+ default=True,
+ description="Update mesh")
+
+ SphereMesh = BoolProperty(name="Sphere",
+ default=False,
+ description="Generate Sphere mesh")
+
+ SmoothMesh = BoolProperty(name="Smooth",
+ default=True,
+ description="Shade smooth")
+
+ Subdivision = IntProperty(name="Subdivisions",
+ min=4,
+ max=6400,
+ default=64,
+ description="Mesh x y subdivisions")
+
+ MeshSize = FloatProperty(name="Mesh Size",
+ min=0.01,
+ max=100000.0,
+ default=2.0,
+ description="Mesh size")
+
+ RandomSeed = IntProperty(name="Random Seed",
+ min=0,
+ max=9999,
+ default=0,
+ description="Randomize noise origin")
+
+ NoiseSize = FloatProperty(name="Noise Size",
+ min=0.01,
+ max=10000.0,
+ default=1.0,
+ description="Noise size")
+
+ NoiseTypes = [
+ ("0","multiFractal","multiFractal"),
+ ("1","ridgedMFractal","ridgedMFractal"),
+ ("2","hybridMFractal","hybridMFractal"),
+ ("3","heteroTerrain","heteroTerrain"),
+ ("4","fBm","fBm"),
+ ("5","Turbulence","Turbulence"),
+ ("6","Distorted Noise","Distorted Noise"),
+ ("7","Marble","Marble"),
+ ("8","Shattered_hTerrain","Shattered_hTerrain"),
+ ("9","Strata_hTerrain","Strata_hTerrain")]
+
+ NoiseType = EnumProperty(name="Type",
+ description="Noise type",
+ items=NoiseTypes)
+
+ BasisTypes = [
+ ("0","Blender","Blender"),
+ ("1","Perlin","Perlin"),
+ ("2","NewPerlin","NewPerlin"),
+ ("3","Voronoi_F1","Voronoi_F1"),
+ ("4","Voronoi_F2","Voronoi_F2"),
+ ("5","Voronoi_F3","Voronoi_F3"),
+ ("6","Voronoi_F4","Voronoi_F4"),
+ ("7","Voronoi_F2-F1","Voronoi_F2-F1"),
+ ("8","Voronoi Crackle","Voronoi Crackle"),
+ ("9","Cellnoise","Cellnoise")]
+ BasisType = EnumProperty(name="Basis",
+ description="Noise basis",
+ items=BasisTypes)
+
+ VLBasisTypes = [
+ ("0","Blender","Blender"),
+ ("1","Perlin","Perlin"),
+ ("2","NewPerlin","NewPerlin"),
+ ("3","Voronoi_F1","Voronoi_F1"),
+ ("4","Voronoi_F2","Voronoi_F2"),
+ ("5","Voronoi_F3","Voronoi_F3"),
+ ("6","Voronoi_F4","Voronoi_F4"),
+ ("7","Voronoi_F2-F1","Voronoi_F2-F1"),
+ ("8","Voronoi Crackle","Voronoi Crackle"),
+ ("9","Cellnoise","Cellnoise")]
+ VLBasisType = EnumProperty(name="VLBasis",
+ description="VLNoise basis",
+ items=VLBasisTypes)
+
+ Distortion = FloatProperty(name="Distortion",
+ min=0.01,
+ max=1000.0,
+ default=1.0,
+ description="Distortion amount")
+
+ HardNoise = BoolProperty(name="Hard",
+ default=True,
+ description="Hard noise")
+
+ NoiseDepth = IntProperty(name="Depth",
+ min=1,
+ max=16,
+ default=6,
+ description="Noise Depth - number of frequencies in the fBm.")
+
+ mDimension = FloatProperty(name="Dimension",
+ min=0.01,
+ max=2.0,
+ default=1.0,
+ description="H - fractal dimension of the roughest areas.")
+
+ mLacunarity = FloatProperty(name="Lacunarity",
+ min=0.01,
+ max=6.0,
+ default=2.0,
+ description="Lacunarity - gap between successive frequencies.")
+
+ mOffset = FloatProperty(name="Offset",
+ min=0.01,
+ max=6.0,
+ default=1.0,
+ description="Offset - raises the terrain from sea level.")
+
+ mGain = FloatProperty(name="Gain",
+ min=0.01,
+ max=6.0,
+ default=1.0,
+ description="Gain - scale factor.")
+
+ BiasTypes = [
+ ("0","Sin","Sin"),
+ ("1","Tri","Tri"),
+ ("2","Saw","Saw")]
+ MarbleBias = EnumProperty(name="Bias",
+ description="Marble bias",
+ items=BiasTypes)
+
+ SharpTypes = [
+ ("0","Soft","Soft"),
+ ("1","Sharp","Sharp"),
+ ("2","Sharper","Sharper")]
+ MarbleSharp = EnumProperty(name="Sharp",
+ description="Marble sharp",
+ items=SharpTypes)
+
+ ShapeTypes = [
+ ("0","Default","Default"),
+ ("1","Ring","Ring"),
+ ("2","Swirl","Swirl"),
+ ("3","Bump","Bump"),
+ ("4","Y","Y"),
+ ("5","X","X")]
+ MarbleShape = EnumProperty(name="Shape",
+ description="Marble shape",
+ items=ShapeTypes)
+
+ Invert = BoolProperty(name="Invert",
+ default=False,
+ description="Invert noise input")
+
+ Height = FloatProperty(name="Height",
+ min=0.01,
+ max=10000.0,
+ default=0.5,
+ description="Height scale")
+
+ Offset = FloatProperty(name="Offset",
+ min=-10000.0,
+ max=10000.0,
+ default=0.0,
+ description="Height offset")
+
+ fallTypes = [
+ ("0","None","None"),
+ ("1","Type 1","Type 1"),
+ ("2","Type 2","Type 2"),
+ ("3","Y","Y"),
+ ("4","X","X")]
+ Falloff = EnumProperty(name="Falloff",
+ description="Edge falloff",
+ default="1",
+ items=fallTypes)
+
+ Sealevel = FloatProperty(name="Sealevel",
+ min=-10000.0,
+ max=10000.0,
+ default=0.0,
+ description="Sealevel")
+
+ Plateaulevel = FloatProperty(name="Plateau",
+ min=-10000.0,
+ max=10000.0,
+ default=1.0,
+ description="Plateau level")
+
+ Strata = FloatProperty(name="Strata",
+ min=0.01,
+ max=1000.0,
+ default=3.0,
+ description="Strata amount")
+
+ StrataTypes = [
+ ("0","None","None"),
+ ("1","Type 1","Type 1"),
+ ("2","Type 2","Type 2"),
+ ("3","Type 3","Type 3")]
+ StrataType = EnumProperty(name="Strata",
+ description="Strata type",
+ default="0",
+ items=StrataTypes)
+
+ ###------------------------------------------------------------
+ # Draw
+ def draw(self, context):
+ layout = self.layout
+
+ box = layout.box()
+ box.prop(self, 'AutoUpdate')
+ box.prop(self, 'SphereMesh')
+ box.prop(self, 'SmoothMesh')
+ box.prop(self, 'Subdivision')
+ box.prop(self, 'MeshSize')
+
+ box = layout.box()
+ box.prop(self, 'NoiseType')
+ if self.NoiseType != '7':
+ box.prop(self, 'BasisType')
+ box.prop(self, 'RandomSeed')
+ box.prop(self, 'NoiseSize')
+ if self.NoiseType == '0':
+ box.prop(self, 'NoiseDepth')
+ box.prop(self, 'mDimension')
+ box.prop(self, 'mLacunarity')
+ if self.NoiseType == '1':
+ box.prop(self, 'NoiseDepth')
+ box.prop(self, 'mDimension')
+ box.prop(self, 'mLacunarity')
+ box.prop(self, 'mOffset')
+ box.prop(self, 'mGain')
+ if self.NoiseType == '2':
+ box.prop(self, 'NoiseDepth')
+ box.prop(self, 'mDimension')
+ box.prop(self, 'mLacunarity')
+ box.prop(self, 'mOffset')
+ box.prop(self, 'mGain')
+ if self.NoiseType == '3':
+ box.prop(self, 'NoiseDepth')
+ box.prop(self, 'mDimension')
+ box.prop(self, 'mLacunarity')
+ box.prop(self, 'mOffset')
+ if self.NoiseType == '4':
+ box.prop(self, 'NoiseDepth')
+ box.prop(self, 'mDimension')
+ box.prop(self, 'mLacunarity')
+ if self.NoiseType == '5':
+ box.prop(self, 'NoiseDepth')
+ box.prop(self, 'HardNoise')
+ if self.NoiseType == '6':
+ box.prop(self, 'VLBasisType')
+ box.prop(self, 'Distortion')
+ if self.NoiseType == '7':
+ box.prop(self, 'MarbleShape')
+ box.prop(self, 'MarbleBias')
+ box.prop(self, 'MarbleSharp')
+ box.prop(self, 'Distortion')
+ box.prop(self, 'NoiseDepth')
+ box.prop(self, 'HardNoise')
+ if self.NoiseType == '8':
+ box.prop(self, 'NoiseDepth')
+ box.prop(self, 'mDimension')
+ box.prop(self, 'mLacunarity')
+ box.prop(self, 'mOffset')
+ box.prop(self, 'Distortion')
+ if self.NoiseType == '9':
+ box.prop(self, 'NoiseDepth')
+ box.prop(self, 'mDimension')
+ box.prop(self, 'mLacunarity')
+ box.prop(self, 'mOffset')
+ box.prop(self, 'Distortion')
+
+ box = layout.box()
+ box.prop(self, 'Invert')
+ box.prop(self, 'Height')
+ box.prop(self, 'Offset')
+ box.prop(self, 'Plateaulevel')
+ box.prop(self, 'Sealevel')
+ if self.SphereMesh == False:
+ box.prop(self, 'Falloff')
+ box.prop(self, 'StrataType')
+ if self.StrataType != '0':
+ box.prop(self, 'Strata')
+
+ ###------------------------------------------------------------
+ # Execute
+ def execute(self, context):
+
+ #mesh update
+ if self.AutoUpdate != 0:
+
+ # turn off undo
+ undo = bpy.context.user_preferences.edit.use_global_undo
+ bpy.context.user_preferences.edit.use_global_undo = False
+
+ # deselect all objects when in object mode
+ if bpy.ops.object.select_all.poll():
+ bpy.ops.object.select_all(action='DESELECT')
+
+ # options
+ options = [
+ self.RandomSeed, #0
+ self.NoiseSize, #1
+ self.NoiseType, #2
+ self.BasisType, #3
+ self.VLBasisType, #4
+ self.Distortion, #5
+ self.HardNoise, #6
+ self.NoiseDepth, #7
+ self.mDimension, #8
+ self.mLacunarity, #9
+ self.mOffset, #10
+ self.mGain, #11
+ self.MarbleBias, #12
+ self.MarbleSharp, #13
+ self.MarbleShape, #14
+ self.Invert, #15
+ self.Height, #16
+ self.Offset, #17
+ self.Falloff, #18
+ self.Sealevel, #19
+ self.Plateaulevel, #20
+ self.Strata, #21
+ self.StrataType, #22
+ self.SphereMesh #23
+ ]
+
+ # Main function
+ if self.SphereMesh !=0:
+ # sphere
+ verts, faces = sphere_gen( self.Subdivision, self.MeshSize, options )
+ else:
+ # grid
+ verts, faces = grid_gen( self.Subdivision, self.MeshSize, options )
+
+ # create mesh object
+ obj = create_mesh_object(context, verts, [], faces, "Landscape")
+
+ # sphere, remove doubles
+ if self.SphereMesh !=0:
+ bpy.ops.object.mode_set(mode='EDIT')
+ bpy.ops.mesh.remove_doubles(limit=0.0001)
+ bpy.ops.object.mode_set(mode='OBJECT')
+
+ # Shade smooth
+ if self.SmoothMesh !=0:
+ if bpy.ops.object.shade_smooth.poll():
+ bpy.ops.object.shade_smooth()
+ else: # edit mode
+ bpy.ops.mesh.faces_shade_smooth()
+
+ # restore pre operator undo state
+ bpy.context.user_preferences.edit.use_global_undo = undo
+
+ return {'FINISHED'}
+ else:
+ return {'PASS_THROUGH'}
+
+
+###------------------------------------------------------------
+# Register
+
+ # Define "Landscape" menu
+def menu_func_landscape(self, context):
+ self.layout.operator(landscape_add.bl_idname, text="Landscape", icon="PLUGIN")
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ bpy.types.INFO_MT_mesh_add.append(menu_func_landscape)
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+ bpy.types.INFO_MT_mesh_add.remove(menu_func_landscape)
+
+if __name__ == "__main__":
+ register()
diff --git a/add_mesh_extra_objects/__init__.py b/add_mesh_extra_objects/__init__.py
new file mode 100644
index 00000000..ad231d02
--- /dev/null
+++ b/add_mesh_extra_objects/__init__.py
@@ -0,0 +1,134 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+bl_info = {
+ "name": "Extra Objects",
+ "author": "Pontiac, Fourmadmen, varkenvarken, tuga3d, meta-androcto",
+ "version": (0, 1),
+ "blender": (2, 5, 7),
+ "api": 35853,
+ "location": "View3D > Add > Mesh > Extra Objects",
+ "description": "Adds More Object Types.",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/Add_Mesh/Add_Extra",
+ "tracker_url": "http://projects.blender.org/tracker/index.php?"\
+ "func=detail&aid=22457",
+ "category": "Add Mesh"}
+
+
+if "bpy" in locals():
+ import imp
+ imp.reload(add_mesh_extra_objects)
+ imp.reload(add_mesh_twisted_torus)
+ imp.reload(add_mesh_gemstones)
+ imp.reload(add_mesh_gears)
+ imp.reload(add_mesh_3d_function_surface)
+else:
+ from . import add_mesh_extra_objects
+ from . import add_mesh_twisted_torus
+ from . import add_mesh_gemstones
+ from . import add_mesh_gears
+ from . import add_mesh_3d_function_surface
+
+import bpy
+
+
+class INFO_MT_mesh_extras_add(bpy.types.Menu):
+ # Define the "Extras" menu
+ bl_idname = "INFO_MT_mesh_extra_objects_add"
+ bl_label = "Extra Objects"
+
+ def draw(self, context):
+ layout = self.layout
+ layout.operator_context = 'INVOKE_REGION_WIN'
+ layout.menu("INFO_MT_mesh_gemstones_add", text="Gemstones")
+ layout.menu("INFO_MT_mesh_gears_add", text="Gears")
+ layout.menu("INFO_MT_mesh_math_add", text="Math Function")
+ layout.operator("mesh.primitive_twisted_torus_add",
+ text="Twisted Torus")
+ layout.operator("mesh.primitive_sqorus_add",
+ text="Sqorus")
+ layout.operator("mesh.primitive_wedge_add")
+ layout.operator("mesh.primitive_star_add",
+ text="Star")
+ layout.operator("mesh.primitive_trapezohedron_add",
+ text="Trapezohedron")
+
+class INFO_MT_mesh_gemstones_add(bpy.types.Menu):
+ # Define the "Gemstones" menu
+ bl_idname = "INFO_MT_mesh_gemstones_add"
+ bl_label = "Gemstones"
+
+ def draw(self, context):
+ layout = self.layout
+ layout.operator_context = 'INVOKE_REGION_WIN'
+ layout.operator("mesh.primitive_diamond_add",
+ text="Diamond")
+ layout.operator("mesh.primitive_gem_add",
+ text="Gem")
+
+
+class INFO_MT_mesh_gears_add(bpy.types.Menu):
+ # Define the "Gears" menu
+ bl_idname = "INFO_MT_mesh_gears_add"
+ bl_label = "Gears"
+
+ def draw(self, context):
+ layout = self.layout
+ layout.operator_context = 'INVOKE_REGION_WIN'
+ layout.operator("mesh.primitive_gear",
+ text="Gear")
+ layout.operator("mesh.primitive_worm_gear",
+ text="Worm")
+
+class INFO_MT_mesh_math_add(bpy.types.Menu):
+ # Define the "Math Function" menu
+ bl_idname = "INFO_MT_mesh_math_add"
+ bl_label = "Math Functions"
+
+ def draw(self, context):
+ layout = self.layout
+ layout.operator_context = 'INVOKE_REGION_WIN'
+ layout.operator("mesh.primitive_z_function_surface",
+ text="Z Math Surface")
+ layout.operator("mesh.primitive_xyz_function_surface",
+ text="XYZ Math Surface")
+
+# Register all operators and panels
+
+# Define "Extras" menu
+def menu_func(self, context):
+ self.layout.menu("INFO_MT_mesh_extra_objects_add", icon="PLUGIN")
+
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ # Add "Extras" menu to the "Add Mesh" menu
+ bpy.types.INFO_MT_mesh_add.append(menu_func)
+
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+ # Remove "Extras" menu from the "Add Mesh" menu.
+ bpy.types.INFO_MT_mesh_add.remove(menu_func)
+
+if __name__ == "__main__":
+ register()
diff --git a/add_mesh_extra_objects/add_mesh_3d_function_surface.py b/add_mesh_extra_objects/add_mesh_3d_function_surface.py
new file mode 100644
index 00000000..8965a820
--- /dev/null
+++ b/add_mesh_extra_objects/add_mesh_3d_function_surface.py
@@ -0,0 +1,617 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+"""
+bl_info = {
+ "name": "3D Function Surfaces",
+ "author": "Buerbaum Martin (Pontiac), Elod Csirmaz",
+ "version": (0, 3, 8),
+ "blender": (2, 5, 7),
+ "api": 37329,
+ "location": "View3D > Add > Mesh",
+ "description": "Create Objects using Math Formulas",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/Add_Mesh/Add_3d_Function_Surface",
+ "tracker_url": "https://projects.blender.org/tracker/index.php?"\
+ "func=detail&aid=21444",
+ "category": "Add Mesh"}
+"""
+"""
+Z Function Surface
+
+This script lets the user create a surface where the z coordinate
+is a function of the x and y coordinates.
+
+ z = F1(x,y)
+
+X,Y,Z Function Surface
+
+This script lets the user create a surface where the x, y and z
+coordinates are defined by a function.
+
+ x = F1(u,v)
+ y = F2(u,v)
+ z = F3(u,v)
+
+Usage:
+You have to activate the script in the "Add-Ons" tab (user preferences).
+The functionality can then be accessed via the
+"Add Mesh" -> "Z Function Surface"
+and
+"Add Mesh" -> "X,Y,Z Function Surface"
+menu.
+
+Version history:
+v0.3.8 - Patch by Elod Csirmaz
+ Modified the "Add X,Y,Z Function Surface" part:
+ Changed how wrapping is done to avoid
+ generating unnecessary vertices and make the result more intuitive.
+ Added helper functions the results of which can be used in
+ x(u,v), y(u,v), z(u,v).
+ The script can now close the ends of a U-wrapped surface.
+ It's now possible to create multiple objects with one set of formulae.
+v0.3.7
+ Removed the various "edit" properties - not used anymore.
+ Use generic tracker URL (Blender-Extensions r1369)
+ bl_addon_info now called bl_info
+ Removed align_matrix
+ create_mesh_object now doesn't handle editmode. (See create_mesh_object)
+ This script is now used by the "Extra Objects" script
+v0.3.6 - Various updates to match current Blender API.
+ Removed recall functionality.
+ Better code for align_matrix
+ Hopefully fixed bug where uMax was never reached. May cause other stuff.
+v0.3.5 - createFaces can now "Flip" faces and create fan/star like faces.
+v0.3.4 - Updated store_recall_properties, apply_object_align
+ and create_mesh_object.
+ Changed how recall data is stored.
+v0.3.3 - API change Mathutils -> mathutils (r557)
+v0.3.2 - Various fixes&streamlining by ideasman42/Campbell Barton.
+ r544 Compile expressions for faster execution
+ r544 Use operator reports for errors too
+ r544 Avoid type checks by converting to a float, errors
+ converting to a float are reported too.
+ Fixed an error Campbell overlooked (appending tuples to an
+ array, not single values) Thanks for the report wild_doogy.
+ Added 'description' field, updated 'wiki_url'.
+ Made the script PEP8 compatible again.
+v0.3.1 - Use hidden "edit" property for "recall" operator.
+ Bugfix: Z Function was mixing up div_x and div_y
+v0.3 - X,Y,Z Function Surface (by Ed Mackey & tuga3d).
+ Renamed old function to "Z Function Surface".
+ Align the geometry to the view if the user preference says so.
+ Store recall properties in newly created object.
+v0.2.3 - Use bl_info for Add-On information.
+v0.2.2 - Fixed Add-On registration text.
+v0.2.1 - Fixed some new API stuff.
+ Mainly we now have the register/unregister functions.
+ Also the new() function for objects now accepts a mesh object.
+ Changed the script so it can be managed from the "Add-Ons" tab
+ in the user preferences.
+ Added dummy "PLUGIN" icon.
+ Corrected FSF address.
+ Clean up of tooltips.
+v0.2 - Added security check for eval() function
+ Check return value of eval() for complex numbers.
+v0.1.1 - Use 'CANCELLED' return value when failing.
+ Updated web links.
+v0.1 - Initial revision.
+More Links:
+http://gitorious.org/blender-scripts/blender-3d-function-surface
+http://blenderartists.org/forum/showthread.php?t=179043
+"""
+import bpy
+from mathutils import *
+from math import *
+from bpy.props import *
+
+# List of safe functions for eval()
+safe_list = ['math', 'acos', 'asin', 'atan', 'atan2', 'ceil', 'cos', 'cosh',
+ 'degrees', 'e', 'exp', 'fabs', 'floor', 'fmod', 'frexp', 'hypot',
+ 'ldexp', 'log', 'log10', 'modf', 'pi', 'pow', 'radians',
+ 'sin', 'sinh', 'sqrt', 'tan', 'tanh']
+
+# Use the list to filter the local namespace
+safe_dict = dict((k, globals().get(k, None)) for k in safe_list)
+
+
+# Stores the values of a list of properties and the
+# operator id in a property group ('recall_op') inside the object.
+# Could (in theory) be used for non-objects.
+# Note: Replaces any existing property group with the same name!
+# ob ... Object to store the properties in.
+# op ... The operator that should be used.
+# op_args ... A dictionary with valid Blender
+# properties (operator arguments/parameters).
+
+
+# Create a new mesh (object) from verts/edges/faces.
+# verts/edges/faces ... List of vertices/edges/faces for the
+# new mesh (as used in from_pydata).
+# name ... Name of the new mesh (& object).
+def create_mesh_object(context, verts, edges, faces, name):
+
+ # Create new mesh
+ mesh = bpy.data.meshes.new(name)
+
+ # Make a mesh from a list of verts/edges/faces.
+ mesh.from_pydata(verts, edges, faces)
+
+ # Update mesh geometry after adding stuff.
+ mesh.update()
+
+ from bpy_extras import object_utils
+ return object_utils.object_data_add(context, mesh, operator=None)
+
+
+# A very simple "bridge" tool.
+# Connects two equally long vertex rows with faces.
+# Returns a list of the new faces (list of lists)
+#
+# vertIdx1 ... First vertex list (list of vertex indices).
+# vertIdx2 ... Second vertex list (list of vertex indices).
+# closed ... Creates a loop (first & last are closed).
+# flipped ... Invert the normal of the face(s).
+#
+# Note: You can set vertIdx1 to a single vertex index to create
+# a fan/star of faces.
+# Note: If both vertex idx list are the same length they have
+# to have at least 2 vertices.
+def createFaces(vertIdx1, vertIdx2, closed=False, flipped=False):
+ faces = []
+
+ if not vertIdx1 or not vertIdx2:
+ return None
+
+ if len(vertIdx1) < 2 and len(vertIdx2) < 2:
+ return None
+
+ fan = False
+ if (len(vertIdx1) != len(vertIdx2)):
+ if (len(vertIdx1) == 1 and len(vertIdx2) > 1):
+ fan = True
+ else:
+ return None
+
+ total = len(vertIdx2)
+
+ if closed:
+ # Bridge the start with the end.
+ if flipped:
+ face = [
+ vertIdx1[0],
+ vertIdx2[0],
+ vertIdx2[total - 1]]
+ if not fan:
+ face.append(vertIdx1[total - 1])
+ faces.append(face)
+
+ else:
+ face = [vertIdx2[0], vertIdx1[0]]
+ if not fan:
+ face.append(vertIdx1[total - 1])
+ face.append(vertIdx2[total - 1])
+ faces.append(face)
+
+ # Bridge the rest of the faces.
+ for num in range(total - 1):
+ if flipped:
+ if fan:
+ face = [vertIdx2[num], vertIdx1[0], vertIdx2[num + 1]]
+ else:
+ face = [vertIdx2[num], vertIdx1[num],
+ vertIdx1[num + 1], vertIdx2[num + 1]]
+ faces.append(face)
+ else:
+ if fan:
+ face = [vertIdx1[0], vertIdx2[num], vertIdx2[num + 1]]
+ else:
+ face = [vertIdx1[num], vertIdx2[num],
+ vertIdx2[num + 1], vertIdx1[num + 1]]
+ faces.append(face)
+
+ return faces
+
+
+class AddZFunctionSurface(bpy.types.Operator):
+ '''Add a surface defined by a function z=f(x,y)'''
+ bl_idname = "mesh.primitive_z_function_surface"
+ bl_label = "Add Z Function Surface"
+ bl_options = {'REGISTER', 'UNDO'}
+
+ equation = StringProperty(name="Z Equation",
+ description="Equation for z=f(x,y)",
+ default="1 - ( x**2 + y**2 )")
+
+ div_x = IntProperty(name="X Subdivisions",
+ description="Number of vertices in x direction.",
+ default=16,
+ min=3,
+ max=256)
+ div_y = IntProperty(name="Y Subdivisions",
+ description="Number of vertices in y direction.",
+ default=16,
+ min=3,
+ max=256)
+
+ size_x = FloatProperty(name="X Size",
+ description="Size of the x axis.",
+ default=2.0,
+ min=0.01,
+ max=100.0,
+ unit="LENGTH")
+ size_y = FloatProperty(name="Y Size",
+ description="Size of the y axis.",
+ default=2.0,
+ min=0.01,
+ max=100.0,
+ unit="LENGTH")
+
+ def execute(self, context):
+ equation = self.equation
+ div_x = self.div_x
+ div_y = self.div_y
+ size_x = self.size_x
+ size_y = self.size_y
+
+ verts = []
+ faces = []
+
+ delta_x = size_x / float(div_x - 1)
+ delta_y = size_y / float(div_y - 1)
+ start_x = -(size_x / 2.0)
+ start_y = -(size_y / 2.0)
+
+ edgeloop_prev = []
+
+ try:
+ expr_args = (
+ compile(equation, __file__, 'eval'),
+ {"__builtins__": None},
+ safe_dict)
+ except:
+ import traceback
+ self.report({'ERROR'}, "Error parsing expression: "
+ + traceback.format_exc(limit=1))
+ return {'CANCELLED'}
+
+ for row_x in range(div_x):
+ edgeloop_cur = []
+ x = start_x + row_x * delta_x
+
+ for row_y in range(div_y):
+ y = start_y + row_y * delta_y
+ z = 0.0
+
+ safe_dict['x'] = x
+ safe_dict['y'] = y
+
+ # Try to evaluate the equation.
+ try:
+ z = float(eval(*expr_args))
+ except:
+ import traceback
+ self.report({'ERROR'}, "Error evaluating expression: "
+ + traceback.format_exc(limit=1))
+ return {'CANCELLED'}
+
+ edgeloop_cur.append(len(verts))
+ verts.append((x, y, z))
+
+ if len(edgeloop_prev) > 0:
+ faces_row = createFaces(edgeloop_prev, edgeloop_cur)
+ faces.extend(faces_row)
+
+ edgeloop_prev = edgeloop_cur
+
+ base = create_mesh_object(context, verts, [], faces, "Z Function")
+
+ return {'FINISHED'}
+
+
+def xyz_function_surface_faces(self, x_eq, y_eq, z_eq,
+ range_u_min, range_u_max, range_u_step, wrap_u,
+ range_v_min, range_v_max, range_v_step, wrap_v,
+ a_eq, b_eq, c_eq, f_eq, g_eq, h_eq, n, close_v):
+
+ verts = []
+ faces = []
+
+ # Distance of each step in Blender Units
+ uStep = (range_u_max - range_u_min) / range_u_step
+ vStep = (range_v_max - range_v_min) / range_v_step
+
+ # Number of steps in the vertex creation loops.
+ # Number of steps is the number of faces
+ # => Number of points is +1 unless wrapped.
+ uRange = range_u_step + 1
+ vRange = range_v_step + 1
+
+ if wrap_u:
+ uRange = uRange - 1
+
+ if wrap_v:
+ vRange = vRange - 1
+
+ try:
+ expr_args_x = (
+ compile(x_eq, __file__.replace(".py", "_x.py"), 'eval'),
+ {"__builtins__": None},
+ safe_dict)
+ expr_args_y = (
+ compile(y_eq, __file__.replace(".py", "_y.py"), 'eval'),
+ {"__builtins__": None},
+ safe_dict)
+ expr_args_z = (
+ compile(z_eq, __file__.replace(".py", "_z.py"), 'eval'),
+ {"__builtins__": None},
+ safe_dict)
+ expr_args_a = (
+ compile(a_eq, __file__.replace(".py", "_a.py"), 'eval'),
+ {"__builtins__": None},
+ safe_dict)
+ expr_args_b = (
+ compile(b_eq, __file__.replace(".py", "_b.py"), 'eval'),
+ {"__builtins__": None},
+ safe_dict)
+ expr_args_c = (
+ compile(c_eq, __file__.replace(".py", "_c.py"), 'eval'),
+ {"__builtins__": None},
+ safe_dict)
+ expr_args_f = (
+ compile(f_eq, __file__.replace(".py", "_f.py"), 'eval'),
+ {"__builtins__": None},
+ safe_dict)
+ expr_args_g = (
+ compile(g_eq, __file__.replace(".py", "_g.py"), 'eval'),
+ {"__builtins__": None},
+ safe_dict)
+ expr_args_h = (
+ compile(h_eq, __file__.replace(".py", "_h.py"), 'eval'),
+ {"__builtins__": None},
+ safe_dict)
+ except:
+ import traceback
+ self.report({'ERROR'}, "Error parsing expression: "
+ + traceback.format_exc(limit=1))
+ return [], []
+
+ for vN in range(vRange):
+ v = range_v_min + (vN * vStep)
+
+ for uN in range(uRange):
+ u = range_u_min + (uN * uStep)
+
+ safe_dict['u'] = u
+ safe_dict['v'] = v
+
+ safe_dict['n'] = n
+
+ # Try to evaluate the equations.
+ try:
+ a = float(eval(*expr_args_a))
+ b = float(eval(*expr_args_b))
+ c = float(eval(*expr_args_c))
+
+ safe_dict['a'] = a
+ safe_dict['b'] = b
+ safe_dict['c'] = c
+
+ f = float(eval(*expr_args_f))
+ g = float(eval(*expr_args_g))
+ h = float(eval(*expr_args_h))
+
+ safe_dict['f'] = f
+ safe_dict['g'] = g
+ safe_dict['h'] = h
+
+ verts.append((
+ float(eval(*expr_args_x)),
+ float(eval(*expr_args_y)),
+ float(eval(*expr_args_z))))
+
+ except:
+ import traceback
+ self.report({'ERROR'}, "Error evaluating expression: "
+ + traceback.format_exc(limit=1))
+ return [], []
+
+ for vN in range(range_v_step):
+ vNext = vN + 1
+
+ if wrap_v and (vNext >= vRange):
+ vNext = 0
+
+ for uN in range(range_u_step):
+ uNext = uN + 1
+
+ if wrap_u and (uNext >= uRange):
+ uNext = 0
+
+ faces.append([(vNext * uRange) + uNext,
+ (vNext * uRange) + uN,
+ (vN * uRange) + uN,
+ (vN * uRange) + uNext])
+
+ if close_v and wrap_u and (not wrap_v):
+ for uN in range(1, range_u_step - 1):
+ faces.append([
+ range_u_step - 1,
+ range_u_step - 1 - uN,
+ range_u_step - 2 - uN])
+ faces.append([
+ range_v_step * uRange,
+ range_v_step * uRange + uN,
+ range_v_step * uRange + uN + 1])
+
+ return verts, faces
+
+
+# Original Script "Parametric.py" by Ed Mackey.
+# -> http://www.blinken.com/blender-plugins.php
+# Partly converted for Blender 2.5 by tuga3d.
+#
+# Sphere:
+# x = sin(2*pi*u)*sin(pi*v)
+# y = cos(2*pi*u)*sin(pi*v)
+# z = cos(pi*v)
+# u_min = v_min = 0
+# u_max = v_max = 1
+#
+# "Snail shell"
+# x = 1.2**v*(sin(u)**2 *sin(v))
+# y = 1.2**v*(sin(u)*cos(u))
+# z = 1.2**v*(sin(u)**2 *cos(v))
+# u_min = 0
+# u_max = pi
+# v_min = -pi/4,
+# v max = 5*pi/2
+class AddXYZFunctionSurface(bpy.types.Operator):
+ '''Add a surface defined by 3 functions:''' \
+ + ''' x=F1(u,v), y=F2(u,v) and z=F3(u,v)'''
+ bl_idname = "mesh.primitive_xyz_function_surface"
+ bl_label = "Add X,Y,Z Function Surface"
+ bl_options = {'REGISTER', 'UNDO'}
+
+ x_eq = StringProperty(name="X equation",
+ description="Equation for x=F(u,v). " \
+ "Also available: n, a, b, c, f, g, h",
+ default="cos(v)*(1+cos(u))*sin(v/8)")
+
+ y_eq = StringProperty(name="Y equation",
+ description="Equation for y=F(u,v). " \
+ "Also available: n, a, b, c, f, g, h",
+ default="sin(u)*sin(v/8)+cos(v/8)*1.5")
+
+ z_eq = StringProperty(name="Z equation",
+ description="Equation for z=F(u,v). " \
+ "Also available: n, a, b, c, f, g, h",
+ default="sin(v)*(1+cos(u))*sin(v/8)")
+
+ range_u_min = FloatProperty(name="U min",
+ description="Minimum U value. Lower boundary of U range.",
+ min=-100.00,
+ max=0.00,
+ default=0.00)
+
+ range_u_max = FloatProperty(name="U max",
+ description="Maximum U value. Upper boundary of U range.",
+ min=0.00,
+ max=100.00,
+ default=2 * pi)
+
+ range_u_step = IntProperty(name="U step",
+ description="U Subdivisions",
+ min=1,
+ max=1024,
+ default=32)
+
+ wrap_u = BoolProperty(name="U wrap",
+ description="U Wrap around",
+ default=True)
+
+ range_v_min = FloatProperty(name="V min",
+ description="Minimum V value. Lower boundary of V range.",
+ min=-100.00,
+ max=0.00,
+ default=0.00)
+
+ range_v_max = FloatProperty(name="V max",
+ description="Maximum V value. Upper boundary of V range.",
+ min=0.00,
+ max=100.00,
+ default=4 * pi)
+
+ range_v_step = IntProperty(name="V step",
+ description="V Subdivisions",
+ min=1,
+ max=1024,
+ default=128)
+
+ wrap_v = BoolProperty(name="V wrap",
+ description="V Wrap around",
+ default=False)
+
+ close_v = BoolProperty(name="Close V",
+ description="Create faces for first and last " \
+ "V values (only if U is wrapped)",
+ default=False)
+
+ n_eq = IntProperty(name="Number of objects (n=0..N-1)",
+ description="The parameter n will be the index " \
+ "of the current object, 0 to N-1",
+ min=1,
+ max=100,
+ default=1)
+
+ a_eq = StringProperty(name="A helper function",
+ description="Equation for a=F(u,v). Also available: n",
+ default="0")
+
+ b_eq = StringProperty(name="B helper function",
+ description="Equation for b=F(u,v). Also available: n",
+ default="0")
+
+ c_eq = StringProperty(name="C helper function",
+ description="Equation for c=F(u,v). Also available: n",
+ default="0")
+
+ f_eq = StringProperty(name="F helper function",
+ description="Equation for f=F(u,v). Also available: n, a, b, c",
+ default="0")
+
+ g_eq = StringProperty(name="G helper function",
+ description="Equation for g=F(u,v). Also available: n, a, b, c",
+ default="0")
+
+ h_eq = StringProperty(name="H helper function",
+ description="Equation for h=F(u,v). Also available: n, a, b, c",
+ default="0")
+
+ def execute(self, context):
+
+ for n in range(0, self.n_eq):
+
+ verts, faces = xyz_function_surface_faces(
+ self,
+ self.x_eq,
+ self.y_eq,
+ self.z_eq,
+ self.range_u_min,
+ self.range_u_max,
+ self.range_u_step,
+ self.wrap_u,
+ self.range_v_min,
+ self.range_v_max,
+ self.range_v_step,
+ self.wrap_v,
+ self.a_eq,
+ self.b_eq,
+ self.c_eq,
+ self.f_eq,
+ self.g_eq,
+ self.h_eq,
+ n,
+ self.close_v)
+
+ if not verts:
+ return {'CANCELLED'}
+
+ obj = create_mesh_object(context, verts, [], faces, "XYZ Function")
+
+ return {'FINISHED'}
diff --git a/add_mesh_extra_objects/add_mesh_extra_objects.py b/add_mesh_extra_objects/add_mesh_extra_objects.py
new file mode 100644
index 00000000..c5caa120
--- /dev/null
+++ b/add_mesh_extra_objects/add_mesh_extra_objects.py
@@ -0,0 +1,492 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+import bpy
+from mathutils import *
+from math import *
+from bpy.props import *
+
+# Create a new mesh (object) from verts/edges/faces.
+# verts/edges/faces ... List of vertices/edges/faces for the
+# new mesh (as used in from_pydata).
+# name ... Name of the new mesh (& object).
+def create_mesh_object(context, verts, edges, faces, name):
+ scene = context.scene
+ obj_act = scene.objects.active
+
+ # Create new mesh
+ mesh = bpy.data.meshes.new(name)
+
+ # Make a mesh from a list of verts/edges/faces.
+ mesh.from_pydata(verts, edges, faces)
+
+ # Update mesh geometry after adding stuff.
+ mesh.update()
+
+ from bpy_extras import object_utils
+ return object_utils.object_data_add(context, mesh, operator=None)
+
+
+# A very simple "bridge" tool.
+# Connects two equally long vertex rows with faces.
+# Returns a list of the new faces (list of lists)
+#
+# vertIdx1 ... First vertex list (list of vertex indices).
+# vertIdx2 ... Second vertex list (list of vertex indices).
+# closed ... Creates a loop (first & last are closed).
+# flipped ... Invert the normal of the face(s).
+#
+# Note: You can set vertIdx1 to a single vertex index to create
+# a fan/star of faces.
+# Note: If both vertex idx list are the same length they have
+# to have at least 2 vertices.
+def createFaces(vertIdx1, vertIdx2, closed=False, flipped=False):
+ faces = []
+
+ if not vertIdx1 or not vertIdx2:
+ return None
+
+ if len(vertIdx1) < 2 and len(vertIdx2) < 2:
+ return None
+
+ fan = False
+ if (len(vertIdx1) != len(vertIdx2)):
+ if (len(vertIdx1) == 1 and len(vertIdx2) > 1):
+ fan = True
+ else:
+ return None
+
+ total = len(vertIdx2)
+
+ if closed:
+ # Bridge the start with the end.
+ if flipped:
+ face = [
+ vertIdx1[0],
+ vertIdx2[0],
+ vertIdx2[total - 1]]
+ if not fan:
+ face.append(vertIdx1[total - 1])
+ faces.append(face)
+
+ else:
+ face = [vertIdx2[0], vertIdx1[0]]
+ if not fan:
+ face.append(vertIdx1[total - 1])
+ face.append(vertIdx2[total - 1])
+ faces.append(face)
+
+ # Bridge the rest of the faces.
+ for num in range(total - 1):
+ if flipped:
+ if fan:
+ face = [vertIdx2[num], vertIdx1[0], vertIdx2[num + 1]]
+ else:
+ face = [vertIdx2[num], vertIdx1[num],
+ vertIdx1[num + 1], vertIdx2[num + 1]]
+ faces.append(face)
+ else:
+ if fan:
+ face = [vertIdx1[0], vertIdx2[num], vertIdx2[num + 1]]
+ else:
+ face = [vertIdx1[num], vertIdx2[num],
+ vertIdx2[num + 1], vertIdx1[num + 1]]
+ faces.append(face)
+
+ return faces
+
+
+# @todo Clean up vertex&face creation process a bit.
+def add_sqorus(hole_size, subdivide):
+ verts = []
+ faces = []
+
+ size = 2.0
+
+ thickness = (size - hole_size) / 2.0
+ distances = [
+ -size / 2.0,
+ -size / 2.0 + thickness,
+ size / 2.0 - thickness,
+ size / 2.0]
+
+ if subdivide:
+ for i in range(4):
+ y = distances[i]
+
+ for j in range(4):
+ x = distances[j]
+
+ verts.append(Vector((x, y, size / 2.0)))
+ verts.append(Vector((x, y, -size / 2.0)))
+
+ # Top outer loop (vertex indices)
+ vIdx_out_up = [0, 2, 4, 6, 14, 22, 30, 28, 26, 24, 16, 8]
+ # Lower outer loop (vertex indices)
+ vIdx_out_low = [i + 1 for i in vIdx_out_up]
+
+ faces_outside = createFaces(vIdx_out_up, vIdx_out_low, closed=True)
+ faces.extend(faces_outside)
+
+ # Top inner loop (vertex indices)
+ vIdx_inner_up = [10, 12, 20, 18]
+
+ # Lower inner loop (vertex indices)
+ vIdx_inner_low = [i + 1 for i in vIdx_inner_up]
+
+ faces_inside = createFaces(vIdx_inner_up, vIdx_inner_low,
+ closed=True, flipped=True)
+ faces.extend(faces_inside)
+
+ row1_top = [0, 8, 16, 24]
+ row2_top = [i + 2 for i in row1_top]
+ row3_top = [i + 2 for i in row2_top]
+ row4_top = [i + 2 for i in row3_top]
+
+ faces_top1 = createFaces(row1_top, row2_top)
+ faces.extend(faces_top1)
+ faces_top2_side1 = createFaces(row2_top[:2], row3_top[:2])
+ faces.extend(faces_top2_side1)
+ faces_top2_side2 = createFaces(row2_top[2:], row3_top[2:])
+ faces.extend(faces_top2_side2)
+ faces_top3 = createFaces(row3_top, row4_top)
+ faces.extend(faces_top3)
+
+ row1_bot = [1, 9, 17, 25]
+ row2_bot = [i + 2 for i in row1_bot]
+ row3_bot = [i + 2 for i in row2_bot]
+ row4_bot = [i + 2 for i in row3_bot]
+
+ faces_bot1 = createFaces(row1_bot, row2_bot, flipped=True)
+ faces.extend(faces_bot1)
+ faces_bot2_side1 = createFaces(row2_bot[:2], row3_bot[:2],
+ flipped=True)
+ faces.extend(faces_bot2_side1)
+ faces_bot2_side2 = createFaces(row2_bot[2:], row3_bot[2:],
+ flipped=True)
+ faces.extend(faces_bot2_side2)
+ faces_bot3 = createFaces(row3_bot, row4_bot, flipped=True)
+ faces.extend(faces_bot3)
+
+ else:
+ # Do not subdivide outer faces
+
+ vIdx_out_up = []
+ vIdx_out_low = []
+ vIdx_in_up = []
+ vIdx_in_low = []
+
+ for i in range(4):
+ y = distances[i]
+
+ for j in range(4):
+ x = distances[j]
+
+ append = False
+ inner = False
+ # Outer
+ if (i in [0, 3] and j in [0, 3]):
+ append = True
+
+ # Inner
+ if (i in [1, 2] and j in [1, 2]):
+ append = True
+ inner = True
+
+ if append:
+ vert_up = len(verts)
+ verts.append(Vector((x, y, size / 2.0)))
+ vert_low = len(verts)
+ verts.append(Vector((x, y, -size / 2.0)))
+
+ if inner:
+ vIdx_in_up.append(vert_up)
+ vIdx_in_low.append(vert_low)
+
+ else:
+ vIdx_out_up.append(vert_up)
+ vIdx_out_low.append(vert_low)
+
+ # Flip last two vertices
+ vIdx_out_up = vIdx_out_up[:2] + list(reversed(vIdx_out_up[2:]))
+ vIdx_out_low = vIdx_out_low[:2] + list(reversed(vIdx_out_low[2:]))
+ vIdx_in_up = vIdx_in_up[:2] + list(reversed(vIdx_in_up[2:]))
+ vIdx_in_low = vIdx_in_low[:2] + list(reversed(vIdx_in_low[2:]))
+
+ # Create faces
+ faces_top = createFaces(vIdx_in_up, vIdx_out_up, closed=True)
+ faces.extend(faces_top)
+ faces_bottom = createFaces(vIdx_out_low, vIdx_in_low, closed=True)
+ faces.extend(faces_bottom)
+ faces_inside = createFaces(vIdx_in_low, vIdx_in_up, closed=True)
+ faces.extend(faces_inside)
+ faces_outside = createFaces(vIdx_out_up, vIdx_out_low, closed=True)
+ faces.extend(faces_outside)
+
+ return verts, faces
+
+
+def add_wedge(size_x, size_y, size_z):
+ verts = []
+ faces = []
+
+ size_x /= 2.0
+ size_y /= 2.0
+ size_z /= 2.0
+
+ vIdx_top = []
+ vIdx_bot = []
+
+ vIdx_top.append(len(verts))
+ verts.append(Vector((-size_x, -size_y, size_z)))
+ vIdx_bot.append(len(verts))
+ verts.append(Vector((-size_x, -size_y, -size_z)))
+
+ vIdx_top.append(len(verts))
+ verts.append(Vector((size_x, -size_y, size_z)))
+ vIdx_bot.append(len(verts))
+ verts.append(Vector((size_x, -size_y, -size_z)))
+
+ vIdx_top.append(len(verts))
+ verts.append(Vector((-size_x, size_y, size_z)))
+ vIdx_bot.append(len(verts))
+ verts.append(Vector((-size_x, size_y, -size_z)))
+
+ faces.append(vIdx_top)
+ faces.append(vIdx_bot)
+ faces_outside = createFaces(vIdx_top, vIdx_bot, closed=True)
+ faces.extend(faces_outside)
+
+ return verts, faces
+
+def add_star(points, outer_radius, inner_radius, height):
+ PI_2 = pi * 2
+ z_axis = (0, 0, 1)
+
+ verts = []
+ faces = []
+
+ segments = points * 2
+
+ half_height = height / 2.0
+
+ vert_idx_top = len(verts)
+ verts.append(Vector((0.0, 0.0, half_height)))
+
+ vert_idx_bottom = len(verts)
+ verts.append(Vector((0.0, 0.0, -half_height)))
+
+ edgeloop_top = []
+ edgeloop_bottom = []
+
+ for index in range(segments):
+ quat = Quaternion(z_axis, (index / segments) * PI_2)
+
+ if index % 2:
+ # Uneven
+ radius = outer_radius
+ else:
+ # Even
+ radius = inner_radius
+
+ edgeloop_top.append(len(verts))
+ vec = Vector((radius, 0, half_height)) * quat
+ verts.append(vec)
+
+ edgeloop_bottom.append(len(verts))
+ vec = Vector((radius, 0, -half_height)) * quat
+ verts.append(vec)
+
+
+
+ faces_top = createFaces([vert_idx_top], edgeloop_top, closed=True)
+ faces_outside = createFaces(edgeloop_top, edgeloop_bottom, closed=True)
+ faces_bottom = createFaces([vert_idx_bottom], edgeloop_bottom,
+ flipped=True, closed=True)
+
+ faces.extend(faces_top)
+ faces.extend(faces_outside)
+ faces.extend(faces_bottom)
+
+ return verts, faces
+
+def trapezohedron(s,r,h):
+ """
+ s = segments
+ r = base radius
+ h = tip height
+ """
+
+ # calculate constants
+ a = 2*pi/(2*s) # angle between points along the equator
+ l = r*cos(a) # helper for e
+ e = h*(r-l)/(l+r) # the z offset for each vector along the equator so faces are planar
+
+ # rotation for the points
+ quat = Quaternion((0,0,1),a)
+
+ # first 3 vectors, every next one is calculated from the last, and the z-value is negated
+ verts = [Vector(i) for i in [(0,0,h),(0,0,-h),(r,0,e)]]
+ for i in range(2*s-1):
+ verts.append(verts[-1]*quat) # rotate further "a" radians around the z-axis
+ verts[-1].z *= -1 # negate last z-value to account for the zigzag
+
+ faces = []
+ for i in range(2,2+2*s,2):
+ n = [i+1,i+2,i+3] # vertices in current section
+ for j in range(3): # check whether the numbers dont go over len(verts)
+ if n[j]>=2*s+2: n[j]-=2*s # if so, subtract len(verts)-2
+
+ # add faces of current section
+ faces.append([0,i]+n[:2])
+ faces.append([1,n[2],n[1],n[0]])
+
+ return verts,faces
+
+class AddSqorus(bpy.types.Operator):
+ '''Add a sqorus mesh.'''
+ bl_idname = "mesh.primitive_sqorus_add"
+ bl_label = "Add Sqorus"
+ bl_options = {'REGISTER', 'UNDO'}
+
+ hole_size = FloatProperty(name="Hole Size",
+ description="Size of the Hole",
+ min=0.01,
+ max=1.99,
+ default=2.0 / 3.0)
+ subdivide = BoolProperty(name="Subdivide Outside",
+ description="Enable to subdivide the faces on the outside." \
+ " This results in equally spaced vertices.",
+ default=True)
+
+ def execute(self, context):
+
+ # Create mesh geometry
+ verts, faces = add_sqorus(
+ self.hole_size,
+ self.subdivide)
+
+ # Create mesh object (and meshdata)
+ obj = create_mesh_object(context, verts, [], faces, "Sqorus")
+
+ return {'FINISHED'}
+
+
+class AddWedge(bpy.types.Operator):
+ '''Add a wedge mesh.'''
+ bl_idname = "mesh.primitive_wedge_add"
+ bl_label = "Add Wedge"
+ bl_options = {'REGISTER', 'UNDO'}
+
+ size_x = FloatProperty(name="Size X",
+ description="Size along the X axis",
+ min=0.01,
+ max=9999.0,
+ default=2.0)
+ size_y = FloatProperty(name="Size Y",
+ description="Size along the Y axis",
+ min=0.01,
+ max=9999.0,
+ default=2.0)
+ size_z = FloatProperty(name="Size Z",
+ description="Size along the Z axis",
+ min=0.01,
+ max=9999.0,
+ default=2.00)
+
+ def execute(self, context):
+
+ verts, faces = add_wedge(
+ self.size_x,
+ self.size_y,
+ self.size_z)
+
+ obj = create_mesh_object(context, verts, [], faces, "Wedge")
+
+ return {'FINISHED'}
+
+
+class AddStar(bpy.types.Operator):
+ '''Add a star mesh.'''
+ bl_idname = "mesh.primitive_star_add"
+ bl_label = "Add Star"
+ bl_options = {'REGISTER', 'UNDO'}
+
+ points = IntProperty(name="Points",
+ description="Number of points for the star",
+ min=2,
+ max=256,
+ default=5)
+ outer_radius = FloatProperty(name="Outer Radius",
+ description="Outer radius of the star",
+ min=0.01,
+ max=9999.0,
+ default=1.0)
+ innter_radius = FloatProperty(name="Inner Radius",
+ description="Inner radius of the star",
+ min=0.01,
+ max=9999.0,
+ default=0.5)
+ height = FloatProperty(name="Height",
+ description="Height of the star",
+ min=0.01,
+ max=9999.0,
+ default=0.5)
+
+ def execute(self, context):
+
+ verts, faces = add_star(
+ self.points,
+ self.outer_radius,
+ self.innter_radius,
+ self.height)
+
+ obj = create_mesh_object(context, verts, [], faces, "Star")
+
+ return {'FINISHED'}
+
+
+class AddTrapezohedron(bpy.types.Operator):
+ """Add a trapezohedron"""
+ bl_idname = "mesh.primitive_trapezohedron_add"
+ bl_label = "Add trapezohedron"
+ bl_description = "Create one of the regular solids"
+ bl_options = {'REGISTER', 'UNDO'}
+
+ segments = IntProperty(name = "Segments",
+ description = "Number of repeated segments",
+ default = 4, min = 2, max = 256)
+ radius = FloatProperty(name = "Base radius",
+ description = "Radius of the middle",
+ default = 1.0, min = 0.01, max = 100.0)
+ height = FloatProperty(name = "Tip height",
+ description = "Height of the tip",
+ default = 1, min = 0.01, max = 100.0)
+
+ def execute(self,context):
+ # generate mesh
+ verts,faces = trapezohedron(self.segments,
+ self.radius,
+ self.height)
+
+ obj = create_mesh_object(context, verts, [], faces, "Trapazohedron")
+
+ return {'FINISHED'}
+
+
+
diff --git a/add_mesh_extra_objects/add_mesh_gears.py b/add_mesh_extra_objects/add_mesh_gears.py
new file mode 100644
index 00000000..a04e6beb
--- /dev/null
+++ b/add_mesh_extra_objects/add_mesh_gears.py
@@ -0,0 +1,802 @@
+# add_mesh_gear.py (c) 2009, 2010 Michel J. Anders (varkenvarken)
+#
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+"""
+bl_info = {
+ "name": "Gears",
+ "author": "Michel J. Anders (varkenvarken)",
+ "version": (2, 4, 2),
+ "blender": (2, 5, 7),
+ "api": 35853,
+ "location": "View3D > Add > Mesh > Gears ",
+ "description": "Adds a mesh Gear to the Add Mesh menu",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/Add_Mesh/Add_Gear",
+ "tracker_url": "https://projects.blender.org/tracker/index.php?"\
+ "func=detail&aid=21732",
+ "category": "Add Mesh"}
+"""
+
+"""
+What was needed to port it from 2.49 -> 2.50 alpha 0?
+
+The basic functions that calculate the geometry (verts and faces) are mostly
+unchanged (add_tooth, add_spoke, add_gear)
+
+Also, the vertex group API is changed a little bit but the concepts
+are the same:
+=========
+vertexgroup = ob.vertex_groups.new('NAME_OF_VERTEXGROUP')
+vertexgroup.add(vertexgroup_vertex_indices, weight, 'ADD')
+=========
+
+Now for some reason the name does not 'stick' and we have to set it this way:
+vertexgroup.name = 'NAME_OF_VERTEXGROUP'
+
+Conversion to 2.50 also meant we could simply do away with our crude user
+interface.
+Just defining the appropriate properties in the AddGear() operator will
+display the properties in the Blender GUI with the added benefit of making
+it interactive: changing a property will redo the AddGear() operator providing
+the user with instant feedback.
+
+Finally we had to convert/throw away some print statements to print functions
+as Blender now uses Python 3.x
+
+The code to actually implement the AddGear() function is mostly copied from
+add_mesh_torus() (distributed with Blender).
+"""
+
+import bpy
+import mathutils
+from math import *
+from bpy.props import *
+
+# Create a new mesh (object) from verts/edges/faces.
+# verts/edges/faces ... List of vertices/edges/faces for the
+# new mesh (as used in from_pydata).
+# name ... Name of the new mesh (& object).
def create_mesh_object(context, verts, edges, faces, name):
    """Create a new mesh object from verts/edges/faces and link it.

    verts/edges/faces ... Lists as accepted by Mesh.from_pydata().
    name ... Name of the new mesh datablock (and object).
    Returns the result of object_utils.object_data_add() (the new base).
    """
    # Bugfix/cleanup: removed the unused locals 'scene' and 'obj_act'
    # (context.scene.objects.active was fetched but never used).

    # Create new mesh
    mesh = bpy.data.meshes.new(name)

    # Make a mesh from a list of verts/edges/faces.
    mesh.from_pydata(verts, edges, faces)

    # Update mesh geometry after adding stuff.
    mesh.update()

    from bpy_extras import object_utils
    return object_utils.object_data_add(context, mesh, operator=None)
+
+
+# A very simple "bridge" tool.
+# Connects two equally long vertex rows with faces.
+# Returns a list of the new faces (list of lists)
+#
+# vertIdx1 ... First vertex list (list of vertex indices).
+# vertIdx2 ... Second vertex list (list of vertex indices).
+# closed ... Creates a loop (first & last are closed).
+# flipped ... Invert the normal of the face(s).
+#
+# Note: You can set vertIdx1 to a single vertex index to create
+# a fan/star of faces.
+# Note: If both vertex idx list are the same length they have
+# to have at least 2 vertices.
def createFaces(vertIdx1, vertIdx2, closed=False, flipped=False):
    """Bridge two equally long vertex rows with faces.

    vertIdx1 ... First vertex list (list of vertex indices).
    vertIdx2 ... Second vertex list (list of vertex indices).
    closed ... Creates a loop (first & last are closed).
    flipped ... Invert the normal of the face(s).

    A single-element vertIdx1 produces a fan/star of triangles.
    Returns the new faces (list of lists), or None on invalid input.
    """
    if not vertIdx1 or not vertIdx2:
        return None

    len1, len2 = len(vertIdx1), len(vertIdx2)
    if len1 < 2 and len2 < 2:
        return None

    # A length mismatch is only valid for the fan case (1 -> many).
    fan = False
    if len1 != len2:
        if not (len1 == 1 and len2 > 1):
            return None
        fan = True

    total = len2
    last = total - 1
    faces = []

    if closed:
        # Bridge the start with the end.
        if flipped:
            face = [vertIdx1[0], vertIdx2[0], vertIdx2[last]]
            if not fan:
                face.append(vertIdx1[last])
        else:
            face = [vertIdx2[0], vertIdx1[0]]
            if not fan:
                face.append(vertIdx1[last])
            face.append(vertIdx2[last])
        faces.append(face)

    # Bridge the rest of the faces.
    for k in range(last):
        if fan:
            face = ([vertIdx2[k], vertIdx1[0], vertIdx2[k + 1]]
                    if flipped else
                    [vertIdx1[0], vertIdx2[k], vertIdx2[k + 1]])
        else:
            face = ([vertIdx2[k], vertIdx1[k],
                     vertIdx1[k + 1], vertIdx2[k + 1]]
                    if flipped else
                    [vertIdx1[k], vertIdx2[k],
                     vertIdx2[k + 1], vertIdx1[k + 1]])
        faces.append(face)

    return faces
+
+
+# Calculate the vertex coordinates for a single
+# section of a gear tooth.
+# Returns 4 lists of vertex coords (list of tuples):
+# *-*---*---* (1.) verts_inner_base
+# | | | |
+# *-*---*---* (2.) verts_outer_base
+# | | |
+# *---*---* (3.) verts_middle_tooth
+# \ | /
+# *-*-* (4.) verts_tip_tooth
+#
+# a
+# t
+# d
+# radius
+# Ad
+# De
+# base
+# p_angle
+# rack
+# crown
def add_tooth(a, t, d, radius, Ad, De, base, p_angle, rack=0, crown=0.0):
    """Calculate the vertex coordinates for one gear-tooth section.

    a ... Start angle of the tooth.
    t ... Angle occupied by one tooth.
    d ... Z offset of this section.
    radius ... Gear radius (negative for crown gears).
    Ad / De ... Addendum / dedendum extents.
    base ... Base extent below the dedendum.
    p_angle ... Pressure angle (radians).
    rack ... Nonzero generates a straight rack profile instead.
    crown ... Inward-pointing extent of crown teeth (z shift).

    Returns four vertex lists (lists of (x, y, z) tuples):
    inner base (4), outer base (4), middle tooth (3), tip tooth (3).
    """
    A = [a, a + t / 4, a + t / 2, a + 3 * t / 4]
    C = [cos(angle) for angle in A]
    S = [sin(angle) for angle in A]

    Ra = radius + Ad   # tip radius
    Rd = radius - De   # root (dedendum) radius
    Rb = Rd - base     # base radius

    # Tip offset induced by the pressure angle.
    O = Ad * tan(p_angle)
    p_angle = atan(O / Ra)
    if radius < 0:
        p_angle = -p_angle

    if rack:
        # Rack teeth lie on a straight line instead of a circle.
        S = [sin(t / 4) * k for k in range(-2, 3)]
        Sp = [0, sin(-t / 4 + p_angle), 0, sin(t / 4 - p_angle)]

        verts_inner_base = [(Rb, radius * S[k], d) for k in range(4)]
        verts_outer_base = [(Rd, radius * S[k], d) for k in range(4)]
        verts_middle_tooth = [(radius, radius * S[k], d) for k in range(1, 4)]
        verts_tip_tooth = [(Ra, radius * Sp[k], d) for k in range(1, 4)]
        return (verts_inner_base, verts_outer_base,
            verts_middle_tooth, verts_tip_tooth)

    # Circular gear: tip corners pulled in by the pressure angle.
    Cp = [
        0,
        cos(a + t / 4 + p_angle),
        cos(a + t / 2),
        cos(a + 3 * t / 4 - p_angle)]
    Sp = [0,
        sin(a + t / 4 + p_angle),
        sin(a + t / 2),
        sin(a + 3 * t / 4 - p_angle)]

    verts_inner_base = [(Rb * C[k], Rb * S[k], d) for k in range(4)]
    verts_outer_base = [(Rd * C[k], Rd * S[k], d) for k in range(4)]
    # Middle and tip rows are lifted by the crown amount.
    verts_middle_tooth = [(radius * C[k], radius * S[k], d + crown / 3)
        for k in range(1, 4)]
    verts_tip_tooth = [(Ra * Cp[k], Ra * Sp[k], d + crown)
        for k in range(1, 4)]

    return (verts_inner_base, verts_outer_base,
        verts_middle_tooth, verts_tip_tooth)
+
+
+# EXPERIMENTAL Calculate the vertex coordinates for a single
+# section of a gearspoke.
+# Returns them as a list of tuples.
+#
+# a
+# t
+# d
+# radius
+# De
+# base
+# s
+# w
+# l
+# gap
+# width
+#
+# @todo Finish this.
def add_spoke(a, t, d, radius, De, base, s, w, l, gap=0, width=19):
    """EXPERIMENTAL: calculate vertices for one gear-spoke section.

    a ... Start angle of the section.
    t ... Angle occupied by one tooth.
    d ... Z offset of this section.
    radius ... Gear radius.
    De ... Dedendum extent.
    base ... Base extent.
    s ... Radial shrink step applied per ring.
    w ... Spoke half-width (must satisfy w <= inner radius).
    l ... Currently unused (@todo, kept for interface stability).
    gap ... Nonzero returns empty geometry (gap segment).
    width ... Resolution (number of ring columns; odd, >= 5).

    Returns (verts, edgefaces, edgefaces2, sf) where edgefaces /
    edgefaces2 hold the first/last vertex index of each ring and sf
    holds the side faces.
    """
    # Cleanup: removed the unused local 'Rl = Rb'.
    Rd = radius - De   # root radius
    Rb = Rd - base     # base (inner) radius

    verts = []
    edgefaces = []
    edgefaces2 = []
    sf = []            # side faces

    if not gap:
        # Build concentric vertex rings, each narrower than the last.
        for N in range(width, 1, -2):
            edgefaces.append(len(verts))
            ts = t / 4
            tm = a + 2 * ts
            te = asin(w / Rb)
            td = te - ts
            # Interpolate angular step between ts and te over the rings.
            t4 = ts + td * (width - N) / (width - 3.0)
            A = [tm + (i - int(N / 2)) * t4 for i in range(N)]
            C = [cos(i) for i in A]
            S = [sin(i) for i in A]

            verts.extend((Rb * I, Rb * J, d) for (I, J) in zip(C, S))
            edgefaces2.append(len(verts) - 1)

            Rb = Rb - s

        # Stitch neighboring rings together with quads.
        n = 0
        for N in range(width, 3, -2):
            sf.extend([(i + n, i + 1 + n, i + 2 + n, i + N + n)
                for i in range(0, N - 1, 2)])
            sf.extend([(i + 2 + n, i + N + n, i + N + 1 + n, i + N + 2 + n)
                for i in range(0, N - 3, 2)])

            n = n + N

    return verts, edgefaces, edgefaces2, sf
+
+
+# Create gear geometry.
+# Returns:
+# * A list of vertices (list of tuples)
+# * A list of faces (list of lists)
+# * A list (group) of vertices of the tip (list of vertex indices).
+# * A list (group) of vertices of the valley (list of vertex indices).
+#
+# teethNum ... Number of teeth on the gear.
+# radius ... Radius of the gear, negative for crown gear
+# Ad ... Addendum, extent of tooth above radius.
+# De ... Dedendum, extent of tooth below radius.
+# base ... Base, extent of gear below radius.
+# p_angle ... Pressure angle. Skewness of tooth tip. (radiant)
+# width ... Width, thickness of gear.
+# skew ... Skew of teeth. (radiant)
+# conangle ... Conical angle of gear. (radiant)
+# rack
+# crown ... Inward pointing extend of crown teeth.
+#
+# inner radius = radius - (De + base)
def add_gear(teethNum, radius, Ad, De, base, p_angle,
        width=1, skew=0, conangle=0, rack=0, crown=0.0):
    """Create gear geometry.

    Returns (verts, faces, vgroup_top, vgroup_valley); all four are
    None when teethNum < 2.

    teethNum ... Number of teeth on the gear.
    radius ... Radius of the gear, negative for crown gear.
    Ad ... Addendum, extent of tooth above radius.
    De ... Dedendum, extent of tooth below radius.
    base ... Base, extent of gear below radius.
    p_angle ... Pressure angle. Skewness of tooth tip. (radiant)
    width ... Width, thickness of gear.
    skew ... Skew of teeth. (radiant)
    conangle ... Conical angle of gear. (radiant)
    rack ... Nonzero builds a single rack segment instead of a ring.
    crown ... Inward pointing extend of crown teeth.
    """
    if teethNum < 2:
        return None, None, None, None

    # Angle occupied by a single tooth.
    t = 2 * pi / teethNum

    if rack:
        teethNum = 1

    # Radial scale of the second gear face (shrunk for conical gears).
    scale = (radius - 2 * width * tan(conangle)) / radius

    verts = []
    faces = []
    vgroup_top = []  # Vertex group of top/tip? vertices.
    vgroup_valley = []  # Vertex group of valley vertices

    verts_bridge_prev = []
    for toothCnt in range(teethNum):
        a = toothCnt * t

        verts_bridge_start = []
        verts_bridge_end = []

        verts_outside_top = []
        verts_outside_bottom = []
        # Generate both faces of the gear: the "top" side at -width and
        # the "bottom" side at +width (skewed and scaled).
        for (s, d, c, top) \
            in [(0, -width, 1, True), \
                (skew, width, scale, False)]:

            verts1, verts2, verts3, verts4 = add_tooth(a + s, t, d,
                radius * c, Ad * c, De * c, base * c, p_angle,
                rack, crown)

            # Record index ranges of the freshly appended vertex rows.
            vertsIdx1 = list(range(len(verts), len(verts) + len(verts1)))
            verts.extend(verts1)
            vertsIdx2 = list(range(len(verts), len(verts) + len(verts2)))
            verts.extend(verts2)
            vertsIdx3 = list(range(len(verts), len(verts) + len(verts3)))
            verts.extend(verts3)
            vertsIdx4 = list(range(len(verts), len(verts) + len(verts4)))
            verts.extend(verts4)

            # Outline of this side of the tooth (used for the side wall).
            verts_outside = []
            verts_outside.extend(vertsIdx2[:2])
            verts_outside.append(vertsIdx3[0])
            verts_outside.extend(vertsIdx4)
            verts_outside.append(vertsIdx3[-1])
            verts_outside.append(vertsIdx2[-1])

            if top:
                #verts_inside_top = vertsIdx1
                verts_outside_top = verts_outside

                verts_bridge_start.append(vertsIdx1[0])
                verts_bridge_start.append(vertsIdx2[0])
                verts_bridge_end.append(vertsIdx1[-1])
                verts_bridge_end.append(vertsIdx2[-1])

            else:
                #verts_inside_bottom = vertsIdx1
                verts_outside_bottom = verts_outside

                verts_bridge_start.append(vertsIdx2[0])
                verts_bridge_start.append(vertsIdx1[0])
                verts_bridge_end.append(vertsIdx2[-1])
                verts_bridge_end.append(vertsIdx1[-1])

            # Valley = first 2 vertices of outer base:
            vgroup_valley.extend(vertsIdx2[:1])
            # Top/tip vertices:
            vgroup_top.extend(vertsIdx4)

            faces_tooth_middle_top = createFaces(vertsIdx2[1:], vertsIdx3,
                flipped=top)
            faces_tooth_outer_top = createFaces(vertsIdx3, vertsIdx4,
                flipped=top)

            faces_base_top = createFaces(vertsIdx1, vertsIdx2, flipped=top)
            faces.extend(faces_base_top)

            faces.extend(faces_tooth_middle_top)
            faces.extend(faces_tooth_outer_top)

        #faces_inside = createFaces(verts_inside_top, verts_inside_bottom)
        #faces.extend(faces_inside)

        # Side wall between the two faces of the gear.
        faces_outside = createFaces(verts_outside_top, verts_outside_bottom,
            flipped=True)
        faces.extend(faces_outside)

        if toothCnt == 0:
            verts_bridge_first = verts_bridge_start

        # Bridge one tooth to the next
        if verts_bridge_prev:
            faces_bridge = createFaces(verts_bridge_prev, verts_bridge_start)
            #, closed=True (for "inside" faces)
            faces.extend(faces_bridge)

        # Remember "end" vertices for next tooth.
        verts_bridge_prev = verts_bridge_end

    # Bridge the first to the last tooth.
    faces_bridge_f_l = createFaces(verts_bridge_prev, verts_bridge_first)
    #, closed=True (for "inside" faces)
    faces.extend(faces_bridge_f_l)

    return verts, faces, vgroup_top, vgroup_valley
+
+
+# Create spokes geometry.
+# Returns:
+# * A list of vertices (list of tuples)
+# * A list of faces (list of lists)
+#
+# teethNum ... Number of teeth on the gear.
+# radius ... Radius of the gear, negative for crown gear
+# De ... Dedendum, extent of tooth below radius.
+# base ... Base, extent of gear below radius.
+# width ... Width, thickness of gear.
+# conangle ... Conical angle of gear. (radiant)
+# rack
+# spoke
+# spbevel
+# spwidth
+# splength
+# spresol
+#
+# @todo Finish this
+# @todo Create a function that takes a "Gear" and creates a
+# matching "Gear Spokes" object.
def add_spokes(teethNum, radius, De, base, width=1, conangle=0, rack=0,
        spoke=3, spbevel=0.1, spwidth=0.2, splength=1.0, spresol=9):
    """EXPERIMENTAL: create spokes geometry matching a gear.

    teethNum ... Number of teeth on the gear.
    radius ... Radius of the gear, negative for crown gear.
    De ... Dedendum, extent of tooth below radius.
    base ... Base, extent of gear below radius.
    width ... Width, thickness of gear.
    conangle ... Conical angle of gear. (radiant)
    rack ... Nonzero treats the gear as a rack (single segment).
    spoke ... Place a spoke every 'spoke' teeth.
    spbevel / spwidth / splength / spresol ... Spoke shape parameters
        forwarded to add_spoke().

    Returns (verts, faces), or (None, None) on invalid input.
    """
    # Bugfix: the error paths previously returned a 4-tuple
    # (None, None, None, None) while the success path returns 2 values,
    # so "verts, faces = add_spokes(...)" raised ValueError on errors.
    if teethNum < 2:
        return None, None

    if spoke < 2:
        return None, None

    t = 2 * pi / teethNum

    if rack:
        teethNum = 1

    scale = (radius - 2 * width * tan(conangle)) / radius

    verts = []
    faces = []

    c = scale  # debug

    fl = len(verts)
    for toothCnt in range(teethNum):
        a = toothCnt * t
        s = 0  # For test

        if toothCnt % spoke == 0:
            # Solid spoke segment on both gear faces.
            for d in (-width, width):
                sv, edgefaces, edgefaces2, sf = add_spoke(a + s, t, d,
                    radius * c, De * c, base * c,
                    spbevel, spwidth, splength, 0, spresol)
                verts.extend(sv)
                faces.extend([j + fl for j in i] for i in sf)
                fl += len(sv)

            d1 = fl - len(sv)
            d2 = fl - 2 * len(sv)

            # Connect the two faces along both ring edges.
            faces.extend([(i + d2, j + d2, j + d1, i + d1)
                for (i, j) in zip(edgefaces[:-1], edgefaces[1:])])
            faces.extend([(i + d2, j + d2, j + d1, i + d1)
                for (i, j) in zip(edgefaces2[:-1], edgefaces2[1:])])

        else:
            # Gap segment (gap=1): add_spoke returns no geometry here.
            for d in (-width, width):
                sv, edgefaces, edgefaces2, sf = add_spoke(a + s, t, d,
                    radius * c, De * c, base * c,
                    spbevel, spwidth, splength, 1, spresol)

                verts.extend(sv)
                fl += len(sv)

            d1 = fl - len(sv)
            d2 = fl - 2 * len(sv)

            faces.extend([[i + d2, i + 1 + d2, i + 1 + d1, i + d1]
                for (i) in range(0, 3)])
            faces.extend([[i + d2, i + 1 + d2, i + 1 + d1, i + d1]
                for (i) in range(5, 8)])

    return verts, faces
+
+
+# Create worm geometry.
+# Returns:
+# * A list of vertices
+# * A list of faces
+# * A list (group) of vertices of the tip
+# * A list (group) of vertices of the valley
+#
+# teethNum ... Number of teeth on the worm
+# radius ... Radius of the gear, negative for crown gear
+# Ad ... Addendum, extent of tooth above radius.
+# De ... Dedendum, extent of tooth below radius.
+# p_angle ... Pressure angle. Skewness of tooth tip. (radiant)
+# width ... Width, thickness of gear.
+# crown ... Inward pointing extend of crown teeth.
+#
+# @todo: Fix teethNum. Some numbers are not possible yet.
+# @todo: Create start & end geometry (closing faces)
def add_worm(teethNum, rowNum, radius, Ad, De, p_angle,
        width=1, skew=radians(11.25), crown=0.0):
    """Create worm gear geometry.

    Returns (verts, faces, vgroup_top, vgroup_valley).

    teethNum ... Number of teeth (threads) on the worm.
    rowNum ... Number of rows (rings) along the worm's axis.
    radius ... Radius of the gear, negative for crown gear.
    Ad ... Addendum, extent of tooth above radius.
    De ... Dedendum, extent of tooth below radius.
    p_angle ... Pressure angle. Skewness of tooth tip. (radiant)
    width ... Height of one row along the axis.
    skew ... Skew added per row. (radiant)
    crown ... Inward pointing extend of crown teeth.
    """
    # Each row is a fixed 24-segment ring; 'worm' of those segments
    # carry an actual tooth profile, the rest are flat.
    worm = teethNum
    teethNum = 24

    t = 2 * pi / teethNum

    verts = []
    faces = []
    vgroup_top = []  # Vertex group of top/tip? vertices.
    vgroup_valley = []  # Vertex group of valley vertices

    #width = width / 2.0

    edgeloop_prev = []
    for Row in range(rowNum):
        edgeloop = []

        for toothCnt in range(teethNum):
            a = toothCnt * t

            s = Row * skew   # accumulated skew of this row
            d = Row * width  # z offset of this row
            c = 1

            isTooth = False
            # NOTE(review): teethNum / worm is a float division; for worm
            # counts that do not evenly divide 24 the modulo test may not
            # distribute teeth as expected (see the @todo in the header).
            if toothCnt % (teethNum / worm) != 0:
                # Flat
                verts1, verts2, verts3, verts4 = add_tooth(a + s, t, d,
                    radius - De, 0.0, 0.0, 0, p_angle)

                # Ignore other verts than the "other base".
                verts1 = verts3 = verts4 = []

            else:
                # Tooth
                isTooth = True
                verts1, verts2, verts3, verts4 = add_tooth(a + s, t, d,
                    radius * c, Ad * c, De * c, 0 * c, p_angle, 0, crown)

                # Remove various unneeded verts (if we are "inside" the tooth)
                del(verts2[2])  # Central vertex in the base of the tooth.
                del(verts3[1])  # Central vertex in the middle of the tooth.

            # Record index ranges of the freshly appended vertex rows.
            vertsIdx2 = list(range(len(verts), len(verts) + len(verts2)))
            verts.extend(verts2)
            vertsIdx3 = list(range(len(verts), len(verts) + len(verts3)))
            verts.extend(verts3)
            vertsIdx4 = list(range(len(verts), len(verts) + len(verts4)))
            verts.extend(verts4)

            if isTooth:
                verts_current = []
                verts_current.extend(vertsIdx2[:2])
                verts_current.append(vertsIdx3[0])
                verts_current.extend(vertsIdx4)
                verts_current.append(vertsIdx3[-1])
                verts_current.append(vertsIdx2[-1])

                # Valley = first 2 vertices of outer base:
                vgroup_valley.extend(vertsIdx2[:1])
                # Top/tip vertices:
                vgroup_top.extend(vertsIdx4)

            else:
                # Flat
                verts_current = vertsIdx2

                # Valley - all of them.
                vgroup_valley.extend(vertsIdx2)

            edgeloop.extend(verts_current)

        # Create faces between rings/rows.
        if edgeloop_prev:
            faces_row = createFaces(edgeloop, edgeloop_prev, closed=True)
            faces.extend(faces_row)

        # Remember last ring/row of vertices for next ring/row iteration.
        edgeloop_prev = edgeloop

    return verts, faces, vgroup_top, vgroup_valley
+
+
class AddGear(bpy.types.Operator):
    '''Add a gear mesh.'''
    bl_idname = "mesh.primitive_gear"
    bl_label = "Add Gear"
    bl_options = {'REGISTER', 'UNDO'}

    # NOTE(review): max=265 looks like a typo for 256 (cf. other
    # operators in this package); left unchanged.
    number_of_teeth = IntProperty(name="Number of Teeth",
        description="Number of teeth on the gear",
        min=2,
        max=265,
        default=12)
    radius = FloatProperty(name="Radius",
        description="Radius of the gear, negative for crown gear",
        min=-100.0,
        max=100.0,
        default=1.0)
    addendum = FloatProperty(name="Addendum",
        description="Addendum, extent of tooth above radius",
        min=0.01,
        max=100.0,
        default=0.1)
    dedendum = FloatProperty(name="Dedendum",
        description="Dedendum, extent of tooth below radius",
        min=0.0,
        max=100.0,
        default=0.1)
    # Angle properties are edited in degrees and converted with
    # radians() in execute().
    angle = FloatProperty(name="Pressure Angle",
        description="Pressure angle, skewness of tooth tip (degrees)",
        min=0.0,
        max=45.0,
        default=20.0)
    base = FloatProperty(name="Base",
        description="Base, extent of gear below radius",
        min=0.0,
        max=100.0,
        default=0.2)
    width = FloatProperty(name="Width",
        description="Width, thickness of gear",
        min=0.05,
        max=100.0,
        default=0.2)
    skew = FloatProperty(name="Skewness",
        description="Skew of teeth (degrees)",
        min=-90.0,
        max=90.0,
        default=0.0)
    conangle = FloatProperty(name="Conical angle",
        description="Conical angle of gear (degrees)",
        min=0.0,
        max=90.0,
        default=0.0)
    crown = FloatProperty(name="Crown",
        description="Inward pointing extend of crown teeth",
        min=0.0,
        max=100.0,
        default=0.0)

    def draw(self, context):
        # Group related settings into boxes for the operator redo panel.
        layout = self.layout
        box = layout.box()
        box.prop(self, 'number_of_teeth')
        box = layout.box()
        box.prop(self, 'radius')
        box.prop(self, 'width')
        box.prop(self, 'base')
        box = layout.box()
        box.prop(self, 'dedendum')
        box.prop(self, 'addendum')
        box = layout.box()
        box.prop(self, 'angle')
        box.prop(self, 'skew')
        box.prop(self, 'conangle')
        box.prop(self, 'crown')


    def execute(self, context):
        # Build the gear geometry; degree-based properties are converted
        # to radians for add_gear().
        verts, faces, verts_tip, verts_valley = add_gear(
            self.number_of_teeth,
            self.radius,
            self.addendum,
            self.dedendum,
            self.base,
            radians(self.angle),
            width=self.width,
            skew=radians(self.skew),
            conangle=radians(self.conangle),
            crown=self.crown)

        # Actually create the mesh object from this geometry data.
        base = create_mesh_object(context, verts, [], faces, "Gear")
        obj = base.object

        # Create vertex groups from stored vertices.
        # NOTE(review): per the module docstring the group name may not
        # "stick" in this API version without assigning .name explicitly
        # afterwards — confirm against the target Blender release.
        tipGroup = obj.vertex_groups.new('Tips')
        tipGroup.add(verts_tip, 1.0, 'ADD')

        valleyGroup = obj.vertex_groups.new('Valleys')
        valleyGroup.add(verts_valley, 1.0, 'ADD')

        return {'FINISHED'}
+
+
class AddWormGear(bpy.types.Operator):
    '''Add a worm gear mesh.'''
    bl_idname = "mesh.primitive_worm_gear"
    bl_label = "Add Worm Gear"
    bl_options = {'REGISTER', 'UNDO'}

    # NOTE(review): max=265 looks like a typo for 256; left unchanged.
    number_of_teeth = IntProperty(name="Number of Teeth",
        description="Number of teeth on the gear",
        min=2,
        max=265,
        default=12)
    number_of_rows = IntProperty(name="Number of Rows",
        description="Number of rows on the worm gear",
        min=2,
        max=265,
        default=32)
    radius = FloatProperty(name="Radius",
        description="Radius of the gear, negative for crown gear",
        min=-100.0,
        max=100.0,
        default=1.0)
    addendum = FloatProperty(name="Addendum",
        description="Addendum, extent of tooth above radius",
        min=0.01,
        max=100.0,
        default=0.1)
    dedendum = FloatProperty(name="Dedendum",
        description="Dedendum, extent of tooth below radius",
        min=0.0,
        max=100.0,
        default=0.1)
    # Angle properties are edited in degrees and converted with
    # radians() in execute().
    angle = FloatProperty(name="Pressure Angle",
        description="Pressure angle, skewness of tooth tip (degrees)",
        min=0.0,
        max=45.0,
        default=20.0)
    row_height = FloatProperty(name="Row Height",
        description="Height of each Row",
        min=0.05,
        max=100.0,
        default=0.2)
    skew = FloatProperty(name="Skewness per Row",
        description="Skew of each row (degrees)",
        min=-90.0,
        max=90.0,
        default=11.25)
    crown = FloatProperty(name="Crown",
        description="Inward pointing extend of crown teeth",
        min=0.0,
        max=100.0,
        default=0.0)

    def draw(self, context):
        # Group related settings into boxes for the operator redo panel.
        layout = self.layout
        box = layout.box()
        box.prop(self, 'number_of_teeth')
        box.prop(self, 'number_of_rows')
        box.prop(self, 'radius')
        box.prop(self, 'row_height')
        box = layout.box()
        box.prop(self, 'addendum')
        box.prop(self, 'dedendum')
        box = layout.box()
        box.prop(self, 'angle')
        box.prop(self, 'skew')
        box.prop(self, 'crown')

    def execute(self, context):
        # Build the worm geometry; degree-based properties are converted
        # to radians for add_worm().
        verts, faces, verts_tip, verts_valley = add_worm(
            self.number_of_teeth,
            self.number_of_rows,
            self.radius,
            self.addendum,
            self.dedendum,
            radians(self.angle),
            width=self.row_height,
            skew=radians(self.skew),
            crown=self.crown)

        # Actually create the mesh object from this geometry data.
        base = create_mesh_object(context, verts, [], faces, "Worm Gear")
        obj = base.object

        # Create vertex groups from stored vertices.
        tipGroup = obj.vertex_groups.new('Tips')
        tipGroup.add(verts_tip, 1.0, 'ADD')

        valleyGroup = obj.vertex_groups.new('Valleys')
        valleyGroup.add(verts_valley, 1.0, 'ADD')

        return {'FINISHED'}
+
diff --git a/add_mesh_extra_objects/add_mesh_gemstones.py b/add_mesh_extra_objects/add_mesh_gemstones.py
new file mode 100644
index 00000000..d4158b68
--- /dev/null
+++ b/add_mesh_extra_objects/add_mesh_gemstones.py
@@ -0,0 +1,333 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+"""
+bl_info = {
+ "name": "Gemstones",
+ "author": "Pontiac, Fourmadmen, Dreampainter",
+ "version": (0, 4),
+ "blender": (2, 5, 7),
+ "api": 35853,
+ "location": "View3D > Add > Mesh > Gemstones",
+ "description": "Adds various gemstone (Diamond & Gem) meshes.",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/Add_Mesh/Add_Gemstones",
+ "tracker_url": "https://projects.blender.org/tracker/index.php?"\
+ "func=detail&aid=21432",
+ "category": "Add Mesh"}
+"""
+import bpy
+from mathutils import *
+from math import *
+from bpy.props import *
+
+# Create a new mesh (object) from verts/edges/faces.
+# verts/edges/faces ... List of vertices/edges/faces for the
+# new mesh (as used in from_pydata).
+# name ... Name of the new mesh (& object).
def create_mesh_object(context, verts, edges, faces, name):
    """Create a new mesh object from verts/edges/faces and link it.

    verts/edges/faces ... Lists as accepted by Mesh.from_pydata().
    name ... Name of the new mesh datablock (and object).
    Returns the result of object_utils.object_data_add() (the new base).
    """
    # Bugfix/cleanup: removed the unused locals 'scene' and 'obj_act'
    # (context.scene.objects.active was fetched but never used) — same
    # fix as the copy of this helper in add_mesh_gears.py.

    # Create new mesh
    mesh = bpy.data.meshes.new(name)

    # Make a mesh from a list of verts/edges/faces.
    mesh.from_pydata(verts, edges, faces)

    # Update mesh geometry after adding stuff.
    mesh.update()

    from bpy_extras import object_utils
    return object_utils.object_data_add(context, mesh, operator=None)
+
+
+# A very simple "bridge" tool.
+# Connects two equally long vertex rows with faces.
+# Returns a list of the new faces (list of lists)
+#
+# vertIdx1 ... First vertex list (list of vertex indices).
+# vertIdx2 ... Second vertex list (list of vertex indices).
+# closed ... Creates a loop (first & last are closed).
+# flipped ... Invert the normal of the face(s).
+#
+# Note: You can set vertIdx1 to a single vertex index to create
+# a fan/star of faces.
+# Note: If both vertex idx list are the same length they have
+# to have at least 2 vertices.
def createFaces(vertIdx1, vertIdx2, closed=False, flipped=False):
    """Bridge two equally long vertex rows with faces.

    vertIdx1 ... First vertex list (list of vertex indices).
    vertIdx2 ... Second vertex list (list of vertex indices).
    closed ... Creates a loop (first & last are closed).
    flipped ... Invert the normal of the face(s).

    A single-element vertIdx1 produces a fan/star of triangles.
    Returns the new faces (list of lists), or None on invalid input.
    """
    if not vertIdx1 or not vertIdx2:
        return None

    len1, len2 = len(vertIdx1), len(vertIdx2)
    if len1 < 2 and len2 < 2:
        return None

    # A length mismatch is only valid for the fan case (1 -> many).
    fan = False
    if len1 != len2:
        if not (len1 == 1 and len2 > 1):
            return None
        fan = True

    total = len2
    last = total - 1
    faces = []

    if closed:
        # Bridge the start with the end.
        if flipped:
            face = [vertIdx1[0], vertIdx2[0], vertIdx2[last]]
            if not fan:
                face.append(vertIdx1[last])
        else:
            face = [vertIdx2[0], vertIdx1[0]]
            if not fan:
                face.append(vertIdx1[last])
            face.append(vertIdx2[last])
        faces.append(face)

    # Bridge the rest of the faces.
    for k in range(last):
        if fan:
            face = ([vertIdx2[k], vertIdx1[0], vertIdx2[k + 1]]
                    if flipped else
                    [vertIdx1[0], vertIdx2[k], vertIdx2[k + 1]])
        else:
            face = ([vertIdx2[k], vertIdx1[k],
                     vertIdx1[k + 1], vertIdx2[k + 1]]
                    if flipped else
                    [vertIdx1[k], vertIdx2[k],
                     vertIdx2[k + 1], vertIdx1[k + 1]])
        faces.append(face)

    return faces
+
+
+# @todo Clean up vertex&face creation process a bit.
def add_gem(r1, r2, seg, h1, h2):
    """
    r1 = pavilion radius
    r2 = crown radius
    seg = number of segments
    h1 = pavilion height
    h2 = crown height
    Generates the vertices and faces of the gem
    """
    step = 2.0 * pi / seg     # Angle between segments
    offset = step / 2.0       # Middle between segments

    # Radii/heights of the intermediate rings.
    r3 = ((r1 + r2) / 2.0) / cos(offset)  # Middle of crown
    r4 = (r1 / 2.0) / cos(offset)         # Middle of pavilion
    h3 = h2 / 2.0                         # Middle of crown height
    h4 = -h1 / 2.0                        # Middle of pavilion height

    verts = []

    # Tip
    vert_tip = len(verts)
    verts.append(Vector((0.0, 0.0, -h1)))

    # Middle vertex of the flat side (crown)
    vert_flat = len(verts)
    verts.append(Vector((0.0, 0.0, h2)))

    # Four vertices per segment: mid-pavilion, pavilion, mid-crown, crown.
    edgeloop_flat = []
    for i in range(seg):
        s1 = sin(i * step)
        s2 = sin(offset + i * step)
        c1 = cos(i * step)
        c2 = cos(offset + i * step)

        verts.append((r4 * s1, r4 * c1, h4))   # Middle of pavilion
        verts.append((r1 * s2, r1 * c2, 0.0))  # Pavilion
        verts.append((r3 * s1, r3 * c1, h3))   # Middle crown
        edgeloop_flat.append(len(verts))
        verts.append((r2 * s2, r2 * c2, h2))   # Crown

    faces = []
    for k in range(seg):
        cur = k * 4
        nxt = ((k + 1) % seg) * 4

        faces.append([nxt + 2, vert_tip, cur + 2, cur + 3])  # Tip -> Middle of pav
        faces.append([nxt + 2, cur + 3, nxt + 3])            # Middle of pav -> pav
        faces.append([nxt + 3, cur + 3, nxt + 4])            # Pav -> Middle crown
        faces.append([nxt + 4, cur + 3, cur + 4, cur + 5])   # Crown quads
        faces.append([nxt + 4, cur + 5, nxt + 5])            # Middle crown -> crown

    # Close the flat crown table with a fan.
    faces.extend(createFaces([vert_flat], edgeloop_flat, closed=True))

    return verts, faces
+
+
def add_diamond(segments, girdle_radius, table_radius,
        crown_height, pavilion_height):
    """Create diamond geometry: flat table, girdle ring and pavilion tip.

    segments ... Number of segments around the girdle.
    girdle_radius ... Radius of the widest ring (the girdle).
    table_radius ... Radius of the flat top (the table).
    crown_height ... Height of the table above the girdle.
    pavilion_height ... Depth of the tip below the girdle.
    Returns (verts, faces).
    """
    # Cleanup: removed the unused local 'angle' that was recomputed in
    # the loop but never read.
    PI_2 = pi * 2.0
    z_axis = (0.0, 0.0, -1.0)

    verts = []
    faces = []

    height_flat = crown_height
    height_middle = 0.0
    height_tip = -pavilion_height

    # Middle vertex of the flat side (crown)
    vert_flat = len(verts)
    verts.append(Vector((0.0, 0.0, height_flat)))

    # Tip
    vert_tip = len(verts)
    verts.append(Vector((0.0, 0.0, height_tip)))

    verts_flat = []
    verts_girdle = []

    for index in range(segments):
        # Rotation around -Z for this segment.
        quat = Quaternion(z_axis, (index / segments) * PI_2)

        # Row for flat side
        verts_flat.append(len(verts))
        vec = Vector((table_radius, 0.0, height_flat)) * quat
        verts.append(vec)

        # Row for the middle/girdle
        verts_girdle.append(len(verts))
        vec = Vector((girdle_radius, 0.0, height_middle)) * quat
        verts.append(vec)

    # Flat face
    faces_flat = createFaces([vert_flat], verts_flat, closed=True,
        flipped=True)
    # Side face
    faces_side = createFaces(verts_girdle, verts_flat, closed=True)
    # Tip faces
    faces_tip = createFaces([vert_tip], verts_girdle, closed=True)

    faces.extend(faces_tip)
    faces.extend(faces_side)
    faces.extend(faces_flat)

    return verts, faces
+
+
class AddDiamond(bpy.types.Operator):
    '''Add a diamond mesh.'''
    bl_idname = "mesh.primitive_diamond_add"
    bl_label = "Add Diamond"
    bl_options = {'REGISTER', 'UNDO'}

    segments = IntProperty(name="Segments",
        description="Number of segments for the diamond",
        min=3,
        max=256,
        default=32)
    girdle_radius = FloatProperty(name="Girdle Radius",
        description="Girdle radius of the diamond",
        min=0.01,
        max=9999.0,
        default=1.0)
    # Bugfix: the description previously read "Girdle radius of the
    # diamond" (copy-paste from the property above).
    table_radius = FloatProperty(name="Table Radius",
        description="Table radius of the diamond",
        min=0.01,
        max=9999.0,
        default=0.6)
    crown_height = FloatProperty(name="Crown Height",
        description="Crown height of the diamond",
        min=0.01,
        max=9999.0,
        default=0.35)
    pavilion_height = FloatProperty(name="Pavilion Height",
        description="Pavilion height of the diamond",
        min=0.01,
        max=9999.0,
        default=0.8)

    def execute(self, context):
        # Build the diamond geometry and link it into the scene.
        verts, faces = add_diamond(self.segments,
            self.girdle_radius,
            self.table_radius,
            self.crown_height,
            self.pavilion_height)

        obj = create_mesh_object(context, verts, [], faces, "Diamond")

        return {'FINISHED'}
+
+
class AddGem(bpy.types.Operator):
    """Add a diamond gem"""
    bl_idname = "mesh.primitive_gem_add"
    bl_label = "Add Gem"
    bl_description = "Create an offset faceted gem."
    bl_options = {'REGISTER', 'UNDO'}

    # NOTE(review): max=265 looks like a typo for 256; left unchanged.
    segments = IntProperty(name="Segments",
        description="Longitudial segmentation",
        min=3,
        max=265,
        default=8,)
    # Pavilion (bottom half) radius; passed to add_gem() as r1.
    pavilion_radius = FloatProperty(name="Radius",
        description="Radius of the gem",
        min=0.01,
        max=9999.0,
        default=1.0)
    # Crown (table/top) radius; passed to add_gem() as r2.
    crown_radius = FloatProperty(name="Table Radius",
        description="Radius of the table(top).",
        min=0.01,
        max=9999.0,
        default=0.6)
    crown_height = FloatProperty(name="Table height",
        description="Height of the top half.",
        min=0.01,
        max=9999.0,
        default=0.35)
    pavilion_height = FloatProperty(name="Pavilion height",
        description="Height of bottom half.",
        min=0.01,
        max=9999.0,
        default=0.8)

    def execute(self, context):
        # create mesh
        # Argument order maps to add_gem(r1, r2, seg, h1, h2) =
        # (pavilion_radius, crown_radius, segments, pavilion_height,
        # crown_height).
        verts, faces = add_gem(
            self.pavilion_radius,
            self.crown_radius,
            self.segments,
            self.pavilion_height,
            self.crown_height)

        obj = create_mesh_object(context, verts, [], faces, "Gem")

        return {'FINISHED'}
+
diff --git a/add_mesh_extra_objects/add_mesh_twisted_torus.py b/add_mesh_extra_objects/add_mesh_twisted_torus.py
new file mode 100644
index 00000000..6eb8cc24
--- /dev/null
+++ b/add_mesh_extra_objects/add_mesh_twisted_torus.py
@@ -0,0 +1,253 @@
+# add_mesh_twisted_torus.py Copyright (C) 2009-2010, Paulo Gomes
+# tuga3d {at} gmail {dot} com
+# add twisted torus to the blender 2.50 add->mesh menu
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+"""
+bl_info = {
+ "name": "Twisted Torus",
+ "author": "Paulo_Gomes",
+ "version": (0, 11, 1),
+ "blender": (2, 5, 7),
+ "api": 35853,
+ "location": "View3D > Add > Mesh ",
+ "description": "Adds a mesh Twisted Torus to the Add Mesh menu",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/Add_Mesh/Add_Twisted_Torus",
+ "tracker_url": "https://projects.blender.org/tracker/index.php?"\
+ "func=detail&aid=21622",
+ "category": "Add Mesh"}
+
+Usage:
+
+* Launch from Add Mesh menu
+
+* Modify parameters as desired or keep defaults
+"""
+
+
+import bpy
+from bpy.props import *
+
+import mathutils
+from mathutils import *
+from math import cos, sin, pi
+
+
+# Create a new mesh (object) from verts/edges/faces.
+# verts/edges/faces ... List of vertices/edges/faces for the
+# new mesh (as used in from_pydata).
+# name ... Name of the new mesh (& object).
+def create_mesh_object(context, verts, edges, faces, name):
+ scene = context.scene
+ obj_act = scene.objects.active
+
+ # Create new mesh
+ mesh = bpy.data.meshes.new(name)
+
+ # Make a mesh from a list of verts/edges/faces.
+ mesh.from_pydata(verts, edges, faces)
+
+ # Update mesh geometry after adding stuff.
+ mesh.update()
+
+ from bpy_extras import object_utils
+ return object_utils.object_data_add(context, mesh, operator=None)
+
+# A very simple "bridge" tool.
+# Connects two equally long vertex rows with faces.
+# Returns a list of the new faces (list of lists)
+#
+# vertIdx1 ... First vertex list (list of vertex indices).
+# vertIdx2 ... Second vertex list (list of vertex indices).
+# closed ... Creates a loop (first & last are closed).
+# flipped ... Invert the normal of the face(s).
+#
+# Note: You can set vertIdx1 to a single vertex index to create
+# a fan/star of faces.
+# Note: If both vertex idx list are the same length they have
+# to have at least 2 vertices.
+def createFaces(vertIdx1, vertIdx2, closed=False, flipped=False):
+ faces = []
+
+ if not vertIdx1 or not vertIdx2:
+ return None
+
+ if len(vertIdx1) < 2 and len(vertIdx2) < 2:
+ return None
+
+ fan = False
+ if (len(vertIdx1) != len(vertIdx2)):
+ if (len(vertIdx1) == 1 and len(vertIdx2) > 1):
+ fan = True
+ else:
+ return None
+
+ total = len(vertIdx2)
+
+ if closed:
+ # Bridge the start with the end.
+ if flipped:
+ face = [
+ vertIdx1[0],
+ vertIdx2[0],
+ vertIdx2[total - 1]]
+ if not fan:
+ face.append(vertIdx1[total - 1])
+ faces.append(face)
+
+ else:
+ face = [vertIdx2[0], vertIdx1[0]]
+ if not fan:
+ face.append(vertIdx1[total - 1])
+ face.append(vertIdx2[total - 1])
+ faces.append(face)
+
+ # Bridge the rest of the faces.
+ for num in range(total - 1):
+ if flipped:
+ if fan:
+ face = [vertIdx2[num], vertIdx1[0], vertIdx2[num + 1]]
+ else:
+ face = [vertIdx2[num], vertIdx1[num],
+ vertIdx1[num + 1], vertIdx2[num + 1]]
+ faces.append(face)
+ else:
+ if fan:
+ face = [vertIdx1[0], vertIdx2[num], vertIdx2[num + 1]]
+ else:
+ face = [vertIdx1[num], vertIdx2[num],
+ vertIdx2[num + 1], vertIdx1[num + 1]]
+ faces.append(face)
+
+ return faces
+
+
+def add_twisted_torus(major_rad, minor_rad, major_seg, minor_seg, twists):
+ PI_2 = pi * 2.0
+ z_axis = (0.0, 0.0, 1.0)
+
+ verts = []
+ faces = []
+
+ edgeloop_prev = []
+ for major_index in range(major_seg):
+ quat = Quaternion(z_axis, (major_index / major_seg) * PI_2)
+ rot_twists = PI_2 * major_index / major_seg * twists
+
+ edgeloop = []
+
+ # Create section ring
+ for minor_index in range(minor_seg):
+ angle = (PI_2 * minor_index / minor_seg) + rot_twists
+
+ vec = Vector((
+ major_rad + (cos(angle) * minor_rad),
+ 0.0,
+ sin(angle) * minor_rad))
+ vec = vec * quat
+
+ edgeloop.append(len(verts))
+ verts.append(vec)
+
+ # Remember very first edgeloop.
+ if major_index == 0:
+ edgeloop_first = edgeloop
+
+ # Bridge last with current ring
+ if edgeloop_prev:
+ f = createFaces(edgeloop_prev, edgeloop, closed=True)
+ faces.extend(f)
+
+ edgeloop_prev = edgeloop
+
+ # Bridge first and last ring
+ f = createFaces(edgeloop_prev, edgeloop_first, closed=True)
+ faces.extend(f)
+
+ return verts, faces
+
+
+class AddTwistedTorus(bpy.types.Operator):
+ '''Add a torus mesh'''
+ bl_idname = "mesh.primitive_twisted_torus_add"
+ bl_label = "Add Torus"
+ bl_options = {'REGISTER', 'UNDO'}
+
+ major_radius = FloatProperty(name="Major Radius",
+ description="Radius from the origin to the" \
+ " center of the cross section",
+ min=0.01,
+ max=100.0,
+ default=1.0)
+ minor_radius = FloatProperty(name="Minor Radius",
+ description="Radius of the torus' cross section",
+ min=0.01,
+ max=100.0,
+ default=0.25)
+ major_segments = IntProperty(name="Major Segments",
+ description="Number of segments for the main ring of the torus",
+ min=3,
+ max=256,
+ default=48)
+ minor_segments = IntProperty(name="Minor Segments",
+ description="Number of segments for the minor ring of the torus",
+ min=3,
+ max=256,
+ default=12)
+ twists = IntProperty(name="Twists",
+ description="Number of twists of the torus",
+ min=0,
+ max=256,
+ default=1)
+
+ use_abso = BoolProperty(name="Use Int+Ext Controls",
+ description="Use the Int / Ext controls for torus dimensions",
+ default=False)
+ abso_major_rad = FloatProperty(name="Exterior Radius",
+ description="Total Exterior Radius of the torus",
+ min=0.01,
+ max=100.0,
+ default=1.0)
+ abso_minor_rad = FloatProperty(name="Inside Radius",
+ description="Total Interior Radius of the torus",
+ min=0.01,
+ max=100.0,
+ default=0.5)
+
+ def execute(self, context):
+
+ if self.use_abso == True:
+ extra_helper = (self.abso_major_rad - self.abso_minor_rad) * 0.5
+ self.major_radius = self.abso_minor_rad + extra_helper
+ self.minor_radius = extra_helper
+
+ verts, faces = add_twisted_torus(
+ self.major_radius,
+ self.minor_radius,
+ self.major_segments,
+ self.minor_segments,
+ self.twists)
+
+ # Actually create the mesh object from this geometry data.
+ obj = create_mesh_object(context, verts, [], faces, "TwistedTorus")
+
+ return {'FINISHED'}
+
diff --git a/add_mesh_pipe_joint.py b/add_mesh_pipe_joint.py
new file mode 100644
index 00000000..69b547ed
--- /dev/null
+++ b/add_mesh_pipe_joint.py
@@ -0,0 +1,1150 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+bl_info = {
+ "name": "Pipe Joints",
+ "author": "Buerbaum Martin (Pontiac)",
+ "version": (0, 10, 7),
+ "blender": (2, 5, 7),
+ "api": 35853,
+ "location": "View3D > Add > Mesh > Pipe Joints",
+ "description": "Adds 5 pipe Joint types to the Add Mesh menu",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/Add_Mesh/Add_Pipe_Joints",
+ "tracker_url": "https://projects.blender.org/tracker/index.php?"\
+ "func=detail&aid=21443",
+ "category": "Add Mesh"}
+
+"""
+Pipe Joints
+This script lets the user create various types of pipe joints.
+
+Usage:
+You have to activated the script in the "Add-Ons" tab (user preferences).
+The functionality can then be accessed via the
+"Add Mesh" -> "Pipe Joints" menu.
+Note: Currently only the "Elbow" type supports odd number of vertices.
+
+Version history:
+v0.10.6 - Removed "recall properties" from all functions.
+ Updated various code for new API.
+ API: mathutils.RotationMatrix -> mathutils.Matrix.Rotation
+ API: xxx.selected -> xxx.select
+ API: "invoke" function for each operator.
+ Updated for new bl_info structure.
+ New code for the "align_matrix".
+ made script PEP8 compatible.
+v0.10.5 - createFaces can now create fan/star like faces.
+v0.10.4 - Updated the function "createFaces" a bit. No functional changes.
+v0.10.3 - Updated store_recall_properties, apply_object_align
+ and create_mesh_object.
+ Changed how recall data is stored.
+ Added 'description'.
+v0.10.2 - API change Mathutils -> mathutils (r557)
+ Fixed wiki url.
+v0.10.1 - Use hidden "edit" property for "recall" operator.
+v0.10 - Store "recall" properties in the created objects.
+ Align the geometry to the view if the user preference says so.
+v0.9.10 - Use bl_info for Add-On information.
+v0.9.9 - Changed the script so it can be managed from the "Add-Ons" tab in
+ the user preferences.
+ Added dummy "PLUGIN" icon.
+v0.9.8 - Fixed some new API stuff.
+ Mainly we now have the register/unregister functions.
+ Also the new() function for objects now accepts a mesh object.
+ Corrected FSF address.
+ Clean up of tooltips.
+v0.9.7 - Use "unit" settings for angles as well.
+ This also lets me use radiant for all internal values..
+v0.9.6 - Use "unit" settings (i.e. none/metric/imperial).
+v0.9.5 - Use mesh.from_pydata() for geometry creation.
+ So we can remove unpack_list and unpack_face_list again.
+v0.9.4 - Creating of the pipe now works in mesh edit mode too.
+ Thanks to ideasman42 (Campbell Barton) for his nice work
+ on the torus script code :-).
+v0.9.3 - Changed to a saner vertex/polygon creation process (previously
+ my usage of mesh.faces.add could only do quads)
+ For this I've copied the functions unpack_list and unpack_face_list
+ from import_scene_obj.py.
+ Elbow joint actually supports 3 vertices per circle.
+ Various comments.
+ Script _should_ now be PEP8 compatible.
+v0.9.2 - Converted from tabs to spaces (4 spaces per tab).
+v0.9.1 - Converted add_mesh and add_object to their new counterparts
+ "bpy.data.meshes.new() and "bpy.data.objects.new()"
+v0.9 - Converted to 2.5. Made mostly pep8 compatible (exept for tabs and
+ stuff the check-script didn't catch).
+v0.8.5 - Fixed bug in Elbow joint. Same problem as in 0.8.1
+v0.8.4 - Fixed bug in Y joint. Same problem as in 0.8.1
+v0.8.3 - Fixed bug in N joint. Same problem as in 0.8.1
+v0.8.2 - Fixed bug in X (cross) joint. Same problem as in 0.8.1
+v0.8.1 - Fixed bug in T joint. Angles greater than 90 deg combined with a
+ radius != 1 resulted in bad geometry (the radius was not taken into
+ account when calculating the joint vertices).
+v0.8 - Added N-Joint.
+ Removed all uses of baseJointLocZ. It just clutters the code.
+v0.7 - Added cross joint
+v0.6 - No visible changes. Lots of internal ones though
+ (complete redesign of face creation process).
+ As a bonus the code is a bit easier to read now.
+ Added a nice&simple little "bridge" function
+ (createFaces) for these changes.
+v0.5.1 - Made it possible to create asymmetric Y joints.
+ Renamed the 2 Wye Joints to something more fitting and unique.
+ One is now the Tee joint, the second one remains the Wye joint.
+v0.5 - Added real Y joint.
+v0.4.3 - Added check for odd vertex numbers. They are not (yet) supported.
+v0.4.2 - Added pipe length to the GUI.
+v0.4.1 - Removed the unfinished menu entries for now.
+v0.4 - Tried to clean up the face creation in addTeeJoint
+v0.3 - Code for wye (Y) shape (straight pipe with "branch" for now)
+v0.2 - Restructured to allow different types of pipe (joints).
+v0.1 - Initial revision.
+
+More links:
+http://gitorious.org/blender-scripts/blender-pipe-joint-script
+http://blenderartists.org/forum/showthread.php?t=154394
+
+TODO:
+
+Use a rotation matrix for rotating the circle vertices:
+rotation_matrix = mathutils.Matrix.Rotation(-math.pi/2, 4, 'x')
+mesh.transform(rotation_matrix)
+"""
+
+import bpy
+import mathutils
+from math import *
+from bpy.props import *
+
+
+# Create a new mesh (object) from verts/edges/faces.
+# verts/edges/faces ... List of vertices/edges/faces for the
+# new mesh (as used in from_pydata).
+# name ... Name of the new mesh (& object).
+def create_mesh_object(context, verts, edges, faces, name):
+ # Create new mesh
+ mesh = bpy.data.meshes.new(name)
+
+ # Make a mesh from a list of verts/edges/faces.
+ mesh.from_pydata(verts, edges, faces)
+
+ # Update mesh geometry after adding stuff.
+ mesh.update()
+
+ from bpy_extras import object_utils
+ return object_utils.object_data_add(context, mesh, operator=None)
+
+# A very simple "bridge" tool.
+# Connects two equally long vertex rows with faces.
+# Returns a list of the new faces (list of lists)
+#
+# vertIdx1 ... First vertex list (list of vertex indices).
+# vertIdx2 ... Second vertex list (list of vertex indices).
+# closed ... Creates a loop (first & last are closed).
+# flipped ... Invert the normal of the face(s).
+#
+# Note: You can set vertIdx1 to a single vertex index to create
+# a fan/star of faces.
+# Note: If both vertex idx list are the same length they have
+# to have at least 2 vertices.
+def createFaces(vertIdx1, vertIdx2, closed=False, flipped=False):
+ faces = []
+
+ if not vertIdx1 or not vertIdx2:
+ return None
+
+ if len(vertIdx1) < 2 and len(vertIdx2) < 2:
+ return None
+
+ fan = False
+ if (len(vertIdx1) != len(vertIdx2)):
+ if (len(vertIdx1) == 1 and len(vertIdx2) > 1):
+ fan = True
+ else:
+ return None
+
+ total = len(vertIdx2)
+
+ if closed:
+ # Bridge the start with the end.
+ if flipped:
+ face = [
+ vertIdx1[0],
+ vertIdx2[0],
+ vertIdx2[total - 1]]
+ if not fan:
+ face.append(vertIdx1[total - 1])
+ faces.append(face)
+
+ else:
+ face = [vertIdx2[0], vertIdx1[0]]
+ if not fan:
+ face.append(vertIdx1[total - 1])
+ face.append(vertIdx2[total - 1])
+ faces.append(face)
+
+ # Bridge the rest of the faces.
+ for num in range(total - 1):
+ if flipped:
+ if fan:
+ face = [vertIdx2[num], vertIdx1[0], vertIdx2[num + 1]]
+ else:
+ face = [vertIdx2[num], vertIdx1[num],
+ vertIdx1[num + 1], vertIdx2[num + 1]]
+ faces.append(face)
+ else:
+ if fan:
+ face = [vertIdx1[0], vertIdx2[num], vertIdx2[num + 1]]
+ else:
+ face = [vertIdx1[num], vertIdx2[num],
+ vertIdx2[num + 1], vertIdx1[num + 1]]
+ faces.append(face)
+
+ return faces
+
+
+class AddElbowJoint(bpy.types.Operator):
+ # Create the vertices and polygons for a simple elbow (bent pipe).
+ '''Add an Elbow pipe mesh'''
+ bl_idname = "mesh.primitive_elbow_joint_add"
+ bl_label = "Add Pipe Elbow"
+ bl_options = {'REGISTER', 'UNDO'}
+
+ radius = FloatProperty(name="Radius",
+ description="The radius of the pipe.",
+ default=1.0,
+ min=0.01,
+ max=100.0,
+ unit="LENGTH")
+ div = IntProperty(name="Divisions",
+ description="Number of vertices (divisions).",
+ default=32, min=3, max=256)
+
+ angle = FloatProperty(name="Angle",
+ description="The angle of the branching pipe (i.e. the 'arm')." \
+ " Measured from the center line of the main pipe.",
+ default=radians(45.0),
+ min=radians(-179.9),
+ max=radians(179.9),
+ unit="ROTATION")
+
+ startLength = FloatProperty(name="Length Start",
+ description="Length of the beginning of the pipe.",
+ default=3.0,
+ min=0.01,
+ max=100.0,
+ unit="LENGTH")
+ endLength = FloatProperty(name="End Length",
+ description="Length of the end of the pipe.",
+ default=3.0,
+ min=0.01,
+ max=100.0,
+ unit="LENGTH")
+
+ def execute(self, context):
+
+ radius = self.radius
+ div = self.div
+
+ angle = self.angle
+
+ startLength = self.startLength
+ endLength = self.endLength
+
+ verts = []
+ faces = []
+
+ loop1 = [] # The starting circle
+ loop2 = [] # The elbow circle
+ loop3 = [] # The end circle
+
+ # Create start circle
+ for vertIdx in range(div):
+ curVertAngle = vertIdx * (2.0 * pi / div)
+ locX = sin(curVertAngle)
+ locY = cos(curVertAngle)
+ locZ = -startLength
+ loop1.append(len(verts))
+ verts.append([locX * radius, locY * radius, locZ])
+
+ # Create deformed joint circle
+ for vertIdx in range(div):
+ curVertAngle = vertIdx * (2.0 * pi / div)
+ locX = sin(curVertAngle)
+ locY = cos(curVertAngle)
+ locZ = locX * tan(angle / 2.0)
+ loop2.append(len(verts))
+ verts.append([locX * radius, locY * radius, locZ * radius])
+
+ # Create end circle
+ baseEndLocX = -endLength * sin(angle)
+ baseEndLocZ = endLength * cos(angle)
+ for vertIdx in range(div):
+ curVertAngle = vertIdx * (2.0 * pi / div)
+ # Create circle
+ locX = sin(curVertAngle) * radius
+ locY = cos(curVertAngle) * radius
+ locZ = 0.0
+
+ # Rotate circle
+ locZ = locX * cos(pi / 2.0 - angle)
+ locX = locX * sin(pi / 2.0 - angle)
+
+ loop3.append(len(verts))
+ # Translate and add circle vertices to the list.
+ verts.append([baseEndLocX + locX, locY, baseEndLocZ + locZ])
+
+ # Create faces
+ faces.extend(createFaces(loop1, loop2, closed=True))
+ faces.extend(createFaces(loop2, loop3, closed=True))
+
+ base = create_mesh_object(context, verts, [], faces, "Elbow Joint")
+
+ return {'FINISHED'}
+
+
+class AddTeeJoint(bpy.types.Operator):
+ # Create the vertices and polygons for a simple tee (T) joint.
+ # The base arm of the T can be positioned in an angle if needed though.
+ '''Add a Tee-Joint mesh'''
+ bl_idname = "mesh.primitive_tee_joint_add"
+ bl_label = "Add Pipe Tee-Joint"
+ bl_options = {'REGISTER', 'UNDO'}
+
+ radius = FloatProperty(name="Radius",
+ description="The radius of the pipe.",
+ default=1.0,
+ min=0.01,
+ max=100.0,
+ unit="LENGTH")
+ div = IntProperty(name="Divisions",
+ description="Number of vertices (divisions).",
+ default=32,
+ min=4,
+ max=256)
+
+ angle = FloatProperty(name="Angle",
+ description="The angle of the branching pipe (i.e. the 'arm')." \
+ " Measured from the center line of the main pipe.",
+ default=radians(90.0),
+ min=radians(0.1),
+ max=radians(179.9),
+ unit="ROTATION")
+
+ startLength = FloatProperty(name="Length Start",
+ description="Length of the beginning of the" \
+ " main pipe (the straight one).",
+ default=3.0,
+ min=0.01,
+ max=100.0,
+ unit="LENGTH")
+ endLength = FloatProperty(name="End Length",
+ description="Length of the end of the" \
+ " main pipe (the straight one).",
+ default=3.0,
+ min=0.01,
+ max=100.0,
+ unit="LENGTH")
+ branchLength = FloatProperty(name="Arm Length",
+ description="Length of the arm pipe (the bent one).",
+ default=3.0,
+ min=0.01,
+ max=100.0,
+ unit="LENGTH")
+
+ def execute(self, context):
+
+ radius = self.radius
+ div = self.div
+
+ angle = self.angle
+
+ startLength = self.startLength
+ endLength = self.endLength
+ branchLength = self.branchLength
+
+ if (div % 2):
+ # Odd vertice number not supported (yet).
+ return {'CANCELLED'}
+
+ verts = []
+ faces = []
+
+ # List of vert indices of each cross section
+ loopMainStart = [] # Vert indices for the
+ # beginning of the main pipe.
+ loopJoint1 = [] # Vert indices for joint that is used
+ # to connect the joint & loopMainStart.
+ loopJoint2 = [] # Vert indices for joint that is used
+ # to connect the joint & loopArm.
+ loopJoint3 = [] # Vert index for joint that is used
+ # to connect the joint & loopMainEnd.
+ loopArm = [] # Vert indices for the end of the arm.
+ loopMainEnd = [] # Vert indices for the
+ # end of the main pipe.
+
+ # Create start circle (main pipe)
+ for vertIdx in range(div):
+ curVertAngle = vertIdx * (2.0 * pi / div)
+ locX = sin(curVertAngle)
+ locY = cos(curVertAngle)
+ locZ = -startLength
+ loopMainStart.append(len(verts))
+ verts.append([locX * radius, locY * radius, locZ])
+
+ # Create deformed joint circle
+ vertTemp1 = None
+ vertTemp2 = None
+ for vertIdx in range(div):
+ curVertAngle = vertIdx * (2.0 * pi / div)
+ locX = sin(curVertAngle)
+ locY = cos(curVertAngle)
+
+ if vertIdx == 0:
+ vertTemp1 = len(verts)
+ if vertIdx == div / 2:
+ # @todo: This will possibly break if we
+ # ever support odd divisions.
+ vertTemp2 = len(verts)
+
+ loopJoint1.append(len(verts))
+ if (vertIdx < div / 2):
+ # Straight side of main pipe.
+ locZ = 0
+ loopJoint3.append(len(verts))
+ else:
+ # Branching side
+ locZ = locX * tan(angle / 2.0)
+ loopJoint2.append(len(verts))
+
+ verts.append([locX * radius, locY * radius, locZ * radius])
+
+ # Create 2. deformed joint (half-)circle
+ loopTemp = []
+ for vertIdx in range(div):
+ if (vertIdx > div / 2):
+ curVertAngle = vertIdx * (2.0 * pi / div)
+ locX = sin(curVertAngle)
+ locY = -cos(curVertAngle)
+ locZ = -(radius * locX * tan((pi - angle) / 2.0))
+ loopTemp.append(len(verts))
+ verts.append([locX * radius, locY * radius, locZ])
+
+ loopTemp2 = loopTemp[:]
+
+ # Finalise 2. loop
+ loopTemp.reverse()
+ loopTemp.append(vertTemp1)
+ loopJoint2.reverse()
+ loopJoint2.extend(loopTemp)
+ loopJoint2.reverse()
+
+ # Finalise 3. loop
+ loopTemp2.append(vertTemp2)
+ loopTemp2.reverse()
+ loopJoint3.extend(loopTemp2)
+
+ # Create end circle (branching pipe)
+ baseEndLocX = -branchLength * sin(angle)
+ baseEndLocZ = branchLength * cos(angle)
+ for vertIdx in range(div):
+ curVertAngle = vertIdx * (2.0 * pi / div)
+ # Create circle
+ locX = sin(curVertAngle) * radius
+ locY = cos(curVertAngle) * radius
+ locZ = 0.0
+
+ # Rotate circle
+ locZ = locX * cos(pi / 2.0 - angle)
+ locX = locX * sin(pi / 2.0 - angle)
+
+ loopArm.append(len(verts))
+
+ # Add translated circle.
+ verts.append([baseEndLocX + locX, locY, baseEndLocZ + locZ])
+
+ # Create end circle (main pipe)
+ for vertIdx in range(div):
+ curVertAngle = vertIdx * (2.0 * pi / div)
+ locX = sin(curVertAngle)
+ locY = cos(curVertAngle)
+ locZ = endLength
+ loopMainEnd.append(len(verts))
+ verts.append([locX * radius, locY * radius, locZ])
+
+ # Create faces
+ faces.extend(createFaces(loopMainStart, loopJoint1, closed=True))
+ faces.extend(createFaces(loopJoint2, loopArm, closed=True))
+ faces.extend(createFaces(loopJoint3, loopMainEnd, closed=True))
+
+ base = create_mesh_object(context, verts, [], faces, "Tee Joint")
+
+ return {'FINISHED'}
+
+
+class AddWyeJoint(bpy.types.Operator):
+ '''Add a Wye-Joint mesh'''
+ bl_idname = "mesh.primitive_wye_joint_add"
+ bl_label = "Add Pipe Wye-Joint"
+ bl_options = {'REGISTER', 'UNDO'}
+
+ radius = FloatProperty(name="Radius",
+ description="The radius of the pipe.",
+ default=1.0,
+ min=0.01,
+ max=100.0,
+ unit="LENGTH")
+ div = IntProperty(name="Divisions",
+ description="Number of vertices (divisions).",
+ default=32,
+ min=4,
+ max=256)
+
+ angle1 = FloatProperty(name="Angle 1",
+ description="The angle of the 1. branching pipe." \
+ " Measured from the center line of the main pipe.",
+ default=radians(45.0),
+ min=radians(-179.9),
+ max=radians(179.9),
+ unit="ROTATION")
+ angle2 = FloatProperty(name="Angle 2",
+ description="The angle of the 2. branching pipe." \
+ " Measured from the center line of the main pipe.",
+ default=radians(45.0),
+ min=radians(-179.9),
+ max=radians(179.9),
+ unit="ROTATION")
+
+ startLength = FloatProperty(name="Length Start",
+ description="Length of the beginning of the" \
+ " main pipe (the straight one).",
+ default=3.0,
+ min=0.01,
+ max=100.0,
+ unit="LENGTH")
+ branch1Length = FloatProperty(name="Length Arm 1",
+ description="Length of the 1. arm.",
+ default=3.0,
+ min=0.01,
+ max=100.0,
+ unit="LENGTH")
+ branch2Length = FloatProperty(name="Length Arm 2",
+ description="Length of the 2. arm.",
+ default=3.0,
+ min=0.01,
+ max=100.0,
+ unit="LENGTH")
+
+ def execute(self, context):
+
+ radius = self.radius
+ div = self.div
+
+ angle1 = self.angle1
+ angle2 = self.angle2
+
+ startLength = self.startLength
+ branch1Length = self.branch1Length
+ branch2Length = self.branch2Length
+
+ if (div % 2):
+ # Odd vertice number not supported (yet).
+ return {'CANCELLED'}
+
+ verts = []
+ faces = []
+
+ # List of vert indices of each cross section
+ loopMainStart = [] # Vert indices for
+ # the beginning of the main pipe.
+ loopJoint1 = [] # Vert index for joint that is used
+ # to connect the joint & loopMainStart.
+ loopJoint2 = [] # Vert index for joint that
+ # is used to connect the joint & loopArm1.
+ loopJoint3 = [] # Vert index for joint that is
+ # used to connect the joint & loopArm2.
+ loopArm1 = [] # Vert idxs for end of the 1. arm.
+ loopArm2 = [] # Vert idxs for end of the 2. arm.
+
+ # Create start circle
+ for vertIdx in range(div):
+ curVertAngle = vertIdx * (2.0 * pi / div)
+ locX = sin(curVertAngle)
+ locY = cos(curVertAngle)
+ locZ = -startLength
+ loopMainStart.append(len(verts))
+ verts.append([locX * radius, locY * radius, locZ])
+
+ # Create deformed joint circle
+ vertTemp1 = None
+ vertTemp2 = None
+ for vertIdx in range(div):
+ curVertAngle = vertIdx * (2.0 * pi / div)
+ locX = sin(curVertAngle)
+ locY = cos(curVertAngle)
+
+ if vertIdx == 0:
+ vertTemp2 = len(verts)
+ if vertIdx == div / 2:
+ # @todo: This will possibly break if we
+ # ever support odd divisions.
+ vertTemp1 = len(verts)
+
+ loopJoint1.append(len(verts))
+ if (vertIdx > div / 2):
+ locZ = locX * tan(angle1 / 2.0)
+ loopJoint2.append(len(verts))
+ else:
+ locZ = locX * tan(-angle2 / 2.0)
+ loopJoint3.append(len(verts))
+
+ verts.append([locX * radius, locY * radius, locZ * radius])
+
+ # Create 2. deformed joint (half-)circle
+ loopTemp = []
+ angleJoint = (angle2 - angle1) / 2.0
+ for vertIdx in range(div):
+ if (vertIdx > div / 2):
+ curVertAngle = vertIdx * (2.0 * pi / div)
+
+ locX = (-sin(curVertAngle) * sin(angleJoint)
+ / sin(angle2 - angleJoint))
+ locY = -cos(curVertAngle)
+ locZ = (-(sin(curVertAngle) * cos(angleJoint)
+ / sin(angle2 - angleJoint)))
+
+ loopTemp.append(len(verts))
+ verts.append([locX * radius, locY * radius, locZ * radius])
+
+ loopTemp2 = loopTemp[:]
+
+ # Finalise 2. loop
+ loopTemp.append(vertTemp1)
+ loopTemp.reverse()
+ loopTemp.append(vertTemp2)
+ loopJoint2.reverse()
+ loopJoint2.extend(loopTemp)
+ loopJoint2.reverse()
+
+ # Finalise 3. loop
+ loopTemp2.reverse()
+ loopJoint3.extend(loopTemp2)
+
+ # Create end circle (1. branching pipe)
+ baseEndLocX = -branch1Length * sin(angle1)
+ baseEndLocZ = branch1Length * cos(angle1)
+ for vertIdx in range(div):
+ curVertAngle = vertIdx * (2.0 * pi / div)
+ # Create circle
+ locX = sin(curVertAngle) * radius
+ locY = cos(curVertAngle) * radius
+ locZ = 0.0
+
+ # Rotate circle
+ locZ = locX * cos(pi / 2.0 - angle1)
+ locX = locX * sin(pi / 2.0 - angle1)
+
+ loopArm1.append(len(verts))
+ # Add translated circle.
+ verts.append([baseEndLocX + locX, locY, baseEndLocZ + locZ])
+
+ # Create end circle (2. branching pipe)
+ baseEndLocX = branch2Length * sin(angle2)
+ baseEndLocZ = branch2Length * cos(angle2)
+ for vertIdx in range(div):
+ curVertAngle = vertIdx * (2.0 * pi / div)
+ # Create circle
+ locX = sin(curVertAngle) * radius
+ locY = cos(curVertAngle) * radius
+ locZ = 0.0
+
+ # Rotate circle
+ locZ = locX * cos(pi / 2.0 + angle2)
+ locX = locX * sin(pi / 2.0 + angle2)
+
+ loopArm2.append(len(verts))
+ # Add translated circle
+ verts.append([baseEndLocX + locX, locY, baseEndLocZ + locZ])
+
+ # Create faces
+ faces.extend(createFaces(loopMainStart, loopJoint1, closed=True))
+ faces.extend(createFaces(loopJoint2, loopArm1, closed=True))
+ faces.extend(createFaces(loopJoint3, loopArm2, closed=True))
+
+ base = create_mesh_object(context, verts, [], faces, "Wye Joint")
+
+ return {'FINISHED'}
+
+
+class AddCrossJoint(bpy.types.Operator):
+ '''Add a Cross-Joint mesh'''
+ # Create the vertices and polygons for a coss (+ or X) pipe joint.
+ bl_idname = "mesh.primitive_cross_joint_add"
+ bl_label = "Add Pipe Cross-Joint"
+ bl_options = {'REGISTER', 'UNDO'}
+
+ radius = FloatProperty(name="Radius",
+ description="The radius of the pipe.",
+ default=1.0,
+ min=0.01,
+ max=100.0,
+ unit="LENGTH")
+ div = IntProperty(name="Divisions",
+ description="Number of vertices (divisions).",
+ default=32,
+ min=4,
+ max=256)
+
+ angle1 = FloatProperty(name="Angle 1",
+ description="The angle of the 1. arm (from the main axis).",
+ default=radians(90.0),
+ min=radians(-179.9),
+ max=radians(179.9),
+ unit="ROTATION")
+ angle2 = FloatProperty(name="Angle 2",
+ description="The angle of the 2. arm (from the main axis).",
+ default=radians(90.0),
+ min=radians(-179.9),
+ max=radians(179.9),
+ unit="ROTATION")
+ angle3 = FloatProperty(name="Angle 3 (center)",
+ description="The angle of the center arm (from the main axis).",
+ default=radians(0.0),
+ min=radians(-179.9),
+ max=radians(179.9),
+ unit="ROTATION")
+
+ startLength = FloatProperty(name="Length Start",
+ description="Length of the beginning of the " \
+ "main pipe (the straight one).",
+ default=3.0,
+ min=0.01,
+ max=100.0,
+ unit="LENGTH")
+ branch1Length = FloatProperty(name="Length Arm 1",
+ description="Length of the 1. arm.",
+ default=3.0,
+ min=0.01,
+ max=100.0,
+ unit="LENGTH")
+ branch2Length = FloatProperty(name="Length Arm 2",
+ description="Length of the 2. arm.",
+ default=3.0,
+ min=0.01,
+ max=100.0,
+ unit="LENGTH")
+ branch3Length = FloatProperty(name="Length Arm 3 (center)",
+ description="Length of the center arm.",
+ default=3.0,
+ min=0.01,
+ max=100.0,
+ unit="LENGTH")
+
+ def execute(self, context):
+
+ radius = self.radius
+ div = self.div
+
+ angle1 = self.angle1
+ angle2 = self.angle2
+ angle3 = self.angle3
+
+ startLength = self.startLength
+ branch1Length = self.branch1Length
+ branch2Length = self.branch2Length
+ branch3Length = self.branch3Length
+ if (div % 2):
+ # Odd vertice number not supported (yet).
+ return {'CANCELLED'}
+
+ verts = []
+ faces = []
+
+ # List of vert indices of each cross section
+ loopMainStart = [] # Vert indices for the
+ # beginning of the main pipe.
+ loopJoint1 = [] # Vert index for joint that is used
+ # to connect the joint & loopMainStart.
+ loopJoint2 = [] # Vert index for joint that is used
+ # to connect the joint & loopArm1.
+ loopJoint3 = [] # Vert index for joint that is used
+ # to connect the joint & loopArm2.
+ loopJoint4 = [] # Vert index for joint that is used
+ # to connect the joint & loopArm3.
+ loopArm1 = [] # Vert idxs for the end of the 1. arm.
+ loopArm2 = [] # Vert idxs for the end of the 2. arm.
+ loopArm3 = [] # Vert idxs for the center arm end.
+
+ # Create start circle
+ for vertIdx in range(div):
+ curVertAngle = vertIdx * (2.0 * pi / div)
+ locX = sin(curVertAngle)
+ locY = cos(curVertAngle)
+ locZ = -startLength
+ loopMainStart.append(len(verts))
+ verts.append([locX * radius, locY * radius, locZ])
+
+ # Create 1. deformed joint circle
+ vertTemp1 = None
+ vertTemp2 = None
+ for vertIdx in range(div):
+ curVertAngle = vertIdx * (2.0 * pi / div)
+ locX = sin(curVertAngle)
+ locY = cos(curVertAngle)
+
+ if vertIdx == 0:
+ vertTemp2 = len(verts)
+ if vertIdx == div / 2:
+ # @todo: This will possibly break if we
+ # ever support odd divisions.
+ vertTemp1 = len(verts)
+
+ loopJoint1.append(len(verts))
+ if (vertIdx > div / 2):
+ locZ = locX * tan(angle1 / 2.0)
+ loopJoint2.append(len(verts))
+ else:
+ locZ = locX * tan(-angle2 / 2.0)
+ loopJoint3.append(len(verts))
+
+ verts.append([locX * radius, locY * radius, locZ * radius])
+
+ loopTemp2 = loopJoint2[:]
+
+ # Create 2. deformed joint circle
+ loopTempA = []
+ loopTempB = []
+ angleJoint1 = (angle1 - angle3) / 2.0
+ angleJoint2 = (angle2 + angle3) / 2.0
+ for vertIdx in range(div):
+ curVertAngle = vertIdx * (2.0 * pi / div)
+
+ # Skip pole vertices
+ # @todo: This will possibly break if
+ # we ever support odd divisions.
+ if not (vertIdx == 0) and not (vertIdx == div / 2):
+
+ if (vertIdx > div / 2):
+ angleJoint = angleJoint1
+ angle = angle1
+ Z = -1.0
+ loopTempA.append(len(verts))
+
+ else:
+ angleJoint = angleJoint2
+ angle = angle2
+ Z = 1.0
+ loopTempB.append(len(verts))
+
+ locX = (sin(curVertAngle) * sin(angleJoint)
+ / sin(angle - angleJoint))
+ locY = -cos(curVertAngle)
+ locZ = (Z * (sin(curVertAngle) * cos(angleJoint)
+ / sin(angle - angleJoint)))
+
+ verts.append([locX * radius, locY * radius, locZ * radius])
+
+ loopTempA2 = loopTempA[:]
+ loopTempB2 = loopTempB[:]
+ loopTempB3 = loopTempB[:]
+
+ # Finalise 2. loop
+ loopTempA.append(vertTemp1)
+ loopTempA.reverse()
+ loopTempA.append(vertTemp2)
+ loopJoint2.reverse()
+ loopJoint2.extend(loopTempA)
+ loopJoint2.reverse()
+
+ # Finalise 3. loop
+ loopJoint3.extend(loopTempB3)
+
+ # Finalise 4. loop
+ loopTempA2.append(vertTemp1)
+ loopTempA2.reverse()
+ loopTempB2.append(vertTemp2)
+ loopJoint4.extend(reversed(loopTempB2))
+ loopJoint4.extend(loopTempA2)
+
+ # Create end circle (1. branching pipe)
+ baseEndLocX = -branch1Length * sin(angle1)
+ baseEndLocZ = branch1Length * cos(angle1)
+ for vertIdx in range(div):
+ curVertAngle = vertIdx * (2.0 * pi / div)
+ # Create circle
+ locX = sin(curVertAngle) * radius
+ locY = cos(curVertAngle) * radius
+ locZ = 0.0
+
+ # Rotate circle
+ locZ = locX * cos(pi / 2.0 - angle1)
+ locX = locX * sin(pi / 2.0 - angle1)
+
+ loopArm1.append(len(verts))
+ # Add translated circle.
+ verts.append([baseEndLocX + locX, locY, baseEndLocZ + locZ])
+
+ # Create end circle (2. branching pipe)
+ baseEndLocX = branch2Length * sin(angle2)
+ baseEndLocZ = branch2Length * cos(angle2)
+ for vertIdx in range(div):
+ curVertAngle = vertIdx * (2.0 * pi / div)
+ # Create circle
+ locX = sin(curVertAngle) * radius
+ locY = cos(curVertAngle) * radius
+ locZ = 0.0
+
+ # Rotate circle
+ locZ = locX * cos(pi / 2.0 + angle2)
+ locX = locX * sin(pi / 2.0 + angle2)
+
+ loopArm2.append(len(verts))
+ # Add translated circle
+ verts.append([baseEndLocX + locX, locY, baseEndLocZ + locZ])
+
+ # Create end circle (center pipe)
+ baseEndLocX = branch3Length * sin(angle3)
+ baseEndLocZ = branch3Length * cos(angle3)
+ for vertIdx in range(div):
+ curVertAngle = vertIdx * (2.0 * pi / div)
+ # Create circle
+ locX = sin(curVertAngle) * radius
+ locY = cos(curVertAngle) * radius
+ locZ = 0.0
+
+ # Rotate circle
+ locZ = locX * cos(pi / 2.0 + angle3)
+ locX = locX * sin(pi / 2.0 + angle3)
+
+ loopArm3.append(len(verts))
+ # Add translated circle
+ verts.append([baseEndLocX + locX, locY, baseEndLocZ + locZ])
+
+ # Create faces
+ faces.extend(createFaces(loopMainStart, loopJoint1, closed=True))
+ faces.extend(createFaces(loopJoint2, loopArm1, closed=True))
+ faces.extend(createFaces(loopJoint3, loopArm2, closed=True))
+ faces.extend(createFaces(loopJoint4, loopArm3, closed=True))
+
+ base = create_mesh_object(context, verts, [], faces, "Cross Joint")
+
+ return {'FINISHED'}
+
+
+class AddNJoint(bpy.types.Operator):
+ '''Add a N-Joint mesh'''
+ # Create the vertices and polygons for a regular n-joint.
+ bl_idname = "mesh.primitive_n_joint_add"
+ bl_label = "Add Pipe N-Joint"
+ bl_options = {'REGISTER', 'UNDO'}
+
+ radius = FloatProperty(name="Radius",
+ description="The radius of the pipe.",
+ default=1.0,
+ min=0.01,
+ max=100.0,
+ unit="LENGTH")
+ div = IntProperty(name="Divisions",
+ description="Number of vertices (divisions).",
+ default=32,
+ min=4,
+ max=256)
+ number = IntProperty(name="Arms/Joints",
+ description="Number of joints/arms",
+ default=5,
+ min=2,
+ max=99999)
+ length = FloatProperty(name="Length",
+ description="Length of each joint/arm",
+ default=3.0,
+ min=0.01,
+ max=100.0,
+ unit="LENGTH")
+
+ def execute(self, context):
+ radius = self.radius
+ div = self.div
+ number = self.number
+ length = self.length
+
+ if (div % 2):
+ # Odd vertice number not supported (yet).
+ return {'CANCELLED'}
+
+ if (number < 2):
+ return {'CANCELLED'}
+
+ verts = []
+ faces = []
+
+ loopsEndCircles = []
+ loopsJointsTemp = []
+ loopsJoints = []
+
+ vertTemp1 = None
+ vertTemp2 = None
+
+ angleDiv = (2.0 * pi / number)
+
+ # Create vertices for the end circles.
+ for num in range(number):
+ circle = []
+ # Create start circle
+ angle = num * angleDiv
+
+ baseEndLocX = length * sin(angle)
+ baseEndLocZ = length * cos(angle)
+ for vertIdx in range(div):
+ curVertAngle = vertIdx * (2.0 * pi / div)
+ # Create circle
+ locX = sin(curVertAngle) * radius
+ locY = cos(curVertAngle) * radius
+ locZ = 0.0
+
+ # Rotate circle
+ locZ = locX * cos(pi / 2.0 + angle)
+ locX = locX * sin(pi / 2.0 + angle)
+
+ circle.append(len(verts))
+ # Add translated circle
+ verts.append([baseEndLocX + locX, locY, baseEndLocZ + locZ])
+
+ loopsEndCircles.append(circle)
+
+ # Create vertices for the joint circles.
+ loopJoint = []
+ for vertIdx in range(div):
+ curVertAngle = vertIdx * (2.0 * pi / div)
+ locX = sin(curVertAngle)
+ locY = cos(curVertAngle)
+
+ skipVert = False
+ # Store pole vertices
+ if vertIdx == 0:
+ if (num == 0):
+ vertTemp2 = len(verts)
+ else:
+ skipVert = True
+ elif vertIdx == div / 2:
+ # @todo: This will possibly break if we
+ # ever support odd divisions.
+ if (num == 0):
+ vertTemp1 = len(verts)
+ else:
+ skipVert = True
+
+ if not skipVert:
+ if (vertIdx > div / 2):
+ locZ = -locX * tan((pi - angleDiv) / 2.0)
+ loopJoint.append(len(verts))
+
+ # Rotate the vert
+ cosAng = cos(-angle)
+ sinAng = sin(-angle)
+ LocXnew = locX * cosAng - locZ * sinAng
+ LocZnew = locZ * cosAng + locX * sinAng
+ locZ = LocZnew
+ locX = LocXnew
+
+ verts.append([
+ locX * radius,
+ locY * radius,
+ locZ * radius])
+ else:
+ # These two vertices will only be
+ # added the very first time.
+ if vertIdx == 0 or vertIdx == div / 2:
+ verts.append([locX * radius, locY * radius, locZ])
+
+ loopsJointsTemp.append(loopJoint)
+
+ # Create complete loops (loopsJoints) out of the
+ # double number of half loops in loopsJointsTemp.
+ for halfLoopIdx in range(len(loopsJointsTemp)):
+ if (halfLoopIdx == len(loopsJointsTemp) - 1):
+ idx1 = halfLoopIdx
+ idx2 = 0
+ else:
+ idx1 = halfLoopIdx
+ idx2 = halfLoopIdx + 1
+
+ loopJoint = []
+ loopJoint.append(vertTemp2)
+ loopJoint.extend(reversed(loopsJointsTemp[idx2]))
+ loopJoint.append(vertTemp1)
+ loopJoint.extend(loopsJointsTemp[idx1])
+
+ loopsJoints.append(loopJoint)
+
+ # Create faces from the two
+ # loop arrays (loopsJoints -> loopsEndCircles).
+ for loopIdx in range(len(loopsEndCircles)):
+ faces.extend(
+ createFaces(loopsJoints[loopIdx],
+ loopsEndCircles[loopIdx], closed=True))
+
+ base = create_mesh_object(context, verts, [], faces, "N Joint")
+
+ return {'FINISHED'}
+
+
+class INFO_MT_mesh_pipe_joints_add(bpy.types.Menu):
+ # Define the "Pipe Joints" menu
+ bl_idname = "INFO_MT_mesh_pipe_joints_add"
+ bl_label = "Pipe Joints"
+
+ def draw(self, context):
+ layout = self.layout
+ layout.operator_context = 'INVOKE_REGION_WIN'
+ layout.operator("mesh.primitive_elbow_joint_add",
+ text="Pipe Elbow")
+ layout.operator("mesh.primitive_tee_joint_add",
+ text="Pipe T-Joint")
+ layout.operator("mesh.primitive_wye_joint_add",
+ text="Pipe Y-Joint")
+ layout.operator("mesh.primitive_cross_joint_add",
+ text="Pipe Cross-Joint")
+ layout.operator("mesh.primitive_n_joint_add",
+ text="Pipe N-Joint")
+
+################################
+
+
+# Define "Pipe Joints" menu
+def menu_func(self, context):
+ self.layout.menu("INFO_MT_mesh_pipe_joints_add", icon="PLUGIN")
+
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ # Add "Pipe Joints" menu to the "Add Mesh" menu
+ bpy.types.INFO_MT_mesh_add.append(menu_func)
+
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+ # Remove "Pipe Joints" menu from the "Add Mesh" menu.
+ bpy.types.INFO_MT_mesh_add.remove(menu_func)
+
+
+if __name__ == "__main__":
+ register()
diff --git a/add_mesh_solid.py b/add_mesh_solid.py
new file mode 100644
index 00000000..01825d29
--- /dev/null
+++ b/add_mesh_solid.py
@@ -0,0 +1,607 @@
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
+bl_info = {
+ "name": "Regular Solids",
+ "author": "DreamPainter",
+ "version": (2, 0),
+ "blender": (2, 5, 7),
+ "api": 36336,
+ "location": "View3D > Add > Mesh > Solids",
+ "description": "Add a Regular Solid mesh.",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/Add_Mesh/Add_Solid",
+ "tracker_url": "https://projects.blender.org/tracker/index.php?"\
+ "func=detail&aid=22405",
+ "category": "Add Mesh"}
+
+import bpy
+from bpy.props import FloatProperty,EnumProperty,BoolProperty
+from math import sqrt
+from mathutils import Vector,Matrix
+from functools import reduce
+from bpy_extras.object_utils import object_data_add
+
+# this function creates a chain of quads and, when necessary, a remaining tri
+# for each polygon created in this script. be aware though, that this function
+# assumes each polygon is convex.
+# poly: list of faces, or a single face, like those
+# needed for mesh.from_pydata.
+# returns the tesselated faces.
+def createPolys(poly):
+ # check for faces
+ if len(poly) == 0:
+ return []
+ # one or more faces
+ if type(poly[0]) == type(1):
+ poly = [poly] # if only one, make it a list of one face
+ faces = []
+ for i in poly:
+ L = len(i)
+ # let all faces of 3 or 4 verts be
+ if L < 5:
+ faces.append(i)
+ # split all polygons in half and bridge the two halves
+ else:
+ f = [[i[x],i[x+1],i[L-2-x],i[L-1-x]] for x in range(L//2-1)]
+ faces.extend(f)
+ if L&1 == 1:
+ faces.append([i[L//2-1+x] for x in [0,1,2]])
+ return faces
+
+# function to make the reduce function work as a workaround to sum a list of vectors
+def vSum(list):
+ return reduce(lambda a,b: a+b, list)
+
+# creates the 5 platonic solids as a base for the rest
+# plato: should be one of {"4","6","8","12","20"}. decides what solid the
+# outcome will be.
+# returns a list of vertices and faces
+def source(plato):
+ verts = []
+ faces = []
+
+ # Tetrahedron
+ if plato == "4":
+ # Calculate the necessary constants
+ s = sqrt(2)/3.0
+ t = -1/3
+ u = sqrt(6)/3
+
+ # create the vertices and faces
+ v = [(0,0,1),(2*s,0,t),(-s,u,t),(-s,-u,t)]
+ faces = [[0,1,2],[0,2,3],[0,3,1],[1,3,2]]
+
+ # Hexahedron (cube)
+ elif plato == "6":
+ # Calculate the necessary constants
+ s = 1/sqrt(3)
+
+ # create the vertices and faces
+ v = [(-s,-s,-s),(s,-s,-s),(s,s,-s),(-s,s,-s),(-s,-s,s),(s,-s,s),(s,s,s),(-s,s,s)]
+ faces = [[0,3,2,1],[0,1,5,4],[0,4,7,3],[6,5,1,2],[6,2,3,7],[6,7,4,5]]
+
+ # Octahedron
+ elif plato == "8":
+ # create the vertices and faces
+ v = [(1,0,0),(-1,0,0),(0,1,0),(0,-1,0),(0,0,1),(0,0,-1)]
+ faces = [[4,0,2],[4,2,1],[4,1,3],[4,3,0],[5,2,0],[5,1,2],[5,3,1],[5,0,3]]
+
+ # Dodecahedron
+ elif plato == "12":
+ # Calculate the necessary constants
+ s = 1/sqrt(3)
+ t = sqrt((3-sqrt(5))/6)
+ u = sqrt((3+sqrt(5))/6)
+
+ # create the vertices and faces
+ v = [(s,s,s),(s,s,-s),(s,-s,s),(s,-s,-s),(-s,s,s),(-s,s,-s),(-s,-s,s),(-s,-s,-s),
+ (t,u,0),(-t,u,0),(t,-u,0),(-t,-u,0),(u,0,t),(u,0,-t),(-u,0,t),(-u,0,-t),(0,t,u),
+ (0,-t,u),(0,t,-u),(0,-t,-u)]
+ faces = [[0,8,9,4,16],[0,12,13,1,8],[0,16,17,2,12],[8,1,18,5,9],[12,2,10,3,13],
+ [16,4,14,6,17],[9,5,15,14,4],[6,11,10,2,17],[3,19,18,1,13],[7,15,5,18,19],
+ [7,11,6,14,15],[7,19,3,10,11]]
+
+ # Icosahedron
+ elif plato == "20":
+ # Calculate the necessary constants
+ s = (1+sqrt(5))/2
+ t = sqrt(1+s*s)
+ s = s/t
+ t = 1/t
+
+ # create the vertices and faces
+ v = [(s,t,0),(-s,t,0),(s,-t,0),(-s,-t,0),(t,0,s),(t,0,-s),(-t,0,s),(-t,0,-s),
+ (0,s,t),(0,-s,t),(0,s,-t),(0,-s,-t)]
+ faces = [[0,8,4],[0,5,10],[2,4,9],[2,11,5],[1,6,8],[1,10,7],[3,9,6],[3,7,11],
+ [0,10,8],[1,8,10],[2,9,11],[3,11,9],[4,2,0],[5,0,2],[6,1,3],[7,3,1],
+ [8,6,4],[9,4,6],[10,5,7],[11,7,5]]
+
+ # convert the tuples to Vectors
+ verts = [Vector(i) for i in v]
+
+ return verts,faces
+
+# processes the raw data from source
+def createSolid(plato,vtrunc,etrunc,dual,snub):
+ verts = []
+ faces = []
+ edges = []
+ # the duals from each platonic solid
+ dualSource = {"4":"4",
+ "6":"8",
+ "8":"6",
+ "12":"20",
+ "20":"12"}
+
+ # constants saving space and readability
+ vtrunc *= 0.5
+ etrunc *= 0.5
+ supposedSize = 0
+ noSnub = (snub == "None") or (etrunc == 0.5) or (etrunc == 0)
+ lSnub = (snub == "Left") and (0 < etrunc < 0.5)
+ rSnub = (snub == "Right") and (0 < etrunc < 0.5)
+
+ # no truncation
+ if vtrunc == 0:
+ if dual: # dual is as simple as another, but mirrored platonic solid
+ vInput, fInput = source(dualSource[plato])
+ supposedSize = vSum(vInput[i] for i in fInput[0]).length/len(fInput[0])
+ vInput = [-i*supposedSize for i in vInput] # mirror it
+ return vInput, fInput
+ return source(plato)
+ elif 0 < vtrunc <= 0.5: # simple truncation of the source
+ vInput, fInput = source(plato)
+ else:
+ # truncation is now equal to simple truncation of the dual of the source
+ vInput, fInput = source(dualSource[plato])
+ supposedSize = vSum(vInput[i] for i in fInput[0]).length / len(fInput[0])
+ vtrunc = 1-vtrunc # account for the source being a dual
+ if vtrunc == 0: # no truncation needed
+ if dual:
+ vInput, fInput = source(plato)
+ vInput = [i*supposedSize for i in vInput]
+ return vInput, fInput
+ vInput = [-i*supposedSize for i in vInput]
+ return vInput, fInput
+
+ # generate connection database
+ vDict = [{} for i in vInput]
+ # for every face, store what vertex comes after and before the current vertex
+ for x in range(len(fInput)):
+ i = fInput[x]
+ for j in range(len(i)):
+ vDict[i[j-1]][i[j]] = [i[j-2],x]
+ if len(vDict[i[j-1]]) == 1: vDict[i[j-1]][-1] = i[j]
+
+ # the actual connection database: exists out of:
+ # [vtrunc pos, etrunc pos, connected vert IDs, connected face IDs]
+ vData = [[[],[],[],[]] for i in vInput]
+ fvOutput = [] # faces created from truncated vertices
+ feOutput = [] # faces created from truncated edges
+ vOutput = [] # newly created vertices
+ for x in range(len(vInput)):
+ i = vDict[x] # lookup the current vertex
+ current = i[-1]
+ while True: # follow the chain to get a ccw order of connected verts and faces
+ vData[x][2].append(i[current][0])
+ vData[x][3].append(i[current][1])
+ # create truncated vertices
+ vData[x][0].append((1-vtrunc)*vInput[x] + vtrunc*vInput[vData[x][2][-1]])
+ current = i[current][0]
+ if current == i[-1]: break # if we're back at the first: stop the loop
+ fvOutput.append([]) # new face from truncated vert
+ fOffset = x*(len(i)-1) # where to start off counting faceVerts
+ # only create one vert where one is needed (v1 todo: done)
+ if etrunc == 0.5:
+ for j in range(len(i)-1):
+ vOutput.append((vData[x][0][j]+vData[x][0][j-1])*etrunc) # create vert
+ fvOutput[x].append(fOffset+j) # add to face
+ fvOutput[x] = fvOutput[x][1:]+[fvOutput[x][0]] # rotate face for ease later on
+ # create faces from truncated edges.
+ for j in range(len(i)-1):
+ if x > vData[x][2][j]: #only create when other vertex has been added
+ index = vData[vData[x][2][j]][2].index(x)
+ feOutput.append([fvOutput[x][j],fvOutput[x][j-1],
+ fvOutput[vData[x][2][j]][index],
+ fvOutput[vData[x][2][j]][index-1]])
+ # edge truncation between none and full
+ elif etrunc > 0:
+ for j in range(len(i)-1):
+ # create snubs from selecting verts from rectified meshes
+ if rSnub:
+ vOutput.append(etrunc*vData[x][0][j]+(1-etrunc)*vData[x][0][j-1])
+ fvOutput[x].append(fOffset+j)
+ elif lSnub:
+ vOutput.append((1-etrunc)*vData[x][0][j]+etrunc*vData[x][0][j-1])
+ fvOutput[x].append(fOffset+j)
+ else: #noSnub, select both verts from rectified mesh
+ vOutput.append(etrunc*vData[x][0][j]+(1-etrunc)*vData[x][0][j-1])
+ vOutput.append((1-etrunc)*vData[x][0][j]+etrunc*vData[x][0][j-1])
+ fvOutput[x].append(2*fOffset+2*j)
+ fvOutput[x].append(2*fOffset+2*j+1)
+ # rotate face for ease later on
+ if noSnub: fvOutput[x] = fvOutput[x][2:]+fvOutput[x][:2]
+ else: fvOutput[x] = fvOutput[x][1:]+[fvOutput[x][0]]
+ # create single face for each edge
+ if noSnub:
+ for j in range(len(i)-1):
+ if x > vData[x][2][j]:
+ index = vData[vData[x][2][j]][2].index(x)
+ feOutput.append([fvOutput[x][j*2],fvOutput[x][2*j-1],
+ fvOutput[vData[x][2][j]][2*index],
+ fvOutput[vData[x][2][j]][2*index-1]])
+ # create 2 tri's for each edge for the snubs
+ elif rSnub:
+ for j in range(len(i)-1):
+ if x > vData[x][2][j]:
+ index = vData[vData[x][2][j]][2].index(x)
+ feOutput.append([fvOutput[x][j],fvOutput[x][j-1],
+ fvOutput[vData[x][2][j]][index]])
+ feOutput.append([fvOutput[x][j],fvOutput[vData[x][2][j]][index],
+ fvOutput[vData[x][2][j]][index-1]])
+ elif lSnub:
+ for j in range(len(i)-1):
+ if x > vData[x][2][j]:
+ index = vData[vData[x][2][j]][2].index(x)
+ feOutput.append([fvOutput[x][j],fvOutput[x][j-1],
+ fvOutput[vData[x][2][j]][index-1]])
+ feOutput.append([fvOutput[x][j-1],fvOutput[vData[x][2][j]][index],
+ fvOutput[vData[x][2][j]][index-1]])
+ # special rules fro birectified mesh (v1 todo: done)
+ elif vtrunc == 0.5:
+ for j in range(len(i)-1):
+ if x < vData[x][2][j]: # use current vert, since other one has not passed yet
+ vOutput.append(vData[x][0][j])
+ fvOutput[x].append(len(vOutput)-1)
+ else:
+ # search for other edge to avoid duplicity
+ connectee = vData[x][2][j]
+ fvOutput[x].append(fvOutput[connectee][vData[connectee][2].index(x)])
+ else: # vert truncation only
+ vOutput.extend(vData[x][0]) # use generated verts from way above
+ for j in range(len(i)-1): # create face from them
+ fvOutput[x].append(fOffset+j)
+
+ # calculate supposed vertex length to ensure continuity
+ if supposedSize and not dual: # this to make the vtrunc > 1 work
+ supposedSize *= len(fvOutput[0])/vSum(vOutput[i] for i in fvOutput[0]).length
+ vOutput = [-i*supposedSize for i in vOutput]
+
+ # create new faces by replacing old vert IDs by newly generated verts
+ ffOutput = [[] for i in fInput]
+ for x in range(len(fInput)):
+ # only one generated vert per vertex, so choose accordingly
+ if etrunc == 0.5 or (etrunc == 0 and vtrunc == 0.5) or lSnub or rSnub:
+ ffOutput[x] = [fvOutput[i][vData[i][3].index(x)-1] for i in fInput[x]]
+ # two generated verts per vertex
+ elif etrunc > 0:
+ for i in fInput[x]:
+ ffOutput[x].append(fvOutput[i][2*vData[i][3].index(x)-1])
+ ffOutput[x].append(fvOutput[i][2*vData[i][3].index(x)-2])
+ else: # cutting off corners also makes 2 verts
+ for i in fInput[x]:
+ ffOutput[x].append(fvOutput[i][vData[i][3].index(x)])
+ ffOutput[x].append(fvOutput[i][vData[i][3].index(x)-1])
+
+ if not dual:
+ return vOutput,fvOutput + feOutput + ffOutput
+ else:
+ # do the same procedure as above, only now on the generated mesh
+ # generate connection database
+ vDict = [{} for i in vOutput]
+ dvOutput = [0 for i in fvOutput + feOutput + ffOutput]
+ dfOutput = []
+
+ for x in range(len(dvOutput)): # for every face
+ i = (fvOutput + feOutput + ffOutput)[x] # choose face to work with
+ # find vertex from face
+ normal = (vOutput[i[0]]-vOutput[i[1]]).cross(vOutput[i[2]]-vOutput[i[1]]).normalized()
+ dvOutput[x] = normal/(normal.dot(vOutput[i[0]]))
+ for j in range(len(i)): # create vert chain
+ vDict[i[j-1]][i[j]] = [i[j-2],x]
+ if len(vDict[i[j-1]]) == 1: vDict[i[j-1]][-1] = i[j]
+
+ # calculate supposed size for continuity
+ supposedSize = vSum([vInput[i] for i in fInput[0]]).length/len(fInput[0])
+ supposedSize /= dvOutput[-1].length
+ dvOutput = [i*supposedSize for i in dvOutput]
+
+ # use chains to create faces
+ for x in range(len(vOutput)):
+ i = vDict[x]
+ current = i[-1]
+ face = []
+ while True:
+ face.append(i[current][1])
+ current = i[current][0]
+ if current == i[-1]: break
+ dfOutput.append(face)
+
+ return dvOutput,dfOutput
+
+class Solids(bpy.types.Operator):
+ """Add one of the (regular) solids (mesh)"""
+ bl_idname = "mesh.primitive_solid_add"
+ bl_label = "(Regular) solids"
+ bl_description = "Add one of the Platonic, Archimedean or Catalan solids"
+ bl_options = {'REGISTER', 'UNDO'}
+
+ source = EnumProperty(items = (("4","Tetrahedron",""),
+ ("6","Hexahedron",""),
+ ("8","Octahedron",""),
+ ("12","Dodecahedron",""),
+ ("20","Icosahedron","")),
+ name = "Source",
+ description = "Starting point of your solid")
+ size = FloatProperty(name = "Size",
+ description = "Radius of the sphere through the vertices",
+ min = 0.01,
+ soft_min = 0.01,
+ max = 100,
+ soft_max = 100,
+ default = 1.0)
+ vTrunc = FloatProperty(name = "Vertex Truncation",
+ description = "Ammount of vertex truncation",
+ min = 0.0,
+ soft_min = 0.0,
+ max = 2.0,
+ soft_max = 2.0,
+ default = 0.0,
+ precision = 3,
+ step = 0.5)
+ eTrunc = FloatProperty(name = "Edge Truncation",
+ description = "Ammount of edge truncation",
+ min = 0.0,
+ soft_min = 0.0,
+ max = 1.0,
+ soft_max = 1.0,
+ default = 0.0,
+ precision = 3,
+ step = 0.2)
+ snub = EnumProperty(items = (("None","No Snub",""),
+ ("Left","Left Snub",""),
+ ("Right","Right Snub","")),
+ name = "Snub",
+ description = "Create the snub version")
+ dual = BoolProperty(name="Dual",
+ description="Create the dual of the current solid",
+ default=False)
+ keepSize = BoolProperty(name="Keep Size",
+ description="Keep the whole solid at a constant size",
+ default=False)
+ preset = EnumProperty(items = (("0","Custom",""),
+ ("t4","Truncated Tetrahedron",""),
+ ("r4","Cuboctahedron",""),
+ ("t6","Truncated Cube",""),
+ ("t8","Truncated Octahedron",""),
+ ("b6","Rhombicuboctahedron",""),
+ ("c6","Truncated Cuboctahedron",""),
+ ("s6","Snub Cube",""),
+ ("r12","Icosidodecahedron",""),
+ ("t12","Truncated Dodecahedron",""),
+ ("t20","Truncated Icosahedron",""),
+ ("b12","Rhombicosidodecahedron",""),
+ ("c12","Truncated Icosidodecahedron",""),
+ ("s12","Snub Dodecahedron",""),
+ ("dt4","Triakis Tetrahedron",""),
+ ("dr4","Rhombic Dodecahedron",""),
+ ("dt6","Triakis Octahedron",""),
+ ("dt8","Tetrakis Hexahedron",""),
+ ("db6","Deltoidal Icositetrahedron",""),
+ ("dc6","Disdyakis Dodecahedron",""),
+ ("ds6","Pentagonal Icositetrahedron",""),
+ ("dr12","Rhombic Triacontahedron",""),
+ ("dt12","Triakis Icosahedron",""),
+ ("dt20","Pentakis Dodecahedron",""),
+ ("db12","Deltoidal Hexecontahedron",""),
+ ("dc12","Disdyakis Triacontahedron",""),
+ ("ds12","Pentagonal Hexecontahedron","")),
+ name = "Presets",
+ description = "Parameters for some hard names")
+
+ # actual preset values
+ p = {"t4":["4",2/3,0,0,"None"],
+ "r4":["4",1,1,0,"None"],
+ "t6":["6",2/3,0,0,"None"],
+ "t8":["8",2/3,0,0,"None"],
+ "b6":["6",1.0938,1,0,"None"],
+ "c6":["6",1.0572,0.585786,0,"None"],
+ "s6":["6",1.0875,0.704,0,"Left"],
+ "r12":["12",1,0,0,"None"],
+ "t12":["12",2/3,0,0,"None"],
+ "t20":["20",2/3,0,0,"None"],
+ "b12":["12",1.1338,1,0,"None"],
+ "c12":["20",0.921,0.553,0,"None"],
+ "s12":["12",1.1235,0.68,0,"Left"],
+ "dt4":["4",2/3,0,1,"None"],
+ "dr4":["4",1,1,1,"None"],
+ "dt6":["6",2/3,0,1,"None"],
+ "dt8":["8",2/3,0,1,"None"],
+ "db6":["6",1.0938,1,1,"None"],
+ "dc6":["6",1.0572,0.585786,1,"None"],
+ "ds6":["6",1.0875,0.704,1,"Left"],
+ "dr12":["12",1,0,1,"None"],
+ "dt12":["12",2/3,0,1,"None"],
+ "dt20":["20",2/3,0,1,"None"],
+ "db12":["12",1.1338,1,1,"None"],
+ "dc12":["20",0.921,0.553,1,"None"],
+ "ds12":["12",1.1235,0.68,1,"Left"]}
+
+ #previous preset, for User-friendly reasons
+ previousSetting = ""
+
+ def execute(self,context):
+ # turn off undo for better performance (3-5x faster), also makes sure
+ # that mesh ops are undoable and entire script acts as one operator
+ bpy.context.user_preferences.edit.use_global_undo = False
+
+ # piece of code to make presets remain until parameters are changed
+ if self.preset != "0":
+ #if preset, set preset
+ if self.previousSetting != self.preset:
+ using = self.p[self.preset]
+ self.source = using[0]
+ self.vTrunc = using[1]
+ self.eTrunc = using[2]
+ self.dual = using[3]
+ self.snub = using[4]
+ else:
+ using = self.p[self.preset]
+ result0 = self.source == using[0]
+ result1 = abs(self.vTrunc - using[1]) < 0.004
+ result2 = abs(self.eTrunc - using[2]) < 0.0015
+ result4 = using[4] == self.snub or ((using[4] == "Left") and
+ self.snub in ["Left","Right"])
+ if (result0 and result1 and result2 and result4):
+ if self.p[self.previousSetting][3] != self.dual:
+ if self.preset[0] == "d":
+ self.preset = self.preset[1:]
+ else:
+ self.preset = "d" + self.preset
+ else:
+ self.preset = "0"
+
+ self.previousSetting = self.preset
+
+ # generate mesh
+ verts,faces = createSolid(self.source,
+ self.vTrunc,
+ self.eTrunc,
+ self.dual,
+ self.snub)
+
+ # turn n-gons in quads and tri's
+ faces = createPolys(faces)
+
+ # resize to normal size, or if keepSize, make sure all verts are of length 'size'
+ if self.keepSize:
+ if dual: rad = self.size/verts[-1].length
+ else: rad = self.size/verts[0].length
+ else: rad = self.size
+ verts = [i*rad for i in verts]
+
+ # generate object
+ # Create new mesh
+ mesh = bpy.data.meshes.new("Solid")
+
+ # Make a mesh from a list of verts/edges/faces.
+ mesh.from_pydata(verts, [], faces)
+
+ # Update mesh geometry after adding stuff.
+ mesh.update()
+
+ object_data_add(context, mesh, operator=None)
+ # object generation done
+
+ # turn undo back on
+ bpy.context.user_preferences.edit.use_global_undo = True
+
+ return {'FINISHED'}
+
+class Solids_add_menu(bpy.types.Menu):
+ """Define the menu with presets"""
+ bl_idname = "Solids_add_menu"
+ bl_label = "Solids"
+
+ def draw(self,context):
+ layout = self.layout
+ layout.operator_context = 'INVOKE_REGION_WIN'
+ layout.operator(Solids.bl_idname, text = "Solid")
+ layout.menu(PlatonicMenu.bl_idname, text = "Platonic")
+ layout.menu(ArchiMenu.bl_idname, text = "Archimeadean")
+ layout.menu(CatalanMenu.bl_idname, text = "Catalan")
+
+class PlatonicMenu(bpy.types.Menu):
+ """Define Platonic menu"""
+ bl_idname = "Platonic_calls"
+ bl_label = "Platonic"
+
+ def draw(self,context):
+ layout = self.layout
+ layout.operator_context = 'INVOKE_REGION_WIN'
+ layout.operator(Solids.bl_idname, text = "Tetrahedron").source = "4"
+ layout.operator(Solids.bl_idname, text = "Hexahedron").source = "6"
+ layout.operator(Solids.bl_idname, text = "Octahedron").source = "8"
+ layout.operator(Solids.bl_idname, text = "Dodecahedron").source = "12"
+ layout.operator(Solids.bl_idname, text = "Icosahedron").source = "20"
+
+class ArchiMenu(bpy.types.Menu):
+ """Defines Achimedean preset menu"""
+ bl_idname = "Achimedean_calls"
+ bl_label = "Archimedean"
+
+ def draw(self,context):
+ layout = self.layout
+ layout.operator_context = 'INVOKE_REGION_WIN'
+ layout.operator(Solids.bl_idname, text = "Truncated Tetrahedron").preset = "t4"
+ layout.operator(Solids.bl_idname, text = "Cuboctahedron").preset = "r4"
+ layout.operator(Solids.bl_idname, text = "Truncated Cube").preset = "t6"
+ layout.operator(Solids.bl_idname, text = "Truncated Octahedron").preset = "t8"
+ layout.operator(Solids.bl_idname, text = "Rhombicuboctahedron").preset = "b6"
+ layout.operator(Solids.bl_idname, text = "Truncated Cuboctahedron").preset = "c6"
+ layout.operator(Solids.bl_idname, text = "Snub Cube").preset = "s6"
+ layout.operator(Solids.bl_idname, text = "Icosidodecahedron").preset = "r12"
+ layout.operator(Solids.bl_idname, text = "Truncated Dodecahedron").preset = "t12"
+ layout.operator(Solids.bl_idname, text = "Truncated Icosahedron").preset = "t20"
+ layout.operator(Solids.bl_idname, text = "Rhombicosidodecahedron").preset = "b12"
+ layout.operator(Solids.bl_idname, text = "Truncated Icosidodecahedron").preset = "c12"
+ layout.operator(Solids.bl_idname, text = "Snub Dodecahedron").preset = "s12"
+
+class CatalanMenu(bpy.types.Menu):
+ """Defines Catalan preset menu"""
+ bl_idname = "Catalan_calls"
+ bl_label = "Catalan"
+
+ def draw(self, context):
+ layout = self.layout
+ layout.operator_context = 'INVOKE_REGION_WIN'
+ layout.operator(Solids.bl_idname, text = "Triakis Tetrahedron").preset = "dt4"
+ layout.operator(Solids.bl_idname, text = "Rhombic Dodecahedron").preset = "dr4"
+ layout.operator(Solids.bl_idname, text = "Triakis Octahedron").preset = "dt6"
+ layout.operator(Solids.bl_idname, text = "Triakis Hexahedron").preset = "dt8"
+ layout.operator(Solids.bl_idname, text = "Deltoidal Icositetrahedron").preset = "db6"
+ layout.operator(Solids.bl_idname, text = "Disdyakis Dodecahedron").preset = "dc6"
+ layout.operator(Solids.bl_idname, text = "Pentagonal Icositetrahedron").preset = "ds6"
+ layout.operator(Solids.bl_idname, text = "Rhombic Triacontahedron").preset = "dr12"
+ layout.operator(Solids.bl_idname, text = "Triakis Icosahedron").preset = "dt12"
+ layout.operator(Solids.bl_idname, text = "Pentakis Dodecahedron").preset = "dt20"
+ layout.operator(Solids.bl_idname, text = "Deltoidal Hexecontahedron").preset = "db12"
+ layout.operator(Solids.bl_idname, text = "Disdyakis Triacontahedron").preset = "dc12"
+ layout.operator(Solids.bl_idname, text = "Pentagonal Hexecontahedron").preset = "ds12"
+
+def menu_func(self, context):
+ self.layout.menu(Solids_add_menu.bl_idname, icon="PLUGIN")
+
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ bpy.types.INFO_MT_mesh_add.append(menu_func)
+
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+ bpy.types.INFO_MT_mesh_add.remove(menu_func)
+
+
+if __name__ == "__main__":
+ register()
diff --git a/animation_add_corrective_shape_key.py b/animation_add_corrective_shape_key.py
new file mode 100644
index 00000000..482c458d
--- /dev/null
+++ b/animation_add_corrective_shape_key.py
@@ -0,0 +1,504 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+bl_info = {
+ 'name': 'Corrective shape keys',
+ 'author': 'Ivo Grigull (loolarge), Tal Trachtman',
+ 'version': (1, 0),
+ "blender": (2, 5, 7),
+ "api": 36157,
+ 'location': 'Object Data > Shape Keys (Search: corrective) ',
+ 'description': 'Creates a corrective shape key for the current pose',
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/Animation/Corrective_Shape_Key",
+ "tracker_url": "https://projects.blender.org/tracker/index.php?"\
+ "func=detail&aid=22129",
+ 'category': 'Animation'}
+
+"""
+This script transfer the shape from an object (base mesh without
+modifiers) to another object with modifiers (i.e. posed Armature).
+Only two objects must be selected.
+The first selected object will be added to the second selected
+object as a new shape key.
+
+- Original 2.4x script by ? (brecht?)
+- Unpose-function reused from a script by Tal Trachtman in 2007
+ http://www.apexbow.com/randd.html
+- Converted to Blender 2.5 by Ivo Grigull
+
+Limitations:
+- Target mesh may not have any transformation at object level,
+ it will be set to zero.
+- Fast/Armature method does not work with Bone envelopes or dual quaternions,
+ both settings will be disabled in the modifier
+"""
+
+
+import bpy
+import mathutils
+
+
+iterations = 20
+threshold = 1e-16
+
+def reset_transform(ob):
+ m = mathutils.Matrix()
+ ob.matrix_local = m
+
+# flips rotation matrix
+def flip_matrix_direction(m):
+ mat = mathutils.Matrix()
+
+ mat[0][0] = m[0][0]
+ mat[0][1] = m[1][0]
+ mat[0][2] = m[2][0]
+
+ mat[1][0] = m[0][1]
+ mat[1][1] = m[1][1]
+ mat[1][2] = m[2][1]
+
+ mat[2][0] = m[0][2]
+ mat[2][1] = m[1][2]
+ mat[2][2] = m[2][2]
+
+ return mat
+
+# this version is for shape_key data
+def extractX(ob, mesh):
+ x = []
+
+ for i in range(0, len(mesh)):
+ v = mesh[i]
+ x += [mathutils.Vector(v.co)]
+
+ return x
+
+# this version is for mesh data
+def extractX_2(ob, mesh):
+ x = []
+
+ for i in range(0, len(mesh.vertices)):
+ v = mesh.vertices[i]
+ x += [mathutils.Vector(v.co)]
+
+ return x
+
+def extractMappedX(ob, mesh):
+ totvert = len(mesh)
+
+ mesh = ob.to_mesh( bpy.context.scene, True, 'PREVIEW' )
+
+ x = []
+
+ # cheating, the original mapped verts happen
+ # to be at the end of the vertex array
+ for i in range(len(mesh.vertices)-totvert, len(mesh.vertices)):
+ v = mesh.vertices[i]
+ x += [mathutils.Vector(v.co)]
+
+ mesh.user_clear()
+ bpy.data.meshes.remove(mesh)
+
+ return x
+
+def applyX(ob, mesh, x ):
+ for i in range(0, len(mesh)):
+ v = mesh[i]
+ v.co = x[i]
+
+ ob.data.update()
+
+ return x
+
+
+def func_add_corrective_pose_shape( source, target):
+
+ ob_1 = target
+ mesh_1 = target.data
+ ob_2 = source
+ mesh_2 = source.data
+
+ reset_transform(target)
+
+ # If target object doesn't have Basis shape key, create it.
+ try:
+ num_keys = len( mesh_1.shape_keys.key_blocks )
+ except:
+ basis = ob_1.shape_key_add()
+ basis.name = "Basis"
+ ob_1.data.update()
+
+
+ key_index = ob_1.active_shape_key_index
+ # Insert new shape key
+ if key_index == 0:
+ new_shapekey = ob_1.shape_key_add()
+ new_shapekey.name = "Shape_" + ob_2.name
+ new_shapekey_name = new_shapekey.name
+
+ key_index = len(mesh_1.shape_keys.key_blocks)-1
+ ob_1.active_shape_key_index = key_index
+
+ # else, the active shape will be used (updated)
+
+ ob_1.show_only_shape_key = True
+
+ vgroup = ob_1.active_shape_key.vertex_group
+ ob_1.active_shape_key.vertex_group = ""
+
+ mesh_1_key_verts = mesh_1.shape_keys.key_blocks[ key_index ].data
+
+
+ x = extractX(ob_1, mesh_1_key_verts)
+
+ targetx = extractX_2(ob_2, mesh_2)
+
+ for iteration in range(0, iterations):
+ dx = [[], [], [], [], [], []]
+
+ mapx = extractMappedX(ob_1, mesh_1_key_verts)
+
+ # finite differencing in X/Y/Z to get approximate gradient
+ for i in range(0, len(mesh_1.vertices)):
+ epsilon = (targetx[i] - mapx[i]).length
+
+ if epsilon < threshold:
+ epsilon = 0.0
+
+ dx[0] += [x[i] + 0.5*epsilon*mathutils.Vector([1, 0, 0])]
+ dx[1] += [x[i] + 0.5*epsilon*mathutils.Vector([-1, 0, 0])]
+ dx[2] += [x[i] + 0.5*epsilon*mathutils.Vector([0, 1, 0])]
+ dx[3] += [x[i] + 0.5*epsilon*mathutils.Vector([0, -1, 0])]
+ dx[4] += [x[i] + 0.5*epsilon*mathutils.Vector([0, 0, 1])]
+ dx[5] += [x[i] + 0.5*epsilon*mathutils.Vector([0, 0, -1])]
+
+ for j in range(0, 6):
+ applyX(ob_1, mesh_1_key_verts, dx[j] )
+ dx[j] = extractMappedX(ob_1, mesh_1_key_verts)
+
+ # take a step in the direction of the gradient
+ for i in range(0, len(mesh_1.vertices)):
+ epsilon = (targetx[i] - mapx[i]).length
+
+ if epsilon >= threshold:
+ Gx = list((dx[0][i] - dx[1][i])/epsilon)
+ Gy = list((dx[2][i] - dx[3][i])/epsilon)
+ Gz = list((dx[4][i] - dx[5][i])/epsilon)
+ G = mathutils.Matrix((Gx, Gy, Gz))
+ G = flip_matrix_direction(G)
+
+ x[i] += (targetx[i] - mapx[i]) * G
+
+ applyX(ob_1, mesh_1_key_verts, x )
+
+
+ ob_1.active_shape_key.vertex_group = vgroup
+
+ # set the new shape key value to 1.0, so we see the result instantly
+ ob_1.active_shape_key.value = 1.0
+
+ #mesh_1.update()
+ ob_1.show_only_shape_key = False
+
+
+class add_corrective_pose_shape(bpy.types.Operator):
+ '''Adds first object as shape to second object for the current pose while maintaining modifiers (i.e. anisculpt, avoiding crazy space) Beware of slowness!!!'''
+
+ bl_idname = "object.add_corrective_pose_shape"
+ bl_label = "Add object as corrective pose shape"
+
+ @classmethod
+ def poll(cls, context):
+ return context.active_object != None
+
+ def execute(self, context):
+
+ if len(context.selected_objects) > 2:
+ print("Select source and target objects please")
+ return {'FINISHED'}
+
+ selection = context.selected_objects
+ target = context.active_object
+ if context.active_object == selection[0]:
+ source = selection[1]
+ else:
+ source = selection[0]
+
+ #~ print(source)
+ #~ print(target)
+ func_add_corrective_pose_shape( source, target)
+
+ return {'FINISHED'}
+
+def func_object_duplicate_flatten_modifiers(ob, scene):
+ mesh = ob.to_mesh( bpy.context.scene, True, 'PREVIEW' )
+ name = ob.name + "_clean"
+ new_object = bpy.data.objects.new( name, mesh)
+ new_object.data = mesh
+ scene.objects.link(new_object)
+ return new_object
+
+class object_duplicate_flatten_modifiers(bpy.types.Operator):
+ '''Duplicates the selected object with modifiers applied'''
+
+ bl_idname = "object.object_duplicate_flatten_modifiers"
+ bl_label = "Duplicate and apply all"
+
+ @classmethod
+ def poll(cls, context):
+ return context.active_object != None
+
+ def execute(self, context):
+ new_object = func_object_duplicate_flatten_modifiers( context.active_object, context.scene )
+ context.scene.objects.active = new_object
+
+ for n in bpy.data.objects:
+ if n != new_object:
+ n.select = False
+ else:
+ n.select = True
+ return {'FINISHED'}
+
+
+
+
+def flip_matrix_direction_4x4(m):
+ mat = mathutils.Matrix()
+
+ mat[0][0] = m[0][0]
+ mat[0][1] = m[1][0]
+ mat[0][2] = m[2][0]
+ mat[0][3] = m[3][0]
+
+ mat[1][0] = m[0][1]
+ mat[1][1] = m[1][1]
+ mat[1][2] = m[2][1]
+ mat[1][3] = m[3][1]
+
+ mat[2][0] = m[0][2]
+ mat[2][1] = m[1][2]
+ mat[2][2] = m[2][2]
+ mat[2][3] = m[3][2]
+
+ mat[3][0] = m[0][3]
+ mat[3][1] = m[1][3]
+ mat[3][2] = m[2][3]
+ mat[3][3] = m[3][3]
+ return mat
+
+
+def unposeMesh(meshObToUnpose, meshObToUnposeWeightSrc, armatureOb):
+ psdMeshData = meshObToUnpose
+
+ psdMesh = psdMeshData
+ I = mathutils.Matrix() #identity matrix
+
+ meshData = meshObToUnposeWeightSrc.data
+ mesh = meshData
+
+ armData = armatureOb.data
+
+ pose = armatureOb.pose
+ pbones = pose.bones
+
+
+ for index, v in enumerate(mesh.vertices):
+ # above is python shortcut for:index goes up from 0 to tot num of verts in mesh,
+ # with index incrementing by 1 each iteration
+
+ psdMeshVert = psdMesh[index]
+
+ listOfBoneNameWeightPairs = []
+ for n in mesh.vertices[index].groups:
+ try:
+ name = meshObToUnposeWeightSrc.vertex_groups[n.group].name
+ weight = n.weight
+ is_bone = False
+ for i in armData.bones:
+ if i.name == name:
+ is_bone = True
+ break
+ # ignore non-bone vertex groups
+ if is_bone:
+ listOfBoneNameWeightPairs.append( [name, weight] )
+ except:
+ print('error')
+ pass
+
+ weightedAverageDictionary = {}
+ totalWeight = 0
+ for pair in listOfBoneNameWeightPairs:
+ totalWeight += pair[1]
+
+ for pair in listOfBoneNameWeightPairs:
+ if (totalWeight>0): #avoid divide by zero!
+ weightedAverageDictionary[pair[0]] = pair[1]/totalWeight
+ else:
+ weightedAverageDictionary[pair[0]] = 0
+
+ sigma = mathutils.Matrix(I-I) #Matrix filled with zeros
+
+ list = []
+ for n in pbones:
+ list.append(n)
+ list.reverse()
+
+ for pbone in list:
+ if pbone.name in weightedAverageDictionary:
+ #~ print("found key %s", pbone.name)
+ vertexWeight = weightedAverageDictionary[pbone.name]
+ m = pbone.matrix_channel.copy()
+ #m = flip_matrix_direction_4x4(m)
+ sigma += (m - I) * vertexWeight
+
+ else:
+ pass
+ #~ print("no key for bone " + pbone.name)
+
+ sigma = I + sigma
+ sigma.invert()
+ psdMeshVert.co = psdMeshVert.co * sigma
+
+
+
+def func_add_corrective_pose_shape_fast(source, target):
+
+
+ reset_transform(target)
+
+ # If target object doesn't have Basis shape key, create it.
+ try:
+ num_keys = len( target.data.shape_keys.key_blocks )
+ except:
+ basis = target.shape_key_add()
+ basis.name = "Basis"
+ target.data.update()
+
+ key_index = target.active_shape_key_index
+
+ if key_index == 0:
+
+ # Insert new shape key
+ new_shapekey = target.shape_key_add()
+ new_shapekey.name = "Shape_" + source.name
+ new_shapekey_name = new_shapekey.name
+
+ key_index = len(target.data.shape_keys.key_blocks)-1
+ target.active_shape_key_index = key_index
+
+ # else, the active shape will be used (updated)
+
+ target.show_only_shape_key = True
+
+ shape_key_verts = target.data.shape_keys.key_blocks[ key_index ].data
+
+ try:
+ vgroup = target.active_shape_key.vertex_group
+ target.active_shape_key.vertex_group = ''
+ except:
+ print("blub")
+ pass
+
+ # copy the local vertex positions to the new shape
+ verts = source.data.vertices
+ for n in range( len(verts)):
+ shape_key_verts[n].co = verts[n].co
+
+ # go to all armature modifies and unpose the shape
+ for n in target.modifiers:
+ if n.type == 'ARMATURE' and n.show_viewport:
+ #~ print("got one")
+ n.use_bone_envelopes = False
+ n.use_deform_preserve_volume = False
+ n.use_vertex_groups = True
+ armature = n.object
+ unposeMesh( shape_key_verts, target, armature)
+ break
+
+ # set the new shape key value to 1.0, so we see the result instantly
+ target.data.shape_keys.key_blocks[ target.active_shape_key_index].value = 1.0
+
+ try:
+ target.active_shape_key.vertex_group = vgroup
+ except:
+ #~ print("bluba")
+ pass
+
+ target.show_only_shape_key = False
+ target.data.update()
+
+
+
+class add_corrective_pose_shape_fast(bpy.types.Operator):
+ '''Adds 1st object as shape to 2nd object as pose shape (only 1 armature)'''
+
+ bl_idname = "object.add_corrective_pose_shape_fast"
+ bl_label = "Add object as corrective shape faster"
+
+ @classmethod
+ def poll(cls, context):
+ return context.active_object != None
+
+ def execute(self, context):
+
+ if len(context.selected_objects) > 2:
+ print("Select source and target objects please")
+ return {'FINISHED'}
+
+ selection = context.selected_objects
+ target = context.active_object
+ if context.active_object == selection[0]:
+ source = selection[1]
+ else:
+ source = selection[0]
+
+ print(source)
+ print(target)
+ func_add_corrective_pose_shape_fast( source, target)
+
+ return {'FINISHED'}
+
+
+
+
+## GUI
+def vgroups_draw(self, context):
+ layout = self.layout
+
+ layout.row().operator("object.object_duplicate_flatten_modifiers", text='Create duplicate for editing' )
+ layout.row().operator("object.add_corrective_pose_shape_fast", text='Add as corrective pose-shape (fast, armatures only)', icon='COPY_ID') # icon is not ideal
+ layout.row().operator("object.add_corrective_pose_shape", text='Add as corrective pose-shape (slow, all modifiers)', icon='COPY_ID') # icon is not ideal
+
+def modifiers_draw(self, context):
+ layout = self.layout
+
+
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ bpy.types.MESH_MT_shape_key_specials.append( vgroups_draw )
+ bpy.types.DATA_PT_modifiers.append( modifiers_draw )
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+
+if __name__ == "__main__":
+ register()
diff --git a/animation_animall.py b/animation_animall.py
new file mode 100644
index 00000000..71c038be
--- /dev/null
+++ b/animation_animall.py
@@ -0,0 +1,290 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+bl_info = {
+ 'name': 'AnimAll',
+ 'author': 'Daniel Salazar <zanqdo@gmail.com>',
+ 'version': (0, 4),
+ "blender": (2, 5, 7),
+ "api": 35622,
+ 'location': 'Select a Mesh: Tool Shelf > AnimAll panel',
+ 'description': 'Allows animation of mesh and lattice data (Shape Keys, VCols, VGroups, UVs)',
+ 'warning': '',
+ 'wiki_url': 'http://wiki.blender.org/index.php/Extensions:2.5/Py/Scripts/Animation/AnimAll',
+ 'tracker_url': 'http://projects.blender.org/tracker/index.php?'\
+ 'func=detail&aid=24874',
+ 'category': 'Animation'}
+
+'''-------------------------------------------------------------------------
+Thanks to Campbell Barton and Joshua Leung for hes API additions and fixes
+Daniel 'ZanQdo' Salazar
+
+Rev 0.1 initial release (animate Mesh points)
+Rev 0.2 added support for animating UVs, VCols, VGroups
+Rev 0.3 added support for animating Lattice points
+Rev 0.4 added support for ShapeKey layer animation, removed support
+for direct point animation since this new aproach is much stronger
+and inline with the animation system
+-------------------------------------------------------------------------'''
+
+import bpy
+from bpy.props import *
+
+
+#
+# Property Definitions
+#
+bpy.types.WindowManager.key_shape = BoolProperty(
+ name="Shape",
+ description="Insert keyframes on active Shape Key layer",
+ default=True)
+
+bpy.types.WindowManager.key_uvs = BoolProperty(
+ name="UVs",
+ description="Insert keyframes on active UV coordinates",
+ default=False)
+
+bpy.types.WindowManager.key_vcols = BoolProperty(
+ name="VCols",
+ description="Insert keyframes on active Vertex Color values",
+ default=False)
+
+bpy.types.WindowManager.key_vgroups = BoolProperty(
+ name="VGroups",
+ description="Insert keyframes on active Vertex Group values",
+ default=False)
+
+
+#
+# GUI (Panel)
+#
+class VIEW3D_PT_animall(bpy.types.Panel):
+
+ bl_space_type = 'VIEW_3D'
+ bl_region_type = 'TOOLS'
+ bl_label = 'AnimAll'
+
+ # show this add-on only in the Camera-Data-Panel
+ @classmethod
+ def poll(self, context):
+ if context.active_object:
+ return context.active_object.type == 'MESH'\
+ or context.active_object.type == 'LATTICE'
+
+ # draw the gui
+ def draw(self, context):
+
+ Obj = context.active_object
+
+ layout = self.layout
+
+ col = layout.column(align=True)
+
+ #col.label(text="Keyframing:")
+ row = col.row()
+ row.prop(context.window_manager, "key_shape")
+ if context.active_object.type == 'MESH':
+ row.prop(context.window_manager, "key_uvs")
+ row = col.row()
+ row.prop(context.window_manager, "key_vcols")
+ row.prop(context.window_manager, "key_vgroups")
+
+ row = col.row()
+ row.operator('anim.insert_keyframe_animall', icon='KEY_HLT')
+ row.operator('anim.delete_keyframe_animall', icon='KEY_DEHLT')
+ row = layout.row()
+ row.operator('anim.clear_animation_animall', icon='X')
+
+ if context.window_manager.key_shape:
+
+ ShapeKey = Obj.active_shape_key
+
+ split = layout.split()
+ row = split.row()
+
+ if ShapeKey:
+ row.label(ShapeKey.name, icon='SHAPEKEY_DATA')
+ row.prop(ShapeKey, "value", text="")
+ row.prop(Obj, "show_only_shape_key", text="")
+ else:
+ row.label('No active ShapeKey', icon='INFO')
+
+
+class ANIM_OT_insert_keyframe_animall(bpy.types.Operator):
+ bl_label = 'Insert'
+ bl_idname = 'anim.insert_keyframe_animall'
+ bl_description = 'Insert a Keyframe'
+ bl_options = {'REGISTER', 'UNDO'}
+
+
+ # on mouse up:
+ def invoke(self, context, event):
+
+ self.execute(context)
+
+ return {'FINISHED'}
+
+
+ def execute(op, context):
+
+ Obj = context.active_object
+
+ if Obj.type == 'MESH':
+ Mode = False
+ if context.mode == 'EDIT_MESH':
+ Mode = not Mode
+ bpy.ops.object.editmode_toggle()
+
+ Data = Obj.data
+
+ if context.window_manager.key_shape:
+ if Obj.active_shape_key:
+ for Point in Obj.active_shape_key.data:
+ Point.keyframe_insert('co')
+
+ if context.window_manager.key_vgroups:
+ for Vert in Data.vertices:
+ for Group in Vert.groups:
+ Group.keyframe_insert('weight')
+
+ if context.window_manager.key_uvs:
+ for UVLayer in Data.uv_textures:
+ if UVLayer.active: # only insert in active UV layer
+ for Data in UVLayer.data:
+ Data.keyframe_insert('uv')
+
+ if context.window_manager.key_vcols:
+ for VColLayer in Data.vertex_colors:
+ if VColLayer.active: # only insert in active VCol layer
+ for Data in VColLayer.data:
+ Data.keyframe_insert('color1')
+ Data.keyframe_insert('color2')
+ Data.keyframe_insert('color3')
+ Data.keyframe_insert('color4')
+
+ if Mode:
+ bpy.ops.object.editmode_toggle()
+
+ if Obj.type == 'LATTICE':
+ if context.window_manager.key_shape:
+ if Obj.active_shape_key:
+ for Point in Obj.active_shape_key.data:
+ Point.keyframe_insert('co')
+
+
+ return {'FINISHED'}
+
+
+class ANIM_OT_delete_keyframe_animall(bpy.types.Operator):
+ bl_label = 'Delete'
+ bl_idname = 'anim.delete_keyframe_animall'
+ bl_description = 'Delete a Keyframe'
+ bl_options = {'REGISTER', 'UNDO'}
+
+
+ # on mouse up:
+ def invoke(self, context, event):
+
+ self.execute(context)
+
+ return {'FINISHED'}
+
+
+ def execute(op, context):
+
+ Obj = context.active_object
+
+ if Obj.type == 'MESH':
+ Mode = False
+ if context.mode == 'EDIT_MESH':
+ Mode = not Mode
+ bpy.ops.object.editmode_toggle()
+
+ Data = Obj.data
+
+ if context.window_manager.key_shape:
+ if Obj.active_shape_key:
+ for Point in Obj.active_shape_key.data:
+ Point.keyframe_delete('co')
+
+ if context.window_manager.key_vgroups:
+ for Vert in Data.vertices:
+ for Group in Vert.groups:
+ Group.keyframe_delete('weight')
+
+ if context.window_manager.key_uvs:
+ for UVLayer in Data.uv_textures:
+ if UVLayer.active: # only delete in active UV layer
+ for Data in UVLayer.data:
+ Data.keyframe_delete('uv')
+
+ if context.window_manager.key_vcols:
+ for VColLayer in Data.vertex_colors:
+ if VColLayer.active: # only delete in active VCol layer
+ for Data in VColLayer.data:
+ Data.keyframe_delete('color1')
+ Data.keyframe_delete('color2')
+ Data.keyframe_delete('color3')
+ Data.keyframe_delete('color4')
+
+
+ if Mode:
+ bpy.ops.object.editmode_toggle()
+
+ if Obj.type == 'LATTICE':
+ if context.window_manager.key_shape:
+ if Obj.active_shape_key:
+ for Point in Obj.active_shape_key.data:
+ Point.keyframe_delete('co')
+
+ return {'FINISHED'}
+
+
+class ANIM_OT_clear_animation_animall(bpy.types.Operator):
+ bl_label = 'Clear Animation'
+ bl_idname = 'anim.clear_animation_animall'
+ bl_description = 'Clear all animation'
+ bl_options = {'REGISTER', 'UNDO'}
+
+ # on mouse up:
+ def invoke(self, context, event):
+
+ wm = context.window_manager
+ return wm.invoke_confirm(self, event)
+
+
+ def execute(op, context):
+
+ Data = context.active_object.data
+ Data.animation_data_clear()
+
+ return {'FINISHED'}
+
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ pass
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+ pass
+
+if __name__ == "__main__":
+ register()
diff --git a/animation_rotobezier.py b/animation_rotobezier.py
new file mode 100644
index 00000000..5b92fc32
--- /dev/null
+++ b/animation_rotobezier.py
@@ -0,0 +1,382 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+bl_info = {
+ 'name': 'RotoBezier',
+ 'author': 'Daniel Salazar <zanqdo@gmail.com>',
+ 'version': (0, 8),
+ "blender": (2, 5, 7),
+ "api": 35622,
+ 'location': 'Select a Curve: Tool Shelf > RotoBezier Panel',
+ 'description': 'Allows animation of Bezier and NURBS curves',
+ 'warning': '',
+ 'wiki_url': 'http://wiki.blender.org/index.php/Extensions:2.5/Py/'\
+ 'Scripts/Animation/RotoBezier',
+ 'tracker_url': 'http://projects.blender.org/tracker/index.php?'\
+ 'func=detail&aid=24839',
+ 'category': 'Animation'}
+
+'''
+-------------------------------------------------------------------------
+Thanks to Campbell Barton for his API additions and fixes
+Daniel Salazar - ZanQdo
+
+Rev 0.1 Initial release
+Rev 0.2 New make matte object tools and convenient display toggles
+Rev 0.3 Tool to clear all animation from the curve
+Rev 0.4 Moved from curve properties to toolbar
+Rev 0.5 Added pass index property
+Rev 0.6 Re-arranged UI
+Rev 0.7 Adding options for what properties to keyframe
+Rev 0.8 Allowing to key NURBS
+-------------------------------------------------------------------------
+'''
+
+import bpy
+from bpy.props import *
+
+
+#
+# Property Definitions
+#
+bpy.types.WindowManager.key_points = BoolProperty(
+ name="Points",
+ description="Insert keyframes on point locations",
+ default=True)
+
+bpy.types.WindowManager.key_radius = BoolProperty(
+ name="Radius",
+ description="Insert keyframes on point radius (Shrink/Fatten)",
+ default=False)
+
+bpy.types.WindowManager.key_tilt = BoolProperty(
+ name="Tilt",
+ description="Insert keyframes on point tilt",
+ default=False)
+
+
+#
+# GUI (Panel)
+#
+class VIEW3D_PT_rotobezier(bpy.types.Panel):
+ bl_space_type = 'VIEW_3D'
+ bl_region_type = 'TOOLS'
+ bl_label = 'RotoBezier'
+
+ # show this add-on only in the Camera-Data-Panel
+ @classmethod
+ def poll(self, context):
+ if context.active_object:
+ return context.active_object.type == 'CURVE'
+
+ # draw the gui
+ def draw(self, context):
+ layout = self.layout
+
+ col = layout.column(align=True)
+
+ col.label(text="Keyframing:")
+ row = col.row()
+ row.prop(context.window_manager, "key_points")
+ row.prop(context.window_manager, "key_radius")
+ row.prop(context.window_manager, "key_tilt")
+
+ row = col.row()
+ row.operator('curve.insert_keyframe_rotobezier', icon='KEY_HLT')
+ row.operator('curve.delete_keyframe_rotobezier', icon='KEY_DEHLT')
+ row = layout.row()
+ row.operator('curve.clear_animation_rotobezier', icon='X')
+
+ col = layout.column()
+
+ col.label(text="Display:")
+ row = col.row()
+ row.operator('curve.toggle_draw_rotobezier', icon='MESH_CIRCLE')
+
+ if context.mode == 'EDIT_CURVE':
+ row.operator('curve.toggle_handles_rotobezier', icon='CURVE_BEZCIRCLE')
+
+ col = layout.column(align=True)
+
+ col.label(text="Tools:")
+ row = col.row()
+ row.operator('curve.make_white_matte_rotobezier')
+ row.operator('curve.make_black_matte_rotobezier')
+ row = layout.row()
+ ob = context.active_object
+ row.prop(ob, "pass_index")
+
+
+class CURVE_OT_insert_keyframe_rotobezier(bpy.types.Operator):
+ bl_label = 'Insert'
+ bl_idname = 'curve.insert_keyframe_rotobezier'
+ bl_description = 'Insert/Replace all Keyframes in current frame'
+ bl_options = {'REGISTER', 'UNDO'}
+
+ # on mouse up:
+ def invoke(self, context, event):
+ self.execute(context)
+ return {'FINISHED'}
+
+ @classmethod
+ def poll(cls, context):
+ return (context.active_object.type == 'CURVE')
+
+ def execute(op, context):
+
+ Obj = context.active_object
+
+ Mode = False
+ if context.mode != 'OBJECT':
+ Mode = not Mode
+ bpy.ops.object.editmode_toggle()
+ Data = Obj.data
+
+ for Spline in Data.splines:
+ if Spline.type == 'BEZIER':
+ for CV in Spline.bezier_points:
+ if context.window_manager.key_points:
+ CV.keyframe_insert('co')
+ CV.keyframe_insert('handle_left')
+ CV.keyframe_insert('handle_right')
+ if context.window_manager.key_radius:
+ CV.keyframe_insert('radius')
+ if context.window_manager.key_tilt:
+ CV.keyframe_insert('tilt')
+
+ elif Spline.type == 'NURBS':
+ for CV in Spline.points:
+ if context.window_manager.key_points:
+ CV.keyframe_insert('co')
+ if context.window_manager.key_radius:
+ CV.keyframe_insert('radius')
+ if context.window_manager.key_tilt:
+ CV.keyframe_insert('tilt')
+
+ if Mode:
+ bpy.ops.object.editmode_toggle()
+
+
+ return {'FINISHED'}
+
+
+class CURVE_OT_delete_keyframe_rotobezier(bpy.types.Operator):
+ bl_label = 'Delete'
+ bl_idname = 'curve.delete_keyframe_rotobezier'
+ bl_description = 'Delete all keyframes in current frame'
+ bl_options = {'REGISTER', 'UNDO'}
+
+ # on mouse up:
+ def invoke(self, context, event):
+ self.execute(context)
+ return {'FINISHED'}
+
+ @classmethod
+ def poll(cls, context):
+ return (context.active_object.type == 'CURVE')
+
+ def execute(op, context):
+
+ Obj = context.active_object
+
+ Mode = False
+ if context.mode != 'OBJECT':
+ Mode = not Mode
+ bpy.ops.object.editmode_toggle()
+ Data = Obj.data
+
+ for Spline in Data.splines:
+ if Spline.type == 'BEZIER':
+ for CV in Spline.bezier_points:
+ if context.window_manager.key_points:
+ CV.keyframe_delete('co')
+ CV.keyframe_delete('handle_left')
+ CV.keyframe_delete('handle_right')
+ if context.window_manager.key_radius:
+ CV.keyframe_delete('radius')
+ if context.window_manager.key_tilt:
+ CV.keyframe_delete('tilt')
+
+ elif Spline.type == 'NURBS':
+ for CV in Spline.points:
+ if context.window_manager.key_points:
+ CV.keyframe_delete('co')
+ if context.window_manager.key_radius:
+ CV.keyframe_delete('radius')
+ if context.window_manager.key_tilt:
+ CV.keyframe_delete('tilt')
+
+ if Mode:
+ bpy.ops.object.editmode_toggle()
+
+ return {'FINISHED'}
+
+
+class CURVE_OT_clear_animation_rotobezier(bpy.types.Operator):
+ bl_label = 'Clear Animation'
+ bl_idname = 'curve.clear_animation_rotobezier'
+ bl_description = 'Clear all animation from the curve'
+ bl_options = {'REGISTER', 'UNDO'}
+
+ # on mouse up:
+ def invoke(self, context, event):
+ wm = context.window_manager
+ return wm.invoke_confirm(self, event)
+
+ def execute(op, context):
+ Data = context.active_object.data
+ Data.animation_data_clear()
+ return {'FINISHED'}
+
+
+def MakeMatte (Type):
+ '''
+ Matte Material Assignment Function
+ '''
+
+ Obj = bpy.context.active_object
+
+ # Material
+
+ if Type == 'White':
+ MatName = 'RotoBezier_WhiteMatte'
+ MatCol = (1,1,1)
+
+ elif Type == 'Black':
+ MatName = 'RotoBezier_BlackMatte'
+ MatCol = (0,0,0)
+
+ if bpy.data.materials.get(MatName):
+ Mat = bpy.data.materials[MatName]
+ if not Obj.material_slots:
+ bpy.ops.object.material_slot_add()
+ Obj.material_slots[0].material = Mat
+
+ else:
+ Mat = bpy.data.materials.new(MatName)
+ Mat.diffuse_color = MatCol
+ Mat.use_shadeless = True
+ Mat.use_raytrace = False
+ Mat.use_shadows = False
+ Mat.use_cast_buffer_shadows = False
+ Mat.use_cast_approximate = False
+
+ if not Obj.material_slots:
+ bpy.ops.object.material_slot_add()
+
+ Obj.material_slots[0].material = Mat
+
+ # Settings
+ Curve = Obj.data
+
+ Curve.dimensions = '2D'
+ Curve.use_fill_front = False
+ Curve.use_fill_back = False
+
+
+class CURVE_OT_make_white_matte_rotobezier(bpy.types.Operator):
+ bl_label = 'White Matte'
+ bl_idname = 'curve.make_white_matte_rotobezier'
+ bl_description = 'Make this curve a white matte'
+ bl_options = {'REGISTER', 'UNDO'}
+
+ # on mouse up:
+ def invoke(self, context, event):
+ self.execute(context)
+ return {'FINISHED'}
+
+ def execute(op, context):
+ MakeMatte('White')
+ return {'FINISHED'}
+
+
+class CURVE_OT_make_black_matte_rotobezier(bpy.types.Operator):
+ bl_label = 'Black Matte'
+ bl_idname = 'curve.make_black_matte_rotobezier'
+ bl_description = 'Make this curve a black matte'
+ bl_options = {'REGISTER', 'UNDO'}
+
+ # on mouse up:
+ def invoke(self, context, event):
+ self.execute(context)
+ return {'FINISHED'}
+
+ def execute(op, context):
+ MakeMatte('Black')
+ return {'FINISHED'}
+
+
+class CURVE_OT_toggle_handles_rotobezier(bpy.types.Operator):
+ bl_label = 'Handles'
+ bl_idname = 'curve.toggle_handles_rotobezier'
+ bl_description = 'Toggle the curve handles display in edit mode'
+ bl_options = {'REGISTER', 'UNDO'}
+
+ # on mouse up:
+ def invoke(self, context, event):
+ self.execute(context)
+ return {'FINISHED'}
+
+ def execute(op, context):
+ Obj = context.active_object
+ Curve = Obj.data
+ if Curve.show_handles:
+ Curve.show_handles = False
+ else:
+ Curve.show_handles = True
+ return {'FINISHED'}
+
+
+class CURVE_OT_toggle_draw_rotobezier(bpy.types.Operator):
+ bl_label = 'Filling'
+ bl_idname = 'curve.toggle_draw_rotobezier'
+ bl_description = 'Toggle the curve display mode between Wire and Solid'
+ bl_options = {'REGISTER', 'UNDO'}
+
+ # on mouse up:
+ def invoke(self, context, event):
+ self.execute(context)
+ return {'FINISHED'}
+
+ def execute(op, context):
+ Obj = context.active_object
+
+ if Obj.draw_type == 'SOLID':
+ Obj.draw_type = 'WIRE'
+
+ elif Obj.draw_type == 'WIRE':
+ Obj.draw_type = 'SOLID'
+
+ else:
+ Obj.draw_type = 'WIRE'
+
+ return {'FINISHED'}
+
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ pass
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+ pass
+
+if __name__ == "__main__":
+ register()
diff --git a/curve_simplify.py b/curve_simplify.py
new file mode 100644
index 00000000..14284941
--- /dev/null
+++ b/curve_simplify.py
@@ -0,0 +1,597 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+bl_info = {
+ "name": "Simplify curves",
+ "author": "testscreenings",
+ "version": (1,),
+ "blender": (2, 5, 7),
+ "api": 35853,
+ "location": "Search > Simplify Curves",
+ "description": "Simplifies 3D curves and fcurves",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/Curve/Curve_Simplify",
+ "tracker_url": "https://projects.blender.org/tracker/index.php?"\
+ "func=detail&aid=22327",
+ "category": "Add Curve"}
+
+"""
+This script simplifies Curves.
+"""
+
+####################################################
+import bpy
+from bpy.props import *
+import mathutils
+import math
+
+##############################
+#### simplipoly algorithm ####
+##############################
+# get SplineVertIndices to keep
def simplypoly(splineVerts, options):
    """Return indices of splineVerts to keep, judged by local curvature.

    splineVerts -- list of vectors (the spline's points)
    options     -- uses [2] k_thresh (curvature threshold), [3] order of the
                   sliding bezier window, [6] dis_error (extra distance test)
    The returned index list may repeat the first/last index; callers only
    use it for lookups, so duplicates are harmless.
    """
    # main vars
    newVerts = []           # list of vertindices to keep
    points = splineVerts    # list of 3dVectors
    pointCurva = []         # per-vert list of curvature samples
    curvatures = []         # averaged curvatures per vert
    for p in points:
        pointCurva.append([])
    order = options[3]      # order of sliding beziercurves
    k_thresh = options[2]   # curvature threshold
    dis_error = options[6]  # additional distance error

    # Slide a window of 'order' points along the spline and sample the
    # curvature of the local bezier at each interior window vert.
    for i, point in enumerate(points[:-(order-1)]):
        BVerts = points[i:i+order]
        for b, BVert in enumerate(BVerts[1:-1]):
            deriv1 = getDerivative(BVerts, 1/(order-1), order-1)
            deriv2 = getDerivative(BVerts, 1/(order-1), order-2)
            curva = getCurvature(deriv1, deriv2)
            pointCurva[i+b+1].append(curva)

    # average the curvatures
    # NOTE(review): always divides by (order-1) even though verts near the
    # spline ends collect fewer samples, biasing them low -- confirm intended.
    for i in range(len(points)):
        avgCurva = sum(pointCurva[i]) / (order-1)
        curvatures.append(avgCurva)

    # get distancevalues per vert - same test as Ramer-Douglas-Peucker,
    # but applied to every vert against its immediate neighbours
    distances = [0.0]  # first vert is always kept
    for i, point in enumerate(points[1:-1]):
        dist = altitude(points[i], points[i+2], points[i+1])
        distances.append(dist)
    distances.append(0.0)  # last vert is always kept

    # generate list of vertindices to keep,
    # tested against averaged curvatures and distances of neighbour verts
    newVerts.append(0)  # first vert is always kept
    for i, curv in enumerate(curvatures):
        if (curv >= k_thresh*0.01
        or distances[i] >= dis_error*0.1):
            newVerts.append(i)
    newVerts.append(len(curvatures)-1)  # last vert is always kept

    return newVerts
+
+# get binomial coefficient
def binom(n, m):
    """Return the binomial coefficient C(n, m) via Pascal's triangle."""
    row = [1]
    for _ in range(n):
        # Derive the next row of Pascal's triangle from the current one.
        row = [1] + [row[k] + row[k + 1] for k in range(len(row) - 1)] + [1]
    return row[m]
+
+# get nth derivative of order(len(verts)) bezier curve
def getDerivative(verts, t, nth):
    """Evaluate the nth derivative of the bezier curve given by *verts* at t.

    verts -- control points (vectors of equal dimension)
    t     -- curve parameter, normally in [0, 1]
    nth   -- derivative order (0 evaluates the curve itself)

    Bug fixed: the original only initialised the accumulator for 2D/3D
    mathutils vectors and raised NameError for any other dimension; the
    zero element is now derived from the inputs themselves, which also
    generalises the function to any vector type supporting +, - and *.
    """
    order = len(verts) - 1 - nth

    # Repeated forward differencing of the control polygon yields the
    # control points of the derivative curve.
    QVerts = verts
    for _ in range(nth):
        QVerts = [QVerts[i + 1] - QVerts[i] for i in range(len(QVerts) - 1)]

    # Zero vector of the same dimension/type as the control points.
    point = QVerts[0] - QVerts[0]

    # De Casteljau-free evaluation via the Bernstein basis.
    for i, vert in enumerate(QVerts):
        point += binom(order, i) * math.pow(t, i) * math.pow(1-t, order-i) * vert

    return point
+
+# get curvature from first, second derivative
def getCurvature(deriv1, deriv2):
    """Return the curvature k = |d1 x d2| / |d1|^3 (0 when d1 is zero)."""
    speed = deriv1.length
    if speed == 0:
        # Stationary / collinear points: treat curvature as zero.
        return 0
    return deriv1.cross(deriv2).length / math.pow(speed, 3)
+
+#########################################
+#### Ramer-Douglas-Peucker algorithm ####
+#########################################
+# get altitude of vert
def altitude(point1, point2, pointn):
    """Distance from pointn to the line through point1 and point2.

    Degenerate cases match the original: 0 when pointn coincides with
    point1, and the plain distance |pointn - point1| when the base edge
    point1-point2 has zero length.
    """
    base = point2 - point1
    offset = pointn - point1
    if offset.length == 0:
        return 0
    if base.length == 0:
        return offset.length
    # |offset| * sin(angle) is the perpendicular distance to the base line.
    return math.sin(base.angle(offset)) * offset.length
+
+# iterate through verts
def iterate(points, newVerts, error):
    """One Ramer-Douglas-Peucker refinement pass.

    For each segment between consecutive kept indices, locate the farthest
    intermediate point; if its distance to the segment reaches *error*,
    mark its index to be kept.  Returns the list of newly-added indices,
    or False when the pass added nothing (iteration can stop).
    """
    added = []
    for seg in range(len(newVerts) - 1):
        lo = newVerts[seg]
        hi = newVerts[seg + 1]
        best_index = 0
        best_alti = 0
        for offset, pt in enumerate(points[lo + 1:hi]):
            alti = altitude(points[lo], points[hi], pt)
            if alti > best_alti:
                best_alti = alti
                # Only remember the point once it actually exceeds the error.
                if best_alti >= error:
                    best_index = lo + offset + 1
        if best_index:
            added.append(best_index)
    return added if added else False
+
+#### get SplineVertIndices to keep
def simplify_RDP(splineVerts, options):
    """Return the sorted vert indices kept by Ramer-Douglas-Peucker.

    splineVerts -- list of vectors
    options     -- uses [4] as the maximum allowed distance error
    """
    error = options[4]

    # The first and the last vert are always kept.
    newVerts = [0, len(splineVerts)-1]

    # Refine until a pass adds no new indices.  (The original used a
    # 'new = 1' sentinel with 'while new != False'.)
    while True:
        new = iterate(splineVerts, newVerts, error)
        if not new:
            break
        newVerts += new
        newVerts.sort()
    return newVerts
+
+##########################
+#### CURVE GENERATION ####
+##########################
+# set bezierhandles to auto
def setBezierHandles(newCurve):
    """Set all bezier handles of the active curve object to AUTOMATIC.

    newCurve is unused but kept for interface compatibility; the operators
    act on the active object.  (Removed the unused local 'scene'.)
    """
    bpy.ops.object.mode_set(mode='EDIT', toggle=True)
    bpy.ops.curve.select_all(action='SELECT')
    bpy.ops.curve.handle_type_set(type='AUTOMATIC')
    bpy.ops.object.mode_set(mode='OBJECT', toggle=True)
+
+# get array of new coords for new spline from vertindices
def vertsToPoints(newVerts, splineVerts, splineType):
    """Flatten the kept verts into the coordinate array for a new spline.

    BEZIER splines get plain (x, y, z) triples; every other type gets an
    extra weight per point: 1 for NURBS, 0 for POLY.
    """
    newPoints = []

    if splineType == 'BEZIER':
        for idx in newVerts:
            newPoints.extend(splineVerts[idx].to_tuple())
    else:
        # Weight component appended after each coordinate triple.
        weight = 1 if splineType == 'NURBS' else 0
        for idx in newVerts:
            newPoints.extend(splineVerts[idx].to_tuple())
            newPoints.append(weight)

    return newPoints
+
+#########################
+#### MAIN OPERATIONS ####
+#########################
+
def main(context, obj, options):
    """Create a simplified copy of curve object *obj* and link it to the scene.

    context -- Blender context
    obj     -- source object, must be of type 'CURVE'
    options -- [mode, output, k_thresh, pointsNr, error, degreeOut,
                dis_error, keepShort]
    The source object is left untouched; the new object "simple_<name>" is
    selected and made active.
    """
    #print("\n_______START_______")
    # main vars
    mode = options[0]
    output = options[1]
    degreeOut = options[5]
    keepShort = options[7]
    bpy.ops.object.select_all(action='DESELECT')
    scene = context.scene
    splines = obj.data.splines.values()

    # create curvedatablock
    curve = bpy.data.curves.new("simple_"+obj.name, type = 'CURVE')

    # go through splines
    for spline_i, spline in enumerate(splines):
        # test if spline is long enough
        # NOTE(review): spline.points is tested even for bezier splines,
        # whose verts live in bezier_points -- confirm this is intended.
        if len(spline.points) >= 7 or keepShort:
            # check what type of spline to create
            if output == 'INPUT':
                splineType = spline.type
            else:
                splineType = output

            # get vec3 list to simplify
            if spline.type == 'BEZIER': # get bezierverts
                splineVerts = [splineVert.co.copy()
                                for splineVert in spline.bezier_points.values()]

            else: # verts from all other types of curves
                splineVerts = [splineVert.co.to_3d()
                                for splineVert in spline.points.values()]

            # simplify spline according to mode
            if mode == 'distance':
                newVerts = simplify_RDP(splineVerts, options)

            if mode == 'curvature':
                newVerts = simplypoly(splineVerts, options)

            # convert the kept indices into a flat coordinate array
            newPoints = vertsToPoints(newVerts, splineVerts, splineType)

            # create new spline
            newSpline = curve.splines.new(type = splineType)

            # put newPoints into spline according to type
            # (a new spline starts with one point, hence the -1 / *0.33 sizing)
            if splineType == 'BEZIER':
                newSpline.bezier_points.add(int(len(newPoints)*0.33))
                newSpline.bezier_points.foreach_set('co', newPoints)
            else:
                newSpline.points.add(int(len(newPoints)*0.25 - 1))
                newSpline.points.foreach_set('co', newPoints)

            # set degree of outputNurbsCurve
            if output == 'NURBS':
                newSpline.order_u = degreeOut

            # splineoptions
            newSpline.use_endpoint_u = spline.use_endpoint_u

    # create new object and put into scene
    newCurve = bpy.data.objects.new("simple_"+obj.name, curve)
    scene.objects.link(newCurve)
    newCurve.select = True
    scene.objects.active = newCurve
    newCurve.matrix_world = obj.matrix_world

    # set bezierhandles to auto
    setBezierHandles(newCurve)

    #print("________END________\n")
    return
+
+##################
+## get preoperator fcurves
def getFcurveData(obj):
    """Snapshot the keyframe coordinates of every selected f-curve of *obj*.

    Returns a list (one entry per selected f-curve) of lists of 3D vectors.
    """
    snapshots = []
    for curve in obj.animation_data.action.fcurves:
        if curve.select:
            snapshots.append([kp.co.to_3d()
                              for kp in curve.keyframe_points.values()])
    return snapshots
+
def selectedfcurves(obj):
    """Return the selected f-curves of *obj*'s active action.

    (The original looped with enumerate() but never used the index.)
    """
    return [fc for fc in obj.animation_data.action.fcurves if fc.select]
+
+###########################################################
+## fCurves Main
def fcurves_simplify(context, obj, options, fcurves):
    """Simplify the selected f-curves of *obj* in place.

    fcurves -- pre-operator keyframe snapshots (lists of 3D vectors) as
               returned by getFcurveData(); entry i corresponds to the
               i-th selected f-curve of obj.
    """
    # main vars
    mode = options[0]
    scene = context.scene
    fcurves_obj = obj.animation_data.action.fcurves

    # get the selected fcurves themselves
    fcurve_sel = selectedfcurves(obj)

    # go through fcurves
    for fcurve_i, fcurve in enumerate(fcurves):
        # test if fcurve is long enough
        if len(fcurve) >= 7:

            # simplify spline according to mode
            if mode == 'distance':
                newVerts = simplify_RDP(fcurve, options)

            if mode == 'curvature':
                newVerts = simplypoly(fcurve, options)

            # collect the surviving keyframe coordinates
            newPoints = []

            # this is different from the main() function for normal curves:
            # the f-curve api works point by point
            for v in newVerts:
                newPoints.append(fcurve[v])

            # remove all points from the curve first -- iterate backwards
            # down to index 1, so keyframe 0 survives until re-insertion
            for i in range(len(fcurve)-1,0,-1):
                fcurve_sel[fcurve_i].keyframe_points.remove(fcurve_sel[fcurve_i].keyframe_points[i])
            # put newPoints back: v[0] is the frame, v[1] the value
            for v in newPoints:
                fcurve_sel[fcurve_i].keyframe_points.insert(frame=v[0],value=v[1])
            #fcurve.points.foreach_set('co', newPoints)
    return
+
+#################################################
+#### ANIMATION CURVES OPERATOR ##################
+#################################################
class GRAPH_OT_simplify(bpy.types.Operator):
    """Simplify the selected f-curves of the active object."""
    bl_idname = "graph.simplify"
    bl_label = "simplifiy f-curves"
    bl_description = "simplify selected f-curves"
    bl_options = {'REGISTER', 'UNDO'}

    ## Properties
    opModes = [
        ('distance', 'distance', 'distance'),
        ('curvature', 'curvature', 'curvature')]
    mode = EnumProperty(name="Mode",
                        description="choose algorithm to use",
                        items=opModes)
    k_thresh = FloatProperty(name="k",
                             min=0, soft_min=0,
                             default=0, precision=3,
                             description="threshold")
    pointsNr = IntProperty(name="n",
                           min=5, soft_min=5,
                           max=16, soft_max=9,
                           default=5,
                           description="degree of curve to get averaged curvatures")
    error = FloatProperty(name="error",
                          description="maximum error to allow - distance",
                          min=0.0, soft_min=0.0,
                          default=0, precision=3)
    degreeOut = IntProperty(name="degree",
                            min=3, soft_min=3,
                            max=7, soft_max=7,
                            default=5,
                            description="degree of new curve")
    dis_error = FloatProperty(name="distance error",
                              description="maximum error in Blenderunits to allow - distance",
                              min=0, soft_min=0,
                              default=0.0, precision=3)
    # keyframe snapshot cached across redo invocations of this operator
    fcurves = []

    ''' Remove curvature mode as long as it isnn't significantly improved

    def draw(self, context):
        layout = self.layout
        col = layout.column()
        col.label('Mode:')
        col.prop(self, 'mode', expand=True)
        if self.mode == 'distance':
            box = layout.box()
            box.label(self.mode, icon='ARROW_LEFTRIGHT')
            box.prop(self, 'error', expand=True)
        if self.mode == 'curvature':
            box = layout.box()
            box.label('degree', icon='SMOOTHCURVE')
            box.prop(self, 'pointsNr', expand=True)
            box.label('threshold', icon='PARTICLE_PATH')
            box.prop(self, 'k_thresh', expand=True)
            box.label('distance', icon='ARROW_LEFTRIGHT')
            box.prop(self, 'dis_error', expand=True)
        col = layout.column()
    '''

    def draw(self, context):
        """Draw the redo panel: only the distance error is exposed."""
        layout = self.layout
        col = layout.column()
        col.prop(self, 'error', expand=True)

    ## Check for animdata
    @classmethod
    def poll(cls, context):
        obj = context.active_object
        fcurves = False
        if obj:
            animdata = obj.animation_data
            if animdata:
                act = animdata.action
                if act:
                    fcurves = act.fcurves
        return (obj and fcurves)

    ## execute
    def execute(self, context):
        #print("------START------")

        # The option list mirrors CURVE_OT_simplify; index 1 is normally
        # 'output', which the f-curve path ignores, so mode stands in twice.
        options = [
            self.mode,       #0 mode
            self.mode,       #1 placeholder for 'output' (unused here)
            self.k_thresh,   #2 (was mislabelled; indices now correct)
            self.pointsNr,   #3
            self.error,      #4
            self.degreeOut,  #5 (original comment said #6)
            self.dis_error]  #6 (original comment said #7)

        obj = context.active_object

        # snapshot the original keyframes once, so redo re-simplifies
        # from the unmodified data
        if not self.fcurves:
            self.fcurves = getFcurveData(obj)

        fcurves_simplify(context, obj, options, self.fcurves)

        #print("-------END-------")
        return {'FINISHED'}
+
+###########################
+##### Curves OPERATOR #####
+###########################
class CURVE_OT_simplify(bpy.types.Operator):
    """Create a simplified copy of the active curve object."""
    bl_idname = "curve.simplify"
    bl_label = "simplifiy curves"
    bl_description = "simplify curves"
    bl_options = {'REGISTER', 'UNDO'}

    ## Properties
    opModes = [
        ('distance', 'distance', 'distance'),
        ('curvature', 'curvature', 'curvature')]
    mode = EnumProperty(name="Mode",
                        description="choose algorithm to use",
                        items=opModes)
    SplineTypes = [
        ('INPUT', 'Input', 'same type as input spline'),
        ('NURBS', 'Nurbs', 'NURBS'),
        ('BEZIER', 'Bezier', 'BEZIER'),
        ('POLY', 'Poly', 'POLY')]
    output = EnumProperty(name="Output splines",
                          description="Type of splines to output",
                          items=SplineTypes)
    k_thresh = FloatProperty(name="k",
                             min=0, soft_min=0,
                             default=0, precision=3,
                             description="threshold")
    pointsNr = IntProperty(name="n",
                           min=5, soft_min=5,
                           max=9, soft_max=9,
                           default=5,
                           description="degree of curve to get averaged curvatures")
    error = FloatProperty(name="error in Bu",
                          description="maximum error in Blenderunits to allow - distance",
                          min=0, soft_min=0,
                          default=0.0, precision=3)
    degreeOut = IntProperty(name="degree",
                            min=3, soft_min=3,
                            max=7, soft_max=7,
                            default=5,
                            description="degree of new curve")
    dis_error = FloatProperty(name="distance error",
                              description="maximum error in Blenderunits to allow - distance",
                              min=0, soft_min=0,
                              default=0.0)
    keepShort = BoolProperty(name="keep short Splines",
                             description="keep short splines (less then 7 points)",
                             default=True)

    ''' Remove curvature mode as long as it isnn't significantly improved

    def draw(self, context):
        layout = self.layout
        col = layout.column()
        col.label('Mode:')
        col.prop(self, 'mode', expand=True)
        if self.mode == 'distance':
            box = layout.box()
            box.label(self.mode, icon='ARROW_LEFTRIGHT')
            box.prop(self, 'error', expand=True)
        if self.mode == 'curvature':
            box = layout.box()
            box.label('degree', icon='SMOOTHCURVE')
            box.prop(self, 'pointsNr', expand=True)
            box.label('threshold', icon='PARTICLE_PATH')
            box.prop(self, 'k_thresh', expand=True)
            box.label('distance', icon='ARROW_LEFTRIGHT')
            box.prop(self, 'dis_error', expand=True)
        col = layout.column()
        col.separator()
        col.prop(self, 'output', text='Output', icon='OUTLINER_OB_CURVE')
        if self.output == 'NURBS':
            col.prop(self, 'degreeOut', expand=True)
        col.prop(self, 'keepShort', expand=True)
    '''

    def draw(self, context):
        """Draw the redo panel: error, output spline type and short-spline toggle."""
        layout = self.layout
        col = layout.column()
        col.prop(self, 'error', expand=True)
        col.prop(self, 'output', text='Output', icon='OUTLINER_OB_CURVE')
        if self.output == 'NURBS':
            col.prop(self, 'degreeOut', expand=True)
        col.prop(self, 'keepShort', expand=True)


    ## Check for curve
    @classmethod
    def poll(cls, context):
        obj = context.active_object
        return (obj and obj.type == 'CURVE')

    ## execute
    def execute(self, context):
        #print("------START------")

        options = [
            self.mode,      #0
            self.output,    #1
            self.k_thresh,  #2
            self.pointsNr,  #3
            self.error,     #4
            self.degreeOut, #5
            self.dis_error, #6
            self.keepShort] #7


        # Global undo is disabled around main() so the whole operation
        # collapses into a single undo step.
        bpy.context.user_preferences.edit.use_global_undo = False

        bpy.ops.object.mode_set(mode='OBJECT', toggle=True)
        obj = context.active_object

        main(context, obj, options)

        bpy.context.user_preferences.edit.use_global_undo = True

        #print("-------END-------")
        return {'FINISHED'}
+
+#################################################
+#### REGISTER ###################################
+#################################################
def register():
    """Register every class of this add-on with Blender."""
    bpy.utils.register_module(__name__)
    # (dead trailing 'pass' removed)
+
def unregister():
    """Unregister every class of this add-on from Blender."""
    bpy.utils.unregister_module(__name__)
    # (dead trailing 'pass' removed)
+
+if __name__ == "__main__":
+ register()
diff --git a/development_api_navigator.py b/development_api_navigator.py
new file mode 100644
index 00000000..addc0d29
--- /dev/null
+++ b/development_api_navigator.py
@@ -0,0 +1,715 @@
+# development_api_navigator.py
+#
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
+bl_info = {
+ "name": "API Navigator",
+ "author": "Dany Lebel (Axon_D)",
+ "version": (1, 0, 2),
+ "blender": (2, 5, 7),
+ "api": 36079,
+ "location": "Text Editor > Properties > API Navigator Panel",
+ "description": "Allows exploration of the python api via the user interface",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"
+ "Scripts/Text_Editor/API_Navigator",
+ "tracker_url": "http://projects.blender.org/tracker/index.php?"\
+ "func=detail&aid=24982",
+ "category": "Development"}
+
+"""
+ You can browse through the tree structure of the api. Each child object appears in a list
+that tries to be representative of its type. These lists are :
+
+ * Items (for an iterable object)
+ * Item Values (for an iterable object wich only supports index)
+ * Modules
+ * Types
+ * Properties
+ * Structs and Functions
+ * Methods and Functions
+ * Attributes
+ * Inaccessible (some objects may be listed but inaccessible)
+
+    The lists can be filtered to help searching in the tree. Just enter the text in the
+filter section. It is also possible to explore other modules. Go to the root and select
+one in the list of available modules. It will be imported dynamically.
+
+    In the text section, some information is displayed: the type of the object,
+what it returns, and its docstring. We can hope that these docstrings will be as
+descriptive as possible. This text data block, named api_doc_, can be toggled on and off
+with the Escape key. (A bug currently prevents the keymap from registering at start.)
+
+"""
+
+import bpy, sys
+from console.complete_import import get_root_modules
+
+
+############ Global Variables ############
+
+last_text = None # last text data block
+
+root_module = None # root module of the tree
+
+root_m_path = '' # root_module + path as a string
+
+current_module = None # the object itself in the tree structure
+
+
+tree_level = None # the list of objects from the current_module
+
def init_tree_level():
    """Reset the global tree_level to nine empty category lists.

    Order: items, item values, modules, types, properties,
    structs/functions, methods/functions, attributes, inaccessible.
    """
    global tree_level
    tree_level = [[] for _ in range(9)]

init_tree_level()
+
+
+api_doc_ = '' # the documentation formated for the API Navigator
+
+module_type = None # the type of current_module
+
+return_report = '' # what current_module returns
+
+filter_mem = {} # remember last filters entered for each path
+
+too_long = False # is tree_level list too long to display in a panel?
+
+
+############ Functions ############
+
def get_root_module(path):
    """Import the first dotted component of *path* into the global root_module.

    On any failure root_module is set to None.  (The bare 'except:' was
    narrowed to Exception, and the manual '.'-search replaced by split.)
    """
    #print('get_root_module')
    global root_module
    root = path.split('.', 1)[0]
    try:
        root_module = __import__(root)
    except Exception:
        root_module = None
+
+
def evaluate(module):
    """Evaluate the dotted/subscripted path *module* against root_module.

    Sets root_m_path to the textual expression rebased onto the
    'root_module' variable, so eval() resolves it against the imported
    module object.  Returns the object, or None after resetting
    tree_level when evaluation fails.

    Fixes: init_tree_level was referenced without being called (no-op);
    the unused read of api_nav_props.path was removed.
    """
    #print('evaluate')
    global root_module, tree_level, root_m_path

    try:
        len_name = len(root_module.__name__)
        root_m_path = 'root_module' + module[len_name:]
        return eval(root_m_path)
    except Exception:
        init_tree_level()  # was 'init_tree_level' -- missing the call
        return None
+
+
def get_tree_level():
    """Build the nine category lists for the current navigator path.

    Returns [items, item_values, modules, types, properties,
    structs/functions, methods/functions, attributes, inaccessible].
    At the root (empty path) only the module list is populated.
    """
    #print('get_tree_level')

    path = bpy.context.window_manager.api_nav_props.path

    def object_list():
        """Classify every child of current_module by its runtime type string."""
        #print('object_list')
        global current_module, root_m_path

        itm, val, mod, typ, props, struct, met, att, bug = [], [], [], [], [], [], [], [], []
        iterable = isiterable(current_module)
        if iterable:
            iter(current_module)
            current_type = str(module_type)
            # plain strings are iterable too but are not browsed item-wise
            if current_type != "<class 'str'>":
                if iterable == 'a':
                    # mapping-like: list the keys, fall back to values
                    itm = list(current_module.keys())
                    if not itm:
                        val = list(current_module)
                else :
                    # sequence-like: only values are reachable
                    val = list(current_module)

        for i in dir(current_module):
            try :
                # re-evaluate each attribute to inspect its runtime type
                t = str(type(eval(root_m_path + '.' + i)))
            except (AttributeError, SyntaxError):
                # attribute exists in dir() but cannot be accessed
                bug += [i]
                continue


            if t == "<class 'module'>":
                mod += [i]
            elif t[0:16] == "<class 'bpy_prop":
                props += [i]
            elif t[8:11] == 'bpy':
                struct += [i]
            elif t == "<class 'builtin_function_or_method'>":
                met += [i]
            elif t == "<class 'type'>":
                typ += [i]
            else :
                att += [i]

        return [itm, val, mod, typ, props, struct, met, att, bug]

    if not path:
        # at the root: offer every importable module
        return [[], [], [i for i in get_root_modules()], [], [], [], [], [], []]
    return object_list()
+
+
def parent(path):
    """Return the parent of *path*: strip the trailing subscript
    ("a.b[x]" -> "a.b") or the trailing attribute ("a.b" -> "a"),
    else return ''.

    The character-stripping while-loops were replaced by rindex();
    this also fixes an IndexError the original raised on ''.
    """
    if path.endswith(']') and '[' in path:
        return path[:path.rindex('[')]
    if '.' in path:
        return path[:path.rindex('.')]
    return ''
+
+
def update_filter():
    """Restore the remembered filter for the current path, or clear it.

    (The try/except-everything around a dict lookup was replaced by
    dict.get with an empty-string default.)
    """
    global filter_mem

    wm = bpy.context.window_manager
    wm.api_nav_props.filter = filter_mem.get(wm.api_nav_props.path, '')
+
+
def isiterable(mod):
    """Classify *mod* for the navigator lists.

    Returns 'a' for mapping-like objects (string subscription reaches the
    key-lookup path, even when it raises KeyError), 'b' for other
    iterables, and False for non-iterables.  Note that both the success
    and the KeyError branches of the probe deliberately return 'a'.
    """
    try :
        iter(mod)
    except :
        return False
    try :
        # probe with a string key: mappings either succeed or raise KeyError;
        # sequences raise TypeError instead
        mod['']
        return 'a'
    except KeyError:
        return 'a'
    except (AttributeError, TypeError):
        return 'b'
+
+
def fill_filter_mem():
    """Remember (or forget) the filter entered for the path being left.

    (Renamed the local that shadowed the builtin 'filter'.)
    """
    global filter_mem

    wm = bpy.context.window_manager
    current_filter = wm.api_nav_props.filter
    if current_filter:
        filter_mem[wm.api_nav_props.old_path] = current_filter
    else:
        # no filter any more: drop a stale entry if one exists
        filter_mem.pop(wm.api_nav_props.old_path, None)
+
+
+###### API Navigator parent class #######
+
class ApiNavigator():
    """Parent class for API Navigator operators and the panel.

    Provides the static helpers that rebuild the module-level state
    (tree_level, current_module, api_doc_, ...) shared by all subclasses.
    """

    @staticmethod
    def generate_global_values():
        """Populate the level attributes to display the panel buttons and the documentation."""
        global tree_level, current_module, module_type, return_report, last_text

        # Remember the user's own text block (so Toggle_doc can restore it),
        # ignoring our generated 'api_doc_' block.
        text = bpy.context.space_data.text
        if text:
            if text.name != 'api_doc_':
                last_text = bpy.context.space_data.text.name
            elif bpy.data.texts.__len__() < 2:
                last_text = None
        else :
            last_text = None
        bpy.context.window_manager.api_nav_props.pages = 0
        get_root_module(bpy.context.window_manager.api_nav_props.path)
        current_module = evaluate(bpy.context.window_manager.api_nav_props.path)
        module_type = str(type(current_module))
        return_report = str(current_module)
        tree_level = get_tree_level()

        # NOTE(review): tree_level always holds exactly 9 category lists,
        # so this length test can never exceed 30 -- confirm intent.
        if tree_level.__len__() > 30:
            global too_long
            too_long = True
        else :
            too_long = False

        # PEP 3135 implicit class reference (equivalent to ApiNavigator)
        __class__.generate_api_doc()
        return {'FINISHED'}

    @staticmethod
    def generate_api_doc():
        """Format the docstring of current_module for the api_doc_ text block."""
        global current_module, api_doc_, return_report, module_type

        path = bpy.context.window_manager.api_nav_props.path
        line = "-" * (path.__len__()+2)
        header = """\n\n\n\t\t%s\n\t %s\n\
_____________________________________________\n\
\n\
Type : %s\n\
\n\
\n\
Return : %s\n\
_____________________________________________\n\
\n\
Doc:
\n\
""" % (path, line, module_type, return_report)
        footer = "\n\
_____________________________________________\n\
\n\
\n\
\n\
#############################################\n\
#                 api_doc_                  #\n\
#          Escape to toggle text            #\n\
#      (F8 to reload modules if doesn't     #\n\
#            work automatically)            #\n\
#############################################"
        doc = current_module.__doc__
        api_doc_ = header + str(doc) + footer
        return {'FINISHED'}

    @staticmethod
    def doc_text_datablock():
        """Create the api_doc_ text datablock, or overwrite it if it already exists."""
        global api_doc_

        space_data = bpy.context.space_data

        try :
            doc_text = bpy.data.texts['api_doc_']
            space_data.text = doc_text
            doc_text.clear()
        except :
            # the text block does not exist yet -- create it
            bpy.data.texts.new(name='api_doc_')
            doc_text = bpy.data.texts['api_doc_']
            space_data.text = doc_text

        doc_text.write(text=api_doc_)
        return {'FINISHED'}
+
+
+
+############ Operators ############
def api_update(context=None):
    """Rebuild the tree and documentation if the navigator path changed.

    *context* is unused but kept for operator compatibility; it now
    defaults to None because Update.execute() called api_update() with
    no argument, which raised a TypeError.
    """
    wm = bpy.context.window_manager
    if wm.api_nav_props.path != wm.api_nav_props.old_path:
        fill_filter_mem()
        wm.api_nav_props.old_path = wm.api_nav_props.path
        update_filter()
        ApiNavigator.generate_global_values()
        ApiNavigator.doc_text_datablock()
+
+
class Update(ApiNavigator, bpy.types.Operator):
    """Update the tree structure."""
    bl_idname = "api_navigator.update"
    bl_label = "API Navigator Update"

    def execute(self, context):
        # Pass the context through: api_update() declares a context
        # parameter, and the original bare call raised a TypeError.
        api_update(context)
        return {'FINISHED'}
+
+
class BackToBpy(ApiNavigator, bpy.types.Operator):
    """Go back to the root module bpy."""
    bl_idname = "api_navigator.back_to_bpy"
    bl_label = "Back to bpy"

    def execute(self, context):
        """Reset the path to 'bpy' and rebuild the navigator state."""
        fill_filter_mem()
        # Both branches of the original if/else assigned the same value,
        # so the test on the current path was dead code.
        wm = bpy.context.window_manager
        wm.api_nav_props.old_path = wm.api_nav_props.path = 'bpy'
        update_filter()
        self.generate_global_values()
        self.doc_text_datablock()
        return {'FINISHED'}
+
+
class Down(ApiNavigator, bpy.types.Operator):
    """Go to this Module"""
    bl_idname = "api_navigator.down"
    bl_label = "API Navigator Down"
    # name of the child to descend into, set by the panel button
    pointed_module = bpy.props.StringProperty(name='Current Module', default='')


    def execute(self, context):
        """Append pointed_module to the path (dot-separated unless at the root)."""
        fill_filter_mem()

        if not bpy.context.window_manager.api_nav_props.path:
            # at the root there is no separator to add
            bpy.context.window_manager.api_nav_props.old_path = bpy.context.window_manager.api_nav_props.path = bpy.context.window_manager.api_nav_props.path + self.pointed_module
        else :
            bpy.context.window_manager.api_nav_props.old_path = bpy.context.window_manager.api_nav_props.path = bpy.context.window_manager.api_nav_props.path + '.' + self.pointed_module

        update_filter()
        self.generate_global_values()
        self.doc_text_datablock()
        return {'FINISHED'}
+
+
class Parent(ApiNavigator, bpy.types.Operator):
    """Go to Parent Module"""
    bl_idname = "api_navigator.parent"
    bl_label = "API Navigator Parent"


    def execute(self, context):
        """Strip the last path component and rebuild the navigator state."""
        path = bpy.context.window_manager.api_nav_props.path

        # a no-op at the root (empty path)
        if path:
            fill_filter_mem()
            bpy.context.window_manager.api_nav_props.old_path = bpy.context.window_manager.api_nav_props.path = parent(bpy.context.window_manager.api_nav_props.path)
            update_filter()
            self.generate_global_values()
            self.doc_text_datablock()

        return {'FINISHED'}
+
+
class ClearFilter(ApiNavigator, bpy.types.Operator):
    """Clear the filter"""
    bl_idname = 'api_navigator.clear_filter'
    bl_label = 'API Nav clear filter'

    def execute(self, context):
        # Emptying the property is enough; the panel redraws automatically.
        bpy.context.window_manager.api_nav_props.filter = ''
        return {'FINISHED'}
+
+
class FakeButton(ApiNavigator, bpy.types.Operator):
    """The list is not displayed completely"""  # only serves as an indicator, does nothing
    bl_idname = 'api_navigator.fake_button'
    bl_label = ''
+
+
class Subscript(ApiNavigator, bpy.types.Operator):
    """Subscript to this Item"""
    bl_idname = "api_navigator.subscript"
    bl_label = "API Navigator Subscript"
    # subscript expression (key or index), set by the panel button
    subscription = bpy.props.StringProperty(name='', default='')

    def execute(self, context):
        """Append [subscription] to the path and rebuild the navigator state."""
        fill_filter_mem()
        bpy.context.window_manager.api_nav_props.old_path = bpy.context.window_manager.api_nav_props.path = bpy.context.window_manager.api_nav_props.path + '[' + self.subscription + ']'
        update_filter()
        self.generate_global_values()
        self.doc_text_datablock()
        return {'FINISHED'}
+
+
class Toggle_doc(ApiNavigator, bpy.types.Operator):
    """Toggle on or off api_doc_ Text"""
    bl_idname = 'api_navigator.toggle_doc'
    bl_label = 'Toggle api_doc_'


    def execute(self, context):
        global last_text

        # remember which user text block is showing, so we can restore it
        try :
            if bpy.context.space_data.text.name != "api_doc_":
                last_text = bpy.context.space_data.text.name
        except : pass

        # if the doc block exists, remove it (toggle off) ...
        try :
            text = bpy.data.texts["api_doc_"]
            bpy.data.texts["api_doc_"].clear()
            bpy.data.texts.remove(text)
        except KeyError:
            # ... otherwise (re)create it (toggle on)
            self.doc_text_datablock()
            return {'FINISHED'}

        # restore the last user text block, if it still exists
        try :
            text = bpy.data.texts[last_text]
            bpy.context.space_data.text = text
            #line = bpy.ops.text.line_number() # operator doesn't seems to work ???
            #bpy.ops.text.jump(line=line)
            return {'FINISHED'}
        except : pass

        bpy.context.space_data.text = None
        return {'FINISHED'}
+
+############ UI Panels ############
+
+class OBJECT_PT_api_navigator(ApiNavigator, bpy.types.Panel):
+ bl_idname = 'api_navigator'
+ bl_space_type = "TEXT_EDITOR"
+ bl_region_type = "UI"
+ bl_label = "API Navigator"
+ bl_options = "DEFAULT_CLOSED"
+
+
+ columns = 3
+
+
    def iterable_draw(self):
        """Draw the Items / Item Values box for an iterable current_module.

        Paginates with reduce_to * columns entries per page, starting at
        page 'pages'.  Strings are skipped (not browsed item-wise).
        """
        global tree_level, current_module

        iterable = isiterable(current_module)

        if iterable:
            iter(current_module)
            current_type = str(module_type)

            if current_type == "<class 'str'>":
                return {'FINISHED'}

            col = self.layout
            filter = bpy.context.window_manager.api_nav_props.filter
            reduce_to = bpy.context.window_manager.api_nav_props.reduce_to * self.columns
            pages = bpy.context.window_manager.api_nav_props.pages
            page_index = reduce_to*pages
            rank = 0
            count = 0
            i = 0
            filtered = 0

            if iterable == 'a':
                # mapping-like: one button per key, subscripting with "key"
                current_type.__iter__()
                collection = list(current_module.keys())
                end = collection.__len__()
                box = self.layout.box()
                row = box.row()
                row.label(text="Items", icon="DOTSDOWN")
                box = box.box()
                col = box.column(align=True)

                while count < reduce_to and i < end:
                    mod = collection[i]
                    # skip entries that belong to earlier pages
                    if filtered < page_index:
                        filtered += 1
                        i += 1
                        continue

                    # start a new row every 'columns' buttons
                    if not (i % self.columns):
                        row = col.row()
                    row.operator('api_navigator.subscript', text=mod, emboss=False).subscription = '"' + mod + '"'
                    filtered += 1
                    i += 1
                    count += 1

            elif iterable == 'b':
                # sequence-like: one button per value, subscripting with the index
                box = self.layout.box()
                row = box.row()
                row.label(text="Item Values", icon="OOPS")
                box = box.box()
                col = box.column(align=True)
                collection = list(current_module)
                end = collection.__len__()

                while count < reduce_to and i < end:
                    mod = str(collection[i])
                    if filtered < page_index:
                        filtered += 1
                        i += 1
                        continue

                    if not (i % self.columns):
                        row = col.row()
                    row.operator('api_navigator.subscript', text=mod, emboss=False).subscription = str(i)
                    filtered += 1
                    i += 1
                    count += 1

            too_long = end > 30

            # show the pagination controls when the list is truncated
            if too_long:
                row = col.row()
                row.prop(bpy.context.window_manager.api_nav_props, 'reduce_to')
                row.operator('api_navigator.fake_button', text='', emboss=False, icon="DOTSDOWN")
                row.prop(bpy.context.window_manager.api_nav_props, 'pages', text='Pages')

        return {'FINISHED'}
+
+
+
+
+ def list_draw(self, t, pages, icon, label=None, emboss=False):
+ global tree_level, current_module
+
+ def reduced(too_long):
+
+ if too_long:
+ row = col.row()
+ row.prop(bpy.context.window_manager.api_nav_props, 'reduce_to')
+ row.operator('api_navigator.fake_button', text='', emboss=False, icon="DOTSDOWN")
+ row.prop(bpy.context.window_manager.api_nav_props, 'pages', text='Pages')
+
+ layout = self.layout
+
+ filter = bpy.context.window_manager.api_nav_props.filter
+
+ reduce_to = bpy.context.window_manager.api_nav_props.reduce_to * self.columns
+
+ page_index = reduce_to*pages
+
+
+ len = tree_level[t].__len__()
+ too_long = len > reduce_to
+
+ if len:
+ col = layout.column()
+ box = col.box()
+
+ row = box.row()
+ row.label(text=label, icon=icon)
+
+ if t < 2:
+ box = box.box()
+ row = box.row()
+ col = row.column(align=True)
+ i = 0
+ objects = 0
+ count = 0
+ filtered = 0
+
+ while count < reduce_to and i < len:
+ obj = tree_level[t][i]
+
+ if filter and filter not in obj:
+ i += 1
+ continue
+ elif filtered < page_index:
+ filtered += 1
+ i += 1
+ continue
+
+ if not (objects % self.columns):
+ row = col.row()
+ if t > 1:
+ row.operator("api_navigator.down", text=obj, emboss=emboss).pointed_module = obj
+ elif t == 0:
+ row.operator('api_navigator.subscript', text=str(obj), emboss=False).subscription = '"' + obj + '"'
+ else :
+ row.operator('api_navigator.subscript', text=str(obj), emboss=False).subscription = str(i)
+ filtered += 1
+ i += 1
+ objects += 1
+ count += 1
+
+ reduced(too_long)
+
+ return {'FINISHED'}
+
+
+ def draw(self, context):
+ global tree_level, current_module, module_type, return_report
+
+ api_update(context)
+
+ st = bpy.context.space_data
+
+ ###### layout ######
+ layout = self.layout
+ col = layout.column()
+ layout.label(text='Tree Structure')
+ col = layout.column(align=True)
+ col.prop(bpy.context.window_manager.api_nav_props, 'path', text='')
+ row = col.row()
+ row.operator("api_navigator.parent", text="Parent", icon="BACK")
+ row.operator("api_navigator.back_to_bpy", text='', emboss=True, icon="FILE_PARENT")
+
+ col = layout.column()
+ row = col.row(align=True)
+ row.prop(bpy.context.window_manager.api_nav_props, 'filter')
+ row.operator('api_navigator.clear_filter', text='', icon='PANEL_CLOSE')
+
+ col = layout.column()
+
+ pages = bpy.context.window_manager.api_nav_props.pages
+ self.list_draw(0, pages, "DOTSDOWN", label="Items")
+ self.list_draw(1, pages, "DOTSDOWN", label="Item Values")
+ self.list_draw(2, pages, "PACKAGE", label="Modules", emboss=True)
+ self.list_draw(3, pages, "WORDWRAP_ON", label="Types", emboss=False)
+ self.list_draw(4, pages, "BUTS", label="Properties", emboss=False)
+ self.list_draw(5, pages, "OOPS", label="Structs and Functions")
+ self.list_draw(6, pages, "SCRIPTWIN", label="Methods and Functions")
+ self.list_draw(7, pages, "INFO", label="Attributes")
+ self.list_draw(8, pages, "ERROR", label="Inaccessible")
+
+
+########### Menu functions ###############
+
+
+def register_keymaps():
+ kc = bpy.context.window_manager.keyconfigs['Blender']
+ km = kc.keymaps.get("Text")
+ if km is None:
+ km = kc.keymaps.new(name="Text")
+ kmi = km.keymap_items.new('api_navigator.toggle_doc', 'ESC', 'PRESS')
+
+
+def unregister_keymaps():
+ km = bpy.data.window_managers["WinMan"].keyconfigs.default.keymaps["Text"]
+ kmi = km.keymap_items["api_navigator.toggle_doc"]
+ km.keymap_items.remove(kmi)
+
+
+def register():
+ from bpy.props import StringProperty, IntProperty, PointerProperty
+
+ class ApiNavProps(bpy.types.PropertyGroup):
+ """
+ Fake module like class.
+
+ bpy.context.window_manager.api_nav_props
+
+ """
+ path = StringProperty(name='path',
+ description='Enter bpy.ops.api_navigator to see the documentation',
+ default='bpy')
+ old_path = StringProperty(name='old_path', default='')
+ filter = StringProperty(name='filter',
+ description='Filter the resulting modules', default='')
+ reduce_to = IntProperty(name='Reduce to ',
+ description='Display a maximum number of x entries by pages',
+ default=10, min=1)
+ pages = IntProperty(name='Pages',
+ description='Display a Page', default=0, min=0)
+
+ bpy.utils.register_module(__name__)
+
+ bpy.types.WindowManager.api_nav_props = PointerProperty(
+ type=ApiNavProps, name='API Nav Props', description='')
+
+ register_keymaps()
+ #print(get_tree_level())
+
+
+def unregister():
+ unregister_keymaps()
+ del bpy.types.WindowManager.api_nav_props
+
+ bpy.utils.unregister_module(__name__)
+
+
+if __name__ == '__main__':
+ register()
diff --git a/development_icon_get.py b/development_icon_get.py
new file mode 100644
index 00000000..858e8a1d
--- /dev/null
+++ b/development_icon_get.py
@@ -0,0 +1,243 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+
+bl_info = {
+ 'name': 'Icons',
+ 'author': 'Crouch, N.tox, PKHG, Campbell Barton, Dany Lebel',
+ 'version': (1, 5, 1),
+ "blender": (2, 5, 7),
+ "api": 35850,
+ 'location': 'Text Editor > Properties or '\
+ 'Console > Console Menu',
+ 'warning': '',
+ 'description': 'Click an icon to display its name and copy it '\
+ 'to the clipboard',
+ 'wiki_url': 'http://wiki.blender.org/index.php/Extensions:2.5/'\
+ 'Py/Scripts/System/Display_All_Icons',
+ 'tracker_url': 'http://projects.blender.org/tracker/index.php?'\
+ 'func=detail&aid=22011',
+ 'category': 'Development'}
+
+
+import bpy
+
+
+def create_icon_list_all():
+ icons = bpy.types.UILayout.bl_rna.functions['prop'].parameters['icon'].\
+ enum_items.keys()
+
+ icons.remove("NONE")
+
+ return icons
+
+
+def create_icon_list():
+ icons = create_icon_list_all()
+ search = bpy.context.scene.icon_props.search.lower()
+
+ if search == "":
+ pass
+ else:
+ icons = [key for key in icons if search in key.lower()]
+
+ return icons
+
+
+class WM_OT_icon_info(bpy.types.Operator):
+ bl_idname = "wm.icon_info"
+ bl_label = "Icon Info"
+ bl_description = "Click to copy this icon name to the clipboard"
+ icon = bpy.props.StringProperty()
+ icon_scroll = bpy.props.IntProperty()
+
+ def invoke(self, context, event):
+ bpy.data.window_managers['WinMan'].clipboard = self.icon
+ self.report({'INFO'}, "Icon ID: %s" % self.icon)
+ return {'FINISHED'}
+
+
+class OBJECT_PT_icons(bpy.types.Panel):
+ bl_space_type = "TEXT_EDITOR"
+ bl_region_type = "UI"
+ bl_label = "All icons"
+
+ def __init__(self):
+ self.amount = 10
+ self.icon_list = create_icon_list()
+
+ def draw(self, context):
+ props = context.scene.icon_props
+ # polling for updates
+ if props.search != CONSOLE_HT_icons._search_old:
+ self.icon_list = create_icon_list()
+ # adjusting max value of scroller
+# IconProps.scroll = bpy.props.IntProperty(default=1, min=1,
+# max=max(1, len(self.icon_list) - self.amount + 1),
+# description="Drag to scroll icons")
+
+ box = self.layout.box()
+ # scroll view
+ if not props.expand:
+ # expand button
+ toprow = box.row()
+ toprow.prop(props, "expand", icon="TRIA_RIGHT", icon_only=True,
+ emboss=False)
+ # search buttons
+ row = toprow.row(align=True)
+ row.prop(props, "search", icon="VIEWZOOM")
+ # scroll button
+ row = toprow.row()
+ row.active = props.bl_rna.scroll[1]['max'] > 1
+ row.prop(props, "scroll")
+
+ # icons
+ row = box.row(align=True)
+ if len(self.icon_list) == 0:
+ row.label("No icons found")
+ else:
+ for icon in self.icon_list[props.scroll - 1:
+ props.scroll - 1 + self.amount]:
+ row.operator("wm.icon_info", text=" ", icon=icon,
+ emboss=False).icon = icon
+ if len(self.icon_list) < self.amount:
+ for i in range(self.amount - len(self.icon_list) \
+ % self.amount):
+ row.label("")
+
+ # expanded view
+ else:
+ # expand button
+ toprow = box.row()
+ toprow.prop(props, "expand", icon="TRIA_DOWN", icon_only=True,
+ emboss=False)
+ # search buttons
+ row = toprow.row(align=True)
+ row.prop(props, "search", icon="VIEWZOOM")
+ # scroll button
+ row = toprow.row()
+ row.active = False
+ row.prop(props, "scroll")
+
+ # icons
+ col = box.column(align=True)
+ if len(self.icon_list) == 0:
+ col.label("No icons found")
+ else:
+ for i, icon in enumerate(self.icon_list):
+ if i % self.amount == 0:
+ row = col.row(align=True)
+ row.operator("wm.icon_info", text=" ", icon=icon,
+ emboss=False).icon = icon
+ for i in range(self.amount - len(self.icon_list) \
+ % self.amount):
+ row.label("")
+
+
+class CONSOLE_HT_icons(bpy.types.Header):
+ bl_space_type = 'CONSOLE'
+ _search_old = ""
+
+ def __init__(self):
+ self.amount = 10
+ self.icon_list = create_icon_list()
+
+ def draw(self, context):
+ props = context.scene.icon_props
+ # polling for updates
+ if props.search != __class__._search_old:
+ __class__._search_old = props.search
+ self.icon_list = create_icon_list()
+ # adjusting max value of scroller
+# IconProps.scroll = bpy.props.IntProperty(default=1, min=1,
+# max=max(1, len(self.icon_list) - self.amount + 1),
+# description="Drag to scroll icons")
+
+ # scroll view
+ if props.console:
+ layout = self.layout
+ layout.separator()
+ # search buttons
+ row = layout.row()
+ row.prop(props, "search", icon="VIEWZOOM")
+ # scroll button
+ row = layout.row()
+ row.active = props.bl_rna.scroll[1]['max'] > 1
+ row.prop(props, "scroll")
+
+ # icons
+ row = layout.row(align=True)
+ if len(self.icon_list) == 0:
+ row.label("No icons found")
+ else:
+ for icon in self.icon_list[props.scroll - 1:
+ props.scroll - 1 + self.amount]:
+ row.operator("wm.icon_info", text="", icon=icon,
+ emboss=False).icon = icon
+
+
+def menu_func(self, context):
+ self.layout.prop(bpy.context.scene.icon_props, 'console')
+
+
+def register():
+ global IconProps
+
+ icons_total = len(create_icon_list_all())
+ icons_per_row = 10
+
+ class IconProps(bpy.types.PropertyGroup):
+ """
+ Fake module like class
+ bpy.context.scene.icon_props
+ """
+ console = bpy.props.BoolProperty(name='Show System Icons',
+ description='Display the Icons in the console header', default=False)
+ expand = bpy.props.BoolProperty(default=False,
+ description="Expand, to display all icons at once")
+ search = bpy.props.StringProperty(default="",
+ description="Search for icons by name")
+ scroll = bpy.props.IntProperty(default=1, min=1,
+ max=max(1, icons_total - icons_per_row + 1),
+ description="Drag to scroll icons")
+
+ bpy.utils.register_module(__name__)
+ bpy.types.Scene.icon_props = bpy.props.PointerProperty(type=IconProps)
+ bpy.types.CONSOLE_MT_console.append(menu_func)
+
+
+def unregister():
+ if bpy.context.scene.get('icon_props') != None:
+ del bpy.context.scene['icon_props']
+ try:
+ del bpy.types.Scene.icon_props
+ bpy.types.CONSOLE_MT_console.remove(menu_func)
+ except:
+ pass
+ if __name__ == "__main__":
+ # unregistering is only done automatically when run as add-on
+ bpy.utils.unregister_class(OBJECT_PT_icons)
+ bpy.utils.unregister_class(CONSOLE_HT_icons)
+
+ bpy.utils.unregister_module(__name__)
+
+
+if __name__ == "__main__":
+ register()
diff --git a/game_engine_save_as_runtime.py b/game_engine_save_as_runtime.py
new file mode 100644
index 00000000..588b0904
--- /dev/null
+++ b/game_engine_save_as_runtime.py
@@ -0,0 +1,227 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+bl_info = {
+ 'name': 'Save As Game Engine Runtime',
+ 'author': 'Mitchell Stokes (Moguri)',
+ 'version': (0, 3, 1),
+ "blender": (2, 5, 8),
+ "api": 37846,
+ 'location': 'File > Export',
+ 'description': 'Bundle a .blend file with the Blenderplayer',
+ 'warning': '',
+ 'wiki_url': 'http://wiki.blender.org/index.php/Extensions:2.5/Py/'\
+ 'Scripts/Game_Engine/Save_As_Runtime',
+ 'tracker_url': 'https://projects.blender.org/tracker/index.php?'\
+ 'func=detail&aid=23564',
+ 'category': 'Game Engine'}
+
+import bpy
+import os
+import sys
+import shutil
+
+
+def CopyPythonLibs(dst, overwrite_lib):
+ import sysconfig
+ src = sysconfig.get_paths()['platstdlib']
+ # X.XX/python/lib --> X.XX/python/lib/pythonX.X
+ dst = os.path.join(dst, os.path.basename(src))
+ if os.path.exists(src):
+ write = False
+ if os.path.exists(dst):
+ if overwrite_lib:
+ shutil.rmtree(dst)
+ write = True
+ else:
+ write = True
+ if write:
+ shutil.copytree(src, dst, ignore=lambda dir, contents: [i for i in contents if i == '__pycache__'])
+ else:
+        print("Python not found in %r, skipping python copy." % src)
+
+
+def WriteAppleRuntime(player_path, output_path, copy_python, overwrite_lib):
+ # Enforce the extension
+ if not output_path.endswith('.app'):
+ output_path += '.app'
+
+ # Use the system's cp command to preserve some meta-data
+ os.system('cp -R "%s" "%s"' % (player_path, output_path))
+
+ bpy.ops.wm.save_as_mainfile(filepath=output_path+"/Contents/Resources/game.blend", copy=True)
+
+ # Copy bundled Python
+ blender_dir = os.path.dirname(bpy.app.binary_path)
+
+ if copy_python:
+ print("Copying Python files...", end=" ")
+ dst = os.path.join(output_path, "..")
+ CopyPythonLibs(dst, overwrite_lib)
+ print("done")
+
+
+def WriteRuntime(player_path, output_path, copy_python, overwrite_lib, copy_dlls):
+ import struct
+
+ # Check the paths
+ if not os.path.isfile(player_path) and not(os.path.exists(player_path) and player_path.endswith('.app')):
+ print("The player could not be found! Runtime not saved.")
+ return
+
+ # Check if we're bundling a .app
+ if player_path.endswith('.app'):
+ WriteAppleRuntime(player_path, output_path, copy_python, overwrite_lib)
+ return
+
+ # Enforce "exe" extension on Windows
+ if player_path.endswith('.exe') and not output_path.endswith('.exe'):
+ output_path += '.exe'
+
+ # Get the player's binary and the offset for the blend
+ file = open(player_path, 'rb')
+ player_d = file.read()
+ offset = file.tell()
+ file.close()
+
+ # Create a tmp blend file (Blenderplayer doesn't like compressed blends)
+ blend_path = bpy.path.clean_name(output_path)
+ bpy.ops.wm.save_as_mainfile(filepath=blend_path, compress=False, copy=True)
+ blend_path += '.blend'
+
+ # Get the blend data
+ blend_file = open(blend_path, 'rb')
+ blend_d = blend_file.read()
+ blend_file.close()
+
+ # Get rid of the tmp blend, we're done with it
+ os.remove(blend_path)
+
+ # Create a new file for the bundled runtime
+ output = open(output_path, 'wb')
+
+ # Write the player and blend data to the new runtime
+ print("Writing runtime...", end=" ")
+ output.write(player_d)
+ output.write(blend_d)
+
+ # Store the offset (an int is 4 bytes, so we split it up into 4 bytes and save it)
+ output.write(struct.pack('B', (offset>>24)&0xFF))
+ output.write(struct.pack('B', (offset>>16)&0xFF))
+ output.write(struct.pack('B', (offset>>8)&0xFF))
+ output.write(struct.pack('B', (offset>>0)&0xFF))
+
+ # Stuff for the runtime
+ output.write(b'BRUNTIME')
+ output.close()
+
+ print("done")
+
+ # Make the runtime executable on Linux
+ if os.name == 'posix':
+ os.chmod(output_path, 0o755)
+
+ # Copy bundled Python
+ blender_dir = os.path.dirname(bpy.app.binary_path)
+ runtime_dir = os.path.dirname(output_path)
+
+ if copy_python:
+ print("Copying Python files...", end=" ")
+ py_folder = os.path.join(bpy.app.version_string.split()[0], "python", "lib")
+ dst = os.path.join(runtime_dir, py_folder)
+ CopyPythonLibs(dst, overwrite_lib)
+ print("done")
+
+ # And DLLs
+ if copy_dlls:
+ print("Copying DLLs...", end=" ")
+ for file in [i for i in os.listdir(blender_dir) if i.lower().endswith('.dll')]:
+ src = os.path.join(blender_dir, file)
+ dst = os.path.join(runtime_dir, file)
+ shutil.copy2(src, dst)
+
+ print("done")
+
+from bpy.props import *
+
+
+class SaveAsRuntime(bpy.types.Operator):
+ bl_idname = "wm.save_as_runtime"
+ bl_label = "Save As Game Engine Runtime"
+ bl_options = {'REGISTER'}
+
+ if sys.platform == 'darwin':
+ blender_bin_dir = '/'+os.path.join(*bpy.app.binary_path.split('/')[0:-4])
+ ext = '.app'
+ else:
+ blender_bin_path = bpy.app.binary_path
+ blender_bin_dir = os.path.dirname(blender_bin_path)
+ ext = os.path.splitext(blender_bin_path)[-1].lower()
+
+ default_player_path = os.path.join(blender_bin_dir, 'blenderplayer' + ext)
+ player_path = StringProperty(name="Player Path", description="The path to the player to use", default=default_player_path)
+ filepath = StringProperty(name="Output Path", description="Where to save the runtime", default="")
+ copy_python = BoolProperty(name="Copy Python", description="Copy bundle Python with the runtime", default=True)
+ overwrite_lib = BoolProperty(name="Overwrite 'lib' folder", description="Overwrites the lib folder (if one exists) with the bundled Python lib folder", default=False)
+
+ # Only Windows has dlls to copy
+ if ext == '.exe':
+ copy_dlls = BoolProperty(name="Copy DLLs", description="Copy all needed DLLs with the runtime", default=True)
+ else:
+ copy_dlls = False
+
+ def execute(self, context):
+ import time
+ start_time = time.clock()
+ print("Saving runtime to", self.properties.filepath)
+ WriteRuntime(self.properties.player_path,
+ self.properties.filepath,
+ self.properties.copy_python,
+ self.properties.overwrite_lib,
+ self.copy_dlls)
+ print("Finished in %.4fs" % (time.clock()-start_time))
+ return {'FINISHED'}
+
+ def invoke(self, context, event):
+ if not self.filepath:
+ ext = '.app' if sys.platform == 'darwin' else os.path.splitext(bpy.app.binary_path)[-1]
+ self.filepath = bpy.path.ensure_ext(bpy.data.filepath, ext)
+
+ wm = context.window_manager
+ wm.fileselect_add(self)
+ return {'RUNNING_MODAL'}
+
+
+def menu_func(self, context):
+ self.layout.operator(SaveAsRuntime.bl_idname)
+
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ bpy.types.INFO_MT_file_export.append(menu_func)
+
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+ bpy.types.INFO_MT_file_export.remove(menu_func)
+
+
+if __name__ == "__main__":
+ register()
diff --git a/io_anim_bvh/__init__.py b/io_anim_bvh/__init__.py
new file mode 100644
index 00000000..94d048f6
--- /dev/null
+++ b/io_anim_bvh/__init__.py
@@ -0,0 +1,154 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+bl_info = {
+ "name": "BioVision Motion Capture (BVH) format",
+ "author": "Campbell Barton",
+ "blender": (2, 5, 7),
+ "api": 35622,
+ "location": "File > Import-Export",
+ "description": "Import-Export BVH from armature objects",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/Import-Export/MotionCapture_BVH",
+ "tracker_url": "",
+ "support": 'OFFICIAL',
+ "category": "Import-Export"}
+
+# To support reload properly, try to access a package var, if it's there, reload everything
+if "bpy" in locals():
+ import imp
+ if "import_bvh" in locals():
+ imp.reload(import_bvh)
+ if "export_bvh" in locals():
+ imp.reload(export_bvh)
+
+import bpy
+from bpy.props import StringProperty, FloatProperty, IntProperty, BoolProperty, EnumProperty
+from bpy_extras.io_utils import ImportHelper, ExportHelper
+
+
+class ImportBVH(bpy.types.Operator, ImportHelper):
+ '''Load a BVH motion capture file'''
+ bl_idname = "import_anim.bvh"
+ bl_label = "Import BVH"
+ bl_options = {'REGISTER', 'UNDO'}
+
+ filename_ext = ".bvh"
+ filter_glob = StringProperty(default="*.bvh", options={'HIDDEN'})
+
+ target = EnumProperty(items=(
+ ('ARMATURE', "Armature", ""),
+ ('OBJECT', "Object", ""),
+ ),
+ name="Target",
+ description="Import target type.",
+ default='ARMATURE')
+
+ global_scale = FloatProperty(name="Scale", description="Scale the BVH by this value", min=0.0001, max=1000000.0, soft_min=0.001, soft_max=100.0, default=1.0)
+ frame_start = IntProperty(name="Start Frame", description="Starting frame for the animation", default=1)
+ use_cyclic = BoolProperty(name="Loop", description="Loop the animation playback", default=False)
+ rotate_mode = EnumProperty(items=(
+ ('QUATERNION', "Quaternion", "Convert rotations to quaternions"),
+ ('NATIVE', "Euler (Native)", "Use the rotation order defined in the BVH file"),
+ ('XYZ', "Euler (XYZ)", "Convert rotations to euler XYZ"),
+ ('XZY', "Euler (XZY)", "Convert rotations to euler XZY"),
+ ('YXZ', "Euler (YXZ)", "Convert rotations to euler YXZ"),
+ ('YZX', "Euler (YZX)", "Convert rotations to euler YZX"),
+ ('ZXY', "Euler (ZXY)", "Convert rotations to euler ZXY"),
+ ('ZYX', "Euler (ZYX)", "Convert rotations to euler ZYX"),
+ ),
+ name="Rotation",
+ description="Rotation conversion.",
+ default='NATIVE')
+
+ def execute(self, context):
+ from . import import_bvh
+ return import_bvh.load(self, context, **self.as_keywords(ignore=("filter_glob",)))
+
+
+class ExportBVH(bpy.types.Operator, ExportHelper):
+ '''Save a BVH motion capture file from an armature'''
+ bl_idname = "export_anim.bvh"
+ bl_label = "Export BVH"
+
+ filename_ext = ".bvh"
+ filter_glob = StringProperty(default="*.bvh", options={'HIDDEN'})
+
+ global_scale = FloatProperty(name="Scale", description="Scale the BVH by this value", min=0.0001, max=1000000.0, soft_min=0.001, soft_max=100.0, default=1.0)
+ frame_start = IntProperty(name="Start Frame", description="Starting frame to export", default=0)
+ frame_end = IntProperty(name="End Frame", description="End frame to export", default=0)
+
+ rotate_mode = EnumProperty(items=(
+ ('NATIVE', "Euler (Native)", "Use the rotation order defined in the BVH file"),
+ ('XYZ', "Euler (XYZ)", "Convert rotations to euler XYZ"),
+ ('XZY', "Euler (XZY)", "Convert rotations to euler XZY"),
+ ('YXZ', "Euler (YXZ)", "Convert rotations to euler YXZ"),
+ ('YZX', "Euler (YZX)", "Convert rotations to euler YZX"),
+ ('ZXY', "Euler (ZXY)", "Convert rotations to euler ZXY"),
+ ('ZYX', "Euler (ZYX)", "Convert rotations to euler ZYX"),
+ ),
+ name="Rotation",
+ description="Rotation conversion.",
+ default='NATIVE')
+
+ @classmethod
+ def poll(cls, context):
+ obj = context.object
+ return obj and obj.type == 'ARMATURE'
+
+ def invoke(self, context, event):
+ self.frame_start = context.scene.frame_start
+ self.frame_end = context.scene.frame_end
+
+ return super().invoke(context, event)
+
+ def execute(self, context):
+ if self.frame_start == 0 and self.frame_end == 0:
+ self.frame_start = context.scene.frame_start
+ self.frame_end = context.scene.frame_end
+
+ from . import export_bvh
+ return export_bvh.save(self, context, **self.as_keywords(ignore=("check_existing", "filter_glob")))
+
+
+def menu_func_import(self, context):
+ self.layout.operator(ImportBVH.bl_idname, text="Motion Capture (.bvh)")
+
+
+def menu_func_export(self, context):
+ self.layout.operator(ExportBVH.bl_idname, text="Motion Capture (.bvh)")
+
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ bpy.types.INFO_MT_file_import.append(menu_func_import)
+ bpy.types.INFO_MT_file_export.append(menu_func_export)
+
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+ bpy.types.INFO_MT_file_import.remove(menu_func_import)
+ bpy.types.INFO_MT_file_export.remove(menu_func_export)
+
+if __name__ == "__main__":
+ register()
diff --git a/io_anim_bvh/export_bvh.py b/io_anim_bvh/export_bvh.py
new file mode 100644
index 00000000..851eb1cc
--- /dev/null
+++ b/io_anim_bvh/export_bvh.py
@@ -0,0 +1,281 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+# Script copyright (C) Campbell Barton
+# fixes from Andrea Rugliancich
+
+import bpy
+
+
+def write_armature(context,
+ filepath,
+ frame_start,
+ frame_end,
+ global_scale=1.0,
+ rotate_mode='NATIVE',
+ ):
+
+ def ensure_rot_order(rot_order_str):
+ if set(rot_order_str) != {'X', 'Y', 'Z'}:
+ rot_order_str = "XYZ"
+ return rot_order_str
+
+ from mathutils import Matrix, Vector, Euler
+ from math import degrees
+
+ file = open(filepath, "w", encoding="utf8", newline="\n")
+
+ obj = context.object
+ arm = obj.data
+
+ # Build a dictionary of children.
+ # None for parentless
+ children = {None: []}
+
+ # initialize with blank lists
+ for bone in arm.bones:
+ children[bone.name] = []
+
+ for bone in arm.bones:
+ children[getattr(bone.parent, "name", None)].append(bone.name)
+
+ # sort the children
+ for children_list in children.values():
+ children_list.sort()
+
+ # bone name list in the order that the bones are written
+ serialized_names = []
+
+ node_locations = {}
+
+ file.write("HIERARCHY\n")
+
+ def write_recursive_nodes(bone_name, indent):
+ my_children = children[bone_name]
+
+ indent_str = "\t" * indent
+
+ bone = arm.bones[bone_name]
+ pose_bone = obj.pose.bones[bone_name]
+ loc = bone.head_local
+ node_locations[bone_name] = loc
+
+ if rotate_mode == "NATIVE":
+ rot_order_str = ensure_rot_order(pose_bone.rotation_mode)
+ else:
+ rot_order_str = rotate_mode
+
+ # make relative if we can
+ if bone.parent:
+ loc = loc - node_locations[bone.parent.name]
+
+ if indent:
+ file.write("%sJOINT %s\n" % (indent_str, bone_name))
+ else:
+ file.write("%sROOT %s\n" % (indent_str, bone_name))
+
+ file.write("%s{\n" % indent_str)
+ file.write("%s\tOFFSET %.6f %.6f %.6f\n" % (indent_str, loc.x * global_scale, loc.y * global_scale, loc.z * global_scale))
+ if bone.use_connect and bone.parent:
+ file.write("%s\tCHANNELS 3 %srotation %srotation %srotation\n" % (indent_str, rot_order_str[0], rot_order_str[1], rot_order_str[2]))
+ else:
+ file.write("%s\tCHANNELS 6 Xposition Yposition Zposition %srotation %srotation %srotation\n" % (indent_str, rot_order_str[0], rot_order_str[1], rot_order_str[2]))
+
+ if my_children:
+ # store the location for the children
+            # to get their relative offset
+
+ # Write children
+ for child_bone in my_children:
+ serialized_names.append(child_bone)
+ write_recursive_nodes(child_bone, indent + 1)
+
+ else:
+ # Write the bone end.
+ file.write("%s\tEnd Site\n" % indent_str)
+ file.write("%s\t{\n" % indent_str)
+ loc = bone.tail_local - node_locations[bone_name]
+ file.write("%s\t\tOFFSET %.6f %.6f %.6f\n" % (indent_str, loc.x * global_scale, loc.y * global_scale, loc.z * global_scale))
+ file.write("%s\t}\n" % indent_str)
+
+ file.write("%s}\n" % indent_str)
+
+ if len(children[None]) == 1:
+ key = children[None][0]
+ serialized_names.append(key)
+ indent = 0
+
+ write_recursive_nodes(key, indent)
+
+ else:
+ # Write a dummy parent node
+ file.write("ROOT %s\n" % key)
+ file.write("{\n")
+ file.write("\tOFFSET 0.0 0.0 0.0\n")
+ file.write("\tCHANNELS 0\n") # Xposition Yposition Zposition Xrotation Yrotation Zrotation
+ key = None
+ indent = 1
+
+ write_recursive_nodes(key, indent)
+
+ file.write("}\n")
+
+ # redefine bones as sorted by serialized_names
+ # so we can write motion
+
+ class decorated_bone(object):
+ __slots__ = (\
+ "name", # bone name, used as key in many places
+ "parent", # decorated bone parent, set in a later loop
+ "rest_bone", # blender armature bone
+ "pose_bone", # blender pose bone
+ "pose_mat", # blender pose matrix
+ "rest_arm_mat", # blender rest matrix (armature space)
+            "rest_local_mat",  # blender rest matrix (local space)
+ "pose_imat", # pose_mat inverted
+ "rest_arm_imat", # rest_arm_mat inverted
+ "rest_local_imat", # rest_local_mat inverted
+            "prev_euler",  # last used euler to preserve euler compatibility in between keyframes
+ "connected", # is the bone connected to the parent bone?
+ "rot_order",
+ "rot_order_str",
+ )
+
+ _eul_order_lookup = {
+ 'XYZ': (0, 1, 2),
+ 'XZY': (0, 2, 1),
+ 'YXZ': (1, 0, 2),
+ 'YZX': (1, 2, 0),
+ 'ZXY': (2, 0, 1),
+ 'ZYX': (2, 1, 0)}
+
+ def __init__(self, bone_name):
+ self.name = bone_name
+ self.rest_bone = arm.bones[bone_name]
+ self.pose_bone = obj.pose.bones[bone_name]
+
+ if rotate_mode == "NATIVE":
+ self.rot_order_str = ensure_rot_order(self.pose_bone.rotation_mode)
+ else:
+ self.rot_order_str = rotate_mode
+
+ self.rot_order = __class__._eul_order_lookup[self.rot_order_str]
+
+ self.pose_mat = self.pose_bone.matrix
+
+ mat = self.rest_bone.matrix
+ self.rest_arm_mat = self.rest_bone.matrix_local
+ self.rest_local_mat = self.rest_bone.matrix
+
+ # inverted mats
+ self.pose_imat = self.pose_mat.inverted()
+ self.rest_arm_imat = self.rest_arm_mat.inverted()
+ self.rest_local_imat = self.rest_local_mat.inverted()
+
+ self.parent = None
+ self.prev_euler = Euler((0.0, 0.0, 0.0), self.rot_order_str)
+ self.connected = (self.rest_bone.use_connect and self.rest_bone.parent)
+
+ def update_posedata(self):
+ self.pose_mat = self.pose_bone.matrix
+ self.pose_imat = self.pose_mat.inverted()
+
+ def __repr__(self):
+ if self.parent:
+ return "[\"%s\" child on \"%s\"]\n" % (self.name, self.parent.name)
+ else:
+ return "[\"%s\" root bone]\n" % (self.name)
+
+ bones_decorated = [decorated_bone(bone_name) for bone_name in serialized_names]
+
+ # Assign parents
+ bones_decorated_dict = {}
+ for dbone in bones_decorated:
+ bones_decorated_dict[dbone.name] = dbone
+
+ for dbone in bones_decorated:
+ parent = dbone.rest_bone.parent
+ if parent:
+ dbone.parent = bones_decorated_dict[parent.name]
+ del bones_decorated_dict
+ # finish assigning parents
+
+ scene = bpy.context.scene
+
+ file.write("MOTION\n")
+ file.write("Frames: %d\n" % (frame_end - frame_start + 1))
+ file.write("Frame Time: %.6f\n" % (1.0 / (scene.render.fps / scene.render.fps_base)))
+
+ for frame in range(frame_start, frame_end + 1):
+ scene.frame_set(frame)
+
+ for dbone in bones_decorated:
+ dbone.update_posedata()
+
+ for dbone in bones_decorated:
+ trans = Matrix.Translation(dbone.rest_bone.head_local)
+ itrans = Matrix.Translation(-dbone.rest_bone.head_local)
+
+ if dbone.parent:
+ mat_final = dbone.parent.rest_arm_mat * dbone.parent.pose_imat * dbone.pose_mat * dbone.rest_arm_imat
+ mat_final = itrans * mat_final * trans
+ loc = mat_final.to_translation() + (dbone.rest_bone.head_local - dbone.parent.rest_bone.head_local)
+ else:
+ mat_final = dbone.pose_mat * dbone.rest_arm_imat
+ mat_final = itrans * mat_final * trans
+ loc = mat_final.to_translation() + dbone.rest_bone.head
+
+ # keep eulers compatible, no jumping on interpolation.
+ rot = mat_final.to_3x3().inverted().to_euler(dbone.rot_order_str, dbone.prev_euler)
+
+ if not dbone.connected:
+ file.write("%.6f %.6f %.6f " % (loc * global_scale)[:])
+
+ file.write("%.6f %.6f %.6f " % (-degrees(rot[dbone.rot_order[0]]), -degrees(rot[dbone.rot_order[1]]), -degrees(rot[dbone.rot_order[2]])))
+
+ dbone.prev_euler = rot
+
+ file.write("\n")
+
+ file.close()
+
+ print("BVH Exported: %s frames:%d\n" % (filepath, frame_end - frame_start + 1))
+
+
+def save(operator, context, filepath="",
+ frame_start=-1,
+ frame_end=-1,
+ global_scale=1.0,
+ rotate_mode="NATIVE",
+ ):
+
+ write_armature(context, filepath,
+ frame_start=frame_start,
+ frame_end=frame_end,
+ global_scale=global_scale,
+ rotate_mode=rotate_mode,
+ )
+
+ return {'FINISHED'}
+
+
+if __name__ == "__main__":
+ scene = bpy.context.scene
+ _read(bpy.data.filepath.rstrip(".blend") + ".bvh", bpy.context.object, scene.frame_start, scene.frame_end, 1.0)
diff --git a/io_anim_bvh/import_bvh.py b/io_anim_bvh/import_bvh.py
new file mode 100644
index 00000000..4f1d19ff
--- /dev/null
+++ b/io_anim_bvh/import_bvh.py
@@ -0,0 +1,552 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+# Script copyright (C) Campbell Barton
+
+import math
+from math import radians
+
+import bpy
+import mathutils
+from mathutils import Vector, Euler, Matrix
+
+
+class bvh_node_class(object):
+ __slots__ = (
+ 'name', # bvh joint name
+ 'parent', # bvh_node_class type or None for no parent
+ 'children', # a list of children of this type.
+ 'rest_head_world', # worldspace rest location for the head of this node
+ 'rest_head_local', # localspace rest location for the head of this node
+ 'rest_tail_world', # worldspace rest location for the tail of this node
+ 'rest_tail_local', # worldspace rest location for the tail of this node
+ 'channels', # list of 6 ints, -1 for an unused channel, otherwise an index for the BVH motion data lines, lock triple then rot triple
+ 'rot_order', # a triple of indices as to the order rotation is applied. [0,1,2] is x/y/z - [None, None, None] if no rotation.
+ 'rot_order_str', # same as above but a string 'XYZ' format.
+ 'anim_data', # a list one tuple's one for each frame. (locx, locy, locz, rotx, roty, rotz), euler rotation ALWAYS stored xyz order, even when native used.
+ 'has_loc', # Conveinience function, bool, same as (channels[0]!=-1 or channels[1]!=-1 channels[2]!=-1)
+ 'has_rot', # Conveinience function, bool, same as (channels[3]!=-1 or channels[4]!=-1 channels[5]!=-1)
+ 'temp') # use this for whatever you want
+
+ _eul_order_lookup = {
+ (0, 1, 2): 'XYZ',
+ (0, 2, 1): 'XZY',
+ (1, 0, 2): 'YXZ',
+ (1, 2, 0): 'YZX',
+ (2, 0, 1): 'ZXY',
+ (2, 1, 0): 'ZYX'}
+
+ def __init__(self, name, rest_head_world, rest_head_local, parent, channels, rot_order):
+ self.name = name
+ self.rest_head_world = rest_head_world
+ self.rest_head_local = rest_head_local
+ self.rest_tail_world = None
+ self.rest_tail_local = None
+ self.parent = parent
+ self.channels = channels
+ self.rot_order = tuple(rot_order)
+ self.rot_order_str = __class__._eul_order_lookup[self.rot_order]
+
+ # convenience functions
+ self.has_loc = channels[0] != -1 or channels[1] != -1 or channels[2] != -1
+ self.has_rot = channels[3] != -1 or channels[4] != -1 or channels[5] != -1
+
+ self.children = []
+
+ # list of 6 length tuples: (lx,ly,lz, rx,ry,rz)
+ # even if the channels arnt used they will just be zero
+ #
+ self.anim_data = [(0, 0, 0, 0, 0, 0)]
+
+ def __repr__(self):
+ return 'BVH name:"%s", rest_loc:(%.3f,%.3f,%.3f), rest_tail:(%.3f,%.3f,%.3f)' %\
+ (self.name,\
+ self.rest_head_world.x, self.rest_head_world.y, self.rest_head_world.z,\
+ self.rest_head_world.x, self.rest_head_world.y, self.rest_head_world.z)
+
+
+def read_bvh(context, file_path, rotate_mode='XYZ', global_scale=1.0):
+ # File loading stuff
+ # Open the file for importing
+ file = open(file_path, 'rU')
+
+ # Seperate into a list of lists, each line a list of words.
+ file_lines = file.readlines()
+ # Non standard carrage returns?
+ if len(file_lines) == 1:
+ file_lines = file_lines[0].split('\r')
+
+ # Split by whitespace.
+ file_lines = [ll for ll in [l.split() for l in file_lines] if ll]
+
+ # Create Hirachy as empties
+ if file_lines[0][0].lower() == 'hierarchy':
+ #print 'Importing the BVH Hierarchy for:', file_path
+ pass
+ else:
+ raise 'ERROR: This is not a BVH file'
+
+ bvh_nodes = {None: None}
+ bvh_nodes_serial = [None]
+
+ channelIndex = -1
+
+ lineIdx = 0 # An index for the file.
+ while lineIdx < len(file_lines) - 1:
+ #...
+ if file_lines[lineIdx][0].lower() == 'root' or file_lines[lineIdx][0].lower() == 'joint':
+
+ # Join spaces into 1 word with underscores joining it.
+ if len(file_lines[lineIdx]) > 2:
+ file_lines[lineIdx][1] = '_'.join(file_lines[lineIdx][1:])
+ file_lines[lineIdx] = file_lines[lineIdx][:2]
+
+ # MAY NEED TO SUPPORT MULTIPLE ROOT's HERE!!!, Still unsure weather multiple roots are possible.??
+
+ # Make sure the names are unique- Object names will match joint names exactly and both will be unique.
+ name = file_lines[lineIdx][1]
+
+ #print '%snode: %s, parent: %s' % (len(bvh_nodes_serial) * ' ', name, bvh_nodes_serial[-1])
+
+ lineIdx += 2 # Increment to the next line (Offset)
+ rest_head_local = Vector((float(file_lines[lineIdx][1]), float(file_lines[lineIdx][2]), float(file_lines[lineIdx][3]))) * global_scale
+ lineIdx += 1 # Increment to the next line (Channels)
+
+ # newChannel[Xposition, Yposition, Zposition, Xrotation, Yrotation, Zrotation]
+ # newChannel references indecies to the motiondata,
+ # if not assigned then -1 refers to the last value that will be added on loading at a value of zero, this is appended
+ # We'll add a zero value onto the end of the MotionDATA so this is always refers to a value.
+ my_channel = [-1, -1, -1, -1, -1, -1]
+ my_rot_order = [None, None, None]
+ rot_count = 0
+ for channel in file_lines[lineIdx][2:]:
+ channel = channel.lower()
+ channelIndex += 1 # So the index points to the right channel
+ if channel == 'xposition':
+ my_channel[0] = channelIndex
+ elif channel == 'yposition':
+ my_channel[1] = channelIndex
+ elif channel == 'zposition':
+ my_channel[2] = channelIndex
+
+ elif channel == 'xrotation':
+ my_channel[3] = channelIndex
+ my_rot_order[rot_count] = 0
+ rot_count += 1
+ elif channel == 'yrotation':
+ my_channel[4] = channelIndex
+ my_rot_order[rot_count] = 1
+ rot_count += 1
+ elif channel == 'zrotation':
+ my_channel[5] = channelIndex
+ my_rot_order[rot_count] = 2
+ rot_count += 1
+
+ channels = file_lines[lineIdx][2:]
+
+ my_parent = bvh_nodes_serial[-1] # account for none
+
+ # Apply the parents offset accumulatively
+ if my_parent is None:
+ rest_head_world = Vector(rest_head_local)
+ else:
+ rest_head_world = my_parent.rest_head_world + rest_head_local
+
+ bvh_node = bvh_nodes[name] = bvh_node_class(name, rest_head_world, rest_head_local, my_parent, my_channel, my_rot_order)
+
+ # If we have another child then we can call ourselves a parent, else
+ bvh_nodes_serial.append(bvh_node)
+
+ # Account for an end node
+ if file_lines[lineIdx][0].lower() == 'end' and file_lines[lineIdx][1].lower() == 'site': # There is sometimes a name after 'End Site' but we will ignore it.
+ lineIdx += 2 # Increment to the next line (Offset)
+ rest_tail = Vector((float(file_lines[lineIdx][1]), float(file_lines[lineIdx][2]), float(file_lines[lineIdx][3]))) * global_scale
+
+ bvh_nodes_serial[-1].rest_tail_world = bvh_nodes_serial[-1].rest_head_world + rest_tail
+ bvh_nodes_serial[-1].rest_tail_local = bvh_nodes_serial[-1].rest_head_local + rest_tail
+
+ # Just so we can remove the Parents in a uniform way- End has kids
+ # so this is a placeholder
+ bvh_nodes_serial.append(None)
+
+ if len(file_lines[lineIdx]) == 1 and file_lines[lineIdx][0] == '}': # == ['}']
+ bvh_nodes_serial.pop() # Remove the last item
+
+ if len(file_lines[lineIdx]) == 1 and file_lines[lineIdx][0].lower() == 'motion':
+ #print '\nImporting motion data'
+ lineIdx += 3 # Set the cursor to the first frame
+ break
+
+ lineIdx += 1
+
+ # Remove the None value used for easy parent reference
+ del bvh_nodes[None]
+ # Dont use anymore
+ del bvh_nodes_serial
+
+ bvh_nodes_list = bvh_nodes.values()
+
+ while lineIdx < len(file_lines):
+ line = file_lines[lineIdx]
+ for bvh_node in bvh_nodes_list:
+ #for bvh_node in bvh_nodes_serial:
+ lx = ly = lz = rx = ry = rz = 0.0
+ channels = bvh_node.channels
+ anim_data = bvh_node.anim_data
+ if channels[0] != -1:
+ lx = global_scale * float(line[channels[0]])
+
+ if channels[1] != -1:
+ ly = global_scale * float(line[channels[1]])
+
+ if channels[2] != -1:
+ lz = global_scale * float(line[channels[2]])
+
+ if channels[3] != -1 or channels[4] != -1 or channels[5] != -1:
+
+ rx = radians(float(line[channels[3]]))
+ ry = radians(float(line[channels[4]]))
+ rz = radians(float(line[channels[5]]))
+
+ # Done importing motion data #
+ anim_data.append((lx, ly, lz, rx, ry, rz))
+ lineIdx += 1
+
+ # Assign children
+ for bvh_node in bvh_nodes.values():
+ bvh_node_parent = bvh_node.parent
+ if bvh_node_parent:
+ bvh_node_parent.children.append(bvh_node)
+
+ # Now set the tip of each bvh_node
+ for bvh_node in bvh_nodes.values():
+
+ if not bvh_node.rest_tail_world:
+ if len(bvh_node.children) == 0:
+ # could just fail here, but rare BVH files have childless nodes
+ bvh_node.rest_tail_world = Vector(bvh_node.rest_head_world)
+ bvh_node.rest_tail_local = Vector(bvh_node.rest_head_local)
+ elif len(bvh_node.children) == 1:
+ bvh_node.rest_tail_world = Vector(bvh_node.children[0].rest_head_world)
+ bvh_node.rest_tail_local = bvh_node.rest_head_local + bvh_node.children[0].rest_head_local
+ else:
+ # allow this, see above
+ #if not bvh_node.children:
+ # raise 'error, bvh node has no end and no children. bad file'
+
+ # Removed temp for now
+ rest_tail_world = Vector((0.0, 0.0, 0.0))
+ rest_tail_local = Vector((0.0, 0.0, 0.0))
+ for bvh_node_child in bvh_node.children:
+ rest_tail_world += bvh_node_child.rest_head_world
+ rest_tail_local += bvh_node_child.rest_head_local
+
+ bvh_node.rest_tail_world = rest_tail_world * (1.0 / len(bvh_node.children))
+ bvh_node.rest_tail_local = rest_tail_local * (1.0 / len(bvh_node.children))
+
+ # Make sure tail isnt the same location as the head.
+ if (bvh_node.rest_tail_local - bvh_node.rest_head_local).length <= 0.001 * global_scale:
+ print("\tzero length node found:", bvh_node.name)
+ bvh_node.rest_tail_local.y = bvh_node.rest_tail_local.y + global_scale / 10
+ bvh_node.rest_tail_world.y = bvh_node.rest_tail_world.y + global_scale / 10
+
+ return bvh_nodes
+
+
+def bvh_node_dict2objects(context, bvh_name, bvh_nodes, rotate_mode='NATIVE', frame_start=1, IMPORT_LOOP=False):
+
+ if frame_start < 1:
+ frame_start = 1
+
+ scene = context.scene
+ for obj in scene.objects:
+ obj.select = False
+
+ objects = []
+
+ def add_ob(name):
+ obj = bpy.data.objects.new(name, None)
+ scene.objects.link(obj)
+ objects.append(obj)
+ obj.select = True
+
+ # nicer drawing.
+ obj.empty_draw_type = 'CUBE'
+ obj.empty_draw_size = 0.1
+
+ return obj
+
+ # Add objects
+ for name, bvh_node in bvh_nodes.items():
+ bvh_node.temp = add_ob(name)
+ bvh_node.temp.rotation_mode = bvh_node.rot_order_str[::-1]
+
+ # Parent the objects
+ for bvh_node in bvh_nodes.values():
+ for bvh_node_child in bvh_node.children:
+ bvh_node_child.temp.parent = bvh_node.temp
+
+ # Offset
+ for bvh_node in bvh_nodes.values():
+ # Make relative to parents offset
+ bvh_node.temp.location = bvh_node.rest_head_local
+
+ # Add tail objects
+ for name, bvh_node in bvh_nodes.items():
+ if not bvh_node.children:
+ ob_end = add_ob(name + '_end')
+ ob_end.parent = bvh_node.temp
+ ob_end.location = bvh_node.rest_tail_world - bvh_node.rest_head_world
+
+ for name, bvh_node in bvh_nodes.items():
+ obj = bvh_node.temp
+
+ for frame_current in range(len(bvh_node.anim_data)):
+
+ lx, ly, lz, rx, ry, rz = bvh_node.anim_data[frame_current]
+
+ if bvh_node.has_loc:
+ obj.delta_location = Vector((lx, ly, lz)) - bvh_node.rest_head_world
+ obj.keyframe_insert("delta_location", index=-1, frame=frame_start + frame_current)
+
+ if bvh_node.has_rot:
+ obj.delta_rotation_euler = rx, ry, rz
+ obj.keyframe_insert("delta_rotation_euler", index=-1, frame=frame_start + frame_current)
+
+ return objects
+
+
+def bvh_node_dict2armature(context, bvh_name, bvh_nodes, rotate_mode='XYZ', frame_start=1, IMPORT_LOOP=False):
+
+ if frame_start < 1:
+ frame_start = 1
+
+ # Add the new armature,
+ scene = context.scene
+ for obj in scene.objects:
+ obj.select = False
+
+ arm_data = bpy.data.armatures.new(bvh_name)
+ arm_ob = bpy.data.objects.new(bvh_name, arm_data)
+
+ scene.objects.link(arm_ob)
+
+ arm_ob.select = True
+ scene.objects.active = arm_ob
+
+ bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
+ bpy.ops.object.mode_set(mode='EDIT', toggle=False)
+
+ # Get the average bone length for zero length bones, we may not use this.
+ average_bone_length = 0.0
+ nonzero_count = 0
+ for bvh_node in bvh_nodes.values():
+ l = (bvh_node.rest_head_local - bvh_node.rest_tail_local).length
+ if l:
+ average_bone_length += l
+ nonzero_count += 1
+
+ # Very rare cases all bones couldbe zero length???
+ if not average_bone_length:
+ average_bone_length = 0.1
+ else:
+ # Normal operation
+ average_bone_length = average_bone_length / nonzero_count
+
+ # XXX, annoying, remove bone.
+ while arm_data.edit_bones:
+ arm_ob.edit_bones.remove(arm_data.edit_bones[-1])
+
+ ZERO_AREA_BONES = []
+ for name, bvh_node in bvh_nodes.items():
+ # New editbone
+ bone = bvh_node.temp = arm_data.edit_bones.new(name)
+
+ bone.head = bvh_node.rest_head_world
+ bone.tail = bvh_node.rest_tail_world
+
+ # ZERO AREA BONES.
+ if (bone.head - bone.tail).length < 0.001:
+ print("\tzero length bone found:", bone.name)
+ if bvh_node.parent:
+ ofs = bvh_node.parent.rest_head_local - bvh_node.parent.rest_tail_local
+ if ofs.length: # is our parent zero length also?? unlikely
+ bone.tail = bone.tail - ofs
+ else:
+ bone.tail.y = bone.tail.y + average_bone_length
+ else:
+ bone.tail.y = bone.tail.y + average_bone_length
+
+ ZERO_AREA_BONES.append(bone.name)
+
+ for bvh_node in bvh_nodes.values():
+ if bvh_node.parent:
+ # bvh_node.temp is the Editbone
+
+ # Set the bone parent
+ bvh_node.temp.parent = bvh_node.parent.temp
+
+ # Set the connection state
+ if not bvh_node.has_loc and\
+ bvh_node.parent and\
+ bvh_node.parent.temp.name not in ZERO_AREA_BONES and\
+ bvh_node.parent.rest_tail_local == bvh_node.rest_head_local:
+ bvh_node.temp.use_connect = True
+
+ # Replace the editbone with the editbone name,
+ # to avoid memory errors accessing the editbone outside editmode
+ for bvh_node in bvh_nodes.values():
+ bvh_node.temp = bvh_node.temp.name
+
+ # Now Apply the animation to the armature
+
+ # Get armature animation data
+ bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
+
+ pose = arm_ob.pose
+ pose_bones = pose.bones
+
+ if rotate_mode == 'NATIVE':
+ for bvh_node in bvh_nodes.values():
+ bone_name = bvh_node.temp # may not be the same name as the bvh_node, could have been shortened.
+ pose_bone = pose_bones[bone_name]
+ pose_bone.rotation_mode = bvh_node.rot_order_str
+
+ elif rotate_mode != 'QUATERNION':
+ for pose_bone in pose_bones:
+ pose_bone.rotation_mode = rotate_mode
+ else:
+ # Quats default
+ pass
+
+ context.scene.update()
+
+ arm_ob.animation_data_create()
+ action = bpy.data.actions.new(name=bvh_name)
+ arm_ob.animation_data.action = action
+
+ # Replace the bvh_node.temp (currently an editbone)
+ # With a tuple (pose_bone, armature_bone, bone_rest_matrix, bone_rest_matrix_inv)
+ for bvh_node in bvh_nodes.values():
+ bone_name = bvh_node.temp # may not be the same name as the bvh_node, could have been shortened.
+ pose_bone = pose_bones[bone_name]
+ rest_bone = arm_data.bones[bone_name]
+ bone_rest_matrix = rest_bone.matrix_local.to_3x3()
+
+ bone_rest_matrix_inv = Matrix(bone_rest_matrix)
+ bone_rest_matrix_inv.invert()
+
+ bone_rest_matrix_inv.resize_4x4()
+ bone_rest_matrix.resize_4x4()
+ bvh_node.temp = (pose_bone, bone, bone_rest_matrix, bone_rest_matrix_inv)
+
+ # Make a dict for fast access without rebuilding a list all the time.
+
+ # KEYFRAME METHOD, SLOW, USE IPOS DIRECT
+ # TODO: use f-point samples instead (Aligorith)
+ if rotate_mode != 'QUATERNION':
+ prev_euler = [Euler() for i in range(len(bvh_nodes))]
+
+ # Animate the data, the last used bvh_node will do since they all have the same number of frames
+ for frame_current in range(len(bvh_node.anim_data) - 1): # skip the first frame (rest frame)
+ # print frame_current
+
+ # if frame_current==40: # debugging
+ # break
+
+ scene.frame_set(frame_start + frame_current)
+
+ # Dont neet to set the current frame
+ for i, bvh_node in enumerate(bvh_nodes.values()):
+ pose_bone, bone, bone_rest_matrix, bone_rest_matrix_inv = bvh_node.temp
+ lx, ly, lz, rx, ry, rz = bvh_node.anim_data[frame_current + 1]
+
+ if bvh_node.has_rot:
+ # apply rotation order and convert to XYZ
+ # note that the rot_order_str is reversed.
+ bone_rotation_matrix = Euler((rx, ry, rz), bvh_node.rot_order_str[::-1]).to_matrix().to_4x4()
+ bone_rotation_matrix = bone_rest_matrix_inv * bone_rotation_matrix * bone_rest_matrix
+
+ if rotate_mode == 'QUATERNION':
+ pose_bone.rotation_quaternion = bone_rotation_matrix.to_quaternion()
+ else:
+ euler = bone_rotation_matrix.to_euler(pose_bone.rotation_mode, prev_euler[i])
+ pose_bone.rotation_euler = euler
+ prev_euler[i] = euler
+
+ if bvh_node.has_loc:
+ pose_bone.location = (bone_rest_matrix_inv * Matrix.Translation(Vector((lx, ly, lz)) - bvh_node.rest_head_local)).to_translation()
+
+ if bvh_node.has_loc:
+ pose_bone.keyframe_insert("location")
+ if bvh_node.has_rot:
+ if rotate_mode == 'QUATERNION':
+ pose_bone.keyframe_insert("rotation_quaternion")
+ else:
+ pose_bone.keyframe_insert("rotation_euler")
+
+ for cu in action.fcurves:
+ if IMPORT_LOOP:
+ pass # 2.5 doenst have cyclic now?
+
+ for bez in cu.keyframe_points:
+ bez.interpolation = 'LINEAR'
+
+ return arm_ob
+
+
+def load(operator, context, filepath="", target='ARMATURE', rotate_mode='NATIVE', global_scale=1.0, use_cyclic=False, frame_start=1):
+ import time
+ t1 = time.time()
+ print('\tparsing bvh %r...' % filepath, end="")
+
+ bvh_nodes = read_bvh(context, filepath,
+ rotate_mode=rotate_mode,
+ global_scale=global_scale)
+
+ print('%.4f' % (time.time() - t1))
+
+ frame_orig = context.scene.frame_current
+
+ t1 = time.time()
+ print('\timporting to blender...', end="")
+
+ bvh_name = bpy.path.display_name_from_filepath(filepath)
+
+ if target == 'ARMATURE':
+ bvh_node_dict2armature(context, bvh_name, bvh_nodes,
+ rotate_mode=rotate_mode,
+ frame_start=frame_start,
+ IMPORT_LOOP=use_cyclic)
+
+ elif target == 'OBJECT':
+ bvh_node_dict2objects(context, bvh_name, bvh_nodes,
+ rotate_mode=rotate_mode,
+ frame_start=frame_start,
+ IMPORT_LOOP=use_cyclic)
+
+ else:
+ raise Exception("invalid type")
+
+ print('Done in %.4f\n' % (time.time() - t1))
+
+ context.scene.frame_set(frame_orig)
+
+ return {'FINISHED'}
diff --git a/io_anim_camera.py b/io_anim_camera.py
new file mode 100644
index 00000000..2026254d
--- /dev/null
+++ b/io_anim_camera.py
@@ -0,0 +1,170 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+bl_info = {
+ "name": "Export Camera Animation",
+ "author": "Campbell Barton",
+ "version": (0, 1),
+ "blender": (2, 5, 7),
+ "api": 36079,
+ "location": "File > Export > Cameras & Markers (.py)",
+ "description": "Export Cameras & Markers (.py)",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/Import-Export/Camera_Animation",
+ "tracker_url": "https://projects.blender.org/tracker/index.php?"\
+ "func=detail&aid=22835",
+ "support": 'OFFICIAL',
+ "category": "Import-Export"}
+
+
+import bpy
+
+
+def writeCameras(context, filepath, frame_start, frame_end, only_selected=False):
+
+ data_attrs = ['lens', 'shift_x', 'shift_y', 'dof_distance', 'clip_start', 'clip_end', 'draw_size']
+ obj_attrs = ['hide_render']
+
+ fw = open(filepath, 'w').write
+
+ scene = bpy.context.scene
+
+ cameras = []
+
+ for obj in scene.objects:
+ if only_selected and not obj.select:
+ continue
+ if obj.type != 'CAMERA':
+ continue
+
+ cameras.append((obj, obj.data))
+
+ frame_range = range(frame_start, frame_end + 1)
+
+ fw("cameras = {}\n")
+ fw("scene = bpy.context.scene\n")
+ fw("frame = scene.frame_current - 1\n")
+ fw("\n")
+
+ for obj, obj_data in cameras:
+ fw("data = bpy.data.cameras.new('%s')\n" % obj.name)
+ for attr in data_attrs:
+ fw("data.%s = %s\n" % (attr, repr(getattr(obj_data, attr))))
+
+ fw("obj = bpy.data.objects.new('%s', data)\n" % obj.name)
+
+ for attr in obj_attrs:
+ fw("obj.%s = %s\n" % (attr, repr(getattr(obj, attr))))
+
+ fw("scene.objects.link(obj)\n")
+ fw("cameras['%s'] = obj\n" % obj.name)
+ fw("\n")
+
+ for f in frame_range:
+ scene.frame_set(f)
+ fw("# new frame\n")
+ fw("scene.frame_set(%d + frame)\n" % f)
+
+ for obj, obj_data in cameras:
+ fw("obj = cameras['%s']\n" % obj.name)
+
+ matrix = obj.matrix_world.copy()
+ fw("obj.location = %r\n" % matrix.to_translation()[:])
+ fw("obj.scale = %r\n" % matrix.to_scale()[:])
+ fw("obj.rotation_euler = %r\n" % matrix.to_euler()[:])
+
+ fw("obj.keyframe_insert('location')\n")
+ fw("obj.keyframe_insert('scale')\n")
+ fw("obj.keyframe_insert('rotation_euler')\n")
+
+ # only key the angle
+ fw("data = obj.data\n")
+ fw("data.lens = %s\n" % obj_data.lens)
+ fw("data.keyframe_insert('lens')\n")
+
+ fw("\n")
+
+ # now markers
+ fw("# markers\n")
+ for marker in scene.timeline_markers:
+ fw("marker = scene.timeline_markers.new('%s')\n" % marker.name)
+ fw("marker.frame = %d + frame\n" % marker.frame)
+
+ # will fail if the cameras not selected
+ if marker.camera:
+ fw("marker.camera = cameras.get('%s')\n" % marker.camera.name)
+ fw("\n")
+
+
+from bpy.props import StringProperty, IntProperty, BoolProperty
+from bpy_extras.io_utils import ExportHelper
+
+
+class CameraExporter(bpy.types.Operator, ExportHelper):
+ '''Save a python script which re-creartes cameras and markers elsewhere'''
+ bl_idname = "export_animation.cameras"
+ bl_label = "Export Camera & Markers"
+
+ filename_ext = ".py"
+ filter_glob = StringProperty(default="*.py", options={'HIDDEN'})
+
+ frame_start = IntProperty(name="Start Frame",
+ description="Start frame for export",
+ default=1, min=1, max=300000)
+ frame_end = IntProperty(name="End Frame",
+ description="End frame for export",
+ default=250, min=1, max=300000)
+ only_selected = BoolProperty(name="Only Selected",
+ default=True)
+
+ def execute(self, context):
+ writeCameras(context, self.filepath, self.frame_start, self.frame_end, self.only_selected)
+ return {'FINISHED'}
+
+ def invoke(self, context, event):
+ self.frame_start = context.scene.frame_start
+ self.frame_end = context.scene.frame_end
+
+ wm = context.window_manager
+ wm.fileselect_add(self)
+ return {'RUNNING_MODAL'}
+
+
+def menu_export(self, context):
+ import os
+ default_path = os.path.splitext(bpy.data.filepath)[0] + ".py"
+ self.layout.operator(CameraExporter.bl_idname, text="Cameras & Markers (.py)").filepath = default_path
+
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ bpy.types.INFO_MT_file_export.append(menu_export)
+
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+ bpy.types.INFO_MT_file_export.remove(menu_export)
+
+
+if __name__ == "__main__":
+ register()
diff --git a/io_coat3D/__init__.py b/io_coat3D/__init__.py
new file mode 100644
index 00000000..9a787a02
--- /dev/null
+++ b/io_coat3D/__init__.py
@@ -0,0 +1,244 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+bl_info = {
+ "name": "3D-Coat Applink",
+ "author": "Kalle-Samuli Riihikoski (haikalle)",
+ "version": (3, 5, 20),
+ "blender": (2, 5, 8),
+ "api": 35622,
+ "location": "Scene > 3D-Coat Applink",
+ "description": "Transfer data between 3D-Coat/Blender",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/" \
+ "Scripts/Import-Export/3dcoat_applink",
+ "tracker_url": "https://projects.blender.org/tracker/?"\
+ "func=detail&aid=24446",
+ "category": "Import-Export"}
+
+
+
+if "bpy" in locals():
+ import imp
+ imp.reload(coat)
+ imp.reload(tex)
+else:
+ from . import coat
+ from . import tex
+
+import bpy
+from bpy.props import *
+
+
+def register():
+ bpy.coat3D = dict()
+ bpy.coat3D['active_coat'] = ''
+ bpy.coat3D['status'] = 0
+ bpy.coat3D['kuva'] = 1
+
+ class ObjectCoat3D(bpy.types.PropertyGroup):
+ objpath = StringProperty(name="Object_Path")
+ coatpath = StringProperty(name="Coat_Path")
+ objectdir = StringProperty(name="ObjectPath", subtype="FILE_PATH")
+ texturefolder = StringProperty(name="Texture folder:", subtype="DIR_PATH")
+ path3b = StringProperty(name="3B Path", subtype="FILE_PATH")
+ export_on = BoolProperty(name="Export_On", description="Add Modifiers and export.",default= False)
+
+
+ class SceneCoat3D(bpy.types.PropertyGroup):
+
+ defaultfolder = StringProperty(
+ name="FilePath",
+ subtype="DIR_PATH",
+ )
+ exchangedir = StringProperty(
+ name="FilePath",
+ subtype="DIR_PATH"
+ )
+
+
+
+ wasactive = StringProperty(
+ name="Pass active object",
+ )
+ import_box = BoolProperty(
+ name="Import window",
+ description="Allows to skip import dialog.",
+ default= True
+ )
+ export_box = BoolProperty(
+ name="Export window",
+ description="Allows to skip export dialog.",
+ default= True
+ )
+ export_color = BoolProperty(
+ name="Export color",
+ description="Export color texture.",
+ default= True
+ )
+ export_spec = BoolProperty(
+ name="Export specular",
+ description="Export specular texture.",
+ default= True
+ )
+ export_normal = BoolProperty(
+ name="Export Normal",
+ description="Export normal texture.",
+ default= True
+ )
+ export_disp = BoolProperty(
+ name="Export Displacement",
+ description="Export displacement texture.",
+ default= True
+ )
+ export_position = BoolProperty(
+ name="Export Source Position",
+ description="Export source position.",
+ default= True
+ )
+ export_zero_layer = BoolProperty(
+ name="Export from Layer 0",
+ description="Export mesh from Layer 0",
+ default= True
+ )
+ export_coarse = BoolProperty(
+ name="Export Coarse",
+ description="Export Coarse.",
+ default= True
+ )
+
+ smooth_on = BoolProperty(
+ name="Auto Smooth",
+ description="Add Modifiers and export.",
+ default= True
+ )
+ exportfile = BoolProperty(
+ name="No Import File",
+ description="Add Modifiers and export.",
+ default= False
+ )
+ importmod = BoolProperty(
+ name="Remove Modifiers",
+ description="Import and add modifiers.",
+ default= True
+ )
+ exportmod = BoolProperty(
+ name="Modifiers",
+ description="Export modifiers.",
+ default= False
+ )
+ export_pos = BoolProperty(
+ name="Remember Position",
+ description="Remember position.",
+ default= True
+ )
+ importtextures = BoolProperty(
+ name="Bring Textures",
+ description="Import Textures.",
+ default= True
+ )
+ exportover = BoolProperty(
+ name="Export Obj",
+ description="Import Textures.",
+ default= False
+ )
+ importmesh = BoolProperty(
+ name="Mesh",
+ description="Import Mesh.",
+ default= True
+ )
+
+ # copy location
+ cursor = FloatVectorProperty(
+ name="Cursor",
+ description="Location.",
+ subtype="XYZ",
+ default=(0.0, 0.0, 0.0)
+ )
+ loca = FloatVectorProperty(
+ name="location",
+ description="Location.",
+ subtype="XYZ",
+ default=(0.0, 0.0, 0.0)
+ )
+ rota = FloatVectorProperty(
+ name="location",
+ description="Location.",
+ subtype="EULER",
+ default=(0.0, 0.0, 0.0)
+ )
+ scal = FloatVectorProperty(
+ name="location",
+ description="Location.",
+ subtype="XYZ",
+ default=(0.0, 0.0, 0.0)
+ )
+ dime = FloatVectorProperty(
+ name="dimension",
+ description="Dimension.",
+ subtype="XYZ",
+ default=(0.0, 0.0, 0.0)
+ )
+
+ type = EnumProperty( name= "Export Type",
+ description= "Diffrent Export Types.",
+ items=(("ppp", "Per-Pixel Painting", ""),
+ ("mv", "Microvertex Painting", ""),
+ ("ptex", "Ptex Painting", ""),
+ ("uv", "UV-Mapping", ""),
+ ("ref", "Reference Mesh", ""),
+ ("retopo", "Retopo mesh as new layer", ""),
+ ("vox", "Mesh As Voxel Object", ""),
+ ("alpha", "Mesh As New Pen Alpha", ""),
+ ("prim", "Mesh As Voxel Primitive", ""),
+ ("curv", "Mesh As a Curve Profile", ""),
+ ("autopo", "Mesh For Auto-retopology", ""),
+ ),
+ default= "ppp"
+ )
+
+ bpy.utils.register_module(__name__)
+
+ bpy.types.Object.coat3D= PointerProperty(
+ name= "Applink Variables",
+ type= ObjectCoat3D,
+ description= "Applink variables"
+ )
+
+ bpy.types.Scene.coat3D= PointerProperty(
+ name= "Applink Variables",
+ type= SceneCoat3D,
+ description= "Applink variables"
+ )
+
+
+def unregister():
+ import bpy
+
+ del bpy.types.Object.coat3D
+ del bpy.types.Scene.coat3D
+ del bpy.coat3D
+
+ bpy.utils.unregister_module(__name__)
+
+
+if __name__ == "__main__":
+ register()
+
+
+
diff --git a/io_coat3D/coat.py b/io_coat3D/coat.py
new file mode 100644
index 00000000..954502f3
--- /dev/null
+++ b/io_coat3D/coat.py
@@ -0,0 +1,650 @@
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ***** END GPL LICENSE BLOCK *****
+
+import bpy
+from bpy.props import *
+from io_coat3D import tex
+import os
+import linecache
+import math
+
+
# Module-level shared state, stashed on the bpy module so coat.py and
# __init__.py can both reach it without import cycles.
#   'active_coat' - path of the most recently linked .3b file
#   'status'      - 1 when the 3D-Coat exchange folder was found, else 0
bpy.coat3D = dict()
bpy.coat3D['active_coat'] = ''
bpy.coat3D['status'] = 0
+
+
class ObjectButtonsPanel():
    # Mixin supplying the common placement for panels in this add-on:
    # the Object tab of the Properties editor. Subclasses that override
    # bl_context (the scene panels below do) relocate themselves.
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "object"
+
class SCENE_PT_Main(ObjectButtonsPanel,bpy.types.Panel):
    """Main 3D-Coat applink panel: export/import buttons and per-object paths."""
    bl_label = "3D-Coat Applink"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "scene"

    def draw(self, context):
        layout = self.layout
        scene = context.scene
        me = context.scene.objects
        mat_list = []
        import_no = 0
        coat = bpy.coat3D
        coat3D = bpy.context.scene.coat3D
        Blender_export = ""
        if(bpy.context.scene.objects.active):
            coa = bpy.context.scene.objects.active.coat3D

        # Probe the configured exchange directory; it only counts as
        # connected when its path contains 'Exchange'.
        if(os.path.isdir(coat3D.exchangedir)):
            foldder = coat3D.exchangedir
            if(foldder.rfind('Exchange') >= 0):
                coat['status'] = 1
            else:
                coat['status'] = 0
        else:
            coat['status'] = 0

        if(coat['status'] == 1):
            # <exchange>/Blender/export.txt is written by 3D-Coat when it has
            # a mesh ready for Blender; last_saved_3b_file.txt carries the
            # .3b document path back.
            Blender_folder = ("%s%sBlender"%(coat3D.exchangedir,os.sep))
            Blender_export = Blender_folder
            path3b_now = coat3D.exchangedir
            path3b_now += ('last_saved_3b_file.txt')
            Blender_export += ('%sexport.txt'%(os.sep))

            # First run: create the Blender subfolder and an empty run.txt
            # marker so 3D-Coat knows this side is alive.
            if(not(os.path.isdir(Blender_folder))):
                os.makedirs(Blender_folder)
                Blender_folder = os.path.join(Blender_folder,"run.txt")
                file = open(Blender_folder, "w")
                file.close()

        #Here you add your GUI
        row = layout.row()
        row.prop(coat3D,"type",text = "")
        row = layout.row()
        # Enable the export/import row only for a selected mesh in Object mode.
        if(context.selected_objects and bpy.context.mode == 'OBJECT'):
            if(context.selected_objects[0].type == 'MESH'):
                row.active = True
            else:
                row.active = False
        else:
            row.active = False

        # Nothing selected but 3D-Coat has exported something: offer to bring
        # the mesh in as a new object.
        if(not(bpy.context.selected_objects) and os.path.isfile(Blender_export)):
            row.active = True
            row.operator("import3b_applink.pilgway_3d_coat", text="Bring from 3D-Coat")

        else:
            colL = row.column()
            colR = row.column()

            colL.operator("export_applink.pilgway_3d_coat", text="Export")
            colL.label(text="Export Settings:")

            colL.prop(coat3D,"exportover")
            # NOTE(review): collapsed indentation in the source; assumed only
            # "exportmod" is conditional on exportover -- confirm.
            if(coat3D.exportover):
                colL.prop(coat3D,"exportmod")
            colL.prop(coat3D,"exportfile")
            colL.prop(coat3D,"export_pos")

            colR.operator("import_applink.pilgway_3d_coat", text="Import")
            colR.label(text="Import Settings:")
            colR.prop(coat3D,"importmesh")
            colR.prop(coat3D,"importmod")
            colR.prop(coat3D,"smooth_on")
            colR.prop(coat3D,"importtextures")
            row = layout.row()

            # Per-object path and the Load-3b shortcut (enabled only when a
            # .3b path has been recorded for the object).
            if(bpy.context.selected_objects):
                if(context.selected_objects[0].type == 'MESH'):
                    coa = context.selected_objects[0].coat3D
                    colL = row.column()
                    colR = row.column()
                    colL.label(text="Object Path:")
                    if(coa.path3b):
                        colR.active = True
                    else:
                        colR.active = False

                    colR.operator("import_applink.pilgway_3d_coat_3b", text="Load 3b")
                    row = layout.row()
                    row.prop(coa,"objectdir",text="")

        row = layout.row()

        if(context.selected_objects):
            if(context.selected_objects[0].type == 'MESH'):
                coa = bpy.context.selected_objects[0].coat3D
                row = layout.row()
                row.label(text="Texture output folder:")
                row = layout.row()
                row.prop(coa,"texturefolder",text="")
        row = layout.row()
        # Connection status line at the bottom of the panel.
        if(coat['status'] == 0):
            row.label(text="Exchange Folder: not connected")
        else:
            row.label(text="Exchange Folder: connected")
+
class SCENE_PT_Settings(ObjectButtonsPanel,bpy.types.Panel):
    """Secondary panel: exchange/default folders, 3b path, texture cleanup."""
    bl_label = "Applink Settings"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "scene"

    def draw(self, context):
        layout = self.layout
        scene = context.scene
        coat3D = bpy.context.scene.coat3D

        row = layout.row()
        # "Delete Textures" only makes sense with a mesh selected.
        if(bpy.context.selected_objects):
            if(context.selected_objects[0].type == 'MESH'):
                row.active = True
            else:
                row.active = False
        row.operator("import_applink.pilgway_3d_deltex",text="Delete Textures")
        row = layout.row()
        row.label(text="Exchange Folder:")
        row = layout.row()
        row.prop(coat3D,"exchangedir",text="")
        # Path to the .3b document linked to the active object, if any.
        if(bpy.context.scene.objects.active):
            coa = bpy.context.scene.objects.active.coat3D
            row = layout.row()
            row.label(text="3b path:")
            row = layout.row()
            row.prop(coa,"path3b",text="")
        row = layout.row()
        row.label(text="Default Folder:")
        row = layout.row()
        row.prop(coat3D,"defaultfolder",text="")

        # Disabled draft of a finer-grained export/import settings UI,
        # kept by the author for future work.
        #colL = row.column()
        #colR = row.column()
        #colL.prop(coat3D,"export_box")
        #colR.prop(coat3D,"import_box")
        #if(not(coat3D.export_box)):
        #    row = layout.row()
        #    colL.label(text="Export settings:")
        #    row = layout.row()
        #    colL = row.column()
        #    colR = row.column()
        #    colL.prop(coat3D,"export_color")
        #    colL.prop(coat3D,"export_spec")
        #    colL.prop(coat3D,"export_normal")
        #    colL.prop(coat3D,"export_disp")
        #    colR.prop(coat3D,"export_position")
        #    colR.prop(coat3D,"export_export_zero_layer")
        #    colR.prop(coat3D,"export_coarse")
        #row = layout.row()
        #colL = row.column()
        #colR = row.column()
+
class SCENE_OT_export(bpy.types.Operator):
    """Export the active mesh as OBJ and write import.txt for 3D-Coat."""
    bl_idname = "export_applink.pilgway_3d_coat"
    bl_label = "Export your custom property"
    bl_description = "Export your custom property"

    def invoke(self, context, event):
        checkname = ''
        coat3D = bpy.context.scene.coat3D
        scene = context.scene
        coat3D.export_on = False
        activeobj = bpy.context.active_object.name
        obj = scene.objects[activeobj]
        coa = bpy.context.scene.objects.active.coat3D

        if(coa.objectdir == '' and (coat3D.defaultfolder)):
            # First export for this object: just seed its path from the
            # default folder; the actual export happens on the next click.
            coa.objectdir = coat3D.defaultfolder
        else:
            # Paths of the exchange protocol files. A stale textures.txt
            # would be misread as belonging to this export, so delete it.
            importfile = coat3D.exchangedir
            texturefile = coat3D.exchangedir
            importfile += ('%simport.txt'%(os.sep))
            texturefile += ('%stextures.txt'%(os.sep))
            if(os.path.isfile(texturefile)):
                os.remove(texturefile)

            checkname = coa.objectdir

            # objectdir may be a folder; append "<objectname>.obj" then.
            if(coa.objectdir[-4:] != '.obj'):
                checkname += ('%s.obj'%(activeobj))

            if(not(os.path.isfile(checkname)) or coat3D.exportover):
                if(coat3D.export_pos):
                    bpy.ops.object.transform_apply(location=True,rotation=True,scale=True)

                bpy.ops.export_scene.obj(filepath=checkname,use_selection=True,
                    use_apply_modifiers=coat3D.exportmod,use_blen_objects=False, group_by_material= True,
                    use_materials = False,keep_vertex_order = True,axis_forward='X',axis_up='Y')
                bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='MEDIAN')

                coa.export_on = True
            else:
                # Re-export without overwrite: export from the origin so the
                # OBJ matches the previous one, then restore the transform.
                bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='MEDIAN')
                coat3D.loca = obj.location
                coat3D.rota = obj.rotation_euler
                coat3D.scal = obj.scale
                obj.location = (0,0,0)
                obj.rotation_euler = (0,0,0)
                obj.scale = (1,1,1)

                bpy.ops.export_scene.obj(filepath=checkname,use_selection=True,
                    use_apply_modifiers=coat3D.exportmod,use_blen_objects=False, group_by_material= True,
                    use_materials = False,keep_vertex_order = True,axis_forward='X',axis_up='Y')

                obj.location = coat3D.loca
                obj.rotation_euler = coat3D.rota
                obj.scale = coat3D.scal
                coa.export_on = False

            if(coat3D.exportfile == False):
                # Tell 3D-Coat what to open: path twice, then the paint mode.
                file = open(importfile, "w")
                file.write("%s"%(checkname))
                file.write("\n%s"%(checkname))
                file.write("\n[%s]"%(coat3D.type))
                if(coa.texturefolder):
                    # NOTE(review): opening bracket without a closing one --
                    # verify against 3D-Coat's expected import.txt format.
                    file.write("\n[TexOutput:%s"%(coa.texturefolder))

                file.close()
            coa.objectdir = checkname

        # FIX: operators must return a set; the original returned the
        # string ('FINISHED'), which Blender rejects at runtime.
        return {'FINISHED'}
+
+
class SCENE_OT_import(bpy.types.Operator):
    """Re-import every selected mesh whose OBJ file exists on disk."""
    bl_idname = "import_applink.pilgway_3d_coat"
    bl_label = "import your custom property"
    bl_description = "import your custom property"

    def invoke(self, context, event):
        scene = context.scene
        coat3D = bpy.context.scene.coat3D
        coat = bpy.coat3D
        test = bpy.context.selected_objects
        act_first = bpy.context.scene.objects.active
        for act_name in test:
            if act_name.type == 'MESH' and os.path.isfile(act_name.coat3D.objectdir):
                activeobj = act_name.name
                mat_list = []
                scene.objects[activeobj].select = True
                objekti = scene.objects[activeobj]
                coat3D.loca = objekti.location
                coat3D.rota = objekti.rotation_euler
                coa = act_name.coat3D

                # If 3D-Coat left an export.txt, remember the .3b path it
                # points at, then consume the file.
                exportfile = coat3D.exchangedir
                path3b_n = coat3D.exchangedir
                path3b_n += ('last_saved_3b_file.txt')
                exportfile += ('%sexport.txt'%(os.sep))
                if(os.path.isfile(exportfile)):
                    export_file = open(exportfile)
                    for line in export_file:
                        if line.rfind('.3b'):
                            objekti.coat3D.coatpath = line
                            coat['active_coat'] = line
                    export_file.close()
                    os.remove(exportfile)

                # Save the object's materials so they can be restored after
                # the proxy import replaces its mesh data.
                if(objekti.material_slots):
                    for obj_mat in objekti.material_slots:
                        mat_list.append(obj_mat.material)
                    act_mat_index = objekti.active_material_index

                if(coat3D.importmesh and os.path.isfile(coa.objectdir)):
                    # Drop the .mtl so the OBJ importer doesn't create
                    # throwaway materials from it.
                    mtl = coa.objectdir
                    mtl = mtl.replace('.obj','.mtl')
                    if(os.path.isfile(mtl)):
                        os.remove(mtl)

                    bpy.ops.import_scene.obj(filepath=act_name.coat3D.objectdir,axis_forward='X',axis_up='Y')
                    obj_proxy = scene.objects[0]
                    bpy.ops.object.select_all(action='TOGGLE')
                    obj_proxy.select = True
                    if(coa.export_on):
                        bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='MEDIAN')

                    bpy.ops.object.transform_apply(rotation=True)
                    # Strip the importer's auto material from the proxy.
                    proxy_mat = obj_proxy.material_slots[0].material
                    obj_proxy.data.materials.pop(0)
                    proxy_mat.user_clear()
                    bpy.data.materials.remove(proxy_mat)
                    bpy.ops.object.select_all(action='TOGGLE')

                    scene.objects.active = obj_proxy

                    # Swap the imported mesh data into the original object,
                    # free the old mesh, and keep the old datablock name.
                    obj_data = objekti.data.id_data
                    objekti.data = obj_proxy.data.id_data
                    if(bpy.data.meshes[obj_data.name].users == 0):
                        bpy.data.meshes.remove(obj_data)
                    objekti.data.id_data.name = obj_data.name

                    obj_proxy.select = True
                    bpy.ops.object.delete()
                    objekti.select = True
                    bpy.context.scene.objects.active = objekti

                if(coat3D.smooth_on):
                    bpy.ops.object.shade_smooth()
                else:
                    bpy.ops.object.shade_flat()

                # Pick up the .3b document path 3D-Coat saved, if any.
                if(os.path.isfile(path3b_n)):
                    path3b_fil = open(path3b_n)
                    for lin in path3b_fil:
                        objekti.coat3D.path3b = lin
                    path3b_fil.close()
                    os.remove(path3b_n)

                if(coat3D.importmesh and not(os.path.isfile(coa.objectdir))):
                    coat3D.importmesh = False

                # Restore the saved materials and refresh their images.
                if(mat_list and coat3D.importmesh):
                    for mat_one in mat_list:
                        objekti.data.materials.append(mat_one)
                    objekti.active_material_index = act_mat_index

                if(mat_list):
                    for obj_mate in objekti.material_slots:
                        for tex_slot in obj_mate.material.texture_slots:
                            if(hasattr(tex_slot,'texture')):
                                if(tex_slot.texture.type == 'IMAGE'):
                                    if tex_slot.texture.image is not None:
                                        tex_slot.texture.image.reload()

                # Optionally clear all modifiers before texture lookup.
                if(coat3D.importmod):
                    mod_list = []
                    for mod_index in objekti.modifiers:
                        objekti.modifiers.remove(mod_index)

                if(coat3D.importtextures):
                    export = ''
                    tex.gettex(mat_list,objekti,scene,export)

        # Restore the original selection and active object.
        for act_name in test:
            act_name.select = True
        bpy.context.scene.objects.active = act_first

        # FIX: return a set, not the string ('FINISHED').
        return {'FINISHED'}
+
class SCENE_OT_import3b(bpy.types.Operator):
    """Bring the mesh 3D-Coat announced in export.txt into the scene."""
    bl_idname = "import3b_applink.pilgway_3d_coat"
    bl_label = "Brings mesh from 3D-Coat"
    bl_description = "Bring 3D-Coat Mesh"

    def invoke(self, context, event):

        coat3D = bpy.context.scene.coat3D
        scene = context.scene

        # Exchange protocol files written by 3D-Coat.
        Blender_folder = ("%s%sBlender"%(coat3D.exchangedir,os.sep))
        Blender_export = Blender_folder
        path3b_now = coat3D.exchangedir
        path3b_now += ('last_saved_3b_file.txt')
        Blender_export += ('%sexport.txt'%(os.sep))

        import_no = 0
        mat_list = []
        obj_path =''

        # First line of export.txt is the OBJ path to import.
        obj_pathh = open(Blender_export)
        for line in obj_pathh:
            obj_path = line
            break
        obj_pathh.close()
        export = obj_path
        mod_time = os.path.getmtime(obj_path)
        # Remove the .mtl so the importer doesn't create junk materials.
        mtl_list = obj_path.replace('.obj','.mtl')
        if(os.path.isfile(mtl_list)):
            os.remove(mtl_list)

        # Remember the source .3b document, if 3D-Coat recorded one.
        if(os.path.isfile(path3b_now)):
            path3b_file = open(path3b_now)
            for lin in path3b_file:
                path_export = lin
                path_on = 1
            path3b_file.close()
            os.remove(path3b_now)
        else:
            path_on = 0

        # Reuse an existing object already linked to this OBJ path.
        for palikka in bpy.context.scene.objects:
            if(palikka.type == 'MESH'):
                if(palikka.coat3D.objectdir == export):
                    import_no = 1
                    target = palikka
                    break

        if(import_no):
            new_obj = palikka
            import_no = 0
        else:
            bpy.ops.import_scene.obj(filepath=obj_path,axis_forward='X',axis_up='Y')
            new_obj = scene.objects[0]
            scene.objects[0].coat3D.objectdir = export
            if(path_on):
                scene.objects[0].coat3D.path3b = path_export

        # Consume the export request.
        os.remove(Blender_export)

        bpy.context.scene.objects.active = new_obj

        if(coat3D.smooth_on):
            bpy.ops.object.shade_smooth()
        else:
            bpy.ops.object.shade_flat()

        Blender_tex = ("%s%stextures.txt"%(coat3D.exchangedir,os.sep))
        mat_list.append(new_obj.material_slots[0].material)
        tex.gettex(mat_list, new_obj, scene,export)

        # FIX: return a set, not the string ('FINISHED').
        return {'FINISHED'}
+
class SCENE_OT_load3b(bpy.types.Operator):
    """Ask 3D-Coat to open the .3b document linked to the active object."""
    bl_idname = "import_applink.pilgway_3d_coat_3b"
    bl_label = "Loads 3b linked into object"
    bl_description = "Loads 3b linked into object"

    def invoke(self, context, event):
        checkname = ''
        coa = bpy.context.scene.objects.active.coat3D
        if(coa.path3b):
            coat3D = bpy.context.scene.coat3D
            scene = context.scene
            importfile = coat3D.exchangedir
            importfile += ('%simport.txt'%(os.sep))

            coat_path = bpy.context.active_object.coat3D.path3b

            # import.txt protocol: the path twice, then the [3B] mode tag.
            file = open(importfile, "w")
            file.write("%s"%(coat_path))
            file.write("\n%s"%(coat_path))
            file.write("\n[3B]")
            file.close()

        # FIX: operators must return a set; the original returned the
        # string ('FINISHED'), which Blender rejects at runtime.
        return {'FINISHED'}
+
class SCENE_OT_deltex(bpy.types.Operator):
    """Delete every texture file on disk whose name starts with this
    object's base name (the files 3D-Coat wrote next to the OBJ)."""
    bl_idname = "import_applink.pilgway_3d_deltex" # XXX, name?
    bl_label = "Picks Object's name into path"
    bl_description = "Loads 3b linked into object"

    def invoke(self, context, event):
        if(bpy.context.selected_objects):
            if(context.selected_objects[0].type == 'MESH'):
                coat3D = bpy.context.scene.coat3D
                coa = bpy.context.scene.objects.active.coat3D
                scene = context.scene
                nimi = tex.objname(coa.objectdir)
                # Textures live next to the OBJ unless a dedicated texture
                # folder was configured for the object.
                if(coa.texturefolder):
                    osoite = os.path.dirname(coa.texturefolder) + os.sep
                else:
                    osoite = os.path.dirname(coa.objectdir) + os.sep
                just_nimi = tex.justname(nimi)
                just_nimi += '_'

                # Remove every file containing "<basename>_" in its name.
                files = os.listdir(osoite)
                for i in files:
                    if(i.rfind(just_nimi) >= 0):
                        del_osoite = osoite + i
                        os.remove(del_osoite)

        # FIX: return a set, not the string ('FINISHED').
        return {'FINISHED'}
+
+
+from bpy import *
+from mathutils import Vector, Matrix
+
+
+# 3D-Coat Dynamic Menu
# 3D-Coat Dynamic Menu
class VIEW3D_MT_Coat_Dynamic_Menu(bpy.types.Menu):
    """Q-key popup menu mirroring the panel's export/import actions."""
    bl_label = "3D-Coat Applink Menu"

    def draw(self, context):
        layout = self.layout
        settings = context.tool_settings
        layout.operator_context = 'INVOKE_REGION_WIN'
        coat3D = bpy.context.scene.coat3D
        Blender_folder = ("%s%sBlender"%(coat3D.exchangedir,os.sep))
        Blender_export = Blender_folder
        Blender_export += ('%sexport.txt'%(os.sep))

        ob = context
        if ob.mode == 'OBJECT':
            if(bpy.context.selected_objects):
                # Menu is active only when at least one selected object is a
                # mesh. NOTE(review): collapsed indentation in the source --
                # the False assignment is assumed to sit inside the loop.
                for ind_obj in bpy.context.selected_objects:
                    if(ind_obj.type == 'MESH'):
                        layout.active = True
                        break
                    layout.active = False

                if(layout.active == True):

                    layout.operator("import_applink.pilgway_3d_coat", text="Import")
                    layout.separator()

                    layout.operator("export_applink.pilgway_3d_coat", text="Export")
                    layout.separator()

                    layout.menu("VIEW3D_MT_ImportMenu")
                    layout.separator()

                    layout.menu("VIEW3D_MT_ExportMenu")
                    layout.separator()

                    layout.menu("VIEW3D_MT_ExtraMenu")
                    layout.separator()

                    # Single mesh with a recorded .3b file: offer to open it.
                    if(len(bpy.context.selected_objects) == 1):
                        if(os.path.isfile(bpy.context.selected_objects[0].coat3D.path3b)):
                            layout.operator("import_applink.pilgway_3d_coat_3b", text="Load 3b")
                            layout.separator()

                    if(os.path.isfile(Blender_export)):

                        layout.operator("import3b_applink.pilgway_3d_coat", text="Bring from 3D-Coat")
                        layout.separator()
                else:
                    # Non-mesh selection: still allow bringing a pending mesh.
                    if(os.path.isfile(Blender_export)):
                        layout.active = True

                        layout.operator("import3b_applink.pilgway_3d_coat", text="Bring from 3D-Coat")
                        layout.separator()
            else:
                # Empty selection: only the bring-from-3D-Coat entry.
                if(os.path.isfile(Blender_export)):


                    layout.operator("import3b_applink.pilgway_3d_coat", text="Bring from 3D-Coat")
                    layout.separator()
+
+
+
+
class VIEW3D_MT_ImportMenu(bpy.types.Menu):
    """Submenu exposing the four import toggles."""
    bl_label = "Import Settings"

    def draw(self, context):
        layout = self.layout
        coat3D = bpy.context.scene.coat3D
        settings = context.tool_settings
        layout.operator_context = 'INVOKE_REGION_WIN'
        # Same four options shown in the scene panel, in the same order.
        for option in ("importmesh", "importmod", "smooth_on", "importtextures"):
            layout.prop(coat3D, option)
+
class VIEW3D_MT_ExportMenu(bpy.types.Menu):
    """Submenu exposing the export toggles."""
    bl_label = "Export Settings"

    def draw(self, context):
        layout = self.layout
        scene_props = bpy.context.scene.coat3D
        settings = context.tool_settings
        layout.operator_context = 'INVOKE_REGION_WIN'
        layout.prop(scene_props, "exportover")
        # "exportmod" is only meaningful when overwriting is allowed.
        if scene_props.exportover:
            layout.prop(scene_props, "exportmod")
        layout.prop(scene_props, "exportfile")
        layout.prop(scene_props, "export_pos")
+
class VIEW3D_MT_ExtraMenu(bpy.types.Menu):
    """Submenu for auxiliary actions (texture cleanup)."""
    bl_label = "Extra"

    def draw(self, context):
        layout = self.layout
        scene_props = bpy.context.scene.coat3D
        settings = context.tool_settings
        layout.operator_context = 'INVOKE_REGION_WIN'

        layout.operator("import_applink.pilgway_3d_deltex", text="Delete all Textures")
        layout.separator()
+
def register():
    # Register every class defined in this module.
    bpy.utils.register_module(__name__)

    # Bind the applink popup menu to Q in the 3D View.
    # NOTE(review): 'wm.call_menu2' is not the standard 'wm.call_menu'
    # operator -- confirm it exists in the targeted Blender build.
    km = bpy.context.window_manager.keyconfigs.default.keymaps['3D View']
    kmi = km.keymap_items.new('wm.call_menu2', 'Q', 'PRESS')
    kmi.properties.name = "VIEW3D_MT_Coat_Dynamic_Menu"
+
+
def unregister():
    bpy.utils.unregister_module(__name__)

    # Remove the keymap entry added in register(). The idname is compared
    # against '' -- presumably because a keymap item whose operator is not
    # registered reports an empty idname; verify against register() above.
    km = bpy.context.window_manager.keyconfigs.default.keymaps['3D View']
    for kmi in km.keymap_items:
        if kmi.idname == '':
            if kmi.properties.name == "VIEW3D_MT_Coat_Dynamic_Menu":
                km.keymap_items.remove(kmi)
                break
+
+
# Allow running the file directly from Blender's text editor.
if __name__ == "__main__":
    register()
diff --git a/io_coat3D/tex.py b/io_coat3D/tex.py
new file mode 100644
index 00000000..d4156e5d
--- /dev/null
+++ b/io_coat3D/tex.py
@@ -0,0 +1,409 @@
+import bpy
+import os
+import filecmp
+
def objname(path):
    """Return the final path component (file name) of *path*.

    FIX: the original computed ``path[len(dirname(path) + os.sep):]``,
    which drops the first character when *path* has no directory part
    (``dirname`` is ``''`` but ``os.sep`` is still added). Using
    ``os.path.basename`` handles that case correctly.
    """
    return os.path.basename(path)
+
def justname(name):
    """Return *name* without its final extension.

    FIX: the original used ``name[:name.rfind('.')]``; when *name* has no
    dot, ``rfind`` returns -1 and the last character was silently dropped.
    ``os.path.splitext`` keeps the name intact in that case.
    """
    return os.path.splitext(name)[0]
+
def setgallery():
    """Return the names of all texture datablocks in the current file.

    FIX: removed the unused locals (``newname``, ``index_tex``) and
    replaced the manual append loop with a list comprehension; the
    returned list is identical.
    """
    return [tt.name for tt in bpy.data.textures]
+
def find_index(objekti):
    """Return the index of the first free texture slot on the object's
    active material (the first slot without a ``texture`` attribute).
    If every slot is occupied, return the total slot count.
    """
    slots = objekti.active_material.texture_slots
    for position, slot in enumerate(slots):
        if not hasattr(slot, 'texture'):
            return position
    return len(slots)
+
def gettex(mat_list, objekti, scene,export):
    """Scan the object's texture folder for 3D-Coat output files named
    ``<objname>_<maptype>.<ext>`` and hook them up as color / specular /
    normal / displacement texture slots on the active material.

    mat_list -- materials restored onto the object (images get reloaded)
    objekti  -- the mesh object being updated
    scene    -- current scene (unused here beyond the signature)
    export   -- OBJ path when called right after an export, '' on import
    """

    coat3D = bpy.context.scene.coat3D

    # V-Ray needs different normal-map / displacement settings than
    # Blender Internal, so detect the render engine up front.
    if(bpy.context.scene.render.engine == 'VRAY_RENDER' or bpy.context.scene.render.engine == 'VRAY_RENDER_PREVIEW'):
        vray = True
    else:
        vray = False

    take_color = 0;
    take_spec = 0;
    take_normal = 0;
    take_disp = 0;

    # bring_* flags: 1 = create a slot for this map type; cleared below
    # when the material already has a slot mapped to that influence.
    bring_color = 1;
    bring_spec = 1;
    bring_normal = 1;
    bring_disp = 1;

    # Texture file paths found on disk, keyed by map-type suffix.
    texcoat = {}
    texcoat['color'] = []
    texcoat['specular'] = []
    texcoat['nmap'] = []
    texcoat['disp'] = []
    texu = []

    if(export):
        # Post-export call: remember the path and refresh existing images.
        # NOTE(review): writes ``objpath`` while the rest of the add-on
        # uses ``objectdir`` -- confirm this property name is intended.
        objekti.coat3D.objpath = export
        nimi = objname(export)
        osoite = os.path.dirname(export) + os.sep
        for mate in objekti.material_slots:
            for tex_slot in mate.material.texture_slots:
                if(hasattr(tex_slot,'texture')):
                    if(tex_slot.texture.type == 'IMAGE'):
                        if tex_slot.texture.image is not None:
                            tex_slot.texture.image.reload()
    else:
        # Import call: look next to the OBJ, or in the configured
        # per-object texture folder when one is set.
        coa = objekti.coat3D
        nimi = objname(coa.objectdir)
        if(coa.texturefolder):
            osoite = os.path.dirname(coa.texturefolder) + os.sep
        else:
            osoite = os.path.dirname(coa.objectdir) + os.sep
    just_nimi = justname(nimi)
    just_nimi += '_'
    just_nimi_len = len(just_nimi)

    # Skip map types the material already maps, so existing slots are
    # not duplicated.
    if(len(objekti.material_slots) != 0):
        for obj_tex in objekti.active_material.texture_slots:
            if(hasattr(obj_tex,'texture')):
                if(obj_tex.texture):
                    if(obj_tex.use_map_color_diffuse):
                        bring_color = 0;
                    if(obj_tex.use_map_specular):
                        bring_spec = 0;
                    if(obj_tex.use_map_normal):
                        bring_normal = 0;
                    if(obj_tex.use_map_displacement):
                        bring_disp = 0;

    # Collect files whose name starts with "<objname>_".
    files = os.listdir(osoite)
    for i in files:
        tui = i[:just_nimi_len]
        if(tui == just_nimi):
            texu.append(i)

    # Bucket each file by the suffix between the last '_' and the
    # extension ('color', 'specular', 'nmap', 'disp').
    for yy in texu:
        minimi = (yy.rfind('_'))+1
        maksimi = (yy.rfind('.'))
        tex_name = yy[minimi:maksimi]
        koko = ''
        koko += osoite
        koko += yy
        texcoat[tex_name].append(koko)
        #date = os.path.getmtime(texcoat[tex_name][0])

    # Textures found but the object has no material at all: create one
    # with the first free "Material.NNN" name.
    if((texcoat['color'] or texcoat['nmap'] or texcoat['disp'] or texcoat['specular']) and (len(objekti.material_slots)) == 0):
        index = 0
        tuli = False
        lasku = False
        while(lasku == False):
            tuli = False
            new_mat = ("Material.%03d"%(index))
            for i in bpy.data.materials:
                if(i.name == new_mat):
                    tuli = True
                    break
            if(tuli):
                index += 1
            else:
                lasku = True
        bpy.data.materials.new(new_mat)
        ki = bpy.data.materials[new_mat]
        objekti.data.materials.append(ki)

    # ---- color map ------------------------------------------------------
    # Each section below follows the same pattern: find the first free
    # slot, look for an orphaned texture of the right prefix to reuse,
    # otherwise create a new image + texture with a unique name.
    if(bring_color == 1 and texcoat['color']):
        name_tex ='Color_'
        num = []

        index = find_index(objekti)


        tex = bpy.ops.Texture
        objekti.active_material.texture_slots.create(index)
        total_mat = len(objekti.active_material.texture_slots.items())
        useold = ''

        # Reuse an existing 'Color_*' texture that no material uses.
        for seekco in bpy.data.textures:
            if((seekco.name[:6] == 'Color_') and (seekco.users_material == ())):
                useold = seekco


        if(useold == ''):

            # Find the first unused 'Color_<n>' name.
            indexx = 0
            tuli = False
            lasku = False
            while(lasku == False):
                tuli = False
                name_tex = ("Color_%s"%(indexx))
                for i in bpy.data.textures:
                    if(i.name == name_tex):
                        tuli = True
                        break
                if(tuli):
                    indexx += 1
                else:
                    lasku = True


            bpy.ops.image.new(name=name_tex)
            bpy.data.images[name_tex].filepath = texcoat['color'][0]
            bpy.data.images[name_tex].source = 'FILE'



            bpy.data.textures.new(name_tex,type='IMAGE')
            objekti.active_material.texture_slots[index].texture = bpy.data.textures[name_tex]
            objekti.active_material.texture_slots[index].texture.image = bpy.data.images[name_tex]

            if(objekti.data.uv_textures.active):
                objekti.active_material.texture_slots[index].texture_coords = 'UV'
                objekti.active_material.texture_slots[index].uv_layer = objekti.data.uv_textures.active.name

            objekti.active_material.texture_slots[index].texture.image.reload()


        elif(useold != ''):

            # Point the recycled texture's image at the new file.
            objekti.active_material.texture_slots[index].texture = useold
            objekti.active_material.texture_slots[index].texture.image.filepath = texcoat['color'][0]
            if(objekti.data.uv_textures.active):
                objekti.active_material.texture_slots[index].texture_coords = 'UV'
                objekti.active_material.texture_slots[index].uv_layer = objekti.data.uv_textures.active.name

    # ---- normal map -----------------------------------------------------
    if(bring_normal == 1 and texcoat['nmap']):
        name_tex ='Normal_'
        num = []

        index = find_index(objekti)
        tex = bpy.ops.Texture
        objekti.active_material.texture_slots.create(index)
        total_mat = len(objekti.active_material.texture_slots.items())
        useold = ''

        for seekco in bpy.data.textures:
            if((seekco.name[:7] == 'Normal_') and (seekco.users_material == ())):
                useold = seekco

        if(useold == ''):

            indexx = 0
            tuli = False
            lasku = False
            while(lasku == False):
                tuli = False
                name_tex = ("Normal_%s"%(indexx))
                for i in bpy.data.textures:
                    if(i.name == name_tex):
                        tuli = True
                        break
                if(tuli):
                    indexx += 1
                else:
                    lasku = True

            bpy.ops.image.new(name=name_tex)
            bpy.data.images[name_tex].filepath = texcoat['nmap'][0]
            bpy.data.images[name_tex].source = 'FILE'


            bpy.data.textures.new(name_tex,type='IMAGE')
            objekti.active_material.texture_slots[index].texture = bpy.data.textures[name_tex]
            objekti.active_material.texture_slots[index].texture.image = bpy.data.images[name_tex]

            if(objekti.data.uv_textures.active):
                objekti.active_material.texture_slots[index].texture_coords = 'UV'
                objekti.active_material.texture_slots[index].uv_layer = objekti.data.uv_textures.active.name
            objekti.active_material.texture_slots[index].use_map_color_diffuse = False
            objekti.active_material.texture_slots[index].use_map_normal = True

            objekti.active_material.texture_slots[index].texture.image.reload()
            # Engine-specific tangent-space setup.
            if(vray):
                bpy.data.textures[name_tex].vray_slot.BRDFBump.map_type = 'TANGENT'

            else:
                bpy.data.textures[name_tex].use_normal_map = True
                objekti.active_material.texture_slots[index].normal_map_space = 'TANGENT'
                objekti.active_material.texture_slots[index].normal_factor = 1



        elif(useold != ''):

            objekti.active_material.texture_slots[index].texture = useold
            objekti.active_material.texture_slots[index].texture.image.filepath = texcoat['nmap'][0]
            if(objekti.data.uv_textures.active):
                objekti.active_material.texture_slots[index].texture_coords = 'UV'
                objekti.active_material.texture_slots[index].uv_layer = objekti.data.uv_textures.active.name
            objekti.active_material.texture_slots[index].use_map_color_diffuse = False
            objekti.active_material.texture_slots[index].use_map_normal = True
            objekti.active_material.texture_slots[index].normal_factor = 1

    # ---- specular map ---------------------------------------------------
    if(bring_spec == 1 and texcoat['specular']):
        name_tex ='Specular_'
        num = []

        index = find_index(objekti)


        tex = bpy.ops.Texture
        objekti.active_material.texture_slots.create(index)
        total_mat = len(objekti.active_material.texture_slots.items())
        useold = ''

        for seekco in bpy.data.textures:
            if((seekco.name[:9] == 'Specular_') and (seekco.users_material == ())):
                useold = seekco




        if(useold == ''):

            indexx = 0
            tuli = False
            lasku = False
            while(lasku == False):
                tuli = False
                name_tex = ("Specular_%s"%(indexx))
                for i in bpy.data.textures:
                    if(i.name == name_tex):
                        tuli = True
                        break
                if(tuli):
                    indexx += 1
                else:
                    lasku = True

            bpy.ops.image.new(name=name_tex)
            bpy.data.images[name_tex].filepath = texcoat['specular'][0]
            bpy.data.images[name_tex].source = 'FILE'


            bpy.data.textures.new(name_tex,type='IMAGE')
            objekti.active_material.texture_slots[index].texture = bpy.data.textures[name_tex]
            objekti.active_material.texture_slots[index].texture.image = bpy.data.images[name_tex]

            if(objekti.data.uv_textures.active):
                objekti.active_material.texture_slots[index].texture_coords = 'UV'
                objekti.active_material.texture_slots[index].uv_layer = objekti.data.uv_textures.active.name
            objekti.active_material.texture_slots[index].use_map_color_diffuse = False
            objekti.active_material.texture_slots[index].use_map_specular = True

            objekti.active_material.texture_slots[index].texture.image.reload()


        elif(useold != ''):

            objekti.active_material.texture_slots[index].texture = useold
            objekti.active_material.texture_slots[index].texture.image.filepath = texcoat['specular'][0]
            if(objekti.data.uv_textures.active):
                objekti.active_material.texture_slots[index].texture_coords = 'UV'
                objekti.active_material.texture_slots[index].uv_layer = objekti.data.uv_textures.active.name
            objekti.active_material.texture_slots[index].use_map_color_diffuse = False
            objekti.active_material.texture_slots[index].use_map_specular = True

    # ---- displacement map -----------------------------------------------
    if(bring_disp == 1 and texcoat['disp']):
        name_tex ='Displacement_'
        num = []

        index = find_index(objekti)


        tex = bpy.ops.Texture
        objekti.active_material.texture_slots.create(index)
        total_mat = len(objekti.active_material.texture_slots.items())
        useold = ''

        for seekco in bpy.data.textures:
            if((seekco.name[:13] == 'Displacement_') and (seekco.users_material == ())):
                useold = seekco




        if(useold == ''):

            indexx = 0
            tuli = False
            lasku = False
            while(lasku == False):
                tuli = False
                name_tex = ("Displacement_%s"%(indexx))
                for i in bpy.data.textures:
                    if(i.name == name_tex):
                        tuli = True
                        break
                if(tuli):
                    indexx += 1
                else:
                    lasku = True

            bpy.ops.image.new(name=name_tex)
            bpy.data.images[name_tex].filepath = texcoat['disp'][0]
            bpy.data.images[name_tex].source = 'FILE'


            bpy.data.textures.new(name_tex,type='IMAGE')
            objekti.active_material.texture_slots[index].texture = bpy.data.textures[name_tex]
            objekti.active_material.texture_slots[index].texture.image = bpy.data.images[name_tex]

            if(objekti.data.uv_textures.active):
                objekti.active_material.texture_slots[index].texture_coords = 'UV'
                objekti.active_material.texture_slots[index].uv_layer = objekti.data.uv_textures.active.name
            objekti.active_material.texture_slots[index].use_map_color_diffuse = False
            objekti.active_material.texture_slots[index].use_map_displacement = True

            objekti.active_material.texture_slots[index].texture.image.reload()


        elif(useold != ''):

            objekti.active_material.texture_slots[index].texture = useold
            objekti.active_material.texture_slots[index].texture.image.filepath = texcoat['disp'][0]
            if(objekti.data.uv_textures.active):
                objekti.active_material.texture_slots[index].texture_coords = 'UV'
                objekti.active_material.texture_slots[index].uv_layer = objekti.data.uv_textures.active.name
            objekti.active_material.texture_slots[index].use_map_color_diffuse = False
            objekti.active_material.texture_slots[index].use_map_displacement = True

        # V-Ray displaces via the slot; Blender Internal via a Displace
        # modifier (reused when one already exists).
        # NOTE(review): indentation reconstructed -- this if/else is assumed
        # to run for both the new-texture and reused-texture paths.
        if(vray):
            objekti.active_material.texture_slots[index].texture.use_interpolation = False
            objekti.active_material.texture_slots[index].displacement_factor = 0.05


        else:
            disp_modi = ''
            for seek_modi in objekti.modifiers:
                if(seek_modi.type == 'DISPLACE'):
                    disp_modi = seek_modi
                    break
            if(disp_modi):
                disp_modi.texture = objekti.active_material.texture_slots[index].texture
                if(objekti.data.uv_textures.active):
                    disp_modi.texture_coords = 'UV'
                    disp_modi.uv_layer = objekti.data.uv_textures.active.name
            else:
                objekti.modifiers.new('Displace',type='DISPLACE')
                objekti.modifiers['Displace'].texture = objekti.active_material.texture_slots[index].texture
                if(objekti.data.uv_textures.active):
                    objekti.modifiers['Displace'].texture_coords = 'UV'
                    objekti.modifiers['Displace'].uv_layer = objekti.data.uv_textures.active.name

    return('FINISHED')
diff --git a/io_convert_image_to_mesh_img/__init__.py b/io_convert_image_to_mesh_img/__init__.py
new file mode 100644
index 00000000..e3309617
--- /dev/null
+++ b/io_convert_image_to_mesh_img/__init__.py
@@ -0,0 +1,132 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+bl_info = {
+ "name": "HiRISE DTM from PDS IMG",
+ "author": "Tim Spriggs (tims@uahirise.org)",
+ "version": (0, 1, 2),
+ "blender": (2, 5, 7),
+ "api": 35622,
+ "location": "File > Import > HiRISE DTM from PDS IMG (.IMG)",
+ "description": "Import a HiRISE DTM formatted as a PDS IMG file",
+ "warning": "May consume a lot of memory",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/Import-Export/HiRISE_DTM_from_PDS_IMG",
+ "tracker_url": "https://projects.blender.org/tracker/index.php?"\
+ "func=detail&aid=24897&group_id=153&atid=469",
+ "category": "Import-Export"}
+
+
+# Revision History:
+# 0.1.1 - make default import 12x12 bin (fast) to not consume too much memory
+# by default (TJS - 2010-12-07)
+# 0.1.2 - included into svn under the tree:
+# trunk/py/scripts/addons/io_convert_image_to_mesh_img
+# may be moved out to contrib once the blender downloader works well
+# (TJS - 2010-12-14)
+
+
+if "bpy" in locals():
+ import imp
+ imp.reload(import_img)
+else:
+ from . import import_img
+
+
+import bpy
+from bpy.props import *
+from bpy_extras.io_utils import ImportHelper
+
+
+class ImportHiRISEIMGDTM(bpy.types.Operator, ImportHelper):
+ '''Import a HiRISE DTM formatted as a PDS IMG file'''
+ bl_idname = "import_shape.img"
+ bl_label = "Import HiRISE DTM from PDS IMG"
+
+ filename_ext = ".IMG"
+ filter_glob = StringProperty(default="*.IMG", options={'HIDDEN'})
+
+ scale = FloatProperty(name="Scale",
+ description="Scale the IMG by this value",
+ min=0.0001,
+ max=10.0,
+ soft_min=0.001,
+ soft_max=100.0,
+ default=0.01)
+
+ bin_mode = EnumProperty(items=(
+ ('NONE', "None", "Don't bin the image"),
+ ('BIN2', "2x2", "use 2x2 binning to import the mesh"),
+ ('BIN6', "6x6", "use 6x6 binning to import the mesh"),
+ ('BIN6-FAST', "6x6 Fast", "use one sample per 6x6 region"),
+ ('BIN12', "12x12", "use 12x12 binning to import the mesh"),
+ ('BIN12-FAST', "12x12 Fast", "use one sample per 12x12 region"),
+ ),
+ name="Binning",
+ description="Import Binning.",
+ default='BIN12-FAST'
+ )
+
+ #red_material = BoolProperty(name="Mars Red Mesh",
+ # description="Set the mesh as a 'Mars' red value",
+ # default=True
+ # )
+
+ ## TODO: add support for cropping on import when the checkbox is checked
+ # do_crop = BoolProperty(name="Crop Image", description="Crop the image during import", ... )
+ ## we only want these visible when the above is "true"
+ # crop_x = IntProperty(name="X", description="Offset from left side of image")
+ # crop_y = IntProperty(name="Y", description="Offset from top of image")
+ # crop_w = IntProperty(name="Width", description="width of cropped operation")
+ # crop_h = IntProperty(name="Height", description="height of cropped region")
+ ## This is also a bit ugly and maybe an anti-pattern. The problem is that
+ ## importing a HiRISE DTM at full resolution will likely kill any mortal user with
+ ## less than 16 GB RAM and getting at specific features in a DTM at full res
+ ## may prove beneficial. Someday most mortals will have 16GB RAM.
+ ## -TJS 2010-11-23
+
+ def execute(self, context):
+ filepath = self.filepath
+ filepath = bpy.path.ensure_ext(filepath, self.filename_ext)
+
+ return import_img.load(self, context,
+ filepath=self.filepath,
+ scale=self.scale,
+ bin_mode=self.bin_mode,
+ cropVars=False,
+ # marsRed=self.red_material
+ marsRed=False
+ )
+
+## How to register the script inside of Blender
+
+def menu_import(self, context):
+ self.layout.operator(ImportHiRISEIMGDTM.bl_idname, text="HiRISE DTM from PDS IMG (*.IMG)")
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ bpy.types.INFO_MT_file_import.append(menu_import)
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+ bpy.types.INFO_MT_file_import.remove(menu_import)
+
+if __name__ == "__main__":
+ register()
diff --git a/io_convert_image_to_mesh_img/import_img.py b/io_convert_image_to_mesh_img/import_img.py
new file mode 100644
index 00000000..1735d1c3
--- /dev/null
+++ b/io_convert_image_to_mesh_img/import_img.py
@@ -0,0 +1,786 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+"""
+This script can import a HiRISE DTM .IMG file.
+"""
+
+import bpy
+from bpy.props import *
+
+from struct import pack, unpack, unpack_from
+import os
+import queue, threading
+
+class image_props:
+ ''' keeps track of image attributes throughout the hirise_dtm_helper class '''
+ def __init__(self, name, dimensions, pixel_scale):
+ self.name( name )
+ self.dims( dimensions )
+ self.processed_dims( dimensions )
+ self.pixel_scale( pixel_scale )
+
+ def dims(self, dims=None):
+ if dims is not None:
+ self.__dims = dims
+ return self.__dims
+
+ def processed_dims(self, processed_dims=None):
+ if processed_dims is not None:
+ self.__processed_dims = processed_dims
+ return self.__processed_dims
+
+ def name(self, name=None):
+ if name is not None:
+ self.__name = name
+ return self.__name
+
+ def pixel_scale(self, pixel_scale=None):
+ if pixel_scale is not None:
+ self.__pixel_scale = pixel_scale
+ return self.__pixel_scale
+
+class hirise_dtm_helper(object):
+ ''' methods to understand/import a HiRISE DTM formatted as a PDS .IMG '''
+
+ def __init__(self, context, filepath):
+ self.__context = context
+ self.__filepath = filepath
+ self.__ignore_value = 0x00000000
+ self.__bin_mode = 'BIN6'
+ self.scale( 1.0 )
+ self.__cropXY = False
+ self.marsRed(False)
+
+ def bin_mode(self, bin_mode=None):
+ if bin_mode != None:
+ self.__bin_mode = bin_mode
+ return self.__bin_mode
+
+ def scale(self, scale=None):
+ if scale is not None:
+ self.__scale = scale
+ return self.__scale
+
+ def crop(self, widthX, widthY, offX, offY):
+ self.__cropXY = [ widthX, widthY, offX, offY ]
+ return self.__cropXY
+
+ def marsRed(self, marsRed=None):
+ if marsRed is not None:
+ self.__marsRed = marsRed
+ return self.__marsRed
+
+ def dbg(self, mesg):
+ print(mesg)
+
+ ############################################################################
+ ## PDS Label Operations
+ ############################################################################
+
+ def parsePDSLabel(self, labelIter, currentObjectName=None, level = ""):
+ # Let's parse this thing... semi-recursively
+ ## I started writing this caring about everything in the PDS standard but ...
+ ## it's a mess and I only need a few things -- thar be hacks below
+ ## Mostly I just don't care about continued data from previous lines
+ label_structure = []
+
+ # When are we done with this level?
+ endStr = "END"
+ if not currentObjectName is None:
+ endStr = "END_OBJECT = %s" % currentObjectName
+ line = ""
+
+ while not line.rstrip() == endStr:
+ line = next(labelIter)
+
+ # Get rid of comments
+ comment = line.find("/*")
+ if comment > -1:
+ line = line[:comment]
+
+ # Take notice of objects
+ if line[:8] == "OBJECT =":
+ objName = line[8:].rstrip()
+ label_structure.append(
+ (
+ objName.lstrip().rstrip(),
+ self.parsePDSLabel(labelIter, objName.lstrip().rstrip(), level + " ")
+ )
+ )
+ elif line.find("END_OBJECT =") > -1:
+ pass
+ elif len(line.rstrip().lstrip()) > 0:
+ key_val = line.split(" = ", 2)
+ if len(key_val) == 2:
+ label_structure.append( (key_val[0].rstrip().lstrip(), key_val[1].rstrip().lstrip()) )
+
+ return label_structure
+
+ # There has got to be a better way in python?
+ def iterArr(self, label):
+ for line in label:
+ yield line
+
+ def getPDSLabel(self, img):
+ # Just takes file and stores it into an array for later use
+ label = []
+ done = False;
+ # Grab label into array of lines
+ while not done:
+ line = str(img.readline(), 'utf-8')
+ if line.rstrip() == "END":
+ done = True
+ label.append(line)
+ return (label, self.parsePDSLabel(self.iterArr(label)))
+
+ def getLinesAndSamples(self, label):
+ ''' uses the parsed PDS Label to get the LINES and LINE_SAMPLES parameters
+ from the first object named "IMAGE" -- is hackish
+ '''
+ lines = None
+ line_samples = None
+ for obj in label:
+ if obj[0] == "IMAGE":
+ return self.getLinesAndSamples(obj[1])
+ if obj[0] == "LINES":
+ lines = int(obj[1])
+ if obj[0] == "LINE_SAMPLES":
+ line_samples = int(obj[1])
+
+ return ( line_samples, lines )
+
+ def getValidMinMax(self, label):
+ ''' uses the parsed PDS Label to get the VALID_MINIMUM and VALID_MAXIMUM parameters
+ from the first object named "IMAGE" -- is hackish
+ '''
+ lines = None
+ line_samples = None
+ for obj in label:
+ if obj[0] == "IMAGE":
+ return self.getValidMinMax(obj[1])
+ if obj[0] == "VALID_MINIMUM":
+ vmin = float(obj[1])
+ if obj[0] == "VALID_MAXIMUM":
+ vmax = float(obj[1])
+
+ return ( vmin, vmax )
+
+ def getMissingConstant(self, label):
+ ''' uses the parsed PDS Label to get the MISSING_CONSTANT parameter
+ from the first object named "IMAGE" -- is hackish
+ '''
+
+ lines = None
+ line_samples = None
+ for obj in label:
+ if obj[0] == "IMAGE":
+ return self.getMissingConstant(obj[1])
+ if obj[0] == "MISSING_CONSTANT":
+ bit_string_repr = obj[1]
+
+ # This is always the same for a HiRISE image, so we are just checking it
+ # to be a little less insane here. If someone wants to support another
+ # constant then go for it. Just make sure this one continues to work too
+ pieces = bit_string_repr.split("#")
+ if pieces[0] == "16" and pieces[1] == "FF7FFFFB":
+ ignore_value = unpack("f", pack("I", 0xFF7FFFFB))[0]
+
+ return ( ignore_value )
+
+ ############################################################################
+ ## Image operations
+ ############################################################################
+
+ # decorator to run a generator in a thread
+ def threaded_generator(func):
+ def start(*args,**kwargs):
+ # Setup a queue of returned items
+ yield_q = queue.Queue()
+ # Thread to run generator inside of
+ def worker():
+ for obj in func(*args,**kwargs): yield_q.put(obj)
+ yield_q.put(StopIteration)
+ t = threading.Thread(target=worker)
+ t.start()
+ # yield from the queue as fast as we can
+ obj = yield_q.get()
+ while obj is not StopIteration:
+ yield obj
+ obj = yield_q.get()
+
+ # return the thread-wrapped generator
+ return start
+
+ @threaded_generator
+ def bin2(self, image_iter, bin2_method_type="SLOW"):
+ ''' this is an iterator that: Given an image iterator will yield binned lines '''
+
+ img_props = next(image_iter)
+ # dimensions shrink as we remove pixels
+ processed_dims = img_props.processed_dims()
+ processed_dims = ( processed_dims[0]//2, processed_dims[1]//2 )
+ img_props.processed_dims( processed_dims )
+ # each pixel is larger as binning gets larger
+ pixel_scale = img_props.pixel_scale()
+ pixel_scale = ( pixel_scale[0]*2, pixel_scale[1]*2 )
+ img_props.pixel_scale( pixel_scale )
+ yield img_props
+
+ # Take two lists [a1, a2, a3], [b1, b2, b3] and combine them into one
+ # list of [a1 + b1, a2+b2, ... ] as long as both values are not ignorable
+ combine_fun = lambda a, b: a != self.__ignore_value and b != self.__ignore_value and a + b or self.__ignore_value
+
+ line_count = 0
+ ret_list = []
+ for line in image_iter:
+ if line_count == 1:
+ line_count = 0
+ tmp_list = list(map(combine_fun, line, last_line))
+ while len(tmp_list) > 1:
+ ret_list.append( combine_fun( tmp_list[0], tmp_list[1] ) )
+ del tmp_list[0:2]
+ yield ret_list
+ ret_list = []
+ last_line = line
+ line_count += 1
+
+ @threaded_generator
+ def bin6(self, image_iter, bin6_method_type="SLOW"):
+ ''' this is an iterator that: Given an image iterator will yield binned lines '''
+
+ img_props = next(image_iter)
+ # dimensions shrink as we remove pixels
+ processed_dims = img_props.processed_dims()
+ processed_dims = ( processed_dims[0]//6, processed_dims[1]//6 )
+ img_props.processed_dims( processed_dims )
+ # each pixel is larger as binning gets larger
+ pixel_scale = img_props.pixel_scale()
+ pixel_scale = ( pixel_scale[0]*6, pixel_scale[1]*6 )
+ img_props.pixel_scale( pixel_scale )
+ yield img_props
+
+ if bin6_method_type == "FAST":
+ bin6_method = self.bin6_real_fast
+ else:
+ bin6_method = self.bin6_real
+
+ raw_data = []
+ line_count = 0
+ for line in image_iter:
+ raw_data.append( line )
+ line_count += 1
+ if line_count == 6:
+ yield bin6_method( raw_data )
+ line_count = 0
+ raw_data = []
+
+ def bin6_real(self, raw_data):
+ ''' does a 6x6 sample of raw_data and returns a single line of data '''
+ # TODO: make this more efficient
+
+ binned_data = []
+
+ # Filter out those unwanted hugely negative values...
+ filter_fun = lambda a: self.__ignore_value.__ne__(a)
+
+ base = 0
+ for i in range(0, len(raw_data[0])//6):
+
+ ints = list(filter( filter_fun, raw_data[0][base:base+6] +
+ raw_data[1][base:base+6] +
+ raw_data[2][base:base+6] +
+ raw_data[3][base:base+6] +
+ raw_data[4][base:base+6] +
+ raw_data[5][base:base+6] ))
+ len_ints = len( ints )
+
+ # If we have all pesky values, return a pesky value
+ if len_ints == 0:
+ binned_data.append( self.__ignore_value )
+ else:
+ binned_data.append( sum(ints) / len(ints) )
+
+ base += 6
+ return binned_data
+
+ def bin6_real_fast(self, raw_data):
+ ''' takes a single value from each 6x6 sample of raw_data and returns a single line of data '''
+ # TODO: make this more efficient
+
+ binned_data = []
+
+ base = 0
+ for i in range(0, len(raw_data[0])//6):
+ binned_data.append( raw_data[0][base] )
+ base += 6
+
+ return binned_data
+
+ @threaded_generator
+ def bin12(self, image_iter, bin12_method_type="SLOW"):
+ ''' this is an iterator that: Given an image iterator will yield binned lines '''
+
+ img_props = next(image_iter)
+ # dimensions shrink as we remove pixels
+ processed_dims = img_props.processed_dims()
+ processed_dims = ( processed_dims[0]//12, processed_dims[1]//12 )
+ img_props.processed_dims( processed_dims )
+ # each pixel is larger as binning gets larger
+ pixel_scale = img_props.pixel_scale()
+ pixel_scale = ( pixel_scale[0]*12, pixel_scale[1]*12 )
+ img_props.pixel_scale( pixel_scale )
+ yield img_props
+
+ if bin12_method_type == "FAST":
+ bin12_method = self.bin12_real_fast
+ else:
+ bin12_method = self.bin12_real
+
+ raw_data = []
+ line_count = 0
+ for line in image_iter:
+ raw_data.append( line )
+ line_count += 1
+ if line_count == 12:
+ yield bin12_method( raw_data )
+ line_count = 0
+ raw_data = []
+
+ def bin12_real(self, raw_data):
+ ''' does a 12x12 sample of raw_data and returns a single line of data '''
+
+ binned_data = []
+
+ # Filter out those unwanted hugely negative values...
+ filter_fun = lambda a: self.__ignore_value.__ne__(a)
+
+ base = 0
+ for i in range(0, len(raw_data[0])//12):
+
+ ints = list(filter( filter_fun, raw_data[0][base:base+12] +
+ raw_data[1][base:base+12] +
+ raw_data[2][base:base+12] +
+ raw_data[3][base:base+12] +
+ raw_data[4][base:base+12] +
+ raw_data[5][base:base+12] +
+ raw_data[6][base:base+12] +
+ raw_data[7][base:base+12] +
+ raw_data[8][base:base+12] +
+ raw_data[9][base:base+12] +
+ raw_data[10][base:base+12] +
+ raw_data[11][base:base+12] ))
+ len_ints = len( ints )
+
+ # If we have all pesky values, return a pesky value
+ if len_ints == 0:
+ binned_data.append( self.__ignore_value )
+ else:
+ binned_data.append( sum(ints) / len(ints) )
+
+ base += 12
+ return binned_data
+
+ def bin12_real_fast(self, raw_data):
+ ''' takes a single value from each 12x12 sample of raw_data and returns a single line of data '''
+ return raw_data[0][11::12]
+
+ @threaded_generator
+ def cropXY(self, image_iter, XSize=None, YSize=None, XOffset=0, YOffset=0):
+ ''' return a cropped portion of the image '''
+
+ img_props = next(image_iter)
+ # dimensions shrink as we remove pixels
+ processed_dims = img_props.processed_dims()
+
+ if XSize is None:
+ XSize = processed_dims[0]
+ if YSize is None:
+ YSize = processed_dims[1]
+
+ if XSize + XOffset > processed_dims[0]:
+ self.dbg("WARNING: Upstream dims are larger than cropped XSize dim")
+ XSize = processed_dims[0]
+ XOffset = 0
+ if YSize + YOffset > processed_dims[1]:
+ self.dbg("WARNING: Upstream dims are larger than cropped YSize dim")
+ YSize = processed_dims[1]
+ YOffset = 0
+
+ img_props.processed_dims( (XSize, YSize) )
+ yield img_props
+
+ currentY = 0
+ for line in image_iter:
+ if currentY >= YOffset and currentY <= YOffset + YSize:
+ yield line[XOffset:XOffset+XSize]
+ # Not much point in reading the rest of the data...
+ if currentY == YOffset + YSize:
+ return
+ currentY += 1
+
+ @threaded_generator
+ def getImage(self, img, img_props):
+ ''' Assumes 32-bit pixels -- bins image '''
+ dims = img_props.dims()
+ self.dbg("getting image (x,y): %d,%d" % ( dims[0], dims[1] ))
+
+ # setup to unpack more efficiently.
+ x_len = dims[0]
+ # little endian (PC_REAL)
+ unpack_str = "<"
+ # unpack_str = ">"
+ unpack_bytes_str = "<"
+ pack_bytes_str = "="
+ # 32 bits/sample * samples/line = y_bytes (per line)
+ x_bytes = 4*x_len
+ for x in range(0, x_len):
+ # 32-bit float is "d"
+ unpack_str += "f"
+ unpack_bytes_str += "I"
+ pack_bytes_str += "I"
+
+ # Each iterator yields this first ... it is for reference of the next iterator:
+ yield img_props
+
+ for y in range(0, dims[1]):
+ # pixels is a byte array
+ pixels = b''
+ while len(pixels) < x_bytes:
+ new_pixels = img.read( x_bytes - len(pixels) )
+ pixels += new_pixels
+ if len(new_pixels) == 0:
+ x_bytes = -1
+ pixels = []
+ self.dbg("Uh oh: unexpected EOF!")
+ if len(pixels) == x_bytes:
+ if 0 == 1:
+ repacked_pixels = b''
+ for integer in unpack(unpack_bytes_str, pixels):
+ repacked_pixels += pack("=I", integer)
+ yield unpack( unpack_str, repacked_pixels )
+ else:
+ yield unpack( unpack_str, pixels )
+
+ @threaded_generator
+ def shiftToOrigin(self, image_iter, image_min_max):
+ ''' takes a generator and shifts the points by the valid minimum
+ also removes points with value self.__ignore_value and replaces them with None
+ '''
+
+ # use the passed in values ...
+ valid_min = image_min_max[0]
+
+ # pass on dimensions/pixel_scale since we don't modify them here
+ yield next(image_iter)
+
+ self.dbg("shiftToOrigin filter enabled...");
+
+ # closures rock!
+ def normalize_fun(point):
+ if point == self.__ignore_value:
+ return None
+ return point - valid_min
+
+ for line in image_iter:
+ yield list(map(normalize_fun, line))
+ self.dbg("shifted all points")
+
+ @threaded_generator
+ def scaleZ(self, image_iter, scale_factor):
+ ''' scales the mesh values by a factor '''
+ # pass on dimensions since we don't modify them here
+ yield next(image_iter)
+
+ scale_factor = self.scale()
+
+ def scale_fun(point):
+ try:
+ return point * scale_factor
+ except:
+ return None
+
+ for line in image_iter:
+ yield list(map(scale_fun, line))
+
+ def genMesh(self, image_iter):
+ '''Returns a mesh object from an image iterator this has the
+ value-added feature that a value of "None" is ignored
+ '''
+
+ # Get the output image size given the above transforms
+ img_props = next(image_iter)
+
+ # Let's interpolate the binned DTM with blender -- yay meshes!
+ coords = []
+ faces = []
+ face_count = 0
+ coord = -1
+ max_x = img_props.processed_dims()[0]
+ max_y = img_props.processed_dims()[1]
+
+ scale_x = self.scale() * img_props.pixel_scale()[0]
+ scale_y = self.scale() * img_props.pixel_scale()[1]
+
+ line_count = 0
+ current_line = []
+ # seed the last line (or previous line) with a line
+ last_line = next(image_iter)
+ point_offset = 0
+ previous_point_offset = 0
+
+ # Let's add any initial points that are appropriate
+ x = 0
+ point_offset += len( last_line ) - last_line.count(None)
+ for z in last_line:
+ if z != None:
+ coords.extend([x*scale_x, 0.0, z])
+ coord += 1
+ x += 1
+
+ # We want to ignore points with a value of "None" but we also need to create vertices
+ # with an index that we can re-create on the next line. The solution is to remember
+ # two offsets: the point offset and the previous point offset.
+ # these offsets represent the point index that blender gets -- not the number of
+ # points we have read from the image
+
+ # if "x" represents points that are "None" valued then conceptually this is how we
+ # think of point indices:
+ #
+ # previous line: offset0 x x +1 +2 +3
+ # current line: offset1 x +1 +2 +3 x
+
+ # once we can map points we can worry about making triangular or square faces to fill
+ # the space between vertices so that blender is more efficient at managing the final
+ # structure.
+
+ self.dbg('generate mesh coords/faces from processed image data...')
+
+ # read each new line and generate coordinates+faces
+ for dtm_line in image_iter:
+
+ # Keep track of where we are in the image
+ line_count += 1
+ y_val = line_count*-scale_y
+ if line_count % 31 == 0:
+ self.dbg("reading image... %d of %d" % ( line_count, max_y ))
+
+ # Just add all points blindly
+ # TODO: turn this into a map
+ x = 0
+ for z in dtm_line:
+ if z != None:
+ coords.extend( [x*scale_x, y_val, z] )
+ coord += 1
+ x += 1
+
+ # Calculate faces
+ for x in range(0, max_x - 1):
+ vals = [
+ last_line[ x + 1 ],
+ last_line[ x ],
+ dtm_line[ x ],
+ dtm_line[ x + 1 ],
+ ]
+
+ # Two or more values of "None" means we can ignore this block
+ none_val = vals.count(None)
+
+ # Common case: we can create a square face
+ if none_val == 0:
+ faces.extend( [
+ previous_point_offset,
+ previous_point_offset+1,
+ point_offset+1,
+ point_offset,
+ ] )
+ face_count += 1
+ elif none_val == 1:
+ # special case: we can implement a triangular face
+ ## NB: blender 2.5 makes a triangular face when the last coord is 0
+ # TODO: implement a triangular face
+ pass
+
+ if vals[1] != None:
+ previous_point_offset += 1
+ if vals[2] != None:
+ point_offset += 1
+
+ # Squeeze the last point offset increment out of the previous line
+ if last_line[-1] != None:
+ previous_point_offset += 1
+
+ # Squeeze the last point out of the current line
+ if dtm_line[-1] != None:
+ point_offset += 1
+
+ # remember what we just saw (and forget anything before that)
+ last_line = dtm_line
+
+ self.dbg('generate mesh from coords/faces...')
+ me = bpy.data.meshes.new(img_props.name()) # create a new mesh
+
+ self.dbg('coord: %d' % coord)
+ self.dbg('len(coords): %d' % len(coords))
+ self.dbg('len(faces): %d' % len(faces))
+
+ self.dbg('setting coords...')
+ me.vertices.add(len(coords)/3)
+ me.vertices.foreach_set("co", coords)
+
+ self.dbg('setting faces...')
+ me.faces.add(len(faces)/4)
+ me.faces.foreach_set("vertices_raw", faces)
+
+ self.dbg('running update...')
+ me.update()
+
+ bin_desc = self.bin_mode()
+ if bin_desc == 'NONE':
+ bin_desc = 'No Bin'
+
+ ob=bpy.data.objects.new("DTM - %s" % bin_desc, me)
+
+ return ob
+
+ def marsRedMaterial(self):
+ ''' produce some approximation of a mars surface '''
+ mat = None
+ for material in bpy.data.materials:
+ if material.getName() == "redMars":
+ mat = material
+ if mat is None:
+ mat = bpy.data.materials.new("redMars")
+ mat.diffuse_shader = 'MINNAERT'
+ mat.setRGBCol( (0.426, 0.213, 0.136) )
+ mat.setDiffuseDarkness(0.8)
+ mat.specular_shader = 'WARDISO'
+ mat.setSpecCol( (1.000, 0.242, 0.010) )
+ mat.setSpec( 0.010 )
+ mat.setRms( 0.100 )
+ return mat
+
+ ################################################################################
+ # Yay, done with helper functions ... let's see the abstraction in action! #
+ ################################################################################
+ def execute(self):
+
+ self.dbg('opening/importing file: %s' % self.__filepath)
+ img = open(self.__filepath, 'rb')
+
+ self.dbg('read PDS Label...')
+ (label, parsedLabel) = self.getPDSLabel(img)
+
+ self.dbg('parse PDS Label...')
+ image_dims = self.getLinesAndSamples(parsedLabel)
+ img_min_max_vals = self.getValidMinMax(parsedLabel)
+ self.__ignore_value = self.getMissingConstant(parsedLabel)
+
+ self.dbg('import/bin image data...')
+
+ # MAGIC VALUE? -- need to formalize this to rid ourselves of bad points
+ img.seek(28)
+ # Crop off 4 lines
+ img.seek(4*image_dims[0])
+
+ # HiRISE images (and most others?) have 1m x 1m pixels
+ pixel_scale=(1, 1)
+
+ # The image we are importing
+ image_name = os.path.basename( self.__filepath )
+
+ # Set the properties of the image in a manageable object
+ img_props = image_props( image_name, image_dims, pixel_scale )
+
+ # Get an iterator to iterate over lines
+ image_iter = self.getImage(img, img_props)
+
+ ## Wrap the image_iter generator with other generators to modify the dtm on a
+ ## line-by-line basis. This creates a stream of modifications instead of reading
+ ## all of the data at once, processing all of the data (potentially several times)
+ ## and then handing it off to blender
+ ## TODO: find a way to alter projection based on transformations below
+
+ if self.__cropXY:
+ image_iter = self.cropXY(image_iter,
+ XSize=self.__cropXY[0],
+ YSize=self.__cropXY[1],
+ XOffset=self.__cropXY[2],
+ YOffset=self.__cropXY[3]
+ )
+
+ # Select an appropriate binning mode
+ ## TODO: generalize the binning fn's
+ bin_mode = self.bin_mode()
+ bin_mode_funcs = {
+ 'BIN2': self.bin2(image_iter),
+ 'BIN6': self.bin6(image_iter),
+ 'BIN6-FAST': self.bin6(image_iter, 'FAST'),
+ 'BIN12': self.bin12(image_iter),
+ 'BIN12-FAST': self.bin12(image_iter, 'FAST')
+ }
+ if bin_mode in bin_mode_funcs.keys():
+ image_iter = bin_mode_funcs[ bin_mode ]
+
+ image_iter = self.shiftToOrigin(image_iter, img_min_max_vals)
+
+ if self.scale != 1.0:
+ image_iter = self.scaleZ(image_iter, img_min_max_vals)
+
+ # Create a new mesh object and set data from the image iterator
+ self.dbg('generating mesh object...')
+ ob_new = self.genMesh(image_iter)
+
+ if self.marsRed():
+ mars_red = self.marsRedMaterial()
+ ob_new.materials += [mars_red]
+
+ if img:
+ img.close()
+
+ # Add mesh object to the current scene
+ scene = self.__context.scene
+ self.dbg('linking object to scene...')
+ scene.objects.link(ob_new)
+ scene.update()
+
+ # deselect other objects
+ bpy.ops.object.select_all(action='DESELECT')
+
+ # scene.objects.active = ob_new
+ # Select the new mesh
+ ob_new.select = True
+
+ self.dbg('done with ops ... now wait for blender ...')
+
+ return ('FINISHED',)
+
+def load(operator, context, filepath, scale, bin_mode, cropVars, marsRed):
+ print("Bin Mode: %s" % bin_mode)
+ print("Scale: %f" % scale)
+ helper = hirise_dtm_helper(context,filepath)
+ helper.bin_mode( bin_mode )
+ helper.scale( scale )
+ if cropVars:
+ helper.crop( cropVars[0], cropVars[1], cropVars[2], cropVars[3] )
+ helper.execute()
+ if marsRed:
+ helper.marsRed(marsRed)
+
+ print("Loading %s" % filepath)
+ return {'FINISHED'}
diff --git a/io_curve_svg/__init__.py b/io_curve_svg/__init__.py
new file mode 100644
index 00000000..dc68490b
--- /dev/null
+++ b/io_curve_svg/__init__.py
@@ -0,0 +1,84 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+bl_info = {
+ "name": "Scalable Vector Graphics (SVG) 1.1 format",
+ "author": "JM Soler, Sergey Sharybin",
+ "blender": (2, 5, 7),
+ "api": 36079,
+ "location": "File > Import > Scalable Vector Graphics (.svg)",
+ "description": "Import SVG as curves",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/Import-Export/SVG",
+ "tracker_url": "http://projects.blender.org/tracker/index.php?"\
+ "func=detail&aid=26166&",
+ "support": 'OFFICIAL',
+ "category": "Import-Export"}
+
+# To support reload properly, try to access a package var,
+# if it's there, reload everything
+if "bpy" in locals():
+ import imp
+ if "import_svg" in locals():
+ imp.reload(import_svg)
+
+
+import bpy
+from bpy.props import StringProperty
+from bpy_extras.io_utils import ImportHelper, ExportHelper
+
+
+class ImportSVG(bpy.types.Operator, ImportHelper):
+ '''Load a SVG file'''
+ bl_idname = "import_curve.svg"
+ bl_label = "Import SVG"
+
+ filename_ext = ".svg"
+ filter_glob = StringProperty(default="*.svg", options={'HIDDEN'})
+
+ def execute(self, context):
+ from . import import_svg
+
+ return import_svg.load(self, context,
+ **self.as_keywords(ignore=("filter_glob",)))
+
+
+def menu_func_import(self, context):
+ self.layout.operator(ImportSVG.bl_idname,
+ text="Scalable Vector Graphics (.svg)")
+
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ bpy.types.INFO_MT_file_import.append(menu_func_import)
+
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+ bpy.types.INFO_MT_file_import.remove(menu_func_import)
+
+# NOTES
+# - blender version is hardcoded
+
+if __name__ == "__main__":
+ register()
diff --git a/io_curve_svg/import_svg.py b/io_curve_svg/import_svg.py
new file mode 100644
index 00000000..231986e5
--- /dev/null
+++ b/io_curve_svg/import_svg.py
@@ -0,0 +1,1831 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+import re
+import xml.dom.minidom
+from math import cos, sin, tan, atan2, pi, ceil
+
+import bpy
+from mathutils import Vector, Matrix
+
+from . import svg_colors
+
+#### Common utilities ####
+
+# TODO: "em" and "ex" aren't actually supported
# Conversion factors from SVG length units to user units,
# assuming the common 90 dpi basis (1in = 90px).
# TODO: "em" and "ex" aren't actually supported
SVGUnits = {"": 1.0,
            "px": 1.0,
            "in": 90,
            "mm": 90 / 25.4,
            "cm": 90 / 2.54,
            "pt": 1.25,
            "pc": 15.0,
            "em": 1.0,
            "ex": 1.0,
            "INVALID": 1.0,  # some DocBook files contain this
            }

# Template for per-node style dictionaries; copied before use
SVGEmptyStyles = {'useFill': None,
                  'fill': None}
+
+
def SVGParseFloat(s, i=0):
    """
    Parse the first float value from string s, starting at index i

    Leading whitespace and comma separators are skipped.  Supports an
    optional sign, a missing integer part (".5"), a missing fractional
    part after the dot ("2.", written by Inkscape sometimes) and an
    exponent ("1.5e-3").

    Returns (token, i): the float as a string (or None if only
    whitespace/separators remain) and the index just past it.

    Raises Exception for malformed values.
    """

    start = i
    n = len(s)
    token = ''

    # Skip leading whitespace characters and comma separators
    while i < n and (s[i].isspace() or s[i] == ','):
        i += 1

    if i == n:
        return None, i

    # Read sign ('+' is legal but not kept in the token)
    if s[i] == '-':
        token += '-'
        i += 1
    elif s[i] == '+':
        i += 1

    # Read integer part (may be absent, e.g. ".5")
    if i < n and s[i].isdigit():
        while i < n and s[i].isdigit():
            token += s[i]
            i += 1

        # Fractional part
        if i < n and s[i] == '.':
            token += '.'
            i += 1

            if i < n and s[i].isdigit():
                while i < n and s[i].isdigit():
                    token += s[i]
                    i += 1
            elif i == n or s[i].isspace() or s[i] == ',':
                # Inkscape sometimes uses a weird float format with a
                # missing fractional part after the dot.  Suppose zero
                # fractional part for this case
                pass
            else:
                raise Exception('Invalid float value near ' +
                                s[start:start + 10])
    elif i < n and s[i] == '.':
        # Missing integer part, e.g. ".5" -- requires at least one
        # digit after the dot
        token += '.'
        i += 1

        if not (i < n and s[i].isdigit()):
            raise Exception('Invalid float value near ' +
                            s[start:start + 10])

        while i < n and s[i].isdigit():
            token += s[i]
            i += 1
    else:
        raise Exception('Invalid float value near ' + s[start:start + 10])

    # Exponent
    if i < n and (s[i] == 'e' or s[i] == 'E'):
        token += s[i]
        i += 1
        if i < n and (s[i] == '+' or s[i] == '-'):
            token += s[i]
            i += 1

        if i < n and s[i].isdigit():
            while i < n and s[i].isdigit():
                token += s[i]
                i += 1
        else:
            raise Exception('Invalid float value near ' +
                            s[start:start + 10])

    return token, i
+
+
def SVGCreateCurve():
    """
    Create new curve object to hold splines in

    Creates an empty CURVE datablock, wraps it in an object and links
    the object into the current scene.  Returns the new object.
    """

    cu = bpy.data.curves.new("Curve", 'CURVE')
    obj = bpy.data.objects.new("Curve", cu)
    bpy.context.scene.objects.link(obj)

    return obj
+
+
def SVGFinishCurve():
    """
    Finish curve creation

    Placeholder hook called after each curve object is filled in;
    currently does nothing.
    """

    pass
+
+
def SVGFlipHandle(x, y, x1, y1):
    """
    Mirror the handle (x1, y1) through the base point (x, y)

    Returns the reflected handle coordinates as a tuple.
    """

    return tuple(base + (base - handle)
                 for base, handle in ((x, x1), (y, y1)))
+
+
def SVGParseCoord(coord, size):
    """
    Parse coordinate component to common basis

    Needed to handle coordinates set in cm, mm, inches...

    coord -- coordinate string with optional unit suffix ("mm", "%", ...)
    size  -- reference length used to resolve percentage values

    Returns the value as a float in user units.
    """

    token, last_char = SVGParseFloat(coord)
    val = float(token)
    unit = coord[last_char:].strip()  # strip() incase there is a space

    if unit == '%':
        return float(size) / 100.0 * val

    # NOTE: the original had an unreachable "return val" after this
    # if/else; removed as dead code
    return val * SVGUnits[unit]
+
+
def SVGRectFromNode(node, context):
    """
    Get display rectangle from node

    The node's viewBox attribute takes priority; otherwise explicit
    width/height attributes override the current context rectangle.
    Returns a (width, height) tuple.
    """

    w, h = context['rect'][0], context['rect'][1]

    view_box = node.getAttribute('viewBox')
    if view_box:
        tokens = view_box.replace(',', ' ').split()
        return (SVGParseCoord(tokens[2], w), SVGParseCoord(tokens[3], h))

    width = node.getAttribute('width')
    if width:
        w = SVGParseCoord(width, w)

    height = node.getAttribute('height')
    if height:
        h = SVGParseCoord(height, h)

    return (w, h)
+
+
def SVGMatrixFromNode(node, context):
    """
    Get transformation matrix from given node

    Only svg/use/symbol nodes carry a viewport of their own; any other
    tag yields the identity matrix.  The resulting matrix maps viewBox
    coordinates into the node's x/y/width/height viewport, preserving
    aspect ratio (content is centered on the smaller scale axis).
    """

    tagName = node.tagName.lower()
    tags = ['svg:svg', 'svg:use', 'svg:symbol']

    if tagName not in tags and 'svg:' + tagName not in tags:
        return Matrix()

    rect = context['rect']

    m = Matrix()
    x = SVGParseCoord(node.getAttribute('x') or '0', rect[0])
    y = SVGParseCoord(node.getAttribute('y') or '0', rect[1])
    w = SVGParseCoord(node.getAttribute('width') or str(rect[0]), rect[0])
    h = SVGParseCoord(node.getAttribute('height') or str(rect[1]), rect[1])

    # NOTE: Translation/Scale are classmethods called through the
    # instance here (2.5x mathutils API) -- m's own value is not used
    m = m.Translation(Vector((x, y, 0.0)))
    if len(context['rects']) > 1:
        # Nested viewport: scale relative to the enclosing rectangle
        m = m * m.Scale(w / rect[0], 4, Vector((1.0, 0.0, 0.0)))
        m = m * m.Scale(h / rect[1], 4, Vector((0.0, 1.0, 0.0)))

    if node.getAttribute('viewBox'):
        viewBox = node.getAttribute('viewBox').replace(',', ' ').split()
        vx = SVGParseCoord(viewBox[0], w)
        vy = SVGParseCoord(viewBox[1], h)
        vw = SVGParseCoord(viewBox[2], w)
        vh = SVGParseCoord(viewBox[3], h)

        sx = w / vw
        sy = h / vh
        scale = min(sx, sy)

        # Center the uniformly-scaled viewBox inside the viewport
        tx = (w - vw * scale) / 2
        ty = (h - vh * scale) / 2
        m = m * m.Translation(Vector((tx, ty, 0.0)))

        # Shift viewBox origin to (0, 0) before scaling
        m = m * m.Translation(Vector((-vx, -vy, 0.0)))
        m = m * m.Scale(scale, 4, Vector((1.0, 0.0, 0.0)))
        m = m * m.Scale(scale, 4, Vector((0.0, 1.0, 0.0)))

    return m
+
+
def SVGParseTransform(transform):
    """
    Parse transform string and return transformation matrix

    Accepts a sequence of transform functions such as
    "translate(10, 20) rotate(45)" and composes them left to right.

    Raises Exception for unknown transform functions.
    """

    m = Matrix()
    # NOTE: the original pattern used [A-z], which also matches the
    # punctuation between 'Z' and 'a' ('[', '\\', ']', '^', '_', '`');
    # restricted to actual letters
    r = re.compile(r'\s*([A-Za-z]+)\s*\((.*?)\)')

    for match in r.finditer(transform):
        func = match.group(1)
        params = match.group(2)
        params = params.replace(',', ' ').split()

        proc = SVGTransforms.get(func)
        if proc is None:
            # Typo "trasnform" in the original message fixed
            raise Exception('Unknown transform function: ' + func)

        m = m * proc(params)

    return m
+
+
def SVGGetMaterial(color, context):
    """
    Get material for specified color

    color   -- "#rgb"/"#rrggbb" hex, a named SVG color, or "rgb(r, g, b)"
    context -- global parser context; created materials are cached in
               context['materials'] keyed by the color string

    Returns the (possibly cached) material, or None for colors which
    could not be recognized.
    """

    materials = context['materials']
    # NOTE: the original pattern lacked \s* after the second comma, so
    # "rgb(1, 2, 3)" (with a space) silently failed to match
    rgb_re = re.compile(
            r'^\s*rgb\s*\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)\s*$')

    if color in materials:
        return materials[color]

    # Remember the lookup key: the original stored hex colors under the
    # stripped/expanded string, so cached entries were never hit again
    orig_color = color

    diff = None
    if color.startswith('#'):
        color = color[1:]

        if len(color) == 3:
            # Expand short form, e.g. "fb0" -> "ffbb00"
            color = color[0] * 2 + color[1] * 2 + color[2] * 2

        diff = (int(color[0:2], 16), int(color[2:4], 16), int(color[4:6], 16))
    elif color in svg_colors.SVGColors:
        diff = svg_colors.SVGColors[color]
    elif rgb_re.match(color):
        c = rgb_re.findall(color)[0]
        diff = (float(c[0]), float(c[1]), float(c[2]))
    else:
        return None

    mat = bpy.data.materials.new(name='SVGMat')
    # SVG channels are 0..255, Blender expects 0..1
    mat.diffuse_color = [x / 255.0 for x in diff]

    materials[orig_color] = mat

    return mat
+
+
def SVGTransformTranslate(params):
    """
    translate SVG transform command

    params -- [tx] or [tx, ty] as strings; ty defaults to 0 per spec.
    """

    tx = float(params[0])
    ty = float(params[1]) if len(params) > 1 else 0.0

    return Matrix.Translation(Vector((tx, ty, 0.0)))
+
+
def SVGTransformMatrix(params):
    """
    matrix SVG transform command

    params -- the six values [a, b, c, d, e, f] of the SVG 2x3 matrix.
    """

    a = float(params[0])
    b = float(params[1])
    c = float(params[2])
    d = float(params[3])
    e = float(params[4])
    f = float(params[5])

    # Translation terms (e, f) sit in the last row -- layout matches the
    # 2.5x mathutils row-vector convention used by _transformCoord
    # (NOTE(review): verify against the mathutils version in use)
    return Matrix(((a, b, 0.0, 0.0),
                   (c, d, 0.0, 0.0),
                   (0, 0, 1.0, 0.0),
                   (e, f, 0.0, 1.0)))
+
+
def SVGTransformScale(params):
    """
    scale SVG transform command

    params -- [sx] or [sx, sy] as strings; per spec sy defaults to sx.
    """

    sx = float(params[0])
    sy = float(params[1]) if len(params) > 1 else sx

    m = Matrix()

    m = m * m.Scale(sx, 4, Vector((1.0, 0.0, 0.0)))
    m = m * m.Scale(sy, 4, Vector((0.0, 1.0, 0.0)))

    return m
+
+
def SVGTransformSkewX(params):
    """
    skewX SVG transform command

    params -- [angle] in degrees.
    """

    ang = float(params[0]) * pi / 180.0

    return Matrix(((1.0, 0.0, 0.0),
                   (tan(ang), 1.0, 0.0),
                   (0.0, 0.0, 1.0))).to_4x4()
+
+
def SVGTransformSkewY(params):
    """
    skewY SVG transform command

    params -- [angle] in degrees.
    """

    ang = float(params[0]) * pi / 180.0

    return Matrix(((1.0, tan(ang), 0.0),
                   (0.0, 1.0, 0.0),
                   (0.0, 0.0, 1.0))).to_4x4()
+
+
def SVGTransformRotate(params):
    """
    rotate SVG transform command

    params -- [angle] or [angle, cx, cy]: rotation angle in degrees and
    an optional rotation center (defaults to the origin).
    """

    ang = float(params[0]) * pi / 180.0
    cx = cy = 0.0

    if len(params) >= 3:
        cx = float(params[1])
        cy = float(params[2])

    # Rotate around (cx, cy): translate to center, rotate, translate back
    tm = Matrix.Translation(Vector((cx, cy, 0.0)))
    rm = Matrix.Rotation(ang, 4, Vector((0.0, 0.0, 1.0)))

    return tm * rm * tm.inverted()
+
# Dispatch table mapping SVG transform function names to their handlers
SVGTransforms = {'translate': SVGTransformTranslate,
                 'scale': SVGTransformScale,
                 'skewX': SVGTransformSkewX,
                 'skewY': SVGTransformSkewY,
                 'matrix': SVGTransformMatrix,
                 'rotate': SVGTransformRotate}
+
+
def SVGParseStyles(node, context):
    """
    Parse node to get different styles for displaying geometries
    (materials, filling flags, etc..)

    Returns a dict with keys 'useFill' (bool) and 'fill' (material or
    None).  NOTE: when a "style" attribute is present, this returns
    early and any separate "fill" attribute is ignored.
    """

    styles = SVGEmptyStyles.copy()

    style = node.getAttribute('style')
    if style:
        elems = style.split(';')
        for elem in elems:
            s = elem.split(':')

            # Skip malformed "name:value" entries
            if len(s) != 2:
                continue

            name = s[0].strip().lower()
            val = s[1].strip()

            if name == 'fill':
                val = val.lower()
                if val == 'none':
                    styles['useFill'] = False
                else:
                    styles['useFill'] = True
                    styles['fill'] = SVGGetMaterial(val, context)

        # No fill given in style attribute: default to filled black
        if styles['useFill'] is None:
            styles['useFill'] = True
            styles['fill'] = SVGGetMaterial('#000', context)

        return styles

    if styles['useFill'] is None:
        fill = node.getAttribute('fill')
        if fill:
            fill = fill.lower()
            if fill == 'none':
                styles['useFill'] = False
            else:
                styles['useFill'] = True
                styles['fill'] = SVGGetMaterial(fill, context)

    # Still nothing specified: SVG default is a black fill
    if styles['useFill'] is None:
        styles['useFill'] = True
        styles['fill'] = SVGGetMaterial('#000', context)

    return styles
+
+#### SVG path helpers ####
+
+
class SVGPathData:
    """
    SVG Path data token supplier

    Splits a path definition string (the "d" attribute) into a flat list
    of command letters and number tokens and provides sequential access.
    """

    __slots__ = ('_data',   # List of tokens
                 '_index',  # Index of current token in tokens list
                 '_len')    # Length of tokens list

    def __init__(self, d):
        """
        Initialize new path data supplier

        d - the definition of the outline of a shape
        """

        spaces = ' ,\t'
        # All known path command letters (lower case).
        # NOTE: the original list contained a stray '' entry -- harmless
        # for single-character tests, removed here
        commands = set('mlhvcsqtaz')
        tokens = []

        i = 0
        n = len(d)
        while i < n:
            c = d[i]

            if c in spaces:
                pass
            elif c.lower() in commands:
                tokens.append(c)
            elif c in ['-', '.'] or c.isdigit():
                token, last_char = SVGParseFloat(d, i)
                tokens.append(token)

                # in most cases len(token) and (last_char - i) are the same
                # but with whitespace or ',' prefix they are not.

                i += (last_char - i) - 1

            i += 1

        self._data = tokens
        self._index = 0
        self._len = len(tokens)

    def eof(self):
        """
        Check if end of data reached
        """

        return self._index >= self._len

    def cur(self):
        """
        Return current (not yet consumed) token, or None at end of data
        """

        if self.eof():
            return None

        return self._data[self._index]

    def lookupNext(self):
        """
        Get upcoming unconsumed token without moving the pointer

        Equivalent to cur(); kept as a separate name for call-site
        readability.
        """

        if self.eof():
            return None

        return self._data[self._index]

    def next(self):
        """
        Return current token and go to next one
        """

        if self.eof():
            return None

        token = self._data[self._index]
        self._index += 1

        return token

    def nextCoord(self):
        """
        Return coordinate created from current token and move to next token
        """

        token = self.next()

        if token is None:
            return None

        return float(token)
+
+
class SVGPathParser:
    """
    Parser of SVG path data

    Consumes tokens from SVGPathData and builds a list of spline
    definitions (dicts with 'points' and 'closed' keys) which the
    geometry classes turn into Blender curves.
    """

    __slots__ = ('_data',      # Path data supplier
                 '_point',     # Current point coordinate
                 '_handle',    # Last handle coordinate
                 '_splines',   # List of all splines created during parsing
                 '_spline',    # Currently handling spline
                 '_commands')  # Hash of all supported path commands

    def __init__(self, d):
        """
        Initialize path parser

        d - the definition of the outline of a shape
        """

        self._data = SVGPathData(d)
        self._point = None    # Current point
        self._handle = None   # Last handle
        self._splines = []    # List of splines in path
        self._spline = None   # Current spline

        self._commands = {'M': self._pathMoveTo,
                          'L': self._pathLineTo,
                          'H': self._pathLineTo,
                          'V': self._pathLineTo,
                          'C': self._pathCurveToCS,
                          'S': self._pathCurveToCS,
                          'Q': self._pathCurveToQT,
                          'T': self._pathCurveToQT,
                          'A': self._pathCurveToA,
                          'Z': self._pathClose,

                          'm': self._pathMoveTo,
                          'l': self._pathLineTo,
                          'h': self._pathLineTo,
                          'v': self._pathLineTo,
                          'c': self._pathCurveToCS,
                          's': self._pathCurveToCS,
                          'q': self._pathCurveToQT,
                          't': self._pathCurveToQT,
                          'a': self._pathCurveToA,
                          'z': self._pathClose}

    def _getCoordPair(self, relative, point):
        """
        Get next coordinate pair

        relative -- interpret the pair as an offset from point
        """

        x = self._data.nextCoord()
        y = self._data.nextCoord()

        if relative and point is not None:
            x += point[0]
            y += point[1]

        return x, y

    def _appendPoint(self, x, y, handle_left=None, handle_left_type='VECTOR',
                     handle_right=None, handle_right_type='VECTOR'):
        """
        Append point to spline

        If there's no active spline, create one and set it's first point
        to current point coordinate
        """

        if self._spline is None:
            self._spline = {'points': [],
                            'closed': False}

            self._splines.append(self._spline)

        if len(self._spline['points']) > 0:
            # Not sure bout specifications, but Illustrator could create
            # last point at the same position, as start point (which was
            # reached by MoveTo command) to set needed handle coords.
            # It's also could use last point at last position to make path
            # filled.

            first = self._spline['points'][0]
            if abs(first['x'] - x) < 1e-6 and abs(first['y'] - y) < 1e-6:
                if handle_left is not None:
                    first['handle_left'] = handle_left
                    first['handle_left_type'] = 'FREE'

                if handle_left_type != 'VECTOR':
                    first['handle_left_type'] = handle_left_type

                # A duplicate of the start point right before the next
                # MoveTo (or end of data) closes the spline
                if self._data.eof() or self._data.lookupNext().lower() == 'm':
                    self._spline['closed'] = True

                return

        point = {'x': x,
                 'y': y,

                 'handle_left': handle_left,
                 'handle_left_type': handle_left_type,

                 'handle_right': handle_right,
                 'handle_right_type': handle_right_type}

        self._spline['points'].append(point)

    def _updateHandle(self, handle=None, handle_type=None):
        """
        Update right handle of previous point when adding new point to spline
        """

        point = self._spline['points'][-1]

        if handle_type is not None:
            point['handle_right_type'] = handle_type

        if handle is not None:
            point['handle_right'] = handle

    def _pathMoveTo(self, code):
        """
        MoveTo path command

        Extra coordinate pairs after the first are implicit LineTo
        commands, as required by the SVG spec.
        """

        relative = code.islower()
        x, y = self._getCoordPair(relative, self._point)

        self._spline = None  # Flag to start new spline
        self._point = (x, y)

        cur = self._data.cur()
        while cur is not None and not cur.isalpha():
            x, y = self._getCoordPair(relative, self._point)

            if self._spline is None:
                self._appendPoint(self._point[0], self._point[1])

            self._appendPoint(x, y)

            self._point = (x, y)
            cur = self._data.cur()

        self._handle = None

    def _pathLineTo(self, code):
        """
        LineTo path command

        Handles 'L' (both coords), 'H' (horizontal) and 'V' (vertical)
        in absolute and relative flavors.
        """

        c = code.lower()

        cur = self._data.cur()
        while cur is not None and not cur.isalpha():
            if c == 'l':
                x, y = self._getCoordPair(code == 'l', self._point)
            elif c == 'h':
                x = self._data.nextCoord()
                y = self._point[1]
            else:
                x = self._point[0]
                y = self._data.nextCoord()

            # Relative single-axis commands offset only that axis
            if code == 'h':
                x += self._point[0]
            elif code == 'v':
                y += self._point[1]

            if self._spline is None:
                self._appendPoint(self._point[0], self._point[1])

            self._appendPoint(x, y)

            self._point = (x, y)
            cur = self._data.cur()

        self._handle = None

    def _pathCurveToCS(self, code):
        """
        Cubic BEZIER CurveTo path command

        'S'/'s' is the smooth variant: the first control point is the
        previous second control point reflected through the current point.
        """

        c = code.lower()
        cur = self._data.cur()
        while cur is not None and not cur.isalpha():
            if c == 'c':
                x1, y1 = self._getCoordPair(code.islower(), self._point)
                x2, y2 = self._getCoordPair(code.islower(), self._point)
            else:
                if self._handle is not None:
                    x1, y1 = SVGFlipHandle(self._point[0], self._point[1],
                                           self._handle[0], self._handle[1])
                else:
                    x1, y1 = self._point

                x2, y2 = self._getCoordPair(code.islower(), self._point)

            x, y = self._getCoordPair(code.islower(), self._point)

            if self._spline is None:
                self._appendPoint(self._point[0], self._point[1],
                    handle_left_type='FREE', handle_left=self._point,
                    handle_right_type='FREE', handle_right=(x1, y1))
            else:
                self._updateHandle(handle=(x1, y1), handle_type='FREE')

            self._appendPoint(x, y,
                handle_left_type='FREE', handle_left=(x2, y2),
                handle_right_type='FREE', handle_right=(x, y))

            self._point = (x, y)
            self._handle = (x2, y2)
            cur = self._data.cur()

    def _pathCurveToQT(self, code):
        """
        Quadratic BEZIER CurveTo path command

        'T'/'t' is the smooth variant: the control point is the previous
        control point reflected through the current point.
        """

        c = code.lower()
        cur = self._data.cur()

        while cur is not None and not cur.isalpha():
            if c == 'q':
                x1, y1 = self._getCoordPair(code.islower(), self._point)
            else:
                if self._handle is not None:
                    x1, y1 = SVGFlipHandle(self._point[0], self._point[1],
                                           self._handle[0], self._handle[1])
                else:
                    x1, y1 = self._point

            x, y = self._getCoordPair(code.islower(), self._point)

            if self._spline is None:
                self._appendPoint(self._point[0], self._point[1],
                    handle_left_type='FREE', handle_left=self._point,
                    handle_right_type='FREE', handle_right=self._point)

            self._appendPoint(x, y,
                handle_left_type='FREE', handle_left=(x1, y1),
                handle_right_type='FREE', handle_right=(x, y))

            self._point = (x, y)
            self._handle = (x1, y1)
            cur = self._data.cur()

    def _calcArc(self, rx, ry, ang, fa, fs, x, y):
        """
        Calc arc paths

        Copied and adopted from the paths_svg2obj.py script for Blender 2.49
        which is Copyright (c) jm soler juillet/novembre 2004-april 2009.

        Approximates the elliptical arc from the current point to (x, y)
        with cubic Bezier segments of at most a quarter turn each.
        """

        cpx = self._point[0]
        cpy = self._point[1]
        rx = abs(rx)
        ry = abs(ry)
        px = abs((cos(ang) * (cpx - x) + sin(ang) * (cpy - y)) * 0.5) ** 2.0
        py = abs((cos(ang) * (cpy - y) - sin(ang) * (cpx - x)) * 0.5) ** 2.0
        rpx = rpy = 0.0

        if abs(rx) > 0.0:
            # FIX: was "px = px / (rx ** 2.0)", which left rpx at 0.0 and
            # skipped the out-of-range radius correction below
            rpx = px / (rx ** 2.0)

        if abs(ry) > 0.0:
            rpy = py / (ry ** 2.0)

        # Scale radii up when they are too small to span the endpoints
        # (SVG 1.1 implementation notes, section B.2.4)
        pl = rpx + rpy
        if pl > 1.0:
            pl = pl ** 0.5
            rx *= pl
            ry *= pl

        carx = sarx = cary = sary = 0.0

        if abs(rx) > 0.0:
            carx = cos(ang) / rx
            sarx = sin(ang) / rx

        if abs(ry) > 0.0:
            cary = cos(ang) / ry
            sary = sin(ang) / ry

        x0 = carx * cpx + sarx * cpy
        y0 = -sary * cpx + cary * cpy
        x1 = carx * x + sarx * y
        y1 = -sary * x + cary * y
        d = (x1 - x0) * (x1 - x0) + (y1 - y0) * (y1 - y0)

        if abs(d) > 0.0:
            sq = 1.0 / d - 0.25
        else:
            sq = -0.25

        if sq < 0.0:
            sq = 0.0

        sf = sq ** 0.5
        if fs == fa:
            sf = -sf

        xc = 0.5 * (x0 + x1) - sf * (y1 - y0)
        yc = 0.5 * (y0 + y1) + sf * (x1 - x0)
        ang_0 = atan2(y0 - yc, x0 - xc)
        ang_1 = atan2(y1 - yc, x1 - xc)
        ang_arc = ang_1 - ang_0

        if ang_arc < 0.0 and fs == 1:
            ang_arc += 2.0 * pi
        elif ang_arc > 0.0 and fs == 0:
            ang_arc -= 2.0 * pi

        n_segs = int(ceil(abs(ang_arc * 2.0 / (pi * 0.5 + 0.001))))

        if self._spline is None:
            self._appendPoint(cpx, cpy,
                handle_left_type='FREE', handle_left=(cpx, cpy),
                handle_right_type='FREE', handle_right=(cpx, cpy))

        for i in range(n_segs):
            ang0 = ang_0 + i * ang_arc / n_segs
            ang1 = ang_0 + (i + 1) * ang_arc / n_segs
            ang_demi = 0.25 * (ang1 - ang0)
            t = 2.66666 * sin(ang_demi) * sin(ang_demi) / sin(ang_demi * 2.0)
            x1 = xc + cos(ang0) - t * sin(ang0)
            y1 = yc + sin(ang0) + t * cos(ang0)
            x2 = xc + cos(ang1)
            y2 = yc + sin(ang1)
            x3 = x2 + t * sin(ang1)
            y3 = y2 - t * cos(ang1)

            coord1 = ((cos(ang) * rx) * x1 + (-sin(ang) * ry) * y1,
                      (sin(ang) * rx) * x1 + (cos(ang) * ry) * y1)
            coord2 = ((cos(ang) * rx) * x3 + (-sin(ang) * ry) * y3,
                      (sin(ang) * rx) * x3 + (cos(ang) * ry) * y3)
            coord3 = ((cos(ang) * rx) * x2 + (-sin(ang) * ry) * y2,
                      (sin(ang) * rx) * x2 + (cos(ang) * ry) * y2)

            self._updateHandle(handle=coord1, handle_type='FREE')

            self._appendPoint(coord3[0], coord3[1],
                handle_left_type='FREE', handle_left=coord2,
                handle_right_type='FREE', handle_right=coord3)

    def _pathCurveToA(self, code):
        """
        Elliptical arc CurveTo path command
        """

        cur = self._data.cur()

        while cur is not None and not cur.isalpha():
            rx = float(self._data.next())
            ry = float(self._data.next())
            ang = float(self._data.next()) / 180 * pi
            fa = float(self._data.next())
            fs = float(self._data.next())
            x, y = self._getCoordPair(code.islower(), self._point)

            self._calcArc(rx, ry, ang, fa, fs, x, y)

            self._point = (x, y)
            self._handle = None
            cur = self._data.cur()

    def _pathClose(self, code):
        """
        Close path command

        Marks the current spline closed and moves the current point back
        to its first point.
        """

        if self._spline:
            self._spline['closed'] = True

            cv = self._spline['points'][0]
            self._point = (cv['x'], cv['y'])

    def parse(self):
        """
        Execute parser
        """

        while not self._data.eof():
            code = self._data.next()
            cmd = self._commands.get(code)

            if cmd is None:
                raise Exception('Unknown path command: {0}' . format(code))

            cmd(code)

    def getSplines(self):
        """
        Get splines definitions
        """

        return self._splines
+
+
class SVGGeometry:
    """
    Abstract SVG geometry

    Base class for all importable SVG elements.  Maintains the shared
    parser context (display-rectangle stack, transformation-matrix
    stack, defines table) and provides common create/parse hooks.
    """

    __slots__ = ('_node',     # XML node for geometry
                 '_context',  # Global SVG context (holds matrices stack, i.e.)
                 '_creating')  # Flag if geometry is already creating
                               # for this node
                               # need to detect cycles for USE node

    def __init__(self, node, context):
        """
        Initialize SVG geometry
        """

        self._node = node
        self._context = context
        self._creating = False

        # Document/text nodes have no attributes; only register element
        # nodes in the defines table (for USE references)
        if hasattr(node, 'getAttribute'):
            defs = context['defines']

            id = node.getAttribute('id')
            if id and defs.get('#' + id) is None:
                defs['#' + id] = self

            className = node.getAttribute('class')
            if className and defs.get(className) is None:
                defs[className] = self

    def _pushRect(self, rect):
        """
        Push display rectangle
        """

        self._context['rects'].append(rect)
        self._context['rect'] = rect

    def _popRect(self):
        """
        Pop display rectangle
        """

        # FIX: was "self._context['rects'].pop" (missing parentheses),
        # which never actually popped the stack
        self._context['rects'].pop()
        self._context['rect'] = self._context['rects'][-1]

    def _pushMatrix(self, matrix):
        """
        Push transformation matrix
        """

        self._context['transform'].append(matrix)
        self._context['matrix'] = self._context['matrix'] * matrix

    def _popMatrix(self):
        """
        Pop transformation matrix
        """

        matrix = self._context['transform'].pop()
        self._context['matrix'] = self._context['matrix'] * matrix.inverted()

    def _transformCoord(self, point):
        """
        Transform SVG-file coords

        Applies the accumulated matrix using the 2.5x row-vector
        convention (v * M).
        """

        v = Vector((point[0], point[1], 0.0))

        return v * self._context['matrix']

    def getNodeMatrix(self):
        """
        Get transformation matrix of node
        """

        return SVGMatrixFromNode(self._node, self._context)

    def parse(self):
        """
        Parse XML node to memory

        Overridden by concrete geometries.
        """

        pass

    def _doCreateGeom(self, instancing):
        """
        Internal handler to create real geometries

        Overridden by concrete geometries.
        """

        pass

    def getTransformMatrix(self):
        """
        Get matrix created from "transform" attribute

        Returns None when the node has no transform attribute.
        """

        transform = self._node.getAttribute('transform')

        if transform:
            return SVGParseTransform(transform)

        return None

    def createGeom(self, instancing):
        """
        Create real geometries

        The _creating guard breaks reference cycles between USE nodes.
        """

        if self._creating:
            return

        self._creating = True

        matrix = self.getTransformMatrix()
        if matrix is not None:
            self._pushMatrix(matrix)

        self._doCreateGeom(instancing)

        if matrix is not None:
            self._popMatrix()

        self._creating = False
+
+
class SVGGeometryContainer(SVGGeometry):
    """
    Container of SVG geometries

    Parses and creates geometry for all element children of its node.
    """

    # NOTE: was ('_geometries') -- a plain string, not a tuple.  It only
    # worked because __slots__ accepts a single string; made an explicit
    # one-element tuple
    __slots__ = ('_geometries',)  # List of child geometries

    def __init__(self, node, context):
        """
        Initialize SVG geometry container
        """

        super().__init__(node, context)

        self._geometries = []

    def parse(self):
        """
        Parse XML node to memory

        Recursively parses all element children; text/comment nodes are
        skipped.
        """

        for node in self._node.childNodes:
            if type(node) is not xml.dom.minidom.Element:
                continue

            ob = parseAbstractNode(node, self._context)
            if ob is not None:
                self._geometries.append(ob)

    def _doCreateGeom(self, instancing):
        """
        Create real geometries
        """

        for geom in self._geometries:
            geom.createGeom(instancing)

    def getGeometries(self):
        """
        Get list of parsed geometries
        """

        return self._geometries
+
+
class SVGGeometryPATH(SVGGeometry):
    """
    SVG path geometry
    """

    __slots__ = ('_splines',  # List of splines after parsing
                 '_styles')   # Styles, used for displaying

    def __init__(self, node, context):
        """
        Initialize SVG path
        """

        super().__init__(node, context)

        self._splines = []
        self._styles = SVGEmptyStyles

    def parse(self):
        """
        Parse SVG path node

        Delegates "d" attribute parsing to SVGPathParser and reads the
        display styles from the node.
        """

        d = self._node.getAttribute('d')

        pathParser = SVGPathParser(d)
        pathParser.parse()

        self._splines = pathParser.getSplines()
        self._styles = SVGParseStyles(self._node, self._context)

    def _doCreateGeom(self, instancing):
        """
        Create real geometries

        Builds one Blender curve object containing a BEZIER spline for
        every parsed sub-path.
        """

        ob = SVGCreateCurve()
        cu = ob.data

        if self._node.getAttribute('id'):
            cu.name = self._node.getAttribute('id')

        # Filled paths must be 2D curves so Blender tessellates them
        if self._styles['useFill']:
            cu.dimensions = '2D'
            cu.materials.append(self._styles['fill'])
        else:
            cu.dimensions = '3D'

        for spline in self._splines:
            act_spline = None
            for point in spline['points']:
                co = self._transformCoord((point['x'], point['y']))

                if act_spline is None:
                    # splines.new() already adds the first bezier point
                    cu.splines.new('BEZIER')

                    act_spline = cu.splines[-1]
                    act_spline.use_cyclic_u = spline['closed']
                else:
                    act_spline.bezier_points.add()

                bezt = act_spline.bezier_points[-1]
                bezt.co = co

                bezt.handle_left_type = point['handle_left_type']
                if point['handle_left'] is not None:
                    handle = point['handle_left']
                    bezt.handle_left = self._transformCoord(handle)

                bezt.handle_right_type = point['handle_right_type']
                if point['handle_right'] is not None:
                    handle = point['handle_right']
                    bezt.handle_right = self._transformCoord(handle)

        SVGFinishCurve()
+
+
class SVGGeometryDEFS(SVGGeometryContainer):
    """
    Container for referenced elements

    Children are parsed (and so registered in the defines table) but
    never create geometry directly -- only through USE references.
    """

    def createGeom(self, instancing):
        """
        Create real geometries

        Intentionally a no-op: defs content is only instantiated via USE.
        """

        pass
+
+
class SVGGeometrySYMBOL(SVGGeometryContainer):
    """
    Referenced element

    Like DEFS content, symbols only produce geometry when instanced by
    a USE node (instancing=True).
    """

    def _doCreateGeom(self, instancing):
        """
        Create real geometries

        Applies the symbol's own viewport matrix around its children.
        """

        self._pushMatrix(self.getNodeMatrix())

        # Children are created directly, not instanced again
        super()._doCreateGeom(False)

        self._popMatrix()

    def createGeom(self, instancing):
        """
        Create real geometries

        Skipped entirely unless reached through a USE instancing chain.
        """

        if not instancing:
            return

        super().createGeom(instancing)
+
+
class SVGGeometryG(SVGGeometryContainer):
    """
    Geometry group

    Plain container: inherits all parsing/creation behavior.
    """

    pass
+
+
class SVGGeometryUSE(SVGGeometry):
    """
    User of referenced elements

    Instantiates the geometry registered under this node's xlink:href
    reference inside this node's viewport and transformation.
    """

    def _doCreateGeom(self, instancing):
        """
        Create real geometries

        Unknown references are silently ignored.
        """

        # NOTE: removed unused local "geometries = []" from the original
        ref = self._node.getAttribute('xlink:href')
        geom = self._context['defines'].get(ref)

        if geom is not None:
            rect = SVGRectFromNode(self._node, self._context)
            self._pushRect(rect)

            self._pushMatrix(self.getNodeMatrix())

            # Instancing=True lets SYMBOL/DEFS content actually build
            geom.createGeom(True)

            self._popMatrix()

            self._popRect()
+
+
class SVGGeometryRECT(SVGGeometry):
    """
    SVG rectangle
    """

    __slots__ = ('_rect',    # coordinate and dimensions of rectangle
                 '_radius',  # Rounded corner radiuses
                 '_styles')  # Styles, used for displaying

    def __init__(self, node, context):
        """
        Initialize new rectangle
        """

        super().__init__(node, context)

        # Stored as strings; converted at creation time so percentages
        # resolve against the then-current display rectangle
        self._rect = ('0', '0', '0', '0')
        self._radius = ('0', '0')
        self._styles = SVGEmptyStyles

    def parse(self):
        """
        Parse SVG rectangle node
        """

        self._styles = SVGParseStyles(self._node, self._context)

        rect = []
        for attr in ['x', 'y', 'width', 'height']:
            val = self._node.getAttribute(attr)
            rect.append(val or '0')

        self._rect = (rect)

        rx = self._node.getAttribute('rx')
        ry = self._node.getAttribute('ry')

        self._radius = (rx, ry)

    def _appendCorner(self, spline, coord, firstTime, rounded):
        """
        Append new corner to rectangle

        coord may carry an optional third component: the right-handle
        coordinate used for rounded corners.
        """

        handle = None
        if len(coord) == 3:
            handle = self._transformCoord(coord[2])
            coord = (coord[0], coord[1])

        co = self._transformCoord(coord)

        if not firstTime:
            spline.bezier_points.add()

        bezt = spline.bezier_points[-1]
        bezt.co = co

        if rounded:
            if handle:
                # Corner entering an arc: free right handle towards it
                bezt.handle_left_type = 'VECTOR'
                bezt.handle_right_type = 'FREE'

                bezt.handle_right = handle
            else:
                # Corner leaving an arc: left handle pinned at the point
                bezt.handle_left_type = 'FREE'
                bezt.handle_right_type = 'VECTOR'
                bezt.handle_left = co

        else:
            bezt.handle_left_type = 'VECTOR'
            bezt.handle_right_type = 'VECTOR'

    def _doCreateGeom(self, instancing):
        """
        Create real geometries
        """

        # Run-time parsing -- percents would be correct only if
        # parsing them now
        crect = self._context['rect']
        rect = []

        # x/width resolve against crect[0], y/height against crect[1]
        for i in range(4):
            rect.append(SVGParseCoord(self._rect[i], crect[i % 2]))

        r = self._radius
        rx = ry = 0.0

        # Per SVG spec a single radius is used for both axes, and radii
        # are clamped to half the rectangle size
        if r[0] and r[1]:
            rx = min(SVGParseCoord(r[0], rect[0]), rect[2] / 2)
            ry = min(SVGParseCoord(r[1], rect[1]), rect[3] / 2)
        elif r[0]:
            rx = min(SVGParseCoord(r[0], rect[0]), rect[2] / 2)
            ry = min(rx, rect[3] / 2)
            rx = ry = min(rx, ry)
        elif r[1]:
            ry = min(SVGParseCoord(r[1], rect[1]), rect[3] / 2)
            rx = min(ry, rect[2] / 2)
            rx = ry = min(rx, ry)

        radius = (rx, ry)

        # Geometry creation
        ob = SVGCreateCurve()
        cu = ob.data

        if self._styles['useFill']:
            cu.dimensions = '2D'
            cu.materials.append(self._styles['fill'])
        else:
            cu.dimensions = '3D'

        cu.splines.new('BEZIER')

        spline = cu.splines[-1]
        spline.use_cyclic_u = True

        x, y = rect[0], rect[1]
        w, h = rect[2], rect[3]
        rx, ry = radius[0], radius[1]
        rounded = False

        if rx or ry:
            #
            #      0 _______ 1
            #     /           \
            #    /             \
            #   7               2
            #   |               |
            #   |               |
            #   6               3
            #    \             /
            #     \           /
            #      5 _______ 4
            #

            # Optional third component -- right handle coord
            coords = [(x + rx, y),
                      (x + w - rx, y, (x + w, y)),
                      (x + w, y + ry),
                      (x + w, y + h - ry, (x + w, y + h)),
                      (x + w - rx, y + h),
                      (x + rx, y + h, (x, y + h)),
                      (x, y + h - ry),
                      (x, y + ry, (x, y))]

            rounded = True
        else:
            coords = [(x, y), (x + w, y), (x + w, y + h), (x, y + h)]

        firstTime = True
        for coord in coords:
            self._appendCorner(spline, coord, firstTime, rounded)
            firstTime = False

        SVGFinishCurve()
+
+
class SVGGeometryELLIPSE(SVGGeometry):
    """
    SVG ellipse
    """

    __slots__ = ('_cx',      # X-coordinate of center
                 '_cy',      # Y-coordinate of center
                 '_rx',      # X-axis radius of circle
                 '_ry',      # Y-axis radius of circle
                 '_styles')  # Styles, used for displaying

    def __init__(self, node, context):
        """
        Initialize new ellipse
        """

        super().__init__(node, context)

        # Stored as strings; resolved at creation time so percentages
        # use the then-current display rectangle
        self._cx = '0.0'
        self._cy = '0.0'
        self._rx = '0.0'
        self._ry = '0.0'
        self._styles = SVGEmptyStyles

    def parse(self):
        """
        Parse SVG ellipse node
        """

        self._styles = SVGParseStyles(self._node, self._context)

        self._cx = self._node.getAttribute('cx') or '0'
        self._cy = self._node.getAttribute('cy') or '0'
        self._rx = self._node.getAttribute('rx') or '0'
        self._ry = self._node.getAttribute('ry') or '0'

    def _doCreateGeom(self, instancing):
        """
        Create real geometries

        Approximates the ellipse with a 4-point cyclic BEZIER spline.
        """

        # Run-time parsing -- percents would be correct only if
        # parsing them now
        crect = self._context['rect']

        cx = SVGParseCoord(self._cx, crect[0])
        cy = SVGParseCoord(self._cy, crect[1])
        rx = SVGParseCoord(self._rx, crect[0])
        ry = SVGParseCoord(self._ry, crect[1])

        if not rx or not ry:
            # Automatic handles will work incorrect in this case
            return

        # Create circle
        ob = SVGCreateCurve()
        cu = ob.data

        if self._styles['useFill']:
            cu.dimensions = '2D'
            cu.materials.append(self._styles['fill'])
        else:
            cu.dimensions = '3D'

        # Each entry: (point, left handle, right handle).  0.552 is the
        # usual cubic-Bezier circle approximation constant (kappa,
        # (4/3)*(sqrt(2)-1) ~= 0.5523)
        coords = [((cx - rx, cy),
                   (cx - rx, cy + ry * 0.552),
                   (cx - rx, cy - ry * 0.552)),

                  ((cx, cy - ry),
                   (cx - rx * 0.552, cy - ry),
                   (cx + rx * 0.552, cy - ry)),

                  ((cx + rx, cy),
                   (cx + rx, cy - ry * 0.552),
                   (cx + rx, cy + ry * 0.552)),

                  ((cx, cy + ry),
                   (cx + rx * 0.552, cy + ry),
                   (cx - rx * 0.552, cy + ry))]

        spline = None
        for coord in coords:
            co = self._transformCoord(coord[0])
            handle_left = self._transformCoord(coord[1])
            handle_right = self._transformCoord(coord[2])

            if spline is None:
                cu.splines.new('BEZIER')
                spline = cu.splines[-1]
                spline.use_cyclic_u = True
            else:
                spline.bezier_points.add()

            bezt = spline.bezier_points[-1]
            bezt.co = co
            bezt.handle_left_type = 'FREE'
            bezt.handle_right_type = 'FREE'
            bezt.handle_left = handle_left
            bezt.handle_right = handle_right

        SVGFinishCurve()
+
+
class SVGGeometryCIRCLE(SVGGeometryELLIPSE):
    """
    SVG circle: an ellipse whose two radii are equal.
    """

    def parse(self):
        """
        Parse SVG circle node, reusing the ellipse representation
        with rx = ry = r.
        """

        self._styles = SVGParseStyles(self._node, self._context)

        node = self._node
        self._cx = node.getAttribute('cx') or '0'
        self._cy = node.getAttribute('cy') or '0'

        radius = node.getAttribute('r') or '0'
        self._rx = radius
        self._ry = radius
+
+
class SVGGeometryLINE(SVGGeometry):
    """
    SVG line: a straight open segment from (x1, y1) to (x2, y2).
    """

    __slots__ = ('_x1',  # X-coordinate of beginning
                 '_y1',  # Y-coordinate of beginning
                 '_x2',  # X-coordinate of ending
                 '_y2')  # Y-coordinate of ending

    def __init__(self, node, context):
        """
        Initialize new line.

        Endpoints start as raw '0.0' strings; real values are read in
        parse() and converted to numbers in _doCreateGeom().
        """

        super().__init__(node, context)

        self._x1 = '0.0'
        self._y1 = '0.0'
        self._x2 = '0.0'
        self._y2 = '0.0'

    def parse(self):
        """
        Parse SVG line node endpoint attributes (missing ones default
        to '0').
        """

        self._x1 = self._node.getAttribute('x1') or '0'
        self._y1 = self._node.getAttribute('y1') or '0'
        self._x2 = self._node.getAttribute('x2') or '0'
        self._y2 = self._node.getAttribute('y2') or '0'

    def _doCreateGeom(self, instancing):
        """
        Create real geometries: a two-point Bezier spline with VECTOR
        handles (straight segment).
        """

        # Run-time parsing -- percents would be correct only if
        # parsing them now
        crect = self._context['rect']

        x1 = SVGParseCoord(self._x1, crect[0])
        y1 = SVGParseCoord(self._y1, crect[1])
        x2 = SVGParseCoord(self._x2, crect[0])
        y2 = SVGParseCoord(self._y2, crect[1])

        # Create line
        ob = SVGCreateCurve()
        cu = ob.data

        coords = [(x1, y1), (x2, y2)]
        spline = None

        for coord in coords:
            co = self._transformCoord(coord)

            if spline is None:
                cu.splines.new('BEZIER')
                spline = cu.splines[-1]
                # Fix: an SVG <line> is an open segment; marking the
                # spline cyclic would close it back on itself.
                spline.use_cyclic_u = False
            else:
                spline.bezier_points.add()

            bezt = spline.bezier_points[-1]
            bezt.co = co
            # VECTOR handles make the segment straight
            bezt.handle_left_type = 'VECTOR'
            bezt.handle_right_type = 'VECTOR'

        SVGFinishCurve()
+
+
class SVGGeometryPOLY(SVGGeometry):
    """
    Abstract class for handling poly-geometries
    (polylines and polygons)
    """

    __slots__ = ('_points',  # Array of (x, y) points for poly geometry
                 '_styles',  # Styles, used for displaying
                 '_closed')  # Should generated curve be closed?

    def __init__(self, node, context):
        """
        Initialize new poly geometry
        """

        super().__init__(node, context)

        self._points = []
        self._styles = SVGEmptyStyles
        self._closed = False

    def parse(self):
        """
        Parse poly node: read styles and the 'points' attribute into a
        list of (x, y) float pairs.
        """

        import re

        self._styles = SVGParseStyles(self._node, self._context)

        # Tokenize the points list with a number regex instead of
        # splitting on separators. This also handles unseparated
        # negative numbers ("10-20") and exponent notation ("1e-5"),
        # which the previous replace('-', ' -') approach corrupted.
        raw = self._node.getAttribute('points')
        tokens = re.findall(r'[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?', raw)

        # Pair up consecutive coordinates; a trailing unpaired value is
        # silently ignored (same as the previous implementation).
        self._points = [(float(tokens[i]), float(tokens[i + 1]))
                        for i in range(0, len(tokens) - 1, 2)]

    def _doCreateGeom(self, instancing):
        """
        Create real geometries
        """

        ob = SVGCreateCurve()
        cu = ob.data

        # Only a closed, filled poly can be a fillable 2D curve
        if self._closed and self._styles['useFill']:
            cu.dimensions = '2D'
            cu.materials.append(self._styles['fill'])
        else:
            cu.dimensions = '3D'

        spline = None

        for point in self._points:
            co = self._transformCoord(point)

            if spline is None:
                cu.splines.new('BEZIER')
                spline = cu.splines[-1]
                spline.use_cyclic_u = self._closed
            else:
                spline.bezier_points.add()

            bezt = spline.bezier_points[-1]
            bezt.co = co
            # VECTOR handles give straight segments between points
            bezt.handle_left_type = 'VECTOR'
            bezt.handle_right_type = 'VECTOR'

        SVGFinishCurve()
+
+
class SVGGeometryPOLYLINE(SVGGeometryPOLY):
    """
    SVG polyline geometry: an open poly shape, so the base-class
    default of _closed = False is exactly what is needed.
    """
+
+
class SVGGeometryPOLYGON(SVGGeometryPOLY):
    """
    SVG polygon geometry: a closed poly shape.
    """

    def __init__(self, node, context):
        """
        Initialize new polygon geometry and mark its outline as closed.
        """

        super().__init__(node, context)
        self._closed = True
+
+
class SVGGeometrySVG(SVGGeometryContainer):
    """
    Main geometry holder
    """

    def _doCreateGeom(self, instancing):
        """
        Create real geometries for an <svg> node.

        Pushes this node's viewport rectangle and transform matrix,
        lets the container build child geometry, then restores both.
        """

        rect = SVGRectFromNode(self._node, self._context)
        matrix = self.getNodeMatrix()

        self._pushMatrix(matrix)
        self._pushRect(rect)

        super()._doCreateGeom(False)

        self._popRect()
        self._popMatrix()
+
+
class SVGLoader(SVGGeometryContainer):
    """
    SVG file loader
    """

    def getTransformMatrix(self):
        """
        Get matrix created from "transform" attribute
        """

        # The document node itself carries no attributes, so there is
        # never a transform at this level.
        return None

    def __init__(self, filepath):
        """
        Initialize SVG loader: parse the XML document and build the
        shared parsing context.
        """

        node = xml.dom.minidom.parse(filepath)

        # Base matrix: scale down by 90 (SVG's default px per inch)
        # and flip the Y axis (SVG Y grows downwards).
        scale = 1.0 / 90.0
        m = Matrix()
        m = m * m.Scale(scale, 4, Vector((1.0, 0.0, 0.0)))
        m = m * m.Scale(-scale, 4, Vector((0.0, 1.0, 0.0)))

        rect = (1, 1)

        self._context = {'defines': {},
                         'transform': [],
                         'rects': [rect],
                         'rect': rect,
                         'matrix': m,
                         'materials': {}}

        super().__init__(node, self._context)
+
+
# Dispatch table: lowercase SVG tag name -> geometry class; used by
# parseAbstractNode() to instantiate the matching parser.
svgGeometryClasses = {
    'svg': SVGGeometrySVG,
    'path': SVGGeometryPATH,
    'defs': SVGGeometryDEFS,
    'symbol': SVGGeometrySYMBOL,
    'use': SVGGeometryUSE,
    'rect': SVGGeometryRECT,
    'ellipse': SVGGeometryELLIPSE,
    'circle': SVGGeometryCIRCLE,
    'line': SVGGeometryLINE,
    'polyline': SVGGeometryPOLYLINE,
    'polygon': SVGGeometryPOLYGON,
    'g': SVGGeometryG}
+
+
def parseAbstractNode(node, context):
    """Instantiate and parse the geometry class matching an XML node.

    Returns the parsed geometry object, or None for unsupported tags.
    """

    tagName = node.tagName.lower()
    if tagName.startswith('svg:'):
        tagName = tagName[4:]  # strip the namespace prefix

    geomClass = svgGeometryClasses.get(tagName)
    if geomClass is None:
        return None

    geometry = geomClass(node, context)
    geometry.parse()
    return geometry
+
+
def load_svg(filepath):
    """
    Load specified SVG file
    """

    # Curve creation assumes object mode; poll() guards against
    # contexts where switching modes is not valid.
    if bpy.ops.object.mode_set.poll():
        bpy.ops.object.mode_set(mode='OBJECT')

    loader = SVGLoader(filepath)
    loader.parse()
    loader.createGeom(False)
+
+
def load(operator, context, filepath=""):
    """
    Operator entry point: import the SVG at *filepath*.

    Returns {'FINISHED'} on success, or {'CANCELLED'} (with a warning
    reported on the operator) when the file is not parseable XML.
    """

    # error in code should raise exceptions but loading
    # non SVG files can give useful messages.
    try:
        load_svg(filepath)
    except (xml.parsers.expat.ExpatError, UnicodeEncodeError) as e:
        import traceback
        traceback.print_exc()

        operator.report({'WARNING'}, "Unable to parse XML, %s:%s for file %r" % (type(e).__name__, e, filepath))
        return {'CANCELLED'}

    return {'FINISHED'}
diff --git a/io_curve_svg/svg_colors.py b/io_curve_svg/svg_colors.py
new file mode 100644
index 00000000..fd5e9548
--- /dev/null
+++ b/io_curve_svg/svg_colors.py
@@ -0,0 +1,172 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
#
# Copied and adapted from the paths_svg2obj.py script for Blender 2.49,
# which is Copyright (c) jm soler juillet/novembre 2004-april 2009.
#
+
# SVG 1.1 named colors, mapped to 8-bit (R, G, B) tuples.
SVGColors = {'aliceblue': (240, 248, 255),
             'antiquewhite': (250, 235, 215),
             'aqua': (0, 255, 255),
             'aquamarine': (127, 255, 212),
             'azure': (240, 255, 255),
             'beige': (245, 245, 220),
             'bisque': (255, 228, 196),
             'black': (0, 0, 0),
             'blanchedalmond': (255, 235, 205),
             'blue': (0, 0, 255),
             'blueviolet': (138, 43, 226),
             'brown': (165, 42, 42),
             'burlywood': (222, 184, 135),
             'cadetblue': (95, 158, 160),
             'chartreuse': (127, 255, 0),
             'chocolate': (210, 105, 30),
             'coral': (255, 127, 80),
             'cornflowerblue': (100, 149, 237),
             'cornsilk': (255, 248, 220),
             'crimson': (220, 20, 60),
             'cyan': (0, 255, 255),
             'darkblue': (0, 0, 139),
             'darkcyan': (0, 139, 139),
             'darkgoldenrod': (184, 134, 11),
             'darkgray': (169, 169, 169),
             'darkgreen': (0, 100, 0),
             'darkgrey': (169, 169, 169),
             'darkkhaki': (189, 183, 107),
             'darkmagenta': (139, 0, 139),
             'darkolivegreen': (85, 107, 47),
             'darkorange': (255, 140, 0),
             'darkorchid': (153, 50, 204),
             'darkred': (139, 0, 0),
             'darksalmon': (233, 150, 122),
             'darkseagreen': (143, 188, 143),
             'darkslateblue': (72, 61, 139),
             'darkslategray': (47, 79, 79),
             'darkslategrey': (47, 79, 79),
             'darkturquoise': (0, 206, 209),
             'darkviolet': (148, 0, 211),
             'deeppink': (255, 20, 147),
             'deepskyblue': (0, 191, 255),
             'dimgray': (105, 105, 105),
             'dimgrey': (105, 105, 105),
             'dodgerblue': (30, 144, 255),
             'firebrick': (178, 34, 34),
             'floralwhite': (255, 250, 240),
             'forestgreen': (34, 139, 34),
             'fuchsia': (255, 0, 255),
             'gainsboro': (220, 220, 220),
             'ghostwhite': (248, 248, 255),
             'gold': (255, 215, 0),
             'goldenrod': (218, 165, 32),
             'gray': (128, 128, 128),
             'grey': (128, 128, 128),
             'green': (0, 128, 0),
             'greenyellow': (173, 255, 47),
             'honeydew': (240, 255, 240),
             'hotpink': (255, 105, 180),
             'indianred': (205, 92, 92),
             'indigo': (75, 0, 130),
             'ivory': (255, 255, 240),
             'khaki': (240, 230, 140),
             'lavender': (230, 230, 250),
             'lavenderblush': (255, 240, 245),
             'lawngreen': (124, 252, 0),
             'lemonchiffon': (255, 250, 205),
             'lightblue': (173, 216, 230),
             'lightcoral': (240, 128, 128),
             'lightcyan': (224, 255, 255),
             'lightgoldenrodyellow': (250, 250, 210),
             'lightgray': (211, 211, 211),
             'lightgreen': (144, 238, 144),
             'lightgrey': (211, 211, 211),
             'lightpink': (255, 182, 193),
             'lightsalmon': (255, 160, 122),
             'lightseagreen': (32, 178, 170),
             'lightskyblue': (135, 206, 250),
             'lightslategray': (119, 136, 153),
             'lightslategrey': (119, 136, 153),
             'lightsteelblue': (176, 196, 222),
             'lightyellow': (255, 255, 224),
             'lime': (0, 255, 0),
             'limegreen': (50, 205, 50),
             'linen': (250, 240, 230),
             'magenta': (255, 0, 255),
             'maroon': (128, 0, 0),
             'mediumaquamarine': (102, 205, 170),
             'mediumblue': (0, 0, 205),
             'mediumorchid': (186, 85, 211),
             'mediumpurple': (147, 112, 219),
             'mediumseagreen': (60, 179, 113),
             'mediumslateblue': (123, 104, 238),
             'mediumspringgreen': (0, 250, 154),
             'mediumturquoise': (72, 209, 204),
             'mediumvioletred': (199, 21, 133),
             'midnightblue': (25, 25, 112),
             'mintcream': (245, 255, 250),
             'mistyrose': (255, 228, 225),
             'moccasin': (255, 228, 181),
             'navajowhite': (255, 222, 173),
             'navy': (0, 0, 128),
             'oldlace': (253, 245, 230),
             'olive': (128, 128, 0),
             'olivedrab': (107, 142, 35),
             'orange': (255, 165, 0),
             'orangered': (255, 69, 0),
             'orchid': (218, 112, 214),
             'palegoldenrod': (238, 232, 170),
             'palegreen': (152, 251, 152),
             'paleturquoise': (175, 238, 238),
             'palevioletred': (219, 112, 147),
             'papayawhip': (255, 239, 213),
             'peachpuff': (255, 218, 185),
             'peru': (205, 133, 63),
             'pink': (255, 192, 203),
             'plum': (221, 160, 221),
             'powderblue': (176, 224, 230),
             'purple': (128, 0, 128),
             'red': (255, 0, 0),
             'rosybrown': (188, 143, 143),
             'royalblue': (65, 105, 225),
             'saddlebrown': (139, 69, 19),
             'salmon': (250, 128, 114),
             'sandybrown': (244, 164, 96),
             'seagreen': (46, 139, 87),
             'seashell': (255, 245, 238),
             'sienna': (160, 82, 45),
             'silver': (192, 192, 192),
             'skyblue': (135, 206, 235),
             'slateblue': (106, 90, 205),
             'slategray': (112, 128, 144),
             'slategrey': (112, 128, 144),
             'snow': (255, 250, 250),
             'springgreen': (0, 255, 127),
             'steelblue': (70, 130, 180),
             'tan': (210, 180, 140),
             'teal': (0, 128, 128),
             'thistle': (216, 191, 216),
             'tomato': (255, 99, 71),
             'turquoise': (64, 224, 208),
             'violet': (238, 130, 238),
             'wheat': (245, 222, 179),
             'white': (255, 255, 255),
             'whitesmoke': (245, 245, 245),
             'yellow': (255, 255, 0),
             'yellowgreen': (154, 205, 50)}
diff --git a/io_export_directx_x.py b/io_export_directx_x.py
new file mode 100644
index 00000000..e31d5973
--- /dev/null
+++ b/io_export_directx_x.py
@@ -0,0 +1,1251 @@
+# ***** GPL LICENSE BLOCK *****
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+# All rights reserved.
+# ***** GPL LICENSE BLOCK *****
+
# Add-on metadata read by Blender's add-on registration system.
bl_info = {
    "name": "DirectX Model Format (.x)",
    "author": "Chris Foster (Kira Vakaan)",
    "version": (2, 1, 1),
    "blender": (2, 5, 7),
    "api": 36339,
    "location": "File > Export > DirectX (.x)",
    "description": "Export DirectX Model Format (.x)",
    "warning": "",
    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
        "Scripts/Import-Export/DirectX_Exporter",
    "tracker_url": "https://projects.blender.org/tracker/index.php?"\
        "func=detail&aid=22795",
    "category": "Import-Export"}
+
+import os
+from math import radians
+
+import bpy
+from mathutils import *
+
+#Container for the exporter settings
class DirectXExporterSettings:
    """Plain value object holding every exporter option.

    Enum-style options (CoordinateSystem, ExportAnimation, ExportMode)
    are coerced with int() because Blender enum properties deliver
    their values as strings.
    """

    def __init__(self,
                 context,
                 FilePath,
                 CoordinateSystem=1,
                 RotateX=True,
                 FlipNormals=False,
                 ApplyModifiers=False,
                 IncludeFrameRate=False,
                 ExportTextures=True,
                 ExportArmatures=False,
                 ExportAnimation=0,
                 ExportMode=1,
                 Verbose=False):
        self.context = context
        self.FilePath = FilePath
        # Enum values arrive as strings from bpy EnumProperty
        self.CoordinateSystem = int(CoordinateSystem)
        self.ExportAnimation = int(ExportAnimation)
        self.ExportMode = int(ExportMode)
        # Plain boolean flags are stored as-is
        self.RotateX = RotateX
        self.FlipNormals = FlipNormals
        self.ApplyModifiers = ApplyModifiers
        self.IncludeFrameRate = IncludeFrameRate
        self.ExportTextures = ExportTextures
        self.ExportArmatures = ExportArmatures
        self.Verbose = Verbose
+
+
def LegalName(Name):
    """Sanitize *Name* into a legal DirectX .x identifier.

    Every punctuation character becomes '_', and a leading underscore
    is prepended when the result starts with a digit or collides with
    a .x template keyword. An empty input yields "_" (the previous
    implementation raised IndexError on it).
    """

    import string

    # Single C-level pass instead of chained str.replace calls
    table = str.maketrans(string.punctuation, "_" * len(string.punctuation))
    NewName = Name.translate(table)

    # Reserved .x template keywords that may not be used as names
    reserved = {"ARRAY",
                "DWORD",
                "UCHAR",
                "BINARY",
                "FLOAT",
                "ULONGLONG",
                "BINARY_RESOURCE",
                "SDWORD",
                "UNICODE",
                "CHAR",
                "STRING",
                "WORD",
                "CSTRING",
                "SWORD",
                "DOUBLE",
                "TEMPLATE"}

    if not NewName:
        return "_"
    if NewName[0].isdigit() or NewName in reserved:
        NewName = "_" + NewName
    return NewName
+
+
def ExportDirectX(Config):
    """
    Top-level export driver: write the complete .x file described by
    *Config* (a DirectXExporterSettings with a live Blender context).
    """

    print("----------\nExporting to {}".format(Config.FilePath))
    if Config.Verbose:
        print("Opening File...")
    Config.File = open(Config.FilePath, "w")
    if Config.Verbose:
        print("Done")

    if Config.Verbose:
        print("Generating Object list for export... (Root parents only)")
    if Config.ExportMode == 1:
        # Whole scene: every exportable object without a parent
        Config.ExportList = [Object for Object in Config.context.scene.objects
                             if Object.type in ("ARMATURE", "EMPTY", "MESH")
                             and Object.parent is None]
    else:
        # Selection only: keep objects whose parent is not itself
        # selected (children are emitted via recursion later)
        ExportList = [Object for Object in Config.context.selected_objects
                      if Object.type in ("ARMATURE", "EMPTY", "MESH")]
        Config.ExportList = [Object for Object in ExportList
                             if Object.parent not in ExportList]
    if Config.Verbose:
        print(" List: {}\nDone".format(Config.ExportList))

    if Config.Verbose:
        print("Setting up...")
    # Global transform: optional -90 deg X rotation (Z-up to Y-up) and,
    # for left-handed output, a mirror on the Y axis
    Config.SystemMatrix = Matrix()
    if Config.RotateX:
        Config.SystemMatrix *= Matrix.Rotation(radians(-90), 4, "X")
    if Config.CoordinateSystem == 1:
        Config.SystemMatrix *= Matrix.Scale(-1, 4, Vector((0, 1, 0)))

    if Config.ExportAnimation:
        # Remember the current frame so it can be restored afterwards
        CurrentFrame = bpy.context.scene.frame_current
        # NOTE(review): self-assignment below has no obvious effect;
        # presumably it was meant to force a scene/frame update or to
        # jump to a start frame -- verify the original intent.
        bpy.context.scene.frame_current = bpy.context.scene.frame_current
    if Config.Verbose:
        print("Done")

    if Config.Verbose:
        print("Writing Header...")
    WriteHeader(Config)
    if Config.Verbose:
        print("Done")

    Config.Whitespace = 0
    if Config.Verbose:
        print("Writing Root Frame...")
    WriteRootFrame(Config)
    if Config.Verbose:
        print("Done")

    Config.ObjectList = []
    if Config.Verbose:
        print("Writing Objects...")
    WriteObjects(Config, Config.ExportList)
    if Config.Verbose:
        print("Done")

    Config.Whitespace -= 1
    # Close the root frame opened by WriteRootFrame()
    Config.File.write("{}}} //End of Root Frame\n".format(" " * Config.Whitespace))

    if Config.Verbose:
        print("Objects Exported: {}".format(Config.ExportList))

    if Config.ExportAnimation:
        if Config.IncludeFrameRate:
            if Config.Verbose:
                print("Writing Frame Rate...")
            Config.File.write("{}AnimTicksPerSecond {{\n".format(" " * Config.Whitespace))
            Config.Whitespace += 1
            # Effective FPS = fps / fps_base, truncated to int
            Config.File.write("{}{};\n".format(" " * Config.Whitespace, int(bpy.context.scene.render.fps / bpy.context.scene.render.fps_base)))
            Config.Whitespace -= 1
            Config.File.write("{}}}\n".format(" " * Config.Whitespace))
            if Config.Verbose:
                print("Done")
        if Config.Verbose:
            print("Writing Animation...")
        if Config.ExportAnimation==1:
            WriteKeyedAnimationSet(Config)
        else:
            WriteFullAnimationSet(Config)
        # Restore the frame saved before animation export
        bpy.context.scene.frame_current = CurrentFrame
        if Config.Verbose:
            print("Done")

    CloseFile(Config)
    print("Finished")
+
+
def GetObjectChildren(Parent):
    """Return Parent's direct children of exportable types, in order."""
    exportable = ("ARMATURE", "EMPTY", "MESH")
    children = []
    for child in Parent.children:
        if child.type in exportable:
            children.append(child)
    return children
+
+#Returns the vertex count of Mesh, counting each vertex for every face.
def GetMeshVertexCount(Mesh):
    """Return the total face-corner count (each vertex counted once per
    face that uses it)."""
    return sum(len(face.vertices) for face in Mesh.faces)
+
+#Returns the file path of first image texture from Material.
def GetMaterialTexture(Material):
    """Return the basename of the first file-backed image texture on
    *Material*, or None when there is none (or Material is falsy)."""
    if Material:
        for slot_key in Material.texture_slots.keys():
            tex = Material.texture_slots[slot_key].texture
            # Only image textures whose image comes from a file qualify
            if tex.type == "IMAGE" and tex.image.source == "FILE":
                return os.path.basename(tex.image.filepath)
    return None
+
+
def WriteHeader(Config):
    """
    Write the .x file magic line plus any non-standard templates the
    chosen options require.
    """

    # Magic: xof, format version 03.03, text encoding, 32-bit floats
    Config.File.write("xof 0303txt 0032\n\n")

    if Config.IncludeFrameRate:
        # AnimTicksPerSecond is not a built-in template, so declare it
        Config.File.write("template AnimTicksPerSecond {\n\
  <9E415A43-7BA6-4a73-8743-B73D47E88476>\n\
  DWORD AnimTicksPerSecond;\n\
}\n\n")

    if Config.ExportArmatures:
        # Skinning templates used by WriteMeshSkinWeights()
        Config.File.write("template XSkinMeshHeader {\n\
  <3cf169ce-ff7c-44ab-93c0-f78f62d172e2>\n\
  WORD nMaxSkinWeightsPerVertex;\n\
  WORD nMaxSkinWeightsPerFace;\n\
  WORD nBones;\n\
}\n\n\
template SkinWeights {\n\
  <6f0d123b-bad2-4167-a0d0-80224f25fabb>\n\
  STRING transformNodeName;\n\
  DWORD nWeights;\n\
  array DWORD vertexIndices[nWeights];\n\
  array float weights[nWeights];\n\
  Matrix4x4 matrixOffset;\n\
}\n\n")
+
def WriteRootFrame(Config):
    """Open the root frame and write the global system transform matrix.

    Note: the root frame is deliberately left open; ExportDirectX()
    closes it after all objects have been written.
    """

    Config.File.write("{}Frame Root {{\n".format(" " * Config.Whitespace))
    Config.Whitespace += 1

    Config.File.write("{}FrameTransformMatrix {{\n".format(" " * Config.Whitespace))
    Config.Whitespace += 1

    pad = " " * Config.Whitespace
    matrix = Config.SystemMatrix
    for row in range(4):
        # Rows end with "," except the last, which closes with ";;"
        tail = ";;" if row == 3 else ","
        Config.File.write("{}{:9f},{:9f},{:9f},{:9f}{}\n".format(
            pad, matrix[row][0], matrix[row][1], matrix[row][2], matrix[row][3], tail))

    Config.Whitespace -= 1
    Config.File.write("{}}}\n".format(" " * Config.Whitespace))
+
def WriteObjects(Config, ObjectList):
    """
    Recursively write a Frame block for each object in *ObjectList*:
    local transform, armature bones, children, then mesh data.
    """

    # Record everything written (used later by animation export)
    Config.ObjectList += ObjectList

    for Object in ObjectList:
        if Config.Verbose:
            print(" Writing Object: {}...".format(Object.name))
        Config.File.write("{}Frame {} {{\n".format(" " * Config.Whitespace, LegalName(Object.name)))

        Config.Whitespace += 1
        if Config.Verbose:
            print(" Writing Local Matrix...")
        WriteLocalMatrix(Config, Object)
        if Config.Verbose:
            print(" Done")

        if Config.ExportArmatures and Object.type == "ARMATURE":
            Armature = Object.data
            # Start the bone recursion from root bones only
            ParentList = [Bone for Bone in Armature.bones if Bone.parent is None]
            if Config.Verbose:
                print(" Writing Armature Bones...")
            WriteArmatureBones(Config, Object, ParentList)
            if Config.Verbose:
                print(" Done")

        ChildList = GetObjectChildren(Object)
        if Config.ExportMode == 2: #Selected Objects Only
            ChildList = [Child for Child in ChildList
                         if Child in Config.context.selected_objects]
        if Config.Verbose:
            print(" Writing Children...")
        WriteObjects(Config, ChildList)
        if Config.Verbose:
            print(" Done Writing Children")

        if Object.type == "MESH":
            if Config.Verbose:
                print(" Generating Mesh...")
            if Config.ApplyModifiers:
                if Config.ExportArmatures:
                    #Create a copy of the object and remove all armature modifiers so an unshaped
                    #mesh can be created from it.
                    Object2 = Object.copy()
                    for Modifier in [Modifier for Modifier in Object2.modifiers if Modifier.type == "ARMATURE"]:
                        Object2.modifiers.remove(Modifier)
                    Mesh = Object2.to_mesh(bpy.context.scene, True, "PREVIEW")
                else:
                    Mesh = Object.to_mesh(bpy.context.scene, True, "PREVIEW")
            else:
                # No modifiers applied: raw mesh data
                Mesh = Object.to_mesh(bpy.context.scene, False, "PREVIEW")
            if Config.Verbose:
                print(" Done")
                print(" Writing Mesh...")
            WriteMesh(Config, Object, Mesh)
            if Config.Verbose:
                print(" Done")
            # Free the temporary mesh (and the object copy, if made)
            if Config.ApplyModifiers and Config.ExportArmatures:
                bpy.data.objects.remove(Object2)
            bpy.data.meshes.remove(Mesh)

        Config.Whitespace -= 1
        Config.File.write("{}}} //End of {}\n".format(" " * Config.Whitespace, LegalName(Object.name)))
        if Config.Verbose:
            print(" Done Writing Object: {}".format(Object.name))
+
+
def WriteLocalMatrix(Config, Object):
    """Write Object's local (parent-relative) transform as a
    FrameTransformMatrix block."""

    matrix = Object.matrix_local

    Config.File.write("{}FrameTransformMatrix {{\n".format(" " * Config.Whitespace))
    Config.Whitespace += 1

    pad = " " * Config.Whitespace
    for row in range(4):
        # Last row terminates the matrix member with ";;"
        tail = ";;" if row == 3 else ","
        Config.File.write("{}{:9f},{:9f},{:9f},{:9f}{}\n".format(
            pad, matrix[row][0], matrix[row][1], matrix[row][2], matrix[row][3], tail))

    Config.Whitespace -= 1
    Config.File.write("{}}}\n".format(" " * Config.Whitespace))
+
+
def WriteArmatureBones(Config, Object, ChildList):
    """
    Recursively write a Frame block (named Object_Bone) for each bone
    in *ChildList* and all of its descendants.
    """

    PoseBones = Object.pose.bones
    for Bone in ChildList:
        if Config.Verbose:
            print(" Writing Bone: {}...".format(Bone.name))
        Config.File.write("{}Frame {} {{\n".format(" " * Config.Whitespace, LegalName(Object.name) + "_" + LegalName(Bone.name)))
        Config.Whitespace += 1

        PoseBone = PoseBones[Bone.name]

        # Bone transform relative to its parent: parent_pose^-1 * pose
        if Bone.parent:
            BoneMatrix = PoseBone.parent.matrix.inverted()
        else:
            BoneMatrix = Matrix()

        BoneMatrix *= PoseBone.matrix

        Config.File.write("{}FrameTransformMatrix {{\n".format(" " * Config.Whitespace))
        Config.Whitespace += 1
        Config.File.write("{}{:9f},{:9f},{:9f},{:9f},\n".format(" " * Config.Whitespace, BoneMatrix[0][0], BoneMatrix[0][1], BoneMatrix[0][2], BoneMatrix[0][3]))
        Config.File.write("{}{:9f},{:9f},{:9f},{:9f},\n".format(" " * Config.Whitespace, BoneMatrix[1][0], BoneMatrix[1][1], BoneMatrix[1][2], BoneMatrix[1][3]))
        Config.File.write("{}{:9f},{:9f},{:9f},{:9f},\n".format(" " * Config.Whitespace, BoneMatrix[2][0], BoneMatrix[2][1], BoneMatrix[2][2], BoneMatrix[2][3]))
        Config.File.write("{}{:9f},{:9f},{:9f},{:9f};;\n".format(" " * Config.Whitespace, BoneMatrix[3][0], BoneMatrix[3][1], BoneMatrix[3][2], BoneMatrix[3][3]))
        Config.Whitespace -= 1
        Config.File.write("{}}}\n".format(" " * Config.Whitespace))

        if Config.Verbose:
            print(" Done")
        # Children are nested inside this bone's still-open frame
        WriteArmatureBones(Config, Object, Bone.children)
        Config.Whitespace -= 1

        Config.File.write("{}}} //End of {}\n".format(" " * Config.Whitespace, LegalName(Object.name) + "_" + LegalName(Bone.name)))
+
+
def WriteMesh(Config, Object, Mesh):
    """
    Write one Mesh block: vertices, normals, materials, then optional
    UV coordinates and skin weights.
    """

    Config.File.write("{}Mesh {{ //{} Mesh\n".format(" " * Config.Whitespace, LegalName(Mesh.name)))
    Config.Whitespace += 1

    if Config.Verbose:
        print(" Writing Mesh Vertices...")
    WriteMeshVertices(Config, Mesh)
    if Config.Verbose:
        print(" Done\n Writing Mesh Normals...")
    WriteMeshNormals(Config, Mesh)
    if Config.Verbose:
        print(" Done\n Writing Mesh Materials...")
    WriteMeshMaterials(Config, Mesh)
    if Config.Verbose:
        print(" Done")
    # UV block only when the mesh actually has UV layers
    if Mesh.uv_textures:
        if Config.Verbose:
            print(" Writing Mesh UV Coordinates...")
        WriteMeshUVCoordinates(Config, Mesh)
        if Config.Verbose:
            print(" Done")
    if Config.ExportArmatures:
        if Config.Verbose:
            print(" Writing Mesh Skin Weights...")
        WriteMeshSkinWeights(Config, Object, Mesh)
        if Config.Verbose:
            print(" Done")

    Config.Whitespace -= 1
    Config.File.write("{}}} //End of {} Mesh\n".format(" " * Config.Whitespace, LegalName(Mesh.name)))
+
+
def WriteMeshVertices(Config, Mesh):
    """Write the per-face-corner vertex positions followed by the face
    index lists, in DirectX .x text layout."""

    pad = " " * Config.Whitespace
    total = GetMeshVertexCount(Mesh)
    written = 0
    Config.File.write("{}{};\n".format(pad, total))

    # Positions, face by face (winding reversed for left-handed output)
    for face in Mesh.faces:
        order = list(face.vertices)
        if Config.CoordinateSystem == 1:
            order.reverse()
        for vi in order:
            pos = Mesh.vertices[vi].co
            written += 1
            tail = ";\n" if written == total else ",\n"
            Config.File.write("{}{:9f};{:9f};{:9f};{}".format(pad, pos[0], pos[1], pos[2], tail))

    # Face definitions referencing the sequential corner indices above
    written = 0
    Config.File.write("{}{};\n".format(pad, len(Mesh.faces)))

    for face in Mesh.faces:
        Config.File.write("{}{};".format(pad, len(face.vertices)))
        for _ in face.vertices:
            Config.File.write("{};".format(written))
            written += 1
        Config.File.write(";\n" if written == total else ",\n")
+
+
def WriteMeshNormals(Config, Mesh):
    """
    Write a MeshNormals block: one normal per face corner (vertex
    normal for smooth faces, face normal otherwise) plus face index
    lists mirroring WriteMeshVertices().
    """

    Config.File.write("{}MeshNormals {{ //{} Normals\n".format(" " * Config.Whitespace, LegalName(Mesh.name)))
    Config.Whitespace += 1

    Index = 0
    VertexCount = GetMeshVertexCount(Mesh)
    Config.File.write("{}{};\n".format(" " * Config.Whitespace, VertexCount))

    for Face in Mesh.faces:
        Vertices = list(Face.vertices)

        # Reverse winding for left-handed coordinate systems
        if Config.CoordinateSystem == 1:
            Vertices = Vertices[::-1]
        for Vertex in [Mesh.vertices[Vertex] for Vertex in Vertices]:
            if Face.use_smooth:
                Normal = Vertex.normal
            else:
                Normal = Face.normal
            if Config.FlipNormals:
                Normal = -Normal
            Config.File.write("{}{:9f};{:9f};{:9f};".format(" " * Config.Whitespace, Normal[0], Normal[1], Normal[2]))
            Index += 1
            # Last entry closes the array with ";", others continue ","
            if Index == VertexCount:
                Config.File.write(";\n")
            else:
                Config.File.write(",\n")

    Index = 0
    Config.File.write("{}{};\n".format(" " * Config.Whitespace, len(Mesh.faces)))

    for Face in Mesh.faces:
        Config.File.write("{}{};".format(" " * Config.Whitespace, len(Face.vertices)))
        for Vertex in Face.vertices:
            Config.File.write("{};".format(Index))
            Index += 1
        if Index == VertexCount:
            Config.File.write(";\n")
        else:
            Config.File.write(",\n")
    Config.Whitespace -= 1
    Config.File.write("{}}} //End of {} Normals\n".format(" " * Config.Whitespace, LegalName(Mesh.name)))
+
+
def WriteMeshMaterials(Config, Mesh):
    """
    Write a MeshMaterialList block: per-face material indices followed
    by the Material definitions themselves (or one default material).
    """

    Config.File.write("{}MeshMaterialList {{ //{} Material List\n".format(" " * Config.Whitespace, LegalName(Mesh.name)))
    Config.Whitespace += 1

    Materials = Mesh.materials
    if Materials.keys():
        # Assign a local index to each material in first-use order
        MaterialIndexes = {}
        for Face in Mesh.faces:
            if Materials[Face.material_index] not in MaterialIndexes:
                MaterialIndexes[Materials[Face.material_index]] = len(MaterialIndexes)

        FaceCount = len(Mesh.faces)
        Index = 0
        Config.File.write("{}{};\n{}{};\n".format(" " * Config.Whitespace, len(MaterialIndexes), " " * Config.Whitespace, FaceCount))
        for Face in Mesh.faces:
            Config.File.write("{}{}".format(" " * Config.Whitespace, MaterialIndexes[Materials[Face.material_index]]))
            Index += 1
            if Index == FaceCount:
                Config.File.write(";;\n")
            else:
                Config.File.write(",\n")

        # Reverse the (material, index) pairs and sort by index so the
        # definitions come out in index order; indexes are unique, so
        # the material objects themselves are never compared.
        Materials = [Item[::-1] for Item in MaterialIndexes.items()]
        Materials.sort()
        for Material in Materials:
            WriteMaterial(Config, Material[1])
    else:
        # No materials on the mesh: a single default material
        Config.File.write("{}1;\n{}1;\n{}0;;\n".format(" " * Config.Whitespace, " " * Config.Whitespace, " " * Config.Whitespace))
        WriteMaterial(Config)

    Config.Whitespace -= 1
    Config.File.write("{}}} //End of {} Material List\n".format(" " * Config.Whitespace, LegalName(Mesh.name)))
+
+
def WriteMaterial(Config, Material=None):
    """
    Write one Material block; with no *Material* a neutral default is
    emitted.

    Fix: the .x Material template has four members (face color,
    specular power, specular color, emissive color). The previous
    version omitted the emissive line for real materials while the
    default branch wrote it, producing malformed .x files.
    """

    if Material:
        Config.File.write("{}Material {} {{\n".format(" " * Config.Whitespace, LegalName(Material.name)))
        Config.Whitespace += 1

        Diffuse = list(Vector(Material.diffuse_color) * Material.diffuse_intensity)
        Diffuse.append(Material.alpha)
        Specularity = 1000 * (Material.specular_hardness - 1.0) / (511.0 - 1.0) # Map Blender's range of 1 - 511 to 0 - 1000
        Specular = list(Vector(Material.specular_color) * Material.specular_intensity)
        # Emissive color: diffuse tinted by the material's emit amount
        Emissive = list(Vector(Material.diffuse_color) * Material.emit)

        Config.File.write("{}{:9f};{:9f};{:9f};{:9f};;\n".format(" " * Config.Whitespace, Diffuse[0], Diffuse[1], Diffuse[2], Diffuse[3]))
        Config.File.write("{} {:9f};\n".format(" " * Config.Whitespace, Specularity))
        Config.File.write("{}{:9f};{:9f};{:9f};;\n".format(" " * Config.Whitespace, Specular[0], Specular[1], Specular[2]))
        Config.File.write("{}{:9f};{:9f};{:9f};;\n".format(" " * Config.Whitespace, Emissive[0], Emissive[1], Emissive[2]))
    else:
        Config.File.write("{}Material Default_Material {{\n".format(" " * Config.Whitespace))
        Config.Whitespace += 1
        Config.File.write("{} 0.800000; 0.800000; 0.800000; 0.800000;;\n".format(" " * Config.Whitespace))
        Config.File.write("{} 96.078431;\n".format(" " * Config.Whitespace)) # 1000 * (50 - 1) / (511 - 1)
        Config.File.write("{} 0.500000; 0.500000; 0.500000;;\n".format(" " * Config.Whitespace))
        Config.File.write("{} 0.000000; 0.000000; 0.000000;;\n".format(" " * Config.Whitespace))
    if Config.ExportTextures:
        Texture = GetMaterialTexture(Material)
        if Texture:
            Config.File.write("{}TextureFilename {{\"{}\";}}\n".format(" " * Config.Whitespace, Texture))
    Config.Whitespace -= 1
    Config.File.write("{}}}\n".format(" " * Config.Whitespace))
+
+
def WriteMeshUVCoordinates(Config, Mesh):
    """Write per-face-corner UVs from the render-active UV layer,
    flipping V for DirectX's top-left texture origin."""

    Config.File.write("{}MeshTextureCoords {{ //{} UV Coordinates\n".format(" " * Config.Whitespace, LegalName(Mesh.name)))
    Config.Whitespace += 1
    pad = " " * Config.Whitespace

    # Pick the UV layer marked for rendering
    UVCoordinates = None
    for layer in Mesh.uv_textures:
        if layer.active_render:
            UVCoordinates = layer.data
            break

    total = GetMeshVertexCount(Mesh)
    written = 0
    Config.File.write("{}{};\n".format(pad, total))

    for face in UVCoordinates:
        corners = [tuple(uv) for uv in face.uv]
        # Match the reversed winding used for vertices
        if Config.CoordinateSystem == 1:
            corners.reverse()
        for u, v in corners:
            written += 1
            tail = ";\n" if written == total else ",\n"
            Config.File.write("{}{:9f};{:9f};{}".format(pad, u, 1 - v, tail))
    Config.Whitespace -= 1
    Config.File.write("{}}} //End of {} UV Coordinates\n".format(" " * Config.Whitespace, LegalName(Mesh.name)))
+
+
+def WriteMeshSkinWeights(Config, Object, Mesh):
+ ArmatureList = [Modifier for Modifier in Object.modifiers if Modifier.type == "ARMATURE"]
+ if ArmatureList:
+ ArmatureObject = ArmatureList[0].object
+ ArmatureBones = ArmatureObject.data.bones
+
+ PoseBones = ArmatureObject.pose.bones
+
+ MaxInfluences = 0
+ UsedBones = set()
+ #Maps bones to a list of vertices they affect
+ VertexGroups = {}
+
+ for Vertex in Mesh.vertices:
+ #BoneInfluences contains the bones of the armature that affect the current vertex
+ BoneInfluences = [PoseBones[Object.vertex_groups[Group.group].name] for Group in Vertex.groups if Object.vertex_groups[Group.group].name in PoseBones]
+ if len(BoneInfluences) > MaxInfluences:
+ MaxInfluences = len(BoneInfluences)
+ for Bone in BoneInfluences:
+ UsedBones.add(Bone)
+ if Bone not in VertexGroups:
+ VertexGroups[Bone] = [Vertex]
+ else:
+ VertexGroups[Bone].append(Vertex)
+ BoneCount = len(UsedBones)
+
+ Config.File.write("{}XSkinMeshHeader {{\n".format(" " * Config.Whitespace))
+ Config.Whitespace += 1
+ Config.File.write("{}{};\n{}{};\n{}{};\n".format(" " * Config.Whitespace, MaxInfluences, " " * Config.Whitespace, MaxInfluences * 3, " " * Config.Whitespace, BoneCount))
+ Config.Whitespace -= 1
+ Config.File.write("{}}}\n".format(" " * Config.Whitespace))
+
+ for Bone in UsedBones:
+ VertexCount = 0
+ VertexIndexes = [Vertex.index for Vertex in VertexGroups[Bone]]
+ for Face in Mesh.faces:
+ for Vertex in Face.vertices:
+ if Vertex in VertexIndexes:
+ VertexCount += 1
+
+ Config.File.write("{}SkinWeights {{\n".format(" " * Config.Whitespace))
+ Config.Whitespace += 1
+ Config.File.write("{}\"{}\";\n{}{};\n".format(" " * Config.Whitespace, LegalName(ArmatureObject.name) + "_" + LegalName(Bone.name), " " * Config.Whitespace, VertexCount))
+
+ VertexWeights = []
+ Index = 0
+ WrittenIndexes = 0
+ for Face in Mesh.faces:
+ FaceVertices = list(Face.vertices)
+ if Config.CoordinateSystem == 1:
+ FaceVertices = FaceVertices[::-1]
+ for Vertex in FaceVertices:
+ if Vertex in VertexIndexes:
+ Config.File.write("{}{}".format(" " * Config.Whitespace, Index))
+
+ GroupIndexes = {Object.vertex_groups[Group.group].name: Index for Index, Group in enumerate(Mesh.vertices[Vertex].groups) if Object.vertex_groups[Group.group].name in PoseBones}
+
+ WeightTotal = 0.0
+ for Weight in [Group.weight for Group in Mesh.vertices[Vertex].groups if Object.vertex_groups[Group.group].name in PoseBones]:
+ WeightTotal += Weight
+
+ if WeightTotal:
+ VertexWeights.append(Mesh.vertices[Vertex].groups[GroupIndexes[Bone.name]].weight / WeightTotal)
+ else:
+ VertexWeights.append(0.0)
+
+ WrittenIndexes += 1
+ if WrittenIndexes == VertexCount:
+ Config.File.write(";\n")
+ else:
+ Config.File.write(",\n")
+ Index += 1
+
+ for Index, Weight in enumerate(VertexWeights):
+ Config.File.write("{}{:8f}".format(" " * Config.Whitespace, Weight))
+ if Index == (VertexCount - 1):
+ Config.File.write(";\n")
+ else:
+ Config.File.write(",\n")
+
+ RestBone = ArmatureBones[Bone.name]
+
+ #BoneMatrix transforms mesh vertices into the space of the bone.
+ #Here are the final transformations in order:
+ # - Object Space to World Space
+ # - World Space to Armature Space
+ # - Armature Space to Bone Space (The bone matrix needs to be rotated 90 degrees to align with Blender's world axes)
+ #This way, when BoneMatrix is transformed by the bone's Frame matrix, the vertices will be in their final world position.
+
+ BoneMatrix = RestBone.matrix_local.inverted()
+ BoneMatrix *= ArmatureObject.matrix_world.inverted()
+ BoneMatrix *= Object.matrix_world
+
+ Config.File.write("{}{:9f},{:9f},{:9f},{:9f},\n".format(" " * Config.Whitespace, BoneMatrix[0][0], BoneMatrix[0][1], BoneMatrix[0][2], BoneMatrix[0][3]))
+ Config.File.write("{}{:9f},{:9f},{:9f},{:9f},\n".format(" " * Config.Whitespace, BoneMatrix[1][0], BoneMatrix[1][1], BoneMatrix[1][2], BoneMatrix[1][3]))
+ Config.File.write("{}{:9f},{:9f},{:9f},{:9f},\n".format(" " * Config.Whitespace, BoneMatrix[2][0], BoneMatrix[2][1], BoneMatrix[2][2], BoneMatrix[2][3]))
+ Config.File.write("{}{:9f},{:9f},{:9f},{:9f};;\n".format(" " * Config.Whitespace, BoneMatrix[3][0], BoneMatrix[3][1], BoneMatrix[3][2], BoneMatrix[3][3]))
+ Config.Whitespace -= 1
+ Config.File.write("{}}} //End of {} Skin Weights\n".format(" " * Config.Whitespace, LegalName(ArmatureObject.name) + "_" + LegalName(Bone.name)))
+
+
+def WriteKeyedAnimationSet(Config):
+ Config.File.write("{}AnimationSet {{\n".format(" " * Config.Whitespace))
+ Config.Whitespace += 1
+ for Object in [Object for Object in Config.ObjectList if Object.animation_data]:
+ if Config.Verbose:
+ print(" Writing Animation Data for Object: {}".format(Object.name))
+ Action = Object.animation_data.action
+ if Action:
+ PositionFCurves = [None, None, None]
+ RotationFCurves = [None, None, None]
+ ScaleFCurves = [None, None, None]
+ for FCurve in Action.fcurves:
+ if FCurve.data_path == "location":
+ PositionFCurves[FCurve.array_index] = FCurve
+ elif FCurve.data_path == "rotation_euler":
+ RotationFCurves[FCurve.array_index] = FCurve
+ elif FCurve.data_path == "scale":
+ ScaleFCurves[FCurve.array_index] = FCurve
+ if [FCurve for FCurve in PositionFCurves + RotationFCurves + ScaleFCurves if FCurve]:
+ Config.File.write("{}Animation {{\n".format(" " * Config.Whitespace))
+ Config.Whitespace += 1
+ Config.File.write("{}{{{}}}\n".format(" " * Config.Whitespace, LegalName(Object.name)))
+
+ #Position
+ if Config.Verbose:
+ print(" Writing Position...")
+ AllKeyframes = set()
+ for Index, FCurve in enumerate(PositionFCurves):
+ if FCurve:
+ Keyframes = []
+ for Keyframe in FCurve.keyframe_points:
+ if Keyframe.co[0] < bpy.context.scene.frame_start:
+ AllKeyframes.add(bpy.context.scene.frame_start)
+ elif Keyframe.co[0] > bpy.context.scene.frame_end:
+ AllKeyframes.add(bpy.context.scene.frame_end)
+ else:
+ Keyframes.append(Keyframe.co)
+ AllKeyframes.add(int(Keyframe.co[0]))
+ PositionFCurves[Index] = {int(Keyframe): Value for Keyframe, Value in Keyframes}
+ Config.File.write("{}AnimationKey {{ //Position\n".format(" " * Config.Whitespace))
+ Config.Whitespace += 1
+ AllKeyframes = list(AllKeyframes)
+ AllKeyframes.sort()
+ if len(AllKeyframes):
+ Config.File.write("{}2;\n{}{};\n".format(" " * Config.Whitespace, " " * Config.Whitespace, len(AllKeyframes)))
+ for Keyframe in AllKeyframes:
+ bpy.context.scene.frame_set(Keyframe)
+ Position = Vector()
+ Position[0] = ((PositionFCurves[0][Keyframe] if Keyframe in PositionFCurves[0] else Object.location[0]) if PositionFCurves[0] else Object.location[0])
+ Position[1] = ((PositionFCurves[1][Keyframe] if Keyframe in PositionFCurves[1] else Object.location[1]) if PositionFCurves[1] else Object.location[1])
+ Position[2] = ((PositionFCurves[2][Keyframe] if Keyframe in PositionFCurves[2] else Object.location[2]) if PositionFCurves[2] else Object.location[2])
+ Config.File.write("{}{}{:9f},{:9f},{:9f};;".format(" " * Config.Whitespace, (str(Keyframe - bpy.context.scene.frame_start) + ";3;").ljust(8), Position[0], Position[1], Position[2]))
+ if Keyframe == AllKeyframes[-1]:
+ Config.File.write(";\n")
+ else:
+ Config.File.write(",\n")
+
+ else:
+ Config.File.write("{}2;\n{}1;\n".format(" " * Config.Whitespace, " " * Config.Whitespace))
+ bpy.context.scene.frame_set(bpy.context.scene.frame_start)
+ Position = Object.matrix_local.to_translation()
+ Config.File.write("{}{}{:9f},{:9f},{:9f};;;\n".format(" " * Config.Whitespace, ("0;3;").ljust(8), Position[0], Position[1], Position[2]))
+ Config.Whitespace -= 1
+ Config.File.write("{}}}\n".format(" " * Config.Whitespace))
+ if Config.Verbose:
+ print(" Done")
+
+ #Rotation
+ if Config.Verbose:
+ print(" Writing Rotation...")
+ AllKeyframes = set()
+ for Index, FCurve in enumerate(RotationFCurves):
+ if FCurve:
+ Keyframes = []
+ for Keyframe in FCurve.keyframe_points:
+ if Keyframe.co[0] < bpy.context.scene.frame_start:
+ AllKeyframes.add(bpy.context.scene.frame_start)
+ elif Keyframe.co[0] > bpy.context.scene.frame_end:
+ AllKeyframes.add(bpy.context.scene.frame_end)
+ else:
+ Keyframes.append(Keyframe.co)
+ AllKeyframes.add(int(Keyframe.co[0]))
+ RotationFCurves[Index] = {int(Keyframe): Value for Keyframe, Value in Keyframes}
+ Config.File.write("{}AnimationKey {{ //Rotation\n".format(" " * Config.Whitespace))
+ Config.Whitespace += 1
+ AllKeyframes = list(AllKeyframes)
+ AllKeyframes.sort()
+ if len(AllKeyframes):
+ Config.File.write("{}0;\n{}{};\n".format(" " * Config.Whitespace, " " * Config.Whitespace, len(AllKeyframes)))
+ for Keyframe in AllKeyframes:
+ bpy.context.scene.frame_set(Keyframe)
+ Rotation = Euler()
+ Rotation[0] = ((RotationFCurves[0][Keyframe] if Keyframe in RotationFCurves[0] else Object.rotation_euler[0]) if RotationFCurves[0] else Object.rotation_euler[0])
+ Rotation[1] = ((RotationFCurves[1][Keyframe] if Keyframe in RotationFCurves[1] else Object.rotation_euler[1]) if RotationFCurves[1] else Object.rotation_euler[1])
+ Rotation[2] = ((RotationFCurves[2][Keyframe] if Keyframe in RotationFCurves[2] else Object.rotation_euler[2]) if RotationFCurves[2] else Object.rotation_euler[2])
+ Rotation = Rotation.to_quaternion()
+ Config.File.write("{}{}{:9f},{:9f},{:9f},{:9f};;".format(" " * Config.Whitespace, (str(Keyframe - bpy.context.scene.frame_start) + ";4;").ljust(8), -Rotation[0], Rotation[1], Rotation[2], Rotation[3]))
+ if Keyframe == AllKeyframes[-1]:
+ Config.File.write(";\n")
+ else:
+ Config.File.write(",\n")
+ else:
+ Config.File.write("{}0;\n{}1;\n".format(" " * Config.Whitespace, " " * Config.Whitespace))
+ bpy.context.scene.frame_set(bpy.context.scene.frame_start)
+ Rotation = Object.rotation_euler.to_quaternion()
+ Config.File.write("{}{}{:9f},{:9f},{:9f},{:9f};;;\n".format(" " * Config.Whitespace, ("0;4;").ljust(8), -Rotation[0], Rotation[1], Rotation[2], Rotation[3]))
+ Config.Whitespace -= 1
+ Config.File.write("{}}}\n".format(" " * Config.Whitespace))
+ if Config.Verbose:
+ print(" Done")
+
+ #Scale
+ if Config.Verbose:
+ print(" Writing Scale...")
+ AllKeyframes = set()
+ for Index, FCurve in enumerate(ScaleFCurves):
+ if FCurve:
+ Keyframes = []
+ for Keyframe in FCurve.keyframe_points:
+ if Keyframe.co[0] < bpy.context.scene.frame_start:
+ AllKeyframes.add(bpy.context.scene.frame_start)
+ elif Keyframe.co[0] > bpy.context.scene.frame_end:
+ AllKeyframes.add(bpy.context.scene.frame_end)
+ else:
+ Keyframes.append(Keyframe.co)
+ AllKeyframes.add(int(Keyframe.co[0]))
+ ScaleFCurves[Index] = {int(Keyframe): Value for Keyframe, Value in Keyframes}
+ Config.File.write("{}AnimationKey {{ //Scale\n".format(" " * Config.Whitespace))
+ Config.Whitespace += 1
+ AllKeyframes = list(AllKeyframes)
+ AllKeyframes.sort()
+ if len(AllKeyframes):
+ Config.File.write("{}1;\n{}{};\n".format(" " * Config.Whitespace, " " * Config.Whitespace, len(AllKeyframes)))
+ for Keyframe in AllKeyframes:
+ bpy.context.scene.frame_set(Keyframe)
+ Scale = Vector()
+ Scale[0] = ((ScaleFCurves[0][Keyframe] if Keyframe in ScaleFCurves[0] else Object.scale[0]) if ScaleFCurves[0] else Object.scale[0])
+ Scale[1] = ((ScaleFCurves[1][Keyframe] if Keyframe in ScaleFCurves[1] else Object.scale[1]) if ScaleFCurves[1] else Object.scale[1])
+ Scale[2] = ((ScaleFCurves[2][Keyframe] if Keyframe in ScaleFCurves[2] else Object.scale[2]) if ScaleFCurves[2] else Object.scale[2])
+ Config.File.write("{}{}{:9f},{:9f},{:9f};;".format(" " * Config.Whitespace, (str(Keyframe - bpy.context.scene.frame_start) + ";3;").ljust(8), Scale[0], Scale[1], Scale[2]))
+ if Keyframe == AllKeyframes[-1]:
+ Config.File.write(";\n")
+ else:
+ Config.File.write(",\n")
+ else:
+ Config.File.write("{}1;\n{}1;\n".format(" " * Config.Whitespace, " " * Config.Whitespace))
+ bpy.context.scene.frame_set(bpy.context.scene.frame_start)
+ Scale = Object.matrix_local.to_scale()
+ Config.File.write("{}{}{:9f},{:9f},{:9f};;;\n".format(" " * Config.Whitespace, ("0;3;").ljust(8), Scale[0], Scale[1], Scale[2]))
+ Config.Whitespace -= 1
+ Config.File.write("{}}}\n".format(" " * Config.Whitespace))
+ if Config.Verbose:
+ print(" Done")
+
+ Config.Whitespace -= 1
+ Config.File.write("{}}}\n".format(" " * Config.Whitespace))
+ else:
+ if Config.Verbose:
+ print(" Object has no useable animation data.")
+
+ if Config.ExportArmatures and Object.type == "ARMATURE":
+ if Config.Verbose:
+ print(" Writing Armature Bone Animation Data...")
+ PoseBones = Object.pose.bones
+ for Bone in PoseBones:
+ if Config.Verbose:
+ print(" Writing Bone: {}...".format(Bone.name))
+ PositionFCurves = [None, None, None]
+ RotationFCurves = [None, None, None, None]
+ ScaleFCurves = [None, None, None]
+ for FCurve in Action.fcurves:
+ if FCurve.data_path == "pose.bones[\"{}\"].location".format(Bone.name):
+ PositionFCurves[FCurve.array_index] = FCurve
+ elif FCurve.data_path == "pose.bones[\"{}\"].rotation_quaternion".format(Bone.name):
+ RotationFCurves[FCurve.array_index] = FCurve
+ elif FCurve.data_path == "pose.bones[\"{}\"].scale".format(Bone.name):
+ ScaleFCurves[FCurve.array_index] = FCurve
+ if not [FCurve for FCurve in PositionFCurves + RotationFCurves + ScaleFCurves if FCurve]:
+ if Config.Verbose:
+ print(" Bone has no useable animation data.\n Done")
+ continue
+
+ Config.File.write("{}Animation {{\n".format(" " * Config.Whitespace))
+ Config.Whitespace += 1
+ Config.File.write("{}{{{}}}\n".format(" " * Config.Whitespace, LegalName(Object.name) + "_" + LegalName(Bone.name)))
+
+ #Position
+ if Config.Verbose:
+ print(" Writing Position...")
+ AllKeyframes = set()
+ for Index, FCurve in enumerate(PositionFCurves):
+ if FCurve:
+ Keyframes = []
+ for Keyframe in FCurve.keyframe_points:
+ if Keyframe.co[0] < bpy.context.scene.frame_start:
+ AllKeyframes.add(bpy.context.scene.frame_start)
+ elif Keyframe.co[0] > bpy.context.scene.frame_end:
+ AllKeyframes.add(bpy.context.scene.frame_end)
+ else:
+ Keyframes.append(Keyframe.co)
+ AllKeyframes.add(int(Keyframe.co[0]))
+ PositionFCurves[Index] = {int(Keyframe): Value for Keyframe, Value in Keyframes}
+ Config.File.write("{}AnimationKey {{ //Position\n".format(" " * Config.Whitespace))
+ Config.Whitespace += 1
+ AllKeyframes = list(AllKeyframes)
+ AllKeyframes.sort()
+ if not len(AllKeyframes):
+ AllKeyframes = [bpy.context.scene.frame_start]
+ Config.File.write("{}2;\n{}{};\n".format(" " * Config.Whitespace, " " * Config.Whitespace, len(AllKeyframes)))
+ for Keyframe in AllKeyframes:
+ bpy.context.scene.frame_set(Keyframe)
+
+ if Bone.parent:
+ PoseMatrix = Bone.parent.matrix.inverted()
+ else:
+ PoseMatrix = Matrix()
+ PoseMatrix *= Bone.matrix
+
+ Position = PoseMatrix.to_translation()
+ Config.File.write("{}{}{:9f},{:9f},{:9f};;".format(" " * Config.Whitespace, (str(Keyframe - bpy.context.scene.frame_start) + ";3;").ljust(8), Position[0], Position[1], Position[2]))
+ if Keyframe == AllKeyframes[-1]:
+ Config.File.write(";\n")
+ else:
+ Config.File.write(",\n")
+ Config.Whitespace -= 1
+ Config.File.write("{}}}\n".format(" " * Config.Whitespace))
+ if Config.Verbose:
+ print(" Done")
+
+ #Rotation
+ if Config.Verbose:
+ print(" Writing Rotation...")
+ AllKeyframes = set()
+ for Index, FCurve in enumerate(RotationFCurves):
+ if FCurve:
+ Keyframes = []
+ for Keyframe in FCurve.keyframe_points:
+ if Keyframe.co[0] < bpy.context.scene.frame_start:
+ AllKeyframes.add(bpy.context.scene.frame_start)
+ elif Keyframe.co[0] > bpy.context.scene.frame_end:
+ AllKeyframes.add(bpy.context.scene.frame_end)
+ else:
+ Keyframes.append(Keyframe.co)
+ AllKeyframes.add(int(Keyframe.co[0]))
+ RotationFCurves[Index] = {int(Keyframe): Value for Keyframe, Value in Keyframes}
+ Config.File.write("{}AnimationKey {{ //Rotation\n".format(" " * Config.Whitespace))
+ Config.Whitespace += 1
+ AllKeyframes = list(AllKeyframes)
+ AllKeyframes.sort()
+ if not len(AllKeyframes):
+ AllKeyframes = [bpy.context.scene.frame_start]
+ Config.File.write("{}0;\n{}{};\n".format(" " * Config.Whitespace, " " * Config.Whitespace, len(AllKeyframes)))
+ for Keyframe in AllKeyframes:
+ bpy.context.scene.frame_set(Keyframe)
+
+ if Bone.parent:
+ PoseMatrix = Bone.parent.matrix.inverted()
+ else:
+ PoseMatrix = Matrix()
+ PoseMatrix *= Bone.matrix
+
+ Rotation = PoseMatrix.to_3x3().to_quaternion()
+ Config.File.write("{}{}{:9f},{:9f},{:9f},{:9f};;".format(" " * Config.Whitespace, (str(Keyframe - bpy.context.scene.frame_start) + ";4;").ljust(8), -Rotation[0], Rotation[1], Rotation[2], Rotation[3]))
+ if Keyframe == AllKeyframes[-1]:
+ Config.File.write(";\n")
+ else:
+ Config.File.write(",\n")
+ Config.Whitespace -= 1
+ Config.File.write("{}}}\n".format(" " * Config.Whitespace))
+ if Config.Verbose:
+ print(" Done")
+
+ #Scale
+ if Config.Verbose:
+ print(" Writing Scale...")
+ AllKeyframes = set()
+ for Index, FCurve in enumerate(ScaleFCurves):
+ if FCurve:
+ Keyframes = []
+ for Keyframe in FCurve.keyframe_points:
+ if Keyframe.co[0] < bpy.context.scene.frame_start:
+ AllKeyframes.add(bpy.context.scene.frame_start)
+ elif Keyframe.co[0] > bpy.context.scene.frame_end:
+ AllKeyframes.add(bpy.context.scene.frame_end)
+ else:
+ Keyframes.append(Keyframe.co)
+ AllKeyframes.add(int(Keyframe.co[0]))
+ ScaleFCurves[Index] = {int(Keyframe): Value for Keyframe, Value in Keyframes}
+ Config.File.write("{}AnimationKey {{ //Scale\n".format(" " * Config.Whitespace))
+ Config.Whitespace += 1
+ AllKeyframes = list(AllKeyframes)
+ AllKeyframes.sort()
+ if not len(AllKeyframes):
+ AllKeyframes = [bpy.context.scene.frame_start]
+ Config.File.write("{}1;\n{}{};\n".format(" " * Config.Whitespace, " " * Config.Whitespace, len(AllKeyframes)))
+ for Keyframe in AllKeyframes:
+ bpy.context.scene.frame_set(Keyframe)
+
+ if Bone.parent:
+ PoseMatrix = Bone.parent.matrix.inverted()
+ else:
+ PoseMatrix = Matrix()
+ PoseMatrix *= Bone.matrix
+
+ Scale = PoseMatrix.to_scale()
+ Config.File.write("{}{}{:9f},{:9f},{:9f};;".format(" " * Config.Whitespace, (str(Keyframe - bpy.context.scene.frame_start) + ";3;").ljust(8), Scale[0], Scale[1], Scale[2]))
+ if Keyframe == AllKeyframes[-1]:
+ Config.File.write(";\n")
+ else:
+ Config.File.write(",\n")
+ Config.Whitespace -= 1
+ Config.File.write("{}}}\n".format(" " * Config.Whitespace))
+ if Config.Verbose:
+ print(" Done")
+
+ Config.Whitespace -= 1
+ Config.File.write("{}}}\n".format(" " * Config.Whitespace))
+ if Config.Verbose:
+ print(" Done") #Done with Armature Bone
+ if Config.Verbose:
+ print(" Done") #Done with Armature Bone data
+ if Config.Verbose:
+ print(" Done") #Done with Object
+
+ Config.Whitespace -= 1
+ Config.File.write("{}}} //End of AnimationSet\n".format(" " * Config.Whitespace))
+
+def WriteFullAnimationSet(Config):
+ Config.File.write("{}AnimationSet {{\n".format(" " * Config.Whitespace))
+ Config.Whitespace += 1
+
+ KeyframeCount = bpy.context.scene.frame_end - bpy.context.scene.frame_start + 1
+
+ for Object in Config.ObjectList:
+ if Config.Verbose:
+ print(" Writing Animation Data for Object: {}".format(Object.name))
+
+ Config.File.write("{}Animation {{\n".format(" " * Config.Whitespace))
+ Config.Whitespace += 1
+ Config.File.write("{}{{{}}}\n".format(" " * Config.Whitespace, LegalName(Object.name)))
+
+ #Position
+ if Config.Verbose:
+ print(" Writing Position...")
+ Config.File.write("{}AnimationKey {{ //Position\n".format(" " * Config.Whitespace))
+ Config.Whitespace += 1
+ Config.File.write("{}2;\n{}{};\n".format(" " * Config.Whitespace, " " * Config.Whitespace, KeyframeCount))
+ for Frame in range(0, KeyframeCount):
+ bpy.context.scene.frame_set(Frame + bpy.context.scene.frame_start)
+ Position = Object.matrix_local.to_translation()
+ Config.File.write("{}{}{:9f},{:9f},{:9f};;".format(" " * Config.Whitespace, (str(Frame) + ";3;").ljust(8), Position[0], Position[1], Position[2]))
+ if Frame == KeyframeCount-1:
+ Config.File.write(";\n")
+ else:
+ Config.File.write(",\n")
+ Config.Whitespace -= 1
+ Config.File.write("{}}}\n".format(" " * Config.Whitespace))
+ if Config.Verbose:
+ print(" Done")
+
+ #Rotation
+ if Config.Verbose:
+ print(" Writing Rotation...")
+ Config.File.write("{}AnimationKey {{ //Rotation\n".format(" " * Config.Whitespace))
+ Config.Whitespace += 1
+ Config.File.write("{}0;\n{}{};\n".format(" " * Config.Whitespace, " " * Config.Whitespace, KeyframeCount))
+ for Frame in range(0, KeyframeCount):
+ bpy.context.scene.frame_set(Frame + bpy.context.scene.frame_start)
+ Rotation = Object.rotation_euler.to_quaternion()
+ Config.File.write("{}{}{:9f},{:9f},{:9f},{:9f};;".format(" " * Config.Whitespace, (str(Frame) + ";4;").ljust(8), -Rotation[0], Rotation[1], Rotation[2], Rotation[3]))
+ if Frame == KeyframeCount-1:
+ Config.File.write(";\n")
+ else:
+ Config.File.write(",\n")
+ Config.Whitespace -= 1
+ Config.File.write("{}}}\n".format(" " * Config.Whitespace))
+ if Config.Verbose:
+ print(" Done")
+
+ #Scale
+ if Config.Verbose:
+ print(" Writing Scale...")
+ Config.File.write("{}AnimationKey {{ //Scale\n".format(" " * Config.Whitespace))
+ Config.Whitespace += 1
+ Config.File.write("{}1;\n{}{};\n".format(" " * Config.Whitespace, " " * Config.Whitespace, KeyframeCount))
+ for Frame in range(0, KeyframeCount):
+ bpy.context.scene.frame_set(Frame + bpy.context.scene.frame_start)
+ Scale = Object.matrix_local.to_scale()
+ Config.File.write("{}{}{:9f},{:9f},{:9f};;".format(" " * Config.Whitespace, (str(Frame) + ";3;").ljust(8), Scale[0], Scale[1], Scale[2]))
+ if Frame == KeyframeCount-1:
+ Config.File.write(";\n")
+ else:
+ Config.File.write(",\n")
+ Config.Whitespace -= 1
+ Config.File.write("{}}}\n".format(" " * Config.Whitespace))
+ if Config.Verbose:
+ print(" Done")
+
+ Config.Whitespace -= 1
+ Config.File.write("{}}}\n".format(" " * Config.Whitespace))
+
+ if Config.ExportArmatures and Object.type == "ARMATURE":
+ if Config.Verbose:
+ print(" Writing Armature Bone Animation Data...")
+ PoseBones = Object.pose.bones
+ Bones = Object.data.bones
+ for Bone in PoseBones:
+ if Config.Verbose:
+ print(" Writing Bone: {}...".format(Bone.name))
+
+ Config.File.write("{}Animation {{\n".format(" " * Config.Whitespace))
+ Config.Whitespace += 1
+ Config.File.write("{}{{{}}}\n".format(" " * Config.Whitespace, LegalName(Object.name) + "_" + LegalName(Bone.name)))
+
+ #Position
+ if Config.Verbose:
+ print(" Writing Position...")
+ Config.File.write("{}AnimationKey {{ //Position\n".format(" " * Config.Whitespace))
+ Config.Whitespace += 1
+ Config.File.write("{}2;\n{}{};\n".format(" " * Config.Whitespace, " " * Config.Whitespace, KeyframeCount))
+ for Frame in range(0, KeyframeCount):
+ bpy.context.scene.frame_set(Frame + bpy.context.scene.frame_start)
+
+ if Bone.parent:
+ PoseMatrix = Bone.parent.matrix.inverted()
+ else:
+ PoseMatrix = Matrix()
+ PoseMatrix *= Bone.matrix
+
+ Position = PoseMatrix.to_translation()
+ Config.File.write("{}{}{:9f},{:9f},{:9f};;".format(" " * Config.Whitespace, (str(Frame) + ";3;").ljust(8), Position[0], Position[1], Position[2]))
+ if Frame == KeyframeCount-1:
+ Config.File.write(";\n")
+ else:
+ Config.File.write(",\n")
+ Config.Whitespace -= 1
+ Config.File.write("{}}}\n".format(" " * Config.Whitespace))
+ if Config.Verbose:
+ print(" Done")
+
+ #Rotation
+ if Config.Verbose:
+ print(" Writing Rotation...")
+ Config.File.write("{}AnimationKey {{ //Rotation\n".format(" " * Config.Whitespace))
+ Config.Whitespace += 1
+ Config.File.write("{}0;\n{}{};\n".format(" " * Config.Whitespace, " " * Config.Whitespace, KeyframeCount))
+ for Frame in range(0, KeyframeCount):
+ bpy.context.scene.frame_set(Frame + bpy.context.scene.frame_start)
+
+ Rotation = Bones[Bone.name].matrix.to_quaternion() * Bone.rotation_quaternion
+
+ Config.File.write("{}{}{:9f},{:9f},{:9f},{:9f};;".format(" " * Config.Whitespace, (str(Frame) + ";4;").ljust(8), -Rotation[0], Rotation[1], Rotation[2], Rotation[3]))
+ if Frame == KeyframeCount-1:
+ Config.File.write(";\n")
+ else:
+ Config.File.write(",\n")
+ Config.Whitespace -= 1
+ Config.File.write("{}}}\n".format(" " * Config.Whitespace))
+ if Config.Verbose:
+ print(" Done")
+
+ #Scale
+ if Config.Verbose:
+ print(" Writing Scale...")
+ Config.File.write("{}AnimationKey {{ //Scale\n".format(" " * Config.Whitespace, KeyframeCount))
+ Config.Whitespace += 1
+ Config.File.write("{}1;\n{}{};\n".format(" " * Config.Whitespace, " " * Config.Whitespace, KeyframeCount))
+ for Frame in range(0, KeyframeCount):
+ bpy.context.scene.frame_set(Frame + bpy.context.scene.frame_start)
+
+ if Bone.parent:
+ PoseMatrix = Bone.parent.matrix.inverted()
+ else:
+ PoseMatrix = Matrix()
+ PoseMatrix *= Bone.matrix
+
+ Scale = PoseMatrix.to_scale()
+ Config.File.write("{}{}{:9f},{:9f},{:9f};;".format(" " * Config.Whitespace, (str(Frame) + ";3;").ljust(8), Scale[0], Scale[1], Scale[2]))
+ if Frame == KeyframeCount-1:
+ Config.File.write(";\n")
+ else:
+ Config.File.write(",\n")
+ Config.Whitespace -= 1
+ Config.File.write("{}}}\n".format(" " * Config.Whitespace))
+ if Config.Verbose:
+ print(" Done")
+
+ Config.Whitespace -= 1
+ Config.File.write("{}}}\n".format(" " * Config.Whitespace))
+ if Config.Verbose:
+ print(" Done") #Done with Armature Bone
+ if Config.Verbose:
+ print(" Done") #Done with Armature Bone data
+ if Config.Verbose:
+ print(" Done") #Done with Object
+
+ Config.Whitespace -= 1
+ Config.File.write("{}}} //End of AnimationSet\n".format(" " * Config.Whitespace))
+
+
+def CloseFile(Config):
+ if Config.Verbose:
+ print("Closing File...")
+ Config.File.close()
+ if Config.Verbose:
+ print("Done")
+
+CoordinateSystems = []
+CoordinateSystems.append(("1", "Left-Handed", ""))
+CoordinateSystems.append(("2", "Right-Handed", ""))
+
+AnimationModes = []
+AnimationModes.append(("0", "None", ""))
+AnimationModes.append(("1", "Keyframes Only", ""))
+AnimationModes.append(("2", "Full Animation", ""))
+
+ExportModes = []
+ExportModes.append(("1", "All Objects", ""))
+ExportModes.append(("2", "Selected Objects", ""))
+
+from bpy.props import *
+
+
+class DirectXExporter(bpy.types.Operator):
+ """Export to the DirectX model format (.x)"""
+
+ bl_idname = "export.directx"
+ bl_label = "Export DirectX"
+
+ filepath = StringProperty(subtype='FILE_PATH')
+
+ #Coordinate System
+ CoordinateSystem = EnumProperty(name="System", description="Select a coordinate system to export to", items=CoordinateSystems, default="1")
+
+ #General Options
+ RotateX = BoolProperty(name="Rotate X 90 Degrees", description="Rotate the entire scene 90 degrees around the X axis so Y is up.", default=True)
+ FlipNormals = BoolProperty(name="Flip Normals", description="", default=False)
+ ApplyModifiers = BoolProperty(name="Apply Modifiers", description="Apply object modifiers before export.", default=False)
+ IncludeFrameRate = BoolProperty(name="Include Frame Rate", description="Include the AnimTicksPerSecond template which is used by some engines to control animation speed.", default=False)
+ ExportTextures = BoolProperty(name="Export Textures", description="Reference external image files to be used by the model.", default=True)
+ ExportArmatures = BoolProperty(name="Export Armatures", description="Export the bones of any armatures to deform meshes.", default=False)
+ ExportAnimation = EnumProperty(name="Animations", description="Select the type of animations to export. Only object and armature bone animations can be exported. Full Animation exports every frame.", items=AnimationModes, default="0")
+
+ #Export Mode
+ ExportMode = EnumProperty(name="Export", description="Select which objects to export. Only Mesh, Empty, and Armature objects will be exported.", items=ExportModes, default="1")
+
+ Verbose = BoolProperty(name="Verbose", description="Run the exporter in debug mode. Check the console for output.", default=False)
+
+ def execute(self, context):
+ #Append .x
+ FilePath = os.path.splitext(self.filepath)[0] + ".x"
+
+ Config = DirectXExporterSettings(context,
+ FilePath,
+ CoordinateSystem=self.CoordinateSystem,
+ RotateX=self.RotateX,
+ FlipNormals=self.FlipNormals,
+ ApplyModifiers=self.ApplyModifiers,
+ IncludeFrameRate=self.IncludeFrameRate,
+ ExportTextures=self.ExportTextures,
+ ExportArmatures=self.ExportArmatures,
+ ExportAnimation=self.ExportAnimation,
+ ExportMode=self.ExportMode,
+ Verbose=self.Verbose)
+ ExportDirectX(Config)
+ return {"FINISHED"}
+
+ def invoke(self, context, event):
+ WindowManager = context.window_manager
+ WindowManager.fileselect_add(self)
+ return {"RUNNING_MODAL"}
+
+
+def menu_func(self, context):
+ default_path = os.path.splitext(bpy.data.filepath)[0] + ".x"
+ self.layout.operator(DirectXExporter.bl_idname, text="DirectX (.x)").filepath = default_path
+
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ bpy.types.INFO_MT_file_export.append(menu_func)
+
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+ bpy.types.INFO_MT_file_export.remove(menu_func)
+
+
+if __name__ == "__main__":
+ register()
diff --git a/io_export_pc2.py b/io_export_pc2.py
new file mode 100644
index 00000000..f9170652
--- /dev/null
+++ b/io_export_pc2.py
@@ -0,0 +1,203 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+bl_info = {
+ "name": "Export Pointcache Format(.pc2)",
+ "author": "Florian Meyer (tstscr)",
+ "version": (1, 0),
+ "blender": (2, 5, 7),
+ "api": 36079,
+ "location": "File > Export > Pointcache (.pc2)",
+ "description": "Export mesh Pointcache data (.pc2)",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/Import-Export/PC2_Pointcache_export",
+ "tracker_url": "https://projects.blender.org/tracker/index.php?"\
+ "func=detail&aid=24703",
+ "category": "Import-Export"}
+
+'''
+Usage Notes:
+
+in Maya Mel:
+cacheFile -pc2 1 -pcf "<insert filepath of source>" -f "<insert target filename w/o extension>" -dir "<insert directory path for target>" -format "OneFile";
+
+'''
+
+import bpy
+from bpy.props import *
+import mathutils, math, struct
+from os import remove
+import time
+from bpy_extras.io_utils import ExportHelper
+
+def getSampling(start, end, sampling):
+ samples = [start - sampling
+ + x * sampling
+ for x in range(start, int((end-start)*1/sampling)+1)]
+ return samples
+
+def do_export(context, props, filepath):
+ mat_x90 = mathutils.Matrix.Rotation(-math.pi/2, 4, 'X')
+ ob = context.active_object
+ sc = context.scene
+ start = props.range_start
+ end = props.range_end
+ sampling = float(props.sampling)
+ apply_modifiers = props.apply_modifiers
+ me = ob.to_mesh(sc, apply_modifiers, 'PREVIEW')
+ vertCount = len(me.vertices)
+ sampletimes = getSampling(start, end, sampling)
+ sampleCount = len(sampletimes)
+
+ # Create the header
+ headerFormat='<12siiffi'
+ headerStr = struct.pack(headerFormat, b'POINTCACHE2\0',
+ 1, vertCount, start, sampling, sampleCount)
+
+ file = open(filepath, "wb")
+ file.write(headerStr)
+
+ for frame in sampletimes:
+ sc.frame_set(frame)
+ me = ob.to_mesh(sc, apply_modifiers, 'PREVIEW')
+
+ if len(me.vertices) != vertCount:
+ file.close()
+ try:
+ remove(filepath)
+ except:
+ empty = open(filepath, 'w')
+ empty.write('DUMMIFILE - export failed\n')
+ empty.close()
+ print('Export failed. Vertexcount of Object is not constant')
+ return False
+
+ if props.world_space:
+ me.transform(ob.matrix_world)
+ if props.rot_x90:
+ me.transform(mat_x90)
+
+ for v in me.vertices:
+ thisVertex = struct.pack('<fff', float(v.co[0]),
+ float(v.co[1]),
+ float(v.co[2]))
+ file.write(thisVertex)
+
+ file.flush()
+ file.close()
+ return True
+
+
+###### EXPORT OPERATOR #######
+class Export_pc2(bpy.types.Operator, ExportHelper):
+ '''Exports the active Object as a .pc2 Pointcache file.'''
+ bl_idname = "export_shape.pc2"
+ bl_label = "Export Pointcache (.pc2)"
+
+ filename_ext = ".pc2"
+
+ rot_x90 = BoolProperty(name="Convert to Y-up",
+ description="Rotate 90 degrees around X to convert to y-up",
+ default=True)
+ world_space = BoolProperty(name="Export into Worldspace",
+ description="Transform the Vertexcoordinates into Worldspace",
+ default=False)
+ apply_modifiers = BoolProperty(name="Apply Modifiers",
+ description="Applies the Modifiers",
+ default=True)
+ range_start = IntProperty(name='Start Frame',
+ description='First frame to use for Export',
+ default=1)
+ range_end = IntProperty(name='End Frame',
+ description='Last frame to use for Export',
+ default=250)
+ sampling = EnumProperty(name='Sampling',
+ description='Sampling --> frames per sample (0.1 yields 10 samples per frame)',
+ items=[
+ ('0.01', '0.01', ''),
+ ('0.05', '0.05', ''),
+ ('0.1', '0.1', ''),
+ ('0.2', '0.2', ''),
+ ('0.25', '0.25', ''),
+ ('0.5', '0.5', ''),
+ ('1', '1', ''),
+ ('2', '2', ''),
+ ('3', '3', ''),
+ ('4', '4', ''),
+ ('5', '5', ''),
+ ('10', '10', '')],
+ default='1')
+
+ @classmethod
+ def poll(cls, context):
+ return context.active_object.type in ['MESH', 'CURVE', 'SURFACE', 'FONT']
+
+ def execute(self, context):
+ start_time = time.time()
+ print('\n_____START_____')
+ props = self.properties
+ filepath = self.filepath
+ filepath = bpy.path.ensure_ext(filepath, self.filename_ext)
+
+ exported = do_export(context, props, filepath)
+
+ if exported:
+ print('finished export in %s seconds' %((time.time() - start_time)))
+ print(filepath)
+
+ return {'FINISHED'}
+
+ def invoke(self, context, event):
+ wm = context.window_manager
+
+ if True:
+ # File selector
+ wm.fileselect_add(self) # will run self.execute()
+ return {'RUNNING_MODAL'}
+ elif True:
+ # search the enum
+ wm.invoke_search_popup(self)
+ return {'RUNNING_MODAL'}
+ elif False:
+ # Redo popup
+ return wm.invoke_props_popup(self, event) #
+ elif False:
+ return self.execute(context)
+
+
+### REGISTER ###
+
+def menu_func(self, context):
+ self.layout.operator(Export_pc2.bl_idname, text="Pointcache (.pc2)")
+
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ bpy.types.INFO_MT_file_export.append(menu_func)
+ #bpy.types.VIEW3D_PT_tools_objectmode.prepend(menu_func)
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+ bpy.types.INFO_MT_file_export.remove(menu_func)
+ #bpy.types.VIEW3D_PT_tools_objectmode.remove(menu_func)
+
+if __name__ == "__main__":
+ register()
diff --git a/io_export_unreal_psk_psa.py b/io_export_unreal_psk_psa.py
new file mode 100644
index 00000000..e44d0cbf
--- /dev/null
+++ b/io_export_unreal_psk_psa.py
@@ -0,0 +1,2331 @@
+# ***** GPL LICENSE BLOCK *****
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+# All rights reserved.
+# ***** GPL LICENSE BLOCK *****
+
+bl_info = {
+ "name": "Export Unreal Engine Format(.psk/.psa)",
+ "author": "Darknet/Optimus_P-Fat/Active_Trash/Sinsoft/VendorX",
+ "version": (2, 3),
+ "blender": (2, 5, 7),
+ "api": 36079,
+ "location": "File > Export > Skeletal Mesh/Animation Data (.psk/.psa)",
+ "description": "Export Skeleletal Mesh/Animation Data",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/Import-Export/Unreal_psk_psa",
+ "tracker_url": "https://projects.blender.org/tracker/index.php?"\
+ "func=detail&aid=21366",
+ "category": "Import-Export"}
+
+"""
+-- Unreal Skeletal Mesh and Animation Export (.psk and .psa) export script v0.0.1 --<br>
+
+- NOTES:
+- This script Exports To Unreal's PSK and PSA file formats for Skeletal Meshes and Animations. <br>
+- This script DOES NOT support vertex animation! These require completely different file formats. <br>
+
+- v0.0.1
+- Initial version
+
+- v0.0.2
+- This version adds support for more than one material index!
+
+[ - Edit by: Darknet
+- v0.0.3 - v0.0.12
+- This will work on UT3 and it is a stable version that work with vehicle for testing.
+- Main Bone fix no dummy needed to be there.
+- Just bone issues position, rotation, and offset for psk.
+- The armature bone position, rotation, and the offset of the bone is fix. It was to deal with skeleton mesh export for psk.
+- Animation is fix for position, offset, rotation bone support one rotation direction when armature build.
+- It will convert your mesh into triangular when exporting to psk file.
+- Did not work with psa export yet.
+
+- v0.0.13
+- The animation will support different bone rotations when exporting the animation.
+
+- v0.0.14
+- Fixed Action set keys frames when there is no pose keys and it will ignore it.
+
+- v0.0.15
+- Fixed multiple objects when exporting to psk. Select one mesh to export to psk.
+- ]
+
+- v0.1.1
+- Blender 2.50 svn (Support)
+
+Credit to:
+- export_cal3d.py (Position of the Bones Format)
+- blender2md5.py (Animation Translation Format)
+- export_obj.py (Blender 2.5/Python 3.x Format)
+
+- freenode #blendercoder -> user -> ideasman42
+
+- Give Credit to those who work on this script.
+
+- http://sinsoft.com
+"""
+
+import os
+import time
+import datetime
+import bpy
+import mathutils
+import random
+import operator
+import sys
+
+
+from struct import pack, calcsize
+
+# REFERENCE MATERIAL JUST IN CASE:
+#
+# U = x / sqrt(x^2 + y^2 + z^2)
+# V = y / sqrt(x^2 + y^2 + z^2)
+#
+# Triangles specified counter-clockwise for front face
+#
+#defines for sizeofs
+SIZE_FQUAT = 16
+SIZE_FVECTOR = 12
+SIZE_VJOINTPOS = 44
+SIZE_ANIMINFOBINARY = 168
+SIZE_VCHUNKHEADER = 32
+SIZE_VMATERIAL = 88
+SIZE_VBONE = 120
+SIZE_FNAMEDBONEBINARY = 120
+SIZE_VRAWBONEINFLUENCE = 12
+SIZE_VQUATANIMKEY = 32
+SIZE_VVERTEX = 16
+SIZE_VPOINT = 12
+SIZE_VTRIANGLE = 12
+MaterialName = []
+
+# ======================================================================
+# TODO: remove this 1am hack
+nbone = 0
+bDeleteMergeMesh = False
+exportmessage = "Export Finish"
+
+########################################################################
+# Generic Object->Integer mapping
+# the object must be usable as a dictionary key
+class ObjMap:
+ def __init__(self):
+ self.dict = {}
+ self.next = 0
+ def get(self, obj):
+ if obj in self.dict:
+ return self.dict[obj]
+ else:
+ id = self.next
+ self.next = self.next + 1
+ self.dict[obj] = id
+ return id
+
+ def items(self):
+ getval = operator.itemgetter(0)
+ getkey = operator.itemgetter(1)
+ return map(getval, sorted(self.dict.items(), key=getkey))
+
+########################################################################
+# RG - UNREAL DATA STRUCTS - CONVERTED FROM C STRUCTS GIVEN ON UDN SITE
+# provided here: http://udn.epicgames.com/Two/BinaryFormatSpecifications.html
+# updated UDK (Unreal Engine 3): http://udn.epicgames.com/Three/BinaryFormatSpecifications.html
+class FQuat:
+ def __init__(self):
+ self.X = 0.0
+ self.Y = 0.0
+ self.Z = 0.0
+ self.W = 1.0
+
+ def dump(self):
+ data = pack('ffff', self.X, self.Y, self.Z, self.W)
+ return data
+
+ def __cmp__(self, other):
+ return cmp(self.X, other.X) \
+ or cmp(self.Y, other.Y) \
+ or cmp(self.Z, other.Z) \
+ or cmp(self.W, other.W)
+
+ def __hash__(self):
+ return hash(self.X) ^ hash(self.Y) ^ hash(self.Z) ^ hash(self.W)
+
+ def __str__(self):
+ return "[%f,%f,%f,%f](FQuat)" % (self.X, self.Y, self.Z, self.W)
+
+class FVector(object):
+ def __init__(self, X=0.0, Y=0.0, Z=0.0):
+ self.X = X
+ self.Y = Y
+ self.Z = Z
+
+ def dump(self):
+ data = pack('fff', self.X, self.Y, self.Z)
+ return data
+
+ def __cmp__(self, other):
+ return cmp(self.X, other.X) \
+ or cmp(self.Y, other.Y) \
+ or cmp(self.Z, other.Z)
+
+ def _key(self):
+ return (type(self).__name__, self.X, self.Y, self.Z)
+
+ def __hash__(self):
+ return hash(self._key())
+
+ def __eq__(self, other):
+ if not hasattr(other, '_key'):
+ return False
+ return self._key() == other._key()
+
+ def dot(self, other):
+ return self.X * other.X + self.Y * other.Y + self.Z * other.Z
+
+ def cross(self, other):
+ return FVector(self.Y * other.Z - self.Z * other.Y,
+ self.Z * other.X - self.X * other.Z,
+ self.X * other.Y - self.Y * other.X)
+
+ def sub(self, other):
+ return FVector(self.X - other.X,
+ self.Y - other.Y,
+ self.Z - other.Z)
+
+class VJointPos:
+ def __init__(self):
+ self.Orientation = FQuat()
+ self.Position = FVector()
+ self.Length = 0.0
+ self.XSize = 0.0
+ self.YSize = 0.0
+ self.ZSize = 0.0
+
+ def dump(self):
+ data = self.Orientation.dump() + self.Position.dump() + pack('4f', self.Length, self.XSize, self.YSize, self.ZSize)
+ return data
+
+class AnimInfoBinary:
+ def __init__(self):
+ self.Name = "" # length=64
+ self.Group = "" # length=64
+ self.TotalBones = 0
+ self.RootInclude = 0
+ self.KeyCompressionStyle = 0
+ self.KeyQuotum = 0
+ self.KeyPrediction = 0.0
+ self.TrackTime = 0.0
+ self.AnimRate = 0.0
+ self.StartBone = 0
+ self.FirstRawFrame = 0
+ self.NumRawFrames = 0
+
+ def dump(self):
+ data = pack('64s64siiiifffiii', str.encode(self.Name), str.encode(self.Group), self.TotalBones, self.RootInclude, self.KeyCompressionStyle, self.KeyQuotum, self.KeyPrediction, self.TrackTime, self.AnimRate, self.StartBone, self.FirstRawFrame, self.NumRawFrames)
+ return data
+
+class VChunkHeader:
+ def __init__(self, name, type_size):
+ self.ChunkID = str.encode(name) # length=20
+ self.TypeFlag = 1999801 # special value
+ self.DataSize = type_size
+ self.DataCount = 0
+
+ def dump(self):
+ data = pack('20siii', self.ChunkID, self.TypeFlag, self.DataSize, self.DataCount)
+ return data
+
+class VMaterial:
+ def __init__(self):
+ self.MaterialName = "" # length=64
+ self.TextureIndex = 0
+ self.PolyFlags = 0 # DWORD
+ self.AuxMaterial = 0
+ self.AuxFlags = 0 # DWORD
+ self.LodBias = 0
+ self.LodStyle = 0
+
+ def dump(self):
+ data = pack('64siLiLii', str.encode(self.MaterialName), self.TextureIndex, self.PolyFlags, self.AuxMaterial, self.AuxFlags, self.LodBias, self.LodStyle)
+ return data
+
+class VBone:
+ def __init__(self):
+ self.Name = "" # length = 64
+ self.Flags = 0 # DWORD
+ self.NumChildren = 0
+ self.ParentIndex = 0
+ self.BonePos = VJointPos()
+
+ def dump(self):
+ data = pack('64sLii', str.encode(self.Name), self.Flags, self.NumChildren, self.ParentIndex) + self.BonePos.dump()
+ return data
+
+#same as above - whatever - this is how Epic does it...
+class FNamedBoneBinary:
+ def __init__(self):
+ self.Name = "" # length = 64
+ self.Flags = 0 # DWORD
+ self.NumChildren = 0
+ self.ParentIndex = 0
+ self.BonePos = VJointPos()
+
+ self.IsRealBone = 0 # this is set to 1 when the bone is actually a bone in the mesh and not a dummy
+
+ def dump(self):
+ data = pack('64sLii', str.encode(self.Name), self.Flags, self.NumChildren, self.ParentIndex) + self.BonePos.dump()
+ return data
+
+class VRawBoneInfluence:
+ def __init__(self):
+ self.Weight = 0.0
+ self.PointIndex = 0
+ self.BoneIndex = 0
+
+ def dump(self):
+ data = pack('fii', self.Weight, self.PointIndex, self.BoneIndex)
+ return data
+
+class VQuatAnimKey:
+ def __init__(self):
+ self.Position = FVector()
+ self.Orientation = FQuat()
+ self.Time = 0.0
+
+ def dump(self):
+ data = self.Position.dump() + self.Orientation.dump() + pack('f', self.Time)
+ return data
+
+class VVertex(object):
+ def __init__(self):
+ self.PointIndex = 0 # WORD
+ self.U = 0.0
+ self.V = 0.0
+ self.MatIndex = 0 #BYTE
+ self.Reserved = 0 #BYTE
+
+ def dump(self):
+ data = pack('HHffBBH', self.PointIndex, 0, self.U, self.V, self.MatIndex, self.Reserved, 0)
+ return data
+
+ def __cmp__(self, other):
+ return cmp(self.PointIndex, other.PointIndex) \
+ or cmp(self.U, other.U) \
+ or cmp(self.V, other.V) \
+ or cmp(self.MatIndex, other.MatIndex) \
+ or cmp(self.Reserved, other.Reserved)
+
+ def _key(self):
+ return (type(self).__name__,self.PointIndex, self.U, self.V,self.MatIndex,self.Reserved)
+
+ def __hash__(self):
+ return hash(self._key())
+
+ def __eq__(self, other):
+ if not hasattr(other, '_key'):
+ return False
+ return self._key() == other._key()
+
+class VPoint(object):
+ def __init__(self):
+ self.Point = FVector()
+
+ def dump(self):
+ return self.Point.dump()
+
+ def __cmp__(self, other):
+ return cmp(self.Point, other.Point)
+
+ def _key(self):
+ return (type(self).__name__, self.Point)
+
+ def __hash__(self):
+ return hash(self._key())
+
+ def __eq__(self, other):
+ if not hasattr(other, '_key'):
+ return False
+ return self._key() == other._key()
+
+class VTriangle:
+ def __init__(self):
+ self.WedgeIndex0 = 0 # WORD
+ self.WedgeIndex1 = 0 # WORD
+ self.WedgeIndex2 = 0 # WORD
+ self.MatIndex = 0 # BYTE
+ self.AuxMatIndex = 0 # BYTE
+ self.SmoothingGroups = 0 # DWORD
+
+ def dump(self):
+ data = pack('HHHBBL', self.WedgeIndex0, self.WedgeIndex1, self.WedgeIndex2, self.MatIndex, self.AuxMatIndex, self.SmoothingGroups)
+ return data
+
+# END UNREAL DATA STRUCTS
+########################################################################
+
+########################################################################
+#RG - helper class to handle the normal way the UT files are stored
+#as sections consisting of a header and then a list of data structures
+class FileSection:
+ def __init__(self, name, type_size):
+ self.Header = VChunkHeader(name, type_size)
+ self.Data = [] # list of datatypes
+
+ def dump(self):
+ data = self.Header.dump()
+ for i in range(len(self.Data)):
+ data = data + self.Data[i].dump()
+ return data
+
+ def UpdateHeader(self):
+ self.Header.DataCount = len(self.Data)
+
+class PSKFile:
+ def __init__(self):
+ self.GeneralHeader = VChunkHeader("ACTRHEAD", 0)
+ self.Points = FileSection("PNTS0000", SIZE_VPOINT) #VPoint
+ self.Wedges = FileSection("VTXW0000", SIZE_VVERTEX) #VVertex
+ self.Faces = FileSection("FACE0000", SIZE_VTRIANGLE) #VTriangle
+ self.Materials = FileSection("MATT0000", SIZE_VMATERIAL) #VMaterial
+ self.Bones = FileSection("REFSKELT", SIZE_VBONE) #VBone
+ self.Influences = FileSection("RAWWEIGHTS", SIZE_VRAWBONEINFLUENCE) #VRawBoneInfluence
+
+ #RG - this mapping is not dumped, but is used internally to store the new point indices
+ # for vertex groups calculated during the mesh dump, so they can be used again
+ # to dump bone influences during the armature dump
+ #
+ # the key in this dictionary is the VertexGroup/Bone Name, and the value
+ # is a list of tuples containing the new point index and the weight, in that order
+ #
+ # Layout:
+ # { groupname : [ (index, weight), ... ], ... }
+ #
+ # example:
+ # { 'MyVertexGroup' : [ (0, 1.0), (5, 1.0), (3, 0.5) ] , 'OtherGroup' : [(2, 1.0)] }
+
+ self.VertexGroups = {}
+
+ def AddPoint(self, p):
+ #print ('AddPoint')
+ self.Points.Data.append(p)
+
+ def AddWedge(self, w):
+ #print ('AddWedge')
+ self.Wedges.Data.append(w)
+
+ def AddFace(self, f):
+ #print ('AddFace')
+ self.Faces.Data.append(f)
+
+ def AddMaterial(self, m):
+ #print ('AddMaterial')
+ self.Materials.Data.append(m)
+
+ def AddBone(self, b):
+ #print ('AddBone [%s]: Position: (x=%f, y=%f, z=%f) Rotation=(%f,%f,%f,%f)' % (b.Name, b.BonePos.Position.X, b.BonePos.Position.Y, b.BonePos.Position.Z, b.BonePos.Orientation.X,b.BonePos.Orientation.Y,b.BonePos.Orientation.Z,b.BonePos.Orientation.W))
+ self.Bones.Data.append(b)
+
+ def AddInfluence(self, i):
+ #print ('AddInfluence')
+ self.Influences.Data.append(i)
+
+ def UpdateHeaders(self):
+ self.Points.UpdateHeader()
+ self.Wedges.UpdateHeader()
+ self.Faces.UpdateHeader()
+ self.Materials.UpdateHeader()
+ self.Bones.UpdateHeader()
+ self.Influences.UpdateHeader()
+
+ def dump(self):
+ self.UpdateHeaders()
+ data = self.GeneralHeader.dump() + self.Points.dump() + self.Wedges.dump() + self.Faces.dump() + self.Materials.dump() + self.Bones.dump() + self.Influences.dump()
+ return data
+
+ def GetMatByIndex(self, mat_index):
+ if mat_index >= 0 and len(self.Materials.Data) > mat_index:
+ return self.Materials.Data[mat_index]
+ else:
+ m = VMaterial()
+ # modified by VendorX
+ m.MaterialName = MaterialName[mat_index]
+ self.AddMaterial(m)
+ return m
+
+ def PrintOut(self):
+ print ("--- PSK FILE EXPORTED ---")
+ print ('point count: %i' % len(self.Points.Data))
+ print ('wedge count: %i' % len(self.Wedges.Data))
+ print ('face count: %i' % len(self.Faces.Data))
+ print ('material count: %i' % len(self.Materials.Data))
+ print ('bone count: %i' % len(self.Bones.Data))
+ print ('inlfuence count: %i' % len(self.Influences.Data))
+ print ('-------------------------')
+
+# PSA FILE NOTES FROM UDN:
+#
+# The raw key array holds all the keys for all the bones in all the specified sequences,
+# organized as follows:
+# For each AnimInfoBinary's sequence there are [Number of bones] times [Number of frames keys]
+# in the VQuatAnimKeys, laid out as tracks of [numframes] keys for each bone in the order of
+# the bones as defined in the array of FnamedBoneBinary in the PSA.
+#
+# Once the data from the PSK (now digested into native skeletal mesh) and PSA (digested into
+# a native animation object containing one or more sequences) are associated together at runtime,
+# bones are linked up by name. Any bone in a skeleton (from the PSK) that finds no partner in
+# the animation sequence (from the PSA) will assume its reference pose stance ( as defined in
+# the offsets & rotations that are in the VBones making up the reference skeleton from the PSK)
+
+class PSAFile:
+ def __init__(self):
+ self.GeneralHeader = VChunkHeader("ANIMHEAD", 0)
+ self.Bones = FileSection("BONENAMES", SIZE_FNAMEDBONEBINARY) #FNamedBoneBinary
+ self.Animations = FileSection("ANIMINFO", SIZE_ANIMINFOBINARY) #AnimInfoBinary
+ self.RawKeys = FileSection("ANIMKEYS", SIZE_VQUATANIMKEY) #VQuatAnimKey
+
+ # this will take the format of key=Bone Name, value = (BoneIndex, Bone Object)
+ # THIS IS NOT DUMPED
+ self.BoneLookup = {}
+
+ def dump(self):
+ data = self.Generalheader.dump() + self.Bones.dump() + self.Animations.dump() + self.RawKeys.dump()
+ return data
+
+ def AddBone(self, b):
+ #LOUD
+ #print "AddBone: " + b.Name
+ self.Bones.Data.append(b)
+
+ def AddAnimation(self, a):
+ #LOUD
+ #print "AddAnimation: %s, TotalBones: %i, AnimRate: %f, NumRawFrames: %i, TrackTime: %f" % (a.Name, a.TotalBones, a.AnimRate, a.NumRawFrames, a.TrackTime)
+ self.Animations.Data.append(a)
+
+ def AddRawKey(self, k):
+ #LOUD
+ #print "AddRawKey [%i]: Time: %f, Quat: x=%f, y=%f, z=%f, w=%f, Position: x=%f, y=%f, z=%f" % (len(self.RawKeys.Data), k.Time, k.Orientation.X, k.Orientation.Y, k.Orientation.Z, k.Orientation.W, k.Position.X, k.Position.Y, k.Position.Z)
+ self.RawKeys.Data.append(k)
+
+ def UpdateHeaders(self):
+ self.Bones.UpdateHeader()
+ self.Animations.UpdateHeader()
+ self.RawKeys.UpdateHeader()
+
+ def GetBoneByIndex(self, bone_index):
+ if bone_index >= 0 and len(self.Bones.Data) > bone_index:
+ return self.Bones.Data[bone_index]
+
+ def IsEmpty(self):
+ return (len(self.Bones.Data) == 0 or len(self.Animations.Data) == 0)
+
+ def StoreBone(self, b):
+ self.BoneLookup[b.Name] = [-1, b]
+
+ def UseBone(self, bone_name):
+ if bone_name in self.BoneLookup:
+ bone_data = self.BoneLookup[bone_name]
+
+ if bone_data[0] == -1:
+ bone_data[0] = len(self.Bones.Data)
+ self.AddBone(bone_data[1])
+ #self.Bones.Data.append(bone_data[1])
+
+ return bone_data[0]
+
+ def GetBoneByName(self, bone_name):
+ if bone_name in self.BoneLookup:
+ bone_data = self.BoneLookup[bone_name]
+ return bone_data[1]
+
+ def GetBoneIndex(self, bone_name):
+ if bone_name in self.BoneLookup:
+ bone_data = self.BoneLookup[bone_name]
+ return bone_data[0]
+
+ def dump(self):
+ self.UpdateHeaders()
+ data = self.GeneralHeader.dump() + self.Bones.dump() + self.Animations.dump() + self.RawKeys.dump()
+ return data
+
+ def PrintOut(self):
+ print ('--- PSA FILE EXPORTED ---')
+ print ('bone count: %i' % len(self.Bones.Data))
+ print ('animation count: %i' % len(self.Animations.Data))
+ print ('rawkey count: %i' % len(self.RawKeys.Data))
+ print ('-------------------------')
+
+####################################
+# helpers to create bone structs
+def make_vbone(name, parent_index, child_count, orientation_quat, position_vect):
+ bone = VBone()
+ bone.Name = name
+ bone.ParentIndex = parent_index
+ bone.NumChildren = child_count
+ bone.BonePos.Orientation = orientation_quat
+ bone.BonePos.Position.X = position_vect.x
+ bone.BonePos.Position.Y = position_vect.y
+ bone.BonePos.Position.Z = position_vect.z
+
+ #these values seem to be ignored?
+ #bone.BonePos.Length = tail.length
+ #bone.BonePos.XSize = tail.x
+ #bone.BonePos.YSize = tail.y
+ #bone.BonePos.ZSize = tail.z
+
+ return bone
+
+def make_namedbonebinary(name, parent_index, child_count, orientation_quat, position_vect, is_real):
+ bone = FNamedBoneBinary()
+ bone.Name = name
+ bone.ParentIndex = parent_index
+ bone.NumChildren = child_count
+ bone.BonePos.Orientation = orientation_quat
+ bone.BonePos.Position.X = position_vect.x
+ bone.BonePos.Position.Y = position_vect.y
+ bone.BonePos.Position.Z = position_vect.z
+ bone.IsRealBone = is_real
+ return bone
+
+##################################################
+#RG - check to make sure face isn't a line
+#The face has to be triangle not a line
+def is_1d_face(blender_face,mesh):
+ #ID Vertex of id point
+ v0 = blender_face.vertices[0]
+ v1 = blender_face.vertices[1]
+ v2 = blender_face.vertices[2]
+
+ return (mesh.vertices[v0].co == mesh.vertices[v1].co or \
+ mesh.vertices[v1].co == mesh.vertices[v2].co or \
+ mesh.vertices[v2].co == mesh.vertices[v0].co)
+ return False
+
+##################################################
+# http://en.wikibooks.org/wiki/Blender_3D:_Blending_Into_Python/Cookbook#Triangulate_NMesh
+#blender 2.50 format using the Operators/command convert the mesh to tri mesh
+def triangulateNMesh(object):
+ global bDeleteMergeMesh
+ bneedtri = False
+ scene = bpy.context.scene
+ bpy.ops.object.mode_set(mode='OBJECT')
+ for i in scene.objects: i.select = False #deselect all objects
+ object.select = True
+ scene.objects.active = object #set the mesh object to current
+ bpy.ops.object.mode_set(mode='OBJECT')
+ print("Checking mesh if needs to convert quad to Tri...")
+ for face in object.data.faces:
+ if (len(face.vertices) > 3):
+ bneedtri = True
+ break
+
+ bpy.ops.object.mode_set(mode='OBJECT')
+ if bneedtri == True:
+ print("Converting quad to tri mesh...")
+ me_da = object.data.copy() #copy data
+ me_ob = object.copy() #copy object
+ #note two copy two types else it will use the current data or mesh
+ me_ob.data = me_da
+ bpy.context.scene.objects.link(me_ob)#link the object to the scene #current object location
+ for i in scene.objects: i.select = False #deselect all objects
+ me_ob.select = True
+ scene.objects.active = me_ob #set the mesh object to current
+ bpy.ops.object.mode_set(mode='EDIT') #Operators
+ bpy.ops.mesh.select_all(action='SELECT')#select all the face/vertex/edge
+ bpy.ops.mesh.quads_convert_to_tris() #Operators
+ bpy.context.scene.update()
+ bpy.ops.object.mode_set(mode='OBJECT') # set it in object
+ bpy.context.scene.unrealtriangulatebool = True
+ print("Triangulate Mesh Done!")
+ if bDeleteMergeMesh == True:
+ print("Remove Merge tmp Mesh [ " ,object.name, " ] from scene!" )
+ bpy.ops.object.mode_set(mode='OBJECT') # set it in object
+ bpy.context.scene.objects.unlink(object)
+ else:
+ bpy.context.scene.unrealtriangulatebool = False
+ print("No need to convert tri mesh.")
+ me_ob = object
+ return me_ob
+
+# Actual object parsing functions
+def parse_meshes(blender_meshes, psk_file):
+ #this is use to call the bone name and the index array for group index matches
+ global bDeleteMergeMesh
+ print ("----- parsing meshes -----")
+ print("Number of Object Meshes:",len(blender_meshes))
+ for current_obj in blender_meshes: #number of mesh that should be one mesh here
+ #bpy.ops.object.mode_set(mode='EDIT')
+ current_obj = triangulateNMesh(current_obj)
+ #print(dir(current_obj))
+ print("Mesh Name:",current_obj.name)
+ current_mesh = current_obj.data
+
+ #collect a list of the material names
+ if len(current_obj.material_slots) > 0:
+ counter = 0
+ while counter < len(current_obj.material_slots):
+ MaterialName.append(current_obj.material_slots[counter].name)
+ print("Material Name:",current_obj.material_slots[counter].name)
+ #create the current material
+ psk_file.GetMatByIndex(counter)
+ #print("materials: ",MaterialName[counter])
+ counter += 1
+ # object_mat = current_obj.materials[0]
+ object_material_index = current_obj.active_material_index
+
+ points = ObjMap()
+ wedges = ObjMap()
+
+ discarded_face_count = 0
+ print (" -- Dumping Mesh Faces -- LEN:", len(current_mesh.faces))
+ for current_face in current_mesh.faces:
+ #print ' -- Dumping UVs -- '
+ #print current_face.uv_textures
+ # modified by VendorX
+ object_material_index = current_face.material_index
+
+ if len(current_face.vertices) != 3:
+ raise RuntimeError("Non-triangular face (%i)" % len(current_face.vertices))
+
+ #No Triangulate Yet
+ # if len(current_face.vertices) != 3:
+ # raise RuntimeError("Non-triangular face (%i)" % len(current_face.vertices))
+ # #TODO: add two fake faces made of triangles?
+
+ #RG - apparently blender sometimes has problems when you do quad to triangle
+ # conversion, and ends up creating faces that have only TWO points -
+ # one of the points is simply in the vertex list for the face twice.
+ # This is bad, since we can't get a real face normal for a LINE, we need
+ # a plane for this. So, before we add the face to the list of real faces,
+ # ensure that the face is actually a plane, and not a line. If it is not
+ # planar, just discard it and notify the user in the console after we're
+ # done dumping the rest of the faces
+
+ if not is_1d_face(current_face,current_mesh):
+ #print("faces")
+ wedge_list = []
+ vect_list = []
+
+ #get or create the current material
+ m = psk_file.GetMatByIndex(object_material_index)
+
+ face_index = current_face.index
+ has_UV = False
+ faceUV = None
+
+ if len(current_mesh.uv_textures) > 0:
+ has_UV = True
+ #print("face index: ",face_index)
+ #faceUV = current_mesh.uv_textures.active.data[face_index]#UVs for current face
+ #faceUV = current_mesh.uv_textures.active.data[0]#UVs for current face
+ #print(face_index,"<[FACE NUMBER")
+ uv_layer = current_mesh.uv_textures.active
+ faceUV = uv_layer.data[face_index]
+ #print("============================")
+ #size(data) is number of texture faces. Each face has UVs
+ #print("DATA face uv: ",len(faceUV.uv), " >> ",(faceUV.uv[0][0]))
+
+ for i in range(3):
+ vert_index = current_face.vertices[i]
+ vert = current_mesh.vertices[vert_index]
+ uv = []
+ #assumes 3 UVs Per face (for now).
+ if (has_UV):
+ if len(faceUV.uv) != 3:
+ print ("WARNING: Current face is missing UV coordinates - writing 0,0...")
+ print ("WARNING: Face has more than 3 UVs - writing 0,0...")
+ uv = [0.0, 0.0]
+ else:
+ #uv.append(faceUV.uv[i][0])
+ #uv.append(faceUV.uv[i][1])
+ uv = [faceUV.uv[i][0],faceUV.uv[i][1]] #OR bottom works better # 24 for cube
+ #uv = list(faceUV.uv[i]) #30 just cube
+ else:
+ #print ("No UVs?")
+ uv = [0.0, 0.0]
+ #print("UV >",uv)
+ #uv = [0.0, 0.0] #over ride uv that is not fixed
+ #print(uv)
+ #flip V coordinate because UEd requires it and DOESN'T flip it on its own like it
+ #does with the mesh Y coordinates.
+ #this is otherwise known as MAGIC-2
+ uv[1] = 1.0 - uv[1]
+
+ #deal with the min and max value
+ #if value is over the set limit it will null the uv texture
+ if (uv[0] > 1):
+ uv[0] = 1
+ if (uv[0] < 0):
+ uv[0] = 0
+ if (uv[1] > 1):
+ uv[1] = 1
+ if (uv[1] < 0):
+ uv[1] = 0
+
+
+ # RE - Append untransformed vector (for normal calc below)
+ # TODO: convert to Blender.Mathutils
+ vect_list.append(FVector(vert.co.x, vert.co.y, vert.co.z))
+
+ # Transform position for export
+ #vpos = vert.co * object_material_index
+ vpos = vert.co * current_obj.matrix_local
+ # Create the point
+ p = VPoint()
+ p.Point.X = vpos.x
+ p.Point.Y = vpos.y
+ p.Point.Z = vpos.z
+
+ # Create the wedge
+ w = VVertex()
+ w.MatIndex = object_material_index
+ w.PointIndex = points.get(p) # get index from map
+ #Set UV TEXTURE
+ w.U = uv[0]
+ w.V = uv[1]
+ index_wedge = wedges.get(w)
+ wedge_list.append(index_wedge)
+
+ #print results
+ #print 'result PointIndex=%i, U=%f, V=%f, wedge_index=%i' % (
+ # w.PointIndex,
+ # w.U,
+ # w.V,
+ # wedge_index)
+
+ # Determine face vertex order
+ # get normal from blender
+ no = current_face.normal
+
+ # TODO: convert to Blender.Mathutils
+ # convert to FVector
+ norm = FVector(no[0], no[1], no[2])
+
+ # Calculate the normal of the face in blender order
+ tnorm = vect_list[1].sub(vect_list[0]).cross(vect_list[2].sub(vect_list[1]))
+
+ # RE - dot the normal from blender order against the blender normal
+ # this gives the product of the two vectors' lengths along the blender normal axis
+ # all that matters is the sign
+ dot = norm.dot(tnorm)
+
+ # print results
+ #print 'face norm: (%f,%f,%f), tnorm=(%f,%f,%f), dot=%f' % (
+ # norm.X, norm.Y, norm.Z,
+ # tnorm.X, tnorm.Y, tnorm.Z,
+ # dot)
+
+ tri = VTriangle()
+ # RE - magic: if the dot product above > 0, order the vertices 2, 1, 0
+ # if the dot product above < 0, order the vertices 0, 1, 2
+ # if the dot product is 0, then blender's normal is coplanar with the face
+ # and we cannot deduce which side of the face is the outside of the mesh
+ if (dot > 0):
+ (tri.WedgeIndex2, tri.WedgeIndex1, tri.WedgeIndex0) = wedge_list
+ elif (dot < 0):
+ (tri.WedgeIndex0, tri.WedgeIndex1, tri.WedgeIndex2) = wedge_list
+ else:
+ dindex0 = current_face.vertices[0];
+ dindex1 = current_face.vertices[1];
+ dindex2 = current_face.vertices[2];
+
+ current_mesh.vertices[dindex0].select = True
+ current_mesh.vertices[dindex1].select = True
+ current_mesh.vertices[dindex2].select = True
+
+ raise RuntimeError("normal vector coplanar with face! points:", current_mesh.vertices[dindex0].co, current_mesh.vertices[dindex1].co, current_mesh.vertices[dindex2].co)
+ #print(dir(current_face))
+ current_face.select = True
+ #print("smooth:",(current_face.use_smooth))
+ #not sure if this right
+ #tri.SmoothingGroups
+ if current_face.use_smooth == True:
+ tri.SmoothingGroups = 1
+ else:
+ tri.SmoothingGroups = 0
+
+ tri.MatIndex = object_material_index
+ #print(tri)
+ psk_file.AddFace(tri)
+ else:
+ discarded_face_count = discarded_face_count + 1
+
+ print (" -- Dumping Mesh Points -- LEN:",len(points.dict))
+ for point in points.items():
+ psk_file.AddPoint(point)
+ if len(points.dict) > 32767:
+ raise RuntimeError("Vertex point reach max limited 32767 in pack data. Your",len(points.dict))
+ print (" -- Dumping Mesh Wedge -- LEN:",len(wedges.dict))
+
+ for wedge in wedges.items():
+ psk_file.AddWedge(wedge)
+
+ #RG - if we happened upon any non-planar faces above that we've discarded,
+ # just let the user know we discarded them here in case they want
+ # to investigate
+
+ if discarded_face_count > 0:
+ print ("INFO: Discarded %i non-planar faces." % (discarded_face_count))
+
+ #RG - walk through the vertex groups and find the indexes into the PSK points array
+ #for them, then store that index and the weight as a tuple in a new list of
+ #verts for the group that we can look up later by bone name, since Blender matches
+ #verts to bones for influences by having the VertexGroup named the same thing as
+ #the bone
+
+ #vertex group
+ for obvgroup in current_obj.vertex_groups:
+ #print("bone group build:",obvgroup.name)#print bone name
+ #print(dir(obvgroup))
+ vert_list = []
+ for current_vert in current_mesh.vertices:
+ #print("INDEX V:",current_vert.index)
+ vert_index = current_vert.index
+ for vgroup in current_vert.groups:#vertex groupd id
+ vert_weight = vgroup.weight
+ if(obvgroup.index == vgroup.group):
+ p = VPoint()
+ vpos = current_vert.co * current_obj.matrix_local
+ p.Point.X = vpos.x
+ p.Point.Y = vpos.y
+ p.Point.Z = vpos.z
+ #print(current_vert.co)
+ point_index = points.get(p) #point index
+ v_item = (point_index, vert_weight)
+ vert_list.append(v_item)
+ #bone name, [point id and weight]
+ #print("Add Vertex Group:",obvgroup.name, " No. Points:",len(vert_list))
+ psk_file.VertexGroups[obvgroup.name] = vert_list
+
+ #unrealtriangulatebool #this will remove the mesh from the scene
+ '''
+ if (bpy.context.scene.unrealtriangulatebool == True) and (bDeleteMergeMesh == True):
+ #if bDeleteMergeMesh == True:
+ # print("Removing merge mesh.")
+ print("Remove tmp Mesh [ " ,current_obj.name, " ] from scene >" ,(bpy.context.scene.unrealtriangulatebool ))
+ bpy.ops.object.mode_set(mode='OBJECT') # set it in object
+ bpy.context.scene.objects.unlink(current_obj)
+ el
+ '''
+ if bDeleteMergeMesh == True:
+ print("Remove Merge tmp Mesh [ " ,current_obj.name, " ] from scene >" ,(bpy.context.scene.unrealtriangulatebool ))
+ bpy.ops.object.mode_set(mode='OBJECT') # set it in object
+ bpy.context.scene.objects.unlink(current_obj)
+ elif bpy.context.scene.unrealtriangulatebool == True:
+ print("Remove tri tmp Mesh [ " ,current_obj.name, " ] from scene >" ,(bpy.context.scene.unrealtriangulatebool ))
+ bpy.ops.object.mode_set(mode='OBJECT') # set it in object
+ bpy.context.scene.objects.unlink(current_obj)
+ #if bDeleteMergeMesh == True:
+ #print("Remove merge Mesh [ " ,current_obj.name, " ] from scene")
+ #bpy.ops.object.mode_set(mode='OBJECT') # set it in object
+ #bpy.context.scene.objects.unlink(current_obj)
+
+def make_fquat(bquat):
+ quat = FQuat()
+ #flip handedness for UT = set x,y,z to negative (rotate in other direction)
+ quat.X = -bquat.x
+ quat.Y = -bquat.y
+ quat.Z = -bquat.z
+
+ quat.W = bquat.w
+ return quat
+
+def make_fquat_default(bquat):
+ quat = FQuat()
+ #print(dir(bquat))
+ quat.X = bquat.x
+ quat.Y = bquat.y
+ quat.Z = bquat.z
+
+ quat.W = bquat.w
+ return quat
+
def parse_bone(blender_bone, psk_file, psa_file, parent_id, is_root_bone, parent_matrix, parent_root):
    """Recursively dump one bone (and its children) into the PSK/PSA files.

    Writes a VBone into psk_file and a NamedBoneBinary into psa_file, then
    emits the vertex-weight influences collected during the mesh pass, and
    finally recurses into every child bone.

    blender_bone  -- the armature bone currently being exported
    parent_id     -- PSK index of this bone's parent (0 for the root)
    is_root_bone  -- legacy flag, only read by the disabled dummy-bone code
    parent_matrix -- the armature object's matrix_local (applied to roots)
    parent_root   -- root bone of this chain (updated when parent is None)

    Side effect: increments the module-global bone counter ``nbone``.
    """
    global nbone  # look it's evil!  (module-wide running bone index)

    # If bone does not have parent that means it is the root bone.
    if blender_bone.parent is None:
        parent_root = blender_bone

    child_count = len(blender_bone.children)
    # child of parent
    child_parent = blender_bone.parent

    if child_parent != None:
        print ("--Bone Name:",blender_bone.name ," parent:" , blender_bone.parent.name, "ID:", nbone)
    else:
        print ("--Bone Name:",blender_bone.name ," parent: None" , "ID:", nbone)

    if child_parent != None:
        # Non-root bone: orientation is the bone's own matrix (handedness
        # flipped for UT), position is the head offset expressed relative
        # to the parent bone's local space.
        quat_root = blender_bone.matrix
        quat = make_fquat(quat_root.to_quaternion())

        quat_parent = child_parent.matrix.to_quaternion().inverted()
        parent_head = child_parent.head * quat_parent
        parent_tail = child_parent.tail * quat_parent

        set_position = (parent_tail - parent_head) + blender_bone.head
    else:
        # ROOT BONE: bake in the ARMATURE OBJECT's location and rotation.
        set_position = blender_bone.head * parent_matrix #ARMATURE OBJECT Location
        rot_mat = blender_bone.matrix * parent_matrix.to_3x3() #ARMATURE OBJECT Rotation
        quat = make_fquat_default(rot_mat.to_quaternion())

    final_parent_id = parent_id

    #RG/RE -
    #if we are not separated by a small distance, create a dummy bone for the displacement
    #this is only needed for root bones, since UT assumes a connected skeleton, and from here
    #down the chain we just use "tail" as an endpoint
    #if(head.length > 0.001 and is_root_bone == 1):
    if(0):
        # Disabled dummy-bone path kept for reference; NOTE(review): it
        # references ``tail`` which is never defined in this scope.
        pb = make_vbone("dummy_" + blender_bone.name, parent_id, 1, FQuat(), tail)
        psk_file.AddBone(pb)
        pbb = make_namedbonebinary("dummy_" + blender_bone.name, parent_id, 1, FQuat(), tail, 0)
        psa_file.StoreBone(pbb)
        final_parent_id = nbone
        nbone = nbone + 1

    my_id = nbone

    pb = make_vbone(blender_bone.name, final_parent_id, child_count, quat, set_position)
    psk_file.AddBone(pb)
    pbb = make_namedbonebinary(blender_bone.name, final_parent_id, child_count, quat, set_position, 1)
    psa_file.StoreBone(pbb)

    nbone = nbone + 1

    #RG - dump influences for this bone - use the data we collected in the mesh dump phase
    # to map our bones to vertex groups
    if blender_bone.name in psk_file.VertexGroups:
        vertex_list = psk_file.VertexGroups[blender_bone.name]
        for vertex_data in vertex_list:
            # vertex_data is a (point_index, weight) pair recorded earlier.
            point_index = vertex_data[0]
            vertex_weight = vertex_data[1]
            influence = VRawBoneInfluence()
            influence.Weight = vertex_weight
            influence.BoneIndex = my_id
            influence.PointIndex = point_index
            psk_file.AddInfluence(influence)

    # Recursively dump child bones with this bone's PSK index as parent.
    mainparent = parent_matrix
    for current_child_bone in blender_bone.children:
        parse_bone(current_child_bone, psk_file, psa_file, my_id, 0, mainparent, parent_root)
+
def parse_armature(blender_armature, psk_file, psa_file):
    """Dump every armature's bone hierarchy into the PSK/PSA files.

    blender_armature -- list of armature objects to export
    psk_file, psa_file -- PSKFile / PSAFile accumulators passed to parse_bone

    Raises RuntimeError when an armature has fewer than two bones or more
    than one root (parent-less) bone — either condition crashes the Unreal
    editor on import.
    """
    print ("----- parsing armature -----")
    print ('blender_armature length: %i' % (len(blender_armature)))

    for current_obj in blender_armature:
        print ("Current Armature Name: " + current_obj.name)
        current_armature = current_obj.data

        # Unreal needs at least two bones in the skeleton.
        if len(current_armature.bones) == 0:
            raise RuntimeError("Warning add two bones else it will crash the unreal editor.")
        if len(current_armature.bones) == 1:
            raise RuntimeError("Warning add one more bone else it will crash the unreal editor.")

        # There must be exactly one root bone (a bone without a parent);
        # the recursive dump below starts from it.
        mainbonecount = sum(1 for b in current_armature.bones if b.parent is None)
        print("Main Bone", mainbonecount)
        if mainbonecount > 1:
            # Was RuntimeError("...:", count), which renders as a tuple;
            # format a single readable message instead.
            raise RuntimeError("There too many Main bones. Number main bones: %i" % mainbonecount)

        # Dump the single root bone; parse_bone recurses into children.
        for current_bone in current_armature.bones:
            if current_bone.parent is None:
                parse_bone(current_bone, psk_file, psa_file, 0, 0, current_obj.matrix_local, None)
                break
+
# get blender objects by type
def get_blender_objects(objects, intype):
    """Return the objects from *objects* whose ``type`` equals *intype*."""
    matches = []
    for candidate in objects:
        if candidate.type == intype:
            matches.append(candidate)
    return matches
+
#strips current extension (if any) from filename and replaces it with extension passed in
def make_filename_ext(filename, extension):
    """Return *filename* with its extension replaced by *extension*.

    The previous implementation cut at the FIRST dot (``filename.find('.')``),
    which mangled names like ``my.model.blend`` (-> ``my.psk``) and any path
    containing a dotted directory.  ``os.path.splitext`` removes only the
    final extension, and appends when there is none.
    """
    import os.path  # local import keeps this fix self-contained

    root, _old_ext = os.path.splitext(filename)
    return root + extension
+
# returns the quaternion Grassman product a*b
# this is the same as the rotation a(b(x))
# (ie. the same as B*A if A and B are matrices representing
# the rotations described by quaternions a and b)
def grassman(a, b):
    """Return the quaternion Grassman (Hamilton) product a*b."""
    # Spell out each component before constructing the result.
    w = a.w*b.w - a.x*b.x - a.y*b.y - a.z*b.z
    x = a.w*b.x + a.x*b.w + a.y*b.z - a.z*b.y
    y = a.w*b.y - a.x*b.z + a.y*b.w + a.z*b.x
    z = a.w*b.z + a.x*b.y - a.y*b.x + a.z*b.w
    return mathutils.Quaternion(w, x, y, z)
+
def parse_animation(blender_scene, blender_armatures, psa_file):
    """Walk scene frames and write pose keys for each exported action set.

    Two branches, selected by ``bpy.context.scene.UEActionSetSettings``:
    '1'/'2' exports all / user-selected actions of the (last) armature in
    *blender_armatures*; anything else exports only the armature's current
    action.  Each exported action produces one AnimInfoBinary plus one
    VQuatAnimKey per bone per frame in psa_file.
    """
    #to do list:
    #need to list the action sets
    #need to check if there animation
    #need to check if animation is has one frame then exit it
    print ('\n----- parsing animation -----')
    render_data = blender_scene.render
    bHaveAction = True

    anim_rate = render_data.fps

    print("==== Blender Settings ====")

    print ('Scene: %s Start Frame: %i, End Frame: %i' % (blender_scene.name, blender_scene.frame_start, blender_scene.frame_end))
    print ('Frames Per Sec: %i' % anim_rate)
    print ("Default FPS: 24" )

    cur_frame_index = 0
    # '1' = export all action sets, '2' = export the user-ticked ones.
    if (bpy.context.scene.UEActionSetSettings == '1') or (bpy.context.scene.UEActionSetSettings == '2'):
        print("Action Set(s) Settings Idx:",bpy.context.scene.UEActionSetSettings)
        print("[==== Action list ====]")

        print("Number of Action set(s):",len(bpy.data.actions))

        for action in bpy.data.actions:#current number action sets
            print("+Action Name:",action.name)
            print("Group Count:",len(action.groups))

        amatureobject = None #this is the armature set to none
        bonenames = [] #bone name of the armature bones list

        # NOTE(review): only the LAST armature in the list survives this
        # loop; multi-armature scenes export just one — confirm intended.
        for arm in blender_armatures:
            amatureobject = arm
        collection = amatureobject.myCollectionUEA #collection of the object
        print("\n[==== Armature Object ====]")
        if amatureobject != None:
            print("+Name:",amatureobject.name)
            print("+Number of bones:", len(amatureobject.pose.bones),"\n")
            for bone in amatureobject.pose.bones:
                bonenames.append(bone.name)

        for ActionNLA in bpy.data.actions:
            FoundAction = True
            if bpy.context.scene.UEActionSetSettings == '2':
                # In "Select" mode only actions ticked in the UI collection
                # are exported.
                for c in collection:
                    if c.name == ActionNLA.name:
                        if c.mybool == True:
                            FoundAction = True
                        else:
                            FoundAction = False
                        break
                if FoundAction == False:
                    print("========================================")
                    print("Skipping Action Set!",ActionNLA.name)
                    print("Action Group Count:", len(ActionNLA.groups))
                    print("Bone Group Count:", len(amatureobject.pose.bones))
                    print("========================================")
                    #break

            nobone = 0
            nomatchbone = 0

            baction = True
            # Check how many armature bone names appear among this action's
            # fcurve groups: nobone = matches, nomatchbone = misses.
            print("=================================")
            print("=================================")
            for abone in bonenames:
                bfound = False
                for group in ActionNLA.groups:
                    if abone == group.name:
                        nobone += 1
                        bfound = True
                        break
                if bfound == False:
                    nomatchbone += 1

            print("Armature Bones Count:",nobone , " Action Groups Counts:",len(ActionNLA.groups)," Left Out Count:",nomatchbone)
            #if the bones are less some missing bones that were added to the action group names than export this
            if (nobone <= len(ActionNLA.groups)) and (bpy.context.scene.unrealignoreactionmatchcount == True) :
                print("Ingore Action groups Count from Armature bones.")
                baction = True
            #if action groups matches the bones length and names matching the gourps do something
            elif ((len(ActionNLA.groups) == len(bonenames)) and (nobone == len(ActionNLA.groups))):
                baction = True
            else:
                print("Action Set match: Fail")
                baction = False

            if (baction == True) and (FoundAction == True):
                arm = amatureobject #set armature object
                if not arm.animation_data:
                    print("======================================")
                    print("Check Animation Data: None")
                    print("Armature has no animation, skipping...")
                    print("======================================")
                    break

                if not arm.animation_data.action:
                    print("======================================")
                    print("Check Action: None")
                    print("Armature has no animation, skipping...")
                    print("======================================")
                    break
                # Temporarily bind this action to the armature so frame_set
                # below evaluates its pose.
                arm.animation_data.action = ActionNLA
                bpy.context.scene.update()
                act = arm.animation_data.action
                action_name = act.name

                if not len(act.fcurves):
                    print("//===========================================================")
                    print("// None bone pose set keys for this action set... skipping...")
                    print("//===========================================================")
                    bHaveAction = False

                #this deal with action export control
                if bHaveAction == True:
                    print("[==== Action Set ====]")
                    print("Action Name:",action_name)

                    #look for min and max frame that current set keys
                    framemin, framemax = act.frame_range
                    start_frame = int(framemin)
                    end_frame = int(framemax)
                    scene_frames = range(start_frame, end_frame+1)
                    frame_count = len(scene_frames)
                    #===================================================
                    anim = AnimInfoBinary()
                    anim.Name = action_name
                    anim.Group = "" #what is group?
                    anim.NumRawFrames = frame_count
                    anim.AnimRate = anim_rate
                    anim.FirstRawFrame = cur_frame_index
                    #===================================================
                    count_previous_keys = len(psa_file.RawKeys.Data)
                    print("Frame Key Set Count:",frame_count, "Total Frame:",frame_count)
                    unique_bone_indexes = {}
                    # bone lookup table
                    bones_lookup = {}

                    #build bone node for animation keys needed to be set
                    for bone in arm.data.bones:
                        bones_lookup[bone.name] = bone
                    frame_count = len(scene_frames)
                    pose_data = arm.pose

                    #these must be ordered in the order the bones will show up in the PSA file!
                    ordered_bones = {}
                    ordered_bones = sorted([(psa_file.UseBone(x.name), x) for x in pose_data.bones], key=operator.itemgetter(0))

                    #############################
                    # ORDERED FRAME, BONE
                    for i in range(frame_count):
                        frame = scene_frames[i]

                        # next_frame is used for the key's duration; -1 marks
                        # the last frame (duration defaults to one frame).
                        if frame_count > i+1:
                            next_frame = scene_frames[i+1]
                        else:
                            next_frame = -1

                        #frame start from 1 as number one from blender
                        blender_scene.frame_set(frame)

                        cur_frame_index = cur_frame_index + 1
                        for bone_data in ordered_bones:
                            bone_index = bone_data[0]
                            pose_bone = bone_data[1]

                            blender_bone = bones_lookup[pose_bone.name]

                            #just need the total unique bones used, later for this AnimInfoBinary
                            unique_bone_indexes[bone_index] = bone_index
                            head = pose_bone.head

                            posebonemat = mathutils.Matrix(pose_bone.matrix)

                            # Express the pose relative to the parent bone
                            # (PSA keys are parent-relative).
                            parent_pose = pose_bone.parent

                            if parent_pose != None:
                                parentposemat = mathutils.Matrix(parent_pose.matrix)
                                posebonemat = parentposemat.inverted() * posebonemat

                            head = posebonemat.to_translation()
                            quat = posebonemat.to_quaternion().normalized()

                            vkey = VQuatAnimKey()
                            vkey.Position.X = head.x
                            vkey.Position.Y = head.y
                            vkey.Position.Z = head.z

                            # Handedness flip for child bones only; roots
                            # keep Blender's orientation.
                            if parent_pose != None:
                                quat = make_fquat(quat)
                            else:
                                quat = make_fquat_default(quat)

                            vkey.Orientation = quat

                            #time from now till next frame = diff / framesPerSec
                            if next_frame >= 0:
                                diff = next_frame - frame
                            else:
                                diff = 1.0

                            vkey.Time = float(diff)/float(anim_rate)
                            psa_file.AddRawKey(vkey)

                    #done looping frames
                    #done looping armatures
                    #continue adding animInfoBinary counts here

                    anim.TotalBones = len(unique_bone_indexes)
                    print("Bones Count:",anim.TotalBones)
                    anim.TrackTime = float(frame_count) / anim.AnimRate
                    print("Time Track Frame:",anim.TrackTime)
                    psa_file.AddAnimation(anim)
                    print("------------------------------------\n")
            else:
                print("[==== Action Set ====]")
                print("Action Name:",ActionNLA.name)
                print("Action Group Count:", len(ActionNLA.groups))
                print("Bone Group Count:", len(amatureobject.pose.bones))
                print("Action set Skip!")
                print("------------------------------------\n")
        print("==== Finish Action Build(s) ====")
    else:
        # Single-action export: dump whatever action is currently bound to
        # each armature.  The per-frame logic mirrors the branch above.
        print("[==== Action Set Single Export====]")
        #list of armature objects
        for arm in blender_armatures:
            #check if there animation data from armature or something
            if not arm.animation_data:
                print("======================================")
                print("Check Animation Data: None")
                print("Armature has no animation, skipping...")
                print("======================================")
                break

            if not arm.animation_data.action:
                print("======================================")
                print("Check Action: None")
                print("Armature has no animation, skipping...")
                print("======================================")
                break
            act = arm.animation_data.action
            action_name = act.name

            if not len(act.fcurves):
                print("//===========================================================")
                print("// None bone pose set keys for this action set... skipping...")
                print("//===========================================================")
                bHaveAction = False

            #this deal with action export control
            if bHaveAction == True:
                print("---- Action Start ----")
                print("Action Name:",action_name)
                #look for min and max frame that current set keys
                framemin, framemax = act.frame_range
                start_frame = int(framemin)
                end_frame = int(framemax)
                scene_frames = range(start_frame, end_frame+1)
                frame_count = len(scene_frames)
                #===================================================
                anim = AnimInfoBinary()
                anim.Name = action_name
                anim.Group = "" #what is group?
                anim.NumRawFrames = frame_count
                anim.AnimRate = anim_rate
                anim.FirstRawFrame = cur_frame_index
                #===================================================
                count_previous_keys = len(psa_file.RawKeys.Data)
                print("Frame Key Set Count:",frame_count, "Total Frame:",frame_count)
                unique_bone_indexes = {}
                # bone lookup table
                bones_lookup = {}

                #build bone node for animation keys needed to be set
                for bone in arm.data.bones:
                    bones_lookup[bone.name] = bone
                frame_count = len(scene_frames)
                pose_data = arm.pose

                #these must be ordered in the order the bones will show up in the PSA file!
                ordered_bones = {}
                ordered_bones = sorted([(psa_file.UseBone(x.name), x) for x in pose_data.bones], key=operator.itemgetter(0))

                #############################
                # ORDERED FRAME, BONE
                for i in range(frame_count):
                    frame = scene_frames[i]

                    if frame_count > i+1:
                        next_frame = scene_frames[i+1]
                    else:
                        next_frame = -1

                    #frame start from 1 as number one from blender
                    blender_scene.frame_set(frame)

                    cur_frame_index = cur_frame_index + 1
                    for bone_data in ordered_bones:
                        bone_index = bone_data[0]
                        pose_bone = bone_data[1]

                        blender_bone = bones_lookup[pose_bone.name]

                        #just need the total unique bones used, later for this AnimInfoBinary
                        unique_bone_indexes[bone_index] = bone_index
                        head = pose_bone.head

                        posebonemat = mathutils.Matrix(pose_bone.matrix)
                        parent_pose = pose_bone.parent
                        if parent_pose != None:
                            parentposemat = mathutils.Matrix(parent_pose.matrix)
                            #blender 2.4X it been flip around with new 2.50 (mat1 * mat2) should now be (mat2 * mat1)
                            posebonemat = parentposemat.inverted() * posebonemat
                        head = posebonemat.to_translation()
                        quat = posebonemat.to_quaternion().normalized()
                        vkey = VQuatAnimKey()
                        vkey.Position.X = head.x
                        vkey.Position.Y = head.y
                        vkey.Position.Z = head.z
                        if parent_pose != None:
                            quat = make_fquat(quat)
                        else:
                            quat = make_fquat_default(quat)

                        vkey.Orientation = quat

                        #time from now till next frame = diff / framesPerSec
                        if next_frame >= 0:
                            diff = next_frame - frame
                        else:
                            diff = 1.0

                        vkey.Time = float(diff)/float(anim_rate)
                        psa_file.AddRawKey(vkey)

                #done looping frames
                #done looping armatures
                #continue adding animInfoBinary counts here

                anim.TotalBones = len(unique_bone_indexes)
                print("Bones Count:",anim.TotalBones)
                anim.TrackTime = float(frame_count) / anim.AnimRate
                print("Time Track Frame:",anim.TrackTime)
                psa_file.AddAnimation(anim)
                print("---- Action End ----")
        print("==== Finish Action Build ====")
+
def meshmerge(selectedobjects):
    """Clone every object in *selectedobjects*, join the clones into one
    mesh object, and return the joined object.

    The originals are left untouched — both object and mesh data are copied
    before joining, so the caller can later unlink the merged temp object.

    NOTE(review): when len(selectedobjects) <= 1 the merge block is skipped
    and ``cloneobjects[0]`` raises IndexError; callers currently only pass
    two or more objects — confirm before reusing elsewhere.
    """
    bpy.ops.object.mode_set(mode='OBJECT')
    cloneobjects = []
    if len(selectedobjects) > 1:
        print("selectedobjects:",len(selectedobjects))
        count = 0 #reset count
        for count in range(len( selectedobjects)):
            if selectedobjects[count] != None:
                me_da = selectedobjects[count].data.copy() #copy data
                me_ob = selectedobjects[count].copy() #copy object
                #note two copy two types else it will use the current data or mesh
                me_ob.data = me_da
                bpy.context.scene.objects.link(me_ob)#link the object to the scene #current object location
                print("Index:",count,"clone object",me_ob.name)
                cloneobjects.append(me_ob)
        for i in bpy.data.objects: i.select = False #deselect all objects
        count = 0 #reset count
        # Make the first clone active, select every clone, then join them
        # all into the active object.
        for count in range(len( cloneobjects)):
            if count == 0:
                bpy.context.scene.objects.active = cloneobjects[count]
                print("Set Active Object:",cloneobjects[count].name)
            cloneobjects[count].select = True
        bpy.ops.object.join()
    return cloneobjects[0]
+
def fs_callback(filename, context):
    """Top-level export driver: validate the scene, then write .psk/.psa.

    filename -- output path; extension is normalized to .psk and .psa
    context  -- Blender context supplying the scene to export

    Sets the module globals ``exportmessage`` (shown by the operator) and
    ``bDeleteMergeMesh`` (whether a temp merged mesh was created), and
    resets ``nbone`` for a fresh bone numbering pass.
    """
    #this deal with repeat export and the reset settings
    global nbone, exportmessage, bDeleteMergeMesh
    nbone = 0

    # NOTE(review): time.clock() was removed in Python 3.8 — time.perf_counter()
    # is the modern equivalent; fine for the Blender 2.5x Python this targets.
    start_time = time.clock()

    print ("========EXPORTING TO UNREAL SKELETAL MESH FORMATS========\r\n")
    print("Blender Version:", bpy.app.version[1],"-")

    psk = PSKFile()
    psa = PSAFile()

    #sanity check - this should already have the extension, but just in case, we'll give it one if it doesn't
    psk_filename = make_filename_ext(filename, '.psk')

    #make the psa filename
    psa_filename = make_filename_ext(filename, '.psa')

    print ('PSK File: ' + psk_filename)
    print ('PSA File: ' + psa_filename)

    barmature = True
    bmesh = True
    blender_meshes = []
    blender_armature = []
    selectmesh = []
    selectarmature = []

    current_scene = context.scene
    cur_frame = current_scene.frame_current #store current frame before we start walking them during animation parse
    objects = current_scene.objects

    # Partition scene objects: all meshes/armatures, plus the selected ones.
    print("Checking object count...")
    for next_obj in objects:
        if next_obj.type == 'MESH':
            blender_meshes.append(next_obj)
            if (next_obj.select):
                selectmesh.append(next_obj)
        if next_obj.type == 'ARMATURE':
            blender_armature.append(next_obj)
            if (next_obj.select):
                selectarmature.append(next_obj)

    print("Mesh Count:",len(blender_meshes)," Armature Count:",len(blender_armature))
    print("====================================")
    print("Checking Mesh Condtion(s):")
    #if there 1 mesh in scene add to the array
    if len(blender_meshes) == 1:
        print(" - One Mesh Scene")
    #if there more than one mesh and one mesh select add to array
    elif (len(blender_meshes) > 1) and (len(selectmesh) == 1):
        blender_meshes = []
        blender_meshes.append(selectmesh[0])
        print(" - One Mesh [Select]")
    elif (len(blender_meshes) > 1) and (len(selectmesh) >= 1):
        # Several meshes selected: merge them into one temp mesh, but only
        # if at least one sits at the world origin (required by UT).
        print("More than one mesh is selected!")
        centermesh = []
        notcentermesh = []
        countm = 0
        for countm in range(len(selectmesh)):
            if selectmesh[countm].location.x == 0 and selectmesh[countm].location.y == 0 and selectmesh[countm].location.z == 0:
                centermesh.append(selectmesh[countm])
            else:
                notcentermesh.append(selectmesh[countm])
        if len(centermesh) > 0:
            # Re-order so origin-centered meshes come first, then merge.
            print("Center Object Found!")
            blender_meshes = []
            selectmesh = []
            countm = 0
            for countm in range(len(centermesh)):
                selectmesh.append(centermesh[countm])
            for countm in range(len(notcentermesh)):
                selectmesh.append(notcentermesh[countm])
            blender_meshes.append(meshmerge(selectmesh))
            bDeleteMergeMesh = True
        else:
            bDeleteMergeMesh = False
            bmesh = False
            print("Center Object Not Found")
    else:
        print(" - Too Many Meshes!")
        print(" - Select One Mesh Object!")
        bmesh = False
        bDeleteMergeMesh = False

    print("====================================")
    print("Checking Armature Condtion(s):")
    if len(blender_armature) == 1:
        print(" - One Armature Scene")
    elif (len(blender_armature) > 1) and (len(selectarmature) == 1):
        print(" - One Armature [Select]")
    else:
        print(" - Too Armature Meshes!")
        print(" - Select One Armature Object Only!")
        barmature = False

    # The exported mesh must be unscaled and at the origin.
    bMeshScale = True
    bMeshCenter = True
    if len(blender_meshes) > 0:
        if blender_meshes[0].scale.x == 1 and blender_meshes[0].scale.y == 1 and blender_meshes[0].scale.z == 1:
            bMeshScale = True
        else:
            print("Error, Mesh Object not scale right should be (1,1,1).")
            bMeshScale = False
        if blender_meshes[0].location.x == 0 and blender_meshes[0].location.y == 0 and blender_meshes[0].location.z == 0:
            bMeshCenter = True
        else:
            print("Error, Mesh Object not center.",blender_meshes[0].location)
            bMeshCenter = False
    else:
        bmesh = False

    # Same constraints for the armature object.
    # NOTE(review): blender_armature[0] raises IndexError when the scene has
    # no armature — this runs before the barmature check below; confirm.
    bArmatureScale = True
    bArmatureCenter = True
    if blender_armature[0] !=None:
        if blender_armature[0].scale.x == 1 and blender_armature[0].scale.y == 1 and blender_armature[0].scale.z == 1:
            bArmatureScale = True
        else:
            print("Error, Armature Object not scale right should be (1,1,1).")
            bArmatureScale = False
        if blender_armature[0].location.x == 0 and blender_armature[0].location.y == 0 and blender_armature[0].location.z == 0:
            bArmatureCenter = True
        else:
            print("Error, Armature Object not center.",blender_armature[0].location)
            bArmatureCenter = False

    if (bmesh == False) or (barmature == False) or (bArmatureCenter == False) or (bArmatureScale == False)or (bMeshScale == False) or (bMeshCenter == False):
        exportmessage = "Export Fail! Check Log."
        print("=================================")
        print("= Export Fail! =")
        print("=================================")
    else:
        exportmessage = "Export Finish!"

        try:
            #######################
            # STEP 1: MESH DUMP
            # we build the vertexes, wedges, and faces in here, as well as a vertexgroup lookup table
            # for the armature parse
            print("//===============================")
            print("// STEP 1")
            print("//===============================")
            parse_meshes(blender_meshes, psk)
        except:
            context.scene.frame_set(cur_frame) #set frame back to original frame
            print ("Exception during Mesh Parse")
            raise

        try:
            #######################
            # STEP 2: ARMATURE DUMP
            # IMPORTANT: do this AFTER parsing meshes - we need to use the vertex group data from
            # the mesh parse in here to generate bone influences
            print("//===============================")
            print("// STEP 2")
            print("//===============================")
            parse_armature(blender_armature, psk, psa)
        except:
            context.scene.frame_set(cur_frame) #set frame back to original frame
            print ("Exception during Armature Parse")
            raise

        try:
            #######################
            # STEP 3: ANIMATION DUMP
            # IMPORTANT: do AFTER parsing bones - we need to do bone lookups in here during animation frames
            print("//===============================")
            print("// STEP 3")
            print("//===============================")
            parse_animation(current_scene, blender_armature, psa)
        except:
            context.scene.frame_set(cur_frame) #set frame back to original frame
            print ("Exception during Animation Parse")
            raise

        # reset current frame
        context.scene.frame_set(cur_frame) #set frame back to original frame

        ##########################
        # FILE WRITE
        print("//===========================================")
        print("// bExportPsk:",bpy.context.scene.unrealexportpsk," bExportPsa:",bpy.context.scene.unrealexportpsa)
        print("//===========================================")
        if bpy.context.scene.unrealexportpsk == True:
            print("Writing Skeleton Mesh Data...")
            #RG - dump psk file
            psk.PrintOut()
            file = open(psk_filename, "wb")
            file.write(psk.dump())
            file.close()
            print ("Successfully Exported File: " + psk_filename)
        if bpy.context.scene.unrealexportpsa == True:
            print("Writing Animaiton Data...")
            #RG - dump psa file
            if not psa.IsEmpty():
                psa.PrintOut()
                file = open(psa_filename, "wb")
                file.write(psa.dump())
                file.close()
                print ("Successfully Exported File: " + psa_filename)
            else:
                print ("No Animations (.psa file) to Export")

        print ('PSK/PSA Export Script finished in %.2f seconds' % (time.clock() - start_time))
        print( "Current Script version: ",bl_info['version'])

        #DONE
        print ("PSK/PSA Export Complete")
+
def write_data(path, context):
    """Entry point used by the export operator: banner, then fs_callback."""
    for banner_line in ("//============================",
                        "// running psk/psa export...",
                        "//============================"):
        print(banner_line)
    fs_callback(path, context)
+
from bpy.props import *

# Scene-level settings read by fs_callback/parse_animation and edited in
# the exporter's UI panel.  Registered at import time (Blender 2.5x style).

# Frame rate stamped into exported PSA animations.
bpy.types.Scene.unrealfpsrate = IntProperty(
    name="fps rate",
    description="Set the frame per second (fps) for unreal.",
    default=24,min=1,max=100)

# Which file(s) the one-click export writes: PSK, PSA, or both.
bpy.types.Scene.unrealexport_settings = EnumProperty(
    name="Export:",
    description="Select a export settings (psk/psa/all)...",
    items = [("0","PSK","Export PSK"),("1","PSA","Export PSA"),("2","ALL","Export ALL")], default = '0')

# Action export mode consumed by parse_animation:
# '0' current action only, '1' every action, '2' user-ticked actions.
bpy.types.Scene.UEActionSetSettings = EnumProperty(
    name="Action Set(s) Export Type",
    description="For Exporting Single, All, and Select Action Set(s).",
    items = [("0","Single","Single Action Set Export"),("1","All","All Action Sets Export"),("2","Select","Select Action Set(s) Export")], default = '0')

# When enabled, a triangulated temp copy of the mesh is exported.
bpy.types.Scene.unrealtriangulatebool = BoolProperty(
    name="Triangulate Mesh",
    description="Convert Quad to Tri Mesh Boolean...",
    default=False)

# Relaxes the "action groups must match armature bones" check.
bpy.types.Scene.unrealignoreactionmatchcount = BoolProperty(
    name="Acion Group Ignore Count",
    description="It will ingore Action group count as long is matches the Armature bone count to match and over ride the armature animation data.",
    default=False)

# Toggles the action-set listing in the UI panel.
bpy.types.Scene.unrealdisplayactionsets = BoolProperty(
    name="Show Action Set(s)",
    description="Display Action Sets Information.",
    default=False)

# Write the .psk (skeletal mesh) file.
bpy.types.Scene.unrealexportpsk = BoolProperty(
    name="bool export psa",
    description="bool for exporting this psk format",
    default=True)

# Write the .psa (animation) file.
bpy.types.Scene.unrealexportpsa = BoolProperty(
    name="bool export psa",
    description="bool for exporting this psa format",
    default=True)
+
class UEAPropertyGroup(bpy.types.PropertyGroup):
    """One entry of the per-object action-set list shown in the UI."""
    ## create Properties for the collection entries:
    # free-form name slot (holds the action's name)
    mystring = bpy.props.StringProperty()
    # per-action "Export" checkbox read by parse_animation in Select mode
    mybool = bpy.props.BoolProperty(name="Export",description="Check if you want to export the action set.",default = False)
+
# Register the property group before attaching collections that use it.
bpy.utils.register_class(UEAPropertyGroup)

## create CollectionProperty and link it to the property class
# Per-object list of exportable action sets, plus the UI's active index.
bpy.types.Object.myCollectionUEA = bpy.props.CollectionProperty(type = UEAPropertyGroup)
bpy.types.Object.myCollectionUEA_index = bpy.props.IntProperty(min = -1, default = -1)
+
## create operator to add or remove entries to/from the Collection
class OBJECT_OT_add_remove_Collection_Items_UE(bpy.types.Operator):
    """Operator behind the Add / Remove / Refresh buttons of the action list.

    ``set`` selects the mode: "add" appends a placeholder entry, "remove"
    deletes the highlighted entry, "refresh" rebuilds the list from the
    actions matching the selected armature's bones.
    """
    bl_label = "Add or Remove"
    bl_idname = "collection.add_remove_ueactions"
    __doc__ = """Button for Add, Remove, Refresh Action Set(s) list."""
    # Mode flag supplied by the UI button ("add" | "remove" | "refresh").
    set = bpy.props.StringProperty()

    def invoke(self, context, event):
        obj = context.object
        collection = obj.myCollectionUEA
        if self.set == "remove":
            print("remove")
            index = obj.myCollectionUEA_index
            collection.remove(index) # This remove on item in the collection list function of index value
        if self.set == "add":
            print("add")
            added = collection.add() # This add at the end of the collection list
            # Placeholder name; the user renames it afterwards.
            added.name = "Action"+ str(random.randrange(0, 101, 2))
        if self.set == "refresh":
            print("refresh")
            ArmatureSelect = None
            ActionNames = []
            BoneNames = []
            # Take the first selected armature and note its bone names.
            for obj in bpy.data.objects:
                if obj.type == 'ARMATURE' and obj.select == True:
                    print("Armature Name:",obj.name)
                    ArmatureSelect = obj
                    for bone in obj.pose.bones:
                        BoneNames.append(bone.name)
                    break
            # Keep only actions whose fcurve groups exactly match the
            # armature's bone list (same count, every group matched).
            actionsetmatchcount = 0
            for ActionNLA in bpy.data.actions:
                nobone = 0
                for group in ActionNLA.groups:
                    for abone in BoneNames:
                        if abone == group.name:
                            nobone += 1
                            break
                if (len(ActionNLA.groups) == len(BoneNames)) and (nobone == len(ActionNLA.groups)):
                    actionsetmatchcount += 1
                    ActionNames.append(ActionNLA.name)
            # Append any matching action not already in the collection.
            print("action list check")
            for action in ActionNames:
                BfoundAction = False
                for c in collection:
                    if c.name == action:
                        BfoundAction = True
                        break
                if BfoundAction == False:
                    added = collection.add() # This add at the end of the collection list
                    added.name = action
        return {'FINISHED'}
+
class ExportUDKAnimData(bpy.types.Operator):
    """File-browser export operator: copies its options into scene settings
    and runs the PSK/PSA export via write_data."""
    global exportmessage
    '''Export Skeleton Mesh / Animation Data file(s)'''
    bl_idname = "export_anim.udk" # this is important since its how bpy.ops.export.udk_anim_data is constructed
    bl_label = "Export PSK/PSA"
    __doc__ = """One mesh and one armature else select one mesh or armature to be exported."""

    # List of operator properties, the attributes will be assigned
    # to the class instance from the operator settings before calling.

    filepath = StringProperty(name="File Path", description="Filepath used for exporting the PSA file", maxlen= 1024, default= "", subtype='FILE_PATH')
    filter_glob = StringProperty(default="*.psk;*.psa", options={'HIDDEN'})
    pskexportbool = BoolProperty(name="Export PSK", description="Export Skeletal Mesh", default= True)
    psaexportbool = BoolProperty(name="Export PSA", description="Export Action Set (Animation Data)", default= True)

    actionexportall = BoolProperty(name="All Actions", description="This will export all the actions that matches the current armature.", default=False)
    ignoreactioncountexportbool = BoolProperty(name="Ignore Action Group Count", description="It will ignore action group count but as long it matches the armature bone count to over ride the animation data.", default= False)

    @classmethod
    def poll(cls, context):
        # Only available when something is active in the scene.
        return context.active_object != None

    def execute(self, context):
        # Mirror the operator's options into the scene-level settings that
        # fs_callback/parse_animation read.
        #check if skeleton mesh is needed to be exported
        if (self.pskexportbool):
            bpy.context.scene.unrealexportpsk = True
        else:
            bpy.context.scene.unrealexportpsk = False
        #check if animation data is needed to be exported
        if (self.psaexportbool):
            bpy.context.scene.unrealexportpsa = True
        else:
            bpy.context.scene.unrealexportpsa = False

        if (self.actionexportall):
            bpy.context.scene.UEActionSetSettings = '1' #export all action sets
        else:
            bpy.context.scene.UEActionSetSettings = '0' #export the single current action set

        if(self.ignoreactioncountexportbool):
            bpy.context.scene.unrealignoreactionmatchcount = True
        else:
            bpy.context.scene.unrealignoreactionmatchcount = False

        write_data(self.filepath, context)

        # exportmessage is set by fs_callback (success or failure summary).
        self.report({'WARNING', 'INFO'}, exportmessage)
        return {'FINISHED'}

    def invoke(self, context, event):
        # Open the file browser; execute() runs once a path is chosen.
        wm = context.window_manager
        wm.fileselect_add(self)
        return {'RUNNING_MODAL'}
+
class VIEW3D_PT_unrealtools_objectmode(bpy.types.Panel):
    """Tool-shelf panel exposing the Unreal export settings and helper ops."""
    bl_space_type = "VIEW_3D"
    bl_region_type = "TOOLS"
    bl_label = "Unreal Tools"

    @classmethod
    def poll(cls, context):
        # Panel is shown only while some object is active.
        return context.active_object

    def draw(self, context):
        layout = self.layout
        rd = context.scene
        layout.prop(rd, "unrealexport_settings",expand=True)
        layout.prop(rd, "UEActionSetSettings")
        layout.prop(rd, "unrealignoreactionmatchcount")

        #FPS #it use the real data from your scene
        layout.prop(rd.render, "fps")
        layout.operator(OBJECT_OT_UnrealExport.bl_idname)


        layout.prop(rd, "unrealdisplayactionsets")

        # Find the first selected armature, if any.
        ArmatureSelect = None
        for obj in bpy.data.objects:
            if obj.type == 'ARMATURE' and obj.select == True:
                #print("Armature Name:",obj.name)
                ArmatureSelect = obj
                break
        # Display the armature's action-set list.
        # Fix: use ArmatureSelect explicitly instead of the leaked loop
        # variable `obj`, which is unbound when bpy.data.objects is empty.
        if ArmatureSelect != None and rd.unrealdisplayactionsets == True:
            layout.label(("Selected: "+ArmatureSelect.name))
            row = layout.row()
            row.template_list(ArmatureSelect, "myCollectionUEA", ArmatureSelect, "myCollectionUEA_index") # This show list for the collection
            col = row.column(align=True)
            col.operator("collection.add_remove_ueactions", icon="ZOOMIN", text="").set = "add" # This show a plus sign button
            col.operator("collection.add_remove_ueactions", icon="ZOOMOUT", text="").set = "remove" # This show a minus sign button
            col.operator("collection.add_remove_ueactions", icon="FILE_REFRESH", text="").set = "refresh" # This show a refresh sign button

            ##change name of Entry:
            if ArmatureSelect.myCollectionUEA:
                entry = ArmatureSelect.myCollectionUEA[ArmatureSelect.myCollectionUEA_index]
                layout.prop(entry, "name")
                layout.prop(entry, "mybool")
        layout.operator(OBJECT_OT_UTSelectedFaceSmooth.bl_idname)
        layout.operator(OBJECT_OT_UTRebuildArmature.bl_idname)
        layout.operator(OBJECT_OT_UTRebuildMesh.bl_idname)
        layout.operator(OBJECT_OT_ToggleConsle.bl_idname)
        layout.operator(OBJECT_OT_DeleteActionSet.bl_idname)
        layout.operator(OBJECT_OT_MeshClearWeights.bl_idname)
+
class OBJECT_OT_UnrealExport(bpy.types.Operator):
    global exportmessage
    bl_idname = "export_mesh.udk" # XXX, name???
    bl_label = "Unreal Export"
    __doc__ = """Select export setting for .psk/.psa or both."""

    def invoke(self, context, event):
        """Export according to the scene-level export mode (0=PSK, 1=PSA, 2=both)."""
        print("Init Export Script:")
        scene = bpy.context.scene
        mode = int(scene.unrealexport_settings)
        if mode == 0:
            scene.unrealexportpsk, scene.unrealexportpsa = True, False
            print("Exporting PSK...")
        elif mode == 1:
            scene.unrealexportpsk, scene.unrealexportpsa = False, True
            print("Exporting PSA...")
        elif mode == 2:
            scene.unrealexportpsk, scene.unrealexportpsa = True, True
            print("Exporting ALL...")

        default_path = os.path.splitext(bpy.data.filepath)[0] + ".psk"
        fs_callback(default_path, bpy.context)
        self.report({'INFO'}, exportmessage)
        return {'FINISHED'}
+
class OBJECT_OT_ToggleConsle(bpy.types.Operator):
    # Thin wrapper around Blender's built-in console toggle operator.
    global exportmessage
    bl_idname = "object.toggleconsle" # XXX, name???
    bl_label = "Toggle Console"
    __doc__ = "Show or Hide Console."

    def invoke(self, context, event):
        # Delegate entirely to the window-manager operator.
        bpy.ops.wm.console_toggle()
        return {'FINISHED'}
+
class OBJECT_OT_UTSelectedFaceSmooth(bpy.types.Operator):
    bl_idname = "object.utselectfacesmooth" # XXX, name???
    bl_label = "Select Smooth faces"
    __doc__ = """Select only the smooth-shaded faces of the selected mesh."""

    def invoke(self, context, event):
        """Select every smooth face of the first selected mesh object.

        Flat faces are deselected. Prints a smooth/flat count summary and
        reports whether a mesh object was found.
        """
        print("----------------------------------------")
        print("Init Select Face(s):")
        bselected = False
        for obj in bpy.data.objects:
            if obj.type == 'MESH' and obj.select == True:
                smoothcount = 0
                flatcount = 0
                # Face selection flags can only be written in object mode.
                bpy.ops.object.mode_set(mode='OBJECT')
                for i in bpy.context.scene.objects: i.select = False #deselect all objects
                obj.select = True #set current object select
                bpy.context.scene.objects.active = obj #set active object
                for face in obj.data.faces:
                    if face.use_smooth == True:
                        face.select = True
                        smoothcount += 1
                    else:
                        flatcount += 1
                        face.select = False
                bpy.context.scene.update()
                bpy.ops.object.mode_set(mode='EDIT')
                print("Select Smooth Count(s):",smoothcount," Flat Count(s):",flatcount)
                bselected = True
                break  # only the first selected mesh is processed
        if bselected:
            # Fix: "Exectue" -> "Execute" in the user-facing messages.
            print("Selected Face(s) Execute!")
            self.report({'INFO'}, "Selected Face(s) Execute!")
        else:
            print("Didn't select Mesh Object!")
            self.report({'INFO'}, "Didn't Select Mesh Object!")
        print("----------------------------------------")
        return {'FINISHED'}
+
class OBJECT_OT_DeleteActionSet(bpy.types.Operator):
    bl_idname = "object.deleteactionset" # XXX, name???
    bl_label = "Delete Action Set"
    __doc__ = """It will remove the first top of the index of the action list. Reload file to remove it. It used for unable to delete action set. """

    def invoke(self, context, event):
        # Clear users of the first action only; the orphaned datablock is
        # actually freed by Blender on the next file load.
        actions = bpy.data.actions
        if len(actions) > 0:
            first = actions[0]
            print("Action:", first.name)
            first.user_clear()
            #bpy.data.actions.remove(act)
        print("finish")
        return {'FINISHED'}
+
class OBJECT_OT_MeshClearWeights(bpy.types.Operator):
    bl_idname = "object.meshclearweights" # XXX, name???
    bl_label = "Mesh Clear Weights"
    __doc__ = """Clear selected mesh vertex group weights for the bones. Be sure you unparent the armature."""

    def invoke(self, context, event):
        """Remove every vertex group from the first selected mesh object.

        Fix: the original removed groups while iterating the same
        collection, which can skip entries; drain the collection instead.
        """
        for obj in bpy.data.objects:
            if obj.type == 'MESH' and obj.select == True:
                while obj.vertex_groups:
                    obj.vertex_groups.remove(obj.vertex_groups[0])
                break  # only the first selected mesh is processed
        return {'FINISHED'}
+
class OBJECT_OT_UTRebuildArmature(bpy.types.Operator):
    bl_idname = "object.utrebuildarmature" # XXX, name???
    bl_label = "Rebuild Armature"
    __doc__ = """If mesh is deform when importing to unreal engine try this. It rebuild the bones one at the time by select one armature object scrape to raw setup build. Note the scale will be 1:1 for object mode. To keep from deforming."""

    def invoke(self, context, event):
        """Rebuild the first selected armature into a fresh armature object.

        Copies every bone's head/tail, roll and parent relationship into a
        new object named "ArmatureObjectPSK".
        """
        print("----------------------------------------")
        print("Init Rebuild Armature...")
        bselected = False
        for obj in bpy.data.objects:
            if obj.type == 'ARMATURE' and obj.select == True:
                currentbone = []  # (name, roll) pairs copied from the source armature
                print("Armature Name:",obj.name)
                objectname = "ArmatureDataPSK"
                meshname ="ArmatureObjectPSK"
                armdata = bpy.data.armatures.new(objectname)
                ob_new = bpy.data.objects.new(meshname, armdata)
                bpy.context.scene.objects.link(ob_new)
                bpy.ops.object.mode_set(mode='OBJECT')
                for i in bpy.context.scene.objects: i.select = False #deselect all objects
                ob_new.select = True
                bpy.context.scene.objects.active = obj

                # Record each bone's roll (edit mode is required to read it).
                # Fix: the original if/else here had two identical branches;
                # collapsed into a single append.
                bpy.ops.object.mode_set(mode='EDIT')
                for bone in obj.data.edit_bones:
                    currentbone.append([bone.name,bone.roll])
                bpy.ops.object.mode_set(mode='OBJECT')
                for i in bpy.context.scene.objects: i.select = False #deselect all objects
                bpy.context.scene.objects.active = ob_new
                bpy.ops.object.mode_set(mode='EDIT')

                # Re-create every bone, copying head/tail, the recorded roll
                # and the parent link.
                for bone in obj.data.bones:
                    bpy.ops.object.mode_set(mode='EDIT')
                    newbone = ob_new.data.edit_bones.new(bone.name)
                    newbone.head = bone.head_local
                    newbone.tail = bone.tail_local
                    for bonelist in currentbone:
                        if bone.name == bonelist[0]:
                            newbone.roll = bonelist[1]
                            break
                    if bone.parent != None:
                        parentbone = ob_new.data.edit_bones[bone.parent.name]
                        newbone.parent = parentbone
                print("Bone Count:",len(obj.data.bones))
                print("Hold Bone Count",len(currentbone))
                print("New Bone Count",len(ob_new.data.edit_bones))
                print("Rebuild Armture Finish:",ob_new.name)
                bpy.context.scene.update()
                bselected = True
                break
        if bselected:
            self.report({'INFO'}, "Rebuild Armature Finish!")
        else:
            self.report({'INFO'}, "Didn't Select Armature Object!")
        print("End of Rebuild Armature.")
        print("----------------------------------------")
        return {'FINISHED'}
+
# Round vertex locations to 4 decimals to keep file sizes down;
# change or drop the rounding for more accuracy.
def rounded_tuple(tup):
    """Return *tup* as a tuple with every element rounded to 4 decimals."""
    rounded = [round(component, 4) for component in tup]
    return tuple(rounded)
+
def unpack_list(list_of_tuples):
    """Flatten one level: concatenate every item of *list_of_tuples* into a list."""
    return [element for group in list_of_tuples for element in group]
+
class OBJECT_OT_UTRebuildMesh(bpy.types.Operator):
    bl_idname = "object.utrebuildmesh" # XXX, name???
    bl_label = "Rebuild Mesh"
    __doc__ = """It rebuild the mesh from scrape from the selected mesh object. Note the scale will be 1:1 for object mode. To keep from deforming."""

    def invoke(self, context, event):
        """Rebuild the first selected mesh object from scratch.

        Copies vertices, faces, smoothing flags, UVs, materials and
        vertex-group weights into a brand-new mesh object named "Re_<name>".
        """
        print("----------------------------------------")
        print("Init Mesh Bebuild...")
        bselected = False
        for obj in bpy.data.objects:
            if obj.type == 'MESH' and obj.select == True:
                for i in bpy.context.scene.objects: i.select = False #deselect all objects
                obj.select = True
                bpy.context.scene.objects.active = obj
                bpy.ops.object.mode_set(mode='OBJECT')
                me_ob = bpy.data.meshes.new(("Re_"+obj.name))
                mesh = obj.data
                faces = []       # per-face vertex index lists
                verts = []       # vertex coordinate tuples
                smoothings = []  # per-face use_smooth flags
                uvfaces = []     # per-face UV coordinate lists (if any)
                #print(dir(mesh))
                print("creating array build mesh...")
                uv_layer = mesh.uv_textures.active
                for face in mesh.faces:
                    v = []
                    smoothings.append(face.use_smooth)#smooth or flat in boolean
                    if uv_layer != None:#check if there texture data exist
                        faceUV = uv_layer.data[face.index]
                        #print(len(faceUV.uv))
                        uvs = []
                        for uv in faceUV.uv:
                            #vert = mesh.vertices[videx]
                            #print("UV:",uv[0],":",uv[1])
                            uvs.append((uv[0],uv[1]))
                        #print(uvs)
                        uvfaces.append(uvs)
                    for videx in face.vertices:
                        vert = mesh.vertices[videx]
                        v.append(videx)
                    faces.append(v)
                #vertex positions
                for vertex in mesh.vertices:
                    verts.append(vertex.co.to_tuple())
                #vertices weight groups into array
                vertGroups = {} #array in strings
                for vgroup in obj.vertex_groups:
                    #print(dir(vgroup))
                    #print("name:",(vgroup.name),"index:",vgroup.index)
                    #vertex in index and weight
                    vlist = []
                    for v in mesh.vertices:
                        for vg in v.groups:
                            if vg.group == vgroup.index:
                                vlist.append((v.index,vg.weight))
                                #print((v.index,vg.weight))
                    vertGroups[vgroup.name] = vlist
                '''
                #Fail for this method
                #can't covert the tri face plogyon
                for face in mesh.faces:
                    x = [f for f in face.vertices]
                    faces.extend(x)
                    smoothings.append(face.use_smooth)
                for vertex in mesh.vertices:
                    verts.append(vertex.co.to_tuple())
                me_ob.vertices.add(len(verts))
                me_ob.faces.add(len(faces)//4)
                me_ob.vertices.foreach_set("co", unpack_list(verts))
                me_ob.faces.foreach_set("vertices_raw", faces)
                me_ob.faces.foreach_set("use_smooth", smoothings)
                '''
                #test dummy mesh
                #verts = [(-1,1,0),(1,1,0),(1,-1,0),(-1,-1,0),(0,1,1),(0,-1,1)]
                #faces = [(0,1,2,3),(1,2,5,4),(0,3,5,4),(0,1,4),(2,3,5)]
                #for f in faces:
                    #print("face",f)
                #for v in verts:
                    #print("vertex",v)
                #me_ob = bpy.data.objects.new("ReBuildMesh",me_ob)
                print("creating mesh object...")
                me_ob.from_pydata(verts, [], faces)
                me_ob.faces.foreach_set("use_smooth", smoothings)#smooth array from face
                me_ob.update()
                #check if there is uv faces
                if len(uvfaces) > 0:
                    uvtex = me_ob.uv_textures.new(name="retex")
                    for i, face in enumerate(me_ob.faces):
                        blender_tface = uvtex.data[i] #face
                        mfaceuv = uvfaces[i]
                        # Assign 3 or 4 UV corners depending on tri/quad.
                        if len(mfaceuv) == 3:
                            blender_tface.uv1 = mfaceuv[0];
                            blender_tface.uv2 = mfaceuv[1];
                            blender_tface.uv3 = mfaceuv[2];
                        if len(mfaceuv) == 4:
                            blender_tface.uv1 = mfaceuv[0];
                            blender_tface.uv2 = mfaceuv[1];
                            blender_tface.uv3 = mfaceuv[2];
                            blender_tface.uv4 = mfaceuv[3];

                obmesh = bpy.data.objects.new(("Re_"+obj.name),me_ob)
                bpy.context.scene.update()
                #Build tmp materials
                materialname = "ReMaterial"
                for matcount in mesh.materials:
                    matdata = bpy.data.materials.new(materialname)
                    me_ob.materials.append(matdata)
                #assign face to material id
                for face in mesh.faces:
                    #print(dir(face))
                    me_ob.faces[face.index].material_index = face.material_index
                #vertices weight groups
                for vgroup in vertGroups:
                    #print("vgroup",vgroup)#name of group
                    #print(dir(vgroup))
                    #print(vertGroups[vgroup])
                    group = obmesh.vertex_groups.new(vgroup)
                    #print("group index",group.index)
                    for v in vertGroups[vgroup]:
                        group.add([v[0]], v[1], 'ADD')# group.add(array[vertex id],weight,add)
                        #print("[vertex id, weight]",v) #array (0,0)
                        #print("[vertex id, weight]",v[0],":",v[1]) #array (0,0)
                bpy.context.scene.objects.link(obmesh)
                print("Mesh Material Count:",len(me_ob.materials))
                for mat in me_ob.materials:
                    print("-Material:",mat.name)
                print("Object Name:",obmesh.name)
                bpy.context.scene.update()
                #bpy.ops.wm.console_toggle()
                bselected = True
                break
        if bselected:
            self.report({'INFO'}, "Rebuild Mesh Finish!")
            print("Finish Mesh Build...")
        else:
            self.report({'INFO'}, "Didn't Select Mesh Object!")
            print("Didn't Select Mesh Object!")
        print("----------------------------------------")

        return {'FINISHED'}
+
def menu_func(self, context):
    """File > Export menu entry; pre-fills the path from the .blend file name."""
    #bpy.context.scene.unrealexportpsk = True
    #bpy.context.scene.unrealexportpsa = True
    default_path = os.path.splitext(bpy.data.filepath)[0] + ".psk"
    self.layout.operator(ExportUDKAnimData.bl_idname, text="Skeleton Mesh / Animation Data (.psk/.psa)").filepath = default_path
+
def register():
    """Register all classes of this module and add the export menu entry."""
    bpy.utils.register_module(__name__)
    bpy.types.INFO_MT_file_export.append(menu_func)
+
def unregister():
    """Unregister all classes of this module and remove the export menu entry."""
    bpy.utils.unregister_module(__name__)
    bpy.types.INFO_MT_file_export.remove(menu_func)
+
+if __name__ == "__main__":
+ register()
diff --git a/io_import_gimp_image_to_scene.py b/io_import_gimp_image_to_scene.py
new file mode 100644
index 00000000..e0f6a7aa
--- /dev/null
+++ b/io_import_gimp_image_to_scene.py
@@ -0,0 +1,683 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+bl_info = {
+ "name": "Import GIMP Image to Scene (.xcf/.xjt)",
+ "author": "Daniel Salazar (ZanQdo)",
+ "version": (2, 0, 0),
+ "blender": (2, 5, 7),
+ "api": 36079,
+ "location": "File > Import > GIMP Image to Scene(.xcf/.xjt)",
+ "description": "Imports GIMP multilayer image files as a series of multiple planes",
+ "warning": "XCF import requires xcftools installed",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/Import-Export/GIMPImageToScene",
+ "tracker_url": "http://projects.blender.org/tracker/index.php?"\
+ "func=detail&aid=25136",
+ "category": "Import-Export"}
+
+"""
+This script imports GIMP layered image files into 3D Scenes (.xcf, .xjt)
+"""
+
def main(File, Path, LayerViewers, MixerViewers, LayerOffset,\
         LayerScale, OpacityMode, PremulAlpha, ShadelessMats,\
         SetCamera, SetupCompo, GroupUntagged, Ext):
    """Import the GIMP image *File* from *Path* as textured 3D planes.

    Extracts the layers (tarfile for .xjt, xcftools for .xcf) into an
    images folder next to the .blend, then builds one textured plane per
    layer and optionally an ortho camera and a compositing node setup.
    """

    #-------------------------------------------------

    #Folder = '['+File.rstrip(Ext)+']'+'_images/'
    # Fix: str.rstrip() strips a trailing *character set*, not a suffix,
    # so stems ending in any of '.','x','c','f','j','t' were truncated
    # (e.g. 'pic.xcf' -> 'pi').  Slice the known extension off instead.
    Folder = 'images_'+'['+File[:-len(Ext)]+']/'

    if not bpy.data.is_saved:
        PathSaveRaw = Path+Folder
        PathSave = PathSaveRaw.replace(' ', '\ ')
        try: os.mkdir(PathSaveRaw)
        except: pass
    else:
        PathSave = bpy.data.filepath
        RSlash = PathSave.rfind('/')
        PathSaveRaw = PathSave[:RSlash+1]+Folder
        PathSave = PathSaveRaw.replace(' ', '\ ')
        try: os.mkdir(PathSaveRaw)
        except: pass
        PathSaveRaw = bpy.path.relpath(PathSaveRaw)+'/'

    PathRaw = Path
    Path = Path.replace(' ', '\ ')
    if Ext == '.xjt':
        ExtSave = '.jpg'
        #-------------------------------------------------
        # EXTRACT XJT (a .xjt is a tar archive of JPEGs plus a PRP manifest)
        import tarfile

        IMG = tarfile.open ('%s%s' % (PathRaw, File))
        PRP = IMG.extractfile('PRP')

        Members = IMG.getmembers()

        for Member in Members:
            Name = Member.name
            if Name.startswith('l') and Name.endswith('.jpg'):
                IMG.extract(Name, path=PathSaveRaw)

        #-------------------------------------------------
        # INFO XJT: parse the PRP manifest for size, mode, opacity, origin, name
        IMGs = []
        for Line in PRP.readlines():
            Line = str(Line)

            if Line.startswith("b'GIMP_XJ_IMAGE"):
                for Segment in Line.split():
                    if Segment.startswith('w/h:'):
                        ResX, ResY = map (int, Segment[4:].split(','))
            if Line.startswith("b'L") or Line.startswith("b'l"):

                '''The "nice" method to check if layer has alpha channel
                sadly GIMP sometimes decides not to export an alpha channel
                if it's pure white so we are not completly sure here yet'''
                if Line.startswith("b'L"): HasAlpha = True
                else: HasAlpha = False

                md = None
                op = 1
                ox, oy = 0,0

                for Segment in Line.split():

                    if Segment.startswith("b'"):
                        imageFile = 'l' + Segment[3:] + '.jpg'
                        imageFileAlpha ='la'+Segment[3:]+'.jpg'

                        '''Phisically double checking if alpha image exists
                        now we can be sure! (damn GIMP)'''
                        if HasAlpha:
                            if not os.path.isfile(PathSaveRaw+imageFileAlpha): HasAlpha = False

                        # Get Widht and Height from images
                        data = open(PathSaveRaw+imageFile, "rb").read()

                        hexList = []
                        for ch in data:
                            byt = "%02X" % ch
                            hexList.append(byt)

                        # Scan the JPEG for an SOF0/SOF2 marker to read the size.
                        for k in range(len(hexList)-1):
                            if hexList[k] == 'FF' and (hexList[k+1] == 'C0' or hexList[k+1] == 'C2'):
                                ow = int(hexList[k+7],16)*256 + int(hexList[k+8],16)
                                oh = int(hexList[k+5],16)*256 + int(hexList[k+6],16)

                    elif Segment.startswith('md:'): # mode
                        md = Segment[3:]

                    elif Segment.startswith('op:'): # opacity
                        op = float(Segment[3:])*.01

                    elif Segment.startswith('o:'): # origin
                        ox, oy = map(int, Segment[2:].split(','))

                    elif Segment.startswith('n:'): # name
                        n = Segment[3:-4]
                        OpenBracket = n.find ('[')
                        CloseBracket = n.find (']')

                        if OpenBracket != -1 and CloseBracket != -1:
                            RenderLayer = n[OpenBracket+1:CloseBracket]
                            NameShort = n[:OpenBracket]

                        else:
                            RenderLayer = n
                            NameShort = n

                        os.rename(PathSaveRaw+imageFile, PathSaveRaw+NameShort+'.jpg')
                        if HasAlpha: os.rename(PathSaveRaw+imageFileAlpha, PathSaveRaw+NameShort+'_A'+'.jpg')

                        IMGs.append({'LayerMode':md, 'LayerOpacity':op,\
                            'LayerName':n, 'LayerNameShort':NameShort,\
                            'RenderLayer':RenderLayer, 'LayerCoords':[ow, oh, ox, oy], 'HasAlpha':HasAlpha})

    else: # Ext == '.xcf':
        ExtSave = '.png'
        #-------------------------------------------------
        # CONFIG (external xcftools binaries)
        XCFInfo = 'xcfinfo'
        XCF2PNG = 'xcf2png'
        #-------------------------------------------------
        # INFO XCF: parse xcfinfo output for layer geometry, mode and opacity

        CMD = '%s %s%s' % (XCFInfo, Path, File)

        Info = os.popen(CMD)

        IMGs = []
        for Line in Info.readlines():
            if Line.startswith ('+'):

                Line = Line.split(' ', 4)

                RenderLayer = Line[4]

                OpenBracket = RenderLayer.find ('[')
                CloseBracket = RenderLayer.find (']')

                if OpenBracket != -1 and CloseBracket != -1:
                    RenderLayer = RenderLayer[OpenBracket+1:CloseBracket]
                    NameShort = Line[4][:OpenBracket]
                else:
                    NameShort = Line[4].rstrip()
                    if GroupUntagged:
                        RenderLayer = '__Undefined__'
                    else:
                        RenderLayer = NameShort

                LineThree = Line[3]
                Slash = LineThree.find('/')
                if Slash == -1:
                    Mode = LineThree
                    Opacity = 1
                else:
                    Mode = LineThree[:Slash]
                    Opacity = float(LineThree[Slash+1:LineThree.find('%')])*.01

                IMGs.append ({\
                    'LayerMode':Mode,\
                    'LayerOpacity':Opacity,\
                    'LayerName':Line[4].rstrip(),\
                    'LayerNameShort':NameShort,\
                    'LayerCoords':list(map(int, Line[1].replace('x', ' ').replace('+', ' +').replace('-', ' -').split())),\
                    'RenderLayer':RenderLayer,\
                    'HasAlpha':True,\
                    })
            elif Line.startswith('Version'):
                ResX, ResY = map (int, Line.split()[2].split('x'))

        #-------------------------------------------------
        # EXTRACT XCF: export each layer to PNG via xcf2png
        if OpacityMode == 'BAKE':
            Opacity = ''
        else:
            Opacity = ' --percent 100'
        for Layer in IMGs:
            CMD = '%s -C %s%s -o %s%s.png "%s"%s' %\
                (XCF2PNG, Path, File, PathSave, Layer['LayerName'].replace(' ', '_'), Layer['LayerName'], Opacity)
            os.system(CMD)

    #-------------------------------------------------
    Scene = bpy.context.scene
    #-------------------------------------------------
    # CAMERA

    if SetCamera:
        bpy.ops.object.camera_add(location=(0, 0, 10))

        Camera = bpy.context.active_object.data

        Camera.type = 'ORTHO'
        Camera.ortho_scale = ResX * .01

    #-------------------------------------------------
    # RENDER SETTINGS

    Render = Scene.render

    if SetCamera:
        Render.resolution_x = ResX
        Render.resolution_y = ResY
        Render.resolution_percentage = 100
    if PremulAlpha: Render.alpha_mode = 'PREMUL'

    #-------------------------------------------------
    # 3D VIEW SETTINGS

    Scene.game_settings.material_mode = 'GLSL'

    Areas = bpy.context.screen.areas

    for Area in Areas:
        if Area.type == 'VIEW_3D':
            Area.spaces.active.viewport_shade = 'TEXTURED'
            Area.spaces.active.show_textured_solid = True
            Area.spaces.active.show_floor = False

    #-------------------------------------------------
    # 3D LAYERS

    def Make3DLayer (Name, NameShort, Z, Coords, RenderLayer, LayerMode, LayerOpacity, HasAlpha):
        # Create one textured plane for a single GIMP layer, plus its
        # material/texture(s) and (optionally) a render layer for compositing.

        # RenderLayer

        if SetupCompo:
            if not bpy.context.scene.render.layers.get(RenderLayer):

                bpy.ops.scene.render_layer_add()

                LayerActive = bpy.context.scene.render.layers.active
                LayerActive.name = RenderLayer
                LayerActive.use_pass_vector = True
                LayerActive.use_sky = False
                LayerActive.use_edge_enhance = False
                LayerActive.use_strand = False
                LayerActive.use_halo = False

                global LayerNum
                for i in range (0,20):
                    if not i == LayerNum:
                        LayerActive.layers[i] = False

                bpy.context.scene.layers[LayerNum] = True

                LayerFlags[RenderLayer] = bpy.context.scene.render.layers.active.layers

                LayerList.append([RenderLayer, LayerMode, LayerOpacity])

                LayerNum += 1

        # Object
        bpy.ops.mesh.primitive_plane_add(\
            view_align=False,\
            enter_editmode=False,\
            rotation=(0, 0, pi))

        bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)


        Active = bpy.context.active_object

        if SetupCompo:
            Active.layers = LayerFlags[RenderLayer]

        Active.location = (\
            (float(Coords[2])-(ResX*0.5))*LayerScale,\
            (-float(Coords[3])+(ResY*0.5))*LayerScale, Z)

        for Vert in Active.data.vertices:
            Vert.co[0] += 1
            Vert.co[1] += -1

        Active.dimensions = float(Coords[0])*LayerScale, float(Coords[1])*LayerScale, 0

        bpy.ops.object.transform_apply(location=False, rotation=False, scale=True)

        bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='MEDIAN')

        Active.show_wire = True

        Active.name = NameShort
        bpy.ops.mesh.uv_texture_add()

        # Material

        '''if bpy.data.materials.get(NameShort):
            Mat = bpy.data.materials[NameShort]
            if not Active.material_slots:
                bpy.ops.object.material_slot_add()
            Active.material_slots[0].material = Mat
        else:'''

        Mat = bpy.data.materials.new(NameShort)
        Mat.diffuse_color = (1,1,1)
        Mat.use_raytrace = False
        Mat.use_shadows = False
        Mat.use_cast_buffer_shadows = False
        Mat.use_cast_approximate = False
        if HasAlpha:
            Mat.use_transparency = True
            if OpacityMode == 'MAT': Mat.alpha = LayerOpacity
            else: Mat.alpha = 0
        if ShadelessMats: Mat.use_shadeless = True

        if Ext == '.xcf':
            # Color & Alpha PNG
            Tex = bpy.data.textures.new(NameShort, 'IMAGE')
            Tex.extension = 'CLIP'
            Tex.use_preview_alpha = True

            Img = bpy.data.images.new(NameShort, 128, 128)
            Img.source = 'FILE'
            if PremulAlpha: Img.use_premultiply = True
            Img.filepath = '%s%s%s' % (PathSaveRaw, Name, ExtSave)

            UVFace = Active.data.uv_textures[0].data[0]
            UVFace.image = Img
            UVFace.use_image = True

            Tex.image = Img

            Mat.texture_slots.add()
            TexSlot = Mat.texture_slots[0]
            TexSlot.texture = Tex
            TexSlot.use_map_alpha = True
            TexSlot.texture_coords = 'UV'
            if OpacityMode == 'TEX': TexSlot.alpha_factor = LayerOpacity
            elif OpacityMode == 'MAT': TexSlot.blend_type = 'MULTIPLY'

        else: # Ext == '.xjt'
            # Color JPG
            Tex = bpy.data.textures.new(NameShort, 'IMAGE')
            Tex.extension = 'CLIP'

            Img = bpy.data.images.new(NameShort, 128, 128)
            Img.source = 'FILE'
            Img.filepath = '%s%s%s' % (PathSaveRaw, Name, ExtSave)

            UVFace = Active.data.uv_textures[0].data[0]
            UVFace.image = Img
            UVFace.use_image = True

            Tex.image = Img

            Mat.texture_slots.add()
            TexSlot = Mat.texture_slots[0]
            TexSlot.texture = Tex
            TexSlot.texture_coords = 'UV'

            if HasAlpha:
                # Alpha JPG (separate grayscale image used as alpha mask)
                Tex = bpy.data.textures.new(NameShort+'_A', 'IMAGE')
                Tex.extension = 'CLIP'
                Tex.use_preview_alpha = True
                Tex.use_alpha = False

                Img = bpy.data.images.new(NameShort+'_A', 128, 128)
                Img.source = 'FILE'
                if PremulAlpha: Img.use_premultiply = True
                Img.filepath = '%s%s_A%s' % (PathSaveRaw, Name, ExtSave)

                Tex.image = Img

                Mat.texture_slots.add()
                TexSlot = Mat.texture_slots[1]
                TexSlot.texture = Tex
                TexSlot.use_map_alpha = True
                TexSlot.use_map_color_diffuse = False
                TexSlot.texture_coords = 'UV'
                if OpacityMode == 'TEX': TexSlot.alpha_factor = LayerOpacity
                elif OpacityMode == 'MAT': TexSlot.blend_type = 'MULTIPLY'

        if not Active.material_slots:
            bpy.ops.object.material_slot_add()

        Active.material_slots[0].material = Mat


    Z = 0
    global LayerNum
    LayerNum = 0
    LayerFlags = {}
    LayerList = []

    for Layer in IMGs:
        Make3DLayer(\
            Layer['LayerName'].replace(' ', '_'),\
            Layer['LayerNameShort'].replace(' ', '_'),\
            Z,\
            Layer['LayerCoords'],\
            Layer['RenderLayer'],\
            Layer['LayerMode'],\
            Layer['LayerOpacity'],\
            Layer['HasAlpha'],\
            )

        Z -= LayerOffset

    if SetupCompo:
        #-------------------------------------------------
        # COMPO NODES

        Scene.use_nodes = True

        Tree = Scene.node_tree

        for i in Tree.nodes:
            Tree.nodes.remove(i)

        LayerList.reverse()

        Offset = 0
        LayerLen = len(LayerList)

        for Layer in LayerList:

            Offset += 1

            X_Offset = (500*Offset)
            Y_Offset = (-300*Offset)

            Node = Tree.nodes.new('R_LAYERS')
            Node.location = (-500+X_Offset, 300+Y_Offset)
            Node.name = 'R_'+ str(Offset)
            Node.scene = Scene
            Node.layer = Layer[0]

            if LayerViewers:
                Node_V = Tree.nodes.new('VIEWER')
                Node_V.name = Layer[0]
                Node_V.location = (-200+X_Offset, 200+Y_Offset)

                Tree.links.new(Node.outputs[0], Node_V.inputs[0])

            if LayerLen > Offset:

                Mode = LayerList[Offset][1] # has to go one step further
                LayerOpacity = LayerList[Offset][2]

                if not Mode in ('Normal', '-1'):

                    Node = Tree.nodes.new('MIX_RGB')
                    if OpacityMode == 'COMPO': Node.inputs['Fac'].default_value[0] = LayerOpacity
                    else: Node.inputs['Fac'].default_value[0] = 1
                    Node.use_alpha = True

                    # Map GIMP layer-mode names/codes to Blender blend types.
                    if Mode in ('Addition', '7'): Node.blend_type = 'ADD'
                    elif Mode in ('Subtract', '8'): Node.blend_type = 'SUBTRACT'
                    elif Mode in ('Multiply', '3'): Node.blend_type = 'MULTIPLY'
                    elif Mode in ('DarkenOnly', '9'): Node.blend_type = 'DARKEN'
                    elif Mode in ('Dodge', '16'): Node.blend_type = 'DODGE'
                    elif Mode in ('LightenOnly', '10'): Node.blend_type = 'LIGHTEN'
                    elif Mode in ('Difference', '6'): Node.blend_type = 'DIFFERENCE'
                    elif Mode in ('Divide', '15'): Node.blend_type = 'DIVIDE'
                    elif Mode in ('Overlay', '5'): Node.blend_type = 'OVERLAY'
                    elif Mode in ('Screen', '4'): Node.blend_type = 'SCREEN'
                    elif Mode in ('Burn', '17'): Node.blend_type = 'BURN'
                    elif Mode in ('Color', '13'): Node.blend_type = 'COLOR'
                    elif Mode in ('Value', '14'): Node.blend_type = 'VALUE'
                    elif Mode in ('Saturation', '12'): Node.blend_type = 'SATURATION'
                    elif Mode in ('Hue', '11'): Node.blend_type = 'HUE'
                    elif Mode in ('Softlight', '19'): Node.blend_type = 'SOFT_LIGHT'
                    else: pass

                else:
                    Node = Tree.nodes.new('ALPHAOVER')
                    if OpacityMode == 'COMPO': Node.inputs['Fac'].default_value[0] = LayerOpacity
                Node.name = 'M_' + str(Offset)
                Node.location = (300+X_Offset, 250+Y_Offset)

                if MixerViewers:
                    Node_V = Tree.nodes.new('VIEWER')
                    Node_V.name = Layer[0]
                    Node_V.location = (500+X_Offset, 350+Y_Offset)

                    Tree.links.new(Node.outputs[0], Node_V.inputs[0])

            else:
                Node = Tree.nodes.new('COMPOSITE')
                Node.name = 'Composite'
                Node.location = (400+X_Offset, 350+Y_Offset)

        Nodes = bpy.context.scene.node_tree.nodes

        # Chain render layers through the mixers into the composite output.
        if LayerLen > 1:
            for i in range (1, LayerLen+1):
                if i == 1:
                    Tree.links.new(Nodes['R_'+str(i)].outputs[0], Nodes['M_'+str(i)].inputs[1])
                if 1 < i < LayerLen:
                    Tree.links.new(Nodes['M_'+str(i-1)].outputs[0], Nodes['M_'+str(i)].inputs[1])
                if 1 < i < LayerLen+1:
                    Tree.links.new(Nodes['R_'+str(i)].outputs[0], Nodes['M_'+str(i-1)].inputs[2])
                if i == LayerLen:
                    Tree.links.new(Nodes['M_'+str(i-1)].outputs[0], Nodes['Composite'].inputs[0])
        else:
            Tree.links.new(Nodes['R_1'].outputs[0], Nodes['Composite'].inputs[0])

        for i in Tree.nodes:
            i.location[0] += -250*Offset
            i.location[1] += 150*Offset
+
+#------------------------------------------------------------------------
+import os
+import bpy
+from bpy.props import *
+from math import pi
+
+# Operator
class GIMPImageToScene(bpy.types.Operator):
    '''Import a GIMP multilayer image file (.xcf/.xjt) as a set of 3D planes'''
    bl_idname = "import.gimp_image_to_scene"
    bl_label = "GIMP Image to Scene"
    bl_description = "Imports GIMP multilayer image files into 3D Scenes"
    bl_options = {'REGISTER', 'UNDO'}

    filename = StringProperty(name="File Name",
        description="Name of the file")
    directory = StringProperty(name="Directory",
        description="Directory of the file")

    LayerViewers = BoolProperty(name="Layer Viewers",
        description="Add Viewer nodes to each Render Layer node",
        default=True)

    MixerViewers = BoolProperty(name="Mixer Viewers",
        description="Add Viewer nodes to each Mix node",
        default=True)

    # NOTE(review): "Premuliply" is a typo in the visible UI label; fixing
    # it would change the property's displayed name, so it is left as-is.
    PremulAlpha = BoolProperty(name="Premuliply Alpha",
        description="Set Image and Render settings to premultiplied alpha",
        default=True)

    ShadelessMats = BoolProperty(name="Shadeless Material",
        description="Set Materials as Shadeless",
        default=True)

    OpacityMode = EnumProperty(name="Opacity Mode",
        description="Layer Opacity management",
        items=(
            ('TEX', 'Texture Alpha Factor', ''),
            ('MAT', 'Material Alpha Value', ''),
            ('COMPO', 'Mixer Node Factor', ''),
            ('BAKE', 'Baked in Image Alpha', '')),
        default='TEX')

    SetCamera = BoolProperty(name="Set Camera",
        description="Create an Ortho Camera matching image resolution",
        default=True)

    SetupCompo = BoolProperty(name="Setup Node Compositing",
        description="Create a compositing node setup (will delete existing nodes)",
        default=False)

    GroupUntagged = BoolProperty(name="Group Untagged",
        description="Layers with no tag go to a single Render Layer",
        default=False)

    LayerOffset = FloatProperty(name="Layer Separation",
        description="Distance between each 3D Layer in the Z axis",
        min=0,
        default=0.01)

    LayerScale = FloatProperty(name="Layer Scale",
        description="Scale pixel resolution by Blender units",
        min=0,
        default=0.01)

    def draw(self, context):
        """Draw the import options in two boxes: 3D layers and compositing."""
        layout = self.layout
        box = layout.box()
        box.label('3D Layers:', icon='SORTSIZE')
        box.prop(self, 'SetCamera', icon='OUTLINER_DATA_CAMERA')
        box.prop(self, 'OpacityMode', icon='GHOST')
        if self.OpacityMode == 'COMPO' and self.SetupCompo == False:
            box.label('Tip: Enable Node Compositing', icon='INFO')
        box.prop(self, 'PremulAlpha', icon='IMAGE_RGB_ALPHA')
        box.prop(self, 'ShadelessMats', icon='SOLID')
        box.prop(self, 'LayerOffset')
        box.prop(self, 'LayerScale')
        box = layout.box()
        box.label('Compositing:', icon='RENDERLAYERS')
        box.prop(self, 'SetupCompo', icon='NODETREE')
        if self.SetupCompo:
            box.prop(self, 'GroupUntagged', icon='IMAGE_ZDEPTH')
            box.prop(self, 'LayerViewers', icon='NODE')
            box.prop(self, 'MixerViewers', icon='NODE')

    def execute(self, context):
        """Validate the chosen file's extension and run main()."""
        # File Path
        filename = self.filename
        directory = self.directory

        # Settings
        LayerViewers = self.LayerViewers
        MixerViewers = self.MixerViewers
        OpacityMode = self.OpacityMode
        PremulAlpha = self.PremulAlpha
        ShadelessMats = self.ShadelessMats
        SetCamera = self.SetCamera
        SetupCompo = self.SetupCompo
        GroupUntagged = self.GroupUntagged
        LayerOffset = self.LayerOffset
        LayerScale = self.LayerScale

        Ext = None
        if filename.endswith('.xcf'): Ext = '.xcf'
        elif filename.endswith('.xjt'): Ext = '.xjt'

        # Call Main Function
        if Ext:
            main(filename, directory, LayerViewers, MixerViewers, LayerOffset,\
                LayerScale, OpacityMode, PremulAlpha, ShadelessMats,\
                SetCamera, SetupCompo, GroupUntagged, Ext)
        else:
            self.report({'ERROR'},"Selected file wasn't valid, try .xcf or .xjt")

        return {'FINISHED'}

    def invoke(self, context, event):
        # Open the file selector; execute() runs once the user confirms.
        wm = bpy.context.window_manager
        wm.fileselect_add(self)

        return {'RUNNING_MODAL'}
+
+
+# Registering / Unregister
def menu_func(self, context):
    """File > Import menu entry for the GIMP image importer."""
    self.layout.operator(GIMPImageToScene.bl_idname, text="GIMP Image to Scene (.xcf, .xjt)", icon='PLUGIN')
+
+
def register():
    """Register all classes of this module and add the import menu entry."""
    bpy.utils.register_module(__name__)

    bpy.types.INFO_MT_file_import.append(menu_func)
+
+
def unregister():
    """Unregister all classes of this module and remove the import menu entry."""
    bpy.utils.unregister_module(__name__)

    bpy.types.INFO_MT_file_import.remove(menu_func)
+
+
+if __name__ == "__main__":
+ register()
diff --git a/io_import_images_as_planes.py b/io_import_images_as_planes.py
new file mode 100644
index 00000000..4ede9e93
--- /dev/null
+++ b/io_import_images_as_planes.py
@@ -0,0 +1,352 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+bl_info = {
+ "name": "Import Images as Planes",
+ "author": "Florian Meyer (tstscr)",
+ "version": (1, 0),
+ "blender": (2, 5, 7),
+ "api": 35622,
+ "location": "File > Import > Images as Planes",
+ "description": "Imports images and creates planes with the appropriate aspect ratio. "\
+ "The images are mapped to the planes.",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/Add_Mesh/Planes_from_Images",
+ "tracker_url": "https://projects.blender.org/tracker/index.php?"\
+ "func=detail&aid=21751",
+ "category": "Import-Export"}
+
+import bpy, os, mathutils
+from bpy.props import *
+from add_utils import *
+from bpy_extras.io_utils import ImportHelper
+from bpy_extras.image_utils import load_image
+
+## GLOBAL VARS ##
+EXT_LIST = {
+ 'jpeg': ['jpeg', 'jpg', 'jpe'],
+ 'png': ['png'],
+ 'tga': ['tga', 'tpic'],
+ 'tiff': ['tiff', 'tif'],
+ 'exr': ['exr'],
+ 'hdr': ['hdr'],
+ 'avi': ['avi'],
+ 'mov': ['mov', 'qt'],
+ 'mp4': ['mp4'],
+ 'ogg': ['ogg', 'ogv'],
+ 'bmp': ['bmp', 'dib'],
+ 'cin': ['cin'],
+ 'dpx': ['dpx'],
+ 'psd': ['psd']}
+EXT_VALS = [val for val in EXT_LIST.values()]
+EXTENSIONS = []
+for i in EXT_VALS:
+ EXTENSIONS.extend(i)
+
+## FUNCTIONS ##
+def set_image_options(self, image):
+ image.use_premultiply = self.use_premultiply
+
+def create_image_textures(self, image):
+ #look for texture with importsettings
+ for texture in bpy.data.textures:
+ if texture.type == 'IMAGE'\
+ and texture.image\
+ and texture.image.filepath == image.filepath:
+ if self.use_transparency:
+ texture.use_alpha = True
+ else:
+ texture.use_alpha = False
+ return texture
+
+ #if no texture is found: create one
+ texture = bpy.data.textures.new(name=os.path.split(image.filepath)[1],
+ type='IMAGE')
+ texture.image = image
+ if self.use_transparency:
+ texture.use_alpha = True
+ else:
+ texture.use_alpha = False
+ return texture
+
+def create_material_for_texture(self, texture):
+ #look for material with the needed texture
+ for material in bpy.data.materials:
+ if material.texture_slots[0]\
+ and material.texture_slots[0].texture == texture:
+ if self.use_transparency:
+ material.alpha = 0
+ material.specular_alpha = 0
+ material.texture_slots[0].use_map_alpha = True
+ else:
+ material.alpha = 1
+ material.specular_alpha = 1
+ material.texture_slots[0].use_map_alpha = False
+ material.use_transparency = self.use_transparency
+ material.transparency_method = self.transparency_method
+ material.use_shadeless = self.use_shadeless
+ return material
+
+ # if no material found: create one
+ material = bpy.data.materials.new(name=os.path.split(texture.image.filepath)[1])
+ slot = material.texture_slots.add()
+ slot.texture = texture
+ slot.texture_coords = 'UV'
+ if self.use_transparency:
+ slot.use_map_alpha = True
+ material.alpha = 0
+ material.specular_alpha = 0
+ else:
+ material.alpha = 1
+ material.specular_alpha = 1
+ slot.use_map_alpha = False
+ material.use_transparency = self.use_transparency
+ material.transparency_method = self.transparency_method
+ material.use_shadeless = self.use_shadeless
+
+ return material
+
+def create_image_plane(self, context, material):
+ img = material.texture_slots[0].texture.image
+ x = img.size[0] / img.size[1]
+ y = 1
+
+ if self.use_dimension:
+ x = (img.size[0] * (1.0 / self.factor)) * 0.5
+ y = (img.size[1] * (1.0 / self.factor)) * 0.5
+
+ verts = [(-x, -y, 0),
+ (x, -y, 0),
+ (x, y, 0),
+ (-x, y, 0)]
+ faces = [[0, 1, 2, 3]]
+
+ mesh_data = bpy.data.meshes.new(img.name)
+ mesh_data.from_pydata(verts, [], faces)
+ mesh_data.update()
+ add_object_data(context, mesh_data, operator=self)
+ plane = context.scene.objects.active
+ plane.data.uv_textures.new()
+ plane.data.materials.append(material)
+ plane.data.uv_textures[0].data[0].image = img
+ plane.data.uv_textures[0].data[0].use_image = True
+ plane.data.uv_textures[0].data[0].blend_type = 'ALPHA'
+ plane.data.uv_textures[0].data[0].use_twoside = True
+ return plane
+
+def generate_paths(self):
+ directory, file = os.path.split(self.filepath)
+
+ if file and not self.all_in_directory:
+ #test for extension
+ if not os.path.splitext(file)[1].lstrip('.').lower() in EXTENSIONS:
+ return [], directory
+
+ return [self.filepath], directory
+
+ if not file or self.all_in_directory:
+ imagepaths = []
+ files_in_directory = os.listdir(directory)
+ #clean files from nonimages
+ files_in_directory = [file for file in files_in_directory
+ if os.path.splitext(file)[1].lstrip('.').lower()
+ in EXTENSIONS]
+ #clean from unwanted extensions
+ if self.extension != '*':
+ files_in_directory = [file for file in files_in_directory
+ if os.path.splitext(file)[1].lstrip('.').lower()
+ in EXT_LIST[self.extension]]
+ #create paths
+ for file in files_in_directory:
+ imagepaths.append(os.path.join(directory, file))
+
+ #print(imagepaths)
+ return imagepaths, directory
+
+def align_planes(self, planes):
+ gap = self.align_offset
+ offset = 0
+ for i, plane in enumerate(planes):
+ offset += (plane.dimensions.x / 2) + gap
+ if i == 0: continue
+ move_local = mathutils.Vector((offset, 0, 0))
+ move_world = plane.location + move_local * plane.matrix_world.inverted()
+ plane.location += move_world
+ offset += (plane.dimensions.x / 2)
+
+##### MAIN #####
+def import_images(self, context):
+ import_list, directory = generate_paths(self)
+ images = []
+ textures = []
+ materials = []
+ planes = []
+
+ for path in import_list:
+ images.append(load_image(path, directory))
+
+ for image in images:
+ set_image_options(self, image)
+ textures.append(create_image_textures(self, image))
+
+ for texture in textures:
+ materials.append(create_material_for_texture(self, texture))
+
+ for material in materials:
+ plane = create_image_plane(self, context, material)
+ planes.append(plane)
+
+ context.scene.update()
+ if self.align:
+ align_planes(self, planes)
+
+ for plane in planes:
+ plane.select = True
+
+ self.report(type='INFO',
+ message='Added %i Image Plane(s)' %len(planes))
+
+##### OPERATOR #####
+class IMPORT_OT_image_to_plane(bpy.types.Operator, ImportHelper, AddObjectHelper):
+ ''''''
+ bl_idname = "import.image_to_plane"
+ bl_label = "Import Images as Planes"
+ bl_description = "Create mesh plane(s) from image files" \
+ " with the appropriate aspect ratio."
+ bl_options = {'REGISTER', 'UNDO'}
+
+ ## OPTIONS ##
+ all_in_directory = BoolProperty(name="All in directory",
+ description="Import all image files (of the selected type)" \
+ " in this directory.",
+ default=False)
+ align = BoolProperty(name='Align Planes',
+ description='Create Planes in a row',
+ default=True)
+ align_offset = FloatProperty(name='Offset',
+ description='Space between Planes',
+ min=0, soft_min=0,
+ default=0.1)
+ extEnum = [
+ ('*', 'All image formats',
+ 'Import all known image (or movie) formats.'),
+ ('jpeg', 'JPEG (.jpg, .jpeg, .jpe)',
+ 'Joint Photographic Experts Group'),
+ ('png', 'PNG (.png)', 'Portable Network Graphics'),
+ ('tga', 'Truevision TGA (.tga, tpic)', ''),
+ ('tiff', 'TIFF (.tif, .tiff)', 'Tagged Image File Format'),
+ ('exr', 'OpenEXR (.exr)', 'OpenEXR HDR imaging image file format'),
+ ('hdr', 'Radiance HDR (.hdr, .pic)', ''),
+ ('avi', 'AVI (.avi)', 'Audio Video Interleave'),
+ ('mov', 'QuickTime (.mov, .qt)', ''),
+ ('mp4', 'MPEG-4 (.mp4)', ' MPEG-4 Part 14'),
+ ('ogg', 'OGG Theora (.ogg, .ogv)', ''),
+ ('bmp', 'BMP (.bmp, .dib)', 'Windows Bitmap'),
+ ('cin', 'CIN (.cin)', ''),
+ ('dpx', 'DPX (.dpx)', 'DPX (Digital Picture Exchange)'),
+ ('psd', 'PSD (.psd)', 'Photoshop Document')]
+ extension = EnumProperty(name="Extension",
+ description="Only import files of this type.",
+ items=extEnum)
+ use_dimension = BoolProperty(name="Use image dimensions",
+ description="Use the image's pixels to derive the size of the plane.",
+ default=False)
+ factor = IntProperty(name="Pixels/BU",
+ description="Number of pixels per Blenderunit.",
+ min=1,
+ default=500)
+
+ ## MATERIAL OPTIONS ##
+ use_shadeless = BoolProperty(name="Shadeless",
+ description="Set material to shadeless",
+ default=False)
+ use_transparency = BoolProperty(name="Use alpha",
+ description="Use alphachannel for transparency.",
+ default=False)
+ tEnum = [
+ ('Z_TRANSPARENCY',
+ 'Z Transparency',
+ 'Use alpha buffer for transparent faces'),
+ ('RAYTRACE',
+ 'Raytrace',
+ 'Use raytracing for transparent refraction rendering.')]
+ transparency_method = EnumProperty(name="Transp. Method",
+ description="Transparency Method",
+ items=tEnum)
+
+ ## IMAGE OPTIONS ##
+ use_premultiply = BoolProperty(name="Premultiply",
+ description="Premultiply image",
+ default=False)
+
+ ## DRAW ##
+ def draw(self, context):
+ layout = self.layout
+ box = layout.box()
+ box.label('Import Options:', icon='FILTER')
+ box.prop(self, 'all_in_directory')
+ box.prop(self, 'extension', icon='FILE_IMAGE')
+ box.prop(self, 'align')
+ box.prop(self, 'align_offset')
+ box = layout.box()
+ box.label('Material mappings:', icon='MATERIAL')
+ box.prop(self, 'use_shadeless')
+ box.prop(self, 'use_transparency')
+ box.prop(self, 'use_premultiply')
+ box.prop(self, 'transparency_method', expand=True)
+ box = layout.box()
+ box.label('Plane dimensions:', icon='ARROW_LEFTRIGHT')
+ box.prop(self, 'use_dimension')
+ box.prop(self, 'factor', expand=True)
+
+
+ ## EXECUTE ##
+ def execute(self, context):
+ #the add utils don't work in this case
+ #because many objects are added
+ #disable relevant things beforehand
+ editmode = context.user_preferences.edit.use_enter_edit_mode
+ context.user_preferences.edit.use_enter_edit_mode = False
+ if context.active_object\
+ and context.active_object.mode == 'EDIT':
+ bpy.ops.object.mode_set(mode='OBJECT')
+
+ import_images(self, context)
+
+ context.user_preferences.edit.use_enter_edit_mode = editmode
+ return {'FINISHED'}
+
+
+
+
+##### REGISTER #####
+
+def import_images_button(self, context):
+ self.layout.operator(IMPORT_OT_image_to_plane.bl_idname, text="Images as Planes", icon='PLUGIN')
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ bpy.types.INFO_MT_file_import.append(import_images_button)
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+ bpy.types.INFO_MT_file_import.remove(import_images_button)
+if __name__ == '__main__':
+ register()
diff --git a/io_import_scene_dxf.py b/io_import_scene_dxf.py
new file mode 100644
index 00000000..96344ea2
--- /dev/null
+++ b/io_import_scene_dxf.py
@@ -0,0 +1,2526 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+bl_info = {
+ 'name': 'Import Autocad DXF Format (.dxf)',
+ 'author': 'Thomas Larsson, Remigiusz Fiedler',
+ 'version': (0, 1, 5),
+ "blender": (2, 5, 7),
+ "api": 36079,
+ 'location': 'File > Import > Autocad (.dxf)',
+ 'description': 'Import files in the Autocad DXF format (.dxf)',
+ 'warning': 'only a part of DXF specification is supported: Work in Progress',
+ 'wiki_url': 'http://wiki.blender.org/index.php/Extensions:2.5/Py/'\
+ 'Scripts/Import-Export/DXF_Importer',
+ 'tracker_url': 'https://projects.blender.org/tracker/index.php?'\
+ 'func=detail&aid=23480',
+ 'support': 'OFFICIAL',
+ 'category': 'Import-Export',
+ }
+
+"""
+Release note by migius (DXF support maintainer) 2011.01.02:
+Script supports only a small part of DXF specification:
+- imports LINE, ARC, CIRCLE, ELLIPSE, SOLID, TRACE, POLYLINE, LWPOLYLINE
+- imports TEXT, MTEXT
+- supports 3d-rotation of entities (210 group)
+- supports THICKNESS for SOLID, TRACE, LINE, ARC, CIRCLE, ELLIPSE
+- ignores WIDTH, THICKNESS, BULGE in POLYLINE/LWPOLYLINE
+- ignores face-data in POLYFACE / POLYMESH
+- ignores TEXT 2d-rotation
+- ignores hierarchies (BLOCK, INSERT, GROUP)
+- ignores LAYER
+- ignores COLOR, LINEWIDTH, LINESTYLE
+
+This script is a temporary solution.
+Probably no more improvements will be done to this script.
+The full-feature importer script from 2.49 will be back in 2.6 release.
+
+Installation:
+Place this file to Blender addons directory
+ (on Windows it is %Blender_directory%\2.53\scripts\addons\)
+You must activate the script in the "Add-Ons" tab (user preferences).
+Access it from File > Import menu.
+
+History:
+ver 0.1.5 - 2011.02.05 by migius for r.34661
+- changed support level to OFFICIAL
+- fixed missing last point at building Mesh-ARCs (by pildanovak)
+- fixed for changes in API and mathutils by campbell
+ver 0.1.4 - 2011.01.13 by migius
+- modified for latest API in rev.34300 (by Filiciss Muhgue)
+ver 0.1.3 - 2011.01.02 by migius
+- added draw curves as sequence for "Draw_as_Curve"
+- added toggle "Draw as one" as user preset in UI
+- added draw POINT as mesh-vertex
+- added draw_THICKNESS for LINE, ARC, CIRCLE, ELLIPSE, LWPOLYLINE and POLYLINE
+- added draw_THICKNESS for SOLID, TRACE
+ver 0.1.2 - 2010.12.27 by migius
+- added draw() for TRACE
+- fixed wrong vertex order in SOLID
+- added CIRCLE resolution as user preset in UI
+- added closing segment for circular LWPOLYLINE and POLYLINE
+- fixed registering for 2.55beta
+ver 0.1.1 - 2010.09.07 by migius
+- fixed dxf-file names recognition limited to ".dxf"
+- fixed registering for 2.53beta
+ver 0.1 - 2010.06.10 by Thomas Larsson
+"""
+
+__version__ = '.'.join([str(s) for s in bl_info['version']])
+
+import os
+import codecs
+import math
+from math import sin, cos, radians
+import bpy
+import mathutils
+from mathutils import Vector, Matrix
+
+#
+# Global flags
+#
+
+T_Merge = 0x01
+T_NewScene = 0x02
+T_Curves = 0x04
+T_DrawOne = 0x08
+T_Debug = 0x10
+T_Verbose = 0x20
+T_ThicON = 0x40
+
+toggle = T_Merge | T_NewScene | T_DrawOne | T_ThicON
+theCircleRes = 32
+theMergeLimit = 1e-5
+
+#
+# class CSection:
+#
+
+class CSection:
+ type = None
+
+ def __init__(self):
+ self.data = []
+
+ def display(self):
+ print("Section", self.type)
+ for datum in self.data:
+ datum.display()
+
+#
+# class CTable:
+#
+
+class CTable:
+ def __init__(self):
+ self.type = None
+ self.name = None
+ self.handle = None
+ self.owner = None
+ self.subclass = None
+ self.nEntries = 0
+ def display(self):
+ print("Table %s %s %s %s %s %d" % (self.type, self.name, self.handle, self.owner, self.subclass, self.nEntries))
+
+#
+# class CEntity:
+#
+class CEntity:
+ def __init__(self, typ, drawtype):
+ self.type = typ
+ self.drawtype = drawtype
+ self.handle = None
+ self.owner = None
+ self.subclass = None
+ self.layer = 0
+ self.color = 0
+ self.invisible = 0
+ self.linetype_name = ''
+ self.linetype_scale = 1.0
+ self.paperspace = 0
+ #self.normal = Vector((0,0,1))
+
+ def display(self):
+ print("Entity %s %s %s %s %s %s %x" %
+ (self.type, self.handle, self.owner, self.subclass, self.layer, self.color, self.invisible))
+
+ def build(self, vn=0):
+ global toggle
+ if toggle & T_Debug:
+ raise NameError("Warning: can not build - unsupported entity type: %s" % self.type)
+ return(([], [], [], vn))
+
+ def draw(self):
+ global toggle
+ if toggle & T_Debug:
+ raise NameError("Warning: can not draw - unsupported entity type: %s" % self.type)
+ return
+
+
+DxfCommonAttributes = {
+ 5 : 'handle',
+ 6 : 'linetype_name',
+ 8 : 'layer',
+ 48 : 'linetype_scale',
+ 60 : 'invisible',
+ 62 : 'color',
+ 67 : 'paperspace',
+ 100 : 'subclass',
+ 330 : 'owner',
+ 360 : 'owner',
+}
+
+#
+# class C3dFace(CEntity):
+# 10 : 'point0.x', 20 : 'point0.y', 30 : 'point0.z',
+# 11 : 'point1.x', 21 : 'point1.y', 31 : 'point1.z',
+# 12 : 'point2.x', 22 : 'point2.y', 32 : 'point2.z',
+# 13 : 'point3.x', 23 : 'point3.y', 33 : 'point3.z',
+# 70 : 'flags',
+#
+
+class C3dFace(CEntity):
+ def __init__(self):
+ CEntity.__init__(self, '3DFACE', 'Mesh')
+ self.point0 = Vector()
+ self.point1 = Vector()
+ self.point2 = Vector()
+ self.point3 = Vector()
+
+ def display(self):
+ CEntity.display(self)
+ print(self.point0)
+ print(self.point1)
+ print(self.point2)
+ print(self.point3)
+
+ def build(self, vn=0):
+ verts = [self.point0, self.point1, self.point2]
+ if self.point3 == Vector((0,0,0)) or self.point2 == self.point3:
+ faces = [(vn+0, vn+1, vn+2)]
+ vn += 3
+ else:
+ verts.append( self.point3 )
+ faces = [(vn+0, vn+1, vn+2, vn+3)]
+ vn += 4
+ return((verts, [], faces, vn))
+
+#
+# class C3dSolid(CEntity):
+# 1 : 'data', 3 : 'more', 70 : 'version',
+#
+
+class C3dSolid(CEntity):
+ def __init__(self):
+ CEntity.__init__(self, '3DSOLID', 'Mesh')
+ self.data = None
+ self.more = None
+ self.version = 0
+
+#
+# class CAcadProxyEntity(CEntity):
+# 70 : 'format',
+# 90 : 'id', 91 : 'class', 92 : 'graphics_size', 93 : 'entity_size', 95: 'format',
+# 310 : 'data', 330 : 'id1', 340 : 'id2', 350 : 'id3', 360 : 'id4',
+#
+
+class CAcadProxyEntity(CEntity):
+ def __init__(self):
+ CEntity.__init__(self, 'ACAD_PROXY_ENTITY', None)
+
+
+#
+# class CArc(CEntity):
+# 10 : 'center.x', 20 : 'center.y', 30 : 'center.z',
+# 40 : 'radius',
+# 50 : 'start_angle', 51 : 'end_angle'
+#
+
+class CArc(CEntity):
+ def __init__(self):
+ CEntity.__init__(self, 'ARC', 'Mesh')
+ self.center = Vector()
+ self.radius = 0.0
+ self.start_angle = 0.0
+ self.end_angle = 0.0
+ self.thickness = 0.0
+ self.normal = Vector((0,0,1))
+
+ def display(self):
+ CEntity.display(self)
+ print(self.center)
+ print("%.4f %.4f %.4f " % (self.radius, self.start_angle, self.end_angle))
+
+ def build(self, vn=0):
+ start, end = self.start_angle, self.end_angle
+ if end > 360: end = end % 360.0
+ if end < start: end +=360.0
+ angle = end - start
+
+ deg2rad = math.pi/180.0
+ start *= deg2rad
+ end *= deg2rad
+ dphi = end - start
+ phi0 = start
+ w = dphi/theCircleRes
+ r = self.radius
+ center = self.center
+ v0 = vn
+ points = []
+ edges, faces = [], []
+ for n in range(theCircleRes + 1):
+ s = math.sin(n*w + phi0)
+ c = math.cos(n*w + phi0)
+ v = center + Vector((r*c, r*s, 0.0))
+ points.append(v)
+ pn = len(points)
+ thic = self.thickness
+ t_vector = Vector((0, 0, thic))
+ if thic != 0 and (toggle & T_ThicON):
+ thic_points = [v + t_vector for v in points]
+ if thic < 0.0:
+ thic_points.extend(points)
+ points = thic_points
+ else:
+ points.extend(thic_points)
+ faces = [(v0+nr+0,v0+nr+1,v0+pn+nr+1,v0+pn+nr+0) for nr in range(pn)]
+ faces.pop()
+ self.drawtype = 'Mesh'
+ vn += 2*pn
+ else:
+ edges = [(v0+nr+0,v0+nr+1) for nr in range(pn)]
+ edges.pop()
+ vn += pn
+
+ if self.normal!=Vector((0,0,1)):
+ ma = getOCS(self.normal)
+ if ma:
+ #ma.invert()
+ points = [v * ma for v in points]
+ #print ('arc vn=', vn)
+ #print ('faces=', len(faces))
+ return ((points, edges, faces, vn))
+
+#
+# class CArcAlignedText(CEntity):
+# 1 : 'text', 2 : 'font', 3 : 'bigfont', 7 : 'style',
+# 10 : 'center.x', 20 : 'center.y', 30 : 'center.z',
+# 40 : 'radius', 41 : 'width', 42 : 'height', 43 : 'spacing',
+# 44 : 'offset', 45 : 'right_offset', 46 : 'left_offset',
+# 50 : 'start_angle', 51 : 'end_angle',
+# 70 : 'order', 71 : 'direction', 72 : 'alignment', 73 : 'side',
+# 74 : 'bold', 75 : 'italic', 76 : 'underline',
+# 77 : 'character_set', 78 : 'pitch', 79 : 'fonttype',
+# 90 : 'color',
+# 280 : 'wizard', 330 : 'id'
+#
+
+class CArcAlignedText(CEntity):
+ def __init__(self):
+ CEntity.__init__(self, 'ARCALIGNEDTEXT', 'Mesh')
+ self.text = ""
+ self.style = ""
+ self.center = Vector()
+ self.radius = 0.0
+ self.width = 1.0
+ self.height = 1.0
+ self.spacing = 1.0
+ self.offset = 0.0
+ self.right_offset = 0.0
+ self.left_offset = 0.0
+ self.start_angle = 0.0
+ self.end_angle = 0.0
+ self.order = 0
+ self.directions = 0
+ self.alignment = 0
+ self.side = 0
+ self.bold = 0
+ self.italic = 0
+ self.underline = 0
+ self.character_set = 0
+ self.pitch = 0
+ self.fonttype = 0
+ self.color = 0
+ self.wizard = None
+ self.id = None
+ self.normal = Vector((0,0,1))
+
+
+#
+# class CAttdef(CEntity):
+# 1 : 'text', 2 : 'tag', 3 : 'prompt', 7 : 'style',
+# 10 : 'insertion_point.x', 20 : 'insertion_point.y', 30 : 'insertion_point.z',
+# 11 : 'alignment_point.x', 21 : 'alignment_point.y', 31 : 'alignment_point.z',
+# 40 : 'height', 41 : 'x_scale',
+# 50 : 'rotation_angle', 51 : 'oblique_angle',
+# 70 : 'flags', 71 : 'text_generation_flags',
+# 72 : 'horizontal_justification', 74 : 'vertical_justification',
+#
+
+class CAttdef(CEntity):
+ def __init__(self):
+ CEntity.__init__(self, 'ATTDEF', None)
+ self.value = ""
+ self.tag = ""
+ self.prompt = ""
+ self.style = ""
+ self.insertion_point = Vector()
+ self.alignment_point = Vector()
+ self.height = 1.0
+ self.x_scale = 1.0
+ self.rotation_angle = 0.0
+ self.oblique_angle = 0.0
+ self.flags = 0
+ self.text_generation_flags = 0
+ self.horizontal_justification = 0.0
+ self.vertical_justification = 0.0
+ self.normal = Vector((0,0,1))
+
+ def draw(self):
+ drawText(self.text, self.insertion_point, self.height, self.x_scale, self.rotation_angle, self.oblique_angle, self.normal)
+ return
+
+#
+# class CAttrib(CEntity):
+# 1 : 'text', 2 : 'tag', 3 : 'prompt', 7 : 'style',
+# 10 : 'insertion_point.x', 20 : 'insertion_point.y', 30 : 'insertion_point.z',
+# 11 : 'alignment_point.x', 21 : 'alignment_point.y', 31 : 'alignment_point.z',
+# 40 : 'height', 41 : 'x_scale',
+# 50 : 'rotation_angle', 51 : 'oblique_angle',
+# 70 : 'flags', 73 : 'length',
+# 71 : 'text_generation_flags', 72 : 'horizontal_justification', 74 : 'vertical_justification',
+#
+
+class CAttrib(CEntity):
+ def __init__(self):
+ CEntity.__init__(self, 'ATTRIB', None)
+ self.text = ""
+ self.tag = ""
+ self.prompt = ""
+
+ self.style = ""
+ self.insertion_point = Vector()
+ self.alignment_point = Vector()
+ self.height = 1.0
+ self.x_scale = 1.0
+ self.rotation_angle = 0.0
+ self.oblique_angle = 0.0
+ self.flags = 0
+ self.length = 1.0
+ self.text_generation_flags = 0
+ self.horizontal_justification = 0.0
+ self.vertical_justification = 0.0
+ self.normal = Vector((0,0,1))
+
+ def draw(self):
+ drawText(self.text, self.insertion_point, self.height, self.x_scale, self.rotation_angle, self.oblique_angle, self.normal)
+ return
+
+
+#
+# class CBlock(CEntity):
+# 1 : 'xref', 2 : 'name', 3 : 'also_name',
+# 10 : 'base_point.x', 20 : 'base_point.y', 30 : 'base_point.z',
+# 40 : 'size', 41 : 'x_scale',
+# 50 : 'rotation_angle', 51 : 'oblique_angle',
+# 70 : 'flags',
+#
+
+class CBlock(CEntity):
+ def __init__(self):
+ CEntity.__init__(self, 'BLOCK', None)
+ self.xref = ""
+ self.name = ""
+ self.also_name = ""
+ self.base_point = Vector()
+ self.size = 1.0
+ self.x_scale = 1.0
+ self.rotation_angle = 0.0
+ self.oblique_angle = 0.0
+ self.flags = 0
+ self.normal = Vector((0,0,1))
+
+ def display(self):
+ CEntity.display(self)
+ print("%s %s %s " % (self.xref, self.name, self.also_name))
+ print(self.base_point)
+
+ def draw(self):
+ # Todo
+ return
+
+#
+# class CCircle(CEntity):
+# 10 : 'center.x', 20 : 'center.y', 30 : 'center.z',
+# 40 : 'radius'
+#
+
+class CCircle(CEntity):
+ def __init__(self):
+ CEntity.__init__(self, 'CIRCLE', 'Mesh')
+ self.center = Vector()
+ self.radius = 0.0
+ self.thickness = 0.0
+ self.normal = Vector((0,0,1))
+
+ def display(self):
+ CEntity.display(self)
+ print(self.center)
+ print("%.4f" % self.radius)
+
+ def build(self, vn=0):
+ w = 2*math.pi/theCircleRes
+ r = self.radius
+ center = self.center
+ points = []
+ edges, faces = [], []
+ v0 = vn
+ for n in range(theCircleRes):
+ s = math.sin(n*w)
+ c = math.cos(n*w)
+ v = center + Vector((r*c, r*s, 0))
+ points.append(v)
+
+ pn = len(points)
+ thic = self.thickness
+ t_vector = Vector((0, 0, thic))
+ if thic != 0 and (toggle & T_ThicON):
+ thic_points = [v + t_vector for v in points]
+ if thic < 0.0:
+ thic_points.extend(points)
+ points = thic_points
+ else:
+ points.extend(thic_points)
+ faces = [(v0+nr,v0+nr+1,pn+v0+nr+1,pn+v0+nr) for nr in range(pn)]
+ nr = pn -1
+ faces[-1] = (v0+nr,v0,pn+v0,pn+v0+nr)
+ self.drawtype = 'Mesh'
+ vn += 2*pn
+ else:
+ edges = [(v0+nr,v0+nr+1) for nr in range(pn)]
+ nr = pn -1
+ edges[-1] = (v0+nr,v0)
+ vn += pn
+ if self.normal!=Vector((0,0,1)):
+ ma = getOCS(self.normal)
+ if ma:
+ #ma.invert()
+ points = [v * ma for v in points]
+ #print ('cir vn=', vn)
+ #print ('faces=',len(faces))
+ return( (points, edges, faces, vn) )
+
+#
+# class CDimension(CEntity):
+# 1 : 'text', 2 : 'name', 3 : 'style',
+# 10 : 'def_point.x', 20 : 'def_point.y', 30 : 'def_point.z',
+# 11 : 'mid_point.x', 21 : 'mid_point.y', 31 : 'mid_point.z',
+# 12 : 'vector.x', 22 : 'vector.y', 32 : 'vector.z',
+# 13 : 'def_point2.x', 23 : 'def_point2.y', 33 : 'def_point2.z',
+# 14 : 'vector2.x', 24 : 'vector2.y', 34 : 'vector2.z',
+# 15 : 'vector3.x', 25 : 'vector3.y', 35 : 'vector3.z',
+# 16 : 'vector4.x', 26 : 'vector4.y', 36 : 'vector4.z',
+# 70 : 'dimtype',
+#
+
+class CDimension(CEntity):
+ def __init__(self):
+ CEntity.__init__(self, 'DIMENSION', None)
+ self.text = ""
+ self.name = ""
+ self.style = ""
+ self.def_point = Vector()
+ self.mid_point = Vector()
+ self.vector = Vector()
+ self.def_point2 = Vector()
+ self.vector2 = Vector()
+ self.vector3 = Vector()
+ self.vector4 = Vector()
+ self.dimtype = 0
+ self.normal = Vector((0,0,1))
+
+ def draw(self):
+ return
+
+#
+# class CEllipse(CEntity):
+# 10 : 'center.x', 20 : 'center.y', 30 : 'center.z',
+# 11 : 'end_point.x', 21 : 'end_point.y', 31 : 'end_point.z',
+# 40 : 'ratio', 41 : 'start', 42 : 'end',
+#
+
+class CEllipse(CEntity):
+ def __init__(self):
+ CEntity.__init__(self, 'ELLIPSE', 'Mesh')
+ self.center = Vector()
+ self.end_point = Vector()
+ self.ratio = 1.0
+ self.start = 0.0
+ self.end = 2*math.pi
+ self.thickness = 0.0
+ self.normal = Vector((0,0,1))
+
+ def display(self):
+ CEntity.display(self)
+ print(self.center)
+ print("%.4f" % self.ratio)
+
+ def build(self, vn=0):
+ dphi = (self.end - self.start)
+ phi0 = self.start
+ w = dphi/theCircleRes
+ r = self.end_point.length
+ f = self.ratio
+ a = self.end_point.x/r
+ b = self.end_point.y/r
+ center = self.center
+ v0 = vn
+ points = []
+ edges, faces = [], []
+ for n in range(theCircleRes):
+ x = r*math.sin(n*w + phi0)
+ y = f*r*math.cos(n*w + phi0)
+ v = (center.x - a*x + b*y, center.y - a*y - b*x, center.z)
+ points.append(v)
+
+ pn = len(points)
+ thic = self.thickness
+ t_vector = Vector((0, 0, thic))
+ if thic != 0 and (toggle & T_ThicON):
+ thic_points = [v + t_vector for v in points]
+ if thic < 0.0:
+ thic_points.extend(points)
+ points = thic_points
+ else:
+ points.extend(thic_points)
+ faces = [(v0+nr,v0+nr+1,pn+v0+nr+1,pn+v0+nr) for nr in range(pn)]
+ nr = pn -1
+ faces[-1] = (v0+nr,v0,pn+v0,pn+v0+nr)
+ #self.drawtype = 'Mesh'
+ vn += 2*pn
+ else:
+ edges = [(v0+nr,v0+nr+1) for nr in range(pn)]
+ nr = pn -1
+ edges[-1] = (v0+nr,v0)
+ vn += pn
+
+
+ if thic != 0 and (toggle & T_ThicON):
+ pass
+ if self.normal!=Vector((0,0,1)):
+ ma = getOCS(self.normal)
+ if ma:
+ #ma.invert()
+ points = [v * ma for v in points]
+ return ((points, edges, faces, vn))
+
+#
+# class CHatch(CEntity):
+# 2 : 'pattern',
+# 10 : 'point.x', 20 : 'point.y', 30 : 'point.z',
+# 41 : 'scale', 47 : 'pixelsize', 52 : 'angle',
+# 70 : 'fill', 71 : 'associativity', 75: 'style', 77 : 'double',
+# 78 : 'numlines', 91 : 'numpaths', 98 : 'numseeds',
+#
+
+class CHatch(CEntity):
+ def __init__(self):
+ CEntity.__init__(self, 'HATCH', None)
+ self.pattern = 0
+ self.point = Vector()
+ self.scale = 1.0
+ self.pixelsize = 1.0
+ self.angle = 0.0
+ self.fill = 0
+ self.associativity = 0
+ self.style = 0
+ self.double = 0
+ self.numlines = 0
+ self.numpaths = 0
+ self.numseeds = 0
+ self.normal = Vector((0,0,1))
+
+
+# class CImage(CEntity):
+# 10 : 'insertion_point.x', 20 : 'insertion_point.y', 30 : 'insertion_point.z',
+# 11 : 'u_vector.x', 21 : 'u_vector.y', 31 : 'u_vector.z',
+# 12 : 'v_vector.x', 22 : 'v_vector.y', 32 : 'v_vector.z',
+# 13 : 'size.x', 23 : 'size.y', 33 : 'size.z',
+# 14 : 'clip.x', 24 : 'clip.y', 34 : 'clip.z',
+# 70 : 'display', 71 : 'cliptype',
+# 90 : 'version',
+# 280 : 'clipstate', 281 : 'brightness', 282 : 'contrast', 283 : 'fade',
+# 340 : 'image', 360 : 'reactor'
+#
+
+class CImage(CEntity):
+ def __init__(self):
+ CEntity.__init__(self, 'IMAGE', None)
+ self.insertion_point = Vector()
+ self.u_vector = Vector()
+ self.v_vector = Vector()
+ self.size = Vector()
+ self.clip = Vector()
+ self.display = 0
+ self.cliptype = 0
+ self.version = 1
+ self.clipstate = 0
+ self.brightness = 0
+ self.constrast = 0
+ self.fade = 0
+ self.image = None
+ self.reactor = None
+ self.normal = Vector((0,0,1))
+
+#
+# class CInsert(CEntity):
+# 1 : 'attributes_follow', 2 : 'name',
+# 10 : 'insertion_point.x', 20 : 'insertion_point.y', 30 : 'insertion_point.z',
+# 41 : 'x_scale', 42 : 'y_scale', 43 : 'z_scale',
+# 44 : 'column_spacing', 45 : 'row_spacing',
+# 50 : 'rotation_angle', 66 : 'attributes_follow',
+# 70 : 'column_count', 71 : 'row_count',
+#
+
+class CInsert(CEntity):
+ def __init__(self):
+ CEntity.__init__(self, 'INSERT', None)
+ self.attributes_follow = 1
+ self.name = ""
+ self.insertion_point = Vector()
+ self.x_scale = 1.0
+ self.y_scale = 1.0
+ self.z_scale = 1.0
+ self.column_spacing = 1.0
+ self.row_spacing = 1.0
+ self.rotation_angle = 0.0
+ self.column_count = 1
+ self.row_count = 1
+ self.attributes_follow = 0
+ self.normal = Vector((0,0,1))
+
+ def display(self):
+ CEntity.display(self)
+ print(self.insertion_point)
+
+ def draw(self):
+ # Todo
+ return
+
+#
+# class CLeader(CEntity):
+# 3 : 'style',
+# 10 : ['new_vertex(data)'], 20 : 'vertex.y', 30 : 'vertex.z',
+# 40 : 'height', 41 : 'width',
+# 71 : 'arrowhead', 72 : 'pathtype', 73 : 'creation',
+# 74 : 'hookdir', 75 : 'hookline', 76 : 'numverts', 77 : 'color',
+# 210 : 'normal.x', 220 : 'normal.y', 230 : 'normal.z',
+# 211 : 'horizon.x', 221 : 'horizon.y', 231 : 'horizon.z',
+# 212 : 'offset_ins.x', 222 : 'offset_ins.y', 232 : 'offset_ins.z',
+# 213 : 'offset_ann.x', 223 : 'offset_ann.y', 233 : 'offset_ann.z',
+#
+
+class CLeader(CEntity):
+ def __init__(self):
+ CEntity.__init__(self, 'LEADER', 'Mesh')
+ self.style = ""
+ self.vertex = None
+ self.verts = []
+ self.height = 1.0
+ self.width = 1.0
+ self.arrowhead = 0
+ self.pathtype = 0
+ self.creation = 0
+ self.hookdir = 0
+ self.hookline = 0
+ self.numverts = 0
+ self.color = 0
+ self.normal = Vector((0,0,1))
+ self.horizon = Vector()
+ self.offset_ins = Vector()
+ self.offset_ann = Vector()
+
+ def new_vertex(self, data):
+ self.vertex = Vector()
+ self.vertex.x = data
+ self.verts.append(self.vertex)
+
+ def build(self, vn=0):
+ edges = []
+ for v in self.verts:
+ edges.append((vn, vn+1))
+ vn += 1
+ edges.pop()
+ return (self.verts, edges, [], vn)
+
+# class CLwPolyLine(CEntity):
+# 10 : ['new_vertex(data)'], 20 : 'vertex.y', 30 : 'vertex.z',
+# 38 : 'elevation', 39 : 'thickness',
+# 40 : 'start_width', 41 : 'end_width', 42 : 'bulge', 43 : 'constant_width',
+# 70 : 'flags', 90 : 'numverts'
+#
+
class CLWPolyLine(CEntity):
    """LWPOLYLINE entity: a light-weight polyline, built as mesh edges."""
    def __init__(self):
        CEntity.__init__(self, 'LWPOLYLINE', None)
        self.vertex = None          # vertex currently being filled by the parser
        self.verts = []
        self.elevation = 0
        self.thickness = 0.0
        self.start_width = 0.0
        self.end_width = 0.0
        self.bulge = 0.0
        self.constant_width = 0.0
        self.flags = 0
        self.numverts = 0
        self.normal = Vector((0,0,1))

    def new_vertex(self, data):
        # Group code 10 starts a new vertex with its x coordinate;
        # codes 20/30 are written into self.vertex by the generic parser.
        self.vertex = Vector()
        self.vertex.x = data
        self.verts.append(self.vertex)

    def build(self, vn=0):
        # Connect consecutive vertices with edges, starting at index vn.
        edges = []
        v_start = vn
        for v in self.verts:
            edges.append((vn, vn+1))
            vn += 1
        if self.flags & PL_CLOSED:
            # Closed polyline: redirect the dangling last edge to the start.
            edges[-1] = (vn-1, v_start)
        else:
            edges.pop()
        verts = self.verts
        if self.normal!=Vector((0,0,1)):
            # Entity has its own Object Coordinate System: map to world space.
            ma = getOCS(self.normal)
            if ma:
                #ma.invert()
                verts = [v * ma for v in verts]
        # NOTE(review): returns vn-1, so the next entity would start at the
        # last vertex index of this one -- looks like an off-by-one, but
        # CPolyLine.build does the same; confirm against the caller.
        return (verts, edges, [], vn-1)
+
+#
+# class CLine(CEntity):
+# 10 : 'start_point.x', 20 : 'start_point.y', 30 : 'start_point.z',
+# 11 : 'end_point.x', 21 : 'end_point.y', 31 : 'end_point.z',
+# 39 : 'thickness',
+#
+
class CLine(CEntity):
    """LINE entity: a straight segment from start_point to end_point.

    With a non-zero thickness (and thickness import enabled) the segment is
    extruded along its normal into a quad face; otherwise it becomes a
    single mesh edge.
    """

    def __init__(self):
        CEntity.__init__(self, 'LINE', 'Mesh')
        self.start_point = Vector()
        self.end_point = Vector()
        self.thickness = 0.0
        self.normal = Vector((0,0,1))

    def display(self):
        """Debug print of both end points."""
        CEntity.display(self)
        print(self.start_point)
        print(self.end_point)

    def build(self, vn=0):
        """Return (verts, edges, faces, next_index) starting at index vn."""
        points = [self.start_point, self.end_point]
        faces, edges = [], []
        n = vn
        thic = self.thickness
        if thic != 0 and (toggle & T_ThicON):
            # Extrude along the entity normal into a quad.
            t_vector = thic * self.normal
            points.extend([v + t_vector for v in points])
            faces = [[0+n, 1+n, 3+n, 2+n]]
            self.drawtype = 'Mesh'
        else:
            edges = [[0+n, 1+n]]
        # Bug fix: advance the global vertex counter by the number of points
        # actually emitted (4 when extruded, else 2).  The original always
        # added 2, so an entity following a thick LINE reused its indices.
        vn += len(points)
        return((points, edges, faces, vn))
+
+# class CMLine(CEntity):
+# 10 : 'start_point.x', 20 : 'start_point.y', 30 : 'start_point.z',
+# 11 : ['new_vertex(data)'], 21 : 'vertex.y', 31 : 'vertex.z',
+# 12 : ['new_seg_dir(data)'], 22 : 'seg_dir.y', 32 : 'seg_dir.z',
+# 13 : ['new_miter_dir(data)'], 23 : 'miter_dir.y', 33 : 'miter_dir.z',
+# 40 : 'scale', 41 : 'elem_param', 42 : 'fill_param',
+# 70 : 'justification', 71 : 'flags'
+# 72 : 'numverts', 73 : 'numelems', 74 : 'numparam', 75 : 'numfills',
+# 340 : 'id'
+#
+
class CMLine(CEntity):
    """MLINE entity: multi-line.  Only parsed (drawtype None), never built."""
    def __init__(self):
        CEntity.__init__(self, 'MLINE', None)
        self.start_point = Vector()
        self.vertex = None          # vertex currently being filled by the parser
        self.seg_dir = None         # segment direction currently being filled
        self.miter_dir = None       # miter direction currently being filled
        self.verts = []
        self.seg_dirs = []
        self.miter_dirs = []
        self.scale = 1.0
        self.elem_param = 0
        self.fill_param = 0
        self.justification = 0
        self.flags = 0
        self.numverts = 0
        self.numelems = 0
        self.numparam = 0
        self.numfills = 0
        self.id = 0
        self.normal = Vector((0,0,1))

    def new_vertex(self, data):
        # Group code 11 starts a new vertex; 21/31 fill y/z via the parser.
        self.vertex = Vector()
        self.vertex.x = data
        self.verts.append(self.vertex)

    def new_seg_dir(self, data):
        # Group code 12 starts a new segment-direction vector.
        self.seg_dir = Vector()
        self.seg_dir.x = data
        self.seg_dirs.append(self.seg_dir)

    def new_miter_dir(self, data):
        # Group code 13 starts a new miter-direction vector.
        self.miter_dir = Vector()
        self.miter_dir.x = data
        self.miter_dirs.append(self.miter_dir)
+
+
+
+#
+# class CMText(CText):
+# 1 : 'text', 3: 'more_text', 7 : 'style',
+# 10 : 'insertion_point.x', 20 : 'insertion_point.y', 30 : 'insertion_point.z',
+# 11 : 'alignment_point.x', 21 : 'alignment_point.y', 31 : 'alignment_point.z',
+# 40 : 'nominal_height', 41 : 'reference_width', 42: 'width', 43 : 'height', 44 : 'line_spacing',
+# 50 : 'rotation_angle',
+# 71 : 'attachment_point', 72 : 'drawing_direction', 73 : 'spacing_style',
+#
+
class CMText(CEntity):
    """MTEXT entity: multi-line paragraph text, drawn as a Blender text object."""

    def __init__(self):
        CEntity.__init__(self, 'MTEXT', 'Text')
        self.text = ""
        self.more_text = ""         # continuation chunks (group code 3)
        self.style = ""
        self.insertion_point = Vector()
        self.alignment_point = Vector()
        self.nominal_height = 1.0
        self.reference_width = 1.0
        self.width = 1.0
        self.height = 1.0
        # Bug fix: group code 44 maps to 'line_spacing' in the attribute
        # table, but no default was ever initialized here.
        self.line_spacing = 1.0
        self.rotation_angle = 0.0
        self.attachment_point = 0
        self.drawing_direction = 0
        self.spacing_style = 0
        self.normal = Vector((0,0,1))

    def display(self):
        """Debug print of text, style and placement points."""
        CEntity.display(self)
        print("%s %s" % (self.text, self.style))
        print('MTEXTinsertion_point=',self.insertion_point)
        print('MTEXTalignment_point=',self.alignment_point)

    def draw(self):
        # NOTE(review): self.width is passed into drawText's 'spacing'
        # parameter (word spacing), not as a scale -- confirm intended.
        drawText(self.text, self.insertion_point, self.height, self.width, self.rotation_angle, 0.0, self.normal)
        return
+
+#
+# class CPoint(CEntity):
+# 10 : 'point.x', 20 : 'point.y', 30 : 'point.z',
+# 39 : 'thickness', 50 : 'orientation'
+#
+
class CPoint(CEntity):
    """POINT entity: imported as a single mesh vertex."""
    def __init__(self):
        CEntity.__init__(self, 'POINT', 'Mesh')
        self.point = Vector()
        self.thickness = 0.0
        self.orientation = 0.0      # angle of the X axis for the point's UCS (code 50)

    def display(self):
        CEntity.display(self)
        print(self.point)
        print("%.4f" % self.orientation)

    def build(self, vn=0):
        # draw as mesh-vertex
        verts = [self.point]
        return((verts, [], [], vn+1))

    def draw(self):
        #todo
        # draw as empty-object; currently a no-op (returns None implicitly)
        loc = self.point
        #bpy.ops.object.new('DXFpoint')
+
+#
+# class CPolyLine(CEntity):
+# 1 : 'verts_follow', 2 : 'name',
+# 10 : 'elevation.x', 20 : 'elevation.y', 30 : 'elevation.z',
+# 40 : 'start_width', 41 : 'end_width',
+# 66 : 'verts_follow_flag',
+# 70 : 'flags', 71 : 'row_count', 72 : 'column_count',
+# 73 : 'row_density', 74 : 'column_density', 75 : 'linetype',
+#
+
class CPolyLine(CEntity):
    """POLYLINE entity: vertices arrive as separate VERTEX entities and are
    collected into self.verts; built as a mesh edge chain, closed when the
    PL_CLOSED flag bit is set."""

    def __init__(self):
        CEntity.__init__(self, 'POLYLINE', 'Mesh')
        self.verts = []
        self.verts_follow = 1
        self.name = ""
        self.elevation = Vector()
        self.thickness = 0.0
        self.start_width = 0.0
        self.end_width = 0.0
        # Bug fix: group code 66 maps to 'verts_follow_flag' in the attribute
        # table; the original default was misspelled 'verts_follow_flags' and
        # so was never updated by the parser.  The old name is kept as an
        # alias in case external code reads it.
        self.verts_follow_flag = 0
        self.verts_follow_flags = 0
        self.flags = 0
        self.row_count = 1
        self.column_count = 1
        self.row_density = 1.0
        self.column_density = 1.0
        self.linetype = 1
        self.normal = Vector((0,0,1))

    def display(self):
        """Debug print of the collected VERTEX locations."""
        CEntity.display(self)
        print("VERTS")
        for v in self.verts:
            print(v.location)
        print("END VERTS")

    def build(self, vn=0):
        """Return (verts, edges, faces, next_index): one edge per consecutive
        vertex pair, with the last edge redirected to the start when closed."""
        verts = []
        lines = []
        v_start = vn
        for vert in self.verts:
            verts.append(vert.location)
            lines.append((vn, vn+1))
            vn += 1
        # Guard empty polylines so indexing/pop cannot raise.
        if lines:
            if self.flags & PL_CLOSED:
                lines[-1] = (vn-1, v_start)
            else:
                lines.pop()
        if self.normal!=Vector((0,0,1)):
            # Map OCS coordinates to world space.
            ma = getOCS(self.normal)
            if ma:
                verts = [v * ma for v in verts]
        return((verts, lines, [], vn-1))
+
+#
+# class CShape(CEntity):
+# 2 : 'name',
+# 10 : 'insertion_point.x', 20 : 'insertion_point.y', 30 : 'insertion_point.z',
+# 39 : 'thickness',
+# 40 : 'size', 41 : 'x_scale',
+# 50 : 'rotation_angle', 51 : 'oblique_angle',
+#
+
class CShape(CEntity):
    """SHAPE entity: a reference into a compiled .shx shape file.
    Only parsed (drawtype None), never built."""
    def __init__(self):
        CEntity.__init__(self, 'SHAPE', None)
        self.name = ""
        self.insertion_point = Vector()
        self.thickness = 0.0
        self.size = 1.0
        self.x_scale = 1.0
        self.rotation_angle = 0.0
        self.oblique_angle = 0.0

    def display(self):
        CEntity.display(self)
        print("%s" % (self.name))
        print(self.insertion_point)
+
+#
+# class CSpline(CEntity):
+# 10 : ['new_control_point(data)'], 20 : 'control_point.y', 30 : 'control_point.z',
+# 11 : ['new_fit_point(data)'], 21 : 'fit_point.y', 31 : 'fit_point.z',
+# 40 : ['new_knot_value(data)'],
+# 12 : 'start_tangent.x', 22 : 'start_tangent.y', 32 : 'start_tangent.z',
+# 13 : 'end_tangent.x', 23 : 'end_tangent.y', 33 : 'end_tangent.z',
+# 41 : 'weight', 42 : 'knot_tol', 43 : 'control_point_tol', 44 : 'fit_tol',
+# 70 : 'flag', 71 : 'degree',
+# 72 : 'num_knots', 73 : 'num_control_points', 74 : 'num_fit_points',
+# 210 : 'normal.x', 220 : 'normal.y', 230 : 'normal.z',
+#
+
class CSpline(CEntity):
    """SPLINE entity.

    Built as a plain polyline through the control points; knot values,
    weights and fit points are parsed but not evaluated.
    """

    def __init__(self):
        CEntity.__init__(self, 'SPLINE', 'Mesh')
        self.control_points = []
        self.fit_points = []
        self.knot_values = []
        self.control_point = None   # point currently being filled by the parser
        self.fit_point = None
        self.knot_value = None
        self.start_tangent = Vector()
        self.end_tangent = Vector()
        self.weight = 1.0
        self.knot_tol = 1e-6
        self.control_point_tol = 1e-6
        self.fit_tol = 1e-6
        self.flag = 0
        self.degree = 3
        self.num_knots = 0
        self.num_control_points = 0
        self.num_fit_points = 0
        self.thickness = 0.0
        self.normal = Vector((0,0,1))

    def new_control_point(self, data):
        # Group code 10 starts a new control point (x); 20/30 fill y/z.
        self.control_point = Vector()
        self.control_point.x = data
        self.control_points.append(self.control_point)

    def new_fit_point(self, data):
        # Group code 11 starts a new fit point.
        self.fit_point = Vector()
        self.fit_point.x = data
        self.fit_points.append(self.fit_point)

    def new_knot_value(self, data):
        # Group code 40 appends one knot value.
        self.knot_value = data
        self.knot_values.append(self.knot_value)

    def display(self):
        # not tested yet (migius)
        CEntity.display(self)
        print("CONTROL")
        for p in self.control_points:
            print(p)
        print("FIT")
        for p in self.fit_points:
            print(p)
        print("KNOT")
        for v in self.knot_values:
            print(v)

    def build(self, vn=0):
        """Return (verts, edges, faces, next_index): a polyline through the
        control points starting at global vertex index vn."""
        verts = []
        lines = []
        for vert in self.control_points:
            verts.append(vert)
            lines.append((vn, vn+1))
            vn += 1
        # The loop emits one edge past the last point; drop it.  Guard the
        # pop so a SPLINE without control points no longer raises IndexError.
        if lines:
            lines.pop()
        return((verts, lines, [], vn))
+
+
+#
+# class CSolid(CEntity):
+# 10 : 'point0.x', 20 : 'point0.y', 30 : 'point0.z',
+# 11 : 'point1.x', 21 : 'point1.y', 31 : 'point1.z',
+# 12 : 'point2.x', 22 : 'point2.y', 32 : 'point2.z',
+# 13 : 'point3.x', 23 : 'point3.y', 33 : 'point3.z',
+# 39 : 'thickness',
+#
+
class CSolid(CEntity):
    """SOLID entity: a filled triangle/quad, optionally extruded by thickness."""
    def __init__(self):
        CEntity.__init__(self, 'SOLID', 'Mesh')
        self.point0 = Vector()
        self.point1 = Vector()
        self.point2 = Vector()
        self.point3 = Vector()
        self.normal = Vector((0,0,1))
        self.thickness = 0.0

    def display(self):
        CEntity.display(self)
        print(self.point0)
        print(self.point1)
        print(self.point2)
        print(self.point3)

    def build(self, vn=0):
        # Returns (verts, edges, faces, next_index).
        points, edges, faces = [],[],[]
        # A SOLID with point3 == point2 is a triangle.
        if self.point2 == self.point3:
            points = [self.point0, self.point1, self.point2]
        else:
            points = [self.point0, self.point1, self.point2, self.point3]
        pn = len(points)
        v0 = vn

        thic = self.thickness
        t_vector = Vector((0, 0, thic))
        if thic != 0 and (toggle & T_ThicON):
            # Extrude: duplicate the outline shifted by the thickness vector.
            thic_points = [v + t_vector for v in points]
            if thic < 0.0:
                thic_points.extend(points)
                points = thic_points
            else:
                points.extend(thic_points)

            # Closed prism: bottom, top and side faces (local 0-based indices).
            if pn == 4:
                faces = [[0,1,3,2], [4,6,7,5], [0,4,5,1],
                     [1,5,7,3], [3,7,6,2], [2,6,4,0]]
            elif pn == 3:
                faces = [[0,1,2], [3,5,4], [0,3,4,1], [1,4,5,2], [2,5,3,0]]
            elif pn == 2: faces = [[0,1,3,2]]
            vn += 2*pn
        else:
            # Flat face only.  NOTE(review): points always holds 3 or 4
            # entries here, so the pn == 2 branches look unreachable -- confirm.
            if pn == 4: faces = [[0,2,3,1]]
            elif pn == 3: faces = [[0,2,1]]
            elif pn == 2:
                edges = [[0,1]]
                self.drawtype = 'Mesh'
            vn += pn
        if self.normal!=Vector((0,0,1)):
            # Map OCS coordinates to world space.
            ma = getOCS(self.normal)
            if ma:
                points = [v * ma for v in points]
        return((points, edges, faces, vn))
+
+#
+# class CText(CEntity):
+# 1 : 'text', 7 : 'style',
+# 10 : 'insertion_point.x', 20 : 'insertion_point.y', 30 : 'insertion_point.z',
+# 11 : 'alignment_point.x', 21 : 'alignment_point.y', 31 : 'alignment_point.z',
+# 40 : 'height', 41 : 'x_scale',
+# 50 : 'rotation_angle', 51 : 'oblique_angle',
+# 71 : 'flags', 72 : 'horizontal_justification', 73 : 'vertical_justification',
+#
+
class CText(CEntity):
    """TEXT entity: single-line text, drawn as a Blender text object."""
    def __init__(self):
        CEntity.__init__(self, 'TEXT', 'Text')
        self.text = ""
        self.style = ""
        self.insertion_point = Vector()
        self.alignment_point = Vector()
        self.height = 1.0
        self.x_scale = 1.0
        self.rotation_angle = 0.0
        self.oblique_angle = 0.0
        self.flags = 0
        self.horizontal_justification = 0.0
        self.vertical_justification = 0.0
        self.thickness = 0.0
        self.normal = Vector((0,0,1))

    def display(self):
        CEntity.display(self)
        print("%s %s" % (self.text, self.style))
        print(self.insertion_point)
        print(self.alignment_point)

    def draw(self):
        # Alignment point and justification are currently ignored.
        drawText(self.text, self.insertion_point, self.height, self.x_scale, self.rotation_angle, self.oblique_angle, self.normal)
        return
+
+
def drawText(text, loc, size, spacing, angle, shear, normal=Vector((0,0,1))):
    """Create a Blender text object at *loc*.

    size/spacing/shear are written to the text-curve settings (Blender 2.5x
    API).  angle is in degrees; a non-zero angle or non-default normal is
    applied afterwards via transform().
    """
    #print('angle_deg=',angle)
    bpy.ops.object.text_add(
        view_align=False,
        enter_editmode=False,
        location= loc,
        #rotation=(0, 0, angle), #need radians here
        )
    cu = bpy.context.object.data
    cu.body = text
    cu.size = size #up 2.56
    cu.space_word = spacing #up 2.56
    cu.shear = shear
    if angle!=0.0 or normal!=Vector((0,0,1)):
        obj = bpy.context.object
        transform(normal, angle, obj)
    return
+
+#
+# class CTolerance(CEntity):
+# 3 : 'style',
+# 10 : 'insertion_point.x', 20 : 'insertion_point.y', 30 : 'insertion_point.z',
+# 11 : 'direction.x', 21 : 'direction.y', 31 : 'direction.z',
+#
+
class CTolerance(CEntity):
    """TOLERANCE entity: a geometric tolerance (feature control frame).
    Only parsed (drawtype None), never drawn."""

    def __init__(self):
        CEntity.__init__(self, 'TOLERANCE', None)
        # Bug fix: group code 3 maps to 'style' in the attribute table; the
        # original default was misspelled 'stype', so the parsed attribute
        # had no default.  The old name is kept as an alias for safety.
        self.style = ""
        self.stype = ""
        self.insertion_point = Vector()
        self.direction = Vector()
+
+#
+# class CTrace(CEntity):
+# 10 : 'point0.x', 20 : 'point0.y', 30 : 'point0.z',
+# 11 : 'point1.x', 21 : 'point1.y', 31 : 'point1.z',
+# 12 : 'point2.x', 22 : 'point2.y', 32 : 'point2.z',
+# 13 : 'point3.x', 23 : 'point3.y', 33 : 'point3.z',
+# 39 : 'thickness',
+#
+
class CTrace(CEntity):
    """TRACE entity: a filled quad/triangle; same construction as SOLID but
    with a different vertex ordering (p0, p2, p1, p3)."""

    def __init__(self):
        CEntity.__init__(self, 'TRACE', 'Mesh')
        self.point0 = Vector()
        self.point1 = Vector()
        self.point2 = Vector()
        self.point3 = Vector()
        self.normal = Vector((0,0,1))
        self.thickness = 0.0

    def display(self):
        """Debug print of the four corner points."""
        CEntity.display(self)
        print(self.point0)
        print(self.point1)
        print(self.point2)
        print(self.point3)

    def build(self, vn=0):
        """Return (verts, edges, faces, next_index).

        A degenerate fourth point (point3 == point2) yields a triangle; a
        non-zero thickness (with thickness import enabled) extrudes the face
        into a prism, doubling the vertex count.
        """
        points, edges, faces = [],[],[]
        if self.point2 == self.point3:
            points = [self.point0, self.point2, self.point1]
        else:
            points = [self.point0, self.point2, self.point1, self.point3]
        pn = len(points)
        v0 = vn
        thic = self.thickness
        t_vector = Vector((0, 0, thic))
        if thic != 0 and (toggle & T_ThicON):
            thic_points = [v + t_vector for v in points]
            if thic < 0.0:
                thic_points.extend(points)
                points = thic_points
            else:
                points.extend(thic_points)

            # Closed prism: bottom, top and side faces (local 0-based indices).
            if pn == 4:
                faces = [[0,1,3,2], [4,6,7,5], [0,4,5,1],
                     [1,5,7,3], [3,7,6,2], [2,6,4,0]]
            elif pn == 3:
                faces = [[0,1,2], [3,5,4], [0,3,4,1], [1,4,5,2], [2,5,3,0]]
            elif pn == 2: faces = [[0,1,3,2]]
            vn += 2*pn
        else:
            if pn == 4: faces = [[0,2,3,1]]
            elif pn == 3: faces = [[0,2,1]]
            elif pn == 2:
                edges = [[0,1]]
                self.drawtype = 'Mesh'
            # Bug fix: the flat (no-thickness) path never advanced the global
            # vertex counter, so a following entity reused these indices.
            # Mirrors CSolid.build, which does vn += pn here.
            vn += pn
        if self.normal!=Vector((0,0,1)):
            # Map OCS coordinates to world space.
            ma = getOCS(self.normal)
            if ma:
                points = [v * ma for v in points]
        return ((points, edges, faces, vn))
+
+#
+# class CVertex(CEntity):
+# 10 : 'location.x', 20 : 'location.y', 30 : 'location.z',
+# 40 : 'start_width', 41 : 'end_width', 42 : 'bulge',
+# 50 : 'tangent',
+# 70 : 'flags',
+# 71 : 'index1', 72 : 'index2', 73 : 'index3', 74 : 'index4',
+#
+
class CVertex(CEntity):
    """VERTEX entity: one vertex of a POLYLINE; collected by the owning
    polyline, so it is never displayed or drawn on its own."""
    def __init__(self):
        CEntity.__init__(self, 'VERTEX', None)
        self.location = Vector()
        self.start_width = 0.0
        self.end_width = 0.0
        self.bulge = 0.0            # arc bulge factor (unused by the mesh builder)
        self.tangent = 0.0
        self.flags = 0

    def display(self):
        # Intentionally silent: vertices are printed via CPolyLine.display().
        return

    def draw(self):
        # Intentionally a no-op: geometry is built by the owning POLYLINE.
        return
+
+#
+# class CViewPort(CEntity):
+# 10 : 'center.x', 20 : 'center.y', 30 : 'center.z',
+# 12 : 'view_center.x', 22 : 'view_center.y', 32 : 'view_center.z',
+# 13 : 'snap_base.x', 23 : 'snap_base.y', 33 : 'snap_base.z',
+# 14 : 'snap_spacing.x', 24 : 'snap_spacing.y', 34 : 'snap_spacing.z',
+# 15 : 'grid_spacing.x', 25 : 'grid_spacing.y', 35 : 'grid_spacing.z',
+# 16 : 'view_direction.x', 26 : 'view_direction.y', 36 : 'view_direction.z',
+# 40 : 'width', 41 : 'height',
+# 68 : 'status', 69 : 'id',
+#
+
class CViewPort(CEntity):
    """VIEWPORT entity: paper-space viewport definition; parsed only."""
    def __init__(self):
        CEntity.__init__(self, 'VIEWPORT', None)
        self.center = Vector()
        self.view_center = Vector()
        self.snap_base = Vector()
        self.snap_spacing = Vector()
        self.grid_spacing = Vector()
        self.view_direction = Vector()
        self.width = 1.0
        self.height = 1.0
        self.status = 0
        self.id = 0

    def draw(self):
        # Todo: viewports are not converted to anything in Blender yet.
        return
+
+#
+# class CWipeOut(CEntity):
+# 10 : 'point.x', 20 : 'point.y', 30 : 'point.z',
+# 11 : 'direction.x', 21 : 'direction.y', 31 : 'direction.z',
+#
+
class CWipeOut(CEntity):
    """WIPEOUT entity: a masking area; parsed only (drawtype None)."""
    def __init__(self):
        CEntity.__init__(self, 'WIPEOUT', None)
        self.point = Vector()
        self.direction = Vector()
+
+#
+#
+#
# World-space basis vectors used by the Arbitrary Axis Algorithm (getOCS).
WORLDX = Vector((1.0,0.0,0.0))
WORLDY = Vector((0.0,1.0,0.0))
WORLDZ = Vector((0.0,0.0,1.0))
+
+
def getOCS(az): #-----------------------------------------------------------------
    """An implementation of the DXF Arbitrary Axis Algorithm.

    az -- the entity's extrusion direction (group codes 210/220/230).
    Returns a 3x3 Matrix mapping OCS coordinates to world space, or False
    when the normal is effectively +Z and no transform is needed.
    """
    #decide if we need to transform our coords
    #if az[0] == 0 and az[1] == 0:
    if abs(az.x) < 0.00001 and abs(az.y) < 0.00001:
        if az.z > 0.0:
            return False
        elif az.z < 0.0:
            # Mirrored case: extrusion points along -Z.
            return Matrix((-WORLDX, WORLDY*1, -WORLDZ))

    cap = 0.015625 # square polar cap value (1/64.0)
    if abs(az.x) < cap and abs(az.y) < cap:
        # Close to the world Z pole: build the frame from the world Y axis.
        ax = WORLDY.cross(az)
    else:
        ax = WORLDZ.cross(az)
    ax.normalize()
    ay = az.cross(ax)
    ay.normalize()
    return Matrix((ax, ay, az))
+
+
+
def transform(normal, rotation, obj): #--------------------------------------------
    """Use the calculated OCS to determine the object's location/orientation.

    normal   -- entity extrusion direction (codes 210/220/230)
    rotation -- rotation angle in degrees around the normal
    obj      -- Blender object whose matrix_world and location are set
    """
    ma = Matrix(((1,0,0,0),(0,1,0,0),(0,0,1,0),(0,0,0,1)))
    o = Vector(obj.location)
    ma_new = getOCS(normal)
    if ma_new:
        ma = ma_new
        ma.resize_4x4()
        # Transform the object's location into the same frame.
        o = o * ma

    if rotation != 0:
        # Negative sign: DXF rotates counter-clockwise in the OCS plane.
        g = radians(-rotation)
        rmat = Matrix(((cos(g), -sin(g), 0), (sin(g), cos(g), 0), (0, 0, 1)))
        ma = ma * rmat.to_4x4()

    obj.matrix_world = ma #must be matrix4x4
    obj.location = o
+
+
# Map DXF entity type -> {group code : attribute spec}.
# A spec is either an attribute name like 'radius' / 'center.x' (assigned by
# the generic parser) or a one-element list like ['new_vertex(data)'] meaning
# "call this method with the parsed value instead of assigning".
DxfEntityAttributes = {
'3DFACE'	: {
    10 : 'point0.x', 20 : 'point0.y', 30 : 'point0.z',
    11 : 'point1.x', 21 : 'point1.y', 31 : 'point1.z',
    12 : 'point2.x', 22 : 'point2.y', 32 : 'point2.z',
    13 : 'point3.x', 23 : 'point3.y', 33 : 'point3.z',
    70 : 'flags',
    },

'3DSOLID'	: {
    1 : 'data', 3 : 'more', 70 : 'version',
    },

'ACAD_PROXY_ENTITY'	: {
    70 : 'format',
    90 : 'id', 91 : 'class', 92 : 'graphics_size', 93 : 'entity_size', 95: 'format',
    310 : 'data', 330 : 'id1', 340 : 'id2', 350 : 'id3', 360 : 'id4',
    },

'ARC'		: {
    10 : 'center.x', 20 : 'center.y', 30 : 'center.z',
    40 : 'radius',
    50 : 'start_angle', 51 : 'end_angle',
    39 : 'thickness',
    210 : 'normal.x', 220 : 'normal.y', 230 : 'normal.z',
    },

'ARCALIGNEDTEXT'	: {
    1 : 'text', 2 : 'font', 3 : 'bigfont', 7 : 'style',
    10 : 'center.x', 20 : 'center.y', 30 : 'center.z',
    40 : 'radius', 41 : 'width', 42 : 'height', 43 : 'spacing',
    44 : 'offset', 45 : 'right_offset', 46 : 'left_offset',
    50 : 'start_angle', 51 : 'end_angle',
    70 : 'order', 71 : 'direction', 72 : 'alignment', 73 : 'side',
    74 : 'bold', 75 : 'italic', 76 : 'underline',
    77 : 'character_set', 78 : 'pitch', 79 : 'fonttype',
    90 : 'color',
    280 : 'wizard', 330 : 'id'
    },

'ATTDEF'	: {
    1 : 'text', 2 : 'tag', 3 : 'prompt', 7 : 'style',
    10 : 'insertion_point.x', 20 : 'insertion_point.y', 30 : 'insertion_point.z',
    11 : 'alignment_point.x', 21 : 'alignment_point.y', 31 : 'alignment_point.z',
    40 : 'height', 41 : 'x_scale',
    50 : 'rotation_angle', 51 : 'oblique_angle',
    70 : 'flags', 71 : 'text_generation_flags',
    72 : 'horizontal_justification', 74 : 'vertical_justification',
    },


'ATTRIB'	: {
    1 : 'text', 2 : 'tag', 3 : 'prompt', 7 : 'style',
    10 : 'insertion_point.x', 20 : 'insertion_point.y', 30 : 'insertion_point.z',
    11 : 'alignment_point.x', 21 : 'alignment_point.y', 31 : 'alignment_point.z',
    40 : 'height', 41 : 'x_scale',
    50 : 'rotation_angle', 51 : 'oblique_angle',
    70 : 'flags', 73 : 'length',
    71 : 'text_generation_flags', 72 : 'horizontal_justification', 74 : 'vertical_justification',
    },

'BLOCK'		: {
    1 : 'xref', 2 : 'name', 3 : 'also_name',
    10 : 'base_point.x', 20 : 'base_point.y', 30 : 'base_point.z',
    40 : 'size', 41 : 'x_scale',
    50 : 'rotation_angle', 51 : 'oblique_angle',
    70 : 'flags',
    },

'CIRCLE'	: {
    10 : 'center.x', 20 : 'center.y', 30 : 'center.z',
    40 : 'radius',
    39 : 'thickness',
    210 : 'normal.x', 220 : 'normal.y', 230 : 'normal.z',
    },

'DIMENSION'	: {
    1 : 'text', 2 : 'name', 3 : 'style',
    10 : 'def_point.x', 20 : 'def_point.y', 30 : 'def_point.z',
    11 : 'mid_point.x', 21 : 'mid_point.y', 31 : 'mid_point.z',
    12 : 'vector.x', 22 : 'vector.y', 32 : 'vector.z',
    13 : 'def_point2.x', 23 : 'def_point2.y', 33 : 'def_point2.z',
    14 : 'vector2.x', 24 : 'vector2.y', 34 : 'vector2.z',
    15 : 'vector3.x', 25 : 'vector3.y', 35 : 'vector3.z',
    16 : 'vector4.x', 26 : 'vector4.y', 36 : 'vector4.z',
    70 : 'dimtype',
    },

'ELLIPSE'	: {
    10 : 'center.x', 20 : 'center.y', 30 : 'center.z',
    11 : 'end_point.x', 21 : 'end_point.y', 31 : 'end_point.z',
    40 : 'ratio', 41 : 'start', 42 : 'end',
    39 : 'thickness',
    210 : 'normal.x', 220 : 'normal.y', 230 : 'normal.z',
    },

'HATCH'		: {
    2 : 'pattern',
    10 : 'point.x', 20 : 'point.y', 30 : 'point.z',
    41 : 'scale', 47 : 'pixelsize', 52 : 'angle',
    70 : 'fill', 71 : 'associativity', 75: 'style', 77 : 'double',
    78 : 'numlines', 91 : 'numpaths', 98 : 'numseeds',
    210 : 'normal.x', 220 : 'normal.y', 230 : 'normal.z',
    },

'IMAGE'		: {
    10 : 'insertion_point.x', 20 : 'insertion_point.y', 30 : 'insertion_point.z',
    11 : 'u_vector.x', 21 : 'u_vector.y', 31 : 'u_vector.z',
    12 : 'v_vector.x', 22 : 'v_vector.y', 32 : 'v_vector.z',
    13 : 'size.x', 23 : 'size.y', 33 : 'size.z',
    14 : 'clip.x', 24 : 'clip.y', 34 : 'clip.z',
    70 : 'display', 71 : 'cliptype',
    90 : 'version',
    280 : 'clipstate', 281 : 'brightness', 282 : 'contrast', 283 : 'fade',
    340 : 'image', 360 : 'reactor',
    },

'INSERT'	: {
    1 : 'attributes_follow', 2 : 'name',
    10 : 'insertion_point.x', 20 : 'insertion_point.y', 30 : 'insertion_point.z',
    41 : 'x_scale', 42 : 'y_scale', 43 : 'z_scale',
    44 : 'column_spacing', 45 : 'row_spacing',
    50 : 'rotation_angle', 66 : 'attributes_follow',
    70 : 'column_count', 71 : 'row_count',
    210 : 'normal.x', 220 : 'normal.y', 230 : 'normal.z',
    },

'LEADER'	: {
    3 : 'style',
    10 : ['new_vertex(data)'], 20 : 'vertex.y', 30 : 'vertex.z',
    40 : 'height', 41 : 'width',
    71 : 'arrowhead', 72 : 'pathtype', 73 : 'creation',
    74 : 'hookdir', 75 : 'hookline', 76 : 'numverts', 77 : 'color',
    210 : 'normal.x', 220 : 'normal.y', 230 : 'normal.z',
    211 : 'horizon.x', 221 : 'horizon.y', 231 : 'horizon.z',
    212 : 'offset_ins.x', 222 : 'offset_ins.y', 232 : 'offset_ins.z',
    213 : 'offset_ann.x', 223 : 'offset_ann.y', 233 : 'offset_ann.z',
    },

'LINE'		: {
    10 : 'start_point.x', 20 : 'start_point.y', 30 : 'start_point.z',
    11 : 'end_point.x', 21 : 'end_point.y', 31 : 'end_point.z',
    39 : 'thickness',
    210 : 'normal.x', 220 : 'normal.y', 230 : 'normal.z',
    },

'LWPOLYLINE'	: {
    10 : ['new_vertex(data)'], 20 : 'vertex.y', 30 : 'vertex.z',
    38 : 'elevation', 39 : 'thickness',
    40 : 'start_width', 41 : 'end_width', 42 : 'bulge', 43 : 'constant_width',
    70 : 'flags', 90 : 'numverts',
    210 : 'normal.x', 220 : 'normal.y', 230 : 'normal.z',
    },

'MLINE'		: {
    10 : 'start_point.x', 20 : 'start_point.y', 30 : 'start_point.z',
    11 : ['new_vertex(data)'], 21 : 'vertex.y', 31 : 'vertex.z',
    12 : ['new_seg_dir(data)'], 22 : 'seg_dir.y', 32 : 'seg_dir.z',
    13 : ['new_miter_dir(data)'], 23 : 'miter_dir.y', 33 : 'miter_dir.z',
    39 : 'thickness',
    40 : 'scale', 41 : 'elem_param', 42 : 'fill_param',
    70 : 'justification', 71 : 'flags',
    72 : 'numverts', 73 : 'numelems', 74 : 'numparam', 75 : 'numfills',
    340 : 'id',
    210 : 'normal.x', 220 : 'normal.y', 230 : 'normal.z',
    },

'MTEXT'		: {
    1 : 'text', 3: 'more_text', 7 : 'style',
    10 : 'insertion_point.x', 20 : 'insertion_point.y', 30 : 'insertion_point.z',
    11 : 'alignment_point.x', 21 : 'alignment_point.y', 31 : 'alignment_point.z',
    40 : 'nominal_height', 41 : 'reference_width', 42: 'width', 43 : 'height', 44 : 'line_spacing',
    50 : 'rotation_angle',
    71 : 'attachment_point', 72 : 'drawing_direction', 73 : 'spacing_style',
    210 : 'normal.x', 220 : 'normal.y', 230 : 'normal.z',
    },

'POINT'		: {
    10 : 'point.x', 20 : 'point.y', 30 : 'point.z',
    39 : 'thickness', 50 : 'orientation',
    },

'POLYLINE'	: {
    1 : 'verts_follow', 2 : 'name',
    10 : 'elevation.x', 20 : 'elevation.y', 30 : 'elevation.z',
    39 : 'thickness',
    40 : 'start_width', 41 : 'end_width',
    66 : 'verts_follow_flag',
    70 : 'flags', 71 : 'row_count', 72 : 'column_count',
    73 : 'row_density', 74 : 'column_density', 75 : 'linetype',
    210 : 'normal.x', 220 : 'normal.y', 230 : 'normal.z',
    },

'RAY'		: {
    10 : 'point.x', 20 : 'point.y', 30 : 'point.z',
    11 : 'direction.x', 21 : 'direction.y', 31 : 'direction.z',
    },

'RTEXT'		: {
    1 : 'text', 7 : 'style',
    10 : 'insertion_point.x', 20 : 'insertion_point.y', 30 : 'insertion_point.z',
    39 : 'thickness',
    40 : 'height',
    50 : 'rotation_angle',
    70 : 'flags',
    210 : 'normal.x', 220 : 'normal.y', 230 : 'normal.z',
    },

'SHAPE'		: {
    2 : 'name',
    10 : 'insertion_point.x', 20 : 'insertion_point.y', 30 : 'insertion_point.z',
    39 : 'thickness',
    40 : 'size', 41 : 'x_scale',
    50 : 'rotation_angle', 51 : 'oblique_angle',
    },

'SOLID'		: {
    10 : 'point0.x', 20 : 'point0.y', 30 : 'point0.z',
    11 : 'point1.x', 21 : 'point1.y', 31 : 'point1.z',
    12 : 'point2.x', 22 : 'point2.y', 32 : 'point2.z',
    13 : 'point3.x', 23 : 'point3.y', 33 : 'point3.z',
    39 : 'thickness',
    210 : 'normal.x', 220 : 'normal.y', 230 : 'normal.z',
    },

'SPLINE'	: {
    10 : ['new_control_point(data)'], 20 : 'control_point.y', 30 : 'control_point.z',
    11 : ['new_fit_point(data)'], 21 : 'fit_point.y', 31 : 'fit_point.z',
    40 : ['new_knot_value(data)'],
    12 : 'start_tangent.x', 22 : 'start_tangent.y', 32 : 'start_tangent.z',
    13 : 'end_tangent.x', 23 : 'end_tangent.y', 33 : 'end_tangent.z',
    39 : 'thickness',
    41 : 'weight', 42 : 'knot_tol', 43 : 'control_point_tol', 44 : 'fit_tol',
    70 : 'flag', 71 : 'degree',
    72 : 'num_knots', 73 : 'num_control_points', 74 : 'num_fit_points',
    210 : 'normal.x', 220 : 'normal.y', 230 : 'normal.z',
    },

'TEXT'		: {
    1 : 'text', 7 : 'style',
    10 : 'insertion_point.x', 20 : 'insertion_point.y', 30 : 'insertion_point.z',
    11 : 'alignment_point.x', 21 : 'alignment_point.y', 31 : 'alignment_point.z',
    40 : 'height', 41 : 'x_scale',
    50 : 'rotation_angle', 51 : 'oblique_angle',
    71 : 'flags', 72 : 'horizontal_justification', 73 : 'vertical_justification',
    210 : 'normal.x', 220 : 'normal.y', 230 : 'normal.z',
    },

'TOLERANCE'	: {
    3 : 'style',
    10 : 'insertion_point.x', 20 : 'insertion_point.y', 30 : 'insertion_point.z',
    11 : 'direction.x', 21 : 'direction.y', 31 : 'direction.z',
    },

'TRACE'		: {
    10 : 'point0.x', 20 : 'point0.y', 30 : 'point0.z',
    11 : 'point1.x', 21 : 'point1.y', 31 : 'point1.z',
    12 : 'point2.x', 22 : 'point2.y', 32 : 'point2.z',
    13 : 'point3.x', 23 : 'point3.y', 33 : 'point3.z',
    39 : 'thickness',
    210 : 'normal.x', 220 : 'normal.y', 230 : 'normal.z',
    },

'VERTEX'	: {
    10 : 'location.x', 20 : 'location.y', 30 : 'location.z',
    40 : 'start_width', 41 : 'end_width', 42 : 'bulge',
    50 : 'tangent',
    70 : 'flags',
    71 : 'index1', 72 : 'index2', 73 : 'index3', 74 : 'index4',
    },

'VIEWPORT'	: {
    10 : 'center.x', 20 : 'center.y', 30 : 'center.z',
    12 : 'view_center.x', 22 : 'view_center.y', 32 : 'view_center.z',
    13 : 'snap_base.x', 23 : 'snap_base.y', 33 : 'snap_base.z',
    14 : 'snap_spacing.x', 24 : 'snap_spacing.y', 34 : 'snap_spacing.z',
    15 : 'grid_spacing.x', 25 : 'grid_spacing.y', 35 : 'grid_spacing.z',
    16 : 'view_direction.x', 26 : 'view_direction.y', 36 : 'view_direction.z',
    40 : 'width', 41 : 'height',
    68 : 'status', 69 : 'id',
    },

'WIPEOUT'	: {
    10 : 'point.x', 20 : 'point.y', 30 : 'point.z',
    11 : 'direction.x', 21 : 'direction.y', 31 : 'direction.z',
    },

}
+
+
+#
+# Flags
+#
+
+# Polyline flags
+PL_CLOSED = 0x01
+PL_CURVE_FIT_VERTS = 0x02
+PL_SPLINE_FIT_VERTS = 0x04
+PL_3D_POLYLINE = 0x08
+PL_3D_POLYGON_MESH = 0x10
+PL_CLOSED_IN_N_DIR = 0x20
+PL_POLYFACE_MESH = 0x40
+PL_CONTINUOUS = 0x80
+
+
+# Vertex flags
+VX_EXTRA_FLAG_CREATED = 0x01
+VX_CURVE_FIT_TANGENT_DEFINED = 0x02
+VX_SPLINE_VERTEX_CREATED = 0x08
+VX_SPLINE_FRAME_CONTROL_POINT = 0x10
+VX_3D_POLYLINE_VERTEX = 0x20
+VX_3D_POLYGON_MESH_VERTEX = 0x40
+VX_POLYFACE_MESH_VERTEX = 0x80
+
+# 3DFACE flags
+
+F3D_EDGE0_INVISIBLE = 0x01
+F3D_EDGE1_INVISIBLE = 0x02
+F3D_EDGE2_INVISIBLE = 0x04
+F3D_EDGE3_INVISIBLE = 0x08
+
+#
+# readDxfFile(filePath):
+#
+
def readDxfFile(fileName):
    """Read a DXF file and split it into sections.

    Returns a dict mapping section type ('HEADER', 'TABLES', 'ENTITIES', ...)
    to its CSection; entity objects end up in sections['ENTITIES'].data.
    Raises NameError on an unexpected group code at section level.
    """
    global toggle, theCodec

    print( "Opening DXF file "+ fileName )

    first = True
    statements = []
    no = 0
    # A DXF file alternates group-code lines and value lines; the group code
    # determines how the following value line is typed.
    # 'with' guarantees the file is closed even if a parse error is raised.
    with codecs.open(fileName, "r", encoding=theCodec) as fp:
        for line in fp:
            word = line.strip()
            no += 1
            if first:
                if word:
                    code = int(word)
                    first = False
            else:
                if toggle & T_Verbose:
                    print("%4d: %4d %s" % (no, code, word))
                # Value type by group-code range (see the DXF reference).
                if code < 10:
                    data = word
                elif code < 60:
                    data = float(word)
                elif code < 100:
                    data = int(word)
                elif code < 140:
                    data = word
                elif code < 150:
                    data = float(word)
                elif code < 200:
                    data = int(word)
                elif code < 300:
                    data = float(word)
                elif code < 370:
                    data = word
                elif code < 390:
                    data = int(word)
                elif code < 400:
                    data = word
                elif code < 410:
                    data = int(word)
                elif code < 1010:
                    data = word
                elif code < 1060:
                    data = float(word)
                elif code < 1080:
                    data = int(word)
                else:
                    # Bug fix: codes >= 1080 previously fell through and
                    # reused (or crashed on) the previous 'data' value.
                    data = word

                statements.append((code,data))
                first = True

    # Reverse so pop() hands out statements in file order.
    statements.reverse()
    sections = {}
    handles = {}
    while statements:
        (code,data) = statements.pop()
        if code == 0:
            if data == 'SECTION':
                section = CSection()
        elif code == 2:
            # The code-2 value right after SECTION names the section type
            # and selects the sub-parser, which consumes up to ENDSEC.
            section.type = data
            if data == 'HEADER':
                parseHeader(section, statements, handles)
            elif data == 'CLASSES':
                parseClasses(section, statements, handles)
            elif data == 'TABLES':
                parseTables(section, statements, handles)
            elif data == 'BLOCKS':
                parseBlocks(section, statements, handles)
            elif data == 'ENTITIES':
                parseEntities(section, statements, handles)
            elif data == 'OBJECTS':
                parseObjects(section, statements, handles)
            sections[data] = section
        elif code == 999:
            pass        # 999 = comment line; ignore
        else:
            raise NameError("Unexpected code in SECTION context: %d %s" % (code,data))

    if toggle & T_Verbose:
        for (typ,section) in sections.items():
            section.display()
    return sections
+
+
+#
+# 0
+# SECTION
+# 2
+# HEADER
+#
+# 9
+# $<variable>
+# <group code>
+# <value>
+#
+# 0
+# ENDSEC
+
+
def parseHeader(section, statements, handles):
    """Consume statements up to and including the HEADER section's ENDSEC.

    Header variables are currently ignored.  The statement list arrives
    reversed, so pop() yields statements in file order.
    """
    while statements:
        code, data = statements.pop()
        if code == 0 and data == 'ENDSEC':
            break
    return
+
+
+# 0
+# SECTION
+# 2
+# CLASSES
+#
+# 0
+# CLASS
+# 1
+# <class dxf record>
+# 2
+# <class name>
+# 3
+# <app name>
+# 90
+# <flag>
+# 280
+# <flag>
+# 281
+# <flag>
+#
+# 0
+# ENDSEC
+
def parseClasses(section, statements, handles):
    """Skip over the CLASSES section: pop statements until its ENDSEC.

    Class definitions are not interpreted by this importer.
    """
    while statements:
        pair = statements.pop()
        if pair == (0, 'ENDSEC'):
            return
    return
+
+
+# 0
+# SECTION
+# 2
+# TABLES
+#
+# 0
+# TABLE
+# 2
+# <table type>
+# 5
+# <handle>
+# 100
+# AcDbSymbolTable
+# 70
+# <max. entries>
+#
+# 0
+# <table type>
+# 5
+# <handle>
+# 100
+# AcDbSymbolTableRecord
+# .
+# . <data>
+# .
+#
+# 0
+# ENDTAB
+#
+# 0
+# ENDSEC
+
+#
+# APPID (application identification table)
+#
+# BLOCK_RECORD (block reference table)
+#
+# DIMSTYLE (dimension style table)
+#
+# LAYER (layer table)
+#
+# LTYPE (linetype table)
+#
+# STYLE (text style table)
+#
+# UCS (User Coordinate System table)
+#
+# VIEW (view table)
+#
+# VPORT (viewport configuration table)
+
+
def parseTables(section, statements, handles):
    """Skip the TABLES section.

    Table records (LAYER, LTYPE, STYLE, ...) are not interpreted yet: the
    section's data is set to an empty list and statements are consumed up
    to the matching ENDSEC.  (A commented-out sketch of real table parsing
    existed here as a dead string literal and has been removed.)
    """
    section.data = []
    while statements:
        code, data = statements.pop()
        if code == 0 and data == 'ENDSEC':
            break
    return
+
+# 0
+# SECTION
+# 2
+# BLOCKS
+#
+# 0
+# BLOCK
+# 5
+# <handle>
+# 100
+# AcDbEntity
+# 8
+# <layer>
+# 100
+# AcDbBlockBegin
+# 2
+# <block name>
+# 70
+# <flag>
+# 10
+# <X value>
+# 20
+# <Y value>
+# 30
+# <Z value>
+# 3
+# <block name>
+# 1
+# <xref path>
+#
+# 0
+# <entity type>
+# .
+# . <data>
+# .
+#
+# 0
+# ENDBLK
+# 5
+# <handle>
+# 100
+# AcDbBlockEnd
+#
+# 0
+# ENDSEC
+
+def parseBlocks(section, statements, handles):
+    # Consume the BLOCKS section until ENDSEC; block definitions are
+    # not stored anywhere.
+    while statements:
+        (code,data) = statements.pop()
+        if code == 0:
+            if data == 'ENDSEC':
+                return
+
+    return
+
+# 0
+# SECTION
+# 2
+# ENTITIES
+#
+# 0
+# <entity type>
+# 5
+# <handle>
+# 330
+# <pointer to owner>
+# 100
+# AcDbEntity
+# 8
+# <layer>
+# 100
+# AcDb<classname>
+# .
+# . <data>
+# .
+#
+# 0
+# ENDSEC
+
+# Entity types whose unknown group codes are skipped silently by
+# parseEntities instead of raising in debug mode.
+Ignorables = ['DIMENSION', 'TEXT', 'VIEWPORT']
+
+# Maps a DXF entity type name to the constructor expression that
+# parseEntities eval()s; a value of 0 marks a recognized but
+# unsupported entity type.
+# NOTE(review): the 'RTEXT' entry is missing "()" — eval() would yield
+# the class object itself rather than an instance; confirm intent.
+ClassCreators = {
+    '3DFACE': 'C3dFace()',
+    '3DSOLID': 'C3dSolid()',
+    'ACAD_PROXY_ENTITY': 'CAcadProxyEntity()',
+    'ACAD_ZOMBIE_ENTITY': 0,
+    'ARC': 'CArc()',
+    'ARCALIGNEDTEXT': 'CArcAlignedText()',
+    'ATTDEF': 'CAttdef()',
+    'ATTRIB': 'CAttrib()',
+    'BODY': 0,
+    'CIRCLE': 'CCircle()',
+    'DIMENSION': 'CDimension()',
+    'ELLIPSE': 'CEllipse()',
+    'HATCH': 'CHatch()',
+    'IMAGE': 'CImage()',
+    'INSERT': 'CInsert()',
+    'LEADER': 'CLeader()',
+    'LINE': 'CLine()',
+    'LWPOLYLINE': 'CLWPolyLine()',
+    'MLINE': 'CMLine()',
+    'MTEXT': 'CMText()',
+    'OLEFRAME': 0,
+    'OLE2FRAME': 0,
+    'POINT': 'CPoint()',
+    'POLYLINE': 'CPolyLine()',
+    'RAY': 'CRay()',
+    'REGION': 0,
+    'RTEXT': 'CRText',
+    'SEQEND': 0,
+    'SHAPE': 'CShape()',
+    'SOLID': 'CSolid()',
+    'SPLINE': 'CSpline()',
+    'TEXT': 'CText()',
+    'TOLERANCE': 'CTolerance()',
+    'TRACE': 'CTrace()',
+    'VERTEX': 'CVertex()',
+    'VIEWPORT': 'CViewPort()',
+    'WIPEOUT': 'CWipeOut()',
+    'XLINE': 'CXLine()',
+}
+
+def parseEntities(section, statements, handles):
+    # Parse the ENTITIES section into entity objects stored on
+    # section.data.  Entities are instantiated by eval() of the
+    # expression from ClassCreators, and per-group-code attributes are
+    # assigned via exec() of expressions built by getAttribute().
+    # NOTE(review): eval/exec operate on table-driven strings, not raw
+    # file text, but the pattern is fragile to table errors.
+    entities = []
+    section.data = entities
+    while statements:
+        (code,data) = statements.pop()
+        if toggle & T_Verbose:
+            print("ent", code,data)
+        if code == 0:
+            # Group code 0 starts a new entity (or ends the section).
+            known = True
+            if data in Ignorables:
+                ignore = True
+            else:
+                ignore = False
+
+            try:
+                creator = ClassCreators[data]
+            except:
+                creator = None
+
+            if creator:
+                entity = eval(creator)
+            elif data == 'ENDSEC':
+                return
+            else:
+                known = False
+
+            # VERTEX entities accumulate on the preceding POLYLINE.
+            if data == 'POLYLINE':
+                verts = entity.verts
+            elif data == 'VERTEX':
+                verts.append(entity)
+
+            if data == 'SEQEND':
+                attributes = []
+                known = False
+            elif creator == 0:
+                ignore = True
+            elif known:
+                entities.append(entity)
+                attributes = DxfEntityAttributes[data]
+            else:
+                raise NameError("Unknown data %s" % data)
+
+        elif not known:
+            pass
+        else:
+            # Non-zero codes carry the current entity's attribute data.
+            expr = getAttribute(attributes, code)
+            if expr:
+                exec(expr)
+            else:
+                expr = getAttribute(DxfCommonAttributes, code)
+                if expr:
+                    exec(expr)
+                elif code >= 1000 or ignore:
+                    # Codes >= 1000 are extended (xdata) codes; skip.
+                    pass
+                elif toggle & T_Debug:
+                    raise NameError("Unknown code %d for %s" % (code, entity.type))
+
+    return
+
+def getAttribute(attributes, code):
+    # Build the expression string that parseEntities exec()s to store a
+    # group-code value on the current entity; returns None when the
+    # code has no entry in the table.
+    # NOTE(review): the bare except also hides failures other than a
+    # missing key (e.g. a non-subscriptable table entry).
+    try:
+        ext = attributes[code]
+        if type(ext) == str:
+            expr = "entity.%s = data" % ext
+        else:
+            name = ext[0]
+            expr = "entity.%s" % name
+    except:
+        expr = None
+    return expr
+
+
+# 0
+# SECTION
+# 2
+# OBJECTS
+#
+# 0
+# DICTIONARY
+# 5
+# <handle>
+# 100
+# AcDbDictionary
+#
+# 3
+# <dictionary name>
+# 350
+# <handle of child>
+#
+# 0
+# <object type>
+# .
+# . <data>
+# .
+#
+# 0
+# ENDSEC
+
+def parseObjects(data, statements, handles):
+    # Consume the OBJECTS section until ENDSEC; object records are not
+    # stored anywhere.
+    # NOTE(review): the first parameter is named 'data' (unlike the
+    # other parse* functions' 'section') and is immediately shadowed by
+    # the loop unpacking below.
+    while statements:
+        (code,data) = statements.pop()
+        if code == 0:
+            if data == 'ENDSEC':
+                return
+
+    return
+
+#
+# buildGeometry(entities):
+# addMesh(name, verts, edges, faces):
+#
+
+def buildGeometry(entities):
+    # Build Blender geometry from parsed DXF entities.  Mesh/Curve
+    # entities are drawn individually, or — with T_DrawOne set —
+    # accumulated into three shared buffers (faces / edges / loose
+    # verts) with indices re-based and drawn once at the end.
+    try: bpy.ops.object.mode_set(mode='OBJECT')
+    except: pass
+    # v_* : loose vertices, e_* : edge-only geometry, f_* : faces;
+    # the *_vn counters hold the running vertex offset per buffer.
+    v_verts = []
+    v_vn = 0
+    e_verts = []
+    e_edges = []
+    e_vn = 0
+    f_verts = []
+    f_edges = []
+    f_faces = []
+    f_vn = 0
+    for ent in entities:
+        if ent.drawtype in ('Mesh','Curve'):
+            (verts, edges, faces, vn) = ent.build()
+            if not toggle & T_DrawOne:
+                drawGeometry(verts, edges, faces)
+            else:
+                if verts:
+                    if faces:
+                        # Shift indices by the current buffer offset.
+                        for i,f in enumerate(faces):
+                            #print ('face=', f)
+                            faces[i] = tuple(it+f_vn for it in f)
+                        for i,e in enumerate(edges):
+                            edges[i] = tuple(it+f_vn for it in e)
+                        f_verts.extend(verts)
+                        f_edges.extend(edges)
+                        f_faces.extend(faces)
+                        f_vn += len(verts)
+                    elif edges:
+                        for i,e in enumerate(edges):
+                            edges[i] = tuple(it+e_vn for it in e)
+                        e_verts.extend(verts)
+                        e_edges.extend(edges)
+                        e_vn += len(verts)
+                    else:
+                        v_verts.extend(verts)
+                        v_vn += len(verts)
+        else:
+            # Entity draws itself (e.g. text-like types).
+            ent.draw()
+
+    if toggle & T_DrawOne:
+        drawGeometry(f_verts, f_edges, f_faces)
+        drawGeometry(e_verts, e_edges)
+        drawGeometry(v_verts)
+
+
+
+def drawGeometry(verts, edges=[], faces=[]):
+    # Create one Blender object from the given geometry: a CURVE when
+    # only edges exist and T_Curves is enabled, otherwise a mesh.
+    # NOTE(review): mutable default arguments; they are not mutated
+    # here, but callers should not rely on them.
+    if verts:
+        if edges and (toggle & T_Curves):
+            print ('draw Curve')
+            cu = bpy.data.curves.new('DXFlines', 'CURVE')
+            cu.dimensions = '3D'
+            buildSplines(cu, verts, edges)
+            ob = addObject('DXFlines', cu)
+        else:
+            #for v in verts: print(v)
+            #print ('draw Mesh with %s vertices' %(len(verts)))
+            #for e in edges: print(e)
+            #print ('draw Mesh with %s edges' %(len(edges)))
+            #for f in faces: print(f)
+            #print ('draw Mesh with %s faces' %(len(faces)))
+            me = bpy.data.meshes.new('DXFmesh')
+            me.from_pydata(verts, edges, faces)
+            ob = addObject('DXFmesh', me)
+            removeDoubles(ob)
+    return
+
+
+
+def buildSplines(cu, verts, edges):
+    # Convert an edge list into POLY splines on curve datablock 'cu'.
+    # Consecutive edges that chain (each edge starting where the
+    # previous one ended) are merged into a single point run; each run
+    # becomes one spline.
+    if edges:
+        point_list = []
+        (v0,v1) = edges.pop()
+        v1_old = v1
+        newPoints = [tuple(verts[v0]),tuple(verts[v1])]
+        for (v0,v1) in edges:
+            if v0==v1_old:
+                newPoints.append(tuple(verts[v1]))
+            else:
+                #print ('newPoints=', newPoints)
+                point_list.append(newPoints)
+                newPoints = [tuple(verts[v0]),tuple(verts[v1])]
+            v1_old = v1
+        point_list.append(newPoints)
+        for points in point_list:
+            spline = cu.splines.new('POLY')
+            #spline = cu.splines.new('BEZIER')
+            #spline.use_endpoint_u = True
+            #spline.order_u = 2
+            #spline.resolution_u = 1
+            #spline.bezier_points.add(2)
+
+            # A new spline already has one point; add the rest.
+            spline.points.add(len(points)-1)
+            #spline.points.foreach_set('co', points)
+            for i,p in enumerate(points):
+                # Spline points take a 4-component coordinate.
+                spline.points[i].co = (p[0],p[1],p[2],0)
+
+            #print ('spline.type=', spline.type)
+            #print ('spline number=', len(cu.splines))
+
+
+def addObject(name, data):
+    # Wrap a datablock in a new object and link it to the active scene.
+    ob = bpy.data.objects.new(name, data)
+    scn = bpy.context.scene
+    scn.objects.link(ob)
+    return ob
+
+
+def removeDoubles(ob):
+    # Merge coincident vertices of 'ob' (within theMergeLimit) when the
+    # T_Merge option is on; requires a round-trip through edit mode.
+    global theMergeLimit
+    if toggle & T_Merge:
+        scn = bpy.context.scene
+        scn.objects.active = ob
+        bpy.ops.object.mode_set(mode='EDIT')
+        bpy.ops.mesh.remove_doubles(limit=theMergeLimit)
+        bpy.ops.object.mode_set(mode='OBJECT')
+
+
+
+#
+# clearScene(context):
+#
+
+def clearScene():
+    # When T_NewScene is set, unlink every mesh/curve/text object from
+    # the current scene; otherwise leave the scene untouched.
+    global toggle
+    scn = bpy.context.scene
+    print("clearScene %s %s" % (toggle & T_NewScene, scn))
+    if not toggle & T_NewScene:
+        return scn
+
+    for ob in scn.objects:
+        if ob.type in ["MESH", "CURVE", "TEXT"]:
+            scn.objects.active = ob
+            bpy.ops.object.mode_set(mode='OBJECT')
+            scn.objects.unlink(ob)
+            del ob
+    return scn
+
+#
+# readAndBuildDxfFile(filepath):
+#
+
+def readAndBuildDxfFile(filepath):
+    # Top-level import entry point: validate the .dxf extension,
+    # optionally clear the scene, parse the file and build geometry
+    # from its ENTITIES section.
+    fileName = os.path.expanduser(filepath)
+    if fileName:
+        (shortName, ext) = os.path.splitext(fileName)
+        #print("filepath: ", filepath)
+        #print("fileName: ", fileName)
+        #print("shortName: ", shortName)
+        if ext.lower() != ".dxf":
+            print("Error: Not a dxf file: " + fileName)
+            return
+        if toggle & T_NewScene:
+            clearScene()
+            if 0: # how to switch to the new scene?? (migius)
+                new_scn = bpy.data.scenes.new(shortName[-20:])
+                #new_scn.layers = (1<<20) -1
+                new_scn_name = new_scn.name
+                bpy.data.screens.scene = new_scn
+                #print("newScene: %s" % (new_scn))
+        sections = readDxfFile(fileName)
+        print("Building geometry")
+        buildGeometry(sections['ENTITIES'].data)
+        print("Done")
+        return
+    print("Error: Not a dxf file: " + filepath)
+    return
+
+#
+# User interface
+#
+
+DEBUG= False
+from bpy.props import *
+
+def tripleList(list1):
+    # Expand each element into an (id, name, description) triple, the
+    # item format expected by EnumProperty.
+    list3 = []
+    for elt in list1:
+        list3.append((elt,elt,elt))
+    return list3
+
+class IMPORT_OT_autocad_dxf(bpy.types.Operator):
+    '''Import from DXF file format (.dxf)'''
+    bl_idname = "import_scene.autocad_dxf"
+    bl_description = 'Import from DXF file format (.dxf)'
+    bl_label = "Import DXF" +' v.'+ __version__
+    bl_space_type = "PROPERTIES"
+    bl_region_type = "WINDOW"
+
+    # Path chosen in the file selector invoked by invoke().
+    filepath = StringProperty(name="File Path", description="Filepath used for importing the DXF file", maxlen= 1024, default= "", subtype='FILE_PATH')
+
+    # Option defaults are seeded from the module-level 'toggle' bitmask.
+    new_scene = BoolProperty(name="Replace scene", description="Replace scene", default=toggle&T_NewScene)
+    #new_scene = BoolProperty(name="New scene", description="Create new scene", default=toggle&T_NewScene)
+    curves = BoolProperty(name="Draw curves", description="Draw entities as curves", default=toggle&T_Curves)
+    thic_on = BoolProperty(name="Thic ON", description="Support THICKNESS", default=toggle&T_ThicON)
+
+    merge = BoolProperty(name="Remove doubles", description="Merge coincident vertices", default=toggle&T_Merge)
+    # Displayed scaled by 1e4; execute() scales back with *1e-4.
+    mergeLimit = FloatProperty(name="Limit", description="Merge limit", default = theMergeLimit*1e4,min=1.0, soft_min=1.0, max=100.0, soft_max=100.0)
+
+    draw_one = BoolProperty(name="Merge all", description="Draw all into one mesh-object", default=toggle&T_DrawOne)
+    circleResolution = IntProperty(name="Circle resolution", description="Circle/Arc are aproximated will this factor", default = theCircleRes,
+        min=4, soft_min=4, max=360, soft_max=360)
+    codecs = tripleList(['iso-8859-15', 'utf-8', 'ascii'])
+    codec = EnumProperty(name="Codec", description="Codec", items=codecs, default = 'ascii')
+
+    debug = BoolProperty(name="Debug", description="Unknown DXF-codes generate errors", default=toggle&T_Debug)
+    verbose = BoolProperty(name="Verbose", description="Print debug info", default=toggle&T_Verbose)
+
+    ##### DRAW #####
+    def draw(self, context):
+        # Lay out the operator options in the file-selector sidebar.
+        layout0 = self.layout
+        #layout0.enabled = False
+
+        #col = layout0.column_flow(2,align=True)
+        layout = layout0.box()
+        col = layout.column()
+        #col.prop(self, 'KnotType') waits for more knottypes
+        #col.label(text="import Parameters")
+        #col.prop(self, 'replace')
+        col.prop(self, 'new_scene')
+
+        row = layout.row(align=True)
+        row.prop(self, 'curves')
+        row.prop(self, 'circleResolution')
+
+        row = layout.row(align=True)
+        row.prop(self, 'merge')
+        if self.merge:
+            row.prop(self, 'mergeLimit')
+
+        row = layout.row(align=True)
+        #row.label('na')
+        row.prop(self, 'draw_one')
+        row.prop(self, 'thic_on')
+
+        col = layout.column()
+        col.prop(self, 'codec')
+
+        row = layout.row(align=True)
+        row.prop(self, 'debug')
+        if self.debug:
+            row.prop(self, 'verbose')
+
+    def execute(self, context):
+        # Fold the operator's boolean options back into the module-level
+        # 'toggle' bitmask and run the import.
+        global toggle, theMergeLimit, theCodec, theCircleRes
+        O_Merge = T_Merge if self.merge else 0
+        #O_Replace = T_Replace if self.replace else 0
+        O_NewScene = T_NewScene if self.new_scene else 0
+        O_Curves = T_Curves if self.curves else 0
+        O_ThicON = T_ThicON if self.thic_on else 0
+        O_DrawOne = T_DrawOne if self.draw_one else 0
+        O_Debug = T_Debug if self.debug else 0
+        O_Verbose = T_Verbose if self.verbose else 0
+
+        toggle = O_Merge | O_DrawOne | O_NewScene | O_Curves | O_ThicON | O_Debug | O_Verbose
+        theMergeLimit = self.mergeLimit*1e-4
+        theCircleRes = self.circleResolution
+        theCodec = self.codec
+
+        readAndBuildDxfFile(self.filepath)
+        return {'FINISHED'}
+
+    def invoke(self, context, event):
+        # Open the file selector; execute() runs after a file is picked.
+        wm = context.window_manager
+        wm.fileselect_add(self)
+        return {'RUNNING_MODAL'}
+
+
+def menu_func(self, context):
+    # File > Import menu entry for the DXF importer.
+    self.layout.operator(IMPORT_OT_autocad_dxf.bl_idname, text="Autocad (.dxf)")
+
+
+def register():
+    # Register all classes in this module and add the menu entry.
+    bpy.utils.register_module(__name__)
+
+    bpy.types.INFO_MT_file_import.append(menu_func)
+
+
+def unregister():
+    # Undo register(): unregister classes and remove the menu entry.
+    bpy.utils.unregister_module(__name__)
+
+    bpy.types.INFO_MT_file_import.remove(menu_func)
+
+
+# Allow running the script directly (e.g. from Blender's text editor).
+if __name__ == "__main__":
+    register()
+
+
diff --git a/io_import_scene_lwo.py b/io_import_scene_lwo.py
new file mode 100644
index 00000000..69fa2730
--- /dev/null
+++ b/io_import_scene_lwo.py
@@ -0,0 +1,1256 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# Add-on metadata read by Blender's add-on manager.
+bl_info= {
+    "name": "Import LightWave Objects",
+    "author": "Ken Nign (Ken9)",
+    "version": (1, 2),
+    "blender": (2, 5, 7),
+    "api": 35622,
+    "location": "File > Import > LightWave Object (.lwo)",
+    "description": "Imports a LWO file including any UV, Morph and Color maps. "\
+        "Can convert Skelegons to an Armature.",
+    "warning": "",
+    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+        "Scripts/Import-Export/LightWave_Object",
+    "tracker_url": "https://projects.blender.org/tracker/index.php?"\
+        "func=detail&aid=23623",
+    "category": "Import-Export"}
+
+# Copyright (c) Ken Nign 2010
+# ken@virginpi.com
+#
+# Version 1.2 - Sep 7, 2010
+#
+# Loads a LightWave .lwo object file, including the vertex maps such as
+# UV, Morph, Color and Weight maps.
+#
+# Will optionally create an Armature from an embedded Skelegon rig.
+#
+# Point orders are maintained so that .mdds can exchanged with other
+# 3D programs.
+#
+#
+# Notes:
+# NGons, polygons with more than 4 points are supported, but are
+# added (as triangles) after the vertex maps have been applied. Thus they
+# won't contain all the vertex data that the original ngon had.
+#
+# Blender is limited to only 8 UV Texture and 8 Vertex Color maps,
+# thus only the first 8 of each can be imported.
+#
+# History:
+#
+# 1.2 Added Absolute Morph and CC Edge Weight support.
+# Made edge creation safer.
+# 1.0 First Release
+
+
+import os
+import io
+import time
+import struct
+import chunk
+
+import bpy
+import mathutils
+from mathutils.geometry import tesselate_polygon
+
+
+class _obj_layer(object):
+    '''One LWO layer: its points, polygons, vertex maps and bone data.'''
+    __slots__ = (
+        "name",
+        "index",
+        "parent_index",
+        "pivot",
+        "pols",
+        "bones",
+        "bone_names",
+        "bone_rolls",
+        "pnts",
+        "wmaps",
+        "colmaps",
+        "uvmaps",
+        "morphs",
+        "edge_weights",
+        "surf_tags",
+        "has_subds",
+    )
+    def __init__(self):
+        self.name= ""
+        self.index= -1
+        self.parent_index= -1
+        self.pivot= [0, 0, 0]
+        self.pols= []
+        self.bones= []
+        self.bone_names= {}
+        self.bone_rolls= {}
+        self.pnts= []
+        self.wmaps= {}
+        self.colmaps= {}
+        self.uvmaps= {}
+        self.morphs= {}
+        self.edge_weights= {}
+        self.surf_tags= {}
+        self.has_subds= False
+
+
+class _obj_surf(object):
+    '''One LWO surface (material) record; fields mirror SURF subchunks.'''
+    __slots__ = (
+        "bl_mat",
+        "name",
+        "source_name",
+        "colr",
+        "diff",
+        "lumi",
+        "spec",
+        "refl",
+        "rblr",
+        "tran",
+        "rind",
+        "tblr",
+        "trnl",
+        "glos",
+        "shrp",
+        "smooth",
+    )
+
+    def __init__(self):
+        self.bl_mat= None
+        self.name= "Default"
+        self.source_name= ""
+        self.colr= [1.0, 1.0, 1.0]
+        self.diff= 1.0 # Diffuse
+        self.lumi= 0.0 # Luminosity
+        self.spec= 0.0 # Specular
+        self.refl= 0.0 # Reflectivity
+        self.rblr= 0.0 # Reflection Bluring
+        self.tran= 0.0 # Transparency (the opposite of Blender's Alpha value)
+        self.rind= 1.0 # RT Transparency IOR
+        self.tblr= 0.0 # Refraction Bluring
+        self.trnl= 0.0 # Translucency
+        self.glos= 0.4 # Glossiness
+        self.shrp= 0.0 # Diffuse Sharpness
+        self.smooth= False # Surface Smoothing
+
+
+def load_lwo(filename,
+             context,
+             ADD_SUBD_MOD=True,
+             LOAD_HIDDEN=False,
+             SKEL_TO_ARM=True):
+    '''Read the LWO file, hand off to version specific function.'''
+    name, ext= os.path.splitext(os.path.basename(filename))
+    file= open(filename, 'rb')
+
+    # The first 12 bytes identify the format: FORM tag, size, and the
+    # LWO2/LWOB/LWLO type tag.
+    try:
+        header, chunk_size, chunk_name = struct.unpack(">4s1L4s", file.read(12))
+    except:
+        print("Error parsing file header!")
+        file.close()
+        return
+
+    layers= []
+    surfs= {}
+    tags= []
+    # Gather the object data using the version specific handler.
+    if chunk_name == b'LWO2':
+        read_lwo2(file, filename, layers, surfs, tags, ADD_SUBD_MOD, LOAD_HIDDEN, SKEL_TO_ARM)
+    elif chunk_name == b'LWOB' or chunk_name == b'LWLO':
+        # LWOB and LWLO are the old format, LWLO is a layered object.
+        read_lwob(file, filename, layers, surfs, tags, ADD_SUBD_MOD)
+    else:
+        print("Not a supported file type!")
+        file.close()
+        return
+
+    file.close()
+
+    # With the data gathered, build the object(s).
+    build_objects(layers, surfs, tags, name, ADD_SUBD_MOD, SKEL_TO_ARM)
+
+    layers= None
+    surfs.clear()
+    tags= None
+
+
+def read_lwo2(file, filename, layers, surfs, tags, add_subd_mod, load_hidden, skel_to_arm):
+    '''Read version 2 file, LW 6+.'''
+    # handle_layer is False while skipping a hidden layer's chunks.
+    handle_layer= True
+    last_pols_count= 0
+    just_read_bones= False
+    print("Importing LWO: " + filename + "\nLWO v2 Format")
+
+    # Iterate top-level IFF chunks until EOF.
+    while True:
+        try:
+            rootchunk = chunk.Chunk(file)
+        except EOFError:
+            break
+
+        if rootchunk.chunkname == b'TAGS':
+            read_tags(rootchunk.read(), tags)
+        elif rootchunk.chunkname == b'LAYR':
+            handle_layer= read_layr(rootchunk.read(), layers, load_hidden)
+        elif rootchunk.chunkname == b'PNTS' and handle_layer:
+            read_pnts(rootchunk.read(), layers)
+        elif rootchunk.chunkname == b'VMAP' and handle_layer:
+            # Per-point vertex maps, keyed by a 4-byte subtype.
+            vmap_type = rootchunk.read(4)
+
+            if vmap_type == b'WGHT':
+                read_weightmap(rootchunk.read(), layers)
+            elif vmap_type == b'MORF':
+                read_morph(rootchunk.read(), layers, False)
+            elif vmap_type == b'SPOT':
+                read_morph(rootchunk.read(), layers, True)
+            elif vmap_type == b'TXUV':
+                read_uvmap(rootchunk.read(), layers)
+            elif vmap_type == b'RGB ' or vmap_type == b'RGBA':
+                read_colmap(rootchunk.read(), layers)
+            else:
+                rootchunk.skip()
+
+        elif rootchunk.chunkname == b'VMAD' and handle_layer:
+            # Discontinuous (per-polygon) vertex maps.
+            vmad_type= rootchunk.read(4)
+
+            if vmad_type == b'TXUV':
+                read_uv_vmad(rootchunk.read(), layers, last_pols_count)
+            elif vmad_type == b'RGB ' or vmad_type == b'RGBA':
+                read_color_vmad(rootchunk.read(), layers, last_pols_count)
+            elif vmad_type == b'WGHT':
+                # We only read the Edge Weight map if it's there.
+                read_weight_vmad(rootchunk.read(), layers)
+            else:
+                rootchunk.skip()
+
+        elif rootchunk.chunkname == b'POLS' and handle_layer:
+            face_type = rootchunk.read(4)
+            just_read_bones= False
+            # PTCH is LW's Subpatches, SUBD is CatmullClark.
+            if (face_type == b'FACE' or face_type == b'PTCH' or
+                face_type == b'SUBD') and handle_layer:
+                last_pols_count= read_pols(rootchunk.read(), layers)
+                if face_type != b'FACE':
+                    layers[-1].has_subds= True
+            elif face_type == b'BONE' and handle_layer:
+                read_bones(rootchunk.read(), layers)
+                just_read_bones= True
+            else:
+                rootchunk.skip()
+
+        elif rootchunk.chunkname == b'PTAG' and handle_layer:
+            tag_type,= struct.unpack("4s", rootchunk.read(4))
+            if tag_type == b'SURF' and not just_read_bones:
+                # Ignore the surface data if we just read a bones chunk.
+                read_surf_tags(rootchunk.read(), layers, last_pols_count)
+
+            elif skel_to_arm:
+                if tag_type == b'BNUP':
+                    read_bone_tags(rootchunk.read(), layers, tags, 'BNUP')
+                elif tag_type == b'BONE':
+                    read_bone_tags(rootchunk.read(), layers, tags, 'BONE')
+                else:
+                    rootchunk.skip()
+            else:
+                rootchunk.skip()
+        elif rootchunk.chunkname == b'SURF':
+            read_surf(rootchunk.read(), surfs)
+        else:
+            #if handle_layer:
+                #print("Skipping Chunk:", rootchunk.chunkname)
+            rootchunk.skip()
+
+
+def read_lwob(file, filename, layers, surfs, tags, add_subd_mod):
+    '''Read version 1 file, LW < 6.'''
+    last_pols_count= 0
+    print("Importing LWO: " + filename + "\nLWO v1 Format")
+
+    # Iterate top-level IFF chunks until EOF.
+    while True:
+        try:
+            rootchunk = chunk.Chunk(file)
+        except EOFError:
+            break
+
+        if rootchunk.chunkname == b'SRFS':
+            read_tags(rootchunk.read(), tags)
+        elif rootchunk.chunkname == b'LAYR':
+            read_layr_5(rootchunk.read(), layers)
+        elif rootchunk.chunkname == b'PNTS':
+            if len(layers) == 0:
+                # LWOB files have no LAYR chunk to set this up.
+                nlayer= _obj_layer()
+                nlayer.name= "Layer 1"
+                layers.append(nlayer)
+            read_pnts(rootchunk.read(), layers)
+        elif rootchunk.chunkname == b'POLS':
+            last_pols_count= read_pols_5(rootchunk.read(), layers)
+        elif rootchunk.chunkname == b'PCHS':
+            # PCHS polygons are subpatches.
+            last_pols_count= read_pols_5(rootchunk.read(), layers)
+            layers[-1].has_subds= True
+        elif rootchunk.chunkname == b'PTAG':
+            tag_type,= struct.unpack("4s", rootchunk.read(4))
+            if tag_type == b'SURF':
+                read_surf_tags_5(rootchunk.read(), layers, last_pols_count)
+            else:
+                rootchunk.skip()
+        elif rootchunk.chunkname == b'SURF':
+            read_surf_5(rootchunk.read(), surfs)
+        else:
+            # For Debugging \/.
+            #if handle_layer:
+                #print("Skipping Chunk: ", rootchunk.chunkname)
+            rootchunk.skip()
+
+
+def read_lwostring(raw_name):
+    '''Parse a zero-padded string.
+
+    Returns (name, consumed_length); the length includes the NUL
+    terminator and any pad byte needed to keep chunks even-aligned.
+    '''
+
+    i = raw_name.find(b'\0')
+    name_len = i + 1
+    if name_len % 2 == 1: # Test for oddness.
+        name_len += 1
+
+    if i > 0:
+        # Some plugins put non-text strings in the tags chunk.
+        name = raw_name[0:i].decode("utf-8", "ignore")
+    else:
+        name = ""
+
+    return name, name_len
+
+
+def read_vx(pointdata):
+    '''Read a variable-length index.
+
+    A leading 0xFF byte marks a 4-byte index; otherwise the index is a
+    2-byte big-endian value.  Returns (index, bytes_consumed).
+    '''
+    if pointdata[0] != 255:
+        index= pointdata[0]*256 + pointdata[1]
+        size= 2
+    else:
+        index= pointdata[1]*65536 + pointdata[2]*256 + pointdata[3]
+        size= 4
+
+    return index, size
+
+
+def read_tags(tag_bytes, object_tags):
+    '''Read the object's Tags chunk.'''
+    # The chunk is a packed run of zero-padded strings; append each one.
+    offset= 0
+    chunk_len= len(tag_bytes)
+
+    while offset < chunk_len:
+        tag, tag_len= read_lwostring(tag_bytes[offset:])
+        offset+= tag_len
+        object_tags.append(tag)
+
+
+def read_layr(layr_bytes, object_layers, load_hidden):
+    '''Read the object's layer data.
+
+    Returns False (layer skipped) when the layer flags mark it hidden
+    and load_hidden is off; True otherwise.
+    '''
+    new_layr= _obj_layer()
+    new_layr.index, flags= struct.unpack(">HH", layr_bytes[0:4])
+
+    if flags > 0 and not load_hidden:
+        return False
+
+    print("Reading Object Layer")
+    offset= 4
+    pivot= struct.unpack(">fff", layr_bytes[offset:offset+12])
+    # Swap Y and Z to match Blender's pitch.
+    new_layr.pivot= [pivot[0], pivot[2], pivot[1]]
+    offset+= 12
+    layr_name, name_len = read_lwostring(layr_bytes[offset:])
+    offset+= name_len
+
+    if layr_name:
+        new_layr.name= layr_name
+    else:
+        new_layr.name= "Layer %d" % (new_layr.index + 1)
+
+    # An optional trailing short is the parent layer index.
+    if len(layr_bytes) == offset+2:
+        new_layr.parent_index,= struct.unpack(">h", layr_bytes[offset:offset+2])
+
+    object_layers.append(new_layr)
+    return True
+
+
+def read_layr_5(layr_bytes, object_layers):
+    '''Read the object's layer data (old LWOB/LWLO format).'''
+    # XXX: Need to check what these two exactly mean for a LWOB/LWLO file.
+    new_layr= _obj_layer()
+    new_layr.index, flags= struct.unpack(">HH", layr_bytes[0:4])
+
+    print("Reading Object Layer")
+    offset= 4
+    layr_name, name_len = read_lwostring(layr_bytes[offset:])
+    offset+= name_len
+
+    if name_len > 2 and layr_name != 'noname':
+        new_layr.name= layr_name
+    else:
+        new_layr.name= "Layer %d" % new_layr.index
+
+    object_layers.append(new_layr)
+
+
+def read_pnts(pnt_bytes, object_layers):
+    '''Read the layer's points.'''
+    # Points are consecutive big-endian float triples, stored relative
+    # to the layer pivot with Y/Z swapped for Blender.
+    print("\tReading Layer ("+object_layers[-1].name+") Points")
+    offset= 0
+    chunk_len= len(pnt_bytes)
+
+    while offset < chunk_len:
+        pnts= struct.unpack(">fff", pnt_bytes[offset:offset+12])
+        offset+= 12
+        # Re-order the points so that the mesh has the right pitch,
+        # the pivot already has the correct order.
+        pnts= [pnts[0] - object_layers[-1].pivot[0],\
+               pnts[2] - object_layers[-1].pivot[1],\
+               pnts[1] - object_layers[-1].pivot[2]]
+        object_layers[-1].pnts.append(pnts)
+
+
+def read_weightmap(weight_bytes, object_layers):
+    '''Read a weight map's values.'''
+    # Layout: 2-byte dimension (skipped), map name, then repeated
+    # (point index, float weight) pairs.
+    chunk_len= len(weight_bytes)
+    offset= 2
+    name, name_len= read_lwostring(weight_bytes[offset:])
+    offset+= name_len
+    weights= []
+
+    while offset < chunk_len:
+        pnt_id, pnt_id_len= read_vx(weight_bytes[offset:offset+4])
+        offset+= pnt_id_len
+        value,= struct.unpack(">f", weight_bytes[offset:offset+4])
+        offset+= 4
+        weights.append([pnt_id, value])
+
+    object_layers[-1].wmaps[name]= weights
+
+
+def read_morph(morph_bytes, object_layers, is_abs):
+    '''Read an endomorph's relative or absolute displacement values.'''
+    # Relative (MORF) deltas are added to the base point; absolute
+    # (SPOT) positions are stored as-is.  Y/Z swapped for Blender.
+    chunk_len= len(morph_bytes)
+    offset= 2
+    name, name_len= read_lwostring(morph_bytes[offset:])
+    offset+= name_len
+    deltas= []
+
+    while offset < chunk_len:
+        pnt_id, pnt_id_len= read_vx(morph_bytes[offset:offset+4])
+        offset+= pnt_id_len
+        pos= struct.unpack(">fff", morph_bytes[offset:offset+12])
+        offset+= 12
+        pnt= object_layers[-1].pnts[pnt_id]
+
+        if is_abs:
+            deltas.append([pnt_id, pos[0], pos[2], pos[1]])
+        else:
+            # Swap the Y and Z to match Blender's pitch.
+            deltas.append([pnt_id, pnt[0]+pos[0], pnt[1]+pos[2], pnt[2]+pos[1]])
+
+    object_layers[-1].morphs[name]= deltas
+
+
+def read_colmap(col_bytes, object_layers):
+    '''Read the RGB or RGBA color map.'''
+    # 'dia' is the value dimension: 3 for RGB, 4 for RGBA.  The alpha
+    # component of RGBA maps is read but discarded (only RGB stored).
+    chunk_len= len(col_bytes)
+    dia,= struct.unpack(">H", col_bytes[0:2])
+    offset= 2
+    name, name_len= read_lwostring(col_bytes[offset:])
+    offset+= name_len
+    colors= {}
+
+    if dia == 3:
+        while offset < chunk_len:
+            pnt_id, pnt_id_len= read_vx(col_bytes[offset:offset+4])
+            offset+= pnt_id_len
+            col= struct.unpack(">fff", col_bytes[offset:offset+12])
+            offset+= 12
+            colors[pnt_id]= (col[0], col[1], col[2])
+    elif dia == 4:
+        while offset < chunk_len:
+            pnt_id, pnt_id_len= read_vx(col_bytes[offset:offset+4])
+            offset+= pnt_id_len
+            col= struct.unpack(">ffff", col_bytes[offset:offset+16])
+            offset+= 16
+            colors[pnt_id]= (col[0], col[1], col[2])
+
+    # Merge into any existing per-point map of the same name.
+    if name in object_layers[-1].colmaps:
+        if "PointMap" in object_layers[-1].colmaps[name]:
+            object_layers[-1].colmaps[name]["PointMap"].update(colors)
+        else:
+            object_layers[-1].colmaps[name]["PointMap"]= colors
+    else:
+        object_layers[-1].colmaps[name]= dict(PointMap=colors)
+
+
+def read_color_vmad(col_bytes, object_layers, last_pols_count):
+    '''Read the Discontinuous (per-polygon) RGB values.'''
+    chunk_len= len(col_bytes)
+    dia,= struct.unpack(">H", col_bytes[0:2])
+    offset= 2
+    name, name_len= read_lwostring(col_bytes[offset:])
+    offset+= name_len
+    colors= {}
+    abs_pid= len(object_layers[-1].pols) - last_pols_count
+
+    if dia == 3:
+        while offset < chunk_len:
+            pnt_id, pnt_id_len= read_vx(col_bytes[offset:offset+4])
+            offset+= pnt_id_len
+            pol_id, pol_id_len= read_vx(col_bytes[offset:offset+4])
+            offset+= pol_id_len
+
+            # The PolyID in a VMAD can be relative, this offsets it.
+            pol_id+= abs_pid
+            col= struct.unpack(">fff", col_bytes[offset:offset+12])
+            offset+= 12
+            if pol_id in colors:
+                colors[pol_id][pnt_id]= (col[0], col[1], col[2])
+            else:
+                colors[pol_id]= dict({pnt_id: (col[0], col[1], col[2])})
+    elif dia == 4:
+        # RGBA: alpha is read but discarded.
+        while offset < chunk_len:
+            pnt_id, pnt_id_len= read_vx(col_bytes[offset:offset+4])
+            offset+= pnt_id_len
+            pol_id, pol_id_len= read_vx(col_bytes[offset:offset+4])
+            offset+= pol_id_len
+
+            pol_id+= abs_pid
+            col= struct.unpack(">ffff", col_bytes[offset:offset+16])
+            offset+= 16
+            if pol_id in colors:
+                colors[pol_id][pnt_id]= (col[0], col[1], col[2])
+            else:
+                colors[pol_id]= dict({pnt_id: (col[0], col[1], col[2])})
+
+    # Merge into any existing per-face map of the same name.
+    if name in object_layers[-1].colmaps:
+        if "FaceMap" in object_layers[-1].colmaps[name]:
+            object_layers[-1].colmaps[name]["FaceMap"].update(colors)
+        else:
+            object_layers[-1].colmaps[name]["FaceMap"]= colors
+    else:
+        object_layers[-1].colmaps[name]= dict(FaceMap=colors)
+
+
+def read_uvmap(uv_bytes, object_layers):
+    '''Read the simple UV coord values.'''
+    # Repeated (point index, U float, V float) records.
+    chunk_len= len(uv_bytes)
+    offset= 2
+    name, name_len= read_lwostring(uv_bytes[offset:])
+    offset+= name_len
+    uv_coords= {}
+
+    while offset < chunk_len:
+        pnt_id, pnt_id_len= read_vx(uv_bytes[offset:offset+4])
+        offset+= pnt_id_len
+        pos= struct.unpack(">ff", uv_bytes[offset:offset+8])
+        offset+= 8
+        uv_coords[pnt_id]= (pos[0], pos[1])
+
+    # Merge into any existing per-point map of the same name.
+    if name in object_layers[-1].uvmaps:
+        if "PointMap" in object_layers[-1].uvmaps[name]:
+            object_layers[-1].uvmaps[name]["PointMap"].update(uv_coords)
+        else:
+            object_layers[-1].uvmaps[name]["PointMap"]= uv_coords
+    else:
+        object_layers[-1].uvmaps[name]= dict(PointMap=uv_coords)
+
+
+def read_uv_vmad(uv_bytes, object_layers, last_pols_count):
+    '''Read the Discontinuous (per-polygon) uv values.'''
+    chunk_len= len(uv_bytes)
+    offset= 2
+    name, name_len= read_lwostring(uv_bytes[offset:])
+    offset+= name_len
+    uv_coords= {}
+    abs_pid= len(object_layers[-1].pols) - last_pols_count
+
+    while offset < chunk_len:
+        pnt_id, pnt_id_len= read_vx(uv_bytes[offset:offset+4])
+        offset+= pnt_id_len
+        pol_id, pol_id_len= read_vx(uv_bytes[offset:offset+4])
+        offset+= pol_id_len
+
+        # Poly indexes can be relative; re-base against this POLS chunk.
+        pol_id+= abs_pid
+        pos= struct.unpack(">ff", uv_bytes[offset:offset+8])
+        offset+= 8
+        if pol_id in uv_coords:
+            uv_coords[pol_id][pnt_id]= (pos[0], pos[1])
+        else:
+            uv_coords[pol_id]= dict({pnt_id: (pos[0], pos[1])})
+
+    # Merge into any existing per-face map of the same name.
+    if name in object_layers[-1].uvmaps:
+        if "FaceMap" in object_layers[-1].uvmaps[name]:
+            object_layers[-1].uvmaps[name]["FaceMap"].update(uv_coords)
+        else:
+            object_layers[-1].uvmaps[name]["FaceMap"]= uv_coords
+    else:
+        object_layers[-1].uvmaps[name]= dict(FaceMap=uv_coords)
+
+
+def read_weight_vmad(ew_bytes, object_layers):
+    '''Read the VMAD Weight values.'''
+    chunk_len= len(ew_bytes)
+    offset= 2
+    name, name_len= read_lwostring(ew_bytes[offset:])
+    if name != "Edge Weight":
+        return # We just want the Catmull-Clark edge weights
+
+    offset+= name_len
+    # Track the previous point/poly so that consecutive records on the
+    # same poly form edges; keys are "pnt_a pnt_b" strings.
+    prev_pol= -1
+    prev_pnt= -1
+    prev_weight= 0.0
+    first_pnt= -1
+    poly_pnts= 0
+    while offset < chunk_len:
+        pnt_id, pnt_id_len= read_vx(ew_bytes[offset:offset+4])
+        offset+= pnt_id_len
+        pol_id, pol_id_len= read_vx(ew_bytes[offset:offset+4])
+        offset+= pol_id_len
+
+        weight,= struct.unpack(">f", ew_bytes[offset:offset+4])
+        offset+= 4
+        if prev_pol == pol_id:
+            # Points on the same poly should define an edge.
+            object_layers[-1].edge_weights["{0} {1}".format(prev_pnt, pnt_id)]= weight
+            poly_pnts += 1
+        else:
+            if poly_pnts > 2:
+                # Make an edge from the first and last points.
+                object_layers[-1].edge_weights["{0} {1}".format(first_pnt, prev_pnt)]= prev_weight
+            first_pnt= pnt_id
+            prev_pol= pol_id
+            poly_pnts= 1
+
+        prev_pnt= pnt_id
+        prev_weight= weight
+
+    # Close the final polygon's wrap-around edge.
+    if poly_pnts > 2:
+        object_layers[-1].edge_weights["{0} {1}".format(first_pnt, prev_pnt)]= prev_weight
+
+
+def read_pols(pol_bytes, object_layers):
+    '''Read the layer's polygons, each one is just a list of point indexes.
+
+    Returns the number of polygons added by this chunk.
+    '''
+    print("\tReading Layer ("+object_layers[-1].name+") Polygons")
+    offset= 0
+    pols_count = len(pol_bytes)
+    old_pols_count= len(object_layers[-1].pols)
+
+    while offset < pols_count:
+        pnts_count,= struct.unpack(">H", pol_bytes[offset:offset+2])
+        offset+= 2
+        all_face_pnts= []
+        for j in range(pnts_count):
+            face_pnt, data_size= read_vx(pol_bytes[offset:offset+4])
+            offset+= data_size
+            all_face_pnts.append(face_pnt)
+
+        object_layers[-1].pols.append(all_face_pnts)
+
+    return len(object_layers[-1].pols) - old_pols_count
+
+
+def read_pols_5(pol_bytes, object_layers):
+    '''
+    Read the polygons, each one is just a list of point indexes.
+    But it also includes the surface index.
+
+    Returns the number of polygons added by this chunk.
+    '''
+    print("\tReading Layer ("+object_layers[-1].name+") Polygons")
+    offset= 0
+    chunk_len= len(pol_bytes)
+    old_pols_count= len(object_layers[-1].pols)
+    poly= 0
+
+    while offset < chunk_len:
+        pnts_count,= struct.unpack(">H", pol_bytes[offset:offset+2])
+        offset+= 2
+        all_face_pnts= []
+        for j in range(pnts_count):
+            face_pnt,= struct.unpack(">H", pol_bytes[offset:offset+2])
+            offset+= 2
+            all_face_pnts.append(face_pnt)
+
+        object_layers[-1].pols.append(all_face_pnts)
+        # The trailing signed surface index; negative means detail
+        # polygons follow, so only its magnitude is used (1-based).
+        sid,= struct.unpack(">h", pol_bytes[offset:offset+2])
+        offset+= 2
+        sid= abs(sid) - 1
+        if sid not in object_layers[-1].surf_tags:
+            object_layers[-1].surf_tags[sid]= []
+        object_layers[-1].surf_tags[sid].append(poly)
+        poly+= 1
+
+    return len(object_layers[-1].pols) - old_pols_count
+
+
+def read_bones(bone_bytes, object_layers):
+    '''Read the layer's skelegons.'''
+    # Same wire format as read_pols: a point count then point indexes.
+    print("\tReading Layer ("+object_layers[-1].name+") Bones")
+    offset= 0
+    bones_count = len(bone_bytes)
+
+    while offset < bones_count:
+        pnts_count,= struct.unpack(">H", bone_bytes[offset:offset+2])
+        offset+= 2
+        all_bone_pnts= []
+        for j in range(pnts_count):
+            bone_pnt, data_size= read_vx(bone_bytes[offset:offset+4])
+            offset+= data_size
+            all_bone_pnts.append(bone_pnt)
+
+        object_layers[-1].bones.append(all_bone_pnts)
+
+
+def read_bone_tags(tag_bytes, object_layers, object_tags, type):
+    '''Read the bone name or roll tags.'''
+    # NOTE(review): the parameter 'type' shadows the builtin; it selects
+    # which per-layer dict ('BONE' names / 'BNUP' rolls) is filled.
+    offset= 0
+    chunk_len= len(tag_bytes)
+
+    if type == 'BONE':
+        bone_dict= object_layers[-1].bone_names
+    elif type == 'BNUP':
+        bone_dict= object_layers[-1].bone_rolls
+    else:
+        return
+
+    while offset < chunk_len:
+        pid, pid_len= read_vx(tag_bytes[offset:offset+4])
+        offset+= pid_len
+        tid,= struct.unpack(">H", tag_bytes[offset:offset+2])
+        offset+= 2
+        bone_dict[pid]= object_tags[tid]
+
+
def read_surf_tags(tag_bytes, object_layers, last_pols_count):
    '''Read the list of PolyIDs and tag indexes.'''
    print("\tReading Layer ("+object_layers[-1].name+") Surface Assignments")
    layer = object_layers[-1]
    chunk_len = len(tag_bytes)

    # Polygon IDs in the chunk are relative to this layer's polygons, so
    # shift them past the polys that belong to earlier layers.
    abs_pid = len(layer.pols) - last_pols_count
    offset = 0
    while offset < chunk_len:
        pid, pid_len = read_vx(tag_bytes[offset:offset+4])
        offset += pid_len
        sid = struct.unpack(">H", tag_bytes[offset:offset+2])[0]
        offset += 2
        layer.surf_tags.setdefault(sid, []).append(pid + abs_pid)
+
+
def read_surf(surf_bytes, object_surfs):
    '''Read the object's surface data.'''
    if len(object_surfs) == 0:
        print("Reading Object Surfaces")

    surf = _obj_surf()
    name, name_len = read_lwostring(surf_bytes)
    if len(name) != 0:
        surf.name = name

    # We have to read this, but we won't use it...yet.
    s_name, s_name_len = read_lwostring(surf_bytes[name_len:])

    # All of these subchunks hold a single big-endian float that maps
    # directly onto a surface attribute of the same (lowercase) name.
    float_chunks = {
        b'DIFF': 'diff', b'LUMI': 'lumi', b'SPEC': 'spec', b'REFL': 'refl',
        b'RBLR': 'rblr', b'TRAN': 'tran', b'RIND': 'rind', b'TBLR': 'tblr',
        b'TRNL': 'trnl', b'GLOS': 'glos', b'SHRP': 'shrp',
    }

    offset = name_len + s_name_len
    block_size = len(surf_bytes)
    while offset < block_size:
        subchunk_name, = struct.unpack("4s", surf_bytes[offset:offset+4])
        offset += 4
        subchunk_len, = struct.unpack(">H", surf_bytes[offset:offset+2])
        offset += 2

        if subchunk_name == b'COLR':
            surf.colr = struct.unpack(">fff", surf_bytes[offset:offset+12])
            # Don't bother with any envelopes for now.
        elif subchunk_name in float_chunks:
            value, = struct.unpack(">f", surf_bytes[offset:offset+4])
            setattr(surf, float_chunks[subchunk_name], value)
        elif subchunk_name == b'SMAN':
            # Any positive smoothing angle just turns smoothing on.
            s_angle, = struct.unpack(">f", surf_bytes[offset:offset+4])
            if s_angle > 0.0:
                surf.smooth = True

        # Always advance by the declared subchunk length, even for
        # subchunks we don't recognize.
        offset += subchunk_len

    object_surfs[surf.name] = surf
+
+
def read_surf_5(surf_bytes, object_surfs):
    '''Read the object's surface data.'''
    if len(object_surfs) == 0:
        print("Reading Object Surfaces")

    surf = _obj_surf()
    name, name_len = read_lwostring(surf_bytes)
    if len(name) != 0:
        surf.name = name

    # Percentage subchunks: a signed short scaled by 256 (yes, 256 not 255).
    scaled_chunks = {b'DIFF': 'diff', b'LUMI': 'lumi', b'SPEC': 'spec',
                     b'REFL': 'refl', b'TRAN': 'tran'}

    offset = name_len
    chunk_len = len(surf_bytes)
    while offset < chunk_len:
        subchunk_name, = struct.unpack("4s", surf_bytes[offset:offset+4])
        offset += 4
        subchunk_len, = struct.unpack(">H", surf_bytes[offset:offset+2])
        offset += 2

        if subchunk_name == b'COLR':
            # Byte-per-channel color; the fourth byte is unused padding.
            color = struct.unpack(">BBBB", surf_bytes[offset:offset+4])
            surf.colr = [color[0] / 255.0, color[1] / 255.0, color[2] / 255.0]
        elif subchunk_name in scaled_chunks:
            value, = struct.unpack(">h", surf_bytes[offset:offset+2])
            setattr(surf, scaled_chunks[subchunk_name], value / 256.0)
        elif subchunk_name == b'RIND':
            surf.rind, = struct.unpack(">f", surf_bytes[offset:offset+4])
        elif subchunk_name == b'GLOS':
            # Glossiness stays an unscaled short in the 5.x format.
            surf.glos, = struct.unpack(">h", surf_bytes[offset:offset+2])
        elif subchunk_name == b'SMAN':
            s_angle, = struct.unpack(">f", surf_bytes[offset:offset+4])
            if s_angle > 0.0:
                surf.smooth = True

        # Skip over whatever remains of the subchunk.
        offset += subchunk_len

    object_surfs[surf.name] = surf
+
+
def create_mappack(data, map_name, map_type):
    '''Match the map data to faces.

    Returns a dict keyed by face index; each value holds one entry per
    face vertex (a color triple for "COLOR", a UV pair for "UV"), taken
    from the point map and/or face map with defaults for unmapped points.
    A face map overrides the point map where both are present.

    Bug fix: the original face-map pass appended a second full set of
    default entries when a point map had already filled the face, doubling
    the per-face list length; both passes now share one default-fill step
    and overwrite entries in place instead of the insert/del dance.
    '''
    pack = {}

    # Defaults for points with no map entry.
    if map_type == "COLOR":
        default = (1.0, 1.0, 1.0)
        maps = data.colmaps[map_name]
    elif map_type == "UV":
        default = (-0.1, -0.1)
        maps = data.uvmaps[map_name]
    else:
        return pack

    def _fill_defaults():
        # Give every face one default entry per vertex (idempotent).
        for fi in range(len(data.pols)):
            if fi not in pack:
                pack[fi] = [default] * len(data.pols[fi])

    def _apply_pointmap(map):
        # Per-point values apply to every face that uses the point.
        _fill_defaults()
        for fi in range(len(data.pols)):
            for po, pnt_id in enumerate(data.pols[fi]):
                if pnt_id in map:
                    pack[fi][po] = map[pnt_id]

    def _apply_facemap(map):
        # Per-face values only apply to the listed face's own vertices.
        _fill_defaults()
        for fi in range(len(data.pols)):
            if fi in map:
                for po, pnt_id in enumerate(data.pols[fi]):
                    if pnt_id in map[fi]:
                        pack[fi][po] = map[fi][pnt_id]

    if "PointMap" in maps:
        _apply_pointmap(maps["PointMap"])
    if "FaceMap" in maps:
        _apply_facemap(maps["FaceMap"])

    return pack
+
+
def build_armature(layer_data, bones):
    '''Build an armature from the skelegon data in the mesh.

    layer_data -- layer object providing pnts, bones, bone_names and
                  bone_rolls (filled by the read_* functions above).
    bones      -- the armature's edit_bones collection; the armature is
                  assumed to already be in Edit Mode.
    '''
    print("Building Armature")

    # New Armatures include a default bone, remove it.
    bones.remove(bones[0])

    # Now start adding the bones at the point locations.
    prev_bone= None
    for skb_idx in range(len(layer_data.bones)):
        if skb_idx in layer_data.bone_names:
            nb= bones.new(layer_data.bone_names[skb_idx])
        else:
            nb= bones.new("Bone")

        # Only the first two point indexes of the skelegon are used:
        # head and tail positions.
        nb.head= layer_data.pnts[layer_data.bones[skb_idx][0]]
        nb.tail= layer_data.pnts[layer_data.bones[skb_idx][1]]

        if skb_idx in layer_data.bone_rolls:
            # The roll tag is an "x y z" up-vector string; derive a roll
            # angle by tracking that vector and reading the euler back.
            xyz= layer_data.bone_rolls[skb_idx].split(' ')
            vec= mathutils.Vector((float(xyz[0]), float(xyz[1]), float(xyz[2])))
            quat= vec.to_track_quat('Y', 'Z')
            nb.roll= max(quat.to_euler('YZX'))
            if nb.roll == 0.0:
                nb.roll= min(quat.to_euler('YZX')) * -1
            # YZX order seems to produce the correct roll value.
        else:
            nb.roll= 0.0

        # Chain bones whose head coincides with the previous bone's tail.
        if prev_bone != None:
            if nb.head == prev_bone.tail:
                nb.parent= prev_bone

        nb.use_connect= True
        prev_bone= nb
+
+
def build_objects(object_layers, object_surfs, object_tags, object_name, add_subd_mod, skel_to_arm):
    '''Using the gathered data, create the objects.

    Creates one Blender material per surface, one mesh object per layer
    (faces, edges, vertex groups, shape keys, vertex colors, UVs, NGons),
    optionally a Subsurf modifier and an armature, then wires up layer
    parenting.

    Bug fix: the vertex-color and UV guards used 'fi > len(...)', which
    let fi == len(...) through and crash with an IndexError on the next
    line; they now use '>='.
    '''
    ob_dict= {}  # Used for the parenting setup.
    print("Adding %d Materials" % len(object_surfs))

    for surf_key in object_surfs:
        surf_data= object_surfs[surf_key]
        surf_data.bl_mat= bpy.data.materials.new(surf_data.name)
        surf_data.bl_mat.diffuse_color= (surf_data.colr[:])
        surf_data.bl_mat.diffuse_intensity= surf_data.diff
        surf_data.bl_mat.emit= surf_data.lumi
        surf_data.bl_mat.specular_intensity= surf_data.spec
        if surf_data.refl != 0.0:
            surf_data.bl_mat.raytrace_mirror.use= True
        surf_data.bl_mat.raytrace_mirror.reflect_factor= surf_data.refl
        surf_data.bl_mat.raytrace_mirror.gloss_factor= 1.0-surf_data.rblr
        if surf_data.tran != 0.0:
            surf_data.bl_mat.use_transparency= True
            surf_data.bl_mat.transparency_method= 'RAYTRACE'
        surf_data.bl_mat.alpha= 1.0 - surf_data.tran
        surf_data.bl_mat.raytrace_transparency.ior= surf_data.rind
        surf_data.bl_mat.raytrace_transparency.gloss_factor= 1.0 - surf_data.tblr
        surf_data.bl_mat.translucency= surf_data.trnl
        surf_data.bl_mat.specular_hardness= int(4*((10*surf_data.glos)*(10*surf_data.glos)))+4
        # The Gloss is as close as possible given the differences.

    # Single layer objects use the object file's name instead.
    if len(object_layers) and object_layers[-1].name == 'Layer 1':
        object_layers[-1].name= object_name
        print("Building '%s' Object" % object_name)
    else:
        print("Building %d Objects" % len(object_layers))

    # Before adding any meshes or armatures go into Object mode.
    if bpy.ops.object.mode_set.poll():
        bpy.ops.object.mode_set(mode='OBJECT')

    for layer_data in object_layers:
        me= bpy.data.meshes.new(layer_data.name)
        me.vertices.add(len(layer_data.pnts))
        me.faces.add(len(layer_data.pols))
        # for vi in range(len(layer_data.pnts)):
        #     me.vertices[vi].co= layer_data.pnts[vi]

        # faster, would be faster again to use an array
        me.vertices.foreach_set("co", [axis for co in layer_data.pnts for axis in co])

        ngons= {}   # To keep the FaceIdx consistant, handle NGons later.
        edges= []   # Holds the FaceIdx of the 2-point polys.
        for fi, fpol in enumerate(layer_data.pols):
            fpol.reverse()   # Reversing gives correct normal directions
            # PointID 0 in the last element causes Blender to think it's un-used.
            if fpol[-1] == 0:
                fpol.insert(0, fpol[-1])
                del fpol[-1]

            vlen= len(fpol)
            if vlen == 3 or vlen == 4:
                for i in range(vlen):
                    me.faces[fi].vertices_raw[i]= fpol[i]
            elif vlen == 2:
                edges.append(fi)
            elif vlen != 1:
                ngons[fi]= fpol   # Deal with them later

        ob= bpy.data.objects.new(layer_data.name, me)
        bpy.context.scene.objects.link(ob)
        ob_dict[layer_data.index]= [ob, layer_data.parent_index]

        # Move the object so the pivot is in the right place.
        ob.location= layer_data.pivot

        # Create the Material Slots and assign the MatIndex to the correct faces.
        mat_slot= 0
        for surf_key in layer_data.surf_tags:
            if object_tags[surf_key] in object_surfs:
                me.materials.append(object_surfs[object_tags[surf_key]].bl_mat)

                for fi in layer_data.surf_tags[surf_key]:
                    me.faces[fi].material_index= mat_slot
                    me.faces[fi].use_smooth= object_surfs[object_tags[surf_key]].smooth

                mat_slot+=1

        # Create the Vertex Groups (LW's Weight Maps).
        if len(layer_data.wmaps) > 0:
            print("Adding %d Vertex Groups" % len(layer_data.wmaps))
            for wmap_key in layer_data.wmaps:
                vgroup= ob.vertex_groups.new()
                vgroup.name= wmap_key
                wlist= layer_data.wmaps[wmap_key]
                for pvp in wlist:
                    vgroup.add((pvp[0], ), pvp[1], 'REPLACE')

        # Create the Shape Keys (LW's Endomorphs).
        if len(layer_data.morphs) > 0:
            print("Adding %d Shapes Keys" % len(layer_data.morphs))
            ob.shape_key_add('Basis')   # Got to have a Base Shape.
            for morph_key in layer_data.morphs:
                skey= ob.shape_key_add(morph_key)
                dlist= layer_data.morphs[morph_key]
                for pdp in dlist:
                    me.shape_keys.keys[skey.name].data[pdp[0]].co= [pdp[1], pdp[2], pdp[3]]

        # Create the Vertex Color maps.
        if len(layer_data.colmaps) > 0:
            print("Adding %d Vertex Color Maps" % len(layer_data.colmaps))
            for cmap_key in layer_data.colmaps:
                map_pack= create_mappack(layer_data, cmap_key, "COLOR")
                vcol= me.vertex_colors.new(cmap_key)
                if not vcol:
                    break
                for fi in map_pack:
                    # Fixed off-by-one: fi == len(vcol.data) is also out
                    # of range for the vcol.data[fi] lookup below.
                    if fi >= len(vcol.data):
                        continue
                    face= map_pack[fi]
                    colf= vcol.data[fi]

                    if len(face) > 2:
                        colf.color1= face[0]
                        colf.color2= face[1]
                        colf.color3= face[2]
                    if len(face) == 4:
                        colf.color4= face[3]

        # Create the UV Maps.
        if len(layer_data.uvmaps) > 0:
            print("Adding %d UV Textures" % len(layer_data.uvmaps))
            for uvmap_key in layer_data.uvmaps:
                map_pack= create_mappack(layer_data, uvmap_key, "UV")
                uvm= me.uv_textures.new(uvmap_key)
                if not uvm:
                    break
                for fi in map_pack:
                    # Fixed off-by-one: same out-of-range guard as above.
                    if fi >= len(uvm.data):
                        continue
                    face= map_pack[fi]
                    uvf= uvm.data[fi]

                    if len(face) > 2:
                        uvf.uv1= face[0]
                        uvf.uv2= face[1]
                        uvf.uv3= face[2]
                    if len(face) == 4:
                        uvf.uv4= face[3]

        # Now add the NGons.
        if len(ngons) > 0:
            for ng_key in ngons:
                face_offset= len(me.faces)
                ng= ngons[ng_key]
                v_locs= []
                for vi in range(len(ng)):
                    v_locs.append(mathutils.Vector(layer_data.pnts[ngons[ng_key][vi]]))
                tris= tesselate_polygon([v_locs])
                me.faces.add(len(tris))
                for tri in tris:
                    face= me.faces[face_offset]
                    face.vertices_raw[0]= ng[tri[0]]
                    face.vertices_raw[1]= ng[tri[1]]
                    face.vertices_raw[2]= ng[tri[2]]
                    # Triangles inherit material and smoothing from the
                    # original (placeholder) NGon face.
                    face.material_index= me.faces[ng_key].material_index
                    face.use_smooth= me.faces[ng_key].use_smooth
                    face_offset+= 1

        # FaceIDs are no longer a concern, so now update the mesh.
        has_edges= len(edges) > 0 or len(layer_data.edge_weights) > 0
        me.update(calc_edges=has_edges)

        # Add the edges.
        edge_offset= len(me.edges)
        me.edges.add(len(edges))
        for edge_fi in edges:
            me.edges[edge_offset].vertices[0]= layer_data.pols[edge_fi][0]
            me.edges[edge_offset].vertices[1]= layer_data.pols[edge_fi][1]
            edge_offset+= 1

        # Apply the Edge Weighting.
        if len(layer_data.edge_weights) > 0:
            for edge in me.edges:
                # Weights are keyed on "v0 v1" strings; try both orders.
                edge_sa= "{0} {1}".format(edge.vertices[0], edge.vertices[1])
                edge_sb= "{0} {1}".format(edge.vertices[1], edge.vertices[0])
                if edge_sa in layer_data.edge_weights:
                    edge.crease= layer_data.edge_weights[edge_sa]
                elif edge_sb in layer_data.edge_weights:
                    edge.crease= layer_data.edge_weights[edge_sb]

        # Unfortunately we can't exlude certain faces from the subdivision.
        if layer_data.has_subds and add_subd_mod:
            ob.modifiers.new(name="Subsurf", type='SUBSURF')

        # Should we build an armature from the embedded rig?
        if len(layer_data.bones) > 0 and skel_to_arm:
            bpy.ops.object.armature_add()
            arm_object= bpy.context.active_object
            arm_object.name= "ARM_" + layer_data.name
            arm_object.data.name= arm_object.name
            arm_object.location= layer_data.pivot
            bpy.ops.object.mode_set(mode='EDIT')
            build_armature(layer_data, arm_object.data.edit_bones)
            bpy.ops.object.mode_set(mode='OBJECT')

        # Clear out the dictionaries for this layer.
        layer_data.bone_names.clear()
        layer_data.bone_rolls.clear()
        layer_data.wmaps.clear()
        layer_data.colmaps.clear()
        layer_data.uvmaps.clear()
        layer_data.morphs.clear()
        layer_data.surf_tags.clear()

    # With the objects made, setup the parents and re-adjust the locations.
    for ob_key in ob_dict:
        if ob_dict[ob_key][1] != -1 and ob_dict[ob_key][1] in ob_dict:
            parent_ob = ob_dict[ob_dict[ob_key][1]]
            ob_dict[ob_key][0].parent= parent_ob[0]
            ob_dict[ob_key][0].location-= parent_ob[0].location

    bpy.context.scene.update()

    print("Done Importing LWO File")
+
+
+from bpy.props import StringProperty, BoolProperty
+
+
class IMPORT_OT_lwo(bpy.types.Operator):
    '''Import LWO Operator.'''
    bl_idname = "import_scene.lwo"
    bl_label = "Import LWO"
    bl_description = "Import a LightWave Object file."
    bl_options = {'REGISTER', 'UNDO'}

    filepath = StringProperty(
        name="File Path",
        description="Filepath used for importing the LWO file",
        maxlen=1024,
        default="")

    ADD_SUBD_MOD = BoolProperty(
        name="Apply SubD Modifier",
        description="Apply the Subdivision Surface modifier to layers with Subpatches",
        default=True)
    LOAD_HIDDEN = BoolProperty(
        name="Load Hidden Layers",
        description="Load object layers that have been marked as hidden",
        default=False)
    SKEL_TO_ARM = BoolProperty(
        name="Create Armature",
        description="Create an armature from an embedded Skelegon rig",
        default=True)

    def execute(self, context):
        # Hand the user's options straight to the loader.
        load_lwo(self.filepath, context, self.ADD_SUBD_MOD,
                 self.LOAD_HIDDEN, self.SKEL_TO_ARM)
        return {'FINISHED'}

    def invoke(self, context, event):
        # Pop up the file selector; execute() runs once a file is chosen.
        context.window_manager.fileselect_add(self)
        return {'RUNNING_MODAL'}
+
+
def menu_func(self, context):
    # File > Import menu entry for the LWO importer.
    layout = self.layout
    layout.operator(IMPORT_OT_lwo.bl_idname, text="LightWave Object (.lwo)")
+
+
def register():
    # Register every class in this module, then expose the menu entry.
    bpy.utils.register_module(__name__)
    bpy.types.INFO_MT_file_import.append(menu_func)
+
+
def unregister():
    # Mirror register(): drop the classes, then the menu entry.
    bpy.utils.unregister_module(__name__)
    bpy.types.INFO_MT_file_import.remove(menu_func)
+
# Allow running the script directly from Blender's text editor.
if __name__ == "__main__":
    register()
diff --git a/io_import_scene_mhx.py b/io_import_scene_mhx.py
new file mode 100644
index 00000000..e861fcf8
--- /dev/null
+++ b/io_import_scene_mhx.py
@@ -0,0 +1,2718 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# Project Name: MakeHuman
+# Product Home Page: http://www.makehuman.org/
+# Code Home Page: http://code.google.com/p/makehuman/
+# Authors: Thomas Larsson
+# Script copyright (C) MakeHuman Team 2001-2011
+# Coding Standards: See http://sites.google.com/site/makehumandocs/developers-guide
+
+"""
+Abstract
+MHX (MakeHuman eXchange format) importer for Blender 2.5x.
+Version 1.4.0
+
+This script should be distributed with Blender.
+If not, place it in the .blender/scripts/addons dir
+Activate the script in the "Add-Ons" tab (user preferences).
+Access from the File > Import menu.
+
+Alternatively, run the script in the script editor (Alt-P), and access from the File > Import menu
+"""
+
# Add-on metadata shown in Blender's Add-Ons panel.
bl_info = {
    'name': 'Import: MakeHuman (.mhx)',
    'author': 'Thomas Larsson',
    'version': (1, 4, 0),
    "blender": (2, 5, 7),
    "api": 35774,
    'location': "File > Import > MakeHuman (.mhx)",
    'description': 'Import files in the MakeHuman eXchange format (.mhx)',
    'warning': '',
    'wiki_url': 'http://sites.google.com/site/makehumandocs/blender-export-and-mhx',
    'tracker_url': 'https://projects.blender.org/tracker/index.php?'\
        'func=detail&aid=21872',
    'category': 'Import-Export'}

# Importer version (compared against the MHX file header by
# checkMhxVersion) and the minimum supported Blender release
# (compared by checkBlenderVersion).
MAJOR_VERSION = 1
MINOR_VERSION = 4
SUB_VERSION = 0
BLENDER_VERSION = (2, 57, 0)
+
+#
+#
+#
+
+import bpy
+import os
+import time
+import mathutils
+from mathutils import Matrix
+#import geometry
+#import string
+
# Module-wide state used throughout the importer.
MHX249 = False      # Blender 2.49 compatibility mode; flipped by the file itself.
Blender24 = False
Blender25 = True
TexDir = "~/makehuman/exports"

#
#
#

theScale = 1.0      # global import scale; NoScale sections reset it to 1.0
One = 1.0/theScale
useMesh = 1
verbosity = 2
warnedTextureDir = False
warnedVersion = False   # set once a version-mismatch warning was shown

true = True
false = False
Epsilon = 1e-6
nErrors = 0         # counted by readMhxFile while executing deferred exprs
theTempDatum = None

# Deferred (expr, globals, locals) triples, executed at the end of
# readMhxFile once all datablocks exist.
todo = []
+
+#
+# toggle flags
+#
+
# Bit values for the global 'toggle' bitmask: what to import and how.
T_EnforceVersion = 0x01
T_Clothes = 0x02
T_Stretch = 0x04
T_Bend = 0x08

T_Diamond = 0x10
T_Replace = 0x20
T_Face = 0x40
T_Shape = 0x80

T_Mesh = 0x100
T_Armature = 0x200
T_Proxy = 0x400
T_Cage = 0x800

T_Rigify = 0x1000
T_Opcns = 0x2000
T_Symm = 0x4000

# Default import options.
toggle = T_EnforceVersion + T_Replace + T_Mesh + T_Armature + T_Face + T_Shape + T_Proxy + T_Clothes
+
+#
+# Blender versions
+#
+
# Indices into BlenderVersions; parseAnimDataFCurve keys an API
# difference (keyframe_points.add vs .insert) off theBlenderVersion.
BLENDER_GRAPHICALL = 0
BLENDER_256a = 1

BlenderVersions = ['Graphicall', 'Blender256a']
theBlenderVersion = BLENDER_GRAPHICALL
+
+#
+# setFlagsAndFloats(rigFlags):
+#
+# Global floats
+#fFingerPanel = 0.0
+#fFingerIK = 0.0
fNoStretch = 0.0    # 1.0 when T_Stretch is off; set by setFlagsAndFloats()

# rigLeg and rigArm flags
T_Toes = 0x0001
+#T_GoboFoot = 0x0002
+
+#T_InvFoot = 0x0010
+#T_InvFootPT = 0x0020
+#T_InvFootNoPT = 0x0040
+
+#T_FingerPanel = 0x100
+#T_FingerRot = 0x0200
+#T_FingerIK = 0x0400
+
+
+#T_LocalFKIK = 0x8000
+
+#rigLeg = 0
+#rigArm = 0
+
def setFlagsAndFloats():
    """Derive global feature floats from the 'toggle' bitmask.

    Currently only sets fNoStretch: 0.0 when stretchy bones are enabled
    (T_Stretch), 1.0 when they are not.  (The historical rigLeg/rigArm
    flag handling that used to live here is gone; see version control.)

    Bug fix: the T_Stretch branch used '==' (a no-op comparison), so
    fNoStretch was never actually set to 0.0.
    """
    global fNoStretch
    if toggle & T_Stretch:
        fNoStretch = 0.0
    else:
        fNoStretch = 1.0

    return
+
+
+#
+# Dictionaries
+#
+
# Registry of every datablock created during the import, keyed first by
# type name and then by datablock name; parse code resolves references
# through this table.
loadedData = {
    'NONE' : {},

    'Object' : {},
    'Mesh' : {},
    'Armature' : {},
    'Lamp' : {},
    'Camera' : {},
    'Lattice' : {},
    'Curve' : {},
    'Text' : {},

    'Material' : {},
    'Image' : {},
    'MaterialTextureSlot' : {},
    'Texture' : {},

    'Bone' : {},
    'BoneGroup' : {},
    'Rigify' : {},

    'Action' : {},
    'Group' : {},

    'MeshTextureFaceLayer' : {},
    'MeshColorLayer' : {},
    'VertexGroup' : {},
    'ShapeKey' : {},
    'ParticleSystem' : {},

    'ObjectConstraints' : {},
    'ObjectModifiers' : {},
    'MaterialSlot' : {},
}

# Maps MHX type names to the matching bpy.data collection attribute;
# used by parseDefaultType to create datablocks generically.
Plural = {
    'Object' : 'objects',
    'Mesh' : 'meshes',
    'Lattice' : 'lattices',
    'Curve' : 'curves',
    'Text' : 'texts',
    'Group' : 'groups',
    'Empty' : 'empties',
    'Armature' : 'armatures',
    'Bone' : 'bones',
    'BoneGroup' : 'bone_groups',
    'Pose' : 'poses',
    'PoseBone' : 'pose_bones',
    'Material' : 'materials',
    'Texture' : 'textures',
    'Image' : 'images',
    'Camera' : 'cameras',
    'Lamp' : 'lamps',
    'World' : 'worlds',
}
+
+#
+# checkBlenderVersion()
+#
+
def checkBlenderVersion():
    """Raise NameError if the running Blender is older than BLENDER_VERSION.

    Bug fix: the original compared each version component independently
    and returned success as soon as ANY single component was large
    enough, so e.g. Blender (2, 4, 9) passed a (2, 57, 0) requirement.
    Version tuples must be compared lexicographically as a whole.
    """
    print("Found Blender", bpy.app.version)
    # bpy.app.version and BLENDER_VERSION are (major, minor, sub) tuples;
    # tuple comparison gives the correct lexicographic ordering.
    if tuple(bpy.app.version) >= BLENDER_VERSION:
        return
    (a, b, c) = BLENDER_VERSION
    msg = (
"This version of the MHX importer only works with Blender (%d, %d, %d) or later. " % (a, b, c) +
"Download a more recent Blender from www.blender.org or www.graphicall.org.\n"
    )
    raise NameError(msg)
+
+#
+# readMhxFile(filePath):
+#
+
def readMhxFile(filePath):
    """Tokenize and parse the .mhx file at filePath, building the scene.

    The file is read into a nested token structure of [key, values,
    sub-tokens] triples ('end' lines pop a nesting level), honoring
    #if/#else/#endif preprocessor lines, then handed to parse().
    Deferred expressions collected in the global 'todo' list are executed
    at the very end, once all datablocks exist.
    """
    global todo, nErrors, theScale, defaultScale, One, toggle

    #checkBlenderVersion()

    defaultScale = theScale
    One = 1.0/theScale

    fileName = os.path.expanduser(filePath)
    (shortName, ext) = os.path.splitext(fileName)
    if ext.lower() != ".mhx":
        print("Error: Not a mhx file: " + fileName)
        return
    print( "Opening MHX file "+ fileName )
    time1 = time.clock()

    ignore = False
    stack = []            # enclosing token lists, one per open nesting level
    tokens = []           # token list currently being filled
    key = "toplevel"
    level = 0             # current structural nesting depth
    nErrors = 0
    comment = 0           # depth up to which #if conditions have been accepted
    nesting = 0           # depth of #if nesting; lines are skipped while comment < nesting

    setFlagsAndFloats()

    file= open(fileName, "rU")
    print( "Tokenizing" )
    lineNo = 0
    for line in file:
        # print(line)
        lineSplit= line.split()
        lineNo += 1
        if len(lineSplit) == 0:
            pass
        elif lineSplit[0][0] == '#':
            # Preprocessor lines: #if conditions are evaluated as Python
            # expressions; a failing eval counts as False.
            if lineSplit[0] == '#if':
                if comment == nesting:
                    try:
                        res = eval(lineSplit[1])
                    except:
                        res = False
                    if res:
                        comment += 1
                nesting += 1
            elif lineSplit[0] == '#else':
                if comment == nesting-1:
                    comment += 1
                elif comment == nesting:
                    comment -= 1
            elif lineSplit[0] == '#endif':
                if comment == nesting:
                    comment -= 1
                nesting -= 1
        elif comment < nesting:
            # Inside a rejected #if block: skip the line entirely.
            pass
        elif lineSplit[0] == 'end':
            # Close the current section and attach it to its parent token.
            try:
                sub = tokens
                tokens = stack.pop()
                if tokens:
                    tokens[-1][2] = sub
                level -= 1
            except:
                print( "Tokenizer error at or before line %d" % lineNo )
                print( line )
                dummy = stack.pop()
        elif lineSplit[-1] == ';':
            # One-line statement; a leading backslash escapes the keyword.
            if lineSplit[0] == '\\':
                key = lineSplit[1]
                tokens.append([key,lineSplit[2:-1],[]])
            else:
                key = lineSplit[0]
                tokens.append([key,lineSplit[1:-1],[]])
        else:
            # Section header: push the current list and start a sub-list.
            key = lineSplit[0]
            tokens.append([key,lineSplit[1:],[]])
            stack.append(tokens)
            level += 1
            tokens = []
    file.close()

    if level != 0:
        raise NameError("Tokenizer out of kilter %d" % level)
    clearScene()
    print( "Parsing" )
    parse(tokens)

    # Run the expressions that had to wait until everything was created.
    for (expr, glbals, lcals) in todo:
        try:
            print("Doing %s" % expr)
            exec(expr, glbals, lcals)
        except:
            msg = "Failed: "+expr
            print( msg )
            nErrors += 1
            #raise NameError(msg)

    time2 = time.clock()
    print("toggle = %x" % toggle)
    msg = "File %s loaded in %g s" % (fileName, time2-time1)
    if nErrors:
        msg += " but there where %d errors. " % (nErrors)
    print(msg)
    return
+
+#
+# getObject(name, var, glbals, lcals):
+#
+
def getObject(name, var, glbals, lcals):
    # Look up a previously created object by name; if it does not exist
    # yet, defer the lookup by pushing it on the todo list and return None.
    try:
        ob = loadedData['Object'][name]
    except:
        if name != "None":
            # NOTE(review): the '%' here is a no-op (the string has no
            # format specifiers), and the glbals/lcals parameters are
            # ignored in favor of globals()/locals() — this looks
            # unintended; confirm against pushOnTodoList's signature.
            pushOnTodoList(None, "ob = loadedData['Object'][name]" % globals(), locals())
        ob = None
    return ob
+
+#
+# checkMhxVersion(major, minor):
+#
+
def checkMhxVersion(major, minor):
    """Warn (or raise, with T_EnforceVersion) when the file's MHX version
    differs from the importer's MAJOR/MINOR version."""
    global warnedVersion
    print((major,minor), (MAJOR_VERSION, MINOR_VERSION), warnedVersion)

    # Matching version, or already warned once: nothing to do.
    if major == MAJOR_VERSION and minor == MINOR_VERSION:
        return
    if warnedVersion:
        return

    msg = (
"Wrong MHX version\n" +
"Expected MHX %d.%d but the loaded file has version MHX %d.%d\n" % (MAJOR_VERSION, MINOR_VERSION, major, minor) +
"You can disable this error message by deselecting the Enforce version option when importing. " +
"Alternatively, you can try to download the most recent nightly build from www.makehuman.org. " +
"The current version of the import script is located in the importers/mhx/blender25x folder and is called import_scene_mhx.py. " +
"The version distributed with Blender builds from www.graphicall.org may be out of date.\n"
)
    if toggle & T_EnforceVersion:
        raise NameError(msg)
    print(msg)
    warnedVersion = True
    return
+
+#
+# parse(tokens):
+#
+
# NOTE(review): ifResult is never assigned in this part of the file;
# presumably part of the conditional-section machinery — confirm.
ifResult = False

def parse(tokens):
    """Top-level dispatcher: walk the [key, values, sub-tokens] triples
    produced by the tokenizer and hand each known section to its
    dedicated parser."""
    global MHX249, ifResult, theScale, defaultScale, One

    for (key, val, sub) in tokens:
        print("Parse %s" % key)
        data = None
        if key == 'MHX':
            checkMhxVersion(int(val[0]), int(val[1]))
        elif key == 'MHX249':
            MHX249 = eval(val[0])
            print("Blender 2.49 compatibility mode is %s\n" % MHX249)
        elif MHX249:
            # In 2.49 compatibility mode everything else is skipped.
            pass
        elif key == 'print':
            msg = concatList(val)
            print(msg)
        elif key == 'warn':
            msg = concatList(val)
            print(msg)
        elif key == 'error':
            msg = concatList(val)
            raise NameError(msg)
        elif key == 'NoScale':
            if eval(val[0]):
                theScale = 1.0
            else:
                theScale = defaultScale
            One = 1.0/theScale
        elif key == "Object":
            parseObject(val, sub)
        elif key == "Mesh":
            data = parseMesh(val, sub)
        elif key == "Armature":
            data = parseArmature(val, sub)
        elif key == "Pose":
            data = parsePose(val, sub)
        elif key == "Action":
            data = parseAction(val, sub)
        elif key == "Material":
            data = parseMaterial(val, sub)
        elif key == "Texture":
            data = parseTexture(val, sub)
        elif key == "Image":
            data = parseImage(val, sub)
        elif key == "Curve":
            data = parseCurve(val, sub)
        elif key == "TextCurve":
            data = parseTextCurve(val, sub)
        elif key == "Lattice":
            data = parseLattice(val, sub)
        elif key == "Group":
            data = parseGroup(val, sub)
        elif key == "Lamp":
            data = parseLamp(val, sub)
        elif key == "World":
            data = parseWorld(val, sub)
        elif key == "Scene":
            data = parseScene(val, sub)
        elif key == "DefineProperty":
            parseDefineProperty(val, sub)
        elif key == "Process":
            parseProcess(val, sub)
        elif key == "PostProcess":
            postProcess(val)
            hideLayers(val)
        elif key == "CorrectRig":
            correctRig(val)
        elif key == 'AnimationData':
            # val[0] names the owning object; missing objects are skipped.
            try:
                ob = loadedData['Object'][val[0]]
            except:
                ob = None
            if ob:
                bpy.context.scene.objects.active = ob
                parseAnimationData(ob, val, sub)
        elif key == 'MaterialAnimationData':
            try:
                ob = loadedData['Object'][val[0]]
            except:
                ob = None
            if ob:
                bpy.context.scene.objects.active = ob
                # val[2] is the material slot index on the object.
                mat = ob.data.materials[int(val[2])]
                print("matanim", ob, mat)
                parseAnimationData(mat, val, sub)
        elif key == 'ShapeKeys':
            try:
                ob = loadedData['Object'][val[0]]
            except:
                raise NameError("ShapeKeys object %s does not exist" % val[0])
            if ob:
                bpy.context.scene.objects.active = ob
                parseShapeKeys(ob, ob.data, val, sub)
        else:
            # Anything unrecognized becomes a generically created datablock.
            data = parseDefaultType(key, val, sub)

        if data and key != 'Mesh':
            print( data )
    return
+
+#
+# parseDefaultType(typ, args, tokens):
+#
+
def parseDefaultType(typ, args, tokens):
    """Generically create a datablock of the given MHX type and fill its
    attributes from the tokens via defaultKey.

    typ    -- MHX type name; Plural maps it to a bpy.data collection.
    args   -- args[0] is the datablock name.
    tokens -- (key, val, sub) triples applied as attribute assignments.

    Robustness fix: the original built "bpy.data.%s.new('%s')" and
    eval()'d it, which breaks (or worse, executes injected code) for any
    name containing a quote; the collection is now looked up directly.
    """
    global todo

    name = args[0]
    data = getattr(bpy.data, Plural[typ]).new(name)

    bpyType = typ.capitalize()
    print(bpyType, name, data)
    loadedData[bpyType][name] = data
    if data is None:
        return None

    for (key, val, sub) in tokens:
        #print("%s %s" % (key, val))
        defaultKey(key, val, sub, 'data', [], globals(), locals())
    print("Done ", data)
    return data
+
+#
+# concatList(elts)
+#
+
def concatList(elts):
    """Join the elements of elts into one string, each preceded by a space.

    Idiom fix: uses str.join instead of repeated string concatenation in
    a loop (which is quadratic in the number of elements).
    """
    return "".join(" %s" % elt for elt in elts)
+
+#
+# parseAction(args, tokens):
+# parseFCurve(fcu, args, tokens):
+# parseKeyFramePoint(pt, args, tokens):
+#
+
def parseAction(args, tokens):
    """Create an Action on the active object from FCurve tokens.

    args[0] is the action name; args[1] is a validity flag.  Two passes
    over the tokens: the first inserts keyframes on the live channels
    (prepareActionFCurve), the second fills in the keyframe point data
    (parseActionFCurve) on the action that Blender created.
    """
    name = args[0]
    if invalid(args[1]):
        return

    ob = bpy.context.object
    bpy.ops.object.mode_set(mode='POSE')
    # Detach any existing action so keyframe insertion starts a new one.
    if ob.animation_data:
        ob.animation_data.action = None
    created = {}
    for (key, val, sub) in tokens:
        if key == 'FCurve':
            prepareActionFCurve(ob, created, val, sub)

    # Keyframe insertion above implicitly created this action.
    act = ob.animation_data.action
    loadedData['Action'][name] = act
    if act is None:
        print("Ignoring action %s" % name)
        return act
    act.name = name
    print("Action", name, act, ob)

    for (key, val, sub) in tokens:
        if key == 'FCurve':
            fcu = parseActionFCurve(act, ob, val, sub)
        else:
            defaultKey(key, val, sub, 'act', [], globals(), locals())
    # Leave the object without an active action; the action itself is kept.
    ob.animation_data.action = None
    bpy.ops.object.mode_set(mode='OBJECT')
    return act
+
def prepareActionFCurve(ob, created, args, tokens):
    """Insert placeholder keyframes for one FCurve's channel.

    args[0] is the RNA data path, args[1] the array index.  'created'
    tracks (expr, channel) pairs already handled so each channel is only
    keyframed once per action.
    """
    dataPath = args[0]
    index = args[1]
    (expr, channel) = channelFromDataPath(dataPath, index)
    # Skip channels we have already prepared for this action.
    try:
        if channel in created[expr]:
            return
        else:
            created[expr].append(channel)
    except:
        created[expr] = [channel]

    # Collect the frame numbers of all keyframe points ('kp' tokens).
    times = []
    for (key, val, sub) in tokens:
        if key == 'kp':
            times.append(int(val[0]))

    try:
        data = eval(expr)
    except:
        print("Ignoring illegal expression: %s" % expr)
        return

    # Insert one keyframe per frame; failures are counted, not fatal.
    n = 0
    for t in times:
        #bpy.context.scene.current_frame = t
        bpy.ops.anim.change_frame(frame = t)
        try:
            data.keyframe_insert(channel)
            n += 1
        except:
            pass
            #print("failed", data, expr, channel)
    if n != len(times):
        print("Mismatch", n, len(times), expr, channel)
    return
+
def channelFromDataPath(dataPath, index):
    """Split an RNA data path into an owner expression (relative to a
    variable named 'ob') and the channel name it animates."""
    words = dataPath.split(']')
    count = len(words)

    if count == 1:
        # Plain channel such as "location": the owner is the object itself.
        return ("ob", dataPath)

    # Bracketed path: everything up to the last ']' addresses the owner.
    expr = "ob.%s]" % (words[0])
    if count == 2:
        # e.g. pose.bones["tongue"].location -> channel "location"
        channel = words[1].split('.')[1]
    elif count == 3:
        # e.g. pose.bones["brow.R"]["mad"] -> custom property "mad"
        channel = words[1].split('"')[1]
    # print(expr, channel, index)
    return (expr, channel)
+
def parseActionFCurve(act, ob, args, tokens):
    """Fill the keyframe points of the FCurve that matches args' data
    path and array index; returns the FCurve or None if no match."""
    dataPath = args[0]
    index = args[1]
    (expr, channel) = channelFromDataPath(dataPath, index)
    index = int(args[1])

    # Find the FCurve created earlier by prepareActionFCurve.
    success = False
    for fcu in act.fcurves:
        (expr1, channel1) = channelFromDataPath(fcu.data_path, fcu.array_index)
        if expr1 == expr and channel1 == channel and fcu.array_index == index:
            success = True
            break
    if not success:
        return None

    # Overwrite the placeholder keyframe points with the file's data.
    n = 0
    for (key, val, sub) in tokens:
        if key == 'kp':
            try:
                pt = fcu.keyframe_points[n]
                pt.interpolation = 'LINEAR'
                pt = parseKeyFramePoint(pt, val, sub)
                n += 1
            except:
                pass
                #print(tokens)
                #raise NameError("kp", fcu, n, len(fcu.keyframe_points), val)
        else:
            defaultKey(key, val, sub, 'fcu', [], globals(), locals())
    return fcu
+
def parseKeyFramePoint(pt, args, tokens):
    """Set a keyframe point's coordinate and, when present, its handles.

    args: [x, y] or [x, y, h1x, h1y, h2x, h2y] as strings.
    Returns the mutated point.

    Bug fix: handle2's x coordinate previously reused args[3] (handle1's y)
    instead of args[4].
    """
    pt.co = (float(args[0]), float(args[1]))
    if len(args) > 2:
        pt.handle1 = (float(args[2]), float(args[3]))
        pt.handle2 = (float(args[4]), float(args[5]))
    return pt
+
+#
+# parseAnimationData(rna, args, tokens):
+# parseDriver(drv, args, tokens):
+# parseDriverVariable(var, args, tokens):
+#
+
def parseAnimationData(rna, args, tokens):
    """Create/populate animation data on the RNA struct `rna` from tokens.

    args[1] is a file-supplied boolean expression gating the whole section.
    Returns the animation data, or None when the gate is false."""
    if not eval(args[1]):  # eval of file token; trusted .mhx input only
        return
    print("Parse Animation data")
    if rna.animation_data is None:
        rna.animation_data_create()
    adata = rna.animation_data
    for (key, val, sub) in tokens:
        if key == 'FCurve':
            fcu = parseAnimDataFCurve(adata, rna, val, sub)
        else:
            defaultKey(key, val, sub, 'adata', [], globals(), locals())
    print(adata)
    return adata
+
def parseAnimDataFCurve(adata, rna, args, tokens):
    """Build one driven F-curve on `rna` from tokens.

    args: [dataPath, index, validity-flag].  Returns the F-curve.
    NOTE(review): assumes a 'Driver' token precedes any 'FModifier'/'kp'
    token; otherwise `fcu` is referenced before assignment."""
    global theBlenderVersion
    if invalid(args[2]):
        return
    dataPath = args[0]
    index = int(args[1])
    n = 1
    for (key, val, sub) in tokens:
        if key == 'Driver':
            fcu = parseDriver(adata, dataPath, index, rna, val, sub)
            # Drop the modifier that driver_add creates by default.
            fmod = fcu.modifiers[0]
            fcu.modifiers.remove(fmod)
        elif key == 'FModifier':
            parseFModifier(fcu, val, sub)
        elif key == 'kp':
            # keyframe_points API changed name in Blender 2.56a.
            if theBlenderVersion >= BLENDER_256a:
                pt = fcu.keyframe_points.add(n, 0)
            else:
                pt = fcu.keyframe_points.insert(n, 0)
            pt.interpolation = 'LINEAR'
            pt = parseKeyFramePoint(pt, val, sub)
            n += 1
        else:
            defaultKey(key, val, sub, 'fcu', [], globals(), locals())
    return fcu
+
+"""
+ fcurve = con.driver_add("influence", 0)
+ driver = fcurve.driver
+ driver.type = 'AVERAGE'
+"""
def parseDriver(adata, dataPath, index, rna, args, tokens):
    """Add a driver to `rna` at dataPath/index and configure it from tokens.

    Bracketed custom-property paths (ending in ']') are not driven: the
    owner is evaluated only for validation and None is returned.
    Otherwise returns the driver F-curve."""
    if dataPath[-1] == ']':
        words = dataPath.split(']')
        expr = "rna." + words[0] + ']'
        pwords = words[1].split('"')
        prop = pwords[1]
        #print("prop", expr, prop)
        bone = eval(expr)  # validation only; result unused
        return None
    else:
        # Build "rna.<path>.driver_add('<channel>', index)" and evaluate it.
        words = dataPath.split('.')
        channel = words[-1]
        expr = "rna"
        for n in range(len(words)-1):
            expr += "." + words[n]
        expr += ".driver_add('%s', index)" % channel

    #print("expr", rna, expr)
    fcu = eval(expr)
    drv = fcu.driver
    #print("  Driver type", drv, args[0])
    drv.type = args[0]
    #print("  ->", drv.type)
    for (key, val, sub) in tokens:
        if key == 'DriverVariable':
            var = parseDriverVariable(drv, rna, val, sub)
        else:
            defaultKey(key, val, sub, 'drv', [], globals(), locals())
    return fcu
+
def parseDriverVariable(drv, rna, args, tokens):
    """Create one driver variable (name args[0], type args[1]) on driver
    `drv` and fill its targets from 'Target' tokens.  Returns the variable."""
    var = drv.variables.new()
    var.name = args[0]
    #print("  Var type", var, args[1])
    var.type = args[1]
    #print("  ->", var.type)
    nTarget = 0
    for (key, val, sub) in tokens:
        if key == 'Target':
            parseDriverTarget(var, nTarget, rna, val, sub)
            nTarget += 1
        else:
            defaultKey(key, val, sub, 'var', [], globals(), locals())
    return var
+
def parseFModifier(fcu, args, tokens):
    """Add an F-curve modifier of type args[0] to `fcu` and configure it
    from tokens.  Returns the modifier."""
    fmod = fcu.modifiers.new(args[0])
    #fmod = fcu.modifiers[0]
    for (key, val, sub) in tokens:
        defaultKey(key, val, sub, 'fmod', [], globals(), locals())
    return fmod
+
+"""
+ var = driver.variables.new()
+ var.name = target_bone
+ var.targets[0].id_type = 'OBJECT'
+ var.targets[0].id = obj
+ var.targets[0].rna_path = driver_path
+"""
def parseDriverTarget(var, nTarget, rna, args, tokens):
    """Configure target nTarget of driver variable `var`: its id object is
    the previously loaded object named args[0]; the rest comes from tokens.
    Returns the target."""
    targ = var.targets[nTarget]
    ob = loadedData['Object'][args[0]]
    #print("    targ id", targ, ob)
    targ.id = ob
    #print("    ->", targ.id)
    for (key, val, sub) in tokens:
        defaultKey(key, val, sub, 'targ', [], globals(), locals())
    return targ
+
+
+#
+# parseMaterial(args, ext, tokens):
+# parseMTex(mat, args, tokens):
+# parseTexture(args, tokens):
+#
+
def parseMaterial(args, tokens):
    """Create the material named args[0], register it in loadedData, and
    populate texture slots, ramps, and sub-settings from tokens.
    Returns the material (or None if creation failed)."""
    global todo
    name = args[0]
    mat = bpy.data.materials.new(name)
    if mat is None:
        return None
    loadedData['Material'][name] = mat
    for (key, val, sub) in tokens:
        if key == 'MTex':
            parseMTex(mat, val, sub)
        elif key == 'Ramp':
            parseRamp(mat, val, sub)
        elif key == 'RaytraceTransparency':
            parseDefault(mat.raytrace_transparency, sub, {}, [])
        elif key == 'Halo':
            parseDefault(mat.halo, sub, {}, [])
        elif key == 'SSS':
            parseDefault(mat.subsurface_scattering, sub, {}, [])
        elif key == 'Strand':
            parseDefault(mat.strand, sub, {}, [])
        elif key == 'NodeTree':
            mat.use_nodes = True
            parseNodeTree(mat.node_tree, val, sub)
        else:
            # NOTE(review): 'exclude' is built but never passed to defaultKey;
            # presumably it was meant as the exclusion list — confirm intent.
            exclude = ['specular_intensity', 'tangent_shading']
            defaultKey(key, val, sub, 'mat', [], globals(), locals())

    return mat
+
def parseMTex(mat, args, tokens):
    """Add a texture slot to `mat` using the already-loaded texture args[1],
    with texture coordinates args[2].  args[0] (slot index) and args[3]
    (map-to) are parsed but not applied here.  Returns the slot."""
    global todo
    index = int(args[0])
    texname = args[1]
    texco = args[2]
    mapto = args[3]
    tex = loadedData['Texture'][texname]
    mtex = mat.texture_slots.add()
    mtex.texture_coords = texco
    mtex.texture = tex

    for (key, val, sub) in tokens:
        defaultKey(key, val, sub, "mtex", [], globals(), locals())

    return mtex
+
def parseTexture(args, tokens):
    """Create the texture named args[0] of type args[1], register it, and
    populate it (image, ramps, node tree) from tokens.  Returns the texture."""
    global todo
    if verbosity > 2:
        print( "Parsing texture %s" % args )
    name = args[0]
    tex = bpy.data.textures.new(name=name, type=args[1])
    loadedData['Texture'][name] = tex

    for (key, val, sub) in tokens:
        if key == 'Image':
            try:
                imgName = val[0]
                img = loadedData['Image'][imgName]
                tex.image = img
            except:
                # NOTE(review): msg is assigned but never printed or raised.
                msg = "Unable to load image '%s'" % val[0]
        elif key == 'Ramp':
            parseRamp(tex, val, sub)
        elif key == 'NodeTree':
            tex.use_nodes = True
            parseNodeTree(tex.node_tree, val, sub)
        else:
            defaultKey(key, val, sub, "tex", ['use_nodes', 'use_textures', 'contrast'], globals(), locals())

    return tex
+
def parseRamp(data, args, tokens):
    """Enable and fill the color ramp data.<args[0]> (e.g. color_ramp) on
    `data` from 'Element' tokens (color expression, position expression)."""
    # exec/eval build attribute access from the file token; trusted input only.
    nvar = "data.%s" % args[0]
    use = "data.use_%s = True" % args[0]
    exec(use)
    ramp = eval(nvar)
    elts = ramp.elements
    n = 0
    for (key, val, sub) in tokens:
        # print("Ramp", key, val)
        if key == 'Element':
            elts[n].color = eval(val[0])
            elts[n].position = eval(val[1])
            n += 1
        else:
            defaultKey(key, val, sub, "tex", ['use_nodes', 'use_textures', 'contrast'], globals(), locals())
+
def parseSSS(mat, args, tokens):
    """Apply token key/values to the material's subsurface-scattering
    settings via defaultKey (which reads the local name 'sss')."""
    sss = mat.subsurface_scattering
    for (key, val, sub) in tokens:
        defaultKey(key, val, sub, "sss", [], globals(), locals())
+
def parseStrand(mat, args, tokens):
    """Apply token key/values to the material's strand settings via
    defaultKey (which reads the local name 'strand')."""
    strand = mat.strand
    for (key, val, sub) in tokens:
        defaultKey(key, val, sub, "strand", [], globals(), locals())
+
+#
+# parseNodeTree(tree, args, tokens):
+# parseNode(node, args, tokens):
+# parseSocket(socket, args, tokens):
+#
+
def parseNodeTree(tree, args, tokens):
    """Node-tree import — deliberately disabled: the early return makes
    everything below unreachable (kept for future work)."""
    return
    print("Tree", tree, args)
    print(list(tree.nodes))
    tree.name = args[0]
    for (key, val, sub) in tokens:
        if key == 'Node':
            parseNodes(tree.nodes, val, sub)
        else:
            defaultKey(key, val, sub, "tree", [], globals(), locals())
+
def parseNodes(nodes, args, tokens):
    """Dead code path: only reachable from parseNodeTree, which returns
    immediately.  NOTE(review): references undefined name 'node' (the
    parameter is 'nodes') and would raise NameError if ever called."""
    print("Nodes", nodes, args)
    print(list(nodes))
    node.name = args[0]
    for (key, val, sub) in tokens:
        if key == 'Inputs':
            parseSocket(node.inputs, val, sub)
        elif key == 'Outputs':
            parseSocket(node.outputs, val, sub)
        else:
            defaultKey(key, val, sub, "node", [], globals(), locals())
+
def parseNode(node, args, tokens):
    """Configure one node: rename it to args[0] and fill its input/output
    sockets from tokens.  Only reachable via the disabled node-tree path."""
    print("Node", node, args)
    print(list(node.inputs), list(node.outputs))
    node.name = args[0]
    for (key, val, sub) in tokens:
        if key == 'Inputs':
            parseSocket(node.inputs, val, sub)
        elif key == 'Outputs':
            parseSocket(node.outputs, val, sub)
        else:
            defaultKey(key, val, sub, "node", [], globals(), locals())
+
def parseSocket(socket, args, tokens):
    """Configure one node socket.  Only reachable via the disabled node-tree
    path.  NOTE(review): the else branch references undefined name 'tree'
    and would raise NameError if ever taken."""
    print("Socket", socket, args)
    socket.name = args[0]
    for (key, val, sub) in tokens:
        if key == 'Node':
            parseNode(tree.nodes, val, sub)
        else:
            defaultKey(key, val, sub, "tree", [], globals(), locals())
+
+
+
+#
+# doLoadImage(filepath):
+# loadImage(filepath):
+# parseImage(args, tokens):
+#
+
def doLoadImage(filepath):
    """Try to load the image at `filepath` (after ~ expansion and symlink
    resolution).  Returns the bpy image, or None when the file is missing
    or unreadable."""
    path1 = os.path.expanduser(filepath)
    file1 = os.path.realpath(path1)
    if os.path.isfile(file1):
        print( "Found file "+file1 )
        try:
            img = bpy.data.images.load(file1)
            return img
        except:
            print( "Cannot read image" )
            return None
    else:
        print( "No file "+file1 )
        return None
+
+
def loadImage(filepath):
    """Locate and load the image for `filepath`.

    Tries the configured texture directory first, then the file's own
    directory.  Returns the bpy image or None, warning only once per
    import when nothing is found.

    Fix: removed the unreachable fallback after the final `return None`
    (it used the Blender 2.4x Draw.PupStrInput API and could never run).
    """
    global TexDir, warnedTextureDir, loadedData

    texDir = os.path.expanduser(TexDir)
    path1 = os.path.expanduser(filepath)
    file1 = os.path.realpath(path1)
    (path, filename) = os.path.split(file1)
    (name, ext) = os.path.splitext(filename)
    print( "Loading ", filepath, " = ", filename )

    # 1. Look in the texture directory.
    img = doLoadImage(texDir+"/"+filename)
    if img:
        return img

    # 2. Look next to the original file.
    img = doLoadImage(path+"/"+filename)
    if img:
        return img

    # Not found: warn only the first time.
    if warnedTextureDir:
        return None
    warnedTextureDir = True
    return None
+
def parseImage(args, tokens):
    """Load the image named args[0] from its 'Filename' token (which may
    contain spaces split across values), register it in loadedData, and
    return it.  Returns None when loading fails."""
    global todo
    imgName = args[0]
    img = None
    for (key, val, sub) in tokens:
        if key == 'Filename':
            # Rejoin a filename that the tokenizer split on spaces.
            filename = val[0]
            for n in range(1,len(val)):
                filename += " " + val[n]
            img = loadImage(filename)
            if img is None:
                return None
            img.name = imgName
        else:
            defaultKey(key, val, sub, "img", ['depth', 'dirty', 'has_data', 'size', 'type'], globals(), locals())
    print ("Image %s" % img )
    loadedData['Image'][imgName] = img
    return img
+
+#
+# parseObject(args, tokens):
+# createObject(type, name, data, datName):
+# setObjectAndData(args, typ):
+#
+
def parseObject(args, tokens):
    """Create or look up the object named args[0] (type args[1], data
    args[2]), link it into the scene, and apply modifiers, constraints,
    animation data, particle systems, and field settings from tokens."""
    if verbosity > 2:
        print( "Parsing object %s" % args )
    name = args[0]
    typ = args[1]
    datName = args[2]

    if typ == 'EMPTY':
        ob = bpy.data.objects.new(name, None)
        loadedData['Object'][name] = ob
        linkObject(ob, None)
    else:
        try:
            data = loadedData[typ.capitalize()][datName]
        except:
            raise NameError("Failed to find data: %s %s %s" % (name, typ, datName))
            return  # NOTE(review): unreachable — the raise above always fires

        try:
            ob = loadedData['Object'][name]
            bpy.context.scene.objects.active = ob
            #print("Found data", ob)
        except:
            ob = None

        if ob is None:
            print("Create", name, data, datName)
            ob = createObject(typ, name, data, datName)
            print("created", ob)
            linkObject(ob, data)

    for (key, val, sub) in tokens:
        if key == 'Modifier':
            parseModifier(ob, val, sub)
        elif key == 'Constraint':
            parseConstraint(ob.constraints, None, val, sub)
        elif key == 'AnimationData':
            parseAnimationData(ob, val, sub)
        elif key == 'ParticleSystem':
            parseParticleSystem(ob, val, sub)
        elif key == 'FieldSettings':
            parseDefault(ob.field, sub, {}, [])
        else:
            defaultKey(key, val, sub, "ob", ['type', 'data'], globals(), locals())

    # Needed for updating layers
    if bpy.context.object == ob:
        pass
        '''
        if ob.data in ['MESH', 'ARMATURE']:
            print(ob, ob.data)
            bpy.ops.object.mode_set(mode='EDIT')
            bpy.ops.object.mode_set(mode='OBJECT')
        '''
    else:
        print("Context", ob, bpy.context.object, bpy.context.scene.objects.active)
    return
+
def createObject(typ, name, data, datName):
    """Create a new object named `name` wrapping `data`, registering both
    the data (under typ.capitalize()/datName) and the object in loadedData.
    Returns the object."""
    # print( "Creating object %s %s %s" % (typ, name, data) )
    ob = bpy.data.objects.new(name, data)
    if data:
        loadedData[typ.capitalize()][datName] = data
    loadedData['Object'][name] = ob
    return ob
+
def linkObject(ob, data):
    """Attach `data` to `ob` if not already attached, link the object into
    the current scene, and make it active.  Returns the object."""
    #print("Data", data, ob.data)
    if data and ob.data is None:
        ob.data = data
        print("Data linked", ob, ob.data)
    scn = bpy.context.scene
    scn.objects.link(ob)
    scn.objects.active = ob
    #print("Linked object", ob)
    #print("Scene", scn)
    #print("Active", scn.objects.active)
    #print("Context", bpy.context.object)
    return ob
+
def setObjectAndData(args, typ):
    """Rename the current context object to args[1] and its data to args[0],
    register both in loadedData under `typ`, and return the data.
    Assumes the caller just added an object (bpy.ops.object.add etc.)."""
    datName = args[0]
    obName = args[1]
    #bpy.ops.object.add(type=typ)
    ob = bpy.context.object
    ob.name = obName
    ob.data.name = datName
    loadedData[typ][datName] = ob.data
    loadedData['Object'][obName] = ob
    return ob.data
+
+
+#
+# parseModifier(ob, args, tokens):
+#
+
+
def parseModifier(ob, args, tokens):
    """Add the modifier named args[0] of type args[1] to `ob` and configure
    it from tokens.  Particle-system modifiers are skipped (handled via
    parseParticleSystem).  Returns the modifier or None."""
    name = args[0]
    typ = args[1]
    if typ == 'PARTICLE_SYSTEM':
        return None
    mod = ob.modifiers.new(name, typ)
    for (key, val, sub) in tokens:
        if key == 'HookAssignNth':
            # Hook the n-th point of the object's own geometry.
            if val[0] == 'CURVE':
                hookAssignNth(mod, int(val[1]), True, ob.data.splines[0].points)
            elif val[0] == 'LATTICE':
                hookAssignNth(mod, int(val[1]), False, ob.data.points)
            elif val[0] == 'MESH':
                hookAssignNth(mod, int(val[1]), True, ob.data.vertices)
            else:
                raise NameError("Unknown hook %s" % val)
        else:
            defaultKey(key, val, sub, 'mod', [], globals(), locals())
    return mod
+
def hookAssignNth(mod, n, select, points):
    """Select only point `n` of `points` (when `select` is true) and assign
    the hook modifier `mod` to the selection via edit-mode operators."""
    if select:
        for pt in points:
            pt.select = False
        points[n].select = True
        # NOTE(review): 'sel' is only built for the debug print below.
        sel = []
        for pt in points:
            sel.append(pt.select)
        #print(mod, sel, n, points)

    bpy.ops.object.mode_set(mode='EDIT')
    bpy.ops.object.hook_reset(modifier=mod.name)
    bpy.ops.object.hook_select(modifier=mod.name)
    bpy.ops.object.hook_assign(modifier=mod.name)
    bpy.ops.object.mode_set(mode='OBJECT')
    return
+
+#
+# parseParticleSystem(ob, args, tokens):
+# parseParticles(particles, args, tokens):
+# parseParticle(par, args, tokens):
+#
+
def parseParticleSystem(ob, args, tokens):
    """Add a particle system named args[0] of type args[1] to `ob` (via the
    operator, so it lands on the active object) and configure it from
    tokens.  Returns the particle system."""
    print(ob, bpy.context.object)
    pss = ob.particle_systems
    print(pss, pss.values())
    name = args[0]
    typ = args[1]
    #psys = pss.new(name, typ)
    bpy.ops.object.particle_system_add()
    print(pss, pss.values())
    psys = pss[-1]  # the operator appends the new system last
    psys.name = name
    psys.settings.type = typ
    loadedData['ParticleSystem'][name] = psys
    print("Psys", psys)

    for (key, val, sub) in tokens:
        if key == 'Particles':
            parseParticles(psys, val, sub)
        else:
            defaultKey(key, val, sub, 'psys', [], globals(), locals())
    return psys
+
def parseParticles(psys, args, tokens):
    """Fill the particles of `psys` from 'Particle' tokens, toggling
    particle-edit mode around the whole update.  Returns the particles."""
    particles = psys.particles
    bpy.ops.particle.particle_edit_toggle()
    n = 0
    for (key, val, sub) in tokens:
        if key == 'Particle':
            parseParticle(particles[n], val, sub)
            n += 1
        else:
            # Non-particle keys are applied to every particle.
            for par in particles:
                defaultKey(key, val, sub, 'par', [], globals(), locals())
    bpy.ops.particle.particle_edit_toggle()
    return particles
+
def parseParticle(par, args, tokens):
    """Set one particle's hair keys ('h' tokens: location, time, weight)
    and/or its location from tokens."""
    n = 0
    for (key, val, sub) in tokens:
        if key == 'h':
            h = par.hair[n]
            h.location = eval(val[0])  # eval of file token; trusted input
            h.time = int(val[1])
            h.weight = float(val[2])
            n += 1
        elif key == 'location':
            par.location = eval(val[0])
    return
+
+#
+# unpackList(list_of_tuples):
+#
+
def unpackList(list_of_tuples):
    """Flatten a sequence of tuples into one flat list."""
    return [item for tup in list_of_tuples for item in tup]
+
+
+#
+
+# parseMesh (args, tokens):
+#
+
def parseMesh (args, tokens):
    """Build the mesh args[0] and its object args[1] from tokens.

    Three passes over tokens: (1) geometry (Verts/Edges/Faces) feeding
    from_pydata, (2) layers, groups, shape keys and materials, (3) per-face
    attributes (parseFaces2), which require the mesh to exist first.
    Returns the mesh."""
    global todo
    if verbosity > 2:
        print( "Parsing mesh %s" % args )

    mename = args[0]
    obname = args[1]
    me = bpy.data.meshes.new(mename)
    ob = createObject('MESH', obname, me, mename)

    verts = []
    edges = []
    faces = []
    vertsTex = []
    texFaces = []

    for (key, val, sub) in tokens:
        if key == 'Verts':
            verts = parseVerts(sub)
        elif key == 'Edges':
            edges = parseEdges(sub)
        elif key == 'Faces':
            faces = parseFaces(sub)

    # Faces imply their edges; only pass explicit edges for edge-only meshes.
    if faces:
        me.from_pydata(verts, [], faces)
    else:
        me.from_pydata(verts, edges, [])
    me.update()
    linkObject(ob, me)

    mats = []
    for (key, val, sub) in tokens:
        if key == 'Verts' or key == 'Edges' or key == 'Faces':
            pass
        elif key == 'MeshTextureFaceLayer':
            parseUvTexture(val, sub, me)
        elif key == 'MeshColorLayer':
            parseVertColorLayer(val, sub, me)
        elif key == 'VertexGroup':
            parseVertexGroup(ob, me, val, sub)
        elif key == 'ShapeKeys':
            parseShapeKeys(ob, me, val, sub)
        elif key == 'Material':
            try:
                mat = loadedData['Material'][val[0]]
            except:
                mat = None
            if mat:
                me.materials.append(mat)
        else:
            defaultKey(key, val, sub, "me", [], globals(), locals())

    for (key, val, sub) in tokens:
        if key == 'Faces':
            parseFaces2(sub, me)
    print(me)
    return me
+
+#
+# parseVerts(tokens):
+# parseEdges(tokens):
+# parseFaces(tokens):
+# parseFaces2(tokens, me):
+#
+
def parseVerts(tokens):
    """Return the scaled (x, y, z) coordinates of every 'v' token."""
    return [
        (theScale * float(val[0]), theScale * float(val[1]), theScale * float(val[2]))
        for (key, val, sub) in tokens
        if key == 'v'
    ]
+
def parseEdges(tokens):
    """Return the (v0, v1) vertex-index pair of every 'e' token."""
    return [
        (int(val[0]), int(val[1]))
        for (key, val, sub) in tokens
        if key == 'e'
    ]
+
def parseFaces(tokens):
    """Return the vertex-index lists of every tri/quad 'f' token.

    Bug fix: previously an 'f' token with a vertex count other than 3 or 4
    re-appended the face from the previous iteration (or raised
    UnboundLocalError for the first token).  Such tokens are now skipped.
    """
    faces = []
    for (key, val, sub) in tokens:
        if key == 'f' and len(val) in (3, 4):
            faces.append([int(v) for v in val])
    return faces
+
def parseFaces2(tokens, me):
    """Second face pass: apply per-face attributes to the created mesh.

    'ft'    — material index and smooth flag for the next face in order,
    'mn'    — material index for an explicitly numbered face,
    'ftall' — material index and smooth flag for every face."""
    n = 0
    for (key, val, sub) in tokens:
        if key == 'ft':
            f = me.faces[n]
            f.material_index = int(val[0])
            f.use_smooth = int(val[1])
            n += 1
        elif key == 'mn':
            fn = int(val[0])
            mn = int(val[1])
            f = me.faces[fn]
            f.material_index = mn
        elif key == 'ftall':
            mat = int(val[0])
            smooth = int(val[1])
            for f in me.faces:
                f.material_index = mat
                f.use_smooth = smooth
    return
+
+
+#
+# parseUvTexture(args, tokens, me):
+# parseUvTexData(args, tokens, uvdata):
+#
+
def parseUvTexture(args, tokens, me):
    """Create UV layer args[0] on mesh `me`, register it, and fill its
    per-face UV data from the 'Data' token."""
    name = args[0]
    me.uv_textures.new(name = name)
    uvtex = me.uv_textures[-1]  # new() appends; grab the created layer
    loadedData['MeshTextureFaceLayer'][name] = uvtex
    for (key, val, sub) in tokens:
        if key == 'Data':
            parseUvTexData(val, sub, uvtex.data)
        else:
            defaultKey(key, val, sub, "uvtex", [], globals(), locals())
    return
+
def parseUvTexData(args, tokens, data):
    """Fill per-face UVs from 'vt' tokens: three corner UVs, plus a fourth
    for quads (token length > 6)."""
    n = 0
    for (key, val, sub) in tokens:
        if key == 'vt':
            data[n].uv1 = (float(val[0]), float(val[1]))
            data[n].uv2 = (float(val[2]), float(val[3]))
            data[n].uv3 = (float(val[4]), float(val[5]))
            if len(val) > 6:
                data[n].uv4 = (float(val[6]), float(val[7]))
            n += 1
        else:
            pass
            #for i in range(n):
            #    defaultKey(key, val, sub, "data[i]", [], globals(), locals())
    return
+
+#
+# parseVertColorLayer(args, tokens, me):
+# parseVertColorData(args, tokens, data):
+#
+
def parseVertColorLayer(args, tokens, me):
    """Create vertex-color layer args[0] on mesh `me`, register it, and
    fill its per-face color data from the 'Data' token."""
    name = args[0]
    print("VertColorLayer", name)
    vcol = me.vertex_colors.new(name)
    loadedData['MeshColorLayer'][name] = vcol
    for (key, val, sub) in tokens:
        if key == 'Data':
            parseVertColorData(val, sub, vcol.data)
        else:
            defaultKey(key, val, sub, "vcol", [], globals(), locals())
    return
+
def parseVertColorData(args, tokens, data):
    """Fill per-face corner colors from 'cv' tokens (four color
    expressions per face, eval'd from the file — trusted input only)."""
    n = 0
    for (key, val, sub) in tokens:
        if key == 'cv':
            data[n].color1 = eval(val[0])
            data[n].color2 = eval(val[1])
            data[n].color3 = eval(val[2])
            data[n].color4 = eval(val[3])
            n += 1
    return
+
+
+#
+# parseVertexGroup(ob, me, args, tokens):
+#
+
def parseVertexGroup(ob, me, args, tokens):
    """Create vertex group args[0] on `ob` and fill vertex weights from
    'wv' tokens.  args[1] is a validity expression; groups are only made
    when armature import is on or the group is in the always-needed list."""
    global toggle, theBlenderVersion
    if verbosity > 2:
        print( "Parsing vertgroup %s" % args )
    grpName = args[0]
    try:
        res = eval(args[1])
    except:
        res = True
    if not res:
        return

    if (toggle & T_Armature) or (grpName in ['Eye_L', 'Eye_R', 'Gums', 'Head', 'Jaw', 'Left', 'Middle', 'Right', 'Scalp']):
        group = ob.vertex_groups.new(grpName)
        loadedData['VertexGroup'][grpName] = group
        # Weight-assignment API moved in Blender 2.56a.
        if theBlenderVersion >= BLENDER_256a:
            for (key, val, sub) in tokens:
                if key == 'wv':
                    ob.vertex_groups.assign([int(val[0])], group, float(val[1]), 'REPLACE')
        else:
            for (key, val, sub) in tokens:
                if key == 'wv':
                    group.add( [int(val[0])], float(val[1]), 'REPLACE' )
    return
+
+
+#
+# parseShapeKeys(ob, me, args, tokens):
+# parseShapeKey(ob, me, args, tokens):
+# addShapeKey(ob, name, vgroup, tokens):
+# doShape(name):
+#
+
def doShape(name):
    """Return True when the shape key `name` should be imported under the
    current toggle flags.  'Basis' is needed whenever shapes OR faces are
    enabled; everything else only when faces are.
    Note: '+' binds tighter than '&', so this tests
    toggle & (T_Shape + T_Face) — presumably the intended mask union."""
    if (toggle & T_Shape+T_Face) and (name == 'Basis'):
        return True
    else:
        return (toggle & T_Face)
+
def parseShapeKeys(ob, me, args, tokens):
    """Import every 'ShapeKey' token onto object `ob`, plus optional
    animation data on the shape-key block, then reset the active index."""
    for (key, val, sub) in tokens:
        if key == 'ShapeKey':
            parseShapeKey(ob, me, val, sub)
        elif key == 'AnimationData':
            if me.shape_keys:
                parseAnimationData(me.shape_keys, val, sub)
    ob.active_shape_key_index = 0
    print("Shapekeys parsed")
    return
+
+
def parseShapeKey(ob, me, args, tokens):
    """Add shape key args[0].  args[1] selects symmetry handling: 'Sym'
    (or symmetric import mode) makes one key, 'LR' makes mirrored _L/_R
    keys bound to the Left/Right vertex groups.  args[2] is a validity flag."""
    if verbosity > 2:
        print( "Parsing ob %s shape %s" % (bpy.context.object, args[0] ))
    name = args[0]
    lr = args[1]
    if invalid(args[2]):
        return

    if lr == 'Sym' or toggle & T_Symm:
        addShapeKey(ob, name, None, tokens)
    elif lr == 'LR':
        addShapeKey(ob, name+'_L', 'Left', tokens)
        addShapeKey(ob, name+'_R', 'Right', tokens)
    else:
        raise NameError("ShapeKey L/R %s" % lr)
    return
+
def addShapeKey(ob, name, vgroup, tokens):
    """Create one shape key `name` on `ob` (relative to Basis, optionally
    limited to vertex group `vgroup`) and offset its points by the scaled
    deltas in 'sv' tokens."""
    skey = ob.shape_key_add(name=name, from_mix=False)
    if name != 'Basis':
        skey.relative_key = loadedData['ShapeKey']['Basis']
    skey.name = name
    if vgroup:
        skey.vertex_group = vgroup
    loadedData['ShapeKey'][name] = skey

    for (key, val, sub) in tokens:
        if key == 'sv':
            # Deltas are relative to the base mesh coordinates.
            index = int(val[0])
            pt = skey.data[index].co
            pt[0] += theScale*float(val[1])
            pt[1] += theScale*float(val[2])
            pt[2] += theScale*float(val[3])
        else:
            defaultKey(key, val, sub, "skey", [], globals(), locals())

    return
+
+
+#
+# parseArmature (obName, args, tokens)
+#
+
def parseArmature (args, tokens):
    """Build the armature args[0] / object args[1].  Mode args[2] selects
    either Rigify import (delegated) or plain edit-bone creation with
    optional roll recalculation.  Returns the armature."""
    global toggle
    if verbosity > 2:
        print( "Parsing armature %s" % args )

    amtname = args[0]
    obname = args[1]
    mode = args[2]

    if mode == 'Rigify':
        toggle |= T_Rigify
        return parseRigify(amtname, obname, tokens)

    toggle &= ~T_Rigify
    amt = bpy.data.armatures.new(amtname)
    ob = createObject('ARMATURE', obname, amt, amtname)

    linkObject(ob, amt)
    print("Linked")

    # Edit mode is required to create edit bones.
    bpy.ops.object.mode_set(mode='OBJECT')
    bpy.ops.object.mode_set(mode='EDIT')

    heads = {}
    tails = {}
    for (key, val, sub) in tokens:
        if key == 'Bone':
            bname = val[0]
            if not invalid(val[1]):
                bone = amt.edit_bones.new(bname)
                parseBone(bone, amt, sub, heads, tails)
                loadedData['Bone'][bname] = bone
        elif key == 'RecalcRoll':
            # Recalculate rolls for the listed bones, then stash each
            # bone's roll in a custom property (readable outside edit mode).
            rolls = {}
            for bone in amt.edit_bones:
                bone.select = False
            blist = eval(val[0])
            for name in blist:
                bone = amt.edit_bones[name]
                bone.select = True
            bpy.ops.armature.calculate_roll(type='Z')
            for bone in amt.edit_bones:
                rolls[bone.name] = bone.roll
            bpy.ops.object.mode_set(mode='OBJECT')
            for bone in amt.bones:
                bone['Roll'] = rolls[bone.name]
            bpy.ops.object.mode_set(mode='EDIT')
        else:
            defaultKey(key, val, sub, "amt", ['MetaRig'], globals(), locals())
    bpy.ops.object.mode_set(mode='OBJECT')
    return amt
+
+#
+# parseRigify(amtname, obname, tokens):
+#
+
def parseRigify(amtname, obname, tokens):
    """Build an armature from a Rigify metarig.  The first token must be
    'MetaRig' naming the sample type; subsequent 'Bone' tokens adjust the
    generated bones.  Returns the armature."""
    (key,val,sub) = tokens[0]
    if key != 'MetaRig':
        raise NameError("Expected MetaRig")
    typ = val[0]
    if typ == "human":
        bpy.ops.object.armature_human_advanced_add()
    else:
        bpy.ops.pose.metarig_sample_add(type = typ)
    ob = bpy.context.scene.objects.active
    amt = ob.data
    loadedData['Rigify'][obname] = ob
    loadedData['Armature'][amtname] = amt
    loadedData['Object'][obname] = ob
    print("Rigify object", ob, amt)

    bpy.ops.object.mode_set(mode='OBJECT')
    bpy.ops.object.mode_set(mode='EDIT')

    # Remember the metarig's (scaled) rest positions for parseBone.
    heads = {}
    tails = {}
    for (bname, bone) in amt.edit_bones.items():
        heads[bname] = 10*theScale*bone.head
        tails[bname] = 10*theScale*bone.tail

    for (key, val, sub) in tokens:
        if key == 'Bone':
            bname = val[0]
            print("Bone", bname)
            try:
                bone = amt.edit_bones[bname]
            except:
                print("Did not find bone %s" % bname)
                bone = None
            print("  -> ", bone)
            if bone:
                parseBone(bone, amt, sub, heads, tails)
        else:
            defaultKey(key, val, sub, "amt", ['MetaRig'], globals(), locals())
    bpy.ops.object.mode_set(mode='OBJECT')
    return amt
+
+#
+# parseBone(bone, amt, tokens, heads, tails):
+#
+
def parseBone(bone, amt, tokens, heads, tails):
    """Configure one edit bone: scaled head/tail positions plus generic
    attributes.  `heads`/`tails` are accepted for interface compatibility
    but not read here.  Returns the bone."""
    global todo

    for (key, val, sub) in tokens:
        if key == "head":
            bone.head = (theScale*float(val[0]), theScale*float(val[1]), theScale*float(val[2]))
        elif key == "tail":
            bone.tail = (theScale*float(val[0]), theScale*float(val[1]), theScale*float(val[2]))
        #elif key == 'restrict_select':
        #    pass
        elif key == 'hide' and val[0] == 'True':
            # Hiding is deferred; see the disabled block below.
            name = bone.name
            '''
            #bpy.ops.object.mode_set(mode='OBJECT')
            pbone = amt.bones[name]
            pbone.hide = True
            print("Hide", pbone, pbone.hide)
            #bpy.ops.object.mode_set(mode='EDIT')
            '''
        else:
            defaultKey(key, val, sub, "bone", [], globals(), locals())
    return bone
+
+#
+# parsePose (args, tokens):
+#
+
def parsePose (args, tokens):
    """Configure the pose of the loaded object args[0]: pose bones, bone
    groups, and per-bone custom properties ('SetProp').  Skipped entirely
    for Rigify rigs.  Returns the object."""
    global todo
    if toggle & T_Rigify:
        return
    name = args[0]
    ob = loadedData['Object'][name]
    bpy.context.scene.objects.active = ob
    bpy.ops.object.mode_set(mode='POSE')
    pbones = ob.pose.bones
    nGrps = 0
    for (key, val, sub) in tokens:
        if key == 'Posebone':
            parsePoseBone(pbones, ob, val, sub)
        elif key == 'BoneGroup':
            parseBoneGroup(ob.pose, nGrps, val, sub)
            nGrps += 1
        elif key == 'SetProp':
            # Custom property on a pose bone: value is eval'd file data.
            bone = val[0]
            prop = val[1]
            value = eval(val[2])
            pb = pbones[bone]
            print("Setting", pb, prop, val)
            pb[prop] = value
            print("Prop set", pb[prop])
        else:
            defaultKey(key, val, sub, "ob.pose", [], globals(), locals())
    bpy.ops.object.mode_set(mode='OBJECT')
    return ob
+
+
+#
+# parsePoseBone(pbones, args, tokens):
+# parseArray(data, exts, args):
+#
+
def parseBoneGroup(pose, nGrps, args, tokens):
    """Add a bone group named args[0] to the pose and configure it from
    tokens.  `nGrps` is accepted for interface compatibility but unused."""
    global todo
    if verbosity > 2:
        print( "Parsing bonegroup %s" % args )
    name = args[0]
    bpy.ops.pose.group_add()
    bg = pose.bone_groups.active  # group_add makes the new group active
    loadedData['BoneGroup'][name] = bg
    for (key, val, sub) in tokens:
        defaultKey(key, val, sub, "bg", [], globals(), locals())
    return
+
def parsePoseBone(pbones, ob, args, tokens):
    """Configure pose bone args[0]: constraints, operator calls, IK
    settings (expanded to per-axis attributes), hiding, and generic
    attributes.  args[1] is a validity flag."""
    global todo
    if invalid(args[1]):
        return
    name = args[0]
    pb = pbones[name]
    amt = ob.data
    amt.bones.active = pb.bone

    for (key, val, sub) in tokens:
        if key == 'Constraint':
            amt.bones.active = pb.bone
            cns = parseConstraint(pb.constraints, pb, val, sub)
        elif key == 'bpyops':
            # Run an arbitrary operator named in the file; trusted input only.
            amt.bones.active = pb.bone
            expr = "bpy.ops.%s" % val[0]
            print(expr)
            exec(expr)
        elif key == 'ik_dof':
            parseArray(pb, ["ik_dof_x", "ik_dof_y", "ik_dof_z"], val)
        elif key == 'ik_limit':
            parseArray(pb, ["ik_limit_x", "ik_limit_y", "ik_limit_z"], val)
        elif key == 'ik_max':
            parseArray(pb, ["ik_max_x", "ik_max_y", "ik_max_z"], val)
        elif key == 'ik_min':
            parseArray(pb, ["ik_min_x", "ik_min_y", "ik_min_z"], val)
        elif key == 'ik_stiffness':
            parseArray(pb, ["ik_stiffness_x", "ik_stiffness_y", "ik_stiffness_z"], val)
        elif key == 'hide':
            #bpy.ops.object.mode_set(mode='OBJECT')
            amt.bones[name].hide = eval(val[0])
            #bpy.ops.object.mode_set(mode='POSE')

        else:
            defaultKey(key, val, sub, "pb", [], globals(), locals())
    #print("pb %s done" % name)
    return
+
def parseArray(data, exts, args):
    """Assign args[1:] to the attributes of `data` named in `exts`.

    args[0] is skipped (it is the token's leading field); each remaining
    value is a Python literal/expression string from the file and is
    eval'd — trusted .mhx input only.

    Improvement: replaced the exec()-built assignment statement with
    setattr, which is equivalent but avoids string-splicing code.
    """
    for n, ext in enumerate(exts, start=1):
        setattr(data, ext, eval(args[n]))
    return
+
+#
+# parseConstraint(constraints, pb, args, tokens)
+#
+
def parseConstraint(constraints, pb, args, tokens):
    """Add a constraint named args[0] of type args[1] to `constraints`
    (via the operator when T_Opcns is set and a pose bone is given) and
    configure it, expanding per-axis shorthand tokens.  args[2] is a
    validity flag.  Returns the constraint or None."""
    if invalid(args[2]):
        return None
    if (toggle&T_Opcns and pb):
        # Operator path needs the right object/bone to be active.
        print("Active")
        aob = bpy.context.object
        print("ob", aob)
        aamt = aob.data
        print("amt", aamt)
        apose = aob.pose
        print("pose", apose)
        abone = aamt.bones.active
        print("bone", abone)
        print('Num cns before', len(list(constraints)))
        bpy.ops.pose.constraint_add(type=args[1])
        cns = constraints.active
        print('and after', pb, cns, len(list(constraints)))
    else:
        cns = constraints.new(args[1])

    cns.name = args[0]
    for (key,val,sub) in tokens:
        if key == 'invert':
            parseArray(cns, ["invert_x", "invert_y", "invert_z"], val)
        elif key == 'use':
            parseArray(cns, ["use_x", "use_y", "use_z"], val)
        elif key == 'pos_lock':
            parseArray(cns, ["lock_location_x", "lock_location_y", "lock_location_z"], val)
        elif key == 'rot_lock':
            parseArray(cns, ["lock_rotation_x", "lock_rotation_y", "lock_rotation_z"], val)
        else:
            defaultKey(key, val, sub, "cns", [], globals(), locals())


    #print("cns %s done" % cns.name)
    return cns
+
+#
+
+
+# parseCurve (args, tokens):
+# parseSpline(cu, args, tokens):
+# parseBezier(spline, n, args, tokens):
+#
+
def parseCurve (args, tokens):
    """Add a curve object, name it/its data from args, and fill its
    splines and generic attributes from tokens."""
    global todo
    if verbosity > 2:
        print( "Parsing curve %s" % args )
    bpy.ops.object.add(type='CURVE')
    cu = setObjectAndData(args, 'Curve')

    for (key, val, sub) in tokens:
        if key == 'Spline':
            parseSpline(cu, val, sub)
        else:
            defaultKey(key, val, sub, "cu", [], globals(), locals())
    return
+
def parseTextCurve (args, tokens):
    """Add a text object, name it/its data from args, and fill splines,
    formats, and font settings from tokens.
    NOTE(review): the 'TextBox' branch writes txt.body_format, same as
    'BodyFormat' — looks like a copy-paste slip; confirm intended target."""
    global todo
    if verbosity > 2:
        print( "Parsing text curve %s" % args )
    bpy.ops.object.text_add()
    txt = setObjectAndData(args, 'Text')

    for (key, val, sub) in tokens:
        if key == 'Spline':
            parseSpline(txt, val, sub)
        elif key == 'BodyFormat':
            parseCollection(txt.body_format, sub, [])
        elif key == 'EditFormat':
            parseDefault(txt.edit_format, sub, {}, [])
        elif key == 'Font':
            parseDefault(txt.font, sub, {}, [])
        elif key == 'TextBox':
            parseCollection(txt.body_format, sub, [])
        else:
            defaultKey(key, val, sub, "txt", [], globals(), locals())
    return
+
+
def parseSpline(cu, args, tokens):
    """Add a spline of type args[0] with args[1] points to curve data `cu`
    and fill its bezier ('bz') or plain ('pt') points from tokens.
    args[2] (point count V) is parsed but unused here."""
    typ = args[0]
    spline = cu.splines.new(typ)
    nPointsU = int(args[1])
    nPointsV = int(args[2])
    #spline.point_count_u = nPointsU
    #spline.point_count_v = nPointsV
    if typ == 'BEZIER' or typ == 'BSPLINE':
        spline.bezier_points.add(nPointsU)
    else:
        spline.points.add(nPointsU)

    n = 0
    for (key, val, sub) in tokens:
        if key == 'bz':
            parseBezier(spline.bezier_points[n], val, sub)
            n += 1
        elif key == 'pt':
            parsePoint(spline.points[n], val, sub)
            n += 1
        else:
            defaultKey(key, val, sub, "spline", [], globals(), locals())
    return
+
def parseBezier(bez, args, tokens):
    """Set one bezier point: scaled coordinate, then handle positions and
    types from args (all coordinate args are eval'd file expressions)."""
    bez.co = eval(args[0])
    bez.co = theScale*bez.co
    bez.handle1 = eval(args[1])
    bez.handle1_type = args[2]
    bez.handle2 = eval(args[3])
    bez.handle2_type = args[4]
    return
+
def parsePoint(pt, args, tokens):
    """Set one spline point's coordinate (eval'd from args[0], then scaled)."""
    pt.co = eval(args[0])
    pt.co = theScale*pt.co
    print(" pt", pt.co)
    return
+
+#
+# parseLattice (args, tokens):
+#
+
def parseLattice (args, tokens):
    """Add a lattice object, name it/its data from args, and fill its
    points and generic attributes from tokens."""
    global todo
    if verbosity > 2:
        print( "Parsing lattice %s" % args )
    bpy.ops.object.add(type='LATTICE')
    lat = setObjectAndData(args, 'Lattice')
    for (key, val, sub) in tokens:
        if key == 'Points':
            parseLatticePoints(val, sub, lat.points)
        else:
            defaultKey(key, val, sub, "lat", [], globals(), locals())
    return
+
def parseLatticePoints(args, tokens, points):
    """Set the scaled deformed coordinates of lattice points from 'pt'
    tokens, in order."""
    global todo
    n = 0
    for (key, val, sub) in tokens:
        if key == 'pt':
            v = points[n].co_deform
            v.x = theScale*float(val[0])
            v.y = theScale*float(val[1])
            v.z = theScale*float(val[2])
            n += 1
    return
+
+#
+# parseLamp (args, tokens):
+# parseFalloffCurve(focu, args, tokens):
+#
+
def parseLamp (args, tokens):
    """Add a lamp object, name it/its data from args, and configure it
    (falloff curve handling is currently a no-op stub)."""
    global todo
    if verbosity > 2:
        print( "Parsing lamp %s" % args )
    bpy.ops.object.add(type='LAMP')
    lamp = setObjectAndData(args, 'Lamp')
    for (key, val, sub) in tokens:
        if key == 'FalloffCurve':
            parseFalloffCurve(lamp.falloff_curve, val, sub)
        else:
            defaultKey(key, val, sub, "lamp", [], globals(), locals())
    return
+
def parseFalloffCurve(focu, args, tokens):
    """Stub: lamp falloff-curve import is not implemented."""
    return
+
+#
+# parseGroup (args, tokens):
+# parseGroupObjects(args, tokens, grp):
+#
+
def parseGroup (args, tokens):
    """Create the group named args[0], register it, and link its member
    objects from the 'Objects' token."""
    global todo
    if verbosity > 2:
        print( "Parsing group %s" % args )

    grpName = args[0]
    grp = bpy.data.groups.new(grpName)
    loadedData['Group'][grpName] = grp
    for (key, val, sub) in tokens:
        if key == 'Objects':
            parseGroupObjects(val, sub, grp)
        else:
            defaultKey(key, val, sub, "grp", [], globals(), locals())
    return
+
def parseGroupObjects(args, tokens, grp):
    """Link every loaded object named by an 'ob' token into group `grp`.
    Missing objects are silently skipped (best effort)."""
    global todo
    for (key, val, sub) in tokens:
        if key == 'ob':
            try:
                ob = loadedData['Object'][val[0]]
                grp.objects.link(ob)
            except:
                pass
    return
+
+#
+# parseWorld (args, tokens):
+#
+
def parseWorld (args, tokens):
    """Configure the current scene's world: lighting, mist, stars, and
    generic attributes from tokens."""
    global todo
    if verbosity > 2:
        print( "Parsing world %s" % args )
    world = bpy.context.scene.world
    for (key, val, sub) in tokens:
        if key == 'Lighting':
            parseDefault(world.lighting, sub, {}, [])
        elif key == 'Mist':
            parseDefault(world.mist, sub, {}, [])
        elif key == 'Stars':
            parseDefault(world.stars, sub, {}, [])
        else:
            defaultKey(key, val, sub, "world", [], globals(), locals())
    return
+
+#
+# parseScene (args, tokens):
+# parseRenderSettings(render, args, tokens):
+# parseToolSettings(tool, args, tokens):
+#
+
+def parseScene (args, tokens):
+ global todo
+ if verbosity > 2:
+ print( "Parsing scene %s" % args )
+ scn = bpy.context.scene
+ for (key, val, sub) in tokens:
+ if key == 'NodeTree':
+ scn.use_nodes = True
+ parseNodeTree(scn, val, sub)
+ elif key == 'GameData':
+ parseDefault(scn.game_data, sub, {}, [])
+ elif key == 'KeyingSet':
+ pass
+ #parseDefault(scn.keying_sets, sub, {}, [])
+ elif key == 'ObjectBase':
+ pass
+ #parseDefault(scn.bases, sub, {}, [])
+ elif key == 'RenderSettings':
+ parseRenderSettings(scn.render, sub, [])
+ elif key == 'ToolSettings':
+ subkeys = {'ImagePaint' : "image_paint",
+ 'Sculpt' : "sculpt",
+ 'VertexPaint' : "vertex_paint",
+ 'WeightPaint' : "weight_paint" }
+ parseDefault(scn.tool_settings, sub, subkeys, [])
+ elif key == 'UnitSettings':
+ parseDefault(scn.unit_settings, sub, {}, [])
+ else:
+ defaultKey(key, val, sub, "scn", [], globals(), locals())
+ return
+
+def parseRenderSettings(render, args, tokens):
+ global todo
+ if verbosity > 2:
+ print( "Parsing RenderSettings %s" % args )
+ for (key, val, sub) in tokens:
+ if key == 'Layer':
+ pass
+ #parseDefault(scn.layers, sub, [])
+ else:
+ defaultKey(key, val, sub, "render", [], globals(), locals())
+ return
+
+#
+# parseDefineProperty(args, tokens):
+#
+
+def parseDefineProperty(args, tokens):
+ expr = "bpy.types.Object.%s = %sProperty" % (args[0], args[1])
+ c = '('
+ for option in args[2:]:
+ expr += "%s %s" % (c, option)
+ c = ','
+ expr += ')'
+ #print(expr)
+ exec(expr)
+ #print("Done")
+ return
+
+#
+# correctRig(args):
+#
+
+def correctRig(args):
+ human = args[0]
+ print("CorrectRig %s" % human)
+ try:
+ ob = loadedData['Object'][human]
+ except:
+ return
+ bpy.context.scene.objects.active = ob
+ bpy.ops.object.mode_set(mode='POSE')
+ amt = ob.data
+ cnslist = []
+ for pb in ob.pose.bones:
+ for cns in pb.constraints:
+ if cns.type == 'CHILD_OF':
+ cnslist.append((pb, cns, cns.influence))
+ cns.influence = 0
+
+ for (pb, cns, inf) in cnslist:
+ amt.bones.active = pb.bone
+ cns.influence = 1
+ #print("Childof %s %s %s %.2f" % (amt.name, pb.name, cns.name, inf))
+ bpy.ops.constraint.childof_clear_inverse(constraint=cns.name, owner='BONE')
+ bpy.ops.constraint.childof_set_inverse(constraint=cns.name, owner='BONE')
+ cns.influence = 0
+
+ for (pb, cns, inf) in cnslist:
+ cns.influence = inf
+ return
+
+
+#
+# postProcess(args)
+#
+
+def postProcess(args):
+ human = args[0]
+ print("Postprocess %s" % human)
+ try:
+ ob = loadedData['Object'][human]
+ except:
+ ob = None
+ if toggle & T_Diamond == 0 and ob:
+ deleteDiamonds(ob)
+ if toggle & T_Rigify and False:
+ for rig in loadedData['Rigify'].values():
+ bpy.context.scene.objects.active = rig
+ print("Rigify", rig)
+ bpy.ops.pose.metarig_generate()
+ print("Metarig generated")
+ #bpy.context.scene.objects.unlink(rig)
+
+ rig = bpy.context.scene.objects.active
+ print("Rigged", rig, bpy.context.object)
+ ob = loadedData['Object'][human]
+ mod = ob.modifiers[0]
+ print(ob, mod, mod.object)
+ mod.object = rig
+ print("Rig changed", mod.object)
+ return
+
+#
+# deleteDiamonds(ob)
+# Delete joint diamonds in main mesh
+#
+
+def deleteDiamonds(ob):
+ bpy.context.scene.objects.active = ob
+ if not bpy.context.object:
+ return
+ print("Delete diamonds in %s" % bpy.context.object)
+ bpy.ops.object.mode_set(mode='EDIT')
+ bpy.ops.mesh.select_all(action='DESELECT')
+ bpy.ops.object.mode_set(mode='OBJECT')
+ me = ob.data
+ for f in me.faces:
+ if len(f.vertices) < 4:
+ for vn in f.vertices:
+ me.vertices[vn].select = True
+ bpy.ops.object.mode_set(mode='EDIT')
+ bpy.ops.mesh.delete(type='VERT')
+ bpy.ops.object.mode_set(mode='OBJECT')
+ return
+
+
+#
+# parseProcess(args, tokens):
+# applyTransform(objects, rig, parents):
+#
+
+def parseProcess(args, tokens):
+ if toggle & T_Bend == 0:
+ return
+ try:
+ rig = loadedData['Object'][args[0]]
+ except:
+ rig = None
+ if not rig:
+ return
+
+ parents = {}
+ objects = []
+
+ for (key, val, sub) in tokens:
+ #print(key, val)
+ if key == 'Reparent':
+ bname = val[0]
+ try:
+ eb = ebones[bname]
+ parents[bname] = eb.parent.name
+ eb.parent = ebones[val[1]]
+ except:
+ pass
+ elif key == 'Bend':
+ axis = val[1]
+ angle = float(val[2])
+ mat = Matrix.Rotation(angle, 4, axis)
+ try:
+ pb = pbones[val[0]]
+ except:
+ pb = None
+ print("No bone "+val[0])
+ if pb:
+ prod = pb.matrix_basis * mat
+ for i in range(4):
+ for j in range(4):
+ pb.matrix_basis[i][j] = prod[i][j]
+ elif key == 'Snap':
+ try:
+ eb = ebones[val[0]]
+ except:
+ eb = None
+ tb = ebones[val[1]]
+ typ = val[2]
+ if eb is None:
+ pass
+ elif typ == 'Inv':
+ eb.head = tb.tail
+ eb.tail = tb.head
+ elif typ == 'Head':
+ eb.head = tb.head
+ elif typ == 'Tail':
+ eb.tail = tb.tail
+ elif typ == 'Both':
+ eb.head = tb.head
+ eb.tail = tb.tail
+ eb.roll = tb.roll
+ else:
+ raise NameError("Snap type %s" % typ)
+ elif key == 'PoseMode':
+ bpy.context.scene.objects.active = rig
+ bpy.ops.object.mode_set(mode='POSE')
+ pbones = rig.pose.bones
+ elif key == 'ObjectMode':
+ bpy.context.scene.objects.active = rig
+ bpy.ops.object.mode_set(mode='POSE')
+ pbones = rig.pose.bones
+ elif key == 'EditMode':
+ bpy.context.scene.objects.active = rig
+ bpy.ops.object.mode_set(mode='EDIT')
+ ebones = rig.data.edit_bones
+ bpy.ops.armature.select_all(action='DESELECT')
+ elif key == 'Roll':
+ try:
+ eb = ebones[val[0]]
+ except:
+ eb = None
+ if eb:
+ eb.roll = float(val[1])
+ elif key == 'Select':
+ pass
+ elif key == 'RollUp':
+ pass
+ elif key == 'Apply':
+ applyTransform(objects, rig, parents)
+ elif key == 'ApplyArmature':
+ try:
+ ob = loadedData['Object'][val[0]]
+ objects.append((ob,sub))
+ except:
+ ob = None
+ elif key == 'Object':
+ try:
+ ob = loadedData['Object'][val[0]]
+ except:
+ ob = None
+ if ob:
+ bpy.context.scene.objects.active = ob
+ #mod = ob.modifiers[0]
+ #ob.modifiers.remove(mod)
+ for (key1, val1, sub1) in sub:
+ if key1 == 'Modifier':
+ parseModifier(ob, val1, sub1)
+ return
+
+def applyTransform(objects, rig, parents):
+ for (ob,tokens) in objects:
+ print("Applying transform to %s" % ob)
+ bpy.context.scene.objects.active = ob
+ bpy.ops.object.visual_transform_apply()
+ bpy.ops.object.modifier_apply(apply_as='DATA', modifier='Armature')
+
+ bpy.context.scene.objects.active = rig
+ bpy.ops.object.mode_set(mode='POSE')
+ bpy.ops.pose.armature_apply()
+ bpy.ops.object.mode_set(mode='OBJECT')
+ bpy.ops.object.mode_set(mode='EDIT')
+ ebones = rig.data.edit_bones
+ for (bname, pname) in parents.items():
+ eb = ebones[bname]
+ par = ebones[pname]
+ if eb.use_connect:
+ par.tail = eb.head
+ eb.parent = par
+
+ bpy.ops.object.mode_set(mode='OBJECT')
+ return
+
+#
+# defaultKey(ext, args, tokens, var, exclude, glbals, lcals):
+#
+
+def defaultKey(ext, args, tokens, var, exclude, glbals, lcals):
+ global todo
+
+ if ext == 'Property':
+ try:
+ expr = "%s['%s'] = %s" % (var, args[0], args[1])
+ except:
+ expr = None
+ #print("Property", expr)
+ if expr:
+ exec(expr, glbals, lcals)
+ return
+
+ if ext == 'bpyops':
+ expr = "bpy.ops.%s" % args[0]
+ print(expr)
+ exec(expr)
+ return
+
+ nvar = "%s.%s" % (var, ext)
+ #print(ext)
+ if ext in exclude:
+ return
+ #print("D", nvar)
+
+ if len(args) == 0:
+ raise NameError("Key length 0: %s" % ext)
+
+ rnaType = args[0]
+ if rnaType == 'Add':
+ print("*** Cannot Add yet ***")
+ return
+
+ elif rnaType == 'Refer':
+ typ = args[1]
+ name = args[2]
+ data = "loadedData['%s']['%s']" % (typ, name)
+
+ elif rnaType == 'Struct' or rnaType == 'Define':
+ typ = args[1]
+ name = args[2]
+ try:
+ data = eval(nvar, glbals, lcals)
+ except:
+ data = None
+ # print("Old structrna", nvar, data)
+
+ if data is None:
+ try:
+ creator = args[3]
+ except:
+ creator = None
+ # print("Creator", creator, eval(var,glbals,lcals))
+
+ try:
+ rna = eval(var,glbals,lcals)
+ data = eval(creator)
+ except:
+ data = None
+ # print("New struct", nvar, typ, data)
+
+ if rnaType == 'Define':
+ loadedData[typ][name] = data
+
+ if data:
+ for (key, val, sub) in tokens:
+ defaultKey(key, val, sub, "data", [], globals(), locals())
+
+ print("Struct done", nvar)
+ return
+
+ elif rnaType == 'PropertyRNA':
+ raise NameError("PropertyRNA!")
+ #print("PropertyRNA ", ext, var)
+ for (key, val, sub) in tokens:
+ defaultKey(ext, val, sub, nvar, [], glbals, lcals)
+ return
+
+ elif rnaType == 'Array':
+ for n in range(1, len(args)):
+ expr = "%s[%d] = %s" % (nvar, n-1, args[n])
+ exec(expr, glbals, lcals)
+ if len(args) > 0:
+ expr = "%s[0] = %s" % (nvar, args[1])
+ exec(expr, glbals, lcals)
+ return
+
+ elif rnaType == 'List':
+ data = []
+ for (key, val, sub) in tokens:
+ elt = eval(val[1], glbals, lcals)
+ data.append(elt)
+
+ elif rnaType == 'Matrix':
+ return
+ i = 0
+ n = len(tokens)
+ for (key, val, sub) in tokens:
+ if key == 'row':
+ for j in range(n):
+ expr = "%s[%d][%d] = %g" % (nvar, i, j, float(val[j]))
+ exec(expr, glbals, lcals)
+ i += 1
+ return
+
+ else:
+ try:
+ data = loadedData[rnaType][args[1]]
+ #print("From loaded", rnaType, args[1], data)
+ return data
+ except:
+ data = rnaType
+
+ #print(var, ext, data)
+ expr = "%s = %s" % (nvar, data)
+ try:
+ exec(expr, glbals, lcals)
+ except:
+ pushOnTodoList(var, expr, glbals, lcals)
+ return
+
+#
+#
+#
+
+def pushOnTodoList(var, expr, glbals, lcals):
+ global todo
+ print("Tdo", var)
+ print(dir(eval(var, glbals, lcals)))
+ raise NameError("Todo", expr)
+ todo.append((expr, glbals, lcals))
+ return
+
+
+#
+# parseBoolArray(mask):
+#
+
+def parseBoolArray(mask):
+ list = []
+ for c in mask:
+ if c == '0':
+ list.append(False)
+ else:
+ list.append(True)
+ return list
+
+# parseMatrix(args, tokens)
+#
+
+def parseMatrix(args, tokens):
+ matrix = Matrix()
+ i = 0
+ for (key, val, sub) in tokens:
+ if key == 'row':
+ matrix[i][0] = float(val[0])
+ matrix[i][1] = float(val[1])
+ matrix[i][2] = float(val[2])
+ matrix[i][3] = float(val[3])
+ i += 1
+ return matrix
+
+#
+# parseDefault(data, tokens, subkeys, exclude):
+#
+
+def parseDefault(data, tokens, subkeys, exclude):
+ for (key, val, sub) in tokens:
+ if key in subkeys.keys():
+ for (key2, val2, sub2) in sub:
+ defaultKey(key2, val2, sub2, "data.%s" % subkeys[key], [], globals(), locals())
+ else:
+ defaultKey(key, val, sub, "data", exclude, globals(), locals())
+
+def parseCollection(data, tokens, exclude):
+ return
+
+
+#
+# Utilities
+#
+
+#
+# extractBpyType(data):
+#
+
+def extractBpyType(data):
+ typeSplit = str(type(data)).split("'")
+ if typeSplit[0] != '<class ':
+ return None
+ classSplit = typeSplit[1].split(".")
+ if classSplit[0] == 'bpy' and classSplit[1] == 'types':
+ return classSplit[2]
+ elif classSplit[0] == 'bpy_types':
+ return classSplit[1]
+ else:
+ return None
+
+#
+# Bool(string):
+#
+
+def Bool(string):
+ if string == 'True':
+ return True
+ elif string == 'False':
+ return False
+ else:
+ raise NameError("Bool %s?" % string)
+
+#
+# invalid(condition):
+#
+
+def invalid(condition):
+ global rigLeg, rigArm, toggle
+ res = eval(condition, globals())
+ try:
+ res = eval(condition, globals())
+ #print("%s = %s" % (condition, res))
+ return not res
+ except:
+ #print("%s invalid!" % condition)
+ return True
+
+
+
+#
+# clearScene(context):
+#
+
+def clearScene():
+ global toggle
+ scn = bpy.context.scene
+ for n in range(len(scn.layers)):
+ scn.layers[n] = True
+ print("clearScene %s %s" % (toggle & T_Replace, scn))
+ if not toggle & T_Replace:
+ return scn
+
+ for ob in scn.objects:
+ if ob.type in ["MESH", "ARMATURE", 'EMPTY', 'CURVE', 'LATTICE']:
+ scn.objects.active = ob
+ try:
+ bpy.ops.object.mode_set(mode='OBJECT')
+ except:
+ pass
+ scn.objects.unlink(ob)
+ del ob
+ #print(scn.objects)
+ return scn
+
+#
+# hideLayers(args):
+# args = sceneLayers sceneHideLayers boneLayers boneHideLayers or nothing
+#
+
+def hideLayers(args):
+ if len(args) > 1:
+ sceneLayers = int(args[2], 16)
+ sceneHideLayers = int(args[3], 16)
+ boneLayers = int(args[4], 16)
+ boneHideLayers = int(args[5], 16)
+ else:
+ sceneLayers = 0x00ff
+ sceneHideLayers = 0
+ boneLayers = 0
+ boneHideLayers = 0
+
+ scn = bpy.context.scene
+ mask = 1
+ hidelayers = []
+ for n in range(20):
+ scn.layers[n] = True if sceneLayers & mask else False
+ if sceneHideLayers & mask:
+ hidelayers.append(n)
+ mask = mask << 1
+
+ for ob in scn.objects:
+ for n in hidelayers:
+ if ob.layers[n]:
+ ob.hide = True
+
+ if boneLayers:
+ human = args[1]
+ try:
+ ob = loadedData['Object'][human]
+ except:
+ return
+
+ mask = 1
+ hidelayers = []
+ for n in range(32):
+ ob.data.layers[n] = True if boneLayers & mask else False
+ if boneHideLayers & mask:
+ hidelayers.append(n)
+ mask = mask << 1
+
+ for b in ob.data.bones:
+ for n in hidelayers:
+ if b.layers[n]:
+ b.hide = True
+
+ return
+
+
+#
+# readDefaults():
+# writeDefaults():
+#
+
+ConfigFile = '~/mhx_import.cfg'
+
+
+def readDefaults():
+ global toggle, theScale, theBlenderVersion, BlenderVersions
+ path = os.path.realpath(os.path.expanduser(ConfigFile))
+ try:
+ fp = open(path, 'rU')
+ print('Storing defaults')
+ except:
+ print('Cannot open "%s" for reading' % path)
+ return
+ bver = ''
+ for line in fp:
+ words = line.split()
+ if len(words) >= 3:
+ try:
+ toggle = int(words[0],16)
+ theScale = float(words[1])
+ theBlenderVersion = BlenderVersions.index(words[2])
+ except:
+ print('Configuration file "%s" is corrupt' % path)
+ fp.close()
+ return
+
+def writeDefaults():
+ global toggle, theScale, theBlenderVersion, BlenderVersions
+ path = os.path.realpath(os.path.expanduser(ConfigFile))
+ try:
+ fp = open(path, 'w')
+ print('Storing defaults')
+ except:
+ print('Cannot open "%s" for writing' % path)
+ return
+ fp.write("%x %f %s" % (toggle, theScale, BlenderVersions[theBlenderVersion]))
+ fp.close()
+ return
+
+#
+# User interface
+#
+
+DEBUG = False
+from bpy.props import StringProperty, FloatProperty, EnumProperty, BoolProperty
+from bpy_extras.io_utils import ImportHelper
+
+
+MhxBoolProps = [
+ ("enforce", "Enforce version", "Only accept MHX files of correct version", T_EnforceVersion),
+ ("mesh", "Mesh", "Use main mesh", T_Mesh),
+ ("proxy", "Proxies", "Use proxies", T_Proxy),
+ ("armature", "Armature", "Use armature", T_Armature),
+ ("replace", "Replace scene", "Replace scene", T_Replace),
+ ("cage", "Cage", "Load mesh deform cage", T_Cage),
+ ("clothes", "Clothes", "Include clothes", T_Clothes),
+ ("stretch", "Stretchy limbs", "Stretchy limbs", T_Stretch),
+ ("face", "Face shapes", "Include facial shapekeys", T_Face),
+ ("shape", "Body shapes", "Include body shapekeys", T_Shape),
+ ("symm", "Symmetric shapes", "Keep shapekeys symmetric", T_Symm),
+ ("diamond", "Diamonds", "Keep joint diamonds", T_Diamond),
+ ("bend", "Bend joints", "Bend joints for better IK", T_Bend),
+ #("opcns", "Operator constraints", "Only for Aligorith", T_Opcns),
+]
+
+class ImportMhx(bpy.types.Operator, ImportHelper):
+ '''Import from MHX file format (.mhx)'''
+ bl_idname = "import_scene.makehuman_mhx"
+ bl_description = 'Import from MHX file format (.mhx)'
+ bl_label = "Import MHX"
+ bl_space_type = "PROPERTIES"
+ bl_region_type = "WINDOW"
+
+ scale = FloatProperty(name="Scale", description="Default meter, decimeter = 1.0", default = theScale)
+ enums = []
+ for enum in BlenderVersions:
+ enums.append((enum,enum,enum))
+ bver = EnumProperty(name="Blender version", items=enums, default = BlenderVersions[0])
+
+ filename_ext = ".mhx"
+ filter_glob = StringProperty(default="*.mhx", options={'HIDDEN'})
+ filepath = StringProperty(name="File Path", description="File path used for importing the MHX file", maxlen= 1024, default= "")
+
+ for (prop, name, desc, flag) in MhxBoolProps:
+ expr = '%s = BoolProperty(name="%s", description="%s", default=toggle&%s)' % (prop, name, desc, flag)
+ exec(expr)
+
+ def execute(self, context):
+ global toggle, theScale, MhxBoolProps, theBlenderVersion, BlenderVersions
+ toggle = 0
+ for (prop, name, desc, flag) in MhxBoolProps:
+ expr = '(%s if self.%s else 0)' % (flag, prop)
+ toggle |= eval(expr)
+ print("execute flags %x" % toggle)
+ theScale = self.scale
+ theBlenderVersion = BlenderVersions.index(self.bver)
+
+ readMhxFile(self.filepath)
+ writeDefaults()
+ return {'FINISHED'}
+
+ def invoke(self, context, event):
+ global toggle, theScale, MhxBoolProps, theBlenderVersion, BlenderVersions
+ readDefaults()
+ self.scale = theScale
+ self.bver = BlenderVersions[theBlenderVersion]
+ for (prop, name, desc, flag) in MhxBoolProps:
+ expr = 'self.%s = toggle&%s' % (prop, flag)
+ exec(expr)
+ context.window_manager.fileselect_add(self)
+ return {'RUNNING_MODAL'}
+
+def menu_func(self, context):
+ self.layout.operator(ImportMhx.bl_idname, text="MakeHuman (.mhx)...")
+
+def register():
+ bpy.utils.register_module(__name__)
+ bpy.types.INFO_MT_file_import.append(menu_func)
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+ bpy.types.INFO_MT_file_import.remove(menu_func)
+
+if __name__ == "__main__":
+ try:
+ unregister()
+ except:
+ pass
+ register()
+
+#
+# Testing
+#
+"""
+#readMhxFile("C:/Documents and Settings/xxxxxxxxxxxxxxxxxxxx/Mina dokument/makehuman/exports/foo-25.mhx", 'Classic')
+readMhxFile("/home/thomas/makehuman/exports/foo-25.mhx", 1.0)
+
+#toggle = T_Replace + T_Mesh + T_Armature + T_MHX
+#readMhxFile("/home/thomas/myblends/test.mhx", 1.0)
+"""
+
+
+
+
diff --git a/io_import_scene_unreal_psk.py b/io_import_scene_unreal_psk.py
new file mode 100644
index 00000000..5646af8b
--- /dev/null
+++ b/io_import_scene_unreal_psk.py
@@ -0,0 +1,794 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+bl_info = {
+ "name": "Import Unreal Skeleton Mesh (.psk)",
+ "author": "Darknet",
+ "version": (2, 0),
+ "blender": (2, 5, 3),
+ "api": 36079,
+ "location": "File > Import > Skeleton Mesh (.psk)",
+ "description": "Import Skeleleton Mesh",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"
+ "Scripts/Import-Export/Unreal_psk_psa",
+ "tracker_url": "https://projects.blender.org/tracker/index.php?"\
+ "func=detail&aid=21366",
+ "category": "Import-Export"}
+
+"""
+Version 2.0, ported by Darknet
+
+Unreal Tournament PSK file to Blender mesh converter V1.0
+Author: D.M. Sturgeon (camg188 at the elYsium forum), ported by Darknet
+Imports a *psk file to a new mesh
+
+-No UV Texture
+-No Weight
+-No Armature Bones
+-No Material ID
+-Export Text Log From Current Location File (Bool )
+"""
+
+import bpy
+import mathutils
+import os
+import sys
+import string
+import math
+import re
+from string import *
+from struct import *
+from math import *
+from bpy.props import *
+import mathutils
+
+bpy.types.Scene.unrealbonesize = FloatProperty(
+ name="Bone Length",
+ description="Bone Length from head to tail distance.",
+ default=1,min=0.001,max=1000)
+
+#output log in to txt file
+DEBUGLOG = False
+
+scale = 1.0
+bonesize = 1.0
+md5_bones=[]
+
+def unpack_list(list_of_tuples):
+ l = []
+ for t in list_of_tuples:
+ l.extend(t)
+ return l
+"""
+class md5_bone:
+ bone_index=0
+ name=""
+ bindpos=[]
+ bindmat = mathutils.Quaternion()
+ parent=""
+ parent_index=0
+ blenderbone=None
+ roll=0
+
+ def __init__(self):
+ self.bone_index=0
+ self.name=""
+ self.bindpos=[0.0]*3
+ self.bindmat=[None]*3 # initialize a 3x3 2d-array (list of three row lists)
+ for i in range(3): self.bindmat[i] = [0.0]*3
+ self.parent=""
+ self.parent_index=0
+ self.blenderbone=None
+
+ def dump(self):
+ print ("bone index: ", self.bone_index)
+ print ("name: ", self.name)
+ print ("bind position: ", self.bindpos)
+ print ("bind translation matrix: ", self.bindmat)
+ print ("parent: ", self.parent)
+ print ("parent index: ", self.parent_index)
+ print ("blenderbone: ", self.blenderbone)
+"""
+class md5_bone:
+ bone_index=0
+ name=""
+ bindpos=[]
+ bindmat=[]
+ scale = []
+ parent=""
+ parent_index=0
+ blenderbone=None
+ roll=0
+
+ def __init__(self):
+ self.bone_index=0
+ self.name=""
+ self.bindpos=[0.0]*3
+ self.scale=[0.0]*3
+ self.bindmat=[None]*3 # initialize a 3x3 2d-array (list of three row lists)
+ for i in range(3): self.bindmat[i] = [0.0]*3
+ self.parent=""
+ self.parent_index=0
+ self.blenderbone=None
+
+ def dump(self):
+ print ("bone index: ", self.bone_index)
+ print ("name: ", self.name)
+ print ("bind position: ", self.bindpos)
+ print ("bind translation matrix: ", self.bindmat)
+ print ("parent: ", self.parent)
+ print ("parent index: ", self.parent_index)
+ print ("blenderbone: ", self.blenderbone)
+
+#http://www.blender.org/forum/viewtopic.php?t=13340&sid=8b17d5de07b17960021bbd72cac0495f
+def fixRollZ(b):
+ v = (b.tail-b.head)/b.length
+ b.roll -= math.degrees(math.atan2(v[0]*v[2]*(1 - v[1]),v[0]*v[0] + v[1]*v[2]*v[2]))
+def fixRoll(b):
+ v = (b.tail-b.head)/b.length
+ if v[2]*v[2] > .5:
+ #align X-axis
+ b.roll += math.degrees(math.atan2(v[0]*v[2]*(1 - v[1]),v[2]*v[2] + v[1]*v[0]*v[0]))
+ else:
+ #align Z-axis
+ b.roll -= math.degrees(math.atan2(v[0]*v[2]*(1 - v[1]),v[0]*v[0] + v[1]*v[2]*v[2]))
+
+def pskimport(infile,importmesh,importbone,bDebugLogPSK,importmultiuvtextures):
+ global DEBUGLOG
+ DEBUGLOG = bDebugLogPSK
+ print ("--------------------------------------------------")
+ print ("---------SCRIPT EXECUTING PYTHON IMPORTER---------")
+ print ("--------------------------------------------------")
+ print (" DEBUG Log:",bDebugLogPSK)
+ print ("Importing file: ", infile)
+
+ md5_bones=[]
+ pskfile = open(infile,'rb')
+ if (DEBUGLOG):
+ logpath = infile.replace(".psk", ".txt")
+ print("logpath:",logpath)
+ logf = open(logpath,'w')
+
+ def printlog(strdata):
+ if (DEBUGLOG):
+ logf.write(strdata)
+
+ objName = infile.split('\\')[-1].split('.')[0]
+
+ me_ob = bpy.data.meshes.new(objName)
+ print("objName:",objName)
+ printlog(("New Mesh = " + me_ob.name + "\n"))
+ #read general header
+ indata = unpack('20s3i',pskfile.read(32))
+ #not using the general header at this time
+ #==================================================================================================
+ # vertex point
+ #==================================================================================================
+ #read the PNTS0000 header
+ indata = unpack('20s3i',pskfile.read(32))
+ recCount = indata[3]
+ printlog(( "Nbr of PNTS0000 records: " + str(recCount) + "\n"))
+ counter = 0
+ verts = []
+ while counter < recCount:
+ counter = counter + 1
+ indata = unpack('3f',pskfile.read(12))
+ #print(indata[0],indata[1],indata[2])
+ verts.extend([(indata[0],indata[1],indata[2])])
+ printlog(str(indata[0]) + "|" +str(indata[1]) + "|" +str(indata[2]) + "\n")
+ #Tmsh.vertices.append(NMesh.Vert(indata[0],indata[1],indata[2]))
+
+ #==================================================================================================
+ # UV
+ #==================================================================================================
+ #read the VTXW0000 header
+ indata = unpack('20s3i',pskfile.read(32))
+ recCount = indata[3]
+ printlog( "Nbr of VTXW0000 records: " + str(recCount)+ "\n")
+ counter = 0
+ UVCoords = []
+ #UVCoords record format = [index to PNTS, U coord, v coord]
+ printlog("[index to PNTS, U coord, v coord]\n");
+ while counter < recCount:
+ counter = counter + 1
+ indata = unpack('hhffhh',pskfile.read(16))
+ UVCoords.append([indata[0],indata[2],indata[3]])
+ printlog(str(indata[0]) + "|" +str(indata[2]) + "|" +str(indata[3])+"\n")
+ #print([indata[0],indata[2],indata[3]])
+ #print([indata[1],indata[2],indata[3]])
+
+ #==================================================================================================
+ # Face
+ #==================================================================================================
+ #read the FACE0000 header
+ indata = unpack('20s3i',pskfile.read(32))
+ recCount = indata[3]
+ printlog( "Nbr of FACE0000 records: "+ str(recCount) + "\n")
+ #PSK FACE0000 fields: WdgIdx1|WdgIdx2|WdgIdx3|MatIdx|AuxMatIdx|SmthGrp
+ #associate MatIdx to an image, associate SmthGrp to a material
+ SGlist = []
+ counter = 0
+ faces = []
+ faceuv = []
+ #the psk values are: nWdgIdx1|WdgIdx2|WdgIdx3|MatIdx|AuxMatIdx|SmthGrp
+ printlog("nWdgIdx1|WdgIdx2|WdgIdx3|MatIdx|AuxMatIdx|SmthGrp \n")
+ while counter < recCount:
+ counter = counter + 1
+ indata = unpack('hhhbbi',pskfile.read(12))
+ printlog(str(indata[0]) + "|" +str(indata[1]) + "|" +str(indata[2])+ "|" +str(indata[3])+ "|" +str(indata[4])+ "|" +str(indata[5]) + "\n")
+ #indata[0] = index of UVCoords
+ #UVCoords[indata[0]]=[index to PNTS, U coord, v coord]
+ #UVCoords[indata[0]][0] = index to PNTS
+ PNTSA = UVCoords[indata[0]][0]
+ PNTSB = UVCoords[indata[1]][0]
+ PNTSC = UVCoords[indata[2]][0]
+ #print(PNTSA,PNTSB,PNTSC) #face id vertex
+ #faces.extend([0,1,2,0])
+ faces.extend([PNTSA,PNTSB,PNTSC,0])
+ uv = []
+ u0 = UVCoords[indata[0]][1]
+ v0 = UVCoords[indata[0]][2]
+ uv.append([u0,v0])
+ u1 = UVCoords[indata[1]][1]
+ v1 = UVCoords[indata[1]][2]
+ uv.append([u1,v1])
+ u2 = UVCoords[indata[2]][1]
+ v2 = UVCoords[indata[2]][2]
+ uv.append([u2,v2])
+ faceuv.append([uv,indata[3],indata[4],indata[5]])
+
+ #print("material:",indata[3])
+ #print("UV: ",u0,v0)
+ #update the uv var of the last item in the Tmsh.faces list
+ # which is the face just added above
+ ##Tmsh.faces[-1].uv = [(u0,v0),(u1,v1),(u2,v2)]
+ #print("smooth:",indata[5])
+ #collect a list of the smoothing groups
+ if SGlist.count(indata[5]) == 0:
+ SGlist.append(indata[5])
+ print("smooth:",indata[5])
+ #assign a material index to the face
+ #Tmsh.faces[-1].materialIndex = SGlist.index(indata[5])
+ printlog( "Using Materials to represent PSK Smoothing Groups...\n")
+ #==========
+ # skip something...
+ #==========
+
+ #==================================================================================================
+ # Material
+ #==================================================================================================
+ ##
+ #read the MATT0000 header
+ indata = unpack('20s3i',pskfile.read(32))
+ recCount = indata[3]
+ printlog("Nbr of MATT0000 records: " + str(recCount) + "\n" )
+ printlog(" - Not importing any material data now. PSKs are texture wrapped! \n")
+ counter = 0
+ materialcount = 0
+ while counter < recCount:
+ counter = counter + 1
+ indata = unpack('64s6i',pskfile.read(88))
+ materialcount += 1
+ print("Material",counter)
+ ##
+
+ #==================================================================================================
+ # Bones (Armature)
+ #==================================================================================================
+ #read the REFSKEL0 header
+ indata = unpack('20s3i',pskfile.read(32))
+ recCount = indata[3]
+ printlog( "Nbr of REFSKEL0 records: " + str(recCount) + "\n")
+ #REFSKEL0 fields - Name|Flgs|NumChld|PrntIdx|Qw|Qx|Qy|Qz|LocX|LocY|LocZ|Lngth|XSize|YSize|ZSize
+
+ Bns = []
+ bone = []
+ nobone = 0
+ #==================================================================================================
+ # Bone Data
+ #==================================================================================================
+ counter = 0
+ print ("---PRASE--BONES---")
+ printlog("Name|Flgs|NumChld|PrntIdx|Qw|Qx|Qy|Qz|LocX|LocY|LocZ|Lngth|XSize|YSize|ZSize\n")
+ while counter < recCount:
+ indata = unpack('64s3i11f',pskfile.read(120))
+ #print( "DATA",str(indata))
+
+ bone.append(indata)
+
+ createbone = md5_bone()
+ #temp_name = indata[0][:30]
+ temp_name = indata[0]
+
+ temp_name = bytes.decode(temp_name)
+ temp_name = temp_name.lstrip(" ")
+ temp_name = temp_name.rstrip(" ")
+ temp_name = temp_name.strip()
+ temp_name = temp_name.strip( bytes.decode(b'\x00'))
+ printlog(temp_name + "|" +str(indata[1]) + "|" +str(indata[2])+ "|" +str(indata[3])+ "|" +str(indata[4])+ "|" +str(indata[5]) +str(indata[6]) + "|" +str(indata[7]) + "|" +str(indata[8])+ "|" +str(indata[9])+ "|" +str(indata[10])+ "|" +str(indata[11]) +str(indata[12]) + "|" +str(indata[13]) + "|" +str(indata[14])+ "\n")
+ print ("temp_name:", temp_name, "||")
+ createbone.name = temp_name
+ createbone.bone_index = counter
+ createbone.parent_index = indata[3]
+ createbone.bindpos[0] = indata[8]
+ createbone.bindpos[1] = indata[9]
+ createbone.bindpos[2] = indata[10]
+ createbone.scale[0] = indata[12]
+ createbone.scale[1] = indata[13]
+ createbone.scale[2] = indata[14]
+
+ #w,x,y,z
+ if (counter == 0):#main parent
+ print("no parent bone")
+ createbone.bindmat = mathutils.Quaternion((indata[7],indata[4],indata[5],indata[6]))#default
+ #createbone.bindmat = mathutils.Quaternion((indata[7],-indata[4],-indata[5],-indata[6]))
+ else:#parent
+ print("parent bone")
+ createbone.bindmat = mathutils.Quaternion((indata[7],-indata[4],-indata[5],-indata[6]))#default
+ #createbone.bindmat = mathutils.Quaternion((indata[7],indata[4],indata[5],indata[6]))
+
+ md5_bones.append(createbone)
+ counter = counter + 1
+ bnstr = (str(indata[0]))
+ Bns.append(bnstr)
+
+ for pbone in md5_bones:
+ pbone.parent = md5_bones[pbone.parent_index].name
+
+ bonecount = 0
+ for armbone in bone:
+ temp_name = armbone[0][:30]
+ #print ("BONE NAME: ",len(temp_name))
+ temp_name=str((temp_name))
+ #temp_name = temp_name[1]
+ #print ("BONE NAME: ",temp_name)
+ bonecount +=1
+ print ("-------------------------")
+ print ("----Creating--Armature---")
+ print ("-------------------------")
+
+ #================================================================================================
+ #Check armature if exist if so create or update or remove all and addnew bone
+ #================================================================================================
+ #bpy.ops.object.mode_set(mode='OBJECT')
+ meshname ="ArmObject"
+ objectname = "armaturedata"
+ bfound = False
+ arm = None
+ if importbone:
+ for obj in bpy.data.objects:
+ if (obj.name == meshname):
+ bfound = True
+ arm = obj
+ break
+
+ if bfound == False:
+ '''
+ armdata = bpy.data.armatures.new(objectname)
+ ob_new = bpy.data.objects.new(meshname, armdata)
+ #ob_new = bpy.data.objects.new(meshname, 'ARMATURE')
+ #ob_new.data = armdata
+ bpy.context.scene.objects.link(ob_new)
+ #bpy.ops.object.mode_set(mode='OBJECT')
+ for i in bpy.context.scene.objects: i.select = False #deselect all objects
+ ob_new.select = True
+ #set current armature to edit the bone
+ bpy.context.scene.objects.active = ob_new
+ #set mode to able to edit the bone
+ bpy.ops.object.mode_set(mode='EDIT')
+
+ #newbone = ob_new.data.edit_bones.new('test')
+ #newbone.tail.y = 1
+ print("creating bone(s)")
+ for bone in md5_bones:
+ #print(dir(bone))
+ bpy.ops.object.mode_set(mode='EDIT')
+ newbone = ob_new.data.edit_bones.new(bone.name)
+ '''
+
+
+ armdata = bpy.data.armatures.new(objectname)
+ ob_new = bpy.data.objects.new(meshname, armdata)
+ #ob_new = bpy.data.objects.new(meshname, 'ARMATURE')
+ #ob_new.data = armdata
+ bpy.context.scene.objects.link(ob_new)
+ #bpy.ops.object.mode_set(mode='OBJECT')
+ for i in bpy.context.scene.objects: i.select = False #deselect all objects
+ ob_new.select = True
+ #set current armature to edit the bone
+ bpy.context.scene.objects.active = ob_new
+ #set mode to able to edit the bone
+ bpy.ops.object.mode_set(mode='EDIT')
+
+ #newbone = ob_new.data.edit_bones.new('test')
+ #newbone.tail.y = 1
+ print("creating bone(s)")
+ for bone in md5_bones:
+ #print(dir(bone))
+ bpy.ops.object.mode_set(mode='EDIT')
+ newbone = ob_new.data.edit_bones.new(bone.name)
+ #parent the bone
+ print("DRI:",dir(newbone))
+ parentbone = None
+ print("bone name:",bone.name)
+ #note bone location is set in the real space or global not local
+ bonesize = bpy.types.Scene.unrealbonesize
+ if bone.name != bone.parent:
+
+ pos_x = bone.bindpos[0]
+ pos_y = bone.bindpos[1]
+ pos_z = bone.bindpos[2]
+
+ #print( "LINKING:" , bone.parent ,"j")
+ parentbone = ob_new.data.edit_bones[bone.parent]
+ newbone.parent = parentbone
+
+ rotmatrix = bone.bindmat.to_matrix().to_4x4().to_3x3() # XXX, redundant matrix conversion?
+ newbone.transform(bone.bindmat.to_matrix().to_4x4(),True,True)
+ #parent_head = parentbone.head * parentbone.matrix.to_quaternion().inverse()
+ #parent_tail = parentbone.tail * parentbone.matrix.to_quaternion().inverse()
+ #location=Vector(pos_x,pos_y,pos_z)
+ #set_position = (parent_tail - parent_head) + location
+ #print("tmp head:",set_position)
+
+ #pos_x = set_position.x
+ #pos_y = set_position.y
+ #pos_z = set_position.z
+
+
+ newbone.head.x = parentbone.head.x + pos_x
+ newbone.head.y = parentbone.head.y + pos_y
+ newbone.head.z = parentbone.head.z + pos_z
+ #print("head:",newbone.head)
+ newbone.tail.x = parentbone.head.x + (pos_x + bonesize * rotmatrix[1][0])
+ newbone.tail.y = parentbone.head.y + (pos_y + bonesize * rotmatrix[1][1])
+ newbone.tail.z = parentbone.head.z + (pos_z + bonesize * rotmatrix[1][2])
+ #newbone.roll = fixRoll(newbone)
+ else:
+ #print("rotmatrix:",dir(bone.bindmat.to_matrix().resize_4x4()))
+ #rotmatrix = bone.bindmat.to_matrix().resize_4x4().to_3x3() # XXX, redundant matrix conversion?
+ rotmatrix = bone.bindmat.to_matrix().to_3x3() # XXX, redundant matrix conversion?
+ #newbone.transform(bone.bindmat.to_matrix(),True,True)
+ newbone.head.x = bone.bindpos[0]
+ newbone.head.y = bone.bindpos[1]
+ newbone.head.z = bone.bindpos[2]
+ newbone.tail.x = bone.bindpos[0] + bonesize * rotmatrix[1][0]
+ newbone.tail.y = bone.bindpos[1] + bonesize * rotmatrix[1][1]
+ newbone.tail.z = bone.bindpos[2] + bonesize * rotmatrix[1][2]
+ #newbone.roll = fixRoll(newbone)
+ #print("no parent")
+
+ bpy.context.scene.update()
+
+ #==================================================================================================
+ #END BONE DATA BUILD
+ #==================================================================================================
+ VtxCol = []
+ for x in range(len(Bns)):
+ #change the overall darkness of each material in a range between 0.1 and 0.9
+ tmpVal = ((float(x)+1.0)/(len(Bns))*0.7)+0.1
+ tmpVal = int(tmpVal * 256)
+ tmpCol = [tmpVal,tmpVal,tmpVal,0]
+ #Change the color of each material slightly
+ if x % 3 == 0:
+ if tmpCol[0] < 128: tmpCol[0] += 60
+ else: tmpCol[0] -= 60
+ if x % 3 == 1:
+ if tmpCol[1] < 128: tmpCol[1] += 60
+ else: tmpCol[1] -= 60
+ if x % 3 == 2:
+ if tmpCol[2] < 128: tmpCol[2] += 60
+ else: tmpCol[2] -= 60
+ #Add the material to the mesh
+ VtxCol.append(tmpCol)
+
+ #==================================================================================================
+ # Bone Weight
+ #==================================================================================================
+ #read the RAWW0000 header
+ indata = unpack('20s3i',pskfile.read(32))
+ recCount = indata[3]
+ printlog( "Nbr of RAWW0000 records: " + str(recCount) +"\n")
+ #RAWW0000 fields: Weight|PntIdx|BoneIdx
+ RWghts = []
+ counter = 0
+ while counter < recCount:
+ counter = counter + 1
+ indata = unpack('fii',pskfile.read(12))
+ RWghts.append([indata[1],indata[2],indata[0]])
+ #print("weight:",[indata[1],indata[2],indata[0]])
+ #RWghts fields = PntIdx|BoneIdx|Weight
+ RWghts.sort()
+ printlog( "Vertex point and groups count =" + str(len(RWghts)) + "\n")
+ printlog("PntIdx|BoneIdx|Weight")
+ for vg in RWghts:
+ printlog( str(vg[0]) + "|" + str(vg[1]) + "|" + str(vg[2]) + "\n")
+
+ #Tmsh.update_tag()
+
+ #set the Vertex Colors of the faces
+ #face.v[n] = RWghts[0]
+ #RWghts[1] = index of VtxCol
+ """
+ for x in range(len(Tmsh.faces)):
+ for y in range(len(Tmsh.faces[x].v)):
+ #find v in RWghts[n][0]
+ findVal = Tmsh.faces[x].v[y].index
+ n = 0
+ while findVal != RWghts[n][0]:
+ n = n + 1
+ TmpCol = VtxCol[RWghts[n][1]]
+ #check if a vertex has more than one influence
+ if n != len(RWghts)-1:
+ if RWghts[n][0] == RWghts[n+1][0]:
+ #if there is more than one influence, use the one with the greater influence
+ #for simplicity only 2 influences are checked, 2nd and 3rd influences are usually very small
+ if RWghts[n][2] < RWghts[n+1][2]:
+ TmpCol = VtxCol[RWghts[n+1][1]]
+ Tmsh.faces[x].col.append(NMesh.Col(TmpCol[0],TmpCol[1],TmpCol[2],0))
+ """
+ if (DEBUGLOG):
+ logf.close()
+ #==================================================================================================
+ #Building Mesh
+ #==================================================================================================
+ print("vertex:",len(verts),"faces:",len(faces))
+ me_ob.vertices.add(len(verts))
+ me_ob.faces.add(len(faces)//4)
+
+ me_ob.vertices.foreach_set("co", unpack_list(verts))
+
+ me_ob.faces.foreach_set("vertices_raw", faces)
+ me_ob.faces.foreach_set("use_smooth", [False] * len(me_ob.faces))
+ me_ob.update_tag()
+
+ """
+ Material setup coding.
+ First the mesh has to be create first to get the uv texture setup working.
+ -Create material(s) list in the psk pack data from the list.(to do list)
+ -Append the material to the from create the mesh object.
+ -Create Texture(s)
+ -fae loop for uv assign and assign material index
+
+ """
+ bpy.ops.object.mode_set(mode='OBJECT')
+ #===================================================================================================
+ #Material Setup
+ #===================================================================================================
+ print ("-------------------------")
+ print ("----Creating--Materials--")
+ print ("-------------------------")
+ materialname = "pskmat"
+ materials = []
+
+ for matcount in range(materialcount):
+ #if texturedata != None:
+ matdata = bpy.data.materials.new(materialname + str(matcount))
+ #mtex = matdata.texture_slots.new()
+ #mtex.texture = texture[matcount].data
+ #print(type(texture[matcount].data))
+ #print(dir(mtex))
+ #print(dir(matdata))
+ #for texno in range(len( bpy.data.textures)):
+ #print((bpy.data.textures[texno].name))
+ #print(dir(bpy.data.textures[texno]))
+ #matdata.active_texture = bpy.data.textures[matcount-1]
+ #matdata.texture_coords = 'UV'
+ #matdata.active_texture = texturedata
+ materials.append(matdata)
+
+ for material in materials:
+ #add material to the mesh list of materials
+ me_ob.materials.append(material)
+ #===================================================================================================
+ #UV Setup
+ #===================================================================================================
+ print ("-------------------------")
+ print ("-- Creating UV Texture --")
+ print ("-------------------------")
+ texture = []
+ texturename = "text1"
+ countm = 0
+ #for countm in range(materialcount):
+ #psktexname="psk" + str(countm)
+ #me_ob.uv_textures.new(name=psktexname)
+ if importmultiuvtextures == True:
+ me_ob.uv_textures.new(name="pskuvtexture")
+ #print(dir(bpy.data))
+ if (len(faceuv) > 0):
+ for countm in range(len(me_ob.uv_textures)):
+ me_ob.update()
+ uvtex = me_ob.uv_textures[countm] #add one uv texture
+ me_ob.update()
+ #print("UV TEXTURE NAME:",uvtex.name)
+ for i, face in enumerate(me_ob.faces):
+ blender_tface = uvtex.data[i] #face
+ mfaceuv = faceuv[i]
+ #print("---------------------------------------")
+ #print(faceuv[i][1])
+ #print(dir(face))
+ face.material_index = faceuv[i][1]
+ blender_tface.uv1 = mfaceuv[0][0] #uv = (0,0)
+ blender_tface.uv2 = mfaceuv[0][1] #uv = (0,0)
+ blender_tface.uv3 = mfaceuv[0][2] #uv = (0,0)
+ texture.append(uvtex)
+ else:
+ for countm in range(materialcount):
+ psktexname="psk" + str(countm)
+ me_ob.uv_textures.new(name=psktexname)
+ #psktexname="psk" + str(countm)
+ #me_ob.uv_textures.new(name=psktexname)
+ for countm in range(len(me_ob.uv_textures)):
+ me_ob.update()
+ #print(dir(me_ob.uv_textures))
+ #psktexname="psk" + str(countm)
+ uvtex = me_ob.uv_textures[countm] #add one uv texture
+ me_ob.update()
+ #print("UV TEXTURE NAME:",uvtex.name)
+ if (len(faceuv) > 0):
+ counttex = 0
+ countm = 0
+ for countm in range(len(me_ob.uv_textures)):
+ me_ob.update()
+ #print(dir(me_ob.uv_textures))
+ psktexname="psk" + str(countm)
+ uvtex = me_ob.uv_textures[countm] #add one uv texture
+ me_ob.update()
+ #print("UV TEXTURE NAME:",uvtex.name)
+ for i, face in enumerate(me_ob.faces):
+ blender_tface = uvtex.data[i] #face
+ mfaceuv = faceuv[i]
+ #print("---------------------------------------")
+ #print(faceuv[i][1])
+ #print(dir(face))
+ face.material_index = faceuv[i][1]
+ if countm == faceuv[i][1]:
+ face.material_index = faceuv[i][1]
+ blender_tface.uv1 = mfaceuv[0][0] #uv = (0,0)
+ blender_tface.uv2 = mfaceuv[0][1] #uv = (0,0)
+ blender_tface.uv3 = mfaceuv[0][2] #uv = (0,0)
+ else:
+ #set uv to zero (0,0)
+ #print("--------------------")
+ #print(blender_tface.uv1)
+ #print(blender_tface.uv2)
+ #print(blender_tface.uv2)
+ blender_tface.uv1 = [0,0]
+ #print(blender_tface.uv1)
+ blender_tface.uv2 = [0,0]
+ blender_tface.uv3 = [0,0]
+
+ texture.append(uvtex)
+ print("UV TEXTURE LEN:",len(texture))
+ #for tex in me_ob.uv_textures:
+ #print("mesh tex:",dir(tex))
+ #print((tex.name))
+
+ #for face in me_ob.faces:
+ #print(dir(face))
+
+
+ '''
+ matdata = bpy.data.materials.new(materialname)
+ #color is 0 - 1 not in 0 - 255
+ #matdata.mirror_color=(float(0.04),float(0.08),float(0.44))
+ matdata.diffuse_color=(float(0.04),float(0.08),float(0.44))#blue color
+ #print(dir(me_ob.uv_textures[0].data))
+ texdata = None
+ texdata = bpy.data.textures[len(bpy.data.textures)-1]
+ if (texdata != None):
+ #print(texdata.name)
+ #print(dir(texdata))
+ texdata.name = "texturelist1"
+ matdata.active_texture = texdata
+ materials.append(matdata)
+ #matdata = bpy.data.materials.new(materialname)
+ #materials.append(matdata)
+ #= make sure the list isnt too big
+ for material in materials:
+ #add material to the mesh list of materials
+ me_ob.materials.append(material)
+ '''
+ #===================================================================================================
+ #
+ #===================================================================================================
+ obmesh = bpy.data.objects.new(objName,me_ob)
+ #===================================================================================================
+ #Mesh Vertex Group bone weight
+ #===================================================================================================
+ print("---- building bone weight mesh ----")
+ #print(dir(ob_new.data.bones))
+ #create bone vertex group #deal with bone id for index number
+ for bone in ob_new.data.bones:
+ #print("names:",bone.name,":",dir(bone))
+ #print("names:",bone.name)
+ group = obmesh.vertex_groups.new(bone.name)
+ for vgroup in obmesh.vertex_groups:
+ #print(vgroup.name,":",vgroup.index)
+ for vgp in RWghts:
+ #bone index
+ if vgp[1] == vgroup.index:
+ #print(vgp)
+ #[vertex id],weight
+ vgroup.add([vgp[0]], vgp[2], 'ADD')
+
+ #check if there is a material to set to
+ if len(materials) > 0:
+ obmesh.active_material = materials[0] #material setup tmp
+ print("---- adding mesh to the scene ----")
+ bpy.context.scene.objects.link(obmesh)
+ bpy.context.scene.update()
+
+ print ("PSK2Blender completed")
+#End of def pskimport#########################
+
+def getInputFilename(self,filename,importmesh,importbone,bDebugLogPSK,importmultiuvtextures):
+ checktype = filename.split('\\')[-1].split('.')[1]
+ print ("------------",filename)
+ if checktype.lower() != 'psk':
+ print (" Selected file = ",filename)
+ raise (IOError, "The selected input file is not a *.psk file")
+ #self.report({'INFO'}, ("Selected file:"+ filename))
+ else:
+ pskimport(filename,importmesh,importbone,bDebugLogPSK,importmultiuvtextures)
+
+class IMPORT_OT_psk(bpy.types.Operator):
+ '''Load a skeleton mesh psk File'''
+ bl_idname = "import_scene.psk"
+ bl_label = "Import PSK"
+ bl_space_type = "PROPERTIES"
+ bl_region_type = "WINDOW"
+
+ # List of operator properties, the attributes will be assigned
+ # to the class instance from the operator settings before calling.
+ filepath = StringProperty(name="File Path", description="Filepath used for importing the psk file", maxlen= 1024, subtype='FILE_PATH')
+ filter_glob = StringProperty(default="*.psk", options={'HIDDEN'})
+ importmesh = BoolProperty(name="Mesh", description="Import mesh only. (not yet built.)", default=True)
+ importbone = BoolProperty(name="Bones", description="Import bones only. Currently not working yet.", default=True)
+ importmultiuvtextures = BoolProperty(name="Single UV Texture(s)", description="Single or Multi uv textures.", default=True)
+ bDebugLogPSK = BoolProperty(name="Debug Log.txt", description="Log the output of raw format. It will save in current file dir. Note this just for testing.", default=False)
+ unrealbonesize = FloatProperty( name="Bone Length", description="Bone Length from head to tail distance.", default=1,min=0.001,max=1000)
+
+ def execute(self, context):
+ bpy.types.Scene.unrealbonesize = self.unrealbonesize
+ getInputFilename(self,self.filepath,self.importmesh,self.importbone,self.bDebugLogPSK,self.importmultiuvtextures)
+ return {'FINISHED'}
+
+ def invoke(self, context, event):
+ wm = context.window_manager
+ wm.fileselect_add(self)
+ return {'RUNNING_MODAL'}
+
+def menu_func(self, context):
+ self.layout.operator(IMPORT_OT_psk.bl_idname, text="Skeleton Mesh (.psk)")
+
+def register():
+ bpy.utils.register_module(__name__)
+ bpy.types.INFO_MT_file_import.append(menu_func)
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+ bpy.types.INFO_MT_file_import.remove(menu_func)
+
+if __name__ == "__main__":
+ register()
+
+#note this only reads the data and will not be placed in the scene
+#getInputFilename('C:\\blenderfiles\\BotA.psk')
+#getInputFilename('C:\\blenderfiles\\AA.PSK')
diff --git a/io_mesh_ply/__init__.py b/io_mesh_ply/__init__.py
new file mode 100644
index 00000000..e8384923
--- /dev/null
+++ b/io_mesh_ply/__init__.py
@@ -0,0 +1,134 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+bl_info = {
+ "name": "Stanford PLY format",
+ "author": "Bruce Merry, Campbell Barton",
+ "blender": (2, 5, 7),
+ "api": 35622,
+ "location": "File > Import-Export",
+ "description": "Import-Export PLY mesh data with UVs and vertex colors",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/Import-Export/Stanford_PLY",
+ "tracker_url": "",
+ "support": 'OFFICIAL',
+ "category": "Import-Export"}
+
+# To support reload properly, try to access a package var, if it's there, reload everything
+if "bpy" in locals():
+ import imp
+ if "export_ply" in locals():
+ imp.reload(export_ply)
+ if "import_ply" in locals():
+ imp.reload(import_ply)
+
+
+import os
+import bpy
+from bpy.props import CollectionProperty, StringProperty, BoolProperty
+from bpy_extras.io_utils import ImportHelper, ExportHelper
+
+
+class ImportPLY(bpy.types.Operator, ImportHelper):
+ '''Load a PLY geometry file'''
+ bl_idname = "import_mesh.ply"
+ bl_label = "Import PLY"
+
+ files = CollectionProperty(name="File Path",
+ description="File path used for importing "
+ "the PLY file",
+ type=bpy.types.OperatorFileListElement)
+
+ directory = StringProperty()
+
+ filename_ext = ".ply"
+ filter_glob = StringProperty(default="*.ply", options={'HIDDEN'})
+
+ def execute(self, context):
+ paths = [os.path.join(self.directory, name.name) for name in self.files]
+ if not paths:
+ paths.append(self.filepath)
+
+ from . import import_ply
+
+ for path in paths:
+ import_ply.load(self, context, path)
+
+ return {'FINISHED'}
+
+
+class ExportPLY(bpy.types.Operator, ExportHelper):
+ '''Export a single object as a stanford PLY with normals, colours and texture coordinates.'''
+ bl_idname = "export_mesh.ply"
+ bl_label = "Export PLY"
+
+ filename_ext = ".ply"
+ filter_glob = StringProperty(default="*.ply", options={'HIDDEN'})
+
+ use_modifiers = BoolProperty(name="Apply Modifiers", description="Apply Modifiers to the exported mesh", default=True)
+ use_normals = BoolProperty(name="Normals", description="Export Normals for smooth and hard shaded faces", default=True)
+ use_uv_coords = BoolProperty(name="UVs", description="Export the active UV layer", default=True)
+ use_colors = BoolProperty(name="Vertex Colors", description="Export the active vertex color layer", default=True)
+
+ @classmethod
+ def poll(cls, context):
+ return context.active_object != None
+
+ def execute(self, context):
+ filepath = self.filepath
+ filepath = bpy.path.ensure_ext(filepath, self.filename_ext)
+ from . import export_ply
+ return export_ply.save(self, context, **self.as_keywords(ignore=("check_existing", "filter_glob")))
+
+ def draw(self, context):
+ layout = self.layout
+
+ row = layout.row()
+ row.prop(self, "use_modifiers")
+ row.prop(self, "use_normals")
+ row = layout.row()
+ row.prop(self, "use_uv_coords")
+ row.prop(self, "use_colors")
+
+
+def menu_func_import(self, context):
+ self.layout.operator(ImportPLY.bl_idname, text="Stanford (.ply)")
+
+
+def menu_func_export(self, context):
+ self.layout.operator(ExportPLY.bl_idname, text="Stanford (.ply)")
+
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ bpy.types.INFO_MT_file_import.append(menu_func_import)
+ bpy.types.INFO_MT_file_export.append(menu_func_export)
+
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+ bpy.types.INFO_MT_file_import.remove(menu_func_import)
+ bpy.types.INFO_MT_file_export.remove(menu_func_export)
+
+if __name__ == "__main__":
+ register()
diff --git a/io_mesh_ply/export_ply.py b/io_mesh_ply/export_ply.py
new file mode 100644
index 00000000..6e2f43db
--- /dev/null
+++ b/io_mesh_ply/export_ply.py
@@ -0,0 +1,203 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+# Copyright (C) 2004, 2005: Bruce Merry, bmerry@cs.uct.ac.za
+# Contributors: Bruce Merry, Campbell Barton
+
+"""
+This script exports Stanford PLY files from Blender. It supports normals,
+colours, and texture coordinates per face or per vertex.
+Only one mesh can be exported at a time.
+"""
+
+import bpy
+import os
+
+
+def save(operator, context, filepath="", use_modifiers=True, use_normals=True, use_uv_coords=True, use_colors=True):
+
+ def rvec3d(v):
+ return round(v[0], 6), round(v[1], 6), round(v[2], 6)
+
+ def rvec2d(v):
+ return round(v[0], 6), round(v[1], 6)
+
+ scene = context.scene
+ obj = context.object
+
+ if not obj:
+ raise Exception("Error, Select 1 active object")
+
+ file = open(filepath, "w", encoding="utf8", newline="\n")
+
+ if scene.objects.active:
+ bpy.ops.object.mode_set(mode='OBJECT')
+
+ if use_modifiers:
+ mesh = obj.to_mesh(scene, True, 'PREVIEW')
+ else:
+ mesh = obj.data
+
+ if not mesh:
+ raise Exception("Error, could not get mesh data from active object")
+
+ # mesh.transform(obj.matrix_world) # XXX
+
+ faceUV = (len(mesh.uv_textures) > 0)
+ vertexUV = (len(mesh.sticky) > 0)
+ vertexColors = len(mesh.vertex_colors) > 0
+
+ if (not faceUV) and (not vertexUV):
+ use_uv_coords = False
+ if not vertexColors:
+ use_colors = False
+
+ if not use_uv_coords:
+ faceUV = vertexUV = False
+ if not use_colors:
+ vertexColors = False
+
+ if faceUV:
+ active_uv_layer = mesh.uv_textures.active
+ if not active_uv_layer:
+ use_uv_coords = False
+ faceUV = None
+ else:
+ active_uv_layer = active_uv_layer.data
+
+ if vertexColors:
+ active_col_layer = mesh.vertex_colors.active
+ if not active_col_layer:
+ use_colors = False
+ vertexColors = None
+ else:
+ active_col_layer = active_col_layer.data
+
+ # in case
+ color = uvcoord = uvcoord_key = normal = normal_key = None
+
+ mesh_verts = mesh.vertices # save a lookup
+ ply_verts = [] # list of dictionaries
+ # vdict = {} # (index, normal, uv) -> new index
+ vdict = [{} for i in range(len(mesh_verts))]
+ ply_faces = [[] for f in range(len(mesh.faces))]
+ vert_count = 0
+ for i, f in enumerate(mesh.faces):
+
+ smooth = f.use_smooth
+ if not smooth:
+ normal = tuple(f.normal)
+ normal_key = rvec3d(normal)
+
+ if faceUV:
+ uv = active_uv_layer[i]
+ uv = uv.uv1, uv.uv2, uv.uv3, uv.uv4 # XXX - crufty :/
+ if vertexColors:
+ col = active_col_layer[i]
+ col = col.color1[:], col.color2[:], col.color3[:], col.color4[:]
+
+ f_verts = f.vertices
+
+ pf = ply_faces[i]
+ for j, vidx in enumerate(f_verts):
+ v = mesh_verts[vidx]
+
+ if smooth:
+ normal = tuple(v.normal)
+ normal_key = rvec3d(normal)
+
+ if faceUV:
+ uvcoord = uv[j][0], 1.0 - uv[j][1]
+ uvcoord_key = rvec2d(uvcoord)
+ elif vertexUV:
+ uvcoord = v.uvco[0], 1.0 - v.uvco[1]
+ uvcoord_key = rvec2d(uvcoord)
+
+ if vertexColors:
+ color = col[j]
+ color = int(color[0] * 255.0), int(color[1] * 255.0), int(color[2] * 255.0)
+
+ key = normal_key, uvcoord_key, color
+
+ vdict_local = vdict[vidx]
+ pf_vidx = vdict_local.get(key) # Will be None initially
+
+ if pf_vidx is None: # same as vdict_local.has_key(key)
+ pf_vidx = vdict_local[key] = vert_count
+ ply_verts.append((vidx, normal, uvcoord, color))
+ vert_count += 1
+
+ pf.append(pf_vidx)
+
+ file.write('ply\n')
+ file.write('format ascii 1.0\n')
+ file.write('comment Created by Blender %s - www.blender.org, source file: %r\n' % (bpy.app.version_string, os.path.basename(bpy.data.filepath)))
+
+ file.write('element vertex %d\n' % len(ply_verts))
+
+ file.write('property float x\n')
+ file.write('property float y\n')
+ file.write('property float z\n')
+
+ if use_normals:
+ file.write('property float nx\n')
+ file.write('property float ny\n')
+ file.write('property float nz\n')
+ if use_uv_coords:
+ file.write('property float s\n')
+ file.write('property float t\n')
+ if use_colors:
+ file.write('property uchar red\n')
+ file.write('property uchar green\n')
+ file.write('property uchar blue\n')
+
+ file.write('element face %d\n' % len(mesh.faces))
+ file.write('property list uchar uint vertex_indices\n')
+ file.write('end_header\n')
+
+ for i, v in enumerate(ply_verts):
+ file.write('%.6f %.6f %.6f ' % mesh_verts[v[0]].co[:]) # co
+ if use_normals:
+ file.write('%.6f %.6f %.6f ' % v[1]) # no
+ if use_uv_coords:
+ file.write('%.6f %.6f ' % v[2]) # uv
+ if use_colors:
+ file.write('%u %u %u' % v[3]) # col
+ file.write('\n')
+
+ for pf in ply_faces:
+ if len(pf) == 3:
+ file.write('3 %d %d %d\n' % tuple(pf))
+ else:
+ file.write('4 %d %d %d %d\n' % tuple(pf))
+
+ file.close()
+ print("writing %r done" % filepath)
+
+ if use_modifiers:
+ bpy.data.meshes.remove(mesh)
+
+ # XXX
+ """
+ if is_editmode:
+ Blender.Window.EditMode(1, '', 0)
+ """
+
+ return {'FINISHED'}
diff --git a/io_mesh_ply/import_ply.py b/io_mesh_ply/import_ply.py
new file mode 100644
index 00000000..3d0f2970
--- /dev/null
+++ b/io_mesh_ply/import_ply.py
@@ -0,0 +1,337 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+import re
+import struct
+
+
+class element_spec(object):
+ __slots__ = ("name",
+ "count",
+ "properties",
+ )
+
+ def __init__(self, name, count):
+ self.name = name
+ self.count = count
+ self.properties = []
+
+ def load(self, format, stream):
+ if format == b'ascii':
+ stream = re.split(b'\s+', stream.readline())
+ return [x.load(format, stream) for x in self.properties]
+
+ def index(self, name):
+ for i, p in enumerate(self.properties):
+ if p.name == name:
+ return i
+ return -1
+
+
+class property_spec(object):
+ __slots__ = ("name",
+ "list_type",
+ "numeric_type",
+ )
+
+ def __init__(self, name, list_type, numeric_type):
+ self.name = name
+ self.list_type = list_type
+ self.numeric_type = numeric_type
+
+ def read_format(self, format, count, num_type, stream):
+ if format == b'ascii':
+ if num_type == 's':
+ ans = []
+ for i in range(count):
+ s = stream[i]
+ if len(s) < 2 or s[0] != '"' or s[-1] != '"':
+ print('Invalid string', s)
+ print('Note: ply_import.py does not handle whitespace in strings')
+ return None
+ ans.append(s[1:-1])
+ stream[:count] = []
+ return ans
+ if num_type == 'f' or num_type == 'd':
+ mapper = float
+ else:
+ mapper = int
+ ans = [mapper(x) for x in stream[:count]]
+ stream[:count] = []
+ return ans
+ else:
+ if num_type == 's':
+ ans = []
+ for i in range(count):
+ fmt = format + 'i'
+ data = stream.read(struct.calcsize(fmt))
+ length = struct.unpack(fmt, data)[0]
+ fmt = '%s%is' % (format, length)
+ data = stream.read(struct.calcsize(fmt))
+ s = struct.unpack(fmt, data)[0]
+ ans.append(s[:-1]) # strip the NULL
+ return ans
+ else:
+ fmt = '%s%i%s' % (format, count, num_type)
+ data = stream.read(struct.calcsize(fmt))
+ return struct.unpack(fmt, data)
+
+ def load(self, format, stream):
+ if self.list_type is not None:
+ count = int(self.read_format(format, 1, self.list_type, stream)[0])
+ return self.read_format(format, count, self.numeric_type, stream)
+ else:
+ return self.read_format(format, 1, self.numeric_type, stream)[0]
+
+
+class object_spec(object):
+ __slots__ = ("specs",
+ )
+ 'A list of element_specs'
+ def __init__(self):
+ self.specs = []
+
+ def load(self, format, stream):
+ return dict([(i.name, [i.load(format, stream) for j in range(i.count)]) for i in self.specs])
+
+ '''
+ # Longhand for above LC
+ answer = {}
+ for i in self.specs:
+ answer[i.name] = []
+ for j in range(i.count):
+ if not j % 100 and meshtools.show_progress:
+ Blender.Window.DrawProgressBar(float(j) / i.count, 'Loading ' + i.name)
+ answer[i.name].append(i.load(format, stream))
+ return answer
+ '''
+
+
+def read(filepath):
+ format = b''
+ version = b'1.0'
+ format_specs = {b'binary_little_endian': '<',
+ b'binary_big_endian': '>',
+ b'ascii': b'ascii'}
+ type_specs = {b'char': 'b',
+ b'uchar': 'B',
+ b'int8': 'b',
+ b'uint8': 'B',
+ b'int16': 'h',
+ b'uint16': 'H',
+ b'ushort': 'H',
+ b'int': 'i',
+ b'int32': 'i',
+ b'uint': 'I',
+ b'uint32': 'I',
+ b'float': 'f',
+ b'float32': 'f',
+ b'float64': 'd',
+ b'double': 'd',
+ b'string': 's'}
+ obj_spec = object_spec()
+
+ file = open(filepath, 'rb') # Only for parsing the header, not binary data
+ signature = file.readline()
+
+ if not signature.startswith(b'ply'):
+ print('Signature line was invalid')
+ return None
+
+ while 1:
+ tokens = re.split(br'[ \r\n]+', file.readline())
+
+ if len(tokens) == 0:
+ continue
+ if tokens[0] == b'end_header':
+ break
+ elif tokens[0] == b'comment' or tokens[0] == b'obj_info':
+ continue
+ elif tokens[0] == b'format':
+ if len(tokens) < 3:
+ print('Invalid format line')
+ return None
+ if tokens[1] not in format_specs: # .keys(): # keys is implicit
+ print('Unknown format', tokens[1])
+ return None
+ if tokens[2] != version:
+ print('Unknown version', tokens[2])
+ return None
+ format = tokens[1]
+ elif tokens[0] == b'element':
+ if len(tokens) < 3:
+ print(b'Invalid element line')
+ return None
+ obj_spec.specs.append(element_spec(tokens[1], int(tokens[2])))
+ elif tokens[0] == b'property':
+ if not len(obj_spec.specs):
+ print('Property without element')
+ return None
+ if tokens[1] == b'list':
+ obj_spec.specs[-1].properties.append(property_spec(tokens[4], type_specs[tokens[2]], type_specs[tokens[3]]))
+ else:
+ obj_spec.specs[-1].properties.append(property_spec(tokens[2], None, type_specs[tokens[1]]))
+
+ if format != b'ascii':
+ file.close() # was ascii, now binary
+ file = open(filepath, 'rb')
+
+ # skip the header...
+ while not file.readline().startswith(b'end_header'):
+ pass
+
+ obj = obj_spec.load(format_specs[format], file)
+
+ file.close()
+
+ return obj_spec, obj
+
+
+import bpy
+
+
+def load_ply(filepath):
+ import time
+ from bpy_extras.io_utils import unpack_list, unpack_face_list
+ from bpy_extras.image_utils import load_image
+
+ t = time.time()
+ obj_spec, obj = read(filepath)
+ if obj is None:
+ print('Invalid file')
+ return
+
+ uvindices = colindices = None
+ colmultiply = None
+
+ # noindices = None # Ignore normals
+
+ for el in obj_spec.specs:
+ if el.name == b'vertex':
+ vindices = vindices_x, vindices_y, vindices_z = el.index(b'x'), el.index(b'y'), el.index(b'z')
+ # noindices = (el.index('nx'), el.index('ny'), el.index('nz'))
+ # if -1 in noindices: noindices = None
+ uvindices = (el.index(b's'), el.index(b't'))
+ if -1 in uvindices:
+ uvindices = None
+ colindices = el.index(b'red'), el.index(b'green'), el.index(b'blue')
+ if -1 in colindices:
+ colindices = None
+ else: # if not a float assume uchar
+ colmultiply = [1.0 if el.properties[i].numeric_type in ('f', 'd') else (1.0 / 256.0) for i in colindices]
+
+ elif el.name == b'face':
+ findex = el.index(b'vertex_indices')
+
+ mesh_faces = []
+ mesh_uvs = []
+ mesh_colors = []
+
+ def add_face(vertices, indices, uvindices, colindices):
+ mesh_faces.append(indices)
+ if uvindices:
+ mesh_uvs.append([(vertices[index][uvindices[0]], 1.0 - vertices[index][uvindices[1]]) for index in indices])
+ if colindices:
+ mesh_colors.append([(vertices[index][colindices[0]] * colmultiply[0],
+ vertices[index][colindices[1]] * colmultiply[1],
+ vertices[index][colindices[2]] * colmultiply[2],
+ ) for index in indices])
+
+ if uvindices or colindices:
+ # If we have Cols or UVs then we need to check the face order.
+ add_face_simple = add_face
+
+ # EVIL EEKADOODLE - face order annoyance.
+ def add_face(vertices, indices, uvindices, colindices):
+ if len(indices) == 4:
+ if indices[2] == 0 or indices[3] == 0:
+ indices = indices[2], indices[3], indices[0], indices[1]
+ elif len(indices) == 3:
+ if indices[2] == 0:
+ indices = indices[1], indices[2], indices[0]
+
+ add_face_simple(vertices, indices, uvindices, colindices)
+
+ verts = obj[b'vertex']
+
+ if b'face' in obj:
+ for f in obj[b'face']:
+ ind = f[findex]
+ len_ind = len(ind)
+ if len_ind <= 4:
+ add_face(verts, ind, uvindices, colindices)
+ else:
+ # Fan fill the face
+ for j in range(len_ind - 2):
+ add_face(verts, (ind[0], ind[j + 1], ind[j + 2]), uvindices, colindices)
+
+ ply_name = bpy.path.display_name_from_filepath(filepath)
+
+ mesh = bpy.data.meshes.new(name=ply_name)
+
+ mesh.vertices.add(len(obj[b'vertex']))
+
+ mesh.vertices.foreach_set("co", [a for v in obj[b'vertex'] for a in (v[vindices_x], v[vindices_y], v[vindices_z])])
+
+ if mesh_faces:
+ mesh.faces.add(len(mesh_faces))
+ mesh.faces.foreach_set("vertices_raw", unpack_face_list(mesh_faces))
+
+ if uvindices or colindices:
+ if uvindices:
+ uvlay = mesh.uv_textures.new()
+ if colindices:
+ vcol_lay = mesh.vertex_colors.new()
+
+ if uvindices:
+ for i, f in enumerate(uvlay.data):
+ ply_uv = mesh_uvs[i]
+ for j, uv in enumerate(f.uv):
+ uv[0], uv[1] = ply_uv[j]
+
+ if colindices:
+ for i, f in enumerate(vcol_lay.data):
+ # XXX, colors dont come in right, needs further investigation.
+ ply_col = mesh_colors[i]
+ if len(ply_col) == 4:
+ f_col = f.color1, f.color2, f.color3, f.color4
+ else:
+ f_col = f.color1, f.color2, f.color3
+
+ for j, col in enumerate(f_col):
+ col.r, col.g, col.b = ply_col[j]
+
+ mesh.validate()
+ mesh.update()
+
+ scn = bpy.context.scene
+ #scn.objects.selected = [] # XXX25
+
+ obj = bpy.data.objects.new(ply_name, mesh)
+ scn.objects.link(obj)
+ scn.objects.active = obj
+ obj.select = True
+
+ print('\nSuccessfully imported %r in %.3f sec' % (filepath, time.time() - t))
+
+
+def load(operator, context, filepath=""):
+ load_ply(filepath)
+ return {'FINISHED'}
diff --git a/io_mesh_raw/__init__.py b/io_mesh_raw/__init__.py
new file mode 100644
index 00000000..67109d21
--- /dev/null
+++ b/io_mesh_raw/__init__.py
@@ -0,0 +1,68 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+bl_info = {
+ "name": "Raw mesh format (.raw)",
+ "author": "Anthony D,Agostino (Scorpius), Aurel Wildfellner",
+ "version": (0, 2),
+ "blender": (2, 5, 7),
+ "api": 36103,
+ "location": "File > Import-Export > Raw Faces (.raw) ",
+ "description": "Import-Export Raw Faces",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/Import-Export/Raw_Mesh_IO",
+ "tracker_url": "https://projects.blender.org/tracker/index.php?"\
+ "func=detail&aid=25692",
+ "category": "Import-Export"}
+
+if "bpy" in locals():
+ import imp
+ imp.reload(import_raw)
+ imp.reload(export_raw)
+else:
+ from . import import_raw
+ from . import export_raw
+
+
+import bpy
+
+def menu_import(self, context):
+ self.layout.operator(import_raw.RawImporter.bl_idname, text="Raw Faces (.raw)").filepath = "*.raw"
+
+
+def menu_export(self, context):
+ import os
+ default_path = os.path.splitext(bpy.data.filepath)[0] + ".raw"
+ self.layout.operator(export_raw.RawExporter.bl_idname, text="Raw Faces (.raw)").filepath = default_path
+
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ bpy.types.INFO_MT_file_import.append(menu_import)
+ bpy.types.INFO_MT_file_export.append(menu_export)
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+ bpy.types.INFO_MT_file_import.remove(menu_import)
+ bpy.types.INFO_MT_file_export.remove(menu_export)
+
+if __name__ == "__main__":
+ register()
diff --git a/io_mesh_raw/export_raw.py b/io_mesh_raw/export_raw.py
new file mode 100644
index 00000000..3bc79461
--- /dev/null
+++ b/io_mesh_raw/export_raw.py
@@ -0,0 +1,112 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+__author__ = ["Aurel Wildfellner"]
+__version__ = '0.2'
+__bpydoc__ = """\
+This script exports a Mesh to a RAW triangle format file.
+
+The raw triangle format is very simple; it has no verts or faces lists.
+It's just a simple ascii text file with the vertices of each triangle
+listed on each line. In addition, also quads can be exported as a line
+of 12 values (this was the default before blender 2.5). Now default
+settings will triangulate the mesh.
+
+Usage:
+Execute this script from the "File->Export" menu. You can select
+whether modifiers should be applied and if the mesh is triangulated.
+
+"""
+
+import bpy
+
+
+def faceToTriangles(face):
+ triangles = []
+ if (len(face) == 4): #quad
+ triangles.append( [ face[0], face[1], face[2] ] )
+ triangles.append( [ face[2], face[3], face[0] ] )
+ else:
+ triangles.append(face)
+
+ return triangles
+
+
+def faceValues(face, mesh, matrix):
+ fv = []
+ for verti in face.vertices:
+ fv.append(mesh.vertices[verti].co * matrix)
+ return fv
+
+
+def faceToLine(face):
+ line = ""
+ for v in face:
+ line += str(v[0]) + " " + str(v[1]) + " " + str(v[2]) + " "
+ return line[:-1] + "\n"
+
+
+def export_raw(filepath, applyMods, triangulate):
+ faces = []
+ for obj in bpy.context.selected_objects:
+ if obj.type == 'MESH':
+ matrix = obj.matrix_world
+
+ if (applyMods):
+ me = obj.to_mesh(bpy.context.scene, True, "PREVIEW")
+ else:
+ me = obj.data
+
+ for face in me.faces:
+ fv = faceValues(face, me, matrix)
+ if triangulate:
+ faces.extend(faceToTriangles(fv))
+ else:
+ faces.append(fv)
+
+ # write the faces to a file
+ file = open(filepath, "w")
+ for face in faces:
+ file.write(faceToLine(face))
+ file.close()
+
+
+from bpy.props import *
+
+
+class RawExporter(bpy.types.Operator):
+ '''Save Raw triangle mesh data'''
+ bl_idname = "export_mesh.raw"
+ bl_label = "Export RAW"
+
+ filepath = StringProperty(name="File Path", description="Filepath used for exporting the RAW file", maxlen= 1024, default= "", subtype='FILE_PATH')
+ check_existing = BoolProperty(name="Check Existing", description="Check and warn on overwriting existing files", default=True, options={'HIDDEN'})
+
+ apply_modifiers = BoolProperty(name="Apply Modifiers", description="Use transformed mesh data from each object", default=True)
+ triangulate = BoolProperty(name="Triangulate", description="Triangulate quads.", default=True)
+
+ def execute(self, context):
+ export_raw(self.filepath, self.apply_modifiers, self.triangulate)
+ return {'FINISHED'}
+
+ def invoke(self, context, event):
+ wm = context.window_manager
+ wm.fileselect_add(self)
+ return {'RUNNING_MODAL'}
+
+# package manages registering
diff --git a/io_mesh_raw/import_raw.py b/io_mesh_raw/import_raw.py
new file mode 100644
index 00000000..04724544
--- /dev/null
+++ b/io_mesh_raw/import_raw.py
@@ -0,0 +1,145 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+__author__ = ["Anthony D'Agostino (Scorpius)", "Aurel Wildfellner"]
+__version__ = '0.2'
+__bpydoc__ = """\
+This script imports Raw Triangle File format files to Blender.
+
+The raw triangle format is very simple; it has no verts or faces lists.
+It's just a simple ascii text file with the vertices of each triangle
+listed on each line. In addition, a line with 12 values will be
+imported as a quad. This may be in conflict with some other
+applications, which use a raw format, but this is how it was
+implemented back in blender 2.42.
+
+Usage:
+Execute this script from the "File->Import" menu and choose a Raw file to
+open.
+
+Notes:
+Generates the standard verts and faces lists, but without duplicate
+verts. Only *exact* duplicates are removed, there is no way to specify a
+tolerance.
+"""
+
+
+
+import bpy
+
+# TODO: move these into a utility module
+from bpy_extras.io_utils import unpack_face_list, unpack_list # TODO, make generic
+
+
+def readMesh(filename, objName):
+ file = open(filename, "rb")
+
+ def line_to_face(line):
+ # Each triplet is an xyz float
+ line_split = []
+ try:
+ line_split = list(map(float, line.split()))
+ except:
+ return None
+
+ if len(line_split) == 9: # Tri
+ f1, f2, f3, f4, f5, f6, f7, f8, f9 = line_split
+ return [(f1, f2, f3), (f4, f5, f6), (f7, f8, f9)]
+ elif len(line_split) == 12: # Quad
+ f1, f2, f3, f4, f5, f6, f7, f8, f9, A, B, C = line_split
+ return [(f1, f2, f3), (f4, f5, f6), (f7, f8, f9), (A, B, C)]
+ else:
+ return None
+
+
+ faces = []
+ for line in file.readlines():
+ face = line_to_face(line)
+ if face:
+ faces.append(face)
+
+ file.close()
+
+ # Generate verts and faces lists, without duplicates
+ verts = []
+ coords = {}
+ index_tot = 0
+ faces_indices = []
+
+ for f in faces:
+ fi = []
+ for i, v in enumerate(f):
+ index = coords.get(v)
+
+ if index is None:
+ index = coords[v] = index_tot
+ index_tot += 1
+ verts.append(v)
+
+ fi.append(index)
+
+ faces_indices.append(fi)
+
+ mesh = bpy.data.meshes.new(objName)
+ mesh.from_pydata(verts, [], faces_indices)
+
+ return mesh
+
+
+def addMeshObj(mesh, objName):
+ scn = bpy.context.scene
+
+ for o in scn.objects:
+ o.select = False
+
+ mesh.update()
+ mesh.validate()
+
+ nobj = bpy.data.objects.new(objName, mesh)
+ scn.objects.link(nobj)
+ nobj.select = True
+
+ if scn.objects.active is None or scn.objects.active.mode == 'OBJECT':
+ scn.objects.active = nobj
+
+
+from bpy.props import *
+
+class RawImporter(bpy.types.Operator):
+ '''Load Raw triangle mesh data'''
+ bl_idname = "import_mesh.raw"
+ bl_label = "Import RAW"
+
+ filepath = StringProperty(name="File Path", description="Filepath used for importing the RAW file", maxlen=1024, default="", subtype='FILE_PATH')
+
+ def execute(self, context):
+
+ #convert the filename to an object name
+ objName = bpy.path.display_name(self.filepath.split("\\")[-1].split("/")[-1])
+
+ mesh = readMesh(self.filepath, objName)
+ addMeshObj(mesh, objName)
+
+ return {'FINISHED'}
+
+ def invoke(self, context, event):
+ wm = context.window_manager
+ wm.fileselect_add(self)
+ return {'RUNNING_MODAL'}
+
+# package manages registering
diff --git a/io_mesh_stl/__init__.py b/io_mesh_stl/__init__.py
new file mode 100644
index 00000000..48c37af4
--- /dev/null
+++ b/io_mesh_stl/__init__.py
@@ -0,0 +1,163 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+bl_info = {
+ "name": "STL format",
+ "author": "Guillaume Bouchard (Guillaum)",
+ "version": (1, 0),
+ "blender": (2, 5, 7),
+ "api": 35622,
+ "location": "File > Import-Export > Stl",
+ "description": "Import-Export STL files",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/Import-Export/STL",
+ "tracker_url": "https://projects.blender.org/tracker/index.php?"
+ "func=detail&aid=22837",
+ "support": 'OFFICIAL',
+ "category": "Import-Export"}
+
+# @todo write the wiki page
+
+"""
+Import-Export STL files (binary or ascii)
+
+- Import automatically remove the doubles.
+- Export can export with/without modifiers applied
+
+Issues:
+
+Import:
+ - Does not handle the normal of the triangles
+ - Does not handle endianness
+"""
+
+if "bpy" in locals():
+ import imp
+ if "stl_utils" in locals():
+ imp.reload(stl_utils)
+ if "blender_utils" in locals():
+ imp.reload(blender_utils)
+
+import os
+
+import bpy
+from bpy.props import StringProperty, BoolProperty, CollectionProperty
+from bpy_extras.io_utils import ExportHelper, ImportHelper
+
+
+class ImportSTL(bpy.types.Operator, ImportHelper):
+ '''Load STL triangle mesh data'''
+ bl_idname = "import_mesh.stl"
+ bl_label = "Import STL"
+
+ filename_ext = ".stl"
+
+ filter_glob = StringProperty(default="*.stl", options={'HIDDEN'})
+
+ files = CollectionProperty(name="File Path",
+ description="File path used for importing "
+ "the STL file",
+ type=bpy.types.OperatorFileListElement)
+
+ directory = StringProperty(subtype='DIR_PATH')
+
+ def execute(self, context):
+ from . import stl_utils
+ from . import blender_utils
+
+ paths = [os.path.join(self.directory, name.name) for name in self.files]
+
+ if not paths:
+ paths.append(self.filepath)
+
+ if bpy.ops.object.mode_set.poll():
+ bpy.ops.object.mode_set(mode='OBJECT')
+
+ if bpy.ops.object.select_all.poll():
+ bpy.ops.object.select_all(action='DESELECT')
+
+ for path in paths:
+ objName = bpy.path.display_name(os.path.basename(path))
+ tris, pts = stl_utils.read_stl(path)
+
+ blender_utils.create_and_link_mesh(objName, tris, pts)
+
+ return {'FINISHED'}
+
+
+class ExportSTL(bpy.types.Operator, ExportHelper):
+ '''
+ Save STL triangle mesh data from the active object
+ '''
+ bl_idname = "export_mesh.stl"
+ bl_label = "Export STL"
+
+ filename_ext = ".stl"
+
+ ascii = BoolProperty(name="Ascii",
+ description="Save the file in ASCII file format",
+ default=False)
+ apply_modifiers = BoolProperty(name="Apply Modifiers",
+ description="Apply the modifiers "
+ "before saving",
+ default=True)
+
+ def execute(self, context):
+ from . import stl_utils
+ from . import blender_utils
+ import itertools
+
+ faces = itertools.chain.from_iterable(
+ blender_utils.faces_from_mesh(ob, self.apply_modifiers)
+ for ob in context.selected_objects)
+
+ stl_utils.write_stl(self.filepath, faces, self.ascii)
+
+ return {'FINISHED'}
+
+
+def menu_import(self, context):
+ self.layout.operator(ImportSTL.bl_idname,
+ text="Stl (.stl)").filepath = "*.stl"
+
+
+def menu_export(self, context):
+ default_path = os.path.splitext(bpy.data.filepath)[0] + ".stl"
+ self.layout.operator(ExportSTL.bl_idname,
+ text="Stl (.stl)").filepath = default_path
+
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ bpy.types.INFO_MT_file_import.append(menu_import)
+ bpy.types.INFO_MT_file_export.append(menu_export)
+
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+ bpy.types.INFO_MT_file_import.remove(menu_import)
+ bpy.types.INFO_MT_file_export.remove(menu_export)
+
+
+if __name__ == "__main__":
+ register()
diff --git a/io_mesh_stl/blender_utils.py b/io_mesh_stl/blender_utils.py
new file mode 100644
index 00000000..8d19e30c
--- /dev/null
+++ b/io_mesh_stl/blender_utils.py
@@ -0,0 +1,80 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+import bpy
+
+
+def create_and_link_mesh(name, faces, points):
+ '''
+ Create a blender mesh and object called name from a list of
+ *points* and *faces* and link it in the current scene.
+ '''
+
+ mesh = bpy.data.meshes.new(name)
+ mesh.from_pydata(points, [], faces)
+
+ # update mesh to allow proper display
+ mesh.validate()
+ mesh.update()
+
+ scene = bpy.context.scene
+
+ obj = bpy.data.objects.new(name, mesh)
+ scene.objects.link(obj)
+ obj.select = True
+
+
+def faces_from_mesh(ob, apply_modifier=False, triangulate=True):
+ '''
+ From an object, return a generator over a list of faces.
+
+ Each face is a list of its vertices. Each vertex is a tuple of its
+ coordinates.
+
+ apply_modifier
+ Apply the preview modifier to the returned list
+
+ triangulate
+ Split the quad into two triangles
+ '''
+
+ # get the modifiers
+ try:
+ mesh = ob.to_mesh(bpy.context.scene, apply_modifier, "PREVIEW")
+ except RuntimeError:
+ return ()
+
+ if triangulate:
+ # From a list of faces, return the face triangulated if needed.
+ def iter_face_index():
+ for face in mesh.faces:
+ vertices = face.vertices[:]
+ if len(vertices) == 4:
+ yield vertices[0], vertices[1], vertices[2]
+ yield vertices[2], vertices[3], vertices[0]
+ else:
+ yield vertices
+ else:
+ def iter_face_index():
+ for face in mesh.faces:
+ yield face.vertices[:]
+
+ return ([(mesh.vertices[index].co * ob.matrix_world)[:]
+ for index in indexes] for indexes in iter_face_index())
diff --git a/io_mesh_stl/stl_utils.py b/io_mesh_stl/stl_utils.py
new file mode 100644
index 00000000..e320774d
--- /dev/null
+++ b/io_mesh_stl/stl_utils.py
@@ -0,0 +1,264 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+'''
+Import and export STL files
+
+Used as a blender script, it loads all the STL files into the scene:
+
+blender -P stl_utils.py -- file1.stl file2.stl file3.stl ...
+'''
+
+import struct
+import mmap
+import contextlib
+import itertools
+
+# TODO: handle endianness
+
+
+@contextlib.contextmanager
+def mmap_file(filename):
+ '''
+ Context manager over the data of an mmap'ed file (Read ONLY).
+
+
+ Example:
+
+ with mmap_file(filename) as m:
+ m.read()
+ print m[10:50]
+ '''
+ with open(filename, 'rb') as file:
+ # check http://bugs.python.org/issue8046 to have mmap context
+ # manager fixed in python
+ map = mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ)
+ yield map
+ map.close()
+
+
+class ListDict(dict):
+ '''
+ Set struct with order.
+
+ You can:
+ - insert data into without doubles
+ - get the list of data in insertion order with self.list
+
+ Like collections.OrderedDict, but quicker, can be replaced if
+ ODict is optimised.
+ '''
+
+ def __init__(self):
+ dict.__init__(self)
+ self.list = []
+ self._len = 0
+
+ def add(self, item):
+ '''
+ Add a value to the Set, return its position in it.
+ '''
+ value = self.setdefault(item, self._len)
+ if value == self._len:
+ self.list.append(item)
+ self._len += 1
+
+ return value
+
+BINARY_HEADER = 80
+BINARY_STRIDE = 12 * 4 + 2
+
+
+def _is_ascii_file(data):
+ '''
+ This function returns True if the data represents an ASCII file.
+
+ Please note that a False value does not necessarily mean that the data
+ represents a binary file. It can be a (very *RARE* in real life, but
+ can easily be forged) ascii file.
+ '''
+ size = struct.unpack_from('<I', data, BINARY_HEADER)[0]
+
+ return not data.size() == BINARY_HEADER + 4 + BINARY_STRIDE * size
+
+
+def _binary_read(data):
+ # an stl binary file is
+ # - 80 bytes of description
+ # - 4 bytes of size (unsigned int)
+ # - size triangles :
+ #
+ # - 12 bytes of normal
+ # - 9 * 4 bytes of coordinate (3*3 floats)
+ # - 2 bytes of garbage (usually 0)
+
+ # OFFSET for the first byte of coordinate (headers + first normal bytes)
+ # STRIDE between each triangle (first normal + coordinates + garbage)
+ OFFSET = BINARY_HEADER + 4 + 12
+
+ # read header size, ignore description
+ size = struct.unpack_from('<I', data, BINARY_HEADER)[0]
+ unpack = struct.Struct('<9f').unpack_from
+
+ for i in range(size):
+ # read the points coordinates of each triangle
+ pt = unpack(data, OFFSET + BINARY_STRIDE * i)
+ yield pt[:3], pt[3:6], pt[6:]
+
+
+def _ascii_read(data):
+ # an stl ascii file is like
+ # HEADER: solid some name
+ # for each face:
+ #
+ # facet normal x y z
+ # outerloop
+ # vertex x y z
+ # vertex x y z
+ # vertex x y z
+ # endloop
+ # endfacet
+
+ # strip header
+ data.readline()
+
+ while True:
+ # strip facet normal // or end
+ data.readline()
+
+ # strip the "outer loop" line; at end of file, break out
+ if not data.readline():
+ break
+
+ yield [tuple(map(float, data.readline().split()[1:]))
+ for _ in range(3)]
+
+ # strip the "endloop" and "endfacet" lines
+ data.readline()
+ data.readline()
+
+
+def _binary_write(filename, faces):
+ with open(filename, 'wb') as data:
+ # header
+ # we write padding at the header's beginning to avoid having to
+ # call len(list(faces)) which may be expensive
+ data.write(struct.calcsize('<80sI') * b'\0')
+
+ # 3 vertex == 9f
+ pack = struct.Struct('<9f').pack
+ # pad stands in for the normal vector -- we do not use normals
+ pad = b'\0' * struct.calcsize('<3f')
+
+ nb = 0
+ for verts in faces:
+ # write pad as normal + vertexes + pad as attributes
+ data.write(pad + pack(*itertools.chain.from_iterable(verts)))
+ data.write(b'\0\0')
+ nb += 1
+
+ # header, with correct value now
+ data.seek(0)
+ data.write(struct.pack('<80sI', b"Exported from blender", nb))
+
+
+def _ascii_write(filename, faces):
+ with open(filename, 'w') as data:
+ data.write('solid Exported from blender\n')
+
+ for face in faces:
+ data.write('''facet normal 0 0 0\nouter loop\n''')
+ for vert in face:
+ data.write('vertex %f %f %f\n' % vert)
+ data.write('endloop\nendfacet\n')
+
+ data.write('endsolid Exported from blender\n')
+
+
+def write_stl(filename, faces, ascii=False):
+ '''
+ Write a stl file from faces,
+
+ filename
+ output filename
+
+ faces
+ iterable of tuple of 3 vertex, vertex is tuple of 3 coordinates as float
+
+ ascii
+ save the file in ascii format (very huge)
+ '''
+ (_ascii_write if ascii else _binary_write)(filename, faces)
+
+
+def read_stl(filename):
+ '''
+ Return the triangles and points of an stl binary file.
+
+ Please note that this process can take a lot of time if the file is
+ huge (~1m30 for a 1 GB stl file on a quad core i7).
+
+ - returns a tuple(triangles, points).
+
+ triangles
+ A list of triangles, each triangle as a tuple of 3 index of
+ point in *points*.
+
+ points
+ An indexed list of points, each point is a tuple of 3 float
+ (xyz).
+
+ Example of use:
+
+ >>> tris, pts = read_stl(filename)
+ >>> pts = list(pts)
+ >>>
+ >>> # print the coordinate of the triangle n
+ >>> print(pts[i] for i in tris[n])
+ '''
+
+ tris, pts = [], ListDict()
+
+ with mmap_file(filename) as data:
+ # check for ascii or binary
+ gen = _ascii_read if _is_ascii_file(data) else _binary_read
+
+ for pt in gen(data):
+ # Add the triangle and the point.
+ # If the point is already in the list of points, the
+ # index returned by pts.add() will be the one from the
+ # first equal point inserted.
+ tris.append([pts.add(p) for p in pt])
+
+ return tris, pts.list
+
+
+if __name__ == '__main__':
+ import sys
+ import bpy
+ from io_mesh_stl import blender_utils
+
+ filenames = sys.argv[sys.argv.index('--') + 1:]
+
+ for filename in filenames:
+ objName = bpy.path.display_name(filename)
+ tris, pts = read_stl(filename)
+
+ blender_utils.create_and_link_mesh(objName, tris, pts)
diff --git a/io_mesh_uv_layout/__init__.py b/io_mesh_uv_layout/__init__.py
new file mode 100644
index 00000000..363aff47
--- /dev/null
+++ b/io_mesh_uv_layout/__init__.py
@@ -0,0 +1,198 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+bl_info = {
+ "name": "UV Layout",
+ "author": "Campbell Barton, Matt Ebb",
+ "version": (1, 0),
+ "blender": (2, 5, 7),
+ "api": 35622,
+ "location": "Image-Window > UVs > Export UV Layout",
+ "description": "Export the UV layout as a 2D graphic",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/Import-Export/UV_Layout",
+ "tracker_url": "https://projects.blender.org/tracker/index.php?"
+ "func=detail&aid=22837",
+ "support": 'OFFICIAL',
+ "category": "Import-Export"}
+
+# @todo write the wiki page
+
+if "bpy" in locals():
+ import imp
+ if "export_uv_eps" in locals():
+ imp.reload(export_uv_eps)
+ if "export_uv_png" in locals():
+ imp.reload(export_uv_png)
+ if "export_uv_svg" in locals():
+ imp.reload(export_uv_svg)
+
+import bpy
+
+from bpy.props import StringProperty, BoolProperty, EnumProperty, IntVectorProperty, FloatProperty
+
+
+class ExportUVLayout(bpy.types.Operator):
+ """Export UV layout to file"""
+
+ bl_idname = "uv.export_layout"
+ bl_label = "Export UV Layout"
+ bl_options = {'REGISTER', 'UNDO'}
+
+ filepath = StringProperty(name="File Path", description="File path used for exporting the SVG file", maxlen=1024, default="", subtype='FILE_PATH')
+ check_existing = BoolProperty(name="Check Existing", description="Check and warn on overwriting existing files", default=True, options={'HIDDEN'})
+ export_all = BoolProperty(name="All UV's", description="Export all UVs in this mesh (not just the visible ones)", default=False)
+ mode = EnumProperty(items=(
+ ('SVG', "Scalable Vector Graphic (.svg)", "Export the UV layout to a vector SVG file"),
+ ('EPS', "Encapsulate PostScript (.eps)", "Export the UV layout to a vector EPS file"),
+ ('PNG', "PNG Image (.png)", "Export the UV layout a bitmap image")),
+ name="Format",
+ description="File format to export the UV layout to",
+ default='PNG')
+ size = IntVectorProperty(size=2, default=(1024, 1024), min=8, max=32768, description="Dimensions of the exported file")
+ opacity = FloatProperty(name="Fill Opacity", min=0.0, max=1.0, default=0.25)
+
+ @classmethod
+ def poll(cls, context):
+ obj = context.active_object
+ return (obj and obj.type == 'MESH' and obj.data.uv_textures)
+
+ def _space_image(self, context):
+ space_data = context.space_data
+ if isinstance(space_data, bpy.types.SpaceImageEditor):
+ return space_data
+ else:
+ return None
+
+ def _image_size(self, context, default_width=1024, default_height=1024):
+ # fallback if not in image context.
+ image_width, image_height = default_width, default_height
+
+ space_data = self._space_image(context)
+ if space_data:
+ image = space_data.image
+ if image:
+ width, height = tuple(context.space_data.image.size)
+ # in case the image has no valid data.
+ if width and height:
+ image_width, image_height = width, height
+
+ return image_width, image_height
+
+ def _face_uv_iter(self, context):
+ obj = context.active_object
+ mesh = obj.data
+ uv_layer = mesh.uv_textures.active.data
+ uv_layer_len = len(uv_layer)
+
+ if not self.export_all:
+
+ local_image = Ellipsis
+
+ if context.tool_settings.show_uv_local_view:
+ space_data = self._space_image(context)
+ if space_data:
+ local_image = space_data.image
+
+ faces = mesh.faces
+
+ for i in range(uv_layer_len):
+ uv_elem = uv_layer[i]
+ # context checks
+ if faces[i].select and (local_image is Ellipsis or local_image == uv_elem.image):
+ #~ uv = uv_elem.uv
+ #~ if False not in uv_elem.select_uv[:len(uv)]:
+ #~ yield (i, uv)
+
+ # just write what we see.
+ yield (i, uv_layer[i].uv)
+ else:
+ # all, simple
+ for i in range(uv_layer_len):
+ yield (i, uv_layer[i].uv)
+
+ def execute(self, context):
+
+ obj = context.active_object
+ is_editmode = (obj.mode == 'EDIT')
+ if is_editmode:
+ bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
+
+ mesh = obj.data
+
+ mode = self.mode
+
+ filepath = self.filepath
+ filepath = bpy.path.ensure_ext(filepath, "." + mode.lower())
+ file = open(filepath, "w")
+ fw = file.write
+
+ if mode == 'EPS':
+ from . import export_uv_eps
+ func = export_uv_eps.write
+ elif mode == 'PNG':
+ from . import export_uv_png
+ func = export_uv_png.write
+ if mode == 'SVG':
+ from . import export_uv_svg
+ func = export_uv_svg.write
+
+ func(fw, mesh, self.size[0], self.size[1], self.opacity, lambda: self._face_uv_iter(context))
+
+ if is_editmode:
+ bpy.ops.object.mode_set(mode='EDIT', toggle=False)
+
+ file.close()
+
+ return {'FINISHED'}
+
+ def check(self, context):
+ filepath = bpy.path.ensure_ext(self.filepath, "." + self.mode.lower())
+ if filepath != self.filepath:
+ self.filepath = filepath
+ return True
+ else:
+ return False
+
+ def invoke(self, context, event):
+ import os
+ self.size = self._image_size(context)
+ self.filepath = os.path.splitext(bpy.data.filepath)[0]
+ wm = context.window_manager
+ wm.fileselect_add(self)
+ return {'RUNNING_MODAL'}
+
+
+def menu_func(self, context):
+ self.layout.operator(ExportUVLayout.bl_idname)
+
+
+def register():
+ bpy.utils.register_module(__name__)
+ bpy.types.IMAGE_MT_uvs.append(menu_func)
+
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+ bpy.types.IMAGE_MT_uvs.remove(menu_func)
+
+if __name__ == "__main__":
+ register()
diff --git a/io_mesh_uv_layout/export_uv_eps.py b/io_mesh_uv_layout/export_uv_eps.py
new file mode 100644
index 00000000..df75cf73
--- /dev/null
+++ b/io_mesh_uv_layout/export_uv_eps.py
@@ -0,0 +1,84 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+import bpy
+
+
+def write(fw, mesh, image_width, image_height, opacity, face_iter_func):
+ fw("%!PS-Adobe-3.0 EPSF-3.0\n")
+ fw("%%%%Creator: Blender %s\n" % bpy.app.version_string)
+ fw("%%Pages: 1\n")
+ fw("%%Orientation: Portrait\n")
+ fw("%%%%BoundingBox: 0 0 %d %d\n" % (image_width, image_height))
+ fw("%%%%HiResBoundingBox: 0.0 0.0 %.4f %.4f\n" % (image_width, image_height))
+ fw("%%EndComments\n")
+ fw("%%Page: 1 1\n")
+ fw("0 0 translate\n")
+ fw("1.0 1.0 scale\n")
+ fw("0 0 0 setrgbcolor\n")
+ fw("[] 0 setdash\n")
+ fw("1 setlinewidth\n")
+ fw("1 setlinejoin\n")
+ fw("1 setlinecap\n")
+
+ faces = mesh.faces
+
+ if opacity > 0.0:
+ for i, mat in enumerate(mesh.materials if mesh.materials else [None]):
+ fw("/DRAW_%d {" % i)
+ fw("gsave\n")
+ if mat:
+ color = tuple((1.0 - ((1.0 - c) * opacity)) for c in mat.diffuse_color)
+ else:
+ color = 1.0, 1.0, 1.0
+ fw("%.3g %.3g %.3g setrgbcolor\n" % color)
+ fw("fill\n")
+ fw("grestore\n")
+ fw("0 setgray\n")
+ fw("} def\n")
+
+ # fill
+ for i, uvs in face_iter_func():
+ fw("newpath\n")
+ for j, uv in enumerate(uvs):
+ uv_scale = (uv[0] * image_width, uv[1] * image_height)
+ if j == 0:
+ fw("%.5f %.5f moveto\n" % uv_scale)
+ else:
+ fw("%.5f %.5f lineto\n" % uv_scale)
+
+ fw("closepath\n")
+ fw("DRAW_%d\n" % faces[i].material_index)
+
+ # stroke only
+ for i, uvs in face_iter_func():
+ fw("newpath\n")
+ for j, uv in enumerate(uvs):
+ uv_scale = (uv[0] * image_width, uv[1] * image_height)
+ if j == 0:
+ fw("%.5f %.5f moveto\n" % uv_scale)
+ else:
+ fw("%.5f %.5f lineto\n" % uv_scale)
+
+ fw("closepath\n")
+ fw("stroke\n")
+
+ fw("showpage\n")
+ fw("%%EOF\n")
diff --git a/io_mesh_uv_layout/export_uv_png.py b/io_mesh_uv_layout/export_uv_png.py
new file mode 100644
index 00000000..ec92c4d8
--- /dev/null
+++ b/io_mesh_uv_layout/export_uv_png.py
@@ -0,0 +1,149 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+import bpy
+
+
def write(fw, mesh_source, image_width, image_height, opacity, face_iter_func):
    """Render the UV layout of *mesh_source* to a PNG image.

    Works by building a throwaway scene containing a flat mesh of the UV
    coordinates, an orthographic camera and shadeless materials, rendering it
    with Blender's internal renderer, then deleting everything again.

    fw: bound write method of the (already open) target file; only its path
        is used here - the handle is closed and the renderer writes the file.
    opacity: fill alpha for the solid faces, in [0, 1].
    face_iter_func: callable yielding (face_index, uv_list) pairs.
    """
    # The renderer writes the file itself, so grab the path and close the
    # handle that was opened for us.
    filepath = fw.__self__.name
    fw.__self__.close()

    # One solid material per source material slot (at least one).
    material_solids = [bpy.data.materials.new("uv_temp_solid") for i in range(max(1, len(mesh_source.materials)))]
    material_wire = bpy.data.materials.new("uv_temp_wire")

    scene = bpy.data.scenes.new("uv_temp")
    mesh = bpy.data.meshes.new("uv_temp")
    for mat_solid in material_solids:
        mesh.materials.append(mat_solid)

    # NOTE(review): tot_verts is computed but never used below.
    tot_verts = 0
    for f in mesh_source.faces:
        tot_verts += len(f.vertices)

    faces_source = mesh_source.faces

    # get unique UV's incase there are many overlapping which slow down filling.
    face_hash_3 = set()
    face_hash_4 = set()
    for i, uv in face_iter_func():
        material_index = faces_source[i].material_index
        if len(uv) == 3:
            face_hash_3.add((uv[0][0], uv[0][1], uv[1][0], uv[1][1], uv[2][0], uv[2][1], material_index))
        else:
            face_hash_4.add((uv[0][0], uv[0][1], uv[1][0], uv[1][1], uv[2][0], uv[2][1], uv[3][0], uv[3][1], material_index))

    # now set the faces coords and locations
    # build mesh data
    mesh_new_vertices = []
    mesh_new_materials = []
    mesh_new_face_vertices = []

    current_vert = 0

    # Triangles: vertices_raw is 4 indices wide, pad with a trailing 0.
    for face_data in face_hash_3:
        mesh_new_vertices.extend([face_data[0], face_data[1], 0.0, face_data[2], face_data[3], 0.0, face_data[4], face_data[5], 0.0])
        mesh_new_face_vertices.extend([current_vert, current_vert + 1, current_vert + 2, 0])
        mesh_new_materials.append(face_data[6])
        current_vert += 3
    for face_data in face_hash_4:
        mesh_new_vertices.extend([face_data[0], face_data[1], 0.0, face_data[2], face_data[3], 0.0, face_data[4], face_data[5], 0.0, face_data[6], face_data[7], 0.0])
        mesh_new_face_vertices.extend([current_vert, current_vert + 1, current_vert + 2, current_vert + 3])
        mesh_new_materials.append(face_data[8])
        current_vert += 4

    mesh.vertices.add(len(mesh_new_vertices) // 3)
    mesh.faces.add(len(mesh_new_face_vertices) // 4)

    mesh.vertices.foreach_set("co", mesh_new_vertices)
    mesh.faces.foreach_set("vertices_raw", mesh_new_face_vertices)
    mesh.faces.foreach_set("material_index", mesh_new_materials)

    mesh.update(calc_edges=True)

    # Two objects share the mesh: one for solid fills, one for wire outlines.
    obj_solid = bpy.data.objects.new("uv_temp_solid", mesh)
    obj_wire = bpy.data.objects.new("uv_temp_wire", mesh)
    base_solid = scene.objects.link(obj_solid)
    base_wire = scene.objects.link(obj_wire)
    base_solid.layers[0] = True
    base_wire.layers[0] = True

    # place behind the wire
    obj_solid.location = 0, 0, -1

    # The wire object overrides its first slot with the wire material.
    obj_wire.material_slots[0].link = 'OBJECT'
    obj_wire.material_slots[0].material = material_wire

    # setup the camera: orthographic, framing exactly the 0..1 UV square
    cam = bpy.data.cameras.new("uv_temp")
    cam.type = 'ORTHO'
    cam.ortho_scale = 1.0
    obj_cam = bpy.data.objects.new("uv_temp_cam", cam)
    obj_cam.location = 0.5, 0.5, 1.0
    scene.objects.link(obj_cam)
    scene.camera = obj_cam

    # setup materials: shadeless + transparent so fills honor `opacity`
    for i, mat_solid in enumerate(material_solids):
        if mesh_source.materials and mesh_source.materials[i]:
            mat_solid.diffuse_color = mesh_source.materials[i].diffuse_color

        mat_solid.use_shadeless = True
        mat_solid.use_transparency = True
        mat_solid.alpha = opacity

    material_wire.type = 'WIRE'
    material_wire.use_shadeless = True
    material_wire.diffuse_color = 0, 0, 0

    # scene render settings
    scene.render.use_raytrace = False
    scene.render.alpha_mode = 'STRAIGHT'
    scene.render.color_mode = 'RGBA'

    scene.render.resolution_x = image_width
    scene.render.resolution_y = image_height
    scene.render.resolution_percentage = 100

    # Non-square output: adjust pixel aspect so the UV square stays square.
    if image_width > image_height:
        scene.render.pixel_aspect_y = image_width / image_height
    elif image_width < image_height:
        scene.render.pixel_aspect_x = image_height / image_width

    scene.frame_start = 1
    scene.frame_end = 1

    scene.render.file_format = 'PNG'
    scene.render.filepath = filepath

    # Render into the file, using the temporary scene as a context override.
    data_context = {"blend_data": bpy.context.blend_data, "scene": scene}
    bpy.ops.render.render(data_context, write_still=True)

    # cleanup: remove everything we created, scene/objects first
    bpy.data.scenes.remove(scene)
    bpy.data.objects.remove(obj_cam)
    bpy.data.objects.remove(obj_solid)
    bpy.data.objects.remove(obj_wire)

    bpy.data.cameras.remove(cam)
    bpy.data.meshes.remove(mesh)

    bpy.data.materials.remove(material_wire)
    for mat_solid in material_solids:
        bpy.data.materials.remove(mat_solid)
diff --git a/io_mesh_uv_layout/export_uv_svg.py b/io_mesh_uv_layout/export_uv_svg.py
new file mode 100644
index 00000000..464211cb
--- /dev/null
+++ b/io_mesh_uv_layout/export_uv_svg.py
@@ -0,0 +1,64 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+import bpy
+
+
def write(fw, mesh, image_width, image_height, opacity, face_iter_func):
    """Write the UV layout of *mesh* as an SVG document through *fw*.

    Each face becomes a black-stroked <polygon>; when *opacity* is above zero
    the polygons are filled with their material's diffuse color.
    """
    # for making an XML compatible string
    from xml.sax.saxutils import escape
    from os.path import basename

    # Document header.
    fw('<?xml version="1.0" standalone="no"?>\n')
    fw('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" \n')
    fw(' "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n')
    fw('<svg width="%dpx" height="%dpx" viewBox="0px 0px %dpx %dpx"\n' % (image_width, image_height, image_width, image_height))
    fw(' xmlns="http://www.w3.org/2000/svg" version="1.1">\n')
    desc = "%r, %s, (Blender %s)" % (basename(bpy.data.filepath), mesh.name, bpy.app.version_string)
    fw('<desc>%s</desc>\n' % escape(desc))

    # Pre-compute one fill attribute per material slot.
    fill_default = 'fill="grey"'
    fill_settings = []
    for mat in (mesh.materials if mesh.materials else [None]):
        if mat:
            rgb = tuple(int(c * 255) for c in mat.diffuse_color)
            fill_settings.append('fill="rgb(%d, %d, %d)"' % rgb)
        else:
            fill_settings.append(fill_default)

    faces = mesh.faces
    for face_index, uvs in face_iter_func():
        try:  # rare cases material index is invalid.
            fill = fill_settings[faces[face_index].material_index]
        except IndexError:
            fill = fill_default

        fw('<polygon stroke="black" stroke-width="1px"')
        if opacity > 0.0:
            fw(' %s fill-opacity="%.2g"' % (fill, opacity))

        fw(' points="')

        for uv in uvs:
            # SVG's y axis points down, UV's points up: flip vertically.
            x, y = uv[0], 1.0 - uv[1]
            fw('%.3f,%.3f ' % (x * image_width, y * image_height))
        fw('" />\n')
    fw('\n')
    fw('</svg>\n')
diff --git a/io_scene_3ds/__init__.py b/io_scene_3ds/__init__.py
new file mode 100644
index 00000000..e7934afc
--- /dev/null
+++ b/io_scene_3ds/__init__.py
@@ -0,0 +1,167 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
# Add-on metadata shown in Blender's add-on manager.
bl_info = {
    "name": "Autodesk 3DS format",
    "author": "Bob Holcomb, Campbell Barton",
    "blender": (2, 5, 7),
    "api": 35622,
    "location": "File > Import-Export",
    "description": "Import-Export 3DS, meshes, uvs, materials, textures, cameras & lamps",
    "warning": "",
    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
        "Scripts/Import-Export/Autodesk_3DS",
    "tracker_url": "",
    "support": 'OFFICIAL',
    "category": "Import-Export"}

# To support reload properly, try to access a package var, if it's there, reload everything
if "bpy" in locals():
    import imp
    if "import_3ds" in locals():
        imp.reload(import_3ds)
    if "export_3ds" in locals():
        imp.reload(export_3ds)
+
+
+import bpy
+from bpy.props import StringProperty, FloatProperty, BoolProperty, EnumProperty
+from bpy_extras.io_utils import ImportHelper, ExportHelper, axis_conversion
+
+
class Import3DS(bpy.types.Operator, ImportHelper):
    '''Import from 3DS file format (.3ds)'''
    bl_idname = "import_scene.autodesk_3ds"
    bl_label = 'Import 3DS'

    filename_ext = ".3ds"
    filter_glob = StringProperty(default="*.3ds", options={'HIDDEN'})

    # Fix: corrected typos in the user-visible descriptions
    # ("reacehs" -> "reaches", "assosiated" -> "associated").
    constrain_size = FloatProperty(name="Size Constraint", description="Scale the model by 10 until it reaches the size constraint. Zero Disables.", min=0.0, max=1000.0, soft_min=0.0, soft_max=1000.0, default=10.0)
    use_image_search = BoolProperty(name="Image Search", description="Search subdirectories for any associated images (Warning, may be slow)", default=True)
    use_apply_transform = BoolProperty(name="Apply Transform", description="Workaround for object transformations importing incorrectly", default=True)

    # Source coordinate convention; converted to a matrix in execute().
    axis_forward = EnumProperty(
            name="Forward",
            items=(('X', "X Forward", ""),
                   ('Y', "Y Forward", ""),
                   ('Z', "Z Forward", ""),
                   ('-X', "-X Forward", ""),
                   ('-Y', "-Y Forward", ""),
                   ('-Z', "-Z Forward", ""),
                   ),
            default='Y',
            )

    axis_up = EnumProperty(
            name="Up",
            items=(('X', "X Up", ""),
                   ('Y', "Y Up", ""),
                   ('Z', "Z Up", ""),
                   ('-X', "-X Up", ""),
                   ('-Y', "-Y Up", ""),
                   ('-Z', "-Z Up", ""),
                   ),
            default='Z',
            )

    def execute(self, context):
        from . import import_3ds

        # Operator properties -> loader keywords; the axis enums are folded
        # into a single transform matrix instead of being passed through.
        keywords = self.as_keywords(ignore=("axis_forward", "axis_up", "filter_glob"))

        global_matrix = axis_conversion(from_forward=self.axis_forward, from_up=self.axis_up).to_4x4()
        keywords["global_matrix"] = global_matrix

        return import_3ds.load(self, context, **keywords)
+
+
class Export3DS(bpy.types.Operator, ExportHelper):
    '''Export to 3DS file format (.3ds)'''
    bl_idname = "export_scene.autodesk_3ds"
    bl_label = 'Export 3DS'

    filename_ext = ".3ds"
    filter_glob = StringProperty(default="*.3ds", options={'HIDDEN'})

    # Export only the selected objects instead of the whole scene.
    use_selection = BoolProperty(name="Selection Only", description="Export selected objects only", default=False)

    # Target coordinate convention; converted to a matrix in execute().
    axis_forward = EnumProperty(
            name="Forward",
            items=(('X', "X Forward", ""),
                   ('Y', "Y Forward", ""),
                   ('Z', "Z Forward", ""),
                   ('-X', "-X Forward", ""),
                   ('-Y', "-Y Forward", ""),
                   ('-Z', "-Z Forward", ""),
                   ),
            default='Y',
            )

    axis_up = EnumProperty(
            name="Up",
            items=(('X', "X Up", ""),
                   ('Y', "Y Up", ""),
                   ('Z', "Z Up", ""),
                   ('-X', "-X Up", ""),
                   ('-Y', "-Y Up", ""),
                   ('-Z', "-Z Up", ""),
                   ),
            default='Z',
            )

    def execute(self, context):
        from . import export_3ds

        # Drop UI-only props; check_existing is the overwrite-confirm flag
        # from ExportHelper and is not an exporter keyword.
        keywords = self.as_keywords(ignore=("axis_forward", "axis_up", "filter_glob", "check_existing"))
        global_matrix = axis_conversion(to_forward=self.axis_forward, to_up=self.axis_up).to_4x4()
        keywords["global_matrix"] = global_matrix

        return export_3ds.save(self, context, **keywords)
+
+
# Menu hooks appended to the File > Import / Export menus in register().
def menu_func_export(self, context):
    # Adds the "3D Studio (.3ds)" entry to File > Export.
    layout = self.layout
    layout.operator(Export3DS.bl_idname, text="3D Studio (.3ds)")


def menu_func_import(self, context):
    # Adds the "3D Studio (.3ds)" entry to File > Import.
    layout = self.layout
    layout.operator(Import3DS.bl_idname, text="3D Studio (.3ds)")
+
+
def register():
    # Register every class defined in this module (the two operators above).
    bpy.utils.register_module(__name__)

    bpy.types.INFO_MT_file_import.append(menu_func_import)
    bpy.types.INFO_MT_file_export.append(menu_func_export)


def unregister():
    # Mirror of register(): drop classes and menu entries.
    bpy.utils.unregister_module(__name__)

    bpy.types.INFO_MT_file_import.remove(menu_func_import)
    bpy.types.INFO_MT_file_export.remove(menu_func_export)

# NOTES:
# why add 1 extra vertex? and remove it when done? - "Answer - eekadoodle - would need to re-order UV's without this since face order isnt always what we give blender, BMesh will solve :D"
# disabled scaling to size, this requires exposing bb (easy) and understanding how it works (needs some time)

if __name__ == "__main__":
    register()
diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py
new file mode 100644
index 00000000..b9f5d982
--- /dev/null
+++ b/io_scene_3ds/export_3ds.py
@@ -0,0 +1,1062 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+# Script copyright (C) Bob Holcomb
+# Contributors: Campbell Barton, Bob Holcomb, Richard Lärkäng, Damien McGinnes, Mark Stijnman
+
+"""
+Exporting is based on 3ds loader from www.gametutorials.com(Thanks DigiBen) and using information
+from the lib3ds project (http://lib3ds.sourceforge.net/) sourcecode.
+"""
+
######################################################
# Data Structures
######################################################

# Some of the chunk IDs that we will export.
#----- Primary Chunk, at the beginning of each file
PRIMARY = 0x4D4D

#------ Main Chunks
OBJECTINFO = 0x3D3D  # This gives the version of the mesh and is found right before the material and object information
VERSION = 0x0002  # This gives the version of the .3ds file
KFDATA = 0xB000  # This is the header for all of the key frame info

#------ sub defines of OBJECTINFO
MATERIAL = 45055  # 0xAFFF // This stored the texture info
OBJECT = 16384  # 0x4000 // This stores the faces, vertices, etc...

#>------ sub defines of MATERIAL
MATNAME = 0xA000  # This holds the material name
MATAMBIENT = 0xA010  # Ambient color of the object/material
MATDIFFUSE = 0xA020  # This holds the color of the object/material
MATSPECULAR = 0xA030  # Specular color of the object/material
MATSHINESS = 0xA040  # Shininess (specular hardness)
MATMAP = 0xA200  # This is a header for a new material
MATMAPFILE = 0xA300  # This holds the file name of the texture

RGB1 = 0x0011
RGB2 = 0x0012

#>------ sub defines of OBJECT
OBJECT_MESH = 0x4100  # This lets us know that we are reading a new object
OBJECT_LIGHT = 0x4600  # This lets us know we are reading a light object
OBJECT_CAMERA = 0x4700  # This lets us know we are reading a camera object

#>------ sub defines of CAMERA
OBJECT_CAM_RANGES = 0x4720  # The camera range values

#>------ sub defines of OBJECT_MESH
OBJECT_VERTICES = 0x4110  # The objects vertices
OBJECT_FACES = 0x4120  # The objects faces
OBJECT_MATERIAL = 0x4130  # This is found if the object has a material, either texture map or color
OBJECT_UV = 0x4140  # The UV texture coordinates
OBJECT_TRANS_MATRIX = 0x4160  # The Object Matrix

#>------ sub defines of KFDATA
KFDATA_KFHDR = 0xB00A
KFDATA_KFSEG = 0xB008
KFDATA_KFCURTIME = 0xB009
KFDATA_OBJECT_NODE_TAG = 0xB002

#>------ sub defines of OBJECT_NODE_TAG
OBJECT_NODE_ID = 0xB030
OBJECT_NODE_HDR = 0xB010
OBJECT_PIVOT = 0xB013
OBJECT_INSTANCE_NAME = 0xB011
POS_TRACK_TAG = 0xB020
ROT_TRACK_TAG = 0xB021
SCL_TRACK_TAG = 0xB022
+
+import struct
+
# So 3ds max can open files, limit names to 12 in length
# this is very annoying for filenames!
name_unique = []  # stores str, ascii only
name_mapping = {}  # stores {orig: bytes} mapping


def sane_name(name):
    """Return a unique, ASCII-only bytes name of at most 12 chars for *name*.

    Results are cached in name_mapping so the same input always maps to the
    same output; name_unique guarantees two different inputs never collide.
    """
    name_fixed = name_mapping.get(name)
    if name_fixed is not None:
        return name_fixed

    # strip non ascii chars and truncate to the 12 char limit
    new_name_clean = new_name = name.encode("ASCII", "replace").decode("ASCII")[:12]
    i = 0

    while new_name in name_unique:
        # Fix: keep the ".%.3d" suffix INSIDE the 12 char limit (8 + 4);
        # previously the suffix was appended to the full 12-char base,
        # producing 16-char names that 3ds max may reject.
        new_name = new_name_clean[:8] + ".%.3d" % i
        i += 1

    # note, appending the 'str' version.
    name_unique.append(new_name)
    name_mapping[name] = new_name = new_name.encode("ASCII", "replace")
    return new_name
+
+
def uv_key(uv):
    """Quantize a UV pair to 6 decimals so near-equal coords compare equal."""
    u, v = uv[0], uv[1]
    return round(u, 6), round(v, 6)
+
# size defines: on-disk byte widths of the fixed-size 3ds field types
SZ_SHORT = 2
SZ_INT = 4
SZ_FLOAT = 4
+
+
class _3ds_short(object):
    '''A little-endian unsigned 2-byte integer field for a 3ds file.

    Packed with "<H", i.e. unsigned - despite the "short" name.
    '''
    __slots__ = ("value", )

    def __init__(self, val=0):
        self.value = val

    def get_size(self):
        # Fixed width: always two bytes on disk.
        return SZ_SHORT

    def write(self, file):
        data = struct.pack("<H", self.value)
        file.write(data)

    def __str__(self):
        return str(self.value)
+
+
class _3ds_int(object):
    '''A little-endian 4-byte integer field for a 3ds file.'''
    __slots__ = ("value", )

    def __init__(self, val):
        self.value = val

    def get_size(self):
        # Fixed width: always four bytes on disk.
        return SZ_INT

    def write(self, file):
        data = struct.pack("<I", self.value)
        file.write(data)

    def __str__(self):
        return str(self.value)
+
+
class _3ds_float(object):
    '''A little-endian 4-byte IEEE float field for a 3ds file.'''
    __slots__ = ("value", )

    def __init__(self, val):
        self.value = val

    def get_size(self):
        # Fixed width: always four bytes on disk.
        return SZ_FLOAT

    def write(self, file):
        data = struct.pack("<f", self.value)
        file.write(data)

    def __str__(self):
        return str(self.value)
+
+
class _3ds_string(object):
    '''A zero-terminated byte string field for a 3ds file.'''
    __slots__ = ("value", )

    def __init__(self, val):
        assert(type(val) == bytes)
        self.value = val

    def get_size(self):
        # string bytes plus the terminating NUL
        return (len(self.value) + 1)

    def write(self, file):
        # "<Ns" pads with NUL bytes, which supplies the terminator.
        binary_format = "<%ds" % (len(self.value) + 1)
        file.write(struct.pack(binary_format, self.value))

    def __str__(self):
        return self.value
+
+
class _3ds_point_3d(object):
    '''A three-dimensional point (three little-endian floats) for a 3ds file.'''
    __slots__ = "x", "y", "z"

    def __init__(self, point):
        self.x, self.y, self.z = point

    def get_size(self):
        return 3 * SZ_FLOAT

    def write(self, file):
        data = struct.pack('<3f', self.x, self.y, self.z)
        file.write(data)

    def __str__(self):
        return '(%f, %f, %f)' % (self.x, self.y, self.z)
+
# Used for writing a track
# NOTE(review): dead code - _3ds_point_4d is kept only inside this string
# literal, presumably for future keyframe/track export. It still uses the
# old 2-space style; review before re-enabling.
"""
class _3ds_point_4d(object):
    '''Class representing a four-dimensional point for a 3ds file, for instance a quaternion.'''
    __slots__ = "x","y","z","w"
    def __init__(self, point=(0.0,0.0,0.0,0.0)):
        self.x, self.y, self.z, self.w = point

    def get_size(self):
        return 4*SZ_FLOAT

    def write(self,file):
        data=struct.pack('<4f', self.x, self.y, self.z, self.w)
        file.write(data)

    def __str__(self):
        return '(%f, %f, %f, %f)' % (self.x, self.y, self.z, self.w)
"""
+
+
class _3ds_point_uv(object):
    '''A UV-coordinate (two little-endian floats) for a 3ds file.

    uv is expected to be a 2-tuple of floats.
    '''
    __slots__ = ("uv", )

    def __init__(self, point):
        self.uv = point

    # Fix: the original defined __cmp__ with the Python-2-only builtin cmp(),
    # which raises NameError on Python 3. Provide equivalent rich
    # comparisons (and a matching __hash__) instead.
    def __eq__(self, other):
        return self.uv == other.uv

    def __ne__(self, other):
        return self.uv != other.uv

    def __hash__(self):
        return hash(self.uv)

    def get_size(self):
        return 2 * SZ_FLOAT

    def write(self, file):
        data = struct.pack('<2f', self.uv[0], self.uv[1])
        file.write(data)

    def __str__(self):
        return '(%g, %g)' % self.uv
+
+
class _3ds_rgb_color(object):
    '''A 24-bit RGB color (one byte per channel) for a 3ds file.'''
    __slots__ = "r", "g", "b"

    def __init__(self, col):
        self.r, self.g, self.b = col

    def get_size(self):
        # one byte per channel
        return 3

    def write(self, file):
        # Scale the 0..1 float channels to 0..255 bytes.
        data = struct.pack('<3B', int(255 * self.r), int(255 * self.g), int(255 * self.b))
        file.write(data)

    def __str__(self):
        return '{%f, %f, %f}' % (self.r, self.g, self.b)
+
+
class _3ds_face(object):
    '''A triangle face for a 3ds file: three vertex indices plus a flags word.'''
    __slots__ = ("vindex", )

    def __init__(self, vindex):
        self.vindex = vindex

    def get_size(self):
        return 4 * SZ_SHORT

    def write(self, file):
        v0, v1, v2 = self.vindex[0], self.vindex[1], self.vindex[2]
        # The trailing zero is the face-flags word, only used by 3d studio.
        file.write(struct.pack("<4H", v0, v1, v2, 0))

    def __str__(self):
        return "[%d %d %d]" % (self.vindex[0], self.vindex[1], self.vindex[2])
+
+
class _3ds_array(object):
    '''A counted array of 3ds values.

    On disk: a _3ds_short item count followed by each item in order.
    '''
    __slots__ = "values", "size"

    def __init__(self):
        self.values = []
        self.size = SZ_SHORT  # room for the leading count field

    def add(self, item):
        # Track the running byte size as items are appended.
        self.values.append(item)
        self.size += item.get_size()

    def get_size(self):
        return self.size

    def write(self, file):
        _3ds_short(len(self.values)).write(file)
        for value in self.values:
            value.write(file)

    def __str__(self):
        # Keep dumps readable: report only the item count, not every item.
        return '(%d items)' % len(self.values)
+
+
class _3ds_named_variable(object):
    '''A 3ds value with a name attached; the name is used only by dump().

    A value of None means "unset": it occupies no space and writes nothing.
    '''

    __slots__ = "value", "name"

    def __init__(self, name, val=None):
        self.name = name
        self.value = val

    def get_size(self):
        return self.value.get_size() if self.value is not None else 0

    def write(self, file):
        if self.value is not None:
            self.value.write(file)

    def dump(self, indent):
        # Debug helper: print "name = value" indented by one space per level.
        if self.value is None:
            return
        spaces = " " * indent
        label = self.name if self.name != "" else "[unnamed]"
        print(spaces, label, " = ", self.value)
+
+
#the chunk class
class _3ds_chunk(object):
    '''Class representing a chunk in a 3ds file.

    Chunks contain zero or more variables, followed by zero or more subchunks.
    On disk: 2-byte ID, 4-byte total size, then variables and subchunks.
    '''
    __slots__ = "ID", "size", "variables", "subchunks"

    def __init__(self, id=0):
        self.ID = _3ds_short(id)
        self.size = _3ds_int(0)
        self.variables = []
        self.subchunks = []

    def set_ID(self, id):
        # Fix: `self` was missing from the original signature, so any call
        # to set_ID() raised immediately.
        self.ID = _3ds_short(id)

    def add_variable(self, name, var):
        '''Add a named variable.

        The name is mostly for debugging purposes.'''
        self.variables.append(_3ds_named_variable(name, var))

    def add_subchunk(self, chunk):
        '''Add a subchunk.'''
        self.subchunks.append(chunk)

    def get_size(self):
        '''Calculate the size of the chunk and return it.

        The sizes of the variables and subchunks are used to determine this chunk\'s size.
        Also caches the result in self.size, which write() emits to disk.'''
        tmpsize = self.ID.get_size() + self.size.get_size()
        for variable in self.variables:
            tmpsize += variable.get_size()
        for subchunk in self.subchunks:
            tmpsize += subchunk.get_size()
        self.size.value = tmpsize
        return self.size.value

    def write(self, file):
        '''Write the chunk to a file.

        Uses the write function of the variables and the subchunks to do the actual work.
        NOTE: get_size() must have been called first so self.size is current.'''
        #write header
        self.ID.write(file)
        self.size.write(file)
        for variable in self.variables:
            variable.write(file)
        for subchunk in self.subchunks:
            subchunk.write(file)

    def dump(self, indent=0):
        '''Dump the chunk to standard output, for debugging purposes.

        Uses the dump function of the named variables and the subchunks to do the actual work.'''
        spaces = ""
        for i in range(indent):
            spaces += " "
        print(spaces, "ID=", hex(self.ID.value), "size=", self.get_size())
        for variable in self.variables:
            variable.dump(indent + 1)
        for subchunk in self.subchunks:
            subchunk.dump(indent + 1)
+
+
+######################################################
+# EXPORT
+######################################################
+
def get_material_images(material):
    """Return the images used by *material*'s image-texture slots.

    Blender utility function; returns an empty list when *material* is None
    or has no image textures.
    """
    if not material:
        return []
    return [slot.texture.image
            for slot in material.texture_slots
            if slot and slot.texture.type == 'IMAGE' and slot.texture.image]
+
+
def make_material_subchunk(id, color):
    '''Build a color subchunk with the given chunk *id*.

    Used for color subchunks, such as diffuse color or ambient color subchunks.'''
    color_chunk = _3ds_chunk(RGB1)
    color_chunk.add_variable("color1", _3ds_rgb_color(color))
    mat_sub = _3ds_chunk(id)
    mat_sub.add_subchunk(color_chunk)
    # A second RGB1 "color2" subchunk is optional and deliberately omitted.
    return mat_sub
+
+
def make_material_texture_chunk(id, images):
    """Make a Material Map texture chunk (*id*).

    Each image contributes one MATMAPFILE subchunk holding its sanitized
    base filename.
    """
    mat_sub = _3ds_chunk(id)

    def add_image(img):
        import os
        # Fix: use the `img` parameter; the original read the enclosing
        # loop variable `image` and left its own parameter unused (it only
        # worked by accident of the closure).
        filename = os.path.basename(img.filepath)
        mat_sub_file = _3ds_chunk(MATMAPFILE)
        mat_sub_file.add_variable("mapfile", _3ds_string(sane_name(filename)))
        mat_sub.add_subchunk(mat_sub_file)

    for image in images:
        add_image(image)

    return mat_sub
+
+
def make_material_chunk(material, image):
    '''Make a material chunk out of a blender material.

    material may be None, in which case neutral default colors are written.
    image, when given, is appended to the material name and texture list.
    '''
    material_chunk = _3ds_chunk(MATERIAL)
    name = _3ds_chunk(MATNAME)

    name_str = material.name if material else "None"

    if image:
        name_str += image.name

    name.add_variable("name", _3ds_string(sane_name(name_str)))
    material_chunk.add_subchunk(name)

    if not material:
        # No material: write neutral grey defaults.
        material_chunk.add_subchunk(make_material_subchunk(MATAMBIENT, (0, 0, 0)))
        material_chunk.add_subchunk(make_material_subchunk(MATDIFFUSE, (.8, .8, .8)))
        material_chunk.add_subchunk(make_material_subchunk(MATSPECULAR, (1, 1, 1)))

    else:
        material_chunk.add_subchunk(make_material_subchunk(MATAMBIENT, [a * material.ambient for a in material.diffuse_color]))
        material_chunk.add_subchunk(make_material_subchunk(MATDIFFUSE, material.diffuse_color))
        material_chunk.add_subchunk(make_material_subchunk(MATSPECULAR, material.specular_color))

        images = get_material_images(material)  # returns a list (may be empty, never None)
        if image:
            images.append(image)

        if images:
            material_chunk.add_subchunk(make_material_texture_chunk(MATMAP, images))

    return material_chunk
+
+
class tri_wrapper(object):
    '''A single triangle produced while splitting mesh faces.

    Used when converting faces to triangles.'''

    __slots__ = "vertex_index", "mat", "image", "faceuvs", "offset"

    def __init__(self, vindex=(0, 0, 0), mat=None, image=None, faceuvs=None):
        self.vertex_index = vindex
        self.mat = mat
        self.image = image
        self.faceuvs = faceuvs
        self.offset = [0, 0, 0]  # offset indices


def extract_triangles(mesh):
    '''Return a tri_wrapper list for every face of *mesh*.

    Quads are split into two triangles along the 0-2 diagonal.'''
    triangles = []
    use_uv = len(mesh.uv_textures)

    image_name = None
    for face_index, face in enumerate(mesh.faces):
        verts = face.vertices

        uv_face = mesh.uv_textures.active.data[face_index] if use_uv else None

        if use_uv:
            face_uvs = uv_face.uv
            image = uv_face.image if uv_face else None
            image_name = image.name if image is not None else None

        if len(verts) == 3:
            tri = tri_wrapper((verts[0], verts[1], verts[2]), face.material_index, image_name)
            if use_uv:
                tri.faceuvs = uv_key(face_uvs[0]), uv_key(face_uvs[1]), uv_key(face_uvs[2])
            triangles.append(tri)

        else:  # it's a quad: split along the 0-2 diagonal
            tri_a = tri_wrapper((verts[0], verts[1], verts[2]), face.material_index, image_name)
            tri_b = tri_wrapper((verts[0], verts[2], verts[3]), face.material_index, image_name)

            if use_uv:
                tri_a.faceuvs = uv_key(face_uvs[0]), uv_key(face_uvs[1]), uv_key(face_uvs[2])
                tri_b.faceuvs = uv_key(face_uvs[0]), uv_key(face_uvs[2]), uv_key(face_uvs[3])

            triangles.append(tri_a)
            triangles.append(tri_b)

    return triangles
+
+
def remove_face_uv(verts, tri_list):
    '''Remove face UV coordinates from a list of triangles.

    Since 3ds files only support one pair of uv coordinates for each vertex, face uv coordinates
    need to be converted to vertex uv coordinates. That means that vertices need to be duplicated when
    there are multiple uv coordinates per vertex.

    Returns (vert_array, uv_array, tri_list): parallel 3ds arrays of the
    duplicated vertices and their uvs, plus the triangles re-indexed into
    that flattened vertex list (tri_list is modified in place).
    '''

    # initialize a list of UniqueLists, one per vertex:
    #uv_list = [UniqueList() for i in xrange(len(verts))]
    unique_uvs = [{} for i in range(len(verts))]

    # for each face uv coordinate, add it to the UniqueList of the vertex
    for tri in tri_list:
        for i in range(3):
            # store the index into the UniqueList for future reference:
            # offset.append(uv_list[tri.vertex_index[i]].add(_3ds_point_uv(tri.faceuvs[i])))

            context_uv_vert = unique_uvs[tri.vertex_index[i]]
            uvkey = tri.faceuvs[i]

            offset_index__uv_3ds = context_uv_vert.get(uvkey)

            # A stored (index, uv) tuple is always truthy, so `not` only
            # catches the missing (None) case - first time this uv is seen.
            if not offset_index__uv_3ds:
                offset_index__uv_3ds = context_uv_vert[uvkey] = len(context_uv_vert), _3ds_point_uv(uvkey)

            # remember which duplicate of this vertex the triangle will use
            tri.offset[i] = offset_index__uv_3ds[0]

    # At this point, each vertex has a UniqueList containing every uv coordinate that is associated with it
    # only once.

    # Now we need to duplicate every vertex as many times as it has uv coordinates and make sure the
    # faces refer to the new face indices:
    vert_index = 0
    vert_array = _3ds_array()
    uv_array = _3ds_array()
    index_list = []
    for i, vert in enumerate(verts):
        # index_list[i] = where vertex i's run of duplicates starts
        index_list.append(vert_index)

        pt = _3ds_point_3d(vert.co)  # reuse, should be ok
        uvmap = [None] * len(unique_uvs[i])
        for ii, uv_3ds in unique_uvs[i].values():
            # add a vertex duplicate to the vertex_array for every uv associated with this vertex:
            vert_array.add(pt)
            # add the uv coordinate to the uv array:
            # This for loop does not give uv's ordered by ii, so we create a new map
            # and add the uv's later
            # uv_array.add(uv_3ds)
            uvmap[ii] = uv_3ds

        # Add the uv's in the correct order
        for uv_3ds in uvmap:
            # add the uv coordinate to the uv array:
            uv_array.add(uv_3ds)

        vert_index += len(unique_uvs[i])

    # Make sure the triangle vertex indices now refer to the new vertex list:
    for tri in tri_list:
        for i in range(3):
            tri.offset[i] += index_list[tri.vertex_index[i]]
        tri.vertex_index = tri.offset

    return vert_array, uv_array, tri_list
+
+
def make_faces_chunk(tri_list, mesh, materialDict):
    '''Make a chunk for the faces.

    Also adds subchunks assigning materials to all faces.
    When the mesh has UV textures, materials are grouped per (material,
    image) pair; otherwise one face list is emitted per material slot.
    '''

    materials = mesh.materials
    if not materials:
        mat = None

    face_chunk = _3ds_chunk(OBJECT_FACES)
    face_list = _3ds_array()

    if mesh.uv_textures:
        # Gather materials used in this mesh - mat/image pairs
        unique_mats = {}
        for i, tri in enumerate(tri_list):

            face_list.add(_3ds_face(tri.vertex_index))

            if materials:
                mat = materials[tri.mat]
                if mat:
                    mat = mat.name

            img = tri.image

            # Fix: narrowed the original bare `except:` - only a missing
            # (mat, img) key is expected here.
            try:
                context_mat_face_array = unique_mats[mat, img][1]
            except KeyError:
                name_str = mat if mat else "None"
                if img:
                    name_str += img

                context_mat_face_array = _3ds_array()
                unique_mats[mat, img] = _3ds_string(sane_name(name_str)), context_mat_face_array

            context_mat_face_array.add(_3ds_short(i))
            # obj_material_faces[tri.mat].add(_3ds_short(i))

        face_chunk.add_variable("faces", face_list)
        for mat_name, mat_faces in unique_mats.values():
            obj_material_chunk = _3ds_chunk(OBJECT_MATERIAL)
            obj_material_chunk.add_variable("name", mat_name)
            obj_material_chunk.add_variable("face_list", mat_faces)
            face_chunk.add_subchunk(obj_material_chunk)

    else:
        # No UVs: one face-index array per (non-None) material slot.
        obj_material_faces = []
        obj_material_names = []
        for m in materials:
            if m:
                obj_material_names.append(_3ds_string(sane_name(m.name)))
                obj_material_faces.append(_3ds_array())
        n_materials = len(obj_material_names)

        for i, tri in enumerate(tri_list):
            face_list.add(_3ds_face(tri.vertex_index))
            if (tri.mat < n_materials):
                obj_material_faces[tri.mat].add(_3ds_short(i))

        face_chunk.add_variable("faces", face_list)
        for i in range(n_materials):
            obj_material_chunk = _3ds_chunk(OBJECT_MATERIAL)
            obj_material_chunk.add_variable("name", obj_material_names[i])
            obj_material_chunk.add_variable("face_list", obj_material_faces[i])
            face_chunk.add_subchunk(obj_material_chunk)

    return face_chunk
+
+
def make_vert_chunk(vert_array):
    '''Wrap an array of vertex positions in an OBJECT_VERTICES chunk.'''
    chunk_out = _3ds_chunk(OBJECT_VERTICES)
    chunk_out.add_variable("vertices", vert_array)
    return chunk_out
+
+
def make_uv_chunk(uv_array):
    '''Wrap an array of UV coordinates in an OBJECT_UV chunk.'''
    chunk_out = _3ds_chunk(OBJECT_UV)
    chunk_out.add_variable("uv coords", uv_array)
    return chunk_out
+
+
def make_mesh_chunk(mesh, materialDict):
    '''Build the OBJECT_MESH chunk (vertices, faces, optional UVs) for *mesh*.'''

    # Triangulate first; everything below works on the triangle list.
    tri_list = extract_triangles(mesh)

    if len(mesh.uv_textures):
        # Per-face UVs force vertex duplication so each exported vertex
        # carries exactly one UV; this also rewrites triangle indices.
        vert_array, uv_array, tri_list = remove_face_uv(mesh.vertices, tri_list)
    else:
        # No face UVs: vertices map across one-to-one.
        vert_array = _3ds_array()
        for vtx in mesh.vertices:
            vert_array.add(_3ds_point_3d(vtx.co))

        if len(mesh.sticky):
            # per-vertex ("sticky") UV coordinates
            uv_array = _3ds_array()
            for sticky_uv in mesh.sticky:
                uv_array.add(_3ds_point_uv(sticky_uv.co))
        else:
            uv_array = None  # mesh has no UVs at all

    # Assemble the mesh chunk from its sub-chunks.
    mesh_chunk = _3ds_chunk(OBJECT_MESH)
    mesh_chunk.add_subchunk(make_vert_chunk(vert_array))
    mesh_chunk.add_subchunk(make_faces_chunk(tri_list, mesh, materialDict))
    if uv_array:
        mesh_chunk.add_subchunk(make_uv_chunk(uv_array))

    return mesh_chunk
+
+""" # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX
+def make_kfdata(start=0, stop=0, curtime=0):
+ '''Make the basic keyframe data chunk'''
+ kfdata = _3ds_chunk(KFDATA)
+
+ kfhdr = _3ds_chunk(KFDATA_KFHDR)
+ kfhdr.add_variable("revision", _3ds_short(0))
+ # Not really sure what filename is used for, but it seems it is usually used
+ # to identify the program that generated the .3ds:
+ kfhdr.add_variable("filename", _3ds_string("Blender"))
+ kfhdr.add_variable("animlen", _3ds_int(stop-start))
+
+ kfseg = _3ds_chunk(KFDATA_KFSEG)
+ kfseg.add_variable("start", _3ds_int(start))
+ kfseg.add_variable("stop", _3ds_int(stop))
+
+ kfcurtime = _3ds_chunk(KFDATA_KFCURTIME)
+ kfcurtime.add_variable("curtime", _3ds_int(curtime))
+
+ kfdata.add_subchunk(kfhdr)
+ kfdata.add_subchunk(kfseg)
+ kfdata.add_subchunk(kfcurtime)
+ return kfdata
+"""
+
+"""
+def make_track_chunk(ID, obj):
+ '''Make a chunk for track data.
+
+ Depending on the ID, this will construct a position, rotation or scale track.'''
+ track_chunk = _3ds_chunk(ID)
+ track_chunk.add_variable("track_flags", _3ds_short())
+ track_chunk.add_variable("unknown", _3ds_int())
+ track_chunk.add_variable("unknown", _3ds_int())
+ track_chunk.add_variable("nkeys", _3ds_int(1))
+ # Next section should be repeated for every keyframe, but for now, animation is not actually supported.
+ track_chunk.add_variable("tcb_frame", _3ds_int(0))
+ track_chunk.add_variable("tcb_flags", _3ds_short())
+ if obj.type=='Empty':
+ if ID==POS_TRACK_TAG:
+ # position vector:
+ track_chunk.add_variable("position", _3ds_point_3d(obj.getLocation()))
+ elif ID==ROT_TRACK_TAG:
+ # rotation (quaternion, angle first, followed by axis):
+ q = obj.getEuler().to_quaternion() # XXX, todo!
+ track_chunk.add_variable("rotation", _3ds_point_4d((q.angle, q.axis[0], q.axis[1], q.axis[2])))
+ elif ID==SCL_TRACK_TAG:
+ # scale vector:
+ track_chunk.add_variable("scale", _3ds_point_3d(obj.getSize()))
+ else:
+ # meshes have their transformations applied before
+ # exporting, so write identity transforms here:
+ if ID==POS_TRACK_TAG:
+ # position vector:
+ track_chunk.add_variable("position", _3ds_point_3d((0.0,0.0,0.0)))
+ elif ID==ROT_TRACK_TAG:
+ # rotation (quaternion, angle first, followed by axis):
+ track_chunk.add_variable("rotation", _3ds_point_4d((0.0, 1.0, 0.0, 0.0)))
+ elif ID==SCL_TRACK_TAG:
+ # scale vector:
+ track_chunk.add_variable("scale", _3ds_point_3d((1.0, 1.0, 1.0)))
+
+ return track_chunk
+"""
+
+"""
+def make_kf_obj_node(obj, name_to_id):
+ '''Make a node chunk for a Blender object.
+
+ Takes the Blender object as a parameter. Object id's are taken from the dictionary name_to_id.
+ Blender Empty objects are converted to dummy nodes.'''
+
+ name = obj.name
+ # main object node chunk:
+ kf_obj_node = _3ds_chunk(KFDATA_OBJECT_NODE_TAG)
+ # chunk for the object id:
+ obj_id_chunk = _3ds_chunk(OBJECT_NODE_ID)
+ # object id is from the name_to_id dictionary:
+ obj_id_chunk.add_variable("node_id", _3ds_short(name_to_id[name]))
+
+ # object node header:
+ obj_node_header_chunk = _3ds_chunk(OBJECT_NODE_HDR)
+ # object name:
+ if obj.type == 'Empty':
+ # Empties are called "$$$DUMMY" and use the OBJECT_INSTANCE_NAME chunk
+ # for their name (see below):
+ obj_node_header_chunk.add_variable("name", _3ds_string("$$$DUMMY"))
+ else:
+ # Add the name:
+ obj_node_header_chunk.add_variable("name", _3ds_string(sane_name(name)))
+ # Add Flag variables (not sure what they do):
+ obj_node_header_chunk.add_variable("flags1", _3ds_short(0))
+ obj_node_header_chunk.add_variable("flags2", _3ds_short(0))
+
+ # Check parent-child relationships:
+ parent = obj.parent
+ if (parent is None) or (parent.name not in name_to_id):
+ # If no parent, or the parents name is not in the name_to_id dictionary,
+ # parent id becomes -1:
+ obj_node_header_chunk.add_variable("parent", _3ds_short(-1))
+ else:
+ # Get the parent's id from the name_to_id dictionary:
+ obj_node_header_chunk.add_variable("parent", _3ds_short(name_to_id[parent.name]))
+
+ # Add pivot chunk:
+ obj_pivot_chunk = _3ds_chunk(OBJECT_PIVOT)
+ obj_pivot_chunk.add_variable("pivot", _3ds_point_3d(obj.getLocation()))
+ kf_obj_node.add_subchunk(obj_pivot_chunk)
+
+ # add subchunks for object id and node header:
+ kf_obj_node.add_subchunk(obj_id_chunk)
+ kf_obj_node.add_subchunk(obj_node_header_chunk)
+
+ # Empty objects need to have an extra chunk for the instance name:
+ if obj.type == 'Empty':
+ obj_instance_name_chunk = _3ds_chunk(OBJECT_INSTANCE_NAME)
+ obj_instance_name_chunk.add_variable("name", _3ds_string(sane_name(name)))
+ kf_obj_node.add_subchunk(obj_instance_name_chunk)
+
+ # Add track chunks for position, rotation and scale:
+ kf_obj_node.add_subchunk(make_track_chunk(POS_TRACK_TAG, obj))
+ kf_obj_node.add_subchunk(make_track_chunk(ROT_TRACK_TAG, obj))
+ kf_obj_node.add_subchunk(make_track_chunk(SCL_TRACK_TAG, obj))
+
+ return kf_obj_node
+"""
+
+
def save(operator,
         context, filepath="",
         use_selection=True,
         global_matrix=None,
         ):
    '''Save the Blender scene to a 3ds file.

    operator       invoking operator (unused; kept for the exporter API).
    context        Blender context; context.scene is exported.
    filepath       output .3ds path.
    use_selection  when True export only selected, visible objects.
    global_matrix  optional transform applied to all geometry
                   (identity when None).

    Returns {'FINISHED'}.
    '''

    import bpy
    import mathutils

    import time
    from bpy_extras.io_utils import create_derived_objects, free_derived_objects

    # Time the export
    time1 = time.clock()
#   Blender.Window.WaitCursor(1)

    if global_matrix is None:
        global_matrix = mathutils.Matrix()

    if bpy.ops.object.mode_set.poll():
        bpy.ops.object.mode_set(mode='OBJECT')

    # Initialize the main chunk (primary):
    primary = _3ds_chunk(PRIMARY)
    # Add version chunk:
    version_chunk = _3ds_chunk(VERSION)
    version_chunk.add_variable("version", _3ds_int(3))
    primary.add_subchunk(version_chunk)

    # init main object info chunk:
    object_info = _3ds_chunk(OBJECTINFO)

    ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX
    # init main key frame data chunk:
    kfdata = make_kfdata()
    '''

    # Make a list of all materials used in the selected meshes (use a dictionary,
    # each material is added once):
    materialDict = {}
    mesh_objects = []

    scene = context.scene

    if use_selection:
        objects = (ob for ob in scene.objects if ob.is_visible(scene) and ob.select)
    else:
        objects = (ob for ob in scene.objects if ob.is_visible(scene))

    for ob in objects:
        # get derived objects (e.g. dupli-generated geometry)
        free, derived = create_derived_objects(scene, ob)

        if derived is None:
            continue

        for ob_derived, mat in derived:
            # NOTE(review): filters on ob.type, not ob_derived.type — assumes
            # derived objects share the parent's type; confirm intended.
            if ob.type not in ('MESH', 'CURVE', 'SURFACE', 'FONT', 'META'):
                continue

            try:
                data = ob_derived.to_mesh(scene, True, 'PREVIEW')
            except:
                data = None

            if data:
                data.transform(global_matrix * mat)
#               data.transform(mat, recalc_normals=False)
                mesh_objects.append((ob_derived, data))
                mat_ls = data.materials
                mat_ls_len = len(mat_ls)

                # get material/image tuples.
                if len(data.uv_textures):
#               if data.faceUV:
                    if not mat_ls:
                        mat = mat_name = None

                    for f, uf in zip(data.faces, data.uv_textures.active.data):
                        if mat_ls:
                            mat_index = f.material_index
                            if mat_index >= mat_ls_len:
                                # NOTE(review): 'f.mat' looks like a leftover
                                # 2.4x attribute — confirm this assignment is
                                # still meaningful.
                                mat_index = f.mat = 0
                            mat = mat_ls[mat_index]
                            mat_name = None if mat is None else mat.name
                        # else there already set to none

                        img = uf.image
                        img_name = None if img is None else img.name

                        materialDict.setdefault((mat_name, img_name), (mat, img))

                else:
                    for mat in mat_ls:
                        if mat:  # material may be None so check its not.
                            materialDict.setdefault((mat.name, None), (mat, None))

                    # Clamp out-of-range material indices to slot 0.
                    for f in data.faces:
                        if f.material_index >= mat_ls_len:
#                       if f.mat >= mat_ls_len:
                            f.material_index = 0
                            # f.mat = 0

        if free:
            free_derived_objects(ob)

    # Make material chunks for all materials used in the meshes:
    for mat_and_image in materialDict.values():
        object_info.add_subchunk(make_material_chunk(mat_and_image[0], mat_and_image[1]))

    # Give all objects a unique ID and build a dictionary from object name to object id:
    """
    name_to_id = {}
    for ob, data in mesh_objects:
        name_to_id[ob.name]= len(name_to_id)
    #for ob in empty_objects:
    #    name_to_id[ob.name]= len(name_to_id)
    """

    # Create object chunks for all meshes:
    # (removed a dead loop counter here: 'i = 0' ... 'i += i' stayed 0
    # forever and was never read)
    for ob, blender_mesh in mesh_objects:
        # create a new object chunk
        object_chunk = _3ds_chunk(OBJECT)

        # set the object name
        object_chunk.add_variable("name", _3ds_string(sane_name(ob.name)))

        # make a mesh chunk out of the mesh:
        object_chunk.add_subchunk(make_mesh_chunk(blender_mesh, materialDict))
        object_info.add_subchunk(object_chunk)

        ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX
        # make a kf object node for the object:
        kfdata.add_subchunk(make_kf_obj_node(ob, name_to_id))
        '''

        # drop the temporary to_mesh() copy once it is written into the chunk
        if not blender_mesh.users:
            bpy.data.meshes.remove(blender_mesh)
#       blender_mesh.vertices = None

    # Create chunks for all empties:
    ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX
    for ob in empty_objects:
        # Empties only require a kf object node:
        kfdata.add_subchunk(make_kf_obj_node(ob, name_to_id))
        pass
    '''

    # Add main object info chunk to primary chunk:
    primary.add_subchunk(object_info)

    ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX
    # Add main keyframe data chunk to primary chunk:
    primary.add_subchunk(kfdata)
    '''

    # At this point, the chunk hierarchy is completely built.

    # Compute (and cache) chunk sizes before writing:
    primary.get_size()

    # Recursively write the chunks; 'with' guarantees the handle is closed
    # even if writing fails (the original used bare open()/close()).
    with open(filepath, 'wb') as file:
        primary.write(file)

    # Clear name mapping vars, could make locals too
    name_unique[:] = []
    name_mapping.clear()

    # Debugging only: report the exporting time:
#   Blender.Window.WaitCursor(0)
    print("3ds export time: %.2f" % (time.clock() - time1))

    # Debugging only: dump the chunk hierarchy:
    # primary.dump()

    return {'FINISHED'}
diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py
new file mode 100644
index 00000000..803b8e53
--- /dev/null
+++ b/io_scene_3ds/import_3ds.py
@@ -0,0 +1,935 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+# Script copyright (C) Bob Holcomb
+# Contributors: Bob Holcomb, Richard Lärkäng, Damien McGinnes, Campbell Barton, Mario Lapin, Dominique Lorre
+
+import os
+import time
+import struct
+
+import bpy
+import mathutils
+
+BOUNDS_3DS = []
+
+
+######################################################
+# Data Structures
+######################################################
+
+#Some of the chunks that we will see
+#----- Primary Chunk, at the beginning of each file
+PRIMARY = 0x4D4D
+
+#------ Main Chunks
+OBJECTINFO = 0x3D3D # This gives the version of the mesh and is found right before the material and object information
+VERSION = 0x0002 # This gives the version of the .3ds file
+EDITKEYFRAME = 0xB000 # This is the header for all of the key frame info
+
+#------ sub defines of OBJECTINFO
+MATERIAL = 0xAFFF # This stored the texture info
+OBJECT = 0x4000 # This stores the faces, vertices, etc...
+
+#>------ sub defines of MATERIAL
+#------ sub defines of MATERIAL_BLOCK
+MAT_NAME = 0xA000 # This holds the material name
+MAT_AMBIENT = 0xA010 # Ambient color of the object/material
+MAT_DIFFUSE = 0xA020 # This holds the color of the object/material
+MAT_SPECULAR = 0xA030 # SPecular color of the object/material
+MAT_SHINESS = 0xA040 # ??
+MAT_TRANSPARENCY = 0xA050 # Transparency value of material
+MAT_SELF_ILLUM = 0xA080 # Self Illumination value of material
+MAT_WIRE = 0xA085 # Only render's wireframe
+
+MAT_TEXTURE_MAP = 0xA200 # This is a header for a new texture map
+MAT_SPECULAR_MAP = 0xA204 # This is a header for a new specular map
+MAT_OPACITY_MAP = 0xA210 # This is a header for a new opacity map
+MAT_REFLECTION_MAP = 0xA220 # This is a header for a new reflection map
+MAT_BUMP_MAP = 0xA230 # This is a header for a new bump map
+MAT_MAP_FILEPATH = 0xA300 # This holds the file name of the texture
+
+MAT_FLOAT_COLOR = 0x0010 # color defined as 3 floats
+MAT_24BIT_COLOR = 0x0011 # color defined as 3 bytes
+
+#>------ sub defines of OBJECT
+OBJECT_MESH = 0x4100 # This lets us know that we are reading a new object
+OBJECT_LAMP = 0x4600 # This lets us know we are reading a light object
+OBJECT_LAMP_SPOT = 0x4610 # The light is a spotlight.
+OBJECT_LAMP_OFF = 0x4620 # The light is off.
+OBJECT_LAMP_ATTENUATE = 0x4625
+OBJECT_LAMP_RAYSHADE = 0x4627
+OBJECT_LAMP_SHADOWED = 0x4630
+OBJECT_LAMP_LOCAL_SHADOW = 0x4640
+OBJECT_LAMP_LOCAL_SHADOW2 = 0x4641
+OBJECT_LAMP_SEE_CONE = 0x4650
+OBJECT_LAMP_SPOT_RECTANGULAR = 0x4651
+OBJECT_LAMP_SPOT_OVERSHOOT = 0x4652
+OBJECT_LAMP_SPOT_PROJECTOR = 0x4653
+OBJECT_LAMP_EXCLUDE = 0x4654
+OBJECT_LAMP_RANGE = 0x4655
+OBJECT_LAMP_ROLL = 0x4656
+OBJECT_LAMP_SPOT_ASPECT = 0x4657
+OBJECT_LAMP_RAY_BIAS = 0x4658
+OBJECT_LAMP_INNER_RANGE = 0x4659
+OBJECT_LAMP_OUTER_RANGE = 0x465A
+OBJECT_LAMP_MULTIPLIER = 0x465B
+OBJECT_LAMP_AMBIENT_LIGHT = 0x4680
+
+OBJECT_CAMERA = 0x4700 # This lets us know we are reading a camera object
+
+#>------ sub defines of CAMERA
+OBJECT_CAM_RANGES = 0x4720 # The camera range values
+
+#>------ sub defines of OBJECT_MESH
+OBJECT_VERTICES = 0x4110 # The objects vertices
+OBJECT_FACES = 0x4120 # The objects faces
+OBJECT_MATERIAL = 0x4130 # This is found if the object has a material, either texture map or color
+OBJECT_UV = 0x4140 # The UV texture coordinates
+OBJECT_TRANS_MATRIX = 0x4160 # The Object Matrix
+
+#>------ sub defines of EDITKEYFRAME
+ED_KEY_AMBIENT_NODE = 0xB001
+ED_KEY_OBJECT_NODE = 0xB002
+ED_KEY_CAMERA_NODE = 0xB003
+ED_KEY_TARGET_NODE = 0xB004
+ED_KEY_LIGHT_NODE = 0xB005
+ED_KEY_L_TARGET_NODE = 0xB006
+ED_KEY_SPOTLIGHT_NODE = 0xB007
+#>------ sub defines of ED_KEY_OBJECT_NODE
+# EK_OB_KEYFRAME_SEG = 0xB008
+# EK_OB_KEYFRAME_CURTIME = 0xB009
+# EK_OB_KEYFRAME_HEADER = 0xB00A
+EK_OB_NODE_HEADER = 0xB010
+EK_OB_INSTANCE_NAME = 0xB011
+# EK_OB_PRESCALE = 0xB012
+EK_OB_PIVOT = 0xB013
+# EK_OB_BOUNDBOX = 0xB014
+# EK_OB_MORPH_SMOOTH = 0xB015
+EK_OB_POSITION_TRACK = 0xB020
+EK_OB_ROTATION_TRACK = 0xB021
+EK_OB_SCALE_TRACK = 0xB022
+# EK_OB_CAMERA_FOV_TRACK = 0xB023
+# EK_OB_CAMERA_ROLL_TRACK = 0xB024
+# EK_OB_COLOR_TRACK = 0xB025
+# EK_OB_MORPH_TRACK = 0xB026
+# EK_OB_HOTSPOT_TRACK = 0xB027
+# EK_OB_FALLOF_TRACK = 0xB028
+# EK_OB_HIDE_TRACK = 0xB029
+# EK_OB_NODE_ID = 0xB030
+
+ROOT_OBJECT = 0xFFFF
+
+global scn
+scn = None
+
+object_dictionary = {}
+object_matrix = {}
+
+
+#the chunk class
class chunk:
    '''Header record for a single 3ds chunk: id, byte length, read cursor.'''

    # wire layout: unsigned short id + unsigned int length, little-endian;
    # bytes_read is local bookkeeping and is never read from the file
    binary_format = "<HI"

    ID = 0
    length = 0
    bytes_read = 0

    def __init__(self):
        '''Start with a blank header; read_chunk() fills it in.'''
        self.ID = 0
        self.length = 0
        self.bytes_read = 0

    def dump(self):
        '''Print the header fields (debugging aid).'''
        print('ID: ', self.ID)
        print('ID in hex: ', hex(self.ID))
        print('length: ', self.length)
        print('bytes_read: ', self.bytes_read)
+
+
def read_chunk(file, chunk):
    '''Fill *chunk*'s ID and length from the next 6 header bytes of *file*.'''
    header = file.read(struct.calcsize(chunk.binary_format))
    chunk.ID, chunk.length = struct.unpack(chunk.binary_format, header)
    # the 6-byte header itself counts toward this chunk's read total
    chunk.bytes_read = 6

    # if debugging
    # chunk.dump()
+
+
def read_string(file):
    '''Read a NUL-terminated string from *file*, one byte at a time.

    Returns (decoded_string, bytes_consumed); the terminator is consumed
    and counted but not included in the string.
    '''
    buf = bytearray()
    next_byte = lambda: struct.unpack('<c', file.read(1))[0]
    # iter() with a sentinel stops right after the b'\x00' terminator
    for ch in iter(next_byte, b'\x00'):
        buf += ch
    return str(buf, "utf-8", "replace"), len(buf) + 1
+
+######################################################
+# IMPORT
+######################################################
+
+
def process_next_object_chunk(file, previous_chunk):
    '''Scan the sub-chunks contained in *previous_chunk*.

    NOTE(review): the loop reads chunk headers but never advances
    previous_chunk.bytes_read, so if entered with unread bytes it would
    loop forever; it looks like an unused stub — confirm before calling.
    '''
    new_chunk = chunk()
    temp_chunk = chunk()  # NOTE(review): never used in this body

    while (previous_chunk.bytes_read < previous_chunk.length):
        #read the next chunk
        read_chunk(file, new_chunk)
+
+
def skip_to_end(file, skip_chunk):
    '''Read and discard the unread remainder of *skip_chunk* from *file*.'''
    remaining = skip_chunk.length - skip_chunk.bytes_read
    # calcsize("<n>c") is simply n, so this consumes the leftover bytes
    file.read(struct.calcsize("%ic" % remaining))
    skip_chunk.bytes_read += remaining
+
+
def add_texture_to_material(image, texture, material, mapto):
    '''Attach *texture* (optionally using *image*) to *material*.

    mapto selects the influence channel ("COLOR", "SPECULARITY", "ALPHA"
    or "NORMAL"); any other value falls back to "COLOR" with a warning.
    '''
    #print('assigning %s to %s' % (texture, material))

    if mapto not in ("COLOR", "SPECULARITY", "ALPHA", "NORMAL"):
        # fixed broken escape: the warning started with '/t' instead of '\t'
        print('\tError: Cannot map to "%s"\n\tassuming diffuse color. modify material "%s" later.' % (mapto, material.name))
        mapto = "COLOR"

    if image:
        texture.image = image

    mtex = material.texture_slots.add()
    mtex.texture = texture
    mtex.texture_coords = 'UV'
    mtex.use_map_color_diffuse = False

    if mapto == 'COLOR':
        mtex.use_map_color_diffuse = True
    elif mapto == 'SPECULARITY':
        mtex.use_map_specular = True
    elif mapto == 'ALPHA':
        mtex.use_map_alpha = True
    elif mapto == 'NORMAL':
        mtex.use_map_normal = True
+
+
+def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH):
+ from bpy_extras.image_utils import load_image
+
+ #print previous_chunk.bytes_read, 'BYTES READ'
+ contextObName = None
+ contextLamp = [None, None] # object, Data
+ contextMaterial = None
+ contextMatrix_rot = None # Blender.mathutils.Matrix(); contextMatrix.identity()
+ #contextMatrix_tx = None # Blender.mathutils.Matrix(); contextMatrix.identity()
+ contextMesh_vertls = None # flat array: (verts * 3)
+ contextMesh_facels = None
+ contextMeshMaterials = [] # (matname, [face_idxs])
+ contextMeshUV = None # flat array (verts * 2)
+
+ TEXTURE_DICT = {}
+ MATDICT = {}
+# TEXMODE = Mesh.FaceModes['TEX']
+
+ # Localspace variable names, faster.
+ STRUCT_SIZE_1CHAR = struct.calcsize('c')
+ STRUCT_SIZE_2FLOAT = struct.calcsize('2f')
+ STRUCT_SIZE_3FLOAT = struct.calcsize('3f')
+ STRUCT_SIZE_4FLOAT = struct.calcsize('4f')
+ STRUCT_SIZE_UNSIGNED_SHORT = struct.calcsize('H')
+ STRUCT_SIZE_4UNSIGNED_SHORT = struct.calcsize('4H')
+ STRUCT_SIZE_4x3MAT = struct.calcsize('ffffffffffff')
+ _STRUCT_SIZE_4x3MAT = struct.calcsize('fffffffffffff')
+ # STRUCT_SIZE_4x3MAT = calcsize('ffffffffffff')
+ # print STRUCT_SIZE_4x3MAT, ' STRUCT_SIZE_4x3MAT'
+ # only init once
+ object_list = [] # for hierarchy
+ object_parent = [] # index of parent in hierarchy, 0xFFFF = no parent
+ pivot_list = [] # pivots with hierarchy handling
+
    def putContextMesh(myContextMesh_vertls, myContextMesh_facels, myContextMeshMaterials):
        # Build a Blender mesh + object from the accumulated context state and
        # link it into the scene.  Reads the enclosing scope's contextObName,
        # contextMeshUV, contextMatrix_rot, MATDICT and TEXTURE_DICT.
        # NOTE(review): SCN is not defined anywhere in this visible chunk —
        # presumably a module-level scene handle set by the caller; confirm.
        bmesh = bpy.data.meshes.new(contextObName)

        if myContextMesh_facels is None:
            myContextMesh_facels = []

        if myContextMesh_vertls:

            # vertls is a flat (x, y, z, x, y, z, ...) float sequence
            bmesh.vertices.add(len(myContextMesh_vertls) // 3)
            bmesh.faces.add(len(myContextMesh_facels))
            bmesh.vertices.foreach_set("co", myContextMesh_vertls)

            # 3ds faces are triangles; pad to Blender's 4-index raw layout.
            # A zero in the last slot would terminate the face, so when
            # v3 == 0 the triangle is rotated (the "eekadoodle" fix).
            eekadoodle_faces = []
            for v1, v2, v3 in myContextMesh_facels:
                eekadoodle_faces.extend([v3, v1, v2, 0] if v3 == 0 else [v1, v2, v3, 0])
            bmesh.faces.foreach_set("vertices_raw", eekadoodle_faces)

            if bmesh.faces and contextMeshUV:
                bmesh.uv_textures.new()
                uv_faces = bmesh.uv_textures.active.data[:]
            else:
                uv_faces = None

            for mat_idx, (matName, faces) in enumerate(myContextMeshMaterials):
                if matName is None:
                    bmat = None
                else:
                    bmat = MATDICT.get(matName)
                    # in rare cases no materials defined.
                    if bmat:
                        img = TEXTURE_DICT.get(bmat.name)
                    else:
                        print(" warning: material %r not defined!" % matName)
                        bmat = MATDICT[matName] = bpy.data.materials.new(matName)
                        img = None

                bmesh.materials.append(bmat)  # can be None

                # NOTE(review): when matName is None, 'img' keeps its value
                # from the previous iteration (or is unbound on the first) —
                # confirm this is the intended behavior.
                if uv_faces and img:
                    for fidx in faces:
                        bmesh.faces[fidx].material_index = mat_idx
                        uf = uv_faces[fidx]
                        uf.image = img
                        uf.use_image = True
                else:
                    for fidx in faces:
                        bmesh.faces[fidx].material_index = mat_idx

            if uv_faces:
                # contextMeshUV is a flat (u, v, u, v, ...) sequence indexed
                # by vertex; copy per-corner UVs onto each face.
                for fidx, uf in enumerate(uv_faces):
                    face = myContextMesh_facels[fidx]
                    v1, v2, v3 = face

                    # eekadoodle: same rotation as applied to the indices above
                    if v3 == 0:
                        v1, v2, v3 = v3, v1, v2

                    uf.uv1 = contextMeshUV[v1 * 2:(v1 * 2) + 2]
                    uf.uv2 = contextMeshUV[v2 * 2:(v2 * 2) + 2]
                    uf.uv3 = contextMeshUV[v3 * 2:(v3 * 2) + 2]
                    # always a tri

        bmesh.validate()
        bmesh.update()

        ob = bpy.data.objects.new(contextObName, bmesh)
        object_dictionary[contextObName] = ob
        SCN.objects.link(ob)
        importedObjects.append(ob)

        if contextMatrix_rot:
            ob.matrix_local = contextMatrix_rot
            object_matrix[ob] = contextMatrix_rot.copy()
+
+ #a spare chunk
+ new_chunk = chunk()
+ temp_chunk = chunk()
+
+ CreateBlenderObject = False
+
    def read_float_color(temp_chunk):
        # Read an RGB color stored as three little-endian floats (12 bytes)
        # and advance the chunk's read cursor accordingly.
        temp_data = file.read(struct.calcsize('3f'))
        temp_chunk.bytes_read += 12
        return [float(col) for col in struct.unpack('<3f', temp_data)]
+
    def read_byte_color(temp_chunk):
        # Read an RGB color stored as three bytes (0-255 each), normalized
        # to floats in [0, 1]; advances the chunk's read cursor by 3.
        temp_data = file.read(struct.calcsize('3B'))
        temp_chunk.bytes_read += 3
        return [float(col) / 255 for col in struct.unpack('<3B', temp_data)]  # data [0,1,2] == rgb
+
    def read_texture(new_chunk, temp_chunk, name, mapto):
        # Read a material texture-map block: create an image texture named
        # *name*, scan its sub-chunks for the texture file path, and attach
        # the result to contextMaterial on the *mapto* channel.
        new_texture = bpy.data.textures.new(name, type='IMAGE')

        img = None
        while (new_chunk.bytes_read < new_chunk.length):
            #print 'MAT_TEXTURE_MAP..while', new_chunk.bytes_read, new_chunk.length
            read_chunk(file, temp_chunk)

            if (temp_chunk.ID == MAT_MAP_FILEPATH):
                texture_name, read_str_len = read_string(file)
                # also remembered per-material for later face-image assignment
                img = TEXTURE_DICT[contextMaterial.name] = load_image(texture_name, dirname)
                new_chunk.bytes_read += read_str_len  # plus one for the null character that gets removed

            else:
                # unrecognized sub-chunk: discard its payload
                skip_to_end(file, temp_chunk)

            new_chunk.bytes_read += temp_chunk.bytes_read

        # add the map to the material in the right channel
        if img:
            add_texture_to_material(img, new_texture, contextMaterial, mapto)
+
+ dirname = os.path.dirname(file.name)
+
+ #loop through all the data for this chunk (previous chunk) and see what it is
+ while (previous_chunk.bytes_read < previous_chunk.length):
+ #print '\t', previous_chunk.bytes_read, 'keep going'
+ #read the next chunk
+ #print 'reading a chunk'
+ read_chunk(file, new_chunk)
+
+ #is it a Version chunk?
+ if (new_chunk.ID == VERSION):
+ #print 'if (new_chunk.ID == VERSION):'
+ #print 'found a VERSION chunk'
+ #read in the version of the file
+ #it's an unsigned short (H)
+ temp_data = file.read(struct.calcsize('I'))
+ version = struct.unpack('<I', temp_data)[0]
+ new_chunk.bytes_read += 4 # read the 4 bytes for the version number
+ #this loader works with version 3 and below, but may not with 4 and above
+ if (version > 3):
+ print('\tNon-Fatal Error: Version greater than 3, may not load correctly: ', version)
+
+ #is it an object info chunk?
+ elif (new_chunk.ID == OBJECTINFO):
+ #print 'elif (new_chunk.ID == OBJECTINFO):'
+ # print 'found an OBJECTINFO chunk'
+ process_next_chunk(file, new_chunk, importedObjects, IMAGE_SEARCH)
+
+ #keep track of how much we read in the main chunk
+ new_chunk.bytes_read += temp_chunk.bytes_read
+
+ #is it an object chunk?
+ elif (new_chunk.ID == OBJECT):
+
+ if CreateBlenderObject:
+ putContextMesh(contextMesh_vertls, contextMesh_facels, contextMeshMaterials)
+ contextMesh_vertls = []
+ contextMesh_facels = []
+
+ ## preparando para receber o proximo objeto
+ contextMeshMaterials = [] # matname:[face_idxs]
+ contextMeshUV = None
+ #contextMesh.vertexUV = 1 # Make sticky coords.
+ # Reset matrix
+ contextMatrix_rot = None
+ #contextMatrix_tx = None
+
+ CreateBlenderObject = True
+ contextObName, read_str_len = read_string(file)
+ new_chunk.bytes_read += read_str_len
+
+ #is it a material chunk?
+ elif (new_chunk.ID == MATERIAL):
+
+# print("read material")
+
+ #print 'elif (new_chunk.ID == MATERIAL):'
+ contextMaterial = bpy.data.materials.new('Material')
+
+ elif (new_chunk.ID == MAT_NAME):
+ #print 'elif (new_chunk.ID == MAT_NAME):'
+ material_name, read_str_len = read_string(file)
+
+# print("material name", material_name)
+
+ #plus one for the null character that ended the string
+ new_chunk.bytes_read += read_str_len
+
+ contextMaterial.name = material_name.rstrip() # remove trailing whitespace
+ MATDICT[material_name] = contextMaterial
+
+ elif (new_chunk.ID == MAT_AMBIENT):
+ #print 'elif (new_chunk.ID == MAT_AMBIENT):'
+ read_chunk(file, temp_chunk)
+ if (temp_chunk.ID == MAT_FLOAT_COLOR):
+ contextMaterial.mirror_color = read_float_color(temp_chunk)
+# temp_data = file.read(struct.calcsize('3f'))
+# temp_chunk.bytes_read += 12
+# contextMaterial.mirCol = [float(col) for col in struct.unpack('<3f', temp_data)]
+ elif (temp_chunk.ID == MAT_24BIT_COLOR):
+ contextMaterial.mirror_color = read_byte_color(temp_chunk)
+# temp_data = file.read(struct.calcsize('3B'))
+# temp_chunk.bytes_read += 3
+# contextMaterial.mirCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb
+ else:
+ skip_to_end(file, temp_chunk)
+ new_chunk.bytes_read += temp_chunk.bytes_read
+
+ elif (new_chunk.ID == MAT_DIFFUSE):
+ #print 'elif (new_chunk.ID == MAT_DIFFUSE):'
+ read_chunk(file, temp_chunk)
+ if (temp_chunk.ID == MAT_FLOAT_COLOR):
+ contextMaterial.diffuse_color = read_float_color(temp_chunk)
+# temp_data = file.read(struct.calcsize('3f'))
+# temp_chunk.bytes_read += 12
+# contextMaterial.rgbCol = [float(col) for col in struct.unpack('<3f', temp_data)]
+ elif (temp_chunk.ID == MAT_24BIT_COLOR):
+ contextMaterial.diffuse_color = read_byte_color(temp_chunk)
+# temp_data = file.read(struct.calcsize('3B'))
+# temp_chunk.bytes_read += 3
+# contextMaterial.rgbCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb
+ else:
+ skip_to_end(file, temp_chunk)
+
+# print("read material diffuse color", contextMaterial.diffuse_color)
+
+ new_chunk.bytes_read += temp_chunk.bytes_read
+
+ elif (new_chunk.ID == MAT_SPECULAR):
+ #print 'elif (new_chunk.ID == MAT_SPECULAR):'
+ read_chunk(file, temp_chunk)
+ if (temp_chunk.ID == MAT_FLOAT_COLOR):
+ contextMaterial.specular_color = read_float_color(temp_chunk)
+# temp_data = file.read(struct.calcsize('3f'))
+# temp_chunk.bytes_read += 12
+# contextMaterial.mirCol = [float(col) for col in struct.unpack('<3f', temp_data)]
+ elif (temp_chunk.ID == MAT_24BIT_COLOR):
+ contextMaterial.specular_color = read_byte_color(temp_chunk)
+# temp_data = file.read(struct.calcsize('3B'))
+# temp_chunk.bytes_read += 3
+# contextMaterial.mirCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb
+ else:
+ skip_to_end(file, temp_chunk)
+ new_chunk.bytes_read += temp_chunk.bytes_read
+
+ elif (new_chunk.ID == MAT_TEXTURE_MAP):
+ read_texture(new_chunk, temp_chunk, "Diffuse", "COLOR")
+
+ elif (new_chunk.ID == MAT_SPECULAR_MAP):
+ read_texture(new_chunk, temp_chunk, "Specular", "SPECULARITY")
+
+ elif (new_chunk.ID == MAT_OPACITY_MAP):
+ read_texture(new_chunk, temp_chunk, "Opacity", "ALPHA")
+
+ elif (new_chunk.ID == MAT_BUMP_MAP):
+ read_texture(new_chunk, temp_chunk, "Bump", "NORMAL")
+
+ elif (new_chunk.ID == MAT_TRANSPARENCY):
+ #print 'elif (new_chunk.ID == MAT_TRANSPARENCY):'
+ read_chunk(file, temp_chunk)
+ temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+
+ temp_chunk.bytes_read += 2
+ contextMaterial.alpha = 1 - (float(struct.unpack('<H', temp_data)[0]) / 100)
+ new_chunk.bytes_read += temp_chunk.bytes_read
+
+ elif (new_chunk.ID == OBJECT_LAMP): # Basic lamp support.
+
+ temp_data = file.read(STRUCT_SIZE_3FLOAT)
+
+ x, y, z = struct.unpack('<3f', temp_data)
+ new_chunk.bytes_read += STRUCT_SIZE_3FLOAT
+
+ # no lamp in dict that would be confusing
+ contextLamp[1] = bpy.data.lamps.new("Lamp", 'POINT')
+ contextLamp[0] = ob = bpy.data.objects.new("Lamp", contextLamp[1])
+
+ SCN.objects.link(ob)
+ importedObjects.append(contextLamp[0])
+
+ #print 'number of faces: ', num_faces
+ #print x,y,z
+ contextLamp[0].location = (x, y, z)
+# contextLamp[0].setLocation(x,y,z)
+
+ # Reset matrix
+ contextMatrix_rot = None
+ #contextMatrix_tx = None
+ #print contextLamp.name,
+
+ elif (new_chunk.ID == OBJECT_MESH):
+ # print 'Found an OBJECT_MESH chunk'
+ pass
+ elif (new_chunk.ID == OBJECT_VERTICES):
+ '''
+ Worldspace vertex locations
+ '''
+ # print 'elif (new_chunk.ID == OBJECT_VERTICES):'
+ temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+ num_verts = struct.unpack('<H', temp_data)[0]
+ new_chunk.bytes_read += 2
+
+ # print 'number of verts: ', num_verts
+ contextMesh_vertls = struct.unpack('<%df' % (num_verts * 3), file.read(STRUCT_SIZE_3FLOAT * num_verts))
+ new_chunk.bytes_read += STRUCT_SIZE_3FLOAT * num_verts
+ # dummyvert is not used atm!
+
+ #print 'object verts: bytes read: ', new_chunk.bytes_read
+
+ elif (new_chunk.ID == OBJECT_FACES):
+ # print 'elif (new_chunk.ID == OBJECT_FACES):'
+ temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+ num_faces = struct.unpack('<H', temp_data)[0]
+ new_chunk.bytes_read += 2
+ #print 'number of faces: ', num_faces
+
+ # print '\ngetting a face'
+ temp_data = file.read(STRUCT_SIZE_4UNSIGNED_SHORT * num_faces)
+ new_chunk.bytes_read += STRUCT_SIZE_4UNSIGNED_SHORT * num_faces # 4 short ints x 2 bytes each
+ contextMesh_facels = struct.unpack('<%dH' % (num_faces * 4), temp_data)
+ contextMesh_facels = [contextMesh_facels[i - 3:i] for i in range(3, (num_faces * 4) + 3, 4)]
+
+ elif (new_chunk.ID == OBJECT_MATERIAL):
+ # print 'elif (new_chunk.ID == OBJECT_MATERIAL):'
+ material_name, read_str_len = read_string(file)
+ new_chunk.bytes_read += read_str_len # remove 1 null character.
+
+ temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+ num_faces_using_mat = struct.unpack('<H', temp_data)[0]
+ new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
+
+ temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * num_faces_using_mat)
+ new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * num_faces_using_mat
+
+ temp_data = struct.unpack("<%dH" % (num_faces_using_mat), temp_data)
+
+ contextMeshMaterials.append((material_name, temp_data))
+
+ #look up the material in all the materials
+
+ elif (new_chunk.ID == OBJECT_UV):
+ temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+ num_uv = struct.unpack('<H', temp_data)[0]
+ new_chunk.bytes_read += 2
+
+ temp_data = file.read(STRUCT_SIZE_2FLOAT * num_uv)
+ new_chunk.bytes_read += STRUCT_SIZE_2FLOAT * num_uv
+ contextMeshUV = struct.unpack('<%df' % (num_uv * 2), temp_data)
+
+ elif (new_chunk.ID == OBJECT_TRANS_MATRIX):
+ # How do we know the matrix size? 54 == 4x4 48 == 4x3
+ temp_data = file.read(STRUCT_SIZE_4x3MAT)
+ data = list(struct.unpack('<ffffffffffff', temp_data))
+ new_chunk.bytes_read += STRUCT_SIZE_4x3MAT
+
+ contextMatrix_rot = mathutils.Matrix((data[:3] + [0], \
+ data[3:6] + [0], \
+ data[6:9] + [0], \
+ data[9:] + [1], \
+ ))
+
+ elif (new_chunk.ID == MAT_MAP_FILEPATH):
+ texture_name, read_str_len = read_string(file)
+ try:
+ TEXTURE_DICT[contextMaterial.name]
+ except:
+ #img = TEXTURE_DICT[contextMaterial.name]= BPyImage.comprehensiveImageLoad(texture_name, FILEPATH)
+ img = TEXTURE_DICT[contextMaterial.name] = load_image(texture_name, dirname)
+# img = TEXTURE_DICT[contextMaterial.name]= BPyImage.comprehensiveImageLoad(texture_name, FILEPATH, PLACE_HOLDER=False, RECURSIVE=IMAGE_SEARCH)
+
+ new_chunk.bytes_read += read_str_len # plus one for the null character that gets removed
+ elif new_chunk.ID == EDITKEYFRAME:
+ pass
+
+ # including these here means their EK_OB_NODE_HEADER are scanned
+ elif new_chunk.ID in {ED_KEY_AMBIENT_NODE,
+ ED_KEY_OBJECT_NODE,
+ ED_KEY_CAMERA_NODE,
+ ED_KEY_TARGET_NODE,
+ ED_KEY_LIGHT_NODE,
+ ED_KEY_L_TARGET_NODE,
+ ED_KEY_SPOTLIGHT_NODE}: # another object is being processed
+ child = None
+
+ elif new_chunk.ID == EK_OB_NODE_HEADER:
+ object_name, read_str_len = read_string(file)
+ new_chunk.bytes_read += read_str_len
+ temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2)
+ new_chunk.bytes_read += 4
+ temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+ hierarchy = struct.unpack('<H', temp_data)[0]
+ new_chunk.bytes_read += 2
+
+ child = object_dictionary.get(object_name)
+
+ if child is None:
+ child = bpy.data.objects.new(object_name, None) # create an empty object
+ SCN.objects.link(child)
+ importedObjects.append(child)
+
+ object_list.append(child)
+ object_parent.append(hierarchy)
+ pivot_list.append(mathutils.Vector((0.0, 0.0, 0.0)))
+
+ elif new_chunk.ID == EK_OB_INSTANCE_NAME:
+ object_name, read_str_len = read_string(file)
+ # child.name = object_name
+ child.name += "." + object_name
+ object_dictionary[object_name] = child
+ new_chunk.bytes_read += read_str_len
+ # print("new instance object:", object_name)
+
+ elif new_chunk.ID == EK_OB_PIVOT: # translation
+ temp_data = file.read(STRUCT_SIZE_3FLOAT)
+ pivot = struct.unpack('<3f', temp_data)
+ new_chunk.bytes_read += STRUCT_SIZE_3FLOAT
+ pivot_list[len(pivot_list) - 1] = mathutils.Vector(pivot)
+
+ elif new_chunk.ID == EK_OB_POSITION_TRACK: # translation
+ new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 5
+ temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 5)
+ temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+ nkeys = struct.unpack('<H', temp_data)[0]
+ temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+ new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
+ for i in range(nkeys):
+ temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+ nframe = struct.unpack('<H', temp_data)[0]
+ new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
+ temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2)
+ new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
+ temp_data = file.read(STRUCT_SIZE_3FLOAT)
+ loc = struct.unpack('<3f', temp_data)
+ new_chunk.bytes_read += STRUCT_SIZE_3FLOAT
+ if nframe == 0:
+ child.location = loc
+
+ elif new_chunk.ID == EK_OB_ROTATION_TRACK: # rotation
+ new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 5
+ temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 5)
+ temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+ nkeys = struct.unpack('<H', temp_data)[0]
+ temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+ new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
+ for i in range(nkeys):
+ temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+ nframe = struct.unpack('<H', temp_data)[0]
+ new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
+ temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2)
+ new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
+ temp_data = file.read(STRUCT_SIZE_4FLOAT)
+ rad, axis_x, axis_y, axis_z = struct.unpack("<4f", temp_data)
+ new_chunk.bytes_read += STRUCT_SIZE_4FLOAT
+ if nframe == 0:
+ child.rotation_euler = mathutils.Quaternion((axis_x, axis_y, axis_z), -rad).to_euler() # why negative?
+
+ elif new_chunk.ID == EK_OB_SCALE_TRACK: # translation
+ new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 5
+ temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 5)
+ temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+ nkeys = struct.unpack('<H', temp_data)[0]
+ temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+ new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
+ for i in range(nkeys):
+ temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+ nframe = struct.unpack('<H', temp_data)[0]
+ new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
+ temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2)
+ new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
+ temp_data = file.read(STRUCT_SIZE_3FLOAT)
+ sca = struct.unpack('<3f', temp_data)
+ new_chunk.bytes_read += STRUCT_SIZE_3FLOAT
+ if nframe == 0:
+ child.scale = sca
+
+ else: # (new_chunk.ID!=VERSION or new_chunk.ID!=OBJECTINFO or new_chunk.ID!=OBJECT or new_chunk.ID!=MATERIAL):
+ # print 'skipping to end of this chunk'
+ #print("unknown chunk: "+hex(new_chunk.ID))
+ buffer_size = new_chunk.length - new_chunk.bytes_read
+ binary_format = "%ic" % buffer_size
+ temp_data = file.read(struct.calcsize(binary_format))
+ new_chunk.bytes_read += buffer_size
+
+ #update the previous chunk bytes read
+ # print 'previous_chunk.bytes_read += new_chunk.bytes_read'
+ # print previous_chunk.bytes_read, new_chunk.bytes_read
+ previous_chunk.bytes_read += new_chunk.bytes_read
+ ## print 'Bytes left in this chunk: ', previous_chunk.length - previous_chunk.bytes_read
+
+ # FINISHED LOOP
+ # There will be a number of objects still not added
+ if CreateBlenderObject:
+ putContextMesh(contextMesh_vertls, contextMesh_facels, contextMeshMaterials)
+
+ # Assign parents to objects
+ for ind, ob in enumerate(object_list):
+ parent = object_parent[ind]
+ if parent == ROOT_OBJECT:
+ ob.parent = None
+ else:
+ ob.parent = object_list[parent]
+ # pivot_list[ind] += pivot_list[parent] # XXX, not sure this is correct, should parent space matrix be applied before combining?
+ # fix pivots
+ for ind, ob in enumerate(object_list):
+ if ob.type == 'MESH':
+ pivot = pivot_list[ind]
+ pivot_matrix = object_matrix.get(ob, mathutils.Matrix()) # unlikely to fail
+ pivot_matrix = mathutils.Matrix.Translation(-pivot * pivot_matrix.to_3x3())
+ ob.data.transform(pivot_matrix)
+
+
def load_3ds(filepath,
             context,
             IMPORT_CONSTRAIN_BOUNDS=10.0,
             IMAGE_SEARCH=True,
             APPLY_MATRIX=True,
             global_matrix=None):
    """Read a .3ds file and link its objects into the current scene.

    filepath: path of the .3ds file to read.
    context: Blender context; context.scene receives the imported objects.
    IMPORT_CONSTRAIN_BOUNDS: size used by the (currently disabled, see the
        'if 0:' block below) auto-scale-to-bounds step.
    IMAGE_SEARCH: forwarded to process_next_chunk for texture lookup.
    APPLY_MATRIX: when True, bake the inverse local matrix into each mesh's
        vertex data after import.
    global_matrix: optional 4x4 matrix multiplied onto every root object.

    Returns None; prints a fatal-error message and aborts if the file does
    not start with a PRIMARY chunk.
    """
    global SCN

    # XXX
#   if BPyMessages.Error_NoFile(filepath):
#       return

    print("importing 3DS: %r..." % (filepath), end="")

    if bpy.ops.object.select_all.poll():
        bpy.ops.object.select_all(action='DESELECT')

    time1 = time.clock()
#   time1 = Blender.sys.time()

    current_chunk = chunk()

    file = open(filepath, 'rb')

    #here we go!
    # print 'reading the first chunk'
    read_chunk(file, current_chunk)
    # The first chunk must be the PRIMARY chunk or this is not a 3ds file.
    if (current_chunk.ID != PRIMARY):
        print('\tFatal Error: Not a valid 3ds file: %r' % filepath)
        file.close()
        return

    if IMPORT_CONSTRAIN_BOUNDS:
        # min xyz initialised huge, max xyz initialised tiny, so any real
        # bound replaces them.
        BOUNDS_3DS[:] = [1 << 30, 1 << 30, 1 << 30, -1 << 30, -1 << 30, -1 << 30]
    else:
        BOUNDS_3DS[:] = []

    ##IMAGE_SEARCH

    # fixme, make unglobal, clear incase
    object_dictionary.clear()
    object_matrix.clear()

    scn = context.scene
#   scn = bpy.data.scenes.active
    SCN = scn
#   SCN_OBJECTS = scn.objects
#   SCN_OBJECTS.selected = []  # de select all

    importedObjects = []  # Fill this list with objects
    process_next_chunk(file, current_chunk, importedObjects, IMAGE_SEARCH)

    # fixme, make unglobal
    object_dictionary.clear()
    object_matrix.clear()

    # Link the objects into this scene.
    # Layers = scn.Layers

    # REMOVE DUMMYVERT, - remove this in the next release when blenders internal are fixed.

    if APPLY_MATRIX:
        for ob in importedObjects:
            if ob.type == 'MESH':
                me = ob.data
                # Bake the inverse of the object matrix into the mesh data.
                me.transform(ob.matrix_local.inverted())

    # print(importedObjects)
    if global_matrix:
        # Only root objects get the global transform; children inherit it.
        for ob in importedObjects:
            if ob.parent is None:
                ob.matrix_world = ob.matrix_world * global_matrix

    for ob in importedObjects:
        ob.select = True

    # Done DUMMYVERT
    """
    if IMPORT_AS_INSTANCE:
        name = filepath.split('\\')[-1].split('/')[-1]
        # Create a group for this import.
        group_scn = Scene.New(name)
        for ob in importedObjects:
            group_scn.link(ob) # dont worry about the layers

        grp = Blender.Group.New(name)
        grp.objects = importedObjects

        grp_ob = Object.New('Empty', name)
        grp_ob.enableDupGroup = True
        grp_ob.DupGroup = grp
        scn.link(grp_ob)
        grp_ob.Layers = Layers
        grp_ob.sel = 1
    else:
        # Select all imported objects.
        for ob in importedObjects:
            scn.link(ob)
            ob.Layers = Layers
            ob.sel = 1
    """

    # NOTE(review): disabled dead code — relies on pre-2.5 API
    # (makeDisplayList/getBoundBox) and would need porting before re-enabling.
    if 0:
#   if IMPORT_CONSTRAIN_BOUNDS!=0.0:
        # Set bounds from object bounding box
        for ob in importedObjects:
            if ob.type == 'MESH':
#           if ob.type=='Mesh':
                ob.makeDisplayList()  # Why doesn't this update the bounds?
                for v in ob.getBoundBox():
                    for i in (0, 1, 2):
                        if v[i] < BOUNDS_3DS[i]:
                            BOUNDS_3DS[i] = v[i]  # min

                        if v[i] > BOUNDS_3DS[i + 3]:
                            BOUNDS_3DS[i + 3] = v[i]  # max

        # Get the max axis x/y/z
        max_axis = max(BOUNDS_3DS[3] - BOUNDS_3DS[0], BOUNDS_3DS[4] - BOUNDS_3DS[1], BOUNDS_3DS[5] - BOUNDS_3DS[2])
        # print max_axis
        if max_axis < 1 << 30:  # Should never be false but just make sure.

            # Get a new scale factor if set as an option
            SCALE = 1.0
            while (max_axis * SCALE) > IMPORT_CONSTRAIN_BOUNDS:
                SCALE /= 10.0

            # SCALE Matrix
            SCALE_MAT = mathutils.Matrix.Scale(SCALE, 4)

            for ob in importedObjects:
                if ob.parent is None:
                    ob.matrix_world = ob.matrix_world * SCALE_MAT

        # Done constraining to bounds.

    # Select all new objects.
    print(" done in %.4f sec." % (time.clock() - time1))
    file.close()
+
+
def load(operator,
         context,
         filepath="",
         constrain_size=0.0,
         use_image_search=True,
         use_apply_transform=True,
         global_matrix=None,
         ):
    """Operator-facing entry point.

    Translates the UI-level option names into the keyword names that
    load_3ds() expects and reports success to the calling operator.
    """
    options = {
        "IMPORT_CONSTRAIN_BOUNDS": constrain_size,
        "IMAGE_SEARCH": use_image_search,
        "APPLY_MATRIX": use_apply_transform,
        "global_matrix": global_matrix,
    }
    load_3ds(filepath, context, **options)
    return {'FINISHED'}
diff --git a/io_scene_fbx/__init__.py b/io_scene_fbx/__init__.py
new file mode 100644
index 00000000..5da1956e
--- /dev/null
+++ b/io_scene_fbx/__init__.py
@@ -0,0 +1,168 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
# Add-on metadata read by Blender's add-on manager (values are data, not
# executed code — keep keys/values exactly as Blender expects them).
bl_info = {
    "name": "Autodesk FBX format",
    "author": "Campbell Barton",
    "blender": (2, 5, 7),
    "api": 35622,
    "location": "File > Import-Export",
    "description": "Import-Export FBX meshes, UV's, vertex colors, materials, textures, cameras and lamps",
    "warning": "",
    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
        "Scripts/Import-Export/Autodesk_FBX",
    "tracker_url": "",
    "support": 'OFFICIAL',
    "category": "Import-Export"}
+
# To support reload properly, try to access a package var, if it's there, reload everything
# ("bpy" is only already bound when this module is being re-executed by
# Blender's "Reload Scripts", so submodules must be re-imported too).
if "bpy" in locals():
    import imp
    if "export_fbx" in locals():
        imp.reload(export_fbx)
+
+
+import bpy
+from bpy.props import StringProperty, BoolProperty, FloatProperty, EnumProperty
+from bpy_extras.io_utils import ExportHelper, path_reference_mode, axis_conversion
+
+
class ExportFBX(bpy.types.Operator, ExportHelper):
    '''Selection to an ASCII Autodesk FBX'''
    bl_idname = "export_scene.fbx"
    bl_label = "Export FBX"
    bl_options = {'PRESET'}

    filename_ext = ".fbx"
    filter_glob = StringProperty(default="*.fbx", options={'HIDDEN'})

    # List of operator properties, the attributes will be assigned
    # to the class instance from the operator settings before calling.

    use_selection = BoolProperty(name="Selected Objects", description="Export selected objects on visible layers", default=False)
#   EXP_OBS_SCENE = BoolProperty(name="Scene Objects", description="Export all objects in this scene", default=True)
    global_scale = FloatProperty(name="Scale", description="Scale all data, (Note! some imports dont support scaled armatures)", min=0.01, max=1000.0, soft_min=0.01, soft_max=1000.0, default=1.0)

    # Axis conversion settings; combined into one matrix in execute().
    axis_forward = EnumProperty(
            name="Forward",
            items=(('X', "X Forward", ""),
                   ('Y', "Y Forward", ""),
                   ('Z', "Z Forward", ""),
                   ('-X', "-X Forward", ""),
                   ('-Y', "-Y Forward", ""),
                   ('-Z', "-Z Forward", ""),
                   ),
            default='-Z',
            )

    axis_up = EnumProperty(
            name="Up",
            items=(('X', "X Up", ""),
                   ('Y', "Y Up", ""),
                   ('Z', "Z Up", ""),
                   ('-X', "-X Up", ""),
                   ('-Y', "-Y Up", ""),
                   ('-Z', "-Z Up", ""),
                   ),
            default='Y',
            )

    # Multi-select flag set: which object types get written to the file.
    object_types = EnumProperty(
            name="Object Types",
            options={'ENUM_FLAG'},
            items=(('EMPTY', "Empty", ""),
                   ('CAMERA', "Camera", ""),
                   ('LAMP', "Lamp", ""),
                   ('ARMATURE', "Armature", ""),
                   ('MESH', "Mesh", ""),
                   ),
            default={'EMPTY', 'CAMERA', 'LAMP', 'ARMATURE', 'MESH'},
            )

    mesh_apply_modifiers = BoolProperty(name="Apply Modifiers", description="Apply modifiers to mesh objects", default=True)

    mesh_smooth_type = EnumProperty(
            name="Smoothing",
            items=(('OFF', "Off", "Don't write smoothing"),
                   ('FACE', "Face", "Write face smoothing"),
                   ('EDGE', "Edge", "Write edge smoothing"),
                   ),
            default='FACE',
            )

#   EXP_MESH_HQ_NORMALS = BoolProperty(name="HQ Normals", description="Generate high quality normals", default=True)
    # armature animation
    ANIM_ENABLE = BoolProperty(name="Enable Animation", description="Export keyframe animation", default=True)
    ANIM_OPTIMIZE = BoolProperty(name="Optimize Keyframes", description="Remove double keyframes", default=True)
    ANIM_OPTIMIZE_PRECISSION = FloatProperty(name="Precision", description="Tolerence for comparing double keyframes (higher for greater accuracy)", min=1, max=16, soft_min=1, soft_max=16, default=6.0)
#   ANIM_ACTION_ALL = BoolProperty(name="Current Action", description="Use actions currently applied to the armatures (use scene start/end frame)", default=True)
    ANIM_ACTION_ALL = BoolProperty(name="All Actions", description="Use all actions for armatures, if false, use current action", default=False)

    batch_mode = EnumProperty(
            name="Batch Mode",
            items=(('OFF', "Off", "Active scene to file"),
                   ('SCENE', "Scene", "Each scene as a file"),
                   ('GROUP', "Group", "Each group as a file"),
                   ),
            )

    BATCH_OWN_DIR = BoolProperty(name="Own Dir", description="Create a dir for each exported file", default=True)
    use_metadata = BoolProperty(name="Use Metadata", default=True, options={'HIDDEN'})

    path_mode = path_reference_mode

    @property
    def check_extension(self):
        # In batch mode the exporter names its own output files, so the
        # single-file ".fbx" extension check must be skipped.
        return self.batch_mode == 'OFF'

    def execute(self, context):
        """Build the global transform from the operator settings and hand
        everything over to export_fbx.save()."""
        import math
        from mathutils import Matrix
        if not self.filepath:
            raise Exception("filepath not set")

        # Uniform scale on the diagonal, then the forward/up axis conversion.
        global_matrix = Matrix()
        global_matrix[0][0] = global_matrix[1][1] = global_matrix[2][2] = self.global_scale
        global_matrix = global_matrix * axis_conversion(to_forward=self.axis_forward, to_up=self.axis_up).to_4x4()

        # Everything else is forwarded verbatim as keyword arguments.
        keywords = self.as_keywords(ignore=("axis_forward", "axis_up", "global_scale", "check_existing", "filter_glob"))
        keywords["global_matrix"] = global_matrix

        from . import export_fbx
        return export_fbx.save(self, context, **keywords)
+
+
def menu_func(self, context):
    """Draw the FBX entry in Blender's File > Export menu."""
    layout = self.layout
    layout.operator(ExportFBX.bl_idname, text="Autodesk FBX (.fbx)")
+
+
def register():
    """Register every class in this module and add the export-menu entry."""
    bpy.utils.register_module(__name__)

    bpy.types.INFO_MT_file_export.append(menu_func)
+
+
def unregister():
    """Undo register(): remove classes and the export-menu entry."""
    bpy.utils.unregister_module(__name__)

    bpy.types.INFO_MT_file_export.remove(menu_func)
+
# Allow running this file directly (e.g. from Blender's text editor).
if __name__ == "__main__":
    register()
diff --git a/io_scene_fbx/export_fbx.py b/io_scene_fbx/export_fbx.py
new file mode 100644
index 00000000..965941d9
--- /dev/null
+++ b/io_scene_fbx/export_fbx.py
@@ -0,0 +1,2865 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+# Script copyright (C) Campbell Barton
+
+"""
+This script is an exporter to the FBX file format.
+
+http://wiki.blender.org/index.php/Scripts/Manual/Export/autodesk_fbx
+"""
+
+import os
+import time
+import math # math.pi
+import shutil # for file copying
+
+import bpy
+from mathutils import Vector, Matrix
+
+
# FBX stores rotations in degrees while Blender works in radians.
# Call this function just before writing rotation values to the FBX file.
def tuple_rad_to_deg(eul):
    """Convert a 3-sequence of angles in radians to a 3-tuple in degrees.

    Uses math.degrees rather than the old hand-written constant
    57.295779513, which was truncated after 9 decimals and lost precision
    relative to the full double 180/pi.
    """
    return math.degrees(eul[0]), math.degrees(eul[1]), math.degrees(eul[2])
+
+# def strip_path(p):
+# return p.split('\\')[-1].split('/')[-1]
+
# Used to add the scene name into the filepath without using odd chars

# One mapping table per datatype: original Blender name -> sanitized FBX
# name.  Separate tables keep the namespaces independent (sane_name()
# guarantees uniqueness only within a single table).
sane_name_mapping_ob = {}
sane_name_mapping_mat = {}
sane_name_mapping_tex = {}
sane_name_mapping_take = {}
sane_name_mapping_group = {}

# Make sure reserved names are not used
sane_name_mapping_ob['Scene'] = 'Scene_'
+
+
def increment_string(t):
    """Return *t* with its trailing integer incremented by one.

    A string without trailing digits gets "_0" appended instead, so repeated
    calls always yield a fresh name ("bone" -> "bone_0", "bone_0" -> "bone_1").
    """
    # Find where the run of trailing decimal digits begins.
    cut = len(t)
    while cut and t[cut - 1].isdigit():
        cut -= 1
    stem, digits = t[:cut], t[cut:]
    if digits:
        return '%s%d' % (stem, int(digits) + 1)
    return stem + '_0'
+
+
# todo - Disallow the name 'Scene' - it will bugger things up.
def sane_name(data, dct):
    """Return a unique, FBX-safe name for *data* and record it in *dct*.

    data: a Blender datablock (anything with .name), None, or a
        (material, image) pair — materials are keyed together with their
        image so the same material can appear once per texture.
    dct: one of the module-level sane_name_mapping_* tables; the original
        name (or (name, other_name) tuple) is mapped to the result.
    """
    #if not data: return None

    if type(data) == tuple:  # materials are paired up with images
        data, other = data
        use_other = True
    else:
        other = None
        use_other = False

    name = data.name if data else None
    orig_name = name

    if other:
        orig_name_other = other.name
        name = '%s #%s' % (name, orig_name_other)
    else:
        orig_name_other = None

    # dont cache, only ever call once for each data type now,
    # so as to avoid namespace collision between types - like with objects <-> bones
    #try: return dct[name]
    #except: pass

    if not name:
        name = 'unnamed'  # blank string, ASKING FOR TROUBLE!
    else:
        name = bpy.path.clean_name(name)  # use our own

    # Bump a trailing counter until the name is unique within this table.
    while name in iter(dct.values()):
        name = increment_string(name)

    if use_other:  # even if other is None - orig_name_other will be a string or None
        dct[orig_name, orig_name_other] = name
    else:
        dct[orig_name] = name

    return name
+
+
def sane_obname(data):
    # Objects and bones share this table so their FBX names cannot collide.
    return sane_name(data, sane_name_mapping_ob)
+
+
def sane_matname(data):
    # Unique FBX name in the material namespace.
    return sane_name(data, sane_name_mapping_mat)
+
+
def sane_texname(data):
    # Unique FBX name in the texture namespace.
    return sane_name(data, sane_name_mapping_tex)
+
+
def sane_takename(data):
    # Unique FBX name in the animation-take namespace.
    return sane_name(data, sane_name_mapping_take)
+
+
def sane_groupname(data):
    # Unique FBX name in the group namespace.
    return sane_name(data, sane_name_mapping_group)
+
+
def mat4x4str(mat):
    """Serialize a 4x4 matrix as 16 comma-separated floats with 15 decimal
    places, in iteration (row) order — the form the ASCII FBX file expects.
    """
    cells = tuple(cell for row in mat for cell in row)
    # 16 '%.15f' specifiers joined by commas; exactly 16 values required.
    fmt = ('%.15f,' * 16)[:-1]
    return fmt % cells
+
+
+# XXX not used
+# duplicated in OBJ exporter
def getVertsFromGroup(me, group_index):
    """Return [(vertex_index, weight), ...] for every vertex of mesh *me*
    assigned to the vertex group *group_index* (empty list if none)."""
    return [(idx, grp.weight)
            for idx, vert in enumerate(me.vertices)
            for grp in vert.groups
            if grp.group == group_index]
+
+
# ob must be OB_MESH
def BPyMesh_meshWeight2List(ob, me):
    """Return (groupNames, vWeightList) for mesh object *ob* with mesh data *me*.

    groupNames is the list of vertex-group names; vWeightList holds one list
    per vertex, aligned with groupNames, giving that vertex's weight in each
    group (0.0 where unassigned).  Both lists can be modified and then used
    with list2MeshWeight to apply the changes.
    """
    # Clear the vert group.
    groupNames = [g.name for g in ob.vertex_groups]
    len_groupNames = len(groupNames)

    if not len_groupNames:
        # No vertex groups: empty name list plus a vert-aligned list of empty
        # weight lists.  (The old code returned these two swapped, which
        # contradicted the documented (names, weights) order that callers
        # such as meshNormalizedWeights unpack.)
        return [], [[] for i in range(len(me.vertices))]

    vWeightList = [[0.0] * len_groupNames for i in range(len(me.vertices))]

    for i, v in enumerate(me.vertices):
        for g in v.groups:
            vWeightList[i][g.group] = g.weight

    return groupNames, vWeightList
+
+
def meshNormalizedWeights(ob, me):
    """Return (groupNames, vWeightList) with each vertex's weights scaled so
    they sum to 1.0; vertices whose total weight is zero are left untouched.

    Returns ([], []) when the mesh has no vertex groups or when the weight
    query fails.
    """
    try:  # account for old bad BPyMesh
        groupNames, vWeightList = BPyMesh_meshWeight2List(ob, me)
    except Exception:
        # Deliberately best-effort — but the old bare 'except:' also
        # swallowed SystemExit/KeyboardInterrupt, so narrow it to Exception.
        return [], []

    if not groupNames:
        return [], []

    for i, vWeights in enumerate(vWeightList):
        tot = 0.0
        for w in vWeights:
            tot += w

        if tot:
            # Normalize in place so the caller sees the scaled weights.
            for j, w in enumerate(vWeights):
                vWeights[j] = w / tot

    return groupNames, vWeightList
+
# Fixed banner written at the top of every exported file; lines beginning
# with ';' are comments in the ASCII FBX format.
header_comment = \
'''; FBX 6.1.0 project file
; Created by Blender FBX Exporter
; for support mail: ideasman42@gmail.com
; ----------------------------------------------------

'''
+
+
+# This func can be called with just the filepath
+def save_single(operator, scene, filepath="",
+ global_matrix=None,
+ context_objects=None,
+ object_types={'EMPTY', 'CAMERA', 'LAMP', 'ARMATURE', 'MESH'},
+ mesh_apply_modifiers=True,
+ mesh_smooth_type='FACE',
+ ANIM_ENABLE=True,
+ ANIM_OPTIMIZE=True,
+ ANIM_OPTIMIZE_PRECISSION=6,
+ ANIM_ACTION_ALL=False,
+ use_metadata=True,
+ path_mode='AUTO',
+ ):
+
+ import bpy_extras.io_utils
+
+ mtx_x90 = Matrix.Rotation(math.pi / 2.0, 3, 'X')
+ mtx4_z90 = Matrix.Rotation(math.pi / 2.0, 4, 'Z')
+
+ if global_matrix is None:
+ global_matrix = Matrix()
+
+ # Use this for working out paths relative to the export location
+ base_src = os.path.dirname(bpy.data.filepath)
+ base_dst = os.path.dirname(filepath)
+
+ # collect images to copy
+ copy_set = set()
+
+ # ----------------------------------------------
+ # storage classes
+ class my_bone_class(object):
+ __slots__ = ("blenName",
+ "blenBone",
+ "blenMeshes",
+ "restMatrix",
+ "parent",
+ "blenName",
+ "fbxName",
+ "fbxArm",
+ "__pose_bone",
+ "__anim_poselist")
+
+ def __init__(self, blenBone, fbxArm):
+
+ # This is so 2 armatures dont have naming conflicts since FBX bones use object namespace
+ self.fbxName = sane_obname(blenBone)
+
+ self.blenName = blenBone.name
+ self.blenBone = blenBone
+ self.blenMeshes = {} # fbxMeshObName : mesh
+ self.fbxArm = fbxArm
+ self.restMatrix = blenBone.matrix_local
+# self.restMatrix = blenBone.matrix['ARMATURESPACE']
+
+ # not used yet
+ # self.restMatrixInv = self.restMatrix.inverted()
+ # self.restMatrixLocal = None # set later, need parent matrix
+
+ self.parent = None
+
+ # not public
+ pose = fbxArm.blenObject.pose
+ self.__pose_bone = pose.bones[self.blenName]
+
+ # store a list if matricies here, (poseMatrix, head, tail)
+ # {frame:posematrix, frame:posematrix, ...}
+ self.__anim_poselist = {}
+
+ '''
+ def calcRestMatrixLocal(self):
+ if self.parent:
+ self.restMatrixLocal = self.restMatrix * self.parent.restMatrix.inverted()
+ else:
+ self.restMatrixLocal = self.restMatrix.copy()
+ '''
+ def setPoseFrame(self, f):
+ # cache pose info here, frame must be set beforehand
+
+ # Didnt end up needing head or tail, if we do - here it is.
+ '''
+ self.__anim_poselist[f] = (\
+ self.__pose_bone.poseMatrix.copy(),\
+ self.__pose_bone.head.copy(),\
+ self.__pose_bone.tail.copy() )
+ '''
+
+ self.__anim_poselist[f] = self.__pose_bone.matrix.copy()
+
+ def getPoseBone(self):
+ return self.__pose_bone
+
+ # get pose from frame.
+ def getPoseMatrix(self, f): # ----------------------------------------------
+ return self.__anim_poselist[f]
+ '''
+ def getPoseHead(self, f):
+ #return self.__pose_bone.head.copy()
+ return self.__anim_poselist[f][1].copy()
+ def getPoseTail(self, f):
+ #return self.__pose_bone.tail.copy()
+ return self.__anim_poselist[f][2].copy()
+ '''
+ # end
+
+ def getAnimParRelMatrix(self, frame):
+ #arm_mat = self.fbxArm.matrixWorld
+ #arm_mat = self.fbxArm.parRelMatrix()
+ if not self.parent:
+ #return mtx4_z90 * (self.getPoseMatrix(frame) * arm_mat) # dont apply arm matrix anymore
+ return self.getPoseMatrix(frame) * mtx4_z90
+ else:
+ #return (mtx4_z90 * ((self.getPoseMatrix(frame) * arm_mat))) * (mtx4_z90 * (self.parent.getPoseMatrix(frame) * arm_mat)).inverted()
+ return (self.parent.getPoseMatrix(frame) * mtx4_z90).inverted() * ((self.getPoseMatrix(frame)) * mtx4_z90)
+
+ # we need these because camera and lamp rotations get modified below
+ def getAnimParRelMatrixRot(self, frame):
+ return self.getAnimParRelMatrix(frame)
+
+ def flushAnimData(self):
+ self.__anim_poselist.clear()
+
+ class my_object_generic(object):
+ __slots__ = ("fbxName",
+ "blenObject",
+ "blenData",
+ "origData",
+ "blenTextures",
+ "blenMaterials",
+ "blenMaterialList",
+ "blenAction",
+ "blenActionList",
+ "fbxGroupNames",
+ "fbxParent",
+ "fbxBoneParent",
+ "fbxBones",
+ "fbxArm",
+ "matrixWorld",
+ "__anim_poselist",
+ )
+
+ # Other settings can be applied for each type - mesh, armature etc.
+ def __init__(self, ob, matrixWorld=None):
+ self.fbxName = sane_obname(ob)
+ self.blenObject = ob
+ self.fbxGroupNames = []
+ self.fbxParent = None # set later on IF the parent is in the selection.
+ self.fbxArm = None
+ if matrixWorld:
+ self.matrixWorld = global_matrix * matrixWorld
+ else:
+ self.matrixWorld = global_matrix * ob.matrix_world
+
+ self.__anim_poselist = {} # we should only access this
+
+ def parRelMatrix(self):
+ if self.fbxParent:
+ return self.fbxParent.matrixWorld.inverted() * self.matrixWorld
+ else:
+ return self.matrixWorld
+
+ def setPoseFrame(self, f, fake=False):
+ if fake:
+ self.__anim_poselist[f] = self.matrixWorld * global_matrix.inverted()
+ else:
+ self.__anim_poselist[f] = self.blenObject.matrix_world.copy()
+
+ def getAnimParRelMatrix(self, frame):
+ if self.fbxParent:
+ #return (self.__anim_poselist[frame] * self.fbxParent.__anim_poselist[frame].inverted() ) * global_matrix
+ return (global_matrix * self.fbxParent.__anim_poselist[frame]).inverted() * (global_matrix * self.__anim_poselist[frame])
+ else:
+ return global_matrix * self.__anim_poselist[frame]
+
+ def getAnimParRelMatrixRot(self, frame):
+ obj_type = self.blenObject.type
+ if self.fbxParent:
+ matrix_rot = ((global_matrix * self.fbxParent.__anim_poselist[frame]).inverted() * (global_matrix * self.__anim_poselist[frame])).to_3x3()
+ else:
+ matrix_rot = (global_matrix * self.__anim_poselist[frame]).to_3x3()
+
+ # Lamps need to be rotated
+ if obj_type == 'LAMP':
+ matrix_rot = matrix_rot * mtx_x90
+ elif obj_type == 'CAMERA':
+ y = Vector((0.0, 1.0, 0.0)) * matrix_rot
+ matrix_rot = Matrix.Rotation(math.pi / 2.0, 3, y) * matrix_rot
+
+ return matrix_rot
+
+ # ----------------------------------------------
+
+ print('\nFBX export starting... %r' % filepath)
+ start_time = time.clock()
+ try:
+ file = open(filepath, "w", encoding="utf8", newline="\n")
+ except:
+ import traceback
+ traceback.print_exc()
+ operator.report({'ERROR'}, "Could'nt open file %r" % filepath)
+ return {'CANCELLED'}
+
+ # scene = context.scene # now passed as an arg instead of context
+ world = scene.world
+
+ # ---------------------------- Write the header first
+ file.write(header_comment)
+ if use_metadata:
+ curtime = time.localtime()[0:6]
+ else:
+ curtime = (0, 0, 0, 0, 0, 0)
+ #
+ file.write(\
+'''FBXHeaderExtension: {
+ FBXHeaderVersion: 1003
+ FBXVersion: 6100
+ CreationTimeStamp: {
+ Version: 1000
+ Year: %.4i
+ Month: %.2i
+ Day: %.2i
+ Hour: %.2i
+ Minute: %.2i
+ Second: %.2i
+ Millisecond: 0
+ }
+ Creator: "FBX SDK/FBX Plugins build 20070228"
+ OtherFlags: {
+ FlagPLE: 0
+ }
+}''' % (curtime))
+
+ file.write('\nCreationTime: "%.4i-%.2i-%.2i %.2i:%.2i:%.2i:000"' % curtime)
+ file.write('\nCreator: "Blender version %s"' % bpy.app.version_string)
+
+ pose_items = [] # list of (fbxName, matrix) to write pose data for, easier to collect allong the way
+
+ # --------------- funcs for exporting
+ def object_tx(ob, loc, matrix, matrix_mod=None):
+ '''
+ Matrix mod is so armature objects can modify their bone matricies
+ '''
+ if isinstance(ob, bpy.types.Bone):
+# if isinstance(ob, Blender.Types.BoneType):
+
+ # we know we have a matrix
+ # matrix = mtx4_z90 * (ob.matrix['ARMATURESPACE'] * matrix_mod)
+ matrix = ob.matrix_local * mtx4_z90 # dont apply armature matrix anymore
+# matrix = mtx4_z90 * ob.matrix['ARMATURESPACE'] # dont apply armature matrix anymore
+
+ parent = ob.parent
+ if parent:
+ #par_matrix = mtx4_z90 * (parent.matrix['ARMATURESPACE'] * matrix_mod)
+ par_matrix = parent.matrix_local * mtx4_z90 # dont apply armature matrix anymore
+# par_matrix = mtx4_z90 * parent.matrix['ARMATURESPACE'] # dont apply armature matrix anymore
+ matrix = par_matrix.inverted() * matrix
+
+ loc, rot, scale = matrix.decompose()
+ matrix_rot = rot.to_matrix()
+
+ loc = tuple(loc)
+ rot = tuple(rot.to_euler()) # quat -> euler
+ scale = tuple(scale)
+ else:
+ # This is bad because we need the parent relative matrix from the fbx parent (if we have one), dont use anymore
+ #if ob and not matrix: matrix = ob.matrix_world * global_matrix
+ if ob and not matrix:
+ raise Exception("error: this should never happen!")
+
+ matrix_rot = matrix
+ #if matrix:
+ # matrix = matrix_scale * matrix
+
+ if matrix:
+ loc, rot, scale = matrix.decompose()
+ matrix_rot = rot.to_matrix()
+
+ # Lamps need to be rotated
+ if ob and ob.type == 'LAMP':
+ matrix_rot = matrix_rot * mtx_x90
+ elif ob and ob.type == 'CAMERA':
+ y = Vector((0.0, 1.0, 0.0)) * matrix_rot
+ matrix_rot = Matrix.Rotation(math.pi / 2.0, 3, y) * matrix_rot
+ # else do nothing.
+
+ loc = tuple(loc)
+ rot = tuple(matrix_rot.to_euler())
+ scale = tuple(scale)
+ else:
+ if not loc:
+ loc = 0.0, 0.0, 0.0
+ scale = 1.0, 1.0, 1.0
+ rot = 0.0, 0.0, 0.0
+
+ return loc, rot, scale, matrix, matrix_rot
+
+ def write_object_tx(ob, loc, matrix, matrix_mod=None):
+ '''
+ Write the local transform ("Lcl Translation"/"Lcl Rotation"/"Lcl Scaling")
+ FBX properties for an object, and return the decomposed transform
+ (loc, rot, scale, matrix, matrix_rot) as computed by object_tx().
+
+ loc sets the location of non-blender objects that have a location;
+ matrix_mod is only used for bones at the moment.
+ '''
+ loc, rot, scale, matrix, matrix_rot = object_tx(ob, loc, matrix, matrix_mod)
+
+ file.write('\n\t\t\tProperty: "Lcl Translation", "Lcl Translation", "A+",%.15f,%.15f,%.15f' % loc)
+ # rot comes back from object_tx() in radians; FBX stores degrees.
+ file.write('\n\t\t\tProperty: "Lcl Rotation", "Lcl Rotation", "A+",%.15f,%.15f,%.15f' % tuple_rad_to_deg(rot))
+# file.write('\n\t\t\tProperty: "Lcl Rotation", "Lcl Rotation", "A+",%.15f,%.15f,%.15f' % rot)
+ file.write('\n\t\t\tProperty: "Lcl Scaling", "Lcl Scaling", "A+",%.15f,%.15f,%.15f' % scale)
+ return loc, rot, scale, matrix, matrix_rot
+
+ def get_constraints(ob=None):
+ '''
+ Collect LIMIT_LOCATION / LIMIT_ROTATION / LIMIT_SCALE constraint data
+ from ob (an object or pose bone; may be None) into a dict of tuples
+ shaped for the FBX property writer. Unset constraints keep defaults.
+ '''
+ # Set variables to their defaults.
+ constraint_values = {"loc_min": (0.0, 0.0, 0.0),
+ "loc_max": (0.0, 0.0, 0.0),
+ "loc_limit": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0),
+ "rot_min": (0.0, 0.0, 0.0),
+ "rot_max": (0.0, 0.0, 0.0),
+ "rot_limit": (0.0, 0.0, 0.0),
+ "sca_min": (1.0, 1.0, 1.0),
+ "sca_max": (1.0, 1.0, 1.0),
+ "sca_limit": (0.0, 0.0, 0.0, 0.0, 0.0, 0.0),
+ }
+
+ # Iterate through the list of constraints for this object to get the information in a format which is compatible with the FBX format.
+ if ob is not None:
+ for constraint in ob.constraints:
+ if constraint.type == 'LIMIT_LOCATION':
+ constraint_values["loc_min"] = constraint.min_x, constraint.min_y, constraint.min_z
+ constraint_values["loc_max"] = constraint.max_x, constraint.max_y, constraint.max_z
+ constraint_values["loc_limit"] = constraint.use_min_x, constraint.use_min_y, constraint.use_min_z, constraint.use_max_x, constraint.use_max_y, constraint.use_max_z
+ elif constraint.type == 'LIMIT_ROTATION':
+ # Blender stores limit-rotation angles in radians; FBX wants degrees.
+ constraint_values["rot_min"] = math.degrees(constraint.min_x), math.degrees(constraint.min_y), math.degrees(constraint.min_z)
+ constraint_values["rot_max"] = math.degrees(constraint.max_x), math.degrees(constraint.max_y), math.degrees(constraint.max_z)
+ # Only 3 flags: the rotation constraint has one use_limit flag per axis (no separate min/max).
+ constraint_values["rot_limit"] = constraint.use_limit_x, constraint.use_limit_y, constraint.use_limit_z
+ elif constraint.type == 'LIMIT_SCALE':
+ constraint_values["sca_min"] = constraint.min_x, constraint.min_y, constraint.min_z
+ constraint_values["sca_max"] = constraint.max_x, constraint.max_y, constraint.max_z
+ constraint_values["sca_limit"] = constraint.use_min_x, constraint.use_min_y, constraint.use_min_z, constraint.use_max_x, constraint.use_max_y, constraint.use_max_z
+
+ # In case bad values were assigned above: the dict must still hold exactly its 9 default keys.
+ assert(len(constraint_values) == 9)
+
+ return constraint_values
+
+ def write_object_props(ob=None, loc=None, matrix=None, matrix_mod=None, pose_bone=None):
+ '''
+ Write the common "Properties60" block shared by all FBX model nodes:
+ transform (via write_object_tx), constraint limits (via get_constraints)
+ and a long run of fixed default properties. Returns the transform tuple
+ (loc, rot, scale, matrix, matrix_rot) so callers can reuse it.
+ '''
+ # Check if a pose exists for this object and set the constraint source accordingly. (Poses only exist if the object is a bone.)
+ if pose_bone:
+ constraints = get_constraints(pose_bone)
+ else:
+ constraints = get_constraints(ob)
+
+ # if the type is 0 its an empty otherwise its a mesh
+ # only difference at the moment is one has a color
+ file.write('''
+ Properties60: {
+ Property: "QuaternionInterpolate", "bool", "",0
+ Property: "Visibility", "Visibility", "A+",1''')
+
+ loc, rot, scale, matrix, matrix_rot = write_object_tx(ob, loc, matrix, matrix_mod)
+
+ # Rotation order, note, for FBX files I've loaded normal order is 1
+ # setting to zero.
+ # eEULER_XYZ = 0
+ # eEULER_XZY
+ # eEULER_YZX
+ # eEULER_YXZ
+ # eEULER_ZXY
+ # eEULER_ZYX
+
+ file.write('\n\t\t\tProperty: "RotationOffset", "Vector3D", "",0,0,0')
+ file.write('\n\t\t\tProperty: "RotationPivot", "Vector3D", "",0,0,0')
+ file.write('\n\t\t\tProperty: "ScalingOffset", "Vector3D", "",0,0,0')
+ file.write('\n\t\t\tProperty: "ScalingPivot", "Vector3D", "",0,0,0')
+ file.write('\n\t\t\tProperty: "TranslationActive", "bool", "",0')
+ file.write('\n\t\t\tProperty: "TranslationMin", "Vector3D", "",%.15g,%.15g,%.15g' % constraints["loc_min"])
+ file.write('\n\t\t\tProperty: "TranslationMax", "Vector3D", "",%.15g,%.15g,%.15g' % constraints["loc_max"])
+ file.write('\n\t\t\tProperty: "TranslationMinX", "bool", "",%d' % constraints["loc_limit"][0])
+ file.write('\n\t\t\tProperty: "TranslationMinY", "bool", "",%d' % constraints["loc_limit"][1])
+ file.write('\n\t\t\tProperty: "TranslationMinZ", "bool", "",%d' % constraints["loc_limit"][2])
+ file.write('\n\t\t\tProperty: "TranslationMaxX", "bool", "",%d' % constraints["loc_limit"][3])
+ file.write('\n\t\t\tProperty: "TranslationMaxY", "bool", "",%d' % constraints["loc_limit"][4])
+ file.write('\n\t\t\tProperty: "TranslationMaxZ", "bool", "",%d' % constraints["loc_limit"][5])
+ file.write('\n\t\t\tProperty: "RotationOrder", "enum", "",0')
+ file.write('\n\t\t\tProperty: "RotationSpaceForLimitOnly", "bool", "",0')
+ file.write('\n\t\t\tProperty: "AxisLen", "double", "",10')
+ file.write('\n\t\t\tProperty: "PreRotation", "Vector3D", "",0,0,0')
+ file.write('\n\t\t\tProperty: "PostRotation", "Vector3D", "",0,0,0')
+ file.write('\n\t\t\tProperty: "RotationActive", "bool", "",0')
+ file.write('\n\t\t\tProperty: "RotationMin", "Vector3D", "",%.15g,%.15g,%.15g' % constraints["rot_min"])
+ file.write('\n\t\t\tProperty: "RotationMax", "Vector3D", "",%.15g,%.15g,%.15g' % constraints["rot_max"])
+ file.write('\n\t\t\tProperty: "RotationMinX", "bool", "",%d' % constraints["rot_limit"][0])
+ file.write('\n\t\t\tProperty: "RotationMinY", "bool", "",%d' % constraints["rot_limit"][1])
+ file.write('\n\t\t\tProperty: "RotationMinZ", "bool", "",%d' % constraints["rot_limit"][2])
+ # Min and Max deliberately reuse rot_limit[0..2]: Blender's limit-rotation
+ # constraint has a single use_limit flag per axis (see get_constraints).
+ file.write('\n\t\t\tProperty: "RotationMaxX", "bool", "",%d' % constraints["rot_limit"][0])
+ file.write('\n\t\t\tProperty: "RotationMaxY", "bool", "",%d' % constraints["rot_limit"][1])
+ file.write('\n\t\t\tProperty: "RotationMaxZ", "bool", "",%d' % constraints["rot_limit"][2])
+ file.write('\n\t\t\tProperty: "RotationStiffnessX", "double", "",0')
+ file.write('\n\t\t\tProperty: "RotationStiffnessY", "double", "",0')
+ file.write('\n\t\t\tProperty: "RotationStiffnessZ", "double", "",0')
+ file.write('\n\t\t\tProperty: "MinDampRangeX", "double", "",0')
+ file.write('\n\t\t\tProperty: "MinDampRangeY", "double", "",0')
+ file.write('\n\t\t\tProperty: "MinDampRangeZ", "double", "",0')
+ file.write('\n\t\t\tProperty: "MaxDampRangeX", "double", "",0')
+ file.write('\n\t\t\tProperty: "MaxDampRangeY", "double", "",0')
+ file.write('\n\t\t\tProperty: "MaxDampRangeZ", "double", "",0')
+ file.write('\n\t\t\tProperty: "MinDampStrengthX", "double", "",0')
+ file.write('\n\t\t\tProperty: "MinDampStrengthY", "double", "",0')
+ file.write('\n\t\t\tProperty: "MinDampStrengthZ", "double", "",0')
+ file.write('\n\t\t\tProperty: "MaxDampStrengthX", "double", "",0')
+ file.write('\n\t\t\tProperty: "MaxDampStrengthY", "double", "",0')
+ file.write('\n\t\t\tProperty: "MaxDampStrengthZ", "double", "",0')
+ file.write('\n\t\t\tProperty: "PreferedAngleX", "double", "",0')
+ file.write('\n\t\t\tProperty: "PreferedAngleY", "double", "",0')
+ file.write('\n\t\t\tProperty: "PreferedAngleZ", "double", "",0')
+ file.write('\n\t\t\tProperty: "InheritType", "enum", "",0')
+ file.write('\n\t\t\tProperty: "ScalingActive", "bool", "",0')
+ file.write('\n\t\t\tProperty: "ScalingMin", "Vector3D", "",%.15g,%.15g,%.15g' % constraints["sca_min"])
+ file.write('\n\t\t\tProperty: "ScalingMax", "Vector3D", "",%.15g,%.15g,%.15g' % constraints["sca_max"])
+ file.write('\n\t\t\tProperty: "ScalingMinX", "bool", "",%d' % constraints["sca_limit"][0])
+ file.write('\n\t\t\tProperty: "ScalingMinY", "bool", "",%d' % constraints["sca_limit"][1])
+ file.write('\n\t\t\tProperty: "ScalingMinZ", "bool", "",%d' % constraints["sca_limit"][2])
+ file.write('\n\t\t\tProperty: "ScalingMaxX", "bool", "",%d' % constraints["sca_limit"][3])
+ file.write('\n\t\t\tProperty: "ScalingMaxY", "bool", "",%d' % constraints["sca_limit"][4])
+ file.write('\n\t\t\tProperty: "ScalingMaxZ", "bool", "",%d' % constraints["sca_limit"][5])
+ file.write('\n\t\t\tProperty: "GeometricTranslation", "Vector3D", "",0,0,0')
+ file.write('\n\t\t\tProperty: "GeometricRotation", "Vector3D", "",0,0,0')
+ file.write('\n\t\t\tProperty: "GeometricScaling", "Vector3D", "",1,1,1')
+ file.write('\n\t\t\tProperty: "LookAtProperty", "object", ""')
+ file.write('\n\t\t\tProperty: "UpVectorProperty", "object", ""')
+ file.write('\n\t\t\tProperty: "Show", "bool", "",1')
+ file.write('\n\t\t\tProperty: "NegativePercentShapeSupport", "bool", "",1')
+ file.write('\n\t\t\tProperty: "DefaultAttributeIndex", "int", "",0')
+ if ob and not isinstance(ob, bpy.types.Bone):
+ # Only mesh objects have color
+ file.write('\n\t\t\tProperty: "Color", "Color", "A",0.8,0.8,0.8')
+ file.write('\n\t\t\tProperty: "Size", "double", "",100')
+ file.write('\n\t\t\tProperty: "Look", "enum", "",1')
+
+ return loc, rot, scale, matrix, matrix_rot
+
+ # -------------------------------------------- Armatures
+ #def write_bone(bone, name, matrix_mod):
+ def write_bone(my_bone):
+ '''
+ Write one armature bone as an FBX "Limb" model node and record its
+ pose matrix in pose_items for the BindPose section.
+ '''
+ file.write('\n\tModel: "Model::%s", "Limb" {' % my_bone.fbxName)
+ file.write('\n\t\tVersion: 232')
+
+ #poseMatrix = write_object_props(my_bone.blenBone, None, None, my_bone.fbxArm.parRelMatrix())[3]
+ poseMatrix = write_object_props(my_bone.blenBone, pose_bone=my_bone.getPoseBone())[3] # dont apply bone matricies anymore
+ pose_items.append((my_bone.fbxName, poseMatrix))
+
+ # file.write('\n\t\t\tProperty: "Size", "double", "",%.6f' % ((my_bone.blenData.head['ARMATURESPACE'] - my_bone.blenData.tail['ARMATURESPACE']) * my_bone.fbxArm.parRelMatrix()).length)
+ file.write('\n\t\t\tProperty: "Size", "double", "",1')
+
+ #((my_bone.blenData.head['ARMATURESPACE'] * my_bone.fbxArm.matrixWorld) - (my_bone.blenData.tail['ARMATURESPACE'] * my_bone.fbxArm.parRelMatrix())).length)
+
+ """
+ file.write('\n\t\t\tProperty: "LimbLength", "double", "",%.6f' %\
+ ((my_bone.blenBone.head['ARMATURESPACE'] - my_bone.blenBone.tail['ARMATURESPACE']) * my_bone.fbxArm.parRelMatrix()).length)
+ """
+
+ # LimbLength = rest-pose bone length in armature space (head to tail).
+ file.write('\n\t\t\tProperty: "LimbLength", "double", "",%.6f' %
+ (my_bone.blenBone.head_local - my_bone.blenBone.tail_local).length)
+# (my_bone.blenBone.head['ARMATURESPACE'] - my_bone.blenBone.tail['ARMATURESPACE']).length)
+
+ #file.write('\n\t\t\tProperty: "LimbLength", "double", "",1')
+ # Both Color variants are written, presumably to match the FBX SDK's own output — TODO confirm.
+ file.write('\n\t\t\tProperty: "Color", "ColorRGB", "",0.8,0.8,0.8')
+ file.write('\n\t\t\tProperty: "Color", "Color", "A",0.8,0.8,0.8')
+ file.write('\n\t\t}')
+ file.write('\n\t\tMultiLayer: 0')
+ file.write('\n\t\tMultiTake: 1')
+ file.write('\n\t\tShading: Y')
+ file.write('\n\t\tCulling: "CullingOff"')
+ file.write('\n\t\tTypeFlags: "Skeleton"')
+ file.write('\n\t}')
+
+ def write_camera_switch():
+ '''
+ Write the hidden "Camera Switcher" model node that FBX files carry
+ by convention; it references no real Blender data.
+ '''
+ file.write('''
+ Model: "Model::Camera Switcher", "CameraSwitcher" {
+ Version: 232''')
+
+ write_object_props()
+ file.write('''
+ Property: "Color", "Color", "A",0.8,0.8,0.8
+ Property: "Camera Index", "Integer", "A+",100
+ }
+ MultiLayer: 0
+ MultiTake: 1
+ Hidden: "True"
+ Shading: W
+ Culling: "CullingOff"
+ Version: 101
+ Name: "Model::Camera Switcher"
+ CameraId: 0
+ CameraName: 100
+ CameraIndexName:
+ }''')
+
+ def write_camera_dummy(name, loc, near, far, proj_type, up):
+ '''
+ Write a hidden stock camera node with fixed default properties.
+ Used by write_camera_default for the "Producer" cameras; proj_type is
+ written straight into CameraProjectionType (presumably 0=perspective,
+ 1=orthographic per the FBX enum — TODO confirm against the SDK).
+ '''
+ file.write('\n\tModel: "Model::%s", "Camera" {' % name)
+ file.write('\n\t\tVersion: 232')
+ write_object_props(None, loc)
+
+ file.write('\n\t\t\tProperty: "Color", "Color", "A",0.8,0.8,0.8')
+ file.write('\n\t\t\tProperty: "Roll", "Roll", "A+",0')
+ file.write('\n\t\t\tProperty: "FieldOfView", "FieldOfView", "A+",40')
+ file.write('\n\t\t\tProperty: "FieldOfViewX", "FieldOfView", "A+",1')
+ file.write('\n\t\t\tProperty: "FieldOfViewY", "FieldOfView", "A+",1')
+ file.write('\n\t\t\tProperty: "OpticalCenterX", "Real", "A+",0')
+ file.write('\n\t\t\tProperty: "OpticalCenterY", "Real", "A+",0')
+ file.write('\n\t\t\tProperty: "BackgroundColor", "Color", "A+",0.63,0.63,0.63')
+ file.write('\n\t\t\tProperty: "TurnTable", "Real", "A+",0')
+ file.write('\n\t\t\tProperty: "DisplayTurnTableIcon", "bool", "",1')
+ file.write('\n\t\t\tProperty: "Motion Blur Intensity", "Real", "A+",1')
+ file.write('\n\t\t\tProperty: "UseMotionBlur", "bool", "",0')
+ file.write('\n\t\t\tProperty: "UseRealTimeMotionBlur", "bool", "",1')
+ file.write('\n\t\t\tProperty: "ResolutionMode", "enum", "",0')
+ file.write('\n\t\t\tProperty: "ApertureMode", "enum", "",2')
+ file.write('\n\t\t\tProperty: "GateFit", "enum", "",0')
+ file.write('\n\t\t\tProperty: "FocalLength", "Real", "A+",21.3544940948486')
+ file.write('\n\t\t\tProperty: "CameraFormat", "enum", "",0')
+ file.write('\n\t\t\tProperty: "AspectW", "double", "",320')
+ file.write('\n\t\t\tProperty: "AspectH", "double", "",200')
+ file.write('\n\t\t\tProperty: "PixelAspectRatio", "double", "",1')
+ file.write('\n\t\t\tProperty: "UseFrameColor", "bool", "",0')
+ file.write('\n\t\t\tProperty: "FrameColor", "ColorRGB", "",0.3,0.3,0.3')
+ file.write('\n\t\t\tProperty: "ShowName", "bool", "",1')
+ file.write('\n\t\t\tProperty: "ShowGrid", "bool", "",1')
+ file.write('\n\t\t\tProperty: "ShowOpticalCenter", "bool", "",0')
+ file.write('\n\t\t\tProperty: "ShowAzimut", "bool", "",1')
+ file.write('\n\t\t\tProperty: "ShowTimeCode", "bool", "",0')
+ # Only the clip planes vary with the caller's arguments here.
+ file.write('\n\t\t\tProperty: "NearPlane", "double", "",%.6f' % near)
+ file.write('\n\t\t\tProperty: "FarPlane", "double", "",%.6f' % far)
+ file.write('\n\t\t\tProperty: "FilmWidth", "double", "",0.816')
+ file.write('\n\t\t\tProperty: "FilmHeight", "double", "",0.612')
+ file.write('\n\t\t\tProperty: "FilmAspectRatio", "double", "",1.33333333333333')
+ file.write('\n\t\t\tProperty: "FilmSqueezeRatio", "double", "",1')
+ file.write('\n\t\t\tProperty: "FilmFormatIndex", "enum", "",4')
+ file.write('\n\t\t\tProperty: "ViewFrustum", "bool", "",1')
+ file.write('\n\t\t\tProperty: "ViewFrustumNearFarPlane", "bool", "",0')
+ file.write('\n\t\t\tProperty: "ViewFrustumBackPlaneMode", "enum", "",2')
+ file.write('\n\t\t\tProperty: "BackPlaneDistance", "double", "",100')
+ file.write('\n\t\t\tProperty: "BackPlaneDistanceMode", "enum", "",0')
+ file.write('\n\t\t\tProperty: "ViewCameraToLookAt", "bool", "",1')
+ file.write('\n\t\t\tProperty: "LockMode", "bool", "",0')
+ file.write('\n\t\t\tProperty: "LockInterestNavigation", "bool", "",0')
+ file.write('\n\t\t\tProperty: "FitImage", "bool", "",0')
+ file.write('\n\t\t\tProperty: "Crop", "bool", "",0')
+ file.write('\n\t\t\tProperty: "Center", "bool", "",1')
+ file.write('\n\t\t\tProperty: "KeepRatio", "bool", "",1')
+ file.write('\n\t\t\tProperty: "BackgroundMode", "enum", "",0')
+ file.write('\n\t\t\tProperty: "BackgroundAlphaTreshold", "double", "",0.5')
+ file.write('\n\t\t\tProperty: "ForegroundTransparent", "bool", "",1')
+ file.write('\n\t\t\tProperty: "DisplaySafeArea", "bool", "",0')
+ file.write('\n\t\t\tProperty: "SafeAreaDisplayStyle", "enum", "",1')
+ file.write('\n\t\t\tProperty: "SafeAreaAspectRatio", "double", "",1.33333333333333')
+ file.write('\n\t\t\tProperty: "Use2DMagnifierZoom", "bool", "",0')
+ file.write('\n\t\t\tProperty: "2D Magnifier Zoom", "Real", "A+",100')
+ file.write('\n\t\t\tProperty: "2D Magnifier X", "Real", "A+",50')
+ file.write('\n\t\t\tProperty: "2D Magnifier Y", "Real", "A+",50')
+ file.write('\n\t\t\tProperty: "CameraProjectionType", "enum", "",%i' % proj_type)
+ file.write('\n\t\t\tProperty: "UseRealTimeDOFAndAA", "bool", "",0')
+ file.write('\n\t\t\tProperty: "UseDepthOfField", "bool", "",0')
+ file.write('\n\t\t\tProperty: "FocusSource", "enum", "",0')
+ file.write('\n\t\t\tProperty: "FocusAngle", "double", "",3.5')
+ file.write('\n\t\t\tProperty: "FocusDistance", "double", "",200')
+ file.write('\n\t\t\tProperty: "UseAntialiasing", "bool", "",0')
+ file.write('\n\t\t\tProperty: "AntialiasingIntensity", "double", "",0.77777')
+ file.write('\n\t\t\tProperty: "UseAccumulationBuffer", "bool", "",0')
+ file.write('\n\t\t\tProperty: "FrameSamplingCount", "int", "",7')
+ file.write('\n\t\t}')
+ file.write('\n\t\tMultiLayer: 0')
+ file.write('\n\t\tMultiTake: 0')
+ file.write('\n\t\tHidden: "True"')
+ file.write('\n\t\tShading: Y')
+ file.write('\n\t\tCulling: "CullingOff"')
+ file.write('\n\t\tTypeFlags: "Camera"')
+ file.write('\n\t\tGeometryVersion: 124')
+ file.write('\n\t\tPosition: %.6f,%.6f,%.6f' % loc)
+ file.write('\n\t\tUp: %i,%i,%i' % up)
+ file.write('\n\t\tLookAt: 0,0,0')
+ file.write('\n\t\tShowInfoOnMoving: 1')
+ file.write('\n\t\tShowAudio: 0')
+ file.write('\n\t\tAudioColor: 0,1,0')
+ file.write('\n\t\tCameraOrthoZoom: 1')
+ file.write('\n\t}')
+
+ def write_camera_default():
+ '''Write the seven stock "Producer" cameras via write_camera_dummy.'''
+ # This sucks but to match FBX converter its easier to
+ # write the cameras though they are not needed.
+ write_camera_dummy('Producer Perspective', (0, 71.3, 287.5), 10, 4000, 0, (0, 1, 0))
+ write_camera_dummy('Producer Top', (0, 4000, 0), 1, 30000, 1, (0, 0, -1))
+ write_camera_dummy('Producer Bottom', (0, -4000, 0), 1, 30000, 1, (0, 0, -1))
+ write_camera_dummy('Producer Front', (0, 0, 4000), 1, 30000, 1, (0, 1, 0))
+ write_camera_dummy('Producer Back', (0, 0, -4000), 1, 30000, 1, (0, 1, 0))
+ write_camera_dummy('Producer Right', (4000, 0, 0), 1, 30000, 1, (0, 1, 0))
+ write_camera_dummy('Producer Left', (-4000, 0, 0), 1, 30000, 1, (0, 1, 0))
+
+ def write_camera(my_cam):
+ '''
+ Write a blender camera as an FBX "Camera" model node, deriving field of
+ view, clip planes, aspect and orientation from the Blender camera data
+ and the scene render settings.
+ '''
+ render = scene.render
+ width = render.resolution_x
+ height = render.resolution_y
+ aspect = width / height
+
+ data = my_cam.blenObject.data
+
+ file.write('\n\tModel: "Model::%s", "Camera" {' % my_cam.fbxName)
+ file.write('\n\t\tVersion: 232')
+ loc, rot, scale, matrix, matrix_rot = write_object_props(my_cam.blenObject, None, my_cam.parRelMatrix())
+
+ file.write('\n\t\t\tProperty: "Roll", "Roll", "A+",0')
+ # data.angle is in radians; FBX FieldOfView is in degrees.
+ file.write('\n\t\t\tProperty: "FieldOfView", "FieldOfView", "A+",%.6f' % math.degrees(data.angle))
+ file.write('\n\t\t\tProperty: "FieldOfViewX", "FieldOfView", "A+",1')
+ file.write('\n\t\t\tProperty: "FieldOfViewY", "FieldOfView", "A+",1')
+ # file.write('\n\t\t\tProperty: "FocalLength", "Real", "A+",14.0323972702026')
+ file.write('\n\t\t\tProperty: "OpticalCenterX", "Real", "A+",%.6f' % data.shift_x) # not sure if this is in the correct units?
+ file.write('\n\t\t\tProperty: "OpticalCenterY", "Real", "A+",%.6f' % data.shift_y) # ditto
+ file.write('\n\t\t\tProperty: "BackgroundColor", "Color", "A+",0,0,0')
+ file.write('\n\t\t\tProperty: "TurnTable", "Real", "A+",0')
+ file.write('\n\t\t\tProperty: "DisplayTurnTableIcon", "bool", "",1')
+ file.write('\n\t\t\tProperty: "Motion Blur Intensity", "Real", "A+",1')
+ file.write('\n\t\t\tProperty: "UseMotionBlur", "bool", "",0')
+ file.write('\n\t\t\tProperty: "UseRealTimeMotionBlur", "bool", "",1')
+ file.write('\n\t\t\tProperty: "ResolutionMode", "enum", "",0')
+ file.write('\n\t\t\tProperty: "ApertureMode", "enum", "",2')
+ file.write('\n\t\t\tProperty: "GateFit", "enum", "",2')
+ file.write('\n\t\t\tProperty: "CameraFormat", "enum", "",0')
+ file.write('\n\t\t\tProperty: "AspectW", "double", "",%i' % width)
+ file.write('\n\t\t\tProperty: "AspectH", "double", "",%i' % height)
+
+ '''Camera aspect ratio modes.
+ 0 If the ratio mode is eWINDOW_SIZE, both width and height values aren't relevant.
+ 1 If the ratio mode is eFIXED_RATIO, the height value is set to 1.0 and the width value is relative to the height value.
+ 2 If the ratio mode is eFIXED_RESOLUTION, both width and height values are in pixels.
+ 3 If the ratio mode is eFIXED_WIDTH, the width value is in pixels and the height value is relative to the width value.
+ 4 If the ratio mode is eFIXED_HEIGHT, the height value is in pixels and the width value is relative to the height value.
+
+ Definition at line 234 of file kfbxcamera.h. '''
+
+ file.write('\n\t\t\tProperty: "PixelAspectRatio", "double", "",2')
+
+ file.write('\n\t\t\tProperty: "UseFrameColor", "bool", "",0')
+ file.write('\n\t\t\tProperty: "FrameColor", "ColorRGB", "",0.3,0.3,0.3')
+ file.write('\n\t\t\tProperty: "ShowName", "bool", "",1')
+ file.write('\n\t\t\tProperty: "ShowGrid", "bool", "",1')
+ file.write('\n\t\t\tProperty: "ShowOpticalCenter", "bool", "",0')
+ file.write('\n\t\t\tProperty: "ShowAzimut", "bool", "",1')
+ file.write('\n\t\t\tProperty: "ShowTimeCode", "bool", "",0')
+ file.write('\n\t\t\tProperty: "NearPlane", "double", "",%.6f' % data.clip_start)
+ file.write('\n\t\t\tProperty: "FarPlane", "double", "",%.6f' % data.clip_end)
+ file.write('\n\t\t\tProperty: "FilmWidth", "double", "",1.0')
+ file.write('\n\t\t\tProperty: "FilmHeight", "double", "",1.0')
+ file.write('\n\t\t\tProperty: "FilmAspectRatio", "double", "",%.6f' % aspect)
+ file.write('\n\t\t\tProperty: "FilmSqueezeRatio", "double", "",1')
+ file.write('\n\t\t\tProperty: "FilmFormatIndex", "enum", "",0')
+ file.write('\n\t\t\tProperty: "ViewFrustum", "bool", "",1')
+ file.write('\n\t\t\tProperty: "ViewFrustumNearFarPlane", "bool", "",0')
+ file.write('\n\t\t\tProperty: "ViewFrustumBackPlaneMode", "enum", "",2')
+ file.write('\n\t\t\tProperty: "BackPlaneDistance", "double", "",100')
+ file.write('\n\t\t\tProperty: "BackPlaneDistanceMode", "enum", "",0')
+ file.write('\n\t\t\tProperty: "ViewCameraToLookAt", "bool", "",1')
+ file.write('\n\t\t\tProperty: "LockMode", "bool", "",0')
+ file.write('\n\t\t\tProperty: "LockInterestNavigation", "bool", "",0')
+ file.write('\n\t\t\tProperty: "FitImage", "bool", "",0')
+ file.write('\n\t\t\tProperty: "Crop", "bool", "",0')
+ file.write('\n\t\t\tProperty: "Center", "bool", "",1')
+ file.write('\n\t\t\tProperty: "KeepRatio", "bool", "",1')
+ file.write('\n\t\t\tProperty: "BackgroundMode", "enum", "",0')
+ file.write('\n\t\t\tProperty: "BackgroundAlphaTreshold", "double", "",0.5')
+ file.write('\n\t\t\tProperty: "ForegroundTransparent", "bool", "",1')
+ file.write('\n\t\t\tProperty: "DisplaySafeArea", "bool", "",0')
+ file.write('\n\t\t\tProperty: "SafeAreaDisplayStyle", "enum", "",1')
+ file.write('\n\t\t\tProperty: "SafeAreaAspectRatio", "double", "",%.6f' % aspect)
+ file.write('\n\t\t\tProperty: "Use2DMagnifierZoom", "bool", "",0')
+ file.write('\n\t\t\tProperty: "2D Magnifier Zoom", "Real", "A+",100')
+ file.write('\n\t\t\tProperty: "2D Magnifier X", "Real", "A+",50')
+ file.write('\n\t\t\tProperty: "2D Magnifier Y", "Real", "A+",50')
+ file.write('\n\t\t\tProperty: "CameraProjectionType", "enum", "",0')
+ file.write('\n\t\t\tProperty: "UseRealTimeDOFAndAA", "bool", "",0')
+ file.write('\n\t\t\tProperty: "UseDepthOfField", "bool", "",0')
+ file.write('\n\t\t\tProperty: "FocusSource", "enum", "",0')
+ file.write('\n\t\t\tProperty: "FocusAngle", "double", "",3.5')
+ file.write('\n\t\t\tProperty: "FocusDistance", "double", "",200')
+ file.write('\n\t\t\tProperty: "UseAntialiasing", "bool", "",0')
+ file.write('\n\t\t\tProperty: "AntialiasingIntensity", "double", "",0.77777')
+ file.write('\n\t\t\tProperty: "UseAccumulationBuffer", "bool", "",0')
+ file.write('\n\t\t\tProperty: "FrameSamplingCount", "int", "",7')
+
+ file.write('\n\t\t}')
+ file.write('\n\t\tMultiLayer: 0')
+ file.write('\n\t\tMultiTake: 0')
+ file.write('\n\t\tShading: Y')
+ file.write('\n\t\tCulling: "CullingOff"')
+ file.write('\n\t\tTypeFlags: "Camera"')
+ file.write('\n\t\tGeometryVersion: 124')
+ file.write('\n\t\tPosition: %.6f,%.6f,%.6f' % loc)
+ # Up/LookAt vectors come from rotating the Y and -Z basis vectors by the
+ # camera's rotation matrix.
+ file.write('\n\t\tUp: %.6f,%.6f,%.6f' % tuple(Vector((0.0, 1.0, 0.0)) * matrix_rot))
+ file.write('\n\t\tLookAt: %.6f,%.6f,%.6f' % tuple(Vector((0.0, 0.0, -1.0)) * matrix_rot))
+
+ #file.write('\n\t\tUp: 0,0,0' )
+ #file.write('\n\t\tLookAt: 0,0,0' )
+
+ file.write('\n\t\tShowInfoOnMoving: 1')
+ file.write('\n\t\tShowAudio: 0')
+ file.write('\n\t\tAudioColor: 0,1,0')
+ file.write('\n\t\tCameraOrthoZoom: 1')
+ file.write('\n\t}')
+
+ def write_light(my_light):
+ '''
+ Write a blender lamp as an FBX "Light" model node. Unsupported lamp
+ types (HEMI, AREA) are exported as directional lights.
+ '''
+ light = my_light.blenObject.data
+ file.write('\n\tModel: "Model::%s", "Light" {' % my_light.fbxName)
+ file.write('\n\t\tVersion: 232')
+
+ write_object_props(my_light.blenObject, None, my_light.parRelMatrix())
+
+ # Why are these values here twice?????? - oh well, follow the holy sdk's output
+
+ # Blender light types match FBX's, funny coincidence, we just need to
+ # be sure that all unsupported types are made into a point light
+ #ePOINT,
+ #eDIRECTIONAL
+ #eSPOT
+ light_type_items = {'POINT': 0, 'SUN': 1, 'SPOT': 2, 'HEMI': 3, 'AREA': 4}
+ light_type = light_type_items[light.type]
+
+ if light_type > 2:
+ light_type = 1 # hemi and area lights become directional
+
+ if light.type in ('HEMI', ):
+ do_light = not (light.use_diffuse or light.use_specular)
+ do_shadow = False
+ else:
+ do_light = not (light.use_only_shadow or (not light.use_diffuse and not light.use_specular))
+ do_shadow = (light.shadow_method in ('RAY_SHADOW', 'BUFFER_SHADOW'))
+
+ # NOTE(review): 'scale' is computed but not used below in this block — verify against later revisions.
+ scale = abs(global_matrix.to_scale()[0]) # scale is always uniform in this case
+
+ file.write('\n\t\t\tProperty: "LightType", "enum", "",%i' % light_type)
+ file.write('\n\t\t\tProperty: "CastLightOnObject", "bool", "",1')
+ file.write('\n\t\t\tProperty: "DrawVolumetricLight", "bool", "",1')
+ file.write('\n\t\t\tProperty: "DrawGroundProjection", "bool", "",1')
+ file.write('\n\t\t\tProperty: "DrawFrontFacingVolumetricLight", "bool", "",0')
+ file.write('\n\t\t\tProperty: "GoboProperty", "object", ""')
+ file.write('\n\t\t\tProperty: "Color", "Color", "A+",1,1,1')
+ file.write('\n\t\t\tProperty: "Intensity", "Intensity", "A+",%.2f' % (min(light.energy * 100.0, 200.0))) # clamp below 200
+ if light.type == 'SPOT':
+ # spot_size is in radians; FBX cone angle is in degrees.
+ file.write('\n\t\t\tProperty: "Cone angle", "Cone angle", "A+",%.2f' % math.degrees(light.spot_size))
+ file.write('\n\t\t\tProperty: "Fog", "Fog", "A+",50')
+ file.write('\n\t\t\tProperty: "Color", "Color", "A",%.2f,%.2f,%.2f' % tuple(light.color))
+
+ file.write('\n\t\t\tProperty: "Intensity", "Intensity", "A+",%.2f' % (min(light.energy * 100.0, 200.0))) # clamp below 200
+
+ file.write('\n\t\t\tProperty: "Fog", "Fog", "A+",50')
+ file.write('\n\t\t\tProperty: "LightType", "enum", "",%i' % light_type)
+ file.write('\n\t\t\tProperty: "CastLightOnObject", "bool", "",%i' % do_light)
+ file.write('\n\t\t\tProperty: "DrawGroundProjection", "bool", "",1')
+ file.write('\n\t\t\tProperty: "DrawFrontFacingVolumetricLight", "bool", "",0')
+ file.write('\n\t\t\tProperty: "DrawVolumetricLight", "bool", "",1')
+ file.write('\n\t\t\tProperty: "GoboProperty", "object", ""')
+ file.write('\n\t\t\tProperty: "DecayType", "enum", "",0')
+ file.write('\n\t\t\tProperty: "DecayStart", "double", "",%.2f' % light.distance)
+ file.write('\n\t\t\tProperty: "EnableNearAttenuation", "bool", "",0')
+ file.write('\n\t\t\tProperty: "NearAttenuationStart", "double", "",0')
+ file.write('\n\t\t\tProperty: "NearAttenuationEnd", "double", "",0')
+ file.write('\n\t\t\tProperty: "EnableFarAttenuation", "bool", "",0')
+ file.write('\n\t\t\tProperty: "FarAttenuationStart", "double", "",0')
+ file.write('\n\t\t\tProperty: "FarAttenuationEnd", "double", "",0')
+ file.write('\n\t\t\tProperty: "CastShadows", "bool", "",%i' % do_shadow)
+ file.write('\n\t\t\tProperty: "ShadowColor", "ColorRGBA", "",0,0,0,1')
+ file.write('\n\t\t}')
+ file.write('\n\t\tMultiLayer: 0')
+ file.write('\n\t\tMultiTake: 0')
+ file.write('\n\t\tShading: Y')
+ file.write('\n\t\tCulling: "CullingOff"')
+ file.write('\n\t\tTypeFlags: "Light"')
+ file.write('\n\t\tGeometryVersion: 124')
+ file.write('\n\t}')
+
+ # matrixOnly is not used at the moment
+ def write_null(my_null=None, fbxName=None):
+ '''
+ Write an FBX "Null" model node (an empty). my_null may be None, in
+ which case fbxName must be given and default properties are written.
+ The node's pose matrix is recorded in pose_items.
+ '''
+ # ob can be null
+ if not fbxName:
+ fbxName = my_null.fbxName
+
+ file.write('\n\tModel: "Model::%s", "Null" {' % fbxName)
+ file.write('\n\t\tVersion: 232')
+
+ if my_null:
+ poseMatrix = write_object_props(my_null.blenObject, None, my_null.parRelMatrix())[3]
+ else:
+ poseMatrix = write_object_props()[3]
+
+ pose_items.append((fbxName, poseMatrix))
+
+ file.write('''
+ }
+ MultiLayer: 0
+ MultiTake: 1
+ Shading: Y
+ Culling: "CullingOff"
+ TypeFlags: "Null"
+ }''')
+
+ # Material Settings
+ # Ambient color for material export: taken from the scene's world if one
+ # exists, otherwise black.
+ if world:
+ world_amb = world.ambient_color[:]
+ else:
+ world_amb = 0.0, 0.0, 0.0 # default value
+
+ def write_material(matname, mat):
+ '''
+ Write a blender material as an FBX Material block. mat may be None,
+ in which case hard-coded default Phong values are written.
+ '''
+ file.write('\n\tMaterial: "Material::%s", "" {' % matname)
+
+ # Todo, add more material Properties.
+ if mat:
+ mat_cold = tuple(mat.diffuse_color)
+ mat_cols = tuple(mat.specular_color)
+ #mat_colm = tuple(mat.mirCol) # we wont use the mirror color
+ mat_colamb = world_amb
+
+ mat_dif = mat.diffuse_intensity
+ mat_amb = mat.ambient
+ # Map Blender hardness (1..511) into FBX's shininess range.
+ mat_hard = (float(mat.specular_hardness) - 1.0) / 5.10
+ mat_spec = mat.specular_intensity / 2.0
+ mat_alpha = mat.alpha
+ mat_emit = mat.emit
+ mat_shadeless = mat.use_shadeless
+ if mat_shadeless:
+ mat_shader = 'Lambert'
+ else:
+ if mat.diffuse_shader == 'LAMBERT':
+ mat_shader = 'Lambert'
+ else:
+ mat_shader = 'Phong'
+ else:
+ mat_cols = mat_cold = 0.8, 0.8, 0.8
+ mat_colamb = 0.0, 0.0, 0.0
+ # mat_colm
+ mat_dif = 1.0
+ mat_amb = 0.5
+ mat_hard = 20.0
+ mat_spec = 0.2
+ mat_alpha = 1.0
+ mat_emit = 0.0
+ mat_shadeless = False
+ mat_shader = 'Phong'
+
+ file.write('\n\t\tVersion: 102')
+ file.write('\n\t\tShadingModel: "%s"' % mat_shader.lower())
+ file.write('\n\t\tMultiLayer: 0')
+
+ file.write('\n\t\tProperties60: {')
+ file.write('\n\t\t\tProperty: "ShadingModel", "KString", "", "%s"' % mat_shader)
+ file.write('\n\t\t\tProperty: "MultiLayer", "bool", "",0')
+ file.write('\n\t\t\tProperty: "EmissiveColor", "ColorRGB", "",%.4f,%.4f,%.4f' % mat_cold) # emit and diffuse color are the same in blender
+ file.write('\n\t\t\tProperty: "EmissiveFactor", "double", "",%.4f' % mat_emit)
+
+ file.write('\n\t\t\tProperty: "AmbientColor", "ColorRGB", "",%.4f,%.4f,%.4f' % mat_colamb)
+ file.write('\n\t\t\tProperty: "AmbientFactor", "double", "",%.4f' % mat_amb)
+ file.write('\n\t\t\tProperty: "DiffuseColor", "ColorRGB", "",%.4f,%.4f,%.4f' % mat_cold)
+ file.write('\n\t\t\tProperty: "DiffuseFactor", "double", "",%.4f' % mat_dif)
+ file.write('\n\t\t\tProperty: "Bump", "Vector3D", "",0,0,0')
+ file.write('\n\t\t\tProperty: "TransparentColor", "ColorRGB", "",1,1,1')
+ file.write('\n\t\t\tProperty: "TransparencyFactor", "double", "",%.4f' % (1.0 - mat_alpha))
+ # Specular/reflection properties only make sense for non-shadeless materials.
+ if not mat_shadeless:
+ file.write('\n\t\t\tProperty: "SpecularColor", "ColorRGB", "",%.4f,%.4f,%.4f' % mat_cols)
+ file.write('\n\t\t\tProperty: "SpecularFactor", "double", "",%.4f' % mat_spec)
+ file.write('\n\t\t\tProperty: "ShininessExponent", "double", "",80.0')
+ file.write('\n\t\t\tProperty: "ReflectionColor", "ColorRGB", "",0,0,0')
+ file.write('\n\t\t\tProperty: "ReflectionFactor", "double", "",1')
+ file.write('\n\t\t\tProperty: "Emissive", "ColorRGB", "",0,0,0')
+ file.write('\n\t\t\tProperty: "Ambient", "ColorRGB", "",%.1f,%.1f,%.1f' % mat_colamb)
+ file.write('\n\t\t\tProperty: "Diffuse", "ColorRGB", "",%.1f,%.1f,%.1f' % mat_cold)
+ if not mat_shadeless:
+ file.write('\n\t\t\tProperty: "Specular", "ColorRGB", "",%.1f,%.1f,%.1f' % mat_cols)
+ file.write('\n\t\t\tProperty: "Shininess", "double", "",%.1f' % mat_hard)
+ file.write('\n\t\t\tProperty: "Opacity", "double", "",%.1f' % mat_alpha)
+ if not mat_shadeless:
+ file.write('\n\t\t\tProperty: "Reflectivity", "double", "",0')
+
+ file.write('\n\t\t}')
+ file.write('\n\t}')
+
+ # tex is an Image (Arystan)
+ def write_video(texname, tex):
+ '''
+ Write an FBX Video (Clip) block for an image. tex may be None, in
+ which case empty filenames are written. The image path is remapped
+ via path_reference() and the file queued in copy_set.
+ '''
+ # Same as texture really!
+ file.write('\n\tVideo: "Video::%s", "Clip" {' % texname)
+
+ file.write('''
+ Type: "Clip"
+ Properties60: {
+ Property: "FrameRate", "double", "",0
+ Property: "LastFrame", "int", "",0
+ Property: "Width", "int", "",0
+ Property: "Height", "int", "",0''')
+ if tex:
+ fname_rel = bpy_extras.io_utils.path_reference(tex.filepath, base_src, base_dst, path_mode, "", copy_set)
+ fname_strip = os.path.basename(fname_rel)
+ else:
+ fname_strip = fname_rel = ""
+
+ file.write('\n\t\t\tProperty: "Path", "charptr", "", "%s"' % fname_strip)
+
+ file.write('''
+ Property: "StartFrame", "int", "",0
+ Property: "StopFrame", "int", "",0
+ Property: "PlaySpeed", "double", "",1
+ Property: "Offset", "KTime", "",0
+ Property: "InterlaceMode", "enum", "",0
+ Property: "FreeRunning", "bool", "",0
+ Property: "Loop", "bool", "",0
+ Property: "AccessMode", "enum", "",0
+ }
+ UseMipMap: 0''')
+
+ file.write('\n\t\tFilename: "%s"' % fname_strip)
+ file.write('\n\t\tRelativeFilename: "%s"' % fname_rel) # make relative
+ file.write('\n\t}')
+
+ def write_texture(texname, tex, num):
+ '''
+ Write an FBX TextureVideoClip block. num is written into the
+ "Texture alpha" property. NOTE(review): unlike write_video, the
+ tex-is-None branch below comes after tex.use_clamp_x/y are read,
+ so tex must not actually be None here — verify with callers.
+ '''
+ # if tex is None then this is a dummy tex
+ file.write('\n\tTexture: "Texture::%s", "TextureVideoClip" {' % texname)
+ file.write('\n\t\tType: "TextureVideoClip"')
+ file.write('\n\t\tVersion: 202')
+ # TODO, rare case _empty_ exists as a name.
+ file.write('\n\t\tTextureName: "Texture::%s"' % texname)
+
+ file.write('''
+ Properties60: {
+ Property: "Translation", "Vector", "A+",0,0,0
+ Property: "Rotation", "Vector", "A+",0,0,0
+ Property: "Scaling", "Vector", "A+",1,1,1''')
+ file.write('\n\t\t\tProperty: "Texture alpha", "Number", "A+",%i' % num)
+
+ # WrapModeU/V 0==rep, 1==clamp, TODO add support
+ file.write('''
+ Property: "TextureTypeUse", "enum", "",0
+ Property: "CurrentTextureBlendMode", "enum", "",1
+ Property: "UseMaterial", "bool", "",0
+ Property: "UseMipMap", "bool", "",0
+ Property: "CurrentMappingType", "enum", "",0
+ Property: "UVSwap", "bool", "",0''')
+
+ file.write('\n\t\t\tProperty: "WrapModeU", "enum", "",%i' % tex.use_clamp_x)
+ file.write('\n\t\t\tProperty: "WrapModeV", "enum", "",%i' % tex.use_clamp_y)
+
+ file.write('''
+ Property: "TextureRotationPivot", "Vector3D", "",0,0,0
+ Property: "TextureScalingPivot", "Vector3D", "",0,0,0
+ Property: "VideoProperty", "object", ""
+ }''')
+
+ file.write('\n\t\tMedia: "Video::%s"' % texname)
+
+ if tex:
+ fname_rel = bpy_extras.io_utils.path_reference(tex.filepath, base_src, base_dst, path_mode, "", copy_set)
+ fname_strip = os.path.basename(fname_rel)
+ else:
+ fname_strip = fname_rel = ""
+
+ file.write('\n\t\tFileName: "%s"' % fname_strip)
+ file.write('\n\t\tRelativeFilename: "%s"' % fname_rel) # need some make relative command
+
+ file.write('''
+ ModelUVTranslation: 0,0
+ ModelUVScaling: 1,1
+ Texture_Alpha_Source: "None"
+ Cropping: 0,0,0,0
+ }''')
+
def write_deformer_skin(obname):
    """Emit the Skin deformer node for mesh *obname* (one per deformed mesh)."""
    # Header carries the mesh name; the body is constant boilerplate.
    header = '\n\tDeformer: "Deformer::Skin %s", "Skin" {' % obname
    file.write(header)
    file.write('''
Version: 100
MultiLayer: 0
Type: "Skin"
Properties60: {
}
Link_DeformAcuracy: 50
}''')
+
# in the example was 'Bip01 L Thigh_2'
def write_sub_deformer_skin(my_mesh, my_bone, weights):
    """Write one Cluster (sub-deformer) linking *my_bone* to *my_mesh*.

    Each subdeformer is specific to a mesh, but the bone it links to can
    be used by many sub-deformers, so the SubDeformer needs the
    mesh-object name as a prefix to make it unique.

    Its possible that there is no matching vgroup in this mesh, in that
    case no verts are in the subdeformer -- a bit silly but doesnt
    really matter.

    weights -- (group-name list, per-vertex weight rows) as built by the
    caller, or unused when the mesh is bone-parented (fbxBoneParent set).
    """
    file.write('\n\tDeformer: "SubDeformer::Cluster %s %s", "Cluster" {' % (my_mesh.fbxName, my_bone.fbxName))

    file.write('''
Version: 100
MultiLayer: 0
Type: "Cluster"
Properties60: {
Property: "SrcModel", "object", ""
Property: "SrcModelReference", "object", ""
}
UserData: "", ""''')

    # Support for bone parents
    if my_mesh.fbxBoneParent:
        if my_mesh.fbxBoneParent == my_bone:
            # TODO - this is a bit lazy, we could have a simple write loop
            # for this case because all weights are 1.0 but for now this is ok
            # Parent Bones arent used all that much anyway.
            vgroup_data = [(j, 1.0) for j in range(len(my_mesh.blenData.vertices))]
        else:
            # This bone is not a parent of this mesh object, no weights
            vgroup_data = []

    else:
        # Normal weight painted mesh
        if my_bone.blenName in weights[0]:
            # Before we used normalized wright list
            #vgroup_data = me.getVertsFromGroup(bone.name, 1)
            group_index = weights[0].index(my_bone.blenName)
            # keep only vertices with a non-zero weight for this group
            vgroup_data = [(j, weight[group_index]) for j, weight in enumerate(weights[1]) if weight[group_index]]
        else:
            vgroup_data = []

    # Vertex indices, comma separated, wrapped every 24 values.
    # i == -1 means "first value" (no leading comma).
    file.write('\n\t\tIndexes: ')

    i = -1
    for vg in vgroup_data:
        if i == -1:
            file.write('%i' % vg[0])
            i = 0
        else:
            if i == 23:
                file.write('\n\t\t')
                i = 0
            file.write(',%i' % vg[0])
        i += 1

    # Matching weights, wrapped every 39 values.
    file.write('\n\t\tWeights: ')
    i = -1
    for vg in vgroup_data:
        if i == -1:
            file.write('%.8f' % vg[1])
            i = 0
        else:
            if i == 38:
                file.write('\n\t\t')
                i = 0
            file.write(',%.8f' % vg[1])
        i += 1

    # FIX: the old `if my_mesh.fbxParent:` / `else:` branches computed the
    # byte-identical expression, so the conditional was dead code -- compute
    # the bind matrix once.  (The original TODO FIXME still stands: skinned
    # meshes with object parents are broken in some cases; avoid parenting
    # skinned meshes where possible.)
    m = (my_mesh.matrixWorld.inverted() * my_bone.fbxArm.matrixWorld.copy() * my_bone.restMatrix) * mtx4_z90

    #m = mtx4_z90 * my_bone.restMatrix
    matstr = mat4x4str(m)
    matstr_i = mat4x4str(m.inverted())

    file.write('\n\t\tTransform: %s' % matstr_i)  # THIS IS __NOT__ THE GLOBAL MATRIX AS DOCUMENTED :/
    file.write('\n\t\tTransformLink: %s' % matstr)
    file.write('\n\t}')
+
def write_mesh(my_mesh):
    """Write one FBX "Model"/"Mesh" node for *my_mesh* (a my_object_generic).

    Emits vertices, polygon/edge indices, per-vertex normals, optional
    smoothing data, vertex-color layers, UV/texture layers, material
    indices and the trailing Layer descriptors, writing through the
    enclosing scope's ``file``.  Also appends (fbxName, poseMatrix) to
    ``pose_items`` for the bind-pose section written later.

    Throughout, the ``i`` counters implement comma separation plus line
    wrapping: -1 means "first value" (no leading comma) and the counter
    resets after each wrap.
    """
    me = my_mesh.blenData

    # if there are non NULL materials on this mesh
    do_materials = bool(my_mesh.blenMaterials)
    do_textures = bool(my_mesh.blenTextures)
    do_uvs = bool(me.uv_textures)

    file.write('\n\tModel: "Model::%s", "Mesh" {' % my_mesh.fbxName)
    file.write('\n\t\tVersion: 232')  # newline is added in write_object_props

    # convert into lists once.
    me_vertices = me.vertices[:]
    me_edges = me.edges[:]
    me_faces = me.faces[:]

    # write_object_props opens a Properties60 block; element [3] of its
    # return value is the pose matrix recorded for the BindPose section.
    poseMatrix = write_object_props(my_mesh.blenObject, None, my_mesh.parRelMatrix())[3]
    pose_items.append((my_mesh.fbxName, poseMatrix))

    file.write('\n\t\t}')  # closes the Properties60 block opened above
    file.write('\n\t\tMultiLayer: 0')
    file.write('\n\t\tMultiTake: 1')
    file.write('\n\t\tShading: Y')
    file.write('\n\t\tCulling: "CullingOff"')

    # Write the Real Mesh data here
    file.write('\n\t\tVertices: ')
    i = -1

    for v in me_vertices:
        if i == -1:
            file.write('%.6f,%.6f,%.6f' % v.co[:])
            i = 0
        else:
            if i == 7:
                file.write('\n\t\t')
                i = 0
            file.write(',%.6f,%.6f,%.6f' % v.co[:])
        i += 1

    file.write('\n\t\tPolygonVertexIndex: ')
    i = -1
    for f in me_faces:
        fi = f.vertices[:]

        # last index XORd w. -1 indicates end of face
        if i == -1:
            if len(fi) == 3:
                file.write('%i,%i,%i' % (fi[0], fi[1], fi[2] ^ -1))
            else:
                file.write('%i,%i,%i,%i' % (fi[0], fi[1], fi[2], fi[3] ^ -1))
            i = 0
        else:
            if i == 13:
                file.write('\n\t\t')
                i = 0
            if len(fi) == 3:
                file.write(',%i,%i,%i' % (fi[0], fi[1], fi[2] ^ -1))
            else:
                file.write(',%i,%i,%i,%i' % (fi[0], fi[1], fi[2], fi[3] ^ -1))
        i += 1

    # write loose edges as faces.
    # note: `i` deliberately carries over from the face loop so wrapping
    # continues in the same PolygonVertexIndex stream.
    for ed in me_edges:
        if ed.is_loose:
            ed_val = ed.vertices[:]
            ed_val = ed_val[0], ed_val[-1] ^ -1

            if i == -1:
                file.write('%i,%i' % ed_val)
                i = 0
            else:
                if i == 13:
                    file.write('\n\t\t')
                    i = 0
                file.write(',%i,%i' % ed_val)
            i += 1

    file.write('\n\t\tEdges: ')
    i = -1
    for ed in me_edges:
        if i == -1:
            file.write('%i,%i' % (ed.vertices[0], ed.vertices[1]))
            i = 0
        else:
            if i == 13:
                file.write('\n\t\t')
                i = 0
            file.write(',%i,%i' % (ed.vertices[0], ed.vertices[1]))
        i += 1

    file.write('\n\t\tGeometryVersion: 124')

    # Per-vertex normals, 1 vertex (3 floats) per value, wrapped every 3.
    file.write('''
LayerElementNormal: 0 {
Version: 101
Name: ""
MappingInformationType: "ByVertice"
ReferenceInformationType: "Direct"
Normals: ''')

    i = -1
    for v in me_vertices:
        if i == -1:
            file.write('%.15f,%.15f,%.15f' % v.normal[:])
            i = 0
        else:
            if i == 2:
                file.write('\n\t\t\t ')
                i = 0
            file.write(',%.15f,%.15f,%.15f' % v.normal[:])
        i += 1
    file.write('\n\t\t}')

    # Write Face Smoothing
    if mesh_smooth_type == 'FACE':
        file.write('''
LayerElementSmoothing: 0 {
Version: 102
Name: ""
MappingInformationType: "ByPolygon"
ReferenceInformationType: "Direct"
Smoothing: ''')

        i = -1
        for f in me_faces:
            if i == -1:
                file.write('%i' % f.use_smooth)
                i = 0
            else:
                if i == 54:
                    file.write('\n\t\t\t ')
                    i = 0
                file.write(',%i' % f.use_smooth)
            i += 1

        file.write('\n\t\t}')

    elif mesh_smooth_type == 'EDGE':
        # Write Edge Smoothing
        file.write('''
LayerElementSmoothing: 0 {
Version: 101
Name: ""
MappingInformationType: "ByEdge"
ReferenceInformationType: "Direct"
Smoothing: ''')

        i = -1
        for ed in me_edges:
            if i == -1:
                file.write('%i' % (ed.use_edge_sharp))
                i = 0
            else:
                if i == 54:
                    file.write('\n\t\t\t ')
                    i = 0
                file.write(',%i' % (ed.use_edge_sharp))
            i += 1

        file.write('\n\t\t}')
    elif mesh_smooth_type == 'OFF':
        pass
    else:
        raise Exception("invalid mesh_smooth_type: %r" % mesh_smooth_type)

    # Write VertexColor Layers
    # note, no programs seem to use this info :/
    collayers = []
    if len(me.vertex_colors):
        collayers = me.vertex_colors
        for colindex, collayer in enumerate(collayers):
            file.write('\n\t\tLayerElementColor: %i {' % colindex)
            file.write('\n\t\t\tVersion: 101')
            file.write('\n\t\t\tName: "%s"' % collayer.name)

            file.write('''
MappingInformationType: "ByPolygonVertex"
ReferenceInformationType: "IndexToDirect"
Colors: ''')

            i = -1
            ii = 0  # Count how many Colors we write

            for fi, cf in enumerate(collayer.data):
                # quads supply 4 corner colors, tris 3
                if len(me_faces[fi].vertices) == 4:
                    colors = cf.color1[:], cf.color2[:], cf.color3[:], cf.color4[:]
                else:
                    colors = cf.color1[:], cf.color2[:], cf.color3[:]

                for col in colors:
                    if i == -1:
                        file.write('%.4f,%.4f,%.4f,1' % col)
                        i = 0
                    else:
                        if i == 7:
                            file.write('\n\t\t\t\t')
                            i = 0
                        file.write(',%.4f,%.4f,%.4f,1' % col)
                    i += 1
                    ii += 1  # One more Color

            # index buffer is the identity 0..ii-1 (Direct data, but the
            # layer declares IndexToDirect)
            file.write('\n\t\t\tColorIndex: ')
            i = -1
            for j in range(ii):
                if i == -1:
                    file.write('%i' % j)
                    i = 0
                else:
                    if i == 55:
                        file.write('\n\t\t\t\t')
                        i = 0
                    file.write(',%i' % j)
                i += 1

            file.write('\n\t\t}')

    # Write UV and texture layers.
    uvlayers = []
    if do_uvs:
        uvlayers = me.uv_textures
        # NOTE(review): uvlayer_orig is never read below;
        # me.uv_textures.active is re-queried in the materials section.
        uvlayer_orig = me.uv_textures.active
        for uvindex, uvlayer in enumerate(me.uv_textures):
            file.write('\n\t\tLayerElementUV: %i {' % uvindex)
            file.write('\n\t\t\tVersion: 101')
            file.write('\n\t\t\tName: "%s"' % uvlayer.name)

            file.write('''
MappingInformationType: "ByPolygonVertex"
ReferenceInformationType: "IndexToDirect"
UV: ''')

            i = -1
            ii = 0  # Count how many UVs we write

            for uf in uvlayer.data:
                # workaround, since uf.uv iteration is wrong atm
                for uv in uf.uv:
                    if i == -1:
                        file.write('%.6f,%.6f' % uv[:])
                        i = 0
                    else:
                        if i == 7:
                            file.write('\n\t\t\t ')
                            i = 0
                        file.write(',%.6f,%.6f' % uv[:])
                    i += 1
                    ii += 1  # One more UV

            # identity index buffer, as for colors above
            file.write('\n\t\t\tUVIndex: ')
            i = -1
            for j in range(ii):
                if i == -1:
                    file.write('%i' % j)
                    i = 0
                else:
                    if i == 55:
                        file.write('\n\t\t\t\t')
                        i = 0
                    file.write(',%i' % j)
                i += 1

            file.write('\n\t\t}')

            if do_textures:
                file.write('\n\t\tLayerElementTexture: %i {' % uvindex)
                file.write('\n\t\t\tVersion: 101')
                file.write('\n\t\t\tName: "%s"' % uvlayer.name)

                if len(my_mesh.blenTextures) == 1:
                    file.write('\n\t\t\tMappingInformationType: "AllSame"')
                else:
                    file.write('\n\t\t\tMappingInformationType: "ByPolygon"')

                file.write('\n\t\t\tReferenceInformationType: "IndexToDirect"')
                file.write('\n\t\t\tBlendMode: "Translucent"')
                file.write('\n\t\t\tTextureAlpha: 1')
                file.write('\n\t\t\tTextureId: ')

                if len(my_mesh.blenTextures) == 1:
                    file.write('0')
                else:
                    # map texture datablock -> index in blenTextures;
                    # None (faces with no image) maps to -1
                    texture_mapping_local = {None: -1}

                    i = 0  # 1 for dummy
                    for tex in my_mesh.blenTextures:
                        if tex:  # None is set above
                            texture_mapping_local[tex] = i
                            i += 1

                    i = -1
                    for f in uvlayer.data:
                        img_key = f.image

                        if i == -1:
                            i = 0
                            file.write('%s' % texture_mapping_local[img_key])
                        else:
                            if i == 55:
                                file.write('\n ')
                                i = 0

                            file.write(',%s' % texture_mapping_local[img_key])
                        i += 1

            else:
                # no textures: emit an empty placeholder layer element
                file.write('''
LayerElementTexture: 0 {
Version: 101
Name: ""
MappingInformationType: "NoMappingInformation"
ReferenceInformationType: "IndexToDirect"
BlendMode: "Translucent"
TextureAlpha: 1
TextureId: ''')
            # closes the LayerElementTexture opened in either branch
            file.write('\n\t\t}')

    # Done with UV/textures.
    if do_materials:
        file.write('\n\t\tLayerElementMaterial: 0 {')
        file.write('\n\t\t\tVersion: 101')
        file.write('\n\t\t\tName: ""')

        if len(my_mesh.blenMaterials) == 1:
            file.write('\n\t\t\tMappingInformationType: "AllSame"')
        else:
            file.write('\n\t\t\tMappingInformationType: "ByPolygon"')

        file.write('\n\t\t\tReferenceInformationType: "IndexToDirect"')
        file.write('\n\t\t\tMaterials: ')

        if len(my_mesh.blenMaterials) == 1:
            file.write('0')
        else:
            # Build a material mapping for this
            material_mapping_local = {}  # local-mat & tex : global index.

            for j, mat_tex_pair in enumerate(my_mesh.blenMaterials):
                material_mapping_local[mat_tex_pair] = j

            len_material_mapping_local = len(material_mapping_local)

            mats = my_mesh.blenMaterialList

            if me.uv_textures.active:
                uv_faces = me.uv_textures.active.data
            else:
                # pad with Nones so zip() below still pairs every face
                uv_faces = [None] * len(me_faces)

            i = -1
            for f, uf in zip(me_faces, uv_faces):
                # for f in me_faces:
                try:
                    mat = mats[f.material_index]
                except:
                    mat = None

                if do_uvs:
                    tex = uf.image  # WARNING - MULTI UV LAYER IMAGES NOT SUPPORTED :/
                else:
                    tex = None

                if i == -1:
                    i = 0
                    file.write('%s' % (material_mapping_local[mat, tex]))  # None for mat or tex is ok
                else:
                    if i == 55:
                        file.write('\n\t\t\t\t')
                        i = 0

                    file.write(',%s' % (material_mapping_local[mat, tex]))
                i += 1

        file.write('\n\t\t}')

    # Layer 0 declares which layer elements exist; normals are always present.
    file.write('''
Layer: 0 {
Version: 100
LayerElement: {
Type: "LayerElementNormal"
TypedIndex: 0
}''')

    if do_materials:
        file.write('''
LayerElement: {
Type: "LayerElementMaterial"
TypedIndex: 0
}''')

    # Smoothing info
    if mesh_smooth_type != 'OFF':
        file.write('''
LayerElement: {
Type: "LayerElementSmoothing"
TypedIndex: 0
}''')

    # NOTE(review): an old "Always write this" comment stood here, but the
    # element is in fact only declared when textures are present.
    if do_textures:
        file.write('''
LayerElement: {
Type: "LayerElementTexture"
TypedIndex: 0
}''')

    if me.vertex_colors:
        file.write('''
LayerElement: {
Type: "LayerElementColor"
TypedIndex: 0
}''')

    if do_uvs:  # same as me.faceUV
        file.write('''
LayerElement: {
Type: "LayerElementUV"
TypedIndex: 0
}''')

    file.write('\n\t\t}')

    # additional UV layers get their own Layer blocks (1..n-1)
    if len(uvlayers) > 1:
        for i in range(1, len(uvlayers)):

            file.write('\n\t\tLayer: %i {' % i)
            file.write('\n\t\t\tVersion: 100')

            file.write('''
LayerElement: {
Type: "LayerElementUV"''')

            file.write('\n\t\t\t\tTypedIndex: %i' % i)
            file.write('\n\t\t\t}')

            if do_textures:

                file.write('''
LayerElement: {
Type: "LayerElementTexture"''')

                file.write('\n\t\t\t\tTypedIndex: %i' % i)
                file.write('\n\t\t\t}')

            file.write('\n\t\t}')

    # additional vertex-color layers follow after any extra UV layers
    if len(collayers) > 1:
        # Take into account any UV layers
        layer_offset = 0
        if uvlayers:
            layer_offset = len(uvlayers) - 1

        for i in range(layer_offset, len(collayers) + layer_offset):
            file.write('\n\t\tLayer: %i {' % i)
            file.write('\n\t\t\tVersion: 100')

            file.write('''
LayerElement: {
Type: "LayerElementColor"''')

            file.write('\n\t\t\t\tTypedIndex: %i' % i)
            file.write('\n\t\t\t}')
            file.write('\n\t\t}')
    file.write('\n\t}')  # close the Model node
+
def write_group(name):
    """Emit a GroupSelection node for the Blender group called *name*."""
    # Node header carries the (sanitized) group name ...
    opening = '\n\tGroupSelection: "GroupSelection::%s", "Default" {' % name
    file.write(opening)
    # ... followed by a fixed property block.
    file.write('''
Properties60: {
Property: "MultiLayer", "bool", "",0
Property: "Pickable", "bool", "",1
Property: "Transformable", "bool", "",1
Property: "Show", "bool", "",1
}
MultiLayer: 0
}''')
+
+ # add meshes here to clear because they are not used anywhere.
+ meshes_to_clear = []
+
+ ob_meshes = []
+ ob_lights = []
+ ob_cameras = []
+ # in fbx we export bones as children of the mesh
+ # armatures not a part of a mesh, will be added to ob_arms
+ ob_bones = []
+ ob_arms = []
+ ob_null = [] # emptys
+
+ # List of types that have blender objects (not bones)
+ ob_all_typegroups = [ob_meshes, ob_lights, ob_cameras, ob_arms, ob_null]
+
+ groups = [] # blender groups, only add ones that have objects in the selections
+ materials = {} # (mat, image) keys, should be a set()
+ textures = {} # should be a set()
+
+ tmp_ob_type = ob_type = None # incase no objects are exported, so as not to raise an error
+
+## XXX
+
+ if 'ARMATURE' in object_types:
+ # This is needed so applying modifiers dosnt apply the armature deformation, its also needed
+ # ...so mesh objects return their rest worldspace matrix when bone-parents are exported as weighted meshes.
+ # set every armature to its rest, backup the original values so we done mess up the scene
+ ob_arms_orig_rest = [arm.pose_position for arm in bpy.data.armatures]
+
+ for arm in bpy.data.armatures:
+ arm.pose_position = 'REST'
+
+ if ob_arms_orig_rest:
+ for ob_base in bpy.data.objects:
+ if ob_base.type == 'ARMATURE':
+ ob_base.update_tag()
+
+ # This causes the makeDisplayList command to effect the mesh
+ scene.frame_set(scene.frame_current)
+
+ for ob_base in context_objects:
+
+ # ignore dupli children
+ if ob_base.parent and ob_base.parent.dupli_type in {'VERTS', 'FACES'}:
+ continue
+
+ obs = [(ob_base, ob_base.matrix_world.copy())]
+ if ob_base.dupli_type != 'NONE':
+ ob_base.dupli_list_create(scene)
+ obs = [(dob.object, dob.matrix.copy()) for dob in ob_base.dupli_list]
+
+ for ob, mtx in obs:
+# for ob, mtx in BPyObject.getDerivedObjects(ob_base):
+ tmp_ob_type = ob.type
+ if tmp_ob_type == 'CAMERA':
+ if 'CAMERA' in object_types:
+ ob_cameras.append(my_object_generic(ob, mtx))
+ elif tmp_ob_type == 'LAMP':
+ if 'LAMP' in object_types:
+ ob_lights.append(my_object_generic(ob, mtx))
+ elif tmp_ob_type == 'ARMATURE':
+ if 'ARMATURE' in object_types:
+ # TODO - armatures dont work in dupligroups!
+ if ob not in ob_arms:
+ ob_arms.append(ob)
+ # ob_arms.append(ob) # replace later. was "ob_arms.append(sane_obname(ob), ob)"
+ elif tmp_ob_type == 'EMPTY':
+ if 'EMPTY' in object_types:
+ ob_null.append(my_object_generic(ob, mtx))
+ elif 'MESH' in object_types:
+ origData = True
+ if tmp_ob_type != 'MESH':
+ try:
+ me = ob.to_mesh(scene, True, 'PREVIEW')
+ except:
+ me = None
+
+ if me:
+ meshes_to_clear.append(me)
+ mats = me.materials
+ origData = False
+ else:
+ # Mesh Type!
+ if mesh_apply_modifiers:
+ me = ob.to_mesh(scene, True, 'PREVIEW')
+
+ # print ob, me, me.getVertGroupNames()
+ meshes_to_clear.append(me)
+ origData = False
+ mats = me.materials
+ else:
+ me = ob.data
+ mats = me.materials
+
+# # Support object colors
+# tmp_colbits = ob.colbits
+# if tmp_colbits:
+# tmp_ob_mats = ob.getMaterials(1) # 1 so we get None's too.
+# for i in xrange(16):
+# if tmp_colbits & (1<<i):
+# mats[i] = tmp_ob_mats[i]
+# del tmp_ob_mats
+# del tmp_colbits
+
+ if me:
+# # This WILL modify meshes in blender if mesh_apply_modifiers is disabled.
+# # so strictly this is bad. but only in rare cases would it have negative results
+# # say with dupliverts the objects would rotate a bit differently
+# if EXP_MESH_HQ_NORMALS:
+# BPyMesh.meshCalcNormals(me) # high quality normals nice for realtime engines.
+
+ texture_mapping_local = {}
+ material_mapping_local = {}
+ if me.uv_textures:
+ for uvlayer in me.uv_textures:
+ for f, uf in zip(me.faces, uvlayer.data):
+ tex = uf.image
+ textures[tex] = texture_mapping_local[tex] = None
+
+ try:
+ mat = mats[f.material_index]
+ except:
+ mat = None
+
+ materials[mat, tex] = material_mapping_local[mat, tex] = None # should use sets, wait for blender 2.5
+
+ else:
+ for mat in mats:
+ # 2.44 use mat.lib too for uniqueness
+ materials[mat, None] = material_mapping_local[mat, None] = None
+ else:
+ materials[None, None] = None
+
+ if 'ARMATURE' in object_types:
+ armob = ob.find_armature()
+ blenParentBoneName = None
+
+ # parent bone - special case
+ if (not armob) and ob.parent and ob.parent.type == 'ARMATURE' and \
+ ob.parent_type == 'BONE':
+ armob = ob.parent
+ blenParentBoneName = ob.parent_bone
+
+ if armob and armob not in ob_arms:
+ ob_arms.append(armob)
+
+ # Warning for scaled, mesh objects with armatures
+ if abs(ob.scale[0] - 1.0) > 0.05 or abs(ob.scale[1] - 1.0) > 0.05 or abs(ob.scale[1] - 1.0) > 0.05:
+ operator.report('WARNING', "Object '%s' has a scale of (%.3f, %.3f, %.3f), Armature deformation will not work as expected!, Apply Scale to fix." % ((ob.name,) + tuple(ob.scale)))
+
+ else:
+ blenParentBoneName = armob = None
+
+ my_mesh = my_object_generic(ob, mtx)
+ my_mesh.blenData = me
+ my_mesh.origData = origData
+ my_mesh.blenMaterials = list(material_mapping_local.keys())
+ my_mesh.blenMaterialList = mats
+ my_mesh.blenTextures = list(texture_mapping_local.keys())
+
+ # sort the name so we get predictable output, some items may be NULL
+ my_mesh.blenMaterials.sort(key=lambda m: (getattr(m[0], "name", ""), getattr(m[1], "name", "")))
+ my_mesh.blenTextures.sort(key=lambda m: getattr(m, "name", ""))
+
+ # if only 1 null texture then empty the list
+ if len(my_mesh.blenTextures) == 1 and my_mesh.blenTextures[0] is None:
+ my_mesh.blenTextures = []
+
+ my_mesh.fbxArm = armob # replace with my_object_generic armature instance later
+ my_mesh.fbxBoneParent = blenParentBoneName # replace with my_bone instance later
+
+ ob_meshes.append(my_mesh)
+
+ # not forgetting to free dupli_list
+ if ob_base.dupli_list:
+ ob_base.dupli_list_clear()
+
+ if 'ARMATURE' in object_types:
+ # now we have the meshes, restore the rest arm position
+ for i, arm in enumerate(bpy.data.armatures):
+ arm.pose_position = ob_arms_orig_rest[i]
+
+ if ob_arms_orig_rest:
+ for ob_base in bpy.data.objects:
+ if ob_base.type == 'ARMATURE':
+ ob_base.update_tag()
+ # This causes the makeDisplayList command to effect the mesh
+ scene.frame_set(scene.frame_current)
+
+ del tmp_ob_type, context_objects
+
+ # now we have collected all armatures, add bones
+ for i, ob in enumerate(ob_arms):
+
+ ob_arms[i] = my_arm = my_object_generic(ob)
+
+ my_arm.fbxBones = []
+ my_arm.blenData = ob.data
+ if ob.animation_data:
+ my_arm.blenAction = ob.animation_data.action
+ else:
+ my_arm.blenAction = None
+# my_arm.blenAction = ob.action
+ my_arm.blenActionList = []
+
+ # fbxName, blenderObject, my_bones, blenderActions
+ #ob_arms[i] = fbxArmObName, ob, arm_my_bones, (ob.action, [])
+
+ for bone in my_arm.blenData.bones:
+ my_bone = my_bone_class(bone, my_arm)
+ my_arm.fbxBones.append(my_bone)
+ ob_bones.append(my_bone)
+
+ # add the meshes to the bones and replace the meshes armature with own armature class
+ #for obname, ob, mtx, me, mats, arm, armname in ob_meshes:
+ for my_mesh in ob_meshes:
+ # Replace
+ # ...this could be sped up with dictionary mapping but its unlikely for
+ # it ever to be a bottleneck - (would need 100+ meshes using armatures)
+ if my_mesh.fbxArm:
+ for my_arm in ob_arms:
+ if my_arm.blenObject == my_mesh.fbxArm:
+ my_mesh.fbxArm = my_arm
+ break
+
+ for my_bone in ob_bones:
+
+ # The mesh uses this bones armature!
+ if my_bone.fbxArm == my_mesh.fbxArm:
+ if my_bone.blenBone.use_deform:
+ my_bone.blenMeshes[my_mesh.fbxName] = me
+
+ # parent bone: replace bone names with our class instances
+ # my_mesh.fbxBoneParent is None or a blender bone name initialy, replacing if the names match.
+ if my_mesh.fbxBoneParent == my_bone.blenName:
+ my_mesh.fbxBoneParent = my_bone
+
+ bone_deformer_count = 0 # count how many bones deform a mesh
+ my_bone_blenParent = None
+ for my_bone in ob_bones:
+ my_bone_blenParent = my_bone.blenBone.parent
+ if my_bone_blenParent:
+ for my_bone_parent in ob_bones:
+ # Note 2.45rc2 you can compare bones normally
+ if my_bone_blenParent.name == my_bone_parent.blenName and my_bone.fbxArm == my_bone_parent.fbxArm:
+ my_bone.parent = my_bone_parent
+ break
+
+ # Not used at the moment
+ # my_bone.calcRestMatrixLocal()
+ bone_deformer_count += len(my_bone.blenMeshes)
+
+ del my_bone_blenParent
+
+ # Build blenObject -> fbxObject mapping
+ # this is needed for groups as well as fbxParenting
+ bpy.data.objects.tag(False)
+
+ # using a list of object names for tagging (Arystan)
+
+ tmp_obmapping = {}
+ for ob_generic in ob_all_typegroups:
+ for ob_base in ob_generic:
+ ob_base.blenObject.tag = True
+ tmp_obmapping[ob_base.blenObject] = ob_base
+
+ # Build Groups from objects we export
+ for blenGroup in bpy.data.groups:
+ fbxGroupName = None
+ for ob in blenGroup.objects:
+ if ob.tag:
+ if fbxGroupName is None:
+ fbxGroupName = sane_groupname(blenGroup)
+ groups.append((fbxGroupName, blenGroup))
+
+ tmp_obmapping[ob].fbxGroupNames.append(fbxGroupName) # also adds to the objects fbxGroupNames
+
+ groups.sort() # not really needed
+
+ # Assign parents using this mapping
+ for ob_generic in ob_all_typegroups:
+ for my_ob in ob_generic:
+ parent = my_ob.blenObject.parent
+ if parent and parent.tag: # does it exist and is it in the mapping
+ my_ob.fbxParent = tmp_obmapping[parent]
+
+ del tmp_obmapping
+ # Finished finding groups we use
+
+ materials = [(sane_matname(mat_tex_pair), mat_tex_pair) for mat_tex_pair in materials.keys()]
+ textures = [(sane_texname(tex), tex) for tex in textures.keys() if tex]
+ materials.sort(key=lambda m: m[0]) # sort by name
+ textures.sort(key=lambda m: m[0])
+
+ camera_count = 8
+ file.write('''
+
+; Object definitions
+;------------------------------------------------------------------
+
+Definitions: {
+ Version: 100
+ Count: %i''' % (\
+ 1 + camera_count + \
+ len(ob_meshes) + \
+ len(ob_lights) + \
+ len(ob_cameras) + \
+ len(ob_arms) + \
+ len(ob_null) + \
+ len(ob_bones) + \
+ bone_deformer_count + \
+ len(materials) + \
+ (len(textures) * 2))) # add 1 for global settings
+
+ del bone_deformer_count
+
+ file.write('''
+ ObjectType: "Model" {
+ Count: %i
+ }''' % (\
+ camera_count + \
+ len(ob_meshes) + \
+ len(ob_lights) + \
+ len(ob_cameras) + \
+ len(ob_arms) + \
+ len(ob_null) + \
+ len(ob_bones)))
+
+ file.write('''
+ ObjectType: "Geometry" {
+ Count: %i
+ }''' % len(ob_meshes))
+
+ if materials:
+ file.write('''
+ ObjectType: "Material" {
+ Count: %i
+ }''' % len(materials))
+
+ if textures:
+ file.write('''
+ ObjectType: "Texture" {
+ Count: %i
+ }''' % len(textures)) # add 1 for an empty tex
+ file.write('''
+ ObjectType: "Video" {
+ Count: %i
+ }''' % len(textures)) # add 1 for an empty tex
+
+ tmp = 0
+ # Add deformer nodes
+ for my_mesh in ob_meshes:
+ if my_mesh.fbxArm:
+ tmp += 1
+
+ # Add subdeformers
+ for my_bone in ob_bones:
+ tmp += len(my_bone.blenMeshes)
+
+ if tmp:
+ file.write('''
+ ObjectType: "Deformer" {
+ Count: %i
+ }''' % tmp)
+ del tmp
+
+ # we could avoid writing this possibly but for now just write it
+
+ file.write('''
+ ObjectType: "Pose" {
+ Count: 1
+ }''')
+
+ if groups:
+ file.write('''
+ ObjectType: "GroupSelection" {
+ Count: %i
+ }''' % len(groups))
+
+ file.write('''
+ ObjectType: "GlobalSettings" {
+ Count: 1
+ }
+}''')
+
+ file.write('''
+
+; Object properties
+;------------------------------------------------------------------
+
+Objects: {''')
+
+ # To comply with other FBX FILES
+ write_camera_switch()
+
+ for my_null in ob_null:
+ write_null(my_null)
+
+ for my_arm in ob_arms:
+ write_null(my_arm)
+
+ for my_cam in ob_cameras:
+ write_camera(my_cam)
+
+ for my_light in ob_lights:
+ write_light(my_light)
+
+ for my_mesh in ob_meshes:
+ write_mesh(my_mesh)
+
+ #for bonename, bone, obname, me, armob in ob_bones:
+ for my_bone in ob_bones:
+ write_bone(my_bone)
+
+ write_camera_default()
+
+ for matname, (mat, tex) in materials:
+ write_material(matname, mat) # We only need to have a material per image pair, but no need to write any image info into the material (dumb fbx standard)
+
+ # each texture uses a video, odd
+ for texname, tex in textures:
+ write_video(texname, tex)
+ i = 0
+ for texname, tex in textures:
+ write_texture(texname, tex, i)
+ i += 1
+
+ for groupname, group in groups:
+ write_group(groupname)
+
+ # NOTE - c4d and motionbuilder dont need normalized weights, but deep-exploration 5 does and (max?) do.
+
+ # Write armature modifiers
+ # TODO - add another MODEL? - because of this skin definition.
+ for my_mesh in ob_meshes:
+ if my_mesh.fbxArm:
+ write_deformer_skin(my_mesh.fbxName)
+
+ # Get normalized weights for temorary use
+ if my_mesh.fbxBoneParent:
+ weights = None
+ else:
+ weights = meshNormalizedWeights(my_mesh.blenObject, my_mesh.blenData)
+
+ #for bonename, bone, obname, bone_mesh, armob in ob_bones:
+ for my_bone in ob_bones:
+ if me in iter(my_bone.blenMeshes.values()):
+ write_sub_deformer_skin(my_mesh, my_bone, weights)
+
+ # Write pose's really weird, only needed when an armature and mesh are used together
+ # each by themselves dont need pose data. for now only pose meshes and bones
+
+ file.write('''
+ Pose: "Pose::BIND_POSES", "BindPose" {
+ Type: "BindPose"
+ Version: 100
+ Properties60: {
+ }
+ NbPoseNodes: ''')
+ file.write(str(len(pose_items)))
+
+ for fbxName, matrix in pose_items:
+ file.write('\n\t\tPoseNode: {')
+ file.write('\n\t\t\tNode: "Model::%s"' % fbxName)
+ file.write('\n\t\t\tMatrix: %s' % mat4x4str(matrix if matrix else Matrix()))
+ file.write('\n\t\t}')
+
+ file.write('\n\t}')
+
+ # Finish Writing Objects
+ # Write global settings
+ file.write('''
+ GlobalSettings: {
+ Version: 1000
+ Properties60: {
+ Property: "UpAxis", "int", "",1
+ Property: "UpAxisSign", "int", "",1
+ Property: "FrontAxis", "int", "",2
+ Property: "FrontAxisSign", "int", "",1
+ Property: "CoordAxis", "int", "",0
+ Property: "CoordAxisSign", "int", "",1
+ Property: "UnitScaleFactor", "double", "",1
+ }
+ }
+''')
+ file.write('}')
+
+ file.write('''
+
+; Object relations
+;------------------------------------------------------------------
+
+Relations: {''')
+
+ for my_null in ob_null:
+ file.write('\n\tModel: "Model::%s", "Null" {\n\t}' % my_null.fbxName)
+
+ for my_arm in ob_arms:
+ file.write('\n\tModel: "Model::%s", "Null" {\n\t}' % my_arm.fbxName)
+
+ for my_mesh in ob_meshes:
+ file.write('\n\tModel: "Model::%s", "Mesh" {\n\t}' % my_mesh.fbxName)
+
+ # TODO - limbs can have the same name for multiple armatures, should prefix.
+ #for bonename, bone, obname, me, armob in ob_bones:
+ for my_bone in ob_bones:
+ file.write('\n\tModel: "Model::%s", "Limb" {\n\t}' % my_bone.fbxName)
+
+ for my_cam in ob_cameras:
+ file.write('\n\tModel: "Model::%s", "Camera" {\n\t}' % my_cam.fbxName)
+
+ for my_light in ob_lights:
+ file.write('\n\tModel: "Model::%s", "Light" {\n\t}' % my_light.fbxName)
+
+ file.write('''
+ Model: "Model::Producer Perspective", "Camera" {
+ }
+ Model: "Model::Producer Top", "Camera" {
+ }
+ Model: "Model::Producer Bottom", "Camera" {
+ }
+ Model: "Model::Producer Front", "Camera" {
+ }
+ Model: "Model::Producer Back", "Camera" {
+ }
+ Model: "Model::Producer Right", "Camera" {
+ }
+ Model: "Model::Producer Left", "Camera" {
+ }
+ Model: "Model::Camera Switcher", "CameraSwitcher" {
+ }''')
+
+ for matname, (mat, tex) in materials:
+ file.write('\n\tMaterial: "Material::%s", "" {\n\t}' % matname)
+
+ if textures:
+ for texname, tex in textures:
+ file.write('\n\tTexture: "Texture::%s", "TextureVideoClip" {\n\t}' % texname)
+ for texname, tex in textures:
+ file.write('\n\tVideo: "Video::%s", "Clip" {\n\t}' % texname)
+
+ # deformers - modifiers
+ for my_mesh in ob_meshes:
+ if my_mesh.fbxArm:
+ file.write('\n\tDeformer: "Deformer::Skin %s", "Skin" {\n\t}' % my_mesh.fbxName)
+
+ #for bonename, bone, obname, me, armob in ob_bones:
+ for my_bone in ob_bones:
+ for fbxMeshObName in my_bone.blenMeshes: # .keys() - fbxMeshObName
+ # is this bone effecting a mesh?
+ file.write('\n\tDeformer: "SubDeformer::Cluster %s %s", "Cluster" {\n\t}' % (fbxMeshObName, my_bone.fbxName))
+
+ # This should be at the end
+ # file.write('\n\tPose: "Pose::BIND_POSES", "BindPose" {\n\t}')
+
+ for groupname, group in groups:
+ file.write('\n\tGroupSelection: "GroupSelection::%s", "Default" {\n\t}' % groupname)
+
+ file.write('\n}')
+ file.write('''
+
+; Object connections
+;------------------------------------------------------------------
+
+Connections: {''')
+
+ # NOTE - The FBX SDK dosnt care about the order but some importers DO!
+ # for instance, defining the material->mesh connection
+ # before the mesh->parent crashes cinema4d
+
+ for ob_generic in ob_all_typegroups: # all blender 'Object's we support
+ for my_ob in ob_generic:
+ # for deformed meshes, don't have any parents or they can get twice transformed.
+ if my_ob.fbxParent and (not my_ob.fbxArm):
+ file.write('\n\tConnect: "OO", "Model::%s", "Model::%s"' % (my_ob.fbxName, my_ob.fbxParent.fbxName))
+ else:
+ file.write('\n\tConnect: "OO", "Model::%s", "Model::Scene"' % my_ob.fbxName)
+
+ if materials:
+ for my_mesh in ob_meshes:
+ # Connect all materials to all objects, not good form but ok for now.
+ for mat, tex in my_mesh.blenMaterials:
+ mat_name = mat.name if mat else None
+ tex_name = tex.name if tex else None
+
+ file.write('\n\tConnect: "OO", "Material::%s", "Model::%s"' % (sane_name_mapping_mat[mat_name, tex_name], my_mesh.fbxName))
+
+ if textures:
+ for my_mesh in ob_meshes:
+ if my_mesh.blenTextures:
+ # file.write('\n\tConnect: "OO", "Texture::_empty_", "Model::%s"' % my_mesh.fbxName)
+ for tex in my_mesh.blenTextures:
+ if tex:
+ file.write('\n\tConnect: "OO", "Texture::%s", "Model::%s"' % (sane_name_mapping_tex[tex.name], my_mesh.fbxName))
+
+ for texname, tex in textures:
+ file.write('\n\tConnect: "OO", "Video::%s", "Texture::%s"' % (texname, texname))
+
+ for my_mesh in ob_meshes:
+ if my_mesh.fbxArm:
+ file.write('\n\tConnect: "OO", "Deformer::Skin %s", "Model::%s"' % (my_mesh.fbxName, my_mesh.fbxName))
+
+ #for bonename, bone, obname, me, armob in ob_bones:
+ for my_bone in ob_bones:
+ for fbxMeshObName in my_bone.blenMeshes: # .keys()
+ file.write('\n\tConnect: "OO", "SubDeformer::Cluster %s %s", "Deformer::Skin %s"' % (fbxMeshObName, my_bone.fbxName, fbxMeshObName))
+
+ # limbs -> deformers
+ # for bonename, bone, obname, me, armob in ob_bones:
+ for my_bone in ob_bones:
+ for fbxMeshObName in my_bone.blenMeshes: # .keys()
+ file.write('\n\tConnect: "OO", "Model::%s", "SubDeformer::Cluster %s %s"' % (my_bone.fbxName, fbxMeshObName, my_bone.fbxName))
+
+ #for bonename, bone, obname, me, armob in ob_bones:
+ for my_bone in ob_bones:
+ # Always parent to armature now
+ if my_bone.parent:
+ file.write('\n\tConnect: "OO", "Model::%s", "Model::%s"' % (my_bone.fbxName, my_bone.parent.fbxName))
+ else:
+ # the armature object is written as an empty and all root level bones connect to it
+ file.write('\n\tConnect: "OO", "Model::%s", "Model::%s"' % (my_bone.fbxName, my_bone.fbxArm.fbxName))
+
+ # groups
+ if groups:
+ for ob_generic in ob_all_typegroups:
+ for ob_base in ob_generic:
+ for fbxGroupName in ob_base.fbxGroupNames:
+ file.write('\n\tConnect: "OO", "Model::%s", "GroupSelection::%s"' % (ob_base.fbxName, fbxGroupName))
+
+ for my_arm in ob_arms:
+ file.write('\n\tConnect: "OO", "Model::%s", "Model::Scene"' % my_arm.fbxName)
+
+ file.write('\n}')
+
+ # Needed for scene footer as well as animation
+ render = scene.render
+
+ # from the FBX sdk
+ #define KTIME_ONE_SECOND KTime (K_LONGLONG(46186158000))
+ def fbx_time(t):
+ # 0.5 + val is the same as rounding.
+ return int(0.5 + ((t / fps) * 46186158000))
+
+ fps = float(render.fps)
+ start = scene.frame_start
+ end = scene.frame_end
+ if end < start:
+ start, end = end, start
+
+ # comment the following line, otherwise we dont get the pose
+ # if start==end: ANIM_ENABLE = False
+
+ # animations for these object types
+ ob_anim_lists = ob_bones, ob_meshes, ob_null, ob_cameras, ob_lights, ob_arms
+
+ if ANIM_ENABLE and [tmp for tmp in ob_anim_lists if tmp]:
+
+ frame_orig = scene.frame_current
+
+ if ANIM_OPTIMIZE:
+ ANIM_OPTIMIZE_PRECISSION_FLOAT = 0.1 ** ANIM_OPTIMIZE_PRECISSION
+
+ # default action, when no actions are available
+ tmp_actions = []
+ blenActionDefault = None
+ action_lastcompat = None
+
+ # instead of tagging
+ tagged_actions = []
+
+ if ANIM_ACTION_ALL:
+# bpy.data.actions.tag = False
+ tmp_actions = bpy.data.actions[:]
+
+ # find which actions are compatible with the armatures
+ # blenActions is not yet initialized so do it now.
+ tmp_act_count = 0
+ for my_arm in ob_arms:
+
+ # get the default name
+ if not blenActionDefault:
+ blenActionDefault = my_arm.blenAction
+
+ arm_bone_names = set([my_bone.blenName for my_bone in my_arm.fbxBones])
+
+ for action in tmp_actions:
+
+ action_chan_names = arm_bone_names.intersection(set([g.name for g in action.groups]))
+# action_chan_names = arm_bone_names.intersection( set(action.getChannelNames()) )
+
+ if action_chan_names: # at least one channel matches.
+ my_arm.blenActionList.append(action)
+ tagged_actions.append(action.name)
+# action.tag = True
+ tmp_act_count += 1
+
+ # in case no actions are applied to armatures
+ action_lastcompat = action
+
+ if tmp_act_count:
+ # unlikely to ever happen but if no actions applied to armatures, just use the last compatible armature.
+ if not blenActionDefault:
+ blenActionDefault = action_lastcompat
+
+ del action_lastcompat
+
+ tmp_actions.insert(0, None) # None is the default action
+
+ file.write('''
+;Takes and animation section
+;----------------------------------------------------
+
+Takes: {''')
+
+ if blenActionDefault:
+ file.write('\n\tCurrent: "%s"' % sane_takename(blenActionDefault))
+ else:
+ file.write('\n\tCurrent: "Default Take"')
+
+ for blenAction in tmp_actions:
+ # we have tagged all actions that are used by selected armatures
+ if blenAction:
+ if blenAction.name in tagged_actions:
+# if blenAction.tag:
+ print('\taction: "%s" exporting...' % blenAction.name)
+ else:
+ print('\taction: "%s" has no armature using it, skipping' % blenAction.name)
+ continue
+
+ if blenAction is None:
+ # Warning, this only accounts for tmp_actions being [None]
+ file.write('\n\tTake: "Default Take" {')
+ act_start = start
+ act_end = end
+ else:
+ # use existing name
+ if blenAction == blenActionDefault: # have we already got the name
+ file.write('\n\tTake: "%s" {' % sane_name_mapping_take[blenAction.name])
+ else:
+ file.write('\n\tTake: "%s" {' % sane_takename(blenAction))
+
+ act_start, act_end = blenAction.frame_range
+ act_start = int(act_start)
+ act_end = int(act_end)
+
+ # Set the action active
+ for my_arm in ob_arms:
+ if my_arm.blenObject.animation_data and blenAction in my_arm.blenActionList:
+ my_arm.blenObject.animation_data.action = blenAction
+ # print('\t\tSetting Action!', blenAction)
+ # scene.update(1)
+
+ file.write('\n\t\tFileName: "Default_Take.tak"') # ??? - not sure why this is needed
+ file.write('\n\t\tLocalTime: %i,%i' % (fbx_time(act_start - 1), fbx_time(act_end - 1))) # ??? - not sure why this is needed
+ file.write('\n\t\tReferenceTime: %i,%i' % (fbx_time(act_start - 1), fbx_time(act_end - 1))) # ??? - not sure why this is needed
+
+ file.write('''
+
+ ;Models animation
+ ;----------------------------------------------------''')
+
+ # set pose data for all bones
+ # do this here in case the action changes
+ '''
+ for my_bone in ob_bones:
+ my_bone.flushAnimData()
+ '''
+ i = act_start
+ while i <= act_end:
+ scene.frame_set(i)
+ for ob_generic in ob_anim_lists:
+ for my_ob in ob_generic:
+ #Blender.Window.RedrawAll()
+ if ob_generic == ob_meshes and my_ob.fbxArm:
+ # We cant animate armature meshes!
+ my_ob.setPoseFrame(i, fake=True)
+ else:
+ my_ob.setPoseFrame(i)
+
+ i += 1
+
+ #for bonename, bone, obname, me, armob in ob_bones:
+ for ob_generic in (ob_bones, ob_meshes, ob_null, ob_cameras, ob_lights, ob_arms):
+
+ for my_ob in ob_generic:
+
+ if ob_generic == ob_meshes and my_ob.fbxArm:
+ # do nothing,
+ pass
+ else:
+
+ file.write('\n\t\tModel: "Model::%s" {' % my_ob.fbxName) # ??? - not sure why this is needed
+ file.write('\n\t\t\tVersion: 1.1')
+ file.write('\n\t\t\tChannel: "Transform" {')
+
+ context_bone_anim_mats = [(my_ob.getAnimParRelMatrix(frame), my_ob.getAnimParRelMatrixRot(frame)) for frame in range(act_start, act_end + 1)]
+
+ # ----------------
+ # ----------------
+ for TX_LAYER, TX_CHAN in enumerate('TRS'): # transform, rotate, scale
+
+ if TX_CHAN == 'T':
+ context_bone_anim_vecs = [mtx[0].to_translation() for mtx in context_bone_anim_mats]
+ elif TX_CHAN == 'S':
+ context_bone_anim_vecs = [mtx[0].to_scale() for mtx in context_bone_anim_mats]
+ elif TX_CHAN == 'R':
+ # Was....
+ # elif TX_CHAN=='R': context_bone_anim_vecs = [mtx[1].to_euler() for mtx in context_bone_anim_mats]
+ #
+ # ...but we need to use the previous euler for compatible conversion.
+ context_bone_anim_vecs = []
+ prev_eul = None
+ for mtx in context_bone_anim_mats:
+ if prev_eul:
+ prev_eul = mtx[1].to_euler('XYZ', prev_eul)
+ else:
+ prev_eul = mtx[1].to_euler()
+ context_bone_anim_vecs.append(tuple_rad_to_deg(prev_eul))
+
+ file.write('\n\t\t\t\tChannel: "%s" {' % TX_CHAN) # translation
+
+ for i in range(3):
+ # Loop on each axis of the bone
+ file.write('\n\t\t\t\t\tChannel: "%s" {' % ('XYZ'[i])) # translation
+ file.write('\n\t\t\t\t\t\tDefault: %.15f' % context_bone_anim_vecs[0][i])
+ file.write('\n\t\t\t\t\t\tKeyVer: 4005')
+
+ if not ANIM_OPTIMIZE:
+ # Just write all frames, simple but inefficient
+ file.write('\n\t\t\t\t\t\tKeyCount: %i' % (1 + act_end - act_start))
+ file.write('\n\t\t\t\t\t\tKey: ')
+ frame = act_start
+ while frame <= act_end:
+ if frame != act_start:
+ file.write(',')
+
+ # Curve types are 'C,n' for constant, 'L' for linear
+ # C,n is for bezier? - linear is best for now so we can do simple keyframe removal
+ file.write('\n\t\t\t\t\t\t\t%i,%.15f,L' % (fbx_time(frame - 1), context_bone_anim_vecs[frame - act_start][i]))
+ frame += 1
+ else:
+ # remove unneeded keys, j is the frame, needed when some frames are removed.
+ context_bone_anim_keys = [(vec[i], j) for j, vec in enumerate(context_bone_anim_vecs)]
+
+ # last frame to first frame, missing 1 frame on either side.
+ # removing in a backwards loop is faster
+ #for j in xrange( (act_end-act_start)-1, 0, -1 ):
+ # j = (act_end-act_start)-1
+ j = len(context_bone_anim_keys) - 2
+ while j > 0 and len(context_bone_anim_keys) > 2:
+ # print j, len(context_bone_anim_keys)
+ # Is this key the same as the ones next to it?
+
+ # co-linear horizontal...
+ if abs(context_bone_anim_keys[j][0] - context_bone_anim_keys[j - 1][0]) < ANIM_OPTIMIZE_PRECISSION_FLOAT and \
+ abs(context_bone_anim_keys[j][0] - context_bone_anim_keys[j + 1][0]) < ANIM_OPTIMIZE_PRECISSION_FLOAT:
+
+ del context_bone_anim_keys[j]
+
+ else:
+ frame_range = float(context_bone_anim_keys[j + 1][1] - context_bone_anim_keys[j - 1][1])
+ frame_range_fac1 = (context_bone_anim_keys[j + 1][1] - context_bone_anim_keys[j][1]) / frame_range
+ frame_range_fac2 = 1.0 - frame_range_fac1
+
+ if abs(((context_bone_anim_keys[j - 1][0] * frame_range_fac1 + context_bone_anim_keys[j + 1][0] * frame_range_fac2)) - context_bone_anim_keys[j][0]) < ANIM_OPTIMIZE_PRECISSION_FLOAT:
+ del context_bone_anim_keys[j]
+ else:
+ j -= 1
+
+ # keep the index below the list length
+ if j > len(context_bone_anim_keys) - 2:
+ j = len(context_bone_anim_keys) - 2
+
+ if len(context_bone_anim_keys) == 2 and context_bone_anim_keys[0][0] == context_bone_anim_keys[1][0]:
+
+ # This axis has no motion, it's okay to skip KeyCount and Keys in this case
+ # pass
+
+ # better write one, otherwise we lose poses with no animation
+ file.write('\n\t\t\t\t\t\tKeyCount: 1')
+ file.write('\n\t\t\t\t\t\tKey: ')
+ file.write('\n\t\t\t\t\t\t\t%i,%.15f,L' % (fbx_time(start), context_bone_anim_keys[0][0]))
+ else:
+ # We only need to write these if there is at least one
+ file.write('\n\t\t\t\t\t\tKeyCount: %i' % len(context_bone_anim_keys))
+ file.write('\n\t\t\t\t\t\tKey: ')
+ for val, frame in context_bone_anim_keys:
+ if frame != context_bone_anim_keys[0][1]: # not the first
+ file.write(',')
+ # frame is already one less then blenders frame
+ file.write('\n\t\t\t\t\t\t\t%i,%.15f,L' % (fbx_time(frame), val))
+
+ if i == 0:
+ file.write('\n\t\t\t\t\t\tColor: 1,0,0')
+ elif i == 1:
+ file.write('\n\t\t\t\t\t\tColor: 0,1,0')
+ elif i == 2:
+ file.write('\n\t\t\t\t\t\tColor: 0,0,1')
+
+ file.write('\n\t\t\t\t\t}')
+ file.write('\n\t\t\t\t\tLayerType: %i' % (TX_LAYER + 1))
+ file.write('\n\t\t\t\t}')
+
+ # ---------------
+
+ file.write('\n\t\t\t}')
+ file.write('\n\t\t}')
+
+ # end the take
+ file.write('\n\t}')
+
+ # end action loop. set original actions
+ # do this after every loop in case actions affect each other.
+ for my_arm in ob_arms:
+ if my_arm.blenObject.animation_data:
+ my_arm.blenObject.animation_data.action = my_arm.blenAction
+
+ file.write('\n}')
+
+ scene.frame_set(frame_orig)
+
+ else:
+ # no animation
+ file.write('\n;Takes and animation section')
+ file.write('\n;----------------------------------------------------')
+ file.write('\n')
+ file.write('\nTakes: {')
+ file.write('\n\tCurrent: ""')
+ file.write('\n}')
+
+ # write meshes animation
+ #for obname, ob, mtx, me, mats, arm, armname in ob_meshes:
+
+ # Clear mesh data Only when writing with modifiers applied
+ for me in meshes_to_clear:
+ bpy.data.meshes.remove(me)
+
+ # --------------------------- Footer
+ if world:
+ m = world.mist_settings
+ has_mist = m.use_mist
+ mist_intense = m.intensity
+ mist_start = m.start
+ mist_end = m.depth
+ mist_height = m.height
+ world_hor = world.horizon_color
+ else:
+ has_mist = mist_intense = mist_start = mist_end = mist_height = 0
+ world_hor = 0, 0, 0
+
+ file.write('\n;Version 5 settings')
+ file.write('\n;------------------------------------------------------------------')
+ file.write('\n')
+ file.write('\nVersion5: {')
+ file.write('\n\tAmbientRenderSettings: {')
+ file.write('\n\t\tVersion: 101')
+ file.write('\n\t\tAmbientLightColor: %.1f,%.1f,%.1f,0' % tuple(world_amb))
+ file.write('\n\t}')
+ file.write('\n\tFogOptions: {')
+ file.write('\n\t\tFlogEnable: %i' % has_mist)
+ file.write('\n\t\tFogMode: 0')
+ file.write('\n\t\tFogDensity: %.3f' % mist_intense)
+ file.write('\n\t\tFogStart: %.3f' % mist_start)
+ file.write('\n\t\tFogEnd: %.3f' % mist_end)
+ file.write('\n\t\tFogColor: %.1f,%.1f,%.1f,1' % tuple(world_hor))
+ file.write('\n\t}')
+ file.write('\n\tSettings: {')
+ file.write('\n\t\tFrameRate: "%i"' % int(fps))
+ file.write('\n\t\tTimeFormat: 1')
+ file.write('\n\t\tSnapOnFrames: 0')
+ file.write('\n\t\tReferenceTimeIndex: -1')
+ file.write('\n\t\tTimeLineStartTime: %i' % fbx_time(start - 1))
+ file.write('\n\t\tTimeLineStopTime: %i' % fbx_time(end - 1))
+ file.write('\n\t}')
+ file.write('\n\tRendererSetting: {')
+ file.write('\n\t\tDefaultCamera: "Producer Perspective"')
+ file.write('\n\t\tDefaultViewingMode: 0')
+ file.write('\n\t}')
+ file.write('\n}')
+ file.write('\n')
+
+ # XXX, shouldnt be global!
+ sane_name_mapping_ob.clear()
+ sane_name_mapping_mat.clear()
+ sane_name_mapping_tex.clear()
+ sane_name_mapping_take.clear()
+ sane_name_mapping_group.clear()
+
+ ob_arms[:] = []
+ ob_bones[:] = []
+ ob_cameras[:] = []
+ ob_lights[:] = []
+ ob_meshes[:] = []
+ ob_null[:] = []
+
+ file.close()
+
+ # copy all collected files.
+ bpy_extras.io_utils.path_reference_copy(copy_set)
+
+ print('export finished in %.4f sec.' % (time.clock() - start_time))
+ return {'FINISHED'}
+
+
+def save(operator, context,
+ filepath="",
+ use_selection=True,
+ batch_mode='OFF',
+ BATCH_OWN_DIR=False,
+ **kwargs
+ ):
+
+ if bpy.ops.object.mode_set.poll():
+ bpy.ops.object.mode_set(mode='OBJECT')
+
+ if batch_mode == 'OFF':
+ kwargs_mod = kwargs.copy()
+ if use_selection:
+ kwargs_mod["context_objects"] = context.selected_objects
+ else:
+ kwargs_mod["context_objects"] = context.scene.objects
+
+ return save_single(operator, context.scene, filepath, **kwargs_mod)
+ else:
+ fbxpath = filepath
+
+ prefix = os.path.basename(fbxpath)
+ if prefix:
+ fbxpath = os.path.dirname(fbxpath)
+
+ if not fbxpath.endswith(os.sep):
+ fbxpath += os.sep
+
+ if batch_mode == 'GROUP':
+ data_seq = bpy.data.groups
+ else:
+ data_seq = bpy.data.scenes
+
+ # call this function within a loop with BATCH_ENABLE == False
+ # no scene switching done at the moment.
+ # orig_sce = context.scene
+
+ new_fbxpath = fbxpath # own dir option modifies, we need to keep an original
+ for data in data_seq: # scene or group
+ newname = prefix + bpy.path.clean_name(data.name)
+
+ if BATCH_OWN_DIR:
+ new_fbxpath = fbxpath + newname + os.sep
+ # path may already exist
+ # TODO - might exist but be a file. unlikely but should probably account for it.
+
+ if not os.path.exists(new_fbxpath):
+ os.makedirs(new_fbxpath)
+
+ filepath = new_fbxpath + newname + '.fbx'
+
+ print('\nBatch exporting %s as...\n\t%r' % (data, filepath))
+
+ # XXX don't know what to do with this, probably do the same? (Arystan)
+ if batch_mode == 'GROUP': # group
+ # group, so objects update properly, add a dummy scene.
+ scene = bpy.data.scenes.new(name="FBX_Temp")
+ scene.layers = [True] * 20
+ # bpy.data.scenes.active = scene # XXX, cant switch
+ for ob_base in data.objects:
+ scene.objects.link(ob_base)
+
+ scene.update()
+ else:
+ scene = data
+
+ # TODO - BUMMER! Armatures not in the group wont animate the mesh
+
+ # else: # scene
+ # data_seq.active = data
+
+ # Call self with modified args
+ # Don't pass batch options since we already used them
+ kwargs_batch = kwargs.copy()
+
+ kwargs_batch["context_objects"] = data.objects
+
+ save_single(operator, scene, filepath, **kwargs_batch)
+
+ if batch_mode == 'GROUP':
+ # remove temp group scene
+ bpy.data.scenes.remove(scene)
+
+ # no active scene changing!
+ # bpy.data.scenes.active = orig_sce
+
+ return {'FINISHED'} # so the script wont run after we have batch exported.
+
+
+
+
+# NOTES (all line numbers correspond to original export_fbx.py (under release/scripts)
+# - Draw.PupMenu alternative in 2.5?, temporarily replaced PupMenu with print
+# - get rid of bpy.path.clean_name somehow
+# + fixed: isinstance(inst, bpy.types.*) doesn't work on RNA objects: line 565
+# + get rid of BPyObject_getObjectArmature, move it in RNA?
+# - BATCH_ENABLE and BATCH_GROUP options: line 327
+# - implement all BPyMesh_* used here with RNA
+# - getDerivedObjects is not fully replicated with .dupli* funcs
+# - talk to Campbell, this code won't work? lines 1867-1875
+# - don't know what those colbits are, do we need them? they're said to be deprecated in DNA_object_types.h: 1886-1893
+# - no hq normals: 1900-1901
+
+# TODO
+
+# - bpy.data.remove_scene: line 366
+# - bpy.sys.time move to bpy.sys.util?
+# - new scene creation, activation: lines 327-342, 368
+# - uses bpy.path.abspath, *.relpath - replace at least relpath
diff --git a/io_scene_m3/__init__.py b/io_scene_m3/__init__.py
new file mode 100644
index 00000000..78c2498d
--- /dev/null
+++ b/io_scene_m3/__init__.py
@@ -0,0 +1,97 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+bl_info = {
+ 'name': 'Import Blizzard M3 format (.m3)',
+ 'author': 'Cory Perry',
+ 'version': (0, 2, 1),
+ "blender": (2, 5, 7),
+ "api": 36079,
+ 'location': 'File > Import > Blizzard M3 (.m3)',
+ 'description': 'Imports the Blizzard M3 format (.m3)',
+ 'warning': '',
+ 'wiki_url': 'http://wiki.blender.org/index.php/Extensions:2.5/Py/Scripts/'\
+ 'Import-Export/M3_Import',
+ 'tracker_url': 'http://projects.blender.org/tracker/index.php?'\
+ 'func=detail&aid=24017',
+ 'category': 'Import-Export'}
+
+
+# To support reload properly, try to access a package var, if it's there,
+# reload everything
+if "bpy" in locals():
+ import imp
+ if 'import_m3' in locals():
+ imp.reload(import_m3)
+# if 'export_m3' in locals():
+# imp.reload(export_m3)
+
+import time
+import datetime
+import bpy
+from bpy.props import StringProperty, BoolProperty
+from bpy_extras.io_utils import ImportHelper, ExportHelper
+
+
+class ImportM3(bpy.types.Operator, ImportHelper):
+ '''Import from M3 file format (.m3)'''
+ bl_idname = 'import_scene.blizzard_m3'
+ bl_label = 'Import M3'
+
+ filename_ext = '.m3'
+ filter_glob = StringProperty(default='*.m3', options={'HIDDEN'})
+
+ use_image_search = BoolProperty(name='Image Search',
+ description='Search subdirectories for any associated'\
+ 'images', default=True)
+
+ def execute(self, context):
+ from . import import_m3
+ print('Importing file', self.filepath)
+ t = time.mktime(datetime.datetime.now().timetuple())
+ with open(self.filepath, 'rb') as file:
+ import_m3.read(file, context, self)
+ t = time.mktime(datetime.datetime.now().timetuple()) - t
+ print('Finished importing in', t, 'seconds')
+ return {'FINISHED'}
+
+
+def menu_func_import(self, context):
+ self.layout.operator(ImportM3.bl_idname, text='Blizzard M3 (.m3)')
+
+
+#def menu_func_export(self, context):
+# self.layout.operator(ExportM3.bl_idname, text='Blizzard M3 (.m3)')
+
+
+def register():
+ bpy.utils.register_module(__name__)
+ bpy.types.INFO_MT_file_import.append(menu_func_import)
+# bpy.types.INFO_MT_file_export.append(menu_func_export)
+
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+ bpy.types.INFO_MT_file_import.remove(menu_func_import)
+# bpy.types.INFO_MT_file_export.remove(menu_func_export)
+
+
+if __name__ == "__main__":
+ register()
diff --git a/io_scene_m3/import_m3.py b/io_scene_m3/import_m3.py
new file mode 100644
index 00000000..402128da
--- /dev/null
+++ b/io_scene_m3/import_m3.py
@@ -0,0 +1,367 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+__author__ = "Cory Perry (muraj)"
+__version__ = "0.2.1"
+__bpydoc__ = """\
+This script imports m3 format files to Blender.
+
+The m3 file format, used by Blizzard in several games, is based around the
+mdx and m2 file format. Thanks to the efforts of Volcore, madyavic and the
+people working on libm3, the file format has been reversed engineered
+enough to make this script possible (Thanks guys!).
+
+This script currently imports the following:<br>
+ - Geometry data (vertices, faces, submeshes [in vertex groups])
+ - Model Textures (currently only the first material is supported)
+
+ Blender supports the DDS file format and needs the image in the same
+ directory. This script will notify you of any missing textures.
+
+TODO:<br>
+ - Documentation & clean up
+ - Full MD34 and MD33 testing (possibly batch importing for a testing suite)
+ - Import *ALL* materials and bind accordingly (currently supports diffuse,
+ specular, and normal.
+ - Adjust vertices to bind pose (import IREF matrices)
+ - Import Bone data
+ - Import Animation data
+
+Usage:<br>
+ Execute this script from the "File->Import" menu and choose a m3 file to
+open.
+
+Notes:<br>
+ Known issue with Thor.m3, seems to add a lot of unecessary verts.
+ Generates the standard verts and faces lists.
+"""
+
+import bpy
+import mathutils
+import struct
+import os.path
+from bpy.props import *
+from bpy_extras.image_utils import load_image
+
+##################
+## Struct setup ##
+##################
+verFlag = False # Version flag (MD34 == True, MD33 == False)
+
+
+class ref:
+ fmt = 'LL'
+
+ def __init__(self, file):
+ global verFlag
+ if verFlag:
+ self.fmt += 'L' # Extra unknown...
+ _s = file.read(struct.calcsize(self.fmt))
+ self.entries, self.refid = struct.unpack(self.fmt, _s)[:2]
+
+ @classmethod
+ def size(cls):
+ global verFlag
+ return struct.calcsize(cls.fmt + ('L' if verFlag else ''))
+
+
+class animref:
+ fmt = 'HHL'
+
+ def __init__(self, file):
+ _s = file.read(struct.calcsize(self.fmt))
+ self.flags, self.animflags, self.animid = struct.unpack(self.fmt, _s)
+
+
+class Tag:
+ fmt = '4sLLL'
+
+ def __init__(self, file):
+ _s = file.read(struct.calcsize(self.fmt))
+ self.name, self.ofs, self.nTag, self.version = \
+ struct.unpack(self.fmt, _s)
+
+
+class matrix:
+ fmt = 'f' * 16
+
+ def __init__(self, file):
+ _s = file.read(struct.calcsize(self.fmt))
+ self.mat = struct.unpack(self.fmt, _s)
+
+
+class vect:
+ fmt = 'fff'
+
+ def __init__(self, file):
+ _s = file.read(struct.calcsize(self.fmt))
+ self.v = struct.unpack(self.fmt, _s)
+
+
+class vertex:
+ fmt = "4B4b4B%dH4B"
+ ver = {0x020000: 2, 0x060000: 4, 0x0A0000: 6, 0x120000: 8}
+
+ def __init__(self, file, flag):
+ self.pos = vect(file)
+ _fmt = self.fmt % (self.ver[flag])
+ _s = file.read(struct.calcsize(_fmt))
+ _s = struct.unpack(_fmt, _s)
+ self.boneWeight = _s[0:4]
+ self.boneIndex = _s[4:8]
+ self.normal = _s[8:12]
+ self.uv = _s[12:14]
+ self.tan = _s[-4:] # Skipping the middle ukn value if needed
+ self.boneWeight = [b / 255.0 for b in self.boneWeight]
+ self.normal = [x * 2.0 / 255.0 - 1.0 for x in self.normal]
+ self.tan = [x * 2.0 / 255.0 - 1.0 for x in self.tan]
+ self.uv = [x / 2046.0 for x in self.uv]
+ self.uv[1] = 1.0 - self.uv[1]
+
+ @classmethod
+ def size(cls, flag=0x020000):
+ return struct.calcsize('fff' + cls.fmt % (cls.ver[flag]))
+
+
+class quat:
+ fmt = 'ffff'
+
+ def __init__(self, file):
+ _s = file.read(struct.calcsize(self.fmt))
+ self.v = struct.unpack(self.fmt, _s)
+ #Quats are stored x,y,z,w - this fixes it
+ self.v = [self.v[-1], self.v[0], self.v[1], self.v[2]]
+
+
+class bone:
+
+ def __init__(self, file):
+ file.read(4) # ukn1
+ self.name = ref(file)
+ self.flag, self.parent, _ = struct.unpack('LhH', file.read(8))
+ self.posid = animref(file)
+ self.pos = vect(file)
+ file.read(4 * 4) # ukn
+ self.rotid = animref(file)
+ self.rot = quat(file)
+ file.read(4 * 5) # ukn
+ self.scaleid = animref(file)
+ self.scale = vect(file)
+ vect(file) # ukn
+ file.read(4 * 6) # ukn
+
+
+class div:
+
+ def __init__(self, file):
+ self.faces = ref(file)
+ self.regn = ref(file)
+ self.bat = ref(file)
+ self.msec = ref(file)
+ file.read(4) # ukn
+
+
+class regn:
+ fmt = 'L2H2L6H'
+
+ def __init__(self, file):
+ _s = file.read(struct.calcsize(self.fmt))
+ _ukn1, self.ofsVert, self.nVerts, self.ofsIndex, self.nIndex, \
+ self.boneCount, self.indBone, self.nBone = \
+ struct.unpack(self.fmt, _s)[:8]
+
+
+class mat:
+
+ def __init__(self, file):
+ self.name = ref(file)
+ file.read(4 * 10) # ukn
+ self.layers = [ref(file) for _ in range(13)]
+ file.read(4 * 15) # ukn
+
+
+class layr:
+
+ def __init__(self, file):
+ file.read(4)
+ self.name = ref(file)
+ #Rest not implemented.
+
+
+class hdr:
+ fmt = '4sLL'
+
+ def __init__(self, file):
+ _s = file.read(struct.calcsize(self.fmt))
+ self.magic, self.ofsTag, self.nTag = struct.unpack(self.fmt, _s)
+ self.MODLref = ref(file)
+
+
+class MODL:
+
+ def __init__(self, file, flag=20):
+ global verFlag
+ self.name = ref(file)
+ self.ver = struct.unpack('L', file.read(4))[0]
+ self.seqHdr = ref(file)
+ self.seqData = ref(file)
+ self.seqLookup = ref(file)
+ file.read(0x1C if verFlag else 0x14) # ukn1
+ self.bones = ref(file)
+ file.read(4) # ukn2
+ self.flags = struct.unpack('L', file.read(4))[0]
+ self.vert = ref(file)
+ self.views = ref(file)
+ self.boneLookup = ref(file)
+ self.extents = [vect(file), vect(file)]
+ self.radius = struct.unpack('f', file.read(4))[0]
+ if verFlag:
+ file.read(4) # ukn MD34 addition
+ if not verFlag:
+ if flag == 20:
+ file.read(0x2C)
+ else:
+ file.read(0x34)
+ else:
+ if flag == 20:
+ file.read(0x30)
+ else:
+ file.read(0x3C)
+ self.attach = ref(file)
+ file.read(5 * ref.size())
+ self.materialsLookup = ref(file)
+ self.materials = ref(file)
+ file.read(ref.size())
+ if not verFlag:
+ file.read(0x90)
+ else:
+ file.read(0xD8)
+ self.iref = ref(file)
+
+
+def read(file, context, op):
+ """Imports as an m3 file"""
+ global verFlag
+ h = hdr(file)
+ if h.magic[::-1] == b'MD34':
+ print('m3_import: !WARNING! MD34 files not full tested...')
+ verFlag = True
+ elif h.magic[::-1] == b'MD33':
+ verFlag = False
+ else:
+ raise Exception('m3_import: !ERROR! Not a valid or supported m3 file')
+ file.seek(h.ofsTag) # Jump to the Tag table
+ print('m3_import: !INFO! Reading TagTable...')
+ tagTable = [Tag(file) for _ in range(h.nTag)]
+ file.seek(tagTable[h.MODLref.refid].ofs)
+ m = MODL(file, tagTable[h.MODLref.refid].version)
+ if not m.flags & 0x20000:
+ raise Exception('m3_import: !ERROR! Model doesn\'t have any vertices')
+ print('m3_import: !INFO! Reading Vertices...')
+ vert_flags = m.flags & 0x1E0000 # Mask out the vertex version
+ file.seek(tagTable[m.views.refid].ofs)
+ d = div(file)
+ file.seek(tagTable[m.vert.refid].ofs)
+ verts = [vertex(file, vert_flags) \
+ for _ in range(tagTable[m.vert.refid].nTag // vertex.size(vert_flags))]
+ file.seek(tagTable[d.faces.refid].ofs)
+ print('m3_import: !INFO! Reading Faces...')
+ rawfaceTable = struct.unpack('H' * (tagTable[d.faces.refid].nTag), \
+ file.read(tagTable[d.faces.refid].nTag * 2))
+ faceTable = []
+ for i in range(1, len(rawfaceTable) + 1):
+ faceTable.append(rawfaceTable[i - 1])
+ if i % 3 == 0: # Add a zero for the fourth index to the face.
+ faceTable.append(0)
+ print("m3_import: !INFO! Read %d vertices and %d faces" \
+ % (len(verts), len(faceTable)))
+ print('m3_import: !INFO! Adding Geometry...')
+ mesh = bpy.data.meshes.new(os.path.basename(op.properties.filepath))
+ mobj = bpy.data.objects.new(os.path.basename(op.properties.filepath), mesh)
+ context.scene.objects.link(mobj)
+ v, n = [], []
+ for vert in verts: # "Flatten" the vertex array...
+ v.extend(vert.pos.v)
+ n.extend(vert.normal)
+ mesh.vertices.add(len(verts))
+ mesh.faces.add(len(rawfaceTable) // 3)
+ mesh.vertices.foreach_set('co', v)
+ mesh.vertices.foreach_set('normal', n)
+ mesh.faces.foreach_set('vertices_raw', faceTable)
+ uvtex = mesh.uv_textures.new()
+ for i, face in enumerate(mesh.faces):
+ uf = uvtex.data[i]
+ uf.uv1 = verts[faceTable[i * 4 + 0]].uv
+ uf.uv2 = verts[faceTable[i * 4 + 1]].uv
+ uf.uv3 = verts[faceTable[i * 4 + 2]].uv
+ uf.uv4 = (0, 0)
+ print('m3_import: !INFO! Importing materials...')
+ material = bpy.data.materials.new('Mat00')
+ mesh.materials.append(material)
+ file.seek(tagTable[m.materials.refid].ofs)
+ mm = mat(file)
+ tex_map = [('use_map_color_diffuse', 0), ('use_map_specular', 2),\
+ ('use_map_normal', 9)]
+ for map, i in tex_map:
+ file.seek(tagTable[mm.layers[i].refid].ofs)
+ nref = layr(file).name
+ file.seek(tagTable[nref.refid].ofs)
+ name = bytes.decode(file.read(nref.entries - 1))
+ name = os.path.basename(str(name))
+ tex = bpy.data.textures.new(name=name, type='IMAGE')
+ tex.image = load_image(name, os.path.dirname(op.filepath))
+ if tex.image != None:
+ print("m3_import: !INFO! Loaded %s" % (name))
+ else:
+ print("m3_import: !WARNING! Cannot find texture \"%s\"" % (name))
+ mtex = material.texture_slots.add()
+ mtex.texture = tex
+ mtex.texture_coords = 'UV'
+ if i == 9:
+ mtex.normal_factor = 0.1 # Just a guess, seems to look nice
+ mtex.use_map_color_diffuse = (i == 0)
+ setattr(mtex, map, True)
+
+
+class M3Importer(bpy.types.Operator):
+ '''Import from M3 file format (.m3)'''
+ bl_idname = "import_mesh.blizzard_m3"
+ bl_label = 'Import M3'
+
+ # List of operator properties, the attributes will be assigned
+ # to the class instance from the operator settings before calling.
+
+ filepath = StringProperty(
+ name="File Path",\
+ description="Filepath used for importing the M3 file",\
+ maxlen=1024,\
+ default="")
+
+ def execute(self, context):
+ t = time.mktime(datetime.datetime.now().timetuple())
+ with open(self.properties.filepath, 'rb') as file:
+ print('Importing file', self.properties.filepath)
+ read(file, context, self)
+ t = time.mktime(datetime.datetime.now().timetuple()) - t
+ print('Finished importing in', t, 'seconds')
+ return {'FINISHED'}
+
+ def invoke(self, context, event):
+ wm = context.window_manager
+ wm.add_fileselect(self)
+ return {'RUNNING_MODAL'}
diff --git a/io_scene_map/__init__.py b/io_scene_map/__init__.py
new file mode 100644
index 00000000..8db02a7f
--- /dev/null
+++ b/io_scene_map/__init__.py
@@ -0,0 +1,93 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+bl_info = {
+ "name": "Quake MAP format",
+ "author": "Campbell Barton",
+ "blender": (2, 5, 7),
+ "api": 35622,
+ "location": "File > Export",
+ "description": "Export MAP brushes, nurbs surfaces, lamps and empties as map nodes",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/Import-Export/Quake_MAP",
+ "tracker_url": "",
+ "support": 'OFFICIAL',
+ "category": "Import-Export"}
+
+# To support reload properly, try to access a package var, if it's there, reload everything
+if "bpy" in locals():
+ import imp
+ if "export_map" in locals():
+ imp.reload(export_map)
+
+
+import bpy
+from bpy.props import StringProperty, BoolProperty, FloatProperty, EnumProperty
+from bpy_extras.io_utils import ExportHelper, axis_conversion
+
+
+class ExportMAP(bpy.types.Operator, ExportHelper):
+ '''Export selection to a quake map'''
+ bl_idname = "export_scene.quake_map"
+ bl_label = "Export MAP"
+ bl_options = {'PRESET'}
+
+ filename_ext = ".map"
+ filter_glob = StringProperty(default="*.map", options={'HIDDEN'})
+
+ def execute(self, context):
+ import math
+ from mathutils import Matrix
+ if not self.filepath:
+ raise Exception("filepath not set")
+
+ '''
+ global_matrix = Matrix()
+ global_matrix[0][0] = global_matrix[1][1] = global_matrix[2][2] = self.global_scale
+ global_matrix = global_matrix * axis_conversion(to_forward=self.axis_forward, to_up=self.axis_up).to_4x4()
+
+ keywords = self.as_keywords(ignore=("axis_forward", "axis_up", "global_scale", "check_existing", "filter_glob"))
+ keywords["global_matrix"] = global_matrix
+ '''
+
+ keywords = self.as_keywords(ignore=("check_existing", "filter_glob"))
+
+ from . import export_map
+ return export_map.save(self, context, **keywords)
+
+
+def menu_func(self, context):
+ self.layout.operator(ExportMAP.bl_idname, text="Quake MAP (.map)")
+
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ bpy.types.INFO_MT_file_export.append(menu_func)
+
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+ bpy.types.INFO_MT_file_export.remove(menu_func)
+
+if __name__ == "__main__":
+ register()
diff --git a/io_scene_map/export_map.py b/io_scene_map/export_map.py
new file mode 100644
index 00000000..aeb8ed97
--- /dev/null
+++ b/io_scene_map/export_map.py
@@ -0,0 +1,472 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+import bpy
+import os
+
+# TODO, make options
+PREF_SCALE = 100
+PREF_FACE_THICK = 0.1
+PREF_GRID_SNAP = False
+# Quake 1/2?
+# PREF_DEF_TEX_OPTS = Draw.Create(' 0 0 0 1 1\n') # not user settable yet
+# Quake 3+?
+PREF_DEF_TEX_OPTS = ' 0 0 0 1 1 0 0 0\n' # not user settable yet
+
+PREF_NULL_TEX = 'NULL' # not user settable yet
+PREF_INVIS_TEX = 'common/caulk'
+
+
+def face_uv_get(face):
+ """ Workaround 2.5x change.
+ """
+ me = face.id_data
+ uv_faces = me.uv_textures.active
+ if uv_faces:
+ return uv_faces.data[face.index]
+ else:
+ return None
+
+
+def write_cube2brush(file, faces):
+ '''
+ Takes 6 faces and writes a brush,
+ these faces can be from 1 mesh, 1 cube within a mesh of larger cubes
+ Faces could even come from different meshes or be contrived.
+ '''
+ import os
+ # comment only
+ # file.write('// brush "%s", "%s"\n' % (ob.name, ob.data.name))
+ file.write('// brush from cube\n{\n')
+
+ if PREF_GRID_SNAP:
+ format_vec = '( %d %d %d ) '
+ else:
+ format_vec = '( %.8f %.8f %.8f ) '
+
+ for f in faces:
+ # from 4 verts this gets them in reversed order and only 3 of them
+ # 0,1,2,3 -> 2,1,0
+ me = f.id_data # XXX25
+ for v in f.vertices[:][2::-1]:
+ file.write(format_vec % me.vertices[v].co[:])
+
+ uf = face_uv_get(f)
+
+ if uf and uf.hide:
+ file.write(PREF_INVIS_TEX)
+ else:
+ image = uf.image if uf else None
+
+ if image:
+ file.write(os.path.splitext(os.path.basename(image.filepath))[0])
+ else:
+ file.write(PREF_NULL_TEX)
+
+ # Texture stuff ignored for now
+ file.write(PREF_DEF_TEX_OPTS)
+ file.write('}\n')
+
+
+def round_vec(v):
+ if PREF_GRID_SNAP:
+ return v.to_tuple(0)
+ else:
+ return v[:]
+
+
+def write_face2brush(file, face):
+ '''
+ takes a face and writes it as a brush
+ each face is a cube/brush
+ '''
+
+ if PREF_GRID_SNAP:
+ format_vec = '( %d %d %d ) '
+ else:
+ format_vec = '( %.8f %.8f %.8f ) '
+
+ image_text = PREF_NULL_TEX
+
+ uf = face_uv_get(face)
+
+ if uf and uf.hide:
+ image_text = PREF_INVIS_TEX
+ else:
+ image = uf.image if uf else None
+
+ if image:
+ image_text = os.path.splitext(os.path.basename(image.filepath))[0]
+
+ # reuse face vertices
+ _v = face.id_data.vertices # XXX25
+ f_vertices = [_v[vi] for vi in face.vertices]
+ del _v # XXX25
+
+ # original verts as tuples for writing
+ orig_vco = [v.co[:] for v in f_vertices]
+
+ # new verts that give the face a thickness
+ dist = PREF_SCALE * PREF_FACE_THICK
+ new_vco = [round_vec(v.co - (v.normal * dist)) for v in f_vertices]
+ #new_vco = [round_vec(v.co - (face.no * dist)) for v in face]
+
+ file.write('// brush from face\n{\n')
+ # front
+ for co in orig_vco[2::-1]:
+ file.write(format_vec % co)
+ file.write(image_text)
+ # Texture stuff ignored for now
+ file.write(PREF_DEF_TEX_OPTS)
+
+ for co in new_vco[:3]:
+ file.write(format_vec % co)
+ if uf and uf.use_twoside:
+ file.write(image_text)
+ else:
+ file.write(PREF_INVIS_TEX)
+
+ # Texture stuff ignored for now
+ file.write(PREF_DEF_TEX_OPTS)
+
+ # sides.
+ if len(orig_vco) == 3: # Tri, it seems tri brushes are supported.
+ index_pairs = ((0, 1), (1, 2), (2, 0))
+ else:
+ index_pairs = ((0, 1), (1, 2), (2, 3), (3, 0))
+
+ for i1, i2 in index_pairs:
+ for co in orig_vco[i1], orig_vco[i2], new_vco[i2]:
+ file.write(format_vec % co)
+ file.write(PREF_INVIS_TEX)
+ file.write(PREF_DEF_TEX_OPTS)
+
+ file.write('}\n')
+
+
+def is_cube_facegroup(faces):
+ '''
+ Returns a bool, true if the faces make up a cube
+ '''
+ # cube must have 6 faces
+ if len(faces) != 6:
+ # print('1')
+ return False
+
+ # Check for quads and that there are 8 unique verts
+ verts = {}
+ for f in faces:
+ f_v = f.vertices[:]
+ if len(f_v) != 4:
+ return False
+
+ for v in f_v:
+ verts[v] = 0
+
+ if len(verts) != 8:
+ return False
+
+ # Now check that each vert has 3 face users
+ for f in faces:
+ f_v = f.vertices[:]
+ for v in f_v:
+ verts[v] += 1
+
+ for v in verts.values():
+ if v != 3: # vert has 3 users?
+ return False
+
+ # Could we check for 12 unique edges??, probably not needed.
+ return True
+
+
+def is_tricyl_facegroup(faces):
+ '''
+ is the face group a tri cylinder
+ Returns a bool, true if the faces make an extruded tri solid
+ '''
+
+ # tri-cylinder must have 5 faces
+ if len(faces) != 5:
+ # print('1')
+ return False
+
+ # Check the tri count and that there are 6 unique verts
+ verts = {}
+ tottri = 0
+ for f in faces:
+ if len(f) == 3:
+ tottri += 1
+
+ for v in f:
+ verts[v.index] = 0
+
+ if len(verts) != 6 or tottri != 2:
+ return False
+
+ # Now check that each vert has 3 face users
+ for f in faces:
+ for v in f:
+ verts[v.index] += 1
+
+ for v in verts.values():
+ if v != 3: # vert has 3 users?
+ return False
+
+ # Could we check for 12 unique edges??, probably not needed.
+ return True
+
+
+def write_node_map(file, ob):
+ '''
+ Writes the properties of an object (empty in this case)
+ as a MAP node as long as it has the property name - classname
+ returns True/False based on whether a node was written
+ '''
+ props = [(p.name, p.value) for p in ob.game.properties]
+
+ IS_MAP_NODE = False
+ for name, value in props:
+ if name == "classname":
+ IS_MAP_NODE = True
+ break
+
+ if not IS_MAP_NODE:
+ return False
+
+ # Write a node
+ file.write('{\n')
+ for name_value in props:
+ file.write('"%s" "%s"\n' % name_value)
+ if PREF_GRID_SNAP:
+ file.write('"origin" "%d %d %d"\n' %
+ tuple([round(axis * PREF_SCALE)
+ for axis in ob.matrix_world.to_translation()]))
+ else:
+ file.write('"origin" "%.6f %.6f %.6f"\n' %
+ tuple([axis * PREF_SCALE
+ for axis in ob.matrix_world.to_translation()]))
+
+ file.write('}\n')
+ return True
+
+
+def export_map(context, filepath):
+ """
+ pup_block = [\
+ ('Scale:', PREF_SCALE, 1, 1000, 'Scale the blender scene by this value.'),\
+ ('Face Width:', PREF_FACE_THICK, 0.01, 10, 'Thickness of faces exported as brushes.'),\
+ ('Grid Snap', PREF_GRID_SNAP, 'snaps floating point values to whole numbers.'),\
+ 'Null Texture',\
+ ('', PREF_NULL_TEX, 1, 128, 'Export textureless faces with this texture'),\
+ 'Unseen Texture',\
+ ('', PREF_INVIS_TEX, 1, 128, 'Export invisible faces with this texture'),\
+ ]
+
+ if not Draw.PupBlock('map export', pup_block):
+ return
+ """
+ import time
+ from mathutils import Vector, Matrix
+ from bpy_extras import mesh_utils
+
+ t = time.time()
+ print("Map Exporter 0.0")
+ file = open(filepath, 'w')
+
+ scene = context.scene
+ objects = context.selected_objects
+
+ obs_mesh = []
+ obs_lamp = []
+ obs_surf = []
+ obs_empty = []
+
+ SCALE_MAT = Matrix()
+ SCALE_MAT[0][0] = SCALE_MAT[1][1] = SCALE_MAT[2][2] = PREF_SCALE
+
+ TOTBRUSH = TOTLAMP = TOTNODE = 0
+
+ for ob in objects:
+ type = ob.type
+ if type == 'MESH':
+ obs_mesh.append(ob)
+ elif type == 'SURFACE':
+ obs_surf.append(ob)
+ elif type == 'LAMP':
+ obs_lamp.append(ob)
+ elif type == 'EMPTY':
+ obs_empty.append(ob)
+
+ if obs_mesh or obs_surf:
+ # brushes and surf's must be under worldspawn
+ file.write('\n// entity 0\n')
+ file.write('{\n')
+ file.write('"classname" "worldspawn"\n')
+
+ print("\twriting cubes from meshes")
+ for ob in obs_mesh:
+ dummy_mesh = ob.to_mesh(scene, True, 'PREVIEW')
+
+ #print len(mesh_split2connected(dummy_mesh))
+
+ # Is the object 1 cube? - object-is-a-brush
+ # 1 to tx the normals also
+ dummy_mesh.transform(ob.matrix_world * SCALE_MAT)
+
+ if PREF_GRID_SNAP:
+ for v in dummy_mesh.verts:
+ v.co[:] = v.co.to_tuple(0)
+
+ # High quality normals
+ #XXX25: BPyMesh.meshCalcNormals(dummy_mesh)
+
+ # Split mesh into connected regions
+ for face_group in mesh_utils.mesh_linked_faces(dummy_mesh):
+ if is_cube_facegroup(face_group):
+ write_cube2brush(file, face_group)
+ TOTBRUSH += 1
+ elif is_tricyl_facegroup(face_group):
+ write_cube2brush(file, face_group)
+ TOTBRUSH += 1
+ else:
+ for f in face_group:
+ write_face2brush(file, f)
+ TOTBRUSH += 1
+
+ #print 'warning, not exporting "%s" it is not a cube' % ob.name
+ bpy.data.meshes.remove(dummy_mesh)
+
+ valid_dims = 3, 5, 7, 9, 11, 13, 15
+ for ob in obs_surf:
+ '''
+ Surf, patches
+ '''
+ data = ob.data
+ surf_name = data.name
+ mat = ob.matrix_world * SCALE_MAT
+
+ # This is what a valid patch looks like
+
+ """
+// brush 0
+{
+patchDef2
+{
+NULL
+( 3 3 0 0 0 )
+(
+( ( -64 -64 0 0 0 ) ( -64 0 0 0 -2 ) ( -64 64 0 0 -4 ) )
+( ( 0 -64 0 2 0 ) ( 0 0 0 2 -2 ) ( 0 64 0 2 -4 ) )
+( ( 64 -64 0 4 0 ) ( 64 0 0 4 -2 ) ( 80 88 0 4 -4 ) )
+)
+}
+}
+ """
+ for i, nurb in enumerate(data.splines):
+ u = nurb.point_count_u
+ v = nurb.point_count_v
+ if u in valid_dims and v in valid_dims:
+
+ file.write('// brush %d surf_name\n' % i)
+ file.write('{\n')
+ file.write('patchDef2\n')
+ file.write('{\n')
+ file.write('NULL\n')
+ file.write('( %d %d 0 0 0 )\n' % (u, v))
+ file.write('(\n')
+
+ u_iter = 0
+ for p in nurb.points:
+
+ if u_iter == 0:
+ file.write('(')
+
+ u_iter += 1
+
+ # add nmapping 0 0 ?
+ if PREF_GRID_SNAP:
+ file.write(" ( %d %d %d 0 0 )" %
+ round_vec(p.co.xyz * mat))
+ else:
+ file.write(' ( %.6f %.6f %.6f 0 0 )' %
+ (p.co.xyz * mat)[:])
+
+ # Move to next line
+ if u_iter == u:
+ file.write(' )\n')
+ u_iter = 0
+
+ file.write(')\n')
+ file.write('}\n')
+ file.write('}\n')
+ # Debugging
+ # for p in nurb: print 'patch', p
+
+ else:
+ print("Warning: not exporting patch",
+ surf_name, u, v, 'Unsupported')
+
+ if obs_mesh or obs_surf:
+ file.write('}\n') # end worldspawn
+
+ print("\twriting lamps")
+ for ob in obs_lamp:
+ print("\t\t%s" % ob.name)
+ lamp = ob.data
+ file.write('{\n')
+ file.write('"classname" "light"\n')
+ file.write('"light" "%.6f"\n' % (lamp.distance * PREF_SCALE))
+ if PREF_GRID_SNAP:
+ file.write('"origin" "%d %d %d"\n' %
+ tuple([round(axis * PREF_SCALE)
+ for axis in ob.matrix_world.to_translation()]))
+ else:
+ file.write('"origin" "%.6f %.6f %.6f"\n' %
+ tuple([axis * PREF_SCALE
+ for axis in ob.matrix_world.to_translation()]))
+
+ file.write('"_color" "%.6f %.6f %.6f"\n' % tuple(lamp.color))
+ file.write('"style" "0"\n')
+ file.write('}\n')
+ TOTLAMP += 1
+
+ print("\twriting empty objects as nodes")
+ for ob in obs_empty:
+ if write_node_map(file, ob):
+ print("\t\t%s" % ob.name)
+ TOTNODE += 1
+ else:
+ print("\t\tignoring %s" % ob.name)
+
+ file.close()
+
+ print("Exported Map in %.4fsec" % (time.time() - t))
+ print("Brushes: %d Nodes: %d Lamps %d\n" % (TOTBRUSH, TOTNODE, TOTLAMP))
+
+
+def save(operator,
+ context,
+ filepath=None,
+ ):
+
+ export_map(context, filepath)
+
+ return {'FINISHED'}
diff --git a/io_scene_obj/__init__.py b/io_scene_obj/__init__.py
new file mode 100644
index 00000000..660a38fc
--- /dev/null
+++ b/io_scene_obj/__init__.py
@@ -0,0 +1,259 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+bl_info = {
+ "name": "Wavefront OBJ format",
+ "author": "Campbell Barton",
+ "blender": (2, 5, 7),
+ "api": 35622,
+ "location": "File > Import-Export",
+ "description": "Import-Export OBJ, Import OBJ mesh, UV's, materials and textures",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/Import-Export/Wavefront_OBJ",
+ "tracker_url": "",
+ "support": 'OFFICIAL',
+ "category": "Import-Export"}
+
+# To support reload properly, try to access a package var, if it's there, reload everything
+if "bpy" in locals():
+ import imp
+ if "import_obj" in locals():
+ imp.reload(import_obj)
+ if "export_obj" in locals():
+ imp.reload(export_obj)
+
+
+import bpy
+from bpy.props import BoolProperty, FloatProperty, StringProperty, EnumProperty
+from bpy_extras.io_utils import ExportHelper, ImportHelper, path_reference_mode, axis_conversion
+
+
+class ImportOBJ(bpy.types.Operator, ImportHelper):
+ '''Load a Wavefront OBJ File'''
+ bl_idname = "import_scene.obj"
+ bl_label = "Import OBJ"
+ bl_options = {'PRESET'}
+
+ filename_ext = ".obj"
+ filter_glob = StringProperty(default="*.obj;*.mtl", options={'HIDDEN'})
+
+ use_ngons = BoolProperty(name="NGons", description="Import faces with more then 4 verts as fgons", default=True)
+ use_edges = BoolProperty(name="Lines", description="Import lines and faces with 2 verts as edge", default=True)
+ use_smooth_groups = BoolProperty(name="Smooth Groups", description="Surround smooth groups by sharp edges", default=True)
+
+ use_split_objects = BoolProperty(name="Object", description="Import OBJ Objects into Blender Objects", default=True)
+ use_split_groups = BoolProperty(name="Group", description="Import OBJ Groups into Blender Objects", default=True)
+
+ use_groups_as_vgroups = BoolProperty(name="Poly Groups", description="Import OBJ groups as vertex groups.", default=False)
+
+ use_image_search = BoolProperty(name="Image Search", description="Search subdirs for any assosiated images (Warning, may be slow)", default=True)
+
+ split_mode = EnumProperty(
+ name="Split",
+ items=(('ON', "Split", "Split geometry, omits unused verts"),
+ ('OFF', "Keep Vert Order", "Maintain vertex order from file"),
+ ),
+ )
+
+ global_clamp_size = FloatProperty(name="Clamp Scale", description="Clamp the size to this maximum (Zero to Disable)", min=0.0, max=1000.0, soft_min=0.0, soft_max=1000.0, default=0.0)
+ axis_forward = EnumProperty(
+ name="Forward",
+ items=(('X', "X Forward", ""),
+ ('Y', "Y Forward", ""),
+ ('Z', "Z Forward", ""),
+ ('-X', "-X Forward", ""),
+ ('-Y', "-Y Forward", ""),
+ ('-Z', "-Z Forward", ""),
+ ),
+ default='-Z',
+ )
+
+ axis_up = EnumProperty(
+ name="Up",
+ items=(('X', "X Up", ""),
+ ('Y', "Y Up", ""),
+ ('Z', "Z Up", ""),
+ ('-X', "-X Up", ""),
+ ('-Y', "-Y Up", ""),
+ ('-Z', "-Z Up", ""),
+ ),
+ default='Y',
+ )
+
+ # fake prop, only disables split.
+ # keep_vertex_order = BoolProperty(name="Keep Vert Order", description="Keep vert and face order, disables split options, enable for morph targets", default= True)
+
+ def execute(self, context):
+ # print("Selected: " + context.active_object.name)
+ from . import import_obj
+ from mathutils import Matrix
+
+ if self.split_mode == 'OFF':
+ self.use_split_objects = False
+ self.use_split_groups = False
+ else:
+ self.use_groups_as_vgroups = False
+
+ keywords = self.as_keywords(ignore=("axis_forward", "axis_up", "filter_glob", "split_mode"))
+
+ global_matrix = axis_conversion(from_forward=self.axis_forward, from_up=self.axis_up).to_4x4()
+ keywords["global_matrix"] = global_matrix
+
+ return import_obj.load(self, context, **keywords)
+
+ def draw(self, context):
+ layout = self.layout
+
+ row = layout.row(align=True)
+ row.prop(self, "use_ngons")
+ row.prop(self, "use_edges")
+
+ layout.prop(self, "use_smooth_groups")
+
+ box = layout.box()
+ row = box.row()
+ row.prop(self, "split_mode", expand=True)
+
+ row = box.row()
+ if self.split_mode == 'ON':
+ row.label(text="Split by:")
+ row.prop(self, "use_split_objects")
+ row.prop(self, "use_split_groups")
+ else:
+ row.prop(self, "use_groups_as_vgroups")
+
+ row = layout.split(percentage=0.67)
+ row.prop(self, "global_clamp_size")
+ layout.prop(self, "axis_forward")
+ layout.prop(self, "axis_up")
+
+ layout.prop(self, "use_image_search")
+
+
+class ExportOBJ(bpy.types.Operator, ExportHelper):
+ '''Save a Wavefront OBJ File'''
+
+ bl_idname = "export_scene.obj"
+ bl_label = 'Export OBJ'
+ bl_options = {'PRESET'}
+
+ filename_ext = ".obj"
+ filter_glob = StringProperty(default="*.obj;*.mtl", options={'HIDDEN'})
+
+ # List of operator properties, the attributes will be assigned
+ # to the class instance from the operator settings before calling.
+
+ # context group
+ use_selection = BoolProperty(name="Selection Only", description="Export selected objects only", default=False)
+ use_all_scenes = BoolProperty(name="All Scenes", description="", default=False)
+ use_animation = BoolProperty(name="Animation", description="", default=False)
+
+ # object group
+ use_apply_modifiers = BoolProperty(name="Apply Modifiers", description="Apply modifiers (preview resolution)", default=True)
+
+ # extra data group
+ use_edges = BoolProperty(name="Edges", description="", default=True)
+ use_normals = BoolProperty(name="Normals", description="", default=False)
+ use_hq_normals = BoolProperty(name="High Quality Normals", description="", default=True)
+ use_uvs = BoolProperty(name="UVs", description="", default=True)
+ use_materials = BoolProperty(name="Materials", description="", default=True)
+ # copy_images = BoolProperty(name="Copy Images", description="", default=False)
+ use_triangles = BoolProperty(name="Triangulate", description="", default=False)
+ use_vertex_groups = BoolProperty(name="Polygroups", description="", default=False)
+ use_nurbs = BoolProperty(name="Nurbs", description="", default=False)
+
+ # grouping group
+ use_blen_objects = BoolProperty(name="Objects as OBJ Objects", description="", default=True)
+ group_by_object = BoolProperty(name="Objects as OBJ Groups ", description="", default=False)
+ group_by_material = BoolProperty(name="Material Groups", description="", default=False)
+ keep_vertex_order = BoolProperty(name="Keep Vertex Order", description="", default=False)
+
+ global_scale = FloatProperty(name="Scale", description="Scale all data, (Note! some imports dont support scaled armatures)", min=0.01, max=1000.0, soft_min=0.01, soft_max=1000.0, default=1.0)
+
+ axis_forward = EnumProperty(
+ name="Forward",
+ items=(('X', "X Forward", ""),
+ ('Y', "Y Forward", ""),
+ ('Z', "Z Forward", ""),
+ ('-X', "-X Forward", ""),
+ ('-Y', "-Y Forward", ""),
+ ('-Z', "-Z Forward", ""),
+ ),
+ default='-Z',
+ )
+
+ axis_up = EnumProperty(
+ name="Up",
+ items=(('X', "X Up", ""),
+ ('Y', "Y Up", ""),
+ ('Z', "Z Up", ""),
+ ('-X', "-X Up", ""),
+ ('-Y', "-Y Up", ""),
+ ('-Z', "-Z Up", ""),
+ ),
+ default='Y',
+ )
+
+ path_mode = path_reference_mode
+
+ def execute(self, context):
+ from . import export_obj
+
+ from mathutils import Matrix
+ keywords = self.as_keywords(ignore=("axis_forward", "axis_up", "global_scale", "check_existing", "filter_glob"))
+
+ global_matrix = Matrix()
+ global_matrix[0][0] = global_matrix[1][1] = global_matrix[2][2] = self.global_scale
+ global_matrix = global_matrix * axis_conversion(to_forward=self.axis_forward, to_up=self.axis_up).to_4x4()
+ keywords["global_matrix"] = global_matrix
+ return export_obj.save(self, context, **keywords)
+
+
+def menu_func_import(self, context):
+ self.layout.operator(ImportOBJ.bl_idname, text="Wavefront (.obj)")
+
+
+def menu_func_export(self, context):
+ self.layout.operator(ExportOBJ.bl_idname, text="Wavefront (.obj)")
+
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ bpy.types.INFO_MT_file_import.append(menu_func_import)
+ bpy.types.INFO_MT_file_export.append(menu_func_export)
+
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+ bpy.types.INFO_MT_file_import.remove(menu_func_import)
+ bpy.types.INFO_MT_file_export.remove(menu_func_export)
+
+
+# CONVERSION ISSUES
+# - matrix problem
+# - duplis - only tested dupliverts
+# - all scenes export
+# + normals calculation
+
+if __name__ == "__main__":
+ register()
diff --git a/io_scene_obj/export_obj.py b/io_scene_obj/export_obj.py
new file mode 100644
index 00000000..594a4167
--- /dev/null
+++ b/io_scene_obj/export_obj.py
@@ -0,0 +1,791 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+import os
+import time
+import shutil
+
+import bpy
+import mathutils
+import bpy_extras.io_utils
+
+
+def name_compat(name):
+ if name is None:
+ return 'None'
+ else:
+ return name.replace(' ', '_')
+
+
+def write_mtl(scene, filepath, path_mode, copy_set, mtl_dict):
+
+ world = scene.world
+ if world:
+ worldAmb = world.ambient_color[:]
+ else:
+ worldAmb = 0.0, 0.0, 0.0
+
+ source_dir = bpy.data.filepath
+ dest_dir = os.path.dirname(filepath)
+
+ file = open(filepath, "w", encoding="utf8", newline="\n")
+ file.write('# Blender MTL File: %r\n' % os.path.basename(bpy.data.filepath))
+ file.write('# Material Count: %i\n' % len(mtl_dict))
+
+ mtl_dict_values = list(mtl_dict.values())
+ mtl_dict_values.sort(key=lambda m: m[0])
+
+ # Write material/image combinations we have used.
+ # Using mtl_dict.values() directly gives un-predictable order.
+ for mtl_mat_name, mat, face_img in mtl_dict_values:
+
+ # Get the Blender data for the material and the image.
+ # Having an image named None will make a bug, don't do it :)
+
+ file.write('newmtl %s\n' % mtl_mat_name) # Define a new material: matname_imgname
+
+ if mat:
+ # convert from blenders spec to 0 - 1000 range.
+ if mat.specular_shader == 'WARDISO':
+ tspec = (0.4 - mat.specular_slope) / 0.0004
+ else:
+ tspec = (mat.specular_hardness - 1) * 1.9607843137254901
+ file.write('Ns %.6f\n' % tspec)
+ del tspec
+
+ file.write('Ka %.6f %.6f %.6f\n' % tuple(c * mat.ambient for c in worldAmb)) # Ambient, uses mirror colour,
+ file.write('Kd %.6f %.6f %.6f\n' % tuple(c * mat.diffuse_intensity for c in mat.diffuse_color)) # Diffuse
+ file.write('Ks %.6f %.6f %.6f\n' % tuple(c * mat.specular_intensity for c in mat.specular_color)) # Specular
+ if hasattr(mat, "ior"):
+ file.write('Ni %.6f\n' % mat.ior) # Refraction index
+ else:
+ file.write('Ni %.6f\n' % 1.0)
+ file.write('d %.6f\n' % mat.alpha) # Alpha (obj uses 'd' for dissolve)
+
+ # 0 to disable lighting, 1 for ambient & diffuse only (specular color set to black), 2 for full lighting.
+ if mat.use_shadeless:
+ file.write('illum 0\n') # ignore lighting
+ elif mat.specular_intensity == 0:
+ file.write('illum 1\n') # no specular.
+ else:
+ file.write('illum 2\n') # light normally
+
+ else:
+ #write a dummy material here?
+ file.write('Ns 0\n')
+ file.write('Ka %.6f %.6f %.6f\n' % tuple(c for c in worldAmb)) # Ambient, uses mirror colour,
+ file.write('Kd 0.8 0.8 0.8\n')
+ file.write('Ks 0.8 0.8 0.8\n')
+ file.write('d 1\n') # No alpha
+ file.write('illum 2\n') # light normally
+
+ # Write images!
+ if face_img: # We have an image on the face!
+ # write relative image path
+ rel = bpy_extras.io_utils.path_reference(face_img.filepath, source_dir, dest_dir, path_mode, "", copy_set)
+ file.write('map_Kd %s\n' % rel) # Diffuse mapping image
+
+ if mat: # No face image. If we have a material, search for MTex image.
+ image_map = {}
+ # backwards so topmost are highest priority
+ for mtex in reversed(mat.texture_slots):
+ if mtex and mtex.texture.type == 'IMAGE':
+ image = mtex.texture.image
+ if image:
+ # texface overrides others
+ if mtex.use_map_color_diffuse and face_img is None:
+ image_map["map_Kd"] = image
+ if mtex.use_map_ambient:
+ image_map["map_Ka"] = image
+ if mtex.use_map_specular:
+ image_map["map_Ks"] = image
+ if mtex.use_map_alpha:
+ image_map["map_d"] = image
+ if mtex.use_map_translucency:
+ image_map["map_Tr"] = image
+ if mtex.use_map_normal:
+ image_map["map_Bump"] = image
+ if mtex.use_map_hardness:
+ image_map["map_Ns"] = image
+
+ for key, image in image_map.items():
+ filepath = bpy_extras.io_utils.path_reference(image.filepath, source_dir, dest_dir, path_mode, "", copy_set)
+ file.write('%s %s\n' % (key, repr(filepath)[1:-1]))
+
+ file.write('\n\n')
+
+ file.close()
+
+
+def test_nurbs_compat(ob):
+ if ob.type != 'CURVE':
+ return False
+
+ for nu in ob.data.splines:
+ if nu.point_count_v == 1 and nu.type != 'BEZIER': # not a surface and not bezier
+ return True
+
+ return False
+
+
+def write_nurb(file, ob, ob_mat):
+ tot_verts = 0
+ cu = ob.data
+
+ # use negative indices
+ for nu in cu.splines:
+ if nu.type == 'POLY':
+ DEG_ORDER_U = 1
+ else:
+ DEG_ORDER_U = nu.order_u - 1 # odd but tested to be correct
+
+ if nu.type == 'BEZIER':
+ print("\tWarning, bezier curve:", ob.name, "only poly and nurbs curves supported")
+ continue
+
+ if nu.point_count_v > 1:
+ print("\tWarning, surface:", ob.name, "only poly and nurbs curves supported")
+ continue
+
+ if len(nu.points) <= DEG_ORDER_U:
+ print("\tWarning, order_u is lower then vert count, skipping:", ob.name)
+ continue
+
+ pt_num = 0
+ do_closed = nu.use_cyclic_u
+ do_endpoints = (do_closed == 0) and nu.use_endpoint_u
+
+ for pt in nu.points:
+ pt = pt.co.to_3d() * ob_mat
+ file.write('v %.6f %.6f %.6f\n' % (pt[0], pt[1], pt[2]))
+ pt_num += 1
+ tot_verts += pt_num
+
+ file.write('g %s\n' % (name_compat(ob.name))) # name_compat(ob.getData(1)) could use the data name too
+ file.write('cstype bspline\n') # not ideal, hard coded
+ file.write('deg %d\n' % DEG_ORDER_U) # not used for curves but most files have it still
+
+ curve_ls = [-(i + 1) for i in range(pt_num)]
+
+ # 'curv' keyword
+ if do_closed:
+ if DEG_ORDER_U == 1:
+ pt_num += 1
+ curve_ls.append(-1)
+ else:
+ pt_num += DEG_ORDER_U
+ curve_ls = curve_ls + curve_ls[0:DEG_ORDER_U]
+
+ file.write('curv 0.0 1.0 %s\n' % (" ".join([str(i) for i in curve_ls]))) # Blender has no U and V values for the curve
+
+ # 'parm' keyword
+ tot_parm = (DEG_ORDER_U + 1) + pt_num
+ tot_parm_div = float(tot_parm - 1)
+ parm_ls = [(i / tot_parm_div) for i in range(tot_parm)]
+
+ if do_endpoints: # end points, force param
+ for i in range(DEG_ORDER_U + 1):
+ parm_ls[i] = 0.0
+ parm_ls[-(1 + i)] = 1.0
+
+ file.write("parm u %s\n" % " ".join(["%.6f" % i for i in parm_ls]))
+
+ file.write('end\n')
+
+ return tot_verts
+
+
+def write_file(filepath, objects, scene,
+ EXPORT_TRI=False,
+ EXPORT_EDGES=False,
+ EXPORT_NORMALS=False,
+ EXPORT_NORMALS_HQ=False,
+ EXPORT_UV=True,
+ EXPORT_MTL=True,
+ EXPORT_APPLY_MODIFIERS=True,
+ EXPORT_BLEN_OBS=True,
+ EXPORT_GROUP_BY_OB=False,
+ EXPORT_GROUP_BY_MAT=False,
+ EXPORT_KEEP_VERT_ORDER=False,
+ EXPORT_POLYGROUPS=False,
+ EXPORT_CURVE_AS_NURBS=True,
+ EXPORT_GLOBAL_MATRIX=None,
+ EXPORT_PATH_MODE='AUTO',
+ ):
+ '''
+ Basic write function. The context and options must be already set
+ This can be accessed externally
+ eg.
+ write( 'c:\\test\\foobar.obj', Blender.Object.GetSelected() ) # Using default options.
+ '''
+
+ if EXPORT_GLOBAL_MATRIX is None:
+ EXPORT_GLOBAL_MATRIX = mathutils.Matrix()
+
+ # XXX
+ import math
+
+ def veckey3d(v):
+ return round(v.x, 6), round(v.y, 6), round(v.z, 6)
+
+ def veckey2d(v):
+ return round(v[0], 6), round(v[1], 6)
+
+ def findVertexGroupName(face, vWeightMap):
+ """
+ Searches the vertexDict to see what groups is assigned to a given face.
+ We use a frequency system in order to sort out the name because a given vetex can
+ belong to two or more groups at the same time. To find the right name for the face
+ we list all the possible vertex group names with their frequency and then sort by
+ frequency in descend order. The top element is the one shared by the highest number
+ of vertices is the face's group
+ """
+ weightDict = {}
+ for vert_index in face.vertices:
+# for vert in face:
+ vWeights = vWeightMap[vert_index]
+# vWeights = vWeightMap[vert]
+ for vGroupName, weight in vWeights:
+ weightDict[vGroupName] = weightDict.get(vGroupName, 0) + weight
+
+ if weightDict:
+ alist = [(weight, vGroupName) for vGroupName, weight in weightDict.items()] # sort least to greatest amount of weight
+ alist.sort()
+ return(alist[-1][1]) # highest value last
+ else:
+ return '(null)'
+
+ print('OBJ Export path: %r' % filepath)
+ temp_mesh_name = '~tmp-mesh'
+
+ time1 = time.clock()
+# time1 = sys.time()
+# scn = Scene.GetCurrent()
+
+ file = open(filepath, "w", encoding="utf8", newline="\n")
+
+ # Write Header
+ file.write('# Blender v%s OBJ File: %r\n' % (bpy.app.version_string, os.path.basename(bpy.data.filepath)))
+ file.write('# www.blender.org\n')
+
+ # Tell the obj file what material file to use.
+ if EXPORT_MTL:
+ mtlfilepath = os.path.splitext(filepath)[0] + ".mtl"
+ file.write('mtllib %s\n' % repr(os.path.basename(mtlfilepath))[1:-1]) # filepath can contain non utf8 chars, use repr
+
+ # Initialize totals, these are updated each object
+ totverts = totuvco = totno = 1
+
+ face_vert_index = 1
+
+ globalNormals = {}
+
+ # A Dict of Materials
+ # (material.name, image.name):matname_imagename # matname_imagename has gaps removed.
+ mtl_dict = {}
+
+ copy_set = set()
+
+ # Get all meshes
+ for ob_main in objects:
+
+ # ignore dupli children
+ if ob_main.parent and ob_main.parent.dupli_type != 'NONE':
+ # XXX
+ print(ob_main.name, 'is a dupli child - ignoring')
+ continue
+
+ obs = []
+ if ob_main.dupli_type != 'NONE':
+ # XXX
+ print('creating dupli_list on', ob_main.name)
+ ob_main.dupli_list_create(scene)
+
+ obs = [(dob.object, dob.matrix) for dob in ob_main.dupli_list]
+
+ # XXX debug print
+ print(ob_main.name, 'has', len(obs), 'dupli children')
+ else:
+ obs = [(ob_main, ob_main.matrix_world)]
+
+ for ob, ob_mat in obs:
+
+ # Nurbs curve support
+ if EXPORT_CURVE_AS_NURBS and test_nurbs_compat(ob):
+ ob_mat = EXPORT_GLOBAL_MATRIX * ob_mat
+ totverts += write_nurb(file, ob, ob_mat)
+ continue
+ # END NURBS
+
+ try:
+ me = ob.to_mesh(scene, EXPORT_APPLY_MODIFIERS, 'PREVIEW')
+ except RuntimeError:
+ me = None
+
+ if me is None:
+ continue
+
+ me.transform(EXPORT_GLOBAL_MATRIX * ob_mat)
+
+# # Will work for non meshes now! :)
+# me= BPyMesh.getMeshFromObject(ob, containerMesh, EXPORT_APPLY_MODIFIERS, EXPORT_POLYGROUPS, scn)
+# if not me:
+# continue
+
+ if EXPORT_UV:
+ faceuv = len(me.uv_textures) > 0
+ if faceuv:
+ uv_layer = me.uv_textures.active.data[:]
+ else:
+ faceuv = False
+
+ me_verts = me.vertices[:]
+
+ # Make our own list so it can be sorted to reduce context switching
+ face_index_pairs = [(face, index) for index, face in enumerate(me.faces)]
+ # faces = [ f for f in me.faces ]
+
+ if EXPORT_EDGES:
+ edges = me.edges
+ else:
+ edges = []
+
+ if not (len(face_index_pairs) + len(edges) + len(me.vertices)): # Make sure there is somthing to write
+
+ # clean up
+ bpy.data.meshes.remove(me)
+
+ continue # dont bother with this mesh.
+
+ # XXX
+ # High Quality Normals
+ if EXPORT_NORMALS and face_index_pairs:
+ me.calc_normals()
+# if EXPORT_NORMALS_HQ:
+# BPyMesh.meshCalcNormals(me)
+# else:
+# # transforming normals is incorrect
+# # when the matrix is scaled,
+# # better to recalculate them
+# me.calcNormals()
+
+ materials = me.materials
+
+ materialNames = []
+ materialItems = [m for m in materials]
+ if materials:
+ for mat in materials:
+ if mat:
+ materialNames.append(mat.name)
+ else:
+ materialNames.append(None)
+ # Cant use LC because some materials are None.
+ # materialNames = map(lambda mat: mat.name, materials) # Bug Blender, dosent account for null materials, still broken.
+
+ # Possible there null materials, will mess up indices
+ # but at least it will export, wait until Blender gets fixed.
+ materialNames.extend((16 - len(materialNames)) * [None])
+ materialItems.extend((16 - len(materialItems)) * [None])
+
+ # Sort by Material, then images
+ # so we dont over context switch in the obj file.
+ if EXPORT_KEEP_VERT_ORDER:
+ pass
+ elif faceuv:
+ face_index_pairs.sort(key=lambda a: (a[0].material_index, hash(uv_layer[a[1]].image), a[0].use_smooth))
+ elif len(materials) > 1:
+ face_index_pairs.sort(key=lambda a: (a[0].material_index, a[0].use_smooth))
+ else:
+ # no materials
+ face_index_pairs.sort(key=lambda a: a[0].use_smooth)
+# if EXPORT_KEEP_VERT_ORDER:
+# pass
+# elif faceuv:
+# try: faces.sort(key = lambda a: (a.mat, a.image, a.use_smooth))
+# except: faces.sort(lambda a,b: cmp((a.mat, a.image, a.use_smooth), (b.mat, b.image, b.use_smooth)))
+# elif len(materials) > 1:
+# try: faces.sort(key = lambda a: (a.mat, a.use_smooth))
+# except: faces.sort(lambda a,b: cmp((a.mat, a.use_smooth), (b.mat, b.use_smooth)))
+# else:
+# # no materials
+# try: faces.sort(key = lambda a: a.use_smooth)
+# except: faces.sort(lambda a,b: cmp(a.use_smooth, b.use_smooth))
+
+ # Set the default mat to no material and no image.
+ contextMat = 0, 0 # Can never be this, so we will label a new material teh first chance we get.
+ contextSmooth = None # Will either be true or false, set bad to force initialization switch.
+
+ if EXPORT_BLEN_OBS or EXPORT_GROUP_BY_OB:
+ name1 = ob.name
+ name2 = ob.data.name
+ if name1 == name2:
+ obnamestring = name_compat(name1)
+ else:
+ obnamestring = '%s_%s' % (name_compat(name1), name_compat(name2))
+
+ if EXPORT_BLEN_OBS:
+ file.write('o %s\n' % obnamestring) # Write Object name
+ else: # if EXPORT_GROUP_BY_OB:
+ file.write('g %s\n' % obnamestring)
+
+ # Vert
+ for v in me_verts:
+ file.write('v %.6f %.6f %.6f\n' % v.co[:])
+
+ # UV
+ if faceuv:
+ uv_face_mapping = [[0, 0, 0, 0] for i in range(len(face_index_pairs))] # a bit of a waste for tri's :/
+
+ uv_dict = {} # could use a set() here
+ uv_layer = me.uv_textures.active.data
+ for f, f_index in face_index_pairs:
+ for uv_index, uv in enumerate(uv_layer[f_index].uv):
+ uvkey = veckey2d(uv)
+ try:
+ uv_face_mapping[f_index][uv_index] = uv_dict[uvkey]
+ except:
+ uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] = len(uv_dict)
+ file.write('vt %.6f %.6f\n' % uv[:])
+
+ uv_unique_count = len(uv_dict)
+# del uv, uvkey, uv_dict, f_index, uv_index
+ # Only need uv_unique_count and uv_face_mapping
+
+ # NORMAL, Smooth/Non smoothed.
+ if EXPORT_NORMALS:
+ for f, f_index in face_index_pairs:
+ if f.use_smooth:
+ for v_idx in f.vertices:
+ v = me_verts[v_idx]
+ noKey = veckey3d(v.normal)
+ if noKey not in globalNormals:
+ globalNormals[noKey] = totno
+ totno += 1
+ file.write('vn %.6f %.6f %.6f\n' % noKey)
+ else:
+ # Hard, 1 normal from the face.
+ noKey = veckey3d(f.normal)
+ if noKey not in globalNormals:
+ globalNormals[noKey] = totno
+ totno += 1
+ file.write('vn %.6f %.6f %.6f\n' % noKey)
+
+ if not faceuv:
+ f_image = None
+
+ # XXX
+ if EXPORT_POLYGROUPS:
+ # Retrieve the list of vertex groups
+ vertGroupNames = [g.name for g in ob.vertex_groups]
+
+ currentVGroup = ''
+ # Create a dictionary keyed by face id and listing, for each vertex, the vertex groups it belongs to
+ vgroupsMap = [[] for _i in range(len(me_verts))]
+ for v_idx, v in enumerate(me.vertices):
+ for g in v.groups:
+ vgroupsMap[v_idx].append((vertGroupNames[g.group], g.weight))
+
+ for f, f_index in face_index_pairs:
+ f_smooth = f.use_smooth
+ f_mat = min(f.material_index, len(materialNames) - 1)
+
+ if faceuv:
+ tface = uv_layer[f_index]
+ f_image = tface.image
+
+ # MAKE KEY
+ if faceuv and f_image: # Object is always true.
+ key = materialNames[f_mat], f_image.name
+ else:
+ key = materialNames[f_mat], None # No image, use None instead.
+
+ # Write the vertex group
+ if EXPORT_POLYGROUPS:
+ if ob.vertex_groups:
+ # find what vertext group the face belongs to
+ theVGroup = findVertexGroupName(f, vgroupsMap)
+ if theVGroup != currentVGroup:
+ currentVGroup = theVGroup
+ file.write('g %s\n' % theVGroup)
+
+ # CHECK FOR CONTEXT SWITCH
+ if key == contextMat:
+ pass # Context already switched, dont do anything
+ else:
+ if key[0] is None and key[1] is None:
+ # Write a null material, since we know the context has changed.
+ if EXPORT_GROUP_BY_MAT:
+ # can be mat_image or (null)
+ file.write("g %s_%s\n" % (name_compat(ob.name), name_compat(ob.data.name))) # can be mat_image or (null)
+ file.write("usemtl (null)\n") # mat, image
+
+ else:
+ mat_data = mtl_dict.get(key)
+ if not mat_data:
+ # First add to global dict so we can export to mtl
+ # Then write mtl
+
+ # Make a new names from the mat and image name,
+ # converting any spaces to underscores with name_compat.
+
+ # If none image dont bother adding it to the name
+ if key[1] is None:
+ mat_data = mtl_dict[key] = ("%s" % name_compat(key[0])), materialItems[f_mat], f_image
+ else:
+ mat_data = mtl_dict[key] = ("%s_%s" % (name_compat(key[0]), name_compat(key[1]))), materialItems[f_mat], f_image
+
+ if EXPORT_GROUP_BY_MAT:
+ file.write("g %s_%s_%s\n" % (name_compat(ob.name), name_compat(ob.data.name), mat_data[0])) # can be mat_image or (null)
+
+ file.write("usemtl %s\n" % mat_data[0]) # can be mat_image or (null)
+
+ contextMat = key
+ if f_smooth != contextSmooth:
+ if f_smooth: # on now off
+ file.write('s 1\n')
+ contextSmooth = f_smooth
+ else: # was off now on
+ file.write('s off\n')
+ contextSmooth = f_smooth
+
+ f_v_orig = [(vi, me_verts[v_idx]) for vi, v_idx in enumerate(f.vertices)]
+
+ if not EXPORT_TRI or len(f_v_orig) == 3:
+ f_v_iter = (f_v_orig, )
+ else:
+ f_v_iter = (f_v_orig[0], f_v_orig[1], f_v_orig[2]), (f_v_orig[0], f_v_orig[2], f_v_orig[3])
+
+ # support for triangulation
+ for f_v in f_v_iter:
+ file.write('f')
+
+ if faceuv:
+ if EXPORT_NORMALS:
+ if f_smooth: # Smoothed, use vertex normals
+ for vi, v in f_v:
+ file.write(" %d/%d/%d" % \
+ (v.index + totverts,
+ totuvco + uv_face_mapping[f_index][vi],
+ globalNormals[veckey3d(v.normal)])) # vert, uv, normal
+
+ else: # No smoothing, face normals
+ no = globalNormals[veckey3d(f.normal)]
+ for vi, v in f_v:
+ file.write(" %d/%d/%d" % \
+ (v.index + totverts,
+ totuvco + uv_face_mapping[f_index][vi],
+ no)) # vert, uv, normal
+ else: # No Normals
+ for vi, v in f_v:
+ file.write(" %d/%d" % (\
+ v.index + totverts,\
+ totuvco + uv_face_mapping[f_index][vi])) # vert, uv
+
+ face_vert_index += len(f_v)
+
+ else: # No UV's
+ if EXPORT_NORMALS:
+ if f_smooth: # Smoothed, use vertex normals
+ for vi, v in f_v:
+ file.write(" %d//%d" %
+ (v.index + totverts, globalNormals[veckey3d(v.normal)]))
+ else: # No smoothing, face normals
+ no = globalNormals[veckey3d(f.normal)]
+ for vi, v in f_v:
+ file.write(" %d//%d" % (v.index + totverts, no))
+ else: # No Normals
+ for vi, v in f_v:
+ file.write(" %d" % (v.index + totverts))
+
+ file.write('\n')
+
+ # Write edges.
+ if EXPORT_EDGES:
+ for ed in edges:
+ if ed.is_loose:
+ file.write('f %d %d\n' % (ed.vertices[0] + totverts, ed.vertices[1] + totverts))
+
+ # Make the indices global rather then per mesh
+ totverts += len(me_verts)
+ if faceuv:
+ totuvco += uv_unique_count
+
+ # clean up
+ bpy.data.meshes.remove(me)
+
+ if ob_main.dupli_type != 'NONE':
+ ob_main.dupli_list_clear()
+
+ file.close()
+
+ # Now we have all our materials, save them
+ if EXPORT_MTL:
+ write_mtl(scene, mtlfilepath, EXPORT_PATH_MODE, copy_set, mtl_dict)
+
+ # copy all collected files.
+ bpy_extras.io_utils.path_reference_copy(copy_set)
+
+ print("OBJ Export time: %.2f" % (time.clock() - time1))
+
+
def _write(context, filepath,
           EXPORT_TRI,
           EXPORT_EDGES,
           EXPORT_NORMALS,
           EXPORT_NORMALS_HQ,
           EXPORT_UV,
           EXPORT_MTL,
           EXPORT_APPLY_MODIFIERS,
           EXPORT_BLEN_OBS,
           EXPORT_GROUP_BY_OB,
           EXPORT_GROUP_BY_MAT,
           EXPORT_KEEP_VERT_ORDER,
           EXPORT_POLYGROUPS,
           EXPORT_CURVE_AS_NURBS,
           EXPORT_SEL_ONLY,
           EXPORT_ALL_SCENES,  # XXX not working atm
           EXPORT_ANIMATION,
           EXPORT_GLOBAL_MATRIX,
           EXPORT_PATH_MODE,
           ):
    """
    Drive the export: resolve the output name per scene/frame, step the
    scene through the requested frames and call write_file() once per frame.
    """

    base_name, ext = os.path.splitext(filepath)
    # [base name, scene suffix, frame suffix, extension] -> joined per file
    name_parts = [base_name, '', '', ext]

    active_scene = context.scene

    # Exit edit mode before exporting, so current object states are exported properly.
    if bpy.ops.object.mode_set.poll():
        bpy.ops.object.mode_set(mode='OBJECT')

    # XXX only the active scene is exported: switching the current scene is
    # not possible at the moment, so multi-scene export stays disabled.
    for scene in [active_scene]:
        saved_frame = scene.frame_current

        if EXPORT_ALL_SCENES:  # Add scene name into the output name
            # WARNING: cleaned scene names could collide between scenes.
            name_parts[1] = '_%s' % bpy.path.clean_name(scene.name)

        # Animation exports every frame of the range; otherwise only the
        # current frame is written.
        if EXPORT_ANIMATION:
            frames = range(scene.frame_start, scene.frame_end + 1)
        else:
            frames = [saved_frame]

        for frame in frames:
            if EXPORT_ANIMATION:  # Add frame number to the output name
                name_parts[2] = '_%.6d' % frame

            scene.frame_set(frame, 0.0)
            objects = context.selected_objects if EXPORT_SEL_ONLY else scene.objects

            # NOTE: exporting frames can overwrite existing files.
            write_file(''.join(name_parts), objects, scene,
                       EXPORT_TRI,
                       EXPORT_EDGES,
                       EXPORT_NORMALS,
                       EXPORT_NORMALS_HQ,
                       EXPORT_UV,
                       EXPORT_MTL,
                       EXPORT_APPLY_MODIFIERS,
                       EXPORT_BLEN_OBS,
                       EXPORT_GROUP_BY_OB,
                       EXPORT_GROUP_BY_MAT,
                       EXPORT_KEEP_VERT_ORDER,
                       EXPORT_POLYGROUPS,
                       EXPORT_CURVE_AS_NURBS,
                       EXPORT_GLOBAL_MATRIX,
                       EXPORT_PATH_MODE,
                       )

        # Restore the frame the scene was on before exporting.
        scene.frame_set(saved_frame, 0.0)
+
+
+'''
+Currently the exporter lacks these features:
+* multiple scene export (only active scene is written)
+* particles
+'''
+
+
def save(operator, context, filepath="",
         use_triangles=False,
         use_edges=True,
         use_normals=False,
         use_hq_normals=False,
         use_uvs=True,
         use_materials=True,
         use_apply_modifiers=True,
         use_blen_objects=True,
         group_by_object=False,
         group_by_material=False,
         keep_vertex_order=False,
         use_vertex_groups=False,
         use_nurbs=True,
         use_selection=True,
         use_all_scenes=False,
         use_animation=False,
         global_matrix=None,
         path_mode='AUTO'
         ):
    """Operator entry point: map the UI option names onto _write()'s keywords."""

    options = {
        'EXPORT_TRI': use_triangles,
        'EXPORT_EDGES': use_edges,
        'EXPORT_NORMALS': use_normals,
        'EXPORT_NORMALS_HQ': use_hq_normals,
        'EXPORT_UV': use_uvs,
        'EXPORT_MTL': use_materials,
        'EXPORT_APPLY_MODIFIERS': use_apply_modifiers,
        'EXPORT_BLEN_OBS': use_blen_objects,
        'EXPORT_GROUP_BY_OB': group_by_object,
        'EXPORT_GROUP_BY_MAT': group_by_material,
        'EXPORT_KEEP_VERT_ORDER': keep_vertex_order,
        'EXPORT_POLYGROUPS': use_vertex_groups,
        'EXPORT_CURVE_AS_NURBS': use_nurbs,
        'EXPORT_SEL_ONLY': use_selection,
        'EXPORT_ALL_SCENES': use_all_scenes,
        'EXPORT_ANIMATION': use_animation,
        'EXPORT_GLOBAL_MATRIX': global_matrix,
        'EXPORT_PATH_MODE': path_mode,
    }
    _write(context, filepath, **options)

    return {'FINISHED'}
diff --git a/io_scene_obj/import_obj.py b/io_scene_obj/import_obj.py
new file mode 100644
index 00000000..697c6508
--- /dev/null
+++ b/io_scene_obj/import_obj.py
@@ -0,0 +1,1148 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+# Script copyright (C) Campbell Barton
+# Contributors: Campbell Barton, Jiri Hnidek, Paolo Ciccone
+
+"""
+This script imports a Wavefront OBJ file into Blender.
+
+Usage:
+Run this script from "File->Import" menu and then load the desired OBJ file.
+Note, This loads mesh objects and materials only, nurbs and curves are not supported.
+
+http://wiki.blender.org/index.php/Scripts/Manual/Import/wavefront_obj
+"""
+
+import os
+import time
+import bpy
+import mathutils
+from bpy_extras.io_utils import unpack_list, unpack_face_list
+from bpy_extras.image_utils import load_image
+
+
def line_value(line_split):
    '''
    Return the value portion of an already-split OBJ/MTL line as one bytes
    string, or None when the line has no value (only a keyword, or nothing).
    '''
    word_count = len(line_split)

    if word_count <= 1:
        return None

    if word_count == 2:
        return line_split[1]

    # Multi-word value (e.g. a file name containing spaces): rejoin it.
    return b' '.join(line_split[1:])
+
+
def obj_image_load(imagepath, DIR, use_image_search):
    """
    Load the image at *imagepath* (bytes) relative to *DIR*, returning the
    image or None.  Max's OBJ exporter replaces spaces with underscores, so a
    name with underscores is first retried with spaces restored.
    """
    candidates = [imagepath]
    if b'_' in imagepath:
        candidates.insert(0, imagepath.replace(b'_', b' '))

    for path in candidates:
        image = load_image(path, DIR)
        if image:
            return image

    print("failed to load %r doesn't exist" % imagepath)
    return None
+
+# def obj_image_load(imagepath, DIR, use_image_search):
+# '''
+# Mainly uses comprehensiveImageLoad
+# but tries to replace '_' with ' ' for Max's exporter replaces spaces with underscores.
+# '''
+
+# if '_' in imagepath:
+# image= BPyImage.comprehensiveImageLoad(imagepath, DIR, PLACE_HOLDER= False, RECURSIVE= use_image_search)
+# if image: return image
+# # Did the exporter rename the image?
+# image= BPyImage.comprehensiveImageLoad(imagepath.replace('_', ' '), DIR, PLACE_HOLDER= False, RECURSIVE= use_image_search)
+# if image: return image
+
+# # Return an image, placeholder if it dosnt exist
+# image= BPyImage.comprehensiveImageLoad(imagepath, DIR, PLACE_HOLDER= True, RECURSIVE= use_image_search)
+# return image
+
+
def create_materials(filepath, material_libs, unique_materials, unique_material_images, use_image_search):
    '''
    Create all the used materials in this obj,
    assign colors and images to the materials from all referenced material libs.

    Mutates *unique_materials* in place: each material name (bytes) is mapped
    to a new bpy material, and *unique_material_images* gets an
    (image, has_data) pair per material name.  MTL files listed in
    *material_libs* are parsed from the directory of *filepath*.
    '''
    DIR = os.path.dirname(filepath)

    #==================================================================================#
    # This function sets textures defined in .mtl file                                 #
    #==================================================================================#
    def load_material_image(blender_material, context_material_name, imagepath, type):
        # NOTE(review): 'type' shadows the builtin; it is the MTL map keyword
        # ('Kd', 'Ka', 'Ks', 'Bump', 'D' or 'refl').

        texture = bpy.data.textures.new(name=type, type='IMAGE')

        # Absolute path - c:\.. etc would work here
        image = obj_image_load(imagepath, DIR, use_image_search)
        has_data = False

        if image:
            texture.image = image
            has_data = image.has_data

        # Adds textures for materials (rendering)
        if type == 'Kd':
            if has_data and image.depth == 32:
                # Image has alpha

                mtex = blender_material.texture_slots.add()
                mtex.texture = texture
                mtex.texture_coords = 'UV'
                mtex.use_map_color_diffuse = True
                mtex.use_map_alpha = True

                texture.use_mipmap = True
                texture.use_interpolation = True
                texture.use_alpha = True
                blender_material.use_transparency = True
                blender_material.alpha = 0.0
            else:
                mtex = blender_material.texture_slots.add()
                mtex.texture = texture
                mtex.texture_coords = 'UV'
                mtex.use_map_color_diffuse = True

            # adds textures to faces (Textured/Alt-Z mode)
            # Only apply the diffuse texture to the face if the image has not been set with the inline usemat func.
            unique_material_images[context_material_name] = image, has_data  # set the texface image

        elif type == 'Ka':
            mtex = blender_material.texture_slots.add()
            mtex.use_map_color_diffuse = False

            mtex.texture = texture
            mtex.texture_coords = 'UV'
            mtex.use_map_ambient = True

        elif type == 'Ks':
            mtex = blender_material.texture_slots.add()
            mtex.use_map_color_diffuse = False

            mtex.texture = texture
            mtex.texture_coords = 'UV'
            mtex.use_map_specular = True

        elif type == 'Bump':
            mtex = blender_material.texture_slots.add()
            mtex.use_map_color_diffuse = False

            mtex.texture = texture
            mtex.texture_coords = 'UV'
            mtex.use_map_normal = True

        elif type == 'D':
            mtex = blender_material.texture_slots.add()
            mtex.use_map_color_diffuse = False

            mtex.texture = texture
            mtex.texture_coords = 'UV'
            mtex.use_map_alpha = True
            blender_material.use_transparency = True
            blender_material.transparency_method = 'Z_TRANSPARENCY'
            blender_material.alpha = 0.0
            # Todo, unset diffuse material alpha if it has an alpha channel

        elif type == 'refl':
            mtex = blender_material.texture_slots.add()
            mtex.use_map_color_diffuse = False

            mtex.texture = texture
            mtex.texture_coords = 'UV'
            mtex.use_map_reflect = True

        else:
            raise Exception("invalid type %r" % type)

    # Add an MTL with the same name as the obj if no MTLs are specified.
    temp_mtl = os.path.splitext((os.path.basename(filepath)))[0] + b'.mtl'

    if os.path.exists(os.path.join(DIR, temp_mtl)) and temp_mtl not in material_libs:
        material_libs.append(temp_mtl)
    del temp_mtl

    # Create new materials
    for name in unique_materials:  # .keys()
        if name is not None:
            unique_materials[name] = bpy.data.materials.new(name.decode('utf-8', "replace"))
            unique_material_images[name] = None, False  # assign None to all material images to start with, add to later.

    # faces with no material still need an entry
    unique_materials[None] = None
    unique_material_images[None] = None, False

    for libname in material_libs:
        # print(libname)
        mtlpath = os.path.join(DIR, libname)
        if not os.path.exists(mtlpath):
            print ("\tMaterial not found MTL: %r" % mtlpath)
        else:
            #print('\t\tloading mtl: %e' % mtlpath)
            context_material = None
            # MTL is read as bytes; names stay bytes until material creation.
            mtl = open(mtlpath, 'rb')
            for line in mtl:  # .readlines():
                line = line.strip()
                if not line or line.startswith(b'#'):
                    pass
                elif line.startswith(b'newmtl'):
                    context_material_name = line_value(line.split())
                    context_material = unique_materials.get(context_material_name)

                elif context_material:
                    # we need to make a material to assign properties to it.
                    line_split = line.split()
                    line_lower = line.lower().lstrip()
                    if line_lower.startswith(b'ka'):
                        context_material.mirror_color = float(line_split[1]), float(line_split[2]), float(line_split[3])
                    elif line_lower.startswith(b'kd'):
                        context_material.diffuse_color = float(line_split[1]), float(line_split[2]), float(line_split[3])
                    elif line_lower.startswith(b'ks'):
                        context_material.specular_color = float(line_split[1]), float(line_split[2]), float(line_split[3])
                    elif line_lower.startswith(b'ns'):
                        # 0.51 scales the OBJ 0..1000 exponent roughly into
                        # Blender's 1..511 hardness range.
                        context_material.specular_hardness = int((float(line_split[1]) * 0.51))
                    elif line_lower.startswith(b'ni'):  # Refraction index
                        context_material.raytrace_transparency.ior = max(1, min(float(line_split[1]), 3))  # between 1 and 3
                    elif line_lower.startswith(b'd') or line_lower.startswith(b'tr'):
                        # dissolve / transparency
                        context_material.alpha = float(line_split[1])
                        context_material.use_transparency = True
                        context_material.transparency_method = 'Z_TRANSPARENCY'
                    elif line_lower.startswith(b'tf'):
                        # rgb, filter color, blender has no support for this.
                        pass
                    elif line_lower.startswith(b'illum'):
                        illum = int(line_split[1])

                        do_ambient = True
                        do_highlight = False
                        do_reflection = False
                        do_transparency = False
                        do_glass = False
                        do_fresnel = False
                        do_raytrace = False

                        # inline comments are from the spec, v4.2
                        if illum == 0:
                            # Color on and Ambient off
                            do_ambient = False
                        elif illum == 1:
                            # Color on and Ambient on
                            pass
                        elif illum == 2:
                            # Highlight on
                            do_highlight = True
                        elif illum == 3:
                            # Reflection on and Ray trace on
                            do_reflection = True
                            do_raytrace = True
                        elif illum == 4:
                            # Transparency: Glass on
                            # Reflection: Ray trace on
                            do_transparency = True
                            do_reflection = True
                            do_glass = True
                            do_raytrace = True
                        elif illum == 5:
                            # Reflection: Fresnel on and Ray trace on
                            do_reflection = True
                            do_fresnel = True
                            do_raytrace = True
                        elif illum == 6:
                            # Transparency: Refraction on
                            # Reflection: Fresnel off and Ray trace on
                            do_transparency = True
                            do_reflection = True
                            do_raytrace = True
                        elif illum == 7:
                            # Transparency: Refraction on
                            # Reflection: Fresnel on and Ray trace on
                            do_transparency = True
                            do_reflection = True
                            do_fresnel = True
                            do_raytrace = True
                        elif illum == 8:
                            # Reflection on and Ray trace off
                            do_reflection = True
                        elif illum == 9:
                            # Transparency: Glass on
                            # Reflection: Ray trace off
                            do_transparency = True
                            do_reflection = True
                            do_glass = True
                        elif illum == 10:
                            # Casts shadows onto invisible surfaces

                            # blender can't do this
                            pass

                        if do_ambient:
                            context_material.ambient = 1.0
                        else:
                            context_material.ambient = 0.0

                        if do_highlight:
                            # FIXME, how else to use this?
                            context_material.specular_intensity = 1.0

                        if do_reflection:
                            context_material.raytrace_mirror.use = True
                            context_material.raytrace_mirror.reflect_factor = 1.0

                        if do_transparency:
                            context_material.use_transparency = True
                            context_material.transparency_method = 'RAYTRACE' if do_raytrace else 'Z_TRANSPARENCY'
                            context_material.alpha = 0.0

                        if do_glass:
                            context_material.raytrace_transparency.ior = 1.5

                        if do_fresnel:
                            context_material.raytrace_mirror.fresnel = 1.0  # could be any value for 'ON'

                        """
                        if do_raytrace:
                            context_material.use_raytrace = True
                        else:
                            context_material.use_raytrace = False
                        """
                        # XXX, this is not following the OBJ spec, but this was
                        # written when raytracing wasn't default, annoying to disable for blender users.
                        context_material.use_raytrace = True

                    elif line_lower.startswith(b'map_ka'):
                        img_filepath = line_value(line.split())
                        if img_filepath:
                            load_material_image(context_material, context_material_name, img_filepath, 'Ka')
                    elif line_lower.startswith(b'map_ks'):
                        img_filepath = line_value(line.split())
                        if img_filepath:
                            load_material_image(context_material, context_material_name, img_filepath, 'Ks')
                    elif line_lower.startswith(b'map_kd'):
                        img_filepath = line_value(line.split())
                        if img_filepath:
                            load_material_image(context_material, context_material_name, img_filepath, 'Kd')
                    elif line_lower.startswith(b'map_bump') or line_lower.startswith(b'bump'):  # 'bump' is incorrect but some files use it.
                        img_filepath = line_value(line.split())
                        if img_filepath:
                            load_material_image(context_material, context_material_name, img_filepath, 'Bump')
                    elif line_lower.startswith(b'map_d') or line_lower.startswith(b'map_tr'):  # Alpha map - Dissolve
                        img_filepath = line_value(line.split())
                        if img_filepath:
                            load_material_image(context_material, context_material_name, img_filepath, 'D')

                    elif line_lower.startswith(b'refl'):  # reflectionmap
                        img_filepath = line_value(line.split())
                        if img_filepath:
                            load_material_image(context_material, context_material_name, img_filepath, 'refl')
                    else:
                        print("\t%r:%r (ignored)" % (filepath, line))
            mtl.close()
+
+
def split_mesh(verts_loc, faces, unique_materials, filepath, SPLIT_OB_OR_GROUP):
    '''
    Take verts_loc and faces and separate them into one
    (verts_loc, faces, unique_materials, dataname) set per object/group key
    (face[4]); vertex indices inside each face are remapped in place to the
    per-set vertex list.
    '''

    filename = os.path.splitext((os.path.basename(filepath)))[0]

    if not SPLIT_OB_OR_GROUP:
        # use the filename for the object name since we aren't chopping up the mesh.
        return [(verts_loc, faces, unique_materials, filename)]

    def key_to_name(key):
        # an empty/None key falls back to the file name
        return key if key else filename

    # key -> (verts_split, faces_split, unique_materials_split, vert_remap)
    face_split_dict = {}

    oldkey = -1  # sentinel: can never equal a real key

    for face in faces:
        key = face[4]

        if oldkey != key:
            # Key changed: (re)select the bucket for this key, creating it
            # on first sight with a fresh global->local vertex remap table.
            bucket = face_split_dict.get(key)
            if bucket is None:
                bucket = ([], [], {}, [-1] * len(verts_loc))
                face_split_dict[key] = bucket
            verts_split, faces_split, unique_materials_split, vert_remap = bucket

            oldkey = key

        loc_indices = face[0]

        # Remap verts to the bucket-local vert list, adding each global
        # vertex only the first time it is seen.
        for pos, vert_index in enumerate(loc_indices):
            mapped = vert_remap[vert_index]
            if mapped == -1:
                mapped = len(verts_split)
                vert_remap[vert_index] = mapped
                verts_split.append(verts_loc[vert_index])
            loc_indices[pos] = mapped

        # carry over only the materials this bucket actually uses
        matname = face[2]
        if matname and matname not in unique_materials_split:
            unique_materials_split[matname] = unique_materials[matname]

        faces_split.append(face)

    # drop the remap tables and attach the resolved names
    return [(verts, fcs, mats, key_to_name(key))
            for key, (verts, fcs, mats, _remap) in list(face_split_dict.items())]
+
+
+def create_mesh(new_objects, has_ngons, use_ngons, use_edges, verts_loc, verts_tex, faces, unique_materials, unique_material_images, unique_smooth_groups, vertex_groups, dataname):
+ '''
+ Takes all the data gathered and generates a mesh, adding the new object to new_objects
+ deals with fgons, sharp edges and assigning materials
+ '''
+ from bpy_extras.mesh_utils import ngon_tesselate
+
+ if not has_ngons:
+ use_ngons = False
+
+ if unique_smooth_groups:
+ sharp_edges = {}
+ smooth_group_users = {context_smooth_group: {} for context_smooth_group in list(unique_smooth_groups.keys())}
+ context_smooth_group_old = -1
+
+ # Split fgons into tri's
+ fgon_edges = {} # Used for storing fgon keys
+ if use_edges:
+ edges = []
+
+ context_object = None
+
+ # reverse loop through face indices
+ for f_idx in range(len(faces) - 1, -1, -1):
+
+ face_vert_loc_indices,\
+ face_vert_tex_indices,\
+ context_material,\
+ context_smooth_group,\
+ context_object = faces[f_idx]
+
+ len_face_vert_loc_indices = len(face_vert_loc_indices)
+
+ if len_face_vert_loc_indices == 1:
+ faces.pop(f_idx) # cant add single vert faces
+
+ elif not face_vert_tex_indices or len_face_vert_loc_indices == 2: # faces that have no texture coords are lines
+ if use_edges:
+ # generators are better in python 2.4+ but can't be used in 2.3
+ # edges.extend( (face_vert_loc_indices[i], face_vert_loc_indices[i+1]) for i in xrange(len_face_vert_loc_indices-1) )
+ edges.extend([(face_vert_loc_indices[i], face_vert_loc_indices[i + 1]) for i in range(len_face_vert_loc_indices - 1)])
+
+ faces.pop(f_idx)
+ else:
+
+ # Smooth Group
+ if unique_smooth_groups and context_smooth_group:
+ # Is a part of of a smooth group and is a face
+ if context_smooth_group_old is not context_smooth_group:
+ edge_dict = smooth_group_users[context_smooth_group]
+ context_smooth_group_old = context_smooth_group
+
+ for i in range(len_face_vert_loc_indices):
+ i1 = face_vert_loc_indices[i]
+ i2 = face_vert_loc_indices[i - 1]
+ if i1 > i2:
+ i1, i2 = i2, i1
+
+ try:
+ edge_dict[i1, i2] += 1
+ except KeyError:
+ edge_dict[i1, i2] = 1
+
+ # FGons into triangles
+ if has_ngons and len_face_vert_loc_indices > 4:
+
+ ngon_face_indices = ngon_tesselate(verts_loc, face_vert_loc_indices)
+ faces.extend(
+ [(
+ [face_vert_loc_indices[ngon[0]], face_vert_loc_indices[ngon[1]], face_vert_loc_indices[ngon[2]]],
+ [face_vert_tex_indices[ngon[0]], face_vert_tex_indices[ngon[1]], face_vert_tex_indices[ngon[2]]],
+ context_material,
+ context_smooth_group,
+ context_object)
+ for ngon in ngon_face_indices]
+ )
+
+ # edges to make fgons
+ if use_ngons:
+ edge_users = {}
+ for ngon in ngon_face_indices:
+ for i in (0, 1, 2):
+ i1 = face_vert_loc_indices[ngon[i]]
+ i2 = face_vert_loc_indices[ngon[i - 1]]
+ if i1 > i2:
+ i1, i2 = i2, i1
+
+ try:
+ edge_users[i1, i2] += 1
+ except KeyError:
+ edge_users[i1, i2] = 1
+
+ for key, users in edge_users.items():
+ if users > 1:
+ fgon_edges[key] = None
+
+ # remove all after 3, means we dont have to pop this one.
+ faces.pop(f_idx)
+
+ # Build sharp edges
+ if unique_smooth_groups:
+ for edge_dict in list(smooth_group_users.values()):
+ for key, users in list(edge_dict.items()):
+ if users == 1: # This edge is on the boundry of a group
+ sharp_edges[key] = None
+
+ # map the material names to an index
+ material_mapping = {name: i for i, name in enumerate(unique_materials)} # enumerate over unique_materials keys()
+
+ materials = [None] * len(unique_materials)
+
+ for name, index in list(material_mapping.items()):
+ materials[index] = unique_materials[name]
+
+ me = bpy.data.meshes.new(dataname.decode('utf-8', "replace"))
+
+ # make sure the list isnt too big
+ for material in materials:
+ me.materials.append(material)
+
+ me.vertices.add(len(verts_loc))
+ me.faces.add(len(faces))
+
+ # verts_loc is a list of (x, y, z) tuples
+ me.vertices.foreach_set("co", unpack_list(verts_loc))
+
+ # faces is a list of (vert_indices, texco_indices, ...) tuples
+ # XXX faces should contain either 3 or 4 verts
+ # XXX no check for valid face indices
+ me.faces.foreach_set("vertices_raw", unpack_face_list([f[0] for f in faces]))
+
+ if verts_tex and me.faces:
+ me.uv_textures.new()
+
+ context_material_old = -1 # avoid a dict lookup
+ mat = 0 # rare case it may be un-initialized.
+ me_faces = me.faces
+
+ for i, face in enumerate(faces):
+ if len(face[0]) < 2:
+ pass # raise "bad face"
+ elif len(face[0]) == 2:
+ if use_edges:
+ edges.append(face[0])
+ else:
+
+ blender_face = me.faces[i]
+
+ face_vert_loc_indices,\
+ face_vert_tex_indices,\
+ context_material,\
+ context_smooth_group,\
+ context_object = face
+
+ if context_smooth_group:
+ blender_face.use_smooth = True
+
+ if context_material:
+ if context_material_old is not context_material:
+ mat = material_mapping[context_material]
+ context_material_old = context_material
+
+ blender_face.material_index = mat
+# blender_face.mat= mat
+
+ if verts_tex:
+
+ blender_tface = me.uv_textures[0].data[i]
+
+ if context_material:
+ image, has_data = unique_material_images[context_material]
+ if image: # Can be none if the material dosnt have an image.
+ blender_tface.image = image
+ blender_tface.use_image = True
+ if has_data and image.depth == 32:
+ blender_tface.blend_type = 'ALPHA'
+
+ # BUG - Evil eekadoodle problem where faces that have vert index 0 location at 3 or 4 are shuffled.
+ if len(face_vert_loc_indices) == 4:
+ if face_vert_loc_indices[2] == 0 or face_vert_loc_indices[3] == 0:
+ face_vert_tex_indices = face_vert_tex_indices[2], face_vert_tex_indices[3], face_vert_tex_indices[0], face_vert_tex_indices[1]
+ else: # length of 3
+ if face_vert_loc_indices[2] == 0:
+ face_vert_tex_indices = face_vert_tex_indices[1], face_vert_tex_indices[2], face_vert_tex_indices[0]
+ # END EEEKADOODLE FIX
+
+ # assign material, uv's and image
+ blender_tface.uv1 = verts_tex[face_vert_tex_indices[0]]
+ blender_tface.uv2 = verts_tex[face_vert_tex_indices[1]]
+ blender_tface.uv3 = verts_tex[face_vert_tex_indices[2]]
+
+ if len(face_vert_loc_indices) == 4:
+ blender_tface.uv4 = verts_tex[face_vert_tex_indices[3]]
+
+# for ii, uv in enumerate(blender_face.uv):
+# uv.x, uv.y= verts_tex[face_vert_tex_indices[ii]]
+ del me_faces
+# del ALPHA
+
+ if use_edges and not edges:
+ use_edges = False
+
+ if use_edges:
+ me.edges.add(len(edges))
+
+ # edges should be a list of (a, b) tuples
+ me.edges.foreach_set("vertices", unpack_list(edges))
+# me_edges.extend( edges )
+
+# del me_edges
+
+ # Add edge faces.
+# me_edges= me.edges
+
+ def edges_match(e1, e2):
+ return (e1[0] == e2[0] and e1[1] == e2[1]) or (e1[0] == e2[1] and e1[1] == e2[0])
+
+ # XXX slow
+# if use_ngons and fgon_edges:
+# for fgon_edge in fgon_edges.keys():
+# for ed in me.edges:
+# if edges_match(fgon_edge, ed.vertices):
+# ed.is_fgon = True
+
+# if use_ngons and fgon_edges:
+# FGON= Mesh.EdgeFlags.FGON
+# for ed in me.findEdges( fgon_edges.keys() ):
+# if ed is not None:
+# me_edges[ed].flag |= FGON
+# del FGON
+
+ # XXX slow
+# if unique_smooth_groups and sharp_edges:
+# for sharp_edge in sharp_edges.keys():
+# for ed in me.edges:
+# if edges_match(sharp_edge, ed.vertices):
+# ed.use_edge_sharp = True
+
+# if unique_smooth_groups and sharp_edges:
+# SHARP= Mesh.EdgeFlags.SHARP
+# for ed in me.findEdges( sharp_edges.keys() ):
+# if ed is not None:
+# me_edges[ed].flag |= SHARP
+# del SHARP
+
+ me.validate()
+ me.update(calc_edges=use_edges)
+
+ ob = bpy.data.objects.new("Mesh", me)
+ new_objects.append(ob)
+
+ # Create the vertex groups. No need to have the flag passed here since we test for the
+ # content of the vertex_groups. If the user selects to NOT have vertex groups saved then
+ # the following test will never run
+ for group_name, group_indices in vertex_groups.items():
+ group = ob.vertex_groups.new(group_name.decode('utf-8', "replace"))
+ group.add(group_indices, 1.0, 'REPLACE')
+
+
def create_nurbs(context_nurbs, vert_loc, new_objects):
    """Build a blender NURBS curve object from parsed OBJ curve data.

    Only non-rational 'bspline' curves are handled; surfaces and other
    curve types are rejected with a warning.  The new object is appended
    to *new_objects* (it is not linked to any scene here).
    """
    deg = context_nurbs.get(b'deg', (3,))
    curv_range = context_nurbs.get(b'curv_range')
    curv_idx = context_nurbs.get(b'curv_idx', [])
    parm_u = context_nurbs.get(b'parm_u', [])
    parm_v = context_nurbs.get(b'parm_v', [])
    name = context_nurbs.get(b'name', b'ObjNurb')
    cstype = context_nurbs.get(b'cstype')

    # Guard clauses: skip anything this importer cannot represent.
    if cstype is None:
        print('\tWarning, cstype not found')
        return
    if cstype != b'bspline':
        print('\tWarning, cstype is not supported (only bspline)')
        return
    if not curv_idx:
        print('\tWarning, curv argument empty or not set')
        return
    if len(deg) > 1 or parm_v:
        print('\tWarning, surfaces not supported')
        return

    cu = bpy.data.curves.new(name.decode('utf-8', "replace"), 'CURVE')
    cu.dimensions = '3D'

    nu = cu.splines.new('NURBS')
    nu.points.add(len(curv_idx) - 1)  # a new spline starts out with one point

    # Flatten (x, y, z, 1.0) control points into the flat buffer foreach_set expects.
    coords = []
    for vt_idx in curv_idx:
        coords.extend(vert_loc[vt_idx] + (1.0,))
    nu.points.foreach_set("co", coords)

    nu.order_u = deg[0] + 1

    # Detect the endpoint flag from the knot vector: the curve clamps to its
    # endpoints when the first/last (degree + 1) knots sit on the range bounds.
    do_endpoints = False
    if curv_range and len(parm_u) > deg[0] + 1:
        do_endpoints = True
        for i in range(deg[0] + 1):
            if (abs(parm_u[i] - curv_range[0]) > 0.0001 or
                    abs(parm_u[-(i + 1)] - curv_range[1]) > 0.0001):
                do_endpoints = False
                break

    if do_endpoints:
        nu.use_endpoint_u = True

    # NOTE: cyclic (closed curve) detection was left disabled in the original
    # importer and is intentionally not implemented here either.

    ob = bpy.data.objects.new(name.decode('utf-8', "replace"), cu)

    new_objects.append(ob)
+
+
def strip_slash(line_split):
    """Handle OBJ line continuations, mutating *line_split* in place.

    If the last token ends with a backslash, remove that backslash
    (dropping the token entirely when it is just the backslash) and
    return True so the caller knows the logical line continues on the
    next physical line; otherwise return False.
    """
    tail = line_split[-1]
    if tail[-1] != 92:  # 92 == ord('\\'): no continuation marker
        return False
    if len(tail) == 1:
        line_split.pop()  # the backslash was a token of its own
    else:
        line_split[-1] = tail[:-1]  # strip it from the end of the last number
    return True
+
+
def get_float_func(filepath):
    '''
    Return the string-to-float parser suited to this obj file.

    Scans the vertex data ('v', 'vn', 'vt' lines) for the decimal
    separator in use:

    - comma found: return a wrapper that swaps b',' for b'.' before
      converting (some exporters write locale-formatted floats),
    - period found: the plain ``float`` builtin is enough,
    - no fractional values at all: fall back to ``float``.

    :arg filepath: path of the obj file to inspect.
    '''
    # 'with' guarantees the handle is closed even if a read raises,
    # unlike the previous explicit open()/close() pairs which leaked
    # the file object on error.
    with open(filepath, 'rb') as file:
        for line in file:
            line = line.lstrip()
            if line.startswith(b'v'):  # vn vt v
                if b',' in line:
                    return lambda f: float(f.replace(b',', b'.'))
                elif b'.' in line:
                    return float

    # in case all vert values were ints
    return float
+
+
def load(operator, context, filepath,
         global_clamp_size=0.0,
         use_ngons=True,
         use_smooth_groups=True,
         use_edges=True,
         use_split_objects=True,
         use_split_groups=True,
         use_image_search=True,
         use_groups_as_vgroups=False,
         global_matrix=None,
         ):
    '''
    Called by the user interface or another script.
    load_obj(path) - should give acceptable results.
    This function parses the file and sends the data off
    to be split into objects and then converted into mesh objects.

    Keyword arguments:
    global_clamp_size -- scale imported objects down until their largest
        bound fits under this size (0.0 disables clamping)
    use_ngons -- tesselate faces with more than 4 verts (extra edges
        are tagged as fgon edges)
    use_smooth_groups -- honour 's' smoothing-group statements
    use_edges -- import 2-vert faces and 'l' statements as mesh edges
    use_split_objects -- split the mesh on 'o' statements
    use_split_groups -- split the mesh on 'g' statements
    use_image_search -- search additional directories for images
    use_groups_as_vgroups -- turn 'g' groups into vertex groups
        (forced off when splitting by object/group)
    global_matrix -- 4x4 transform applied to every imported object;
        identity when None
    '''
    print('\nimporting obj %r' % filepath)

    # Work with a bytes path; the obj file itself is parsed as bytes.
    filepath = os.fsencode(filepath)

    if global_matrix is None:
        global_matrix = mathutils.Matrix()

    # Splitting and vgroup-from-group are mutually exclusive modes.
    if use_split_objects or use_split_groups:
        use_groups_as_vgroups = False

    time_main = time.time()

    verts_loc = []
    verts_tex = []
    faces = []  # tuples of the faces
    material_libs = []  # filenames of material libs this obj uses
    vertex_groups = {}  # when use_groups_as_vgroups is true

    # Get the string to float conversion func for this file- is 'float' for almost all files.
    float_func = get_float_func(filepath)

    # Context variables: the 'current' state while scanning the file,
    # captured into each face tuple as it is instanced.
    context_material = None
    context_smooth_group = None
    context_object = None
    context_vgroup = None

    # Nurbs
    context_nurbs = {}
    nurbs = []
    context_parm = b''  # used by nurbs too but could be used elsewhere

    has_ngons = False
    # has_smoothgroups= False - is explicit with len(unique_smooth_groups) being > 0

    # Until we can use sets
    unique_materials = {}
    unique_material_images = {}
    unique_smooth_groups = {}
    # unique_obects= {} - no use for this variable since the objects are stored in the face.

    # When a statement ends with '\' it continues on the next physical
    # line; context_multi_line remembers which statement type ('f', 'l',
    # 'curv', 'parm') is being continued.
    context_multi_line = b''

    print("\tparsing obj file...")
    time_sub = time.time()
#   time_sub= sys.time()

    file = open(filepath, 'rb')
    for line in file:  # .readlines():
        line = line.lstrip()  # rare cases there is white space at the start of the line

        if line.startswith(b"v "):
            line_split = line.split()
            verts_loc.append((float_func(line_split[1]), float_func(line_split[2]), float_func(line_split[3])))

        elif line.startswith(b"vn "):
            # Normals are ignored; Blender recalculates them.
            pass

        elif line.startswith(b"vt "):
            line_split = line.split()
            verts_tex.append((float_func(line_split[1]), float_func(line_split[2])))

        # Handle face lines (as faces) and the second+ lines of a multiline face here
        # use 'f' not 'f ' because some objs (very rare) have 'fo ' for faces
        elif line.startswith(b'f') or context_multi_line == b'f':

            if context_multi_line:
                # use face_vert_loc_indices and face_vert_tex_indices previously defined and used the obj_face
                line_split = line.split()

            else:
                line_split = line[2:].split()
                face_vert_loc_indices = []
                face_vert_tex_indices = []

                # Instance a face; the index lists are filled in below
                # (and on following lines when the face is multiline).
                faces.append((\
                    face_vert_loc_indices,\
                    face_vert_tex_indices,\
                    context_material,\
                    context_smooth_group,\
                    context_object\
                    ))

            if strip_slash(line_split):
                context_multi_line = b'f'
            else:
                context_multi_line = b''

            for v in line_split:
                obj_vert = v.split(b'/')
                vert_loc_index = int(obj_vert[0]) - 1
                # Add the vertex to the current group
                # *warning*, this wont work for files that have groups defined around verts
                if use_groups_as_vgroups and context_vgroup:
                    vertex_groups[context_vgroup].append(vert_loc_index)

                # Make relative negative vert indices absolute
                if vert_loc_index < 0:
                    vert_loc_index = len(verts_loc) + vert_loc_index + 1

                face_vert_loc_indices.append(vert_loc_index)

                if len(obj_vert) > 1 and obj_vert[1]:
                    # formatting for faces with normals and textures is
                    # loc_index/tex_index/nor_index

                    vert_tex_index = int(obj_vert[1]) - 1
                    # Make relative negative vert indices absolute
                    if vert_tex_index < 0:
                        vert_tex_index = len(verts_tex) + vert_tex_index + 1

                    face_vert_tex_indices.append(vert_tex_index)
                else:
                    # dummy
                    face_vert_tex_indices.append(0)

            if len(face_vert_loc_indices) > 4:
                has_ngons = True

        elif use_edges and (line.startswith(b'l ') or context_multi_line == b'l'):
            # very similar to the face load function above with some parts removed

            if context_multi_line:
                # use face_vert_loc_indices and face_vert_tex_indices previously defined and used the obj_face
                line_split = line.split()

            else:
                line_split = line[2:].split()
                face_vert_loc_indices = []
                face_vert_tex_indices = []

                # Instance a face (2-vert faces are turned into edges later)
                faces.append((\
                    face_vert_loc_indices,\
                    face_vert_tex_indices,\
                    context_material,\
                    context_smooth_group,\
                    context_object\
                    ))

            if strip_slash(line_split):
                context_multi_line = b'l'
            else:
                context_multi_line = b''

            # NOTE(review): 'isline' is assigned but never used — left as-is.
            isline = line.startswith(b'l')

            for v in line_split:
                vert_loc_index = int(v) - 1

                # Make relative negative vert indices absolute
                if vert_loc_index < 0:
                    vert_loc_index = len(verts_loc) + vert_loc_index + 1

                face_vert_loc_indices.append(vert_loc_index)

        elif line.startswith(b's'):
            if use_smooth_groups:
                context_smooth_group = line_value(line.split())
                if context_smooth_group == b'off':
                    context_smooth_group = None
                elif context_smooth_group:  # is not None
                    unique_smooth_groups[context_smooth_group] = None

        elif line.startswith(b'o'):
            if use_split_objects:
                context_object = line_value(line.split())
                # unique_obects[context_object]= None

        elif line.startswith(b'g'):
            if use_split_groups:
                context_object = line_value(line.split())
                # print 'context_object', context_object
                # unique_obects[context_object]= None
            elif use_groups_as_vgroups:
                context_vgroup = line_value(line.split())
                if context_vgroup and context_vgroup != b'(null)':
                    vertex_groups.setdefault(context_vgroup, [])
                else:
                    context_vgroup = None  # dont assign a vgroup

        elif line.startswith(b'usemtl'):
            context_material = line_value(line.split())
            unique_materials[context_material] = None
        elif line.startswith(b'mtllib'):  # usemap or usemat
            material_libs = list(set(material_libs) | set(line.split()[1:]))  # can have multiple mtllib filenames per line, mtllib can appear more than once, so make sure only one occurrence of each material exists

            # Nurbs support
        elif line.startswith(b'cstype '):
            context_nurbs[b'cstype'] = line_value(line.split())  # 'rat bspline' / 'bspline'
        elif line.startswith(b'curv ') or context_multi_line == b'curv':
            line_split = line.split()

            curv_idx = context_nurbs[b'curv_idx'] = context_nurbs.get(b'curv_idx', [])  # in case we're multiline

            if not context_multi_line:
                context_nurbs[b'curv_range'] = float_func(line_split[1]), float_func(line_split[2])
                line_split[0:3] = []  # remove first 3 items

            if strip_slash(line_split):
                context_multi_line = b'curv'
            else:
                context_multi_line = b''

            for i in line_split:
                vert_loc_index = int(i) - 1

                # Make relative negative vert indices absolute
                if vert_loc_index < 0:
                    vert_loc_index = len(verts_loc) + vert_loc_index + 1

                curv_idx.append(vert_loc_index)

        elif line.startswith(b'parm') or context_multi_line == b'parm':
            line_split = line.split()

            if context_multi_line:
                context_multi_line = b''
            else:
                context_parm = line_split[1]
                line_split[0:2] = []  # remove first 2

            if strip_slash(line_split):
                context_multi_line = b'parm'
            else:
                context_multi_line = b''

            if context_parm.lower() == b'u':
                context_nurbs.setdefault(b'parm_u', []).extend([float_func(f) for f in line_split])
            elif context_parm.lower() == b'v':  # surfaces not supported yet
                context_nurbs.setdefault(b'parm_v', []).extend([float_func(f) for f in line_split])
            # else: # may want to support other parm's ?

        elif line.startswith(b'deg '):
            context_nurbs[b'deg'] = [int(i) for i in line.split()[1:]]
        elif line.startswith(b'end'):
            # Add the nurbs curve
            if context_object:
                context_nurbs[b'name'] = context_object
            nurbs.append(context_nurbs)
            context_nurbs = {}
            context_parm = b''

        ''' # How to use usemap? depricated?
        elif line.startswith(b'usema'): # usemap or usemat
            context_image= line_value(line.split())
        '''

    file.close()
    time_new = time.time()
    print("%.4f sec" % (time_new - time_sub))
    time_sub = time_new

    print('\tloading materials and images...')
    create_materials(filepath, material_libs, unique_materials, unique_material_images, use_image_search)

    time_new = time.time()
    print("%.4f sec" % (time_new - time_sub))
    time_sub = time_new

    # deselect all
    if bpy.ops.object.select_all.poll():
        bpy.ops.object.select_all(action='DESELECT')

    scene = context.scene
#   scn.objects.selected = []
    new_objects = []  # put new objects here

    print('\tbuilding geometry...\n\tverts:%i faces:%i materials: %i smoothgroups:%i ...' % (len(verts_loc), len(faces), len(unique_materials), len(unique_smooth_groups)))
    # Split the mesh by objects/materials, may
    if use_split_objects or use_split_groups:
        SPLIT_OB_OR_GROUP = True
    else:
        SPLIT_OB_OR_GROUP = False

    for verts_loc_split, faces_split, unique_materials_split, dataname in split_mesh(verts_loc, faces, unique_materials, filepath, SPLIT_OB_OR_GROUP):
        # Create meshes from the data, warning 'vertex_groups' wont support splitting
        create_mesh(new_objects, has_ngons, use_ngons, use_edges, verts_loc_split, verts_tex, faces_split, unique_materials_split, unique_material_images, unique_smooth_groups, vertex_groups, dataname)

    # nurbs support
    for context_nurbs in nurbs:
        create_nurbs(context_nurbs, verts_loc, new_objects)

    # Create new obj
    for obj in new_objects:
        base = scene.objects.link(obj)
        base.select = True

        # we could apply this anywhere before scaling.
        obj.matrix_world = global_matrix

    scene.update()

    axis_min = [1000000000] * 3
    axis_max = [-1000000000] * 3

    if global_clamp_size:
        # Get all object bounds
        for ob in new_objects:
            for v in ob.bound_box:
                for axis, value in enumerate(v):
                    if axis_min[axis] > value:
                        axis_min[axis] = value
                    if axis_max[axis] < value:
                        axis_max[axis] = value

        # Scale objects: divide by 10 until the largest dimension fits
        # under global_clamp_size.
        max_axis = max(axis_max[0] - axis_min[0], axis_max[1] - axis_min[1], axis_max[2] - axis_min[2])
        scale = 1.0

        while global_clamp_size < max_axis * scale:
            scale = scale / 10.0

        for obj in new_objects:
            obj.scale = scale, scale, scale

    time_new = time.time()

    print("finished importing: %r in %.4f sec." % (filepath, (time_new - time_main)))
    return {'FINISHED'}
+
+
+# NOTES (all line numbers refer to 2.4x import_obj.py, not this file)
+# check later: line 489
+# can convert now: edge flags, edges: lines 508-528
+# ngon (uses python module BPyMesh): 384-414
+# NEXT clamp size: get bound box with RNA
+# get back to l 140 (here)
+# search image in bpy.config.textureDir - load_image
+# replaced BPyImage.comprehensiveImageLoad with a simplified version that only checks additional directory specified, but doesn't search dirs recursively (obj_image_load)
+# bitmask won't work? - 132
+# uses bpy.sys.time()
+
+if __name__ == "__main__":
+ register()
diff --git a/io_scene_x3d/__init__.py b/io_scene_x3d/__init__.py
new file mode 100644
index 00000000..c176d95c
--- /dev/null
+++ b/io_scene_x3d/__init__.py
@@ -0,0 +1,167 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+bl_info = {
+ "name": "Web3D X3D/VRML format",
+ "author": "Campbell Barton, Bart",
+ "blender": (2, 5, 7),
+ "api": 35622,
+ "location": "File > Import-Export",
+ "description": "Import-Export X3D, Import VRML",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/Import-Export/Web3D",
+ "tracker_url": "",
+ "support": 'OFFICIAL',
+ "category": "Import-Export"}
+
# To support reload properly, try to access a package var; if it's there,
# reload everything.  The original only reloaded export_x3d — the sibling
# import_x3d module is now reloaded too, so edits to either submodule are
# picked up when the add-on is re-enabled.
if "bpy" in locals():
    import imp
    if "import_x3d" in locals():
        imp.reload(import_x3d)
    if "export_x3d" in locals():
        imp.reload(export_x3d)
+
+
+import bpy
+from bpy.props import StringProperty, BoolProperty, EnumProperty
+from bpy_extras.io_utils import ImportHelper, ExportHelper, axis_conversion, path_reference_mode
+
+
class ImportX3D(bpy.types.Operator, ImportHelper):
    '''Import an X3D or VRML file'''
    bl_idname = "import_scene.x3d"
    bl_label = "Import X3D/VRML"

    # Default extension for the file selector; *.wrl (VRML) is accepted too.
    filename_ext = ".x3d"
    filter_glob = StringProperty(default="*.x3d;*.wrl", options={'HIDDEN'})

    # Source orientation: which axes of the file map onto Blender's
    # forward/up (defaults match the X3D convention of Z-forward, Y-up).
    axis_forward = EnumProperty(
            name="Forward",
            items=(('X', "X Forward", ""),
                   ('Y', "Y Forward", ""),
                   ('Z', "Z Forward", ""),
                   ('-X', "-X Forward", ""),
                   ('-Y', "-Y Forward", ""),
                   ('-Z', "-Z Forward", ""),
                   ),
            default='Z',
            )

    axis_up = EnumProperty(
            name="Up",
            items=(('X', "X Up", ""),
                   ('Y', "Y Up", ""),
                   ('Z', "Z Up", ""),
                   ('-X', "-X Up", ""),
                   ('-Y', "-Y Up", ""),
                   ('-Z', "-Z Up", ""),
                   ),
            default='Y',
            )

    def execute(self, context):
        # Imported lazily so enabling the add-on doesn't load the parser.
        from . import import_x3d

        keywords = self.as_keywords(ignore=("axis_forward", "axis_up", "filter_glob"))
        # Build the matrix mapping the file's axes onto Blender's axes and
        # pass it along with the operator's remaining properties.
        global_matrix = axis_conversion(from_forward=self.axis_forward, from_up=self.axis_up).to_4x4()
        keywords["global_matrix"] = global_matrix

        return import_x3d.load(self, context, **keywords)
+
+
class ExportX3D(bpy.types.Operator, ExportHelper):
    '''Export selection to Extensible 3D file (.x3d)'''
    bl_idname = "export_scene.x3d"
    bl_label = 'Export X3D'

    filename_ext = ".x3d"
    filter_glob = StringProperty(default="*.x3d", options={'HIDDEN'})

    # Export options shown in the file-selector side panel.
    use_selection = BoolProperty(name="Selection Only", description="Export selected objects only", default=False)
    use_apply_modifiers = BoolProperty(name="Apply Modifiers", description="Use transformed mesh data from each object", default=True)
    use_triangulate = BoolProperty(name="Triangulate", description="Write quads into 'IndexedTriangleSet'", default=True)
    use_normals = BoolProperty(name="Normals", description="Write normals with geometry", default=False)
    use_compress = BoolProperty(name="Compress", description="GZip the resulting file, requires a full python install", default=False)
    use_hierarchy = BoolProperty(name="Hierarchy", description="Export parent child relationships", default=True)
    use_h3d = BoolProperty(name="H3D Extensions", description="Export shaders for H3D", default=False)

    # Target orientation: which Blender axes are written as the file's
    # forward/up (defaults match the X3D convention of Z-forward, Y-up).
    axis_forward = EnumProperty(
            name="Forward",
            items=(('X', "X Forward", ""),
                   ('Y', "Y Forward", ""),
                   ('Z', "Z Forward", ""),
                   ('-X', "-X Forward", ""),
                   ('-Y', "-Y Forward", ""),
                   ('-Z', "-Z Forward", ""),
                   ),
            default='Z',
            )

    axis_up = EnumProperty(
            name="Up",
            items=(('X', "X Up", ""),
                   ('Y', "Y Up", ""),
                   ('Z', "Z Up", ""),
                   ('-X', "-X Up", ""),
                   ('-Y', "-Y Up", ""),
                   ('-Z', "-Z Up", ""),
                   ),
            default='Y',
            )

    # How file paths (textures etc.) are written: absolute, relative, copy...
    path_mode = path_reference_mode

    def execute(self, context):
        # Imported lazily so enabling the add-on doesn't load the exporter.
        # (The unused 'from mathutils import Matrix' import was removed.)
        from . import export_x3d

        keywords = self.as_keywords(ignore=("axis_forward", "axis_up", "check_existing", "filter_glob"))
        # Build the matrix mapping Blender's axes onto the file's axes and
        # pass it along with the operator's remaining properties.
        global_matrix = axis_conversion(to_forward=self.axis_forward, to_up=self.axis_up).to_4x4()
        keywords["global_matrix"] = global_matrix

        return export_x3d.save(self, context, **keywords)
+
+
def menu_func_import(self, context):
    # Add the importer entry to the File > Import menu.
    self.layout.operator(ImportX3D.bl_idname, text="X3D Extensible 3D (.x3d/.wrl)")
+
+
def menu_func_export(self, context):
    # Add the exporter entry to the File > Export menu.
    self.layout.operator(ExportX3D.bl_idname, text="X3D Extensible 3D (.x3d)")
+
+
def register():
    # Register every class in this module, then hook the menu entries in.
    bpy.utils.register_module(__name__)

    bpy.types.INFO_MT_file_import.append(menu_func_import)
    bpy.types.INFO_MT_file_export.append(menu_func_export)
+
+
def unregister():
    # Mirror of register(): unregister classes and remove the menu entries.
    bpy.utils.unregister_module(__name__)

    bpy.types.INFO_MT_file_import.remove(menu_func_import)
    bpy.types.INFO_MT_file_export.remove(menu_func_export)
+
+# NOTES
+# - blender version is hardcoded
+
+if __name__ == "__main__":
+ register()
diff --git a/io_scene_x3d/export_x3d.py b/io_scene_x3d/export_x3d.py
new file mode 100644
index 00000000..42334337
--- /dev/null
+++ b/io_scene_x3d/export_x3d.py
@@ -0,0 +1,1346 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+# Contributors: bart:neeneenee*de, http://www.neeneenee.de/vrml, Campbell Barton
+
+"""
+This script exports to X3D format.
+
+Usage:
+Run this script from "File->Export" menu. A pop-up will ask whether you
+want to export only selected or all relevant objects.
+
+Known issues:
+ Doesn't handle multiple materials (don't use material indices);<br>
+ Doesn't handle multiple UV textures on a single mesh (create a mesh for each texture);<br>
+ Can't get the texture array associated with material * not the UV ones;
+"""
+
+import math
+import os
+
+import bpy
+import mathutils
+
+from bpy_extras.io_utils import create_derived_objects, free_derived_objects
+
+x3d_names_reserved = {'Anchor', 'Appearance', 'Arc2D', 'ArcClose2D', 'AudioClip', 'Background', 'Billboard',
+ 'BooleanFilter', 'BooleanSequencer', 'BooleanToggle', 'BooleanTrigger', 'Box', 'Circle2D',
+ 'Collision', 'Color', 'ColorInterpolator', 'ColorRGBA', 'component', 'Cone', 'connect',
+ 'Contour2D', 'ContourPolyline2D', 'Coordinate', 'CoordinateDouble', 'CoordinateInterpolator',
+ 'CoordinateInterpolator2D', 'Cylinder', 'CylinderSensor', 'DirectionalLight', 'Disk2D',
+ 'ElevationGrid', 'EspduTransform', 'EXPORT', 'ExternProtoDeclare', 'Extrusion', 'field',
+ 'fieldValue', 'FillProperties', 'Fog', 'FontStyle', 'GeoCoordinate', 'GeoElevationGrid',
+ 'GeoLocationLocation', 'GeoLOD', 'GeoMetadata', 'GeoOrigin', 'GeoPositionInterpolator',
+ 'GeoTouchSensor', 'GeoViewpoint', 'Group', 'HAnimDisplacer', 'HAnimHumanoid', 'HAnimJoint',
+ 'HAnimSegment', 'HAnimSite', 'head', 'ImageTexture', 'IMPORT', 'IndexedFaceSet',
+ 'IndexedLineSet', 'IndexedTriangleFanSet', 'IndexedTriangleSet', 'IndexedTriangleStripSet',
+ 'Inline', 'IntegerSequencer', 'IntegerTrigger', 'IS', 'KeySensor', 'LineProperties', 'LineSet',
+ 'LoadSensor', 'LOD', 'Material', 'meta', 'MetadataDouble', 'MetadataFloat', 'MetadataInteger',
+ 'MetadataSet', 'MetadataString', 'MovieTexture', 'MultiTexture', 'MultiTextureCoordinate',
+ 'MultiTextureTransform', 'NavigationInfo', 'Normal', 'NormalInterpolator', 'NurbsCurve',
+ 'NurbsCurve2D', 'NurbsOrientationInterpolator', 'NurbsPatchSurface',
+ 'NurbsPositionInterpolator', 'NurbsSet', 'NurbsSurfaceInterpolator', 'NurbsSweptSurface',
+ 'NurbsSwungSurface', 'NurbsTextureCoordinate', 'NurbsTrimmedSurface', 'OrientationInterpolator',
+ 'PixelTexture', 'PlaneSensor', 'PointLight', 'PointSet', 'Polyline2D', 'Polypoint2D',
+ 'PositionInterpolator', 'PositionInterpolator2D', 'ProtoBody', 'ProtoDeclare', 'ProtoInstance',
+ 'ProtoInterface', 'ProximitySensor', 'ReceiverPdu', 'Rectangle2D', 'ROUTE', 'ScalarInterpolator',
+ 'Scene', 'Script', 'Shape', 'SignalPdu', 'Sound', 'Sphere', 'SphereSensor', 'SpotLight', 'StaticGroup',
+ 'StringSensor', 'Switch', 'Text', 'TextureBackground', 'TextureCoordinate', 'TextureCoordinateGenerator',
+ 'TextureTransform', 'TimeSensor', 'TimeTrigger', 'TouchSensor', 'Transform', 'TransmitterPdu',
+ 'TriangleFanSet', 'TriangleSet', 'TriangleSet2D', 'TriangleStripSet', 'Viewpoint', 'VisibilitySensor',
+ 'WorldInfo', 'X3D', 'XvlShell', 'VertexShader', 'FragmentShader', 'MultiShaderAppearance', 'ShaderAppearance'}
+
+
+def clamp_color(col):
+ return tuple([max(min(c, 1.0), 0.0) for c in col])
+
+
+def matrix_direction_neg_z(matrix):
+ return (mathutils.Vector((0.0, 0.0, -1.0)) * matrix.to_3x3()).normalized()[:]
+
+
+def prefix_quoted_str(value, prefix):
+ return value[0] + prefix + value[1:]
+
+
+def build_hierarchy(objects):
+ """ returns parent child relationships, skipping
+ """
+ objects_set = set(objects)
+ par_lookup = {}
+
+ def test_parent(parent):
+ while (parent is not None) and (parent not in objects_set):
+ parent = parent.parent
+ return parent
+
+ for obj in objects:
+ par_lookup.setdefault(test_parent(obj.parent), []).append((obj, []))
+
+ for parent, children in par_lookup.items():
+ for obj, subchildren in children:
+ subchildren[:] = par_lookup.get(obj, [])
+
+ return par_lookup[None]
+
+
+# -----------------------------------------------------------------------------
+# H3D Functions
+# -----------------------------------------------------------------------------
+def h3d_shader_glsl_frag_patch(filepath):
+ h3d_file = open(filepath, 'r')
+ lines = []
+ for l in h3d_file:
+ l = l.replace("uniform mat4 unfinvviewmat;", "")
+ l = l.replace("unfinvviewmat", "gl_ModelViewMatrixInverse")
+
+ '''
+ l = l.replace("varying vec3 varposition;", "")
+ l = l.replace("varposition", "gl_Vertex") # not needed int H3D
+ '''
+
+ #l = l.replace("varying vec3 varnormal;", "")
+ #l = l.replace("varnormal", "gl_Normal") # view normal
+ #l = l.replace("varnormal", "normalize(-(gl_ModelViewMatrix * gl_Vertex).xyz)") # view normal
+ # l = l.replace("varnormal", "gl_NormalMatrix * gl_Normal") # view normal
+ lines.append(l)
+
+ h3d_file.close()
+
+ h3d_file = open(filepath, 'w')
+ h3d_file.writelines(lines)
+ h3d_file.close()
+
+
+# -----------------------------------------------------------------------------
+# Functions for writing output file
+# -----------------------------------------------------------------------------
+
+def export(file,
+ global_matrix,
+ scene,
+ use_apply_modifiers=False,
+ use_selection=True,
+ use_triangulate=False,
+ use_normals=False,
+ use_hierarchy=True,
+ use_h3d=False,
+ path_mode='AUTO',
+ ):
+
+ # -------------------------------------------------------------------------
+ # Global Setup
+ # -------------------------------------------------------------------------
+ import bpy_extras
+ from bpy_extras.io_utils import unique_name
+ from xml.sax.saxutils import quoteattr
+
+ uuid_cache_object = {} # object
+ uuid_cache_lamp = {} # 'LA_' + object.name
+ uuid_cache_view = {} # object, different namespace
+ uuid_cache_mesh = {} # mesh
+ uuid_cache_material = {} # material
+ uuid_cache_image = {} # image
+ uuid_cache_world = {} # world
+
+ # store files to copy
+ copy_set = set()
+
+ fw = file.write
+ base_src = os.path.dirname(bpy.data.filepath)
+ base_dst = os.path.dirname(file.name)
+ filename_strip = os.path.splitext(os.path.basename(file.name))[0]
+ gpu_shader_cache = {}
+
+ if use_h3d:
+ import gpu
+ gpu_shader_dummy_mat = bpy.data.materials.new('X3D_DYMMY_MAT')
+ gpu_shader_cache[None] = gpu.export_shader(scene, gpu_shader_dummy_mat)
+
+ # -------------------------------------------------------------------------
+ # File Writing Functions
+ # -------------------------------------------------------------------------
+
+ def writeHeader(ident):
+ filepath_quoted = quoteattr(os.path.basename(file.name))
+ blender_ver_quoted = quoteattr('Blender %s' % bpy.app.version_string)
+
+ fw('%s<?xml version="1.0" encoding="UTF-8"?>\n' % ident)
+ if use_h3d:
+ fw('%s<X3D profile="H3DAPI" version="1.4">\n' % ident)
+ else:
+ fw('%s<!DOCTYPE X3D PUBLIC "ISO//Web3D//DTD X3D 3.0//EN" "http://www.web3d.org/specifications/x3d-3.0.dtd">\n' % ident)
+ fw('%s<X3D version="3.0" profile="Immersive" xmlns:xsd="http://www.w3.org/2001/XMLSchema-instance" xsd:noNamespaceSchemaLocation="http://www.web3d.org/specifications/x3d-3.0.xsd">\n' % ident)
+
+ ident += '\t'
+ fw('%s<head>\n' % ident)
+ ident += '\t'
+ fw('%s<meta name="filename" content=%s />\n' % (ident, filepath_quoted))
+ fw('%s<meta name="generator" content=%s />\n' % (ident, blender_ver_quoted))
+ # this info was never updated, so blender version should be enough
+ # fw('%s<meta name="translator" content="X3D exporter v1.55 (2006/01/17)" />\n' % ident)
+ ident = ident[:-1]
+ fw('%s</head>\n' % ident)
+ fw('%s<Scene>\n' % ident)
+ ident += '\t'
+ return ident
+
+ def writeFooter(ident):
+ ident = ident[:-1]
+ fw('%s</Scene>\n' % ident)
+ ident = ident[:-1]
+ fw('%s</X3D>' % ident)
+ return ident
+
+ def writeViewpoint(ident, obj, matrix, scene):
+ view_id = unique_name(obj, 'CA_' + obj.name, uuid_cache_view, clean_func=quoteattr)
+
+ loc, quat, scale = matrix.decompose()
+
+ ident_step = ident + (' ' * (-len(ident) + \
+ fw('%s<Viewpoint ' % ident)))
+ fw('DEF=%s\n' % view_id)
+ fw(ident_step + 'centerOfRotation="0 0 0"\n')
+ fw(ident_step + 'position="%3.2f %3.2f %3.2f"\n' % loc[:])
+ fw(ident_step + 'orientation="%3.2f %3.2f %3.2f %3.2f"\n' % (quat.axis[:] + (quat.angle, )))
+ fw(ident_step + 'fieldOfView="%.3g"\n' % obj.data.angle)
+ fw(ident_step + '/>\n')
+
+ def writeFog(ident, world):
+ if world:
+ mtype = world.mist_settings.falloff
+ mparam = world.mist_settings
+ else:
+ return
+
+ if mparam.use_mist:
+ ident_step = ident + (' ' * (-len(ident) + \
+ fw('%s<Fog ' % ident)))
+ fw('fogType="%s"\n' % ('LINEAR' if (mtype == 'LINEAR') else 'EXPONENTIAL'))
+ fw(ident_step + 'color="%.3g %.3g %.3g"\n' % clamp_color(world.horizon_color))
+ fw(ident_step + 'visibilityRange="%.3g"\n' % mparam.depth)
+ fw(ident_step + '/>\n')
+ else:
+ return
+
+ def writeNavigationInfo(ident, scene):
+ ident_step = ident + (' ' * (-len(ident) + \
+ fw('%s<NavigationInfo ' % ident)))
+ fw('headlight="false"\n')
+ fw(ident_step + 'visibilityLimit="0.0"\n')
+ fw(ident_step + 'type=\'"EXAMINE", "ANY"\'\n')
+ fw(ident_step + 'avatarSize="0.25, 1.75, 0.75"\n')
+ fw(ident_step + '/>\n')
+
+ def writeTransform_begin(ident, matrix, def_id):
+ ident_step = ident + (' ' * (-len(ident) + \
+ fw('%s<Transform ' % ident)))
+ if def_id is not None:
+ fw('DEF=%s\n' % def_id)
+ else:
+ fw('\n')
+
+ loc, quat, sca = matrix.decompose()
+
+ fw(ident_step + 'translation="%.6g %.6g %.6g"\n' % loc[:])
+ # fw(ident_step + 'center="%.6g %.6g %.6g"\n' % (0, 0, 0))
+ fw(ident_step + 'scale="%.6g %.6g %.6g"\n' % sca[:])
+ fw(ident_step + 'rotation="%.6g %.6g %.6g %.6g"\n' % (quat.axis[:] + (quat.angle, )))
+ fw(ident_step + '>\n')
+ ident += '\t'
+ return ident
+
+ def writeTransform_end(ident):
+ ident = ident[:-1]
+ fw('%s</Transform>\n' % ident)
+ return ident
+
+ def writeSpotLight(ident, obj, matrix, lamp, world):
+ # note, lamp_id is not re-used
+ lamp_id = unique_name(obj, 'LA_' + obj.name, uuid_cache_lamp, clean_func=quoteattr)
+
+ if world:
+ ambi = world.ambient_color
+ amb_intensity = ((ambi[0] + ambi[1] + ambi[2]) / 3.0) / 2.5
+ del ambi
+ else:
+ amb_intensity = 0.0
+
+ # compute cutoff and beamwidth
+ intensity = min(lamp.energy / 1.75, 1.0)
+ beamWidth = lamp.spot_size * 0.37
+ # beamWidth=((lamp.spotSize*math.pi)/180.0)*.37
+ cutOffAngle = beamWidth * 1.3
+
+ orientation = matrix_direction_neg_z(matrix)
+
+ location = matrix.to_translation()[:]
+
+ radius = lamp.distance * math.cos(beamWidth)
+ # radius = lamp.dist*math.cos(beamWidth)
+ ident_step = ident + (' ' * (-len(ident) + \
+ fw('%s<SpotLight ' % ident)))
+ fw('DEF=%s\n' % lamp_id)
+ fw(ident_step + 'radius="%.4g"\n' % radius)
+ fw(ident_step + 'ambientIntensity="%.4g"\n' % amb_intensity)
+ fw(ident_step + 'intensity="%.4g"\n' % intensity)
+ fw(ident_step + 'color="%.4g %.4g %.4g"\n' % clamp_color(lamp.color))
+ fw(ident_step + 'beamWidth="%.4g"\n' % beamWidth)
+ fw(ident_step + 'cutOffAngle="%.4g"\n' % cutOffAngle)
+ fw(ident_step + 'direction="%.4g %.4g %.4g"\n' % orientation)
+ fw(ident_step + 'location="%.4g %.4g %.4g"\n' % location)
+ fw(ident_step + '/>\n')
+
+ def writeDirectionalLight(ident, obj, matrix, lamp, world):
+ # note, lamp_id is not re-used
+ lamp_id = unique_name(obj, 'LA_' + obj.name, uuid_cache_lamp, clean_func=quoteattr)
+
+ if world:
+ ambi = world.ambient_color
+ # ambi = world.amb
+ amb_intensity = ((float(ambi[0] + ambi[1] + ambi[2])) / 3.0) / 2.5
+ else:
+ ambi = 0
+ amb_intensity = 0.0
+
+ intensity = min(lamp.energy / 1.75, 1.0)
+
+ orientation = matrix_direction_neg_z(matrix)
+
+ ident_step = ident + (' ' * (-len(ident) + \
+ fw('%s<DirectionalLight ' % ident)))
+ fw('DEF=%s\n' % lamp_id)
+ fw(ident_step + 'ambientIntensity="%.4g"\n' % amb_intensity)
+ fw(ident_step + 'color="%.4g %.4g %.4g"\n' % clamp_color(lamp.color))
+ fw(ident_step + 'intensity="%.4g"\n' % intensity)
+ fw(ident_step + 'direction="%.4g %.4g %.4g"\n' % orientation)
+ fw(ident_step + '/>\n')
+
+ def writePointLight(ident, obj, matrix, lamp, world):
+ # note, lamp_id is not re-used
+ lamp_id = unique_name(obj, 'LA_' + obj.name, uuid_cache_lamp, clean_func=quoteattr)
+
+ if world:
+ ambi = world.ambient_color
+ # ambi = world.amb
+ amb_intensity = ((float(ambi[0] + ambi[1] + ambi[2])) / 3.0) / 2.5
+ else:
+ ambi = 0.0
+ amb_intensity = 0.0
+
+ intensity = min(lamp.energy / 1.75, 1.0)
+ location = matrix.to_translation()[:]
+
+ ident_step = ident + (' ' * (-len(ident) + \
+ fw('%s<PointLight ' % ident)))
+ fw('DEF=%s\n' % lamp_id)
+ fw(ident_step + 'ambientIntensity="%.4g"\n' % amb_intensity)
+ fw(ident_step + 'color="%.4g %.4g %.4g"\n' % clamp_color(lamp.color))
+
+ fw(ident_step + 'intensity="%.4g"\n' % intensity)
+ fw(ident_step + 'radius="%.4g" \n' % lamp.distance)
+ fw(ident_step + 'location="%.4g %.4g %.4g"\n' % location)
+ fw(ident_step + '/>\n')
+
+ def writeIndexedFaceSet(ident, obj, mesh, matrix, world):
+ obj_id = unique_name(obj, 'OB_' + obj.name, uuid_cache_object, clean_func=quoteattr)
+ mesh_id = unique_name(mesh, 'ME_' + mesh.name, uuid_cache_mesh, clean_func=quoteattr)
+ mesh_id_group = prefix_quoted_str(mesh_id, 'group_')
+ mesh_id_coords = prefix_quoted_str(mesh_id, 'coords_')
+ mesh_id_normals = prefix_quoted_str(mesh_id, 'normals_')
+
+ if not mesh.faces:
+ return
+
+ texface_use_halo = 0
+ texface_use_billboard = 0
+ texface_use_collision = 0
+
+ use_halonode = False
+ use_billnode = False
+ use_collnode = False
+
+ if mesh.uv_textures.active: # if mesh.faceUV:
+ for face in mesh.uv_textures.active.data: # for face in mesh.faces:
+ texface_use_halo |= face.use_halo
+ texface_use_billboard |= face.use_billboard
+ texface_use_collision |= face.use_collision
+ # texface_use_object_color |= face.use_object_color
+
+ if texface_use_halo:
+ fw('%s<Billboard axisOfRotation="0 0 0">\n' % ident)
+ use_halonode = True
+ ident += '\t'
+ elif texface_use_billboard:
+ fw('%s<Billboard axisOfRotation="0 1 0">\n' % ident)
+ use_billnode = True
+ ident += '\t'
+ elif texface_use_collision:
+ fw('%s<Collision enabled="false">\n' % ident)
+ use_collnode = True
+ ident += '\t'
+
+ del texface_use_halo
+ del texface_use_billboard
+ del texface_use_collision
+ # del texface_use_object_color
+
+ ident = writeTransform_begin(ident, matrix, None)
+
+ if mesh.tag:
+ fw('%s<Group USE=%s />\n' % (ident, mesh_id_group))
+ else:
+ mesh.tag = True
+
+ fw('%s<Group DEF=%s>\n' % (ident, mesh_id_group))
+ ident += '\t'
+
+ is_uv = bool(mesh.uv_textures.active)
+ # is_col, defined for each material
+
+ is_coords_written = False
+
+ mesh_materials = mesh.materials[:]
+ if not mesh_materials:
+ mesh_materials = [None]
+
+ mesh_material_tex = [None] * len(mesh_materials)
+ mesh_material_mtex = [None] * len(mesh_materials)
+ mesh_material_images = [None] * len(mesh_materials)
+
+ for i, material in enumerate(mesh_materials):
+ if material:
+ for mtex in material.texture_slots:
+ if mtex:
+ tex = mtex.texture
+ if tex and tex.type == 'IMAGE':
+ image = tex.image
+ if image:
+ mesh_material_tex[i] = tex
+ mesh_material_mtex[i] = mtex
+ mesh_material_images[i] = image
+ break
+
+ mesh_materials_use_face_texture = [getattr(material, 'use_face_texture', True) for material in mesh_materials]
+
+ # fast access!
+ mesh_vertices = mesh.vertices[:]
+ mesh_faces = mesh.faces[:]
+ mesh_faces_materials = [f.material_index for f in mesh_faces]
+ mesh_faces_vertices = [f.vertices[:] for f in mesh_faces]
+
+ if is_uv and True in mesh_materials_use_face_texture:
+ mesh_faces_image = [(fuv.image if (mesh_materials_use_face_texture[mesh_faces_materials[i]] and fuv.use_image) else mesh_material_images[mesh_faces_materials[i]]) for i, fuv in enumerate(mesh.uv_textures.active.data)]
+ mesh_faces_image_unique = set(mesh_faces_image)
+ elif len(set(mesh_material_images) | {None}) > 1: # make sure there is at least one image
+ mesh_faces_image = [mesh_material_images[material_index] for material_index in mesh_faces_materials]
+ mesh_faces_image_unique = set(mesh_faces_image)
+ else:
+ mesh_faces_image = [None] * len(mesh_faces)
+ mesh_faces_image_unique = {None}
+
+ # group faces
+ face_groups = {}
+ for material_index in range(len(mesh_materials)):
+ for image in mesh_faces_image_unique:
+ face_groups[material_index, image] = []
+ del mesh_faces_image_unique
+
+ for i, (material_index, image) in enumerate(zip(mesh_faces_materials, mesh_faces_image)):
+ face_groups[material_index, image].append(i)
+
+ # same as face_groups.items() but sorted so we can get predictable output.
+ face_groups_items = list(face_groups.items())
+ face_groups_items.sort(key=lambda m: (m[0][0], getattr(m[0][1], 'name', '')))
+
+ for (material_index, image), face_group in face_groups_items: # face_groups.items()
+ if face_group:
+ material = mesh_materials[material_index]
+
+ fw('%s<Shape>\n' % ident)
+ ident += '\t'
+
+ is_smooth = False
+ is_col = (mesh.vertex_colors.active and (material is None or material.use_vertex_color_paint))
+
+ # kludge but as good as it gets!
+ for i in face_group:
+ if mesh_faces[i].use_smooth:
+ is_smooth = True
+ break
+
+ # UV's and VCols split verts off which effects smoothing
+ # force writing normals in this case.
+ is_force_normals = use_triangulate and is_smooth and (is_uv or is_col)
+
+ if use_h3d:
+ gpu_shader = gpu_shader_cache.get(material) # material can be 'None', uses dummy cache
+ if gpu_shader is None:
+ gpu_shader = gpu_shader_cache[material] = gpu.export_shader(scene, material)
+
+ if 1: # XXX DEBUG
+ gpu_shader_tmp = gpu.export_shader(scene, material)
+ import pprint
+ print('\nWRITING MATERIAL:', material.name)
+ del gpu_shader_tmp['fragment']
+ del gpu_shader_tmp['vertex']
+ pprint.pprint(gpu_shader_tmp, width=120)
+ #pprint.pprint(val['vertex'])
+ del gpu_shader_tmp
+
+ fw('%s<Appearance>\n' % ident)
+ ident += '\t'
+
+ if image and not use_h3d:
+ writeImageTexture(ident, image)
+
+ if mesh_materials_use_face_texture[material_index]:
+ if image.use_tiles:
+ fw('%s<TextureTransform scale="%s %s" />\n' % (ident, image.tiles_x, image.tiles_y))
+ else:
+ # transform by mtex
+ loc = mesh_material_mtex[material_index].offset[:2]
+
+ # mtex_scale * tex_repeat
+ sca_x, sca_y = mesh_material_mtex[material_index].scale[:2]
+
+ sca_x *= mesh_material_tex[material_index].repeat_x
+ sca_y *= mesh_material_tex[material_index].repeat_y
+
+ # flip x/y is a sampling feature, convert to transform
+ if mesh_material_tex[material_index].use_flip_axis:
+ rot = math.pi / -2.0
+ sca_x, sca_y = sca_y, -sca_x
+ else:
+ rot = 0.0
+
+ ident_step = ident + (' ' * (-len(ident) + \
+ fw('%s<TextureTransform ' % ident)))
+ # fw('center="%.6g %.6g" ' % (0.0, 0.0))
+ fw(ident_step + 'translation="%.6g %.6g"\n' % loc)
+ fw(ident_step + 'scale="%.6g %.6g"\n' % (sca_x, sca_y))
+ fw(ident_step + 'rotation="%.6g"\n' % rot)
+ fw(ident_step + '/>\n')
+
+ if use_h3d:
+ mat_tmp = material if material else gpu_shader_dummy_mat
+ writeMaterialH3D(ident, mat_tmp, world,
+ obj, gpu_shader)
+ del mat_tmp
+ else:
+ if material:
+ writeMaterial(ident, material, world)
+
+ ident = ident[:-1]
+ fw('%s</Appearance>\n' % ident)
+
+ mesh_faces_col = mesh.vertex_colors.active.data if is_col else None
+ mesh_faces_uv = mesh.uv_textures.active.data if is_uv else None
+
+ #-- IndexedFaceSet or IndexedLineSet
+ if use_triangulate:
+ ident_step = ident + (' ' * (-len(ident) + \
+ fw('%s<IndexedTriangleSet ' % ident)))
+
+ # --- Write IndexedTriangleSet Attributes (same as IndexedFaceSet)
+ fw('solid="%s"\n' % ('true' if mesh.show_double_sided else 'false'))
+
+ # creaseAngle unsupported for IndexedTriangleSet's
+
+ if use_normals or is_force_normals:
+ # currently not optional, could be made so:
+ fw(ident_step + 'normalPerVertex="true"\n')
+
+ slot_uv = None
+ slot_col = None
+
+ if is_uv and is_col:
+ slot_uv = 0
+ slot_col = 1
+
+ def vertex_key(fidx, f_cnr_idx):
+ return (
+ mesh_faces_uv[fidx].uv[f_cnr_idx][:],
+ getattr(mesh_faces_col[fidx], "color%d" % (f_cnr_idx + 1))[:],
+ )
+ elif is_uv:
+ slot_uv = 0
+
+ def vertex_key(fidx, f_cnr_idx):
+ return (
+ mesh_faces_uv[fidx].uv[f_cnr_idx][:],
+ )
+ elif is_col:
+ slot_col = 0
+
+ def vertex_key(fidx, f_cnr_idx):
+ return (
+ getattr(mesh_faces_col[fidx], "color%d" % (f_cnr_idx + 1))[:],
+ )
+ else:
+ # ack, not especially efficient in this case
+ def vertex_key(fidx, f_cnr_idx):
+ return None
+
+ # build a mesh mapping dict
+ vertex_hash = [{} for i in range(len(mesh.vertices))]
+ # worst case every face is a quad
+ face_tri_list = [[None, None, None] for i in range(len(mesh.faces) * 2)]
+ vert_tri_list = []
+ totvert = 0
+ totface = 0
+ temp_face = [None] * 4
+ for i in face_group:
+ fv = mesh_faces_vertices[i]
+ for j, v_idx in enumerate(fv):
+ key = vertex_key(i, j)
+ vh = vertex_hash[v_idx]
+ x3d_v = vh.get(key)
+ if x3d_v is None:
+ x3d_v = key, v_idx, totvert
+ vh[key] = x3d_v
+ # key / original_vertex / new_vertex
+ vert_tri_list.append(x3d_v)
+ totvert += 1
+ temp_face[j] = x3d_v
+
+ if len(fv) == 4:
+ f_iter = ((0, 1, 2), (0, 2, 3))
+ else:
+ f_iter = ((0, 1, 2), )
+
+ for f_it in f_iter:
+ # loop over a quad as 2 tris
+ f_tri = face_tri_list[totface]
+ for ji, j in enumerate(f_it):
+ f_tri[ji] = temp_face[j]
+ # quads run this twice
+ totface += 1
+
+ # clear unused faces
+ face_tri_list[totface:] = []
+
+ fw(ident_step + 'index="')
+ for x3d_f in face_tri_list:
+ fw('%i %i %i ' % (x3d_f[0][2], x3d_f[1][2], x3d_f[2][2]))
+ fw('"\n')
+
+ # close IndexedTriangleSet
+ fw(ident_step + '>\n')
+ ident += '\t'
+
+ fw('%s<Coordinate ' % ident)
+ fw('point="')
+ for x3d_v in vert_tri_list:
+ fw('%.6g %.6g %.6g ' % mesh_vertices[x3d_v[1]].co[:])
+ fw('" />\n')
+
+ if use_normals or is_force_normals:
+ fw('%s<Normal ' % ident)
+ fw('vector="')
+ for x3d_v in vert_tri_list:
+ fw('%.6g %.6g %.6g ' % mesh_vertices[x3d_v[1]].normal[:])
+ fw('" />\n')
+
+ if is_uv:
+ fw('%s<TextureCoordinate point="' % ident)
+ for x3d_v in vert_tri_list:
+ fw('%.4g %.4g ' % x3d_v[0][slot_uv])
+ fw('" />\n')
+
+ if is_col:
+ fw('%s<Color color="' % ident)
+ for x3d_v in vert_tri_list:
+ fw('%.3g %.3g %.3g ' % x3d_v[0][slot_col])
+ fw('" />\n')
+
+ if use_h3d:
+ # write attributes
+ for gpu_attr in gpu_shader['attributes']:
+
+ # UVs
+ if gpu_attr['type'] == gpu.CD_MTFACE:
+ if gpu_attr['datatype'] == gpu.GPU_DATA_2F:
+ fw('%s<FloatVertexAttribute ' % ident)
+ fw('name="%s" ' % gpu_attr['varname'])
+ fw('numComponents="2" ')
+ fw('value="')
+ for x3d_v in vert_tri_list:
+ fw('%.4g %.4g ' % x3d_v[0][slot_uv])
+ fw('" />\n')
+ else:
+ assert(0)
+
+ elif gpu_attr['type'] == gpu.CD_MCOL:
+ if gpu_attr['datatype'] == gpu.GPU_DATA_4UB:
+ pass # XXX, H3D can't do
+ else:
+ assert(0)
+
+ ident = ident[:-1]
+
+ fw('%s</IndexedTriangleSet>\n' % ident)
+
+ else:
+ ident_step = ident + (' ' * (-len(ident) + \
+ fw('%s<IndexedFaceSet ' % ident)))
+
+ # --- Write IndexedFaceSet Attributes (same as IndexedTriangleSet)
+ fw('solid="%s"\n' % ('true' if mesh.show_double_sided else 'false'))
+ if is_smooth:
+ fw(ident_step + 'creaseAngle="%.4g"\n' % mesh.auto_smooth_angle)
+
+ if use_normals:
+ # currently not optional, could be made so:
+ fw(ident_step + 'normalPerVertex="true"\n')
+
+ # IndexedTriangleSet assumes true
+ if is_col:
+ fw(ident_step + 'colorPerVertex="false"\n')
+
+ # for IndexedTriangleSet we use a uv per vertex so this isnt needed.
+ if is_uv:
+ fw(ident_step + 'texCoordIndex="')
+
+ j = 0
+ for i in face_group:
+ if len(mesh_faces_vertices[i]) == 4:
+ fw('%d %d %d %d -1 ' % (j, j + 1, j + 2, j + 3))
+ j += 4
+ else:
+ fw('%d %d %d -1 ' % (j, j + 1, j + 2))
+ j += 3
+ fw('"\n')
+ # --- end texCoordIndex
+
+ if True:
+ fw(ident_step + 'coordIndex="')
+ for i in face_group:
+ fv = mesh_faces_vertices[i]
+ if len(fv) == 3:
+ fw('%i %i %i -1 ' % fv)
+ else:
+ fw('%i %i %i %i -1 ' % fv)
+
+ fw('"\n')
+ # --- end coordIndex
+
+ # close IndexedFaceSet
+ fw(ident_step + '>\n')
+ ident += '\t'
+
+ # --- Write IndexedFaceSet Elements
+ if True:
+ if is_coords_written:
+ fw('%s<Coordinate USE=%s />\n' % (ident, mesh_id_coords))
+ if use_normals:
+ fw('%s<Normal USE=%s />\n' % (ident, mesh_id_normals))
+ else:
+ ident_step = ident + (' ' * (-len(ident) + \
+ fw('%s<Coordinate ' % ident)))
+ fw('DEF=%s\n' % mesh_id_coords)
+ fw(ident_step + 'point="')
+ for v in mesh.vertices:
+ fw('%.6g %.6g %.6g ' % v.co[:])
+ fw('"\n')
+ fw(ident_step + '/>\n')
+
+ is_coords_written = True
+
+ if use_normals:
+ ident_step = ident + (' ' * (-len(ident) + \
+ fw('%s<Normal ' % ident)))
+ fw('DEF=%s\n' % mesh_id_normals)
+ fw(ident_step + 'vector="')
+ for v in mesh.vertices:
+ fw('%.6g %.6g %.6g ' % v.normal[:])
+ fw('"\n')
+ fw(ident_step + '/>\n')
+
+ if is_uv:
+ fw('%s<TextureCoordinate point="' % ident)
+ for i in face_group:
+ for uv in mesh_faces_uv[i].uv:
+ fw('%.4g %.4g ' % uv[:])
+ del mesh_faces_uv
+ fw('" />\n')
+
+ if is_col:
+ fw('%s<Color color="' % ident)
+ # XXX, 1 color per face, only
+ for i in face_group:
+ fw('%.3g %.3g %.3g ' % mesh_faces_col[i].color1[:])
+ fw('" />\n')
+
+ #--- output vertexColors
+
+ #--- output closing braces
+ ident = ident[:-1]
+
+ fw('%s</IndexedFaceSet>\n' % ident)
+
+ ident = ident[:-1]
+ fw('%s</Shape>\n' % ident)
+
+ # XXX
+
+ #fw('%s<PythonScript DEF="PS" url="object.py" >\n' % ident)
+ #fw('%s <ShaderProgram USE="MA_Material.005" containerField="references"/>\n' % ident)
+ #fw('%s</PythonScript>\n' % ident)
+
+ ident = ident[:-1]
+ fw('%s</Group>\n' % ident)
+
+ ident = writeTransform_end(ident)
+
+ if use_halonode:
+ ident = ident[:-1]
+ fw('%s</Billboard>\n' % ident)
+ elif use_billnode:
+ ident = ident[:-1]
+ fw('%s</Billboard>\n' % ident)
+ elif use_collnode:
+ ident = ident[:-1]
+ fw('%s</Collision>\n' % ident)
+
+ def writeMaterial(ident, material, world):
+ material_id = unique_name(material, 'MA_' + material.name, uuid_cache_material, clean_func=quoteattr)
+
+ # look up material name, use it if available
+ if material.tag:
+ fw('%s<Material USE=%s />\n' % (ident, material_id))
+ else:
+ material.tag = True
+
+ emit = material.emit
+ ambient = material.ambient / 3.0
+ diffuseColor = material.diffuse_color[:]
+ if world:
+ ambiColor = ((material.ambient * 2.0) * world.ambient_color)[:]
+ else:
+ ambiColor = 0.0, 0.0, 0.0
+
+ emitColor = tuple(((c * emit) + ambiColor[i]) / 2.0 for i, c in enumerate(diffuseColor))
+ shininess = material.specular_hardness / 512.0
+ specColor = tuple((c + 0.001) / (1.25 / (material.specular_intensity + 0.001)) for c in material.specular_color)
+ transp = 1.0 - material.alpha
+
+ if material.use_shadeless:
+ ambient = 1.0
+ shininess = 0.0
+ specColor = emitColor = diffuseColor
+
+ ident_step = ident + (' ' * (-len(ident) + \
+ fw('%s<Material ' % ident)))
+ fw('DEF=%s\n' % material_id)
+ fw(ident_step + 'diffuseColor="%.3g %.3g %.3g"\n' % clamp_color(diffuseColor))
+ fw(ident_step + 'specularColor="%.3g %.3g %.3g"\n' % clamp_color(specColor))
+ fw(ident_step + 'emissiveColor="%.3g %.3g %.3g"\n' % clamp_color(emitColor))
+ fw(ident_step + 'ambientIntensity="%.3g"\n' % ambient)
+ fw(ident_step + 'shininess="%.3g"\n' % shininess)
+ fw(ident_step + 'transparency="%s"\n' % transp)
+ fw(ident_step + '/>\n')
+
+ def writeMaterialH3D(ident, material, world,
+ obj, gpu_shader):
+ material_id = unique_name(material, 'MA_' + material.name, uuid_cache_material, clean_func=quoteattr)
+
+ fw('%s<Material />\n' % ident)
+ if material.tag:
+ fw('%s<ComposedShader USE=%s />\n' % (ident, material_id))
+ else:
+ material.tag = True
+
+ # GPU_material_bind_uniforms
+ # GPU_begin_object_materials
+
+ #~ CD_MCOL 6
+ #~ CD_MTFACE 5
+ #~ CD_ORCO 14
+ #~ CD_TANGENT 18
+ #~ GPU_DATA_16F 7
+ #~ GPU_DATA_1F 2
+ #~ GPU_DATA_1I 1
+ #~ GPU_DATA_2F 3
+ #~ GPU_DATA_3F 4
+ #~ GPU_DATA_4F 5
+ #~ GPU_DATA_4UB 8
+ #~ GPU_DATA_9F 6
+ #~ GPU_DYNAMIC_LAMP_DYNCO 7
+ #~ GPU_DYNAMIC_LAMP_DYNCOL 11
+ #~ GPU_DYNAMIC_LAMP_DYNENERGY 10
+ #~ GPU_DYNAMIC_LAMP_DYNIMAT 8
+ #~ GPU_DYNAMIC_LAMP_DYNPERSMAT 9
+ #~ GPU_DYNAMIC_LAMP_DYNVEC 6
+ #~ GPU_DYNAMIC_OBJECT_COLOR 5
+ #~ GPU_DYNAMIC_OBJECT_IMAT 4
+ #~ GPU_DYNAMIC_OBJECT_MAT 2
+ #~ GPU_DYNAMIC_OBJECT_VIEWIMAT 3
+ #~ GPU_DYNAMIC_OBJECT_VIEWMAT 1
+ #~ GPU_DYNAMIC_SAMPLER_2DBUFFER 12
+ #~ GPU_DYNAMIC_SAMPLER_2DIMAGE 13
+ #~ GPU_DYNAMIC_SAMPLER_2DSHADOW 14
+
+ '''
+ inline const char* typeToString( X3DType t ) {
+ switch( t ) {
+ case SFFLOAT: return "SFFloat";
+ case MFFLOAT: return "MFFloat";
+ case SFDOUBLE: return "SFDouble";
+ case MFDOUBLE: return "MFDouble";
+ case SFTIME: return "SFTime";
+ case MFTIME: return "MFTime";
+ case SFINT32: return "SFInt32";
+ case MFINT32: return "MFInt32";
+ case SFVEC2F: return "SFVec2f";
+ case MFVEC2F: return "MFVec2f";
+ case SFVEC2D: return "SFVec2d";
+ case MFVEC2D: return "MFVec2d";
+ case SFVEC3F: return "SFVec3f";
+ case MFVEC3F: return "MFVec3f";
+ case SFVEC3D: return "SFVec3d";
+ case MFVEC3D: return "MFVec3d";
+ case SFVEC4F: return "SFVec4f";
+ case MFVEC4F: return "MFVec4f";
+ case SFVEC4D: return "SFVec4d";
+ case MFVEC4D: return "MFVec4d";
+ case SFBOOL: return "SFBool";
+ case MFBOOL: return "MFBool";
+ case SFSTRING: return "SFString";
+ case MFSTRING: return "MFString";
+ case SFNODE: return "SFNode";
+ case MFNODE: return "MFNode";
+ case SFCOLOR: return "SFColor";
+ case MFCOLOR: return "MFColor";
+ case SFCOLORRGBA: return "SFColorRGBA";
+ case MFCOLORRGBA: return "MFColorRGBA";
+ case SFROTATION: return "SFRotation";
+ case MFROTATION: return "MFRotation";
+ case SFQUATERNION: return "SFQuaternion";
+ case MFQUATERNION: return "MFQuaternion";
+ case SFMATRIX3F: return "SFMatrix3f";
+ case MFMATRIX3F: return "MFMatrix3f";
+ case SFMATRIX4F: return "SFMatrix4f";
+ case MFMATRIX4F: return "MFMatrix4f";
+ case SFMATRIX3D: return "SFMatrix3d";
+ case MFMATRIX3D: return "MFMatrix3d";
+ case SFMATRIX4D: return "SFMatrix4d";
+ case MFMATRIX4D: return "MFMatrix4d";
+ case UNKNOWN_X3D_TYPE:
+ default:return "UNKNOWN_X3D_TYPE";
+ '''
+ import gpu
+
+ fw('%s<ComposedShader DEF=%s language="GLSL" >\n' % (ident, material_id))
+ ident += '\t'
+
+ shader_url_frag = 'shaders/%s_%s.frag' % (filename_strip, material_id[1:-1])
+ shader_url_vert = 'shaders/%s_%s.vert' % (filename_strip, material_id[1:-1])
+
+ # write files
+ shader_dir = os.path.join(base_dst, 'shaders')
+ if not os.path.isdir(shader_dir):
+ os.mkdir(shader_dir)
+
+ for uniform in gpu_shader['uniforms']:
+ if uniform['type'] == gpu.GPU_DYNAMIC_SAMPLER_2DIMAGE:
+ fw('%s<field name="%s" type="SFNode" accessType="inputOutput">\n' % (ident, uniform['varname']))
+ writeImageTexture(ident + '\t', bpy.data.images[uniform['image']])
+ fw('%s</field>\n' % ident)
+
+ elif uniform['type'] == gpu.GPU_DYNAMIC_LAMP_DYNCO:
+ if uniform['datatype'] == gpu.GPU_DATA_3F: # should always be true!
+ value = '%.6g %.6g %.6g' % (global_matrix * bpy.data.objects[uniform['lamp']].matrix_world).to_translation()[:]
+ fw('%s<field name="%s" type="SFVec3f" accessType="inputOutput" value="%s" />\n' % (ident, uniform['varname'], value))
+ else:
+ assert(0)
+
+ elif uniform['type'] == gpu.GPU_DYNAMIC_LAMP_DYNCOL:
+ # odd we have both 3, 4 types.
+ lamp = bpy.data.objects[uniform['lamp']].data
+ value = '%.6g %.6g %.6g' % (lamp.color * lamp.energy)[:]
+ if uniform['datatype'] == gpu.GPU_DATA_3F:
+ fw('%s<field name="%s" type="SFVec3f" accessType="inputOutput" value="%s" />\n' % (ident, uniform['varname'], value))
+ elif uniform['datatype'] == gpu.GPU_DATA_4F:
+ fw('%s<field name="%s" type="SFVec4f" accessType="inputOutput" value="%s 1.0" />\n' % (ident, uniform['varname'], value))
+ else:
+ assert(0)
+
+ elif uniform['type'] == gpu.GPU_DYNAMIC_LAMP_DYNENERGY:
+ # not used ?
+ assert(0)
+
+ elif uniform['type'] == gpu.GPU_DYNAMIC_LAMP_DYNVEC:
+ if uniform['datatype'] == gpu.GPU_DATA_3F:
+ value = '%.6g %.6g %.6g' % (mathutils.Vector((0.0, 0.0, 1.0)) * (global_matrix * bpy.data.objects[uniform['lamp']].matrix_world).to_quaternion()).normalized()[:]
+ fw('%s<field name="%s" type="SFVec3f" accessType="inputOutput" value="%s" />\n' % (ident, uniform['varname'], value))
+ else:
+ assert(0)
+
+ elif uniform['type'] == gpu.GPU_DYNAMIC_OBJECT_VIEWIMAT:
+ if uniform['datatype'] == gpu.GPU_DATA_16F:
+ # must be updated dynamically
+ # TODO, write out 'viewpointMatrices.py'
+ value = ' '.join(['%.6f' % f for v in mathutils.Matrix() for f in v])
+ fw('%s<field name="%s" type="SFMatrix4f" accessType="inputOutput" value="%s" />\n' % (ident, uniform['varname'], value))
+ else:
+ assert(0)
+
+ elif uniform['type'] == gpu.GPU_DYNAMIC_OBJECT_IMAT:
+ if uniform['datatype'] == gpu.GPU_DATA_16F:
+ value = ' '.join(['%.6f' % f for v in (global_matrix * obj.matrix_world).inverted() for f in v])
+ fw('%s<field name="%s" type="SFMatrix4f" accessType="inputOutput" value="%s" />\n' % (ident, uniform['varname'], value))
+ else:
+ assert(0)
+
+ elif uniform['type'] == gpu.GPU_DYNAMIC_SAMPLER_2DSHADOW:
+ pass # XXX, shadow buffers not supported.
+
+ elif uniform['type'] == gpu.GPU_DYNAMIC_SAMPLER_2DBUFFER:
+ if uniform['datatype'] == gpu.GPU_DATA_1I:
+ if 1:
+ tex = uniform['texpixels']
+ value = []
+ for i in range(0, len(tex) - 1, 4):
+ col = tex[i:i + 4]
+ value.append('0x%.2x%.2x%.2x%.2x' % (col[0], col[1], col[2], col[3]))
+
+ fw('%s<field name="%s" type="SFNode" accessType="inputOutput">\n' % (ident, uniform['varname']))
+
+ ident += '\t'
+
+ ident_step = ident + (' ' * (-len(ident) + \
+ fw('%s<PixelTexture \n' % ident)))
+ fw(ident_step + 'repeatS="false"\n')
+ fw(ident_step + 'repeatT="false"\n')
+
+ fw(ident_step + 'image="%s 1 4 %s"\n' % (len(value), " ".join(value)))
+
+ fw(ident_step + '/>\n')
+
+ ident = ident[:-1]
+
+ fw('%s</field>\n' % ident)
+
+ #for i in range(0, 10, 4)
+ #value = ' '.join(['%d' % f for f in uniform['texpixels']])
+ # value = ' '.join(['%.6g' % (f / 256) for f in uniform['texpixels']])
+
+ #fw('%s<field name="%s" type="SFInt32" accessType="inputOutput" value="%s" />\n' % (ident, uniform['varname'], value))
+ #print('test', len(uniform['texpixels']))
+ else:
+ assert(0)
+ else:
+ print("SKIPPING", uniform['type'])
+
+ file_frag = open(os.path.join(base_dst, shader_url_frag), 'w')
+ file_frag.write(gpu_shader['fragment'])
+ file_frag.close()
+ # patch it
+ h3d_shader_glsl_frag_patch(os.path.join(base_dst, shader_url_frag))
+
+ file_vert = open(os.path.join(base_dst, shader_url_vert), 'w')
+ file_vert.write(gpu_shader['vertex'])
+ file_vert.close()
+
+ fw('%s<ShaderPart type="FRAGMENT" url="%s" />\n' % (ident, shader_url_frag))
+ fw('%s<ShaderPart type="VERTEX" url="%s" />\n' % (ident, shader_url_vert))
+ ident = ident[:-1]
+
+ fw('%s</ComposedShader>\n' % ident)
+
+ def writeImageTexture(ident, image):
+ # Write an X3D <ImageTexture> node for a Blender image.
+ # Re-uses an already written node via USE when image.tag is set;
+ # otherwise writes a DEF node listing several candidate URLs.
+ image_id = unique_name(image, 'IM_' + image.name, uuid_cache_image, clean_func=quoteattr)
+
+ if image.tag:
+ fw('%s<ImageTexture USE=%s />\n' % (ident, image_id))
+ else:
+ image.tag = True
+
+ # fw() presumably returns the number of chars written (file.write
+ # semantics) - used here to align continuation attribute lines.
+ ident_step = ident + (' ' * (-len(ident) + \
+ fw('%s<ImageTexture ' % ident)))
+ fw('DEF=%s\n' % image_id)
+
+ # collect image paths, can load multiple
+ # [relative, name-only, absolute]
+ filepath = image.filepath
+ filepath_full = bpy.path.abspath(filepath)
+ filepath_ref = bpy_extras.io_utils.path_reference(filepath_full, base_src, base_dst, path_mode, "textures", copy_set)
+ filepath_base = os.path.basename(filepath_ref)
+
+ images = [
+ filepath_base,
+ filepath_ref,
+ filepath_full,
+ ]
+
+ # normalize separators, then drop duplicates while keeping order
+ images = [f.replace('\\', '/') for f in images]
+ images = [f for i, f in enumerate(images) if f not in images[:i]]
+
+ fw(ident_step + "url='%s' " % ' '.join(['"%s"' % f for f in images]))
+ fw(ident_step + '/>\n')
+
+ def writeBackground(ident, world):
+ # Write an X3D <Background> node from Blender world settings:
+ # horizon/zenith colors become sky/ground gradients (layout chosen by
+ # the blend/paper/real flags), and textures named back/bottom/front/
+ # left/right/top become the skybox face URLs.
+
+ if world is None:
+ return
+
+ # note, not re-used
+ world_id = unique_name(world, 'WO_' + world.name, uuid_cache_world, clean_func=quoteattr)
+
+ # (use_sky_blend, use_sky_paper, use_sky_real) selects a branch below
+ blending = world.use_sky_blend, world.use_sky_paper, world.use_sky_real
+
+ grd_triple = clamp_color(world.horizon_color)
+ sky_triple = clamp_color(world.zenith_color)
+ mix_triple = clamp_color((grd_triple[i] + sky_triple[i]) / 2.0 for i in range(3))
+
+ ident_step = ident + (' ' * (-len(ident) + \
+ fw('%s<Background ' % ident)))
+ fw('DEF=%s\n' % world_id)
+ # No Skytype - just Hor color
+ if blending == (False, False, False):
+ fw(ident_step + 'groundColor="%.3g %.3g %.3g"\n' % grd_triple)
+ fw(ident_step + 'skyColor="%.3g %.3g %.3g"\n' % grd_triple)
+ # Blend Gradient
+ elif blending == (True, False, False):
+ fw(ident_step + 'groundColor="%.3g %.3g %.3g, %.3g %.3g %.3g"\n' % (grd_triple + mix_triple))
+ fw(ident_step + 'groundAngle="1.57, 1.57"\n')
+ fw(ident_step + 'skyColor="%.3g %.3g %.3g, %.3g %.3g %.3g"\n' % (sky_triple + mix_triple))
+ fw(ident_step + 'skyAngle="1.57, 1.57"\n')
+ # Blend+Real Gradient Inverse
+ elif blending == (True, False, True):
+ fw(ident_step + 'groundColor="%.3g %.3g %.3g, %.3g %.3g %.3g"\n' % (sky_triple + grd_triple))
+ fw(ident_step + 'groundAngle="1.57"\n')
+ fw(ident_step + 'skyColor="%.3g %.3g %.3g, %.3g %.3g %.3g, %.3g %.3g %.3g"\n' % (sky_triple + grd_triple + sky_triple))
+ fw(ident_step + 'skyAngle="1.57, 3.14159"\n')
+ # Paper - just Zen Color
+ elif blending == (False, False, True):
+ fw(ident_step + 'groundColor="%.3g %.3g %.3g"\n' % sky_triple)
+ fw(ident_step + 'skyColor="%.3g %.3g %.3g"\n' % sky_triple)
+ # Blend+Real+Paper - komplex gradient
+ elif blending == (True, True, True):
+ fw(ident_step + 'groundColor="%.3g %.3g %.3g, %.3g %.3g %.3g"\n' % (sky_triple + grd_triple))
+ fw(ident_step + 'groundAngle="1.57, 1.57"\n')
+ fw(ident_step + 'skyColor="%.3g %.3g %.3g, %.3g %.3g %.3g"\n' % (sky_triple + grd_triple))
+ fw(ident_step + 'skyAngle="1.57, 1.57"\n')
+ # Any Other two colors
+ else:
+ fw(ident_step + 'groundColor="%.3g %.3g %.3g"\n' % grd_triple)
+ fw(ident_step + 'skyColor="%.3g %.3g %.3g"\n' % sky_triple)
+
+ # skybox faces are picked up by texture NAME, not by assignment -
+ # scans every texture in the blend file
+ for tex in bpy.data.textures:
+ if tex.type == 'IMAGE' and tex.image:
+ namemat = tex.name
+ pic = tex.image
+ basename = os.path.basename(bpy.path.abspath(pic.filepath))
+
+ if namemat == 'back':
+ fw(ident_step + 'backUrl="%s"\n' % basename)
+ elif namemat == 'bottom':
+ fw(ident_step + 'bottomUrl="%s"\n' % basename)
+ elif namemat == 'front':
+ fw(ident_step + 'frontUrl="%s"\n' % basename)
+ elif namemat == 'left':
+ fw(ident_step + 'leftUrl="%s"\n' % basename)
+ elif namemat == 'right':
+ fw(ident_step + 'rightUrl="%s"\n' % basename)
+ elif namemat == 'top':
+ fw(ident_step + 'topUrl="%s"\n' % basename)
+
+ fw(ident_step + '/>\n')
+
+ # -------------------------------------------------------------------------
+ # Export Object Hierarchy (recursively called)
+ # -------------------------------------------------------------------------
+ def export_object(ident, obj_main_parent, obj_main, obj_children):
+ # Export one object (and its dupli-derived objects) then recurse into
+ # its children. When use_hierarchy is on, wraps everything in a
+ # Transform holding the parent-relative matrix.
+ world = scene.world
+ free, derived = create_derived_objects(scene, obj_main)
+
+ if derived is None:
+ return
+
+ if use_hierarchy:
+ obj_main_matrix_world = obj_main.matrix_world
+ if obj_main_parent:
+ # matrix relative to the exported parent Transform
+ obj_main_matrix = obj_main_parent.matrix_world.inverted() * obj_main_matrix_world
+ else:
+ obj_main_matrix = obj_main_matrix_world
+ obj_main_matrix_world_invert = obj_main_matrix_world.inverted()
+
+ obj_main_id = unique_name(obj_main, obj_main.name, uuid_cache_object, clean_func=quoteattr)
+
+ # root objects get the global (axis-conversion) matrix applied
+ ident = writeTransform_begin(ident, obj_main_matrix if obj_main_parent else global_matrix * obj_main_matrix, obj_main_id)
+
+ for obj, obj_matrix in derived:
+ obj_type = obj.type
+
+ if use_hierarchy:
+ # make transform node relative
+ obj_matrix = obj_main_matrix_world_invert * obj_matrix
+
+ if obj_type == 'CAMERA':
+ writeViewpoint(ident, obj, obj_matrix, scene)
+ elif obj_type in ('MESH', 'CURVE', 'SURF', 'FONT'):
+ # non-mesh types and modified meshes are converted to a
+ # temporary mesh; unmodified meshes are used directly
+ if (obj_type != 'MESH') or (use_apply_modifiers and obj.is_modified(scene, 'PREVIEW')):
+ try:
+ me = obj.to_mesh(scene, use_apply_modifiers, 'PREVIEW')
+ except:
+ me = None
+ else:
+ me = obj.data
+
+ if me is not None:
+ writeIndexedFaceSet(ident, obj, me, obj_matrix, world)
+
+ # free mesh created with create_mesh()
+ if me != obj.data:
+ bpy.data.meshes.remove(me)
+
+ elif obj_type == 'LAMP':
+ data = obj.data
+ datatype = data.type
+ if datatype == 'POINT':
+ writePointLight(ident, obj, obj_matrix, data, world)
+ elif datatype == 'SPOT':
+ writeSpotLight(ident, obj, obj_matrix, data, world)
+ elif datatype == 'SUN':
+ writeDirectionalLight(ident, obj, obj_matrix, data, world)
+ else:
+ # HEMI/AREA etc fall back to a directional light
+ writeDirectionalLight(ident, obj, obj_matrix, data, world)
+ else:
+ #print "Info: Ignoring [%s], object type [%s] not handle yet" % (object.name,object.getType)
+ pass
+
+ if free:
+ free_derived_objects(obj_main)
+
+ # ---------------------------------------------------------------------
+ # write out children recursively
+ # ---------------------------------------------------------------------
+ for obj_child, obj_child_children in obj_children:
+ export_object(ident, obj_main, obj_child, obj_child_children)
+
+ if use_hierarchy:
+ ident = writeTransform_end(ident)
+
+ # -------------------------------------------------------------------------
+ # Main Export Function
+ # -------------------------------------------------------------------------
+ def export_main():
+ world = scene.world
+
+ # tag un-exported IDs
+ bpy.data.meshes.tag(False)
+ bpy.data.materials.tag(False)
+ bpy.data.images.tag(False)
+
+ print('Info: starting X3D export to %r...' % file.name)
+ ident = ''
+ ident = writeHeader(ident)
+
+ writeNavigationInfo(ident, scene)
+ writeBackground(ident, world)
+ writeFog(ident, world)
+
+ ident = '\t\t'
+
+ if use_selection:
+ objects = [obj for obj in scene.objects if obj.is_visible(scene) and o.select]
+ else:
+ objects = [obj for obj in scene.objects if obj.is_visible(scene)]
+
+ if use_hierarchy:
+ objects_hierarchy = build_hierarchy(objects)
+ else:
+ objects_hierarchy = ((obj, []) for obj in objects)
+
+ for obj_main, obj_main_children in objects_hierarchy:
+ export_object(ident, None, obj_main, obj_main_children)
+
+ ident = writeFooter(ident)
+
+ export_main()
+
+ # -------------------------------------------------------------------------
+ # global cleanup
+ # -------------------------------------------------------------------------
+ file.close()
+
+ if use_h3d:
+ # the dummy material only existed to generate shader code
+ bpy.data.materials.remove(gpu_shader_dummy_mat)
+
+ # copy all collected files.
+ # NOTE(review): printing copy_set looks like leftover debugging output.
+ print(copy_set)
+ bpy_extras.io_utils.path_reference_copy(copy_set)
+
+ print('Info: finished X3D export to %r' % file.name)
+
+
+##########################################################
+# Callbacks, needed before Main
+##########################################################
+
+
+def save(operator, context, filepath="",
+ use_selection=True,
+ use_apply_modifiers=False,
+ use_triangulate=False,
+ use_normals=False,
+ use_compress=False,
+ use_hierarchy=True,
+ use_h3d=False,
+ global_matrix=None,
+ path_mode='AUTO',
+ ):
+
+ bpy.path.ensure_ext(filepath, '.x3dz' if use_compress else '.x3d')
+
+ if bpy.ops.object.mode_set.poll():
+ bpy.ops.object.mode_set(mode='OBJECT')
+
+ file = None
+ if use_compress:
+ try:
+ import gzip
+ file = gzip.open(filepath, 'w')
+ except:
+ print('failed to import compression modules, exporting uncompressed')
+ filepath = filepath[:-1] # remove trailing z
+
+ if file is None:
+ file = open(filepath, 'w')
+
+ if global_matrix is None:
+ global_matrix = mathutils.Matrix()
+
+ export(file,
+ global_matrix,
+ context.scene,
+ use_apply_modifiers=use_apply_modifiers,
+ use_selection=use_selection,
+ use_triangulate=use_triangulate,
+ use_normals=use_normals,
+ use_hierarchy=use_hierarchy,
+ use_h3d=use_h3d,
+ path_mode=path_mode,
+ )
+
+ return {'FINISHED'}
diff --git a/io_scene_x3d/import_x3d.py b/io_scene_x3d/import_x3d.py
new file mode 100644
index 00000000..28c0abac
--- /dev/null
+++ b/io_scene_x3d/import_x3d.py
@@ -0,0 +1,2656 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+DEBUG = False
+
+# This should work without a blender at all
+import os
+
+
+ def imageConvertCompat(path):
+ # Convert a .gif to .png via ImageMagick's "convert" command so it can
+ # be loaded; returns the converted path on success, the original path
+ # otherwise (including on win32, which is skipped entirely).
+
+ if os.sep == '\\':
+ return path # assume win32 has quicktime, don't convert
+
+ if path.lower().endswith('.gif'):
+ path_to = path[:-3] + 'png'
+
+ '''
+ if exists(path_to):
+ return path_to
+ '''
+ # print('\n'+path+'\n'+path_to+'\n')
+ os.system('convert "%s" "%s"' % (path, path_to)) # for now just hope we have image magick
+
+ # only trust the conversion if the output file actually appeared
+ if os.path.exists(path_to):
+ return path_to
+
+ return path
+
+# notes
+# transform are relative
+# order dosnt matter for loc/size/rot
+# right handed rotation
+# angles are in radians
+# rotation first defines axis then ammount in radians
+
+
+# =============================== VRML Spesific
+
+ def vrmlFormat(data):
+ '''
+ Keep this as a valid vrml file, but format in a way we can predict.
+ Returns the file as a list of normalized, non-empty lines:
+ comments stripped, every { } [ ] on its own line, commas spaced out,
+ and quoted strings protected from that splitting.
+ '''
+ # Strip all comments - # not in strings - warning multiline strings are ignored.
+ def strip_comment(l):
+ #l = ' '.join(l.split())
+ l = l.strip()
+
+ if l.startswith('#'):
+ return ''
+
+ i = l.find('#')
+
+ if i == -1:
+ return l
+
+ # Most cases accounted for! if we have a comment at the end of the line do this...
+ #j = l.find('url "')
+ j = l.find('"')
+
+ if j == -1: # simple no strings
+ return l[:i].strip()
+
+ # walk the line tracking quote state so a '#' inside a string
+ # is not treated as a comment start
+ q = False
+ for i, c in enumerate(l):
+ if c == '"':
+ q = not q # invert
+
+ elif c == '#':
+ if q == False:
+ return l[:i - 1]
+
+ return l
+
+ data = '\n'.join([strip_comment(l) for l in data.split('\n')]) # remove all whitespace
+
+ EXTRACT_STRINGS = True # only needed when strings or filesnames containe ,[]{} chars :/
+
+ if EXTRACT_STRINGS:
+
+ # Pull every quoted string out into string_ls, leaving "" markers
+ # behind, so the bracket/comma splitting below cannot corrupt them.
+ # We need this so we can detect URL's
+ data = '\n'.join([' '.join(l.split()) for l in data.split('\n')]) # remove all whitespace
+
+ string_ls = []
+
+ #search = 'url "'
+ search = '"'
+
+ ok = True
+ last_i = 0
+ while ok:
+ ok = False
+ i = data.find(search, last_i)
+ if i != -1:
+
+ start = i + len(search) # first char after end of search
+ end = data.find('"', start)
+ if end != -1:
+ item = data[start:end]
+ string_ls.append(item)
+ data = data[:start] + data[end:]
+ ok = True # keep looking
+
+ last_i = (end - len(item)) + 1
+ # print(last_i, item, '|' + data[last_i] + '|')
+
+ # done with messy extracting strings part
+
+ # Bad, dont take strings into account
+ '''
+ data = data.replace('#', '\n#')
+ data = '\n'.join([ll for l in data.split('\n') for ll in (l.strip(),) if not ll.startswith('#')]) # remove all whitespace
+ '''
+ # put every structural token on a predictable boundary
+ data = data.replace('{', '\n{\n')
+ data = data.replace('}', '\n}\n')
+ data = data.replace('[', '\n[\n')
+ data = data.replace(']', '\n]\n')
+ data = data.replace(',', ' , ') # make sure comma's separate
+
+ if EXTRACT_STRINGS:
+ # add strings back in
+
+ search = '"' # fill in these empty strings
+
+ ok = True
+ last_i = 0
+ while ok:
+ ok = False
+ i = data.find(search + '"', last_i)
+ # print(i)
+ if i != -1:
+ start = i + len(search) # first char after end of search
+ item = string_ls.pop(0)
+ # print(item)
+ data = data[:start] + item + data[start:]
+
+ last_i = start + len(item) + 1
+
+ ok = True
+
+ # More annoying obscure cases where USE or DEF are placed on a newline
+ # data = data.replace('\nDEF ', ' DEF ')
+ # data = data.replace('\nUSE ', ' USE ')
+
+ data = '\n'.join([' '.join(l.split()) for l in data.split('\n')]) # remove all whitespace
+
+ # Better to parse the file accounting for multiline arrays
+ '''
+ data = data.replace(',\n', ' , ') # remove line endings with commas
+ data = data.replace(']', '\n]\n') # very very annoying - but some comma's are at the end of the list, must run this again.
+ '''
+
+ return [l for l in data.split('\n') if l]
+
+ # Node classification constants used by the line-based VRML parser below.
+ NODE_NORMAL = 1 # {}
+ NODE_ARRAY = 2 # []
+ NODE_REFERENCE = 3 # USE foobar
+ # NODE_PROTO = 4 #
+
+ # Module-global line buffer consumed by is_nodeline()/is_numline()/
+ # getNodePreText(); presumably assigned from vrmlFormat()'s output by
+ # code outside this chunk - confirm against the caller.
+ lines = []
+
+
+ def getNodePreText(i, words):
+ # Accumulate tokens from lines[i:] into 'words' until an opening '{'
+ # (NODE_NORMAL) or a USE reference (NODE_REFERENCE) is recognized.
+ # Returns (node_type, next_line_index), or (0, -1) on failure.
+ # print(lines[i])
+ use_node = False
+ while len(words) < 5:
+
+ if i >= len(lines):
+ break
+ '''
+ elif lines[i].startswith('PROTO'):
+ return NODE_PROTO, i+1
+ '''
+ elif lines[i] == '{':
+ # words.append(lines[i]) # no need
+ # print("OK")
+ return NODE_NORMAL, i + 1
+ elif lines[i].count('"') % 2 != 0: # odd number of quotes? - part of a string.
+ # print('ISSTRING')
+ break
+ else:
+ new_words = lines[i].split()
+ if 'USE' in new_words:
+ use_node = True
+
+ words.extend(new_words)
+ i += 1
+
+ # Check for USE node - no {
+ # USE #id - should always be on the same line.
+ if use_node:
+ # print('LINE', i, words[:words.index('USE')+2])
+ # keep only up to "USE <id>"; anything after is noise
+ words[:] = words[:words.index('USE') + 2]
+ if lines[i] == '{' and lines[i + 1] == '}':
+ # USE sometimes has {} after it anyway
+ i += 2
+ return NODE_REFERENCE, i
+
+ # print("error value!!!", words)
+ return 0, -1
+
+
+ def is_nodeline(i, words):
+ # Decide whether lines[i] starts a node (PROTO, EXTERNPROTO, array
+ # field, or a normal/USE node). Fills 'words' with the node's leading
+ # tokens and returns (node_type, next_line_index); (0, 0) when the
+ # line is not a node start.
+
+ if not lines[i][0].isalpha():
+ return 0, 0
+
+ #if lines[i].startswith('field'):
+ # return 0, 0
+
+ # Is this a prototype??
+ if lines[i].startswith('PROTO'):
+ words[:] = lines[i].split()
+ return NODE_NORMAL, i + 1 # TODO - assumes the next line is a '[\n', skip that
+ if lines[i].startswith('EXTERNPROTO'):
+ words[:] = lines[i].split()
+ return NODE_ARRAY, i + 1 # TODO - assumes the next line is a '[\n', skip that
+
+ '''
+ proto_type, new_i = is_protoline(i, words, proto_field_defs)
+ if new_i != -1:
+ return proto_type, new_i
+ '''
+
+ # Simple "var [" type
+ if lines[i + 1] == '[':
+ if lines[i].count('"') % 2 == 0:
+ words[:] = lines[i].split()
+ return NODE_ARRAY, i + 2
+
+ node_type, new_i = getNodePreText(i, words)
+
+ if not node_type:
+ if DEBUG:
+ print("not node_type", lines[i])
+ return 0, 0
+
+ # Ok, we have a { after some values
+ # Check the values are not fields
+ for i, val in enumerate(words):
+ if i != 0 and words[i - 1] in {'DEF', 'USE'}:
+ # ignore anything after DEF, it is a ID and can contain any chars.
+ pass
+ elif val[0].isalpha() and val not in {'TRUE', 'FALSE'}:
+ pass
+ else:
+ # There is a number in one of the values, therefor we are not a node.
+ return 0, 0
+
+ #if node_type==NODE_REFERENCE:
+ # print(words, "REF_!!!!!!!")
+ return node_type, new_i
+
+
+ def is_numline(i):
+ '''
+ Does this line start with a number?
+ Cheap test: only the first token (past an optional leading ", ")
+ is tried with float(); good enough for the normalized lines
+ produced by vrmlFormat().
+ '''
+
+ # Works but too slow.
+ '''
+ l = lines[i]
+ for w in l.split():
+ if w==',':
+ pass
+ else:
+ try:
+ float(w)
+ return True
+
+ except:
+ return False
+
+ return False
+ '''
+
+ l = lines[i]
+
+ line_start = 0
+
+ if l.startswith(', '):
+ line_start += 2
+
+ line_end = len(l) - 1
+ line_end_new = l.find(' ', line_start) # comma's always have a space before them
+
+ if line_end_new != -1:
+ line_end = line_end_new
+
+ try:
+ float(l[line_start:line_end]) # works for a float or int
+ return True
+ except:
+ return False
+
+
+class vrmlNode(object):
+ __slots__ = ('id',
+ 'fields',
+ 'proto_node',
+ 'proto_field_defs',
+ 'proto_fields',
+ 'node_type',
+ 'parent',
+ 'children',
+ 'parent',
+ 'array_data',
+ 'reference',
+ 'lineno',
+ 'filename',
+ 'blendObject',
+ 'DEF_NAMESPACE',
+ 'ROUTE_IPO_NAMESPACE',
+ 'PROTO_NAMESPACE',
+ 'x3dNode')
+
+ def __init__(self, parent, node_type, lineno):
+ # Build a parse-tree node. NODE_REFERENCE nodes stop early: they
+ # only need parent/id, and get their 'reference' filled in later
+ # during parsing.
+ self.id = None
+ self.node_type = node_type
+ self.parent = parent
+ self.blendObject = None
+ self.x3dNode = None # for x3d import only
+ if parent:
+ parent.children.append(self)
+
+ self.lineno = lineno
+
+ # This is only set from the root nodes.
+ # Having a filename also denotes a root node
+ self.filename = None
+ self.proto_node = None # proto field definition eg: "field SFColor seatColor .6 .6 .1"
+
+ # Store in the root node because each inline file needs its own root node and its own namespace
+ self.DEF_NAMESPACE = None
+ self.ROUTE_IPO_NAMESPACE = None
+ '''
+ self.FIELD_NAMESPACE = None
+ '''
+
+ self.PROTO_NAMESPACE = None
+
+ self.reference = None
+
+ if node_type == NODE_REFERENCE:
+ # For references, only the parent and ID are needed
+ # the reference its self is assigned on parsing
+ return
+
+ self.fields = [] # fields have no order, in some cases rool level values are not unique so dont use a dict
+
+ self.proto_field_defs = [] # proto field definition eg: "field SFColor seatColor .6 .6 .1"
+ self.proto_fields = [] # proto field usage "diffuseColor IS seatColor"
+ self.children = []
+ self.array_data = [] # use for arrays of data - should only be for NODE_ARRAY types
+
+ # Only available from the root node
+ '''
+ def getFieldDict(self):
+ if self.FIELD_NAMESPACE != None:
+ return self.FIELD_NAMESPACE
+ else:
+ return self.parent.getFieldDict()
+ '''
+ def getProtoDict(self):
+ # Walk up to the root node that owns the PROTO namespace.
+ if self.PROTO_NAMESPACE != None:
+ return self.PROTO_NAMESPACE
+ else:
+ return self.parent.getProtoDict()
+
+ def getDefDict(self):
+ # Walk up to the root node that owns the DEF namespace.
+ if self.DEF_NAMESPACE != None:
+ return self.DEF_NAMESPACE
+ else:
+ return self.parent.getDefDict()
+
+ def getRouteIpoDict(self):
+ # Walk up to the root node that owns the ROUTE/IPO namespace.
+ if self.ROUTE_IPO_NAMESPACE != None:
+ return self.ROUTE_IPO_NAMESPACE
+ else:
+ return self.parent.getRouteIpoDict()
+
+ def setRoot(self, filename):
+ # Promote this node to a root: give it a filename (which is what
+ # isRoot() tests) and its own fresh namespaces.
+ self.filename = filename
+ # self.FIELD_NAMESPACE = {}
+ self.DEF_NAMESPACE = {}
+ self.ROUTE_IPO_NAMESPACE = {}
+ self.PROTO_NAMESPACE = {}
+
+ def isRoot(self):
+ # A node is a root exactly when setRoot() gave it a filename.
+ if self.filename is None:
+ return False
+ else:
+ return True
+
+ def getFilename(self):
+ # Filename of the nearest root ancestor (None if detached).
+ if self.filename:
+ return self.filename
+ elif self.parent:
+ return self.parent.getFilename()
+ else:
+ return None
+
+ def getRealNode(self):
+ # Follow a USE reference to the DEF'd node; plain nodes return self.
+ if self.reference:
+ return self.reference
+ else:
+ return self
+
+ def getSpec(self):
+ # Node spec (type name) = last element of the id tuple, or None.
+ self_real = self.getRealNode()
+ try:
+ return self_real.id[-1] # its possible this node has no spec
+ except:
+ return None
+
+ def findSpecRecursive(self, spec):
+ self_real = self.getRealNode()
+ if spec == self_real.getSpec():
+ return self
+
+ for child in self_real.children:
+ if child.findSpecRecursive(spec):
+ return child
+
+ return None
+
+ def getPrefix(self):
+ # First element of the id tuple, or None when there is no id.
+ if self.id:
+ return self.id[0]
+ return None
+
+ def getSpecialTypeName(self, typename):
+ # Token immediately following 'typename' in the id, eg
+ # getSpecialTypeName('DEF') -> the DEF name; None when absent.
+ self_real = self.getRealNode()
+ try:
+ return self_real.id[list(self_real.id).index(typename) + 1]
+ except:
+ return None
+
+ def getDefName(self):
+ return self.getSpecialTypeName('DEF')
+
+ def getProtoName(self):
+ return self.getSpecialTypeName('PROTO')
+
+ def getExternprotoName(self):
+ return self.getSpecialTypeName('EXTERNPROTO')
+
+ def getChildrenBySpec(self, node_spec): # spec could be Transform, Shape, Appearance
+ # node_spec may be one spec string or a collection of alternatives.
+ self_real = self.getRealNode()
+ # using getSpec functions allows us to use the spec of USE children that dont have their spec in their ID
+ if type(node_spec) == str:
+ return [child for child in self_real.children if child.getSpec() == node_spec]
+ else:
+ # Check inside a list of optional types
+ return [child for child in self_real.children if child.getSpec() in node_spec]
+
+ def getChildBySpec(self, node_spec): # spec could be Transform, Shape, Appearance
+ # Use in cases where there is only ever 1 child of this type
+ ls = self.getChildrenBySpec(node_spec)
+ if ls:
+ return ls[0]
+ else:
+ return None
+
+ def getChildrenByName(self, node_name): # type could be geometry, children, appearance
+ # matches on the FIRST id element (field name), unlike the
+ # spec-based lookups above which use the last
+ self_real = self.getRealNode()
+ return [child for child in self_real.children if child.id if child.id[0] == node_name]
+
+ def getChildByName(self, node_name):
+ # First child whose id starts with node_name; implicitly returns
+ # None when nothing matches.
+ self_real = self.getRealNode()
+ for child in self_real.children:
+ if child.id and child.id[0] == node_name: # and child.id[-1]==node_spec:
+ return child
+
+ def getSerialized(self, results, ancestry):
+ ''' Return this node and all its children in a flat list '''
+ # Each entry is (node, ancestry-tuple). PROTO/EXTERNPROTO children
+ # are skipped unless they define the spec of the node instancing
+ # them (the proto was attached as a child of its instance).
+ ancestry = ancestry[:] # always use a copy
+
+ # self_real = self.getRealNode()
+
+ results.append((self, tuple(ancestry)))
+ ancestry.append(self)
+ for child in self.getRealNode().children:
+ if child not in ancestry:
+ # We dont want to load proto's, they are only references
+ # We could enforce this elsewhere
+
+ # Only add this in a very special case
+ # where the parent of this object is not the real parent
+ # - In this case we have added the proto as a child to a node instancing it.
+ # This is a bit arbitary, but its how Proto's are done with this importer.
+ if child.getProtoName() is None and child.getExternprotoName() is None:
+ child.getSerialized(results, ancestry)
+ else:
+
+ if DEBUG:
+ print('getSerialized() is proto:', child.getProtoName(), child.getExternprotoName(), self.getSpec())
+
+ self_spec = self.getSpec()
+
+ if child.getProtoName() == self_spec or child.getExternprotoName() == self_spec:
+ #if DEBUG:
+ # "FoundProto!"
+ child.getSerialized(results, ancestry)
+
+ return results
+
+ def searchNodeTypeID(self, node_spec, results):
+ # Recursively collect every node in this subtree whose spec matches;
+ # appends to and returns 'results'.
+ self_real = self.getRealNode()
+ # print(self.lineno, self.id)
+ if self_real.id and self_real.id[-1] == node_spec: # use last element, could also be only element
+ results.append(self_real)
+ for child in self_real.children:
+ child.searchNodeTypeID(node_spec, results)
+ return results
+
+ def getFieldName(self, field, ancestry, AS_CHILD=False):
+ # Look up a field's value tokens on this node, resolving "IS"
+ # PROTO indirections via the ancestry chain. With AS_CHILD the
+ # result is a child NODE (for array-style fields) instead of a
+ # token list. Returns None when the field is absent.
+ self_real = self.getRealNode() # incase we're an instance
+
+ for f in self_real.fields:
+ # print(f)
+ if f and f[0] == field:
+ # print('\tfound field', f)
+
+ if len(f) >= 3 and f[1] == 'IS': # eg: 'diffuseColor IS legColor'
+ field_id = f[2]
+
+ # print("\n\n\n\n\n\nFOND IS!!!")
+ f_proto_lookup = None
+ f_proto_child_lookup = None
+ # walk ancestry innermost-first so the nearest PROTO /
+ # instance wins; instance overrides proto default
+ i = len(ancestry)
+ while i:
+ i -= 1
+ node = ancestry[i]
+ node = node.getRealNode()
+
+ # proto settings are stored in "self.proto_node"
+ if node.proto_node:
+ # Get the default value from the proto, this can be overwridden by the proto instace
+ # 'field SFColor legColor .8 .4 .7'
+ if AS_CHILD:
+ for child in node.proto_node.children:
+ #if child.id and len(child.id) >= 3 and child.id[2]==field_id:
+ if child.id and ('point' in child.id or 'points' in child.id):
+ f_proto_child_lookup = child
+
+ else:
+ for f_def in node.proto_node.proto_field_defs:
+ if len(f_def) >= 4:
+ if f_def[0] == 'field' and f_def[2] == field_id:
+ f_proto_lookup = f_def[3:]
+
+ # Node instance, Will be 1 up from the proto-node in the ancestry list. but NOT its parent.
+ # This is the setting as defined by the instance, including this setting is optional,
+ # and will override the default PROTO value
+ # eg: 'legColor 1 0 0'
+ if AS_CHILD:
+ for child in node.children:
+ if child.id and child.id[0] == field_id:
+ f_proto_child_lookup = child
+ else:
+ for f_def in node.fields:
+ if len(f_def) >= 2:
+ if f_def[0] == field_id:
+ if DEBUG:
+ print("getFieldName(), found proto", f_def)
+ f_proto_lookup = f_def[1:]
+
+ if AS_CHILD:
+ if f_proto_child_lookup:
+ if DEBUG:
+ print("getFieldName() - AS_CHILD=True, child found")
+ print(f_proto_child_lookup)
+ return f_proto_child_lookup
+ else:
+ return f_proto_lookup
+ else:
+ if AS_CHILD:
+ return None
+ else:
+ # Not using a proto
+ return f[1:]
+ # print('\tfield not found', field)
+
+ # See if this is a proto name
+ if AS_CHILD:
+ child_array = None
+ for child in self_real.children:
+ if child.id and len(child.id) == 1 and child.id[0] == field:
+ return child
+
+ return None
+
+ def getFieldAsInt(self, field, default, ancestry):
+ # Field value as a single int; 'default' on absence or bad data.
+ self_real = self.getRealNode() # incase we're an instance
+
+ f = self_real.getFieldName(field, ancestry)
+ if f is None:
+ return default
+ if ',' in f:
+ f = f[:f.index(',')] # strip after the comma
+
+ if len(f) != 1:
+ print('\t"%s" wrong length for int conversion for field "%s"' % (f, field))
+ return default
+
+ try:
+ return int(f[0])
+ except:
+ print('\tvalue "%s" could not be used as an int for field "%s"' % (f[0], field))
+ return default
+
+ def getFieldAsFloat(self, field, default, ancestry):
+ # Field value as a single float; 'default' on absence or bad data.
+ self_real = self.getRealNode() # incase we're an instance
+
+ f = self_real.getFieldName(field, ancestry)
+ if f is None:
+ return default
+ if ',' in f:
+ f = f[:f.index(',')] # strip after the comma
+
+ if len(f) != 1:
+ print('\t"%s" wrong length for float conversion for field "%s"' % (f, field))
+ return default
+
+ try:
+ return float(f[0])
+ except:
+ print('\tvalue "%s" could not be used as a float for field "%s"' % (f[0], field))
+ return default
+
+ def getFieldAsFloatTuple(self, field, default, ancestry):
+ # Field value as a list of floats; stops at the first non-float
+ # token. Returns 'default' on absence or when nothing parses.
+ self_real = self.getRealNode() # incase we're an instance
+
+ f = self_real.getFieldName(field, ancestry)
+ if f is None:
+ return default
+ # if ',' in f: f = f[:f.index(',')] # strip after the comma
+
+ if len(f) < 1:
+ print('"%s" wrong length for float tuple conversion for field "%s"' % (f, field))
+ return default
+
+ ret = []
+ for v in f:
+ if v != ',':
+ try:
+ ret.append(float(v))
+ except:
+ break # quit of first non float, perhaps its a new field name on the same line? - if so we are going to ignore it :/ TODO
+ # print(ret)
+
+ if ret:
+ return ret
+ if not ret:
+ print('\tvalue "%s" could not be used as a float tuple for field "%s"' % (f, field))
+ return default
+
+ def getFieldAsBool(self, field, default, ancestry):
+ self_real = self.getRealNode() # incase we're an instance
+
+ f = self_real.getFieldName(field, ancestry)
+ if f is None:
+ return default
+ if ',' in f:
+ f = f[:f.index(',')] # strip after the comma
+
+ if len(f) != 1:
+ print('\t"%s" wrong length for bool conversion for field "%s"' % (f, field))
+ return default
+
+ if f[0].upper() == '"TRUE"' or f[0].upper() == 'TRUE':
+ return True
+ elif f[0].upper() == '"FALSE"' or f[0].upper() == 'FALSE':
+ return False
+ else:
+ print('\t"%s" could not be used as a bool for field "%s"' % (f[1], field))
+ return default
+
+ def getFieldAsString(self, field, default, ancestry):
+ # Field value as a string; VRML strings must be quoted (quotes are
+ # stripped), X3D strings are returned as-is.
+ self_real = self.getRealNode() # incase we're an instance
+
+ f = self_real.getFieldName(field, ancestry)
+ if f is None:
+ return default
+ if len(f) < 1:
+ print('\t"%s" wrong length for string conversion for field "%s"' % (f, field))
+ return default
+
+ if len(f) > 1:
+ # String may contain spaces
+ st = ' '.join(f)
+ else:
+ st = f[0]
+
+ # X3D HACK
+ if self.x3dNode:
+ return st
+
+ if st[0] == '"' and st[-1] == '"':
+ return st[1:-1]
+ else:
+ print('\tvalue "%s" could not be used as a string for field "%s"' % (f[0], field))
+ return default
+
+ def getFieldAsArray(self, field, group, ancestry):
+ '''
+ For this parser arrays are children
+ Returns the field as a numeric list. 'group' controls the shape:
+ -1 = raw data as stored, 0 = flattened, N > 0 = flattened then
+ re-grouped into N-sized sub-lists.
+ '''
+
+ def array_as_number(array_string):
+ # try ints first, fall back to floats, else empty list
+ array_data = []
+ try:
+ array_data = [int(val) for val in array_string]
+ except:
+ try:
+ array_data = [float(val) for val in array_string]
+ except:
+ print('\tWarning, could not parse array data from field')
+
+ return array_data
+
+ self_real = self.getRealNode() # incase we're an instance
+
+ child_array = self_real.getFieldName(field, ancestry, True)
+
+ #if type(child_array)==list: # happens occasionaly
+ # array_data = child_array
+
+ if child_array is None:
+ # For x3d, should work ok with vrml too
+ # for x3d arrays are fields, vrml they are nodes, annoying but not tooo bad.
+ data_split = self.getFieldName(field, ancestry)
+ if not data_split:
+ return []
+ array_data = ' '.join(data_split)
+ if array_data is None:
+ return []
+
+ array_data = array_data.replace(',', ' ')
+ data_split = array_data.split()
+
+ array_data = array_as_number(data_split)
+
+ elif type(child_array) == list:
+ # x3d creates these
+ data_split = [w.strip(",") for w in child_array]
+
+ array_data = array_as_number(data_split)
+ else:
+ # print(child_array)
+ # Normal vrml
+ array_data = child_array.array_data
+
+ # print('array_data', array_data)
+ if group == -1 or len(array_data) == 0:
+ return array_data
+
+ # We want a flat list
+ flat = True
+ for item in array_data:
+ if type(item) == list:
+ flat = False
+ break
+
+ # make a flat array
+ if flat:
+ flat_array = array_data # we are alredy flat.
+ else:
+ flat_array = []
+
+ def extend_flat(ls):
+ for item in ls:
+ if type(item) == list:
+ extend_flat(item)
+ else:
+ flat_array.append(item)
+
+ extend_flat(array_data)
+
+ # We requested a flat array
+ if group == 0:
+ return flat_array
+
+ # regroup into fixed-size tuples (eg group=3 for vectors)
+ new_array = []
+ sub_array = []
+
+ for item in flat_array:
+ sub_array.append(item)
+ if len(sub_array) == group:
+ new_array.append(sub_array)
+ sub_array = []
+
+ if sub_array:
+ print('\twarning, array was not aligned to requested grouping', group, 'remaining value', sub_array)
+
+ return new_array
+
def getFieldAsStringArray(self, field, ancestry):
    '''
    Get a list of strings.

    Finds the child node whose id is exactly *field* and returns its field
    values with the surrounding double quotes stripped. Returns [] when the
    child is missing or its fields cannot be unpacked.
    '''
    self_real = self.getRealNode()  # incase we're an instance

    child_array = None
    for child in self_real.children:
        if child.id and len(child.id) == 1 and child.id[0] == field:
            child_array = child
            break
    if not child_array:
        return []

    # each string gets its own list, remove ""'s
    try:
        # Narrowed from a bare except: only malformed field entries
        # (empty sub-lists / non-indexable items) are expected here.
        new_array = [f[0][1:-1] for f in child_array.fields]
    except (IndexError, TypeError):
        print('\twarning, string array could not be made')
        new_array = []

    return new_array
+
def getLevel(self):
    """Return this node's depth in the tree (root nodes are level 0).

    Walks the ``parent`` chain; deliberately ignores getRealNode() so an
    instance reports its own position, not its DEF target's.
    """
    # Ignore self_real
    level = 0
    p = self.parent
    while p:
        level += 1
        p = p.parent
    # NOTE: the original loop also had a redundant `if not p: break`;
    # the while-condition already terminates the walk.
    return level
+
def __repr__(self):
    """Return a multi-line, indentation-nested debug dump of this subtree."""
    level = self.getLevel()
    ind = ' ' * level
    # Choose the bracket pair by node type; reference nodes get none
    # (they also return early below, so brackets[1] is never reached).
    if self.node_type == NODE_REFERENCE:
        brackets = ''
    elif self.node_type == NODE_NORMAL:
        brackets = '{}'
    else:
        brackets = '[]'

    if brackets:
        text = ind + brackets[0] + '\n'
    else:
        text = ''

    text += ind + 'ID: ' + str(self.id) + ' ' + str(level) + (' lineno %d\n' % self.lineno)

    if self.node_type == NODE_REFERENCE:
        # References only point at a DEF; no fields/children of their own.
        text += ind + "(reference node)\n"
        return text

    if self.proto_node:
        text += ind + 'PROTO NODE...\n'
        text += str(self.proto_node)
        text += ind + 'PROTO NODE_DONE\n'

    text += ind + 'FIELDS:' + str(len(self.fields)) + '\n'

    for i, item in enumerate(self.fields):
        text += ind + 'FIELD:\n'
        text += ind + str(item) + '\n'

    text += ind + 'PROTO_FIELD_DEFS:' + str(len(self.proto_field_defs)) + '\n'

    for i, item in enumerate(self.proto_field_defs):
        text += ind + 'PROTO_FIELD:\n'
        text += ind + str(item) + '\n'

    text += ind + 'ARRAY: ' + str(len(self.array_data)) + ' ' + str(self.array_data) + '\n'
    #text += ind + 'ARRAY: ' + str(len(self.array_data)) + '[...] \n'

    text += ind + 'CHILDREN: ' + str(len(self.children)) + '\n'
    for i, child in enumerate(self.children):
        text += ind + ('CHILD%d:\n' % i)
        text += str(child)  # recurses via each child's own __repr__

    text += '\n' + ind + brackets[1]

    return text
+
def parse(self, i, IS_PROTO_DATA=False):
    """Parse this node starting at line index *i* of the module-level
    ``lines`` buffer and return the index just past it.

    After the core parse, handles Inline nodes and EXTERNPROTOs by
    locating the referenced file on disk, temporarily swapping the global
    ``lines`` buffer for the referenced file's contents, and parsing it as
    a child tree.
    """
    new_i = self.__parse(i, IS_PROTO_DATA)

    # Check if this node was an inline or externproto

    # List of (url, extern_key) pairs to try loading; extern_key is the
    # PROTO id after '#', or None for plain Inlines.
    url_ls = []

    if self.node_type == NODE_NORMAL and self.getSpec() == 'Inline':
        ancestry = []  # Warning! - PROTO's using this wont work at all.
        url = self.getFieldAsString('url', None, ancestry)
        if url:
            url_ls = [(url, None)]
        del ancestry

    elif self.getExternprotoName():
        # externproto: the url(s) live in the raw fields, possibly quoted
        # and possibly carrying a '#ProtoID' fragment.
        url_ls = []
        for f in self.fields:

            if type(f) == str:
                f = [f]

            for ff in f:
                for f_split in ff.split('"'):
                    # "someextern.vrml#SomeID"
                    if '#' in f_split:

                        f_split, f_split_id = f_split.split('#')  # there should only be 1 # anyway

                        url_ls.append((f_split, f_split_id))
                    else:
                        url_ls.append((f_split, None))

    # Was either an Inline or an EXTERNPROTO
    if url_ls:

        for url, extern_key in url_ls:
            print(url)
            # Candidate paths: as given, relative to this file, and by
            # basename only -- each also tried case-insensitively.
            urls = []
            urls.append(url)
            urls.append(bpy.path.resolve_ncase(urls[-1]))

            urls.append(os.path.join(os.path.dirname(self.getFilename()), url))
            urls.append(bpy.path.resolve_ncase(urls[-1]))

            urls.append(os.path.join(os.path.dirname(self.getFilename()), os.path.basename(url)))
            urls.append(bpy.path.resolve_ncase(urls[-1]))

            try:
                url = [url for url in urls if os.path.exists(url)][0]
                url_found = True
            except:
                url_found = False

            if not url_found:
                print('\tWarning: Inline URL could not be found:', url)
            else:
                if url == self.getFilename():
                    print('\tWarning: cant Inline yourself recursively:', url)
                else:

                    try:
                        data = gzipOpen(url)
                    except:
                        print('\tWarning: cant open the file:', url)
                        data = None

                    if data:
                        # Tricky - inline another VRML
                        print('\tLoading Inline:"%s"...' % url)

                        # Watch it! - backup lines (global buffer is reused
                        # for the inlined file, then restored below).
                        lines_old = lines[:]

                        lines[:] = vrmlFormat(data)

                        # Wrap in a synthetic root so all top-level nodes
                        # of the inlined file become children.
                        lines.insert(0, '{')
                        lines.insert(0, 'root_node____')
                        lines.append('}')
                        '''
                        ff = open('/tmp/test.txt', 'w')
                        ff.writelines([l+'\n' for l in lines])
                        '''

                        child = vrmlNode(self, NODE_NORMAL, -1)
                        child.setRoot(url)  # initialized dicts
                        child.parse(0)

                        if self.getExternprotoName():
                            if not extern_key:  # if none is spesified - use the name
                                extern_key = self.getSpec()

                            if extern_key:

                                # Detach the whole inlined tree and graft
                                # only the matching PROTO back in.
                                self.children.remove(child)
                                child.parent = None

                                extern_child = child.findSpecRecursive(extern_key)

                                if extern_child:
                                    self.children.append(extern_child)
                                    extern_child.parent = self

                                    if DEBUG:
                                        print("\tEXTERNPROTO ID found!:", extern_key)
                                else:
                                    print("\tEXTERNPROTO ID not found!:", extern_key)

                        # Watch it! - restore lines
                        lines[:] = lines_old

    return new_i
+
def __parse(self, i, IS_PROTO_DATA=False):
    """Core recursive-descent parse of the global ``lines`` buffer.

    Consumes this node's header at index *i*, then loops over the body,
    creating child nodes, numeric array data, and key/value fields until
    the matching closing bracket. Returns the index just past it.
    """
    l = lines[i]

    if l == '[':
        # An anonymous list
        self.id = None
        i += 1
    else:
        words = []

        node_type, new_i = is_nodeline(i, words)
        if not node_type:  # fail for parsing new node.
            print("Failed to parse new node")
            raise ValueError

        if self.node_type == NODE_REFERENCE:
            # Only assign the reference and quit
            key = words[words.index('USE') + 1]
            self.id = (words[0],)

            self.reference = self.getDefDict()[key]
            return new_i

        self.id = tuple(words)

        # fill in DEF/USE
        key = self.getDefName()
        if key != None:
            self.getDefDict()[key] = self

        key = self.getProtoName()
        if not key:
            key = self.getExternprotoName()

        proto_dict = self.getProtoDict()
        if key != None:
            # This IS a proto definition: register it and parse its
            # field-declaration block (an array node) separately.
            proto_dict[key] = self

            # Parse the proto nodes fields
            self.proto_node = vrmlNode(self, NODE_ARRAY, new_i)
            new_i = self.proto_node.parse(new_i)

            self.children.remove(self.proto_node)

            new_i += 1  # skip past the {

        else:  # If we're a proto instance, add the proto node as our child.
            spec = self.getSpec()
            try:
                self.children.append(proto_dict[spec])
                #pass
            except:
                pass

            del spec

        del proto_dict, key

        i = new_i

    # Body loop: each iteration consumes one child node, data line, or
    # field line until the closing bracket (or EOF for sloppy files).
    ok = True
    while ok:
        if i >= len(lines):
            return len(lines) - 1

        l = lines[i]
        if l == '':
            i += 1
            continue

        if l == '}':
            if self.node_type != NODE_NORMAL:  # also ends proto nodes, we may want a type for these too.
                print('wrong node ending, expected an } ' + str(i) + ' ' + str(self.node_type))
                if DEBUG:
                    raise ValueError
            return i + 1
        if l == ']':
            if self.node_type != NODE_ARRAY:
                print('wrong node ending, expected a ] ' + str(i) + ' ' + str(self.node_type))
                if DEBUG:
                    raise ValueError
            return i + 1

        node_type, new_i = is_nodeline(i, [])
        if node_type:  # check text\n{
            child = vrmlNode(self, node_type, i)
            i = child.parse(i)

        elif l == '[':  # some files have these anonymous lists
            child = vrmlNode(self, NODE_ARRAY, i)
            i = child.parse(i)

        elif is_numline(i):
            l_split = l.split(',')

            values = None
            # Try scalars first, then space-separated vectors, for ints
            # then floats; fall back to the raw strings.
            for num_type in (int, float):
                try:
                    values = [num_type(v) for v in l_split]
                    break
                except:
                    pass

                try:
                    values = [[num_type(v) for v in segment.split()] for segment in l_split]
                    break
                except:
                    pass

            if values is None:  # dont parse
                values = l_split

            # This should not extend over multiple lines however it is possible
            if values:
                self.array_data.extend(values)
            i += 1
        else:
            words = l.split()
            if len(words) > 2 and words[1] == 'USE':
                # "fieldname USE DefName" -- becomes a reference child.
                vrmlNode(self, NODE_REFERENCE, i)
            else:

                # this is a tag/field line
                value = l
                # javastrips can exist as values: an odd number of quotes
                # means the string continues on following lines.
                quote_count = l.count('"')
                if quote_count % 2:  # odd number?
                    while 1:
                        i += 1
                        l = lines[i]
                        quote_count = l.count('"')
                        if quote_count % 2:  # odd number?
                            value += '\n' + l[:l.rfind('"')]
                            break  # assume
                        else:
                            value += '\n' + l

                value_all = value.split()

                def iskey(k):
                    # A key is an unquoted alphabetic token that is not a
                    # boolean literal.
                    if k[0] != '"' and k[0].isalpha() and k.upper() not in {'TRUE', 'FALSE'}:
                        return True
                    return False

                def split_fields(value):
                    '''
                    key 0.0 otherkey 1,2,3 opt1 opt1 0.0
                    -> [key 0.0], [otherkey 1,2,3], [opt1 opt1 0.0]
                    '''
                    field_list = []
                    field_context = []

                    for j in range(len(value)):
                        if iskey(value[j]):
                            if field_context:
                                # this IS a key but the previous value was not a key, ot it was a defined field.
                                if (not iskey(field_context[-1])) or ((len(field_context) == 3 and field_context[1] == 'IS')):
                                    field_list.append(field_context)

                                    field_context = [value[j]]
                                else:
                                    # The last item was not a value, multiple keys are needed in some cases.
                                    field_context.append(value[j])
                            else:
                                # Is empty, just add this on
                                field_context.append(value[j])
                        else:
                            # Add a value to the list
                            field_context.append(value[j])

                    if field_context:
                        field_list.append(field_context)

                    return field_list

                for value in split_fields(value_all):
                    if value[0] == 'field':
                        # field SFFloat creaseAngle 4
                        self.proto_field_defs.append(value)
                    else:
                        self.fields.append(value)
                i += 1
+
+
def gzipOpen(path):
    """Read *path*, transparently handling gzip-compressed files.

    Returns bytes for gzip-compressed input, text for plain files, or
    None when the file cannot be read at all.

    Fixes over the original: file handles are closed via ``with`` (the
    gzip handle leaked on success), the dead 'rU' mode (removed in
    Python 3.11, where it made the fallback always fail) is replaced by
    plain text mode -- universal newlines are the Python 3 default -- and
    the bare excepts are narrowed.
    """
    try:
        import gzip
    except ImportError:
        gzip = None

    data = None
    if gzip:
        try:
            # Raises OSError from .read() when the file is not gzip data.
            with gzip.open(path, 'r') as gz:
                data = gz.read()
        except OSError:
            pass
    else:
        print('\tNote, gzip module could not be imported, compressed files will fail to load')

    if data is None:
        # Plain-text fallback (also used when gzip decompression failed).
        try:
            with open(path, 'r') as filehandle:
                data = filehandle.read()
        except OSError:
            pass

    return data
+
+
def vrml_parse(path):
    '''
    Sets up the root node and returns it so load_web3d() can deal with the blender side of things.
    Return root (vrmlNode, '') or (None, 'Error String')
    '''
    data = gzipOpen(path)

    if data is None:
        return None, 'Failed to open file: ' + path

    # Stripped above; fills the module-level ``lines`` buffer the parser
    # walks by index.
    lines[:] = vrmlFormat(data)

    # Throwaway wrapper used only for the "does the file start with a
    # node" sanity check below.
    lines.insert(0, '{')
    lines.insert(0, 'dymmy_node')
    lines.append('}')
    # Use for testing our parsed output, so we can check on line numbers.

    '''
    ff = open('/tmp/test.txt', 'w')
    ff.writelines([l+'\n' for l in lines])
    ff.close()
    '''

    # Now evaluate it
    node_type, new_i = is_nodeline(0, [])
    if not node_type:
        return None, 'Error: VRML file has no starting Node'

    # Trick to make sure we get all root nodes: wrap everything in one
    # synthetic root so multiple top-level nodes all become children.
    lines.insert(0, '{')
    lines.insert(0, 'root_node____')  # important the name starts with an ascii char
    lines.append('}')

    root = vrmlNode(None, NODE_NORMAL, -1)
    root.setRoot(path)  # we need to set the root so we have a namespace and know the path incase of inlineing

    # Parse recursively
    root.parse(0)

    # This prints a load of text
    if DEBUG:
        print(root)

    return root, ''
+
+
+# ====================== END VRML
+
+# ====================== X3d Support
+
+# Sane as vrml but replace the parser
# Sane as vrml but replace the parser
class x3dNode(vrmlNode):
    """vrmlNode backed by an ``xml.dom.minidom`` element.

    Reuses all the vrmlNode getField* utilities by overriding only
    getFieldName(), so VRML and X3D share one import path.
    """

    def __init__(self, parent, node_type, x3dNode):
        vrmlNode.__init__(self, parent, node_type, -1)
        self.x3dNode = x3dNode  # the wrapped DOM element

    def parse(self, IS_PROTO_DATA=False):
        """Register DEF/USE for this element and recursively wrap its
        child elements as x3dNode children."""
        define = self.x3dNode.getAttributeNode('DEF')
        if define:
            self.getDefDict()[define.value] = self
        else:
            use = self.x3dNode.getAttributeNode('USE')
            if use:
                try:
                    self.reference = self.getDefDict()[use.value]
                    self.node_type = NODE_REFERENCE
                except KeyError:  # narrowed from bare except
                    print('\tWarning: reference', use.value, 'not found')
                    self.parent.children.remove(self)

                return

        for x3dChildNode in self.x3dNode.childNodes:
            # Skip non-element nodes; only real elements become children.
            if x3dChildNode.nodeType in {x3dChildNode.TEXT_NODE, x3dChildNode.COMMENT_NODE, x3dChildNode.CDATA_SECTION_NODE}:
                continue

            node_type = NODE_NORMAL
            if x3dChildNode.getAttributeNode('USE'):
                node_type = NODE_REFERENCE

            child = x3dNode(self, node_type, x3dChildNode)
            child.parse()

        # TODO - x3d Inline

    def getSpec(self):
        return self.x3dNode.tagName  # should match vrml spec

    def getDefName(self):
        """Return the DEF name of this element, or None.

        Bug fix: the original evaluated ``data.value`` without returning
        it (flagged by the author's own '# XXX, return??'), so X3D DEF
        names were always reported as None.
        """
        data = self.x3dNode.getAttributeNode('DEF')
        if data:
            return data.value
        return None

    # Other funcs operate from vrml, but this means we can wrap XML fields, still use nice utility funcs
    # getFieldAsArray getFieldAsBool etc
    def getFieldName(self, field, ancestry, AS_CHILD=False):
        # ancestry and AS_CHILD are ignored, only used for VRML now

        self_real = self.getRealNode()  # NOTE(review): unused; possibly the lookup below was meant to read self_real.x3dNode -- confirm before changing
        field_xml = self.x3dNode.getAttributeNode(field)
        if field_xml:
            value = field_xml.value

            # We may want to edit. for x3d spesific stuff
            # Sucks a bit to return the field name in the list but vrml excepts this :/
            return value.split()
        else:
            return None
+
+
def x3d_parse(path):
    '''
    Sets up the root node and returns it so load_web3d() can deal with the blender side of things.
    Return root (x3dNode, '') or (None, 'Error String')
    '''

    try:
        import xml.dom.minidom
    except ImportError:  # narrowed from bare except
        return None, 'Error, import XML parsing module (xml.dom.minidom) failed, install python'

    '''
    try: doc = xml.dom.minidom.parse(path)
    except: return None, 'Could not parse this X3D file, XML error'
    '''

    # Could add a try/except here, but a console error is more useful.
    data = gzipOpen(path)

    if data is None:
        return None, 'Failed to open file: ' + path

    doc = xml.dom.minidom.parseString(data)

    try:
        # An X3D document must have a single <X3D> root element.
        x3dnode = doc.getElementsByTagName('X3D')[0]
    except IndexError:  # narrowed from bare except
        return None, 'Not a valid x3d document, cannot import'

    root = x3dNode(None, NODE_NORMAL, x3dnode)
    root.setRoot(path)  # so images and Inline's we load have a relative path
    root.parse()

    return root, ''
+
# NOTE(review): disabled developer smoke-tests; the triple-quoted block
# below is a no-op string literal and is never executed.
## f = open('/_Cylinder.wrl', 'r')
# f = open('/fe/wrl/Vrml/EGS/TOUCHSN.WRL', 'r')
# vrml_parse('/fe/wrl/Vrml/EGS/TOUCHSN.WRL')
#vrml_parse('/fe/wrl/Vrml/EGS/SCRIPT.WRL')
'''
import os
files = os.popen('find /fe/wrl -iname "*.wrl"').readlines()
files.sort()
tot = len(files)
for i, f in enumerate(files):
    #if i < 801:
    #    continue

    f = f.strip()
    print(f, i, tot)
    vrml_parse(f)
'''
+
+# NO BLENDER CODE ABOVE THIS LINE.
+# -----------------------------------------------------------------------------------
+import bpy
+from bpy_extras import image_utils
+# import BPyImage
+# import BPySys
+# reload(BPySys)
+# reload(BPyImage)
+# import Blender
+# from Blender import Texture, Material, Mathutils, Mesh, Types, Window
+from mathutils import Vector, Matrix
+
RAD_TO_DEG = 57.29578  # degrees per radian (180 / pi), truncated

GLOBALS = {'CIRCLE_DETAIL': 16}  # shared importer settings; CIRCLE_DETAIL is reassigned to 12 further down
+
+
def translateRotation(rot):
    """Build a 4x4 rotation matrix from a VRML rotation field.

    *rot* is (x, y, z, angle): a rotation axis followed by an angle in
    radians.
    """
    axis = Vector(rot[:3])
    angle = rot[3]
    return Matrix.Rotation(angle, 4, axis)
+
+
def translateScale(sca):
    """Return a 4x4 matrix scaling by *sca* = (x, y, z) on the diagonal."""
    mat = Matrix()  # 4x4 identity
    for axis in range(3):
        mat[axis][axis] = sca[axis]
    return mat
+
+
def translateTransform(node, ancestry):
    """Build the local matrix for a VRML Transform node.

    Reads center / rotation / scale / scaleOrientation / translation and
    composes them as T * C * R * SR * S * SR^-1 * C^-1; fields that are
    absent simply drop out of the product.
    """
    cent = node.getFieldAsFloatTuple('center', None, ancestry)  # (0.0, 0.0, 0.0)
    rot = node.getFieldAsFloatTuple('rotation', None, ancestry)  # (0.0, 0.0, 1.0, 0.0)
    sca = node.getFieldAsFloatTuple('scale', None, ancestry)  # (1.0, 1.0, 1.0)
    scaori = node.getFieldAsFloatTuple('scaleOrientation', None, ancestry)  # (0.0, 0.0, 1.0, 0.0)
    tx = node.getFieldAsFloatTuple('translation', None, ancestry)  # (0.0, 0.0, 0.0)

    cent_mat = Matrix.Translation(cent) if cent else None
    cent_imat = cent_mat.inverted() if cent else None

    rot_mat = translateRotation(rot) if rot else None
    sca_mat = translateScale(sca) if sca else None

    scaori_mat = translateRotation(scaori) if scaori else None
    scaori_imat = scaori_mat.inverted() if scaori else None

    tx_mat = Matrix.Translation(tx) if tx else None

    new_mat = Matrix()
    for mtx in (tx_mat, cent_mat, rot_mat, scaori_mat, sca_mat, scaori_imat, cent_imat):
        if mtx:
            new_mat = new_mat * mtx

    return new_mat
+
+
def translateTexTransform(node, ancestry):
    """Build a 4x4 matrix for a VRML TextureTransform node.

    2D fields are lifted into 3D (rotation about Z) and composed as
    C^-1 * S * R * C * T, the order given by the VRML97 spec.
    """
    cent = node.getFieldAsFloatTuple('center', None, ancestry)  # (0.0, 0.0)
    rot = node.getFieldAsFloat('rotation', None, ancestry)  # 0.0
    sca = node.getFieldAsFloatTuple('scale', None, ancestry)  # (1.0, 1.0)
    tx = node.getFieldAsFloatTuple('translation', None, ancestry)  # (0.0, 0.0)

    # cent is at a corner by default
    cent_mat = Matrix.Translation(Vector(cent).to_3d()) if cent else None
    cent_imat = cent_mat.inverted() if cent else None

    rot_mat = Matrix.Rotation(rot, 4, 'Z') if rot else None
    sca_mat = translateScale((sca[0], sca[1], 0.0)) if sca else None
    tx_mat = Matrix.Translation(Vector(tx).to_3d()) if tx else None

    new_mat = Matrix()
    # as specified in VRML97 docs
    for mtx in (cent_imat, sca_mat, rot_mat, cent_mat, tx_mat):
        if mtx:
            new_mat = new_mat * mtx

    return new_mat
+
+
# 90d X rotation
import math
MATRIX_Z_TO_Y = Matrix.Rotation(math.pi / 2.0, 4, 'X')  # used to convert between Y-up (VRML) and Z-up (Blender) meshes -- name suggests so
+
+
def getFinalMatrix(node, mtx, ancestry, global_matrix):
    """Accumulate every Transform ancestor of *node* (plus *node* itself
    when it is a Transform) onto *mtx*, then apply *global_matrix* to get
    the final worldspace matrix."""
    transform_nodes = [node_tx for node_tx in ancestry if node_tx.getSpec() == 'Transform']
    if node.getSpec() == 'Transform':
        transform_nodes.append(node)
    # Apply innermost transforms first.
    transform_nodes.reverse()

    if mtx is None:
        mtx = Matrix()

    for node_tx in transform_nodes:
        mtx = translateTransform(node_tx, ancestry) * mtx

    # worldspace matrix
    return global_matrix * mtx
+
+
def importMesh_IndexedFaceSet(geom, bpyima, ancestry):
    """Convert a VRML/X3D IndexedFaceSet node into a Blender mesh.

    Returns (bpymesh_or_None, ccw). Handles tri/quad faces, fan-
    triangulated ngons, UVs (explicit or bounding-box generated) and
    vertex colors. A dummy vertex 0 is inserted and all indices shifted
    by +1 to dodge Blender's zero-index face rotation ("EEKADOODLE").
    """
    ccw = geom.getFieldAsBool('ccw', True, ancestry)
    ifs_colorPerVertex = geom.getFieldAsBool('colorPerVertex', True, ancestry)  # per vertex or per face
    ifs_normalPerVertex = geom.getFieldAsBool('normalPerVertex', True, ancestry)

    # This is odd how point is inside Coordinate

    # VRML not x3d
    #coord = geom.getChildByName('coord') # 'Coordinate'

    coord = geom.getChildBySpec('Coordinate')  # works for x3d and vrml

    if coord:
        ifs_points = coord.getFieldAsArray('point', 3, ancestry)
    else:
        coord = []

    if not coord:
        print('\tWarnint: IndexedFaceSet has no points')
        return None, ccw

    # -1 delimited vertex index stream.
    ifs_faces = geom.getFieldAsArray('coordIndex', 0, ancestry)

    coords_tex = None
    if ifs_faces:  # In rare cases this causes problems - no faces but UVs???

        # WORKS - VRML ONLY
        # coords_tex = geom.getChildByName('texCoord')
        coords_tex = geom.getChildBySpec('TextureCoordinate')

        if coords_tex:
            ifs_texpoints = coords_tex.getFieldAsArray('point', 2, ancestry)
            ifs_texfaces = geom.getFieldAsArray('texCoordIndex', 0, ancestry)

            if not ifs_texpoints:
                # IF we have no coords, then dont bother
                coords_tex = None

    # WORKS - VRML ONLY
    # vcolor = geom.getChildByName('color')
    vcolor = geom.getChildBySpec('Color')
    vcolor_spot = None  # spot color when we dont have an array of colors
    if vcolor:
        # float to char
        ifs_vcol = [(0, 0, 0)]  # EEKADOODLE - vertex start at 1
        ifs_vcol.extend([col for col in vcolor.getFieldAsArray('color', 3, ancestry)])
        ifs_color_index = geom.getFieldAsArray('colorIndex', 0, ancestry)

        if not ifs_vcol:
            vcolor_spot = vcolor.getFieldAsFloatTuple('color', [], ancestry)

    # Convert faces into somthing blender can use
    edges = []

    # All lists are aligned!
    faces = []
    faces_uv = []  # if ifs_texfaces is empty then the faces_uv will match faces exactly.
    faces_orig_index = []  # for ngons, we need to know our original index

    if coords_tex and ifs_texfaces:
        do_uvmap = True
    else:
        do_uvmap = False

    # current_face = [0] # pointer anyone

    def add_face(face, fuvs, orig_index):
        # Route one polygon into faces/edges, fan-triangulating ngons.
        l = len(face)
        if l == 3 or l == 4:
            faces.append(face)
            # faces_orig_index.append(current_face[0])
            if do_uvmap:
                faces_uv.append(fuvs)

            faces_orig_index.append(orig_index)
        elif l == 2:
            edges.append(face)
        elif l > 4:
            for i in range(2, len(face)):
                faces.append([face[0], face[i - 1], face[i]])
                if do_uvmap:
                    faces_uv.append([fuvs[0], fuvs[i - 1], fuvs[i]])
                faces_orig_index.append(orig_index)
        else:
            # faces with 1 verts? pfft!
            # still will affect index ordering
            pass

    face = []
    fuvs = []
    orig_index = 0
    for i, fi in enumerate(ifs_faces):
        # ifs_texfaces and ifs_faces should be aligned
        if fi != -1:
            # face.append(int(fi)) # in rare cases this is a float
            # EEKADOODLE!!!
            # Annoyance where faces that have a zero index vert get rotated. This will then mess up UVs and VColors
            face.append(int(fi) + 1)  # in rare cases this is a float, +1 because of stupid EEKADOODLE :/

            if do_uvmap:
                if i >= len(ifs_texfaces):
                    print('\tWarning: UV Texface index out of range')
                    fuvs.append(ifs_texfaces[0])
                else:
                    fuvs.append(ifs_texfaces[i])
        else:
            add_face(face, fuvs, orig_index)
            face = []
            if do_uvmap:
                fuvs = []
            orig_index += 1

    # Flush the last face (files may omit the trailing -1).
    add_face(face, fuvs, orig_index)
    del add_face  # dont need this func anymore

    bpymesh = bpy.data.meshes.new(name="XXX")

    # EEKADOODLE: extra dummy vertex at index 0.
    bpymesh.vertices.add(1 + (len(ifs_points)))
    bpymesh.vertices.foreach_set("co", [0, 0, 0] + [a for v in ifs_points for a in v])  # XXX25 speed

    try:
        bpymesh.faces.add(len(faces))
        bpymesh.faces.foreach_set("vertices_raw", [a for f in faces for a in (f + [0] if len(f) == 3 else f)])  # XXX25 speed
    except KeyError:
        print("one or more vert indices out of range. corrupt file?")
        #for f in faces:
        #    bpymesh.faces.extend(faces, smooth=True)

    bpymesh.validate()
    bpymesh.update()

    if len(bpymesh.faces) != len(faces):
        print('\tWarning: adding faces did not work! file is invalid, not adding UVs or vcolors')
        return bpymesh, ccw

    # Apply UVs if we have them
    if not do_uvmap:
        faces_uv = faces  # fallback, we didnt need a uvmap in the first place, fallback to the face/vert mapping.
    if coords_tex:
        # Explicit texture coordinates from the file.
        uvlay = bpymesh.uv_textures.new()

        for i, f in enumerate(uvlay.data):
            f.image = bpyima
            fuv = faces_uv[i]  # uv indices
            for j, uv in enumerate(f.uv):
                try:
                    f.uv[j] = ifs_texpoints[fuv[j]]  # XXX25, speedup
                except:
                    print('\tWarning: UV Index out of range')
                    f.uv[j] = ifs_texpoints[0]  # XXX25, speedup

    elif bpyima and len(bpymesh.faces):
        # Oh Bugger! - we cant really use blenders ORCO for for texture space since texspace dosnt rotate.
        # we have to create VRML's coords as UVs instead.

        # VRML docs
        '''
        If the texCoord field is NULL, a default texture coordinate mapping is calculated using the local
        coordinate system bounding box of the shape. The longest dimension of the bounding box defines the S coordinates,
        and the next longest defines the T coordinates. If two or all three dimensions of the bounding box are equal,
        ties shall be broken by choosing the X, Y, or Z dimension in that order of preference.
        The value of the S coordinate ranges from 0 to 1, from one end of the bounding box to the other.
        The T coordinate ranges between 0 and the ratio of the second greatest dimension of the bounding box to the greatest dimension.
        '''

        # Note, S,T == U,V
        # U gets longest, V gets second longest
        xmin, ymin, zmin = ifs_points[0]
        xmax, ymax, zmax = ifs_points[0]
        for co in ifs_points:
            x, y, z = co
            if x < xmin:
                xmin = x
            if y < ymin:
                ymin = y
            if z < zmin:
                zmin = z

            if x > xmax:
                xmax = x
            if y > ymax:
                ymax = y
            if z > zmax:
                zmax = z

        xlen = xmax - xmin
        ylen = ymax - ymin
        zlen = zmax - zmin

        depth_min = xmin, ymin, zmin
        depth_list = [xlen, ylen, zlen]
        depth_sort = depth_list[:]
        depth_sort.sort()

        depth_idx = [depth_list.index(val) for val in depth_sort]

        axis_u = depth_idx[-1]
        axis_v = depth_idx[-2]  # second longest

        # Hack, swap these !!! TODO - Why swap??? - it seems to work correctly but should not.
        # axis_u,axis_v = axis_v,axis_u

        min_u = depth_min[axis_u]
        min_v = depth_min[axis_v]
        depth_u = depth_list[axis_u]
        depth_v = depth_list[axis_v]

        depth_list[axis_u]

        if axis_u == axis_v:
            # This should be safe because when 2 axies have the same length, the lower index will be used.
            axis_v += 1

        uvlay = bpymesh.uv_textures.new()

        # HACK !!! - seems to be compatible with Cosmo though.
        depth_v = depth_u = max(depth_v, depth_u)

        bpymesh_vertices = bpymesh.vertices[:]
        bpymesh_faces = bpymesh.faces[:]

        for j, f in enumerate(uvlay.data):
            f.image = bpyima
            fuv = f.uv
            f_v = bpymesh_faces[j].vertices[:]  # XXX25 speed

            for i, v in enumerate(f_v):
                co = bpymesh_vertices[v].co
                fuv[i] = (co[axis_u] - min_u) / depth_u, (co[axis_v] - min_v) / depth_v

    # Add vcote (vertex colors)
    if vcolor:
        collay = bpymesh.vertex_colors.new()

        for f_idx, f in enumerate(collay.data):
            fv = bpymesh.faces[f_idx].vertices[:]
            if len(fv) == 3:  # XXX speed
                fcol = f.color1, f.color2, f.color3
            else:
                fcol = f.color1, f.color2, f.color3, f.color4
            if ifs_colorPerVertex:
                for i, c in enumerate(fcol):
                    color_index = fv[i]  # color index is vert index
                    if ifs_color_index:
                        try:
                            color_index = ifs_color_index[color_index]
                        except:
                            print('\tWarning: per vertex color index out of range')
                            continue

                    if color_index < len(ifs_vcol):
                        c.r, c.g, c.b = ifs_vcol[color_index]
                    else:
                        #print('\tWarning: per face color index out of range')
                        pass
            else:
                if vcolor_spot:  # use 1 color, when ifs_vcol is []
                    for c in fcol:
                        c.r, c.g, c.b = vcolor_spot
                else:
                    color_index = faces_orig_index[f_idx]  # color index is face index
                    #print(color_index, ifs_color_index)
                    if ifs_color_index:
                        if color_index >= len(ifs_color_index):
                            print('\tWarning: per face color index out of range')
                            color_index = 0
                        else:
                            color_index = ifs_color_index[color_index]
                    try:
                        col = ifs_vcol[color_index]
                    except IndexError:
                        # TODO, look
                        col = (1.0, 1.0, 1.0)
                    for i, c in enumerate(fcol):
                        c.r, c.g, c.b = col

    # XXX25
    # bpymesh.vertices.delete([0, ]) # EEKADOODLE

    return bpymesh, ccw
+
+
def importMesh_IndexedLineSet(geom, ancestry):
    """Convert a VRML/X3D IndexedLineSet into a Blender 3D poly curve.

    Returns the new bpy curve datablock, or None when the node has no
    points. Per-vertex color is ignored (no Blender equivalent on curves).
    """
    # VRML not x3d
    #coord = geom.getChildByName('coord') # 'Coordinate'
    coord = geom.getChildBySpec('Coordinate')  # works for x3d and vrml
    if coord:
        points = coord.getFieldAsArray('point', 3, ancestry)
    else:
        points = []

    if not points:
        print('\tWarning: IndexedLineSet had no points')
        return None

    ils_lines = geom.getFieldAsArray('coordIndex', 0, ancestry)

    # Split the -1 delimited index stream into per-polyline index lists.
    # Renamed from 'lines' to avoid shadowing the module-level parse buffer.
    polylines = []
    line = []

    for il in ils_lines:
        if il == -1:
            polylines.append(line)
            line = []
        else:
            line.append(int(il))
    polylines.append(line)

    # vcolor = geom.getChildByName('color') # blender dosnt have per vertex color

    bpycurve = bpy.data.curves.new('IndexedCurve', 'CURVE')
    bpycurve.dimensions = '3D'

    for line in polylines:
        if not line:
            continue
        # NOTE: removed the original's unused 'co = points[line[0]]'
        # lookup; it was dead code and could raise IndexError on corrupt
        # files before the spline was even built.
        nu = bpycurve.splines.new('POLY')
        nu.points.add(len(line) - 1)  # the new nu has 1 point to begin with
        for il, pt in zip(line, nu.points):
            pt.co[0:3] = points[il]

    return bpycurve
+
+
def importMesh_PointSet(geom, ancestry):
    """Convert a VRML/X3D PointSet node into a vertex-only Blender mesh."""
    # 'Coordinate' works for both x3d and vrml ('coord' child is VRML-only).
    coord = geom.getChildBySpec('Coordinate')
    points = coord.getFieldAsArray('point', 3, ancestry) if coord else []

    # Per-vertex color is ignored - blender has no equivalent on loose verts.

    bpymesh = bpy.data.meshes.new("XXX")
    bpymesh.vertices.add(len(points))
    flat_coords = [axis_val for pt in points for axis_val in pt]
    bpymesh.vertices.foreach_set("co", flat_coords)

    # No need to validate
    bpymesh.update()
    return bpymesh
+
GLOBALS['CIRCLE_DETAIL'] = 12  # overrides the initial value of 16 set above; used by the primitive importers below
+
+
def bpy_ops_add_object_hack():  # XXX25, evil
    # Grab the mesh created by the immediately preceding
    # bpy.ops.mesh.primitive_*_add() call: unlink and delete the operator's
    # scene object so only the mesh datablock survives, and return it.
    scene = bpy.context.scene
    obj = scene.objects[0]
    scene.objects.unlink(obj)
    bpymesh = obj.data
    bpy.data.objects.remove(obj)
    return bpymesh
+
+
def importMesh_Sphere(geom, ancestry):
    """Create a UV-sphere mesh for a VRML/X3D Sphere node."""
    # Local renamed radius: the 'radius' field (default 0.5) is passed as
    # the primitive's 'size'.
    radius = geom.getFieldAsFloat('radius', 0.5, ancestry)

    bpy.ops.mesh.primitive_uv_sphere_add(segments=GLOBALS['CIRCLE_DETAIL'],
                                         ring_count=GLOBALS['CIRCLE_DETAIL'],
                                         size=radius,
                                         view_align=False,
                                         enter_editmode=False,
                                         )

    bpymesh = bpy_ops_add_object_hack()

    # VRML is Y-up, Blender is Z-up.
    bpymesh.transform(MATRIX_Z_TO_Y)
    return bpymesh
+
+
def importMesh_Cylinder(geom, ancestry):
    """Create a capped cylinder mesh for a VRML/X3D Cylinder node."""
    cyl_radius = geom.getFieldAsFloat('radius', 1.0, ancestry)
    cyl_height = geom.getFieldAsFloat('height', 2, ancestry)

    bpy.ops.mesh.primitive_cylinder_add(vertices=GLOBALS['CIRCLE_DETAIL'],
                                        radius=cyl_radius,
                                        depth=cyl_height,
                                        cap_ends=True,
                                        view_align=False,
                                        enter_editmode=False,
                                        )

    bpymesh = bpy_ops_add_object_hack()

    # VRML is Y-up, Blender is Z-up.
    bpymesh.transform(MATRIX_Z_TO_Y)

    # Warning - Rely in the order Blender adds verts
    # not nice design but wont change soon.
    bottom = geom.getFieldAsBool('bottom', True, ancestry)
    side = geom.getFieldAsBool('side', True, ancestry)
    top = geom.getFieldAsBool('top', True, ancestry)

    if not top:
        # XXX25: would delete the last vert (top tri-fan center);
        # vertex deletion is unavailable, so this is a no-op for now.
        pass

    if not bottom:
        # XXX25: would delete the second-last vert (bottom tri-fan center).
        pass

    if not side:
        # XXX25: would delete all quad faces (the cylinder wall).
        pass

    return bpymesh
+
+
def importMesh_Cone(geom, ancestry):
    """Create a capped cone mesh for a VRML/X3D Cone node."""
    base_radius = geom.getFieldAsFloat('bottomRadius', 1.0, ancestry)
    cone_height = geom.getFieldAsFloat('height', 2, ancestry)

    bpy.ops.mesh.primitive_cone_add(vertices=GLOBALS['CIRCLE_DETAIL'],
                                    radius=base_radius,
                                    depth=cone_height,
                                    cap_end=True,
                                    view_align=False,
                                    enter_editmode=False,
                                    )

    bpymesh = bpy_ops_add_object_hack()

    # VRML is Y-up, Blender is Z-up.
    bpymesh.transform(MATRIX_Z_TO_Y)

    # Warning - Rely in the order Blender adds verts
    # not nice design but wont change soon.
    bottom = geom.getFieldAsBool('bottom', True, ancestry)
    side = geom.getFieldAsBool('side', True, ancestry)

    if not bottom:
        # XXX25: would delete the last vert (bottom cap center);
        # vertex deletion is unavailable, so this is a no-op for now.
        pass
    if not side:
        # XXX25: would delete the second-last vert (the cone apex).
        pass

    return bpymesh
+
+
def importMesh_Box(geom, ancestry):
    """Create a box mesh for a VRML/X3D Box node, scaled to its 'size'."""
    size = geom.getFieldAsFloatTuple('size', (2.0, 2.0, 2.0), ancestry)

    bpy.ops.mesh.primitive_cube_add(view_align=False,
                                    enter_editmode=False,
                                    )

    bpymesh = bpy_ops_add_object_hack()

    # The default cube spans 2 units per axis, so scale by half the
    # requested size (same diagonal matrix the original built by hand).
    half_scale = translateScale((size[0] * 0.5, size[1] * 0.5, size[2] * 0.5))
    bpymesh.transform(half_scale)

    return bpymesh
+
+
def importShape(node, ancestry, global_matrix):
    """Convert a VRML/X3D Shape node into a Blender object in the scene.

    Reads the Shape's Appearance child (material, image texture and
    texture transform) and its geometry child, builds the matching
    Blender datablock via the importMesh_* helpers, links a new object
    into the current scene and applies the accumulated world matrix.
    """
    vrmlname = node.getDefName()
    if not vrmlname:
        vrmlname = 'Shape'

    # works 100% in vrml, but not x3d
    #appr = node.getChildByName('appearance') # , 'Appearance'
    #geom = node.getChildByName('geometry') # , 'IndexedFaceSet'

    # Works in vrml and x3d
    appr = node.getChildBySpec('Appearance')
    geom = node.getChildBySpec(['IndexedFaceSet', 'IndexedLineSet', 'PointSet', 'Sphere', 'Box', 'Cylinder', 'Cone'])

    # For now only import IndexedFaceSet's
    if geom:
        bpymat = None
        bpyima = None
        texmtx = None

        depth = 0  # so we can set alpha face flag later

        if appr:

            #mat = appr.getChildByName('material') # 'Material'
            #ima = appr.getChildByName('texture') # , 'ImageTexture'
            #if ima and ima.getSpec() != 'ImageTexture':
            #    print('\tWarning: texture type "%s" is not supported' % ima.getSpec())
            #    ima = None
            # textx = appr.getChildByName('textureTransform')

            mat = appr.getChildBySpec('Material')
            ima = appr.getChildBySpec('ImageTexture')

            textx = appr.getChildBySpec('TextureTransform')

            if textx:
                texmtx = translateTexTransform(textx, ancestry)

            # print(mat, ima)
            if mat or ima:

                if not mat:
                    mat = ima  # This is a bit dumb, but just means we use default values for all

                # all values between 0.0 and 1.0, defaults from VRML docs
                bpymat = bpy.data.materials.new("XXX")
                bpymat.ambient = mat.getFieldAsFloat('ambientIntensity', 0.2, ancestry)
                bpymat.diffuse_color = mat.getFieldAsFloatTuple('diffuseColor', [0.8, 0.8, 0.8], ancestry)

                # NOTE - blender dosnt support emmisive color
                # Store in mirror color and approximate with emit.
                emit = mat.getFieldAsFloatTuple('emissiveColor', [0.0, 0.0, 0.0], ancestry)
                bpymat.mirror_color = emit
                bpymat.emit = (emit[0] + emit[1] + emit[2]) / 3.0

                bpymat.specular_hardness = int(1 + (510 * mat.getFieldAsFloat('shininess', 0.2, ancestry)))  # 0-1 -> 1-511
                bpymat.specular_color = mat.getFieldAsFloatTuple('specularColor', [0.0, 0.0, 0.0], ancestry)
                bpymat.alpha = 1.0 - mat.getFieldAsFloat('transparency', 0.0, ancestry)
                if bpymat.alpha < 0.999:
                    bpymat.use_transparency = True

            if ima:
                ima_url = ima.getFieldAsString('url', None, ancestry)

                if ima_url is None:
                    try:
                        ima_url = ima.getFieldAsStringArray('url', ancestry)[0]  # in some cases we get a list of images.
                    except:
                        ima_url = None

                if ima_url is None:
                    print("\twarning, image with no URL, this is odd")
                else:
                    bpyima = image_utils.image_load(ima_url, os.path.dirname(node.getFilename()), place_holder=False, recursive=False, convert_callback=imageConvertCompat)
                    if bpyima:
                        texture = bpy.data.textures.new("XXX", 'IMAGE')
                        texture.image = bpyima

                        # Adds textures for materials (rendering)
                        try:
                            depth = bpyima.depth
                        except:
                            depth = -1

                        if depth == 32:
                            # Image has alpha
                            # NOTE(review): this branch still uses the 2.4x API
                            # (setTexture/Texture/Material) and will raise if hit;
                            # kept from the original port - confirm before enabling.
                            bpymat.setTexture(0, texture, Texture.TexCo.UV, Texture.MapTo.COL | Texture.MapTo.ALPHA)
                            texture.setImageFlags('MipMap', 'InterPol', 'UseAlpha')
                            bpymat.mode |= Material.Modes.ZTRANSP
                            bpymat.alpha = 0.0
                        else:
                            mtex = bpymat.texture_slots.add()
                            mtex.texture = texture
                            mtex.texture_coords = 'UV'
                            mtex.use_map_diffuse = True

                        ima_repS = ima.getFieldAsBool('repeatS', True, ancestry)
                        ima_repT = ima.getFieldAsBool('repeatT', True, ancestry)

                        # To make this work properly we'd need to scale the UV's too, better to ignore th
                        # texture.repeat = max(1, ima_repS * 512), max(1, ima_repT * 512)

                        if not ima_repS:
                            bpyima.use_clamp_x = True
                        if not ima_repT:
                            bpyima.use_clamp_y = True

        # Dispatch on the geometry node type.
        bpydata = None
        geom_spec = geom.getSpec()
        ccw = True
        if geom_spec == 'IndexedFaceSet':
            bpydata, ccw = importMesh_IndexedFaceSet(geom, bpyima, ancestry)
        elif geom_spec == 'IndexedLineSet':
            bpydata = importMesh_IndexedLineSet(geom, ancestry)
        elif geom_spec == 'PointSet':
            bpydata = importMesh_PointSet(geom, ancestry)
        elif geom_spec == 'Sphere':
            bpydata = importMesh_Sphere(geom, ancestry)
        elif geom_spec == 'Box':
            bpydata = importMesh_Box(geom, ancestry)
        elif geom_spec == 'Cylinder':
            bpydata = importMesh_Cylinder(geom, ancestry)
        elif geom_spec == 'Cone':
            bpydata = importMesh_Cone(geom, ancestry)
        else:
            print('\tWarning: unsupported type "%s"' % geom_spec)
            return

        if bpydata:
            vrmlname = vrmlname + geom_spec

            bpydata.name = vrmlname

            bpyob = node.blendObject = bpy.data.objects.new(vrmlname, bpydata)
            bpy.context.scene.objects.link(bpyob)

            if type(bpydata) == bpy.types.Mesh:
                is_solid = geom.getFieldAsBool('solid', True, ancestry)
                creaseAngle = geom.getFieldAsFloat('creaseAngle', None, ancestry)

                if creaseAngle is not None:
                    bpydata.auto_smooth_angle = creaseAngle
                    bpydata.use_auto_smooth = True

                # Only ever 1 material per shape
                if bpymat:
                    bpydata.materials.append(bpymat)

                if bpydata.uv_textures:

                    if depth == 32:  # set the faces alpha flag?
                        # NOTE(review): 'transp' uses the 2.4x Mesh module and is
                        # never read afterwards; appears to be leftover dead code.
                        transp = Mesh.FaceTranspModes.ALPHA
                        for f in bpydata.uv_textures.active.data:
                            f.blend_type = 'ALPHA'

                    if texmtx:
                        # Apply texture transform?
                        uv_copy = Vector()
                        for f in bpydata.uv_textures.active.data:
                            fuv = f.uv
                            for i, uv in enumerate(fuv):
                                uv_copy.x = uv[0]
                                uv_copy.y = uv[1]

                                fuv[i] = (uv_copy * texmtx)[0:2]
                    # Done transforming the texture

                # Must be here and not in IndexedFaceSet because it needs an object for the flip func. Messy :/
                if not ccw:
                    # bpydata.flipNormals()
                    # XXX25
                    pass

            # else could be a curve for example

            # Can transform data or object, better the object so we can instance the data
            #bpymesh.transform(getFinalMatrix(node))
            bpyob.matrix_world = getFinalMatrix(node, None, ancestry, global_matrix)
+
+
def importLamp_PointLight(node, ancestry):
    """Build a POINT lamp datablock from a VRML/X3D PointLight node.

    Returns (bpylamp, mtx) where mtx is the lamp's local matrix (a pure
    translation); importLamp() combines it with the node's ancestry.
    """
    vrmlname = node.getDefName()
    if not vrmlname:
        vrmlname = 'PointLight'

    # ambientIntensity = node.getFieldAsFloat('ambientIntensity', 0.0, ancestry)  # TODO
    # attenuation = node.getFieldAsFloatTuple('attenuation', (1.0, 0.0, 0.0), ancestry)  # TODO
    color = node.getFieldAsFloatTuple('color', (1.0, 1.0, 1.0), ancestry)
    intensity = node.getFieldAsFloat('intensity', 1.0, ancestry)  # max is documented to be 1.0 but some files have higher.
    location = node.getFieldAsFloatTuple('location', (0.0, 0.0, 0.0), ancestry)
    # is_on = node.getFieldAsBool('on', True, ancestry)  # TODO
    radius = node.getFieldAsFloat('radius', 100.0, ancestry)

    # Fix: name the lamp after the node (was the "ToDo" placeholder),
    # consistent with importLamp_DirectionalLight / importLamp_SpotLight.
    bpylamp = bpy.data.lamps.new(vrmlname, 'POINT')
    bpylamp.energy = intensity
    bpylamp.distance = radius
    bpylamp.color = color

    mtx = Matrix.Translation(Vector(location))

    return bpylamp, mtx
+
+
def importLamp_DirectionalLight(node, ancestry):
    """Build a SUN lamp datablock from a DirectionalLight node.

    Returns (bpylamp, mtx) where mtx orients the lamp along the node's
    direction vector.
    """
    vrmlname = node.getDefName() or 'DirectLight'

    # ambientIntensity = node.getFieldAsFloat('ambientIntensity', 0.0)  # TODO
    color = node.getFieldAsFloatTuple('color', (1.0, 1.0, 1.0), ancestry)
    direction = node.getFieldAsFloatTuple('direction', (0.0, 0.0, -1.0), ancestry)
    intensity = node.getFieldAsFloat('intensity', 1.0, ancestry)  # max is documented to be 1.0 but some files have higher.
    # is_on = node.getFieldAsBool('on', True, ancestry)  # TODO

    bpylamp = bpy.data.lamps.new(vrmlname, 'SUN')
    bpylamp.energy = intensity
    bpylamp.color = color

    # lamps have their direction as -z, yup
    mtx = Vector(direction).to_track_quat('-Z', 'Y').to_matrix().to_4x4()

    return bpylamp, mtx
+
+# looks like default values for beamWidth and cutOffAngle were swapped in VRML docs.
+
+
def importLamp_SpotLight(node, ancestry):
    """Build a SPOT lamp datablock from a VRML/X3D SpotLight node.

    Returns (bpylamp, mtx) where mtx places and orients the lamp.
    """
    vrmlname = node.getDefName() or 'SpotLight'

    # ambientIntensity = geom.getFieldAsFloat('ambientIntensity', 0.0, ancestry)  # TODO
    # attenuation = geom.getFieldAsFloatTuple('attenuation', (1.0, 0.0, 0.0), ancestry)  # TODO
    beamWidth = node.getFieldAsFloat('beamWidth', 1.570796, ancestry)  # max is documented to be 1.0 but some files have higher.
    color = node.getFieldAsFloatTuple('color', (1.0, 1.0, 1.0), ancestry)
    cutOffAngle = node.getFieldAsFloat('cutOffAngle', 0.785398, ancestry) * 2.0  # max is documented to be 1.0 but some files have higher.
    direction = node.getFieldAsFloatTuple('direction', (0.0, 0.0, -1.0), ancestry)
    intensity = node.getFieldAsFloat('intensity', 1.0, ancestry)  # max is documented to be 1.0 but some files have higher.
    location = node.getFieldAsFloatTuple('location', (0.0, 0.0, 0.0), ancestry)
    # is_on = node.getFieldAsBool('on', True, ancestry)  # TODO
    radius = node.getFieldAsFloat('radius', 100.0, ancestry)

    bpylamp = bpy.data.lamps.new(vrmlname, 'SPOT')
    bpylamp.energy = intensity
    bpylamp.distance = radius
    bpylamp.color = color
    bpylamp.spot_size = cutOffAngle

    # Map beamWidth/cutOffAngle onto Blender's spot blend factor.
    if beamWidth > cutOffAngle:
        blend = 0.0
    elif cutOffAngle == 0.0:  # this should never happen!
        blend = 0.5
    else:
        blend = beamWidth / cutOffAngle
    bpylamp.spot_blend = blend

    # Convert

    # lamps have their direction as -z, y==up
    rot_mtx = Vector(direction).to_track_quat('-Z', 'Y').to_matrix().to_4x4()
    mtx = Matrix.Translation(location) * rot_mtx

    return bpylamp, mtx
+
+
def importLamp(node, spec, ancestry, global_matrix):
    """Create and link a lamp object for a PointLight/DirectionalLight/SpotLight node.

    Raises ValueError for any other spec.
    """
    handlers = {
        'PointLight': importLamp_PointLight,
        'DirectionalLight': importLamp_DirectionalLight,
        'SpotLight': importLamp_SpotLight,
    }
    handler = handlers.get(spec)
    if handler is None:
        print("Error, not a lamp")
        raise ValueError

    bpylamp, mtx = handler(node, ancestry)

    bpyob = node.blendObject = bpy.data.objects.new("TODO", bpylamp)
    bpy.context.scene.objects.link(bpyob)

    bpyob.matrix_world = getFinalMatrix(node, mtx, ancestry, global_matrix)
+
+
def importViewpoint(node, ancestry, global_matrix):
    """Create a camera object from a VRML/X3D Viewpoint node and link it."""
    name = node.getDefName() or 'Viewpoint'

    fieldOfView = node.getFieldAsFloat('fieldOfView', 0.785398, ancestry)  # max is documented to be 1.0 but some files have higher.
    # jump = node.getFieldAsBool('jump', True, ancestry)
    orientation = node.getFieldAsFloatTuple('orientation', (0.0, 0.0, 1.0, 0.0), ancestry)
    position = node.getFieldAsFloatTuple('position', (0.0, 0.0, 0.0), ancestry)
    description = node.getFieldAsString('description', '', ancestry)  # read but currently unused

    bpycam = bpy.data.cameras.new(name)
    bpycam.angle = fieldOfView

    # Place then orient (axis/angle rotation).
    translation = Matrix.Translation(Vector(position))
    mtx = translation * translateRotation(orientation)

    bpyob = node.blendObject = bpy.data.objects.new(name, bpycam)
    bpy.context.scene.objects.link(bpyob)
    bpyob.matrix_world = getFinalMatrix(node, mtx, ancestry, global_matrix)
+
+
def importTransform(node, ancestry, global_matrix):
    """Create an empty for a VRML Transform node so children can parent to it."""
    name = node.getDefName() or 'Transform'

    empty = node.blendObject = bpy.data.objects.new(name, None)
    bpy.context.scene.objects.link(empty)

    empty.matrix_world = getFinalMatrix(node, None, ancestry, global_matrix)

    # Keep the empties small so they are not too annoying in the viewport.
    empty.empty_draw_type = 'PLAIN_AXES'
    empty.empty_draw_size = 0.2
+
+
+#def importTimeSensor(node):
def action_fcurve_ensure(action, data_path, array_index):
    """Return *action*'s F-Curve for (data_path, array_index), creating it if missing."""
    for curve in action.fcurves:
        if curve.data_path == data_path and curve.array_index == array_index:
            return curve

    return action.fcurves.new(data_path=data_path, index=array_index)
+
+
def translatePositionInterpolator(node, action, ancestry):
    """Bake a VRML PositionInterpolator into location F-Curves on *action*."""
    times = node.getFieldAsArray('key', 0, ancestry)
    values = node.getFieldAsArray('keyValue', 3, ancestry)

    curves = (action_fcurve_ensure(action, "location", 0),
              action_fcurve_ensure(action, "location", 1),
              action_fcurve_ensure(action, "location", 2))

    for i, time in enumerate(times):
        # Skip entries that are missing or not a 3-tuple.
        try:
            x, y, z = values[i]
        except:
            continue

        for fcu, val in zip(curves, (x, y, z)):
            fcu.keyframe_points.insert(time, val)

    # Interpolators are linear between keys per the VRML spec.
    for fcu in curves:
        for kf in fcu.keyframe_points:
            kf.interpolation = 'LINEAR'
+
+
def translateOrientationInterpolator(node, action, ancestry):
    """Bake a VRML OrientationInterpolator into euler rotation F-Curves on *action*."""
    times = node.getFieldAsArray('key', 0, ancestry)
    values = node.getFieldAsArray('keyValue', 4, ancestry)

    curves = (action_fcurve_ensure(action, "rotation_euler", 0),
              action_fcurve_ensure(action, "rotation_euler", 1),
              action_fcurve_ensure(action, "rotation_euler", 2))

    for i, time in enumerate(times):
        # Skip entries that are missing or not a 4-tuple (axis + angle).
        try:
            x, y, z, w = values[i]
        except:
            continue

        # Axis/angle -> rotation matrix -> euler, then key each channel.
        eul = translateRotation((x, y, z, w)).to_euler()
        for fcu, val in zip(curves, (eul.x, eul.y, eul.z)):
            fcu.keyframe_points.insert(time, val)

    # Interpolators are linear between keys per the VRML spec.
    for fcu in curves:
        for kf in fcu.keyframe_points:
            kf.interpolation = 'LINEAR'
+
+
def translateScalarInterpolator(node, action, ancestry):
    """Bake a VRML scale interpolator into scale F-Curves on *action*.

    Fixes over the original (which was marked "Untested!"):
    - keyValue entries are 3-component scale vectors, so group the
      array by 3 (the previous value of 4 was apparently copied from
      the orientation interpolator and would mis-group the data).
    - use keyframe_points.insert(), matching the position/orientation
      translators above.
    - set LINEAR interpolation, consistent with the other translators.
    """
    key = node.getFieldAsArray('key', 0, ancestry)
    keyValue = node.getFieldAsArray('keyValue', 3, ancestry)

    sca_x = action_fcurve_ensure(action, "scale", 0)
    sca_y = action_fcurve_ensure(action, "scale", 1)
    sca_z = action_fcurve_ensure(action, "scale", 2)

    for i, time in enumerate(key):
        try:
            x, y, z = keyValue[i]
        except:
            continue

        sca_x.keyframe_points.insert(time, x)
        sca_y.keyframe_points.insert(time, y)
        sca_z.keyframe_points.insert(time, z)

    for fcu in (sca_x, sca_y, sca_z):
        for kf in fcu.keyframe_points:
            kf.interpolation = 'LINEAR'
+
+
def translateTimeSensor(node, action, ancestry):
    '''
    Apply a time sensor to an action, VRML has many combinations of loop/start/stop/cycle times
    to give different results, for now just do the basics
    '''

    # XXX25 TODO - time sensors are not ported to the 2.5x API yet,
    # so this is currently a no-op.
    if 1:
        return

    # NOTE(review): everything below is unreachable legacy 2.4x code
    # (action.addCurve / Blender.IpoCurve no longer exist); kept as a
    # reference for the eventual port.
    time_cu = action.addCurve('Time')
    time_cu.interpolation = Blender.IpoCurve.InterpTypes.LINEAR

    cycleInterval = node.getFieldAsFloat('cycleInterval', None, ancestry)

    startTime = node.getFieldAsFloat('startTime', 0.0, ancestry)
    stopTime = node.getFieldAsFloat('stopTime', 250.0, ancestry)

    if cycleInterval != None:
        stopTime = startTime + cycleInterval

    loop = node.getFieldAsBool('loop', False, ancestry)

    time_cu.append((1 + startTime, 0.0))
    time_cu.append((1 + stopTime, 1.0 / 10.0))  # anoying, the UI uses /10

    if loop:
        time_cu.extend = Blender.IpoCurve.ExtendTypes.CYCLIC  # or - EXTRAP, CYCLIC_EXTRAP, CONST,
+
+
def importRoute(node, ancestry):
    '''
    Animation route only at the moment
    '''
    # Scans the node's ROUTE statements and bakes each recognized
    # interpolator (position / orientation / scale / time) into an
    # action stored in the file's route-ipo dictionary, keyed by the
    # route target's DEF id.

    if not hasattr(node, 'fields'):
        return

    routeIpoDict = node.getRouteIpoDict()

    def getIpo(id):
        # One action per route target, created on demand.
        try:
            action = routeIpoDict[id]
        except:
            action = routeIpoDict[id] = bpy.data.actions.new('web3d_ipo')
        return action

    # for getting definitions
    defDict = node.getDefDict()
    '''
    Handles routing nodes to eachother

ROUTE vpPI.value_changed TO champFly001.set_position
ROUTE vpOI.value_changed TO champFly001.set_orientation
ROUTE vpTs.fraction_changed TO vpPI.set_fraction
ROUTE vpTs.fraction_changed TO vpOI.set_fraction
ROUTE champFly001.bindTime TO vpTs.set_startTime
    '''

    #from_id, from_type = node.id[1].split('.')
    #to_id, to_type = node.id[3].split('.')

    #value_changed
    set_position_node = None
    set_orientation_node = None
    time_node = None

    for field in node.fields:
        if field and field[0] == 'ROUTE':
            # A ROUTE field looks like: ('ROUTE', 'src.event', 'TO', 'dst.event')
            try:
                from_id, from_type = field[1].split('.')
                to_id, to_type = field[3].split('.')
            except:
                print("Warning, invalid ROUTE", field)
                continue

            if from_type == 'value_changed':
                if to_type == 'set_position':
                    action = getIpo(to_id)
                    set_data_from_node = defDict[from_id]
                    translatePositionInterpolator(set_data_from_node, action, ancestry)

                if to_type in {'set_orientation', 'rotation'}:
                    action = getIpo(to_id)
                    set_data_from_node = defDict[from_id]
                    translateOrientationInterpolator(set_data_from_node, action, ancestry)

                if to_type == 'set_scale':
                    action = getIpo(to_id)
                    set_data_from_node = defDict[from_id]
                    translateScalarInterpolator(set_data_from_node, action, ancestry)

            elif from_type == 'bindTime':
                action = getIpo(from_id)
                time_node = defDict[to_id]
                translateTimeSensor(time_node, action, ancestry)
+
+
def load_web3d(path,
               PREF_FLAT=False,
               PREF_CIRCLE_DIV=16,
               global_matrix=None,
               HELPER_FUNC=None,
               ):
    """Parse and import a VRML (.wrl) or X3D (.x3d) file into the scene.

    path: file to import; the parser is chosen by extension.
    PREF_FLAT: when True, skip Transform empties (flat hierarchy).
    PREF_CIRCLE_DIV: vertex count used for circle-based primitives.
    global_matrix: optional root transform applied to all objects.
    HELPER_FUNC: optional callback run on every node before the
        built-in handlers, so external scripts can extend the importer.
    """

    # Used when adding blender primitives
    GLOBALS['CIRCLE_DETAIL'] = PREF_CIRCLE_DIV

    #root_node = vrml_parse('/_Cylinder.wrl')
    if path.lower().endswith('.x3d'):
        root_node, msg = x3d_parse(path)
    else:
        root_node, msg = vrml_parse(path)

    if not root_node:
        print(msg)
        return

    if global_matrix is None:
        global_matrix = Matrix()

    # fill with tuples - (node, [parents-parent, parent])
    all_nodes = root_node.getSerialized([], [])

    # Pass 1: create datablocks/objects for every supported node type.
    for node, ancestry in all_nodes:
        #if 'castle.wrl' not in node.getFilename():
        #    continue

        spec = node.getSpec()
        '''
        prefix = node.getPrefix()
        if prefix=='PROTO':
            pass
        else
        '''
        if HELPER_FUNC and HELPER_FUNC(node, ancestry):
            # Note, include this function so the VRML/X3D importer can be extended
            # by an external script. - gets first pick
            pass
        if spec == 'Shape':
            importShape(node, ancestry, global_matrix)
        elif spec in {'PointLight', 'DirectionalLight', 'SpotLight'}:
            importLamp(node, spec, ancestry, global_matrix)
        elif spec == 'Viewpoint':
            importViewpoint(node, ancestry, global_matrix)
        elif spec == 'Transform':
            # Only use transform nodes when we are not importing a flat object hierarchy
            if PREF_FLAT == False:
                importTransform(node, ancestry, global_matrix)
        '''
        # These are delt with later within importRoute
        elif spec=='PositionInterpolator':
            action = bpy.data.ipos.new('web3d_ipo', 'Object')
            translatePositionInterpolator(node, action)
        '''

    # After we import all nodes, route events - anim paths
    for node, ancestry in all_nodes:
        importRoute(node, ancestry)

    # Pass 3: attach the actions built by importRoute to their objects.
    for node, ancestry in all_nodes:
        if node.isRoot():
            # we know that all nodes referenced from will be in
            # routeIpoDict so no need to run node.getDefDict() for every node.
            routeIpoDict = node.getRouteIpoDict()
            defDict = node.getDefDict()

            for key, action in routeIpoDict.items():

                # Assign anim curves
                node = defDict[key]
                if node.blendObject is None:  # Add an object if we need one for animation
                    node.blendObject = bpy.data.objects.new('AnimOb', None)  # , name)
                    bpy.context.scene.objects.link(node.blendObject)

                if node.blendObject.animation_data is None:
                    node.blendObject.animation_data_create()

                node.blendObject.animation_data.action = action

    # Add in hierarchy
    if PREF_FLAT == False:
        child_dict = {}
        for node, ancestry in all_nodes:
            if node.blendObject:
                blendObject = None

                # Get the last parent
                i = len(ancestry)
                while i:
                    i -= 1
                    blendObject = ancestry[i].blendObject
                    if blendObject:
                        break

                if blendObject:
                    # Parent Slow, - 1 liner but works
                    # blendObject.makeParent([node.blendObject], 0, 1)

                    # Parent FAST
                    try:
                        child_dict[blendObject].append(node.blendObject)
                    except:
                        child_dict[blendObject] = [node.blendObject]

        # Parent
        for parent, children in child_dict.items():
            for c in children:
                c.parent = parent

        # update deps
        bpy.context.scene.update()
        del child_dict
+
+
def load(operator, context, filepath="", global_matrix=None):
    """Operator entry point: import *filepath* as a flat VRML/X3D scene."""
    options = dict(PREF_FLAT=True,
                   PREF_CIRCLE_DIV=16,
                   global_matrix=global_matrix)
    load_web3d(filepath, **options)

    return {'FINISHED'}
diff --git a/io_shape_mdd/__init__.py b/io_shape_mdd/__init__.py
new file mode 100644
index 00000000..bc195735
--- /dev/null
+++ b/io_shape_mdd/__init__.py
@@ -0,0 +1,138 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
# Add-on metadata read by Blender's add-on manager.
bl_info = {
    "name": "NewTek MDD format",
    "author": "Bill L.Nieuwendorp",
    "blender": (2, 5, 7),
    "api": 35622,
    "location": "File > Import-Export",
    "description": "Import-Export MDD as mesh shape keys",
    "warning": "",
    # NOTE(review): the wiki page name ends in "NewTek_OBJ" - looks like a
    # copy/paste from the OBJ add-on; confirm the intended page name.
    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
        "Scripts/Import-Export/NewTek_OBJ",
    "tracker_url": "",
    "support": 'OFFICIAL',
    "category": "Import-Export"}

# To support reload properly, try to access a package var, if it's there, reload everything
if "bpy" in locals():
    import imp
    if "import_mdd" in locals():
        imp.reload(import_mdd)
    if "export_mdd" in locals():
        imp.reload(export_mdd)
+
+
+import bpy
+from bpy.props import StringProperty, IntProperty
+from bpy_extras.io_utils import ExportHelper, ImportHelper
+
+
class ImportMDD(bpy.types.Operator, ImportHelper):
    '''Import MDD vertex keyframe file to shape keys'''
    bl_idname = "import_shape.mdd"
    bl_label = "Import MDD"

    filename_ext = ".mdd"
    filter_glob = StringProperty(default="*.mdd", options={'HIDDEN'})

    frame_start = IntProperty(name="Start Frame",
                              description="Start frame for inserting animation",
                              min=-300000, max=300000,
                              default=0)
    frame_step = IntProperty(name="Step", min=1, max=1000, default=1)

    @classmethod
    def poll(cls, context):
        # Only meaningful with an active mesh object.
        ob = context.active_object
        return (ob and ob.type == 'MESH')

    def execute(self, context):
        # Default the start frame to the scene's current frame when unset.
        if not self.frame_start:
            self.frame_start = context.scene.frame_current

        from . import import_mdd
        keywords = self.as_keywords(ignore=("filter_glob",))
        return import_mdd.load(self, context, **keywords)
+
+
class ExportMDD(bpy.types.Operator, ExportHelper):
    '''Animated mesh to MDD vertex keyframe file'''
    bl_idname = "export_shape.mdd"
    bl_label = "Export MDD"

    filename_ext = ".mdd"
    filter_glob = StringProperty(default="*.mdd", options={'HIDDEN'})

    # Hard limits for the frame/fps operator properties below.
    minframe = 1
    maxframe = 300000
    minfps = 1
    maxfps = 120

    # List of operator properties, the attributes will be assigned
    # to the class instance from the operator settings before calling.
    fps = IntProperty(name="Frames Per Second",
                      description="Number of frames/second",
                      min=minfps, max=maxfps,
                      default=25)
    frame_start = IntProperty(name="Start Frame",
                              description="Start frame for baking",
                              min=minframe, max=maxframe,
                              default=1)
    frame_end = IntProperty(name="End Frame",
                            description="End frame for baking",
                            min=minframe, max=maxframe,
                            default=250)

    @classmethod
    def poll(cls, context):
        # Only meaningful with an active mesh object.
        active = context.active_object
        return (active and active.type == 'MESH')

    def execute(self, context):
        # Fall back to scene settings for any unset values.
        scene = context.scene
        if not self.frame_start:
            self.frame_start = scene.frame_start
        if not self.frame_end:
            self.frame_end = scene.frame_end
        if not self.fps:
            self.fps = scene.render.fps

        from . import export_mdd
        keywords = self.as_keywords(ignore=("check_existing", "filter_glob"))
        return export_mdd.save(self, context, **keywords)
+
+
def menu_func_import(self, context):
    """Add the MDD importer entry to the File > Import menu."""
    layout = self.layout
    layout.operator(ImportMDD.bl_idname, text="Lightwave Point Cache (.mdd)")
+
+
def menu_func_export(self, context):
    """Add the MDD exporter entry to the File > Export menu."""
    layout = self.layout
    layout.operator(ExportMDD.bl_idname, text="Lightwave Point Cache (.mdd)")
+
+
def register():
    """Register this module's operators and hook up the menu entries."""
    bpy.utils.register_module(__name__)
    bpy.types.INFO_MT_file_import.append(menu_func_import)
    bpy.types.INFO_MT_file_export.append(menu_func_export)
+
+
def unregister():
    """Unregister this module's operators and remove the menu entries."""
    bpy.utils.unregister_module(__name__)
    bpy.types.INFO_MT_file_import.remove(menu_func_import)
    bpy.types.INFO_MT_file_export.remove(menu_func_export)
+
# Allow running this file directly from Blender's text editor.
if __name__ == "__main__":
    register()
diff --git a/io_shape_mdd/export_mdd.py b/io_shape_mdd/export_mdd.py
new file mode 100644
index 00000000..85163280
--- /dev/null
+++ b/io_shape_mdd/export_mdd.py
@@ -0,0 +1,130 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+# Contributors: Bill L.Nieuwendorp
+
+"""
+This script Exports Lightwaves MotionDesigner format.
+
+The .mdd format has become quite a popular Pipeline format<br>
+for moving animations from package to package.
+
+Be sure not to use modifiers that change the number or order of verts in the mesh
+"""
+
+import bpy
+import mathutils
+from struct import pack
+
+
def zero_file(filepath):
    '''
    If a file fails, this replaces it with 1 char, better not remove it?
    '''
    # Use a context manager so the handle is closed even if the write fails.
    with open(filepath, 'w') as file:
        file.write('\n')  # apparently macosx needs some data in a blank file?
+
+
def check_vertcount(mesh, vertcount):
    '''
    check and make sure the vertcount is consistent throughout the frame range

    Raises Exception when *mesh* no longer has *vertcount* vertices.

    Note: the original version had `f.close()` / `zero_file(filepath)` /
    `return` lines after the raise; they were unreachable and referenced
    names undefined in this scope, so they have been removed - the
    caller is responsible for cleaning up its output file.
    '''
    if len(mesh.vertices) != vertcount:
        raise Exception('Error, number of verts has changed during animation, cannot export')
+
+
def save(operator, context, filepath="", frame_start=1, frame_end=300, fps=25):
    """Export the active object's animated vertex positions to an MDD file.

    Writes a big-endian header (frame count, vertex count), the frame
    times in seconds, one rest-pose vertex block, then one XYZ block per
    frame over [frame_start, frame_end].  The vertex count must stay
    constant across the range (see check_vertcount).
    """
    # Legacy 2.4x code kept for reference:
    """
    Blender.Window.WaitCursor(1)

    mesh_orig = Mesh.New()
    mesh_orig.getFromObject(obj.name)
    """

    scene = context.scene
    obj = context.object

    if bpy.ops.object.mode_set.poll():
        bpy.ops.object.mode_set(mode='OBJECT')

    orig_frame = scene.frame_current
    scene.frame_set(frame_start)
    me = obj.to_mesh(scene, True, 'PREVIEW')

    #Flip y and z
    mat_flip = mathutils.Matrix(((1.0, 0.0, 0.0, 0.0), \
                                 (0.0, 0.0, 1.0, 0.0), \
                                 (0.0, 1.0, 0.0, 0.0), \
                                 (0.0, 0.0, 0.0, 1.0), \
                                 ))

    numverts = len(me.vertices)

    numframes = frame_end - frame_start + 1
    fps = float(fps)
    f = open(filepath, 'wb')  # no Errors yet:Safe to create file

    # Write the header
    f.write(pack(">2i", numframes, numverts))

    # Write the frame times (should we use the time IPO??)
    f.write(pack(">%df" % (numframes), *[frame / fps for frame in range(numframes)]))  # seconds

    #rest frame needed to keep frames in sync
    """
    Blender.Set('curframe', frame_start)
    me_tmp.getFromObject(obj.name)
    """

    check_vertcount(me, numverts)
    # Bake world transform and Y/Z flip into the exported coordinates.
    me.transform(mat_flip * obj.matrix_world)
    f.write(pack(">%df" % (numverts * 3), *[axis for v in me.vertices for axis in v.co]))

    for frame in range(frame_start, frame_end + 1):  # in order to start at desired frame
        """
        Blender.Set('curframe', frame)
        me_tmp.getFromObject(obj.name)
        """

        scene.frame_set(frame)
        me = obj.to_mesh(scene, True, 'PREVIEW')
        check_vertcount(me, numverts)
        me.transform(mat_flip * obj.matrix_world)

        # Write the vertex data
        f.write(pack(">%df" % (numverts * 3), *[axis for v in me.vertices for axis in v.co]))

    """
    me_tmp.vertices= None
    """
    f.close()

    print('MDD Exported: %r frames:%d\n' % (filepath, numframes - 1))
    """
    Blender.Window.WaitCursor(0)
    Blender.Set('curframe', orig_frame)
    """
    # Restore the frame the user was on.
    scene.frame_set(orig_frame)

    return {'FINISHED'}
diff --git a/io_shape_mdd/import_mdd.py b/io_shape_mdd/import_mdd.py
new file mode 100644
index 00000000..0fdc78cb
--- /dev/null
+++ b/io_shape_mdd/import_mdd.py
@@ -0,0 +1,102 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+# mdd importer by Bill L.Nieuwendorp
+# conversion to blender 2.5: Ivo Grigull (loolarge)
+#
+# Warning if the vertex order or vertex count differs from the
+# origonal model the mdd was Baked out from their will be Strange
+# behavior
+#
+# vertex animation to ShapeKeys with ipo and gives the frame a value of 1.0
+# A modifier to read mdd files would be Ideal but thats for another day :)
+#
+# Please send any fixes,updates,bugs to Slow67_at_Gmail.com
+# Bill Niewuendorp
+
+import bpy
+from struct import unpack
+
+
def load(operator, context, filepath, frame_start=0, frame_step=1):
    """Import an MDD vertex cache onto the active mesh as shape keys.

    One shape key is created per cached frame and keyframed so it is
    fully on (value 1.0) only at its own frame.  The mesh must have the
    same vertex count/order as the mesh the MDD was baked from.

    NOTE(review): frame_step is accepted but currently unused.
    """

    scene = context.scene
    obj = context.object

    print('\n\nimporting mdd %r' % filepath)

    if bpy.ops.object.mode_set.poll():
        bpy.ops.object.mode_set(mode='OBJECT')

    file = open(filepath, 'rb')
    # Header: big-endian frame count and point count.
    frames, points = unpack(">2i", file.read(8))
    # Frame times in seconds; read to advance the file, currently unused.
    time = unpack((">%df" % frames), file.read(frames * 4))

    print('\tpoints:%d frames:%d' % (points, frames))

    # If target object doesn't have Basis shape key, create it.
    try:
        num_keys = len(obj.data.shape_keys.keys)
    except:
        basis = obj.shape_key_add()
        basis.name = "Basis"
        obj.data.update()

    scene.frame_current = frame_start

    def UpdateMesh(ob, fr):
        # Reads one frame of vertex data from `file` into a new shape
        # key and keys it 0 -> 1 -> 0 around the current scene frame.

        # Insert new shape key
        new_shapekey = obj.shape_key_add()
        new_shapekey.name = ("frame_%.4d" % fr)
        new_shapekey_name = new_shapekey.name

        obj.active_shape_key_index = len(obj.data.shape_keys.keys) - 1
        index = len(obj.data.shape_keys.keys) - 1
        obj.show_only_shape_key = True

        verts = obj.data.shape_keys.keys[len(obj.data.shape_keys.keys) - 1].data

        for v in verts:  # 12 is the size of 3 floats
            v.co[:] = unpack('>3f', file.read(12))
        # me.update()
        obj.show_only_shape_key = False

        # insert keyframes
        shape_keys = obj.data.shape_keys

        # Key value 0.0 one frame before...
        scene.frame_current -= 1
        obj.data.shape_keys.keys[index].value = 0.0
        shape_keys.keys[len(obj.data.shape_keys.keys) - 1].keyframe_insert("value")

        # ...1.0 on the frame itself...
        scene.frame_current += 1
        obj.data.shape_keys.keys[index].value = 1.0
        shape_keys.keys[len(obj.data.shape_keys.keys) - 1].keyframe_insert("value")

        # ...and 0.0 one frame after, leaving frame_current advanced by 1.
        scene.frame_current += 1
        obj.data.shape_keys.keys[index].value = 0.0
        shape_keys.keys[len(obj.data.shape_keys.keys) - 1].keyframe_insert("value")

        obj.data.update()

    for i in range(frames):
        UpdateMesh(obj, i)

    return {'FINISHED'}
diff --git a/light_field_tools/__init__.py b/light_field_tools/__init__.py
new file mode 100644
index 00000000..862c399b
--- /dev/null
+++ b/light_field_tools/__init__.py
@@ -0,0 +1,119 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+
# Add-on metadata read by Blender's add-on manager (must be named bl_info).
bl_info = {
    'name': 'Light Field Tools',
    'author': 'Aurel Wildfellner',
    'description': 'Tools to create a light field camera and projector',
    'version': (0, 2, 1),
    'blender': (2, 5, 7),
    'api': 36103,
    'location': 'View3D > Tool Shelf > Light Field Tools',
    'url': 'http://www.jku.at/cg/',
    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/Scripts/Render/Light_Field_Tools",
    "tracker_url": "http://projects.blender.org/tracker/index.php?func=detail&aid=25719",
    'category': 'Render'
}
+
+
+if "bpy" in locals():
+ import imp
+ imp.reload(light_field_tools)
+else:
+ from . import light_field_tools
+
+
+import bpy
+from bpy.props import *
+
+
# Scene-level settings for the light field tools, shown in the UI panel.
class LightFieldPropertyGroup(bpy.types.PropertyGroup):
    """Holds all user-tweakable parameters of the light field rig."""

    # Field of view of the camera / beam angle of the spots, in radians.
    angle = bpy.props.FloatProperty(
        name="Angle",
        # 40 degrees
        default=0.69813170079,
        min=0,
        # 172 degrees
        max=3.001966313430247,
        precision=2,
        subtype='ANGLE',
        description="Field of view of camera and angle of beam for spotlights")

    # Number of samples per row of the (square) grid.
    row_length = bpy.props.IntProperty(
        name="Row Length",
        default=1,
        min=1,
        description="The number of cameras/lights in one row")

    create_handler = bpy.props.BoolProperty(
        name="Handler",
        default=True,
        description="Creates an empty object, to which the cameras and spotlights are parented to")

    do_camera = bpy.props.BoolProperty(
        name="Create Camera",
        default=True,
        description="A light field camera is created")

    animate_camera = bpy.props.BoolProperty(
        name="Animate Camera",
        default=True,
        description="Animates a single camera, so not multiple cameras get created")

    do_projection = bpy.props.BoolProperty(
        name="Create Projector",
        default=False,
        description="A light field projector is created")

    texture_path = bpy.props.StringProperty(
        name="Texture Path",
        description="From this path textures for the spotlights will be loaded",
        subtype='DIR_PATH')

    light_intensity = bpy.props.FloatProperty(
        name="Light Intensity",
        default=2,
        min=0,
        precision=3,
        description="Total intensity of all lamps")

    # Blending of the spotlights.
    spot_blend = bpy.props.FloatProperty(
        name="Blend",
        default=0,
        min=0,
        max=1,
        precision=3,
        description="Blending of the spotlights")

    # Spacing in pixels on the focal plane.
    spacing = bpy.props.IntProperty(
        name="Spacing",
        default=10,
        min=0,
        description="The spacing in pixels between two cameras on the focal plane")
+
+
+
def register():
    """Register the add-on.

    The property group class must be registered before it can be used as
    the type of the scene-level PointerProperty, so the order here matters.
    """
    # register properties
    bpy.utils.register_class(LightFieldPropertyGroup)
    bpy.types.Scene.lightfield = bpy.props.PointerProperty(type=LightFieldPropertyGroup)
    bpy.utils.register_module(__name__)
+
+
def unregister():
    """Unregister the add-on and undo what register() did.

    register() attaches a PointerProperty to Scene; remove it again here so
    disabling the add-on does not leave a dangling 'lightfield' property on
    every scene (the original leaked it).
    """
    bpy.utils.unregister_module(__name__)
    del bpy.types.Scene.lightfield
+
+
+if __name__ == "__main__":
+ register()
+
diff --git a/light_field_tools/light_field_tools.py b/light_field_tools/light_field_tools.py
new file mode 100644
index 00000000..dcf44c95
--- /dev/null
+++ b/light_field_tools/light_field_tools.py
@@ -0,0 +1,432 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+import bpy
+from bpy.props import *
+
+import os
+import math
+
+import mathutils
+
+__bpydoc__ = """
+Light Field Tools
+
+This script helps setting up rendering of lightfields. It
+also supports the projection of lightfields with textured
+spotlights.
+
+Usage:
+A simple interface can be accessed in the tool shelf panel
+in 3D View ([T] Key).
+
+A base mesh has to be provided, which will normaly be a
+subdivided plane. The script will then create a camera rig
+and a light rig with adjustable properties. A sample camera
+and a spotlight will be created on each vertex of the
+basemesh object axis (maybe vertex normal in future
+versions).
+
+ Vertex order:
+ The user has to provide the number of cameras or
+ lights in one row in an unevenly spaced grid, the
+ basemesh. Then the right vertex order can be
+ computed as shown here.
+ 6-7-8
+ | | |
+ ^ 3-4-5
+ | | | |
+ y 0-1-2
+ x->
+
+There is also a tool to create a basemesh, which is an
+evenly spaced grid. The row length parameter is taken to
+construct such a NxN grid. Someone would start out by adding
+a rectengular plane as the slice plane of the frustrum of
+the most middle camera of the light field rig. The spacing
+parameter then places the other cameras in a way, so they
+have an offset of n pixels from the other camera on this
+plane.
+
+
+Version history:
+v0.2.1 - Empty handler, multiple camera grid, r34843
+v0.2.0 - To be included in contrib, r34456
+v0.1.4 - To work with r34261
+v0.1.3 - Fixed base mesh creation for r29998
+v0.1.2 - Minor fixes, working with r29994
+v0.1.1 - Basemesh from focal plane.
+v0.1.0 - API updates, draft done.
+v0.0.4 - Texturing.
+v0.0.3 - Creates an array of non textured spotlights.
+v0.0.2 - Renders lightfields.
+v0.0.1 - Initial version.
+
+TODO:
+* Restore view after primary camera is changed.
+* Apply object matrix to normals.
+* Allign to normals, somehow,....
+* StringPropertie with PATH tag, for proper ui.
+"""
+
+
class OBJECT_OT_create_lightfield_rig(bpy.types.Operator):
    """Create a lightfield rig based on the active object/mesh"""
    bl_idname = "object.create_lightfield_rig"
    bl_label = "Create a light field rig based on the active object/mesh"
    bl_options = {'REGISTER'}

    # New lamps are placed on the first layer only.
    layer0 = [True] + [False] * 19

    # Filled in by execute(); class-level defaults are rebound per run.
    numSamples = 0
    baseObject = None
    handler = None
    verts = []
    imagePaths = []

    def arrangeVerts(self):
        """Sort the basemesh vertices into the row-major order described in
        the module docs: rows by y, then each row of row_length verts by x.
        Returns a list of [coordinate, normal] pairs."""
        #FIXME get mesh with applied modifier stack
        scene = bpy.context.scene
        mesh = self.baseObject.data
        verts = []
        row_length = scene.lightfield.row_length

        for vert in mesh.vertices:
            # world/parent origin
            co = vert.co * self.baseObject.matrix_local
            normal = vert.normal
            verts.append([co, normal])

        def key_x(v):
            return v[0][0]

        def key_y(v):
            return v[0][1]

        verts.sort(key=key_y)
        sorted_verts = []
        for i in range(0, len(verts), row_length):
            row = verts[i:i + row_length]
            row.sort(key=key_x)
            sorted_verts.extend(row)

        return sorted_verts

    def createCameraAnimated(self):
        """Create a single camera and keyframe it over every sample position
        (one sample per animation frame)."""
        scene = bpy.context.scene

        bpy.ops.object.camera_add(view_align=False)
        cam = bpy.context.active_object
        cam.name = "light_field_camera"

        # set props
        cam.data.angle = scene.lightfield.angle

        # display options of the camera
        cam.data.lens_unit = 'DEGREES'

        # handler parent
        if scene.lightfield.create_handler:
            cam.parent = self.handler

        # set as primary camera
        scene.camera = cam

        ### animate ###
        scene.frame_current = 0

        for frame, vert in enumerate(self.verts):
            scene.frame_current = frame
            # translate
            cam.location = vert[0]
            # rotation (constant, so it is set but not keyframed)
            cam.rotation_euler = self.baseObject.rotation_euler
            # insert location keyframes
            cam.keyframe_insert('location')

        # set anim render props
        scene.frame_current = 0
        scene.frame_start = 0
        scene.frame_end = self.numSamples - 1

    def createCameraMultiple(self):
        """Create one camera per sample position."""
        scene = bpy.context.scene

        for cam_idx, vert in enumerate(self.verts):
            # add and name camera
            bpy.ops.object.camera_add(view_align=False)
            cam = bpy.context.active_object
            cam.name = "light_field_cam_" + str(cam_idx)

            # translate
            cam.location = vert[0]
            # rotation
            cam.rotation_euler = self.baseObject.rotation_euler

            # set camera props
            cam.data.angle = scene.lightfield.angle

            # display options of the camera
            cam.data.draw_size = 0.15
            cam.data.lens_unit = 'DEGREES'

            # handler parent
            if scene.lightfield.create_handler:
                cam.parent = self.handler

    def createCamera(self):
        """Create the camera part of the rig, animated or as a grid."""
        if bpy.context.scene.lightfield.animate_camera:
            self.createCameraAnimated()
        else:
            self.createCameraMultiple()

    def getImagePaths(self):
        """Collect projector textures from the configured directory.

        Returns True (and fills self.imagePaths, sorted) only when the
        directory exists and holds exactly one file per sample."""
        path = bpy.context.scene.lightfield.texture_path
        if not os.path.isdir(path):
            return False
        files = os.listdir(path)
        if not len(files) == self.numSamples:
            return False
        files.sort()
        self.imagePaths = [os.path.join(path, f) for f in files]
        return True

    def createTexture(self, index):
        """Create and return the image texture for spotlight `index`."""
        name = "light_field_spot_tex_" + str(index)
        tex = bpy.data.textures.new(name, type='IMAGE')

        # load and set the image
        #FIXME width, height. not necessary to set in the past.
        img = bpy.data.images.new("lfe_str_" + str(index), width=5, height=5)
        img.filepath = self.imagePaths[index]
        img.source = 'FILE'
        tex.image = img

        return tex

    def createSpot(self, index, textured=False):
        """Create and return one spotlight of the emitter rig."""
        scene = bpy.context.scene
        bpy.ops.object.lamp_add(
                type='SPOT',
                layers=self.layer0)
        spot = bpy.context.active_object

        # set object props
        spot.name = "light_field_spot_" + str(index)

        # set constants
        spot.data.use_square = True
        spot.data.shadow_method = "RAY_SHADOW"
        # FIXME
        spot.data.distance = 10

        # set spot props; total intensity is split over all lamps
        spot.data.energy = scene.lightfield.light_intensity / self.numSamples
        spot.data.spot_size = scene.lightfield.angle
        spot.data.spot_blend = scene.lightfield.spot_blend

        # add texture
        if textured:
            spot.data.active_texture = self.createTexture(index)
            # texture mapping
            spot.data.texture_slots[0].texture_coords = 'VIEW'

        # handler parent
        if scene.lightfield.create_handler:
            spot.parent = self.handler

        return spot

    def createLightfieldEmitter(self, textured=False):
        """Create one spotlight per sample position."""
        for i, vert in enumerate(self.verts):
            spot = self.createSpot(i, textured)
            spot.location = vert[0]
            spot.rotation_euler = self.baseObject.rotation_euler

    def execute(self, context):
        """Build the rig from the active mesh object."""
        scene = context.scene

        obj = self.baseObject = context.active_object
        if not obj or obj.type != 'MESH':
            self.report({'ERROR'}, "No selected mesh object!")
            # Operators must return a *set* of status flags; the original
            # returned the bare string 'CANCELLED'.
            return {'CANCELLED'}

        self.verts = self.arrangeVerts()
        self.numSamples = len(self.verts)

        if scene.lightfield.create_handler:
            # create an empty as common parent for cameras and lamps
            bpy.ops.object.add(type='EMPTY')
            empty = bpy.context.active_object
            empty.location = self.baseObject.location
            empty.rotation_euler = self.baseObject.rotation_euler
            self.handler = empty

        if scene.lightfield.do_camera:
            self.createCamera()

        if scene.lightfield.do_projection:
            # textured only when a matching texture per sample was found
            self.createLightfieldEmitter(textured=self.getImagePaths())

        return {'FINISHED'}
+
+
+
+
class OBJECT_OT_create_lightfield_basemesh(bpy.types.Operator):
    """Creates a basemesh from the selected focal plane"""
    bl_idname = "object.create_lightfield_basemesh"
    bl_label = "Create a basemesh from the selected focal plane"
    bl_options = {'REGISTER'}

    # Name used for both the created mesh datablock and the object.
    objName = "lf_basemesh"

    def getWidth(self, obj):
        """Return the length of the object's first edge (in parent space)."""
        mat = obj.matrix_local
        mesh = obj.data
        v0 = mesh.vertices[mesh.edges[0].vertices[0]].co * mat
        v1 = mesh.vertices[mesh.edges[0].vertices[1]].co * mat
        return (v0 - v1).length

    def getCamVec(self, obj, angle):
        """Return the offset from the focal plane to the camera grid: along
        the face normal, at the distance where a camera with field of view
        `angle` (degrees) exactly covers the plane."""
        width = self.getWidth(obj)
        # inverse-transpose transforms the normal correctly
        itmat = obj.matrix_local.inverted().transposed()
        normal = (obj.data.faces[0].normal * itmat).normalized()
        vl = (width / 2) * (1 / math.tan(math.radians(angle / 2)))
        return normal * vl

    def addMeshObj(self, mesh):
        """Link `mesh` into the scene as a new object and select it."""
        scene = bpy.context.scene

        for o in scene.objects:
            o.select = False

        mesh.update()
        nobj = bpy.data.objects.new(self.objName, mesh)
        scene.objects.link(nobj)
        nobj.select = True

        # only steal the active slot when it is safe to do so
        if scene.objects.active is None or scene.objects.active.mode == 'OBJECT':
            scene.objects.active = nobj

    def execute(self, context):
        """Create the grid of basemesh vertices from the focal plane."""
        scene = context.scene
        obj = context.active_object
        # check if active object is a mesh object
        if not obj or obj.type != 'MESH':
            self.report({'ERROR'}, "No selected mesh object!")
            # Operators must return a *set* of status flags; the original
            # returned the bare string 'CANCELLED'.
            return {'CANCELLED'}

        # check if it has one single face
        if len(obj.data.faces) != 1:
            self.report({'ERROR'}, "The selected mesh object has to have exactly one quad!")
            return {'CANCELLED'}

        rl = scene.lightfield.row_length
        # use a degree angle here
        angle = math.degrees(scene.lightfield.angle)
        spacing = scene.lightfield.spacing
        # resolution of final renderings
        res = round(scene.render.resolution_x * (scene.render.resolution_percentage / 100.))
        width = self.getWidth(obj)

        # the offset between n pixels on the focal plane
        fplane_offset = (width / res) * spacing

        # vertices for the basemesh
        verts = []
        # the offset vector
        vec = self.getCamVec(obj, angle)
        # lower left coordinates of the grid
        sx = obj.location[0] - fplane_offset * int(rl / 2)
        sy = obj.location[1] - fplane_offset * int(rl / 2)
        z = obj.location[2]
        # position on the focal plane
        fplane_pos = mathutils.Vector()
        for x in [sx + fplane_offset * i for i in range(rl)]:
            for y in [sy + fplane_offset * i for i in range(rl)]:
                fplane_pos.x = x
                fplane_pos.y = y
                fplane_pos.z = z
                # position of a vertex in a basemesh
                pos = fplane_pos + vec
                # pack coordinates flat into the vert list
                verts.append((pos.x, pos.y, pos.z))

        # setup the basemesh and add verts
        mesh = bpy.data.meshes.new(self.objName)
        mesh.from_pydata(verts, [], [])
        self.addMeshObj(mesh)

        return {'FINISHED'}
+
+
+
+
class VIEW3D_OT_lightfield_tools(bpy.types.Panel):
    """Tool shelf panel exposing the light field rig settings."""
    # NOTE(review): despite the "_OT_" infix this is a Panel, not an
    # operator; the conventional class name would use "_PT_".
    bl_space_type = "VIEW_3D"
    bl_region_type = "TOOLS"
    bl_context = "objectmode"
    bl_label = "Light Field Tools"

    def draw(self, context):
        lf = context.scene.lightfield
        layout = self.layout

        spacer_row = layout.row()
        main_col = layout.column()

        main_col.prop(lf, "row_length")
        main_col.prop(lf, "angle")

        main_col.prop(lf, "create_handler")

        main_col.prop(lf, "do_camera")
        main_col.prop(lf, "animate_camera")
        main_col.prop(lf, "do_projection")

        # projector settings, greyed out unless a projector is requested
        proj_row = layout.row()
        proj_row.enabled = lf.do_projection
        proj_col = proj_row.column(align=True)
        proj_col.prop(lf, "texture_path")
        proj_col.prop(lf, "light_intensity")
        proj_col.prop(lf, "spot_blend")

        # create a basemesh
        grid_row = layout.row()
        grid_col = grid_row.column(align=True)
        grid_col.operator("object.create_lightfield_basemesh", "Create Base Grid")
        grid_col.prop(lf, "spacing")

        layout.operator("object.create_lightfield_rig", "Create Rig")
+
diff --git a/mesh_bsurfaces.py b/mesh_bsurfaces.py
new file mode 100644
index 00000000..645138bc
--- /dev/null
+++ b/mesh_bsurfaces.py
@@ -0,0 +1,857 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
# Add-on metadata read by Blender's add-on manager (must be named bl_info).
bl_info = {
    "name": "Bsurfaces GPL Edition",
    "author": "Eclectiel",
    "version": (0,9),
    "blender": (2, 5, 7),
    "api": 35733,
    "location": "View3D > EditMode > ToolShelf",
    "description": "Draw meshes and re-topologies with Grease Pencil",
    "warning": "Beta",
    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
        "Scripts/Mesh/Surface_Sketch",
    "tracker_url": "https://projects.blender.org/tracker/index.php?"\
        "func=detail&aid=26642&group_id=153&atid=469",
    "category": "Mesh"}
+
+
+import bpy
+import math
+
+from math import *
+
+
class VIEW3D_PT_tools_SURF_SKETCH(bpy.types.Panel):
    """Tool shelf panel for the Bsurfaces operators (mesh edit mode)."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'TOOLS'
    bl_context = "mesh_edit"
    bl_label = "Bsurfaces"

    @classmethod
    def poll(cls, context):
        # Only show the panel when there is an active object.
        return context.active_object

    def draw(self, context):
        layout = self.layout
        scn = context.scene
        ob = context.object  # read for parity with the original; unused below

        column = layout.column(align=True)
        spacer = layout.row()
        spacer.separator()
        column.operator("gpencil.surfsk_add_surface", text="Add Surface")
        column.prop(scn, "SURFSK_edges_U")
        column.prop(scn, "SURFSK_edges_V")
        spacer.separator()
        column.prop(scn, "SURFSK_keep_strokes")
        column.separator()
        spacer.separator()
        column.operator("gpencil.surfsk_strokes_to_curves", text="Strokes to curves")
+
+
+
+class GPENCIL_OT_SURFSK_add_surface(bpy.types.Operator):
+ bl_idname = "gpencil.surfsk_add_surface"
+ bl_label = "Bsurfaces add surface"
+ bl_description = "Generates a surface from grease pencil strokes or from curves."
+ bl_options = {'REGISTER', 'UNDO'}
+
+
    ##### Get an ordered list of a chain of vertices.
    def get_ordered_verts(self, ob, all_selected_edges_idx, all_selected_verts_idx, first_vert_idx, middle_vertex_idx):
        """Walk the selected edges starting at first_vert_idx and return the
        chain's vertices (objects from self.main_object.data) in connection
        order.

        ob -- object whose edges are walked
        all_selected_edges_idx -- indices of the edges making up the chain
        all_selected_verts_idx -- indices of vertices allowed in the chain
        first_vert_idx -- vertex index to start walking from
        middle_vertex_idx -- vertex separating the U from the V chain, or
            None; when given it is appended and the list reversed, so the
            middle vertex ends up first
        """
        # Order selected vertexes.
        verts_ordered = []
        verts_ordered.append(self.main_object.data.vertices[first_vert_idx])
        prev_v = first_vert_idx
        prev_ed = None
        finish_while = False
        while True:
            edges_non_matched = 0
            for i in all_selected_edges_idx:
                # Follow the edge that extends the chain from prev_v, in
                # whichever direction the edge happens to store its verts.
                if ob.data.edges[i] != prev_ed and ob.data.edges[i].vertices[0] == prev_v and ob.data.edges[i].vertices[1] in all_selected_verts_idx:
                    verts_ordered.append(self.main_object.data.vertices[ob.data.edges[i].vertices[1]])
                    prev_v = ob.data.edges[i].vertices[1]
                    prev_ed = ob.data.edges[i]
                elif ob.data.edges[i] != prev_ed and ob.data.edges[i].vertices[1] == prev_v and ob.data.edges[i].vertices[0] in all_selected_verts_idx:
                    verts_ordered.append(self.main_object.data.vertices[ob.data.edges[i].vertices[0]])
                    prev_v = ob.data.edges[i].vertices[0]
                    prev_ed = ob.data.edges[i]
                else:
                    edges_non_matched += 1

            # Stop once a full pass finds no edge that extends the chain.
            if edges_non_matched == len(all_selected_edges_idx):
                finish_while = True

            if finish_while:
                break

        if middle_vertex_idx != None:
            verts_ordered.append(self.main_object.data.vertices[middle_vertex_idx])
            verts_ordered.reverse()

        return verts_ordered
+
+
+ #### Calculates length of a chain of points.
+ def get_chain_length(self, object, verts_ordered):
+ matrix = object.matrix_world
+
+ edges_lengths = []
+ edges_lengths_sum = 0
+ for i in range(0, len(verts_ordered)):
+ if i == 0:
+ prev_v_co = verts_ordered[i].co * matrix
+ else:
+ v_co = verts_ordered[i].co * matrix
+
+ v_difs = [prev_v_co[0] - v_co[0], prev_v_co[1] - v_co[1], prev_v_co[2] - v_co[2]]
+ edge_length = abs(sqrt(v_difs[0] * v_difs[0] + v_difs[1] * v_difs[1] + v_difs[2] * v_difs[2]))
+
+ edges_lengths.append(edge_length)
+ edges_lengths_sum += edge_length
+
+ prev_v_co = v_co
+
+
+ return edges_lengths, edges_lengths_sum
+
+
+ #### Calculates the proportion of the edges of a chain of edges, relative to the full chain length.
+ def get_edges_proportions(self, edges_lengths, edges_lengths_sum, use_boundaries, fixed_edges_num):
+ edges_proportions = []
+ if use_boundaries:
+ verts_count = 1
+ for l in edges_lengths:
+ edges_proportions.append(l / edges_lengths_sum)
+ verts_count += 1
+ else:
+ verts_count = 1
+ for n in range(0, fixed_edges_num):
+ edges_proportions.append(1 / fixed_edges_num)
+ verts_count += 1
+
+ return edges_proportions
+
+
+ #### Calculates the angle between two pairs of points in space.
+ def orientation_difference(self, points_A_co, points_B_co): # each parameter should be a list with two elements, and each element should be a x,y,z coordinate.
+ vec_A = points_A_co[0] - points_A_co[1]
+ vec_B = points_B_co[0] - points_B_co[1]
+
+ angle = vec_A.angle(vec_B)
+
+ if angle > 0.5 * math.pi:
+ angle = abs(angle - math.pi)
+
+ return angle
+
+
+ #### Calculate distance between two points
+ def pts_distance(self, p1_co, p2_co):
+ p_difs = [p1_co[0] - p2_co[0], p1_co[1] - p2_co[1], p1_co[2] - p2_co[2]]
+ distance = abs(sqrt(p_difs[0] * p_difs[0] + p_difs[1] * p_difs[1] + p_difs[2] * p_difs[2]))
+
+ return distance
+
+
+ def execute(self, context):
+ #### Selected edges.
+ all_selected_edges_idx = []
+ all_selected_verts = []
+ all_verts_idx = []
+ for ed in self.main_object.data.edges:
+ if ed.select:
+ all_selected_edges_idx.append(ed.index)
+
+ # Selected vertexes.
+ if not ed.vertices[0] in all_selected_verts:
+ all_selected_verts.append(self.main_object.data.vertices[ed.vertices[0]])
+ if not ed.vertices[1] in all_selected_verts:
+ all_selected_verts.append(self.main_object.data.vertices[ed.vertices[1]])
+
+ # All verts (both from each edge) to determine later which are at the tips (those not repeated twice).
+ all_verts_idx.append(ed.vertices[0])
+ all_verts_idx.append(ed.vertices[1])
+
+
+ #### Identify the tips and "middle-vertex" that separates U from V, if there is one.
+ all_chains_tips_idx = []
+ for v_idx in all_verts_idx:
+ if all_verts_idx.count(v_idx) < 2:
+ all_chains_tips_idx.append(v_idx)
+
+ edges_connected_to_tips = []
+ for ed in self.main_object.data.edges:
+ if (ed.vertices[0] in all_chains_tips_idx or ed.vertices[1] in all_chains_tips_idx) and not (ed.vertices[0] in all_verts_idx and ed.vertices[1] in all_verts_idx):
+ edges_connected_to_tips.append(ed)
+
+ middle_vertex_idx = None
+ tips_to_discard_idx = []
+ for ed_tips in edges_connected_to_tips:
+ for ed_tips_b in edges_connected_to_tips:
+ if (ed_tips != ed_tips_b):
+ if ed_tips.vertices[0] in all_verts_idx and (((ed_tips.vertices[1] == ed_tips_b.vertices[0]) or ed_tips.vertices[1] == ed_tips_b.vertices[1])):
+ middle_vertex_idx = ed_tips.vertices[1]
+ tips_to_discard_idx.append(ed_tips.vertices[0])
+ elif ed_tips.vertices[1] in all_verts_idx and (((ed_tips.vertices[0] == ed_tips_b.vertices[0]) or ed_tips.vertices[0] == ed_tips_b.vertices[1])):
+ middle_vertex_idx = ed_tips.vertices[0]
+ tips_to_discard_idx.append(ed_tips.vertices[1])
+
+
+ #### List with pairs of verts that belong to the tips of each selection chain (row).
+ verts_tips_same_chain_idx = []
+ if len(all_chains_tips_idx) >= 2:
+ checked_v = []
+ for i in range(0, len(all_chains_tips_idx)):
+ if all_chains_tips_idx[i] not in checked_v:
+ v_chain = self.get_ordered_verts(self.main_object, all_selected_edges_idx, all_verts_idx, all_chains_tips_idx[i], middle_vertex_idx)
+
+ verts_tips_same_chain_idx.append([v_chain[0].index, v_chain[len(v_chain) - 1].index])
+
+ checked_v.append(v_chain[0].index)
+ checked_v.append(v_chain[len(v_chain) - 1].index)
+
+
+ #### Selection tips (vertices)
+ verts_tips_parsed_idx = []
+ if len(all_chains_tips_idx) >= 2:
+ for spec_v_idx in all_chains_tips_idx:
+ if (spec_v_idx not in tips_to_discard_idx):
+ verts_tips_parsed_idx.append(spec_v_idx)
+
+
+ #### Identify the type of selection made by the user.
+ if middle_vertex_idx != None:
+ if len(all_chains_tips_idx) == 4: # If there are 4 tips (two selection chains)
+ selection_type = "TWO_CONNECTED"
+ else:
+ # The type of the selection was not identified, so the script stops.
+ return
+ else:
+ if len(all_chains_tips_idx) == 2: # If there are 2 tips (one selection chain)
+ selection_type = "SINGLE"
+ elif len(all_chains_tips_idx) == 4: # If there are 4 tips (two selection chains)
+ selection_type = "TWO_NOT_CONNECTED"
+ elif len(all_chains_tips_idx) == 0:
+ selection_type = "NO_SELECTION"
+ else:
+ # The type of the selection was not identified, so the script stops.
+ return
+
+
+ #### Check if it will be used grease pencil strokes or curves.
+ selected_objs = bpy.context.selected_objects
+ if len(selected_objs) > 1:
+ for ob in selected_objs:
+ if ob != bpy.context.scene.objects.active:
+ ob_gp_strokes = ob
+ using_external_curves = True
+
+ bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
+ else:
+ #### Vheck if there is a grease pencil layer. If not, quit.
+ try:
+ x = self.main_object.grease_pencil.layers.active.active_frame.strokes
+ except:
+ return{'CANCELLED'}
+
+ #### Convert grease pencil strokes to curve.
+ bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
+ bpy.ops.gpencil.convert('INVOKE_REGION_WIN', type='CURVE')
+ ob_gp_strokes = bpy.context.object
+
+
+ using_external_curves = False
+
+
+
+ ob_gp_strokes.name = "SURFSK_temp_strokes"
+
+ bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
+ bpy.ops.object.select_name('INVOKE_REGION_WIN', name = ob_gp_strokes.name)
+ bpy.context.scene.objects.active = bpy.context.scene.objects[ob_gp_strokes.name]
+
+
+ #### If "Keep strokes" is active make a duplicate of the original strokes, which will be intact
+ if bpy.context.scene.SURFSK_keep_strokes:
+ bpy.ops.object.duplicate('INVOKE_REGION_WIN')
+ bpy.context.object.name = "SURFSK_used_strokes"
+ bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
+ bpy.ops.curve.smooth('INVOKE_REGION_WIN')
+ bpy.ops.curve.smooth('INVOKE_REGION_WIN')
+ bpy.ops.curve.smooth('INVOKE_REGION_WIN')
+ bpy.ops.curve.smooth('INVOKE_REGION_WIN')
+ bpy.ops.curve.smooth('INVOKE_REGION_WIN')
+ bpy.ops.curve.smooth('INVOKE_REGION_WIN')
+ bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
+
+ bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
+ bpy.ops.object.select_name('INVOKE_REGION_WIN', name = ob_gp_strokes.name)
+ bpy.context.scene.objects.active = bpy.context.scene.objects[ob_gp_strokes.name]
+
+
+ #### Enter editmode for the new curve (converted from grease pencil strokes).
+ bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
+ bpy.ops.curve.smooth('INVOKE_REGION_WIN')
+ bpy.ops.curve.smooth('INVOKE_REGION_WIN')
+ bpy.ops.curve.smooth('INVOKE_REGION_WIN')
+ bpy.ops.curve.smooth('INVOKE_REGION_WIN')
+ bpy.ops.curve.smooth('INVOKE_REGION_WIN')
+ bpy.ops.curve.smooth('INVOKE_REGION_WIN')
+ bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
+
+
+ selection_U_exists = False
+ selection_U2_exists = False
+ selection_V_exists = False
+ selection_V2_exists = False
+ #### Define what vertexes are at the tips of each selection and are not the middle-vertex.
+ if selection_type == "TWO_CONNECTED":
+ selection_U_exists = True
+ selection_V_exists = True
+
+ # Determine which selection is Selection-U and which is Selection-V.
+ points_A = []
+ points_B = []
+ points_first_stroke_tips = []
+
+ points_A.append(self.main_object.data.vertices[verts_tips_parsed_idx[0]].co * self.main_object.matrix_world)
+ points_A.append(self.main_object.data.vertices[middle_vertex_idx].co * self.main_object.matrix_world)
+
+ points_B.append(self.main_object.data.vertices[verts_tips_parsed_idx[1]].co * self.main_object.matrix_world)
+ points_B.append(self.main_object.data.vertices[middle_vertex_idx].co * self.main_object.matrix_world)
+
+ points_first_stroke_tips.append(ob_gp_strokes.data.splines[0].bezier_points[0].co)
+ points_first_stroke_tips.append(ob_gp_strokes.data.splines[0].bezier_points[len(ob_gp_strokes.data.splines[0].bezier_points) - 1].co)
+
+ angle_A = self.orientation_difference(points_A, points_first_stroke_tips)
+ angle_B = self.orientation_difference(points_B, points_first_stroke_tips)
+
+ if angle_A < angle_B:
+ first_vert_U_idx = verts_tips_parsed_idx[0]
+ first_vert_V_idx = verts_tips_parsed_idx[1]
+ else:
+ first_vert_U_idx = verts_tips_parsed_idx[1]
+ first_vert_V_idx = verts_tips_parsed_idx[0]
+
+ elif selection_type == "SINGLE" or selection_type == "TWO_NOT_CONNECTED":
+ first_sketched_point_first_stroke_co = ob_gp_strokes.data.splines[0].bezier_points[0].co
+ last_sketched_point_first_stroke_co = ob_gp_strokes.data.splines[0].bezier_points[len(ob_gp_strokes.data.splines[0].bezier_points) - 1].co
+
+ first_sketched_point_last_stroke_co = ob_gp_strokes.data.splines[len(ob_gp_strokes.data.splines) - 1].bezier_points[0].co
+
+ # The tip of the selected vertices nearest to the first point of the first sketched stroke.
+ prev_dist = 999999999999
+ for i in range(0, len(verts_tips_same_chain_idx)):
+ for v_idx in range(0, len(verts_tips_same_chain_idx[i])):
+ dist = self.pts_distance(first_sketched_point_first_stroke_co, self.main_object.data.vertices[verts_tips_same_chain_idx[i][v_idx]].co * self.main_object.matrix_world)
+ if dist < prev_dist:
+ prev_dist = dist
+
+ nearest_tip_first_st_first_pt_idx = i
+
+ nearest_tip_first_pair_first_pt_idx = v_idx
+
+ # Shortest distance to the first point of the first stroke
+ shortest_distance_to_first_stroke = dist
+
+
+ # The tip of the selected vertices nearest to the last point of the first sketched stroke.
+ prev_dist = 999999999999
+ for i in range(0, len(verts_tips_same_chain_idx)):
+ for v_idx in range(0, len(verts_tips_same_chain_idx[i])):
+ dist = self.pts_distance(last_sketched_point_first_stroke_co, self.main_object.data.vertices[verts_tips_same_chain_idx[i][v_idx]].co * self.main_object.matrix_world)
+ if dist < prev_dist:
+ prev_dist = dist
+
+ nearest_tip_first_st_last_pt_pair_idx = i
+ nearest_tip_first_st_last_pt_point_idx = v_idx
+
+
+ # The tip of the selected vertices nearest to the first point of the last sketched stroke.
+ prev_dist = 999999999999
+ for i in range(0, len(verts_tips_same_chain_idx)):
+ for v_idx in range(0, len(verts_tips_same_chain_idx[i])):
+ dist = self.pts_distance(first_sketched_point_last_stroke_co, self.main_object.data.vertices[verts_tips_same_chain_idx[i][v_idx]].co * self.main_object.matrix_world)
+ if dist < prev_dist:
+ prev_dist = dist
+
+ nearest_tip_last_st_first_pt_pair_idx = i
+ nearest_tip_last_st_first_pt_point_idx = v_idx
+
+
+ points_tips = []
+ points_first_stroke_tips = []
+
+ # Determine if the single selection will be treated as U or as V.
+ edges_sum = 0
+ for i in all_selected_edges_idx:
+ edges_sum += self.pts_distance(self.main_object.data.vertices[self.main_object.data.edges[i].vertices[0]].co * self.main_object.matrix_world, self.main_object.data.vertices[self.main_object.data.edges[i].vertices[1]].co * self.main_object.matrix_world)
+
+ average_edge_length = edges_sum / len(all_selected_edges_idx)
+
+
+
+ # If the beginning of the first stroke is near enough to interpret things as an "extrude along strokes" instead of "extrude through strokes"
+ if shortest_distance_to_first_stroke < average_edge_length / 3:
+ selection_U_exists = False
+ selection_V_exists = True
+
+ first_vert_V_idx = verts_tips_same_chain_idx[nearest_tip_first_st_first_pt_idx][nearest_tip_first_pair_first_pt_idx]
+
+ if selection_type == "TWO_NOT_CONNECTED":
+ selection_V2_exists = True
+
+ first_vert_V2_idx = verts_tips_same_chain_idx[nearest_tip_first_st_last_pt_pair_idx][nearest_tip_first_st_last_pt_point_idx]
+
+ else:
+ selection_V2_exists = False
+
+ else:
+ selection_U_exists = True
+ selection_V_exists = False
+
+ points_tips.append(self.main_object.data.vertices[verts_tips_same_chain_idx[nearest_tip_first_st_first_pt_idx][0]].co * self.main_object.matrix_world)
+ points_tips.append(self.main_object.data.vertices[verts_tips_same_chain_idx[nearest_tip_first_st_first_pt_idx][1]].co * self.main_object.matrix_world)
+
+ points_first_stroke_tips.append(ob_gp_strokes.data.splines[0].bezier_points[0].co)
+ points_first_stroke_tips.append(ob_gp_strokes.data.splines[0].bezier_points[len(ob_gp_strokes.data.splines[0].bezier_points) - 1].co)
+
+ vec_A = points_tips[0] - points_tips[1]
+ vec_B = points_first_stroke_tips[0] - points_first_stroke_tips[1]
+
+ # Compare the direction of the selection and the first grease pencil stroke to determine which is the "first" vertex of the selection.
+ if vec_A.dot(vec_B) < 0:
+ first_vert_U_idx = verts_tips_same_chain_idx[nearest_tip_first_st_first_pt_idx][1]
+ else:
+ first_vert_U_idx = verts_tips_same_chain_idx[nearest_tip_first_st_first_pt_idx][0]
+
+ if selection_type == "TWO_NOT_CONNECTED":
+ selection_U2_exists = True
+
+ first_vert_U2_idx = verts_tips_same_chain_idx[nearest_tip_last_st_first_pt_pair_idx][nearest_tip_last_st_first_pt_point_idx]
+ else:
+ selection_U2_exists = False
+
+ elif selection_type == "NO_SELECTION":
+ selection_U_exists = False
+ selection_V_exists = False
+
+
+ #### Get an ordered list of the vertices of Selection-U.
+ if selection_U_exists:
+ verts_ordered_U = self.get_ordered_verts(self.main_object, all_selected_edges_idx, all_verts_idx, first_vert_U_idx, middle_vertex_idx)
+
+ #### Get an ordered list of the vertices of Selection-U.
+ if selection_U2_exists:
+ verts_ordered_U2 = self.get_ordered_verts(self.main_object, all_selected_edges_idx, all_verts_idx, first_vert_U2_idx, middle_vertex_idx)
+
+ #### Get an ordered list of the vertices of Selection-V.
+ if selection_V_exists:
+ verts_ordered_V = self.get_ordered_verts(self.main_object, all_selected_edges_idx, all_verts_idx, first_vert_V_idx, middle_vertex_idx)
+
+ #### Get an ordered list of the vertices of Selection-U.
+ if selection_V2_exists:
+ verts_ordered_V2 = self.get_ordered_verts(self.main_object, all_selected_edges_idx, all_verts_idx, first_vert_V2_idx, middle_vertex_idx)
+
+
+ #### Calculate edges U proportions.
+
+ # Sum selected edges U lengths.
+ edges_lengths_U = []
+ edges_lengths_sum_U = 0
+
+ if selection_U_exists:
+ edges_lengths_U, edges_lengths_sum_U = self.get_chain_length(self.main_object, verts_ordered_U)
+
+ # Sum selected edges V lengths.
+ edges_lengths_V = []
+ edges_lengths_sum_V = 0
+
+ if selection_V_exists:
+ edges_lengths_V, edges_lengths_sum_V = self.get_chain_length(self.main_object, verts_ordered_V)
+
+ bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
+ for i in range(0, int(bpy.context.scene.SURFSK_precision)):
+ bpy.ops.curve.subdivide('INVOKE_REGION_WIN')
+ bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
+
+ # Proportions U.
+ edges_proportions_U = []
+ edges_proportions_U = self.get_edges_proportions(edges_lengths_U, edges_lengths_sum_U, selection_U_exists, bpy.context.scene.SURFSK_edges_U)
+ verts_count_U = len(edges_proportions_U) + 1
+
+ # Proportions V.
+ edges_proportions_V = []
+ edges_proportions_V = self.get_edges_proportions(edges_lengths_V, edges_lengths_sum_V, selection_V_exists, bpy.context.scene.SURFSK_edges_V)
+ verts_count_V = len(edges_proportions_V) + 1
+
+
+
+ #### Get ordered lists of points on each sketched curve that mimics the proportions of the edges in the vertex selection.
+ sketched_splines = ob_gp_strokes.data.splines
+ sketched_splines_lengths = []
+ sketched_splines_parsed = []
+ for sp_idx in range(0, len(sketched_splines)):
+ # Calculate spline length
+ sketched_splines_lengths.append(0)
+ for i in range(0, len(sketched_splines[sp_idx].bezier_points)):
+ if i == 0:
+ prev_p = sketched_splines[sp_idx].bezier_points[i]
+ else:
+ p = sketched_splines[sp_idx].bezier_points[i]
+
+ p_difs = [prev_p.co[0] - p.co[0], prev_p.co[1] - p.co[1], prev_p.co[2] - p.co[2]]
+ edge_length = abs(sqrt(p_difs[0] * p_difs[0] + p_difs[1] * p_difs[1] + p_difs[2] * p_difs[2]))
+
+ sketched_splines_lengths[sp_idx] += edge_length
+
+ prev_p = p
+
+ # Calculate vertex positions with apropriate edge proportions, and ordered, for each spline.
+ sketched_splines_parsed.append([])
+ partial_spline_length = 0
+ related_edge_U = 0
+ edges_proportions_sum_U = 0
+ edges_lengths_sum_U = 0
+ for i in range(0, len(sketched_splines[sp_idx].bezier_points)):
+ if i == 0:
+ prev_p = sketched_splines[sp_idx].bezier_points[i]
+ sketched_splines_parsed[sp_idx].append(prev_p.co)
+ elif i != len(sketched_splines[sp_idx].bezier_points) - 1:
+ p = sketched_splines[sp_idx].bezier_points[i]
+
+ p_difs = [prev_p.co[0] - p.co[0], prev_p.co[1] - p.co[1], prev_p.co[2] - p.co[2]]
+ edge_length = abs(sqrt(p_difs[0] * p_difs[0] + p_difs[1] * p_difs[1] + p_difs[2] * p_difs[2]))
+
+
+ if edges_proportions_sum_U + edges_proportions_U[related_edge_U] - ((edges_lengths_sum_U + partial_spline_length + edge_length) / sketched_splines_lengths[sp_idx]) > 0: # comparing proportions to see if the proportion in the selection is found in the spline.
+ partial_spline_length += edge_length
+ elif related_edge_U < len(edges_proportions_U) - 1:
+ sketched_splines_parsed[sp_idx].append(prev_p.co)
+
+ edges_proportions_sum_U += edges_proportions_U[related_edge_U]
+ related_edge_U += 1
+
+ edges_lengths_sum_U += partial_spline_length
+ partial_spline_length = edge_length
+
+ prev_p = p
+ else: # last point of the spline for the last edge
+ p = sketched_splines[sp_idx].bezier_points[len(sketched_splines[sp_idx].bezier_points) - 1]
+ sketched_splines_parsed[sp_idx].append(p.co)
+
+
+ #### If the selection type is "TWO_NOT_CONNECTED" replace the last point of each spline with the points in the "target" selection.
+ if selection_type == "TWO_NOT_CONNECTED":
+ if selection_U2_exists:
+ for i in range(0, len(sketched_splines_parsed[len(sketched_splines_parsed) - 1])):
+ sketched_splines_parsed[len(sketched_splines_parsed) - 1][i] = verts_ordered_U2[i].co * self.main_object.matrix_world
+
+
+ #### Create temporary curves along the "control-points" found on the sketched curves and the mesh selection.
+ mesh_ctrl_pts_name = "SURFSK_ctrl_pts"
+ me = bpy.data.meshes.new(mesh_ctrl_pts_name)
+ ob_ctrl_pts = bpy.data.objects.new(mesh_ctrl_pts_name, me)
+ ob_ctrl_pts.data = me
+ bpy.context.scene.objects.link(ob_ctrl_pts)
+
+
+ for i in range(0, verts_count_U):
+ vert_num_in_spline = 1
+
+ if selection_U_exists:
+ ob_ctrl_pts.data.vertices.add(1)
+ last_v = ob_ctrl_pts.data.vertices[len(ob_ctrl_pts.data.vertices) - 1]
+ last_v.co = verts_ordered_U[i].co * self.main_object.matrix_world
+
+ vert_num_in_spline += 1
+
+ for sp in sketched_splines_parsed:
+ ob_ctrl_pts.data.vertices.add(1)
+ v = ob_ctrl_pts.data.vertices[len(ob_ctrl_pts.data.vertices) - 1]
+ v.co = sp[i]
+
+ if vert_num_in_spline > 1:
+ ob_ctrl_pts.data.edges.add(1)
+ ob_ctrl_pts.data.edges[len(ob_ctrl_pts.data.edges) - 1].vertices[0] = len(ob_ctrl_pts.data.vertices) - 2
+ ob_ctrl_pts.data.edges[len(ob_ctrl_pts.data.edges) - 1].vertices[1] = len(ob_ctrl_pts.data.vertices) - 1
+
+ last_v = v
+
+ vert_num_in_spline += 1
+
+ bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
+ bpy.ops.object.select_name('INVOKE_REGION_WIN', name = ob_ctrl_pts.name)
+ bpy.context.scene.objects.active = bpy.data.objects[ob_ctrl_pts.name]
+
+
+ # Create curves from control points.
+ bpy.ops.object.convert('INVOKE_REGION_WIN', target='CURVE', keep_original=False)
+ ob_curves_surf = bpy.context.scene.objects.active
+ bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
+ bpy.ops.curve.spline_type_set('INVOKE_REGION_WIN', type='BEZIER')
+ bpy.ops.curve.handle_type_set('INVOKE_REGION_WIN', type='AUTOMATIC')
+ for i in range(0, int(bpy.context.scene.SURFSK_precision)):
+ bpy.ops.curve.subdivide('INVOKE_REGION_WIN')
+ bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
+
+
+ # Calculate the length of each final surface spline.
+ surface_splines = ob_curves_surf.data.splines
+ surface_splines_lengths = []
+ surface_splines_parsed = []
+ for sp_idx in range(0, len(surface_splines)):
+ # Calculate spline length
+ surface_splines_lengths.append(0)
+ for i in range(0, len(surface_splines[sp_idx].bezier_points)):
+ if i == 0:
+ prev_p = surface_splines[sp_idx].bezier_points[i]
+ else:
+ p = surface_splines[sp_idx].bezier_points[i]
+
+ edge_length = self.pts_distance(prev_p.co, p.co)
+
+ surface_splines_lengths[sp_idx] += edge_length
+
+ prev_p = p
+
+ bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
+ for i in range(0, int(bpy.context.scene.SURFSK_precision)):
+ bpy.ops.curve.subdivide('INVOKE_REGION_WIN')
+ bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
+
+ for sp_idx in range(0, len(surface_splines)):
+ # Calculate vertex positions with apropriate edge proportions, and ordered, for each spline.
+ surface_splines_parsed.append([])
+ partial_spline_length = 0
+ related_edge_V = 0
+ edges_proportions_sum_V = 0
+ edges_lengths_sum_V = 0
+ for i in range(0, len(surface_splines[sp_idx].bezier_points)):
+ if i == 0:
+ prev_p = surface_splines[sp_idx].bezier_points[i]
+ surface_splines_parsed[sp_idx].append(prev_p.co)
+ elif i != len(surface_splines[sp_idx].bezier_points) - 1:
+ p = surface_splines[sp_idx].bezier_points[i]
+
+ edge_length = self.pts_distance(prev_p.co, p.co)
+
+ if edges_proportions_sum_V + edges_proportions_V[related_edge_V] - ((edges_lengths_sum_V + partial_spline_length + edge_length) / surface_splines_lengths[sp_idx]) > 0: # comparing proportions to see if the proportion in the selection is found in the spline.
+ partial_spline_length += edge_length
+ elif related_edge_V < len(edges_proportions_V) - 1:
+ surface_splines_parsed[sp_idx].append(prev_p.co)
+
+ edges_proportions_sum_V += edges_proportions_V[related_edge_V]
+ related_edge_V += 1
+
+ edges_lengths_sum_V += partial_spline_length
+ partial_spline_length = edge_length
+
+ prev_p = p
+ else: # last point of the spline for the last edge
+ p = surface_splines[sp_idx].bezier_points[len(surface_splines[sp_idx].bezier_points) - 1]
+ surface_splines_parsed[sp_idx].append(p.co)
+
+ # Set the first and last verts of each spline to the locations of the respective verts in the selections.
+ if selection_V_exists:
+ for i in range(0, len(surface_splines_parsed[0])):
+ surface_splines_parsed[len(surface_splines_parsed) - 1][i] = verts_ordered_V[i].co * self.main_object.matrix_world
+
+ if selection_type == "TWO_NOT_CONNECTED":
+ if selection_V2_exists:
+ for i in range(0, len(surface_splines_parsed[0])):
+ surface_splines_parsed[0][i] = verts_ordered_V2[i].co * self.main_object.matrix_world
+
+
+ #### Delete object with control points and object from grease pencil convertion.
+ bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
+ bpy.ops.object.select_name('INVOKE_REGION_WIN', name = ob_ctrl_pts.name)
+ bpy.context.scene.objects.active = bpy.data.objects[ob_ctrl_pts.name]
+ bpy.ops.object.delete()
+
+ bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
+ bpy.ops.object.select_name('INVOKE_REGION_WIN', name = ob_gp_strokes.name)
+ bpy.context.scene.objects.active = bpy.data.objects[ob_gp_strokes.name]
+ bpy.ops.object.delete()
+
+
+
+ #### Generate surface.
+
+ # Get all verts coords.
+ all_surface_verts_co = []
+ for i in range(0, len(surface_splines_parsed)):
+ # Get coords of all verts and make a list with them
+ for pt_co in surface_splines_parsed[i]:
+ all_surface_verts_co.append(pt_co)
+
+
+ # Define verts for each face.
+ all_surface_faces = []
+ for i in range(0, len(all_surface_verts_co) - len(surface_splines_parsed[0])):
+ if ((i + 1) / len(surface_splines_parsed[0]) != int((i + 1) / len(surface_splines_parsed[0]))):
+ all_surface_faces.append([i+1, i , i + len(surface_splines_parsed[0]), i + len(surface_splines_parsed[0]) + 1])
+
+
+ # Build the mesh.
+ surf_me_name = "SURFSK_surface"
+ me_surf = bpy.data.meshes.new(surf_me_name)
+
+ me_surf.from_pydata(all_surface_verts_co, [], all_surface_faces)
+
+ me_surf.update()
+
+ ob_surface = bpy.data.objects.new(surf_me_name, me_surf)
+ bpy.context.scene.objects.link(ob_surface)
+
+
+ #### Join the new mesh to the main object.
+ ob_surface.select = True
+ self.main_object.select = True
+ bpy.context.scene.objects.active = bpy.data.objects[self.main_object.name]
+ bpy.ops.object.join('INVOKE_REGION_WIN')
+ bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
+ bpy.ops.mesh.remove_doubles('INVOKE_REGION_WIN', limit=0.0001)
+ bpy.ops.mesh.normals_make_consistent('INVOKE_REGION_WIN', inside=False)
+ bpy.ops.mesh.select_all('INVOKE_REGION_WIN', action='DESELECT')
+
+
+ bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
+
+
+ #### Delete grease pencil strokes
+ try:
+ bpy.ops.gpencil.active_frame_delete('INVOKE_REGION_WIN')
+ except:
+ pass
+
+ bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
+
+
+ return {"FINISHED"}
+
    def invoke(self, context, event):
        """Operator entry point: capture the active mesh object, then run execute().

        The double editmode toggle forces Blender to flush any pending
        edit-mode changes into the mesh data before we read it.
        """
        bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
        bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
        # Remember the object the surface will be added to; execute() reads this.
        self.main_object = bpy.context.scene.objects.active

        self.execute(context)

        return {"FINISHED"}
+
+
+
+
class GPENCIL_OT_SURFSK_strokes_to_curves(bpy.types.Operator):
    """Convert the active grease pencil strokes into a smoothed bezier
    curve object and leave it in curve edit mode."""
    bl_idname = "gpencil.surfsk_strokes_to_curves"
    bl_label = "Bsurfaces strokes to curves"
    bl_description = "Convert grease pencil strokes into curves and enter edit mode"

    def execute(self, context):
        #### Convert grease pencil strokes to curve.
        bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
        bpy.ops.gpencil.convert('INVOKE_REGION_WIN', type='CURVE')
        ob_gp_strokes = bpy.context.object
        ob_gp_strokes.name = "SURFSK_strokes"

        #### Delete grease pencil strokes.
        bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
        bpy.ops.object.select_name('INVOKE_REGION_WIN', name = self.main_object.name)
        bpy.context.scene.objects.active = bpy.data.objects[self.main_object.name]
        bpy.ops.gpencil.active_frame_delete('INVOKE_REGION_WIN')

        # Make the new curve object the only selected, active object.
        bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
        bpy.ops.object.select_name('INVOKE_REGION_WIN', name = ob_gp_strokes.name)
        bpy.context.scene.objects.active = bpy.data.objects[ob_gp_strokes.name]

        # Enter curve edit mode and relax the converted strokes.
        # (Previously this was twelve identical copy-pasted smooth calls.)
        bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
        for _ in range(12):
            bpy.ops.curve.smooth('INVOKE_REGION_WIN')

        curve_crv = ob_gp_strokes.data
        bpy.ops.curve.spline_type_set('INVOKE_REGION_WIN', type="BEZIER")
        bpy.ops.curve.handle_type_set('INVOKE_REGION_WIN', type="AUTOMATIC")
        bpy.data.curves[curve_crv.name].show_handles = False
        bpy.data.curves[curve_crv.name].show_normal_face = False

        # BUGFIX: execute() previously fell off the end and returned None;
        # Blender operators must return a status set or an error is reported.
        return {"FINISHED"}

    def invoke(self, context, event):
        # Remember the object that owns the grease pencil data, then run.
        self.main_object = bpy.context.object

        self.execute(context)

        return {"FINISHED"}
+
+
def register():
    """Register the Bsurfaces operators/panel, scene settings and keymaps."""
    for cls in (GPENCIL_OT_SURFSK_add_surface,
                VIEW3D_PT_tools_SURF_SKETCH,
                GPENCIL_OT_SURFSK_strokes_to_curves):
        bpy.utils.register_class(cls)

    # Per-scene settings shown in the tool panel.
    scene = bpy.types.Scene
    scene.SURFSK_edges_U = bpy.props.IntProperty(name="Cross", description="Number of edge rings crossing the strokes (perpendicular to strokes direction)", default=10, min=0, max=100000)
    scene.SURFSK_edges_V = bpy.props.IntProperty(name="Follow", description="Number of edge rings following the strokes (parallel to strokes direction)", default=10, min=0, max=100000)
    scene.SURFSK_precision = bpy.props.IntProperty(name="Precision", description="Precision level of the surface calculation", default=4, min=0, max=100000)
    scene.SURFSK_keep_strokes = bpy.props.BoolProperty(name="Keep strokes", description="Keeps the sketched strokes after adding the surface", default=False)

    # Shortcuts in the 3D View: D-E adds a surface, D-C converts strokes.
    km = bpy.data.window_managers[0].keyconfigs.active.keymaps["3D View"]
    km.keymap_items.new("gpencil.surfsk_add_surface", "E", "PRESS", key_modifier="D")
    km.keymap_items.new("gpencil.surfsk_strokes_to_curves", "C", "PRESS", key_modifier="D")
+
+
def unregister():
    """Undo everything register() did: classes, scene properties, keymaps."""
    bpy.utils.unregister_class(GPENCIL_OT_SURFSK_add_surface)
    bpy.utils.unregister_class(VIEW3D_PT_tools_SURF_SKETCH)
    bpy.utils.unregister_class(GPENCIL_OT_SURFSK_strokes_to_curves)

    del bpy.types.Scene.SURFSK_edges_U
    del bpy.types.Scene.SURFSK_edges_V
    del bpy.types.Scene.SURFSK_precision
    del bpy.types.Scene.SURFSK_keep_strokes

    # BUGFIX: register() installs the keymap items under the operators' own
    # idnames, but this loop used to look for 'wm.call_menu' entries with a
    # matching properties.name, so the shortcuts were never removed.  Match
    # the idnames actually registered, and iterate over a snapshot because
    # we mutate the collection while scanning it.
    km = bpy.data.window_managers[0].keyconfigs.active.keymaps["3D View"]
    for kmi in list(km.keymap_items):
        if kmi.idname in ("gpencil.surfsk_add_surface",
                          "gpencil.surfsk_strokes_to_curves"):
            km.keymap_items.remove(kmi)
+
+
# Allow running the add-on directly from Blender's text editor.
if __name__ == "__main__":
    register()
diff --git a/mesh_inset/__init__.py b/mesh_inset/__init__.py
new file mode 100644
index 00000000..95a03830
--- /dev/null
+++ b/mesh_inset/__init__.py
@@ -0,0 +1,203 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
# Add-on metadata read by Blender's add-on manager.
bl_info = {
    "name": "Inset Polygon",
    "author": "Howard Trickey",
    "version": (0, 3),
    "blender": (2, 5, 7),
    "api": 36147,
    "location": "View3D > Tools",
    "description": "Make an inset polygon inside selection.",
    "warning": "",
    "wiki_url": \
        "http://wiki.blender.org/index.php/Extensions:2.5/Py/Scripts/Modeling/Inset-Polygon",
    "tracker_url": \
        "http://projects.blender.org/tracker/index.php?func=detail&aid=27290&group_id=153&atid=468",
    "category": "Mesh"}
+
+if "bpy" in locals():
+ import imp
+else:
+ from . import geom
+ from . import model
+ from . import offset
+ from . import triquad
+
+import math
+import bpy
+import mathutils
+from bpy.props import *
+
+
class Inset(bpy.types.Operator):
    """Operator: make an inset polygon inside the selected faces.

    Collects the selected faces, delegates the geometry work to
    do_inset(), and re-enters edit mode when done.
    """
    bl_idname = "mesh.inset"
    bl_label = "Inset"
    bl_description = "Make an inset polygon inside selection"
    bl_options = {'REGISTER', 'UNDO'}

    # How far the inset edges move inward; interpreted according to 'scale'.
    inset_amount = FloatProperty(name="Amount",
        description="Amount to move inset edges",
        default=5.0,
        min=0.0,
        max=1000.0,
        soft_min=0.0,
        soft_max=100.0,
        unit='LENGTH')
    # How far the new inset faces are raised (may be negative).
    inset_height = FloatProperty(name="Height",
        description="Amount to raise inset faces",
        default=0.0,
        min=-10000.0,
        max=10000.0,
        soft_min=-500.0,
        soft_max=500.0,
        unit='LENGTH')
    # Treat the whole selection as one region rather than per-face insets.
    region = BoolProperty(name="Region",
        description="Inset selection as one region?",
        default=True)
    # Whether 'inset_amount' is a percentage of the maximum possible inset
    # or an absolute length in blender units.
    scale = EnumProperty(name="Scale",
        description="Scale for amount",
        items=[
            ('PERCENT', "Percent",
                "Percentage of maximum inset amount"),
            ('ABSOLUTE', "Absolute",
                "Length in blender units")
            ],
        default='PERCENT')

    @classmethod
    def poll(cls, context):
        """Only available on a mesh object in edit mode."""
        obj = context.active_object
        return (obj and obj.type == 'MESH' and context.mode == 'EDIT_MESH')

    def draw(self, context):
        """Lay out the operator options in the redo panel."""
        layout = self.layout
        box = layout.box()
        box.label("Inset Options")
        box.prop(self, "scale")
        box.prop(self, "inset_amount")
        box.prop(self, "inset_height")
        box.prop(self, "region")

    def invoke(self, context, event):
        self.action(context)
        return {'FINISHED'}

    def execute(self, context):
        self.action(context)
        return {'FINISHED'}

    def action(self, context):
        """Shared body of invoke/execute: run the inset on the active mesh.

        Global undo is disabled for the duration so the operator registers
        as a single undo step; mesh data is only writable from object mode,
        hence the mode round-trip.
        """
        save_global_undo = bpy.context.user_preferences.edit.use_global_undo
        bpy.context.user_preferences.edit.use_global_undo = False
        bpy.ops.object.mode_set(mode='OBJECT')
        obj = bpy.context.active_object
        mesh = obj.data
        do_inset(mesh, self.inset_amount, self.inset_height, self.region,
            self.scale == 'PERCENT')
        bpy.ops.object.mode_set(mode='EDIT')
        bpy.context.user_preferences.edit.use_global_undo = save_global_undo
+
+
def do_inset(mesh, amount, height, region, as_percent):
    """Inset the selected faces of mesh in place.

    Args:
        mesh: bpy mesh data (object mode) whose selected faces are inset
        amount: float - inward distance (percent or absolute, see as_percent)
        height: float - amount to raise the inset faces
        region: bool - treat the selection as one region
        as_percent: bool - amount is a percentage of the max inset

    Side effects:
        Adds the inset vertices/faces to mesh and deletes the original
        selected faces; toggles edit/object mode several times.
    """
    if amount <= 0.0:
        return
    # Slope angle implied by raising 'height' while moving inward 'amount'.
    pitch = math.atan(height / amount)
    selfaces = []
    selface_indices = []
    for face in mesh.faces:
        if face.select and not face.hide:
            selfaces.append(face)
            selface_indices.append(face.index)
    m = geom.Model()
    # if add all mesh.vertices, coord indices will line up
    # Note: not using Points.AddPoint which does dup elim
    # because then would have to map vertices in and out
    m.points.pos = [v.co.to_tuple() for v in mesh.vertices]
    for f in selfaces:
        m.faces.append(list(f.vertices))
        m.face_data.append(f.index)
    orig_numv = len(m.points.pos)
    orig_numf = len(m.faces)
    model.BevelSelectionInModel(m, amount, pitch, True, region, as_percent)
    if len(m.faces) == orig_numf:
        # something went wrong with Bevel - just treat as no-op
        return
    # blender_faces: newfaces but all 4-tuples and no 0
    # in 4th position if a 4-sided poly
    blender_faces = []
    blender_old_face_index = []
    for i in range(orig_numf, len(m.faces)):
        f = m.faces[i]
        if len(f) == 3:
            # Triangles are padded with a trailing 0 (vertices_raw format).
            blender_faces.append(list(f) + [0])
            blender_old_face_index.append(m.face_data[i])
        elif len(f) == 4:
            if f[3] == 0:
                # Rotate the quad so real vertex 0 is not in the 4th slot,
                # where vertices_raw would misread it as a triangle marker.
                blender_faces.append([f[3], f[0], f[1], f[2]])
            else:
                blender_faces.append(f)
            blender_old_face_index.append(m.face_data[i])
    num_new_vertices = len(m.points.pos) - orig_numv
    mesh.vertices.add(num_new_vertices)
    for i in range(orig_numv, len(m.points.pos)):
        mesh.vertices[i].co = mathutils.Vector(m.points.pos[i])
    start_faces = len(mesh.faces)
    mesh.faces.add(len(blender_faces))
    for i, newf in enumerate(blender_faces):
        mesh.faces[start_faces + i].vertices_raw = newf
        # copy face attributes from old face that it was derived from
        bfi = blender_old_face_index[i]
        # NOTE(review): 'if bfi' is falsy for old face index 0, so faces
        # derived from the mesh's first face never get attributes copied —
        # looks unintended; confirm before changing.
        if bfi and 0 <= bfi < start_faces:
            bfacenew = mesh.faces[start_faces + i]
            bface = mesh.faces[bfi]
            bfacenew.material_index = bface.material_index
            bfacenew.use_smooth = bface.use_smooth
    mesh.update(calc_edges=True)
    # remove original faces
    bpy.ops.object.mode_set(mode='EDIT')
    save_select_mode = bpy.context.tool_settings.mesh_select_mode
    # Switch to face select mode so the delete below targets faces only.
    bpy.context.tool_settings.mesh_select_mode = [False, False, True]
    bpy.ops.mesh.select_all(action='DESELECT')
    bpy.ops.object.mode_set(mode='OBJECT')
    for fi in selface_indices:
        mesh.faces[fi].select = True
    bpy.ops.object.mode_set(mode='EDIT')
    bpy.ops.mesh.delete(type='FACE')
    bpy.context.tool_settings.mesh_select_mode = save_select_mode
+
+
def panel_func(self, context):
    # Adds an "Inset" entry to the mesh-edit tool shelf.
    self.layout.label(text="Inset:")
    self.layout.operator("mesh.inset", text="Inset")


def register():
    # Register the operator and extend the mesh-edit tool panel.
    bpy.utils.register_class(Inset)
    bpy.types.VIEW3D_PT_tools_meshedit.append(panel_func)


def unregister():
    # Remove the operator and the panel extension again.
    bpy.utils.unregister_class(Inset)
    bpy.types.VIEW3D_PT_tools_meshedit.remove(panel_func)


# Allow running the add-on directly from Blender's text editor.
if __name__ == "__main__":
    register()
diff --git a/mesh_inset/geom.py b/mesh_inset/geom.py
new file mode 100644
index 00000000..98059dcf
--- /dev/null
+++ b/mesh_inset/geom.py
@@ -0,0 +1,719 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+"""Geometry classes and operations.
+Also, vector file representation (Art).
+"""
+
+__author__ = "howard.trickey@gmail.com"
+
+import math
+
# Distances less than about DISTTOL will be considered essentially zero;
# INVDISTTOL is the reciprocal, used to scale coordinates for quantizing.
DISTTOL = 1e-3
INVDISTTOL = 1e3


class Points(object):
    """Container of points without duplication, each mapped to an int.

    Points have dimension at least 2, maybe more.

    Implementation:
    In order to efficiently find duplicates, we quantize the points
    to tuples of ints and map from quantized tuples to vertex
    index.

    Attributes:
        pos: list of tuple of float - coordinates indexed by
            vertex number
        invmap: dict of tuple of int to int - quantized coordinates
            to vertex number map
    """

    def __init__(self, initlist=()):
        """Initialize, adding any points given in initlist.

        Args:
            initlist: iterable of tuple of float - initial points

        Note: the default is an immutable tuple; the previous mutable
        default list ([]) is a Python pitfall (shared across calls).
        """
        self.pos = []
        self.invmap = dict()
        for p in initlist:
            self.AddPoint(p)

    @staticmethod
    def Quantize(p):
        """Quantize the float tuple into an int tuple.

        Args:
            p: tuple of float
        Returns:
            tuple of int - scaled by INVDISTTOL and rounded p
        """

        return tuple([int(round(v * INVDISTTOL)) for v in p])

    def AddPoint(self, p):
        """Add point p to the Points set and return vertex number.

        If there is an existing point which quantizes the same,
        don't add a new one but instead return existing index.

        Args:
            p: tuple of float - coordinates (2-tuple or 3-tuple)
        Returns:
            int - the vertex number of added (or existing) point
        """

        qp = Points.Quantize(p)
        if qp in self.invmap:
            return self.invmap[qp]
        else:
            self.invmap[qp] = len(self.pos)
            self.pos.append(p)
            return len(self.pos) - 1

    def AddPoints(self, points):
        """Add another set of points to this set.

        We need to return a mapping from indices
        in the argument points space into indices
        in this point space.

        Args:
            points: Points - to union into this set
        Returns:
            list of int: maps added indices to new ones
        """

        vmap = [0] * len(points.pos)
        for i in range(len(points.pos)):
            vmap[i] = self.AddPoint(points.pos[i])
        return vmap

    def AddZCoord(self, z):
        """Change this in place to have a z coordinate, with value z.

        Assumes the coordinates are currently 2d.

        Args:
            z: the value of the z coordinate to add
        Side Effect:
            self now has a z-coordinate added
        """

        assert(len(self.pos) == 0 or len(self.pos[0]) == 2)
        # The quantization keys change too, so rebuild the inverse map.
        newinvmap = dict()
        for i, (x, y) in enumerate(self.pos):
            newp = (x, y, z)
            self.pos[i] = newp
            newinvmap[self.Quantize(newp)] = i
        self.invmap = newinvmap

    def AddToZCoord(self, i, delta):
        """Change the z-coordinate of point with index i to add delta.

        Assumes the coordinates are currently 3d.

        Args:
            i: int - index of a point
            delta: float - value to add to z-coord
        """

        (x, y, z) = self.pos[i]
        self.pos[i] = (x, y, z + delta)
+
+
class PolyArea(object):
    """A polygonal area: one polygon with optional holes.

    The polygon is a list of vertex ids (indices into a Points object)
    giving an implicitly-closed, CCW-oriented outer boundary.  Holes are
    CW-oriented vertex lists lying inside that boundary, so the filled
    region is always to the left of both contours.

    Attributes:
        points: Points
        poly: list of vertex ids
        holes: list of lists of vertex ids (each a hole in poly)
        data: any - application data (can hold color, e.g.)
    """

    def __init__(self, points=None, poly=None, holes=None, data=None):
        if points:
            self.points = points
        else:
            self.points = Points()
        if poly:
            self.poly = poly
        else:
            self.poly = []
        if holes:
            self.holes = holes
        else:
            self.holes = []
        self.data = data

    def AddHole(self, holepa):
        """Add another PolyArea's outer poly as a hole of self.

        The contour is reversed (outer CCW becomes hole CW) and its
        vertex ids are remapped into self.points.

        Args:
            holepa: PolyArea
        """

        vmap = self.points.AddPoints(holepa.points)
        self.holes.append([vmap[v] for v in reversed(holepa.poly)])

    def ContainsPoly(self, poly, points):
        """Test whether every vertex of poly lies inside self.poly.

        Args:
            poly: list of int - indices into points
            points: Points - maps to coords
        Returns:
            bool - True if poly is fully contained within self.poly
        """

        return all(PointInside(points.pos[v], self.poly, self.points) != -1
                   for v in poly)

    def Normal(self):
        """Return the normal of the polyarea's main poly."""

        pos = self.points.pos
        # Guard: need 3d coordinates and a non-empty contour.
        if not pos or len(pos[0]) == 2 or not self.poly:
            print("whoops, not enough info to calculate normal")
            return (0.0, 0.0, 1.0)
        return Newell(self.poly, self.points)
+
+
class PolyAreas(object):
    """Contains a list of PolyAreas and a shared Points.

    Attributes:
        polyareas: list of PolyArea
        points: Points
    """

    def __init__(self):
        self.polyareas = []
        self.points = Points()

    def scale_and_center(self, scaled_side_target):
        """Adjust the coordinates of the polyareas so that
        it is centered at the origin and has its longest
        dimension scaled to be scaled_side_target."""

        if len(self.points.pos) == 0:
            return
        (minv, maxv) = self.bounds()
        maxside = max([maxv[i] - minv[i] for i in range(2)])
        if maxside > 0.0:
            scale = scaled_side_target / maxside
        else:
            scale = 1.0
        translate = [-0.5 * (maxv[i] + minv[i]) for i in range(2)]
        dim = len(self.points.pos[0])
        if dim == 3:
            # BUGFIX: was translate.append([0.0]) — appending a *list* made
            # the z term evaluate 'float + list' and raise TypeError for
            # any 3d points.  Keep z untranslated (delta 0.0).
            translate.append(0.0)
        for v in range(len(self.points.pos)):
            self.points.pos[v] = tuple([scale * (self.points.pos[v][i] + \
                translate[i]) for i in range(dim)])

    def bounds(self):
        """Find bounding box of polyareas in xy.

        Returns:
            ([minx,miny],[maxx,maxy]) - all floats
        """

        huge = 1e100
        minv = [huge, huge]
        maxv = [-huge, -huge]
        for pa in self.polyareas:
            for face in [pa.poly] + pa.holes:
                for v in face:
                    vcoords = self.points.pos[v]
                    for i in range(2):
                        if vcoords[i] < minv[i]:
                            minv[i] = vcoords[i]
                        if vcoords[i] > maxv[i]:
                            maxv[i] = vcoords[i]
        if minv[0] == huge:
            minv = [0.0, 0.0]
        # BUGFIX: maxv starts at -huge, so the empty-input fallback must
        # compare against -huge (the old '== huge' test never matched).
        if maxv[0] == -huge:
            maxv = [0.0, 0.0]
        return (minv, maxv)
+
+
class Model(object):
    """A generic 3d model: shared vertices plus n-gon faces.

    Every vertex lives in a Points container and is referred to by its
    integer 'vertex id', which also retrieves its 3d coordinates.
    The visible geometry is the faces: n-gons (n>2), each given as the
    CCW-ordered list of its corner vertex ids.  Faces may carry
    application data, copied into newly created faces from the most
    likely neighbor faces.

    Attributes:
        points: geom.Points - the 3d vertices
        faces: list of list of indices (each a CCW traversal of a face)
        face_data: list of any - if present, is parallel to
            faces list and holds arbitrary data
    """

    def __init__(self):
        """Create an empty model."""
        self.faces = []
        self.face_data = []
        self.points = Points()
+
+
class Art(object):
    """A vector art diagram: simply a collection of paths.

    Attributes:
        paths: list of Path objects
    """

    def __init__(self):
        """Start with no paths."""
        self.paths = []
+
+
class Paint(object):
    """A color to fill or stroke with.

    For now only flat RGB colors are supported; patterns or images
    could be added later.

    Attributes:
      color: (r, g, b) triple of floats, 0.0=no color, 1.0=max color
    """

    def __init__(self, r=0.0, g=0.0, b=0.0):
        self.color = (r, g, b)

    @staticmethod
    def CMYK(c, m, y, k):
        """Return a Paint built from CMYK components.

        Uses the conversion formula from 6.2.4 of the PDF Reference.

        Args:
          c, m, y, k: float - in range [0, 1]
        Returns:
          Paint - with components converted to rgb form
        """

        # Each rgb channel is 1 minus the (black-adjusted) ink amount,
        # clamped so the subtraction never goes negative.
        rgb = tuple(1.0 - min(1.0, ink + k) for ink in (c, m, y))
        return Paint(*rgb)
+
# Convenience singletons for the two most common paints.
black_paint = Paint()
white_paint = Paint(1.0, 1.0, 1.0)

# Named colors, looking like the 16 basic HTML/CSS color keywords
# (silver is approximated as 0.75).
ColorDict = {
    'aqua': Paint(0.0, 1.0, 1.0),
    'black': Paint(0.0, 0.0, 0.0),
    'blue': Paint(0.0, 0.0, 1.0),
    'fuchsia': Paint(1.0, 0.0, 1.0),
    'gray': Paint(0.5, 0.5, 0.5),
    'green': Paint(0.0, 0.5, 0.0),
    'lime': Paint(0.0, 1.0, 0.0),
    'maroon': Paint(0.5, 0.0, 0.0),
    'navy': Paint(0.0, 0.0, 0.5),
    'olive': Paint(0.5, 0.5, 0.0),
    'purple': Paint(0.5, 0.0, 0.5),
    'red': Paint(1.0, 0.0, 0.0),
    'silver': Paint(0.75, 0.75, 0.75),
    'teal': Paint(0.0, 0.5, 0.5),
    'white': Paint(1.0, 1.0, 1.0),
    'yellow': Paint(1.0, 1.0, 0.0)
}
+
+
class Path(object):
    """A path in the PDF sense, with painting instructions.

    Attributes:
      subpaths: list of Subpath objects
      filled: True if path is to be filled
      fillevenodd: True if even-odd rule used to fill (else non-zero winding)
      stroked: True if path is to be stroked
      fillpaint: Paint to fill with
      strokepaint: Paint to stroke with
    """

    def __init__(self):
        self.subpaths = []
        self.filled = False
        self.fillevenodd = False
        self.stroked = False
        self.fillpaint = black_paint
        self.strokepaint = black_paint

    def AddSubpath(self, subpath):
        """Append subpath to this path's list of subpaths."""

        self.subpaths.append(subpath)

    def Empty(self):
        """Return True if this Path has no subpaths."""

        return not self.subpaths
+
+
class Subpath(object):
    """A subpath in the PDF sense, either open or closed.

    Lines, bezier pieces and circular arc pieces are represented as
    tuples whose first element is a letter giving the segment type,
    with coordinates (2-tuples of floats) in the other positions.

    Segment types:
      ('L', a, b) - line from a to b
      ('B', a, b, c, d) - cubic bezier from a to b, with control points c,d
      ('Q', a, b, c) - quadratic bezier from a to b, with 1 control point c
      ('A', a, b, rad, xrot, large-arc, ccw) - elliptical arc from a to b,
          with rad=(rx, ry) as radii, xrot is x-axis rotation in degrees,
          large-arc is True if arc should be >= 180 degrees,
          ccw is True if start->end follows counter-clockwise direction
          (see SVG spec); note that after rad,
          the rest are floats or bools, not coordinate pairs
    For every segment s, s[1] is its start point and s[2] its end point.

    Attributes:
      segments: list of segment tuples (see above)
      closed: True if closed
    """

    def __init__(self):
        self.segments = []
        self.closed = False

    def Empty(self):
        """Return True if this subpath has no segments."""

        return not self.segments

    def AddSegment(self, seg):
        """Append seg to this subpath's segment list."""

        self.segments.append(seg)

    @staticmethod
    def SegStart(s):
        """Return the coordinates of segment s's start point.

        Args:
          s: a segment tuple
        Returns:
          (float, float): the coordinates of the segment's start point
        """

        return s[1]

    @staticmethod
    def SegEnd(s):
        """Return the coordinates of segment s's end point.

        Args:
          s: a segment tuple
        Returns:
          (float, float): the coordinates of the segment's end point
        """

        return s[2]
+
+
class TransformMatrix(object):
    """Transformation matrix for 2d coordinates.

    The transform matrix is:
      [ a b 0 ]
      [ c d 0 ]
      [ e f 1 ]
    and coordinate transformation is defined by:
      [x' y' 1] = [x y 1] x TransformMatrix

    Attributes:
      a, b, c, d, e, f: floats
    """

    def __init__(self, a=1.0, b=0.0, c=0.0, d=1.0, e=0.0, f=0.0):
        self.a = a
        self.b = b
        self.c = c
        self.d = d
        self.e = e
        self.f = f

    def __str__(self):
        return str([self.a, self.b, self.c, self.d, self.e, self.f])

    def Copy(self):
        """Return a new TransformMatrix equal to this one."""

        return TransformMatrix(self.a, self.b, self.c, self.d, self.e, self.f)

    def ComposeTransform(self, a, b, c, d, e, f):
        """Compose the transform given by the arguments on top of this one.

        This is accomplished by replacing self with t x self,
        where t is the transform matrix formed from the args.

        Arguments:
          a, b, c, d, e, f: float - defines a composing TransformMatrix
        """

        # The whole right-hand side is evaluated before assignment,
        # so all products use the pre-update values of self.
        (self.a, self.b, self.c, self.d, self.e, self.f) = (
            a * self.a + b * self.c,
            a * self.b + b * self.d,
            c * self.a + d * self.c,
            c * self.b + d * self.d,
            e * self.a + f * self.c + self.e,
            e * self.b + f * self.d + self.f)

    def Apply(self, pt):
        """Return the result of applying this transform to pt = (x, y).

        Arguments:
          pt: (float, float)
        Returns:
          (x', y'): 2-tuple of floats, the result of [x y 1] x self
        """

        (x, y) = pt
        return (self.a * x + self.c * y + self.e,
                self.b * x + self.d * y + self.f)
+
+
def ApproxEqualPoints(p, q):
    """Return True if p and q are approximately the same points.

    Args:
      p: n-tuple of float
      q: n-tuple of float
    Returns:
      bool - True if every component differs by at most DISTTOL
          (i.e. the infinity-norm of p-q is <= DISTTOL)
    """

    # Iterate over p's indices (not zip) so a too-short q still raises.
    return all(abs(p[i] - q[i]) <= DISTTOL for i in range(len(p)))
+
+
def PointInside(v, a, points):
    """Return 1, 0, or -1 as v is inside, on, or outside polygon.

    Cf. Eric Haines ptinpoly in Graphics Gems IV.
    Counts crossings of the ray from v in the +x direction with the
    polygon's edges; an odd count means inside (even-odd test).

    Args:
      v : (float, float) or (float, float, float) - coordinates of a point
      a : list of vertex indices defining polygon (assumed CCW)
      points: Points - to get coordinates for polygon
    Returns:
      1, 0, -1: as v is inside, on, or outside polygon a
    """

    (xv, yv) = (v[0], v[1])
    vlast = points.pos[a[-1]]
    (x0, y0) = (vlast[0], vlast[1])
    if x0 == xv and y0 == yv:
        return 0
    # yflag0: is previous vertex strictly above the test point's y?
    yflag0 = y0 > yv
    inside = False
    n = len(a)
    for i in range(0, n):
        vi = points.pos[a[i]]
        (x1, y1) = (vi[0], vi[1])
        if x1 == xv and y1 == yv:
            # v coincides with a polygon vertex: it is 'on'.
            return 0
        yflag1 = y1 > yv
        # Only edges straddling the horizontal line through v can
        # cross the +x ray.
        if yflag0 != yflag1:
            xflag0 = x0 > xv
            xflag1 = x1 > xv
            if xflag0 == xflag1:
                if xflag0:
                    # Both endpoints right of v: edge must cross the ray.
                    inside = not inside
            else:
                # Endpoints on opposite sides in x: compute the actual
                # intersection x and toggle if at or right of v.
                z = x1 - (y1 - yv) * (x0 - x1) / (y0 - y1)
                if z >= xv:
                    inside = not inside
        x0 = x1
        y0 = y1
        yflag0 = yflag1
    if inside:
        return 1
    else:
        return -1
+
+
def SignedArea(polygon, points):
    """Return the area of the polygon, positive if CCW, negative if CW.

    Uses the shoelace formula.

    Args:
      polygon: list of vertex indices
      points: Points
    Returns:
      float - area of polygon, positive if it was CCW, else negative
    """

    n = len(polygon)
    total = 0.0
    for i, vindex in enumerate(polygon):
        u = points.pos[vindex]
        v = points.pos[polygon[(i + 1) % n]]
        total += u[0] * v[1] - u[1] * v[0]
    return 0.5 * total
+
+
def VecAdd(a, b):
    """Return vector a+b.

    Args:
      a: n-tuple of floats
      b: n-tuple of floats
    Returns:
      n-tuple of floats - pairwise addition a+b
    """

    # (Docstring previously said "a-b" - this is addition.)
    n = len(a)
    assert n == len(b)
    return tuple(a[i] + b[i] for i in range(n))
+
+
def VecSub(a, b):
    """Return vector a-b.

    Args:
      a: n-tuple of floats
      b: n-tuple of floats
    Returns:
      n-tuple of floats - pairwise subtraction a-b
    """

    assert len(a) == len(b)
    return tuple(x - y for (x, y) in zip(a, b))
+
+
def VecDot(a, b):
    """Return the dot product of two vectors.

    Args:
      a: n-tuple of floats
      b: n-tuple of floats
    Returns:
      float - dot product of a and b
    """

    n = len(a)
    assert n == len(b)
    # Accumulate in a local named 'total' rather than shadowing the
    # builtin 'sum' as the original did.
    total = 0.0
    for i in range(n):
        total += a[i] * b[i]
    return total
+
+
def VecLen(a):
    """Return the Euclidean length of the argument vector.

    Args:
      a: n-tuple of floats
    Returns:
      float: the 2-norm of a
    """

    # ("lenght" typo fixed; accumulate sum of squares, then sqrt.)
    s = 0.0
    for v in a:
        s += v * v
    return math.sqrt(s)
+
+
def Newell(poly, points):
    """Use Newell's method to find the polygon normal.

    Assume poly has length at least 3 and points are 3d.

    Args:
      poly: list of int - indices into points.pos
      points: Points - assumed 3d
    Returns:
      (float, float, float) - the average normal, normalized
    """

    n = len(poly)
    pos = points.pos
    sums = [0.0, 0.0, 0.0]
    for i, ai in enumerate(poly):
        a = pos[ai]
        b = pos[poly[(i + 1) % n]]
        sums[0] += (a[1] - b[1]) * (a[2] + b[2])
        sums[1] += (a[2] - b[2]) * (a[0] + b[0])
        sums[2] += (a[0] - b[0]) * (a[1] + b[1])
    return Norm3(sums[0], sums[1], sums[2])
+
+
def Norm3(x, y, z):
    """Return vector (x,y,z) normalized by dividing by its length.

    Returns (0.0, 0.0, 1.0) if the result is undefined (zero or
    effectively-zero length)."""
    sqrlen = x * x + y * y + z * z
    if sqrlen < 1e-100:
        return (0.0, 0.0, 1.0)
    try:
        d = math.sqrt(sqrlen)
        return (x / d, y / d, z / d)
    except (ValueError, OverflowError, ZeroDivisionError):
        # Narrowed from a bare 'except:', which would also swallow
        # KeyboardInterrupt and genuine programming errors.
        return (0.0, 0.0, 1.0)
+
+
# We're using a right-hand coord system, where
# forefinger=x, middle=y, thumb=z on the right hand.
# Then, e.g., (1,0,0) x (0,1,0) = (0,0,1)
def Cross3(a, b):
    """Return the cross product of two 3d vectors, a x b."""

    return (a[1] * b[2] - a[2] * b[1],
            a[2] * b[0] - a[0] * b[2],
            a[0] * b[1] - a[1] * b[0])
+
+
def MulPoint3(p, m):
    """Return matrix multiplication of p times m,
    where m is a 4x3 matrix (flat 12-element sequence, row-major)
    and p is a 3d point, extended with a homogeneous 1."""

    (x, y, z) = p
    # Column j of the result uses m[j], m[3+j], m[6+j] plus the
    # translation component m[9+j].
    return tuple(x * m[j] + y * m[3 + j] + z * m[6 + j] + m[9 + j]
                 for j in range(3))
diff --git a/mesh_inset/model.py b/mesh_inset/model.py
new file mode 100644
index 00000000..a3eb2aac
--- /dev/null
+++ b/mesh_inset/model.py
@@ -0,0 +1,575 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+"""Manipulations of Models.
+"""
+
+__author__ = "howard.trickey@gmail.com"
+
+from . import geom
+from . import triquad
+from . import offset
+import math
+
+
def PolyAreasToModel(polyareas, bevel_amount, bevel_pitch, quadrangulate):
    """Convert a PolyAreas into a Model object.

    Assumes polyareas are in xy plane.

    Args:
      polyareas: geom.PolyAreas
      bevel_amount: float - if > 0, amount of bevel
      bevel_pitch: float - if > 0, angle in radians of bevel
      quadrangulate: bool - should n-gons be quadrangulated?
    Returns:
      geom.Model
    """

    m = geom.Model()
    if not polyareas:
        return m
    # The model shares the PolyAreas' point set, extended to 3d with z=0.
    polyareas.points.AddZCoord(0.0)
    m.points = polyareas.points
    for pa in polyareas.polyareas:
        PolyAreaToModel(m, pa, bevel_amount, bevel_pitch, quadrangulate)
    return m
+
+
def PolyAreaToModel(m, pa, bevel_amount, bevel_pitch, quadrangulate):
    """Add one PolyArea to Model m as faces, beveling or quadrangulating.

    Args:
      m: geom.Model - the model to add faces (and face data) to
      pa: geom.PolyArea - the area to convert; assumed in the xy plane
      bevel_amount: float - if > 0, amount of bevel
      bevel_pitch: float - if > 0, angle in radians of bevel
      quadrangulate: bool - should n-gons be quadrangulated?
    """

    if bevel_amount > 0.0:
        BevelPolyAreaInModel(m, pa, bevel_amount, bevel_pitch, quadrangulate,
            False)
    elif quadrangulate:
        if len(pa.poly) == 0:
            return
        qpa = triquad.QuadrangulateFaceWithHoles(pa.poly, pa.holes, pa.points)
        m.faces.extend(qpa)
        m.face_data.extend([pa.data] * len(qpa))
    else:
        # NOTE(review): holes are dropped in this branch.
        m.faces.append(pa.poly)
        # TODO: just the first part of QuadrangulateFaceWithHoles, to join
        # holes to outer poly
        m.face_data.append(pa.data)
+
+
def ExtrudePolyAreasInModel(mdl, polyareas, depth, cap_back):
    """Extrude the boundaries given by polyareas by -depth in z.

    Assumes polyareas are in xy plane.

    Arguments:
      mdl: geom.Model - where to do extrusion
      polyareas: geom.Polyareas
      depth: float
      cap_back: bool - if True, cap off the back
    Side Effects:
      For all edges in polys in polyareas, make quads in Model
      extending those edges by depth in the negative z direction.
      The application data will be the data of the face that the edge
      is part of.
    """

    for pa in polyareas.polyareas:
        # NOTE(review): outer boundary is extruded with isccw=True,
        # holes with isccw=False (they wind the opposite way).
        back_poly = _ExtrudePoly(mdl, pa.poly, depth, pa.data, True)
        back_holes = []
        for p in pa.holes:
            back_holes.append(_ExtrudePoly(mdl, p, depth, pa.data, False))
        if cap_back:
            qpa = triquad.QuadrangulateFaceWithHoles(back_poly, back_holes,
                polyareas.points)
            # need to reverse each poly to get normals pointing down
            for i, p in enumerate(qpa):
                t = list(p)
                t.reverse()
                qpa[i] = tuple(t)
            mdl.faces.extend(qpa)
            mdl.face_data.extend([pa.data] * len(qpa))
+
+
def _ExtrudePoly(mdl, poly, depth, data, isccw):
    """Extrude the poly by -depth in z.

    Arguments:
      mdl: geom.Model - where to do extrusion
      poly: list of vertex indices
      depth: float
      data: application data
      isccw: True if counter-clockwise
    Side Effects
      For all edges in poly, make quads in Model
      extending those edges by depth in the negative z direction.
      The application data will be the data of the face that the edge
      is part of.
    Returns:
      list of int - vertices for extruded poly
      (NOTE(review): returns None, not a list, when len(poly) < 2)
    """

    if len(poly) < 2:
        return
    extruded_poly = []
    points = mdl.points
    # Walk the poly forward if CCW, backward if CW, so the generated
    # side faces keep a consistent orientation.
    if isccw:
        incr = 1
    else:
        incr = -1
    for i, v in enumerate(poly):
        vnext = poly[(i + incr) % len(poly)]
        (x0, y0, z0) = points.pos[v]
        (x1, y1, z1) = points.pos[vnext]
        vextrude = points.AddPoint((x0, y0, z0 - depth))
        vnextextrude = points.AddPoint((x1, y1, z1 - depth))
        if isccw:
            sideface = [v, vextrude, vnextextrude, vnext]
        else:
            sideface = [v, vnext, vnextextrude, vextrude]
        mdl.faces.append(sideface)
        mdl.face_data.append(data)
        extruded_poly.append(vextrude)
    return extruded_poly
+
+
def BevelPolyAreaInModel(mdl, polyarea,
        bevel_amount, bevel_pitch, quadrangulate, as_percent):
    """Bevel the interior of polyarea in model.

    This does smart beveling: advancing edges are merged
    rather than doing an 'overlap'. Advancing edges that
    hit an opposite edge result in a split into two beveled areas.

    If the polyarea is not in the xy plane, do the work in a
    transformed model, and then transfer the changes back.

    Arguments:
      mdl: geom.Model - where to do bevel
      polyarea: geom.PolyArea - area to bevel into
      bevel_amount: float - if > 0, amount of bevel
      bevel_pitch: float - if > 0, angle in radians of bevel
      quadrangulate: bool - should n-gons be quadrangulated?
      as_percent: bool - if True, interpret amount as percent of max
    Side Effects:
      Faces and points are added to model to model the
      bevel and the interior of the polyareas.
    """

    pa_norm = polyarea.Normal()
    if pa_norm == (0.0, 0.0, 1.0):
        # Already in the xy plane: work directly in mdl.
        m = mdl
        pa_rot = polyarea
    else:
        (pa_rot, inv_rot, inv_map) = _RotatedPolyAreaToXY(polyarea, pa_norm)
        # don't have to add the original faces into model, just their points.
        m = geom.Model()
        m.points = pa_rot.points
    # vertical speed of the offset front derives from the bevel angle
    vspeed = math.tan(bevel_pitch)
    off = offset.Offset(pa_rot, 0.0, vspeed)
    if as_percent:
        bevel_amount = bevel_amount * off.MaxAmount() / 100.0
    off.Build(bevel_amount)
    inner_pas = AddOffsetFacesToModel(m, off, polyarea.data)
    for pa in inner_pas.polyareas:
        if quadrangulate:
            if len(pa.poly) == 0:
                continue
            qpa = triquad.QuadrangulateFaceWithHoles(pa.poly, pa.holes,
                pa.points)
            m.faces.extend(qpa)
            m.face_data.extend([pa.data] * len(qpa))
        else:
            m.faces.append(pa.poly)
            m.face_data.append(pa.data)
    if m != mdl:
        # Worked in a rotated copy: transform the results back into mdl.
        _AddTransformedPolysToModel(mdl, m.faces, m.points, m.face_data,
            inv_rot, inv_map)
+
+
def AddOffsetFacesToModel(mdl, off, data=None):
    """Add the faces due to an offset into model.

    Returns the remaining interiors of the offset as a PolyAreas.

    Args:
      mdl: geom.Model - model to add offset faces into
      off: offset.Offset
      data: any - application data to be copied to the faces
    Returns:
      geom.PolyAreas
    """

    mdl.points = off.polyarea.points
    assert(len(mdl.points.pos) == 0 or len(mdl.points.pos[0]) == 3)
    # Iteratively walk the offset and all nested inner offsets,
    # using an explicit stack instead of recursion.
    o = off
    ostack = []
    while o:
        if o.endtime != 0.0:
            for face in o.facespokes:
                n = len(face)
                for i, spoke in enumerate(face):
                    nextspoke = face[(i + 1) % n]
                    v0 = spoke.origin
                    v1 = nextspoke.origin
                    v2 = nextspoke.dest
                    v3 = spoke.dest
                    if v2 == v3:
                        # degenerate quad: emit a triangle instead
                        mface = [v0, v1, v2]
                    else:
                        mface = [v0, v1, v2, v3]
                    mdl.faces.append(mface)
                    mdl.face_data.append(data)
        ostack.extend(o.inneroffsets)
        if ostack:
            o = ostack.pop()
        else:
            o = None
    return off.InnerPolyAreas()
+
+
def BevelSelectionInModel(mdl, bevel_amount, bevel_pitch, quadrangulate,
        as_region, as_percent):
    """Bevel all the faces in the model, perhaps as one region.

    If as_region is False, each face is beveled individually,
    otherwise regions of contiguous faces are merged into
    PolyAreas and beveled as a whole.

    TODO: something if extracted PolyAreas are not approximately
    planar.

    Args:
      mdl: geom.Model
      bevel_amount: float - amount to inset
      bevel_pitch: float - angle of bevel side
      quadrangulate: bool - should insides be quadrangulated?
      as_region: bool - should faces be merged into regions?
      as_percent: bool - should amount be interpreted as a percent
          of the maximum amount (if True) or an absolute amount?
    Side effect:
      Beveling faces will be added to the model
    """

    pas = []
    if as_region:
        pas = RegionToPolyAreas(mdl.faces, mdl.points, mdl.face_data)
    else:
        # One single-poly PolyArea per face, carrying that face's data.
        for f, face in enumerate(mdl.faces):
            pas.append(geom.PolyArea(mdl.points, face, [],
                mdl.face_data[f]))
    for pa in pas:
        BevelPolyAreaInModel(mdl, pa,
            bevel_amount, bevel_pitch, quadrangulate, as_percent)
+
+
def RegionToPolyAreas(faces, points, data):
    """Find polygonal outlines induced by union of faces.

    Finds the polygons formed by boundary edges (those not
    sharing an edge with another face in region_faces), and
    turns those into PolyAreas.
    In the general case, there will be holes inside.
    We want to associate data with the region PolyAreas.
    Just choose a representative element of data[] when
    more than one face is combined into a PolyArea.

    Args:
      faces: list of list of int - each sublist is a face (indices into points)
      points: geom.Points - gives coordinates for vertices
      data: list of any - parallel to faces, app data to put in PolyAreas
    Returns:
      list of geom.PolyArea
    """

    ans = []
    (edges, vtoe) = _GetEdgeData(faces)
    (face_adj, is_interior_edge) = _GetFaceGraph(faces, edges, vtoe, points)
    (components, ftoc) = _FindFaceGraphComponents(faces, face_adj)
    for c in range(len(components)):
        # Collect this component's boundary edges and index them.
        boundary_edges = set()
        betodata = dict()
        vstobe = dict()
        for e, ((vs, ve), f) in enumerate(edges):
            if ftoc[f] != c or is_interior_edge[e]:
                continue
            boundary_edges.add(e)
            # vstobe[v] is set of edges leaving v
            # (could be more than one if boundary touches itself at a vertex)
            if vs in vstobe:
                vstobe[vs].append(e)
            else:
                vstobe[vs] = [e]
            betodata[(vs, ve)] = data[f]
        # Walk boundary loops, consuming boundary_edges as we go.
        polys = []
        poly_data = []
        while boundary_edges:
            e = boundary_edges.pop()
            ((vstart, ve), face_i) = edges[e]
            poly = [vstart, ve]
            datum = betodata[(vstart, ve)]
            while ve != vstart:
                if ve not in vstobe:
                    print("whoops, couldn't close boundary")
                    break
                nextes = vstobe[ve]
                if len(nextes) == 1:
                    nexte = nextes[0]
                else:
                    # find a next edge with face index face_i
                    # TODO: this is not guaranteed to work,
                    # as continuation edge may have been for a different
                    # face that is now combined with face_i by erasing
                    # interior edges. Find a better algorithm here.
                    nexte = -1
                    for ne_cand in nextes:
                        if edges[ne_cand][1] == face_i:
                            nexte = ne_cand
                            break
                    if nexte == -1:
                        # case mentioned in TODO may have happened;
                        # just choose any nexte - may mess things up
                        nexte = nextes[0]
                ((_, ve), face_i) = edges[nexte]
                if nexte not in boundary_edges:
                    print("whoops, nexte not a boundary edge", nexte)
                    break
                boundary_edges.remove(nexte)
                if ve != vstart:
                    poly.append(ve)
            polys.append(poly)
            poly_data.append(datum)
        if len(polys) == 0:
            # can happen if an entire closed polytope is given
            # at least until we do an edge check
            return []
        elif len(polys) == 1:
            ans.append(geom.PolyArea(points, polys[0], [], poly_data[0]))
        else:
            # Multiple loops: the outermost becomes the PolyArea's
            # boundary, the rest become its holes.
            outerf = _FindOuterPoly(polys, points, faces)
            pa = geom.PolyArea(points, polys[outerf], [], poly_data[outerf])
            pa.holes = [polys[i] for i in range(len(polys)) if i != outerf]
            ans.append(pa)
    return ans
+
+
+def _GetEdgeData(faces):
+ """Find edges from faces, and some lookup dictionaries.
+
+ Args:
+ faces: list of list of int - each a closed CCW polygon of vertex indices
+ Returns:
+ (list of ((int, int), int), dict{ int->list of int}) -
+ list elements are ((startv, endv), face index)
+ dict maps vertices to edge indices
+ """
+
+ edges = []
+ vtoe = dict()
+ for findex, f in enumerate(faces):
+ nf = len(f)
+ for i, v in enumerate(f):
+ endv = f[(i + 1) % nf]
+ edges.append(((v, endv), findex))
+ eindex = len(edges) - 1
+ if v in vtoe:
+ vtoe[v].append(eindex)
+ else:
+ vtoe[v] = [eindex]
+ return (edges, vtoe)
+
+
def _GetFaceGraph(faces, edges, vtoe, points):
    """Find the face adjacency graph.

    Faces are adjacent if they share an edge,
    and the shared edge goes in the reverse direction,
    and if the angle between them isn't too large.

    Args:
      faces: list of list of int
      edges: list of ((int, int), int) - see _GetEdgeData
      vtoe: dict{ int->list of int } - see _GetEdgeData
      points: geom.Points
    Returns:
      (list of list of int, list of bool) -
        first list: each sublist is adjacent face indices for each face
        second list: maps edge index to True if it separates adjacent faces
    """

    face_adj = [[] for i in range(len(faces))]
    is_interior_edge = [False] * len(edges)
    for e, ((vs, ve), f) in enumerate(edges):
        # An edge (vs, ve) is interior iff some other edge runs ve->vs.
        for othere in vtoe[ve]:
            ((_, we), g) = edges[othere]
            if we == vs:
                # face g is adjacent to face f
                # TODO: angle check
                if g not in face_adj[f]:
                    face_adj[f].append(g)
                is_interior_edge[e] = True
    # Don't bother with mirror relations, will catch later
    return (face_adj, is_interior_edge)
+
+
def _FindFaceGraphComponents(faces, face_adj):
    """Partition faces into connected components.

    Args:
      faces: list of list of int
      face_adj: list of list of int - see _GetFaceGraph
    Returns:
      (list of list of int, list of int) -
        first list partitions face indices into separate lists,
        each a component
        second list maps face indices into their component index
    """

    if not faces:
        return ([], [])
    components = []
    # -1 marks 'not yet assigned to any component'.
    ftoc = [-1] * len(faces)
    for i in range(len(faces)):
        if ftoc[i] == -1:
            compi = len(components)
            comp = []
            _FFGCSearch(i, faces, face_adj, ftoc, compi, comp)
            components.append(comp)
    return (components, ftoc)
+
+
+def _FFGCSearch(findex, faces, face_adj, ftoc, compi, comp):
+ """Depth first search helper function for _FindFaceGraphComponents
+
+ Searches recursively through all faces connected to findex, adding
+ each face found to comp and setting ftoc for that face to compi.
+ """
+
+ comp.append(findex)
+ ftoc[findex] = compi
+ for otherf in face_adj[findex]:
+ if ftoc[otherf] == -1:
+ _FFGCSearch(otherf, faces, face_adj, ftoc, compi, comp)
+
+
def _FindOuterPoly(polys, points, faces):
    """Assuming polys has one CCW-oriented face when looking
    down average normal of faces, return that one.

    Only one of the faces should have a normal whose dot product
    with the average normal of faces is positive.

    Args:
      polys: list of list of int - list of polys given by vertex indices
      points: geom.Points
      faces: list of list of int - original selected region, used to find
          average normal
    Returns:
      int - the index in polys of the outermost one
    """

    if len(polys) < 2:
        return 0
    # Sum of (unnormalized) Newell normals over all non-degenerate faces.
    fnorm = (0.0, 0.0, 0.0)
    for face in faces:
        if len(face) > 2:
            fnorm = geom.VecAdd(fnorm, geom.Newell(face, points))
    if fnorm == (0.0, 0.0, 0.0):
        return 0
    # fnorm is really a multiple of the normal, but fine for test below
    for i, poly in enumerate(polys):
        if len(poly) > 2:
            pnorm = geom.Newell(poly, points)
            if geom.VecDot(fnorm, pnorm) > 0:
                return i
    # Fall back to the first poly if no candidate matched.
    print("whoops, couldn't find an outermost poly")
    return 0
+
+
def _RotatedPolyAreaToXY(polyarea, norm):
    """Return a PolyArea rotated to xy plane.

    Only the points in polyarea will be transferred.

    Args:
      polyarea: geom.PolyArea
      norm: the normal for polyarea
    Returns:
      (geom.PolyArea, (float, ..., float), dict{ int -> int }) - new PolyArea,
      4x3 inverse transform, dict mapping new verts to old ones
    """

    # find rotation matrix that takes norm to (0,0,1)
    (nx, ny, nz) = norm
    # Pick v orthogonal to norm, zeroing norm's smallest component
    # for numerical stability.
    if abs(nx) < abs(ny) and abs(nx) < abs(nz):
        v = (vx, vy, vz) = geom.Norm3(0.0, nz, - ny)
    elif abs(ny) < abs(nz):
        v = (vx, vy, vz) = geom.Norm3(nz, 0.0, - nx)
    else:
        v = (vx, vy, vz) = geom.Norm3(ny, - nx, 0.0)
    (ux, uy, uz) = geom.Cross3(v, norm)
    rotmat = [ux, vx, nx, uy, vy, ny, uz, vz, nz, 0.0, 0.0, 0.0]
    # rotation matrices are orthogonal, so inverse is transpose
    invrotmat = [ux, uy, uz, vx, vy, vz, nx, ny, nz, 0.0, 0.0, 0.0]
    pointmap = dict()
    invpointmap = dict()
    newpoints = geom.Points()
    for poly in [polyarea.poly] + polyarea.holes:
        for v in poly:
            vcoords = polyarea.points.pos[v]
            newvcoords = geom.MulPoint3(vcoords, rotmat)
            newv = newpoints.AddPoint(newvcoords)
            pointmap[v] = newv
            invpointmap[newv] = v
    pa = geom.PolyArea(newpoints)
    pa.poly = [pointmap[v] for v in polyarea.poly]
    pa.holes = [[pointmap[v] for v in hole] for hole in polyarea.holes]
    pa.data = polyarea.data
    return (pa, invrotmat, invpointmap)
+
+
def _AddTransformedPolysToModel(mdl, polys, points, poly_data,
        transform, pointmap):
    """Add (transformed) the points and faces to a model.

    Add polys to mdl. The polys have coordinates given by indices
    into points.pos; those need to be transformed by multiplying by
    the transform matrix.
    The vertices may already exist in mdl. Rather than relying on
    AddPoint to detect the duplicate (transform rounding error makes
    that dicey), the pointmap dictionary is used to map vertex indices
    in polys into those in mdl - if they exist already.

    Args:
      mdl: geom.Model - where to put new vertices, faces
      polys: list of list of int - each sublist a poly
      points: geom.Points - coords for vertices in polys
      poly_data: list of any - parallel to polys
      transform: (float, ..., float) - 12-tuple, a 4x3 transform matrix
      pointmap: dict { int -> int } - maps new vertex indices to old ones
    Side Effects:
      The model gets new faces and vertices, based on those in polys.
      We are allowed to modify pointmap, as it will be discarded after call.
    """

    # First ensure every referenced vertex exists (transformed) in mdl.
    for i, coords in enumerate(points.pos):
        if i not in pointmap:
            p = geom.MulPoint3(coords, transform)
            pointmap[i] = mdl.points.AddPoint(p)
    # Then add the faces, remapped through pointmap.
    for i, poly in enumerate(polys):
        mpoly = [pointmap[v] for v in poly]
        mdl.faces.append(mpoly)
        mdl.face_data.append(poly_data[i])
diff --git a/mesh_inset/offset.py b/mesh_inset/offset.py
new file mode 100644
index 00000000..3a44b95b
--- /dev/null
+++ b/mesh_inset/offset.py
@@ -0,0 +1,755 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+"""Creating offset polygons inside faces."""
+
+__author__ = "howard.trickey@gmail.com"
+
+import math
+from . import triquad
+from . import geom
+from .triquad import Sub2, Add2, Angle, Ccw, Normalized2, Perp2, Length2, \
+ LinInterp2, TOL
+from .geom import Points
+
+AREATOL = 1e-4
+
+
class Spoke(object):
    """A Spoke is a line growing from an outer vertex to an inner one.

    A Spoke is contained in an Offset (see below).

    Attributes:
      origin: int - index of origin point in a Points
      dest: int - index of dest point
      is_reflex: bool - True if spoke grows from a reflex angle
      dir: (float, float, float) - direction vector (normalized)
      speed: float - at time t, other end of spoke is
          origin + speed * t * dir. Speed is such that the wavefront
          from the face edges moves at speed 1.
      face: int - index of face containing this Spoke, in Offset
      index: int - index of this Spoke in its face
      destindex: int - index of Spoke dest in its face
    """

    def __init__(self, v, prev, next, face, index, points):
        """Set attribute of spoke from points making up initial angle.

        The spoke grows from an angle inside a face along the bisector
        of that angle. Its speed is 1/sin(.5a), where a is the angle
        formed by (prev, v, next). That speed means that the perpendicular
        from the end of the spoke to either of the prev->v or v->next
        edges will grow at speed 1.

        Args:
          v: int - index of point spoke grows from
          prev: int - index of point before v on boundary (in CCW order)
          next: int - index of point after v on boundary (in CCW order)
          face: int - index of face containing this spoke, in containing offset
          index: int - index of this spoke in its face
          points: geom.Points - maps vertex indices to 3d coords
        """

        self.origin = v
        self.dest = v
        self.face = face
        self.index = index
        self.destindex = -1
        vmap = points.pos
        vp = vmap[v]
        prevp = vmap[prev]
        nextp = vmap[next]
        uin = Normalized2(Sub2(vp, prevp))
        uout = Normalized2(Sub2(nextp, vp))
        uavg = Normalized2((0.5 * (uin[0] + uout[0]), \
            0.5 * (uin[1] + uout[1])))
        if abs(Length2(uavg)) < TOL:
            # in and out vectors are reverse of each other;
            # fall back to the outgoing direction with huge speed
            self.dir = (uout[0], uout[1], 0.0)
            self.is_reflex = False
            self.speed = 1e7
        else:
            # bisector direction is 90 degree CCW rotation of
            # average incoming/outgoing
            self.dir = (-uavg[1], uavg[0], 0.0)
            self.is_reflex = Ccw(next, v, prev, points)
            ang = Angle(prev, v, next, points)  # in range [0, 180)
            sin_half_ang = math.sin(math.pi * ang / 360.0)
            if abs(sin_half_ang) < TOL:
                # nearly-degenerate angle: cap speed instead of dividing by ~0
                self.speed = 1e7
            else:
                self.speed = 1.0 / sin_half_ang

    def __repr__(self):
        """Printing representation of a Spoke."""

        return "@%d+%gt%s <%d,%d>" % (self.origin, \
            self.speed, str(self.dir), \
            self.face, self.index)

    def EndPoint(self, t, points, vspeed):
        """Return the coordinates of the non-origin point at time t.

        Args:
          t: float - time to end of spoke
          points: geom.Points - coordinate map
          vspeed: float - speed in z direction
        Returns:
          (float, float, float) - coords of spoke's endpoint at time t
        """

        p = points.pos[self.origin]
        d = self.dir
        v = self.speed
        return (p[0] + v * t * d[0], p[1] + v * t * d[1], p[2] + vspeed * t)

    def VertexEvent(self, other, points):
        """Intersect self with other spoke, and return the OffsetEvent, if any.

        A vertex event is when one advancing spoke intersects an adjacent
        advancing spoke, forming a new vertex.

        Args:
          other: Spoke - other spoke to intersect with
          points: Geom.points
        Returns:
          None or OffsetEvent - if there's an intersection in the growing
            directions of the spokes, will return the OffsetEvent for
            the intersection;
            if lines are collinear or parallel, return None
        """

        vmap = points.pos
        a = vmap[self.origin]
        b = Add2(a, self.dir)
        c = vmap[other.origin]
        d = Add2(c, other.dir)
        # find intersection of line ab with line cd
        u = Sub2(b, a)
        v = Sub2(d, c)
        w = Sub2(a, c)
        pp = Perp2(u, v)
        if abs(pp) > TOL:
            # lines are neither parallel nor collinear
            si = Perp2(v, w) / pp
            ti = Perp2(u, w) / pp
            if si >= 0 and ti >= 0:
                # intersection is forward along both spokes
                p = LinInterp2(a, b, si)
                dist_ab = si * Length2(u)
                dist_cd = ti * Length2(v)
                time_ab = dist_ab / self.speed
                time_cd = dist_cd / other.speed
                # the event happens when the later of the two arrives
                time = max(time_ab, time_cd)
                return OffsetEvent(True, time, p, self, other)
        return None

    def EdgeEvent(self, other, offset):
        """Intersect self with advancing edge and return OffsetEvent, if any.

        An edge event is when one advancing spoke intersects an advancing
        edge. Advancing edges start out as face edges and move perpendicular
        to them, at a rate of 1. The endpoints of the edge are the advancing
        spokes on either end of the edge (so the edge shrinks or grows as
        it advances). At some time, the edge may shrink to nothing and there
        will be no EdgeEvent after that time.

        We represent an advancing edge by the first spoke (in CCW order
        of face) of the pair of defining spokes.

        At time t, end of this spoke is at
            o + d*s*t
        where o=self.origin, d=self.dir, s= self.speed.
        The advancing edge line has this equation:
            oo + od*os*t + p*a
        where oo, od, os are o, d, s for other spoke, and p is direction
        vector parallel to advancing edge, and a is a real parameter.
        Equating x and y of intersection point:

            o.x + d.x*s*t = oo.x + od.x*os*t + p.x*w
            o.y + d.y*s*t = oo.y + od.y*os*t + p.y*w

        which can be rearranged into the form

            a = bt + cw
            d = et + fw

        and solved for t, w.

        Args:
          other: Spoke - the edge out of this spoke's origin is the advancing
              edge to be checked for intersection
          offset: Offset - the containing Offset
        Returns:
          None or OffsetEvent - with data about the intersection, if any
        """

        vmap = offset.polyarea.points.pos
        o = vmap[self.origin]
        oo = vmap[other.origin]
        otherface = offset.facespokes[other.face]
        othernext = otherface[(other.index + 1) % len(otherface)]
        oonext = vmap[othernext.origin]
        p = Normalized2(Sub2(oonext, oo))
        # coefficients of the 2x2 linear system described in the docstring
        a = o[0] - oo[0]
        d = o[1] - oo[1]
        b = other.dir[0] * other.speed - self.dir[0] * self.speed
        e = other.dir[1] * other.speed - self.dir[1] * self.speed
        c = p[0]
        f = p[1]
        # solve by elimination, pivoting on whichever of c, f is nonzero
        if abs(c) > TOL:
            dem = e - f * b / c
            if abs(dem) > TOL:
                t = (d - f * a / c) / dem
                w = (a - b * t) / c
            else:
                return None
        elif abs(f) > TOL:
            dem = b - c * e / f
            if abs(dem) > TOL:
                t = (a - c * d / f) / dem
                w = (d - e * t) / f
            else:
                return None
        else:
            return None
        if t < 0.0:
            # intersection is in backward direction along self spoke
            return None
        if w < 0.0:
            # intersection on wrong side of first end of advancing line segment
            return None
        # calculate the equivalent of w for the other end
        aa = o[0] - oonext[0]
        dd = o[1] - oonext[1]
        bb = othernext.dir[0] * othernext.speed - self.dir[0] * self.speed
        ee = othernext.dir[1] * othernext.speed - self.dir[1] * self.speed
        cc = -p[0]
        ff = -p[1]
        if abs(cc) > TOL:
            ww = (aa - bb * t) / cc
        elif abs(ff) > TOL:
            ww = (dd - ee * t) / ff
        else:
            return None
        if ww < 0.0:
            # intersection beyond the second end of the advancing segment
            return None
        evertex = (o[0] + self.dir[0] * self.speed * t, \
            o[1] + self.dir[1] * self.speed * t)
        return OffsetEvent(False, t, evertex, self, other)
+
+
class OffsetEvent(object):
    """A spoke-related event during offset computation.

    There are two kinds of event:
      vertex event: the spoke intersects an adjacent spoke and makes a new
          vertex
      edge event: the spoke hits an advancing edge and splits it

    Attributes:
      is_vertex_event: True if this is a vertex event (else it is edge event)
      time: float - time at which it happens (edges advance at speed 1)
      event_vertex: (float, float) - intersection point of event
      spoke: Spoke - the spoke that this event is for
      other: Spoke - other spoke involved in event; if vertex event, this will
        be an adjacent spoke that intersects; if an edge event, this is the
        spoke whose origin's outgoing edge grows to hit this event's spoke
    """

    def __init__(self, isv, time, evertex, spoke, other):
        """Record the event kind, time, location, and participating spokes."""

        self.is_vertex_event = isv
        self.time = time
        self.event_vertex = evertex
        self.spoke = spoke
        self.other = other

    def __repr__(self):
        """Printing representation of an event."""

        kind = "V" if self.is_vertex_event else "E"
        return "%s t=%5f %s %s %s" % (kind, self.time, str(self.event_vertex),
            repr(self.spoke), repr(self.other))
+
+
class Offset(object):
    """Represents an offset polygonal area, and used to construct one.

    Currently, the polygonal area must lie approximately in the XY plane.
    As well as growing inwards in that plane, the advancing lines also
    move in the Z direction at the rate of vspeed.

    Attributes:
      polyarea: geom.PolyArea - the area we are offsetting from.
          We share the polyarea.points, and add to it as points in
          the offset polygonal area are computed.
      facespokes: list of list of Spoke - each sublist is a closed face
          (oriented CCW); the faces may mutually interfere.
          These lists are spokes for polyarea.poly + polyarea.holes.
      endtime: float - time when this offset hits its first
          event (relative to beginning of this offset), or the amount
          that takes this offset to the end of the total Build time
      timesofar: float - sum of times taken by all containing Offsets
      vspeed: float - speed that edges move perpendicular to offset plane
      inneroffsets: list of Offset - the offsets that take over after this
          (inside it)
    """

    def __init__(self, polyarea, time, vspeed):
        """Set up initial state of Offset from a polyarea.

        Args:
          polyarea: geom.PolyArea
          time: float - time so far
          vspeed: float - speed perpendicular to the offset plane
        """

        self.polyarea = polyarea
        self.facespokes = []
        self.endtime = 0.0
        self.timesofar = time
        self.vspeed = vspeed
        self.inneroffsets = []
        # spokes for the outer boundary, then one face per hole
        self.InitFaceSpokes(polyarea.poly)
        for f in polyarea.holes:
            self.InitFaceSpokes(f)

    def __repr__(self):
        """Printing representation of an Offset and its face spokes."""

        ans = ["Offset: endtime=%g" % self.endtime]
        for i, face in enumerate(self.facespokes):
            ans.append(("<%d>" % i) + str([str(spoke) for spoke in face]))
        return '\n'.join(ans)

    def PrintNest(self, indent_level=0):
        """Print an indented dump of this Offset and its nested inner Offsets."""

        indent = " " * indent_level * 4
        print(indent + "Offset timesofar=", self.timesofar, "endtime=",
            self.endtime)
        print(indent + " polyarea=", self.polyarea.poly, self.polyarea.holes)
        for o in self.inneroffsets:
            o.PrintNest(indent_level + 1)

    def InitFaceSpokes(self, face_vertices):
        """Initialize the offset representation of a face from vertex list.

        If the face has no area or too small an area, don't bother making it.

        Args:
          face_vertices: list of int - point indices for boundary of face
        Side effect:
          A new face (list of spokes) may be added to self.facespokes
        """

        n = len(face_vertices)
        if n <= 2:
            return
        points = self.polyarea.points
        area = abs(geom.SignedArea(face_vertices, points))
        if area < AREATOL:
            return
        findex = len(self.facespokes)
        fspokes = [Spoke(v, face_vertices[(i - 1) % n], \
            face_vertices[(i + 1) % n], findex, i, points) \
            for i, v in enumerate(face_vertices)]
        self.facespokes.append(fspokes)

    def NextSpokeEvents(self, spoke):
        """Return the OffsetEvents that will next happen for a given spoke.

        It might happen that some events happen essentially simultaneously,
        and also it is convenient to separate Edge and Vertex events, so
        we return two lists.
        But, for vertex events, only look at the event with the next Spoke,
        as the event with the previous spoke will be accounted for when we
        consider that previous spoke.

        Args:
          spoke: Spoke - a spoke in one of the faces of this object
        Returns:
          (float, list of OffsetEvent, list of OffsetEvent) -
              time of next event,
              next Vertex event list and next Edge event list
        """

        facespokes = self.facespokes[spoke.face]
        n = len(facespokes)
        bestt = 1e100
        bestv = []
        beste = []
        # First find vertex event (only the one with next spoke)
        next_spoke = facespokes[(spoke.index + 1) % n]
        ev = spoke.VertexEvent(next_spoke, self.polyarea.points)
        if ev:
            bestv = [ev]
            bestt = ev.time
        # Now find edge events, if this is a reflex vertex
        if spoke.is_reflex:
            prev_spoke = facespokes[(spoke.index - 1) % n]
            for f in self.facespokes:
                for other in f:
                    if other == spoke or other == prev_spoke:
                        continue
                    ev = spoke.EdgeEvent(other, self)
                    if ev:
                        if ev.time < bestt - TOL:
                            # strictly earlier: discard previous candidates
                            beste = []
                            bestv = []
                            bestt = ev.time
                        if abs(ev.time - bestt) < TOL:
                            beste.append(ev)
        return (bestt, bestv, beste)

    def Build(self, target=2e100):
        """Build the complete Offset structure or up until target time.

        Find the next event(s), makes the appropriate inner Offsets
        that are inside this one, and calls Build on those Offsets to continue
        the process until only a single point is left or time reaches target.
        """

        bestt = 1e100
        bestevs = [[], []]
        # gather all events that happen at the earliest event time
        for f in self.facespokes:
            for s in f:
                (t, ve, ee) = self.NextSpokeEvents(s)
                if t < bestt - TOL:
                    bestevs = [[], []]
                    bestt = t
                if abs(t - bestt) < TOL:
                    bestevs[0].extend(ve)
                    bestevs[1].extend(ee)
        if bestt == 1e100:
            # could happen if polygon is oriented wrong
            # or in other special cases
            return
        if abs(bestt) < TOL:
            # seems to be in a loop, so quit
            return
        self.endtime = bestt
        (ve, ee) = bestevs
        newfaces = []
        splitjoin = None
        if target < self.endtime:
            self.endtime = target
            newfaces = self.MakeNewFaces(self.endtime)
        elif ve and not ee:
            # Only vertex events.
            # Merging of successive vertices in inset face will
            # take care of the vertex events
            newfaces = self.MakeNewFaces(self.endtime)
        else:
            # Edge events too
            # First make the new faces (handles all vertex events)
            newfaces = self.MakeNewFaces(self.endtime)
            # Only do one edge event (handle other simultaneous edge
            # events in subsequent recursive Build calls)
            splitjoin = self.SplitJoinFaces(newfaces, ee[0])
        nexttarget = target - self.endtime
        if len(newfaces) > 0:
            pa = geom.PolyArea(points=self.polyarea.points)
            pa.data = self.polyarea.data
            newt = self.timesofar + self.endtime
            pa2 = None  # may make another
            if not splitjoin:
                pa.poly = newfaces[0]
                pa.holes = newfaces[1:]
            elif splitjoin[0] == 'split':
                (_, findex, newface0, newface1) = splitjoin
                if findex == 0:
                    # Outer poly of polyarea was split.
                    # Now there will be two polyareas.
                    # If there were holes, need to allocate according to
                    # which one contains the holes.
                    pa.poly = newface0
                    pa2 = geom.PolyArea(points=self.polyarea.points)
                    pa2.data = self.polyarea.data
                    pa2.poly = newface1
                    if len(newfaces) > 1:
                        # print("need to allocate holes")
                        for hf in newfaces[1:]:
                            if pa.ContainsPoly(hf, self.polyarea.points):
                                # print("add", hf, "to", pa.poly)
                                pa.holes.append(hf)
                            elif pa2.ContainsPoly(hf, self.polyarea.points):
                                # print("add", hf, "to", pa2.poly)
                                pa2.holes.append(hf)
                            else:
                                print("whoops, hole in neither poly!")
                    # NOTE(review): this assignment appears to be superseded
                    # by the unconditional one after the if/elif/else chain
                    # (which rebuilds the same list from pa and pa2) —
                    # confirm intent before removing.
                    self.inneroffsets = [Offset(pa, newt, self.vspeed), \
                        Offset(pa2, newt, self.vspeed)]
                else:
                    # A hole was split. New faces just replace the split one.
                    pa.poly = newfaces[0]
                    pa.holes = newfaces[0:findex] + [newface0, newface1] + \
                        newfaces[findex + 1:]
            else:
                # A join
                (_, findex, othfindex, newface0) = splitjoin
                if findex == 0 or othfindex == 0:
                    # Outer poly was joined to one hole.
                    pa.poly = newface0
                    pa.holes = [f for f in newfaces if f is not None]
                else:
                    # Two holes were joined
                    pa.poly = newfaces[0]
                    pa.holes = [f for f in newfaces if f is not None] + \
                        [newface0]
            self.inneroffsets = [Offset(pa, newt, self.vspeed)]
            if pa2:
                self.inneroffsets.append(Offset(pa2, newt, self.vspeed))
        if nexttarget > TOL:
            for o in self.inneroffsets:
                o.Build(nexttarget)

    def FaceAtSpokeEnds(self, f, t):
        """Return a new face that is at the spoke ends of face f at time t.

        Also merges any adjacent approximately equal vertices into one vertex,
        so returned list may be smaller than len(f).
        Also sets the destindex fields of the spokes to the vertex they
        will now end at.

        Args:
          f: list of Spoke - one of self.faces
          t: float - time in this offset
        Returns:
          list of int - indices into self.polyarea.points
              (which has been extended with new ones)
        """

        newface = []
        points = self.polyarea.points
        for i in range(0, len(f)):
            s = f[i]
            vcoords = s.EndPoint(t, points, self.vspeed)
            v = points.AddPoint(vcoords)
            if newface:
                if v == newface[-1]:
                    # coincides with previous vertex: merge
                    s.destindex = len(newface) - 1
                elif i == len(f) - 1 and v == newface[0]:
                    # last vertex coincides with first: merge
                    s.destindex = 0
                else:
                    newface.append(v)
                    s.destindex = len(newface) - 1
            else:
                newface.append(v)
                s.destindex = 0
            s.dest = v
        return newface

    def MakeNewFaces(self, t):
        """For each face in this offset, make new face extending spokes
        to time t.

        Args:
          t: double - time
        Returns:
          list of list of int - list of new faces
        """

        ans = []
        for f in self.facespokes:
            newf = self.FaceAtSpokeEnds(f, t)
            # degenerate (fewer than 3 vertex) faces are dropped
            if len(newf) > 2:
                ans.append(newf)
        return ans

    def SplitJoinFaces(self, newfaces, ev):
        """Use event ev to split or join faces.

        Given ev, an edge event, use the ev spoke to split the
        other spoke's inner edge.
        If the ev spoke's face and other's face are the same, this splits the
        face into two; if the faces are different, it joins them into one.
        We have just made faces at the end of the spokes.
        We have to remove the edge going from the other spoke to its
        next spoke, and replace it with two edges, going to and from
        the event spoke's destination.
        General situation:
                 __  s  ____
               c\   b\ | /a  /e
                 \    \|/   /
                  f----------------g
                 /      d          \\
               o/                   \h

        where sd is the event spoke and of is the "other spoke",
        hg is a spoke, and cf, fg, ge, ad, and db are edges in
        the new inside face.
        What we are to do is to split fg into two edges, with the
        joining point attached where b,s,a join.
        There are a bunch of special cases:
         - one of split fg edges might have zero length because end points
           are already coincident or nearly coincident.
         - maybe c==b or e==a

        Args:
          newfaces: list of list of int - the new faces
          ev: OffsetEvent - an edge event
        Side Effects:
          faces in newfaces that are involved in split or join are
          set to None
        Returns: one of:
          ('split', int, list of int, list of int) - int is the index in
              newfaces of the face that was split, two lists are the
              split faces
          ('join', int, int, list of int) - two ints are the indices in
              newfaces of the faces that were joined, and the list is
              the joined face
        """

        # print("SplitJoinFaces", newfaces, ev)
        spoke = ev.spoke
        other = ev.other
        findex = spoke.face
        othfindex = other.face
        newface = newfaces[findex]
        othface = newfaces[othfindex]
        nnf = len(newface)
        nonf = len(othface)
        d = spoke.destindex
        f = other.destindex
        c = (f - 1) % nonf
        g = (f + 1) % nonf
        e = (f + 2) % nonf
        a = (d - 1) % nnf
        b = (d + 1) % nnf
        # print("newface=", newface)
        # if findex != othfindex: print("othface=", othface)
        # print("d=", d, "f=", f, "c=", c, "g=", g, "e=", e, "a=", a, "b=", b)
        newface0 = []
        newface1 = []
        # The two new faces put spoke si's dest on edge between
        # pi's dest and qi (edge after pi)'s dest in original face.
        # These are indices in the original face; the current dest face
        # may have fewer elements because of merging successive points
        if findex == othfindex:
            # Case where splitting one new face into two.
            # The new new faces are:
            # [d, g, e, ..., a] and [d, b, ..., c, f]
            # (except we actually want the vertex numbers at those positions)
            newface0 = [newface[d]]
            i = g
            while i != d:
                newface0.append(newface[i])
                i = (i + 1) % nnf
            newface1 = [newface[d]]
            i = b
            while i != f:
                newface1.append(newface[i])
                i = (i + 1) % nnf
            newface1.append(newface[f])
            # print("newface0=", newface0, "newface1=", newface1)
            # now the destindex values for the spokes are messed up
            # but I don't think we need them again
            newfaces[findex] = None
            return ('split', findex, newface0, newface1)
        else:
            # Case where joining two faces into one.
            # The new face is splicing d's face between
            # f and g in other face (or the reverse of all of that).
            newface0 = [othface[i] for i in range(0, f + 1)]
            newface0.append(newface[d])
            i = b
            while i != d:
                newface0.append(newface[i])
                i = (i + 1) % nnf
            newface0.append(newface[d])
            if g != 0:
                newface0.extend([othface[i] for i in range(g, nonf)])
            # print("newface0=", newface0)
            newfaces[findex] = None
            newfaces[othfindex] = None
            return ('join', findex, othfindex, newface0)

    def InnerPolyAreas(self):
        """Return the interior of the offset (and contained offsets) as
        PolyAreas.

        Returns:
          geom.PolyAreas
        """

        ans = geom.PolyAreas()
        ans.points = self.polyarea.points
        _AddInnerAreas(self, ans)
        return ans

    def MaxAmount(self):
        """Returns the maximum offset amount possible.
        Returns:
          float - maximum amount
        """

        # Need to do Build on a copy of points
        # so don't add points that won't be used when
        # really do a Build with a smaller amount
        test_points = geom.Points()
        test_points.AddPoints(self.polyarea.points)
        save_points = self.polyarea.points
        self.polyarea.points = test_points
        self.Build()
        max_amount = self._MaxTime()
        self.polyarea.points = save_points
        return max_amount

    def _MaxTime(self):
        # deepest inner offset determines how far the inset can go
        if self.inneroffsets:
            return max([o._MaxTime() for o in self.inneroffsets])
        else:
            return self.timesofar + self.endtime
+
+
def _AddInnerAreas(off, polyareas):
    """Add the innermost areas of offset off to polyareas.

    Recurses into nested inner Offsets; only leaf Offsets contribute
    faces. Assumes polyareas already shares the proper points.

    Arguments:
      off: Offset
      polyareas: geom.PolyAreas
    Side Effects:
      Any non-zero-area faces in the very inside of off are
      added to polyareas.
    """

    if off.inneroffsets:
        for inner in off.inneroffsets:
            _AddInnerAreas(inner, polyareas)
        return
    candidate = geom.PolyArea(polyareas.points)
    for i, spokes in enumerate(off.facespokes):
        endface = off.FaceAtSpokeEnds(spokes, off.endtime)
        if abs(geom.SignedArea(endface, polyareas.points)) < AREATOL:
            if i == 0:
                # outer boundary collapsed: nothing worth adding
                break
            continue
        if i == 0:
            candidate.poly = endface
            candidate.data = off.polyarea.data
        else:
            candidate.holes.append(endface)
    if candidate.poly:
        polyareas.polyareas.append(candidate)
diff --git a/mesh_inset/triquad.py b/mesh_inset/triquad.py
new file mode 100644
index 00000000..88affa89
--- /dev/null
+++ b/mesh_inset/triquad.py
@@ -0,0 +1,1172 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+
+from . import geom
+import math
+import random
+from math import sqrt
+
+# Points are 3-tuples or 2-tuples of reals: (x,y,z) or (x,y)
+# Faces are lists of integers (vertex indices into coord lists)
+# After triangulation/quadrangulation, the tris and quads will
+# be tuples instead of lists.
+# Vmaps are lists taking vertex index -> Point
+
+TOL = 1e-7 # a tolerance for fuzzy equality
+GTHRESH = 75 # threshold above which use greedy to _Quandrangulate
+ANGFAC = 1.0 # weighting for angles in quad goodness measure
+DEGFAC = 10.0 # weighting for degree in quad goodness measure
+
+# Angle kind constants
+Ang0 = 1
+Angconvex = 2
+Angreflex = 3
+Angtangential = 4
+Ang360 = 5
+
+
def TriangulateFace(face, points):
    """Triangulate the given face.

    An ear-chopping pass produces an initial triangulation, which a
    constrained delaunay pass then refines for better-shaped triangles.

    Args:
      face: list of int - indices in points, assumed CCW-oriented
      points: geom.Points - holds coordinates for vertices
    Returns:
      list of (int, int, int) - 3-tuples are CCW-oriented vertices of
          triangles making up the triangulation
    """

    if len(face) < 4:
        return [tuple(face)]
    initial_tris = EarChopTriFace(face, points)
    border = _BorderEdges([face])
    return _CDT(initial_tris, border, points)
+
+
def TriangulateFaceWithHoles(face, holes, points):
    """Like TriangulateFace, but with holes inside the face.

    Splices the holes ("islands") into the outer boundary via connecting
    segments, forming one complex polygon, then triangulates that with
    the same method as TriangulateFace.

    Args:
      face: list of int - indices in points, assumed CCW-oriented
      holes: list of list of int - each sublist is like face
          but CW-oriented and assumed to be inside face
      points: geom.Points - holds coordinates for vertices
    Returns:
      list of (int, int, int) - 3-tuples are CCW-oriented vertices of
          triangles making up the triangulation
    """

    if not holes:
        return TriangulateFace(face, points)
    sorted_holes = [_SortFace(h, points) for h in holes]
    merged = _JoinIslands(face, sorted_holes, points)
    initial_tris = EarChopTriFace(merged, points)
    border = _BorderEdges([face] + holes)
    return _CDT(initial_tris, border, points)
+
+
def QuadrangulateFace(face, points):
    """Quadrangulate the face (subdivide into convex quads and tris).

    Like TriangulateFace, but after triangulating, join as many pairs
    of triangles as possible into convex quadrilaterals.

    Args:
      face: list of int - indices in points, assumed CCW-oriented
      points: geom.Points - holds coordinates for vertices
    Returns:
      list of 3-tuples or 4-tuples of ints - CCW-oriented vertices of
          quadrilaterals and triangles making up the quadrangulation.
    """

    if len(face) < 4:
        return [tuple(face)]
    initial_tris = EarChopTriFace(face, points)
    border = _BorderEdges([face])
    refined = _CDT(initial_tris, border, points)
    return _Quandrangulate(refined, border, points)
+
+
def QuadrangulateFaceWithHoles(face, holes, points):
    """Like QuadrangulateFace, but with holes inside the faces.

    Args:
      face: list of int - indices in points, assumed CCW-oriented
      holes: list of list of int - each sublist is like face
          but CW-oriented and assumed to be inside face
      points: geom.Points - holds coordinates for vertices
    Returns:
      list of 3-tuples or 4-tuples of ints - CCW-oriented vertices of
          quadrilaterals and triangles making up the quadrangulation.
    """

    if not holes:
        return QuadrangulateFace(face, points)
    sorted_holes = [_SortFace(h, points) for h in holes]
    merged = _JoinIslands(face, sorted_holes, points)
    initial_tris = EarChopTriFace(merged, points)
    border = _BorderEdges([face] + holes)
    refined = _CDT(initial_tris, border, points)
    return _Quandrangulate(refined, border, points)
+
+
+def _SortFace(face, points):
+ """Rotate face so leftmost vertex is first, where face is
+ list of indices in points."""
+
+ n = len(face)
+ if n <= 1:
+ return face
+ lefti = 0
+ leftv = face[0]
+ for i in range(1, n):
+ # following comparison is lexicographic on n-tuple
+ # so sorts on x first, using lower y as tie breaker.
+ if points.pos[face[i]] < points.pos[leftv]:
+ lefti = i
+ leftv = face[i]
+ return face[lefti:] + face[0:lefti]
+
+
def EarChopTriFace(face, points):
    """Triangulate face (indices into points) by ear chopping.

    Returns a list of triangle faces (3-tuples once the loop finishes;
    the final remnant is converted with tuple()). The scan direction
    alternates after each chop to favor a pleasing, uniform
    triangulation when the input has regular structure (like a grid).
    """

    # Start at the lowest/leftmost coordinate in 2d space.
    cursor = _GetLeastIndex(face, points)
    step = 1
    tris = []
    remaining = face
    count = len(remaining)
    while count > 3:
        i = _FindEar(remaining, count, cursor, step, points)
        before = remaining[(i - 1) % count]
        tip = remaining[i]
        after = remaining[(i + 1) % count]
        remaining = _ChopEar(remaining, i)
        count = len(remaining)
        step = -step
        # resume next to the chopped ear, in the new direction
        cursor = i % count if step == 1 else (i - 1) % count
        tris.append((before, tip, after))
    tris.append(tuple(remaining))
    return tris
+
+
+def _GetLeastIndex(face, points):
+ """Return index of coordinate that is leftmost, lowest in face."""
+
+ bestindex = 0
+ bestpos = points.pos[face[0]]
+ for i in range(1, len(face)):
+ pos = points.pos[face[i]]
+ if pos[0] < bestpos[0] or \
+ (pos[0] == bestpos[0] and pos[1] < bestpos[1]):
+ bestindex = i
+ bestpos = pos
+ return bestindex
+
+
def _FindEar(face, n, start, incr, points):
    """Find an ear of the polygon and return the index of its tip.

    An ear is three consecutive vertices v(-1), v0, v1 such that v(-1)
    can connect to v1 without intersecting the polygon. The search
    begins at index start and walks in direction incr (alternating
    directions between calls gives nicer triangulations of simple
    convex polygons). A result is guaranteed because each full sweep
    that fails relaxes the acceptance rules ("desperation" modes).
    """

    kinds = _ClassifyAngles(face, n, points)
    for desperation in range(5):
        i = start
        while True:
            if _IsEar(face, i, n, kinds, points, desperation):
                return i
            i = (i + incr) % n
            if i == start:
                # swept the whole polygon; retry with looser rules
                break
+
+
def _IsEar(face, i, n, angk, points, mode):
    """Return true, false depending on ear status of vertices
    with indices i-1, i, i+1.
    mode is amount of desperation: 0 is Normal mode,
    mode 1 allows degenerate triangles (with repeated vertices)
    mode 2 allows local self crossing (folded) ears
    mode 3 allows any convex vertex (should always be one)
    mode 4 allows anything (just to be sure loop terminates!)

    NOTE(review): the fallthrough below does not obviously match the
    mode descriptions above - mode 2 reaches the final 'return True',
    mode 3 returns a segment-intersection test, and mode 4 returns the
    convexity flag. Confirm the intended mode semantics upstream
    before relying on modes 2-4."""

    k = angk[i]
    # neighbors two steps each way around the boundary
    vm2 = face[(i - 2) % n]
    vm1 = face[(i - 1) % n]
    v0 = face[i]
    v1 = face[(i + 1) % n]
    v2 = face[(i + 2) % n]
    if vm1 == v0 or v0 == v1:
        # repeated vertex: a degenerate ear, allowed from mode 1 on
        return (mode > 0)
    # b: tip angle is convex (or flat/zero); c: neighbors lie in the
    # local cones, so the chopping diagonal stays locally inside
    b = (k == Angconvex or k == Angtangential or k == Ang0)
    c = _InCone(vm1, v0, v1, v2, angk[(i + 1) % n], points) and \
        _InCone(v1, vm2, vm1, v0, angk[(i - 1) % n], points)
    if b and c:
        # normal-mode acceptance still requires the global check
        return _EarCheck(face, n, angk, vm1, v0, v1, points)
    if mode < 2:
        return False
    if mode == 3:
        return SegsIntersect(vm2, vm1, v0, v1, points)
    if mode == 4:
        return b
    return True
+
+
def _EarCheck(face, n, angk, vm1, v0, v1, points):
    """Return True if successive vertices vm1, v0, v1 form an ear.

    The caller has already established that v0's angle is not reflex
    and that local cone containment holds. What remains is checking
    that the candidate diagonal vm1-v1 is not blocked: no reflex
    vertex of the face may lie inside the closure of triangle
    (vm1, v0, v1), and (to catch degenerate layouts) neither segment
    attached to such a vertex may cross either side of the ear.
    """

    for j in range(n):
        fv = face[j]
        kind = angk[j]
        # only reflex/full-angle vertices other than the ear's own
        # corners can invalidate the ear
        if (kind != Angreflex and kind != Ang360) or fv in (vm1, v0, v1):
            continue
        # Is fv inside the closure of triangle (vm1, v0, v1)?
        inside = not (Ccw(v0, vm1, fv, points)
                      or Ccw(vm1, v1, fv, points)
                      or Ccw(v1, v0, fv, points))
        before_fv = face[(j - 1) % n]
        after_fv = face[(j + 1) % n]
        # Degenerate-case guard: either segment attached to fv crossing
        # either side of the potential ear also disqualifies it.
        crosses = (SegsIntersect(before_fv, fv, vm1, v0, points)
                   or SegsIntersect(before_fv, fv, v0, v1, points)
                   or SegsIntersect(fv, after_fv, vm1, v0, points)
                   or SegsIntersect(fv, after_fv, v0, v1, points))
        if inside or crosses:
            return False
    return True
+
+
+def _ChopEar(face, i):
+ """Return a copy of face (of length n), omitting element i."""
+
+ return face[0:i] + face[i + 1:]
+
+
def _InCone(vtest, a, b, c, bkind, points):
    """Return true if point with index vtest is in Cone of points with
    indices a, b, c, where Angle ABC has AngleKind Bkind.
    The Cone is the set of points inside the left face defined by
    segments ab and bc, disregarding all other segments of polygon for
    purposes of inside test."""

    if bkind == Angreflex or bkind == Ang360:
        # reflex angle at b: the cone is the complement of the cone of
        # the same angle traversed the other way (c, b, a)
        if _InCone(vtest, c, b, a, Angconvex, points):
            return False
        # NOTE(review): in the first parenthesized clause below,
        # Ccw(b, a, vtest, points) appears both negated and asserted, so
        # that clause is always False and the whole expression reduces to
        # "not (second clause)".  This looks like a copy/paste slip (the
        # last factor was perhaps meant to test a different orientation);
        # left byte-for-byte as-is -- verify against the original
        # algorithm before changing.
        return not((not(Ccw(b, a, vtest, points)) \
                    and not(Ccw(b, vtest, a, points)) \
                    and Ccw(b, a, vtest, points))
                   or
                   (not(Ccw(b, c, vtest, points)) \
                    and not(Ccw(b, vtest, c, points)) \
                    and Ccw(b, a, vtest, points)))
    else:
        # convex case: vtest must be strictly left of both ab and bc
        return Ccw(a, b, vtest, points) and Ccw(b, c, vtest, points)
+
+
def _JoinIslands(face, holes, points):
    """face is a CCW face containing the CW faces in the holes list,
    where each hole is sorted so the leftmost-lowest vertex is first.
    faces and holes are given as lists of indices into points.
    The holes should be sorted by softface.
    Add edges to make a new face that includes the holes (a Ccw traversal
    of the new face will have the inside always on the left),
    and return the new face."""

    # splice in holes leftmost-first so each bridge diagonal can be
    # found without crossing an as-yet-unjoined hole
    while len(holes) > 0:
        (hole, holeindex) = _LeftMostFace(holes, points)
        holes = holes[0:holeindex] + holes[holeindex + 1:]
        face = _JoinIsland(face, hole, points)
    return face
+
+
def _JoinIsland(face, hole, points):
    """Return a modified version of face that splices in the
    vertices of hole (which should be sorted)."""

    if len(hole) == 0:
        return face
    hv0 = hole[0]
    # d is the index in face of a vertex that can "see" hv0;
    # the new face walks ...face[d], hole..., hv0, face[d]... so the
    # bridge edge face[d]-hv0 is traversed once in each direction
    d = _FindDiag(face, hv0, points)
    newface = face[0:d + 1] + hole + [hv0] + face[d:]
    return newface
+
+
+def _LeftMostFace(holes, points):
+ """Return (hole,index of hole in holes) where hole has
+ the leftmost first vertex. To be able to handle empty
+ holes gracefully, call an empty hole 'leftmost'.
+ Assumes holes are sorted by softface."""
+
+ assert(len(holes) > 0)
+ lefti = 0
+ lefthole = holes[0]
+ if len(lefthole) == 0:
+ return (lefthole, lefti)
+ leftv = lefthole[0]
+ for i in range(1, len(holes)):
+ ihole = holes[i]
+ if len(ihole) == 0:
+ return (ihole, i)
+ iv = ihole[0]
+ if points.pos[iv] < points.pos[leftv]:
+ (lefti, lefthole, leftv) = (i, ihole, iv)
+ return (lefthole, lefti)
+
+
def _FindDiag(face, hv, points):
    """Find a vertex in face that can see vertex hv, if possible,
    and return the index into face of that vertex.
    Should be able to find a diagonal that connects a vertex of face
    left of v to hv without crossing face, but try two
    more desperation passes after that to get SOME diagonal, even if
    it might cross some edge somewhere.
    First desperation pass (mode == 1): allow points right of hv.
    Second desperation pass (mode == 2): allow crossing boundary poly"""

    besti = - 1  # sentinel: no candidate found yet
    bestdist = 1e30
    for mode in range(0, 3):
        for i in range(0, len(face)):
            v = face[i]
            if mode == 0 and points.pos[v] > points.pos[hv]:
                continue  # in mode 0, only want points left of hv
            dist = _DistSq(v, hv, points)
            # prefer the nearest visible vertex (mode 2 accepts any)
            if dist < bestdist:
                if _IsDiag(i, v, hv, face, points) or mode == 2:
                    (besti, bestdist) = (i, dist)
        if besti >= 0:
            break  # found one, so don't need other modes
    # mode 2 accepts every vertex, so this only fires on an empty face
    assert(besti >= 0)
    return besti
+
+
def _IsDiag(i, v, hv, face, points):
    """Return True if vertex v (at index i in face) can see vertex hv.
    v and hv are indices into points.
    (v, hv) is a diagonal if hv is in the cone of the Angle at index i on face
    and no segment in face intersects (v, hv).
    """

    n = len(face)
    vm1 = face[(i - 1) % n]
    v1 = face[(i + 1) % n]
    # classify the angle at v so _InCone knows which containment test to use
    k = _AngleKind(vm1, v, v1, points)
    if not _InCone(hv, vm1, v, v1, k, points):
        return False
    # candidate diagonal must not cross any edge of the face
    for j in range(0, n):
        vj = face[j]
        vj1 = face[(j + 1) % n]
        if SegsIntersect(v, hv, vj, vj1, points):
            return False
    return True
+
+
+def _DistSq(a, b, points):
+ """Return distance squared between coords with indices a and b in points.
+ """
+
+ diff = Sub2(points.pos[a], points.pos[b])
+ return Dot2(diff, diff)
+
+
+def _BorderEdges(facelist):
+ """Return a set of (u,v) where u and v are successive vertex indices
+ in some face in the list in facelist."""
+
+ ans = set()
+ for i in range(0, len(facelist)):
+ f = facelist[i]
+ for j in range(1, len(f)):
+ ans.add((f[j - 1], f[j]))
+ ans.add((f[-1], f[0]))
+ return ans
+
+
def _CDT(tris, bord, points):
    """Tris is a list of triangles ((a,b,c), CCW-oriented indices into points)
    Bord is a set of border edges (u,v), oriented so that tris
    is a triangulation of the left face of the border(s).
    Make the triangulation "Constrained Delaunay" by flipping "reversed"
    quadrangulaterals until can flip no more.
    Return list of triangles in new triangulation."""

    td = _TriDict(tris)
    re = _ReveresedEdges(tris, td, bord, points)  # local list, not the regex module
    ts = set(tris)
    # reverse the reversed edges until done.
    # reversing and edge adds new edges, which may or
    # may not be reversed or border edges, to re for
    # consideration, but the process will stop eventually.
    while len(re) > 0:
        (a, b) = e = re.pop()
        if e in bord or not _IsReversed(e, td, points):
            continue
        # rotate e in quad adbc to get other diagonal
        erev = (b, a)
        tl = td.get(e)
        tr = td.get(erev)
        if not tl or not tr:
            continue  # shouldn't happen
        c = _OtherVert(tl, a, b)
        d = _OtherVert(tr, a, b)
        if c is None or d is None:
            continue  # shouldn't happen
        # the flip replaces triangles tl, tr by newt1, newt2 sharing
        # the new diagonal (c, d)
        newt1 = (c, d, b)
        newt2 = (c, a, d)
        del td[e]
        del td[erev]
        # re-register all six directed edges of the two new triangles
        td[(c, d)] = newt1
        td[(d, b)] = newt1
        td[(b, c)] = newt1
        td[(d, c)] = newt2
        td[(c, a)] = newt2
        td[(a, d)] = newt2
        if tl in ts:
            ts.remove(tl)
        if tr in ts:
            ts.remove(tr)
        ts.add(newt1)
        ts.add(newt2)
        # the four outer edges of the flipped quad may have become
        # reversed; queue them for re-examination
        re.extend([(d, b), (b, c), (c, a), (a, d)])
    return list(ts)
+
+
+def _TriDict(tris):
+ """tris is a list of triangles (a,b,c), CCW-oriented indices.
+ Return dict mapping all edges in the triangles to the containing
+ triangle list."""
+
+ ans = dict()
+ for i in range(0, len(tris)):
+ (a, b, c) = t = tris[i]
+ ans[(a, b)] = t
+ ans[(b, c)] = t
+ ans[(c, a)] = t
+ return ans
+
+
def _ReveresedEdges(tris, td, bord, points):
    """Return list of reversed edges in tris.
    Only want edges not in bord, and only need one representative
    of (u,v)/(v,u), so choose the one with u < v.
    td is dictionary from _TriDict, and is used to find left and right
    triangles of edges.
    (The misspelling of 'Reversed' in the name is kept because callers
    use it.)"""

    ans = []
    for i in range(0, len(tris)):
        (a, b, c) = tris[i]
        for e in [(a, b), (b, c), (c, a)]:
            if e in bord:
                continue
            (u, v) = e
            # consider each undirected edge once, via its u < v direction
            if u < v:
                if _IsReversed(e, td, points):
                    ans.append(e)
    return ans
+
+
def _IsReversed(e, td, points):
    """If e=(a,b) is a non-border edge, with left-face triangle tl and
    right-face triangle tr, then it is 'reversed' if the circle through
    a, b, and (say) the other vertex of tl contains the other vertex of tr.
    td is a _TriDict, for finding triangles containing edges, and points
    gives the coordinates for vertex indices used in edges."""

    tl = td.get(e)
    if not tl:
        # border (or dangling) edge: has no left triangle, can't be reversed
        return False
    (a, b) = e
    tr = td.get((b, a))
    if not tr:
        return False
    c = _OtherVert(tl, a, b)
    d = _OtherVert(tr, a, b)
    if c is None or d is None:
        return False
    # Delaunay condition violated iff d lies inside circumcircle of a,b,c
    return InCircle(a, b, c, d, points)
+
+
+def _OtherVert(tri, a, b):
+ """tri should be a tuple of 3 vertex indices, two of which are a and b.
+ Return the third index, or None if all vertices are a or b"""
+
+ for v in tri:
+ if v != a and v != b:
+ return v
+ return None
+
+
def _ClassifyAngles(face, n, points):
    """Return vector of anglekinds of the Angle around each point in face.
    n is len(face); entry i classifies the interior angle at face[i]
    formed with its cyclic neighbours."""

    return [_AngleKind(face[(i - 1) % n], face[i], face[(i + 1) % n], points) \
        for i in list(range(0, n))]
+
+
def _AngleKind(a, b, c, points):
    """Return one of the Ang... constants to classify Angle formed by ABC,
    in a counterclockwise traversal of a face,
    where a, b, c are indices into points."""

    if Ccw(a, b, c, points):
        return Angconvex
    elif Ccw(a, c, b, points):
        return Angreflex
    else:
        # colinear within TOL: distinguish straight-through (tangential)
        # from a doubled-back spur by the direction of travel
        vb = points.pos[b]
        udotv = Dot2(Sub2(vb, points.pos[a]), Sub2(points.pos[c], vb))
        if udotv > 0.0:
            return Angtangential
        else:
            return Ang0  # to fix: return Ang360 if "inside" spur
+
+
# (The misspelling 'Quandrangulate' is kept because callers use it.)
def _Quandrangulate(tris, bord, points):
    """Tris is list of triangles, forming a triangulation of region whose
    border edges are in set bord.
    Combine adjacent triangles to make quads, trying for "good" quads where
    possible. Some triangles will probably remain uncombined"""

    (er, td) = _ERGraph(tris, bord, points)
    if len(er) == 0:
        return tris
    # the exact maximum matching is exponential in the worst case, so
    # fall back to the greedy heuristic for big graphs (GTHRESH is a
    # module-level constant)
    if len(er) > GTHRESH:
        match = _GreedyMatch(er)
    else:
        match = _MaxMatch(er)
    return _RemoveEdges(tris, match)
+
+
def _RemoveEdges(tris, match):
    """tris is list of triangles.
    match is as returned from _MaxMatch or _GreedyMatch.

    Return list of (A,D,B,C) resulting from deleting edge (A,B) causing a merge
    of two triangles; append to that list the remaining unmatched triangles."""

    ans = []
    triset = set(tris)
    while len(match) > 0:
        (_, e, tl, tr) = match.pop()
        (a, b) = e
        # both triangles are consumed by the merge
        if tl in triset:
            triset.remove(tl)
        if tr in triset:
            triset.remove(tr)
        c = _OtherVert(tl, a, b)
        d = _OtherVert(tr, a, b)
        if c is None or d is None:
            continue
        # quad vertices in CCW order around the merged face
        ans.append((a, d, b, c))
    return ans + list(triset)
+
+
def _ERGraph(tris, bord, points):
    """Make an 'Edge Removal Graph'.

    Given a list of triangles, the 'Edge Removal Graph' is a graph whose
    nodes are the triangles (think of a point in the center of them),
    and whose edges go between adjacent triangles (they share a non-border
    edge), such that it would be possible to remove the shared edge
    and form a convex quadrilateral. Forming a quadrilateralization
    is then a matter of finding a matching (set of edges that don't
    share a vertex - remember, these are the 'face' vertices).
    For better quadrilaterlization, we'll make the Edge Removal Graph
    edges have weights, with higher weights going to the edges that
    are more desirable to remove. Then we want a maximum weight matching
    in this graph.

    We'll return the graph in a kind of implicit form, using edges of
    the original triangles as a proxy for the edges between the faces
    (i.e., the edge of the triangle is the shared edge). We'll arbitrarily
    pick the triangle graph edge with lower-index start vertex.
    Also, to aid in traversing the implicit graph, we'll keep the left
    and right triangle triples with edge 'ER edge'.
    Finally, since we calculate it anyway, we'll return a dictionary
    mapping edges of the triangles to the triangle triples they're in.

    Args:
      tris: list of (int, int, int) giving a triple of vertex indices for
          triangles, assumed CCW oriented
      bord: set of (int, int) giving vertex indices for border edges
      points: geom.Points - for mapping vertex indices to coords
    Returns:
      (list of (weight,e,tl,tr), dict)
          where edge e=(a,b) is non-border edge
          with left face tl and right face tr (each a triple (i,j,k)),
          where removing the edge would form an "OK" quad (no concave angles),
          with weight representing the desirability of removing the edge
          The dict maps int pairs (a,b) to int triples (i,j,k), that is,
          mapping edges to their containing triangles.
    """

    td = _TriDict(tris)
    dd = _DegreeDict(tris)
    ans = []
    ctris = tris[:]  # copy, so argument not affected
    while len(ctris) > 0:
        (i, j, k) = tl = ctris.pop()
        for e in [(i, j), (j, k), (k, i)]:
            if e in bord:
                continue
            (a, b) = e
            # just consider one of (a,b) and (b,a), to avoid dups
            if a > b:
                continue
            erev = (b, a)
            tr = td.get(erev)
            if not tr:
                continue
            c = _OtherVert(tl, a, b)
            d = _OtherVert(tr, a, b)
            if c is None or d is None:
                continue
            # calculate amax, the max of the new angles that would
            # be formed at a and b if tl and tr were combined
            amax = max(Angle(c, a, b, points) + Angle(d, a, b, points),
                       Angle(c, b, a, points) + Angle(d, b, a, points))
            if amax > 180.0:
                continue  # merging would create a concave quad
            # flatter merged corners and high-degree endpoints are both
            # desirable; ANGFAC/DEGFAC are module-level weight constants
            weight = ANGFAC * (180.0 - amax) + DEGFAC * (dd[a] + dd[b])
            ans.append((weight, e, tl, tr))
    return (ans, td)
+
+
+def _GreedyMatch(er):
+ """er is list of (weight,e,tl,tr).
+
+ Find maximal set so that each triangle appears in at most
+ one member of set"""
+
+ # sort in order of decreasing weight
+ er.sort(key=lambda v: v[0], reverse=True)
+ match = set()
+ ans = []
+ while len(er) > 0:
+ (_, _, tl, tr) = q = er.pop()
+ if tl not in match and tr not in match:
+ match.add(tl)
+ match.add(tr)
+ ans.append(q)
+ return ans
+
+
def _MaxMatch(er):
    """Like _GreedyMatch, but use divide and conquer to find best possible set.

    Args:
      er: list of (weight,e,tl,tr) - see _ERGraph
    Returns:
      list that is a subset of er giving a maximum weight match
    """

    # _DCMatch returns (match, total weight); only the match is needed here
    return _DCMatch(er)[0]
+
+
def _DCMatch(er):
    """Recursive helper for _MaxMatch.

    Divide and Conquer approach to finding max weight matching.
    If we're lucky, there's an edge in er that separates the edge removal
    graph into (at least) two separate components. Then the max weight
    is either one that includes that edge or excludes it - and we can
    use a recursive call to _DCMatch to handle each component separately
    on what remains of the graph after including/excluding the separating edge.
    If we're not lucky, we fall back on _EMatch (see below).

    Args:
      er: list of (weight, e, tl, tr) (see _ERGraph)
    Returns:
      (list of (weight, e, tl, tr), float) - the subset forming a maximum
          matching, and the total weight of the match.
    """

    if not er:
        return ([], 0.0)
    if len(er) == 1:
        return (er, er[0][0])
    match = []
    matchw = 0.0
    for i in range(0, len(er)):
        (nc, comp) = _FindComponents(er, i)
        if nc == 1:
            # er[i] doesn't separate er
            continue
        (wi, _, tl, tr) = er[i]
        if comp[tl] != comp[tr]:
            # case 1: er separates graph
            # compare the matches that include er[i] versus
            # those that exclude it
            # a and b hold the two sides; ax/bx are those sides with the
            # triangles of er[i] (and edges touching them) removed, i.e.
            # the subproblems for the "include er[i]" alternative
            (a, b) = _PartitionComps(er, comp, i, comp[tl], comp[tr])
            ax = _CopyExcluding(a, tl, tr)
            bx = _CopyExcluding(b, tl, tr)
            (axmatch, wax) = _DCMatch(ax)
            (bxmatch, wbx) = _DCMatch(bx)
            # when excluding er[i]'s triangles removed nothing, the
            # "exclude" answer equals the "include" subanswer - reuse it
            if len(ax) == len(a):
                wa = wax
                amatch = axmatch
            else:
                (amatch, wa) = _DCMatch(a)
            if len(bx) == len(b):
                wb = wbx
                bmatch = bxmatch
            else:
                (bmatch, wb) = _DCMatch(b)
            w = wa + wb
            wx = wax + wbx + wi
            if w > wx:
                match = amatch + bmatch
                matchw = w
            else:
                match = [er[i]] + axmatch + bxmatch
                matchw = wx
        else:
            # case 2: er not needed to separate graph
            (a, b) = _PartitionComps(er, comp, -1, 0, 0)
            (amatch, wa) = _DCMatch(a)
            (bmatch, wb) = _DCMatch(b)
            match = amatch + bmatch
            matchw = wa + wb
        if match:
            break
    if not match:
        # no separating edge found: fall back to exhaustive search
        return _EMatch(er)
    return (match, matchw)
+
+
+def _EMatch(er):
+ """Exhaustive match helper for _MaxMatch.
+
+ This is the case when we were unable to find a single edge
+ separating the edge removal graph into two components.
+ So pick a single edge and try _DCMatch on the two cases of
+ including or excluding that edge. We may be lucky in these
+ subcases (say, if the graph is currently a simple cycle, so
+ only needs one more edge after the one we pick here to separate
+ it into components). Otherwise, we'll end up back in _EMatch
+ again, and the worse case will be exponential.
+
+ Pick a random edge rather than say, the first, to hopefully
+ avoid some pathological cases.
+
+ Args:
+ er: list of (weight, el, tl, tr) (see _ERGraph)
+ Returns:
+ (list of (weight, e, tl, tr), float) - the subset forming a maximum
+ matching, and the total weight of the match.
+ """
+
+ if not er:
+ return ([], 0.0)
+ if len(er) == 1:
+ return (er, er[1][1])
+ i = random.randint(0, len(er) - 1)
+ eri = (wi, _, tl, tr) = er[i]
+ # case a: include eri. exlude other edges that touch tl or tr
+ a = _CopyExcluding(er, tl, tr)
+ a.append(eri)
+ (amatch, wa) = _DCMatch(a)
+ wa += wi
+ if len(a) == len(er) - 1:
+ # if a excludes only eri, then er didn't touch anything else
+ # in the graph, and the best match will always include er
+ # and we can skip the call for case b
+ wb = -1.0
+ bmatch = []
+ else:
+ b = er[:i] + er[i + 1:]
+ (bmatch, wb) = _DCMatch(b)
+ if wa > wb:
+ match = amatch
+ match.append(eri)
+ matchw = wa
+ else:
+ match = bmatch
+ matchw = wb
+ return (match, matchw)
+
+
def _FindComponents(er, excepti):
    """Find connected components induced by edges, excluding one edge.

    Args:
      er: list of (weight, el, tl, tr) (see _ERGraph)
      excepti: index in er of edge to be excluded
    Returns:
      (int, dict): int is number of connected components found,
          dict maps triangle triple ->
              connected component index (starting at 1)
    """

    ncomps = 0
    comp = dict()
    for i in range(0, len(er)):
        (_, _, tl, tr) = er[i]
        for t in [tl, tr]:
            # each still-unlabelled triangle seeds a new component,
            # which _FCVisit then floods depth-first
            if t not in comp:
                ncomps += 1
                _FCVisit(er, excepti, comp, t, ncomps)
    return (ncomps, comp)
+
+
def _FCVisit(er, excepti, comp, t, compnum):
    """Helper for _FindComponents depth-first-search.
    Labels triangle t with compnum and recursively labels every triangle
    reachable from it through edges of er (skipping er[excepti])."""

    comp[t] = compnum
    for i in range(0, len(er)):
        if i == excepti:
            continue
        (_, _, tl, tr) = er[i]
        if tl == t or tr == t:
            # s is the triangle on the other side of this ER-graph edge
            s = tl
            if s == t:
                s = tr
            if s not in comp:
                _FCVisit(er, excepti, comp, s, compnum)
+
+
+def _PartitionComps(er, comp, excepti, compa, compb):
+ """Partition the edges of er by component number, into two lists.
+
+ Generally, put odd components into first list and even into second,
+ except that component compa goes in the first and compb goes in the second,
+ and we ignore edge er[excepti].
+
+ Args:
+ er: list of (weight, el, tl, tr) (see _ERGraph)
+ comp: dict - mapping triangle triple -> connected component index
+ excepti: int - index in er to ignore (unless excepti==-1)
+ compa: int - component to go in first list of answer (unless 0)
+ compb: int - component to go in second list of answer (unless 0)
+ Returns:
+ (list, list) - a partition of er according to above rules
+ """
+
+ parta = []
+ partb = []
+ for i in range(0, len(er)):
+
+ if i == excepti:
+ continue
+ tl = er[i][2]
+ c = comp[tl]
+ if c == compa or (c != compb and (c & 1) == 1):
+ parta.append(er[i])
+ else:
+ partb.append(er[i])
+ return (parta, partb)
+
+
+def _CopyExcluding(er, s, t):
+ """Return a copy of er, excluding all those involving triangles s and t.
+
+ Args:
+ er: list of (weight, e, tl, tr) - see _ERGraph
+ s: 3-tuple of int - a triangle
+ t: 3-tuple of int - a triangle
+ Returns:
+ Copy of er excluding those with tl or tr == s or t
+ """
+
+ ans = []
+ for e in er:
+ (_, _, tl, tr) = e
+ if tl == s or tr == s or tl == t or tr == t:
+ continue
+ ans.append(e)
+ return ans
+
+
+def _DegreeDict(tris):
+ """Return a dictionary mapping vertices in tris to the number of triangles
+ that they are touch."""
+
+ ans = dict()
+ for t in tris:
+ for v in t:
+ if v in ans:
+ ans[v] = ans[v] + 1
+ else:
+ ans[v] = 1
+ return ans
+
+
def PolygonPlane(face, points):
    """Return a Normal vector for the face with 3d coords given by indexing
    into points."""

    if len(face) < 3:
        # degenerate face: no plane is defined, pick +z
        return (0.0, 0.0, 1.0)  # arbitrary, we really have no idea
    else:
        coords = [points.pos[i] for i in face]
        return Normal(coords)
+
+
# This Normal appears to be on the CCW-traversing side of a polygon
def Normal(coords):
    """Return an average Normal vector for the point list, 3d coords.

    Uses Newell's method: each edge (a, b) of the polygon contributes
    (ay-by)(az+bz), (az-bz)(ax+bx), (ax-bx)(ay+by) to the x, y, z
    components of the (unnormalized) normal, which is then normalized
    by Norm3."""

    if len(coords) < 3:
        return (0.0, 0.0, 1.0)  # arbitrary

    (ax, ay, az) = coords[0]
    (bx, by, bz) = coords[1]
    (cx, cy, cz) = coords[2]

    if len(coords) == 3:
        sx = (ay - by) * (az + bz) + \
            (by - cy) * (bz + cz) + \
            (cy - ay) * (cz + az)
        sy = (az - bz) * (ax + bx) + \
            (bz - cz) * (bx + cx) + \
            (cz - az) * (cx + ax)
        # Fixed: the first term previously read (ax - bx) * (by + by);
        # Newell's method requires (ay + by) here, matching the other
        # two terms and the general-case branch below.
        sz = (ax - bx) * (ay + by) + \
            (bx - cx) * (by + cy) + \
            (cx - ax) * (cy + ay)
        return Norm3(sx, sy, sz)
    else:
        # seed the sums with the first two edges, then fold in the rest
        # (and the closing edge back to coords[0]) via _NormalAux
        sx = (ay - by) * (az + bz) + (by - cy) * (bz + cz)
        sy = (az - bz) * (ax + bx) + (bz - cz) * (bx + cx)
        sz = (ax - bx) * (ay + by) + (bx - cx) * (by + cy)
        return _NormalAux(coords[3:], coords[0], sx, sy, sz)
+
+
def _NormalAux(rest, first, sx, sy, sz):
    """Recursive accumulator for Normal's Newell sums: fold the edge
    from rest[0] to its successor (or back to `first` on the last edge)
    into the running sums, then normalize with Norm3 when done.
    Recurses once per remaining vertex."""
    (ax, ay, az) = rest[0]
    if len(rest) == 1:
        # last vertex: close the polygon back to the first point
        (bx, by, bz) = first
    else:
        (bx, by, bz) = rest[1]
    nx = sx + (ay - by) * (az + bz)
    ny = sy + (az - bz) * (ax + bx)
    nz = sz + (ax - bx) * (ay + by)
    if len(rest) == 1:
        return Norm3(nx, ny, nz)
    else:
        return _NormalAux(rest[1:], first, nx, ny, nz)
+
+
def Norm3(x, y, z):
    """Return vector (x,y,z) normalized by dividing by its length.
    Return (0.0, 0.0, 1.0) if the result is undefined (zero or
    near-zero length)."""
    sqrlen = x * x + y * y + z * z
    if sqrlen < 1e-100:
        return (0.0, 0.0, 1.0)
    else:
        try:
            d = sqrt(sqrlen)
            return (x / d, y / d, z / d)
        except Exception:
            # was a bare "except:", which would also swallow
            # KeyboardInterrupt/SystemExit; keep only the defensive
            # numeric fallback
            return (0.0, 0.0, 1.0)
+
+
# We're using right-hand coord system, where
# forefinger=x, middle=y, thumb=z on right hand.
# Then, e.g., (1,0,0) x (0,1,0) = (0,0,1)
def Cross3(a, b):
    """Return the cross product of two 3d vectors, a x b."""

    return (a[1] * b[2] - a[2] * b[1],
            a[2] * b[0] - a[0] * b[2],
            a[0] * b[1] - a[1] * b[0])
+
+
def Dot2(a, b):
    """Return the dot product of two 2d vectors, a . b."""

    xpart = a[0] * b[0]
    ypart = a[1] * b[1]
    return xpart + ypart
+
+
def Perp2(a, b):
    """Return a sort of 2d cross product (the perp-dot product)."""

    lhs = a[0] * b[1]
    rhs = a[1] * b[0]
    return lhs - rhs
+
+
def Sub2(a, b):
    """Return difference of 2d vectors, a-b."""

    (ax, ay) = (a[0], a[1])
    (bx, by) = (b[0], b[1])
    return (ax - bx, ay - by)
+
+
def Add2(a, b):
    """Return the sum of 2d vectors, a+b."""

    (ax, ay) = (a[0], a[1])
    (bx, by) = (b[0], b[1])
    return (ax + bx, ay + by)
+
+
def Length2(v):
    """Return length of vector v=(x,y)."""

    (x, y) = (v[0], v[1])
    return sqrt(x * x + y * y)
+
+
def LinInterp2(a, b, alpha):
    """Return the point alpha of the way from a to b."""

    beta = 1 - alpha
    return tuple(beta * p + alpha * q for (p, q) in zip(a[:2], b[:2]))
+
+
def Normalized2(p):
    """Return vector p normalized by dividing by its length.
    Return (0.0, 1.0) if the result is undefined (zero or near-zero
    length)."""

    (x, y) = p
    sqrlen = x * x + y * y
    if sqrlen < 1e-100:
        return (0.0, 1.0)
    else:
        try:
            d = sqrt(sqrlen)
            return (x / d, y / d)
        except Exception:
            # was a bare "except:", which would also swallow
            # KeyboardInterrupt/SystemExit; keep only the defensive
            # numeric fallback
            return (0.0, 1.0)
+
+
def Angle(a, b, c, points):
    """Return Angle abc in degrees, in range [0,180),
    where a,b,c are indices into points."""

    pb = points.pos[b]
    # u = C - B, v = A - B (computed inline)
    ux = points.pos[c][0] - pb[0]
    uy = points.pos[c][1] - pb[1]
    vx = points.pos[a][0] - pb[0]
    vy = points.pos[a][1] - pb[1]
    n1 = sqrt(ux * ux + uy * uy)
    n2 = sqrt(vx * vx + vy * vy)
    if n1 == 0.0 or n2 == 0.0:
        # degenerate: one arm of the angle has zero length
        return 0.0
    costheta = (ux * vx + uy * vy) / (n1 * n2)
    # clamp against rounding before acos
    costheta = min(1.0, max(-1.0, costheta))
    return math.acos(costheta) * 180.0 / math.pi
+
+
def SegsIntersect(ixa, ixb, ixc, ixd, points):
    """Return true if segment AB intersects CD,
    false if they just touch. ixa, ixb, ixc, ixd are indices
    into points."""

    a = points.pos[ixa]
    b = points.pos[ixb]
    c = points.pos[ixc]
    d = points.pos[ixd]
    u = Sub2(b, a)
    v = Sub2(d, c)
    w = Sub2(a, c)
    # pp is the cross of the segment directions; near-zero (within the
    # module constant TOL) means parallel or collinear
    pp = Perp2(u, v)
    if abs(pp) > TOL:
        # si, ti: parameters of the intersection point along AB and CD;
        # strict inequalities exclude endpoint-touching
        si = Perp2(v, w) / pp
        ti = Perp2(u, w) / pp
        return 0.0 < si < 1.0 and 0.0 < ti < 1.0
    else:
        # parallel or overlapping
        if Dot2(u, u) == 0.0 or Dot2(v, v) == 0.0:
            # one of the segments is a single point
            return False
        else:
            pp2 = Perp2(w, v)
            if abs(pp2) > TOL:
                return False  # parallel, not collinear
            # collinear: project A and B onto CD's parameter line and
            # test for proper overlap
            z = Sub2(b, c)
            (vx, vy) = v
            (wx, wy) = w
            (zx, zy) = z
            if vx == 0.0:
                (t0, t1) = (wy / vy, zy / vy)
            else:
                (t0, t1) = (wx / vx, zx / vx)
            return 0.0 < t0 < 1.0 and 0.0 < t1 < 1.0
+
+
def Ccw(a, b, c, points):
    """Return true if ABC is a counterclockwise-oriented triangle,
    where a, b, and c are indices into points.
    Returns false if not, or if colinear within TOL."""

    (ax, ay) = (points.pos[a][0], points.pos[a][1])
    (bx, by) = (points.pos[b][0], points.pos[b][1])
    (cx, cy) = (points.pos[c][0], points.pos[c][1])
    # d is twice the signed area of triangle ABC (2x2 determinant form);
    # strictly positive beyond the module tolerance TOL means CCW
    d = ax * by - bx * ay - ax * cy + cx * ay + bx * cy - cx * by
    return d > TOL
+
+
def InCircle(a, b, c, d, points):
    """Return true if circle through points with indices a, b, c
    contains point with index d (indices into points).
    Except: if ABC forms a counterclockwise oriented triangle
    then the test is reversed: return true if d is outside the circle.
    Will get false, no matter what orientation, if d is cocircular, with TOL^2.
    | xa ya xa^2+ya^2 1 |
    | xb yb xb^2+yb^2 1 | > 0
    | xc yc xc^2+yc^2 1 |
    | xd yd xd^2+yd^2 1 |
    """

    # lift each 2d point onto the paraboloid z = x^2 + y^2; the 4x4
    # determinant above then reduces to this 3x3 cofactor expansion
    (xa, ya, za) = _Icc(points.pos[a])
    (xb, yb, zb) = _Icc(points.pos[b])
    (xc, yc, zc) = _Icc(points.pos[c])
    (xd, yd, zd) = _Icc(points.pos[d])
    det = xa * (yb * zc - yc * zb - yb * zd + yd * zb + yc * zd - yd * zc) \
        - xb * (ya * zc - yc * za - ya * zd + yd * za + yc * zd - yd * zc) \
        + xc * (ya * zb - yb * za - ya * zd + yd * za + yb * zd - yd * zb) \
        - xd * (ya * zb - yb * za - ya * zc + yc * za + yb * zc - yc * zb)
    return det > TOL * TOL
+
+
+def _Icc(p):
+ (x, y) = (p[0], p[1])
+ return (x, y, x * x + y * y)
diff --git a/mesh_looptools.py b/mesh_looptools.py
new file mode 100644
index 00000000..2a403dd3
--- /dev/null
+++ b/mesh_looptools.py
@@ -0,0 +1,3710 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
# Add-on metadata read by Blender's add-on manager (bl_info convention
# for 2.5x-era scripts).
bl_info = {
    'name': "LoopTools",
    'author': "Bart Crouch",
    'version': (3, 2, 0),
    'blender': (2, 5, 7),
    'api': 35979,
    'location': "View3D > Toolbar and View3D > Specials (W-key)",
    'warning': "",
    'description': "Mesh modelling toolkit. Several tools to aid modelling",
    'wiki_url': "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
        "Scripts/Modeling/LoopTools",
    'tracker_url': "http://projects.blender.org/tracker/index.php?"\
        "func=detail&aid=26189",
    'category': 'Mesh'}
+
+
+import bpy
+import mathutils
+import math
+
+
+##########################################
+####### General functions ################
+##########################################
+
+
# used by all tools to improve speed on reruns
# (maps tool name -> dict of the tool's last input and computed loop data;
# read/written by cache_read/cache_write, cleared by cache_delete)
looptools_cache = {}
+
+
# force a full recalculation next time
def cache_delete(tool):
    """Drop any cached data for the given tool so its next run
    recomputes everything from scratch."""

    looptools_cache.pop(tool, None)
+
+
# check cache for stored information
def cache_read(tool, object, mesh, input_method, boundaries):
    """Return (True, single_loops, loops, derived, mapping) if the cache
    for this tool is still valid, else (False,)*5."""
    # current tool not cached yet
    if tool not in looptools_cache:
        return(False, False, False, False, False)
    # check if selected object didn't change
    if object.name != looptools_cache[tool]["object"]:
        return(False, False, False, False, False)
    # check if input didn't change
    if input_method != looptools_cache[tool]["input_method"]:
        return(False, False, False, False, False)
    if boundaries != looptools_cache[tool]["boundaries"]:
        return(False, False, False, False, False)
    # a changed visible-mirror-modifier stack invalidates the cached
    # derived mesh data
    modifiers = [mod.name for mod in object.modifiers if mod.show_viewport \
        and mod.type == 'MIRROR']
    if modifiers != looptools_cache[tool]["modifiers"]:
        return(False, False, False, False, False)
    # 'input' shadows the builtin; kept unchanged for consistency with
    # cache_write, which stores the same key
    input = [v.index for v in mesh.vertices if v.select and not v.hide]
    if input != looptools_cache[tool]["input"]:
        return(False, False, False, False, False)
    # reading values
    single_loops = looptools_cache[tool]["single_loops"]
    loops = looptools_cache[tool]["loops"]
    derived = looptools_cache[tool]["derived"]
    mapping = looptools_cache[tool]["mapping"]

    return(True, single_loops, loops, derived, mapping)
+
+
# store information in the cache
def cache_write(tool, object, mesh, input_method, boundaries, single_loops,
loops, derived, mapping):
    """Replace the cache entry for this tool with the current selection
    snapshot plus the computed loop data, so cache_read can validate and
    reuse it on the next run."""
    # clear cache of current tool
    if tool in looptools_cache:
        del looptools_cache[tool]
    # prepare values to be saved to cache
    input = [v.index for v in mesh.vertices if v.select and not v.hide]
    modifiers = [mod.name for mod in object.modifiers if mod.show_viewport \
        and mod.type == 'MIRROR']
    # update cache
    looptools_cache[tool] = {"input": input, "object": object.name,
        "input_method": input_method, "boundaries": boundaries,
        "single_loops": single_loops, "loops": loops,
        "derived": derived, "mapping": mapping, "modifiers": modifiers}
+
+
# calculates natural cubic splines through all given knots
def calculate_cubic_splines(mesh_mod, tknots, knots):
    """Return natural cubic splines through all given knots, or False if
    fewer than 2 knots.  Each spline is a list of 3 per-axis rows
    [a, b, c, d, t] of cubic coefficients.

    NOTE(review): when the loop is circular this mutates the knots and
    tknots lists passed in (inserts 4 padding entries at each end); the
    trailing [4:-4] slices only rebind locals.  Callers should pass
    copies -- confirm whether mutating tknots here is intended.
    """
    # hack for circular loops
    if knots[0] == knots[-1] and len(knots) > 1:
        circular = True
        # pad with 4 wrapped-around knots on each side so the spline is
        # smooth across the seam
        k_new1 = []
        for k in range(-1, -5, -1):
            if k - 1 < -len(knots):
                k += len(knots)
            k_new1.append(knots[k-1])
        k_new2 = []
        for k in range(4):
            if k + 1 > len(knots) - 1:
                k -= len(knots)
            k_new2.append(knots[k+1])
        for k in k_new1:
            knots.insert(0, k)
        for k in k_new2:
            knots.append(k)
        # extend the parameter values the same way, keeping the original
        # inter-knot spacing
        t_new1 = []
        total1 = 0
        for t in range(-1, -5, -1):
            if t - 1 < -len(tknots):
                t += len(tknots)
            total1 += tknots[t] - tknots[t-1]
            t_new1.append(tknots[0] - total1)
        t_new2 = []
        total2 = 0
        for t in range(4):
            if t + 1 > len(tknots) - 1:
                t -= len(tknots)
            total2 += tknots[t+1] - tknots[t]
            t_new2.append(tknots[-1] + total2)
        for t in t_new1:
            tknots.insert(0, t)
        for t in t_new2:
            tknots.append(t)
    else:
        circular = False
    # end of hack

    n = len(knots)
    if n < 2:
        return False
    x = tknots[:]
    locs = [mesh_mod.vertices[k].co[:] for k in knots]
    result = []
    # solve the natural-cubic-spline tridiagonal system once per axis
    for j in range(3):
        a = []
        for i in locs:
            a.append(i[j])
        # h: parameter gaps (guarded against zero to avoid division)
        h = []
        for i in range(n-1):
            if x[i+1] - x[i] == 0:
                h.append(1e-8)
            else:
                h.append(x[i+1] - x[i])
        q = [False]
        for i in range(1, n-1):
            q.append(3/h[i]*(a[i+1]-a[i]) - 3/h[i-1]*(a[i]-a[i-1]))
        # l (ell), u, z: forward sweep of the tridiagonal solve
        l = [1.0]
        u = [0.0]
        z = [0.0]
        for i in range(1, n-1):
            l.append(2*(x[i+1]-x[i-1]) - h[i-1]*u[i-1])
            if l[i] == 0:
                l[i] = 1e-8
            u.append(h[i] / l[i])
            z.append((q[i] - h[i-1] * z[i-1]) / l[i])
        l.append(1.0)
        z.append(0.0)
        # back-substitution for the b, c, d coefficients
        b = [False for i in range(n-1)]
        c = [False for i in range(n)]
        d = [False for i in range(n-1)]
        c[n-1] = 0.0
        for i in range(n-2, -1, -1):
            c[i] = z[i] - u[i]*c[i+1]
            b[i] = (a[i+1]-a[i])/h[i] - h[i]*(c[i+1]+2*c[i])/3
            d[i] = (c[i+1]-c[i]) / (3*h[i])
        for i in range(n-1):
            result.append([a[i], b[i], c[i], d[i], x[i]])
    # regroup per-axis rows into one [x-row, y-row, z-row] per segment
    splines = []
    for i in range(len(knots)-1):
        splines.append([result[i], result[i+n-1], result[i+(n-1)*2]])
    if circular: # cleaning up after hack
        knots = knots[4:-4]
        tknots = tknots[4:-4]

    return(splines)
+
+
# calculates linear splines through all given knots
def calculate_linear_splines(mesh_mod, tknots, knots):
    """Return linear splines through all given knots.
    Each spline is [locStart, locDif, tStart, tDif]."""

    splines = []
    for i in range(len(knots) - 1):
        start = mesh_mod.vertices[knots[i]].co
        delta = mesh_mod.vertices[knots[i + 1]].co - start
        t_start = tknots[i]
        t_delta = tknots[i + 1] - t_start
        splines.append([start, delta, t_start, t_delta])

    return(splines)
+
+
# calculate a best-fit plane to the given vertices
def calculate_plane(mesh_mod, loop, method="best_fit", object=False):
    """Return (com, normal): the center of mass of the loop's vertices
    and a plane normal computed by the chosen method
    ('best_fit', 'normal' or 'view')."""
    # getting the vertex locations
    locs = [mathutils.Vector(mesh_mod.vertices[v].co[:]) for v in loop[0]]

    # calculating the center of masss
    com = mathutils.Vector()
    for loc in locs:
        com += loc
    com /= len(locs)
    x, y, z = com

    if method == 'best_fit':
        # creating the covariance matrix
        mat = mathutils.Matrix([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0]])
        for loc in locs:
            mat[0][0] += (loc[0]-x)**2
            mat[0][1] += (loc[0]-x)*(loc[1]-y)
            mat[0][2] += (loc[0]-x)*(loc[2]-z)
            mat[1][0] += (loc[1]-y)*(loc[0]-x)
            mat[1][1] += (loc[1]-y)**2
            mat[1][2] += (loc[1]-y)*(loc[2]-z)
            mat[2][0] += (loc[2]-z)*(loc[0]-x)
            mat[2][1] += (loc[2]-z)*(loc[1]-y)
            mat[2][2] += (loc[2]-z)**2

        # calculating the normal to the plane
        normal = False
        try:
            mat.invert()
        except:
            # singular covariance matrix: vertices lie in an axis-aligned
            # plane, so pick that axis directly
            if sum(mat[0]) == 0.0:
                normal = mathutils.Vector([1.0, 0.0, 0.0])
            elif sum(mat[1]) == 0.0:
                normal = mathutils.Vector([0.0, 1.0, 0.0])
            elif sum(mat[2]) == 0.0:
                normal = mathutils.Vector([0.0, 0.0, 1.0])
        if not normal:
            # power iteration on the inverted covariance matrix converges
            # to the eigenvector of its dominant eigenvalue, i.e. the
            # best-fit plane normal.  'iter' shadows the builtin; kept.
            # NOTE(review): vec*mat is the 2.5x-era Vector*Matrix
            # pre-multiply -- confirm if porting to a newer API.
            itermax = 500
            iter = 0
            vec = mathutils.Vector([1.0, 1.0, 1.0])
            vec2 = (vec*mat)/(vec*mat).length
            while vec != vec2 and iter<itermax:
                iter += 1
                vec = vec2
                vec2 = (vec*mat)/(vec*mat).length
            normal = vec2

    elif method == 'normal':
        # averaging the vertex normals
        v_normals = [mesh_mod.vertices[v].normal for v in loop[0]]
        normal = mathutils.Vector()
        for v_normal in v_normals:
            normal += v_normal
        normal /= len(v_normals)
        normal.normalize()

    elif method == 'view':
        # calculate view normal
        rotation = bpy.context.space_data.region_3d.view_matrix.to_3x3().\
            inverted()
        normal = mathutils.Vector([0.0, 0.0, 1.0]) * rotation
        if object:
            # move the view normal into the object's local space
            normal *= object.matrix_world.inverted().to_euler().to_matrix()

    return(com, normal)
+
+
# calculate splines based on given interpolation method (controller function)
def calculate_splines(interpolation, mesh_mod, tknots, knots):
    """Dispatch to the cubic or linear spline calculation and return the
    resulting spline list."""
    if interpolation == 'cubic':
        return(calculate_cubic_splines(mesh_mod, tknots, knots[:]))
    # interpolation == 'linear'
    return(calculate_linear_splines(mesh_mod, tknots, knots[:]))
+
+
# check loops and only return valid ones
def check_loops(loops, mapping, mesh_mod):
    """Filter the given [loop, circular] pairs, dropping loops that are too
    short, exist only on the mirrored side, or are collapsed to a point."""
    valid_loops = []
    for loop, circular in loops:
        # loop needs to have at least 3 vertices
        if len(loop) < 3:
            continue
        # loop needs at least 1 vertex in the original, non-mirrored mesh
        if mapping and not any(mapping[vert] > -1 for vert in loop):
            continue
        # vertices can not all be at the same location
        locs = [mesh_mod.vertices[i].co for i in loop]
        if not any((locs[i] - locs[i + 1]).length > 1e-6
                for i in range(len(loop) - 1)):
            continue
        # passed all tests, loop is valid
        valid_loops.append([loop, circular])

    return(valid_loops)
+
+
# input: mesh, output: dict with the edge-key as key and face-index as value
def dict_edge_faces(mesh):
    """Map every visible edge-key to the indices of the visible faces
    using that edge."""
    edge_faces = {edge.key: [] for edge in mesh.edges if not edge.hide}
    for face in mesh.faces:
        if not face.hide:
            for key in face.edge_keys:
                edge_faces[key].append(face.index)

    return(edge_faces)
+
# input: mesh (edge-faces optional), output: dict with face-face connections
def dict_face_faces(mesh, edge_faces=False):
    """Map every visible face index to the indices of the faces that share
    an edge with it; edge_faces may be passed in to avoid recalculation."""
    if not edge_faces:
        edge_faces = dict_edge_faces(mesh)

    connected_faces = {face.index: [] for face in mesh.faces
        if not face.hide}
    for face in mesh.faces:
        if face.hide:
            continue
        for edge_key in face.edge_keys:
            connected_faces[face.index] += [other for other in
                edge_faces[edge_key] if other != face.index]

    return(connected_faces)
+
+
# input: mesh, output: dict with the vert index as key and edge-keys as value
def dict_vert_edges(mesh):
    """Map every visible vertex index to the edge-keys of the visible
    edges it belongs to."""
    vert_edges = {v.index: [] for v in mesh.vertices if not v.hide}
    for edge in mesh.edges:
        if not edge.hide:
            for vert in edge.key:
                vert_edges[vert].append(edge.key)

    return(vert_edges)
+
+
# input: mesh, output: dict with the vert index as key and face index as value
def dict_vert_faces(mesh):
    """Map every visible vertex index to the indices of the visible faces
    it belongs to."""
    vert_faces = {v.index: [] for v in mesh.vertices if not v.hide}
    for face in mesh.faces:
        if face.hide:
            continue
        for vert in face.vertices:
            vert_faces[vert].append(face.index)

    return(vert_faces)
+
+
# input: list of edge-keys, output: dictionary with vertex-vertex connections
def dict_vert_verts(edge_keys):
    """Map each vertex to the list of vertices it shares an edge-key with."""
    vert_verts = {}
    for v1, v2 in edge_keys:
        vert_verts.setdefault(v1, []).append(v2)
        vert_verts.setdefault(v2, []).append(v1)

    return(vert_verts)
+
+
# calculate input loops
def get_connected_input(object, mesh, scene, input):
    """Return (derived, mesh_mod, loops) for the tool's input setting:
    only the selected loops, or ('all') every loop parallel to them."""
    # get mesh with modifiers applied
    derived, mesh_mod = get_derived_mesh(object, mesh, scene)

    # calculate selected loops
    selected_keys = [edge.key for edge in mesh_mod.edges
        if edge.select and not edge.hide]
    loops = get_connected_selections(selected_keys)

    # expand to parallel loops unless only the selection is wanted
    if input != 'selected':  # input == 'all'
        loops = get_parallel_loops(mesh_mod, loops)

    return(derived, mesh_mod, loops)
+
+
# sorts all edge-keys into a list of loops
def get_connected_selections(edge_keys):
    """Sort the given edge-keys into connected loops.

    Returns a list of [vertex_list, circular] pairs; vertex_list is ordered
    along the loop and circular is True when both ends are connected.
    Consumes the connection dictionary as loops are traced.
    """
    # create connection data
    vert_verts = dict_vert_verts(edge_keys)

    # find loops consisting of connected selected edges
    loops = []
    while len(vert_verts) > 0:
        # start a new loop at an arbitrary remaining vertex
        # (was iter(vert_verts.keys()).__next__() — use the next() builtin)
        loop = [next(iter(vert_verts))]
        growing = True
        flipped = False

        # extend loop
        while growing:
            # no more connection data for current vertex
            if loop[-1] not in vert_verts:
                if not flipped:
                    loop.reverse()
                    flipped = True
                else:
                    growing = False
            else:
                extended = False
                for i, next_vert in enumerate(vert_verts[loop[-1]]):
                    if next_vert not in loop:
                        vert_verts[loop[-1]].pop(i)
                        if len(vert_verts[loop[-1]]) == 0:
                            del vert_verts[loop[-1]]
                        # remove connection both ways
                        if next_vert in vert_verts:
                            if len(vert_verts[next_vert]) == 1:
                                del vert_verts[next_vert]
                            else:
                                vert_verts[next_vert].remove(loop[-1])
                        loop.append(next_vert)
                        extended = True
                        break
                if not extended:
                    # found one end of the loop, continue with next
                    if not flipped:
                        loop.reverse()
                        flipped = True
                    # found both ends of the loop, stop growing
                    else:
                        growing = False

        # check if loop is circular
        if loop[0] in vert_verts:
            if loop[-1] in vert_verts[loop[0]]:
                # is circular: consume the closing connection on both sides
                if len(vert_verts[loop[0]]) == 1:
                    del vert_verts[loop[0]]
                else:
                    vert_verts[loop[0]].remove(loop[-1])
                if len(vert_verts[loop[-1]]) == 1:
                    del vert_verts[loop[-1]]
                else:
                    vert_verts[loop[-1]].remove(loop[0])
                loop = [loop, True]
            else:
                # not circular
                loop = [loop, False]
        else:
            # not circular
            loop = [loop, False]

        loops.append(loop)

    return(loops)
+
+
# get the derived mesh data, if there is a mirror modifier
def get_derived_mesh(object, mesh, scene):
    """Return (derived, mesh_mod).

    If the object has a visible mirror modifier, mesh_mod is a temporary
    mesh with only the mirror modifier(s) applied and derived is True;
    otherwise mesh_mod is the original mesh and derived is False.
    """
    # check for mirror modifiers
    if 'MIRROR' in [mod.type for mod in object.modifiers if mod.show_viewport]:
        derived = True
        # disable other modifiers, remembering their names so they can
        # be re-enabled afterwards
        show_viewport = [mod.name for mod in object.modifiers if \
            mod.show_viewport]
        for mod in object.modifiers:
            if mod.type != 'MIRROR':
                mod.show_viewport = False
        # get derived mesh
        # NOTE(review): presumably (apply_modifiers=True,
        # settings='PREVIEW') in the 2.5x to_mesh() API — confirm
        mesh_mod = object.to_mesh(scene, True, 'PREVIEW')
        # re-enable other modifiers
        for mod_name in show_viewport:
            object.modifiers[mod_name].show_viewport = True
    # no mirror modifiers, so no derived mesh necessary
    else:
        derived = False
        mesh_mod = mesh

    return(derived, mesh_mod)
+
+
# return a mapping of derived indices to indices
def get_mapping(derived, mesh, mesh_mod, single_vertices, full_search, loops):
    """Return {derived_vertex_index: original_vertex_index} for all loop
    vertices (and single_vertices), or False when no derived mesh is used.

    Vertices that only exist on the mirrored side map to -1. Matching is
    done by location (distance < 1e-6); full_search widens the candidate
    set from the selected visible vertices to all visible vertices.
    """
    if not derived:
        return(False)

    if full_search:
        verts = [v for v in mesh.vertices if not v.hide]
    else:
        verts = [v for v in mesh.vertices if v.select and not v.hide]

    # non-selected vertices around single vertices also need to be mapped
    if single_vertices:
        # first map the single vertices themselves by location
        mapping = dict([[vert, -1] for vert in single_vertices])
        verts_mod = [mesh_mod.vertices[vert] for vert in single_vertices]
        for v in verts:
            for v_mod in verts_mod:
                if (v.co - v_mod.co).length < 1e-6:
                    mapping[v_mod.index] = v.index
                    break
        real_singles = [v_real for v_real in mapping.values() if v_real>-1]

        # add the unselected neighbours of mapped singles as candidates
        verts_indices = [vert.index for vert in verts]
        for face in [face for face in mesh.faces if not face.select \
        and not face.hide]:
            for vert in face.vertices:
                if vert in real_singles:
                    for v in face.vertices:
                        if not v in verts_indices:
                            if mesh.vertices[v] not in verts:
                                verts.append(mesh.vertices[v])
                    break

    # create mapping of derived indices to indices
    mapping = dict([[vert, -1] for loop in loops for vert in loop[0]])
    if single_vertices:
        for single in single_vertices:
            mapping[single] = -1
    verts_mod = [mesh_mod.vertices[i] for i in mapping.keys()]
    for v in verts:
        for v_mod in verts_mod:
            if (v.co - v_mod.co).length < 1e-6:
                mapping[v_mod.index] = v.index
                # each derived vertex is matched at most once
                verts_mod.remove(v_mod)
                break

    return(mapping)
+
+
# returns a list of all loops parallel to the input, input included
def get_parallel_loops(mesh_mod, loops):
    """Return all loops parallel to the given [vertex_list, circular]
    loops (input included), found by walking the faces on both sides of
    each loop outward. If the input contains branches, the original loops
    are returned unchanged.
    """
    # get required dictionaries
    edge_faces = dict_edge_faces(mesh_mod)
    connected_faces = dict_face_faces(mesh_mod, edge_faces)
    # turn vertex loops into edge loops
    edgeloops = []
    for loop in loops:
        edgeloop = [[sorted([loop[0][i], loop[0][i+1]]) for i in \
            range(len(loop[0])-1)], loop[1]]
        if loop[1]: # circular
            edgeloop[0].append(sorted([loop[0][-1], loop[0][0]]))
        edgeloops.append(edgeloop[:])
    # variables to keep track while iterating
    all_edgeloops = []
    has_branches = False

    for loop in edgeloops:
        # initialise with original loop
        all_edgeloops.append(loop[0])
        newloops = [loop[0]]
        verts_used = []
        for edge in loop[0]:
            if edge[0] not in verts_used:
                verts_used.append(edge[0])
            if edge[1] not in verts_used:
                verts_used.append(edge[1])

        # find parallel loops
        while len(newloops) > 0:
            # collect the faces on each side of the newest loop;
            # forbidden_side prevents a face being assigned to both sides
            side_a = []
            side_b = []
            for i in newloops[-1]:
                i = tuple(i)
                forbidden_side = False
                if not i in edge_faces:
                    # weird input with branches
                    has_branches = True
                    break
                for face in edge_faces[i]:
                    if len(side_a) == 0 and forbidden_side != "a":
                        side_a.append(face)
                        if forbidden_side:
                            break
                        forbidden_side = "a"
                        continue
                    elif side_a[-1] in connected_faces[face] and \
                    forbidden_side != "a":
                        side_a.append(face)
                        if forbidden_side:
                            break
                        forbidden_side = "a"
                        continue
                    if len(side_b) == 0 and forbidden_side != "b":
                        side_b.append(face)
                        if forbidden_side:
                            break
                        forbidden_side = "b"
                        continue
                    elif side_b[-1] in connected_faces[face] and \
                    forbidden_side != "b":
                        side_b.append(face)
                        if forbidden_side:
                            break
                        forbidden_side = "b"
                        continue

            if has_branches:
                # weird input with branches
                break

            newloops.pop(-1)
            sides = []
            if side_a:
                sides.append(side_a)
            if side_b:
                sides.append(side_b)

            for side in sides:
                # the far edge of each side face (no vertex used yet)
                # forms the next parallel loop
                extraloop = []
                for fi in side:
                    for key in mesh_mod.faces[fi].edge_keys:
                        if key[0] not in verts_used and key[1] not in \
                        verts_used:
                            extraloop.append(key)
                            break
                if extraloop:
                    for key in extraloop:
                        for new_vert in key:
                            if new_vert not in verts_used:
                                verts_used.append(new_vert)
                    newloops.append(extraloop)
                    all_edgeloops.append(extraloop)

    # input contains branches, only return selected loop
    if has_branches:
        return(loops)

    # change edgeloops into normal loops
    loops = []
    for edgeloop in all_edgeloops:
        loop = []
        # grow loop by comparing vertices between consecutive edge-keys
        for i in range(len(edgeloop)-1):
            for vert in range(2):
                if edgeloop[i][vert] in edgeloop[i+1]:
                    loop.append(edgeloop[i][vert])
                    break
        if loop:
            # add starting vertex
            for vert in range(2):
                if edgeloop[0][vert] != loop[0]:
                    loop = [edgeloop[0][vert]] + loop
                    break
            # add ending vertex
            for vert in range(2):
                if edgeloop[-1][vert] != loop[-1]:
                    loop.append(edgeloop[-1][vert])
                    break
            # check if loop is circular
            if loop[0] == loop[-1]:
                circular = True
                loop = loop[:-1]
            else:
                circular = False
            loops.append([loop, circular])

    return(loops)
+
+
# gather initial data
def initialise():
    """Switch to object mode with global undo disabled and return
    (previous global_undo setting, active object, its mesh data)."""
    context = bpy.context
    global_undo = context.user_preferences.edit.use_global_undo
    context.user_preferences.edit.use_global_undo = False
    bpy.ops.object.mode_set(mode='OBJECT')
    object = context.active_object

    return(global_undo, object, object.data)
+
+
# move the vertices to their new locations
def move_verts(mesh, mapping, move, influence):
    """Apply the new locations in move ([[(index, loc), ...], ...]),
    blended with the old location by influence (percentage); a negative
    influence applies the new locations fully."""
    for loop in move:
        for index, loc in loop:
            if mapping:
                # skip virtual (mirror-only) vertices, remap the others
                if mapping[index] == -1:
                    continue
                index = mapping[index]
            if influence < 0:
                mesh.vertices[index].co = loc
            else:
                mesh.vertices[index].co = loc*(influence/100) + \
                    mesh.vertices[index].co*((100-influence)/100)
+
+
# load custom tool settings
def settings_load(self):
    """Initialise the operator's properties from the values stored on the
    window-manager for this tool."""
    lt = bpy.context.window_manager.looptools
    tool = self.name.split()[0].lower()
    for key in self.as_keywords().keys():
        setattr(self, key, getattr(lt, tool + "_" + key))
+
+
# store custom tool settings
def settings_write(self):
    """Store the operator's current property values on the window-manager
    for this tool."""
    lt = bpy.context.window_manager.looptools
    tool = self.name.split()[0].lower()
    for key in self.as_keywords().keys():
        setattr(lt, tool + "_" + key, getattr(self, key))
+
+
# clean up and set settings back to original state
def terminate(global_undo):
    # return to editmode (the tools run in object mode)
    bpy.ops.object.mode_set(mode='EDIT')
    # restore the global undo setting that initialise() disabled
    bpy.context.user_preferences.edit.use_global_undo = global_undo
+
+
+##########################################
+####### Bridge functions #################
+##########################################
+
# calculate a cubic spline through the middle section of 4 given coordinates
def bridge_calculate_cubic_spline(mesh, coordinates):
    """Fit a natural cubic spline through the 4 given coordinates and
    return the middle segment, as one [a, b, c, d, t] polynomial per axis.

    mesh is unused but kept for interface compatibility.
    """
    knots = [0, 1, 2, 3]
    result = []

    for axis in range(3):
        # values of this axis for the 4 coordinates
        values = [float(co[axis]) for co in coordinates]
        gaps = [knots[i+1] - knots[i] for i in range(3)]
        # right-hand side of the tridiagonal system
        rhs = [False]
        for i in range(1, 3):
            rhs.append(3.0/gaps[i]*(values[i+1]-values[i]) -
                3.0/gaps[i-1]*(values[i]-values[i-1]))
        # forward sweep of the tridiagonal solve
        diag = [1.0]
        upper = [0.0]
        solved = [0.0]
        for i in range(1, 3):
            diag.append(2.0*(knots[i+1]-knots[i-1]) - gaps[i-1]*upper[i-1])
            upper.append(gaps[i]/diag[i])
            solved.append((rhs[i]-gaps[i-1]*solved[i-1])/diag[i])
        diag.append(1.0)
        solved.append(0.0)
        # back substitution for the polynomial coefficients
        lin = [False, False, False]
        quad = [False, False, False, 0.0]
        cub = [False, False, False]
        for i in range(2, -1, -1):
            quad[i] = solved[i] - upper[i]*quad[i+1]
            lin[i] = (values[i+1]-values[i])/gaps[i] - \
                gaps[i]*(quad[i+1]+2.0*quad[i])/3.0
            cub[i] = (quad[i+1]-quad[i])/(3.0*gaps[i])
        for i in range(3):
            result.append([values[i], lin[i], quad[i], cub[i], knots[i]])

    # middle segment (of 3) for each of the three axes
    return([result[1], result[4], result[7]])
+
+
# return a list with new vertex location vectors, a list with face vertex
# integers, and the highest vertex integer in the virtual mesh
def bridge_calculate_geometry(mesh, lines, vertex_normals, segments,
interpolation, cubic_strength, min_width, max_vert_index):
    """Return (new_verts, faces, max_vert_index) for the bridge: the new
    vertex locations, the face index lists, and the highest vertex index
    in the virtual mesh after adding them.
    """
    new_verts = []
    faces = []

    # calculate location based on interpolation method
    def get_location(line, segment, splines):
        v1 = mesh.vertices[lines[line][0]].co
        v2 = mesh.vertices[lines[line][1]].co
        if interpolation == 'linear':
            return v1 + (segment/segments) * (v2-v1)
        else: # interpolation == 'cubic'
            # evaluate the per-axis cubic polynomials at m in [0, 1]
            m = (segment/segments)
            ax,bx,cx,dx,tx = splines[line][0]
            x = ax+bx*m+cx*m**2+dx*m**3
            ay,by,cy,dy,ty = splines[line][1]
            y = ay+by*m+cy*m**2+dy*m**3
            az,bz,cz,dz,tz = splines[line][2]
            z = az+bz*m+cz*m**2+dz*m**3
            return mathutils.Vector([x,y,z])

    # no interpolation needed
    if segments == 1:
        for i, line in enumerate(lines):
            if i < len(lines)-1:
                faces.append([line[0], lines[i+1][0], lines[i+1][1], line[1]])
    # more than 1 segment, interpolate
    else:
        # calculate splines (if necessary) once, so no recalculations needed
        if interpolation == 'cubic':
            splines = []
            for line in lines:
                v1 = mesh.vertices[line[0]].co
                v2 = mesh.vertices[line[1]].co
                size = (v2-v1).length * cubic_strength
                splines.append(bridge_calculate_cubic_spline(mesh,
                    [v1+size*vertex_normals[line[0]], v1, v2,
                    v2+size*vertex_normals[line[1]]]))
        else:
            splines = False

        # create starting situation
        virtual_width = [(mathutils.Vector(mesh.vertices[lines[i][0]].co) - \
            mathutils.Vector(mesh.vertices[lines[i+1][0]].co)).length for i \
            in range(len(lines)-1)]
        new_verts = [get_location(0, seg, splines) for seg in range(1,
            segments)]
        first_line_indices = [i for i in range(max_vert_index+1,
            max_vert_index+segments)]

        prev_verts = new_verts[:] # vertex locations of verts on previous line
        prev_vert_indices = first_line_indices[:]
        max_vert_index += segments - 1 # highest vertex index in virtual mesh
        next_verts = [] # vertex locations of verts on current line
        next_vert_indices = []

        for i, line in enumerate(lines):
            if i < len(lines)-1:
                v1 = line[0]
                v2 = lines[i+1][0]
                end_face = True
                for seg in range(1, segments):
                    loc1 = prev_verts[seg-1]
                    loc2 = get_location(i+1, seg, splines)
                    if (loc1-loc2).length < (min_width/100)*virtual_width[i] \
                    and line[1]==lines[i+1][1]:
                        # triangle, no new vertex
                        faces.append([v1, v2, prev_vert_indices[seg-1],
                            prev_vert_indices[seg-1]])
                        next_verts += prev_verts[seg-1:]
                        next_vert_indices += prev_vert_indices[seg-1:]
                        end_face = False
                        break
                    else:
                        if i == len(lines)-2 and lines[0] == lines[-1]:
                            # quad with first line, no new vertex
                            faces.append([v1, v2, first_line_indices[seg-1],
                                prev_vert_indices[seg-1]])
                            v2 = first_line_indices[seg-1]
                            v1 = prev_vert_indices[seg-1]
                        else:
                            # quad, add new vertex
                            max_vert_index += 1
                            faces.append([v1, v2, max_vert_index,
                                prev_vert_indices[seg-1]])
                            v2 = max_vert_index
                            v1 = prev_vert_indices[seg-1]
                            new_verts.append(loc2)
                            next_verts.append(loc2)
                            next_vert_indices.append(max_vert_index)
                if end_face:
                    faces.append([v1, v2, lines[i+1][1], line[1]])

                prev_verts = next_verts[:]
                prev_vert_indices = next_vert_indices[:]
                next_verts = []
                next_vert_indices = []

    return(new_verts, faces, max_vert_index)
+
+
# calculate lines (list of lists, vertex indices) that are used for bridging
def bridge_calculate_lines(mesh, loops, mode, twist, reverse):
    """Return the connecting lines ([vertex_in_loop1, vertex_in_loop2]
    pairs) between the two loops, after aligning and orienting loop1 and
    applying the optional twist/reverse overrides.

    mode is 'basic' (shortest side first) or 'shortest' (shortest-edge
    pairing, used when the loops have different lengths).
    """
    lines = []
    loop1, loop2 = [i[0] for i in loops]
    loop1_circular, loop2_circular = [i[1] for i in loops]
    circular = loop1_circular or loop2_circular
    circle_full = False

    # calculate loop centers
    centers = []
    for loop in [loop1, loop2]:
        center = mathutils.Vector([0,0,0])
        for vertex in loop:
            center += mesh.vertices[vertex].co
        center /= len(loop)
        centers.append(center)
    for i, loop in enumerate([loop1, loop2]):
        for vertex in loop:
            if mesh.vertices[vertex].co == centers[i]:
                # prevent zero-length vectors in angle comparisons
                centers[i] += mathutils.Vector([0.01, 0, 0])
                break
    center1, center2 = centers

    # calculate the normals of the virtual planes that the loops are on
    normals = []
    normal_plurity = False
    for i, loop in enumerate([loop1, loop2]):
        # covariance matrix
        mat = mathutils.Matrix(((0.0, 0.0, 0.0), (0.0, 0.0, 0.0),
            (0.0, 0.0, 0.0)))
        x, y, z = centers[i]
        for loc in [mesh.vertices[vertex].co for vertex in loop]:
            mat[0][0] += (loc[0]-x)**2
            mat[0][1] += (loc[0]-x)*(loc[1]-y)
            mat[0][2] += (loc[0]-x)*(loc[2]-z)
            mat[1][0] += (loc[1]-y)*(loc[0]-x)
            mat[1][1] += (loc[1]-y)**2
            mat[1][2] += (loc[1]-y)*(loc[2]-z)
            mat[2][0] += (loc[2]-z)*(loc[0]-x)
            mat[2][1] += (loc[2]-z)*(loc[1]-y)
            mat[2][2] += (loc[2]-z)**2
        # plane normal
        normal = False
        if sum(mat[0]) < 1e-6 or sum(mat[1]) < 1e-6 or sum(mat[2]) < 1e-6:
            # degenerate loop: normal direction is ambiguous
            normal_plurity = True
        try:
            mat.invert()
        except:
            # singular matrix: the loop lies in an axis-aligned plane
            if sum(mat[0]) == 0:
                normal = mathutils.Vector([1.0, 0.0, 0.0])
            elif sum(mat[1]) == 0:
                normal = mathutils.Vector([0.0, 1.0, 0.0])
            elif sum(mat[2]) == 0:
                normal = mathutils.Vector([0.0, 0.0, 1.0])
        if not normal:
            # power iteration (capped at itermax) towards the dominant
            # eigenvector of the inverted matrix — the plane normal
            itermax = 500
            iter = 0
            vec = mathutils.Vector([1.0, 1.0, 1.0])
            vec2 = (vec*mat)/(vec*mat).length
            while vec != vec2 and iter<itermax:
                iter+=1
                vec = vec2
                vec2 = (vec*mat)/(vec*mat).length
            normal = vec2
        normals.append(normal)
    # have plane normals face in the same direction (maximum angle: 90 degrees)
    if ((center1 + normals[0]) - center2).length < \
    ((center1 - normals[0]) - center2).length:
        normals[0].negate()
    if ((center2 + normals[1]) - center1).length > \
    ((center2 - normals[1]) - center1).length:
        normals[1].negate()

    # rotation matrix, representing the difference between the plane normals
    axis = normals[0].cross(normals[1])
    axis = mathutils.Vector([loc if abs(loc) > 1e-8 else 0 for loc in axis])
    if axis.angle(mathutils.Vector([0, 0, 1]), 0) > 1.5707964:
        axis.negate()
    angle = normals[0].dot(normals[1])
    rotation_matrix = mathutils.Matrix.Rotation(angle, 4, axis)

    # if circular, rotate loops so they are aligned
    if circular:
        # make sure loop1 is the circular one (or both are circular)
        if loop2_circular and not loop1_circular:
            loop1_circular, loop2_circular = True, False
            loop1, loop2 = loop2, loop1

        # match start vertex of loop1 with loop2
        target_vector = mesh.vertices[loop2[0]].co - center2
        dif_angles = [[((mesh.vertices[vertex].co - center1) * \
            rotation_matrix).angle(target_vector, 0), False, i] for \
            i, vertex in enumerate(loop1)]
        dif_angles.sort()
        if len(loop1) != len(loop2):
            angle_limit = dif_angles[0][0] * 1.2 # 20% margin
            dif_angles = [[(mesh.vertices[loop2[0]].co - \
                mesh.vertices[loop1[index]].co).length, angle, index] for \
                angle, distance, index in dif_angles if angle <= angle_limit]
            dif_angles.sort()
        loop1 = loop1[dif_angles[0][2]:] + loop1[:dif_angles[0][2]]

    # have both loops face the same way
    if normal_plurity and not circular:
        second_to_first, second_to_second, second_to_last = \
            [(mesh.vertices[loop1[1]].co - center1).\
            angle(mesh.vertices[loop2[i]].co - center2) for i in [0, 1, -1]]
        last_to_first, last_to_second = [(mesh.vertices[loop1[-1]].co - \
            center1).angle(mesh.vertices[loop2[i]].co - center2) for \
            i in [0, 1]]
        if (min(last_to_first, last_to_second)*1.1 < min(second_to_first, \
        second_to_second)) or (loop2_circular and second_to_last*1.1 < \
        min(second_to_first, second_to_second)):
            loop1.reverse()
            if circular:
                # (circular is False in this branch, so this never triggers)
                loop1 = [loop1[-1]] + loop1[:-1]
    else:
        # compare the winding direction of both loops to their normals
        angle = (mesh.vertices[loop1[0]].co - center1).\
            cross(mesh.vertices[loop1[1]].co - center1).angle(normals[0], 0)
        target_angle = (mesh.vertices[loop2[0]].co - center2).\
            cross(mesh.vertices[loop2[1]].co - center2).angle(normals[1], 0)
        limit = 1.5707964 # 0.5*pi, 90 degrees
        if not ((angle > limit and target_angle > limit) or \
        (angle < limit and target_angle < limit)):
            loop1.reverse()
            if circular:
                loop1 = [loop1[-1]] + loop1[:-1]
        elif normals[0].angle(normals[1]) > limit:
            loop1.reverse()
            if circular:
                loop1 = [loop1[-1]] + loop1[:-1]

    # both loops have the same length
    if len(loop1) == len(loop2):
        # manual override
        if twist:
            if abs(twist) < len(loop1):
                loop1 = loop1[twist:]+loop1[:twist]
        if reverse:
            loop1.reverse()

        lines.append([loop1[0], loop2[0]])
        for i in range(1, len(loop1)):
            lines.append([loop1[i], loop2[i]])

    # loops of different lengths
    else:
        # make loop1 longest loop
        if len(loop2) > len(loop1):
            loop1, loop2 = loop2, loop1
            loop1_circular, loop2_circular = loop2_circular, loop1_circular

        # manual override
        if twist:
            if abs(twist) < len(loop1):
                loop1 = loop1[twist:]+loop1[:twist]
        if reverse:
            loop1.reverse()

        # shortest angle difference doesn't always give correct start vertex
        if loop1_circular and not loop2_circular:
            shifting = 1
            while shifting:
                if len(loop1) - shifting < len(loop2):
                    shifting = False
                    break
                to_last, to_first = [((mesh.vertices[loop1[-1]].co - \
                    center1) * rotation_matrix).angle((mesh.\
                    vertices[loop2[i]].co - center2), 0) for i in [-1, 0]]
                if to_first < to_last:
                    loop1 = [loop1[-1]] + loop1[:-1]
                    shifting += 1
                else:
                    shifting = False
                    break

        # basic shortest side first
        if mode == 'basic':
            lines.append([loop1[0], loop2[0]])
            for i in range(1, len(loop1)):
                if i >= len(loop2) - 1:
                    # triangles
                    lines.append([loop1[i], loop2[-1]])
                else:
                    # quads
                    lines.append([loop1[i], loop2[i]])

        # shortest edge algorithm
        else: # mode == 'shortest'
            lines.append([loop1[0], loop2[0]])
            prev_vert2 = 0
            for i in range(len(loop1) -1):
                if prev_vert2 == len(loop2) - 1 and not loop2_circular:
                    # force triangles, reached end of loop2
                    tri, quad = 0, 1
                elif prev_vert2 == len(loop2) - 1 and loop2_circular:
                    # at end of loop2, but circular, so check with first vert
                    tri, quad = [(mathutils.Vector(mesh.vertices[loop1[i+1]].\
                        co) - mathutils.Vector(mesh.vertices[loop2[j]].co)).\
                        length for j in [prev_vert2, 0]]
                    circle_full = 2
                elif len(loop1) - 1 - i == len(loop2) - 1 - prev_vert2 and \
                not circle_full:
                    # force quads, otherwise won't make it to end of loop2
                    tri, quad = 1, 0
                else:
                    # calculate if tri or quad gives shortest edge
                    tri, quad = [(mathutils.Vector(mesh.vertices[loop1[i+1]].\
                        co) - mathutils.Vector(mesh.vertices[loop2[j]].co)).\
                        length for j in range(prev_vert2, prev_vert2+2)]

                # triangle
                if tri < quad:
                    lines.append([loop1[i+1], loop2[prev_vert2]])
                    if circle_full == 2:
                        circle_full = False
                # quad
                elif not circle_full:
                    lines.append([loop1[i+1], loop2[prev_vert2+1]])
                    prev_vert2 += 1
                # quad to first vertex of loop2
                else:
                    lines.append([loop1[i+1], loop2[0]])
                    prev_vert2 = 0
                    circle_full = True

    # final face for circular loops
    if loop1_circular and loop2_circular:
        lines.append([loop1[0], loop2[0]])

    return(lines)
+
+
# calculate number of segments needed
def bridge_calculate_segments(mesh, lines, loops, segments):
    """Return the segment count: the user's setting when non-zero,
    otherwise the rounded ratio of the average bridge length to the
    average loop edge length (at least 1)."""
    if segments != 0:
        # amount of segments is set by user
        return segments

    # lengths of the edges inside the loops
    edge_lengths = []
    for loop in loops:
        verts = loop[0]
        for i in range(len(verts) - 1):
            edge_lengths.append((mesh.vertices[verts[i]].co -
                mesh.vertices[verts[i + 1]].co).length)
    # closing edges of circular loops
    for loop in loops:
        if loop[1]:
            edge_lengths.append((mesh.vertices[loop[0][-1]].co -
                mesh.vertices[loop[0][0]].co).length)
    average_edge = sum(edge_lengths) / len(edge_lengths)

    # average length of the bridging lines
    bridge_lengths = [(mesh.vertices[v1].co - mesh.vertices[v2].co).length
        for v1, v2 in lines]
    average_bridge = sum(bridge_lengths) / len(bridge_lengths)

    return(max(1, round(average_bridge / average_edge)))
+
+
# return dictionary with vertex index as key, and the normal vector as value
def bridge_calculate_virtual_vertex_normals(mesh, lines, loops, edge_faces,
edgekey_to_edge):
    """Return {vertex_index: normal_vector} for all vertices of both
    loops, used by the cubic interpolation; returns False when edge_faces
    isn't given (interpolation isn't cubic).

    Normals are derived from the faces connected to the loop edges;
    vertices without valid faces fall back to a calculation based on the
    bridge connection vectors.
    """
    if not edge_faces: # interpolation isn't set to cubic
        return False

    # pity reduce() isn't one of the basic functions in python anymore
    def average_vector_dictionary(dic):
        # replace each multi-vector list with its single average vector
        for key, vectors in dic.items():
            #if type(vectors) == type([]) and len(vectors) > 1:
            if len(vectors) > 1:
                average = mathutils.Vector([0, 0, 0])
                for vector in vectors:
                    average += vector
                average /= len(vectors)
                dic[key] = [average]
        return dic

    # get all edges of the loop
    edges = [[edgekey_to_edge[tuple(sorted([loops[j][0][i],
        loops[j][0][i+1]]))] for i in range(len(loops[j][0])-1)] for \
        j in [0,1]]
    edges = edges[0] + edges[1]
    for j in [0, 1]:
        if loops[j][1]: # circular
            edges.append(edgekey_to_edge[tuple(sorted([loops[j][0][0],
                loops[j][0][-1]]))])

    """
    calculation based on face topology (assign edge-normals to vertices)

    edge_normal = face_normal x edge_vector
    vertex_normal = average(edge_normals)
    """
    vertex_normals = dict([(vertex, []) for vertex in loops[0][0]+loops[1][0]])
    for edge in edges:
        faces = edge_faces[edge.key] # valid faces connected to edge

        if faces:
            # get edge coordinates
            v1, v2 = [mesh.vertices[edge.key[i]].co for i in [0,1]]
            edge_vector = v1 - v2
            if edge_vector.length < 1e-4:
                # zero-length edge, vertices at same location
                continue
            edge_center = (v1 + v2) / 2

            # average face coordinates, if connected to more than 1 valid face
            if len(faces) > 1:
                face_normal = mathutils.Vector([0, 0, 0])
                face_center = mathutils.Vector([0, 0, 0])
                for face in faces:
                    face_normal += face.normal
                    face_center += face.center
                face_normal /= len(faces)
                face_center /= len(faces)
            else:
                face_normal = faces[0].normal
                face_center = faces[0].center
            if face_normal.length < 1e-4:
                # faces with a surface of 0 have no face normal
                continue

            # calculate virtual edge normal
            edge_normal = edge_vector.cross(face_normal)
            edge_normal.length = 0.01
            if (face_center - (edge_center + edge_normal)).length > \
            (face_center - (edge_center - edge_normal)).length:
                # make normal face the correct way
                edge_normal.negate()
            edge_normal.normalize()
            # add virtual edge normal as entry for both vertices it connects
            for vertex in edge.key:
                vertex_normals[vertex].append(edge_normal)

    """
    calculation based on connection with other loop (vertex focused method)
    - used for vertices that aren't connected to any valid faces

    plane_normal = edge_vector x connection_vector
    vertex_normal = plane_normal x edge_vector
    """
    vertices = [vertex for vertex, normal in vertex_normals.items() if not \
        normal]

    if vertices:
        # edge vectors connected to vertices
        edge_vectors = dict([[vertex, []] for vertex in vertices])
        for edge in edges:
            for v in edge.key:
                if v in edge_vectors:
                    edge_vector = mesh.vertices[edge.key[0]].co - \
                        mesh.vertices[edge.key[1]].co
                    if edge_vector.length < 1e-4:
                        # zero-length edge, vertices at same location
                        continue
                    edge_vectors[v].append(edge_vector)

        # connection vectors between vertices of both loops
        connection_vectors = dict([[vertex, []] for vertex in vertices])
        connections = dict([[vertex, []] for vertex in vertices])
        for v1, v2 in lines:
            if v1 in connection_vectors or v2 in connection_vectors:
                new_vector = mesh.vertices[v1].co - mesh.vertices[v2].co
                if new_vector.length < 1e-4:
                    # zero-length connection vector,
                    # vertices in different loops at same location
                    continue
                if v1 in connection_vectors:
                    connection_vectors[v1].append(new_vector)
                    connections[v1].append(v2)
                if v2 in connection_vectors:
                    connection_vectors[v2].append(new_vector)
                    connections[v2].append(v1)
        connection_vectors = average_vector_dictionary(connection_vectors)
        connection_vectors = dict([[vertex, vector[0]] if vector else \
            [vertex, []] for vertex, vector in connection_vectors.items()])

        for vertex, values in edge_vectors.items():
            # vertex normal doesn't matter, just assign a random vector to it
            if not connection_vectors[vertex]:
                vertex_normals[vertex] = [mathutils.Vector([1, 0, 0])]
                continue

            # calculate to what location the vertex is connected,
            # used to determine what way to flip the normal
            connected_center = mathutils.Vector([0, 0, 0])
            for v in connections[vertex]:
                connected_center += mesh.vertices[v].co
            if len(connections[vertex]) > 1:
                connected_center /= len(connections[vertex])
            if len(connections[vertex]) == 0:
                # shouldn't be possible, but better safe than sorry
                vertex_normals[vertex] = [mathutils.Vector([1, 0, 0])]
                continue

            # can't do proper calculations, because of zero-length vector
            if not values:
                if (connected_center - (mesh.vertices[vertex].co + \
                connection_vectors[vertex])).length < (connected_center - \
                (mesh.vertices[vertex].co - connection_vectors[vertex])).\
                length:
                    connection_vectors[vertex].negate()
                vertex_normals[vertex] = [connection_vectors[vertex].\
                    normalized()]
                continue

            # calculate vertex normals using edge-vectors,
            # connection-vectors and the derived plane normal
            for edge_vector in values:
                plane_normal = edge_vector.cross(connection_vectors[vertex])
                vertex_normal = edge_vector.cross(plane_normal)
                vertex_normal.length = 0.1
                if (connected_center - (mesh.vertices[vertex].co + \
                vertex_normal)).length < (connected_center - \
                (mesh.vertices[vertex].co - vertex_normal)).length:
                    # make normal face the correct way
                    vertex_normal.negate()
                vertex_normal.normalize()
                vertex_normals[vertex].append(vertex_normal)

    # average virtual vertex normals, based on all edges it's connected to
    vertex_normals = average_vector_dictionary(vertex_normals)
    vertex_normals = dict([[vertex, vector[0]] for vertex, vector in \
        vertex_normals.items()])

    return(vertex_normals)
+
+
+# add vertices to mesh
def bridge_create_vertices(mesh, vertices):
    """Append the given list of coordinates to the mesh as new vertices."""
    offset = len(mesh.vertices)
    mesh.vertices.add(len(vertices))
    for i, location in enumerate(vertices):
        mesh.vertices[offset + i].co = location
+
+
+# add faces to mesh
def bridge_create_faces(mesh, faces, twist):
    """Append the given faces to the mesh.

    A negative twist means the winding order of each face is reversed
    first, so the resulting normals point the correct way.
    """
    if twist < 0:
        for face in faces:
            face.reverse()
        faces = [face[2:] + face[:2] if face[0] == face[1] else face \
            for face in faces]

    # eekadoodle prevention: a face may not end with vertex index 0
    for i, face in enumerate(faces):
        if not face[-1]:
            if face[0] == face[-1]:
                faces[i] = [face[1], face[2], face[3], face[1]]
            else:
                faces[i] = [face[-1]] + face[:-1]

    offset = len(mesh.faces)
    mesh.faces.add(len(faces))
    for i, face in enumerate(faces):
        mesh.faces[offset + i].vertices_raw = face
    mesh.update(calc_edges = True) # calc_edges prevents memory-corruption
+
+
+# calculate input loops
def bridge_get_input(mesh):
    """Return the selected edge loops that can be used for bridging.

    Edges used by more than one selected face are internal to the
    selection and are skipped.
    """
    # count how often each edge-key is used by a selected face
    edge_count = {}
    for face in mesh.faces:
        if face.select and not face.hide:
            for ek in face.edge_keys:
                edge_count[ek] = edge_count.get(ek, 0) + 1
    internal_edges = [ek for ek, amount in edge_count.items() if amount > 1]

    # sort the remaining selected edges into loops
    selected_edges = [edge.key for edge in mesh.edges if edge.select \
        and not edge.hide and edge.key not in internal_edges]

    return(get_connected_selections(selected_edges))
+
+
+# return values needed by the bridge operator
def bridge_initialise(mesh, interpolation):
    """Return values needed by the bridge operator.

    Returns (edge_faces, edgekey_to_edge, old_selected_faces, smooth).
    The first two are only calculated for cubic interpolation and are
    False otherwise.
    """
    edge_faces = False
    edgekey_to_edge = False
    if interpolation == 'cubic':
        # dict mapping edge-keys to the list of valid connected faces
        blacklist = [face.index for face in mesh.faces \
            if face.select or face.hide]
        edge_faces = {edge.key: [] for edge in mesh.edges if not edge.hide}
        for face in mesh.faces:
            if face.index not in blacklist:
                for key in face.edge_keys:
                    edge_faces[key].append(face)
        # dict mapping edge-keys to the edges themselves
        edgekey_to_edge = {edge.key: edge for edge in mesh.edges \
            if edge.select and not edge.hide}

    # indices of the faces that were selected on input
    old_selected_faces = [face.index for face in mesh.faces \
        if face.select and not face.hide]

    # new faces are smoothed if at least half of the existing faces are
    smooth = False
    if mesh.faces:
        smooth_amount = sum([face.use_smooth for face in mesh.faces])
        if smooth_amount / len(mesh.faces) >= 0.5:
            smooth = True

    return(edge_faces, edgekey_to_edge, old_selected_faces, smooth)
+
+
+# return a string with the input method
def bridge_input_method(loft, loft_loop):
    """Return a short string describing the input method used."""
    if not loft:
        return("Bridge")
    if loft_loop:
        return("Loft loop")
    return("Loft no-loop")
+
+
+# match up loops in pairs, used for multi-input bridging
def bridge_match_loops(mesh, loops):
    """Match up loops in pairs, used for multi-input bridging.

    Loops are matched on whether their average normals face each other,
    on the distance between their centers, and on the difference in
    vertex count. Returns the loops reordered so matched pairs sit next
    to each other; if fewer than two loops could be paired the input
    order is returned unchanged.
    """
    # calculate average loop normals and centers
    normals = []
    centers = []
    for vertices, circular in loops:
        normal = mathutils.Vector([0, 0, 0])
        center = mathutils.Vector([0, 0, 0])
        for vertex in vertices:
            normal += mesh.vertices[vertex].normal
            center += mesh.vertices[vertex].co
        normals.append(normal / len(vertices) / 10)
        centers.append(center / len(vertices))

    # possible matches if loop normals are faced towards the center
    # of the other loop
    matches = dict([[i, []] for i in range(len(loops))])
    matches_amount = 0
    # fix: was range(len(loops) + 1); the extra index only produced an
    # empty inner range, but was misleading and fragile
    for i in range(len(loops)):
        for j in range(i+1, len(loops)):
            if (centers[i] - centers[j]).length > (centers[i] - (centers[j] \
            + normals[j])).length and (centers[j] - centers[i]).length > \
            (centers[j] - (centers[i] + normals[i])).length:
                matches_amount += 1
                matches[i].append([(centers[i] - centers[j]).length, i, j])
                matches[j].append([(centers[i] - centers[j]).length, j, i])
    # if no loops face each other, just make matches between all the loops
    if matches_amount == 0:
        for i in range(len(loops)):
            for j in range(i+1, len(loops)):
                matches[i].append([(centers[i] - centers[j]).length, i, j])
                matches[j].append([(centers[i] - centers[j]).length, j, i])
    for value in matches.values():
        value.sort()

    # matches based on distance between centers and number of vertices in loops
    new_order = []
    for loop_index in range(len(loops)):
        if loop_index in new_order:
            continue
        loop_matches = matches[loop_index]
        if not loop_matches:
            continue
        # only consider matches within 10% of the closest candidate
        shortest_distance = loop_matches[0][0]
        shortest_distance *= 1.1
        loop_matches = [[abs(len(loops[loop_index][0]) - \
            len(loops[loop[2]][0])), loop[0], loop[1], loop[2]] for loop in \
            loop_matches if loop[0] < shortest_distance]
        loop_matches.sort()
        for match in loop_matches:
            if match[3] not in new_order:
                new_order += [loop_index, match[3]]
                break

    # reorder loops based on matches
    if len(new_order) >= 2:
        loops = [loops[i] for i in new_order]

    return(loops)
+
+
+# have normals of selection face outside
def bridge_recalculate_normals():
    """Have the normals of the selection face outside.

    Delegates to Blender's own operator, so it acts on the context's
    active object and leaves it in edit mode.
    """
    bpy.ops.object.mode_set(mode = 'EDIT')
    bpy.ops.mesh.normals_make_consistent()
+
+
+# remove old_selected_faces
def bridge_remove_internal_faces(mesh, old_selected_faces):
    """Delete the faces (given by index) that were selected on input.

    The delete operator destroys the edge selection, so the selection
    state is temporarily encoded in the edges' bevel weights and
    decoded again afterwards: selected edges end up in [0.2, 0.54),
    unselected ones in [0.6, 0.94).
    """
    # remember the select mode, so it can be restored at the end
    select_mode = [i for i in bpy.context.tool_settings.mesh_select_mode]
    bpy.context.tool_settings.mesh_select_mode = [False, False, True]

    # hack to keep track of the current selection
    for edge in mesh.edges:
        if edge.select and not edge.hide:
            edge.bevel_weight = (edge.bevel_weight/3) + 0.2
        else:
            edge.bevel_weight = (edge.bevel_weight/3) + 0.6

    # remove faces
    bpy.ops.object.mode_set(mode = 'EDIT')
    bpy.ops.mesh.select_all(action = 'DESELECT')
    bpy.ops.object.mode_set(mode = 'OBJECT')
    for face in old_selected_faces:
        mesh.faces[face].select = True
    bpy.ops.object.mode_set(mode = 'EDIT')
    bpy.ops.mesh.delete(type = 'FACE')

    # restore old selection, using hack
    bpy.ops.object.mode_set(mode = 'OBJECT')
    bpy.context.tool_settings.mesh_select_mode = [False, True, False]
    for edge in mesh.edges:
        if edge.bevel_weight < 0.6:
            # was selected: restore the original weight and the selection
            edge.bevel_weight = (edge.bevel_weight-0.2) * 3
            edge.select = True
        else:
            edge.bevel_weight = (edge.bevel_weight-0.6) * 3
    bpy.ops.object.mode_set(mode = 'EDIT')
    bpy.ops.object.mode_set(mode = 'OBJECT')
    bpy.context.tool_settings.mesh_select_mode = select_mode
+
+
+# update list of internal faces that are flagged for removal
def bridge_save_unused_faces(mesh, old_selected_faces, loops):
    """Update the list of internal faces that are flagged for removal.

    old_selected_faces is modified in place: faces belonging to a group
    of connected selected faces that isn't touched by any bridged loop
    are removed from the list, so they won't be deleted later on.
    """
    # key: vertex index, value: lists of selected faces using it
    vertex_to_face = dict([[i, []] for i in range(len(mesh.vertices))])
    [[vertex_to_face[vertex_index].append(face) for vertex_index in \
        mesh.faces[face].vertices] for face in old_selected_faces]

    # group selected faces that are connected (flood-fill over shared verts)
    groups = []
    grouped_faces = []
    for face in old_selected_faces:
        if face in grouped_faces:
            continue
        grouped_faces.append(face)
        group = [face]
        new_faces = [face]
        while new_faces:
            grow_face = new_faces[0]
            for vertex in mesh.faces[grow_face].vertices:
                vertex_face_group = [face for face in vertex_to_face[vertex] \
                    if face not in grouped_faces]
                new_faces += vertex_face_group
                grouped_faces += vertex_face_group
                group += vertex_face_group
            new_faces.pop(0)
        groups.append(group)

    # key: vertex index, value: True/False (is it in a loop that is used)
    used_vertices = dict([[i, 0] for i in range(len(mesh.vertices))])
    for loop in loops:
        for vertex in loop[0]:
            used_vertices[vertex] = True

    # check if group is bridged, if not remove faces from internal faces list
    for group in groups:
        used = False
        for face in group:
            if used:
                break
            for vertex in mesh.faces[face].vertices:
                if used_vertices[vertex]:
                    used = True
                    break
        if not used:
            for face in group:
                old_selected_faces.remove(face)
+
+
+# add the newly created faces to the selection
def bridge_select_new_faces(mesh, amount, smooth):
    """Select the last `amount` faces of the mesh and set their shading."""
    select_mode = [i for i in bpy.context.tool_settings.mesh_select_mode]
    bpy.context.tool_settings.mesh_select_mode = [False, False, True]
    for offset in range(1, amount + 1):
        face = mesh.faces[-offset]
        face.select = True
        face.use_smooth = smooth
    # toggle edit mode once
    bpy.ops.object.mode_set(mode = 'EDIT')
    bpy.ops.object.mode_set(mode = 'OBJECT')
    bpy.context.tool_settings.mesh_select_mode = select_mode
+
+
+# sort loops, so they are connected in the correct order when lofting
def bridge_sort_loops(mesh, loops, loft_loop):
    """Sort loops, so they are connected in the correct order when lofting.

    Each loop is reduced to its average location and a greedy
    nearest-neighbour path is built through those locations.
    """
    # simplify each loop to a single center point, used for pathfinding
    nodes = []
    for loop in loops:
        center = [sum([mesh.vertices[i].co[j] for i in loop[0]]) / \
            len(loop[0]) for j in range(3)]
        nodes.append(mathutils.Vector(center))

    active_node = 0
    remaining = [i for i in range(1, len(loops))]
    path = [[0, 0]]
    # repeatedly connect the node closest to the active one
    while len(remaining) > 0:
        distances = [(nodes[active_node] - nodes[i]).length \
            for i in remaining]
        active_node = remaining[distances.index(min(distances))]
        remaining.remove(active_node)
        path.append([active_node, min(distances)])
    # check if we didn't start in the middle of the path
    for i in range(2, len(path)):
        if (nodes[path[i][0]] - nodes[0]).length < path[i][1]:
            head = path[:i]
            path.reverse()
            path = path[:-i] + head
            break

    # reorder loops; optionally repeat the first one so the loft can close
    loops = [loops[entry[0]] for entry in path]
    if loft_loop:
        loops = loops + [loops[0]]

    return(loops)
+
+
+##########################################
+####### Circle functions #################
+##########################################
+
+# convert 3d coordinates to 2d coordinates on plane
def circle_3d_to_2d(mesh_mod, loop, com, normal):
    """Convert the loop's 3d coordinates to 2d coordinates on the plane
    defined by com (a point on the plane) and normal.

    Returns (locs_2d, p, q) where locs_2d is a list of
    [x, y, vertex index] entries and p, q are the in-plane basis vectors.
    """
    # project vertices onto the plane
    verts = [mesh_mod.vertices[v] for v in loop[0]]
    verts_projected = [[mathutils.Vector(v.co[:]) - \
        (mathutils.Vector(v.co[:])-com).dot(normal)*normal, v.index] \
        for v in verts]

    # calculate two vectors (p and q) along the plane
    m = mathutils.Vector([normal[0]+1.0, normal[1], normal[2]])
    p = m - (m.dot(normal) * normal)
    if p.dot(p) == 0.0:
        # m was parallel to the normal, retry with a different offset
        m = mathutils.Vector([normal[0], normal[1]+1.0, normal[2]])
        p = m - (m.dot(normal) * normal)
    q = p.cross(normal)

    # change to 2d coordinates using perpendicular projection
    locs_2d = []
    for loc, vert in verts_projected:
        vloc = loc - com
        x = p.dot(vloc) / p.dot(p)
        y = q.dot(vloc) / q.dot(q)
        locs_2d.append([x, y, vert])

    return(locs_2d, p, q)
+
+
+# calculate a best-fit circle to the 2d locations on the plane
def circle_calculate_best_fit(locs_2d):
    """Calculate a best-fit circle for the 2d locations on the plane.

    Iterative non-linear least squares (Gauss-Newton style): each pass
    solves the normal equations J^T J d = J^T k for the parameter step.
    Returns (x0, y0, r): the circle center and radius.
    """
    # initial guess
    x0 = 0.0
    y0 = 0.0
    r = 1.0

    # calculate center and radius (non-linear least squares solution)
    for iteration in range(500):
        jmat = []
        k = []
        for v in locs_2d:
            # distance of the vertex to the current center guess
            d = (v[0]**2-2.0*x0*v[0]+v[1]**2-2.0*y0*v[1]+x0**2+y0**2)**0.5
            jmat.append([(x0-v[0])/d, (y0-v[1])/d, -1.0])
            k.append(-(((v[0]-x0)**2+(v[1]-y0)**2)**0.5-r))
        jmat2 = mathutils.Matrix([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], \
            [0.0, 0.0, 0.0]])
        k2 = mathutils.Vector([0.0, 0.0, 0.0])
        # accumulate J^T J (symmetric, so only the upper triangle) and J^T k
        for i in range(len(jmat)):
            k2 += mathutils.Vector(jmat[i])*k[i]
            jmat2[0][0] += jmat[i][0]**2
            jmat2[0][1] += jmat[i][0]*jmat[i][1]
            jmat2[0][2] += jmat[i][0]*jmat[i][2]
            jmat2[1][1] += jmat[i][1]**2
            jmat2[1][2] += jmat[i][1]*jmat[i][2]
            jmat2[2][2] += jmat[i][2]**2
        jmat2[1][0] = jmat2[0][1]
        jmat2[2][0] = jmat2[0][2]
        jmat2[2][1] = jmat2[1][2]
        try:
            jmat2.invert()
        except Exception:
            # singular matrix: keep jmat2 as-is and let the step proceed,
            # preserving the original best-effort behaviour
            # (fix: was a bare except, which also swallowed KeyboardInterrupt)
            pass
        dx0, dy0, dr = k2 * jmat2
        x0 += dx0
        y0 += dy0
        r += dr
        # stop iterating if we're close enough to optimal solution
        if abs(dx0)<1e-6 and abs(dy0)<1e-6 and abs(dr)<1e-6:
            break

    # return center of circle and radius
    return(x0, y0, r)
+
+
+# calculate circle so no vertices have to be moved away from the center
def circle_calculate_min_fit(locs_2d):
    """Calculate a circle so that no vertex has to be moved away from
    the center. Returns (x0, y0, r).
    """
    # center of circle: middle of the 2d bounding box
    xs = [loc[0] for loc in locs_2d]
    ys = [loc[1] for loc in locs_2d]
    x0 = (min(xs) + max(xs)) / 2.0
    y0 = (min(ys) + max(ys)) / 2.0
    center = mathutils.Vector([x0, y0])
    # radius of circle: distance to the closest vertex
    r = min([(mathutils.Vector([loc[0], loc[1]]) - center).length \
        for loc in locs_2d])

    return(x0, y0, r)
+
+
+# calculate the new locations of the vertices that need to be moved
def circle_calculate_verts(flatten, mesh_mod, locs_2d, com, p, q, normal):
    """Calculate the new locations of the vertices that need to be moved.

    The 2d circle locations are converted back to 3d using the plane
    basis (p, q) and origin com. If flatten is True the flat locations
    are returned directly; otherwise each location is projected back
    onto the original mesh along the plane normal, trying ever wider
    searches: adjacent faces, adjacent edges, then the entire mesh.
    Returns a list of [vertex index, new location] pairs.
    """
    # changing 2d coordinates back to 3d coordinates
    locs_3d = []
    for loc in locs_2d:
        locs_3d.append([loc[2], loc[0]*p + loc[1]*q + com])

    if flatten: # flat circle
        return(locs_3d)

    else: # project the locations on the existing mesh
        vert_edges = dict_vert_edges(mesh_mod)
        vert_faces = dict_vert_faces(mesh_mod)
        faces = [f for f in mesh_mod.faces if not f.hide]
        # cast rays in both directions along the plane normal
        rays = [normal, -normal]
        new_locs = []
        for loc in locs_3d:
            projection = False
            if mesh_mod.vertices[loc[0]].co == loc[1]: # vertex hasn't moved
                projection = loc[1]
            else:
                dif = normal.angle(loc[1]-mesh_mod.vertices[loc[0]].co)
                if -1e-6 < dif < 1e-6 or math.pi-1e-6 < dif < math.pi+1e-6:
                    # original location is already along projection normal
                    projection = mesh_mod.vertices[loc[0]].co
                else:
                    # quick search through adjacent faces
                    for face in vert_faces[loc[0]]:
                        verts = [mesh_mod.vertices[v].co for v in \
                            mesh_mod.faces[face].vertices]
                        if len(verts) == 3: # triangle
                            v1, v2, v3 = verts
                            v4 = False
                        else: # quad
                            v1, v2, v3, v4 = verts
                        for ray in rays:
                            intersect = mathutils.geometry.\
                            intersect_ray_tri(v1, v2, v3, ray, loc[1])
                            if intersect:
                                projection = intersect
                                break
                            elif v4:
                                # second triangle of the quad
                                intersect = mathutils.geometry.\
                                intersect_ray_tri(v1, v3, v4, ray, loc[1])
                                if intersect:
                                    projection = intersect
                                    break
                        if projection:
                            break
            if not projection:
                # check if projection is on adjacent edges
                for edgekey in vert_edges[loc[0]]:
                    line1 = mesh_mod.vertices[edgekey[0]].co
                    line2 = mesh_mod.vertices[edgekey[1]].co
                    intersect, dist = mathutils.geometry.intersect_point_line(\
                        loc[1], line1, line2)
                    # only accept hits strictly inside the edge segment
                    if 1e-6 < dist < 1 - 1e-6:
                        projection = intersect
                        break
            if not projection:
                # full search through the entire mesh
                hits = []
                for face in faces:
                    verts = [mesh_mod.vertices[v].co for v in face.vertices]
                    if len(verts) == 3: # triangle
                        v1, v2, v3 = verts
                        v4 = False
                    else: # quad
                        v1, v2, v3, v4 = verts
                    for ray in rays:
                        intersect = mathutils.geometry.intersect_ray_tri(\
                            v1, v2, v3, ray, loc[1])
                        if intersect:
                            hits.append([(loc[1] - intersect).length,
                                intersect])
                            break
                        elif v4:
                            intersect = mathutils.geometry.intersect_ray_tri(\
                                v1, v3, v4, ray, loc[1])
                            if intersect:
                                hits.append([(loc[1] - intersect).length,
                                    intersect])
                                break
                if len(hits) >= 1:
                    # if more than 1 hit with mesh, closest hit is new loc
                    hits.sort()
                    projection = hits[0][1]
            if not projection:
                # nothing to project on, remain at flat location
                projection = loc[1]
            new_locs.append([loc[0], projection])

        # return new positions of projected circle
        return(new_locs)
+
+
+# check loops and only return valid ones
def circle_check_loops(single_loops, loops, mapping, mesh_mod):
    """Check the loops and only return the valid ones.

    A loop is valid when it has at least 3 vertices, at least one vertex
    in the original non-mirrored mesh (when a mirror mapping is given),
    and is not collinear. Returns (valid_single_loops, valid_loops).
    """
    valid_single_loops = {}
    valid_loops = []
    for i, [loop, circular] in enumerate(loops):
        # loop needs to have at least 3 vertices
        if len(loop) < 3:
            continue
        # loop needs at least 1 vertex in the original, non-mirrored mesh
        if mapping:
            all_virtual = True
            for vert in loop:
                if mapping[vert] > -1:
                    all_virtual = False
                    break
            if all_virtual:
                continue
        # loop has to be non-collinear
        collinear = True
        loc0 = mathutils.Vector(mesh_mod.vertices[loop[0]].co[:])
        loc1 = mathutils.Vector(mesh_mod.vertices[loop[1]].co[:])
        for v in loop[2:]:
            locn = mathutils.Vector(mesh_mod.vertices[v].co[:])
            if loc0 == loc1 or loc1 == locn:
                # doubled vertices, inconclusive: check the next vertex
                loc0 = loc1
                loc1 = locn
                continue
            d1 = loc1-loc0
            d2 = locn-loc1
            if -1e-6 < d1.angle(d2, 0) < 1e-6:
                # segments are parallel, so far the loop is collinear
                loc0 = loc1
                loc1 = locn
                continue
            collinear = False
            break
        if collinear:
            continue
        # passed all tests, loop is valid
        valid_loops.append([loop, circular])
        valid_single_loops[len(valid_loops)-1] = single_loops[i]

    return(valid_single_loops, valid_loops)
+
+
+# calculate the location of single input vertices that need to be flattened
def circle_flatten_singles(mesh_mod, com, p, q, normal, single_loop):
    """Project the single input vertices onto the plane given by com and
    normal. Returns a list of [vertex index, new location] pairs.
    """
    projected = []
    for vert in single_loop:
        co = mathutils.Vector(mesh_mod.vertices[vert].co[:])
        projected.append([vert, co - (co-com).dot(normal)*normal])

    return(projected)
+
+
+# calculate input loops
def circle_get_input(object, mesh, scene):
    """Calculate the input loops for the circle operator.

    Returns (derived, mesh_mod, single_vertices, single_loops, loops),
    where mesh_mod is the mesh with modifiers applied and
    single_vertices are selected vertices without selected edges.
    """
    # get mesh with modifiers applied
    derived, mesh_mod = get_derived_mesh(object, mesh, scene)

    # create list of edge-keys based on selection state
    faces = False
    for face in mesh.faces:
        if face.select and not face.hide:
            faces = True
            break
    if faces:
        # get selected, non-hidden , non-internal edge-keys
        eks_selected = [key for keys in [face.edge_keys for face in \
            mesh_mod.faces if face.select and not face.hide] for key in keys]
        edge_count = {}
        for ek in eks_selected:
            if ek in edge_count:
                edge_count[ek] += 1
            else:
                edge_count[ek] = 1
        # keep edges used by at most one selected face
        edge_keys = [edge.key for edge in mesh_mod.edges if edge.select \
            and not edge.hide and edge_count.get(edge.key, 1)==1]
    else:
        # no faces, so no internal edges either
        edge_keys = [edge.key for edge in mesh_mod.edges if edge.select \
            and not edge.hide]

    # add edge-keys around single vertices
    verts_connected = dict([[vert, 1] for edge in [edge for edge in \
        mesh_mod.edges if edge.select and not edge.hide] for vert in edge.key])
    single_vertices = [vert.index for vert in mesh_mod.vertices if \
        vert.select and not vert.hide and not \
        verts_connected.get(vert.index, False)]

    if single_vertices and len(mesh.faces)>0:
        # for each single vertex, add the edge-keys on the far side of
        # the first unselected face it belongs to
        vert_to_single = dict([[v.index, []] for v in mesh_mod.vertices \
            if not v.hide])
        for face in [face for face in mesh_mod.faces if not face.select \
        and not face.hide]:
            for vert in face.vertices:
                if vert in single_vertices:
                    for ek in face.edge_keys:
                        if not vert in ek:
                            edge_keys.append(ek)
                            if vert not in vert_to_single[ek[0]]:
                                vert_to_single[ek[0]].append(vert)
                            if vert not in vert_to_single[ek[1]]:
                                vert_to_single[ek[1]].append(vert)
                    break

    # sort edge-keys into loops
    loops = get_connected_selections(edge_keys)

    # find out to which loops the single vertices belong
    single_loops = dict([[i, []] for i in range(len(loops))])
    if single_vertices and len(mesh.faces)>0:
        for i, [loop, circular] in enumerate(loops):
            for vert in loop:
                if vert_to_single[vert]:
                    for single in vert_to_single[vert]:
                        if single not in single_loops[i]:
                            single_loops[i].append(single)

    return(derived, mesh_mod, single_vertices, single_loops, loops)
+
+
+# recalculate positions based on the influence of the circle shape
def circle_influence_locs(locs_2d, new_locs_2d, influence):
    """Blend the original and the circle-projected 2d locations.

    influence is a percentage: 0 keeps the original locations, 100 uses
    the projected locations. locs_2d is updated in place and returned.
    """
    for i, (oldx, oldy, j) in enumerate(locs_2d):
        newx, newy, k = new_locs_2d[i]
        altx = newx*(influence/100)+ oldx*((100-influence)/100)
        alty = newy*(influence/100)+ oldy*((100-influence)/100)
        locs_2d[i] = [altx, alty, j]

    return(locs_2d)
+
+
+# project 2d locations on circle, respecting distance relations between verts
def circle_project_non_regular(locs_2d, x0, y0, r):
    """Project the 2d locations onto the circle, respecting the distance
    relations between the vertices. Updates locs_2d in place.
    """
    for i, (x, y, j) in enumerate(locs_2d):
        direction = mathutils.Vector([x-x0, y-y0])
        direction.length = r
        locs_2d[i] = [direction[0], direction[1], j]

    return(locs_2d)
+
+
+# project 2d locations on circle, with equal distance between all vertices
def circle_project_regular(locs_2d, x0, y0, r):
    """Project the 2d locations on the circle, with equal distance
    between all vertices. Updates locs_2d in place and returns it.
    """
    # find offset angle and circling direction
    x, y, i = locs_2d[0]
    loc = mathutils.Vector([x-x0, y-y0])
    loc.length = r
    offset_angle = loc.angle(mathutils.Vector([1.0, 0.0]), 0.0)
    loca = mathutils.Vector([x-x0, y-y0, 0.0])
    if loc[1] < -1e-6:
        # first vertex lies below the x-axis, so the angle is negative
        offset_angle *= -1
    x, y, j = locs_2d[1]
    locb = mathutils.Vector([x-x0, y-y0, 0.0])
    # z-component of the cross product gives the winding direction
    if loca.cross(locb)[2] >= 0:
        ccw = 1
    else:
        ccw = -1
    # distribute vertices along the circle
    for i in range(len(locs_2d)):
        t = offset_angle + ccw * (i / len(locs_2d) * 2 * math.pi)
        x = math.cos(t) * r
        y = math.sin(t) * r
        locs_2d[i] = [x, y, locs_2d[i][2]]

    return(locs_2d)
+
+
+# shift loop, so the first vertex is closest to the center
def circle_shift_loop(mesh_mod, loop, com):
    """Shift the loop so that the vertex closest to com comes first."""
    verts, circular = loop
    # index of the vertex closest to the center
    closest_dist, shift = min([((mesh_mod.vertices[vert].co - com).length, i) \
        for i, vert in enumerate(verts)])

    return([verts[shift:] + verts[:shift], circular])
+
+
+##########################################
+####### Curve functions ##################
+##########################################
+
+# create lists with knots and points, all correctly sorted
def curve_calculate_knots(loop, verts_selected):
    """Create lists with knots and points, all correctly sorted.

    Knots are the selected vertices the spline must pass through;
    points are all loop vertices in spline order. On circular loops
    with large unselected gaps, extra knots are inserted and the loop
    may be broken open to prevent weird splines.
    Returns (knots, points).
    """
    knots = [v for v in loop[0] if v in verts_selected]
    points = loop[0][:]
    # circular loop, potential for weird splines
    if loop[1]:
        offset = int(len(loop[0]) / 4)
        # positions of the knots on the loop
        kpos = []
        for k in knots:
            kpos.append(loop[0].index(k))
        # gaps (in loop steps) between consecutive knots, with wrap-around
        kdif = []
        for i in range(len(kpos) - 1):
            kdif.append(kpos[i+1] - kpos[i])
        kdif.append(len(loop[0]) - kpos[-1] + kpos[0])
        kadd = []
        for k in kdif:
            if k > 2 * offset:
                kadd.append([kdif.index(k), True])
            # next 2 lines are optional, they insert
            # an extra control point in small gaps
            #elif k > offset:
            #    kadd.append([kdif.index(k), False])
        kins = []
        krot = False
        for k in kadd: # extra knots to be added
            if k[1]: # big gap (break circular spline)
                kpos = loop[0].index(knots[k[0]]) + offset
                if kpos > len(loop[0]) - 1:
                    kpos -= len(loop[0])
                kins.append([knots[k[0]], loop[0][kpos]])
                kpos2 = k[0] + 1
                if kpos2 > len(knots)-1:
                    kpos2 -= len(knots)
                kpos2 = loop[0].index(knots[kpos2]) - offset
                if kpos2 < 0:
                    kpos2 += len(loop[0])
                kins.append([loop[0][kpos], loop[0][kpos2]])
                # remember where the loop is broken open
                krot = loop[0][kpos2]
            else: # small gap (keep circular spline)
                k1 = loop[0].index(knots[k[0]])
                k2 = k[0] + 1
                if k2 > len(knots)-1:
                    k2 -= len(knots)
                k2 = loop[0].index(knots[k2])
                if k2 < k1:
                    dif = len(loop[0]) - 1 - k1 + k2
                else:
                    dif = k2 - k1
                kn = k1 + int(dif/2)
                if kn > len(loop[0]) - 1:
                    kn -= len(loop[0])
                kins.append([loop[0][k1], loop[0][kn]])
        for j in kins: # insert new knots
            knots.insert(knots.index(j[0]) + 1, j[1])
        if not krot: # circular loop
            knots.append(knots[0])
            points = loop[0][loop[0].index(knots[0]):]
            points += loop[0][0:loop[0].index(knots[0]) + 1]
        else: # non-circular loop (broken by script)
            krot = knots.index(krot)
            knots = knots[krot:] + knots[0:krot]
            if loop[0].index(knots[0]) > loop[0].index(knots[-1]):
                points = loop[0][loop[0].index(knots[0]):]
                points += loop[0][0:loop[0].index(knots[-1])+1]
            else:
                points = loop[0][loop[0].index(knots[0]):\
                    loop[0].index(knots[-1]) + 1]
    # non-circular loop, add first and last point as knots
    else:
        if loop[0][0] not in knots:
            knots.insert(0, loop[0][0])
        if loop[0][-1] not in knots:
            knots.append(loop[0][-1])

    return(knots, points)
+
+
+# calculate relative positions compared to first knot
def curve_calculate_t(mesh_mod, knots, points, pknots, regular, circular):
    """Calculate positions along the loop, relative to the first knot.

    Returns (tknots, tpoints): cumulative lengths along the loop for the
    knots and for all points. With regular=True the point positions are
    re-spaced evenly over the total length.
    """
    tpoints = []
    loc_prev = False
    len_total = 0

    for p in points:
        if p in knots:
            loc = pknots[knots.index(p)] # use projected knot location
        else:
            loc = mathutils.Vector(mesh_mod.vertices[p].co[:])
        if not loc_prev:
            # first iteration (loc_prev still False), distance is zero
            # NOTE(review): a zero vector might also test False here —
            # would reset the distance at the origin; confirm intended
            loc_prev = loc
        len_total += (loc-loc_prev).length
        tpoints.append(len_total)
        loc_prev = loc
    tknots = []
    for p in points:
        if p in knots:
            tknots.append(tpoints[points.index(p)])
    if circular:
        tknots[-1] = tpoints[-1]

    # regular option
    if regular:
        tpoints_average = tpoints[-1] / (len(tpoints) - 1)
        for i in range(1, len(tpoints) - 1):
            tpoints[i] = i * tpoints_average
        for i in range(len(knots)):
            tknots[i] = tpoints[points.index(knots[i])]
        if circular:
            tknots[-1] = tpoints[-1]


    return(tknots, tpoints)
+
+
+# change the location of non-selected points to their place on the spline
def curve_calculate_vertices(mesh_mod, knots, tknots, points, tpoints, splines,
interpolation, restriction):
    """Change the location of non-selected points to their place on the
    spline.

    Returns a list of [point index, new location] pairs. With a
    restriction ('extrude' or 'indent') a point is only moved when its
    displacement is on the matching side of the vertex normal.
    """
    newlocs = {}
    move = []

    for p in points:
        if p in knots:
            continue
        m = tpoints[points.index(p)]
        if m in tknots:
            n = tknots.index(m)
        else:
            # find which spline segment the point falls into
            t = tknots[:]
            t.append(m)
            t.sort()
            n = t.index(m) - 1
        # clamp to a valid spline index
        if n > len(splines) - 1:
            n = len(splines) - 1
        elif n < 0:
            n = 0

        if interpolation == 'cubic':
            ax, bx, cx, dx, tx = splines[n][0]
            x = ax + bx*(m-tx) + cx*(m-tx)**2 + dx*(m-tx)**3
            ay, by, cy, dy, ty = splines[n][1]
            y = ay + by*(m-ty) + cy*(m-ty)**2 + dy*(m-ty)**3
            az, bz, cz, dz, tz = splines[n][2]
            z = az + bz*(m-tz) + cz*(m-tz)**2 + dz*(m-tz)**3
            newloc = mathutils.Vector([x,y,z])
        else: # interpolation == 'linear'
            a, d, t, u = splines[n]
            newloc = ((m-t)/u)*d + a

        if restriction != 'none': # vertex movement is restricted
            newlocs[p] = newloc
        else: # set the vertex to its new location
            move.append([p, newloc])

    if restriction != 'none': # vertex movement is restricted
        for p in points:
            if p in newlocs:
                newloc = newlocs[p]
            else:
                # knots keep their current location
                move.append([p, mesh_mod.vertices[p].co])
                continue
            oldloc = mesh_mod.vertices[p].co
            normal = mesh_mod.vertices[p].normal
            dloc = newloc - oldloc
            if dloc.length < 1e-6:
                move.append([p, newloc])
            elif restriction == 'extrude': # only extrusions
                if dloc.angle(normal, 0) < 0.5 * math.pi + 1e-6:
                    move.append([p, newloc])
            else: # restriction == 'indent' only indentations
                if dloc.angle(normal) > 0.5 * math.pi - 1e-6:
                    move.append([p, newloc])

    return(move)
+
+
+# trim loops to part between first and last selected vertices (including)
def curve_cut_boundaries(mesh_mod, loops):
    """Trim each non-circular loop to the part between its first and
    last selected vertices (both included). Circular loops are kept
    untouched.
    """
    trimmed = []
    for loop, circular in loops:
        if circular:
            # circular loops have no boundaries to cut
            trimmed.append([loop, circular])
            continue
        selection = [mesh_mod.vertices[v].select for v in loop]
        start = selection.index(True)
        end = len(loop) - 1 - selection[::-1].index(True)
        trimmed.append([loop[start:end + 1], circular])

    return(trimmed)
+
+
+# calculate input loops
def curve_get_input(object, mesh, boundaries, scene):
    """Calculate the input loops for the curve operator.

    Returns (derived, mesh_mod, correct_loops), where mesh_mod is the
    mesh with modifiers applied. Fully selected loops are replaced by
    the loops perpendicular to them.
    """
    # get mesh with modifiers applied
    derived, mesh_mod = get_derived_mesh(object, mesh, scene)

    # vertices that still need a loop to run through it
    verts_unsorted = [v.index for v in mesh_mod.vertices if \
        v.select and not v.hide]
    # necessary dictionaries
    vert_edges = dict_vert_edges(mesh_mod)
    edge_faces = dict_edge_faces(mesh_mod)
    correct_loops = []

    # find loops through each selected vertex
    while len(verts_unsorted) > 0:
        loops = curve_vertex_loops(mesh_mod, verts_unsorted[0], vert_edges,
            edge_faces)
        verts_unsorted.pop(0)

        # check if loop is fully selected
        search_perpendicular = False
        i = -1
        for loop, circular in loops:
            i += 1
            selected = [v for v in loop if mesh_mod.vertices[v].select]
            if len(selected) < 2:
                # only one selected vertex on loop, don't use
                # NOTE(review): pop() while iterating skips the next
                # entry of loops — confirm whether that is intended
                loops.pop(i)
                continue
            elif len(selected) == len(loop):
                search_perpendicular = loop
                break
        # entire loop is selected, find perpendicular loops
        if search_perpendicular:
            for vert in loop:
                if vert in verts_unsorted:
                    verts_unsorted.remove(vert)
            perp_loops = curve_perpendicular_loops(mesh_mod, loop,
                vert_edges, edge_faces)
            for perp_loop in perp_loops:
                correct_loops.append(perp_loop)
        # normal input
        else:
            for loop, circular in loops:
                correct_loops.append([loop, circular])

    # boundaries option
    if boundaries:
        correct_loops = curve_cut_boundaries(mesh_mod, correct_loops)

    return(derived, mesh_mod, correct_loops)
+
+
+# return all loops that are perpendicular to the given one
def curve_perpendicular_loops(mesh_mod, start_loop, vert_edges, edge_faces):
    """Return all loops that are perpendicular to the given one.

    The perpendicular loops are trimmed to equal lengths, centered on
    the start loop, unless every one of them is circular.
    Returns a list of [loop, circular] pairs.
    """
    # find perpendicular loops
    perp_loops = []
    for start_vert in start_loop:
        loops = curve_vertex_loops(mesh_mod, start_vert, vert_edges,
            edge_faces)
        for loop, circular in loops:
            selected = [v for v in loop if mesh_mod.vertices[v].select]
            if len(selected) == len(loop):
                # fully selected loop: this is the start loop itself
                continue
            else:
                perp_loops.append([loop, circular, loop.index(start_vert)])

    # trim loops to same lengths
    shortest = [[len(loop[0]), i] for i, loop in enumerate(perp_loops)\
        if not loop[1]]
    if not shortest:
        # all loops are circular, not trimming
        return([[loop[0], loop[1]] for loop in perp_loops])
    else:
        # number of vertices to keep before and after the start loop,
        # derived from the shortest non-circular loop
        shortest = min(shortest)
        shortest_start = perp_loops[shortest[1]][2]
        before_start = shortest_start
        after_start = shortest[0] - shortest_start - 1
        bigger_before = before_start > after_start
        trimmed_loops = []
        for loop in perp_loops:
            # have the loop face the same direction as the shortest one
            if bigger_before:
                if loop[2] < len(loop[0]) / 2:
                    loop[0].reverse()
                    loop[2] = len(loop[0]) - loop[2] - 1
            else:
                if loop[2] > len(loop[0]) / 2:
                    loop[0].reverse()
                    loop[2] = len(loop[0]) - loop[2] - 1
            # circular loops can shift, to prevent wrong trimming
            if loop[1]:
                shift = shortest_start - loop[2]
                if loop[2] + shift > 0 and loop[2] + shift < len(loop[0]):
                    loop[0] = loop[0][-shift:] + loop[0][:-shift]
                    loop[2] += shift
                    if loop[2] < 0:
                        loop[2] += len(loop[0])
                    elif loop[2] > len(loop[0]) -1:
                        loop[2] -= len(loop[0])
            # trim
            start = max(0, loop[2] - before_start)
            end = min(len(loop[0]), loop[2] + after_start + 1)
            trimmed_loops.append([loop[0][start:end], False])

        return(trimmed_loops)
+
+
+# project knots on non-selected geometry
def curve_project_knots(mesh_mod, verts_selected, knots, points, circular):
    """Project the knots on the non-selected geometry.

    Each selected knot is projected onto the line through its nearest
    non-knot neighbours on the loop. Returns the projected knot
    locations, in the same order as knots.
    """
    # function to project vertex on edge
    def project(v1, v2, v3):
        # v1 and v2 are part of a line
        # v3 is projected onto it
        v2 -= v1
        v3 -= v1
        p = v3.project(v2)
        return(p + v1)

    if circular: # project all knots
        start = 0
        end = len(knots)
        pknots = []
    else: # first and last knot shouldn't be projected
        start = 1
        end = -1
        pknots = [mathutils.Vector(mesh_mod.vertices[knots[0]].co[:])]
    for knot in knots[start:end]:
        if knot in verts_selected:
            knot_left = knot_right = False
            # search backwards for the nearest non-knot point
            for i in range(points.index(knot)-1, -1*len(points), -1):
                if points[i] not in knots:
                    knot_left = points[i]
                    break
            # search forwards (with wrap-around) for the nearest non-knot
            for i in range(points.index(knot)+1, 2*len(points)):
                if i > len(points) - 1:
                    i -= len(points)
                if points[i] not in knots:
                    knot_right = points[i]
                    break
            if knot_left and knot_right and knot_left != knot_right:
                knot_left = mathutils.Vector(\
                    mesh_mod.vertices[knot_left].co[:])
                knot_right = mathutils.Vector(\
                    mesh_mod.vertices[knot_right].co[:])
                knot = mathutils.Vector(mesh_mod.vertices[knot].co[:])
                pknots.append(project(knot_left, knot_right, knot))
            else:
                # no usable neighbours, keep the knot where it is
                pknots.append(mathutils.Vector(mesh_mod.vertices[knot].co[:]))
        else: # knot isn't selected, so shouldn't be changed
            pknots.append(mathutils.Vector(mesh_mod.vertices[knot].co[:]))
    if not circular:
        pknots.append(mathutils.Vector(mesh_mod.vertices[knots[-1]].co[:]))

    return(pknots)
+
+
+# find all loops through a given vertex
def curve_vertex_loops(mesh_mod, start_vert, vert_edges, edge_faces):
    """Find all loops through a given vertex.

    Each loop is grown in both directions from an edge at start_vert,
    always continuing over the edge that shares no face with the
    previous one. Growing stops at poles (vertices with fewer than 3 or
    more than 4 edges) or when the loop closes on itself.
    Returns a list of [loop, circular] pairs.
    """
    edges_used = []
    loops = []

    for edge in vert_edges[start_vert]:
        if edge in edges_used:
            continue
        loop = []
        circular = False
        # grow in both directions, starting from either end of the edge
        for vert in edge:
            active_faces = edge_faces[edge]
            new_vert = vert
            growing = True
            while growing:
                growing = False
                new_edges = vert_edges[new_vert]
                loop.append(new_vert)
                if len(loop) > 1:
                    edges_used.append(tuple(sorted([loop[-1], loop[-2]])))
                if len(new_edges) < 3 or len(new_edges) > 4:
                    # pole
                    break
                else:
                    # find next edge
                    for new_edge in new_edges:
                        if new_edge in edges_used:
                            continue
                        # skip edges sharing a face with the current one
                        eliminate = False
                        for new_face in edge_faces[new_edge]:
                            if new_face in active_faces:
                                eliminate = True
                                break
                        if eliminate:
                            continue
                        # found correct new edge
                        active_faces = edge_faces[new_edge]
                        v1, v2 = new_edge
                        if v1 != new_vert:
                            new_vert = v1
                        else:
                            new_vert = v2
                        if new_vert == loop[0]:
                            circular = True
                        else:
                            growing = True
                        break
            if circular:
                break
        loop.reverse()
        loops.append([loop, circular])

    return(loops)
+
+
+##########################################
+####### Flatten functions ################
+##########################################
+
+# sort input into loops
+def flatten_get_input(mesh):
+ vert_verts = dict_vert_verts([edge.key for edge in mesh.edges \
+ if edge.select and not edge.hide])
+ verts = [v.index for v in mesh.vertices if v.select and not v.hide]
+
+ # no connected verts, consider all selected verts as a single input
+ if not vert_verts:
+ return([[verts, False]])
+
+ loops = []
+ while len(verts) > 0:
+ # start of loop
+ loop = [verts[0]]
+ verts.pop(0)
+ if loop[-1] in vert_verts:
+ to_grow = vert_verts[loop[-1]]
+ else:
+ to_grow = []
+ # grow loop
+ while len(to_grow) > 0:
+ new_vert = to_grow[0]
+ to_grow.pop(0)
+ if new_vert in loop:
+ continue
+ loop.append(new_vert)
+ verts.remove(new_vert)
+ to_grow += vert_verts[new_vert]
+ # add loop to loops
+ loops.append([loop, False])
+
+ return(loops)
+
+
+# calculate position of vertex projections on plane
+def flatten_project(mesh, loop, com, normal):
+ verts = [mesh.vertices[v] for v in loop[0]]
+ verts_projected = [[v.index, mathutils.Vector(v.co[:]) - \
+ (mathutils.Vector(v.co[:])-com).dot(normal)*normal] for v in verts]
+
+ return(verts_projected)
+
+
+##########################################
+####### Relax functions ##################
+##########################################
+
+# create lists with knots and points, all correctly sorted
+def relax_calculate_knots(loops):
+    """Split each loop into interleaved knot/point index lists.
+
+    Each loop yields up to two alternating sequences: even-indexed
+    vertices act as spline knots while odd-indexed ones are the points to
+    be relaxed, and vice versa for the second sequence.  Returns
+    (all_knots, all_points), parallel lists of index lists.
+    """
+    all_knots = []
+    all_points = []
+    for loop, circular in loops:
+        knots = [[], []]
+        points = [[], []]
+        # extend = [wrap set 0, wrap set 1, knot start 0, knot start 1,
+        #           point start 0, point start 1]
+        if circular:
+            if len(loop)%2 == 1: # odd
+                extend = [False, True, 0, 1, 0, 1]
+            else: # even
+                extend = [True, False, 0, 1, 1, 2]
+        else:
+            # NOTE(review): both non-circular branches are identical;
+            # the parity distinction only matters for circular loops
+            if len(loop)%2 == 1: # odd
+                extend = [False, False, 0, 1, 1, 2]
+            else: # even
+                extend = [False, False, 0, 1, 1, 2]
+        for j in range(2):
+            if extend[j]:
+                # wrap the loop so the sequence closes on itself
+                loop = [loop[-1]] + loop + [loop[0]]
+            for i in range(extend[2+2*j], len(loop), 2):
+                knots[j].append(loop[i])
+            for i in range(extend[3+2*j], len(loop), 2):
+                if loop[i] == loop[-1] and not circular:
+                    continue
+                if len(points[j]) == 0:
+                    points[j].append(loop[i])
+                elif loop[i] != points[j][0]:
+                    points[j].append(loop[i])
+            if circular:
+                # close the knot ring
+                if knots[j][0] != knots[j][-1]:
+                    knots[j].append(knots[j][0])
+        if len(points[1]) == 0:
+            # loop too short for a second interleaved sequence
+            knots.pop(1)
+            points.pop(1)
+        for k in knots:
+            all_knots.append(k)
+        for p in points:
+            all_points.append(p)
+
+    return(all_knots, all_points)
+
+
+# calculate relative positions compared to first knot
+def relax_calculate_t(mesh_mod, knots, points, regular):
+    """Calculate cumulative distances (t-values) for knots and points.
+
+    Distances are measured along the polyline from the first knot.  With
+    regular=True, each point's t-value is replaced by the midpoint of its
+    two surrounding knots.  Returns (all_tknots, all_tpoints).
+    """
+    all_tknots = []
+    all_tpoints = []
+    for i in range(len(knots)):
+        amount = len(knots[i]) + len(points[i])
+        # rebuild the knot/point sequence in on-loop order,
+        # tagging each entry as knot (True) or point (False)
+        mix = []
+        for j in range(amount):
+            if j%2 == 0:
+                mix.append([True, knots[i][round(j/2)]])
+            elif j == amount-1:
+                mix.append([True, knots[i][-1]])
+            else:
+                mix.append([False, points[i][int(j/2)]])
+        len_total = 0
+        loc_prev = False
+        tknots = []
+        tpoints = []
+        for m in mix:
+            loc = mathutils.Vector(mesh_mod.vertices[m[1]].co[:])
+            if not loc_prev:
+                # first iteration: previous location equals current,
+                # so the first t-value is 0
+                loc_prev = loc
+            len_total += (loc - loc_prev).length
+            if m[0]:
+                tknots.append(len_total)
+            else:
+                tpoints.append(len_total)
+            loc_prev = loc
+        if regular:
+            # place points exactly halfway between neighbouring knots
+            tpoints = []
+            for p in range(len(points[i])):
+                tpoints.append((tknots[p] + tknots[p+1]) / 2)
+        all_tknots.append(tknots)
+        all_tpoints.append(tpoints)
+
+    return(all_tknots, all_tpoints)
+
+
+# change the location of the points to their place on the spline
+def relax_calculate_verts(mesh_mod, interpolation, tknots, knots, tpoints,
+points, splines):
+    """Move each point halfway towards its position on the spline.
+
+    Evaluates the cubic or linear spline at each point's t-value, then
+    averages the result with the current vertex location so every relax
+    iteration moves the vertices gradually.  Returns a list of
+    [vertex_index, new_location] pairs.
+    """
+    change = []
+    move = []
+    for i in range(len(knots)):
+        for p in points[i]:
+            m = tpoints[i][points[i].index(p)]
+            if m in tknots[i]:
+                n = tknots[i].index(m)
+            else:
+                # find the spline segment whose t-range contains m
+                t = tknots[i][:]
+                t.append(m)
+                t.sort()
+                n = t.index(m)-1
+            if n > len(splines[i]) - 1:
+                n = len(splines[i]) - 1
+            elif n < 0:
+                n = 0
+
+            if interpolation == 'cubic':
+                # evaluate the cubic polynomial per axis at offset (m - t)
+                ax, bx, cx, dx, tx = splines[i][n][0]
+                x = ax + bx*(m-tx) + cx*(m-tx)**2 + dx*(m-tx)**3
+                ay, by, cy, dy, ty = splines[i][n][1]
+                y = ay + by*(m-ty) + cy*(m-ty)**2 + dy*(m-ty)**3
+                az, bz, cz, dz, tz = splines[i][n][2]
+                z = az + bz*(m-tz) + cz*(m-tz)**2 + dz*(m-tz)**3
+                change.append([p, mathutils.Vector([x,y,z])])
+            else: # interpolation == 'linear'
+                a, d, t, u = splines[i][n]
+                if u == 0:
+                    # avoid division by zero on zero-length segments
+                    u = 1e-8
+                change.append([p, ((m-t)/u)*d + a])
+    for c in change:
+        # average of current location and spline location
+        move.append([c[0], (mesh_mod.vertices[c[0]].co + c[1]) / 2])
+
+    return(move)
+
+
+##########################################
+####### Space functions ##################
+##########################################
+
+# calculate relative positions compared to first knot
+def space_calculate_t(mesh_mod, knots):
+ tknots = []
+ loc_prev = False
+ len_total = 0
+ for k in knots:
+ loc = mathutils.Vector(mesh_mod.vertices[k].co[:])
+ if not loc_prev:
+ loc_prev = loc
+ len_total += (loc - loc_prev).length
+ tknots.append(len_total)
+ loc_prev = loc
+ amount = len(knots)
+ t_per_segment = len_total / (amount - 1)
+ tpoints = [i * t_per_segment for i in range(amount)]
+
+ return(tknots, tpoints)
+
+
+# change the location of the points to their place on the spline
+def space_calculate_verts(mesh_mod, interpolation, tknots, tpoints, points,
+splines):
+ move = []
+ for p in points:
+ m = tpoints[points.index(p)]
+ if m in tknots:
+ n = tknots.index(m)
+ else:
+ t = tknots[:]
+ t.append(m)
+ t.sort()
+ n = t.index(m) - 1
+ if n > len(splines) - 1:
+ n = len(splines) - 1
+ elif n < 0:
+ n = 0
+
+ if interpolation == 'cubic':
+ ax, bx, cx, dx, tx = splines[n][0]
+ x = ax + bx*(m-tx) + cx*(m-tx)**2 + dx*(m-tx)**3
+ ay, by, cy, dy, ty = splines[n][1]
+ y = ay + by*(m-ty) + cy*(m-ty)**2 + dy*(m-ty)**3
+ az, bz, cz, dz, tz = splines[n][2]
+ z = az + bz*(m-tz) + cz*(m-tz)**2 + dz*(m-tz)**3
+ move.append([p, mathutils.Vector([x,y,z])])
+ else: # interpolation == 'linear'
+ a, d, t, u = splines[n]
+ move.append([p, ((m-t)/u)*d + a])
+
+ return(move)
+
+
+##########################################
+####### Operators ########################
+##########################################
+
+# bridge operator
+class Bridge(bpy.types.Operator):
+    """Operator: bridge two, or loft several, loops of vertices."""
+    bl_idname = 'mesh.looptools_bridge'
+    bl_label = "Bridge / Loft"
+    bl_description = "Bridge two, or loft several, loops of vertices"
+    bl_options = {'REGISTER', 'UNDO'}
+
+    cubic_strength = bpy.props.FloatProperty(name = "Strength",
+        description = "Higher strength results in more fluid curves",
+        default = 1.0,
+        soft_min = -3.0,
+        soft_max = 3.0)
+    interpolation = bpy.props.EnumProperty(name = "Interpolation mode",
+        items = (('cubic', "Cubic", "Gives curved results"),
+            ('linear', "Linear", "Basic, fast, straight interpolation")),
+        description = "Interpolation mode: algorithm used when creating "\
+            "segments",
+        default = 'cubic')
+    loft = bpy.props.BoolProperty(name = "Loft",
+        description = "Loft multiple loops, instead of considering them as "\
+            "a multi-input for bridging",
+        default = False)
+    loft_loop = bpy.props.BoolProperty(name = "Loop",
+        description = "Connect the first and the last loop with each other",
+        default = False)
+    min_width = bpy.props.IntProperty(name = "Minimum width",
+        description = "Segments with an edge smaller than this are merged "\
+            "(compared to base edge)",
+        default = 0,
+        min = 0,
+        max = 100,
+        subtype = 'PERCENTAGE')
+    mode = bpy.props.EnumProperty(name = "Mode",
+        items = (('basic', "Basic", "Fast algorithm"), ('shortest',
+            "Shortest edge", "Slower algorithm with better vertex matching")),
+        description = "Algorithm used for bridging",
+        default = 'shortest')
+    remove_faces = bpy.props.BoolProperty(name = "Remove faces",
+        description = "Remove faces that are internal after bridging",
+        default = True)
+    reverse = bpy.props.BoolProperty(name = "Reverse",
+        description = "Manually override the direction in which the loops "\
+            "are bridged. Only use if the tool gives the wrong result.",
+        default = False)
+    segments = bpy.props.IntProperty(name = "Segments",
+        description = "Number of segments used to bridge the gap "\
+            "(0 = automatic)",
+        default = 1,
+        min = 0,
+        soft_max = 20)
+    twist = bpy.props.IntProperty(name = "Twist",
+        description = "Twist what vertices are connected to each other",
+        default = 0)
+
+    @classmethod
+    def poll(cls, context):
+        # only available for mesh objects in edit mode
+        ob = context.active_object
+        return (ob and ob.type == 'MESH' and context.mode == 'EDIT_MESH')
+
+    def draw(self, context):
+        """Draw the operator options in the redo panel."""
+        layout = self.layout
+        #layout.prop(self, "mode") # no cases yet where 'basic' mode is needed
+
+        # top row
+        col_top = layout.column(align=True)
+        row = col_top.row(align=True)
+        col_left = row.column(align=True)
+        col_right = row.column(align=True)
+        col_right.active = self.segments != 1
+        col_left.prop(self, "segments")
+        col_right.prop(self, "min_width", text="")
+        # bottom row
+        bottom_left = col_left.row()
+        bottom_left.active = self.segments != 1
+        bottom_left.prop(self, "interpolation", text="")
+        bottom_right = col_right.row()
+        bottom_right.active = self.interpolation == 'cubic'
+        bottom_right.prop(self, "cubic_strength")
+        # boolean properties
+        col_top.prop(self, "remove_faces")
+        if self.loft:
+            col_top.prop(self, "loft_loop")
+
+        # override properties
+        col_top.separator()
+        row = layout.row(align = True)
+        row.prop(self, "twist")
+        row.prop(self, "reverse")
+
+    def invoke(self, context, event):
+        # load custom settings
+        context.window_manager.looptools.bridge_loft = self.loft
+        settings_load(self)
+        return self.execute(context)
+
+    def execute(self, context):
+        """Calculate and create the bridging/lofting geometry."""
+        # initialise
+        global_undo, object, mesh = initialise()
+        edge_faces, edgekey_to_edge, old_selected_faces, smooth = \
+            bridge_initialise(mesh, self.interpolation)
+        settings_write(self)
+
+        # check cache to see if we can save time
+        input_method = bridge_input_method(self.loft, self.loft_loop)
+        cached, single_loops, loops, derived, mapping = cache_read("Bridge",
+            object, mesh, input_method, False)
+        if not cached:
+            # get loops
+            loops = bridge_get_input(mesh)
+            if loops:
+                # reorder loops if there are more than 2
+                if len(loops) > 2:
+                    if self.loft:
+                        loops = bridge_sort_loops(mesh, loops, self.loft_loop)
+                    else:
+                        loops = bridge_match_loops(mesh, loops)
+
+        # saving cache for faster execution next time
+        if not cached:
+            cache_write("Bridge", object, mesh, input_method, False, False,
+                loops, False, False)
+
+        if loops:
+            # calculate new geometry
+            vertices = []
+            faces = []
+            max_vert_index = len(mesh.vertices)-1
+            for i in range(1, len(loops)):
+                if not self.loft and i%2 == 0:
+                    # bridge mode handles loops in independent pairs
+                    continue
+                lines = bridge_calculate_lines(mesh, loops[i-1:i+1],
+                    self.mode, self.twist, self.reverse)
+                vertex_normals = bridge_calculate_virtual_vertex_normals(mesh,
+                    lines, loops[i-1:i+1], edge_faces, edgekey_to_edge)
+                segments = bridge_calculate_segments(mesh, lines,
+                    loops[i-1:i+1], self.segments)
+                new_verts, new_faces, max_vert_index = \
+                    bridge_calculate_geometry(mesh, lines, vertex_normals,
+                    segments, self.interpolation, self.cubic_strength,
+                    self.min_width, max_vert_index)
+                if new_verts:
+                    vertices += new_verts
+                if new_faces:
+                    faces += new_faces
+            # make sure faces in loops that aren't used, aren't removed
+            if self.remove_faces and old_selected_faces:
+                bridge_save_unused_faces(mesh, old_selected_faces, loops)
+            # create vertices
+            if vertices:
+                bridge_create_vertices(mesh, vertices)
+            # create faces
+            if faces:
+                bridge_create_faces(mesh, faces, self.twist)
+                bridge_select_new_faces(mesh, len(faces), smooth)
+            # edge-data could have changed, can't use cache next run
+            if faces and not vertices:
+                cache_delete("Bridge")
+            # delete internal faces
+            if self.remove_faces and old_selected_faces:
+                bridge_remove_internal_faces(mesh, old_selected_faces)
+            # make sure normals are facing outside
+            bridge_recalculate_normals()
+
+        terminate(global_undo)
+        return{'FINISHED'}
+
+
+# circle operator
+class Circle(bpy.types.Operator):
+    """Operator: move selected vertices into a circle shape."""
+    bl_idname = "mesh.looptools_circle"
+    bl_label = "Circle"
+    bl_description = "Move selected vertices into a circle shape"
+    bl_options = {'REGISTER', 'UNDO'}
+
+    custom_radius = bpy.props.BoolProperty(name = "Radius",
+        description = "Force a custom radius",
+        default = False)
+    fit = bpy.props.EnumProperty(name = "Method",
+        items = (("best", "Best fit", "Non-linear least squares"),
+            ("inside", "Fit inside","Only move vertices towards the center")),
+        description = "Method used for fitting a circle to the vertices",
+        default = 'best')
+    flatten = bpy.props.BoolProperty(name = "Flatten",
+        description = "Flatten the circle, instead of projecting it on the " \
+            "mesh",
+        default = True)
+    influence = bpy.props.FloatProperty(name = "Influence",
+        description = "Force of the tool",
+        default = 100.0,
+        min = 0.0,
+        max = 100.0,
+        precision = 1,
+        subtype = 'PERCENTAGE')
+    radius = bpy.props.FloatProperty(name = "Radius",
+        description = "Custom radius for circle",
+        default = 1.0,
+        min = 0.0,
+        soft_max = 1000.0)
+    regular = bpy.props.BoolProperty(name = "Regular",
+        description = "Distribute vertices at constant distances along the " \
+            "circle",
+        default = True)
+
+    @classmethod
+    def poll(cls, context):
+        # only available for mesh objects in edit mode
+        ob = context.active_object
+        return(ob and ob.type == 'MESH' and context.mode == 'EDIT_MESH')
+
+    def draw(self, context):
+        """Draw the operator options in the redo panel."""
+        layout = self.layout
+        col = layout.column()
+
+        col.prop(self, "fit")
+        col.separator()
+
+        col.prop(self, "flatten")
+        row = col.row(align=True)
+        row.prop(self, "custom_radius")
+        row_right = row.row(align=True)
+        row_right.active = self.custom_radius
+        row_right.prop(self, "radius", text="")
+        col.prop(self, "regular")
+        col.separator()
+
+        col.prop(self, "influence")
+
+    def invoke(self, context, event):
+        # load custom settings
+        settings_load(self)
+        return self.execute(context)
+
+    def execute(self, context):
+        """Fit a circle to each selected loop and move the vertices."""
+        # initialise
+        global_undo, object, mesh = initialise()
+        settings_write(self)
+        # check cache to see if we can save time
+        cached, single_loops, loops, derived, mapping = cache_read("Circle",
+            object, mesh, False, False)
+        if cached:
+            derived, mesh_mod = get_derived_mesh(object, mesh, context.scene)
+        else:
+            # find loops
+            derived, mesh_mod, single_vertices, single_loops, loops = \
+                circle_get_input(object, mesh, context.scene)
+            mapping = get_mapping(derived, mesh, mesh_mod, single_vertices,
+                False, loops)
+            single_loops, loops = circle_check_loops(single_loops, loops,
+                mapping, mesh_mod)
+
+        # saving cache for faster execution next time
+        if not cached:
+            cache_write("Circle", object, mesh, False, False, single_loops,
+                loops, derived, mapping)
+
+        move = []
+        for i, loop in enumerate(loops):
+            # best fitting flat plane
+            com, normal = calculate_plane(mesh_mod, loop)
+            # if circular, shift loop so we get a good starting vertex
+            if loop[1]:
+                loop = circle_shift_loop(mesh_mod, loop, com)
+            # flatten vertices on plane
+            locs_2d, p, q = circle_3d_to_2d(mesh_mod, loop, com, normal)
+            # calculate circle
+            if self.fit == 'best':
+                x0, y0, r = circle_calculate_best_fit(locs_2d)
+            else: # self.fit == 'inside'
+                x0, y0, r = circle_calculate_min_fit(locs_2d)
+            # radius override
+            if self.custom_radius:
+                r = self.radius / p.length
+            # calculate positions on circle
+            if self.regular:
+                new_locs_2d = circle_project_regular(locs_2d[:], x0, y0, r)
+            else:
+                new_locs_2d = circle_project_non_regular(locs_2d[:], x0, y0, r)
+            # take influence into account
+            locs_2d = circle_influence_locs(locs_2d, new_locs_2d,
+                self.influence)
+            # calculate 3d positions of the created 2d input
+            move.append(circle_calculate_verts(self.flatten, mesh_mod,
+                locs_2d, com, p, q, normal))
+            # flatten single input vertices on plane defined by loop
+            if self.flatten and single_loops:
+                move.append(circle_flatten_singles(mesh_mod, com, p, q,
+                    normal, single_loops[i]))
+
+        # move vertices to new locations
+        move_verts(mesh, mapping, move, -1)
+
+        # cleaning up
+        if derived:
+            bpy.context.blend_data.meshes.remove(mesh_mod)
+        terminate(global_undo)
+
+        return{'FINISHED'}
+
+
+# curve operator
+class Curve(bpy.types.Operator):
+ bl_idname = "mesh.looptools_curve"
+ bl_label = "Curve"
+ bl_description = "Turn a loop into a smooth curve"
+ bl_options = {'REGISTER', 'UNDO'}
+
+ boundaries = bpy.props.BoolProperty(name = "Boundaries",
+ description = "Limit the tool to work within the boundaries of the "\
+ "selected vertices",
+ default = False)
+ influence = bpy.props.FloatProperty(name = "Influence",
+ description = "Force of the tool",
+ default = 100.0,
+ min = 0.0,
+ max = 100.0,
+ precision = 1,
+ subtype = 'PERCENTAGE')
+ interpolation = bpy.props.EnumProperty(name = "Interpolation",
+ items = (("cubic", "Cubic", "Natural cubic spline, smooth results"),
+ ("linear", "Linear", "Simple and fast linear algorithm")),
+ description = "Algorithm used for interpolation",
+ default = 'cubic')
+ regular = bpy.props.BoolProperty(name = "Regular",
+ description = "Distribute vertices at constant distances along the" \
+ "curve",
+ default = True)
+ restriction = bpy.props.EnumProperty(name = "Restriction",
+ items = (("none", "None", "No restrictions on vertex movement"),
+ ("extrude", "Extrude only","Only allow extrusions (no "\
+ "indentations)"),
+ ("indent", "Indent only", "Only allow indentation (no "\
+ "extrusions)")),
+ description = "Restrictions on how the vertices can be moved",
+ default = 'none')
+
+ @classmethod
+ def poll(cls, context):
+ ob = context.active_object
+ return(ob and ob.type == 'MESH' and context.mode == 'EDIT_MESH')
+
+ def draw(self, context):
+ layout = self.layout
+ col = layout.column()
+
+ col.prop(self, "interpolation")
+ col.prop(self, "restriction")
+ col.prop(self, "boundaries")
+ col.prop(self, "regular")
+ col.separator()
+
+ col.prop(self, "influence")
+
+ def invoke(self, context, event):
+ # load custom settings
+ settings_load(self)
+ return self.execute(context)
+
+ def execute(self, context):
+ # initialise
+ global_undo, object, mesh = initialise()
+ settings_write(self)
+ # check cache to see if we can save time
+ cached, single_loops, loops, derived, mapping = cache_read("Curve",
+ object, mesh, False, self.boundaries)
+ if cached:
+ derived, mesh_mod = get_derived_mesh(object, mesh, context.scene)
+ else:
+ # find loops
+ derived, mesh_mod, loops = curve_get_input(object, mesh,
+ self.boundaries, context.scene)
+ mapping = get_mapping(derived, mesh, mesh_mod, False, True, loops)
+ loops = check_loops(loops, mapping, mesh_mod)
+ verts_selected = [v.index for v in mesh_mod.vertices if v.select \
+ and not v.hide]
+
+ # saving cache for faster execution next time
+ if not cached:
+ cache_write("Curve", object, mesh, False, self.boundaries, False,
+ loops, derived, mapping)
+
+ move = []
+ for loop in loops:
+ knots, points = curve_calculate_knots(loop, verts_selected)
+ pknots = curve_project_knots(mesh_mod, verts_selected, knots,
+ points, loop[1])
+ tknots, tpoints = curve_calculate_t(mesh_mod, knots, points,
+ pknots, self.regular, loop[1])
+ splines = calculate_splines(self.interpolation, mesh_mod,
+ tknots, knots)
+ move.append(curve_calculate_vertices(mesh_mod, knots, tknots,
+ points, tpoints, splines, self.interpolation,
+ self.restriction))
+
+ # move vertices to new locations
+ move_verts(mesh, mapping, move, self.influence)
+
+ # cleaning up
+ if derived:
+ bpy.context.blend_data.meshes.remove(mesh_mod)
+
+ terminate(global_undo)
+ return{'FINISHED'}
+
+
+# flatten operator
+class Flatten(bpy.types.Operator):
+    """Operator: flatten vertices on a best-fitting plane."""
+    bl_idname = "mesh.looptools_flatten"
+    bl_label = "Flatten"
+    bl_description = "Flatten vertices on a best-fitting plane"
+    bl_options = {'REGISTER', 'UNDO'}
+
+    influence = bpy.props.FloatProperty(name = "Influence",
+        description = "Force of the tool",
+        default = 100.0,
+        min = 0.0,
+        max = 100.0,
+        precision = 1,
+        subtype = 'PERCENTAGE')
+    plane = bpy.props.EnumProperty(name = "Plane",
+        items = (("best_fit", "Best fit", "Calculate a best fitting plane"),
+            ("normal", "Normal", "Derive plane from averaging vertex "\
+                "normals"),
+            ("view", "View", "Flatten on a plane perpendicular to the "\
+                "viewing angle")),
+        description = "Plane on which vertices are flattened",
+        default = 'best_fit')
+    restriction = bpy.props.EnumProperty(name = "Restriction",
+        items = (("none", "None", "No restrictions on vertex movement"),
+            ("bounding_box", "Bounding box", "Vertices are restricted to "\
+                "movement inside the bounding box of the selection")),
+        description = "Restrictions on how the vertices can be moved",
+        default = 'none')
+
+    @classmethod
+    def poll(cls, context):
+        # only available for mesh objects in edit mode
+        ob = context.active_object
+        return(ob and ob.type == 'MESH' and context.mode == 'EDIT_MESH')
+
+    def draw(self, context):
+        """Draw the operator options in the redo panel."""
+        layout = self.layout
+        col = layout.column()
+
+        col.prop(self, "plane")
+        #col.prop(self, "restriction")
+        col.separator()
+
+        col.prop(self, "influence")
+
+    def invoke(self, context, event):
+        # load custom settings
+        settings_load(self)
+        return self.execute(context)
+
+    def execute(self, context):
+        """Project the selected vertices onto the chosen plane."""
+        # initialise
+        global_undo, object, mesh = initialise()
+        settings_write(self)
+        # check cache to see if we can save time
+        cached, single_loops, loops, derived, mapping = cache_read("Flatten",
+            object, mesh, False, False)
+        if not cached:
+            # order input into virtual loops
+            loops = flatten_get_input(mesh)
+            loops = check_loops(loops, mapping, mesh)
+
+        # saving cache for faster execution next time
+        if not cached:
+            cache_write("Flatten", object, mesh, False, False, False, loops,
+                False, False)
+
+        move = []
+        for loop in loops:
+            # calculate plane and position of vertices on them
+            com, normal = calculate_plane(mesh, loop, method=self.plane,
+                object=object)
+            to_move = flatten_project(mesh, loop, com, normal)
+            # NOTE(review): the 'bounding_box' restriction is not yet
+            # implemented — both branches below are identical, and the
+            # option is commented out in draw() above
+            if self.restriction == 'none':
+                move.append(to_move)
+            else:
+                move.append(to_move)
+        move_verts(mesh, False, move, self.influence)
+
+        terminate(global_undo)
+        return{'FINISHED'}
+
+
+# relax operator
+class Relax(bpy.types.Operator):
+ bl_idname = "mesh.looptools_relax"
+ bl_label = "Relax"
+ bl_description = "Relax the loop, so it is smoother"
+ bl_options = {'REGISTER', 'UNDO'}
+
+ input = bpy.props.EnumProperty(name = "Input",
+ items = (("all", "Parallel (all)", "Also use non-selected "\
+ "parallel loops as input"),
+ ("selected", "Selection","Only use selected vertices as input")),
+ description = "Loops that are relaxed",
+ default = 'selected')
+ interpolation = bpy.props.EnumProperty(name = "Interpolation",
+ items = (("cubic", "Cubic", "Natural cubic spline, smooth results"),
+ ("linear", "Linear", "Simple and fast linear algorithm")),
+ description = "Algorithm used for interpolation",
+ default = 'cubic')
+ iterations = bpy.props.EnumProperty(name = "Iterations",
+ items = (("1", "1", "One"),
+ ("3", "3", "Three"),
+ ("5", "5", "Five"),
+ ("10", "10", "Ten"),
+ ("25", "25", "Twenty-five")),
+ description = "Number of times the loop is relaxed",
+ default = "1")
+ regular = bpy.props.BoolProperty(name = "Regular",
+ description = "Distribute vertices at constant distances along the" \
+ "loop",
+ default = True)
+
+ @classmethod
+ def poll(cls, context):
+ ob = context.active_object
+ return(ob and ob.type == 'MESH' and context.mode == 'EDIT_MESH')
+
+ def draw(self, context):
+ layout = self.layout
+ col = layout.column()
+
+ col.prop(self, "interpolation")
+ col.prop(self, "input")
+ col.prop(self, "iterations")
+ col.prop(self, "regular")
+
+ def invoke(self, context, event):
+ # load custom settings
+ settings_load(self)
+ return self.execute(context)
+
+ def execute(self, context):
+ # initialise
+ global_undo, object, mesh = initialise()
+ settings_write(self)
+ # check cache to see if we can save time
+ cached, single_loops, loops, derived, mapping = cache_read("Relax",
+ object, mesh, self.input, False)
+ if cached:
+ derived, mesh_mod = get_derived_mesh(object, mesh, context.scene)
+ else:
+ # find loops
+ derived, mesh_mod, loops = get_connected_input(object, mesh,
+ context.scene, self.input)
+ mapping = get_mapping(derived, mesh, mesh_mod, False, False, loops)
+ loops = check_loops(loops, mapping, mesh_mod)
+ knots, points = relax_calculate_knots(loops)
+
+ # saving cache for faster execution next time
+ if not cached:
+ cache_write("Relax", object, mesh, self.input, False, False, loops,
+ derived, mapping)
+
+ for iteration in range(int(self.iterations)):
+ # calculate splines and new positions
+ tknots, tpoints = relax_calculate_t(mesh_mod, knots, points,
+ self.regular)
+ splines = []
+ for i in range(len(knots)):
+ splines.append(calculate_splines(self.interpolation, mesh_mod,
+ tknots[i], knots[i]))
+ move = [relax_calculate_verts(mesh_mod, self.interpolation,
+ tknots, knots, tpoints, points, splines)]
+ move_verts(mesh, mapping, move, -1)
+
+ # cleaning up
+ if derived:
+ bpy.context.blend_data.meshes.remove(mesh_mod)
+ terminate(global_undo)
+
+ return{'FINISHED'}
+
+
+# space operator
+class Space(bpy.types.Operator):
+ bl_idname = "mesh.looptools_space"
+ bl_label = "Space"
+ bl_description = "Space the vertices in a regular distrubtion on the loop"
+ bl_options = {'REGISTER', 'UNDO'}
+
+ influence = bpy.props.FloatProperty(name = "Influence",
+ description = "Force of the tool",
+ default = 100.0,
+ min = 0.0,
+ max = 100.0,
+ precision = 1,
+ subtype = 'PERCENTAGE')
+ input = bpy.props.EnumProperty(name = "Input",
+ items = (("all", "Parallel (all)", "Also use non-selected "\
+ "parallel loops as input"),
+ ("selected", "Selection","Only use selected vertices as input")),
+ description = "Loops that are spaced",
+ default = 'selected')
+ interpolation = bpy.props.EnumProperty(name = "Interpolation",
+ items = (("cubic", "Cubic", "Natural cubic spline, smooth results"),
+ ("linear", "Linear", "Vertices are projected on existing edges")),
+ description = "Algorithm used for interpolation",
+ default = 'cubic')
+
+ @classmethod
+ def poll(cls, context):
+ ob = context.active_object
+ return(ob and ob.type == 'MESH' and context.mode == 'EDIT_MESH')
+
+ def draw(self, context):
+ layout = self.layout
+ col = layout.column()
+
+ col.prop(self, "interpolation")
+ col.prop(self, "input")
+ col.separator()
+
+ col.prop(self, "influence")
+
+ def invoke(self, context, event):
+ # load custom settings
+ settings_load(self)
+ return self.execute(context)
+
+ def execute(self, context):
+ # initialise
+ global_undo, object, mesh = initialise()
+ settings_write(self)
+ # check cache to see if we can save time
+ cached, single_loops, loops, derived, mapping = cache_read("Space",
+ object, mesh, self.input, False)
+ if cached:
+ derived, mesh_mod = get_derived_mesh(object, mesh, context.scene)
+ else:
+ # find loops
+ derived, mesh_mod, loops = get_connected_input(object, mesh,
+ context.scene, self.input)
+ mapping = get_mapping(derived, mesh, mesh_mod, False, False, loops)
+ loops = check_loops(loops, mapping, mesh_mod)
+
+ # saving cache for faster execution next time
+ if not cached:
+ cache_write("Space", object, mesh, self.input, False, False, loops,
+ derived, mapping)
+
+ move = []
+ for loop in loops:
+ # calculate splines and new positions
+ if loop[1]: # circular
+ loop[0].append(loop[0][0])
+ tknots, tpoints = space_calculate_t(mesh_mod, loop[0][:])
+ splines = calculate_splines(self.interpolation, mesh_mod,
+ tknots, loop[0][:])
+ move.append(space_calculate_verts(mesh_mod, self.interpolation,
+ tknots, tpoints, loop[0][:-1], splines))
+
+ # move vertices to new locations
+ move_verts(mesh, mapping, move, self.influence)
+
+ # cleaning up
+ if derived:
+ bpy.context.blend_data.meshes.remove(mesh_mod)
+ terminate(global_undo)
+
+ return{'FINISHED'}
+
+
+##########################################
+####### GUI and registration #############
+##########################################
+
+# menu containing all tools
+class VIEW3D_MT_edit_mesh_looptools(bpy.types.Menu):
+    """Menu listing all LoopTools operators."""
+    bl_label = "LoopTools"
+
+    def draw(self, context):
+        layout = self.layout
+
+        # Bridge and Loft share one operator, distinguished by 'loft'
+        layout.operator("mesh.looptools_bridge", text="Bridge").loft = False
+        layout.operator("mesh.looptools_circle")
+        layout.operator("mesh.looptools_curve")
+        layout.operator("mesh.looptools_flatten")
+        layout.operator("mesh.looptools_bridge", text="Loft").loft = True
+        layout.operator("mesh.looptools_relax")
+        layout.operator("mesh.looptools_space")
+
+
+# panel containing all tools
+class VIEW3D_PT_tools_looptools(bpy.types.Panel):
+    """Tool-shelf panel exposing all LoopTools operators and settings."""
+    bl_space_type = 'VIEW_3D'
+    bl_region_type = 'TOOLS'
+    bl_context = "mesh_edit"
+    bl_label = "LoopTools"
+
+    def draw(self, context):
+        """Draw one collapsible row per tool, with its saved settings."""
+        layout = self.layout
+        col = layout.column(align=True)
+        # persistent settings stored on the window manager
+        lt = context.window_manager.looptools
+
+        # bridge - first line
+        split = col.split(percentage=0.15)
+        if lt.display_bridge:
+            split.prop(lt, "display_bridge", text="", icon='DOWNARROW_HLT')
+        else:
+            split.prop(lt, "display_bridge", text="", icon='RIGHTARROW')
+        split.operator("mesh.looptools_bridge", text="Bridge").loft = False
+        # bridge - settings
+        if lt.display_bridge:
+            box = col.column(align=True).box().column()
+            #box.prop(self, "mode")
+
+            # top row
+            col_top = box.column(align=True)
+            row = col_top.row(align=True)
+            col_left = row.column(align=True)
+            col_right = row.column(align=True)
+            col_right.active = lt.bridge_segments != 1
+            col_left.prop(lt, "bridge_segments")
+            col_right.prop(lt, "bridge_min_width", text="")
+            # bottom row
+            bottom_left = col_left.row()
+            bottom_left.active = lt.bridge_segments != 1
+            bottom_left.prop(lt, "bridge_interpolation", text="")
+            bottom_right = col_right.row()
+            bottom_right.active = lt.bridge_interpolation == 'cubic'
+            bottom_right.prop(lt, "bridge_cubic_strength")
+            # boolean properties
+            col_top.prop(lt, "bridge_remove_faces")
+
+            # override properties
+            col_top.separator()
+            row = box.row(align = True)
+            row.prop(lt, "bridge_twist")
+            row.prop(lt, "bridge_reverse")
+
+        # circle - first line
+        split = col.split(percentage=0.15)
+        if lt.display_circle:
+            split.prop(lt, "display_circle", text="", icon='DOWNARROW_HLT')
+        else:
+            split.prop(lt, "display_circle", text="", icon='RIGHTARROW')
+        split.operator("mesh.looptools_circle")
+        # circle - settings
+        if lt.display_circle:
+            box = col.column(align=True).box().column()
+            box.prop(lt, "circle_fit")
+            box.separator()
+
+            box.prop(lt, "circle_flatten")
+            row = box.row(align=True)
+            row.prop(lt, "circle_custom_radius")
+            row_right = row.row(align=True)
+            row_right.active = lt.circle_custom_radius
+            row_right.prop(lt, "circle_radius", text="")
+            box.prop(lt, "circle_regular")
+            box.separator()
+
+            box.prop(lt, "circle_influence")
+
+        # curve - first line
+        split = col.split(percentage=0.15)
+        if lt.display_curve:
+            split.prop(lt, "display_curve", text="", icon='DOWNARROW_HLT')
+        else:
+            split.prop(lt, "display_curve", text="", icon='RIGHTARROW')
+        split.operator("mesh.looptools_curve")
+        # curve - settings
+        if lt.display_curve:
+            box = col.column(align=True).box().column()
+            box.prop(lt, "curve_interpolation")
+            box.prop(lt, "curve_restriction")
+            box.prop(lt, "curve_boundaries")
+            box.prop(lt, "curve_regular")
+            box.separator()
+
+            box.prop(lt, "curve_influence")
+
+        # flatten - first line
+        split = col.split(percentage=0.15)
+        if lt.display_flatten:
+            split.prop(lt, "display_flatten", text="", icon='DOWNARROW_HLT')
+        else:
+            split.prop(lt, "display_flatten", text="", icon='RIGHTARROW')
+        split.operator("mesh.looptools_flatten")
+        # flatten - settings
+        if lt.display_flatten:
+            box = col.column(align=True).box().column()
+            box.prop(lt, "flatten_plane")
+            #box.prop(lt, "flatten_restriction")
+            box.separator()
+
+            box.prop(lt, "flatten_influence")
+
+        # loft - first line
+        split = col.split(percentage=0.15)
+        if lt.display_loft:
+            split.prop(lt, "display_loft", text="", icon='DOWNARROW_HLT')
+        else:
+            split.prop(lt, "display_loft", text="", icon='RIGHTARROW')
+        split.operator("mesh.looptools_bridge", text="Loft").loft = True
+        # loft - settings (loft shares the bridge operator and settings)
+        if lt.display_loft:
+            box = col.column(align=True).box().column()
+            #box.prop(self, "mode")
+
+            # top row
+            col_top = box.column(align=True)
+            row = col_top.row(align=True)
+            col_left = row.column(align=True)
+            col_right = row.column(align=True)
+            col_right.active = lt.bridge_segments != 1
+            col_left.prop(lt, "bridge_segments")
+            col_right.prop(lt, "bridge_min_width", text="")
+            # bottom row
+            bottom_left = col_left.row()
+            bottom_left.active = lt.bridge_segments != 1
+            bottom_left.prop(lt, "bridge_interpolation", text="")
+            bottom_right = col_right.row()
+            bottom_right.active = lt.bridge_interpolation == 'cubic'
+            bottom_right.prop(lt, "bridge_cubic_strength")
+            # boolean properties
+            col_top.prop(lt, "bridge_remove_faces")
+            col_top.prop(lt, "bridge_loft_loop")
+
+            # override properties
+            col_top.separator()
+            row = box.row(align = True)
+            row.prop(lt, "bridge_twist")
+            row.prop(lt, "bridge_reverse")
+
+        # relax - first line
+        split = col.split(percentage=0.15)
+        if lt.display_relax:
+            split.prop(lt, "display_relax", text="", icon='DOWNARROW_HLT')
+        else:
+            split.prop(lt, "display_relax", text="", icon='RIGHTARROW')
+        split.operator("mesh.looptools_relax")
+        # relax - settings
+        if lt.display_relax:
+            box = col.column(align=True).box().column()
+            box.prop(lt, "relax_interpolation")
+            box.prop(lt, "relax_input")
+            box.prop(lt, "relax_iterations")
+            box.prop(lt, "relax_regular")
+
+        # space - first line
+        split = col.split(percentage=0.15)
+        if lt.display_space:
+            split.prop(lt, "display_space", text="", icon='DOWNARROW_HLT')
+        else:
+            split.prop(lt, "display_space", text="", icon='RIGHTARROW')
+        split.operator("mesh.looptools_space")
+        # space - settings
+        if lt.display_space:
+            box = col.column(align=True).box().column()
+            box.prop(lt, "space_interpolation")
+            box.prop(lt, "space_input")
+            box.separator()
+
+            box.prop(lt, "space_influence")
+
+
+# property group containing all properties for the gui in the panel
+class LoopToolsProps(bpy.types.PropertyGroup):
+ """
+ Fake module like class
+ bpy.context.window_manager.looptools
+ """
+
+ # general display properties
+ display_bridge = bpy.props.BoolProperty(name = "Bridge settings",
+ description = "Display settings of the Bridge tool",
+ default = False)
+ display_circle = bpy.props.BoolProperty(name = "Circle settings",
+ description = "Display settings of the Circle tool",
+ default = False)
+ display_curve = bpy.props.BoolProperty(name = "Curve settings",
+ description = "Display settings of the Curve tool",
+ default = False)
+ display_flatten = bpy.props.BoolProperty(name = "Flatten settings",
+ description = "Display settings of the Flatten tool",
+ default = False)
+ display_loft = bpy.props.BoolProperty(name = "Loft settings",
+ description = "Display settings of the Loft tool",
+ default = False)
+ display_relax = bpy.props.BoolProperty(name = "Relax settings",
+ description = "Display settings of the Relax tool",
+ default = False)
+ display_space = bpy.props.BoolProperty(name = "Space settings",
+ description = "Display settings of the Space tool",
+ default = False)
+
+ # bridge properties
+ bridge_cubic_strength = bpy.props.FloatProperty(name = "Strength",
+ description = "Higher strength results in more fluid curves",
+ default = 1.0,
+ soft_min = -3.0,
+ soft_max = 3.0)
+ bridge_interpolation = bpy.props.EnumProperty(name = "Interpolation mode",
+ items = (('cubic', "Cubic", "Gives curved results"),
+ ('linear', "Linear", "Basic, fast, straight interpolation")),
+ description = "Interpolation mode: algorithm used when creating "\
+ "segments",
+ default = 'cubic')
+ bridge_loft = bpy.props.BoolProperty(name = "Loft",
+ description = "Loft multiple loops, instead of considering them as "\
+ "a multi-input for bridging",
+ default = False)
+ bridge_loft_loop = bpy.props.BoolProperty(name = "Loop",
+ description = "Connect the first and the last loop with each other",
+ default = False)
+ bridge_min_width = bpy.props.IntProperty(name = "Minimum width",
+ description = "Segments with an edge smaller than this are merged "\
+ "(compared to base edge)",
+ default = 0,
+ min = 0,
+ max = 100,
+ subtype = 'PERCENTAGE')
+ bridge_mode = bpy.props.EnumProperty(name = "Mode",
+ items = (('basic', "Basic", "Fast algorithm"), ('shortest',
+ "Shortest edge", "Slower algorithm with better vertex matching")),
+ description = "Algorithm used for bridging",
+ default = 'shortest')
+ bridge_remove_faces = bpy.props.BoolProperty(name = "Remove faces",
+ description = "Remove faces that are internal after bridging",
+ default = True)
+ bridge_reverse = bpy.props.BoolProperty(name = "Reverse",
+ description = "Manually override the direction in which the loops "\
+ "are bridged. Only use if the tool gives the wrong result.",
+ default = False)
+ bridge_segments = bpy.props.IntProperty(name = "Segments",
+ description = "Number of segments used to bridge the gap "\
+ "(0 = automatic)",
+ default = 1,
+ min = 0,
+ soft_max = 20)
+ bridge_twist = bpy.props.IntProperty(name = "Twist",
+ description = "Twist what vertices are connected to each other",
+ default = 0)
+
+ # circle properties
+ circle_custom_radius = bpy.props.BoolProperty(name = "Radius",
+ description = "Force a custom radius",
+ default = False)
+ circle_fit = bpy.props.EnumProperty(name = "Method",
+ items = (("best", "Best fit", "Non-linear least squares"),
+ ("inside", "Fit inside","Only move vertices towards the center")),
+ description = "Method used for fitting a circle to the vertices",
+ default = 'best')
+ circle_flatten = bpy.props.BoolProperty(name = "Flatten",
+ description = "Flatten the circle, instead of projecting it on the " \
+ "mesh",
+ default = True)
+ circle_influence = bpy.props.FloatProperty(name = "Influence",
+ description = "Force of the tool",
+ default = 100.0,
+ min = 0.0,
+ max = 100.0,
+ precision = 1,
+ subtype = 'PERCENTAGE')
+ circle_radius = bpy.props.FloatProperty(name = "Radius",
+ description = "Custom radius for circle",
+ default = 1.0,
+ min = 0.0,
+ soft_max = 1000.0)
+ circle_regular = bpy.props.BoolProperty(name = "Regular",
+ description = "Distribute vertices at constant distances along the " \
+ "circle",
+ default = True)
+
+ # curve properties
+ curve_boundaries = bpy.props.BoolProperty(name = "Boundaries",
+ description = "Limit the tool to work within the boundaries of the "\
+ "selected vertices",
+ default = False)
+ curve_influence = bpy.props.FloatProperty(name = "Influence",
+ description = "Force of the tool",
+ default = 100.0,
+ min = 0.0,
+ max = 100.0,
+ precision = 1,
+ subtype = 'PERCENTAGE')
+ curve_interpolation = bpy.props.EnumProperty(name = "Interpolation",
+ items = (("cubic", "Cubic", "Natural cubic spline, smooth results"),
+ ("linear", "Linear", "Simple and fast linear algorithm")),
+ description = "Algorithm used for interpolation",
+ default = 'cubic')
+ curve_regular = bpy.props.BoolProperty(name = "Regular",
+ description = "Distribute vertices at constant distances along the" \
+ "curve",
+ default = True)
+ curve_restriction = bpy.props.EnumProperty(name = "Restriction",
+ items = (("none", "None", "No restrictions on vertex movement"),
+ ("extrude", "Extrude only","Only allow extrusions (no "\
+ "indentations)"),
+ ("indent", "Indent only", "Only allow indentation (no "\
+ "extrusions)")),
+ description = "Restrictions on how the vertices can be moved",
+ default = 'none')
+
+ # flatten properties
+ flatten_influence = bpy.props.FloatProperty(name = "Influence",
+ description = "Force of the tool",
+ default = 100.0,
+ min = 0.0,
+ max = 100.0,
+ precision = 1,
+ subtype = 'PERCENTAGE')
+ flatten_plane = bpy.props.EnumProperty(name = "Plane",
+ items = (("best_fit", "Best fit", "Calculate a best fitting plane"),
+ ("normal", "Normal", "Derive plane from averaging vertex "\
+ "normals"),
+ ("view", "View", "Flatten on a plane perpendicular to the "\
+ "viewing angle")),
+ description = "Plane on which vertices are flattened",
+ default = 'best_fit')
+ flatten_restriction = bpy.props.EnumProperty(name = "Restriction",
+ items = (("none", "None", "No restrictions on vertex movement"),
+ ("bounding_box", "Bounding box", "Vertices are restricted to "\
+ "movement inside the bounding box of the selection")),
+ description = "Restrictions on how the vertices can be moved",
+ default = 'none')
+
+ # relax properties
+ relax_input = bpy.props.EnumProperty(name = "Input",
+ items = (("all", "Parallel (all)", "Also use non-selected "\
+ "parallel loops as input"),
+ ("selected", "Selection","Only use selected vertices as input")),
+ description = "Loops that are relaxed",
+ default = 'selected')
+ relax_interpolation = bpy.props.EnumProperty(name = "Interpolation",
+ items = (("cubic", "Cubic", "Natural cubic spline, smooth results"),
+ ("linear", "Linear", "Simple and fast linear algorithm")),
+ description = "Algorithm used for interpolation",
+ default = 'cubic')
+ relax_iterations = bpy.props.EnumProperty(name = "Iterations",
+ items = (("1", "1", "One"),
+ ("3", "3", "Three"),
+ ("5", "5", "Five"),
+ ("10", "10", "Ten"),
+ ("25", "25", "Twenty-five")),
+ description = "Number of times the loop is relaxed",
+ default = "1")
+ relax_regular = bpy.props.BoolProperty(name = "Regular",
+ description = "Distribute vertices at constant distances along the" \
+ "loop",
+ default = True)
+
+ # space properties
+ space_influence = bpy.props.FloatProperty(name = "Influence",
+ description = "Force of the tool",
+ default = 100.0,
+ min = 0.0,
+ max = 100.0,
+ precision = 1,
+ subtype = 'PERCENTAGE')
+ space_input = bpy.props.EnumProperty(name = "Input",
+ items = (("all", "Parallel (all)", "Also use non-selected "\
+ "parallel loops as input"),
+ ("selected", "Selection","Only use selected vertices as input")),
+ description = "Loops that are spaced",
+ default = 'selected')
+ space_interpolation = bpy.props.EnumProperty(name = "Interpolation",
+ items = (("cubic", "Cubic", "Natural cubic spline, smooth results"),
+ ("linear", "Linear", "Vertices are projected on existing edges")),
+ description = "Algorithm used for interpolation",
+ default = 'cubic')
+
+
+# draw function for integration in menus
+def menu_func(self, context):
+ self.layout.menu("VIEW3D_MT_edit_mesh_looptools")
+ self.layout.separator()
+
+
+# define classes for registration
+classes = [VIEW3D_MT_edit_mesh_looptools,
+ VIEW3D_PT_tools_looptools,
+ LoopToolsProps,
+ Bridge,
+ Circle,
+ Curve,
+ Flatten,
+ Relax,
+ Space]
+
+
+# registering and menu integration
+def register():
+ for c in classes:
+ bpy.utils.register_class(c)
+ bpy.types.VIEW3D_MT_edit_mesh_specials.prepend(menu_func)
+ bpy.types.WindowManager.looptools = bpy.props.PointerProperty(\
+ type = LoopToolsProps)
+
+
+# unregistering and removing menus
+def unregister():
+ for c in classes:
+ bpy.utils.unregister_class(c)
+ bpy.types.VIEW3D_MT_edit_mesh_specials.remove(menu_func)
+ try:
+ del bpy.types.WindowManager.looptools
+ except:
+ pass
+
+
+if __name__ == "__main__":
+ register()
diff --git a/mesh_relax.py b/mesh_relax.py
new file mode 100644
index 00000000..ce363b92
--- /dev/null
+++ b/mesh_relax.py
@@ -0,0 +1,132 @@
+# mesh_relax.py Copyright (C) 2010, Fabian Fricke
+#
+# Relaxes selected vertices while retaining the shape as much as possible
+#
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
+bl_info = {
+ "name": "Relax",
+ "author": "Fabian Fricke",
+ "version": (1,1),
+ "blender": (2, 5, 7),
+ "api": 35622,
+ "location": "View3D > Specials > Relax ",
+ "description": "Relax the selected verts while retaining the shape",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/Modeling/Relax",
+ "tracker_url": "https://projects.blender.org/tracker/index.php?"\
+ "func=detail&aid=21421",
+ "category": "Mesh"}
+
+"""
+Usage:
+
+Launch from "W-menu" or from "Mesh -> Vertices -> Relax"
+
+
+Additional links:
+ Author Site: http://frigi.designdevil.de
+ e-mail: frigi.f {at} gmail {dot} com
+"""
+
+
+import bpy
+from bpy.props import IntProperty
+
+def relax_mesh(context):
+
+ # deselect everything that's not related
+ for obj in context.selected_objects:
+ obj.select = False
+
+ # get active object
+ obj = context.active_object
+
+ # duplicate the object so it can be used for the shrinkwrap modifier
+ obj.select = True # make sure the object is selected!
+ bpy.ops.object.mode_set(mode='OBJECT')
+ bpy.ops.object.duplicate()
+ target = context.active_object
+
+ # remove all other modifiers from the target
+ for m in range(0, len(target.modifiers)):
+ target.modifiers.remove(target.modifiers[0])
+
+ context.scene.objects.active = obj
+
+ sw = obj.modifiers.new(type='SHRINKWRAP', name='relax_target')
+ sw.target = target
+
+ # run smooth operator to relax the mesh
+ bpy.ops.object.mode_set(mode='EDIT')
+ bpy.ops.mesh.vertices_smooth()
+ bpy.ops.object.mode_set(mode='OBJECT')
+
+ # apply the modifier
+ bpy.ops.object.modifier_apply(modifier='relax_target')
+
+ # delete the target object
+ obj.select = False
+ target.select = True
+ bpy.ops.object.delete()
+
+ # go back to initial state
+ obj.select = True
+ bpy.ops.object.mode_set(mode='EDIT')
+
+class Relax(bpy.types.Operator):
+ '''Relaxes selected vertices while retaining the shape as much as possible'''
+ bl_idname = 'mesh.relax'
+ bl_label = 'Relax'
+ bl_options = {'REGISTER', 'UNDO'}
+
+ iterations = IntProperty(name="Relax iterations",
+ default=1, min=0, max=100, soft_min=0, soft_max=10)
+
+ @classmethod
+ def poll(cls, context):
+ obj = context.active_object
+ return (obj and obj.type == 'MESH')
+
+ def execute(self, context):
+ for i in range(0,self.iterations):
+ relax_mesh(context)
+ return {'FINISHED'}
+
+
+def menu_func(self, context):
+ self.layout.operator(Relax.bl_idname, text="Relax")
+
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ bpy.types.VIEW3D_MT_edit_mesh_specials.append(menu_func)
+ bpy.types.VIEW3D_MT_edit_mesh_vertices.append(menu_func)
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+ bpy.types.VIEW3D_MT_edit_mesh_specials.remove(menu_func)
+ bpy.types.VIEW3D_MT_edit_mesh_vertices.remove(menu_func)
+
+if __name__ == "__main__":
+ register()
diff --git a/modules/add_utils.py b/modules/add_utils.py
new file mode 100644
index 00000000..27fc9d6d
--- /dev/null
+++ b/modules/add_utils.py
@@ -0,0 +1,141 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+########################################################
+#
+# Before changing this file please discuss with admins.
+#
+########################################################
+# <pep8 compliant>
+
+import bpy
+import mathutils
+from bpy.props import FloatVectorProperty
+
+
+class AddObjectHelper:
+ '''Helper Class for Add Object Operators'''
+ location = FloatVectorProperty(name='Location', description='Location of new Object')
+ rotation = FloatVectorProperty(name='Rotation', description='Rotation of new Object')
+
+
+#Initialize loc, rot of operator
+def add_object_align_init(context, operator):
+ '''Initialize loc, rot of operator
+ context: Blender Context
+ operator: the active Operator (self)
+ Initializes the Operators location and rotation variables
+ according to user preferences (align to view)
+ See AddObjectHelper class
+ Returns Matrix
+ '''
+ if (operator
+ and operator.properties.is_property_set("location")
+ and operator.properties.is_property_set("rotation")):
+ location = mathutils.Matrix.Translation(mathutils.Vector(operator.properties.location))
+ rotation = mathutils.Euler(operator.properties.rotation).to_matrix().to_4x4()
+ else:
+ # TODO, local view cursor!
+ location = mathutils.Matrix.Translation(context.scene.cursor_location)
+
+ if context.user_preferences.edit.object_align == 'VIEW' and context.space_data.type == 'VIEW_3D':
+ rotation = context.space_data.region_3d.view_matrix.to_3x3().inverted().to_4x4()
+ else:
+ rotation = mathutils.Matrix()
+
+ # set the operator properties
+ if operator:
+ operator.properties.location = location.to_translation()
+ operator.properties.rotation = rotation.to_euler()
+
+ return location * rotation
+
+
+def add_object_data(context, obdata, operator=None):
+ '''Create Object from data
+
+ context: Blender Context
+ obdata: Object data (mesh, curve, camera,...)
+ operator: the active operator (self)
+
+ Returns the Object
+ '''
+
+ scene = context.scene
+
+ # ugh, could be made nicer
+ for ob in scene.objects:
+ ob.select = False
+
+ obj_new = bpy.data.objects.new(obdata.name, obdata)
+ obj_new.update_tag()
+
+ base = scene.objects.link(obj_new)
+ base.select = True
+
+ if context.space_data and context.space_data.type == 'VIEW_3D':
+ base.layers_from_view(context.space_data)
+
+ obj_new.matrix_world = add_object_align_init(context, operator)
+
+ obj_act = scene.objects.active
+
+ if obj_act and obj_act.mode == 'EDIT' and obj_act.type == obj_new.type:
+ bpy.ops.object.mode_set(mode='OBJECT')
+
+ obj_act.select = True
+ scene.update() # apply location
+ #scene.objects.active = obj_new
+
+ bpy.ops.object.join() # join into the active.
+
+ bpy.ops.object.mode_set(mode='EDIT')
+ else:
+ scene.objects.active = obj_new
+ if context.user_preferences.edit.use_enter_edit_mode:
+ bpy.ops.object.mode_set(mode='EDIT')
+
+ return base
+
+
+def flatten_vector_list(list):
+ '''flatten a list of vetcors to use in foreach_set and the like'''
+ if not list:
+ return None
+
+ result = []
+ for vec in list:
+ result.extend([i for i in vec])
+
+ return result
+
+
+def list_to_vector_list(list, dimension=3):
+ '''make Vector objects out of a list'''
+ #test if list contains right number of elements
+
+ result = []
+ for i in range(0, len(list), dimension):
+ try:
+ vec = mathutils.Vector([list[i + ind] for ind in range(dimension)])
+ except:
+ print('Number of elemnts doesnt match into the vectors.')
+ return None
+
+ result.append(vec)
+
+ return result
diff --git a/modules/constants_utils.py b/modules/constants_utils.py
new file mode 100644
index 00000000..6840ae09
--- /dev/null
+++ b/modules/constants_utils.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+'''
+ constants_utils.py
+
+ Useful constants...
+
+
+
+'''
+
+
+
+# Golden mean
+PHI_INV = 0.61803398874989484820
+PHI = 1.61803398874989484820
+PHI_SQR = 2.61803398874989484820
diff --git a/modules/cursor_utils.py b/modules/cursor_utils.py
new file mode 100644
index 00000000..96262145
--- /dev/null
+++ b/modules/cursor_utils.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+'''
+ cursor_utils.py
+
+ Helper methods for accessing the 3D cursor
+
+
+
+'''
+
+
+import bpy
+
+
+class CursorAccess:
+
+ @classmethod
+ def findSpace(cls):
+ area = None
+ for area in bpy.data.window_managers[0].windows[0].screen.areas:
+ if area.type == 'VIEW_3D':
+ break
+ if area.type != 'VIEW_3D':
+ return None
+ for space in area.spaces:
+ if space.type == 'VIEW_3D':
+ break
+ if space.type != 'VIEW_3D':
+ return None
+ return space
+
+ @classmethod
+ def setCursor(cls,coordinates):
+ spc = cls.findSpace()
+ spc.cursor_location = coordinates
+
+ @classmethod
+ def getCursor(cls):
+ spc = cls.findSpace()
+ return spc.cursor_location
+
+
+
diff --git a/modules/extensions_framework/__init__.py b/modules/extensions_framework/__init__.py
new file mode 100644
index 00000000..df815eb3
--- /dev/null
+++ b/modules/extensions_framework/__init__.py
@@ -0,0 +1,371 @@
+# -*- coding: utf8 -*-
+#
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# --------------------------------------------------------------------------
+# Blender 2.5 Extensions Framework
+# --------------------------------------------------------------------------
+#
+# Authors:
+# Doug Hammond
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+# ***** END GPL LICENCE BLOCK *****
+#
+import time
+
+import bpy
+
+from extensions_framework.ui import EF_OT_msg
+bpy.utils.register_class(EF_OT_msg)
+del EF_OT_msg
+
+def log(str, popup=False, module_name='EF'):
+ """Print a message to the console, prefixed with the module_name
+ and the current time. If the popup flag is True, the message will
+ be raised in the UI as a warning using the operator bpy.ops.ef.msg.
+
+ """
+ print("[%s %s] %s" %
+ (module_name, time.strftime('%Y-%b-%d %H:%M:%S'), str))
+ if popup:
+ bpy.ops.ef.msg(
+ msg_type='WARNING',
+ msg_text=str
+ )
+
+
+added_property_cache = {}
+
+def init_properties(obj, props, cache=True):
+ """Initialise custom properties in the given object or type.
+ The props list is described in the declarative_property_group
+ class definition. If the cache flag is False, this function
+ will attempt to redefine properties even if they have already been
+ added.
+
+ """
+
+ if not obj in added_property_cache.keys():
+ added_property_cache[obj] = []
+
+ for prop in props:
+ try:
+ if cache and prop['attr'] in added_property_cache[obj]:
+ continue
+
+ if prop['type'] == 'bool':
+ t = bpy.props.BoolProperty
+ a = {k: v for k,v in prop.items() if k in ["name",
+ "description","default","options","subtype","update"]}
+ elif prop['type'] == 'bool_vector':
+ t = bpy.props.BoolVectorProperty
+ a = {k: v for k,v in prop.items() if k in ["name",
+ "description","default","options","subtype","size",
+ "update"]}
+ elif prop['type'] == 'collection':
+ t = bpy.props.CollectionProperty
+ a = {k: v for k,v in prop.items() if k in ["ptype","name",
+ "description","default","options"]}
+ a['type'] = a['ptype']
+ del a['ptype']
+ elif prop['type'] == 'enum':
+ t = bpy.props.EnumProperty
+ a = {k: v for k,v in prop.items() if k in ["items","name",
+ "description","default","options","update"]}
+ elif prop['type'] == 'float':
+ t = bpy.props.FloatProperty
+ a = {k: v for k,v in prop.items() if k in ["name",
+ "description","default","min","max","soft_min","soft_max",
+ "step","precision","options","subtype","unit","update"]}
+ elif prop['type'] == 'float_vector':
+ t = bpy.props.FloatVectorProperty
+ a = {k: v for k,v in prop.items() if k in ["name",
+ "description","default","min","max","soft_min","soft_max",
+ "step","precision","options","subtype","size","update"]}
+ elif prop['type'] == 'int':
+ t = bpy.props.IntProperty
+ a = {k: v for k,v in prop.items() if k in ["name",
+ "description","default","min","max","soft_min","soft_max",
+ "step","options","subtype","update"]}
+ elif prop['type'] == 'int_vector':
+ t = bpy.props.IntVectorProperty
+ a = {k: v for k,v in prop.items() if k in ["name",
+ "description","default","min","max","soft_min","soft_max",
+ "options","subtype","size","update"]}
+ elif prop['type'] == 'pointer':
+ t = bpy.props.PointerProperty
+ a = {k: v for k,v in prop.items() if k in ["ptype", "name",
+ "description","options","update"]}
+ a['type'] = a['ptype']
+ del a['ptype']
+ elif prop['type'] == 'string':
+ t = bpy.props.StringProperty
+ a = {k: v for k,v in prop.items() if k in ["name",
+ "description","default","maxlen","options","subtype",
+ "update"]}
+ else:
+ continue
+
+ setattr(obj, prop['attr'], t(**a))
+
+ added_property_cache[obj].append(prop['attr'])
+ except KeyError:
+ # Silently skip invalid entries in props
+ continue
+
+class declarative_property_group(bpy.types.PropertyGroup):
+ """A declarative_property_group describes a set of logically
+ related properties, using a declarative style to list each
+ property type, name, values, and other relevant information.
+ The information provided for each property depends on the
+ property's type.
+
+ The properties list attribute in this class describes the
+ properties present in this group.
+
+ Some additional information about the properties in this group
+ can be specified, so that a UI can be generated to display them.
+ To that end, the controls list attribute and the visibility dict
+ attribute are present here, to be read and interpreted by a
+ property_group_renderer object.
+ See extensions_framework.ui.property_group_renderer.
+
+ """
+
+ ef_initialised = False
+
+ """This property tells extensions_framework which bpy.type(s)
+ to attach this PropertyGroup to. If left as an empty list,
+ it will not be attached to any type, but its properties will
+ still be initialised. The type(s) given in the list should be
+ a string, such as 'Scene'.
+
+ """
+ ef_attach_to = []
+
+ @classmethod
+ def initialise_properties(cls):
+ """This is a function that should be called on
+ sub-classes of declarative_property_group in order
+ to ensure that they are initialised when the addon
+ is loaded.
+ the init_properties is called without caching here,
+ as it is assumed that any addon calling this function
+ will also call ef_remove_properties when it is
+ unregistered.
+
+ """
+
+ if not cls.ef_initialised:
+ for property_group_parent in cls.ef_attach_to:
+ if property_group_parent is not None:
+ prototype = getattr(bpy.types, property_group_parent)
+ if not hasattr(prototype, cls.__name__):
+ init_properties(prototype, [{
+ 'type': 'pointer',
+ 'attr': cls.__name__,
+ 'ptype': cls,
+ 'name': cls.__name__,
+ 'description': cls.__name__
+ }], cache=False)
+
+ init_properties(cls, cls.properties, cache=False)
+ cls.ef_initialised = True
+
+ return cls
+
+ @classmethod
+ def register_initialise_properties(cls):
+ """As ef_initialise_properties, but also registers the
+ class with RNA. Note that this isn't a great idea
+ because it's non-trivial to unregister the class, unless
+ you keep track of it yourself.
+ """
+
+ bpy.utils.register_class(cls)
+ cls.initialise_properties()
+ return cls
+
+ @classmethod
+ def remove_properties(cls):
+ """This is a function that should be called on
+ sub-classes of declarative_property_group in order
+ to ensure that they are un-initialised when the addon
+ is unloaded.
+
+ """
+
+ if cls.ef_initialised:
+ prototype = getattr(bpy.types, cls.__name__)
+ for prop in cls.properties:
+ if hasattr(prototype, prop['attr']):
+ delattr(prototype, prop['attr'])
+
+ for property_group_parent in cls.ef_attach_to:
+ if property_group_parent is not None:
+ prototype = getattr(bpy.types, property_group_parent)
+ if hasattr(prototype, cls.__name__):
+ delattr(prototype, cls.__name__)
+
+ cls.ef_initialised = False
+
+ return cls
+
+
+ """This list controls the order of property layout when rendered
+ by a property_group_renderer. This can be a nested list, where each
+ list becomes a row in the panel layout. Nesting may be to any depth.
+
+ """
+ controls = []
+
+ """The visibility dict controls the visibility of properties based on
+ the value of other properties. See extensions_framework.validate
+ for test syntax.
+
+ """
+ visibility = {}
+
+ """The enabled dict controls the enabled state of properties based on
+ the value of other properties. See extensions_framework.validate
+ for test syntax.
+
+ """
+ enabled = {}
+
+ """The alert dict controls the alert state of properties based on
+ the value of other properties. See extensions_framework.validate
+ for test syntax.
+
+ """
+ alert = {}
+
+ """The properties list describes each property to be created. Each
+ item should be a dict of args to pass to a
+ bpy.props.<?>Property function, with the exception of 'type'
+ which is used and stripped by extensions_framework in order to
+ determine which Property creation function to call.
+
+ Example item:
+ {
+ 'type': 'int', # bpy.props.IntProperty
+ 'attr': 'threads', # bpy.types.<type>.threads
+ 'name': 'Render Threads', # Rendered next to the UI
+ 'description': 'Number of threads to use', # Tooltip text in the UI
+ 'default': 1,
+ 'min': 1,
+ 'soft_min': 1,
+ 'max': 64,
+ 'soft_max': 64
+ }
+
+ """
+ properties = []
+
+ def draw_callback(self, context):
+ """Sub-classes can override this to get a callback when
+ rendering is completed by a property_group_renderer sub-class.
+
+ """
+
+ pass
+
+ @classmethod
+ def get_exportable_properties(cls):
+ """Return a list of properties which have the 'save_in_preset' key
+ set to True, and hence should be saved into preset files.
+
+ """
+
+ out = []
+ for prop in cls.properties:
+ if 'save_in_preset' in prop.keys() and prop['save_in_preset']:
+ out.append(prop)
+ return out
+
+ def reset(self):
+ """Reset all properties in this group to the default value,
+ if specified"""
+ for prop in self.properties:
+ pk = prop.keys()
+ if 'attr' in pk and 'default' in pk and hasattr(self, prop['attr']):
+ setattr(self, prop['attr'], prop['default'])
+
+class Addon(object):
+ """A list of classes registered by this addon"""
+ static_addon_count = 0
+
+ addon_serial = 0
+ addon_classes = None
+ bl_info = None
+
+ BL_VERSION = None
+ BL_IDNAME = None
+
+ def __init__(self, bl_info=None):
+ self.addon_classes = []
+ self.bl_info = bl_info
+
+ # Keep a count in case we have to give this addon an anonymous name
+ self.addon_serial = Addon.static_addon_count
+ Addon.static_addon_count += 1
+
+ if self.bl_info:
+ self.BL_VERSION = '.'.join(['%s'%v for v in self.bl_info['version']]).lower()
+ self.BL_IDNAME = self.bl_info['name'].lower() + '-' + self.BL_VERSION
+ else:
+ # construct anonymous name
+ self.BL_VERSION = '0'
+ self.BL_IDNAME = 'Addon-%03d'%self.addon_serial
+
+ def addon_register_class(self, cls):
+ """This method is designed to be used as a decorator on RNA-registerable
+ classes defined by the addon. By using this decorator, this class will
+ keep track of classes registered by this addon so that they can be
+ unregistered later in the correct order.
+
+ """
+ self.addon_classes.append(cls)
+ return cls
+
+ def register(self):
+ """This is the register function that should be exposed in the addon's
+ __init__.
+
+ """
+ for cls in self.addon_classes:
+ bpy.utils.register_class(cls)
+ if hasattr(cls, 'ef_attach_to'): cls.initialise_properties()
+
+ def unregister(self):
+ """This is the unregister function that should be exposed in the addon's
+ __init__.
+
+ """
+ for cls in self.addon_classes[::-1]: # unregister in reverse order
+ if hasattr(cls, 'ef_attach_to'): cls.remove_properties()
+ bpy.utils.unregister_class(cls)
+
+ def init_functions(self):
+ """Returns references to the three functions that this addon needs
+ for successful class registration management. In the addon's __init__
+ you would use like this:
+
+ addon_register_class, register, unregister = Addon().init_functions()
+
+ """
+
+ return self.register, self.unregister
diff --git a/modules/extensions_framework/ui.py b/modules/extensions_framework/ui.py
new file mode 100644
index 00000000..4f24d873
--- /dev/null
+++ b/modules/extensions_framework/ui.py
@@ -0,0 +1,337 @@
+# -*- coding: utf8 -*-
+#
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# --------------------------------------------------------------------------
+# Blender 2.5 Extensions Framework
+# --------------------------------------------------------------------------
+#
+# Authors:
+# Doug Hammond
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+# ***** END GPL LICENCE BLOCK *****
+#
+import bpy
+
+from extensions_framework.validate import Logician
+
+class EF_OT_msg(bpy.types.Operator):
+ """An operator to show simple messages in the UI"""
+ bl_idname = 'ef.msg'
+ bl_label = 'Show UI Message'
+ msg_type = bpy.props.StringProperty(default='INFO')
+ msg_text = bpy.props.StringProperty(default='')
+ def execute(self, context):
+ self.report({self.properties.msg_type}, self.properties.msg_text)
+ return {'FINISHED'}
+
+def _get_item_from_context(context, path):
+ """Utility to get an object when the path to it is known:
+ _get_item_from_context(context, ['a','b','c']) returns
+ context.a.b.c
+ No error checking is performed other than checking that context
+ is not None. Exceptions caused by invalid path should be caught in
+ the calling code.
+
+ """
+
+ if context is not None:
+ for p in path:
+ context = getattr(context, p)
+ return context
+
+class property_group_renderer(bpy.types.Panel):
+ """Mix-in class for sub-classes of bpy.types.Panel. This class
+ will provide the draw() method which implements drawing one or
+ more property groups derived from
+	extensions_framework.declarative_property_group.
+ The display_property_groups list attribute describes which
+ declarative_property_groups should be drawn in the Panel, and
+ how to extract those groups from the context passed to draw().
+
+ """
+
+ """The display_property_groups list attribute specifies which
+ custom declarative_property_groups this panel should draw, and
+ where to find that property group in the active context.
+ Example item:
+ ( ('scene',), 'myaddon_property_group')
+ In this case, this renderer will look for properties in
+ context.scene.myaddon_property_group to draw in the Panel.
+
+ """
+ display_property_groups = []
+
+ def draw(self, context):
+ """Sub-classes should override this if they need to display
+ other (object-related) property groups. super().draw(context)
+ can be a useful call in those cases.
+
+ """
+ for property_group_path, property_group_name in \
+ self.display_property_groups:
+ ctx = _get_item_from_context(context, property_group_path)
+ property_group = getattr(ctx, property_group_name)
+ for p in property_group.controls:
+ self.draw_column(p, self.layout, ctx, context,
+ property_group=property_group)
+ property_group.draw_callback(context)
+
+ def check_visibility(self, lookup_property, property_group):
+ """Determine if the lookup_property should be drawn in the Panel"""
+ vt = Logician(property_group)
+ if lookup_property in property_group.visibility.keys():
+ if hasattr(property_group, lookup_property):
+ member = getattr(property_group, lookup_property)
+ else:
+ member = None
+ return vt.test_logic(member,
+ property_group.visibility[lookup_property])
+ else:
+ return True
+
+ def check_enabled(self, lookup_property, property_group):
+ """Determine if the lookup_property should be enabled in the Panel"""
+ et = Logician(property_group)
+ if lookup_property in property_group.enabled.keys():
+ if hasattr(property_group, lookup_property):
+ member = getattr(property_group, lookup_property)
+ else:
+ member = None
+ return et.test_logic(member,
+ property_group.enabled[lookup_property])
+ else:
+ return True
+
+ def check_alert(self, lookup_property, property_group):
+ """Determine if the lookup_property should be in an alert state in the Panel"""
+ et = Logician(property_group)
+ if lookup_property in property_group.alert.keys():
+ if hasattr(property_group, lookup_property):
+ member = getattr(property_group, lookup_property)
+ else:
+ member = None
+ return et.test_logic(member,
+ property_group.alert[lookup_property])
+ else:
+ return False
+
+ def is_real_property(self, lookup_property, property_group):
+ for prop in property_group.properties:
+ if prop['attr'] == lookup_property:
+ return prop['type'] not in ['text', 'prop_search']
+
+ return False
+
+ def draw_column(self, control_list_item, layout, context,
+ supercontext=None, property_group=None):
+ """Draw a column's worth of UI controls in this Panel"""
+ if type(control_list_item) is list:
+ draw_row = False
+
+ found_percent = None
+ for sp in control_list_item:
+ if type(sp) is float:
+ found_percent = sp
+ elif type(sp) is list:
+ for ssp in [s for s in sp if self.is_real_property(s, property_group)]:
+ draw_row = draw_row or self.check_visibility(ssp,
+ property_group)
+ else:
+ draw_row = draw_row or self.check_visibility(sp,
+ property_group)
+
+ next_items = [s for s in control_list_item if type(s) in [str, list]]
+ if draw_row and len(next_items) > 0:
+ if found_percent is not None:
+ splt = layout.split(percentage=found_percent)
+ else:
+ splt = layout.row(True)
+ for sp in next_items:
+ col2 = splt.column()
+ self.draw_column(sp, col2, context, supercontext,
+ property_group)
+ else:
+ if self.check_visibility(control_list_item, property_group):
+
+ for current_property in property_group.properties:
+ if current_property['attr'] == control_list_item:
+ current_property_keys = current_property.keys()
+
+ sub_layout_created = False
+ if not self.check_enabled(control_list_item, property_group):
+ last_layout = layout
+ sub_layout_created = True
+
+ layout = layout.row()
+ layout.enabled = False
+
+ if self.check_alert(control_list_item, property_group):
+ if not sub_layout_created:
+ last_layout = layout
+ sub_layout_created = True
+ layout = layout.row()
+ layout.alert = True
+
+ if 'type' in current_property_keys:
+ if current_property['type'] in ['int', 'float',
+ 'float_vector', 'string']:
+ layout.prop(
+ property_group,
+ control_list_item,
+ text = current_property['name'],
+ expand = current_property['expand'] \
+ if 'expand' in current_property_keys \
+ else False,
+ slider = current_property['slider'] \
+ if 'slider' in current_property_keys \
+ else False,
+ toggle = current_property['toggle'] \
+ if 'toggle' in current_property_keys \
+ else False,
+ icon_only = current_property['icon_only'] \
+ if 'icon_only' in current_property_keys \
+ else False,
+ event = current_property['event'] \
+ if 'event' in current_property_keys \
+ else False,
+ full_event = current_property['full_event'] \
+ if 'full_event' in current_property_keys \
+ else False,
+ emboss = current_property['emboss'] \
+ if 'emboss' in current_property_keys \
+ else True,
+ )
+ if current_property['type'] in ['enum']:
+ if 'use_menu' in current_property_keys and \
+ current_property['use_menu']:
+ layout.prop_menu_enum(
+ property_group,
+ control_list_item,
+ text = current_property['name']
+ )
+ else:
+ layout.prop(
+ property_group,
+ control_list_item,
+ text = current_property['name'],
+ expand = current_property['expand'] \
+ if 'expand' in current_property_keys \
+ else False,
+ slider = current_property['slider'] \
+ if 'slider' in current_property_keys \
+ else False,
+ toggle = current_property['toggle'] \
+ if 'toggle' in current_property_keys \
+ else False,
+ icon_only = current_property['icon_only'] \
+ if 'icon_only' in current_property_keys \
+ else False,
+ event = current_property['event'] \
+ if 'event' in current_property_keys \
+ else False,
+ full_event = current_property['full_event'] \
+ if 'full_event' in current_property_keys \
+ else False,
+ emboss = current_property['emboss'] \
+ if 'emboss' in current_property_keys \
+ else True,
+ )
+ if current_property['type'] in ['bool']:
+ layout.prop(
+ property_group,
+ control_list_item,
+ text = current_property['name'],
+ toggle = current_property['toggle'] \
+ if 'toggle' in current_property_keys \
+ else False,
+ icon_only = current_property['icon_only'] \
+ if 'icon_only' in current_property_keys \
+ else False,
+ event = current_property['event'] \
+ if 'event' in current_property_keys \
+ else False,
+ full_event = current_property['full_event'] \
+ if 'full_event' in current_property_keys \
+ else False,
+ emboss = current_property['emboss'] \
+ if 'emboss' in current_property_keys \
+ else True,
+ )
+ elif current_property['type'] in ['operator']:
+ args = {}
+ for optional_arg in ('text', 'icon'):
+ if optional_arg in current_property_keys:
+ args.update({
+ optional_arg: current_property[optional_arg],
+ })
+ layout.operator( current_property['operator'], **args )
+
+ elif current_property['type'] in ['menu']:
+ args = {}
+ for optional_arg in ('text', 'icon'):
+ if optional_arg in current_property_keys:
+ args.update({
+ optional_arg: current_property[optional_arg],
+ })
+ layout.menu(current_property['menu'], **args)
+
+ elif current_property['type'] in ['text']:
+ layout.label(
+ text = current_property['name']
+ )
+
+ elif current_property['type'] in ['template_list']:
+ layout.template_list(
+ current_property['src'](supercontext, context),
+ current_property['src_attr'],
+ current_property['trg'](supercontext, context),
+ current_property['trg_attr'],
+ rows = 4 \
+ if not 'rows' in current_property_keys \
+ else current_property['rows'],
+ maxrows = 4 \
+ if not 'rows' in current_property_keys \
+ else current_property['rows'],
+ type = 'DEFAULT' \
+ if not 'list_type' in current_property_keys \
+ else current_property['list_type']
+ )
+
+ elif current_property['type'] in ['prop_search']:
+ layout.prop_search(
+ current_property['trg'](supercontext,
+ context),
+ current_property['trg_attr'],
+ current_property['src'](supercontext,
+ context),
+ current_property['src_attr'],
+ text = current_property['name'],
+ )
+
+ elif current_property['type'] in ['ef_callback']:
+ getattr(self, current_property['method'])(supercontext)
+ else:
+ layout.prop(property_group, control_list_item)
+
+ if sub_layout_created:
+ layout = last_layout
+
+ # Fire a draw callback if specified
+ if 'draw' in current_property_keys:
+ current_property['draw'](supercontext, context)
+
+ break
diff --git a/modules/extensions_framework/util.py b/modules/extensions_framework/util.py
new file mode 100644
index 00000000..124ab5e7
--- /dev/null
+++ b/modules/extensions_framework/util.py
@@ -0,0 +1,232 @@
+# -*- coding: utf8 -*-
+#
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# --------------------------------------------------------------------------
+# Blender 2.5 Extensions Framework
+# --------------------------------------------------------------------------
+#
+# Authors:
+# Doug Hammond
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+#
+import configparser
+import datetime
+import os
+import tempfile
+import threading
+
+import bpy
+
+"""List of possibly appropriate paths to load/save addon config from/to"""
+config_paths = []
+if bpy.utils.user_resource('CONFIG', '') != "": config_paths.append(bpy.utils.user_resource('CONFIG', '', create=True))
+if bpy.utils.user_resource('SCRIPTS', '') != "": config_paths.append(bpy.utils.user_resource('SCRIPTS', '', create=True))
+# want to scan other script paths in reverse order, since the user path comes last
+sp = [p for p in bpy.utils.script_paths() if p != '']
+sp.reverse()
+config_paths.extend(sp)
+
+"""This path is set at the start of export, so that calls to
+path_relative_to_export() can make all exported paths relative to
+this one.
+"""
+export_path = '';
+
+def path_relative_to_export(p):
+ """Return a path that is relative to the export path"""
+ global export_path
+ p = filesystem_path(p)
+ ep = os.path.dirname(export_path)
+
+ if os.sys.platform[:3] == "win":
+ # Prevent an error whereby python thinks C: and c: are different drives
+ if p[1] == ':': p = p[0].lower() + p[1:]
+ if ep[1] == ':': ep = ep[0].lower() + ep[1:]
+
+ try:
+ relp = os.path.relpath(p, ep)
+ except ValueError: # path on different drive on windows
+ relp = p
+
+ return relp.replace('\\', '/')
+
+def filesystem_path(p):
+ """Resolve a relative Blender path to a real filesystem path"""
+ if p.startswith('//'):
+ pout = bpy.path.abspath(p)
+ else:
+ pout = os.path.realpath(p)
+
+ return pout.replace('\\', '/')
+
+# TODO: - somehow specify TYPES to get/set from config
+
+def find_config_value(module, section, key, default):
+ """Attempt to find the configuration value specified by string key
+ in the specified section of module's configuration file. If it is
+ not found, return default.
+
+ """
+ global config_paths
+ fc = []
+ for p in config_paths:
+ if os.path.exists(p) and os.path.isdir(p) and os.access(p, os.W_OK):
+ fc.append( '/'.join([p, '%s.cfg' % module]))
+
+ if len(fc) < 1:
+ print('Cannot find %s config file path' % module)
+ return default
+
+ cp = configparser.SafeConfigParser()
+
+ cfg_files = cp.read(fc)
+ if len(cfg_files) > 0:
+ try:
+ val = cp.get(section, key)
+ if val == 'true':
+ return True
+ elif val == 'false':
+ return False
+ else:
+ return val
+ except:
+ return default
+ else:
+ return default
+
+def write_config_value(module, section, key, value):
+ """Attempt to write the configuration value specified by string key
+ in the specified section of module's configuration file.
+
+ """
+ global config_paths
+ fc = []
+ for p in config_paths:
+ if os.path.exists(p) and os.path.isdir(p) and os.access(p, os.W_OK):
+ fc.append( '/'.join([p, '%s.cfg' % module]))
+
+ if len(fc) < 1:
+ raise Exception('Cannot find a writable path to store %s config file' %
+ module)
+
+ cp = configparser.SafeConfigParser()
+
+ cfg_files = cp.read(fc)
+
+ if not cp.has_section(section):
+ cp.add_section(section)
+
+ if value == True:
+ cp.set(section, key, 'true')
+ elif value == False:
+ cp.set(section, key, 'false')
+ else:
+ cp.set(section, key, value)
+
+ if len(cfg_files) < 1:
+ cfg_files = fc
+
+ fh=open(cfg_files[0],'w')
+ cp.write(fh)
+ fh.close()
+
+ return True
+
+def scene_filename():
+ """Construct a safe scene filename, using 'untitled' instead of ''"""
+ filename = os.path.splitext(os.path.basename(bpy.data.filepath))[0]
+ if filename == '':
+ filename = 'untitled'
+ return bpy.path.clean_name(filename)
+
+def temp_directory():
+ """Return the system temp directory"""
+ return tempfile.gettempdir()
+
+def temp_file(ext='tmp'):
+ """Get a temporary filename with the given extension. This function
+ will actually attempt to create the file."""
+ tf, fn = tempfile.mkstemp(suffix='.%s'%ext)
+ os.close(tf)
+ return fn
+
+class TimerThread(threading.Thread):
+ """Periodically call self.kick(). The period of time in seconds
+ between calling is given by self.KICK_PERIOD, and the first call
+ may be delayed by setting self.STARTUP_DELAY, also in seconds.
+ self.kick() will continue to be called at regular intervals until
+ self.stop() is called. Since this is a thread, calling self.join()
+ may be wise after calling self.stop() if self.kick() is performing
+ a task necessary for the continuation of the program.
+ The object that creates this TimerThread may pass into it data
+ needed during self.kick() as a dict LocalStorage in __init__().
+
+ """
+ STARTUP_DELAY = 0
+ KICK_PERIOD = 8
+
+ active = True
+ timer = None
+
+ LocalStorage = None
+
+ def __init__(self, LocalStorage=dict()):
+ threading.Thread.__init__(self)
+ self.LocalStorage = LocalStorage
+
+ def set_kick_period(self, period):
+ """Adjust the KICK_PERIOD between __init__() and start()"""
+ self.KICK_PERIOD = period + self.STARTUP_DELAY
+
+ def stop(self):
+ """Stop this timer. This method does not join()"""
+ self.active = False
+ if self.timer is not None:
+ self.timer.cancel()
+
+ def run(self):
+ """Timed Thread loop"""
+ while self.active:
+ self.timer = threading.Timer(self.KICK_PERIOD, self.kick_caller)
+ self.timer.start()
+ if self.timer.isAlive(): self.timer.join()
+
+ def kick_caller(self):
+ """Intermediary between the kick-wait-loop and kick to allow
+ adjustment of the first KICK_PERIOD by STARTUP_DELAY
+
+ """
+ if self.STARTUP_DELAY > 0:
+ self.KICK_PERIOD -= self.STARTUP_DELAY
+ self.STARTUP_DELAY = 0
+
+ self.kick()
+
+ def kick(self):
+ """Sub-classes do their work here"""
+ pass
+
+def format_elapsed_time(t):
+ """Format a duration in seconds as an HH:MM:SS format time"""
+
+ td = datetime.timedelta(seconds=t)
+ min = td.days*1440 + td.seconds/60.0
+ hrs = td.days*24 + td.seconds/3600.0
+
+ return '%i:%02i:%02i' % (hrs, min%60, td.seconds%60)
diff --git a/modules/extensions_framework/validate.py b/modules/extensions_framework/validate.py
new file mode 100644
index 00000000..d9cee8fd
--- /dev/null
+++ b/modules/extensions_framework/validate.py
@@ -0,0 +1,213 @@
+# -*- coding: utf8 -*-
+#
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# --------------------------------------------------------------------------
+# Blender 2.5 Extensions Framework
+# --------------------------------------------------------------------------
+#
+# Authors:
+# Doug Hammond
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+# ***** END GPL LICENCE BLOCK *****
+#
+"""
+Pure logic and validation class.
+
+By using a Subject object, and a dict of described logic tests, it
+is possible to arrive at a True or False result for various purposes:
+1. Data validation
+2. UI control visibility
+
+A Subject can be any object whose members are readable with getattr() :
+class Subject(object):
+ a = 0
+ b = 1
+ c = 'foo'
+ d = True
+ e = False
+ f = 8
+ g = 'bar'
+
+
+Tests are described thus:
+
+Use the special list types Logic_AND and Logic_OR to describe
+combinations of values and other members. Use Logic_Operator for
+numerical comparison.
+
+With regards to Subject, each of these evaluate to True:
+TESTA = {
+ 'a': 0,
+ 'c': Logic_OR([ 'foo', 'bar' ]),
+ 'd': Logic_AND([True, True]),
+ 'f': Logic_AND([8, {'b': 1}]),
+ 'e': {'b': Logic_Operator({'gte':1, 'lt':3}) },
+ 'g': Logic_OR([ 'baz', Logic_AND([{'b': 1}, {'f': 8}]) ])
+}
+
+With regards to Subject, each of these evaluate to False:
+TESTB = {
+ 'a': 'foo',
+ 'c': Logic_OR([ 'bar', 'baz' ]),
+ 'd': Logic_AND([ True, 'foo' ]),
+ 'f': Logic_AND([9, {'b': 1}]),
+ 'e': {'b': Logic_Operator({'gte':-10, 'lt': 1}) },
+ 'g': Logic_OR([ 'baz', Logic_AND([{'b':0}, {'f': 8}]) ])
+}
+
+With regards to Subject, this test is invalid
+TESTC = {
+ 'n': 0
+}
+
+Tests are executed thus:
+S = Subject()
+L = Logician(S)
+L.execute(TESTA)
+
+"""
+
+class Logic_AND(list):
+ pass
+class Logic_OR(list):
+ pass
+class Logic_Operator(dict):
+ pass
+
+class Logician(object):
+ """Given a subject and a dict that describes tests to perform on
+ its members, this class will evaluate True or False results for
+	each member/test pair. See the examples in the module docstring above.
+
+ """
+
+ subject = None
+ def __init__(self, subject):
+ self.subject = subject
+
+ def get_member(self, member_name):
+ """Get a member value from the subject object. Raise exception
+ if subject is None or member not found.
+
+ """
+ if self.subject is None:
+ raise Exception('Cannot run tests on a subject which is None')
+
+ return getattr(self.subject, member_name)
+
+ def test_logic(self, member, logic, operator='eq'):
+ """Find the type of test to run on member, and perform that test"""
+
+ if type(logic) is dict:
+ return self.test_dict(member, logic)
+ elif type(logic) is Logic_AND:
+ return self.test_and(member, logic)
+ elif type(logic) is Logic_OR:
+ return self.test_or(member, logic)
+ elif type(logic) is Logic_Operator:
+ return self.test_operator(member, logic)
+ else:
+ # compare the value, I think using Logic_Operator() here
+ # allows completeness in test_operator(), but I can't put
+ # my finger on why for the minute
+ return self.test_operator(member,
+ Logic_Operator({operator: logic}))
+
+ def test_operator(self, member, value):
+ """Execute the operators contained within value and expect that
+ ALL operators are True
+
+ """
+
+ # something in this method is incomplete, what if operand is
+ # a dict, Logic_AND, Logic_OR or another Logic_Operator ?
+ # Do those constructs even make any sense ?
+
+ result = True
+ for operator, operand in value.items():
+ operator = operator.lower().strip()
+ if operator in ['eq', '==']:
+ result &= member==operand
+ if operator in ['not', '!=']:
+ result &= member!=operand
+ if operator in ['lt', '<']:
+ result &= member<operand
+ if operator in ['lte', '<=']:
+ result &= member<=operand
+ if operator in ['gt', '>']:
+ result &= member>operand
+ if operator in ['gte', '>=']:
+ result &= member>=operand
+ if operator in ['and', '&']:
+ result &= member&operand
+ if operator in ['or', '|']:
+ result &= member|operand
+ if operator in ['len']:
+ result &= len(member)==operand
+ # I can think of some more, but they're probably not useful.
+
+ return result
+
+ def test_or(self, member, logic):
+ """Member is a value, logic is a set of values, ANY of which
+ can be True
+
+ """
+ result = False
+ for test in logic:
+ result |= self.test_logic(member, test)
+
+ return result
+
+ def test_and(self, member, logic):
+ """Member is a value, logic is a list of values, ALL of which
+ must be True
+
+ """
+ result = True
+ for test in logic:
+ result &= self.test_logic(member, test)
+
+ return result
+
+ def test_dict(self, member, logic):
+ """Member is a value, logic is a dict of other members to
+ compare to. All other member tests must be True
+
+ """
+ result = True
+ for other_member, test in logic.items():
+ result &= self.test_logic(self.get_member(other_member), test)
+
+ return result
+
+ def execute(self, test):
+ """Subject is an object, test is a dict of {member: test} pairs
+		to perform on subject's members. Each key in test is a member
+ of subject.
+
+ """
+
+ for member_name, logic in test.items():
+ result = self.test_logic(self.get_member(member_name), logic)
+ print('member %s is %s' % (member_name, result))
+
+# A couple of name aliases
+class Validation(Logician):
+ pass
+class Visibility(Logician):
+ pass
diff --git a/modules/geometry_utils.py b/modules/geometry_utils.py
new file mode 100644
index 00000000..e928ace2
--- /dev/null
+++ b/modules/geometry_utils.py
@@ -0,0 +1,215 @@
+# -*- coding: utf-8 -*-
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+'''
+ geometry_utils.py
+
+ 3d geometry calculations
+
+
+
+'''
+
+
+from mathutils import Vector, Matrix
+from mathutils import geometry
+
+
+# 3D Geometry
+class G3:
+
+ @classmethod
+ def distanceP2P(cls, p1, p2):
+ return (p1-p2).length
+
+ @classmethod
+ def closestP2L(cls, p, l1, l2):
+ vA = p - l1
+ vL = l2- l1
+ vL.normalize()
+ return vL * (vL.dot(vA)) + l1
+
+ @classmethod
+ def closestP2E(cls, p, e1, e2):
+ q = G3.closestP2L(p, e1, e2)
+ de = G3.distanceP2P(e1, e2)
+ d1 = G3.distanceP2P(q, e1)
+ d2 = G3.distanceP2P(q, e2)
+ if d1>de and d1>d2:
+ q = e2
+ if d2>de and d2>d1:
+ q = e1
+ return q
+
+ @classmethod
+ def heightP2S(cls, p, sO, sN):
+ return (p-sO).dot(sN) / sN.dot(sN)
+
+ @classmethod
+ def closestP2S(cls, p, sO, sN):
+ k = - G3.heightP2S(p, sO, sN)
+ q = p+sN*k
+ return q
+
+ @classmethod
+ def closestP2F(cls, p, fv, sN):
+ q = G3.closestP2S(p, fv[0], sN)
+ #pi = MeshEditor.addVertex(p)
+ #qi = MeshEditor.addVertex(q)
+ #MeshEditor.addEdge(pi, qi)
+ #print ([d0,d1,d2])
+
+ if len(fv)==3:
+ h = G3.closestP2L(fv[0], fv[1], fv[2])
+ d = (fv[0]-h).dot(q-h)
+ if d<=0:
+ return G3.closestP2E(q, fv[1], fv[2])
+ h = G3.closestP2L(fv[1], fv[2], fv[0])
+ d = (fv[1]-h).dot(q-h)
+ if d<=0:
+ return G3.closestP2E(q, fv[2], fv[0])
+ h = G3.closestP2L(fv[2], fv[0], fv[1])
+ d = (fv[2]-h).dot(q-h)
+ if d<=0:
+ return G3.closestP2E(q, fv[0], fv[1])
+ return q
+ if len(fv)==4:
+ h = G3.closestP2L(fv[0], fv[1], fv[2])
+ d = (fv[0]-h).dot(q-h)
+ if d<=0:
+ return G3.closestP2E(q, fv[1], fv[2])
+ h = G3.closestP2L(fv[1], fv[2], fv[3])
+ d = (fv[1]-h).dot(q-h)
+ if d<=0:
+ return G3.closestP2E(q, fv[2], fv[3])
+ h = G3.closestP2L(fv[2], fv[3], fv[0])
+ d = (fv[2]-h).dot(q-h)
+ if d<=0:
+ return G3.closestP2E(q, fv[3], fv[0])
+ h = G3.closestP2L(fv[3], fv[0], fv[1])
+ d = (fv[3]-h).dot(q-h)
+ if d<=0:
+ return G3.closestP2E(q, fv[0], fv[1])
+ return q
+
+ @classmethod
+ def medianTriangle(cls, vv):
+ m0 = (vv[1]+vv[2])/2
+ m1 = (vv[0]+vv[2])/2
+ m2 = (vv[0]+vv[1])/2
+ return [m0, m1, m2]
+
+ @classmethod
+ def orthoCenter(cls, fv):
+ try:
+ h0 = G3.closestP2L(fv[0], fv[1], fv[2])
+ h1 = G3.closestP2L(fv[1], fv[0], fv[2])
+ #h2 = G3.closestP2L(fm[2], fm[0], fm[1])
+ return geometry.intersect_line_line (fv[0], h0, fv[1], h1)[0]
+ except(RuntimeError, TypeError):
+ return None
+
+ @classmethod
+ def circumCenter(cls, fv):
+ fm = G3.medianTriangle(fv)
+ return G3.orthoCenter(fm)
+
+ @classmethod
+ def ThreePnormal(cls, fv):
+ n = (fv[1]-fv[0]).cross(fv[2]-fv[0])
+ n.normalize()
+ return n
+
+ @classmethod
+ def closestP2CylinderAxis(cls, p, fv):
+ n = G3.ThreePnormal(fv)
+ c = G3.circumCenter(fv)
+ if(c==None):
+ return None
+ return G3.closestP2L(p, c, c+n)
+
+ @classmethod
+ def centerOfSphere(cls, fv):
+ try:
+ if len(fv)==3:
+ return G3.circumCenter(fv)
+ if len(fv)==4:
+ fv3 = [fv[0],fv[1],fv[2]]
+ c1 = G3.circumCenter(fv)
+ n1 = G3.ThreePnormal(fv)
+ fv3 = [fv[1],fv[2],fv[3]]
+ c2 = G3.circumCenter(fv3)
+ n2 = G3.ThreePnormal(fv3)
+ d1 = c1+n1
+ d2 = c2+n2
+ return geometry.intersect_line_line (c1, d1, c2, d2)[0]
+ except(RuntimeError, TypeError):
+ return None
+
+ @classmethod
+ def closestP2Sphere(cls, p, fv):
+ #print ("G3.closestP2Sphere")
+ try:
+ c = G3.centerOfSphere(fv)
+ if c==None:
+ return None
+ pc = p-c
+ if pc.length == 0:
+ pc = pc + Vector((1,0,0))
+ else:
+ pc.normalize()
+ return c + (pc * G3.distanceP2P(c, fv[0]))
+ except(RuntimeError, TypeError):
+ return None
+
+ @classmethod
+ def closestP2Cylinder(cls, p, fv):
+ #print ("G3.closestP2Sphere")
+ c = G3.closestP2CylinderAxis(p, fv)
+ if c==None:
+ return None
+ r = (fv[0] - G3.centerOfSphere(fv)).length
+ pc = p-c
+ if pc.length == 0:
+ pc = pc + Vector((1,0,0))
+ else:
+ pc.normalize()
+ return c + (pc * r)
+
+ #@classmethod
+ #def closestP2Sphere4(cls, p, fv4):
+ ##print ("G3.closestP2Sphere")
+ #fv = [fv4[0],fv4[1],fv4[2]]
+ #c1 = G3.circumCenter(fv)
+ #n1 = G3.ThreePnormal(fv)
+ #fv = [fv4[1],fv4[2],fv4[3]]
+ #c2 = G3.circumCenter(fv)
+ #n2 = G3.ThreePnormal(fv)
+ #d1 = c1+n1
+ #d2 = c2+n2
+ #c = geometry.intersect_line_line (c1, d1, c2, d2)[0]
+ #pc = p-c
+ #if pc.length == 0:
+ #pc = pc + Vector((1,0,0))
+ #else:
+ #pc.normalize()
+ #return c + (pc * G3.distanceP2P(c, fv[0]))
+
+
+
diff --git a/modules/misc_utils.py b/modules/misc_utils.py
new file mode 100644
index 00000000..fb03fe78
--- /dev/null
+++ b/modules/misc_utils.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+'''
+    misc_utils.py
+
+ Miscellaneous helper methods.
+
+
+
+'''
+
+
+
+import bpy
+from mathutils import Vector, Matrix
+
+
+
+class BlenderFake:
+
+ @classmethod
+ def forceUpdate(cls):
+ if bpy.context.mode == 'EDIT_MESH':
+ bpy.ops.object.mode_set(mode='OBJECT')
+ bpy.ops.object.mode_set(mode='EDIT')
+
+ @classmethod
+ def forceRedraw(cls):
+ CursorAccess.setCursor(CursorAccess.getCursor())
+
+
+
+# Converts 3D coordinates in a 3DRegion
+# into 2D screen coordinates for that region.
+# Borrowed from Buerbaum Martin (Pontiac)
+def region3d_get_2d_coordinates(context, loc_3d):
+ # Get screen information
+ mid_x = context.region.width / 2.0
+ mid_y = context.region.height / 2.0
+ width = context.region.width
+ height = context.region.height
+
+ # Get matrices
+ view_mat = context.space_data.region_3d.perspective_matrix
+ total_mat = view_mat
+
+ # order is important
+ vec = Vector((loc_3d[0], loc_3d[1], loc_3d[2], 1.0)) * total_mat
+
+ # dehomogenise
+ vec = Vector((
+ vec[0] / vec[3],
+ vec[1] / vec[3],
+ vec[2] / vec[3]))
+
+ x = int(mid_x + vec[0] * width / 2.0)
+ y = int(mid_y + vec[1] * height / 2.0)
+ z = vec[2]
+
+ return Vector((x, y, z))
diff --git a/modules/ui_utils.py b/modules/ui_utils.py
new file mode 100644
index 00000000..e938efa9
--- /dev/null
+++ b/modules/ui_utils.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+'''
+ ui_utils.py
+
+ Some UI utility functions
+
+
+
+'''
+
+
+
class GUI:
    """Small helpers for drawing operator buttons into a UI layout."""

    @classmethod
    def drawIconButton(cls, enabled, layout, iconName, operator, frame=True):
        """Add an icon-only button invoking *operator* to *layout*."""
        column = layout.column()
        column.enabled = enabled
        column.operator(operator,
                        text='',
                        icon=iconName,
                        emboss=frame)

    @classmethod
    def drawTextButton(cls, enabled, layout, text, operator, frame=True):
        """Add a text-labelled button invoking *operator* to *layout*."""
        column = layout.column()
        column.enabled = enabled
        column.operator(operator,
                        text=text,
                        emboss=frame)
+
+
+
diff --git a/netrender/__init__.py b/netrender/__init__.py
new file mode 100644
index 00000000..5ae4b774
--- /dev/null
+++ b/netrender/__init__.py
@@ -0,0 +1,81 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# This directory is a Python package.
+
+bl_info = {
+ "name": "Network Renderer",
+ "author": "Martin Poirier",
+ "version": (1, 3),
+ "blender": (2, 5, 6),
+ "api": 35011,
+ "location": "Render > Engine > Network Render",
+ "description": "Distributed rendering for Blender",
+ "warning": "Stable but still work in progress",
+ "wiki_url": "http://wiki.blender.org/index.php/Doc:2.5/Manual/Render/Engines/Netrender",
+ "category": "Render"}
+
+
+# To support reload properly, try to access a package var, if it's there, reload everything
+if "init_data" in locals():
+ import imp
+ imp.reload(model)
+ imp.reload(operators)
+ imp.reload(client)
+ imp.reload(slave)
+ imp.reload(master)
+ imp.reload(master_html)
+ imp.reload(utils)
+ imp.reload(balancing)
+ imp.reload(ui)
+ imp.reload(repath)
+ imp.reload(versioning)
+else:
+ from netrender import model
+ from netrender import operators
+ from netrender import client
+ from netrender import slave
+ from netrender import master
+ from netrender import master_html
+ from netrender import utils
+ from netrender import balancing
+ from netrender import ui
+ from netrender import repath
+ from netrender import versioning
+
+jobs = []
+slaves = []
+blacklist = []
+
+init_file = ""
+valid_address = False
+init_data = True
+
+
def register():
    """Blender add-on entry point: register all classes of this module.

    Also seeds the netrender UI state from the active scene's
    network_render settings when a scene is available.
    """
    import bpy
    bpy.utils.register_module(__name__)

    scene = bpy.context.scene
    if scene:
        ui.init_data(scene.network_render)
+
+
def unregister():
    """Blender add-on exit point: unregister all classes of this module."""
    import bpy
    bpy.utils.unregister_module(__name__)
diff --git a/netrender/balancing.py b/netrender/balancing.py
new file mode 100644
index 00000000..dde3ad53
--- /dev/null
+++ b/netrender/balancing.py
@@ -0,0 +1,195 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+import time
+
+from netrender.utils import *
+import netrender.model
+
class RatingRule:
    """Base class for rules that score a job; lower ratings win."""

    def __init__(self):
        # Rules can be toggled off without being removed from the balancer.
        self.enabled = True

    def id(self):
        """Stable identifier for this rule instance (its object id)."""
        return str(id(self))

    def rate(self, job):
        """Return a rating for *job*; subclasses override. Default: 0."""
        return 0
+
class ExclusionRule:
    """Base class for rules that exclude a job from dispatching."""

    def __init__(self):
        # Rules can be toggled off without being removed from the balancer.
        self.enabled = True

    def id(self):
        """Stable identifier for this rule instance (its object id)."""
        return str(id(self))

    def test(self, job):
        """Return True to exclude *job*; subclasses override. Default: False."""
        return False
+
class PriorityRule:
    """Base class for rules that give a job priority over the rating order."""

    def __init__(self):
        # Rules can be toggled off without being removed from the balancer.
        self.enabled = True

    def id(self):
        """Stable identifier for this rule instance (its object id)."""
        return str(id(self))

    def test(self, job):
        """Return True to prioritize *job*; subclasses override. Default: False."""
        return False
+
class Balancer:
    """Orders render jobs using rating, priority and exclusion rules."""

    def __init__(self):
        self.rules = []       # RatingRule instances
        self.priorities = []  # PriorityRule instances
        self.exceptions = []  # ExclusionRule instances

    def ruleByID(self, rule_id):
        """Look up a rule by its id() across all three rule lists, or None."""
        for group in (self.rules, self.priorities, self.exceptions):
            for rule in group:
                if rule.id() == rule_id:
                    return rule
        return None

    def addRule(self, rule):
        self.rules.append(rule)

    def addPriority(self, priority):
        self.priorities.append(priority)

    def addException(self, exception):
        self.exceptions.append(exception)

    def applyRules(self, job):
        """Sum of all enabled rating rules for *job* (lower is better)."""
        return sum(rule.rate(job) for rule in self.rules if rule.enabled)

    def applyPriorities(self, job):
        """True when any enabled priority rule matches *job*."""
        return any(p.enabled and p.test(job) for p in self.priorities)

    def applyExceptions(self, job):
        """True when any enabled exclusion rule matches *job*."""
        return any(e.enabled and e.test(job) for e in self.exceptions)

    def sortKey(self, job):
        # Excluded jobs sort last, prioritized jobs first, ties by rating.
        return (1 if self.applyExceptions(job) else 0,
                0 if self.applyPriorities(job) else 1,
                self.applyRules(job))

    def balance(self, jobs):
        """Reorder *jobs* in place (best first) and return the best, or None."""
        if not jobs:
            return None
        # Slice assignment keeps the list object readable by other
        # holders while it is being reordered.
        jobs[:] = sorted(jobs, key=self.sortKey)
        return jobs[0]
+
+# ==========================
+
class RatingUsage(RatingRule):
    """Rate a job by its own usage, weighted down by its priority."""

    def __str__(self):
        return "Usage per job"

    def rate(self, job):
        # less usage is better
        return job.usage / job.priority
+
class RatingUsageByCategory(RatingRule):
    """Rate a job by the accumulated usage of its whole category.

    get_jobs -- callable returning the current iterable of all jobs.
    """

    def __init__(self, get_jobs):
        super().__init__()
        self.getJobs = get_jobs

    def __str__(self):
        return "Usage per category"

    def rate(self, job):
        # Collect the job's category once; the original iterated
        # self.getJobs() twice (once for usage, once for priority).
        peers = [j for j in self.getJobs() if j.category == job.category]

        total_category_usage = sum(j.usage for j in peers)
        maximum_priority = max(j.priority for j in peers)

        # less usage is better
        return total_category_usage / maximum_priority
+
class NewJobPriority(PriorityRule):
    """Give priority to jobs that have few finished frames."""

    def __init__(self, limit=1):
        super().__init__()
        self.limit = limit  # finished-frame threshold

    def setLimit(self, value):
        """Set the finished-frame threshold (coerced to int)."""
        self.limit = int(value)

    def str_limit(self):
        """Human-readable description of the current threshold."""
        plural = "s" if self.limit > 1 else ""
        return "less than %i frame%s done" % (self.limit, plural)

    def __str__(self):
        return "Priority to new jobs"

    def test(self, job):
        # A job counts as "new" while fewer than *limit* frames are DONE.
        return job.countFrames(status=DONE) < self.limit
+
class MinimumTimeBetweenDispatchPriority(PriorityRule):
    """Give priority to jobs that haven't been dispatched recently."""

    def __init__(self, limit=10):
        super().__init__()
        self.limit = limit  # minutes since last dispatch

    def setLimit(self, value):
        """Set the minute threshold (coerced to int)."""
        self.limit = int(value)

    def str_limit(self):
        """Human-readable description of the current threshold."""
        plural = "s" if self.limit > 1 else ""
        return "more than %i minute%s since last" % (self.limit, plural)

    def __str__(self):
        return "Priority to jobs that haven't been dispatched recently"

    def test(self, job):
        # Nothing currently dispatched, and the last dispatch happened
        # more than *limit* minutes ago.
        if job.countFrames(status=DISPATCHED) != 0:
            return False
        minutes_since = (time.time() - job.last_dispatched) / 60
        return minutes_since > self.limit
+
class ExcludeQueuedEmptyJob(ExclusionRule):
    """Exclude jobs that are not in the queued state or have no queued frames."""

    def __str__(self):
        return "Exclude non queued or empty jobs"

    def test(self, job):
        # Exclude when the job is not dispatchable at all.
        return job.status != JOB_QUEUED or job.countFrames(status = QUEUED) == 0
+
class ExcludeSlavesLimit(ExclusionRule):
    """Exclude jobs that would occupy more than a fraction of all slaves."""

    def __init__(self, count_jobs, count_slaves, limit=0.75):
        super().__init__()
        self.count_jobs = count_jobs      # callable: current number of jobs
        self.count_slaves = count_slaves  # callable: current number of slaves
        self.limit = limit                # allowed fraction of the slave pool

    def setLimit(self, value):
        """Set the allowed fraction (coerced to float)."""
        self.limit = float(value)

    def str_limit(self):
        """Human-readable description of the current threshold."""
        return "more than %.0f%% of all slaves" % (self.limit * 100)

    def __str__(self):
        return "Exclude jobs that would use too many slaves"

    def test(self, job):
        # Never exclude when it is the only job, or when there is at
        # most one slave; otherwise exclude once adding one more slave
        # would push the job past the allowed fraction.
        if self.count_jobs() == 1:
            return False
        if self.count_slaves() <= 1:
            return False
        return float(job.countSlaves() + 1) / self.count_slaves() > self.limit
diff --git a/netrender/client.py b/netrender/client.py
new file mode 100644
index 00000000..651b4660
--- /dev/null
+++ b/netrender/client.py
@@ -0,0 +1,376 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+import bpy
+import sys, os, re
+import http, http.client, http.server, urllib
+import subprocess, shutil, time, hashlib
+import json
+
+import netrender
+import netrender.model
+import netrender.slave as slave
+import netrender.master as master
+from netrender.utils import *
+
def addFluidFiles(job, path):
    """Register every baked fluid-simulation file under *path* with *job*.

    *path* is expected to end with a separator. Files named
    fluidsurface_{final|preview}_NNNN.{bobj|bvel}.gz are added, each
    covering exactly its own (1-based) frame. Missing directories are
    silently ignored.
    """
    if not os.path.exists(path):
        return

    pattern = re.compile(r"fluidsurface_(final|preview)_([0-9]+)\.(bobj|bvel)\.gz")

    for entry in sorted(os.listdir(path)):
        match = pattern.match(entry)
        if not match:
            continue

        # Fluid frames start at 0 while render frames start at 1,
        # hence the +1 ("This is stupid" -- original author).
        frame = int(match.groups()[1]) + 1
        job.addFile(path + entry, frame, frame)
+
def addPointCache(job, ob, point_cache, default_path):
    """Register the on-disk files of *point_cache* with *job*.

    ob           -- object owning the cache; its hex-encoded name is the
                    file prefix when the cache is unnamed (Blender's scheme).
    default_path -- blendcache directory used when the cache is not external.

    Each cache file is added together with the frame range it covers:
    a single file covers only its own frame, otherwise ranges are
    widened to just after/before the neighbouring cache frames so that
    intermediate frames map to some file.
    """
    # Caches kept purely in memory have nothing to transfer.
    if not point_cache.use_disk_cache:
        return


    name = point_cache.name
    if name == "":
        # Unnamed caches use the hex-encoded object name as prefix.
        name = "".join(["%02X" % ord(c) for c in ob.name])

    cache_path = bpy.path.abspath(point_cache.filepath) if point_cache.use_external else default_path

    index = "%02i" % point_cache.index

    if os.path.exists(cache_path):
        # Cache files look like <name>_<frame>_<index>.bphys
        pattern = re.compile(name + "_([0-9]+)_" + index + "\.bphys")

        cache_files = []

        for cache_file in sorted(os.listdir(cache_path)):
            match = pattern.match(cache_file)

            if match:
                cache_frame = int(match.groups()[0])
                cache_files.append((cache_frame, cache_file))

        cache_files.sort()

        if len(cache_files) == 1:
            cache_frame, cache_file = cache_files[0]
            job.addFile(cache_path + cache_file, cache_frame, cache_frame)
        else:
            for i in range(len(cache_files)):
                current_item = cache_files[i]
                next_item = cache_files[i+1] if i + 1 < len(cache_files) else None
                previous_item = cache_files[i - 1] if i > 0 else None

                current_frame, current_file = current_item

                if not next_item and not previous_item:
                    # only file in the list
                    job.addFile(cache_path + current_file, current_frame, current_frame)
                elif next_item and not previous_item:
                    # first file: covers up to just before the next cache frame
                    next_frame = next_item[0]
                    job.addFile(cache_path + current_file, current_frame, next_frame - 1)
                elif not next_item and previous_item:
                    # last file: covers from just after the previous cache frame
                    previous_frame = previous_item[0]
                    job.addFile(cache_path + current_file, previous_frame + 1, current_frame)
                else:
                    # middle file: bridges the gap between both neighbours
                    next_frame = next_item[0]
                    previous_frame = previous_item[0]
                    job.addFile(cache_path + current_file, previous_frame + 1, next_frame - 1)
+
def fillCommonJobSettings(job, job_name, netsettings):
    """Copy the settings shared by every job type from *netsettings* into *job*."""
    job.name = job_name
    job.category = netsettings.job_category

    # Propagate the client-side slave blacklist.
    # (Loop variable renamed so it no longer shadows the module-level
    # `slave` import.)
    for blacklisted in netrender.blacklist:
        job.blacklist.append(blacklisted.id)

    job.chunks = netsettings.chunks
    job.priority = netsettings.priority

    job_type = netsettings.job_type
    if job_type == "JOB_BLENDER":
        job.type = netrender.model.JOB_BLENDER
    elif job_type == "JOB_PROCESS":
        job.type = netrender.model.JOB_PROCESS
    elif job_type == "JOB_VCS":
        job.type = netrender.model.JOB_VCS
+
def clientSendJob(conn, scene, anim=False):
    """Submit a render job over *conn*, dispatching on the scene's job type.

    Returns the new job id, or None for unsupported job types.
    """
    job_type = scene.network_render.job_type
    if job_type == "JOB_BLENDER":
        return clientSendJobBlender(conn, scene, anim)
    if job_type == "JOB_VCS":
        return clientSendJobVCS(conn, scene, anim)
+
def clientSendJobVCS(conn, scene, anim = False):
    """Submit a version-control based render job to the master.

    The blend file itself is not uploaded: only its path relative to
    the configured VCS working directory plus the versioning info, so
    slaves can check the file out themselves. Returns the job id, or
    None when the current file lives outside the VCS working path.
    """
    netsettings = scene.network_render
    job = netrender.model.RenderJob()

    if anim:
        for f in range(scene.frame_start, scene.frame_end + 1):
            job.addFrame(f)
    else:
        job.addFrame(scene.frame_current)

    filename = bpy.data.filepath

    if not filename.startswith(netsettings.vcs_wpath):
        # this is an error, need better way to handle this
        return

    # Path of the blend file relative to the VCS working directory.
    filename = filename[len(netsettings.vcs_wpath):]

    if filename[0] in (os.sep, os.altsep):
        filename = filename[1:]

    print("CREATING VCS JOB", filename)

    # Unsigned: no content hash, slaves fetch the file from the VCS.
    job.addFile(filename, signed=False)

    job_name = netsettings.job_name
    path, name = os.path.split(filename)
    if job_name == "[default]":
        job_name = name


    fillCommonJobSettings(job, job_name, netsettings)

    # VCS Specific code
    job.version_info = netrender.model.VersioningInfo()
    job.version_info.system = netsettings.vcs_system
    job.version_info.wpath = netsettings.vcs_wpath
    job.version_info.rpath = netsettings.vcs_rpath
    job.version_info.revision = netsettings.vcs_revision

    # try to send path first
    conn.request("POST", "/job", json.dumps(job.serialize()))
    response = conn.getresponse()
    response.read()

    job_id = response.getheader("job-id")

    # a VCS job is always good right now, need error handling

    return job_id
+
def clientSendJobBlender(conn, scene, anim = False):
    """Submit the current blend file as a render job to the master.

    Collects, besides the blend file itself, every external dependency
    that slaves will need: linked libraries, unpacked image textures,
    fluid bake files and point caches. Returns the job id assigned by
    the master; the master replies ACCEPTED until it has received all
    listed files.

    Raises RuntimeError when the blend file has never been saved.
    """
    netsettings = scene.network_render
    job = netrender.model.RenderJob()

    if anim:
        for f in range(scene.frame_start, scene.frame_end + 1):
            job.addFrame(f)
    else:
        job.addFrame(scene.frame_current)

    filename = bpy.data.filepath

    if not os.path.exists(filename):
        raise RuntimeError("Current file path not defined\nSave your file before sending a job")

    job.addFile(filename)

    job_name = netsettings.job_name
    path, name = os.path.split(filename)
    if job_name == "[default]":
        job_name = name

    ###########################
    # LIBRARIES
    ###########################
    for lib in bpy.data.libraries:
        file_path = bpy.path.abspath(lib.filepath)
        if os.path.exists(file_path):
            job.addFile(file_path)

    ###########################
    # IMAGES
    ###########################
    for image in bpy.data.images:
        if image.source == "FILE" and not image.packed_file:
            file_path = bpy.path.abspath(image.filepath)
            if os.path.exists(file_path):
                job.addFile(file_path)

                # Ship a pre-generated .tex sidecar too, when present.
                tex_path = os.path.splitext(file_path)[0] + ".tex"
                if os.path.exists(tex_path):
                    job.addFile(tex_path)

    ###########################
    # FLUID + POINT CACHE
    ###########################
    root, ext = os.path.splitext(name)
    default_path = path + os.sep + "blendcache_" + root + os.sep # need an API call for that

    for object in bpy.data.objects:
        for modifier in object.modifiers:
            if modifier.type == 'FLUID_SIMULATION' and modifier.settings.type == "DOMAIN":
                addFluidFiles(job, bpy.path.abspath(modifier.settings.filepath))
            elif modifier.type == "CLOTH":
                addPointCache(job, object, modifier.point_cache, default_path)
            elif modifier.type == "SOFT_BODY":
                addPointCache(job, object, modifier.point_cache, default_path)
            elif modifier.type == "SMOKE" and modifier.smoke_type == "TYPE_DOMAIN":
                addPointCache(job, object, modifier.domain_settings.point_cache, default_path)
            elif modifier.type == "MULTIRES" and modifier.is_external:
                file_path = bpy.path.abspath(modifier.filepath)
                job.addFile(file_path)

        # particles modifier are stupid and don't contain data
        # we have to go through the object property
        for psys in object.particle_systems:
            addPointCache(job, object, psys.point_cache, default_path)

    #print(job.files)

    fillCommonJobSettings(job, job_name, netsettings)

    # try to send path first
    conn.request("POST", "/job", json.dumps(job.serialize()))
    response = conn.getresponse()
    response.read()

    job_id = response.getheader("job-id")

    # if not ACCEPTED (but not processed), send files
    if response.status == http.client.ACCEPTED:
        for rfile in job.files:
            f = open(rfile.filepath, "rb")
            conn.request("PUT", fileURL(job_id, rfile.index), f)
            f.close()
            response = conn.getresponse()
            response.read()

    # server will reply with ACCEPTED until all files are found

    return job_id
+
def requestResult(conn, job_id, frame):
    """Ask the master for the rendered result of *frame* of job *job_id*."""
    conn.request("GET", renderURL(job_id, frame))
+
class NetworkRenderEngine(bpy.types.RenderEngine):
    """Render engine that delegates rendering over the network.

    Depending on scene.network_render.mode the same engine acts as
    rendering client, render slave or dispatch master.
    """
    bl_idname = 'NET_RENDER'
    bl_label = "Network Render"
    bl_use_postprocess = False
    def render(self, scene):
        # Dispatch on the configured operation mode.
        if scene.network_render.mode == "RENDER_CLIENT":
            self.render_client(scene)
        elif scene.network_render.mode == "RENDER_SLAVE":
            self.render_slave(scene)
        elif scene.network_render.mode == "RENDER_MASTER":
            self.render_master(scene)
        else:
            print("UNKNOWN OPERATION MODE")

    def render_master(self, scene):
        # Run a master server; blocks until the render is stopped.
        netsettings = scene.network_render

        address = "" if netsettings.server_address == "[default]" else netsettings.server_address

        master.runMaster((address, netsettings.server_port), netsettings.use_master_broadcast, netsettings.use_master_clear, bpy.path.abspath(netsettings.path), self.update_stats, self.test_break)


    def render_slave(self, scene):
        # Run as a slave, rendering jobs handed out by the master.
        slave.render_slave(self, scene.network_render, scene.render.threads)

    def render_client(self, scene):
        # Submit the job if the master doesn't know it yet, poll until
        # the frame is done, then load the EXR as the render result.
        netsettings = scene.network_render
        self.update_stats("", "Network render client initiation")


        conn = clientConnection(netsettings.server_address, netsettings.server_port)

        if conn:
            # Sending file

            self.update_stats("", "Network render exporting")

            new_job = False

            job_id = netsettings.job_id

            # reading back result

            self.update_stats("", "Network render waiting for results")


            requestResult(conn, job_id, scene.frame_current)
            response = conn.getresponse()
            buf = response.read()

            if response.status == http.client.NO_CONTENT:
                # Master doesn't know this job id: submit and ask again.
                new_job = True
                netsettings.job_id = clientSendJob(conn, scene)
                job_id = netsettings.job_id

                requestResult(conn, job_id, scene.frame_current)
                response = conn.getresponse()
                buf = response.read()

            # ACCEPTED means "still rendering": poll once per second.
            while response.status == http.client.ACCEPTED and not self.test_break():
                time.sleep(1)
                requestResult(conn, job_id, scene.frame_current)
                response = conn.getresponse()
                buf = response.read()

            # cancel new jobs (animate on network) on break
            if self.test_break() and new_job:
                conn.request("POST", cancelURL(job_id))
                response = conn.getresponse()
                response.read()
                print( response.status, response.reason )
                netsettings.job_id = 0

            if response.status != http.client.OK:
                conn.close()
                return

            r = scene.render
            x= int(r.resolution_x*r.resolution_percentage*0.01)
            y= int(r.resolution_y*r.resolution_percentage*0.01)

            # Write the received EXR to disk, then load it as the result.
            result_path = os.path.join(bpy.path.abspath(netsettings.path), "output.exr")

            folder = os.path.split(result_path)[0]

            if not os.path.exists(folder):
                os.mkdir(folder)

            f = open(result_path, "wb")

            f.write(buf)

            f.close()

            result = self.begin_result(0, 0, x, y)
            result.load_from_file(result_path)
            self.end_result(result)

            conn.close()
+
def compatible(module):
    """Best-effort: tag UI panel classes as compatible with NET_RENDER.

    NOTE(review): __import__("bl_ui." + module) returns the top-level
    bl_ui package, not the named submodule, so the loop scans bl_ui's
    own namespace; confirm this actually reaches the intended panel
    classes. The bare except deliberately skips entries without a
    COMPAT_ENGINES set.
    """
    module = __import__("bl_ui." + module)
    for subclass in module.__dict__.values():
        try: subclass.COMPAT_ENGINES.add('NET_RENDER')
        except: pass
    del module

# Make the standard property panels show up for this engine.
compatible("properties_world")
compatible("properties_material")
compatible("properties_data_mesh")
compatible("properties_data_camera")
compatible("properties_texture")
diff --git a/netrender/master.py b/netrender/master.py
new file mode 100644
index 00000000..280a4019
--- /dev/null
+++ b/netrender/master.py
@@ -0,0 +1,1064 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+import sys, os
+import http, http.client, http.server, urllib, socket, socketserver, threading
+import subprocess, shutil, time, hashlib
+import pickle
+import select # for select.error
+import json
+
+from netrender.utils import *
+import netrender.model
+import netrender.balancing
+import netrender.master_html
+import netrender.thumbnail as thumbnail
+
class MRenderFile(netrender.model.RenderFile):
    """Master-side render file that can verify its presence on disk."""

    def __init__(self, filepath, index, start, end, signature):
        super().__init__(filepath, index, start, end, signature)
        # Outcome of the most recent test().
        self.found = False

    def test(self):
        """Return True when the file exists and, if a signature was
        given, when its content hash matches; cached in self.found."""
        available = os.path.exists(self.filepath)
        if available and self.signature is not None:
            available = (hashFile(self.filepath) == self.signature)
        self.found = available
        return available
+
+
class MRenderSlave(netrender.model.RenderSlave):
    """Master-side view of a render slave and its current assignment."""

    def __init__(self, name, address, stats):
        super().__init__()
        # Stable identifier derived from the slave's name and address.
        self.id = hashlib.md5(bytes(repr(name) + repr(address), encoding='utf8')).hexdigest()
        self.name = name
        self.address = address
        self.stats = stats
        self.last_seen = time.time()

        # Current assignment: job and the frame numbers being rendered.
        self.job = None
        self.job_frames = []

        # Register globally so slaves can be resolved by id.
        netrender.model.RenderSlave._slave_map[self.id] = self

    def seen(self):
        # Refresh the last-seen timestamp (slave activity heartbeat).
        self.last_seen = time.time()

    def finishedFrame(self, frame_number):
        # Mark one assigned frame done; drop the job once none remain.
        self.job_frames.remove(frame_number)
        if not self.job_frames:
            self.job = None
+
class MRenderJob(netrender.model.RenderJob):
    """Master-side render job: adds dispatch bookkeeping, persistence
    and status management on top of the serializable model job."""

    def __init__(self, job_id, job_info):
        super().__init__(job_info)
        self.id = job_id
        self.last_dispatched = time.time()

        # force one chunk for process jobs
        if self.type == netrender.model.JOB_PROCESS:
            self.chunks = 1

        # Force WAITING status on creation
        self.status = JOB_WAITING

        # special server properties
        self.last_update = 0
        self.save_path = ""
        self.files = [MRenderFile(rfile.filepath, rfile.index, rfile.start, rfile.end, rfile.signature) for rfile in job_info.files]

    def initInfo(self):
        # Lazily read the render resolution out of the first blend file.
        if not self.resolution:
            self.resolution = tuple(getFileInfo(self.files[0].filepath, ["bpy.context.scene.render.resolution_x", "bpy.context.scene.render.resolution_y", "bpy.context.scene.render.resolution_percentage"]))

    def save(self):
        # Persist the serialized job next to its files as job.txt.
        if self.save_path:
            f = open(os.path.join(self.save_path, "job.txt"), "w")
            f.write(json.dumps(self.serialize()))
            f.close()

    def edit(self, info_map):
        # Apply a partial update (from the /edit handler).
        if "status" in info_map:
            self.status = info_map["status"]

        if "priority" in info_map:
            self.priority = info_map["priority"]

        if "chunks" in info_map:
            self.chunks = info_map["chunks"]

    def testStart(self):
        # Start the job once all its files are available on the master.
        # Don't test files for versionned jobs
        if not self.version_info:
            for f in self.files:
                if not f.test():
                    return False

        self.start()
        self.initInfo()
        return True

    def testFinished(self):
        # Finished when no frame is queued or dispatched anymore.
        for f in self.frames:
            if f.status == QUEUED or f.status == DISPATCHED:
                break
        else:
            self.status = JOB_FINISHED

    def pause(self, status = None):
        # Toggle (status None) or force (truthy -> queued, falsy ->
        # paused) the paused state; no-op in any other job state.
        if self.status not in {JOB_PAUSED, JOB_QUEUED}:
            return

        if status is None:
            self.status = JOB_PAUSED if self.status == JOB_QUEUED else JOB_QUEUED
        elif status:
            self.status = JOB_QUEUED
        else:
            self.status = JOB_PAUSED

    def start(self):
        self.status = JOB_QUEUED

    def addLog(self, frames):
        # All frames of one dispatch chunk share a single log file.
        log_name = "_".join(("%06d" % f for f in frames)) + ".log"
        log_path = os.path.join(self.save_path, log_name)

        for number in frames:
            frame = self[number]
            if frame:
                frame.log_path = log_path

    def addFrame(self, frame_number, command):
        frame = MRenderFrame(frame_number, command)
        self.frames.append(frame)
        return frame

    def reset(self, all):
        # Requeue frames: every frame, or only errored ones.
        for f in self.frames:
            f.reset(all)

    def getFrames(self):
        # Collect up to one chunk of queued frames, stamping the
        # dispatch time as a side effect.
        frames = []
        for f in self.frames:
            if f.status == QUEUED:
                self.last_dispatched = time.time()
                frames.append(f)
                if len(frames) >= self.chunks:
                    break

        return frames
+
class MRenderFrame(netrender.model.RenderFrame):
    """Master-side frame: tracks the rendering slave, timing and log."""

    def __init__(self, frame, command):
        super().__init__()
        self.number = frame
        self.slave = None
        self.time = 0
        self.status = QUEUED
        self.command = command

        self.log_path = None

    def reset(self, all):
        """Requeue this frame; only ERROR frames unless *all* is true."""
        if not (all or self.status == ERROR):
            return
        self.log_path = None
        self.slave = None
        self.time = 0
        self.status = QUEUED
+
+
+# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# URL patterns served by the master handler below.
# NOTE(review): `re` is expected to arrive via `from netrender.utils
# import *` above (no direct import visible in this file) -- confirm.
# The literal dots before the file extensions are now escaped; the
# original patterns used a bare "." which matched any character.
file_pattern = re.compile(r"/file_([a-zA-Z0-9]+)_([0-9]+)")
render_pattern = re.compile(r"/render_([a-zA-Z0-9]+)_([0-9]+)\.exr")
thumb_pattern = re.compile(r"/thumb_([a-zA-Z0-9]+)_([0-9]+)\.jpg")
log_pattern = re.compile(r"/log_([a-zA-Z0-9]+)_([0-9]+)\.log")
reset_pattern = re.compile(r"/reset(all|)_([a-zA-Z0-9]+)_([0-9]+)")
cancel_pattern = re.compile(r"/cancel_([a-zA-Z0-9]+)")
pause_pattern = re.compile(r"/pause_([a-zA-Z0-9]+)")
edit_pattern = re.compile(r"/edit_([a-zA-Z0-9]+)")
+
+class RenderHandler(http.server.BaseHTTPRequestHandler):
    def log_message(self, format, *args):
        """Log one request line to stderr, prefixed with a timestamp."""
        # override because the original calls self.address_string(), which
        # is extremely slow due to some timeout..
        sys.stderr.write("[%s] %s\n" % (self.log_date_time_string(), format%args))
+
+ def getInfoMap(self):
+ length = int(self.headers['content-length'])
+
+ if length > 0:
+ msg = str(self.rfile.read(length), encoding='utf8')
+ return json.loads(msg)
+ else:
+ return {}
+
+ def send_head(self, code = http.client.OK, headers = {}, content = "application/octet-stream"):
+ self.send_response(code)
+
+ if code != http.client.OK and content:
+ self.send_header("Content-type", content)
+
+ for key, value in headers.items():
+ self.send_header(key, value)
+
+ self.end_headers()
+
+ def do_HEAD(self):
+
+ if self.path == "/status":
+ job_id = self.headers.get('job-id', "")
+ job_frame = int(self.headers.get('job-frame', -1))
+
+ job = self.server.getJobID(job_id)
+ if job:
+ frame = job[job_frame]
+
+
+ if frame:
+ self.send_head(http.client.OK)
+ else:
+ # no such frame
+ self.send_head(http.client.NO_CONTENT)
+ else:
+ # no such job id
+ self.send_head(http.client.NO_CONTENT)
+
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+
+ def do_GET(self):
+
+ if self.path == "/version":
+ self.send_head()
+ self.server.stats("", "Version check")
+ self.wfile.write(VERSION)
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path.startswith("/render"):
+ match = render_pattern.match(self.path)
+
+ if match:
+ job_id = match.groups()[0]
+ frame_number = int(match.groups()[1])
+
+ job = self.server.getJobID(job_id)
+
+ if job:
+ frame = job[frame_number]
+
+ if frame:
+ if frame.status in (QUEUED, DISPATCHED):
+ self.send_head(http.client.ACCEPTED)
+ elif frame.status == DONE:
+ self.server.stats("", "Sending result to client")
+
+ filename = os.path.join(job.save_path, "%06d.exr" % frame_number)
+
+ f = open(filename, 'rb')
+ self.send_head(content = "image/x-exr")
+ shutil.copyfileobj(f, self.wfile)
+ f.close()
+ elif frame.status == ERROR:
+ self.send_head(http.client.PARTIAL_CONTENT)
+ else:
+ # no such frame
+ self.send_head(http.client.NO_CONTENT)
+ else:
+ # no such job id
+ self.send_head(http.client.NO_CONTENT)
+ else:
+ # invalid url
+ self.send_head(http.client.NO_CONTENT)
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path.startswith("/thumb"):
+ match = thumb_pattern.match(self.path)
+
+ if match:
+ job_id = match.groups()[0]
+ frame_number = int(match.groups()[1])
+
+ job = self.server.getJobID(job_id)
+
+ if job:
+ frame = job[frame_number]
+
+ if frame:
+ if frame.status in (QUEUED, DISPATCHED):
+ self.send_head(http.client.ACCEPTED)
+ elif frame.status == DONE:
+ filename = os.path.join(job.save_path, "%06d.exr" % frame_number)
+
+ thumbname = thumbnail.generate(filename)
+
+ if thumbname:
+ f = open(thumbname, 'rb')
+ self.send_head(content = "image/jpeg")
+ shutil.copyfileobj(f, self.wfile)
+ f.close()
+ else: # thumbnail couldn't be generated
+ self.send_head(http.client.PARTIAL_CONTENT)
+ return
+ elif frame.status == ERROR:
+ self.send_head(http.client.PARTIAL_CONTENT)
+ else:
+ # no such frame
+ self.send_head(http.client.NO_CONTENT)
+ else:
+ # no such job id
+ self.send_head(http.client.NO_CONTENT)
+ else:
+ # invalid url
+ self.send_head(http.client.NO_CONTENT)
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path.startswith("/log"):
+ match = log_pattern.match(self.path)
+
+ if match:
+ job_id = match.groups()[0]
+ frame_number = int(match.groups()[1])
+
+ job = self.server.getJobID(job_id)
+
+ if job:
+ frame = job[frame_number]
+
+ if frame:
+ if not frame.log_path or frame.status in (QUEUED, DISPATCHED):
+ self.send_head(http.client.PROCESSING)
+ else:
+ self.server.stats("", "Sending log to client")
+ f = open(frame.log_path, 'rb')
+
+ self.send_head(content = "text/plain")
+
+ shutil.copyfileobj(f, self.wfile)
+
+ f.close()
+ else:
+ # no such frame
+ self.send_head(http.client.NO_CONTENT)
+ else:
+ # no such job id
+ self.send_head(http.client.NO_CONTENT)
+ else:
+ # invalid URL
+ self.send_head(http.client.NO_CONTENT)
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path == "/status":
+ job_id = self.headers.get('job-id', "")
+ job_frame = int(self.headers.get('job-frame', -1))
+
+ if job_id:
+
+ job = self.server.getJobID(job_id)
+ if job:
+ if job_frame != -1:
+ frame = job[frame]
+
+ if frame:
+ message = frame.serialize()
+ else:
+ # no such frame
+ self.send_heat(http.client.NO_CONTENT)
+ return
+ else:
+ message = job.serialize()
+ else:
+ # no such job id
+ self.send_head(http.client.NO_CONTENT)
+ return
+ else: # status of all jobs
+ message = []
+
+ for job in self.server:
+ message.append(job.serialize())
+
+
+ self.server.stats("", "Sending status")
+ self.send_head()
+ self.wfile.write(bytes(json.dumps(message), encoding='utf8'))
+
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path == "/job":
+ self.server.balance()
+
+ slave_id = self.headers['slave-id']
+
+ slave = self.server.getSeenSlave(slave_id)
+
+ if slave: # only if slave id is valid
+ job, frames = self.server.newDispatch(slave_id)
+
+ if job and frames:
+ for f in frames:
+ print("dispatch", f.number)
+ f.status = DISPATCHED
+ f.slave = slave
+
+ slave.job = job
+ slave.job_frames = [f.number for f in frames]
+
+ self.send_head(headers={"job-id": job.id})
+
+ message = job.serialize(frames)
+
+ self.wfile.write(bytes(json.dumps(message), encoding='utf8'))
+
+ self.server.stats("", "Sending job to slave")
+ else:
+ # no job available, return error code
+ slave.job = None
+ slave.job_frames = []
+
+ self.send_head(http.client.ACCEPTED)
+ else: # invalid slave id
+ self.send_head(http.client.NO_CONTENT)
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path.startswith("/file"):
+ match = file_pattern.match(self.path)
+
+ if match:
+ slave_id = self.headers['slave-id']
+ slave = self.server.getSeenSlave(slave_id)
+
+ if not slave:
+ # invalid slave id
+ print("invalid slave id")
+
+ job_id = match.groups()[0]
+ file_index = int(match.groups()[1])
+
+ job = self.server.getJobID(job_id)
+
+ if job:
+ render_file = job.files[file_index]
+
+ if render_file:
+ self.server.stats("", "Sending file to slave")
+ f = open(render_file.filepath, 'rb')
+
+ self.send_head()
+ shutil.copyfileobj(f, self.wfile)
+
+ f.close()
+ else:
+ # no such file
+ self.send_head(http.client.NO_CONTENT)
+ else:
+ # no such job id
+ self.send_head(http.client.NO_CONTENT)
+ else: # invalid url
+ self.send_head(http.client.NO_CONTENT)
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path == "/slaves":
+ message = []
+
+ self.server.stats("", "Sending slaves status")
+
+ for slave in self.server.slaves:
+ message.append(slave.serialize())
+
+ self.send_head()
+
+ self.wfile.write(bytes(json.dumps(message), encoding='utf8'))
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ else:
+ # hand over the rest to the html section
+ netrender.master_html.get(self)
+
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    def do_POST(self):
        """Handle POST requests from clients and slaves.

        Routes, by path:
          /job             -- client submits a new render job (JSON body)
          /edit_<id>       -- client edits settings of an existing job
          /balance_limit   -- set the limit value of a balancer rule
          /balance_enable  -- enable/disable a balancer rule
          /cancel_<id>     -- cancel one job
          /pause_<id>      -- pause/resume one job
          /clear           -- cancel all jobs
          /reset(all)_<id>_<frame> -- reset a frame or a whole job
          /slave           -- a new slave registers itself
          /log             -- a slave announces log files for frames

        Unknown job/slave ids and malformed urls are answered with
        http.client.NO_CONTENT.
        """
        # ----- /job: new job submission ------------------------------------
        if self.path == "/job":

            # Body is a JSON-serialized netrender.model.RenderJob.
            length = int(self.headers['content-length'])

            job_info = netrender.model.RenderJob.materialize(json.loads(str(self.rfile.read(length), encoding='utf8')))

            job_id = self.server.nextJobID()

            job = MRenderJob(job_id, job_info)

            for frame in job_info.frames:
                # NOTE(review): 'frame' is deliberately rebound from the
                # client-side frame description to the master-side frame object.
                frame = job.addFrame(frame.number, frame.command)

            self.server.addJob(job)

            headers={"job-id": job_id}

            if job.testStart():
                # All files already available: job starts right away.
                self.server.stats("", "New job, started")
                self.send_head(headers=headers, content = None)
            else:
                # Files missing: client must upload them via PUT /file first.
                self.server.stats("", "New job, missing files (%i total)" % len(job.files))
                self.send_head(http.client.ACCEPTED, headers=headers)
        # ----- /edit: change settings of an existing job --------------------
        elif self.path.startswith("/edit"):
            match = edit_pattern.match(self.path)

            if match:
                job_id = match.groups()[0]

                job = self.server.getJobID(job_id)

                if job:
                    info_map = self.getInfoMap()

                    job.edit(info_map)
                    self.send_head(content = None)
                else:
                    # no such job id
                    self.send_head(http.client.NO_CONTENT)
            else:
                # invalid url
                self.send_head(http.client.NO_CONTENT)
        # ----- /balance_limit: set limits on balancer rules -----------------
        elif self.path == "/balance_limit":
            info_map = self.getInfoMap()
            for rule_id, limit in info_map.items():
                # NOTE(review): bare except silently drops malformed limit
                # values; deliberate best-effort behavior.
                try:
                    rule = self.server.balancer.ruleByID(rule_id)
                    if rule:
                        rule.setLimit(limit)
                except:
                    pass # invalid type

            self.send_head(content = None)
        # ----- /balance_enable: toggle balancer rules -----------------------
        elif self.path == "/balance_enable":
            info_map = self.getInfoMap()
            for rule_id, enabled in info_map.items():
                rule = self.server.balancer.ruleByID(rule_id)
                if rule:
                    rule.enabled = enabled

            self.send_head(content = None)
        # ----- /cancel: cancel one job --------------------------------------
        elif self.path.startswith("/cancel"):
            match = cancel_pattern.match(self.path)

            if match:
                info_map = self.getInfoMap()
                # "clear" asks the master to also delete the job's files.
                clear = info_map.get("clear", False)

                job_id = match.groups()[0]

                job = self.server.getJobID(job_id)

                if job:
                    self.server.stats("", "Cancelling job")
                    self.server.removeJob(job, clear)
                    self.send_head(content = None)
                else:
                    # no such job id
                    self.send_head(http.client.NO_CONTENT)
            else:
                # invalid url
                self.send_head(http.client.NO_CONTENT)
        # ----- /pause: pause or resume one job ------------------------------
        elif self.path.startswith("/pause"):
            match = pause_pattern.match(self.path)

            if match:
                info_map = self.getInfoMap()
                # status None toggles; otherwise sets the given paused state.
                status = info_map.get("status", None)

                job_id = match.groups()[0]

                job = self.server.getJobID(job_id)

                if job:
                    self.server.stats("", "Pausing job")
                    job.pause(status)
                    self.send_head(content = None)
                else:
                    # no such job id
                    self.send_head(http.client.NO_CONTENT)
            else:
                # invalid url
                self.send_head(http.client.NO_CONTENT)
        # ----- /clear: cancel all jobs --------------------------------------
        elif self.path == "/clear":
            # cancel all jobs
            info_map = self.getInfoMap()
            clear = info_map.get("clear", False)

            self.server.stats("", "Clearing jobs")
            self.server.clear(clear)

            self.send_head(content = None)
        # ----- /reset: reset one frame or a whole job -----------------------
        elif self.path.startswith("/reset"):
            match = reset_pattern.match(self.path)

            if match:
                # NOTE(review): 'all' shadows the builtin; it means
                # "reset every frame" vs. "only errored frames".
                all = match.groups()[0] == 'all'
                job_id = match.groups()[1]
                job_frame = int(match.groups()[2])

                job = self.server.getJobID(job_id)

                if job:
                    if job_frame != 0:

                        # frame number 0 is used as "whole job" sentinel
                        frame = job[job_frame]
                        if frame:
                            self.server.stats("", "Reset job frame")
                            frame.reset(all)
                            self.send_head(content = None)
                        else:
                            # no such frame
                            self.send_head(http.client.NO_CONTENT)

                    else:
                        self.server.stats("", "Reset job")
                        job.reset(all)
                        self.send_head(content = None)

                else: # job not found
                    self.send_head(http.client.NO_CONTENT)
            else: # invalid url
                self.send_head(http.client.NO_CONTENT)
        # ----- /slave: a new slave registers itself -------------------------
        elif self.path == "/slave":
            length = int(self.headers['content-length'])
            # NOTE(review): read but never used in this handler.
            job_frame_string = self.headers['job-frame']

            self.server.stats("", "New slave connected")

            slave_info = netrender.model.RenderSlave.materialize(json.loads(str(self.rfile.read(length), encoding='utf8')), cache = False)

            slave_id = self.server.addSlave(slave_info.name, self.client_address, slave_info.stats)

            # The generated id is returned to the slave in a response header.
            self.send_head(headers = {"slave-id": slave_id}, content = None)
        # ----- /log: a slave announces log files for frames -----------------
        elif self.path == "/log":
            length = int(self.headers['content-length'])

            log_info = netrender.model.LogFile.materialize(json.loads(str(self.rfile.read(length), encoding='utf8')))

            slave_id = log_info.slave_id

            slave = self.server.getSeenSlave(slave_id)

            if slave: # only if slave id is valid
                job = self.server.getJobID(log_info.job_id)

                if job:
                    self.server.stats("", "Log announcement")
                    job.addLog(log_info.frames)
                    self.send_head(content = None)
                else:
                    # no such job id
                    self.send_head(http.client.NO_CONTENT)
            else: # invalid slave id
                self.send_head(http.client.NO_CONTENT)
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
    def do_PUT(self):
        """Handle PUT requests (file and result uploads).

        Routes, by path:
          /file_<job>_<index> -- client uploads a job file
          /render             -- slave uploads a finished frame result (EXR)
          /thumb              -- slave uploads a frame thumbnail (JPEG)
          /log_<job>_<frame>  -- slave appends data to a frame's log file

        Unknown job/slave/frame ids and malformed urls are answered with
        http.client.NO_CONTENT.
        """
        # ----- /file: receive a job file from the client --------------------
        if self.path.startswith("/file"):
            match = file_pattern.match(self.path)

            if match:
                self.server.stats("", "Receiving job")

                job_id = match.groups()[0]
                file_index = int(match.groups()[1])

                job = self.server.getJobID(job_id)

                if job:

                    render_file = job.files[file_index]

                    if render_file:
                        main_file = job.files[0].filepath # filename of the first file

                        main_path, main_name = os.path.split(main_file)

                        if file_index > 0:
                            # dependency file: keep its path relative to the main file
                            file_path = prefixPath(job.save_path, render_file.filepath, main_path)
                        else:
                            # main file goes directly into the job directory
                            file_path = os.path.join(job.save_path, main_name)

                        # add same temp file + renames as slave

                        # stream the request body straight to disk
                        f = open(file_path, "wb")
                        shutil.copyfileobj(self.rfile, f)
                        f.close()

                        render_file.filepath = file_path # set the new path

                        if job.testStart():
                            self.server.stats("", "File upload, starting job")
                            self.send_head(content = None)
                        else:
                            # more files still expected from the client
                            self.server.stats("", "File upload, file missings")
                            self.send_head(http.client.ACCEPTED)
                    else: # invalid file
                        print("file not found", job_id, file_index)
                        self.send_head(http.client.NO_CONTENT)
                else: # job not found
                    print("job not found", job_id, file_index)
                    self.send_head(http.client.NO_CONTENT)
            else: # invalid url
                print("no match")
                self.send_head(http.client.NO_CONTENT)
        # ----- /render: receive a rendered frame from a slave ---------------
        elif self.path == "/render":
            self.server.stats("", "Receiving render result")

            slave_id = self.headers['slave-id']

            slave = self.server.getSeenSlave(slave_id)

            if slave: # only if slave id is valid
                job_id = self.headers['job-id']

                job = self.server.getJobID(job_id)

                if job:
                    job_frame = int(self.headers['job-frame'])
                    job_result = int(self.headers['job-result'])
                    job_time = float(self.headers['job-time'])

                    frame = job[job_frame]

                    if frame:
                        # acknowledge before reading the (possibly large) body
                        self.send_head(content = None)

                        if job.hasRenderResult():
                            if job_result == DONE:
                                # store the result as a zero-padded EXR file
                                f = open(os.path.join(job.save_path, "%06d.exr" % job_frame), 'wb')
                                shutil.copyfileobj(self.rfile, f)
                                f.close()

                            elif job_result == ERROR:
                                # blacklist slave on this job on error
                                # slaves might already be in blacklist if errors on the whole chunk
                                if not slave.id in job.blacklist:
                                    job.blacklist.append(slave.id)

                        slave.finishedFrame(job_frame)

                        frame.status = job_result
                        frame.time = job_time

                        job.testFinished()

                    else: # frame not found
                        self.send_head(http.client.NO_CONTENT)
                else: # job not found
                    self.send_head(http.client.NO_CONTENT)
            else: # invalid slave id
                self.send_head(http.client.NO_CONTENT)
        # ----- /thumb: receive a frame thumbnail from a slave ---------------
        elif self.path == "/thumb":
            self.server.stats("", "Receiving thumbnail result")

            slave_id = self.headers['slave-id']

            slave = self.server.getSeenSlave(slave_id)

            if slave: # only if slave id is valid
                job_id = self.headers['job-id']

                job = self.server.getJobID(job_id)

                if job:
                    job_frame = int(self.headers['job-frame'])

                    frame = job[job_frame]

                    if frame:
                        self.send_head(content = None)

                        if job.hasRenderResult():
                            # thumbnail filename mirrors the EXR result's number
                            f = open(os.path.join(job.save_path, "%06d.jpg" % job_frame), 'wb')
                            shutil.copyfileobj(self.rfile, f)
                            f.close()

                    else: # frame not found
                        self.send_head(http.client.NO_CONTENT)
                else: # job not found
                    self.send_head(http.client.NO_CONTENT)
            else: # invalid slave id
                self.send_head(http.client.NO_CONTENT)
        # ----- /log: append data to a frame's log file ----------------------
        elif self.path.startswith("/log"):
            self.server.stats("", "Receiving log file")

            match = log_pattern.match(self.path)

            if match:
                job_id = match.groups()[0]

                job = self.server.getJobID(job_id)

                if job:
                    job_frame = int(match.groups()[1])

                    frame = job[job_frame]

                    if frame and frame.log_path:
                        self.send_head(content = None)

                        # append mode: logs may arrive in several chunks
                        f = open(frame.log_path, 'ab')
                        shutil.copyfileobj(self.rfile, f)
                        f.close()

                        # side effect only: refresh the slave's last-seen time
                        self.server.getSeenSlave(self.headers['slave-id'])

                    else: # frame not found
                        self.send_head(http.client.NO_CONTENT)
                else: # job not found
                    self.send_head(http.client.NO_CONTENT)
            else: # invalid url
                self.send_head(http.client.NO_CONTENT)
+
class RenderMasterServer(socketserver.ThreadingMixIn, http.server.HTTPServer):
    """Threaded HTTP server holding the master's job and slave state."""

    def __init__(self, address, handler_class, path, subdir=True):
        super().__init__(address, handler_class)

        self.jobs = []
        self.jobs_map = {}
        self.slaves = []
        self.slaves_map = {}
        self.job_id = 0

        # By default each master process gets its own storage subdirectory;
        # a restored master reuses the saved path as-is.
        self.path = os.path.join(path, "master_" + str(os.getpid())) if subdir else path

        self.slave_timeout = 5 # 5 mins: need a parameter for that

        self.balancer = netrender.balancing.Balancer()
        self.balancer.addRule(netrender.balancing.RatingUsageByCategory(self.getJobs))
        self.balancer.addRule(netrender.balancing.RatingUsage())
        self.balancer.addException(netrender.balancing.ExcludeQueuedEmptyJob())
        self.balancer.addException(netrender.balancing.ExcludeSlavesLimit(self.countJobs, self.countSlaves, limit = 0.9))
        self.balancer.addPriority(netrender.balancing.NewJobPriority())
        self.balancer.addPriority(netrender.balancing.MinimumTimeBetweenDispatchPriority(limit = 2))

        if not os.path.exists(self.path):
            os.mkdir(self.path)

    def restore(self, jobs, slaves, balancer = None):
        """Reload previously saved jobs and slaves, and optionally a balancer."""
        self.jobs = jobs
        self.jobs_map = {job.id: job for job in jobs}

        # Keep the id counter ahead of every restored job id.
        for job in jobs:
            self.job_id = max(self.job_id, int(job.id))

        self.slaves = slaves
        for slave in slaves:
            self.slaves_map[slave.id] = slave

        if balancer:
            self.balancer = balancer


    def nextJobID(self):
        """Return a fresh, unique job id as a string."""
        self.job_id += 1
        return str(self.job_id)

    def addSlave(self, name, address, stats):
        """Register a new slave and return its generated id."""
        new_slave = MRenderSlave(name, address, stats)
        self.slaves.append(new_slave)
        self.slaves_map[new_slave.id] = new_slave
        return new_slave.id

    def removeSlave(self, slave):
        """Forget a slave entirely."""
        self.slaves.remove(slave)
        self.slaves_map.pop(slave.id)

    def getSlave(self, slave_id):
        """Return the slave for this id, or None."""
        return self.slaves_map.get(slave_id)

    def getSeenSlave(self, slave_id):
        """Look up a slave and refresh its last-seen timestamp."""
        slave = self.getSlave(slave_id)
        if slave is not None:
            slave.seen()
        return slave

    def timeoutSlaves(self):
        """Drop slaves not seen within the timeout, failing their frames."""
        now = time.time()
        stale = [s for s in self.slaves if (now - s.last_seen) / 60 > self.slave_timeout]

        for slave in stale:
            if slave.job:
                # Frames the dead slave was rendering are marked as errored
                # so they can be re-dispatched.
                for frame_number in slave.job_frames:
                    slave.job[frame_number].status = ERROR
            self.removeSlave(slave)

    def updateUsage(self):
        """Exponentially decay job usage and credit currently busy slaves."""
        blend = 0.5
        for job in self.jobs:
            job.usage *= (1 - blend)

        if self.slaves:
            share = blend / self.countSlaves()
            for slave in self.slaves:
                if slave.job:
                    slave.job.usage += share


    def clear(self, clear_files = False):
        """Remove every job, optionally deleting their files on disk."""
        # Iterate over a copy since removeJob mutates self.jobs.
        for job in self.jobs[:]:
            self.removeJob(job, clear_files)

    def balance(self):
        """Run the balancer over the current job list."""
        self.balancer.balance(self.jobs)

    def getJobs(self):
        """Return the live job list."""
        return self.jobs

    def countJobs(self, status = JOB_QUEUED):
        """Count jobs currently in the given status."""
        return sum(1 for job in self.jobs if job.status == status)

    def countSlaves(self):
        """Return the number of registered slaves."""
        return len(self.slaves)

    def removeJob(self, job, clear_files = False):
        """Forget a job; optionally delete its directory, detach its slaves."""
        self.jobs.remove(job)
        self.jobs_map.pop(job.id)

        if clear_files:
            shutil.rmtree(job.save_path)

        for slave in self.slaves:
            if slave.job == job:
                slave.job = None
                slave.job_frames = []

    def addJob(self, job):
        """Register a job, create its storage directory, and persist it."""
        self.jobs.append(job)
        self.jobs_map[job.id] = job

        # create job directory
        job.save_path = os.path.join(self.path, "job_" + job.id)
        if not os.path.exists(job.save_path):
            os.mkdir(job.save_path)

        job.save()

    def getJobID(self, id):
        """Return the job for this id, or None."""
        return self.jobs_map.get(id)

    def __iter__(self):
        return iter(self.jobs)

    def newDispatch(self, slave_id):
        """Pick the first dispatchable job for this slave, or (None, None)."""
        for job in self.jobs:
            if not self.balancer.applyExceptions(job) and slave_id not in job.blacklist:
                return job, job.getFrames()
        return None, None
+
+def clearMaster(path):
+ shutil.rmtree(path)
+
def createMaster(address, clear, path):
    """Create a master server, resuming saved state from disk when available.

    If `clear` is False and a saved state file exists under `path`, the
    previous jobs/slaves are restored (and the saved storage path is reused);
    otherwise a fresh master is created.
    """
    filepath = os.path.join(path, "blender_master.data")

    if clear or not os.path.exists(filepath):
        # fresh start: new per-process storage subdirectory
        return RenderMasterServer(address, RenderHandler, path)

    print("loading saved master:", filepath)
    # NOTE(review): state is unpickled from a local file written by
    # saveMaster(); assumed trusted since it lives in the master's own
    # storage directory — confirm nothing external can write it.
    with open(filepath, 'rb') as f:
        path, jobs, slaves = pickle.load(f)

    httpd = RenderMasterServer(address, RenderHandler, path, subdir=False)
    httpd.restore(jobs, slaves)
    return httpd
+
+def saveMaster(path, httpd):
+ filepath = os.path.join(path, "blender_master.data")
+
+ with open(filepath, 'wb') as f:
+ pickle.dump((httpd.path, httpd.jobs, httpd.slaves), f, pickle.HIGHEST_PROTOCOL)
+
def runMaster(address, broadcast, clear, path, update_stats, test_break):
    """Run the master's main loop until test_break() returns True.

    address      -- (host, port) to bind the HTTP server to
    broadcast    -- if True, advertise the master's port on the LAN via UDP
    clear        -- if True, wipe state on exit instead of saving it
    path         -- storage directory for jobs and saved state
    update_stats -- callback used by the server to report status strings
    test_break   -- callable polled each iteration to request shutdown
    """
    httpd = createMaster(address, clear, path)
    httpd.timeout = 1
    httpd.stats = update_stats

    if broadcast:
        # UDP socket used to advertise the master's port on the local network.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)

    # Start 2s in the past so housekeeping runs on the first iteration.
    start_time = time.time() - 2

    while not test_break():
        try:
            httpd.handle_request()
        except select.error:
            # interrupted poll/select; just retry
            pass

        # Periodic housekeeping roughly every 2 seconds.
        if time.time() - start_time >= 2: # need constant here
            httpd.timeoutSlaves()

            httpd.updateUsage()

            if broadcast:
                print("broadcasting address")
                # Slaves listen on UDP port 8000 for the master's HTTP port.
                s.sendto(bytes("%i" % address[1], encoding='utf8'), 0, ('<broadcast>', 8000))
            start_time = time.time()

    httpd.server_close()
    if clear:
        clearMaster(httpd.path)
    else:
        saveMaster(path, httpd)
+
diff --git a/netrender/master_html.py b/netrender/master_html.py
new file mode 100644
index 00000000..da975184
--- /dev/null
+++ b/netrender/master_html.py
@@ -0,0 +1,316 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+import os
+import re
+import shutil
+from netrender.utils import *
+import netrender.model
+
+src_folder = os.path.split(__file__)[0]
+
def get(handler):
    """Serve the master's HTML monitoring interface for a request handler.

    Routes, by handler.path:
      /html/netrender.js, /html/netrender.css -- static assets
      /html or /    -- overview page (jobs, slaves, balancer configuration)
      /html/job<id> -- detail page for one job

    All output is written directly to handler.wfile via small nested
    HTML-emitting helpers.
    """
    def output(text):
        # Encode a chunk of HTML and write it to the response body.
        handler.wfile.write(bytes(text, encoding='utf8'))

    def head(title, refresh = False):
        # Emit the HTML preamble; optionally auto-refresh every 5 seconds.
        output("<html><head>")
        if refresh:
            output("<meta http-equiv='refresh' content=5>")
        output("<script src='/html/netrender.js' type='text/javascript'></script>")
#        output("<script src='/html/json2.js' type='text/javascript'></script>")
        output("<title>")
        output(title)
        output("</title></head><body>")
        output("<link rel='stylesheet' href='/html/netrender.css' type='text/css'>")


    def link(text, url, script=""):
        # Return an <a> element as a string.
        return "<a href='%s' %s>%s</a>" % (url, script, text)

    def tag(name, text, attr=""):
        # Return an arbitrary element as a string.
        return "<%s %s>%s</%s>" % (name, attr, text, name)

    def startTable(border=1, class_style = None, caption = None):
        # Open a <table>, optionally with a CSS class and a caption.
        output("<table border='%i'" % border)

        if class_style:
            output(" class='%s'" % class_style)

        output(">")

        if caption:
            output("<caption>%s</caption>" % caption)

    def headerTable(*headers):
        # Emit the table header row; one cell per argument.
        output("<thead><tr>")

        for c in headers:
            output("<td>" + c + "</td>")

        output("</tr></thead>")

    # NOTE(review): parameter 'id' shadows the builtin.
    def rowTable(*data, id = None, class_style = None, extra = None):
        # Emit one table row; each positional argument becomes a cell.
        output("<tr")

        if id:
            output(" id='%s'" % id)

        if class_style:
            output(" class='%s'" % class_style)

        if extra:
            output(" %s" % extra)

        output(">")

        for c in data:
            output("<td>" + str(c) + "</td>")

        output("</tr>")

    def endTable():
        output("</table>")

    def checkbox(title, value, script=""):
        # Return a checkbox input, checked when value is truthy.
        return """<input type="checkbox" title="%s" %s %s>""" % (title, "checked" if value else "", ("onclick=\"%s\"" % script) if script else "")

    # ----- static assets ---------------------------------------------------
    if handler.path == "/html/netrender.js":
        f = open(os.path.join(src_folder, "netrender.js"), 'rb')

        handler.send_head(content = "text/javascript")
        shutil.copyfileobj(f, handler.wfile)

        f.close()
    elif handler.path == "/html/netrender.css":
        f = open(os.path.join(src_folder, "netrender.css"), 'rb')

        handler.send_head(content = "text/css")
        shutil.copyfileobj(f, handler.wfile)

        f.close()
    # ----- overview page ---------------------------------------------------
    elif handler.path == "/html" or handler.path == "/":
        handler.send_head(content = "text/html")
        head("NetRender", refresh = True)

        output("<h2>Jobs</h2>")

        startTable()
        # NOTE(review): "priority" appears twice — the second column holds
        # the balancer's applyPriorities() verdict, not the numeric priority.
        headerTable(
            "&nbsp;",
            "id",
            "name",
            "category",
            "type",
            "chunks",
            "priority",
            "usage",
            "wait",
            "status",
            "length",
            "done",
            "dispatched",
            "error",
            "priority",
            "exception"
        )

        handler.server.balance()

        for job in handler.server.jobs:
            results = job.framesStatus()
            rowTable(
                """<button title="cancel job" onclick="cancel_job('%s');">X</button>""" % job.id +
                """<button title="pause job" onclick="request('/pause_%s', null);">P</button>""" % job.id +
                """<button title="reset all frames" onclick="request('/resetall_%s_0', null);">R</button>""" % job.id,
                job.id,
                link(job.name, "/html/job" + job.id),
                job.category if job.category else "<i>None</i>",
                netrender.model.JOB_TYPES[job.type],
                str(job.chunks) +
                """<button title="increase chunks size" onclick="request('/edit_%s', &quot;{'chunks': %i}&quot;);">+</button>""" % (job.id, job.chunks + 1) +
                """<button title="decrease chunks size" onclick="request('/edit_%s', &quot;{'chunks': %i}&quot;);" %s>-</button>""" % (job.id, job.chunks - 1, "disabled=True" if job.chunks == 1 else ""),
                str(job.priority) +
                """<button title="increase priority" onclick="request('/edit_%s', &quot;{'priority': %i}&quot;);">+</button>""" % (job.id, job.priority + 1) +
                """<button title="decrease priority" onclick="request('/edit_%s', &quot;{'priority': %i}&quot;);" %s>-</button>""" % (job.id, job.priority - 1, "disabled=True" if job.priority == 1 else ""),
                "%0.1f%%" % (job.usage * 100),
                "%is" % int(time.time() - job.last_dispatched),
                job.statusText(),
                len(job),
                results[DONE],
                results[DISPATCHED],
                str(results[ERROR]) +
                """<button title="reset error frames" onclick="request('/reset_%s_0', null);" %s>R</button>""" % (job.id, "disabled=True" if not results[ERROR] else ""),
                "yes" if handler.server.balancer.applyPriorities(job) else "no",
                "yes" if handler.server.balancer.applyExceptions(job) else "no"
            )

        endTable()

        output("<h2>Slaves</h2>")

        startTable()
        headerTable("name", "address", "last seen", "stats", "job")

        for slave in handler.server.slaves:
            rowTable(slave.name, slave.address[0], time.ctime(slave.last_seen), slave.stats, link(slave.job.name, "/html/job" + slave.job.id) if slave.job else "None")

        endTable()

        output("<h2>Configuration</h2>")

        output("""<button title="remove all jobs" onclick="clear_jobs();">CLEAR JOB LIST</button>""")

        startTable(caption = "Rules", class_style = "rules")

        headerTable("type", "enabled", "description", "limit")

        # Three rule categories rendered with identical row layout.
        for rule in handler.server.balancer.rules:
            rowTable(
                "rating",
                checkbox("", rule.enabled, "balance_enable('%s', '%s')" % (rule.id(), str(not rule.enabled).lower())),
                rule,
                rule.str_limit() +
                """<button title="edit limit" onclick="balance_edit('%s', '%s');">edit</button>""" % (rule.id(), str(rule.limit)) if hasattr(rule, "limit") else "&nbsp;"
            )

        for rule in handler.server.balancer.priorities:
            rowTable(
                "priority",
                checkbox("", rule.enabled, "balance_enable('%s', '%s')" % (rule.id(), str(not rule.enabled).lower())),
                rule,
                rule.str_limit() +
                """<button title="edit limit" onclick="balance_edit('%s', '%s');">edit</button>""" % (rule.id(), str(rule.limit)) if hasattr(rule, "limit") else "&nbsp;"
            )

        for rule in handler.server.balancer.exceptions:
            rowTable(
                "exception",
                checkbox("", rule.enabled, "balance_enable('%s', '%s')" % (rule.id(), str(not rule.enabled).lower())),
                rule,
                rule.str_limit() +
                """<button title="edit limit" onclick="balance_edit('%s', '%s');">edit</button>""" % (rule.id(), str(rule.limit)) if hasattr(rule, "limit") else "&nbsp;"
            )

        endTable()

        output("</body></html>")

    # ----- job detail page -------------------------------------------------
    elif handler.path.startswith("/html/job"):
        handler.send_head(content = "text/html")
        # everything after the "/html/job" prefix is the job id
        job_id = handler.path[9:]

        head("NetRender")

        job = handler.server.getJobID(job_id)

        if job:
            output("<h2>Render Information</h2>")

            job.initInfo()

            startTable()

            rowTable("resolution", "%ix%i at %i%%" % job.resolution)

            endTable()


            if job.type == netrender.model.JOB_BLENDER:
                output("<h2>Files</h2>")

                startTable()
                headerTable("path")

                tot_cache = 0
                tot_fluid = 0

                rowTable(job.files[0].filepath)
                rowTable("Other Files", class_style = "toggle", extra = "onclick='toggleDisplay(&quot;.other&quot;, &quot;none&quot;, &quot;table-row&quot;)'")

                # Bucket files: physics caches, fluid bakes, everything else.
                for file in job.files:
                    if file.filepath.endswith(".bphys"):
                        tot_cache += 1
                    elif file.filepath.endswith(".bobj.gz") or file.filepath.endswith(".bvel.gz"):
                        tot_fluid += 1
                    else:
                        if file != job.files[0]:
                            rowTable(file.filepath, class_style = "other")

                if tot_cache > 0:
                    rowTable("%i physic cache files" % tot_cache, class_style = "toggle", extra = "onclick='toggleDisplay(&quot;.cache&quot;, &quot;none&quot;, &quot;table-row&quot;)'")
                    for file in job.files:
                        if file.filepath.endswith(".bphys"):
                            rowTable(os.path.split(file.filepath)[1], class_style = "cache")

                if tot_fluid > 0:
                    rowTable("%i fluid bake files" % tot_fluid, class_style = "toggle", extra = "onclick='toggleDisplay(&quot;.fluid&quot;, &quot;none&quot;, &quot;table-row&quot;)'")
                    for file in job.files:
                        if file.filepath.endswith(".bobj.gz") or file.filepath.endswith(".bvel.gz"):
                            rowTable(os.path.split(file.filepath)[1], class_style = "fluid")

                endTable()
            elif job.type == netrender.model.JOB_VCS:
                output("<h2>Versioning</h2>")

                startTable()

                rowTable("System", job.version_info.system.name)
                rowTable("Remote Path", job.version_info.rpath)
                rowTable("Working Path", job.version_info.wpath)
                rowTable("Revision", job.version_info.revision)
                rowTable("Render File", job.files[0].filepath)

                endTable()

            if job.blacklist:
                output("<h2>Blacklist</h2>")

                startTable()
                headerTable("name", "address")

                for slave_id in job.blacklist:
                    slave = handler.server.slaves_map.get(slave_id, None)
                    if slave:
                        rowTable(slave.name, slave.address[0])

                endTable()

            output("<h2>Frames</h2>")

            startTable()
            headerTable("no", "status", "render time", "slave", "log", "result", "")

            for frame in job.frames:
                rowTable(
                    frame.number,
                    frame.statusText(),
                    "%.1fs" % frame.time,
                    frame.slave.name if frame.slave else "&nbsp;",
                    link("view log", logURL(job_id, frame.number)) if frame.log_path else "&nbsp;",
                    link("view result", renderURL(job_id, frame.number)) + " [" +
                    tag("span", "show", attr="class='thumb' onclick='showThumb(%s, %i)'" % (job.id, frame.number)) + "]" if frame.status == DONE else "&nbsp;",
                    "<img name='thumb%i' title='hide thumbnails' src='' class='thumb' onclick='showThumb(%s, %i)'>" % (frame.number, job.id, frame.number)
                )

            endTable()
        else:
            output("no such job")

        output("</body></html>")
+
diff --git a/netrender/model.py b/netrender/model.py
new file mode 100644
index 00000000..5fc0bc2a
--- /dev/null
+++ b/netrender/model.py
@@ -0,0 +1,360 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+import sys, os
+import http, http.client, http.server, urllib
+import subprocess, shutil, time, hashlib
+
+import netrender.versioning as versioning
+from netrender.utils import *
+
class LogFile:
    """Identifies the log produced by one slave for a set of frames of a job.

    Serialized to/from a plain dict for JSON transport between master,
    slaves and clients.
    """

    def __init__(self, job_id = 0, slave_id = 0, frames = None):
        # The original default 'frames = []' is the classic mutable-default
        # pitfall: one shared list for every LogFile created without an
        # explicit argument. Use None as sentinel and allocate per instance.
        self.job_id = job_id
        self.slave_id = slave_id
        self.frames = frames if frames is not None else []

    def serialize(self):
        """Return a JSON-compatible dict describing this log file."""
        return {
            "job_id": self.job_id,
            "slave_id": self.slave_id,
            "frames": self.frames
            }

    @staticmethod
    def materialize(data):
        """Rebuild a LogFile from a serialized dict; returns None for falsy input."""
        if not data:
            return None

        logfile = LogFile()
        logfile.job_id = data["job_id"]
        logfile.slave_id = data["slave_id"]
        logfile.frames = data["frames"]

        return logfile
+
class RenderSlave:
    """One render slave (worker machine) known to the master.

    Serialized to/from plain dicts for JSON transport.
    """
    # Process-wide cache of materialized slaves, keyed by slave id, so
    # repeated status responses reuse (and update into) the same objects.
    _slave_map = {}

    def __init__(self):
        self.id = ""            # unique id assigned by the master
        self.name = ""          # host name of the slave
        self.address = ("",0)   # (host, port) tuple
        self.stats = ""         # platform/system description string
        self.total_done = 0     # frames rendered successfully
        self.total_error = 0    # frames that ended in error
        self.last_seen = 0.0    # timestamp of last contact with the master

    def serialize(self):
        """Return a JSON-compatible dict describing this slave."""
        return {
            "id": self.id,
            "name": self.name,
            "address": self.address,
            "stats": self.stats,
            "total_done": self.total_done,
            "total_error": self.total_error,
            "last_seen": self.last_seen
            }

    @staticmethod
    def materialize(data, cache = True):
        """Rebuild a RenderSlave from a serialized dict.

        Returns None for falsy input. When cache is True, a slave whose id
        is already known is returned from the shared _slave_map, and newly
        built slaves are stored into it.
        """
        if not data:
            return None

        slave_id = data["id"]

        if cache and slave_id in RenderSlave._slave_map:
            return RenderSlave._slave_map[slave_id]

        slave = RenderSlave()
        slave.id = slave_id
        slave.name = data["name"]
        slave.address = data["address"]
        slave.stats = data["stats"]
        slave.total_done = data["total_done"]
        slave.total_error = data["total_error"]
        slave.last_seen = data["last_seen"]

        if cache:
            RenderSlave._slave_map[slave_id] = slave

        return slave
+
+JOB_BLENDER = 1
+JOB_PROCESS = 2
+JOB_VCS = 3
+
+JOB_TYPES = {
+ JOB_BLENDER: "Blender",
+ JOB_PROCESS: "Process",
+ JOB_VCS: "Versioned",
+ }
+
class VersioningInfo:
    """Describes how a JOB_VCS job's files are fetched from version control."""

    def __init__(self, info = None):
        # NOTE(review): the 'info' parameter is accepted but never used —
        # confirm whether it was meant to pre-populate the fields.
        self._system = None   # backend object, set via the 'system' property
        self.wpath = ""       # working copy path
        self.rpath = ""       # remote repository path
        self.revision = ""    # revision to check out

    @property
    def system(self):
        # The VCS backend object resolved from its name (see setter).
        return self._system

    @system.setter
    def system(self, value):
        # 'value' is the backend name, looked up in versioning.SYSTEMS.
        self._system = versioning.SYSTEMS[value]

    def update(self):
        """Bring the working copy up to date using the configured backend."""
        self.system.update(self)

    def serialize(self):
        """Return a JSON-compatible dict; the backend is stored by name."""
        return {
            "wpath": self.wpath,
            "rpath": self.rpath,
            "revision": self.revision,
            "system": self.system.name
            }

    @staticmethod
    def generate(system, path):
        """Build a VersioningInfo for 'path', querying the backend for the
        remote path and the current revision."""
        vs = VersioningInfo()
        vs.wpath = path
        vs.system = system

        vs.rpath = vs.system.path(path)
        vs.revision = vs.system.revision(path)

        return vs


    @staticmethod
    def materialize(data):
        """Rebuild a VersioningInfo from a serialized dict; None for falsy input."""
        if not data:
            return None

        vs = VersioningInfo()
        vs.wpath = data["wpath"]
        vs.rpath = data["rpath"]
        vs.revision = data["revision"]
        vs.system = data["system"]

        return vs
+
+
class RenderFile:
    """One file belonging to a job (blend file, library, texture, cache...)."""

    def __init__(self, filepath = "", index = 0, start = -1, end = -1, signature=0):
        self.filepath = filepath        # path where the file currently lives
        self.original_path = filepath   # path as it was on the submitting client
        self.signature = signature      # content hash (None when sent unsigned)
        self.index = index              # position in the job's file list
        # Frame range this file applies to; -1/-1 means all frames.
        self.start = start
        self.end = end

    def serialize(self):
        """Return a JSON-compatible dict describing this file."""
        return {
            "filepath": self.filepath,
            "original_path": self.original_path,
            "index": self.index,
            "start": self.start,
            "end": self.end,
            "signature": self.signature
            }

    @staticmethod
    def materialize(data):
        """Rebuild a RenderFile from a serialized dict; None for falsy input."""
        if not data:
            return None

        rfile = RenderFile(data["filepath"], data["index"], data["start"], data["end"], data["signature"])
        rfile.original_path = data["original_path"]

        return rfile
+
class RenderJob:
    """A render job: a set of files plus frames to render, with status info.

    Shared between master, slaves and clients; serialized to JSON for
    transport. Status constants (JOB_WAITING, QUEUED, DONE, ...) come from
    the star import of netrender.utils at the top of this module.
    """

    def __init__(self, job_info = None):
        self.id = ""                # job id assigned by the master
        self.type = JOB_BLENDER     # one of the JOB_* constants above
        self.name = ""
        self.category = "None"
        self.status = JOB_WAITING
        self.files = []             # list of RenderFile
        self.chunks = 0             # frames dispatched per chunk
        self.priority = 0
        self.blacklist = []         # slave ids excluded from this job

        self.version_info = None    # VersioningInfo, for JOB_VCS jobs only

        self.resolution = None      # presumably (x, y, percentage) — set elsewhere

        self.usage = 0.0
        self.last_dispatched = 0.0
        self.frames = []            # list of RenderFrame

        # Copy client-editable settings from a template job, if given.
        if job_info:
            self.type = job_info.type
            self.name = job_info.name
            self.category = job_info.category
            self.status = job_info.status
            self.files = job_info.files
            self.chunks = job_info.chunks
            self.priority = job_info.priority
            self.blacklist = job_info.blacklist
            self.version_info = job_info.version_info

    def hasRenderResult(self):
        """True when the job produces downloadable render results."""
        return self.type in (JOB_BLENDER, JOB_VCS)

    def rendersWithBlender(self):
        """True when frames are rendered by a blender process."""
        return self.type in (JOB_BLENDER, JOB_VCS)

    def addFile(self, file_path, start=-1, end=-1, signed=True):
        """Append a RenderFile, hashing its content unless signed is False."""
        if signed:
            signature = hashFile(file_path)
        else:
            signature = None
        self.files.append(RenderFile(file_path, len(self.files), start, end, signature))

    def addFrame(self, frame_number, command = ""):
        """Append and return a new RenderFrame for frame_number."""
        frame = RenderFrame(frame_number, command)
        self.frames.append(frame)
        return frame

    def __len__(self):
        # A job's length is its frame count.
        return len(self.frames)

    def countFrames(self, status=QUEUED):
        """Number of frames currently in the given status."""
        total = 0
        for f in self.frames:
            if f.status == status:
                total += 1

        return total

    def countSlaves(self):
        """Number of distinct slaves currently working on dispatched frames."""
        return len(set((frame.slave for frame in self.frames if frame.status == DISPATCHED)))

    def statusText(self):
        """Human-readable job status."""
        return JOB_STATUS_TEXT[self.status]

    def framesStatus(self):
        """Map each frame status constant to the number of frames in it."""
        results = {
            QUEUED: 0,
            DISPATCHED: 0,
            DONE: 0,
            ERROR: 0
            }

        for frame in self.frames:
            results[frame.status] += 1

        return results

    def __contains__(self, frame_number):
        # for/else: the else clause runs only when the loop completed
        # without returning, i.e. the frame number was not found.
        for f in self.frames:
            if f.number == frame_number:
                return True
        else:
            return False

    def __getitem__(self, frame_number):
        # Returns None (not an exception) when the frame does not exist.
        for f in self.frames:
            if f.number == frame_number:
                return f
        else:
            return None

    def serialize(self, frames = None):
        """Return a JSON-compatible dict.

        When 'frames' is given, only those frames — and only the files whose
        start/end range overlaps their numbers — are included.
        """
        min_frame = min((f.number for f in frames)) if frames else -1
        max_frame = max((f.number for f in frames)) if frames else -1
        return {
            "id": self.id,
            "type": self.type,
            "name": self.name,
            "category": self.category,
            "status": self.status,
            "files": [f.serialize() for f in self.files if f.start == -1 or not frames or (f.start <= max_frame and f.end >= min_frame)],
            "frames": [f.serialize() for f in self.frames if not frames or f in frames],
            "chunks": self.chunks,
            "priority": self.priority,
            "usage": self.usage,
            "blacklist": self.blacklist,
            "last_dispatched": self.last_dispatched,
            "version_info": self.version_info.serialize() if self.version_info else None,
            "resolution": self.resolution
            }

    @staticmethod
    def materialize(data):
        """Rebuild a RenderJob (with its files and frames) from a dict."""
        if not data:
            return None

        job = RenderJob()
        job.id = data["id"]
        job.type = data["type"]
        job.name = data["name"]
        job.category = data["category"]
        job.status = data["status"]
        job.files = [RenderFile.materialize(f) for f in data["files"]]
        job.frames = [RenderFrame.materialize(f) for f in data["frames"]]
        job.chunks = data["chunks"]
        job.priority = data["priority"]
        job.usage = data["usage"]
        job.blacklist = data["blacklist"]
        job.last_dispatched = data["last_dispatched"]
        job.resolution = data["resolution"]

        # version_info is optional (only JOB_VCS jobs carry one).
        version_info = data.get("version_info", None)
        if version_info:
            job.version_info = VersioningInfo.materialize(version_info)

        return job
+
class RenderFrame:
    """A single frame of a job and its render status."""

    def __init__(self, number = 0, command = ""):
        self.number = number    # frame number in the scene
        self.time = 0           # render time in seconds
        self.status = QUEUED    # frame status constant (from netrender.utils)
        self.slave = None       # RenderSlave currently assigned, if any
        self.command = command  # command line, used by process-type jobs

    def statusText(self):
        """Human-readable frame status."""
        return FRAME_STATUS_TEXT[self.status]

    def serialize(self):
        """Return a JSON-compatible dict; the slave is serialized inline."""
        return {
            "number": self.number,
            "time": self.time,
            "status": self.status,
            "slave": None if not self.slave else self.slave.serialize(),
            "command": self.command
            }

    @staticmethod
    def materialize(data):
        """Rebuild a RenderFrame from a serialized dict; None for falsy input."""
        if not data:
            return None

        frame = RenderFrame()
        frame.number = data["number"]
        frame.time = data["time"]
        frame.status = data["status"]
        frame.slave = RenderSlave.materialize(data["slave"])
        frame.command = data["command"]

        return frame
diff --git a/netrender/netrender.css b/netrender/netrender.css
new file mode 100644
index 00000000..0c54690e
--- /dev/null
+++ b/netrender/netrender.css
@@ -0,0 +1,88 @@
+body {
+ background-color:#eee;
+ font-size:12px;
+ font-family: "Lucida Sans","Lucida Sans Unicode","Lucida Grande",Lucida,sans-serif;
+
+}
+a {
+ /*text-decoration:none;*/
+ color:#666;
+}
+a:hover {
+ color:#000;
+}
/* NOTE(review): this rule was declared twice (font-size 120%, then 110%);
   the later declaration wins in the cascade, so the duplicates are merged
   keeping the effective values. */
h2 {
	background-color:#ddd;
	font-size:110%;
	padding:5px;
}
+
+table {
+ text-align:center;
+ border:0;
+ background-color:#ddd;
+ padding: 0px;
+ margin: 0px;
+}
+thead{
+ font-size:90%;
+ color:#555;
+ background-color:#ccc;
+}
+td {
+ border:0;
+ padding:2px;
+ padding-left:10px;
+ padding-right:10px;
+ margin-left:20px;
+ background-color:#ddd;
+}
+td:hover {
+ background-color:#ccc;
+}
+tr {
+ border:0;
+}
+button {
+ color: #111;
+ width: auto;
+ height: auto;
+}
+
+.toggle {
+ text-decoration: underline;
+ cursor: pointer;
+}
+
+.cache {
+ display: none;
+}
+
+.fluid {
+ display: none;
+}
+
+.other {
+ display: none;
+}
+
+.rules {
+ width: 60em;
+ text-align: left;
+}
+
+img.thumb {
+ display: none;
+ cursor: pointer;
+}
+
+span.thumb {
+ text-decoration: underline;
+ cursor: pointer;
+}
diff --git a/netrender/netrender.js b/netrender/netrender.js
new file mode 100644
index 00000000..1024a169
--- /dev/null
+++ b/netrender/netrender.js
@@ -0,0 +1,146 @@
+lastFrame = -1
+maxFrame = -1
+minFrame = -1
+
+function request(url, data)
+{
+ xmlhttp = new XMLHttpRequest();
+ xmlhttp.open("POST", url, false);
+ xmlhttp.send(data);
+ window.location.reload()
+}
+
+function edit(id, info)
+{
+ request("/edit_" + id, info)
+}
+
+function clear_jobs()
+{
+ var r=confirm("Also delete files on master?");
+
+ if (r==true) {
+ request('/clear', '{"clear":true}');
+ } else {
+ request('/clear', '{"clear":false}');
+ }
+}
+
+function cancel_job(id)
+{
+ var r=confirm("Also delete files on master?");
+
+ if (r==true) {
+ request('/cancel_' + id, '{"clear":true}');
+ } else {
+ request('/cancel_' + id, '{"clear":false}');
+ }
+}
+
+function balance_edit(id, old_value)
+{
+ var new_value = prompt("New limit", old_value);
+ if (new_value != null && new_value != "") {
+ request("/balance_limit", '{"' + id + '":"' + new_value + '"}');
+ }
+}
+
+function balance_enable(id, value)
+{
+ request("/balance_enable", '{"' + id + '":' + value + "}");
+}
+
+function showThumb(job, frame)
+{
+ if (lastFrame != -1) {
+ if (maxFrame != -1 && minFrame != -1) {
+ if (frame >= minFrame && frame <= maxFrame) {
+ for(i = minFrame; i <= maxFrame; i=i+1) {
+ toggleThumb(job, i);
+ }
+ minFrame = -1;
+ maxFrame = -1;
+ lastFrame = -1;
+ } else if (frame > maxFrame) {
+ for(i = maxFrame+1; i <= frame; i=i+1) {
+ toggleThumb(job, i);
+ }
+ maxFrame = frame;
+ lastFrame = frame;
+ } else {
+ for(i = frame; i <= minFrame-1; i=i+1) {
+ toggleThumb(job, i);
+ }
+ minFrame = frame;
+ lastFrame = frame;
+ }
+ } else if (frame == lastFrame) {
+ toggleThumb(job, frame);
+ } else if (frame < lastFrame) {
+ minFrame = frame;
+ maxFrame = lastFrame;
+
+ for(i = minFrame; i <= maxFrame-1; i=i+1) {
+ toggleThumb(job, i);
+ }
+ lastFrame = frame;
+ } else {
+ minFrame = lastFrame;
+ maxFrame = frame;
+
+ for(i = minFrame+1; i <= maxFrame; i=i+1) {
+ toggleThumb(job, i);
+ }
+ lastFrame = frame;
+ }
+ } else {
+ toggleThumb(job, frame);
+ }
+}
+
+function toggleThumb(job, frame)
+{
+ img = document.images["thumb" + frame];
+ url = "/thumb_" + job + "_" + frame + ".jpg"
+
+ if (img.style.display == "block") {
+ img.style.display = "none";
+ img.src = "";
+ lastFrame = -1;
+ } else {
+ img.src = url;
+ img.style.display = "block";
+ lastFrame = frame;
+ }
+}
+
+function returnObjById( id )
+{
+ if (document.getElementById)
+ var returnVar = document.getElementById(id);
+ else if (document.all)
+ var returnVar = document.all[id];
+ else if (document.layers)
+ var returnVar = document.layers[id];
+ return returnVar;
+}
+
/* Toggle the 'display' property of a CSS rule between value1 and value2
   (used to show/hide whole row categories at once). */
function toggleDisplay( className, value1, value2 )
{
	var style = getStyle(className); /* 'var': avoid leaking a global */

	/* getStyle returns undefined when no rule matches the selector;
	   bail out instead of throwing a TypeError. */
	if (!style) {
		return;
	}

	if (style.style["display"] == value1) {
		style.style["display"] = value2;
	} else {
		style.style["display"] = value1;
	}
}
+
+function getStyle(className) {
+ var classes = document.styleSheets[0].rules || document.styleSheets[0].cssRules
+ for(var x=0;x<classes.length;x++) {
+ if(classes[x].selectorText==className) {
+ return classes[x];
+ }
+ }
+} \ No newline at end of file
diff --git a/netrender/operators.py b/netrender/operators.py
new file mode 100644
index 00000000..563da1c5
--- /dev/null
+++ b/netrender/operators.py
@@ -0,0 +1,570 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+import bpy
+import sys, os
+import http, http.client, http.server, urllib, socket
+import webbrowser
+import json
+
+import netrender
+from netrender.utils import *
+import netrender.client as client
+import netrender.model
+import netrender.versioning as versioning
+
class RENDER_OT_netslave_bake(bpy.types.Operator):
    '''Bake all physics point caches next to the blend file so they can be sent with a network render job'''
    bl_idname = "render.netslavebake"
    bl_label = "Bake all in file"

    @classmethod
    def poll(cls, context):
        return True

    def execute(self, context):
        """Force every cache to disk next to the blend file, then bake."""
        scene = context.scene
        netsettings = scene.network_render

        filename = bpy.data.filepath
        path, name = os.path.split(filename)
        root, ext = os.path.splitext(name)
        default_path = path + os.sep + "blendcache_" + root + os.sep # need an API call for that
        relative_path = os.sep + os.sep + "blendcache_" + root + os.sep

        # Force all point cache next to the blend file
        for object in bpy.data.objects:
            for modifier in object.modifiers:
                if modifier.type == 'FLUID_SIMULATION' and modifier.settings.type == "DOMAIN":
                    modifier.settings.path = relative_path
                    bpy.ops.fluid.bake({"active_object": object, "scene": scene})
                elif modifier.type == "CLOTH":
                    modifier.point_cache.frame_step = 1
                    modifier.point_cache.use_disk_cache = True
                    modifier.point_cache.use_external = False
                elif modifier.type == "SOFT_BODY":
                    modifier.point_cache.frame_step = 1
                    modifier.point_cache.use_disk_cache = True
                    modifier.point_cache.use_external = False
                elif modifier.type == "SMOKE" and modifier.smoke_type == "TYPE_DOMAIN":
                    # was 'use_step = 1': inconsistent with the CLOTH/SOFT_BODY
                    # branches above, which set frame_step on the same
                    # PointCache type — use frame_step here too
                    modifier.domain_settings.point_cache.frame_step = 1
                    modifier.domain_settings.point_cache.use_disk_cache = True
                    modifier.domain_settings.point_cache.use_external = False

            # particles modifier are stupid and don't contain data
            # we have to go through the object property
            for psys in object.particle_systems:
                # was 'use_step = 1' — same frame_step fix as above
                psys.point_cache.frame_step = 1
                psys.point_cache.use_disk_cache = True
                psys.point_cache.use_external = False
                psys.point_cache.filepath = relative_path

        bpy.ops.ptcache.bake_all()

        #bpy.ops.wm.save_mainfile(filepath = path + os.sep + root + "_baked.blend")

        return {'FINISHED'}

    def invoke(self, context, event):
        return self.execute(context)
+
+class RENDER_OT_netclientanim(bpy.types.Operator):
+ '''Start rendering an animation on network'''
+ bl_idname = "render.netclientanim"
+ bl_label = "Animation on network"
+
+ @classmethod
+ def poll(cls, context):
+ return True
+
+ def execute(self, context):
+ scene = context.scene
+ netsettings = scene.network_render
+
+ conn = clientConnection(netsettings.server_address, netsettings.server_port, self.report)
+
+ if conn:
+ # Sending file
+ scene.network_render.job_id = client.clientSendJob(conn, scene, True)
+ conn.close()
+
+ bpy.ops.render.render('INVOKE_AREA', animation=True)
+
+ return {'FINISHED'}
+
+ def invoke(self, context, event):
+ return self.execute(context)
+
+class RENDER_OT_netclientrun(bpy.types.Operator):
+ '''Start network rendering service'''
+ bl_idname = "render.netclientstart"
+ bl_label = "Start Service"
+
+ @classmethod
+ def poll(cls, context):
+ return True
+
+ def execute(self, context):
+ bpy.ops.render.render('INVOKE_AREA', animation=True)
+
+ return {'FINISHED'}
+
+ def invoke(self, context, event):
+ return self.execute(context)
+
+class RENDER_OT_netclientsend(bpy.types.Operator):
+ '''Send Render Job to the Network'''
+ bl_idname = "render.netclientsend"
+ bl_label = "Send job"
+
+ @classmethod
+ def poll(cls, context):
+ return True
+
+ def execute(self, context):
+ scene = context.scene
+ netsettings = scene.network_render
+
+ try:
+ conn = clientConnection(netsettings.server_address, netsettings.server_port, self.report)
+
+ if conn:
+ # Sending file
+ scene.network_render.job_id = client.clientSendJob(conn, scene, True)
+ conn.close()
+ self.report('INFO', "Job sent to master")
+ except Exception as err:
+ self.report('ERROR', str(err))
+
+
+ return {'FINISHED'}
+
+ def invoke(self, context, event):
+ return self.execute(context)
+
+class RENDER_OT_netclientsendframe(bpy.types.Operator):
+ '''Send Render Job with current frame to the Network'''
+ bl_idname = "render.netclientsendframe"
+ bl_label = "Send current frame job"
+
+ @classmethod
+ def poll(cls, context):
+ return True
+
+ def execute(self, context):
+ scene = context.scene
+ netsettings = scene.network_render
+
+ try:
+ conn = clientConnection(netsettings.server_address, netsettings.server_port, self.report)
+
+ if conn:
+ # Sending file
+ scene.network_render.job_id = client.clientSendJob(conn, scene, False)
+ conn.close()
+ self.report('INFO', "Job sent to master")
+ except Exception as err:
+ self.report('ERROR', str(err))
+
+
+ return {'FINISHED'}
+
+ def invoke(self, context, event):
+ return self.execute(context)
+
class RENDER_OT_netclientstatus(bpy.types.Operator):
    '''Refresh the status of the current jobs'''
    bl_idname = "render.netclientstatus"
    bl_label = "Client Status"

    @classmethod
    def poll(cls, context):
        return True

    def execute(self, context):
        """Query the master's /status URL and rebuild the job lists."""
        netsettings = context.scene.network_render
        conn = clientConnection(netsettings.server_address, netsettings.server_port, self.report)

        if conn:
            conn.request("GET", "/status")

            response = conn.getresponse()
            content = response.read()
            print( response.status, response.reason )

            # Decode the JSON job list sent by the master.
            jobs = (netrender.model.RenderJob.materialize(j) for j in json.loads(str(content, encoding='utf8')))

            # Clear the RNA collection before repopulating it.
            while(len(netsettings.jobs) > 0):
                netsettings.jobs.remove(0)

            # Full job data is kept module-side in netrender.jobs; the RNA
            # collection only mirrors the names for UI display.
            netrender.jobs = []

            for j in jobs:
                netrender.jobs.append(j)
                netsettings.jobs.add()
                job = netsettings.jobs[-1]

                j.results = j.framesStatus() # cache frame status

                job.name = j.name

        return {'FINISHED'}

    def invoke(self, context, event):
        return self.execute(context)
+
+class RENDER_OT_netclientblacklistslave(bpy.types.Operator):
+ '''Operator documentation text, will be used for the operator tooltip and python docs.'''
+ bl_idname = "render.netclientblacklistslave"
+ bl_label = "Client Blacklist Slave"
+
+ @classmethod
+ def poll(cls, context):
+ return True
+
+ def execute(self, context):
+ netsettings = context.scene.network_render
+
+ if netsettings.active_slave_index >= 0:
+
+ # deal with data
+ slave = netrender.slaves.pop(netsettings.active_slave_index)
+ netrender.blacklist.append(slave)
+
+ # deal with rna
+ netsettings.slaves_blacklist.add()
+ netsettings.slaves_blacklist[-1].name = slave.name
+
+ netsettings.slaves.remove(netsettings.active_slave_index)
+ netsettings.active_slave_index = -1
+
+ return {'FINISHED'}
+
+ def invoke(self, context, event):
+ return self.execute(context)
+
+class RENDER_OT_netclientwhitelistslave(bpy.types.Operator):
+ '''Operator documentation text, will be used for the operator tooltip and python docs.'''
+ bl_idname = "render.netclientwhitelistslave"
+ bl_label = "Client Whitelist Slave"
+
+ @classmethod
+ def poll(cls, context):
+ return True
+
+ def execute(self, context):
+ netsettings = context.scene.network_render
+
+ if netsettings.active_blacklisted_slave_index >= 0:
+
+ # deal with data
+ slave = netrender.blacklist.pop(netsettings.active_blacklisted_slave_index)
+ netrender.slaves.append(slave)
+
+ # deal with rna
+ netsettings.slaves.add()
+ netsettings.slaves[-1].name = slave.name
+
+ netsettings.slaves_blacklist.remove(netsettings.active_blacklisted_slave_index)
+ netsettings.active_blacklisted_slave_index = -1
+
+ return {'FINISHED'}
+
+ def invoke(self, context, event):
+ return self.execute(context)
+
+
+class RENDER_OT_netclientslaves(bpy.types.Operator):
+ '''Refresh status about available Render slaves'''
+ bl_idname = "render.netclientslaves"
+ bl_label = "Client Slaves"
+
+ @classmethod
+ def poll(cls, context):
+ return True
+
+ def execute(self, context):
+ netsettings = context.scene.network_render
+ conn = clientConnection(netsettings.server_address, netsettings.server_port, self.report)
+
+ if conn:
+ conn.request("GET", "/slaves")
+
+ response = conn.getresponse()
+ content = response.read()
+ print( response.status, response.reason )
+
+ slaves = (netrender.model.RenderSlave.materialize(s) for s in json.loads(str(content, encoding='utf8')))
+
+ while(len(netsettings.slaves_blacklist) > 0):
+ netsettings.slaves_blacklist.remove(0)
+
+ while(len(netsettings.slaves) > 0):
+ netsettings.slaves.remove(0)
+
+ netrender.slaves = []
+
+ for s in slaves:
+ for i in range(len(netrender.blacklist)):
+ slave = netrender.blacklist[i]
+ if slave.id == s.id:
+ netrender.blacklist[i] = s
+ netsettings.slaves_blacklist.add()
+ slave = netsettings.slaves_blacklist[-1]
+ slave.name = s.name
+ break
+ else:
+ netrender.slaves.append(s)
+
+ netsettings.slaves.add()
+ slave = netsettings.slaves[-1]
+ slave.name = s.name
+
+ return {'FINISHED'}
+
+ def invoke(self, context, event):
+ return self.execute(context)
+
class RENDER_OT_netclientcancel(bpy.types.Operator):
    '''Cancel the selected network rendering job.'''
    bl_idname = "render.netclientcancel"
    bl_label = "Client Cancel"

    @classmethod
    def poll(cls, context):
        netsettings = context.scene.network_render
        # Bounds-check the active index against the list length (the original
        # only tested 'len(jobs) > 0', letting a stale index past the end
        # reach execute and raise IndexError). Same test as netclientdownload.
        return netsettings.active_job_index >= 0 and len(netsettings.jobs) > netsettings.active_job_index

    def execute(self, context):
        """Ask the master to cancel the selected job, then drop it from the UI."""
        netsettings = context.scene.network_render
        conn = clientConnection(netsettings.server_address, netsettings.server_port, self.report)

        if conn:
            job = netrender.jobs[netsettings.active_job_index]

            # 'clear': False keeps the job's files on the master.
            conn.request("POST", cancelURL(job.id), json.dumps({'clear':False}))

            response = conn.getresponse()
            response.read()
            print( response.status, response.reason )

            netsettings.jobs.remove(netsettings.active_job_index)

        return {'FINISHED'}

    def invoke(self, context, event):
        return self.execute(context)
+
+class RENDER_OT_netclientcancelall(bpy.types.Operator):
+ '''Cancel all running network rendering jobs.'''
+ bl_idname = "render.netclientcancelall"
+ bl_label = "Client Cancel All"
+
+ @classmethod
+ def poll(cls, context):
+ return True
+
+ def execute(self, context):
+ netsettings = context.scene.network_render
+ conn = clientConnection(netsettings.server_address, netsettings.server_port, self.report)
+
+ if conn:
+ conn.request("POST", "/clear", json.dumps({'clear':False}))
+
+ response = conn.getresponse()
+ response.read()
+ print( response.status, response.reason )
+
+ while(len(netsettings.jobs) > 0):
+ netsettings.jobs.remove(0)
+
+ return {'FINISHED'}
+
+ def invoke(self, context, event):
+ return self.execute(context)
+
class netclientdownload(bpy.types.Operator):
    '''Download render results from the network'''
    bl_idname = "render.netclientdownload"
    bl_label = "Client Download"

    @classmethod
    def poll(cls, context):
        netsettings = context.scene.network_render
        return netsettings.active_job_index >= 0 and len(netsettings.jobs) > netsettings.active_job_index

    def execute(self, context):
        """Fetch the selected job's status and download all finished frames."""
        netsettings = context.scene.network_render
        rd = context.scene.render

        conn = clientConnection(netsettings.server_address, netsettings.server_port, self.report)

        if conn:
            job_id = netrender.jobs[netsettings.active_job_index].id

            conn.request("GET", "/status", headers={"job-id":job_id})

            response = conn.getresponse()

            if response.status != http.client.OK:
                self.report('ERROR', "Job ID %i not defined on master" % job_id)
                # Operators may only return the documented status flags;
                # {'ERROR'} (the original value) is not one of them.
                return {'CANCELLED'}

            content = response.read()

            job = netrender.model.RenderJob.materialize(json.loads(str(content, encoding='utf8')))

            conn.close()

            finished_frames = []

            nb_error = 0
            nb_missing = 0

            for frame in job.frames:
                if frame.status == DONE:
                    finished_frames.append(frame.number)
                elif frame.status == ERROR:
                    nb_error += 1
                else:
                    nb_missing += 1

            if not finished_frames:
                # was a bare 'return' — an invalid operator result in Blender
                return {'CANCELLED'}

            # Group consecutive frame numbers into (first, last) ranges and
            # isolated frames into 1-tuples, as getResults expects.
            frame_ranges = []

            first = None
            last = None

            for current in finished_frames:
                if first is None:
                    # 'is None', not truthiness: frame number 0 is falsy.
                    first = current
                    last = current
                elif last + 1 == current:
                    last = current
                else:
                    # Gap: close the previous range. (The original loop's
                    # combined gap/last-element test could drop the final
                    # range or frame after a gap, e.g. [1, 2, 5] lost 5.)
                    frame_ranges.append((first, last) if first < last else (first,))
                    first = current
                    last = current

            if first is not None:
                frame_ranges.append((first, last) if first < last else (first,))

            getResults(netsettings.server_address, netsettings.server_port, job_id, job.resolution[0], job.resolution[1], job.resolution[2], frame_ranges)

            if nb_error and nb_missing:
                self.report('ERROR', "Results downloaded but skipped %i frames with errors and %i unfinished frames" % (nb_error, nb_missing))
            elif nb_error:
                self.report('ERROR', "Results downloaded but skipped %i frames with errors" % nb_error)
            elif nb_missing:
                self.report('WARNING', "Results downloaded but skipped %i unfinished frames" % nb_missing)
            else:
                self.report('INFO', "All results downloaded")

        return {'FINISHED'}

    def invoke(self, context, event):
        return self.execute(context)
+
+class netclientscan(bpy.types.Operator):
+ '''Listen on network for master server broadcasting its address and port.'''
+ bl_idname = "render.netclientscan"
+ bl_label = "Client Scan"
+
+ @classmethod
+ def poll(cls, context):
+ return True
+
+ def execute(self, context):
+ address, port = clientScan(self.report)
+
+ if address:
+ scene = context.scene
+ netsettings = scene.network_render
+ netsettings.server_address = address
+ netsettings.server_port = port
+ netrender.valid_address = True
+
+ return {'FINISHED'}
+
+ def invoke(self, context, event):
+ return self.execute(context)
+
+class netclientvcsguess(bpy.types.Operator):
+ '''Guess VCS setting for the current file'''
+ bl_idname = "render.netclientvcsguess"
+ bl_label = "VCS Guess"
+
+ @classmethod
+ def poll(cls, context):
+ return True
+
+ def execute(self, context):
+ netsettings = context.scene.network_render
+
+ system = versioning.SYSTEMS.get(netsettings.vcs_system, None)
+
+ if system:
+ wpath, name = os.path.split(os.path.abspath(bpy.data.filepath))
+
+ rpath = system.path(wpath)
+ revision = system.revision(wpath)
+
+ netsettings.vcs_wpath = wpath
+ netsettings.vcs_rpath = rpath
+ netsettings.vcs_revision = revision
+
+
+
+ return {'FINISHED'}
+
+ def invoke(self, context, event):
+ return self.execute(context)
+
+
+class netclientweb(bpy.types.Operator):
+ '''Open new window with information about running rendering jobs'''
+ bl_idname = "render.netclientweb"
+ bl_label = "Open Master Monitor"
+
+ @classmethod
+ def poll(cls, context):
+ netsettings = context.scene.network_render
+ return netsettings.server_address != "[default]"
+
+ def execute(self, context):
+ netsettings = context.scene.network_render
+
+
+ # open connection to make sure server exists
+ conn = clientConnection(netsettings.server_address, netsettings.server_port, self.report)
+
+ if conn:
+ conn.close()
+
+ webbrowser.open("http://%s:%i" % (netsettings.server_address, netsettings.server_port))
+
+ return {'FINISHED'}
+
+ def invoke(self, context, event):
+ return self.execute(context)
diff --git a/netrender/repath.py b/netrender/repath.py
new file mode 100644
index 00000000..3ac9636b
--- /dev/null
+++ b/netrender/repath.py
@@ -0,0 +1,150 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+import sys, os
+import subprocess
+
+import bpy
+
+from netrender.utils import *
+import netrender.model
+
+BLENDER_PATH = sys.argv[0]
+
def reset(job):
    """Undo a previous update(): restore the job's original main blend file
    from its .bak backup, discarding the repathed copy."""
    main_file = job.files[0]

    job_full_path = main_file.filepath

    if os.path.exists(job_full_path + ".bak"):
        os.remove(job_full_path) # repathed file
        os.renames(job_full_path + ".bak", job_full_path)
+
def update(job):
    """Rewrite dependency paths inside the job's main blend file.

    Launches a background blender that opens the blend, runs this module as
    a script (see the __main__ block below) with original/new path pairs,
    and saves the remapped copy; the untouched original is kept as .bak.
    """
    paths = []

    main_file = job.files[0]

    job_full_path = main_file.filepath


    path, ext = os.path.splitext(job_full_path)

    new_path = path + ".remap" + ext

    # Disable for now. Partial repath should work anyway
    #all = main_file.filepath != main_file.original_path
    all = False

    # Collect (original, new) path pairs for every dependency that moved.
    for rfile in job.files[1:]:
        if all or rfile.original_path != rfile.filepath:
            paths.append(rfile.original_path)
            paths.append(rfile.filepath)

    # Only update if needed
    if paths:
        process = subprocess.Popen([BLENDER_PATH, "-b", "-noaudio", job_full_path, "-P", __file__, "--", new_path] + paths, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        process.wait()

        # Swap the remapped copy into place, keeping the original as .bak.
        os.renames(job_full_path, job_full_path + ".bak")
        os.renames(new_path, job_full_path)
+
def process(paths):
    """Remap file paths inside the currently loaded blend.

    paths is a flat list [original_0, new_0, original_1, new_1, ...];
    libraries, images, fluid caches, point caches and external multires
    files are repathed accordingly.
    """
    def processPointCache(point_cache):
        # Point caches fall back to the default external path on the slave.
        point_cache.use_external = False

    def processFluid(fluid):
        # NOTE(review): reads fluid.filepath but writes fluid.path — confirm
        # which attribute the fluid domain settings actually expose.
        new_path = path_map.get(fluid.filepath, None)
        if new_path:
            fluid.path = new_path

    path_map = {}
    for i in range(0, len(paths), 2):
        # special case for point cache
        if paths[i].endswith(".bphys"):
            pass # Don't need them in the map, they all use the default external path
            # NOTE: This is probably not correct all the time, need to be fixed.
        # special case for fluids
        elif paths[i].endswith(".bobj.gz"):
            # Fluids are remapped by directory, not by file name.
            path_map[os.path.split(paths[i])[0]] = os.path.split(paths[i+1])[0]
        else:
            path_map[os.path.split(paths[i])[1]] = paths[i+1]

    # TODO original paths aren't really the orignal path (they are the normalized path
    # so we repath using the filenames only.

    ###########################
    # LIBRARIES
    ###########################
    for lib in bpy.data.libraries:
        file_path = bpy.path.abspath(lib.filepath)
        new_path = path_map.get(os.path.split(file_path)[1], None)
        if new_path:
            lib.filepath = new_path

    ###########################
    # IMAGES
    ###########################
    for image in bpy.data.images:
        if image.source == "FILE" and not image.packed_file:
            file_path = bpy.path.abspath(image.filepath)
            new_path = path_map.get(os.path.split(file_path)[1], None)
            if new_path:
                image.filepath = new_path


    ###########################
    # FLUID + POINT CACHE
    ###########################
    for object in bpy.data.objects:
        for modifier in object.modifiers:
            if modifier.type == 'FLUID_SIMULATION' and modifier.settings.type == "DOMAIN":
                # was processFluid(settings): NameError — 'settings' was
                # never defined; the modifier's settings were intended
                processFluid(modifier.settings)
            elif modifier.type == "CLOTH":
                processPointCache(modifier.point_cache)
            elif modifier.type == "SOFT_BODY":
                processPointCache(modifier.point_cache)
            elif modifier.type == "SMOKE" and modifier.smoke_type == "TYPE_DOMAIN":
                processPointCache(modifier.domain_settings.point_cache_low)
                if modifier.domain_settings.use_high_resolution:
                    processPointCache(modifier.domain_settings.point_cache_high)
            elif modifier.type == "MULTIRES" and modifier.is_external:
                file_path = bpy.path.abspath(modifier.filepath)
                new_path = path_map.get(file_path, None)
                if new_path:
                    modifier.filepath = new_path

        # particles modifier are stupid and don't contain data
        # we have to go through the object property
        for psys in object.particle_systems:
            processPointCache(psys.point_cache)
+
+
if __name__ == "__main__":
    # Invoked by update() as:
    #   blender -b file.blend -P repath.py -- new_path orig new [orig new ...]
    try:
        i = sys.argv.index("--")
    except:
        i = 0

    # i is 0 when there is no "--" separator (not launched by update()).
    if i:
        new_path = sys.argv[i+1]
        args = sys.argv[i+2:]

        process(args)

        bpy.ops.wm.save_as_mainfile(filepath=new_path, check_existing=False)
diff --git a/netrender/slave.py b/netrender/slave.py
new file mode 100644
index 00000000..7f72530e
--- /dev/null
+++ b/netrender/slave.py
@@ -0,0 +1,359 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+import sys, os, platform, shutil
+import http, http.client, http.server, urllib
+import subprocess, time
+import json
+
+import bpy
+
+from netrender.utils import *
+import netrender.model
+import netrender.repath
+import netrender.thumbnail as thumbnail
+
+BLENDER_PATH = sys.argv[0]
+
+CANCEL_POLL_SPEED = 2
+MAX_TIMEOUT = 10
+INCREMENT_TIMEOUT = 1
+MAX_CONNECT_TRY = 10
+try:
+ system = platform.system()
+except UnicodeDecodeError:
+ import sys
+ system = sys.platform
+
+if system in ('Windows', 'win32') and platform.version() >= '5': # Error mode is only available on Win2k or higher, that's version 5
+ import ctypes
+ def SetErrorMode():
+ val = ctypes.windll.kernel32.SetErrorMode(0x0002)
+ ctypes.windll.kernel32.SetErrorMode(val | 0x0002)
+ return val
+
+ def RestoreErrorMode(val):
+ ctypes.windll.kernel32.SetErrorMode(val)
+else:
+ def SetErrorMode():
+ return 0
+
+ def RestoreErrorMode(val):
+ pass
+
+def clearSlave(path):
+ shutil.rmtree(path)
+
+def slave_Info():
+ sysname, nodename, release, version, machine, processor = platform.uname()
+ slave = netrender.model.RenderSlave()
+ slave.name = nodename
+ slave.stats = sysname + " " + release + " " + machine + " " + processor
+ return slave
+
+def testCancel(conn, job_id, frame_number):
+ conn.request("HEAD", "/status", headers={"job-id":job_id, "job-frame": str(frame_number)})
+
+ # canceled if job isn't found anymore
+ if responseStatus(conn) == http.client.NO_CONTENT:
+ return True
+ else:
+ return False
+
+def testFile(conn, job_id, slave_id, rfile, JOB_PREFIX, main_path = None):
+ job_full_path = prefixPath(JOB_PREFIX, rfile.filepath, main_path)
+
+ found = os.path.exists(job_full_path)
+
+ if found and rfile.signature != None:
+ found_signature = hashFile(job_full_path)
+ found = found_signature == rfile.signature
+
+ if not found:
+ print("Found file %s at %s but signature mismatch!" % (rfile.filepath, job_full_path))
+ os.remove(job_full_path)
+ job_full_path = prefixPath(JOB_PREFIX, rfile.filepath, main_path, force = True)
+
+ if not found:
+ # Force prefix path if not found
+ job_full_path = prefixPath(JOB_PREFIX, rfile.filepath, main_path, force = True)
+ temp_path = os.path.join(JOB_PREFIX, "slave.temp")
+ conn.request("GET", fileURL(job_id, rfile.index), headers={"slave-id":slave_id})
+ response = conn.getresponse()
+
+ if response.status != http.client.OK:
+ return None # file for job not returned by server, need to return an error code to server
+
+ f = open(temp_path, "wb")
+ buf = response.read(1024)
+
+ while buf:
+ f.write(buf)
+ buf = response.read(1024)
+
+ f.close()
+
+ os.renames(temp_path, job_full_path)
+
+ rfile.filepath = job_full_path
+
+ return job_full_path
+
+def breakable_timeout(timeout):
+ for i in range(timeout):
+ time.sleep(1)
+ if engine.test_break():
+ break
+
+def render_slave(engine, netsettings, threads):
+ timeout = 1
+
+ bisleep = BreakableIncrementedSleep(INCREMENT_TIMEOUT, 1, MAX_TIMEOUT, engine.test_break)
+
+ engine.update_stats("", "Network render node initiation")
+
+ slave_path = bpy.path.abspath(netsettings.path)
+
+ if not os.path.exists(slave_path):
+ print("Slave working path ( %s ) doesn't exist" % netsettings.path)
+ return
+
+ if not os.access(slave_path, os.W_OK):
+ print("Slave working path ( %s ) is not writable" % netsettings.path)
+ return
+
+ conn = clientConnection(netsettings.server_address, netsettings.server_port)
+
+ if not conn:
+ timeout = 1
+ print("Connection failed, will try connecting again at most %i times" % MAX_CONNECT_TRY)
+ bisleep.reset()
+
+ for i in range(MAX_CONNECT_TRY):
+ bisleep.sleep()
+
+ conn = clientConnection(netsettings.server_address, netsettings.server_port)
+
+ if conn or engine.test_break():
+ break
+
+ print("Retry %i failed, waiting %is before retrying" % (i + 1, bisleep.current))
+
+ if conn:
+ conn.request("POST", "/slave", json.dumps(slave_Info().serialize()))
+ response = conn.getresponse()
+ response.read()
+
+ slave_id = response.getheader("slave-id")
+
+ NODE_PREFIX = os.path.join(slave_path, "slave_" + slave_id)
+ if not os.path.exists(NODE_PREFIX):
+ os.mkdir(NODE_PREFIX)
+
+ engine.update_stats("", "Network render connected to master, waiting for jobs")
+
+ while not engine.test_break():
+ conn.request("GET", "/job", headers={"slave-id":slave_id})
+ response = conn.getresponse()
+
+ if response.status == http.client.OK:
+ bisleep.reset()
+
+ job = netrender.model.RenderJob.materialize(json.loads(str(response.read(), encoding='utf8')))
+ engine.update_stats("", "Network render processing job from master")
+
+ JOB_PREFIX = os.path.join(NODE_PREFIX, "job_" + job.id)
+ if not os.path.exists(JOB_PREFIX):
+ os.mkdir(JOB_PREFIX)
+
+ # set tempdir for fsaa temp files
+ # have to set environ var because render is done in a subprocess and that's the easiest way to propagate the setting
+ os.environ["TMP"] = JOB_PREFIX
+
+
+ if job.type == netrender.model.JOB_BLENDER:
+ job_path = job.files[0].filepath # path of main file
+ main_path, main_file = os.path.split(job_path)
+
+ job_full_path = testFile(conn, job.id, slave_id, job.files[0], JOB_PREFIX)
+ print("Fullpath", job_full_path)
+ print("File:", main_file, "and %i other files" % (len(job.files) - 1,))
+
+ for rfile in job.files[1:]:
+ testFile(conn, job.id, slave_id, rfile, JOB_PREFIX, main_path)
+ print("\t", rfile.filepath)
+
+ netrender.repath.update(job)
+
+ engine.update_stats("", "Render File "+ main_file+ " for job "+ job.id)
+ elif job.type == netrender.model.JOB_VCS:
+ if not job.version_info:
+ # Need to return an error to server, incorrect job type
+ pass
+
+ job_path = job.files[0].filepath # path of main file
+ main_path, main_file = os.path.split(job_path)
+
+ job.version_info.update()
+
+ # For VCS jobs, file path is relative to the working copy path
+ job_full_path = os.path.join(job.version_info.wpath, job_path)
+
+ engine.update_stats("", "Render File "+ main_file+ " for job "+ job.id)
+
+ # announce log to master
+ logfile = netrender.model.LogFile(job.id, slave_id, [frame.number for frame in job.frames])
+ conn.request("POST", "/log", bytes(json.dumps(logfile.serialize()), encoding='utf8'))
+ response = conn.getresponse()
+ response.read()
+
+
+ first_frame = job.frames[0].number
+
+ # start render
+ start_t = time.time()
+
+ if job.rendersWithBlender():
+ frame_args = []
+
+ for frame in job.frames:
+ print("frame", frame.number)
+ frame_args += ["-f", str(frame.number)]
+
+ val = SetErrorMode()
+ process = subprocess.Popen([BLENDER_PATH, "-b", "-noaudio", job_full_path, "-t", str(threads), "-o", os.path.join(JOB_PREFIX, "######"), "-E", "BLENDER_RENDER", "-F", "MULTILAYER"] + frame_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ RestoreErrorMode(val)
+ elif job.type == netrender.model.JOB_PROCESS:
+ command = job.frames[0].command
+ val = SetErrorMode()
+ process = subprocess.Popen(command.split(" "), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ RestoreErrorMode(val)
+
+ headers = {"slave-id":slave_id}
+
+ cancelled = False
+ stdout = bytes()
+ run_t = time.time()
+ while not cancelled and process.poll() is None:
+ stdout += process.stdout.read(1024)
+ current_t = time.time()
+ cancelled = engine.test_break()
+ if current_t - run_t > CANCEL_POLL_SPEED:
+
+ # update logs if needed
+ if stdout:
+ # (only need to update on one frame, they are linked
+ conn.request("PUT", logURL(job.id, first_frame), stdout, headers=headers)
+ responseStatus(conn)
+
+ # Also output on console
+ if netsettings.use_slave_output_log:
+ print(str(stdout, encoding='utf8'), end="")
+
+ stdout = bytes()
+
+ run_t = current_t
+ if testCancel(conn, job.id, first_frame):
+ cancelled = True
+
+ if job.type == netrender.model.JOB_BLENDER:
+ netrender.repath.reset(job)
+
+ # read leftovers if needed
+ stdout += process.stdout.read()
+
+ if cancelled:
+ # kill process if needed
+ if process.poll() is None:
+ try:
+ process.terminate()
+ except OSError:
+ pass
+ continue # to next frame
+
+ # flush the rest of the logs
+ if stdout:
+ # Also output on console
+ if netsettings.use_slave_thumb:
+ print(str(stdout, encoding='utf8'), end="")
+
+ # (only need to update on one frame, they are linked
+ conn.request("PUT", logURL(job.id, first_frame), stdout, headers=headers)
+ if responseStatus(conn) == http.client.NO_CONTENT:
+ continue
+
+ total_t = time.time() - start_t
+
+ avg_t = total_t / len(job.frames)
+
+ status = process.returncode
+
+ print("status", status)
+
+ headers = {"job-id":job.id, "slave-id":slave_id, "job-time":str(avg_t)}
+
+
+ if status == 0: # non zero status is error
+ headers["job-result"] = str(DONE)
+ for frame in job.frames:
+ headers["job-frame"] = str(frame.number)
+ if job.hasRenderResult():
+ # send image back to server
+
+ filename = os.path.join(JOB_PREFIX, "%06d.exr" % frame.number)
+
+ # thumbnail first
+ if netsettings.use_slave_thumb:
+ thumbname = thumbnail.generate(filename)
+
+ if thumbname:
+ f = open(thumbname, 'rb')
+ conn.request("PUT", "/thumb", f, headers=headers)
+ f.close()
+ responseStatus(conn)
+
+ f = open(filename, 'rb')
+ conn.request("PUT", "/render", f, headers=headers)
+ f.close()
+ if responseStatus(conn) == http.client.NO_CONTENT:
+ continue
+
+ elif job.type == netrender.model.JOB_PROCESS:
+ conn.request("PUT", "/render", headers=headers)
+ if responseStatus(conn) == http.client.NO_CONTENT:
+ continue
+ else:
+ headers["job-result"] = str(ERROR)
+ for frame in job.frames:
+ headers["job-frame"] = str(frame.number)
+ # send error result back to server
+ conn.request("PUT", "/render", headers=headers)
+ if responseStatus(conn) == http.client.NO_CONTENT:
+ continue
+
+ engine.update_stats("", "Network render connected to master, waiting for jobs")
+ else:
+ bisleep.sleep()
+
+ conn.close()
+
+ if netsettings.use_slave_clear:
+ clearSlave(NODE_PREFIX)
+
+if __name__ == "__main__":
+ pass
diff --git a/netrender/thumbnail.py b/netrender/thumbnail.py
new file mode 100644
index 00000000..2ead6e82
--- /dev/null
+++ b/netrender/thumbnail.py
@@ -0,0 +1,81 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+import sys, os
+import subprocess
+
+import bpy
+
+def generate(filename, external=True):
+ if external:
+ process = subprocess.Popen([sys.argv[0], "-b", "-noaudio", "-P", __file__, "--", filename], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ while process.poll() is None:
+ process.stdout.read(1024) # empty buffer to be sure
+ process.stdout.read()
+
+ return _thumbname(filename)
+ else:
+ return _internal(filename)
+
+def _thumbname(filename):
+ root = os.path.splitext(filename)[0]
+ return root + ".jpg"
+
+def _internal(filename):
+ imagename = os.path.split(filename)[1]
+ thumbname = _thumbname(filename)
+
+ if os.path.exists(thumbname):
+ return thumbname
+
+ if bpy:
+ scene = bpy.data.scenes[0] # FIXME, this is dodgy!
+ scene.render.file_format = "JPEG"
+ scene.render.file_quality = 90
+
+ # remove existing image, if there's a leftover (otherwise open changes the name)
+ if imagename in bpy.data.images:
+ img = bpy.data.images[imagename]
+ bpy.data.images.remove(img)
+
+ bpy.ops.image.open(filepath=filename)
+ img = bpy.data.images[imagename]
+
+ img.save_render(thumbname, scene=scene)
+
+ img.user_clear()
+ bpy.data.images.remove(img)
+
+ try:
+ process = subprocess.Popen(["convert", thumbname, "-resize", "300x300", thumbname])
+ process.wait()
+ return thumbname
+ except Exception as exp:
+ print("Error while generating thumbnail")
+ print(exp)
+
+ return None
+
+if __name__ == "__main__":
+ import bpy
+ try:
+ start = sys.argv.index("--") + 1
+ except ValueError:
+ start = 0
+ for filename in sys.argv[start:]:
+ generate(filename, external=False)
diff --git a/netrender/ui.py b/netrender/ui.py
new file mode 100644
index 00000000..6993173d
--- /dev/null
+++ b/netrender/ui.py
@@ -0,0 +1,553 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+import bpy
+import sys, os
+import http, http.client, http.server, urllib
+import subprocess, shutil, time, hashlib
+
+import netrender
+import netrender.slave as slave
+import netrender.master as master
+
+from netrender.utils import *
+
+VERSION = b"0.3"
+
+PATH_PREFIX = "/tmp/"
+
+QUEUED = 0
+DISPATCHED = 1
+DONE = 2
+ERROR = 3
+
+LAST_ADDRESS_TEST = 0
+ADDRESS_TEST_TIMEOUT = 30
+
+def base_poll(cls, context):
+ rd = context.scene.render
+ return (rd.use_game_engine==False) and (rd.engine in cls.COMPAT_ENGINES)
+
+
+def init_file():
+ if netrender.init_file != bpy.data.filepath:
+ netrender.init_file = bpy.data.filepath
+ netrender.init_data = True
+ netrender.valid_address = False
+
+def init_data(netsettings):
+ init_file()
+
+ if netrender.init_data:
+ netrender.init_data = False
+
+ netsettings.active_slave_index = 0
+ while(len(netsettings.slaves) > 0):
+ netsettings.slaves.remove(0)
+
+ netsettings.active_blacklisted_slave_index = 0
+ while(len(netsettings.slaves_blacklist) > 0):
+ netsettings.slaves_blacklist.remove(0)
+
+ netsettings.active_job_index = 0
+ while(len(netsettings.jobs) > 0):
+ netsettings.jobs.remove(0)
+
+def verify_address(netsettings, force=False):
+ global LAST_ADDRESS_TEST
+ init_file()
+
+ if force or LAST_ADDRESS_TEST + ADDRESS_TEST_TIMEOUT < time.time():
+ LAST_ADDRESS_TEST = time.time()
+
+ try:
+ conn = clientConnection(netsettings.server_address, netsettings.server_port, scan = False, timeout = 1)
+ except:
+ conn = None
+
+ if conn:
+ netrender.valid_address = True
+ conn.close()
+ else:
+ netrender.valid_address = False
+
+ return netrender.valid_address
+
+class NeedValidAddress():
+ @classmethod
+ def poll(cls, context):
+ return super().poll(context) and verify_address(context.scene.network_render)
+
+class NetRenderButtonsPanel():
+ bl_space_type = "PROPERTIES"
+ bl_region_type = "WINDOW"
+ bl_context = "render"
+ # COMPAT_ENGINES must be defined in each subclass, external engines can add themselves here
+
+ @classmethod
+ def poll(cls, context):
+ rd = context.scene.render
+ return rd.engine == 'NET_RENDER' and rd.use_game_engine == False
+
+# Setting panel, use in the scene for now.
+class RENDER_PT_network_settings(NetRenderButtonsPanel, bpy.types.Panel):
+ bl_label = "Network Settings"
+ COMPAT_ENGINES = {'NET_RENDER'}
+
+ @classmethod
+ def poll(cls, context):
+ return super().poll(context)
+
+ def draw(self, context):
+ layout = self.layout
+
+ netsettings = context.scene.network_render
+
+ verify_address(netsettings)
+
+ layout.prop(netsettings, "mode", expand=True)
+
+ if netsettings.mode in ("RENDER_MASTER", "RENDER_SLAVE"):
+ layout.operator("render.netclientstart", icon='PLAY')
+
+ layout.prop(netsettings, "path")
+
+ split = layout.split(percentage=0.7)
+
+ col = split.column()
+ col.label(text="Server Address:")
+ col.prop(netsettings, "server_address", text="")
+
+ col = split.column()
+ col.label(text="Port:")
+ col.prop(netsettings, "server_port", text="")
+
+ if netsettings.mode != "RENDER_MASTER":
+ layout.operator("render.netclientscan", icon='FILE_REFRESH', text="")
+
+ if not netrender.valid_address:
+ layout.label(text="No master at specified address")
+
+ layout.operator("render.netclientweb", icon='QUESTION')
+
+class RENDER_PT_network_slave_settings(NetRenderButtonsPanel, bpy.types.Panel):
+ bl_label = "Slave Settings"
+ COMPAT_ENGINES = {'NET_RENDER'}
+
+ @classmethod
+ def poll(cls, context):
+ scene = context.scene
+ return super().poll(context) and scene.network_render.mode == "RENDER_SLAVE"
+
+ def draw(self, context):
+ layout = self.layout
+
+ rd = context.scene.render
+ netsettings = context.scene.network_render
+
+ layout.prop(netsettings, "use_slave_clear")
+ layout.prop(netsettings, "use_slave_thumb")
+ layout.prop(netsettings, "use_slave_output_log")
+ layout.label(text="Threads:")
+ layout.prop(rd, "threads_mode", expand=True)
+
+ col = layout.column()
+ col.enabled = rd.threads_mode == 'FIXED'
+ col.prop(rd, "threads")
+
+class RENDER_PT_network_master_settings(NetRenderButtonsPanel, bpy.types.Panel):
+ bl_label = "Master Settings"
+ COMPAT_ENGINES = {'NET_RENDER'}
+
+ @classmethod
+ def poll(cls, context):
+ scene = context.scene
+ return super().poll(context) and scene.network_render.mode == "RENDER_MASTER"
+
+ def draw(self, context):
+ layout = self.layout
+
+ netsettings = context.scene.network_render
+
+ layout.prop(netsettings, "use_master_broadcast")
+ layout.prop(netsettings, "use_master_clear")
+
+class RENDER_PT_network_job(NetRenderButtonsPanel, bpy.types.Panel):
+ bl_label = "Job Settings"
+ COMPAT_ENGINES = {'NET_RENDER'}
+
+ @classmethod
+ def poll(cls, context):
+ scene = context.scene
+ return super().poll(context) and scene.network_render.mode == "RENDER_CLIENT"
+
+ def draw(self, context):
+ layout = self.layout
+
+ netsettings = context.scene.network_render
+
+ verify_address(netsettings)
+
+ if netsettings.server_address != "[default]":
+ layout.operator("render.netclientanim", icon='RENDER_ANIMATION')
+ layout.operator("render.netclientsend", icon='FILE_BLEND')
+ layout.operator("render.netclientsendframe", icon='RENDER_STILL')
+ if netsettings.job_id:
+ row = layout.row()
+ row.operator("render.render", text="Get Image", icon='RENDER_STILL')
+ row.operator("render.render", text="Get Animation", icon='RENDER_ANIMATION').animation = True
+
+ split = layout.split(percentage=0.3)
+
+ col = split.column()
+ col.label(text="Type:")
+ col.label(text="Name:")
+ col.label(text="Category:")
+
+ col = split.column()
+ col.prop(netsettings, "job_type", text="")
+ col.prop(netsettings, "job_name", text="")
+ col.prop(netsettings, "job_category", text="")
+
+ row = layout.row()
+ row.prop(netsettings, "priority")
+ row.prop(netsettings, "chunks")
+
+class RENDER_PT_network_job_vcs(NetRenderButtonsPanel, bpy.types.Panel):
+ bl_label = "VCS Job Settings"
+ COMPAT_ENGINES = {'NET_RENDER'}
+
+ @classmethod
+ def poll(cls, context):
+ scene = context.scene
+ return (super().poll(context)
+ and scene.network_render.mode == "RENDER_CLIENT"
+ and scene.network_render.job_type == "JOB_VCS")
+
+ def draw(self, context):
+ layout = self.layout
+
+ netsettings = context.scene.network_render
+
+ layout.operator("render.netclientvcsguess", icon='FILE_REFRESH', text="")
+
+ layout.prop(netsettings, "vcs_system")
+ layout.prop(netsettings, "vcs_revision")
+ layout.prop(netsettings, "vcs_rpath")
+ layout.prop(netsettings, "vcs_wpath")
+
+class RENDER_PT_network_slaves(NeedValidAddress, NetRenderButtonsPanel, bpy.types.Panel):
+ bl_label = "Slaves Status"
+ COMPAT_ENGINES = {'NET_RENDER'}
+
+ @classmethod
+ def poll(cls, context):
+ netsettings = context.scene.network_render
+ return super().poll(context) and netsettings.mode == "RENDER_CLIENT"
+
+ def draw(self, context):
+ layout = self.layout
+
+ netsettings = context.scene.network_render
+
+ row = layout.row()
+ row.template_list(netsettings, "slaves", netsettings, "active_slave_index", rows=2)
+
+ sub = row.column(align=True)
+ sub.operator("render.netclientslaves", icon='FILE_REFRESH', text="")
+ sub.operator("render.netclientblacklistslave", icon='ZOOMOUT', text="")
+
+ if len(netrender.slaves) > netsettings.active_slave_index >= 0:
+ layout.separator()
+
+ slave = netrender.slaves[netsettings.active_slave_index]
+
+ layout.label(text="Name: " + slave.name)
+ layout.label(text="Address: " + slave.address[0])
+ layout.label(text="Seen: " + time.ctime(slave.last_seen))
+ layout.label(text="Stats: " + slave.stats)
+
+class RENDER_PT_network_slaves_blacklist(NeedValidAddress, NetRenderButtonsPanel, bpy.types.Panel):
+ bl_label = "Slaves Blacklist"
+ COMPAT_ENGINES = {'NET_RENDER'}
+
+ @classmethod
+ def poll(cls, context):
+ netsettings = context.scene.network_render
+ return super().poll(context) and netsettings.mode == "RENDER_CLIENT"
+
+ def draw(self, context):
+ layout = self.layout
+
+ netsettings = context.scene.network_render
+
+ row = layout.row()
+ row.template_list(netsettings, "slaves_blacklist", netsettings, "active_blacklisted_slave_index", rows=2)
+
+ sub = row.column(align=True)
+ sub.operator("render.netclientwhitelistslave", icon='ZOOMOUT', text="")
+
+ if len(netrender.blacklist) > netsettings.active_blacklisted_slave_index >= 0:
+ layout.separator()
+
+ slave = netrender.blacklist[netsettings.active_blacklisted_slave_index]
+
+ layout.label(text="Name: " + slave.name)
+ layout.label(text="Address: " + slave.address[0])
+ layout.label(text="Seen: " + time.ctime(slave.last_seen))
+ layout.label(text="Stats: " + slave.stats)
+
+class RENDER_PT_network_jobs(NeedValidAddress, NetRenderButtonsPanel, bpy.types.Panel):
+ bl_label = "Jobs"
+ COMPAT_ENGINES = {'NET_RENDER'}
+
+ @classmethod
+ def poll(cls, context):
+ netsettings = context.scene.network_render
+ return super().poll(context) and netsettings.mode == "RENDER_CLIENT"
+
+ def draw(self, context):
+ layout = self.layout
+
+ netsettings = context.scene.network_render
+
+ row = layout.row()
+ row.template_list(netsettings, "jobs", netsettings, "active_job_index", rows=2)
+
+ sub = row.column(align=True)
+ sub.operator("render.netclientstatus", icon='FILE_REFRESH', text="")
+ sub.operator("render.netclientcancel", icon='ZOOMOUT', text="")
+ sub.operator("render.netclientcancelall", icon='PANEL_CLOSE', text="")
+ sub.operator("render.netclientdownload", icon='RENDER_ANIMATION', text="")
+
+ if len(netrender.jobs) > netsettings.active_job_index >= 0:
+ layout.separator()
+
+ job = netrender.jobs[netsettings.active_job_index]
+
+ layout.label(text="Name: %s" % job.name)
+ layout.label(text="Length: %04i" % len(job))
+ layout.label(text="Done: %04i" % job.results[DONE])
+ layout.label(text="Error: %04i" % job.results[ERROR])
+
+import bl_ui.properties_render as properties_render
+class RENDER_PT_network_output(NeedValidAddress, NetRenderButtonsPanel, bpy.types.Panel):
+ bl_label = "Output"
+ COMPAT_ENGINES = {'NET_RENDER'}
+
+ @classmethod
+ def poll(cls, context):
+ netsettings = context.scene.network_render
+ return super().poll(context) and netsettings.mode == "RENDER_CLIENT"
+
+ draw = properties_render.RENDER_PT_output.draw
+
+
+class NetRenderSlave(bpy.types.PropertyGroup):
+ @classmethod
+ def register(NetRenderSlave):
+ from bpy.props import PointerProperty, StringProperty, BoolProperty, EnumProperty, IntProperty, CollectionProperty
+
+ NetRenderSlave.name = StringProperty(
+ name="Name of the slave",
+ description="",
+ maxlen = 64,
+ default = "")
+
+class NetRenderJob(bpy.types.PropertyGroup):
+ @classmethod
+ def register(NetRenderJob):
+ from bpy.props import PointerProperty, StringProperty, BoolProperty, EnumProperty, IntProperty, CollectionProperty
+
+ NetRenderJob.name = StringProperty(
+ name="Name of the job",
+ description="",
+ maxlen = 128,
+ default = "")
+
+class NetRenderSettings(bpy.types.PropertyGroup):
+ @classmethod
+ def register(NetRenderSettings):
+ from bpy.props import PointerProperty, StringProperty, BoolProperty, EnumProperty, IntProperty, CollectionProperty
+
+ def address_update_callback(self, context):
+ netsettings = context.scene.network_render
+ verify_address(netsettings, True)
+
+ NetRenderSettings.server_address = StringProperty(
+ name="Server address",
+ description="IP or name of the master render server",
+ maxlen = 128,
+ default = "[default]",
+ update = address_update_callback)
+
+ NetRenderSettings.server_port = IntProperty(
+ name="Server port",
+ description="port of the master render server",
+ default = 8000,
+ min=1,
+ max=65535)
+
+ NetRenderSettings.use_master_broadcast = BoolProperty(
+ name="Broadcast",
+ description="broadcast master server address on local network",
+ default = True)
+
+ NetRenderSettings.use_slave_clear = BoolProperty(
+ name="Clear on exit",
+ description="delete downloaded files on exit",
+ default = True)
+
+ NetRenderSettings.use_slave_thumb = BoolProperty(
+ name="Generate thumbnails",
+ description="Generate thumbnails on slaves instead of master",
+ default = False)
+
+ NetRenderSettings.use_slave_output_log = BoolProperty(
+ name="Output render log on console",
+ description="Output render text log to console as well as sending it to the master",
+ default = True)
+
+ NetRenderSettings.use_master_clear = BoolProperty(
+ name="Clear on exit",
+ description="delete saved files on exit",
+ default = False)
+
+ default_path = os.environ.get("TEMP")
+
+ if not default_path:
+ if os.name == 'nt':
+ default_path = "c:/tmp/"
+ else:
+ default_path = "/tmp/"
+ elif not default_path.endswith(os.sep):
+ default_path += os.sep
+
+ NetRenderSettings.path = StringProperty(
+ name="Path",
+ description="Path for temporary files",
+ maxlen = 128,
+ default = default_path,
+ subtype='FILE_PATH')
+
+ NetRenderSettings.job_type = EnumProperty(
+ items=(
+ ("JOB_BLENDER", "Blender", "Standard Blender Job"),
+ ("JOB_PROCESS", "Process", "Custom Process Job"),
+ ("JOB_VCS", "VCS", "Version Control System Managed Job"),
+ ),
+ name="Job Type",
+ description="Type of render job",
+ default="JOB_BLENDER")
+
+ NetRenderSettings.job_name = StringProperty(
+ name="Job name",
+ description="Name of the job",
+ maxlen = 128,
+ default = "[default]")
+
+ NetRenderSettings.job_category = StringProperty(
+ name="Job category",
+ description="Category of the job",
+ maxlen = 128,
+ default = "")
+
+ NetRenderSettings.chunks = IntProperty(
+ name="Chunks",
+ description="Number of frame to dispatch to each slave in one chunk",
+ default = 5,
+ min=1,
+ max=65535)
+
+ NetRenderSettings.priority = IntProperty(
+ name="Priority",
+ description="Priority of the job",
+ default = 1,
+ min=1,
+ max=10)
+
+ NetRenderSettings.vcs_wpath = StringProperty(
+ name="Working Copy",
+ description="Path of the local working copy",
+ maxlen = 1024,
+ default = "")
+
+ NetRenderSettings.vcs_rpath = StringProperty(
+ name="Remote Path",
+ description="Path of the server copy (protocol specific)",
+ maxlen = 1024,
+ default = "")
+
+ NetRenderSettings.vcs_revision = StringProperty(
+ name="Revision",
+ description="Revision for this job",
+ maxlen = 256,
+ default = "")
+
+ NetRenderSettings.vcs_system = StringProperty(
+ name="VCS",
+ description="Version Control System",
+ maxlen = 64,
+ default = "Subversion")
+
+ NetRenderSettings.job_id = StringProperty(
+ name="Network job id",
+ description="id of the last sent render job",
+ maxlen = 64,
+ default = "")
+
+ NetRenderSettings.active_slave_index = IntProperty(
+ name="Index of the active slave",
+ description="",
+ default = -1,
+ min= -1,
+ max=65535)
+
+ NetRenderSettings.active_blacklisted_slave_index = IntProperty(
+ name="Index of the active slave",
+ description="",
+ default = -1,
+ min= -1,
+ max=65535)
+
+ NetRenderSettings.active_job_index = IntProperty(
+ name="Index of the active job",
+ description="",
+ default = -1,
+ min= -1,
+ max=65535)
+
+ NetRenderSettings.mode = EnumProperty(
+ items=(
+ ("RENDER_CLIENT", "Client", "Act as render client"),
+ ("RENDER_MASTER", "Master", "Act as render master"),
+ ("RENDER_SLAVE", "Slave", "Act as render slave"),
+ ),
+ name="Network mode",
+ description="Mode of operation of this instance",
+ default="RENDER_CLIENT")
+
+ NetRenderSettings.slaves = CollectionProperty(type=NetRenderSlave, name="Slaves", description="")
+ NetRenderSettings.slaves_blacklist = CollectionProperty(type=NetRenderSlave, name="Slaves Blacklist", description="")
+ NetRenderSettings.jobs = CollectionProperty(type=NetRenderJob, name="Job List", description="")
+
+ bpy.types.Scene.network_render = PointerProperty(type=NetRenderSettings, name="Network Render", description="Network Render Settings")
+
+ @classmethod
+ def unregister(cls):
+ del bpy.types.Scene.network_render
diff --git a/netrender/utils.py b/netrender/utils.py
new file mode 100644
index 00000000..b0c8eb0b
--- /dev/null
+++ b/netrender/utils.py
@@ -0,0 +1,316 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+import sys, os
+import re
+import http, http.client, http.server, urllib, socket
+import subprocess, shutil, time, hashlib, zlib
+
+import netrender, netrender.model
+
+
+try:
+ import bpy
+except:
+ bpy = None
+
# Protocol version string derived from the add-on version; master and client
# exchange it (see clientVerifyVersion) to refuse mismatched releases.
VERSION = bytes(".".join((str(n) for n in netrender.bl_info["version"])), encoding='utf8')

# Jobs status
JOB_WAITING = 0 # before all data has been entered
JOB_PAUSED = 1 # paused by user
JOB_FINISHED = 2 # finished rendering
JOB_QUEUED = 3 # ready to be dispatched

# Human-readable labels for the job status codes above (used in the UI).
JOB_STATUS_TEXT = {
        JOB_WAITING: "Waiting",
        JOB_PAUSED: "Paused",
        JOB_FINISHED: "Finished",
        JOB_QUEUED: "Queued"
        }


# Frames status
QUEUED = 0
DISPATCHED = 1
DONE = 2
ERROR = 3

# Human-readable labels for the frame status codes above.
FRAME_STATUS_TEXT = {
        QUEUED: "Queued",
        DISPATCHED: "Dispatched",
        DONE: "Done",
        ERROR: "Error"
        }
+
class DirectoryContext:
    """Context manager that temporarily changes the working directory.

    On entry the current directory is remembered and the process chdirs
    into *path*; on exit the previous directory is always restored, even
    when the body raised.
    """
    def __init__(self, path):
        self.path = path

    def __enter__(self):
        # remember where we were so __exit__ can restore it
        self.curdir = os.path.abspath(os.curdir)
        os.chdir(self.path)
        # returning self (previously None) lets callers write
        # "with DirectoryContext(p) as ctx" — backward compatible
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        os.chdir(self.curdir)
        # implicit None return: exceptions are never suppressed
+
class BreakableIncrementedSleep:
    """Interruptible sleep with a growing back-off.

    Each call to sleep() waits the current timeout in one-second steps,
    polling break_fct after every step so the wait can be cut short, then
    grows the timeout by *increment* (clamped at *max_timeout*).  reset()
    drops the timeout back to *default_timeout*.
    """
    def __init__(self, increment, default_timeout, max_timeout, break_fct):
        self.increment = increment
        self.default = default_timeout
        self.max = max_timeout
        self.current = self.default
        self.break_fct = break_fct

    def reset(self):
        # back to the shortest wait
        self.current = self.default

    def increase(self):
        # grow the wait, never past the configured ceiling
        self.current = min(self.current + self.increment, self.max)

    def sleep(self):
        # sleep one second at a time so break_fct can interrupt the wait
        elapsed = 0
        while elapsed < self.current:
            time.sleep(1)
            elapsed += 1
            if self.break_fct():
                break

        # back off for the next round regardless of how we left the loop
        self.increase()
+
def responseStatus(conn):
    """Consume the pending response on *conn* and return its HTTP status.

    Any body advertised by the Content-Length header is read and discarded
    so the connection can be reused for the next request.
    """
    with conn.getresponse() as response:
        if int(response.getheader("content-length", "0")) > 0:
            response.read()
        return response.status
+
def reporting(report, message, errorType = None):
    """Deliver *message* through *report* or raise it as an exception.

    When *report* is a callable it is invoked as report(level, message),
    with level 'ERROR' if *errorType* is given and 'INFO' otherwise.
    Without a report function, *errorType* (an exception class) is raised
    when present.  Returns None on every non-raising path.
    """
    level = 'ERROR' if errorType else 'INFO'

    if report:
        report(level, message)
        return None

    if errorType:
        raise errorType(message)

    return None
+
def clientScan(report = None):
    """Listen for a master's UDP broadcast and return its (address, port).

    Blocks for up to 30 seconds on UDP port 8000 waiting for the master to
    announce its HTTP port.  On timeout the failure is forwarded through
    reporting() (raising IOError when no report callback is given) and the
    default ("", 8000) is returned.  The socket is now always closed —
    previously it leaked on every call.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        s.settimeout(30)

        s.bind(('', 8000))

        buf, address = s.recvfrom(64)

        address = address[0]
        # the master broadcasts its HTTP port as a decimal string
        port = int(str(buf, encoding='utf8'))

        reporting(report, "Master server found")

        return (address, port)
    except socket.timeout:
        reporting(report, "No master server on network", IOError)

        return ("", 8000) # return default values
    finally:
        s.close()
+
def clientConnection(address, port, report = None, scan = True, timeout = 5):
    """Open and verify an HTTP connection to the master at address:port.

    The special address "[default]" triggers a broadcast scan (clientScan)
    unless *scan* is False.  Returns a version-checked
    http.client.HTTPConnection, or None when no master can be found, the
    version check fails with a report callback, or connecting raises.
    """
    if address == "[default]":
#            calling the scan operator from python doesn't work here:
#            the scene isn't in the operator's context
#            if bpy:
#                bpy.ops.render.netclientscan()
#            else:
        if not scan:
            return None

        address, port = clientScan()
        if address == "":
            return None

    try:
        conn = http.client.HTTPConnection(address, port, timeout = timeout)

        if conn:
            if clientVerifyVersion(conn):
                return conn
            else:
                conn.close()
                # raises ValueError when no report callback was supplied
                reporting(report, "Incorrect master version", ValueError)
    except BaseException as err:
        # deliberately broad: connection errors are reported/printed,
        # never propagated to the caller
        if report:
            report('ERROR', str(err))
            return None
        else:
            print(err)
            return None
+
def clientVerifyVersion(conn):
    """Check that the master on *conn* runs the same netrender version.

    Issues GET /version and compares the raw response bytes against our
    VERSION constant.  Returns True on a match; on an HTTP error the
    connection is closed and False is returned.
    """
    conn.request("GET", "/version")
    response = conn.getresponse()

    if response.status != http.client.OK:
        conn.close()
        return False

    server_version = response.read()

    if server_version != VERSION:
        print("Incorrect server version!")
        print("expected", str(VERSION, encoding='utf8'), "received", str(server_version, encoding='utf8'))
        return False

    return True
+
def fileURL(job_id, file_index):
    """URL of dependency file number *file_index* of job *job_id* on the master."""
    return "/file_%s_%i" % (job_id, file_index)

def logURL(job_id, frame_number):
    """URL of the render log for *frame_number* of job *job_id*."""
    return "/log_%s_%i.log" % (job_id, frame_number)

def renderURL(job_id, frame_number):
    """URL of the rendered EXR for *frame_number* of job *job_id*."""
    return "/render_%s_%i.exr" % (job_id, frame_number)

def cancelURL(job_id):
    """URL used to cancel job *job_id* on the master."""
    return "/cancel_%s" % (job_id)
+
def hashFile(path):
    """Return the md5 hex digest of the file at *path*.

    The file is opened in binary mode; using a context manager guarantees
    the handle is closed even when reading raises (the original leaked it
    on error).
    """
    with open(path, "rb") as f:
        return hashData(f.read())
+
def hashData(data):
    """Return the md5 hex digest of *data* (a bytes object)."""
    return hashlib.md5(data).hexdigest()
+
+
def prefixPath(prefix_directory, file_path, prefix_path, force = False):
    """Remap *file_path* into *prefix_directory*.

    Relative paths are simply joined under *prefix_directory*.  Absolute
    paths (Unix or Windows style, checked both ways because jobs move
    between platforms) are kept as-is when they exist locally and *force*
    is False; otherwise the file is relocated under *prefix_directory*,
    preserving the part of the directory below *prefix_path* when the path
    starts with it.  Returns the resulting path.
    """
    if not file_path:
        # empty paths used to crash on file_path[0]; treat them as relative
        return os.path.join(prefix_directory, file_path)

    if (os.path.isabs(file_path) or
        len(file_path) >= 3 and (file_path[1:3] == ":/" or file_path[1:3] == ":\\") or # Windows absolute path don't count as absolute on unix, have to handle them myself
        file_path[0] == "/" or file_path[0] == "\\"): # and vice versa

        # if an absolute path, make sure path exists, if it doesn't, use relative local path
        full_path = file_path
        if force or not os.path.exists(full_path):
            p, n = os.path.split(os.path.normpath(full_path))

            if prefix_path and p.startswith(prefix_path):
                if len(prefix_path) < len(p):
                    directory = os.path.join(prefix_directory, p[len(prefix_path)+1:]) # +1 to remove separator
                    # NOTE(review): os.mkdir creates a single level only —
                    # assumes prefix_directory itself already exists
                    if not os.path.exists(directory):
                        os.mkdir(directory)
                else:
                    directory = prefix_directory
                full_path = os.path.join(directory, n)
            else:
                full_path = os.path.join(prefix_directory, n)
    else:
        full_path = os.path.join(prefix_directory, file_path)

    return full_path
+
def getResults(server_address, server_port, job_id, resolution_x, resolution_y, resolution_percentage, frame_ranges):
    """Fetch the rendered frames of *job_id* from the master.

    Saves a temporary copy of the current .blend, then spawns a background
    Blender that runs this very module with the "GetResults" action (see
    the __main__ block below) so the NET_RENDER engine pulls the frames.
    *frame_ranges* is a list of 1- or 2-tuples converted to -f or -s/-e/-a
    command line arguments.
    """
    if bpy.app.debug:
        print("=============================================")
        print("============= FETCHING RESULTS ==============")

    frame_arguments = []
    for r in frame_ranges:
        if len(r) == 2:
            frame_arguments.extend(["-s", str(r[0]), "-e", str(r[1]), "-a"])
        else:
            frame_arguments.extend(["-f", str(r[0])])

    filepath = os.path.join(bpy.app.tempdir, "netrender_temp.blend")
    bpy.ops.wm.save_as_mainfile(filepath=filepath, copy=True, check_existing=False)

    # sys.argv[0] is the blender binary; -P __file__ makes the child run this module
    arguments = [sys.argv[0], "-b", "-noaudio", filepath, "-o", bpy.path.abspath(bpy.context.scene.render.filepath), "-P", __file__] + frame_arguments + ["--", "GetResults", server_address, str(server_port), job_id, str(resolution_x), str(resolution_y), str(resolution_percentage)]
    if bpy.app.debug:
        print("Starting subprocess:")
        print(" ".join(arguments))

    process = subprocess.Popen(arguments, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # drain stdout while the child runs so the pipe cannot fill up and stall it
    while process.poll() is None:
        stdout = process.stdout.read(1024)
        if bpy.app.debug:
            print(str(stdout, encoding='utf-8'), end="")


    # read leftovers if needed
    stdout = process.stdout.read()
    if bpy.app.debug:
        print(str(stdout, encoding='utf-8'))

    os.remove(filepath)

    if bpy.app.debug:
        print("=============================================")
    return
+
def _getResults(server_address, server_port, job_id, resolution_x, resolution_y, resolution_percentage):
    """Configure the current scene to pull results for *job_id*.

    Runs inside the background Blender spawned by getResults(): points the
    netrender settings at the master and switches the render engine to
    NET_RENDER at the requested resolution.  All arguments arrive as
    command-line strings and are converted here.
    """
    render = bpy.context.scene.render

    netsettings = bpy.context.scene.network_render

    netsettings.server_address = server_address
    netsettings.server_port = int(server_port)
    netsettings.job_id = job_id

    render.engine = 'NET_RENDER'
    render.resolution_x = int(resolution_x)
    render.resolution_y = int(resolution_y)
    render.resolution_percentage = int(resolution_percentage)

    # NOTE(review): presumably disabled so the local output matches the
    # plain frames the slaves rendered — confirm against the engine code
    render.use_full_sample = False
    render.use_compositing = False
    render.use_border = False
+
+
def getFileInfo(filepath, infos):
    """Evaluate the expressions in *infos* inside *filepath*.

    Spawns a background Blender that opens the .blend and runs this module
    with the "FileInfo" action; the child prints each result as a
    "$"-prefixed repr which is eval'ed back here.  Returns the list of
    values in the order of *infos*.
    """
    process = subprocess.Popen([sys.argv[0], "-b", "-noaudio", filepath, "-P", __file__, "--", "FileInfo"] + infos, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    stdout = bytes()
    while process.poll() is None:
        stdout += process.stdout.read(1024)

    # read leftovers if needed
    stdout += process.stdout.read()

    stdout = str(stdout, encoding="utf8")

    # NOTE(review): eval of subprocess output — only safe because the child
    # is our own Blender printing reprs; never feed this untrusted files
    values = [eval(v[1:].strip()) for v in stdout.split("\n") if v.startswith("$")]

    return values
+
+
if __name__ == "__main__":
    # Helper entry point used when this module is run inside a background
    # Blender (see getResults and getFileInfo): arguments after "--" select
    # the action to perform.
    try:
        start = sys.argv.index("--") + 1
    except ValueError:
        start = 0
    action, *args = sys.argv[start:]

    if action == "FileInfo":
        # print "$"-prefixed reprs for the parent process to eval back
        for info in args:
            print("$", eval(info))
    elif action == "GetResults":
        _getResults(args[0], args[1], args[2], args[3], args[4], args[5])
diff --git a/netrender/versioning.py b/netrender/versioning.py
new file mode 100644
index 00000000..d4892e3e
--- /dev/null
+++ b/netrender/versioning.py
@@ -0,0 +1,108 @@
+import sys, os
+import re
+import subprocess
+
+from netrender.utils import *
+
class AbstractVCS:
    """Common interface for the version-control back ends used by netrender.

    Concrete systems (Subversion, Git) override update/revision/path; the
    base implementations are no-ops returning None.
    """

    name = "ABSTRACT VCS"

    def __init__(self):
        pass

    def update(self, info):
        """Bring a working copy to the state described by *info*.

        *info* is a model.VersioningInfo instance carrying the working
        path, remote path and revision; a missing working copy is created
        by a full checkout from the server.
        """
        return None

    def revision(self, path):
        """Return the current revision identifier of the working copy at *path*."""
        return None

    def path(self, path):
        """Return the remote (repository) path of the working copy at *path*."""
        return None
+
class Subversion(AbstractVCS):
    """Subversion back end: shells out to the svn/svnversion command line tools."""
    name = "Subversion"
    def __init__(self):
        super().__init__()
        # leading run of digits in svnversion output (e.g. "1234" or "1234M")
        self.version_exp = re.compile("([0-9]*)")
        # "URL: ..." line in the output of "svn info"
        self.path_exp = re.compile("URL: (.*)")

    def update(self, info):
        """Check out or update info.wpath to info.revision from info.rpath."""
        if not os.path.exists(info.wpath):
            # no working copy yet: fresh checkout pinned to the revision
            base, folder = os.path.split(info.wpath)

            with DirectoryContext(base):
                subprocess.call(["svn", "co", "%s@%s" % (info.rpath, str(info.revision)), folder])
        else:
            with DirectoryContext(info.wpath):
                # resolve any conflicts in favour of the repository copy
                subprocess.call(["svn", "up", "--accept", "theirs-full", "-r", str(info.revision)])

    def revision(self, path):
        """Return the revision of the working copy at *path*, or None if absent/unparsable."""
        if not os.path.exists(path):
            return

        with DirectoryContext(path):
            stdout = subprocess.check_output(["svnversion"])

            match = self.version_exp.match(str(stdout, encoding="utf-8"))

            if match:
                return match.group(1)

    def path(self, path):
        """Return the repository URL of the working copy at *path*, or None if absent."""
        if not os.path.exists(path):
            return

        with DirectoryContext(path):
            stdout = subprocess.check_output(["svn", "info"])

            match = self.path_exp.search(str(stdout, encoding="utf-8"))

            if match:
                return match.group(1)
+
class Git(AbstractVCS):
    """Git back end: shells out to the git command line tool."""
    name = "Git"
    def __init__(self):
        super().__init__()
        # first "commit <hash>" line in the output of "git show"
        self.version_exp = re.compile("^commit (.*)")

    def update(self, info):
        """Clone info.rpath if needed, then check out info.revision in info.wpath."""
        if not os.path.exists(info.wpath):
            base, folder = os.path.split(info.wpath)

            with DirectoryContext(base):
                subprocess.call(["git", "clone", "%s" % (info.rpath), folder])

        with DirectoryContext(info.wpath):
            subprocess.call(["git", "checkout", str(info.revision)])

    def revision(self, path):
        """Return the current commit hash of the repository at *path*, or None if absent."""
        if not os.path.exists(path):
            return

        with DirectoryContext(path):
            stdout = subprocess.check_output(["git", "show"])

            match = self.version_exp.search(str(stdout, encoding="utf-8"))

            if match:
                return match.group(1)

    def path(self, path):
        """Return the "remote path" for *path* — for git this is just the local path."""
        if not os.path.exists(path):
            return

        # find something that could somehow work for git (fun times)
        return path
+
# Registry of available VCS back ends, keyed by the user-facing name stored
# in the vcs_system setting.
SYSTEMS = {
        Subversion.name: Subversion(),
        Git.name: Git()
        }
diff --git a/object_add_chain.py b/object_add_chain.py
new file mode 100644
index 00000000..a173af27
--- /dev/null
+++ b/object_add_chain.py
@@ -0,0 +1,154 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
# Add-on metadata shown in Blender's add-on manager.
bl_info = {
    "name": "Add Chain",
    "author": "Brian Hinton (Nichod)",
    "version": (0,1),
    "blender": (2, 5, 7),
    "api": 35622,
    "location": "View3D > Add > Mesh",
    "description": "Adds Chain with curve guide for easy creation",
    "warning": "",
    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
        "Scripts/Object/Add_Chain",
    "tracker_url": "https://projects.blender.org/tracker/index.php?"\
        "func=detail&aid=22203",
    "category": "Object"}
+
+import bpy
+
def Add_Chain():
    """Build a chain rig in the current scene via bpy.ops.

    Creates an empty ("rot_link") used as the array rotation offset, a
    nurbs path ("deform") for the links to follow, and a torus ("chain")
    carrying Array, Subsurf and Curve modifiers wired to the other two
    objects.  Statement order matters: each bpy.ops call acts on the
    object made active by the previous steps.
    """

    ##Adds Empty to scene
    bpy.ops.object.add(type='EMPTY', view_align=False, enter_editmode=False, location=(0, 0, 0),
rotation=(0, 0, 0),)

    ##Changes name of Empty to rot_link adds variable emp
    emp = bpy.context.object
    emp.name = "rot_link"

    ##Rotate emp ~ 90 degrees
    emp.rotation_euler = [1.570796, 0, 0]

    ##Adds Curve Path to scene
    bpy.ops.curve.primitive_nurbs_path_add( view_align=False, enter_editmode=False, location=(0, 0, 0), rotation=(0, 0, 0),)

    ##Change Curve name to deform adds variable curv
    curv = bpy.context.object
    curv.name = "deform"

    ##Inserts Torus primitive
    bpy.ops.mesh.primitive_torus_add(
        major_radius=1, minor_radius=0.25, major_segments=12, minor_segments=4,
        use_abso=False, abso_major_rad=1, abso_minor_rad=0.5)

    ##Positions Torus primitive to center of scene
    bpy.context.active_object.location = [0, 0, 0]

    ##Reseting Torus rotation in case of 'Align to view' option enabled
    bpy.context.active_object.rotation_euler = [0, 0, 0]


    ##Changes Torus name to chain adds variable tor
    tor = bpy.context.object
    tor.name = "chain"

    ##Adds Array Modifier to tor
    bpy.ops.object.modifier_add(type='ARRAY')

    ##Adds subsurf modifier tor
    bpy.ops.object.modifier_add(type='SUBSURF')

    ##Smooths tor
    bpy.ops.object.shade_smooth()

    ##Select curv
    sce = bpy.context.scene
    sce.objects.active = curv

    ##Toggle into editmode
    bpy.ops.object.editmode_toggle()

    ##Translate curve object
    bpy.ops.transform.translate(
        value=(2, 0, 0), constraint_axis=(True, False, False),
        constraint_orientation='GLOBAL', mirror=False, proportional='DISABLED',
        proportional_edit_falloff='SMOOTH', proportional_size=1, snap=False,
        snap_target='CLOSEST', snap_point=(0, 0, 0), snap_align=False,
        snap_normal=(0, 0, 0), release_confirm=False)

    ##Toggle into objectmode
    bpy.ops.object.editmode_toggle()

    ##Select tor or chain
    sce.objects.active = tor

    ##Selects Array Modifier for editing
    array = tor.modifiers['Array']

    ##Change Array Modifier Parameters
    array.fit_type = ('FIT_CURVE')
    array.curve = curv
    array.offset_object = emp
    array.use_object_offset = True
    array.relative_offset_displace = [ 0.549, 0, 0 ]

    ##Add curve modifier
    bpy.ops.object.modifier_add(type='CURVE')

    ##Selects Curve Modifier for editing
    cur = tor.modifiers['Curve']

    ##Change Curve Modifier Parameters
    cur.object = curv
+
+#makes AddChain an operator
#makes AddChain an operator
class AddChain(bpy.types.Operator):
    '''Add a chain rig (torus link arrayed along a curve guide).'''
    bl_idname = "mesh.primitive_chain_add"
    bl_label = "Add Chain"
    bl_options = {'REGISTER', 'UNDO'}


    def execute(self, context):
        # all the work happens in the module-level helper
        Add_Chain()

        return {'FINISHED'}
+
+# Register the operator
+def menu_func(self, context):
+ self.layout.operator(AddChain.bl_idname, text="Chain", icon='PLUGIN')
+
+
def register():
    """Register the module's classes and extend the Add Mesh menu."""
    bpy.utils.register_module(__name__)

    # Add "Chain" menu to the "Add Mesh" menu.
    bpy.types.INFO_MT_mesh_add.append(menu_func)


def unregister():
    """Undo register(): unregister classes and remove the menu entry."""
    bpy.utils.unregister_module(__name__)

    # Remove "Chain" menu from the "Add Mesh" menu.
    bpy.types.INFO_MT_mesh_add.remove(menu_func)

if __name__ == "__main__":
    register()
diff --git a/object_animrenderbake.py b/object_animrenderbake.py
new file mode 100644
index 00000000..6a9efdc7
--- /dev/null
+++ b/object_animrenderbake.py
@@ -0,0 +1,189 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
# Add-on metadata shown in Blender's add-on manager.
bl_info = {
    "name": "Animated Render Baker",
    "author": "Janne Karhu (jahka)",
    "version": (1, 0),
    "blender": (2, 5, 7),
    "api": 35622,
    "location": "Properties > Render > Bake Panel",
    "description": "Renderbakes a series of frames",
    "category": "Object",
    'wiki_url': 'http://wiki.blender.org/index.php/Extensions:2.5/Py/' \
        'Scripts/Object/Animated_Render_Baker',
    'tracker_url': 'https://projects.blender.org/tracker/index.php?'\
        'func=detail&aid=24836'}
+
+import bpy
+from bpy.props import *
+
class OBJECT_OT_animrenderbake(bpy.types.Operator):
    """Bake the active object's image texture once per frame of the
    configured range, saving each result to a numbered copy of the
    texture's image file."""
    bl_label = "Animated Render Bake"
    bl_description= "Bake animated image textures of selected objects"
    bl_idname = "object.anim_bake_image"
    bl_register = True

    def framefile(self, orig, frame):
        '''
        Set frame number to file name image.png -> image0013.png
        '''
        dot = orig.rfind(".")
        if dot == -1:
            # no extension: append the frame number instead of splicing at
            # index -1, which used to corrupt the name (image -> imag0013e)
            return orig + ('%04d' % frame)
        return orig[:dot] + ('%04d' % frame) + orig[dot:]

    def invoke(self, context, event):
        import bpy
        import shutil

        scene = context.scene

        start = scene.animrenderbake_start
        end = scene.animrenderbake_end

        # Check for errors before starting
        if start >= end:
            self.report({'ERROR'}, "Start frame must be smaller than end frame")
            return {'CANCELLED'}

        selected = context.selected_objects

        # Only single object baking for now
        if scene.render.use_bake_selected_to_active:
            if len(selected) > 2:
                self.report({'ERROR'}, "Select only two objects for animated baking")
                return {'CANCELLED'}
        elif len(selected) > 1:
            self.report({'ERROR'}, "Select only one object for animated baking")
            return {'CANCELLED'}

        if context.active_object.type != 'MESH':
            self.report({'ERROR'}, "The baked object must be a mesh object")
            return {'CANCELLED'}

        img = None

        # find the image used by the render UV layer; it becomes the
        # template every frame is baked into
        for uvtex in context.active_object.data.uv_textures:
            if uvtex.active_render == True:
                for uvdata in uvtex.data:
                    if uvdata.image != None:
                        img = uvdata.image
                        break

        if img is None:
            self.report({'ERROR'}, "No valid image found to bake to")
            return {'CANCELLED'}

        if img.is_dirty:
            self.report({'ERROR'}, "Save the image that's used for baking before use")
            return {'CANCELLED'}

        # make sure we have an absolute path so that copying works for sure
        absp = bpy.path.abspath(img.filepath)

        print("Animated baking for frames " + str(start) + " - " + str(end))

        for cfra in range(start, end+1):
            print("Baking frame " + str(cfra))

            # update scene to new frame and bake to template image
            scene.frame_set(cfra)
            ret = bpy.ops.object.bake_image()
            if 'CANCELLED' in ret:
                return {'CANCELLED'}

            # the api doesn't expose img.save_as(), so save the template
            # image in place and copy it to a frame-numbered file
            img.save()
            shutil.copyfile(absp, self.framefile(absp, cfra))

            print("Saved " + self.framefile(absp, cfra))
        print("Baking done!")

        return{'FINISHED'}
+
+# modifier copy of original bake panel draw function
+def draw_animrenderbake(self, context):
+ layout = self.layout
+
+ rd = context.scene.render
+
+ row = layout.row()
+ row.operator("object.bake_image", icon='RENDER_STILL')
+
+ #----------- beginning of modifications ----------------
+ row.operator("object.anim_bake_image", text="Animated Bake", icon="RENDER_ANIMATION")
+ row = layout.row(align=True)
+ row.prop(context.scene, "animrenderbake_start")
+ row.prop(context.scene, "animrenderbake_end")
+ #-------------- end of modifications ---------------------
+
+ layout.prop(rd, "bake_type")
+
+ if rd.bake_type == 'NORMALS':
+ layout.prop(rd, "bake_normal_space")
+ elif rd.bake_type in ('DISPLACEMENT', 'AO'):
+ layout.prop(rd, "use_bake_normalize")
+
+ # col.prop(rd, "bake_aa_mode")
+ # col.prop(rd, "use_bake_antialiasing")
+
+ layout.separator()
+
+ split = layout.split()
+
+ col = split.column()
+ col.prop(rd, "use_bake_clear")
+ col.prop(rd, "bake_margin")
+ col.prop(rd, "bake_quad_split", text="Split")
+
+ col = split.column()
+ col.prop(rd, "use_bake_selected_to_active")
+ sub = col.column()
+ sub.active = rd.use_bake_selected_to_active
+ sub.prop(rd, "bake_distance")
+ sub.prop(rd, "bake_bias")
+
def register():
    """Register classes, add the frame-range scene properties and swap the
    Bake panel's draw function for the extended one."""
    bpy.utils.register_module(__name__)

    bpy.types.Scene.animrenderbake_start = IntProperty(
                    name="Start",
                    description="Start frame of the animated bake",
                    default=1)

    bpy.types.Scene.animrenderbake_end = IntProperty(
                    name="End",
                    description="End frame of the animated bake",
                    default=250)

    # replace original panel draw function with modified one; the original
    # is stashed on the panel class so unregister() can restore it
    panel = bpy.types.RENDER_PT_bake
    panel.old_draw = panel.draw
    panel.draw = draw_animrenderbake
+
def unregister():
    """Undo register(): restore the original Bake panel draw function and
    delete the scene properties."""
    bpy.utils.unregister_module(__name__)

    # restore original panel draw function
    bpy.types.RENDER_PT_bake.draw = bpy.types.RENDER_PT_bake.old_draw
    del bpy.types.RENDER_PT_bake.old_draw
    del bpy.types.Scene.animrenderbake_start
    del bpy.types.Scene.animrenderbake_end

if __name__ == "__main__":
    register()
diff --git a/object_cloud_gen.py b/object_cloud_gen.py
new file mode 100644
index 00000000..954e5cf1
--- /dev/null
+++ b/object_cloud_gen.py
@@ -0,0 +1,753 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
# Add-on metadata shown in Blender's add-on manager.
bl_info = {
    "name": "Cloud Generator",
    "author": "Nick Keeline(nrk)",
    "version": (1,0),
    "blender": (2, 5, 7),
    "api": 35853,
    "location": "View3D > Tool Shelf > Cloud Generator Panel",
    "description": "Creates Volumetric Clouds",
    "warning": "",
    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
        "Scripts/Object/Cloud_Gen",
    "tracker_url": "https://projects.blender.org/tracker/index.php?"\
        "func=detail&aid=22015",
    "category": "Object"}
+
+"""
+Place this file in the .blender/scripts/addons dir
+You have to activated the script in the "Add-Ons" tab (user preferences).
+The functionality can then be accessed via the Tool shelf when objects
+are selected
+
+Rev 0 initial release
+Rev 0.1 added scene to create_mesh per python api change.
+Rev 0.2 Added Point Density turbulence and fixed degenerate
+Rev 0.3 Fixed bug in degenerate
+Rev 0.4 updated for api change/changed to new apply modifier technique
+Rev 0.5 made particle count equation with radius so radius increases with cloud volume
+Rev 0.6 added poll function to operator, fixing crash with no selected objects
+Rev 0.7 added particles option and Type of Cloud wanted selector
+Rev 0.8 fixed particles by commenting out add cloud texture force field
+Rev 0.9 Added smoothing and explosion material
+Rev 1.0 Added ability to convert object with particle system to cloud and auto resizing of bound box
+"""
+
+import bpy
+import mathutils
+from math import *
+from bpy.props import *
+
+
+# This routine takes an object and deletes all of the geometry in it
+# and adds a bounding box to it.
+# It will add or subtract the bound box size by the variable sizeDifference.
+
def getMeshandPutinEditMode(scene, object):
    """Make *object* the sole selected/active object, enter Edit Mode on it
    and return its mesh data."""

    # Go into Object Mode
    bpy.ops.object.mode_set(mode='OBJECT')

    # Deselect All
    bpy.ops.object.select_all(action='DESELECT')

    # Select the object
    object.select = True
    scene.objects.active = object

    # Go into Edit Mode
    bpy.ops.object.mode_set(mode='EDIT')

    return object.data
+
def maxAndMinVerts(scene, object):
    """Return [maxVert, minVert]: the per-axis bounds of *object*'s vertices
    in local coordinates (leaves the object in Edit Mode)."""

    mesh = getMeshandPutinEditMode(scene, object)
    verts = mesh.vertices

    #Set the max and min verts to the first vertex on the list
    maxVert = [verts[0].co[0], verts[0].co[1], verts[0].co[2]]
    minVert = [verts[0].co[0], verts[0].co[1], verts[0].co[2]]

    #Create Max and Min Vertex array for the outer corners of the box
    for vert in verts:
        #Max vertex
        if vert.co[0] > maxVert[0]:
            maxVert[0] = vert.co[0]
        if vert.co[1] > maxVert[1]:
            maxVert[1] = vert.co[1]
        if vert.co[2] > maxVert[2]:
            maxVert[2] = vert.co[2]

        #Min Vertex
        if vert.co[0] < minVert[0]:
            minVert[0] = vert.co[0]
        if vert.co[1] < minVert[1]:
            minVert[1] = vert.co[1]
        if vert.co[2] < minVert[2]:
            minVert[2] = vert.co[2]

    return [maxVert, minVert]
+
def makeObjectIntoBoundBox(scene, object, sizeDifference, takeFromObject):
    """Replace *object*'s geometry with a box bounding *takeFromObject*.

    The box is grown (or shrunk, for negative values) by *sizeDifference*
    on every axis.  *takeFromObject* may be the same object as *object*.
    """

    #Let's find the max and min of the reference object, it can be the same as the destination object
    [maxVert, minVert] = maxAndMinVerts(scene, takeFromObject)

    #get objects mesh
    mesh = getMeshandPutinEditMode(scene, object)

    #Add the size difference to the max size of the box
    maxVert[0] = maxVert[0] + sizeDifference
    maxVert[1] = maxVert[1] + sizeDifference
    maxVert[2] = maxVert[2] + sizeDifference

    #subtract the size difference to the min size of the box
    minVert[0] = minVert[0] - sizeDifference
    minVert[1] = minVert[1] - sizeDifference
    minVert[2] = minVert[2] - sizeDifference

    #Create arrays of verts and faces to be added to the mesh
    addVerts = []

    #X high loop
    addVerts.append([maxVert[0], maxVert[1], maxVert[2]])
    addVerts.append([maxVert[0], maxVert[1], minVert[2]])
    addVerts.append([maxVert[0], minVert[1], minVert[2]])
    addVerts.append([maxVert[0], minVert[1], maxVert[2]])

    #x low loop
    addVerts.append([minVert[0], maxVert[1], maxVert[2]])
    addVerts.append([minVert[0], maxVert[1], minVert[2]])
    addVerts.append([minVert[0], minVert[1], minVert[2]])
    addVerts.append([minVert[0], minVert[1], maxVert[2]])

    # Make the faces of the bounding box.
    addFaces = []

    # Draw a box on paper and number the vertices.
    # Use right hand rule to come up with number orders for faces on
    # the box (with normals pointing out).
    addFaces.append([0, 3, 2, 1])
    addFaces.append([4, 5, 6, 7])
    addFaces.append([0, 1, 5, 4])
    addFaces.append([1, 2, 6, 5])
    addFaces.append([2, 3, 7, 6])
    addFaces.append([0, 4, 7, 3])

    # Delete all geometry from the object.
    bpy.ops.mesh.select_all(action='SELECT')
    bpy.ops.mesh.delete(type='ALL')

    # Must be in object mode for from_pydata to work
    bpy.ops.object.mode_set(mode='OBJECT')

    # Add the mesh data.
    mesh.from_pydata(addVerts, [], addFaces)

    # Update the mesh
    mesh.update()
+
def applyScaleRotLoc(scene, obj):
    """Make *obj* the only selected object and apply its transform
    (location, rotation and scale) into the mesh data."""
    # Deselect All
    bpy.ops.object.select_all(action='DESELECT')

    # Select the object
    obj.select = True
    scene.objects.active = obj

    bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)
+
def totallyDeleteObject(scene, obj):
    """Unlink *obj* from *scene* and remove its datablock from the file."""
    scene.objects.unlink(obj)
    bpy.data.objects.remove(obj)
+
+
def makeParent(parentobj, childobj, scene):
    """Parent *childobj* to *parentobj*, applying both transforms first so
    the parenting does not shift either object."""

    applyScaleRotLoc(scene, parentobj)
    applyScaleRotLoc(scene, childobj)
    childobj.parent = parentobj
+
+
def addNewObject(scene, name, copyobj):
    """Add a new object named *name* to *scene* carrying a copy of
    *copyobj*'s mesh data, scale and location; return the new object."""

    # Create new mesh
    mesh = bpy.data.meshes.new(name)

    # Create a new object.
    ob_new = bpy.data.objects.new(name, mesh)
    # NOTE(review): ob_new.data is immediately replaced by a copy of the
    # source mesh, so the freshly created 'mesh' datablock goes unused
    tempme = copyobj.data
    ob_new.data = tempme.copy()
    ob_new.scale = copyobj.scale
    ob_new.location = copyobj.location

    # Link new object to the given scene and select it.
    scene.objects.link(ob_new)
    ob_new.select = True

    return ob_new
+
def getpdensitytexture(object):
    """Return the first point-density texture on *object* that samples a
    particle system, or None when there is none.

    Walks every material slot and its texture slots.  Empty texture slots
    are None entries; the old check compared the slot against the string
    'NoneType', which is always true and crashed on the first empty slot.
    """
    for mslot in object.material_slots:
        mat = mslot.material
        for tslot in mat.texture_slots:
            if tslot is not None:
                tex = tslot.texture
                # a slot can exist with no texture assigned; skip those too
                if tex is not None and tex.type == 'POINT_DENSITY':
                    if tex.point_density.point_source == 'PARTICLE_SYSTEM':
                        return tex
+
def removeParticleSystemFromObj(scene, object):
    """Remove the active particle system from *object*, leaving nothing
    selected afterwards."""

    # Deselect All
    bpy.ops.object.select_all(action='DESELECT')

    # Select the object.
    object.select = True
    scene.objects.active = object

    bpy.ops.object.particle_system_remove()

    # Deselect All
    bpy.ops.object.select_all(action='DESELECT')
+
def convertParticlesToMesh(scene, particlesobj, destobj, replacemesh):
    """Write the locations of *particlesobj*'s particles into *destobj*'s
    mesh as loose vertices; when *replacemesh* is True the destination
    geometry is wiped first."""

    # Select the Destination object.
    destobj.select = True
    scene.objects.active = destobj

    #Go to Edit Mode
    bpy.ops.object.mode_set(mode='EDIT',toggle=False)

    #Delete everything in mesh if replace true
    if replacemesh:
        bpy.ops.mesh.select_all(action='SELECT')
        bpy.ops.mesh.delete(type='ALL')

    meshPnts = destobj.data

    listCloudParticles = particlesobj.particles

    listMeshPnts = []
    for pTicle in listCloudParticles:
        listMeshPnts.append(pTicle.location)

    # Must be in object mode for from_pydata to work.
    bpy.ops.object.mode_set(mode='OBJECT')

    # Add in the mesh data.
    meshPnts.from_pydata(listMeshPnts, [], [])

    # Update the mesh.
    meshPnts.update()
+
def combineObjects(scene, combined, listobjs):
    """Boolean-union every object in *listobjs* into *combined*.

    scene is the current scene; each source object is merged through a
    temporary Boolean modifier that is applied (and thereby removed)
    immediately.
    """

    # Deselect All
    bpy.ops.object.select_all(action='DESELECT')

    # Select the new object.
    combined.select = True
    scene.objects.active = combined

    # Add data
    if (len(listobjs) > 0):
        for i in listobjs:
            # Add a modifier
            bpy.ops.object.modifier_add(type='BOOLEAN')

            # NOTE(review): indexes modifiers[0] — assumes 'combined' carries
            # no other modifiers, which holds because each Boolean is applied
            # (removed) at the end of the iteration; confirm for new callers
            union = combined.modifiers
            union[0].name = "AddEmUp"
            union[0].object = i
            union[0].operation = 'UNION'

            # Apply modifier
            bpy.ops.object.modifier_apply(apply_as='DATA', modifier=union[0].name)
+
# Returns the action we want to take
def getActionToDo(obj):
    """Classify *obj* to decide which cloud-generator action applies.

    Returns one of 'NOT_OBJ_DO_NOTHING' (no object / not a mesh),
    'DEGENERATE', 'CLOUD_CONVERT_TO_MESH', 'CLOUD_DO_NOTHING' (tagged
    cloud members), 'GENERATE' (plain mesh), or None when the
    "CloudMember" tag exists but is None (preserved from the original's
    implicit fall-through).
    """
    # no object or not a mesh: nothing to do.  This also covers obj None,
    # which made the original's separate 'NO_SELECTION_DO_NOTHING' branch
    # unreachable — that branch and the trailing 'DO_NOTHING' else (dead
    # because obj.type == 'MESH' is guaranteed here) have been removed.
    if not obj or obj.type != 'MESH':
        return 'NOT_OBJ_DO_NOTHING'

    if "CloudMember" in obj:
        tag = obj["CloudMember"]
        if tag is not None:
            if tag == "MainObj":
                return 'DEGENERATE'
            if tag == "CreatedObj" and len(obj.particle_systems) > 0:
                return 'CLOUD_CONVERT_TO_MESH'
            return 'CLOUD_DO_NOTHING'
        # tag present but None: same implicit None return as before
        return None

    # plain untagged mesh: candidate for cloud generation
    return 'GENERATE'
+
class VIEW3D_PT_tools_cloud(bpy.types.Panel):
    """Tool-shelf panel: shows the generate/degenerate/convert button and
    options matching the active object's state (see getActionToDo)."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'TOOLS'

    bl_label = "Cloud Generator"
    bl_context = "objectmode"

    def draw(self, context):
        active_obj = context.active_object
        layout = self.layout
        col = layout.column(align=True)

        # one operator serves all actions; only the button label changes
        WhatToDo = getActionToDo(active_obj)

        if WhatToDo == 'DEGENERATE':

            col.operator("cloud.generate_cloud", text="DeGenerate")

        elif WhatToDo == 'CLOUD_CONVERT_TO_MESH':

            col.operator("cloud.generate_cloud", text="Convert to Mesh")

        elif WhatToDo == 'NO_SELECTION_DO_NOTHING':

            col.label(text="Select one or more")
            col.label(text="objects to generate")
            col.label(text="a cloud.")

        elif WhatToDo == 'CLOUD_DO_NOTHING':

            col.label(text="Must select")
            col.label(text="bound box")

        elif WhatToDo == 'GENERATE':

            col.operator("cloud.generate_cloud", text="Generate Cloud")

            col.prop(context.scene, "cloud_type")
            col.prop(context.scene, "cloudparticles")
            col.prop(context.scene, "cloudsmoothing")
        else:
            col.label(text="Select one or more")
            col.label(text="objects to generate")
            col.label(text="a cloud.")
+
+
class GenerateCloud(bpy.types.Operator):
    # One operator drives the whole panel: it generates a cloud, degenerates
    # it back to the definition boxes, or converts a particle cloud to a
    # point mesh, depending on what getActionToDo() reports.
    # NOTE(review): relies on helpers defined elsewhere in this file
    # (addNewObject, makeParent, applyScaleRotLoc, totallyDeleteObject,
    # convertParticlesToMesh, removeParticleSystemFromObj,
    # getpdensitytexture, makeObjectIntoBoundBox, combineObjects).
    bl_idname = "cloud.generate_cloud"
    bl_label = "Generate Cloud"
    bl_description = "Create a Cloud,Undo Cloud, or convert to Mesh Cloud depending on selection"
    bl_register = True
    bl_undo = True

    @classmethod
    def poll(cls, context):
        # Only meshes can become (or already be) clouds.
        if not context.active_object:
            return False
        else:
            return (context.active_object.type=='MESH')

    def execute(self, context):
        """Generate, degenerate, or convert a cloud for the active object."""
        # Make variable that is the current .blend file main data blocks
        blend_data = context.blend_data

        # Make variable that is the active object selected by user
        active_object = context.active_object

        # Make variable scene that is current scene
        scene = context.scene

        # Parameters the user may want to change:
        # Number of points this number is multiplied by the volume to get
        # the number of points the scripts will put in the volume.
        numOfPoints = 1.0
        maxNumOfPoints = 100000
        maxPointDensityRadius = 1.5
        scattering = 2.5
        pointDensityRadiusFactor = 1.0
        densityScale = 1.5

        # What should we do?
        WhatToDo = getActionToDo(active_object)

        if WhatToDo == 'DEGENERATE':
            # Degenerate Cloud: undo a previous generate, leaving only the
            # original definition boxes selected and visible again.
            mainObj = active_object

            cloudMembers = active_object.children

            createdObjects = []
            definitionObjects = []
            for member in cloudMembers:
                applyScaleRotLoc(scene, member)
                if (member["CloudMember"] == "CreatedObj"):
                    createdObjects.append(member)
                else:
                    definitionObjects.append(member)

            for defObj in definitionObjects:
                # Delete cloudmember data from objects
                if "CloudMember" in defObj:
                    del(defObj["CloudMember"])

            for createdObj in createdObjects:
                totallyDeleteObject(scene, createdObj)

            # Delete the blend_data object
            totallyDeleteObject(scene, mainObj)

            # Select all of the left over boxes so people can immediately
            # press generate again if they want.
            for eachMember in definitionObjects:
                eachMember.draw_type = 'SOLID'
                eachMember.select = True
                eachMember.hide_render = False

        elif WhatToDo == 'CLOUD_CONVERT_TO_MESH':
            # Freeze the particle cloud into a static point mesh.
            cloudParticles = active_object.particle_systems.active

            bounds = active_object.parent

            ###############Create CloudPnts for putting points in#########
            # Create a new object cloudPnts
            cloudPnts = addNewObject(scene, "CloudPoints", bounds)
            cloudPnts["CloudMember"] = "CreatedObj"
            cloudPnts.draw_type = 'WIRE'
            cloudPnts.hide_render = True

            makeParent(bounds, cloudPnts, scene)

            convertParticlesToMesh(scene, cloudParticles, cloudPnts, True)

            removeParticleSystemFromObj(scene, active_object)

            # Re-point the density texture at the baked point mesh.
            pDensity = getpdensitytexture(bounds)
            pDensity.point_density.point_source = 'OBJECT'
            pDensity.point_density.object = cloudPnts

            # Let's resize the bound box to be more accurate.
            how_much_bigger = pDensity.point_density.radius
            makeObjectIntoBoundBox(scene, bounds, how_much_bigger, cloudPnts)

        else:
            # Generate Cloud

            ###############Create Combined Object bounds##################
            # Make a list of all Selected objects.
            selectedObjects = bpy.context.selected_objects
            if not selectedObjects:
                selectedObjects = [bpy.context.active_object]

            # Create a new object bounds
            bounds = addNewObject(scene,
                "CloudBounds",
                selectedObjects[0])

            bounds.draw_type = 'BOUNDS'
            bounds.hide_render = False

            # Just add a Definition Property designating this
            # as the blend_data object.
            bounds["CloudMember"] = "MainObj"

            # Since we used iteration 0 to copy with object we
            # delete it off the list.
            firstObject = selectedObjects[0]
            del selectedObjects[0]

            # Apply location Rotation and Scale to all objects involved.
            applyScaleRotLoc(scene, bounds)
            for each in selectedObjects:
                applyScaleRotLoc(scene, each)

            # Let's combine all of them together.
            combineObjects(scene, bounds, selectedObjects)

            # Let's add some property info to the objects.
            # NOTE(review): "DefinitioinObj" is misspelled but used
            # consistently as a tag throughout — do not "fix" one site only.
            for selObj in selectedObjects:
                selObj["CloudMember"] = "DefinitioinObj"
                selObj.name = "DefinitioinObj"
                selObj.draw_type = 'WIRE'
                selObj.hide_render = True
                makeParent(bounds, selObj, scene)

            # Do the same to the 1. object since it is no longer in list.
            firstObject["CloudMember"] = "DefinitioinObj"
            firstObject.name = "DefinitioinObj"
            firstObject.draw_type = 'WIRE'
            firstObject.hide_render = True
            makeParent(bounds, firstObject, scene)

            ###############Create Cloud for putting Cloud Mesh############
            # Create a new object cloud.
            cloud = addNewObject(scene, "CloudMesh", bounds)
            cloud["CloudMember"] = "CreatedObj"
            cloud.draw_type = 'WIRE'
            cloud.hide_render = True

            makeParent(bounds, cloud, scene)

            bpy.ops.object.editmode_toggle()
            bpy.ops.mesh.select_all(action='SELECT')

            # Don't subdivide object or smooth if smoothing box not checked.
            if scene.cloudsmoothing:
                bpy.ops.mesh.subdivide(number_cuts=2, fractal=0, smoothness=1)
                # bpy.ops.object.transform_apply(location=True)
                bpy.ops.mesh.vertices_smooth(repeat=20)
                bpy.ops.mesh.tris_convert_to_quads()
                bpy.ops.mesh.faces_shade_smooth()
            bpy.ops.object.editmode_toggle()

            ###############Create Particles in cloud obj##################

            # Set time to 0.
            scene.frame_current = 0

            # Add a new particle system.
            bpy.ops.object.particle_system_add()

            # Particle settings setting it up!
            cloudParticles = cloud.particle_systems.active
            cloudParticles.name = "CloudParticles"
            cloudParticles.settings.frame_start = 0
            cloudParticles.settings.frame_end = 0
            cloudParticles.settings.emit_from = 'VOLUME'
            cloudParticles.settings.lifetime = scene.frame_end
            cloudParticles.settings.draw_method = 'DOT'
            cloudParticles.settings.render_type = 'NONE'
            cloudParticles.settings.distribution = 'RAND'
            cloudParticles.settings.physics_type = 'NEWTON'
            cloudParticles.settings.normal_factor = 0

            # Gravity does not effect the particle system
            eWeights = cloudParticles.settings.effector_weights
            eWeights.gravity = 0

            ####################Create Volume Material####################
            # Deselect All
            bpy.ops.object.select_all(action='DESELECT')

            # Select the object.
            bounds.select = True
            scene.objects.active = bounds

            # Turn bounds object into a box. Use itself as a reference.
            makeObjectIntoBoundBox(scene, bounds, 1.0, bounds)

            # Delete all material slots in bounds object.
            for i in range(len(bounds.material_slots)):
                bounds.active_material_index = i - 1
                bpy.ops.object.material_slot_remove()

            # Add a new material.
            cloudMaterial = blend_data.materials.new("CloudMaterial")
            bpy.ops.object.material_slot_add()
            bounds.material_slots[0].material = cloudMaterial

            # Set Up the Cloud Material
            cloudMaterial.name = "CloudMaterial"
            cloudMaterial.type = 'VOLUME'
            mVolume = cloudMaterial.volume
            mVolume.scattering = scattering
            mVolume.density = 0
            mVolume.density_scale = densityScale
            mVolume.transmission_color = [3, 3, 3]
            mVolume.step_size = 0.1
            mVolume.use_light_cache = True
            mVolume.cache_resolution = 45

            # Add a texture
            vMaterialTextureSlots = cloudMaterial.texture_slots
            cloudtex = blend_data.textures.new("CloudTex", type='CLOUDS')
            cloudtex.noise_type = 'HARD_NOISE'
            cloudtex.noise_scale = 2
            mtex = cloudMaterial.texture_slots.add()
            mtex.texture = cloudtex
            mtex.texture_coords = 'ORCO'
            mtex.use_map_color_diffuse = True

            # Set time
            scene.frame_current = 1

            # Add a Point Density texture
            pDensity = blend_data.textures.new("CloudPointDensity", 'POINT_DENSITY')

            mtex = cloudMaterial.texture_slots.add()
            mtex.texture = pDensity
            mtex.texture_coords = 'GLOBAL'
            mtex.use_map_density = True
            mtex.use_rgb_to_intensity = True
            # NOTE(review): texture_coords is assigned 'GLOBAL' twice;
            # the second assignment is redundant but harmless.
            mtex.texture_coords = 'GLOBAL'

            pDensity.point_density.vertex_cache_space = 'WORLD_SPACE'
            pDensity.point_density.use_turbulence = True
            pDensity.point_density.noise_basis = 'VORONOI_F2'
            pDensity.point_density.turbulence_depth = 3

            pDensity.use_color_ramp = True
            pRamp = pDensity.color_ramp
            #pRamp.use_interpolation = 'LINEAR'
            pRampElements = pRamp.elements
            #pRampElements[1].position = .9
            #pRampElements[1].color = [.18,.18,.18,.8]
            bpy.ops.texture.slot_move(type='UP')


            # Estimate the number of particles for the size of bounds.
            # The magic constants below are an empirical fit of particle
            # count to bounding-box volume.
            volumeBoundBox = (bounds.dimensions[0] * bounds.dimensions[1]* bounds.dimensions[2])
            numParticles = int((2.4462 * volumeBoundBox + 430.4) * numOfPoints)
            if numParticles > maxNumOfPoints:
                numParticles = maxNumOfPoints
            if numParticles < 10000:
                numParticles = int(numParticles + 15 * volumeBoundBox)
            print(numParticles)

            # Set the number of particles according to the volume
            # of bounds.
            cloudParticles.settings.count = numParticles

            pDensity.point_density.radius = (.00013764 * volumeBoundBox + .3989) * pointDensityRadiusFactor

            if pDensity.point_density.radius > maxPointDensityRadius:
                pDensity.point_density.radius = maxPointDensityRadius

            # Set time to 1.
            scene.frame_current = 1

            if not scene.cloudparticles:
                ###############Create CloudPnts for putting points in#########
                # Create a new object cloudPnts
                cloudPnts = addNewObject(scene, "CloudPoints", bounds)
                cloudPnts["CloudMember"] = "CreatedObj"
                cloudPnts.draw_type = 'WIRE'
                cloudPnts.hide_render = True

                makeParent(bounds, cloudPnts, scene)

                convertParticlesToMesh(scene, cloudParticles, cloudPnts, True)

                # Add a modifier.
                bpy.ops.object.modifier_add(type='DISPLACE')

                cldPntsModifiers = cloudPnts.modifiers
                cldPntsModifiers[0].name = "CloudPnts"
                cldPntsModifiers[0].texture = cloudtex
                cldPntsModifiers[0].texture_coords = 'OBJECT'
                cldPntsModifiers[0].texture_coords_object = cloud
                cldPntsModifiers[0].strength = -1.4

                # Apply modifier
                bpy.ops.object.modifier_apply(apply_as='DATA', modifier=cldPntsModifiers[0].name)

                pDensity.point_density.point_source = 'OBJECT'
                pDensity.point_density.object = cloudPnts

                removeParticleSystemFromObj(scene, cloud)

            else:
                # Keep the live particle system as the density source.
                pDensity.point_density.point_source = 'PARTICLE_SYSTEM'
                pDensity.point_density.object = cloud
                pDensity.point_density.particle_system = cloudParticles

            # Per-type material/texture tweaks ('0' Stratus is the default
            # and needs no extra changes).
            if scene.cloud_type == '1':    # Cumulous
                print("Cumulous")
                mVolume.density_scale = 2.22
                pDensity.point_density.turbulence_depth = 10
                pDensity.point_density.turbulence_strength = 6.3
                pDensity.point_density.turbulence_scale = 2.9
                pRampElements[1].position = .606
                pDensity.point_density.radius = pDensity.point_density.radius + .1

            elif scene.cloud_type == '2':    # Cirrus
                print("Cirrus")
                pDensity.point_density.turbulence_strength = 22
                mVolume.transmission_color = [3.5, 3.5, 3.5]
                mVolume.scattering = .13

            elif scene.cloud_type == '3':    # Explosion
                mVolume.emission = 1.42
                mtex.use_rgb_to_intensity = False
                pRampElements[0].position = .825
                pRampElements[0].color = [.119,.119,.119,1]
                pRampElements[1].position = .049
                pRampElements[1].color = [1.0,1.0,1.0,0]
                pDensity.point_density.turbulence_strength = 1.5
                pRampElement1 = pRampElements.new(.452)
                pRampElement1.color = [.814,.112,0,1]
                pRampElement2 = pRampElements.new(.234)
                pRampElement2.color = [.814,.310,.002,1]
                pRampElement3 = pRampElements.new(.669)
                pRampElement3.color = [0,.0,.040,1]

            # Select the object.
            bounds.select = True
            scene.objects.active = bounds

            # Let's resize the bound box to be more accurate.
            how_much_bigger = pDensity.point_density.radius + .1

            # If it's a particle cloud use cloud mesh if otherwise use point mesh
            if not scene.cloudparticles:
                makeObjectIntoBoundBox(scene, bounds, how_much_bigger, cloudPnts)
            else:
                makeObjectIntoBoundBox(scene, bounds, how_much_bigger, cloud)

        return {'FINISHED'}
+
+
def register():
    """Register the add-on's classes and its per-scene settings."""
    bpy.utils.register_module(__name__)

    scene_type = bpy.types.Scene

    # Per-scene options shown in the Cloud Generator panel.
    scene_type.cloudparticles = BoolProperty(
        name="Particles",
        description="Generate Cloud as Particle System",
        default=False)

    scene_type.cloudsmoothing = BoolProperty(
        name="Smoothing",
        description="Smooth Resultant Geometry From Gen Cloud Operation",
        default=True)

    cloud_items = [
        ("0", "Stratus", "Generate Stratus_foggy Cloud"),
        ("1", "Cumulous", "Generate Cumulous_puffy Cloud"),
        ("2", "Cirrus", "Generate Cirrus_wispy Cloud"),
        ("3", "Explosion", "Generate Explosion"),
    ]
    scene_type.cloud_type = EnumProperty(
        name="Type",
        description="Select the type of cloud to create with material settings",
        items=cloud_items,
        default='0')
+
+
def unregister():
    """Unregister classes and remove the Scene properties added in register().

    Fix: register() defines three Scene properties (cloudparticles,
    cloudsmoothing, cloud_type) but the original unregister() deleted only
    two, leaking 'cloudsmoothing' on the Scene type after disable.
    """
    bpy.utils.unregister_module(__name__)

    del bpy.types.Scene.cloudparticles
    del bpy.types.Scene.cloudsmoothing
    del bpy.types.Scene.cloud_type
+
+
# Allow running this file directly from Blender's text editor.
if __name__ == "__main__":
    register()
diff --git a/object_fracture/__init__.py b/object_fracture/__init__.py
new file mode 100644
index 00000000..895af266
--- /dev/null
+++ b/object_fracture/__init__.py
@@ -0,0 +1,81 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
# Add-on metadata consumed by Blender's add-on manager.
bl_info = {
    "name": "Fracture Tools",
    "author": "pildanovak",
    "version": (2, 0),
    "blender": (2, 5, 7),
    "api": 36147,
    "location": "Search > Fracture Object & Add -> Fracture Helper Objects",
    "description": "Fractured Object, Bomb, Projectile, Recorder",
    "warning": "",
    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
        "Scripts/Object/Fracture",
    "tracker_url": "https://projects.blender.org/tracker/index.php?"\
        "func=detail&aid=21793",
    "category": "Object"}
+
+
# Support add-on reloading: if 'bpy' is already in locals() this module is
# being re-executed (F8 reload), so refresh the submodules in place instead
# of importing them again.
if "bpy" in locals():
    import imp
    imp.reload(fracture_ops)
    imp.reload(fracture_setup)
else:
    from . import fracture_ops
    from . import fracture_setup

import bpy
+
+
class INFO_MT_add_fracture_objects(bpy.types.Menu):
    # Submenu listing the importable fracture helper objects.
    bl_idname = "INFO_MT_add_fracture_objects"
    bl_label = "Fracture Helper Objects"

    def draw(self, context):
        layout = self.layout
        layout.operator_context = 'INVOKE_REGION_WIN'

        # One entry per helper operator, in menu order.
        for op_idname, op_text in (
                ("object.import_fracture_bomb", "Bomb"),
                ("object.import_fracture_projectile", "Projectile"),
                ("object.import_fracture_recorder", "Rigidbody Recorder")):
            layout.operator(op_idname, text=op_text)
+
+
def menu_func(self, context):
    """Draw the fracture helper submenu inside the Add menu."""
    self.layout.menu("INFO_MT_add_fracture_objects", icon="PLUGIN")
+
+
def register():
    """Register every class in this add-on and extend the Add menu."""
    bpy.utils.register_module(__name__)

    # Expose the fracture helper objects under Add > Fracture Helper Objects.
    bpy.types.INFO_MT_add.append(menu_func)
+
+
def unregister():
    """Unregister every class in this add-on and restore the Add menu."""
    bpy.utils.unregister_module(__name__)

    # Take the fracture helper entries back out of the Add menu.
    bpy.types.INFO_MT_add.remove(menu_func)
+
+
# Allow running this file directly from Blender's text editor.
if __name__ == "__main__":
    register()
diff --git a/object_fracture/data.blend b/object_fracture/data.blend
new file mode 100644
index 00000000..a466e9dd
--- /dev/null
+++ b/object_fracture/data.blend
Binary files differ
diff --git a/object_fracture/fracture_ops.py b/object_fracture/fracture_ops.py
new file mode 100644
index 00000000..c7a84279
--- /dev/null
+++ b/object_fracture/fracture_ops.py
@@ -0,0 +1,496 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+import bpy
+from bpy.props import *
+import os
+import random
+import mathutils
+from mathutils import *
+
+
def create_cutter(context, crack_type, scale, roughness):
    """Create the cutter object used for one boolean fracture pass.

    crack_type -- 'FLAT', 'FLAT_ROUGH', 'SPHERE' or 'SPHERE_ROUGH'; flat
                  cuts start from a cube, spherical ones from an ico sphere
    scale      -- overall cutter size (caller derives it from the target)
    roughness  -- displacement amount for the *_ROUGH variants

    The new cutter is left as the active, selected object.
    """
    ncuts = 12
    if crack_type == 'FLAT' or crack_type == 'FLAT_ROUGH':
        bpy.ops.mesh.primitive_cube_add(
            view_align=False,
            enter_editmode=False,
            location=(0, 0, 0),
            rotation=(0, 0, 0),
            layers=(True, False, False, False,
                    False, False, False, False,
                    False, False, False, False,
                    False, False, False, False,
                    False, False, False, False))

        # Shift the cube to one side of the origin, then size it.
        for v in context.scene.objects.active.data.vertices:
            v.co[0] += 1
            v.co[0] *= scale
            v.co[1] *= scale
            v.co[2] *= scale

        bpy.ops.object.editmode_toggle()
        bpy.ops.mesh.faces_shade_smooth()
        bpy.ops.uv.reset()

        if crack_type == 'FLAT_ROUGH':
            # Fractal subdivision gives the cut surface its roughness.
            bpy.ops.mesh.subdivide(
                number_cuts=ncuts,
                fractal=roughness * 7 * scale,
                smoothness=0)

            bpy.ops.mesh.vertices_smooth(repeat=5)

        bpy.ops.object.editmode_toggle()

    if crack_type == 'SPHERE' or crack_type == 'SPHERE_ROUGH':
        bpy.ops.mesh.primitive_ico_sphere_add(subdivisions=4,
            size=1,
            view_align=False,
            enter_editmode=False,
            location=(0, 0, 0),
            rotation=(0, 0, 0),
            layers=(True, False, False, False,
                    False, False, False, False,
                    False, False, False, False,
                    False, False, False, False,
                    False, False, False, False))

        bpy.ops.object.editmode_toggle()
        bpy.ops.mesh.faces_shade_smooth()
        bpy.ops.uv.smart_project(angle_limit=66, island_margin=0)

        bpy.ops.object.editmode_toggle()
        # Offset and scale exactly like the cube branch above.
        for v in context.scene.objects.active.data.vertices:
            v.co[0] += 1
            v.co[0] *= scale
            v.co[1] *= scale
            v.co[2] *= scale

        if crack_type == 'SPHERE_ROUGH':
            # Per-vertex random jitter produces the rough spherical crack.
            for v in context.scene.objects.active.data.vertices:
                v.co[0] += roughness * scale * 0.2 * (random.random() - 0.5)
                v.co[1] += roughness * scale * 0.1 * (random.random() - 0.5)
                v.co[2] += roughness * scale * 0.1 * (random.random() - 0.5)

    bpy.context.scene.objects.active.select = True

    '''
    # Adding fracture material
    # @todo Doesn't work at all yet.
    sce = bpy.context.scene
    if bpy.data.materials.get('fracture') is None:
        bpy.ops.material.new()
        bpy.ops.object.material_slot_add()
        sce.objects.active.material_slots[0].material.name = 'fracture'
    else:
        bpy.ops.object.material_slot_add()
        sce.objects.active.material_slots[0].material
        = bpy.data.materials['fracture']
    '''
+
+
+#UNWRAP
def getsizefrommesh(ob):
    """Return the (x, y, z) extents of *ob*'s bounding box."""
    corners = ob.bound_box
    # Opposite corners of the box give each axis extent:
    # corner 0 holds the minima; 5/3/1 hold the maxima per axis.
    x_size = corners[5][0] - corners[0][0]
    y_size = corners[3][1] - corners[0][1]
    z_size = corners[1][2] - corners[0][2]
    return (x_size, y_size, z_size)
+
+
def getIslands(shard):
    """Split *shard* into its disconnected mesh islands.

    Flood-fills vertex connectivity across faces.  If only one island is
    found, [shard] is returned unchanged; otherwise one duplicate per island
    is created (deleting all other vertices) and the original object is
    unlinked from the scene.  Returns the list of island objects.
    """
    sm = shard.data
    islands = []
    vgroups = []
    fgroups = []

    # vgi[v] is the island index assigned to vertex v (-1 = unvisited).
    vgi = []
    for v in sm.vertices:
        vgi.append(-1)

    gindex = 0
    for i in range(len(vgi)):
        if vgi[i] == -1:
            # Start a new island from the first unvisited vertex.
            gproc = [i]
            vgroups.append([i])
            fgroups.append([])

            while len(gproc) > 0:
                i = gproc.pop(0)
                for f in sm.faces:
                    #if i in f.vertices:
                    for v in f.vertices:
                        if v == i:
                            # Every vertex sharing a face joins this island.
                            for v1 in f.vertices:
                                if vgi[v1] == -1:
                                    vgi[v1] = gindex
                                    vgroups[gindex].append(v1)
                                    gproc.append(v1)

                            fgroups[gindex].append(f.index)

            gindex += 1

    #print( gindex)

    if gindex == 1:
        shards = [shard]

    else:
        shards = []
        for gi in range(0, gindex):
            # Duplicate the shard, then delete every vertex that does not
            # belong to island *gi*.
            bpy.ops.object.select_all(action='DESELECT')
            bpy.context.scene.objects.active = shard
            shard.select = True
            bpy.ops.object.duplicate(linked=False, mode='DUMMY')
            a = bpy.context.scene.objects.active
            sm = a.data
            print (a.name)

            # Toggle through edit mode to clear the mesh selection.
            bpy.ops.object.editmode_toggle()
            bpy.ops.mesh.select_all(action='DESELECT')
            bpy.ops.object.editmode_toggle()

            for x in range(len(sm.vertices) - 1, -1, -1):
                if vgi[x] != gi:
                    #print('getIslands: selecting')
                    #print('getIslands: ' + str(x))
                    a.data.vertices[x].select = True

            print(bpy.context.scene.objects.active.name)

            # Delete the selected (foreign-island) vertices.
            bpy.ops.object.editmode_toggle()
            bpy.ops.mesh.delete()
            bpy.ops.object.editmode_toggle()

            bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY')

            shards.append(a)

        bpy.context.scene.objects.unlink(shard)

    return shards
+
+
def boolop(ob, cutter, op):
    """Apply the boolean *op* ('INTERSECT'/'DIFFERENCE') of *cutter* to *ob*.

    Returns (fault, new_shards):
      fault 0 -- success; new_shards holds the resulting island objects
      fault 1 -- result grew past the source size (failed boolean); discarded
      fault 2 -- result mesh was empty
    """
    sce = bpy.context.scene

    fault = 0
    new_shards = []

    # Remember the source's overall size so a blown-up result can be caught.
    sizex, sizey, sizez = getsizefrommesh(ob)
    gsize = sizex + sizey + sizez

    # NOTE(review): select_all() without action='DESELECT' *toggles* the
    # selection — presumably intentional given the callers; verify.
    bpy.ops.object.select_all()
    ob.select = True
    sce.objects.active = ob
    cutter.select = False

    bpy.ops.object.modifier_add(type='BOOLEAN')
    a = sce.objects.active
    a.modifiers['Boolean'].object = cutter
    a.modifiers['Boolean'].operation = op

    # Evaluate the modifier into a new mesh without touching the original.
    nmesh = a.to_mesh(sce, apply_modifiers=True, settings='PREVIEW')

    if len(nmesh.vertices) > 0:
        a.modifiers.remove(a.modifiers['Boolean'])
        bpy.ops.object.duplicate(linked=False, mode='DUMMY')

        new_shard = sce.objects.active
        new_shard.data = nmesh
        #scene.objects.link(new_shard)

        new_shard.location = a.location
        bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY')

        # A shard larger than the source means the boolean failed.
        sizex, sizey, sizez = getsizefrommesh(new_shard)
        gsize2 = sizex + sizey + sizez

        if gsize2 > gsize * 1.01: # Size check
            print (gsize2, gsize, ob.name, cutter.name)
            fault = 1
            #print ('boolop: sizeerror')

        # This checks whether returned shards are non-manifold.
        # Problem is, if org mesh is non-manifold, it will always fail (e.g. with Suzanne).
        # And disabling it does not seem to cause any problem…
#        elif min(mesh_utils.edge_face_count(nmesh)) < 2:  # Manifold check
#            fault = 1

        if not fault:
            new_shards = getIslands(new_shard)

        else:
            sce.objects.unlink(new_shard)

    else:
        fault = 2

    return fault, new_shards
+
+
def splitobject(context, ob, crack_type, roughness):
    """Split *ob* into pieces with one randomly placed boolean cutter.

    Runs INTERSECT then DIFFERENCE against the same cutter.  On success the
    original object and cutter are unlinked and the new shards are returned;
    on a boolean fault every partial shard is discarded and [ob] is returned
    so the caller can keep the object intact.
    """
    scene = context.scene

    size = getsizefrommesh(ob)
    shards = []
    scale = max(size) * 1.3

    create_cutter(context, crack_type, scale, roughness)
    cutter = context.active_object
    cutter.location = ob.location

    # Randomize the cutter's offset and orientation so repeated splits
    # produce different fracture planes.
    cutter.location[0] += random.random() * size[0] * 0.1
    cutter.location[1] += random.random() * size[1] * 0.1
    cutter.location[2] += random.random() * size[2] * 0.1
    cutter.rotation_euler = [
        random.random() * 5000.0,
        random.random() * 5000.0,
        random.random() * 5000.0]

    scene.objects.active = ob
    operations = ['INTERSECT', 'DIFFERENCE']

    for op in operations:
        fault, newshards = boolop(ob, cutter, op)

        shards.extend(newshards)
        if fault > 0:
            # Delete all shards in case of fault from previous operation.
            for s in shards:
                scene.objects.unlink(s)

            scene.objects.unlink(cutter)
            #print('splitobject: fault')

            return [ob]

    if shards[0] != ob:
        bpy.context.scene.objects.unlink(ob)

    bpy.context.scene.objects.unlink(cutter)

    return shards
+
+
def fracture_basic(context, nshards, crack_type, roughness):
    """Recursively split every selected object into about *nshards* shards.

    context    -- Blender context; the current selection defines the targets
    nshards    -- target shard count per originally selected object
    crack_type -- fracture style, passed through to splitobject()
    roughness  -- fracture-surface roughness, passed through to splitobject()

    Fixes: the iteration counter was named `iter`, shadowing the builtin;
    the selection filter is now a comprehension instead of a manual append
    loop.  Control flow is unchanged.
    """
    tobesplit = [ob for ob in context.scene.objects if ob.select]
    shards = []

    # i counts shards, starting with 1 - the original object; iterations
    # bounds the loop to prevent eternal loops in case of boolean faults.
    i = 1
    iterations = 0

    maxshards = nshards * len(tobesplit)

    while i < maxshards and len(tobesplit) > 0 and iterations < maxshards * 10:
        ob = tobesplit.pop(0)
        newshards = splitobject(context, ob, crack_type, roughness)

        # Both halves may be split again on a later pass.
        tobesplit.extend(newshards)

        if len(newshards) > 1:
            shards.extend(newshards)
            # Each successful split turns one object into len(newshards).
            i += (len(newshards) - 1)

        iterations += 1
+
+
def fracture_group(context, group):
    """Intersect every selected object with each cutter object in *group*.

    Selected objects that are themselves members of *group* are skipped so
    the cutters are never fractured.  On a boolean fault all shards produced
    so far are removed and the function aborts.
    """
    shards = []
    tobesplit = [ob for ob in context.scene.objects
                 if ob.select
                 and (len(ob.users_group) == 0
                      or ob.users_group[0].name != group)]

    cutters = bpy.data.groups[group].objects

    # @todo This can be optimized: avoid booleans on objects whose bounding
    # boxes do not intersect the cutter's.
    i = 0
    for ob in tobesplit:
        for cutter in cutters:
            fault, newshards = boolop(ob, cutter, 'INTERSECT')
            shards.extend(newshards)

            if fault == 1:
                # Boolean failed: discard everything produced so far.
                for shard in shards:
                    bpy.context.scene.objects.unlink(shard)
                return

        i += 1
+
+
class FractureSimple(bpy.types.Operator):
    '''Split object with boolean operations for simulation, uses an object.'''
    bl_idname = "object.fracture_simple"
    bl_label = "Fracture Object"
    bl_options = {'REGISTER', 'UNDO'}

    # Safety switch: nothing runs until the user enables it, so the
    # expensive booleans are not re-executed on every redo-panel tweak.
    exe = BoolProperty(name="Execute",
        description="If it shall actually run, for optimal performance...",
        default=False)

    hierarchy = BoolProperty(name="Generate hierarchy",
        description="Hierarchy is usefull for simulation of objects" \
            " breaking in motion.",
        default=False)

    nshards = IntProperty(name="Number of shards",
        description="Number of shards the object should be split into.",
        min=2,
        default=5)

    crack_type = EnumProperty(name='Crack type',
        items=(
            ('FLAT', 'Flat', 'a'),
            ('FLAT_ROUGH', 'Flat rough', 'a'),
            ('SPHERE', 'Spherical', 'a'),
            ('SPHERE_ROUGH', 'Spherical rough', 'a')),
        description='Look of the fracture surface',
        default='FLAT')

    roughness = FloatProperty(name="Roughness",
        description="Roughness of the fracture surface",
        min=0.0,
        max=3.0,
        default=0.5)

    def execute(self, context):
        """Fracture the selection once the 'Execute' switch is on."""
        #getIslands(context.object)
        if self.exe:
            fracture_basic(context,
                           self.nshards,
                           self.crack_type,
                           self.roughness)

        return {'FINISHED'}
+
+
class FractureGroup(bpy.types.Operator):
    '''Split object with boolean operations for simulation, uses a group.'''
    bl_idname = "object.fracture_group"
    bl_label = "Fracture Object (Group)"
    bl_options = {'REGISTER', 'UNDO'}

    # Safety switch: nothing runs until the user enables it, so the
    # expensive booleans are not re-executed on every redo-panel tweak.
    exe = BoolProperty(name="Execute",
        description="If it shall actually run, for optimal performance...",
        default=False)

    # Name of the group whose objects act as cutters.
    group = StringProperty(name="Group",
        description="Specify the group used for fracturing")

#    e = []
#    for i, g in enumerate(bpy.data.groups):
#        e.append((g.name, g.name, ''))
#    group = EnumProperty(name='Group (hit F8 to refresh list)',
#                         items=e,
#                         description='Specify the group used for fracturing')

    def execute(self, context):
        """Run the group-based fracture when enabled and a group is set."""
        #getIslands(context.object)

        if self.exe and self.group:
            fracture_group(context, self.group)

        return {'FINISHED'}

    def draw(self, context):
        # Show the execute toggle and a searchable group selector.
        layout = self.layout
        layout.prop(self, "exe")
        layout.prop_search(self, "group", bpy.data, "groups")
+
+#####################################################################
+# Import Functions
+
def import_object(obname):
    """Append object *obname* from this add-on's bundled data.blend and
    place every newly selected object at the 3D cursor.

    obname -- name of the object inside data.blend ('BOMB', 'PROJECTILE',
              'RECORDER')
    """
    # Relative path inside the .blend library ("//" = blend-relative).
    opath = "//data.blend\\Object\\" + obname
    s = os.sep
    # Absolute library path: <scripts>/addons/object_fracture/data.blend.
    # NOTE(review): assumes the add-on lives under the first scripts path.
    dpath = bpy.utils.script_paths()[0] + \
        '%saddons%sobject_fracture%sdata.blend\\Object\\' % (s, s, s)

    # DEBUG
    #print('import_object: ' + opath)

    bpy.ops.wm.link_append(
        filepath=opath,
        filename=obname,
        directory=dpath,
        filemode=1,
        link=False,
        autoselect=True,
        active_layer=True,
        instance_groups=True,
        relative_path=True)

    # Appending leaves the new objects selected; snap them to the cursor.
    for ob in bpy.context.selected_objects:
        ob.location = bpy.context.scene.cursor_location
+
+
class ImportFractureRecorder(bpy.types.Operator):
    '''Imports a rigidbody recorder'''
    bl_idname = "object.import_fracture_recorder"
    bl_label = "Add Rigidbody Recorder (Fracture)"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        # Pull the pre-made recorder object out of the bundled data.blend.
        import_object("RECORDER")

        return {'FINISHED'}
+
+
class ImportFractureBomb(bpy.types.Operator):
    '''Import a bomb'''
    bl_idname = "object.import_fracture_bomb"
    bl_label = "Add Bomb (Fracture)"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        # Pull the pre-made bomb object out of the bundled data.blend.
        import_object("BOMB")

        return {'FINISHED'}
+
+
class ImportFractureProjectile(bpy.types.Operator):
    # Fix: removed the stray trailing comma in the base-class list
    # ("bpy.types.Operator, ") for consistency with the sibling operators.
    '''Imports a projectile'''
    bl_idname = "object.import_fracture_projectile"
    bl_label = "Add Projectile (Fracture)"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        # Pull the pre-made projectile object out of the bundled data.blend.
        import_object("PROJECTILE")

        return {'FINISHED'}
diff --git a/object_fracture/fracture_setup.py b/object_fracture/fracture_setup.py
new file mode 100644
index 00000000..3ab93821
--- /dev/null
+++ b/object_fracture/fracture_setup.py
@@ -0,0 +1,74 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+import bpy
+from bpy.props import *
+
+
def getsizefrommesh(ob):
    """Return the bounding-box dimensions of *ob* as an (x, y, z) tuple."""
    bb = ob.bound_box
    # Corner 0 holds the minima; corners 5, 3, and 1 hold the per-axis maxima.
    width = bb[5][0] - bb[0][0]
    depth = bb[3][1] - bb[0][1]
    height = bb[1][2] - bb[0][2]
    return (width, depth, height)
+
+
def setupshards(context):
    """Configure every selected object as a game-engine rigid-body shard."""
    sce = context.scene

    # Snapshot the selection first so activating objects below cannot
    # disturb the iteration.
    tobeprocessed = [ob for ob in sce.objects if ob.select]

    for ob in tobeprocessed:
        game = ob.game

        game.physics_type = 'RIGID_BODY'
        game.use_collision_bounds = 1
        game.collision_bounds_type = 'CONVEX_HULL'
        game.rotation_damping = 0.9

        # Use the bounding-box volume as a cheap mass estimate.
        sizex, sizey, sizez = getsizefrommesh(ob)
        game.mass = sizex * sizey * sizez

        sce.objects.active = ob

        # Tag the object with a game property so the engine can identify it.
        bpy.ops.object.game_property_new()
        game.properties['prop'].name = 'shard'
+
+
class SetupFractureShards(bpy.types.Operator):
    '''Set up game-engine rigid-body physics for the selected shards'''
    bl_idname = "object.setup_fracture_shards"
    bl_label = "Setup Fracture Shards"
    bl_options = {'REGISTER', 'UNDO'}

    #def poll(self, context):

    def execute(self, context):
        # All the work happens in setupshards().
        setupshards(context)
        return {'FINISHED'}
diff --git a/paint_palette.py b/paint_palette.py
new file mode 100644
index 00000000..be5bd128
--- /dev/null
+++ b/paint_palette.py
@@ -0,0 +1,700 @@
+# paint_palette.py (c) 2011 Dany Lebel (Axon_D)
+#
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
+
# Add-on metadata shown in Blender's Add-ons preferences.
bl_info = {
    "name": "Paint Palettes",
    "author": "Dany Lebel (Axon D)",
    "version": (0, 8, 2),
    "blender": (2, 5, 7),
    "api": 36826,
    "location": "Image Editor and 3D View > Any Paint mode > Color Palette or Weight Palette panel",
    "description": "Palettes for color and weight paint modes",
    "warning": "beta",
    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/Scripts/Paint/Palettes",
    "tracker_url": "http://projects.blender.org/tracker/index.php?func=detail&aid=25908",
    "category": "Paint",
}

"""
This addon brings palettes to the paint modes.

 * Color Palette for Image Painting, Texture Paint and Vertex Paint modes.
 * Weight Palette for the Weight Paint mode.

Set a number of colors (or weights according to the mode) and then associate it
with the brush by using the button under the color.
"""
+
+import bpy
+from bpy.props import *
+
+
class AddPresetBase():
    '''Base preset class, only for subclassing
    subclasses must define
    - preset_values
    - preset_subdir
    (and preset_menu, the name of a bpy.types.Menu class whose label
    tracks the active preset)'''
    # bl_idname = "script.preset_base_add"
    # bl_label = "Add a Python Preset"
    bl_options = {'REGISTER'} # only because invoke_props_popup requires.

    # Name typed by the user; turned into a file name by as_filename().
    name = bpy.props.StringProperty(name="Name",
        description="Name of the preset, used to make the path name",
        maxlen=64, default="")
    # When True the operator removes the active preset instead of adding one.
    remove_active = bpy.props.BoolProperty(default=False, options={'HIDDEN'})

    @staticmethod
    def as_filename(name): # could reuse for other presets
        # Replace anything unsafe for a file name with underscores.
        for char in " !@#$%^&*(){}:\";'[]<>,.\\/?":
            name = name.replace(char, '_')
        return name.lower().strip()

    def execute(self, context):
        """Write (or remove) a preset file under the user presets directory."""
        import os

        if hasattr(self, "pre_cb"):
            self.pre_cb(context)

        preset_menu_class = getattr(bpy.types, self.preset_menu)

        if not self.remove_active:
            # --- add a new preset ---

            if not self.name:
                return {'FINISHED'}

            filename = self.as_filename(self.name)

            target_path = bpy.utils.user_resource('SCRIPTS',
                os.path.join("presets", self.preset_subdir), create=True)

            if not target_path:
                self.report({'WARNING'}, "Failed to create presets path")
                return {'CANCELLED'}

            filepath = os.path.join(target_path, filename) + ".py"

            if hasattr(self, "add"):
                self.add(context, filepath)
            else:
                file_preset = open(filepath, 'w')
                file_preset.write("import bpy\n")

                if hasattr(self, "preset_defines"):
                    for rna_path in self.preset_defines:
                        # NOTE: exec/eval run strings supplied by the subclass
                        # so the eval() below can resolve the same names; do
                        # not feed untrusted strings into preset_defines.
                        exec(rna_path)
                        file_preset.write("%s\n" % rna_path)
                    file_preset.write("\n")


                for rna_path in self.preset_values:
                    value = eval(rna_path)
                    # convert thin wrapped sequences to simple lists to repr()
                    try:
                        value = value[:]
                    except:
                        pass

                    file_preset.write("%s = %r\n" % (rna_path, value))
                # Trailer appended to every preset: when the preset runs it
                # re-applies the currently selected slot to both brushes.
                file_preset.write("\
ci = bpy.context.window_manager.palette_props.current_color_index\n\
palette_props = bpy.context.window_manager.palette_props\n\
image_paint = bpy.context.tool_settings.image_paint\n\
vertex_paint = bpy.context.tool_settings.vertex_paint\n\
if ci == 0:\n\
    image_paint.brush.color = palette_props.color_0\n\
    vertex_paint.brush.color = palette_props.color_0\n\
elif ci == 1:\n\
    image_paint.brush.color = palette_props.color_1\n\
    vertex_paint.brush.color = palette_props.color_1\n\
elif ci == 2:\n\
    image_paint.brush.color = palette_props.color_2\n\
    vertex_paint.brush.color = palette_props.color_2\n\
elif ci == 3:\n\
    image_paint.brush.color = palette_props.color_3\n\
    vertex_paint.brush.color = palette_props.color_3\n\
elif ci == 4:\n\
    image_paint.brush.color = palette_props.color_4\n\
    vertex_paint.brush.color = palette_props.color_4\n\
elif ci == 5:\n\
    image_paint.brush.color = palette_props.color_5\n\
    vertex_paint.brush.color = palette_props.color_5\n\
elif ci == 6:\n\
    image_paint.brush.color = palette_props.color_6\n\
    vertex_paint.brush.color = palette_props.color_6\n\
elif ci == 7:\n\
    image_paint.brush.color = palette_props.color_7\n\
    vertex_paint.brush.color = palette_props.color_7\n\
elif ci == 8:\n\
    image_paint.brush.color = palette_props.color_8\n\
    vertex_paint.brush.color = palette_props.color_8")

                file_preset.close()

            preset_menu_class.bl_label = bpy.path.display_name(filename)

        else:
            # --- remove the active preset ---
            preset_active = preset_menu_class.bl_label

            # fairly sloppy but convenient.
            filepath = bpy.utils.preset_find(preset_active, self.preset_subdir)

            if not filepath:
                filepath = bpy.utils.preset_find(preset_active,
                    self.preset_subdir, display_name=True)

            if not filepath:
                return {'CANCELLED'}

            if hasattr(self, "remove"):
                self.remove(context, filepath)
            else:
                try:
                    os.remove(filepath)
                except:
                    import traceback
                    traceback.print_exc()

            # XXX, stupid!
            preset_menu_class.bl_label = "Presets"

        if hasattr(self, "post_cb"):
            self.post_cb(context)

        return {'FINISHED'}

    def check(self, context):
        # Normalize the typed name while the popup dialog is open.
        self.name = self.as_filename(self.name)

    def invoke(self, context, event):
        # Adding asks for a name via a popup; removing runs immediately.
        if not self.remove_active:
            wm = context.window_manager
            return wm.invoke_props_dialog(self)
        else:
            return self.execute(context)
+
class ExecutePalettePreset(bpy.types.Operator):
    ''' Executes a preset '''
    bl_idname = "script.execute_preset"
    bl_label = "Execute a Python Preset"

    filepath = bpy.props.StringProperty(name="Path",
        description="Path of the Python file to execute",
        maxlen=512, default="")
    menu_idname = bpy.props.StringProperty(name="Menu ID Name",
        description="ID name of the menu this was called from", default="")

    def execute(self, context):
        """Run the preset file and retitle the menu it was picked from."""
        import os.path

        preset_path = self.filepath

        # Show the most recently chosen preset as the menu title.
        menu_class = getattr(bpy.types, self.menu_idname)
        menu_class.bl_label = bpy.path.display_name(os.path.basename(preset_path))

        # Delegate the actual execution to the generic script runner.
        bpy.ops.script.python_file_run(filepath=preset_path)
        return {'FINISHED'}
+
+
class PALETTE_MT_palette_presets(bpy.types.Menu):
    # Menu listing saved palette presets; each entry runs preset_operator
    # on a file found under the "palette" presets subdirectory.
    bl_label = "Palette Presets"
    preset_subdir = "palette"
    preset_operator = "script.execute_preset"
    # Reuse Blender's stock preset-menu drawing code.
    draw = bpy.types.Menu.draw_preset
+
+
class AddPresetPalette(AddPresetBase, bpy.types.Operator):
    '''Add a Palette Preset'''
    bl_idname = "palette.preset_add"
    bl_label = "Add Palette Preset"
    # Menu class whose bl_label tracks the active preset.
    preset_menu = "PALETTE_MT_palette_presets"

    # Statements written (and exec'd) at the top of every saved preset file.
    preset_defines = [
        "window_manager = bpy.context.window_manager"
    ]

    # RNA paths saved into the preset file, one per color slot.
    preset_values = [
        "window_manager.palette_props.color_0",
        "window_manager.palette_props.color_1",
        "window_manager.palette_props.color_2",
        "window_manager.palette_props.color_3",
        "window_manager.palette_props.color_4",
        "window_manager.palette_props.color_5",
        "window_manager.palette_props.color_6",
        "window_manager.palette_props.color_7",
        "window_manager.palette_props.color_8",

    ]

    # Subdirectory of the user presets directory where palettes are stored.
    preset_subdir = "palette"
+
+
class BrushButtonsPanel():
    """Mixin for palette panels shown in the Image Editor's UI region."""
    bl_space_type = 'IMAGE_EDITOR'
    bl_region_type = 'UI'

    @classmethod
    def poll(cls, context):
        # Available only while image painting is enabled and a brush exists.
        image_paint = context.tool_settings.image_paint
        return context.space_data.show_paint and image_paint.brush
+
class IMAGE_OT_select_color(bpy.types.Operator):
    bl_label = ""
    bl_description = "Select this color"
    bl_idname = "paint.select_color"

    # Index of the palette slot this button represents (0-8).
    color_index = IntProperty()

    def invoke(self, context, event):
        """Make the chosen slot current and copy its color to the brushes."""
        palette_props = bpy.context.window_manager.palette_props
        palette_props.current_color_index = self.color_index

        # Slots are stored as attributes color_0 ... color_8; resolve the
        # active one by name.  This replaces a 10-branch if/elif chain that
        # left `color` unbound (NameError) for out-of-range indices and whose
        # branch 9 read a color_9 attribute PaletteProps never defines.
        color = getattr(palette_props, "color_%d" % self.color_index)

        # Keep image-paint and vertex-paint brushes in sync with the palette.
        bpy.context.tool_settings.image_paint.brush.color = color
        bpy.context.tool_settings.vertex_paint.brush.color = color
        return {"FINISHED"}
+
+
+
+
def color_palette_draw(self, context):
    """Draw the color palette UI shared by the Image Editor and 3D View panels.

    Shows the preset management row, then a 3x3 grid of color slots; the
    active slot displays the live brush color picker and mirrors the brush
    color back into the palette.
    """
    palette_props = bpy.context.window_manager.palette_props

    layout = self.layout

    # Preset management: menu plus add/remove buttons.
    row = layout.row(align=True)
    row.menu("PALETTE_MT_palette_presets",
             text=bpy.types.PALETTE_MT_palette_presets.bl_label)
    row.operator("palette.preset_add", text="", icon="ZOOMIN")
    row.operator("palette.preset_add", text="", icon="ZOOMOUT").remove_active = True

    # Pick the brush of whichever paint mode is active.
    # NOTE(review): as in the original, `brush` stays unbound when no branch
    # matches; callers' poll() methods are expected to prevent that case.
    if context.vertex_paint_object:
        brush = context.tool_settings.vertex_paint.brush
    elif context.image_paint_object:
        brush = context.tool_settings.image_paint.brush
    elif context.space_data.use_image_paint:
        brush = context.tool_settings.image_paint.brush

    for i in range(0, 9):
        # Start a new row every three slots.
        if not i % 3:
            row = layout.row()

        if i == palette_props.current_color_index:
            # Active slot: mirror the brush color into the palette attribute
            # (setattr replaces the original 18-line if/elif chain) and show
            # the live color picker.
            setattr(palette_props, "color_%d" % i, brush.color[:])
            col = row.column()
            col.prop(brush, "color", text="")
            col.operator("paint.select_color",
                icon="COLOR", emboss=False).color_index = i
        else:
            # Inactive slot: show the stored color with a select button.
            col = row.column(align=True)
            col.prop(palette_props, "color_%d" % i)
            col.operator("paint.select_color").color_index = i
+
+
class IMAGE_PT_color_palette(BrushButtonsPanel, bpy.types.Panel):
    # Color palette panel for the Image Editor (image paint mode).
    bl_label = "Color Palette"
    bl_options = {'DEFAULT_CLOSED'}

    def draw(self, context):
        # UI is shared with the 3D View color palette panel.
        color_palette_draw(self, context)
+
+
class PaintPanel():
    """Mixin for palette panels shown in the 3D View tool shelf."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'TOOLS'

    @staticmethod
    def paint_settings(context):
        """Return the tool settings of the active paint mode, or None."""
        tool_settings = context.tool_settings
        if context.vertex_paint_object:
            return tool_settings.vertex_paint
        if context.weight_paint_object:
            return tool_settings.weight_paint
        if context.texture_paint_object:
            return tool_settings.image_paint
        return None
+
+
class VIEW3D_PT_color_palette(PaintPanel, bpy.types.Panel):
    # Color palette panel for the 3D View (vertex/texture paint modes).
    bl_label = "Color Palette"
    bl_options = {'DEFAULT_CLOSED'}

    @classmethod
    def poll(cls, context):
        # Only meaningful while a color-based paint mode is active.
        return (context.image_paint_object or context.vertex_paint_object)

    def draw(self, context):
        # UI is shared with the Image Editor color palette panel.
        color_palette_draw(self, context)
+
+
class VIEW3D_OT_select_weight(bpy.types.Operator):
    bl_label = ""
    bl_description = "Select this weight"
    bl_idname = "paint.select_weight"

    # Index of the weight slot this button represents (0-10).
    weight_index = IntProperty()

    def invoke(self, context, event):
        """Make the chosen slot current and apply its weight to the tool."""
        palette_props = bpy.context.window_manager.palette_props
        palette_props.current_weight_index = self.weight_index

        # Slots are stored as attributes weight_0 ... weight_10; resolve the
        # active one by name.  Replaces an 11-branch if/elif chain that left
        # `weight` unbound (NameError) for out-of-range indices.
        weight = getattr(palette_props, "weight_%d" % self.weight_index)

        bpy.context.tool_settings.vertex_group_weight = weight
        return {"FINISHED"}
+
+
class VIEW3D_OT_reset_weight_palette(bpy.types.Operator):
    """Reset the active weight slot (and the paint weight) to its default."""
    bl_label = ""
    bl_idname = "paint.reset_weight_palette"

    # Default value of each of the 11 weight slots, indexed by slot number.
    # (Values taken verbatim from the original per-slot branches.)
    _defaults = (0.0, 0.1, 0.25, 0.3333, 0.4, 0.5,
                 0.6, 0.6666, 0.75, 0.9, 1.0)

    def execute(self, context):
        palette_props = context.window_manager.palette_props
        index = palette_props.current_weight_index

        # Table lookup replaces eleven near-identical if blocks.  This also
        # fixes two defects of the original: a stray debug print("coucou!")
        # and an unconditional `palette_props.weight_1 = 0.1` that reset
        # slot 1 no matter which slot was active.
        if 0 <= index < len(self._defaults):
            default = self._defaults[index]
            context.tool_settings.vertex_group_weight = default
            setattr(palette_props, "weight_%d" % index, default)
        return {"FINISHED"}
+
class VIEW3D_PT_weight_palette(PaintPanel, bpy.types.Panel):
    # Weight palette panel for the 3D View (weight paint mode).
    bl_label = "Weight Palette"
    bl_options = {'DEFAULT_CLOSED'}

    @classmethod
    def poll(cls, context):
        return context.weight_paint_object

    def draw(self, context):
        """Draw the 11 weight slots plus a reset button.

        Replaces ~100 lines of copy-pasted per-slot code with a data-driven
        loop; layout and behavior are unchanged.
        """
        palette_props = bpy.context.window_manager.palette_props
        vertex_group_weight = bpy.context.tool_settings.vertex_group_weight

        layout = self.layout
        box = layout.box()

        # Slot grouping per row, preserving the original layout:
        # slot 0 alone, 1-3 / 4-6 / 7-9 aligned, slot 10 alone.
        groups = (
            (False, (0,)),
            (True, (1, 2, 3)),
            (True, (4, 5, 6)),
            (True, (7, 8, 9)),
            (False, (10,)),
        )
        for align, indices in groups:
            row = box.row(align=True) if align else box.row()
            for i in indices:
                active = (i == palette_props.current_weight_index)
                if active:
                    # Active slot mirrors the current paint weight and is
                    # drawn without embossing.
                    setattr(palette_props, "weight_%d" % i,
                            vertex_group_weight)
                weight = getattr(palette_props, "weight_%d" % i)
                row.operator("paint.select_weight", text="%.2f" % weight,
                    emboss=not active).weight_index = i

        row = layout.row()
        row.operator("paint.reset_weight_palette", text="Reset")
+
+
def register():
    """Register all add-on classes and attach the palette properties."""
    # The PropertyGroup classes are defined inside register() so they are
    # only created when the add-on is actually enabled.

    class Colors(bpy.types.PropertyGroup):
        """Class for colors CollectionProperty"""
        color = bpy.props.FloatVectorProperty(
            name="", description="", default=(0.8, 0.8, 0.8), min=0, max=1,
            step=1, precision=3, subtype='COLOR_GAMMA', size=3)


    class PaletteProps(bpy.types.PropertyGroup):
        # Index of the color slot currently bound to the brush.
        current_color_index = IntProperty(
            name="Current Color Index", description="", default=0, min=-1)

        # NOTE(review): the name string says "Color" but this is the weight
        # index — looks like a copy-paste slip; changing it would alter the
        # UI label, so it is left as is.
        current_weight_index = IntProperty(
            name="Current Color Index", description="", default=10, min=-1)

        # Collection of colors
        # NOTE(review): not referenced by the visible UI code, which uses
        # the color_0..color_8 attributes below instead.
        colors = bpy.props.CollectionProperty(type=Colors)

        color_0 = bpy.props.FloatVectorProperty(
            name="", description="", default=(0.8, 0.8, 0.8), min=0, max=1,
            step=1, precision=3, subtype='COLOR_GAMMA', size=3)
        color_1 = bpy.props.FloatVectorProperty(
            name="", description="", default=(0.8, 0.8, 0.8), min=0, max=1,
            step=1, precision=3, subtype='COLOR_GAMMA', size=3)
        color_2 = bpy.props.FloatVectorProperty(
            name="", description="", default=(0.8, 0.8, 0.8), min=0, max=1,
            step=1, precision=3, subtype='COLOR_GAMMA', size=3)
        color_3 = bpy.props.FloatVectorProperty(
            name="", description="", default=(0.8, 0.8, 0.8), min=0, max=1,
            step=1, precision=3, subtype='COLOR_GAMMA', size=3)
        color_4 = bpy.props.FloatVectorProperty(
            name="", description="", default=(0.8, 0.8, 0.8), min=0, max=1,
            step=1, precision=3, subtype='COLOR_GAMMA', size=3)
        color_5 = bpy.props.FloatVectorProperty(
            name="", description="", default=(0.8, 0.8, 0.8), min=0, max=1,
            step=1, precision=3, subtype='COLOR_GAMMA', size=3)
        color_6 = bpy.props.FloatVectorProperty(
            name="", description="", default=(0.8, 0.8, 0.8), min=0, max=1,
            step=1, precision=3, subtype='COLOR_GAMMA', size=3)
        color_7 = bpy.props.FloatVectorProperty(
            name="", description="", default=(0.8, 0.8, 0.8), min=0, max=1,
            step=1, precision=3, subtype='COLOR_GAMMA', size=3)
        color_8 = bpy.props.FloatVectorProperty(
            name="", description="", default=(0.8, 0.8, 0.8), min=0, max=1,
            step=1, precision=3, subtype='COLOR_GAMMA', size=3)


        # Weight slot defaults.
        # NOTE(review): weight_3 defaults to 0.333 here but the reset
        # operator uses 0.3333 — confirm which value is intended.
        weight_0 = bpy.props.FloatProperty(
            default=0.0, min=0.0, max=1.0, precision=3)
        weight_1 = bpy.props.FloatProperty(
            default=0.1, min=0.0, max=1.0, precision=3)
        weight_2 = bpy.props.FloatProperty(
            default=0.25, min=0.0, max=1.0, precision=3)
        weight_3 = bpy.props.FloatProperty(
            default=0.333, min=0.0, max=1.0, precision=3)
        weight_4 = bpy.props.FloatProperty(
            default=0.4, min=0.0, max=1.0, precision=3)
        weight_5 = bpy.props.FloatProperty(
            default=0.5, min=0.0, max=1.0, precision=3)
        weight_6 = bpy.props.FloatProperty(
            default=0.6, min=0.0, max=1.0, precision=3)
        weight_7 = bpy.props.FloatProperty(
            default=0.6666, min=0.0, max=1.0, precision=3)
        weight_8 = bpy.props.FloatProperty(
            default=0.75, min=0.0, max=1.0, precision=3)
        weight_9 = bpy.props.FloatProperty(
            default=0.9, min=0.0, max=1.0, precision=3)
        weight_10 = bpy.props.FloatProperty(
            default=1.0, min=0.0, max=1.0, precision=3)
        pass


    bpy.utils.register_module(__name__)

    bpy.types.WindowManager.palette_props = PointerProperty(
        type=PaletteProps, name="Palette Props", description="")

    # Pre-populate the colors collection with 256 entries.
    for i in range(0, 256):
        colors = bpy.context.window_manager.palette_props.colors.add()

    pass
+
+
def unregister():
    """Unregister every class this add-on module registered."""
    bpy.utils.unregister_module(__name__)
+
+
if __name__ == "__main__":
    # Allows running the script directly from Blender's text editor.
    register()
diff --git a/render_povray/__init__.py b/render_povray/__init__.py
new file mode 100644
index 00000000..792b9930
--- /dev/null
+++ b/render_povray/__init__.py
@@ -0,0 +1,531 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
# Add-on metadata shown in Blender's Add-ons preferences.
bl_info = {
    "name": "POV-Ray 3.7",
    "author": "Campbell Barton, Silvio Falcinelli, Maurice Raybaud, Constantin Rahn, Bastien Montagne",
    "version": (0, 0, 9),
    "blender": (2, 5, 7),
    "api": 35622,
    "location": "Render > Engine > POV-Ray 3.7",
    "description": "Basic POV-Ray 3.7 integration for blender",
    "warning": "both POV-Ray 3.7 and this script are beta",
    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/Scripts/Render/PovRay",
    "tracker_url": "https://projects.blender.org/tracker/index.php?func=detail&aid=23145",
    "category": "Render",
}
+
+if "bpy" in locals():
+ import imp
+ imp.reload(ui)
+ imp.reload(render)
+ imp.reload(update_files)
+
+else:
+ import bpy
+ from bpy.props import StringProperty, BoolProperty, IntProperty, FloatProperty, \
+ FloatVectorProperty, EnumProperty, PointerProperty
+ from . import ui
+ from . import render
+ from . import update_files
+
+
+###############################################################################
+# Scene POV properties.
+###############################################################################
class RenderPovSettingsScene(bpy.types.PropertyGroup):
    """POV-Ray specific render settings stored per scene.

    Grouped as: file/export options, add-on bookkeeping toggles, and real
    POV-Ray options (antialiasing, photons, radiosity).
    """
    # File Options
    tempfiles_enable = BoolProperty(
        name="Enable Tempfiles",
        description="Enable the OS-Tempfiles. Otherwise set the path where to save the files.",
        default=True)
    deletefiles_enable = BoolProperty(
        name="Delete files",
        description="Delete files after rendering. Doesn't work with the image.",
        default=True)
    scene_name = StringProperty(
        name="Scene Name",
        description="Name of POV-Ray scene to create. Empty name will use the name of " \
            "the blend file.",
        default="", maxlen=1024)
    scene_path = StringProperty(
        name="Export scene path",
        # description="Path to directory where the exported scene (POV and INI) is created", # Bug in POV-Ray RC3
        description="Path to directory where the files are created",
        default="", maxlen=1024, subtype="DIR_PATH")
    renderimage_path = StringProperty(
        name="Rendered image path",
        description="Full path to directory where the rendered image is saved.",
        default="", maxlen=1024, subtype="DIR_PATH")
    list_lf_enable = BoolProperty(
        name="LF in lists",
        description="Enable line breaks in lists (vectors and indices). Disabled: " \
            "lists are exported in one line.",
        default=True)

    # Not a real pov option, just to know if we should write
    radio_enable = BoolProperty(
        name="Enable Radiosity",
        description="Enable POV-Rays radiosity calculation",
        default=False)
    radio_display_advanced = BoolProperty(
        name="Advanced Options",
        description="Show advanced options",
        default=False)
    media_enable = BoolProperty(
        name="Enable Media",
        description="Enable POV-Rays atmospheric media",
        default=False)
    media_samples = IntProperty(
        name="Samples",
        description="Number of samples taken from camera to first object " \
            "encountered along ray path for media calculation",
        min=1, max=100, default=35)

    media_color = FloatVectorProperty(
        name="Media Color", description="The atmospheric media color.",
        precision=4, step=0.01, min=0, soft_max=1,
        default=(0.001, 0.001, 0.001), options={'ANIMATABLE'}, subtype='COLOR')

    baking_enable = BoolProperty(
        name="Enable Baking",
        description="Enable POV-Rays texture baking",
        default=False)
    indentation_character = EnumProperty(
        name="Indentation",
        description="Select the indentation type",
        items=(("0", "None", "No indentation"),
               ("1", "Tabs", "Indentation with tabs"),
               ("2", "Spaces", "Indentation with spaces")),
        default="2")
    indentation_spaces = IntProperty(
        name="Quantity of spaces",
        description="The number of spaces for indentation",
        min=1, max=10, default=4)

    comments_enable = BoolProperty(
        name="Enable Comments",
        description="Add comments to pov file",
        default=True)

    # Real pov options
    command_line_switches = StringProperty(
        name="Command Line Switches",
        description="Command line switches consist of a + (plus) or - (minus) sign, followed " \
            "by one or more alphabetic characters and possibly a numeric value.",
        default="", maxlen=500)

    antialias_enable = BoolProperty(
        name="Anti-Alias", description="Enable Anti-Aliasing",
        default=True)

    antialias_method = EnumProperty(
        name="Method",
        description="AA-sampling method. Type 1 is an adaptive, non-recursive, super-sampling "\
            "method. Type 2 is an adaptive and recursive super-sampling method.",
        items=(("0", "non-recursive AA", "Type 1 Sampling in POV-Ray"),
               ("1", "recursive AA", "Type 2 Sampling in POV-Ray")),
        default="1")

    antialias_depth = IntProperty(
        name="Antialias Depth", description="Depth of pixel for sampling",
        min=1, max=9, default=3)

    antialias_threshold = FloatProperty(
        name="Antialias Threshold", description="Tolerance for sub-pixels",
        min=0.0, max=1.0, soft_min=0.05, soft_max=0.5, default=0.1)

    jitter_enable = BoolProperty(
        name="Jitter",
        description="Enable Jittering. Adds noise into the sampling process (it should be " \
            "avoided to use jitter in animation).",
        default=True)

    jitter_amount = FloatProperty(
        name="Jitter Amount", description="Amount of jittering.",
        min=0.0, max=1.0, soft_min=0.01, soft_max=1.0, default=1.0)

    antialias_gamma = FloatProperty(
        name="Antialias Gamma",
        description="POV-Ray compares gamma-adjusted values for super sampling. Antialias " \
            "Gamma sets the Gamma before comparison.",
        min=0.0, max=5.0, soft_min=0.01, soft_max=2.5, default=2.5)

    max_trace_level = IntProperty(
        name="Max Trace Level",
        description="Number of reflections/refractions allowed on ray path",
        min=1, max=256, default=5)

    photon_spacing = FloatProperty(
        name="Spacing",
        description="Average distance between photons on surfaces. half this get four times " \
            "as many surface photons",
        min=0.001, max=1.000, soft_min=0.001, soft_max=1.000, default=0.005, precision=3)

    photon_max_trace_level = IntProperty(
        name="Max Trace Level",
        description="Number of reflections/refractions allowed on ray path",
        min=1, max=256, default=5)

    photon_adc_bailout = FloatProperty(
        name="ADC Bailout",
        description="The adc_bailout for photons. Use adc_bailout = " \
            "0.01 / brightest_ambient_object for good results",
        min=0.0, max=1000.0, soft_min=0.0, soft_max=1.0, default=0.1, precision=3)

    photon_gather_min = IntProperty(
        name="Gather Min", description="Minimum number of photons gathered for each point",
        min=1, max=256, default=20)

    photon_gather_max = IntProperty(
        name="Gather Max", description="Maximum number of photons gathered for each point",
        min=1, max=256, default=100)

    radio_adc_bailout = FloatProperty(
        name="ADC Bailout",
        description="The adc_bailout for radiosity rays. Use " \
            "adc_bailout = 0.01 / brightest_ambient_object for good results",
        min=0.0, max=1000.0, soft_min=0.0, soft_max=1.0, default=0.01, precision=3)

    radio_always_sample = BoolProperty(
        name="Always Sample",
        description="Only use the data from the pretrace step and not gather " \
            "any new samples during the final radiosity pass",
        default=True)

    radio_brightness = FloatProperty(
        name="Brightness",
        description="Amount objects are brightened before being returned " \
            "upwards to the rest of the system",
        min=0.0, max=1000.0, soft_min=0.0, soft_max=10.0, default=1.0)

    radio_count = IntProperty(
        name="Ray Count",
        description="Number of rays for each new radiosity value to be calculated " \
            "(halton sequence over 1600)",
        min=1, max=10000, soft_max=1600, default=35)

    radio_error_bound = FloatProperty(
        name="Error Bound",
        description="One of the two main speed/quality tuning values, " \
            "lower values are more accurate",
        min=0.0, max=1000.0, soft_min=0.1, soft_max=10.0, default=1.8)

    radio_gray_threshold = FloatProperty(
        name="Gray Threshold",
        description="One of the two main speed/quality tuning values, " \
            "lower values are more accurate",
        min=0.0, max=1.0, soft_min=0, soft_max=1, default=0.0)

    radio_low_error_factor = FloatProperty(
        name="Low Error Factor",
        description="Just enough samples is slightly blotchy. Low error changes error " \
            "tolerance for less critical last refining pass",
        min=0.0, max=1.0, soft_min=0.0, soft_max=1.0, default=0.5)

    # max_sample - not available yet
    radio_media = BoolProperty(
        name="Media", description="Radiosity estimation can be affected by media",
        default=False)

    # NOTE(review): soft_min == soft_max (0.1) here while default is 0.015 —
    # the soft range looks like a typo; confirm intended soft_max.
    radio_minimum_reuse = FloatProperty(
        name="Minimum Reuse",
        description="Fraction of the screen width which sets the minimum radius of reuse " \
            "for each sample point (At values higher than 2% expect errors)",
        min=0.0, max=1.0, soft_min=0.1, soft_max=0.1, default=0.015, precision=3)

    radio_nearest_count = IntProperty(
        name="Nearest Count",
        description="Number of old ambient values blended together to " \
            "create a new interpolated value",
        min=1, max=20, default=5)

    radio_normal = BoolProperty(
        name="Normals", description="Radiosity estimation can be affected by normals",
        default=False)

    radio_recursion_limit = IntProperty(
        name="Recursion Limit",
        description="how many recursion levels are used to calculate " \
            "the diffuse inter-reflection",
        min=1, max=20, default=3)

    radio_pretrace_start = FloatProperty(
        name="Pretrace Start",
        description="Fraction of the screen width which sets the size of the " \
            "blocks in the mosaic preview first pass",
        min=0.01, max=1.00, soft_min=0.02, soft_max=1.0, default=0.08)

    radio_pretrace_end = FloatProperty(
        name="Pretrace End",
        description="Fraction of the screen width which sets the size of the blocks " \
            "in the mosaic preview last pass",
        min=0.001, max=1.00, soft_min=0.01, soft_max=1.00, default=0.04, precision=3)
+
+
+###############################################################################
+# Material POV properties.
+###############################################################################
+class RenderPovSettingsMaterial(bpy.types.PropertyGroup):
+ irid_enable = BoolProperty(
+ name="Enable Iridescence",
+ description="Newton's thin film interference (like an oil slick on a puddle of " \
+ "water or the rainbow hues of a soap bubble.)",
+ default=False)
+
+ mirror_use_IOR = BoolProperty(
+ name="Correct Reflection",
+ description="Use same IOR as raytrace transparency to calculate mirror reflections. " \
+ "More physically correct",
+ default=False)
+
+ mirror_metallic = BoolProperty(
+ name="Metallic Reflection",
+ description="mirror reflections get colored as diffuse (for metallic materials)",
+ default=False)
+
+ conserve_energy = BoolProperty(
+ name="Conserve Energy",
+ description="Light transmitted is more correctly reduced by mirror reflections, " \
+ "also the sum of diffuse and translucency gets reduced below one ",
+ default=True)
+
+ irid_amount = FloatProperty(
+ name="amount",
+ description="Contribution of the iridescence effect to the overall surface color. " \
+ "As a rule of thumb keep to around 0.25 (25% contribution) or less, " \
+ "but experiment. If the surface is coming out too white, try lowering " \
+ "the diffuse and possibly the ambient values of the surface.",
+ min=0.0, max=1.0, soft_min=0.01, soft_max=1.0, default=0.25)
+
+ irid_thickness = FloatProperty(
+ name="thickness",
+ description="A very thin film will have a high frequency of color changes while a " \
+ "thick film will have large areas of color.",
+ min=0.0, max=1000.0, soft_min=0.1, soft_max=10.0, default=1)
+
+ irid_turbulence = FloatProperty(
+ name="turbulence", description="This parameter varies the thickness.",
+ min=0.0, max=10.0, soft_min=0.000, soft_max=1.0, default=0)
+
+ interior_fade_color = FloatVectorProperty(
+ name="Fade Color", description="Color of filtered attenuation for transparent materials",
+ precision=4, step=0.01, min=0.0, soft_max=1.0,
+ default=(0, 0, 0), options={'ANIMATABLE'}, subtype='COLOR')
+
+ caustics_enable = BoolProperty(
+ name="Caustics",
+ description="use only fake refractive caustics (default) or photon based " \
+ "reflective/refractive caustics",
+ default=True)
+
+ fake_caustics = BoolProperty(
+ name="Fake Caustics", description="use only (Fast) fake refractive caustics",
+ default=True)
+
+ fake_caustics_power = FloatProperty(
+ name="Fake caustics power",
+ description="Values typically range from 0.0 to 1.0 or higher. Zero is no caustics. " \
+ "Low, non-zero values give broad hot-spots while higher values give " \
+ "tighter, smaller simulated focal points",
+ min=0.00, max=10.0, soft_min=0.00, soft_max=1.10, default=0.1)
+
+ photons_refraction = BoolProperty(
+ name="Refractive Photon Caustics", description="more physically correct",
+ default=False)
+
+ photons_dispersion = FloatProperty(
+ name="Chromatic Dispersion",
+ description="Light passing through will be separated according to wavelength. " \
+ "This ratio of refractive indices for violet to red controls how much " \
+ "the colors are spread out 1 = no dispersion, good values are 1.01 to 1.1",
+ min=1.0000, max=10.000, soft_min=1.0000, soft_max=1.1000, precision=4, default=1.0000)
+
+ photons_dispersion_samples = IntProperty(
+ name="Dispersion Samples", description="Number of color-steps for dispersion",
+ min=2, max=128, default=7)
+
+ photons_reflection = BoolProperty(
+ name="Reflective Photon Caustics",
+ description="Use this to make your Sauron's ring ;-P",
+ default=False)
+
+ refraction_type = EnumProperty(
+ items=[("0", "None", "use only reflective caustics"),
+ ("1", "Fake Caustics", "use fake caustics"),
+ ("2", "Photons Caustics", "use photons for refractive caustics")],
+ name="Refractive",
+ description="use fake caustics (fast) or true photons for refractive Caustics",
+ default="1")
+
+ ##################################CustomPOV Code############################
+ replacement_text = StringProperty(
+ name="Declared name:",
+ description="Type the declared name in custom POV code or an external " \
+ ".inc it points at. texture {} expected",
+ default="")
+
+
+###############################################################################
+# Texture POV properties.
+###############################################################################
+class RenderPovSettingsTexture(bpy.types.PropertyGroup):
+ #Custom texture gamma
+ tex_gamma_enable = BoolProperty(
+ name="Enable custom texture gamma",
+ description="Notify some custom gamma for which texture has been precorrected " \
+ "without the file format carrying it and only if it differs from your " \
+ "OS expected standard (see pov doc)",
+ default=False)
+
+ tex_gamma_value = FloatProperty(
+ name="Custom texture gamma",
+ description="value for which the file was issued e.g. a Raw photo is gamma 1.0",
+ min=0.45, max=5.00, soft_min=1.00, soft_max=2.50, default=1.00)
+
+ ##################################CustomPOV Code############################
+ #Only DUMMIES below for now:
+ replacement_text = StringProperty(
+ name="Declared name:",
+ description="Type the declared name in custom POV code or an external .inc " \
+ "it points at. pigment {} expected",
+ default="")
+
+
+###############################################################################
+# Object POV properties.
+###############################################################################
+class RenderPovSettingsObject(bpy.types.PropertyGroup):
+ #Importance sampling
+ importance_value = FloatProperty(
+ name="Radiosity Importance",
+ description="Priority value relative to other objects for sampling radiosity rays. " \
+ "Increase to get more radiosity rays at comparatively small yet " \
+ "bright objects",
+ min=0.01, max=1.00, default=1.00)
+
+ #Collect photons
+ collect_photons = BoolProperty(
+ name="Receive Photon Caustics",
+ description="Enable object to collect photons from other objects caustics. Turn " \
+ "off for objects that don't really need to receive caustics (e.g. objects" \
+ " that generate caustics often don't need to show any on themselves).",
+ default=True)
+
+ #Photons spacing_multiplier
+ spacing_multiplier = FloatProperty(
+ name="Photons Spacing Multiplier",
+ description="Multiplier value relative to global spacing of photons. " \
+ "Decrease by half to get 4x more photons at surface of " \
+ "this object (or 8x media photons than specified in the globals",
+ min=0.01, max=1.00, default=1.00)
+
+ ##################################CustomPOV Code############################
+ #Only DUMMIES below for now:
+ replacement_text = StringProperty(
+ name="Declared name:",
+ description="Type the declared name in custom POV code or an external .inc " \
+ "it points at. Any POV shape expected e.g: isosurface {}",
+ default="")
+
+
+###############################################################################
+# Camera POV properties.
+###############################################################################
+class RenderPovSettingsCamera(bpy.types.PropertyGroup):
+ #DOF Toggle
+ dof_enable = BoolProperty(
+ name="Depth Of Field", description="EnablePOV-Ray Depth Of Field ",
+ default=True)
+
+ #Aperture (Intensity of the Blur)
+ dof_aperture = FloatProperty(
+ name="Aperture",
+ description="Similar to a real camera's aperture effect over focal blur (though not " \
+ "in physical units and independant of focal length). " \
+ "Increase to get more blur",
+ min=0.01, max=1.00, default=0.25)
+
+ #Aperture adaptive sampling
+ dof_samples_min = IntProperty(
+ name="Samples Min", description="Minimum number of rays to use for each pixel",
+ min=1, max=128, default=96)
+
+ dof_samples_max = IntProperty(
+ name="Samples Max", description="Maximum number of rays to use for each pixel",
+ min=1, max=128, default=128)
+
+ dof_variance = IntProperty(
+ name="Variance",
+ description="Minimum threshold (fractional value) for adaptive DOF sampling (up " \
+ "increases quality and render time). The value for the variance should " \
+ "be in the range of the smallest displayable color difference",
+ min=1, max=100000, soft_max=10000, default=256)
+
+ dof_confidence = FloatProperty(
+ name="Confidence",
+ description="Probability to reach the real color value. Larger confidence values " \
+ "will lead to more samples, slower traces and better images.",
+ min=0.01, max=0.99, default=0.90)
+
+ ##################################CustomPOV Code############################
+ #Only DUMMIES below for now:
+ replacement_text = StringProperty(
+ name="Texts in blend file",
+ description="Type the declared name in custom POV code or an external .inc " \
+ "it points at. camera {} expected",
+ default="")
+
+
+###############################################################################
+# Text POV properties.
+###############################################################################
+class RenderPovSettingsText(bpy.types.PropertyGroup):
+ custom_code = BoolProperty(
+ name="Custom Code",
+ description="Add this text at the top of the exported POV-Ray file",
+ default=False)
+
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ bpy.types.Scene.pov = PointerProperty(type=RenderPovSettingsScene)
+ bpy.types.Material.pov = PointerProperty(type=RenderPovSettingsMaterial)
+ bpy.types.Texture.pov = PointerProperty(type=RenderPovSettingsTexture)
+ bpy.types.Object.pov = PointerProperty(type=RenderPovSettingsObject)
+ bpy.types.Camera.pov = PointerProperty(type=RenderPovSettingsCamera)
+ bpy.types.Text.pov = PointerProperty(type=RenderPovSettingsText)
+
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+ del bpy.types.Scene.pov
+ del bpy.types.Material.pov
+ del bpy.types.Texture.pov
+ del bpy.types.Object.pov
+ del bpy.types.Camera.pov
+ del bpy.types.Text.pov
+
+
+if __name__ == "__main__":
+ register()
diff --git a/render_povray/render.py b/render_povray/render.py
new file mode 100644
index 00000000..beeb9d67
--- /dev/null
+++ b/render_povray/render.py
@@ -0,0 +1,2328 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+import bpy
+import subprocess
+import os
+import sys
+import time
+from math import atan, pi, degrees, sqrt
+import re
+
+##############################SF###########################
+##############find image texture
+
+
+def imageFormat(imgF):
+ ext = {
+ 'JPG': "jpeg",
+ 'JPEG': "jpeg",
+ 'GIF': "gif",
+ 'TGA': "tga",
+ 'IFF': "iff",
+ 'PPM': "ppm",
+ 'PNG': "png",
+ 'SYS': "sys",
+ 'TIFF': "tiff",
+ 'TIF': "tiff",
+ 'EXR': "exr", # POV3.7 Only!
+ 'HDR': "hdr", # POV3.7 Only! --MR
+ }.get(os.path.splitext(imgF)[-1].upper(), "")
+
+ if not ext:
+ print(" WARNING: texture image format not supported ")
+
+ return ext
+
+
+def imgMap(ts):
+ image_map = ""
+ if ts.mapping == 'FLAT':
+ image_map = "map_type 0 "
+ elif ts.mapping == 'SPHERE':
+ image_map = "map_type 1 " # map_type 7 in megapov
+ elif ts.mapping == 'TUBE':
+ image_map = "map_type 2 "
+
+ ## map_type 3 and 4 in development (?)
+ ## for POV-Ray, currently they just seem to default back to Flat (type 0)
+ #elif ts.mapping=="?":
+ # image_map = " map_type 3 "
+ #elif ts.mapping=="?":
+ # image_map = " map_type 4 "
+ if ts.texture.use_interpolation:
+ image_map += " interpolate 2 "
+ if ts.texture.extension == 'CLIP':
+ image_map += " once "
+ #image_map += "}"
+ #if ts.mapping=='CUBE':
+ # image_map+= "warp { cubic } rotate <-90,0,180>"
+ # no direct cube type mapping. Though this should work in POV 3.7
+ # it doesn't give that good results(best suited to environment maps?)
+ #if image_map == "":
+ # print(" No texture image found ")
+ return image_map
+
+
+def imgMapBG(wts):
+ image_mapBG = ""
+ # texture_coords refers to the mapping of world textures:
+ if wts.texture_coords == 'VIEW':
+ image_mapBG = " map_type 0 "
+ elif wts.texture_coords == 'ANGMAP':
+ image_mapBG = " map_type 1 "
+ elif wts.texture_coords == 'TUBE':
+ image_mapBG = " map_type 2 "
+
+ if wts.texture.use_interpolation:
+ image_mapBG += " interpolate 2 "
+ if wts.texture.extension == 'CLIP':
+ image_mapBG += " once "
+ #image_mapBG += "}"
+ #if wts.mapping == 'CUBE':
+ # image_mapBG += "warp { cubic } rotate <-90,0,180>"
+ # no direct cube type mapping. Though this should work in POV 3.7
+ # it doesn't give that good results(best suited to environment maps?)
+ #if image_mapBG == "":
+ # print(" No background texture image found ")
+ return image_mapBG
+
+
+def findInSubDir(filename, subdirectory=""):
+ pahFile = ""
+ if subdirectory:
+ path = subdirectory
+ else:
+ path = os.getcwd()
+ try:
+ for root, dirs, names in os.walk(path):
+ if filename in names:
+ pahFile = os.path.join(root, filename)
+ return pahFile
+ except OSError:
+ return ""
+
+
+def path_image(image):
+ import os
+ fn = bpy.path.abspath(image)
+ fn_strip = os.path.basename(fn)
+ if not os.path.isfile(fn):
+ fn = findInSubDir(os.path.basename(fn), os.path.dirname(bpy.data.filepath))
+ fn = os.path.realpath(fn)
+ return fn
+
+##############end find image texture
+
+
+def splitHyphen(name):
+ hyphidx = name.find("-")
+ if hyphidx == -1:
+ return name
+ else:
+ return name[:].replace("-", "")
+
+
+def safety(name, Level):
+ # safety string name material
+ #
+ # Level=1 is for texture with No specular nor Mirror reflection
+ # Level=2 is for texture with translation of spec and mir levels
+ # for when no map influences them
+ # Level=3 is for texture with Maximum Spec and Mirror
+
+ try:
+ if int(name) > 0:
+ prefix = "shader"
+ except:
+ prefix = ""
+ prefix = "shader_"
+ name = splitHyphen(name)
+ if Level == 2:
+ return prefix + name
+ elif Level == 1:
+ return prefix + name + "0" # used for 0 of specular map
+ elif Level == 3:
+ return prefix + name + "1" # used for 1 of specular map
+
+
+##############end safety string name material
+##############################EndSF###########################
+
+def is_renderable(scene, ob):
+ return (ob.is_visible(scene) and not ob.hide_render)
+
+
+def renderable_objects(scene):
+ return [ob for ob in scene.objects if is_renderable(scene, ob)]
+
+
+tabLevel = 0
+
+
+def write_pov(filename, scene=None, info_callback=None):
+ import mathutils
+ #file = filename
+ file = open(filename, "w")
+
+ # Only for testing
+ if not scene:
+ scene = bpy.data.scenes[0]
+
+ render = scene.render
+ world = scene.world
+ global_matrix = mathutils.Matrix.Rotation(-pi / 2.0, 4, 'X')
+
+ def setTab(tabtype, spaces):
+ TabStr = ""
+ if tabtype == '0':
+ TabStr = ""
+ elif tabtype == '1':
+ TabStr = "\t"
+ elif tabtype == '2':
+ TabStr = spaces * " "
+ return TabStr
+
+ tab = setTab(scene.pov.indentation_character, scene.pov.indentation_spaces)
+
+ def tabWrite(str_o):
+ if not scene.pov.tempfiles_enable:
+ global tabLevel
+ brackets = str_o.count("{") - str_o.count("}") + str_o.count("[") - str_o.count("]")
+ if brackets < 0:
+ tabLevel = tabLevel + brackets
+ if tabLevel < 0:
+ print("Indentation Warning: tabLevel = %s" % tabLevel)
+ tabLevel = 0
+ if tabLevel >= 1:
+ file.write("%s" % tab * tabLevel)
+ file.write(str_o)
+ if brackets > 0:
+ tabLevel = tabLevel + brackets
+ else:
+ file.write(str_o)
+
+ def uniqueName(name, nameSeq):
+
+ if name not in nameSeq:
+ name = splitHyphen(name)
+ return name
+
+ name_orig = name
+ i = 1
+ while name in nameSeq:
+ name = "%s_%.3d" % (name_orig, i)
+ i += 1
+ name = splitHyphen(name)
+ return name
+
+ def writeMatrix(matrix):
+ tabWrite("matrix <%.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, " \
+ "%.6f>\n" % (matrix[0][0], matrix[0][1], matrix[0][2], matrix[1][0], matrix[1][1],
+ matrix[1][2], matrix[2][0], matrix[2][1], matrix[2][2], matrix[3][0],
+ matrix[3][1], matrix[3][2]))
+
+ def MatrixAsPovString(matrix):
+ sMatrix = ("matrix <%.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, " \
+ "%.6f>\n" % (matrix[0][0], matrix[0][1], matrix[0][2], matrix[1][0], matrix[1][1],
+ matrix[1][2], matrix[2][0], matrix[2][1], matrix[2][2], matrix[3][0],
+ matrix[3][1], matrix[3][2]))
+ return sMatrix
+
+ def writeObjectMaterial(material, ob):
+
+ # DH - modified some variables to be function local, avoiding RNA write
+ # this should be checked to see if it is functionally correct
+
+ # Commented out: always write IOR to be able to use it for SSS, Fresnel reflections...
+ #if material and material.transparency_method == 'RAYTRACE':
+ if material:
+ # But there can be only one!
+ if material.subsurface_scattering.use: # SSS IOR get highest priority
+ tabWrite("interior {\n")
+ tabWrite("ior %.6f\n" % material.subsurface_scattering.ior)
+ # Then the raytrace IOR taken from raytrace transparency properties and used for
+ # reflections if IOR Mirror option is checked.
+ elif material.pov.mirror_use_IOR:
+ tabWrite("interior {\n")
+ tabWrite("ior %.6f\n" % material.raytrace_transparency.ior)
+ else:
+ tabWrite("interior {\n")
+ tabWrite("ior %.6f\n" % material.raytrace_transparency.ior)
+
+ pov_fake_caustics = False
+ pov_photons_refraction = False
+ pov_photons_reflection = False
+
+ if material.pov.photons_reflection:
+ pov_photons_reflection = True
+ if material.pov.refraction_type == "0":
+ pov_fake_caustics = False
+ pov_photons_refraction = False
+ elif material.pov.refraction_type == "1":
+ pov_fake_caustics = True
+ pov_photons_refraction = False
+ elif material.pov.refraction_type == "2":
+ pov_fake_caustics = False
+ pov_photons_refraction = True
+
+ # If only Raytrace transparency is set, its IOR will be used for refraction, but user
+ # can set up 'un-physical' fresnel reflections in raytrace mirror parameters.
+ # Last, if none of the above is specified, user can set up 'un-physical' fresnel
+ # reflections in raytrace mirror parameters. And pov IOR defaults to 1.
+ if material.pov.caustics_enable:
+ if pov_fake_caustics:
+ tabWrite("caustics %.3g\n" % material.pov.fake_caustics_power)
+ if pov_photons_refraction:
+ # Default of 1 means no dispersion
+ tabWrite("dispersion %.6f\n" % material.pov.photons_dispersion)
+ tabWrite("dispersion_samples %.d\n" % material.pov.photons_dispersion_samples)
+ #TODO
+ # Other interior args
+ if material.use_transparency and material.transparency_method == 'RAYTRACE':
+ # fade_distance
+ # In Blender this value has always been reversed compared to what tooltip says.
+ # 100.001 rather than 100 so that it does not get to 0
+ # which deactivates the feature in POV
+ tabWrite("fade_distance %.3g\n" % \
+ (100.001 - material.raytrace_transparency.depth_max))
+ # fade_power
+ tabWrite("fade_power %.3g\n" % material.raytrace_transparency.falloff)
+ # fade_color
+ tabWrite("fade_color <%.3g, %.3g, %.3g>\n" % material.pov.interior_fade_color[:])
+
+ # (variable) dispersion_samples (constant count for now)
+ tabWrite("}\n")
+
+ tabWrite("photons{")
+ if not ob.pov.collect_photons:
+ tabWrite("collect off\n")
+ tabWrite("target %.3g\n" % ob.pov.spacing_multiplier)
+ if pov_photons_refraction:
+ tabWrite("refraction on\n")
+ if pov_photons_reflection:
+ tabWrite("reflection on\n")
+ tabWrite("}\n")
+
+ materialNames = {}
+ DEF_MAT_NAME = "Default"
+
+ def writeMaterial(material):
+ # Assumes only called once on each material
+ if material:
+ name_orig = material.name
+ else:
+ name_orig = DEF_MAT_NAME
+
+ name = materialNames[name_orig] = uniqueName(bpy.path.clean_name(name_orig), materialNames)
+ comments = scene.pov.comments_enable
+
+ ##################
+ # Several versions of the finish: Level conditions are variations for specular/Mirror
+ # texture channel map with alternative finish of 0 specular and no mirror reflection.
+ # Level=1 Means No specular nor Mirror reflection
+ # Level=2 Means translation of spec and mir levels for when no map influences them
+ # Level=3 Means Maximum Spec and Mirror
+
+ def povHasnoSpecularMaps(Level):
+ if Level == 1:
+ tabWrite("#declare %s = finish {" % safety(name, Level=1))
+ if not scene.pov.tempfiles_enable and comments:
+ file.write(" //No specular nor Mirror reflection\n")
+ else:
+ tabWrite("\n")
+ elif Level == 2:
+ tabWrite("#declare %s = finish {" % safety(name, Level=2))
+ if not scene.pov.tempfiles_enable and comments:
+ file.write(" //translation of spec and mir levels for when no map " \
+ "influences them\n")
+ else:
+ tabWrite("\n")
+ elif Level == 3:
+ tabWrite("#declare %s = finish {" % safety(name, Level=3))
+ if not scene.pov.tempfiles_enable and comments:
+ file.write(" //Maximum Spec and Mirror\n")
+ else:
+ tabWrite("\n")
+
+ if material:
+ # POV-Ray 3.7 now uses two diffuse values respectively for front and back shading
+ # (the back diffuse is like blender translucency)
+ frontDiffuse = material.diffuse_intensity
+ backDiffuse = material.translucency
+
+ if material.pov.conserve_energy:
+
+ #Total should not go above one
+ if (frontDiffuse + backDiffuse) <= 1.0:
+ pass
+ elif frontDiffuse == backDiffuse:
+ # Try to respect the user's 'intention' by comparing the two values but
+ # bringing the total back to one.
+ frontDiffuse = backDiffuse = 0.5
+ # Let the highest value stay the highest value.
+ elif frontDiffuse > backDiffuse:
+ # clamps the sum below 1
+ backDiffuse = min(backDiffuse, (1.0 - frontDiffuse))
+ else:
+ frontDiffuse = min(frontDiffuse, (1.0 - backDiffuse))
+
+ # map hardness between 0.0 and 1.0
+ roughness = ((1.0 - ((material.specular_hardness - 1.0) / 510.0)))
+ ## scale from 0.0 to 0.1
+ roughness *= 0.1
+ # add a small value because 0.0 is invalid.
+ roughness += (1.0 / 511.0)
+
+ ################################Diffuse Shader######################################
+ # Not used for Full spec (Level=3) of the shader.
+ if material.diffuse_shader == 'OREN_NAYAR' and Level != 3:
+ # Blender roughness is what is generally called oren nayar Sigma,
+ # and brilliance in POV-Ray.
+ tabWrite("brilliance %.3g\n" % (0.9 + material.roughness))
+
+ if material.diffuse_shader == 'TOON' and Level != 3:
+ tabWrite("brilliance %.3g\n" % (0.01 + material.diffuse_toon_smooth * 0.25))
+ # Lower diffuse and increase specular for toon effect seems to look better
+ # in POV-Ray.
+ frontDiffuse *= 0.5
+
+ if material.diffuse_shader == 'MINNAERT' and Level != 3:
+ #tabWrite("aoi %.3g\n" % material.darkness)
+ pass # let's keep things simple for now
+ if material.diffuse_shader == 'FRESNEL' and Level != 3:
+ #tabWrite("aoi %.3g\n" % material.diffuse_fresnel_factor)
+ pass # let's keep things simple for now
+ if material.diffuse_shader == 'LAMBERT' and Level != 3:
+ # trying to best match lambert attenuation by that constant brilliance value
+ tabWrite("brilliance 1.8\n")
+
+ if Level == 2:
+ ###########################Specular Shader######################################
+ # No difference between phong and cook torrence in blender HaHa!
+ if (material.specular_shader == 'COOKTORR' or
+ material.specular_shader == 'PHONG'):
+ tabWrite("phong %.3g\n" % (material.specular_intensity))
+ tabWrite("phong_size %.3g\n" % (material.specular_hardness / 2 + 0.25))
+
+ # POV-Ray 'specular' keyword corresponds to a Blinn model, without the ior.
+ elif material.specular_shader == 'BLINN':
+ # Use blender Blinn's IOR just as some factor for spec intensity
+ tabWrite("specular %.3g\n" % (material.specular_intensity *
+ (material.specular_ior / 4.0)))
+ tabWrite("roughness %.3g\n" % roughness)
+ #Could use brilliance 2(or varying around 2 depending on ior or factor) too.
+
+ elif material.specular_shader == 'TOON':
+ tabWrite("phong %.3g\n" % (material.specular_intensity * 2))
+ # use extreme phong_size
+ tabWrite("phong_size %.3g\n" % (0.1 + material.specular_toon_smooth / 2))
+
+ elif material.specular_shader == 'WARDISO':
+ # find best suited default constant for brilliance Use both phong and
+ # specular for some values.
+ tabWrite("specular %.3g\n" % (material.specular_intensity /
+ (material.specular_slope + 0.0005)))
+ # find best suited default constant for brilliance Use both phong and
+ # specular for some values.
+ tabWrite("roughness %.4g\n" % (0.0005 + material.specular_slope / 10.0))
+ # find best suited default constant for brilliance Use both phong and
+ # specular for some values.
+ tabWrite("brilliance %.4g\n" % (1.8 - material.specular_slope * 1.8))
+
+ ####################################################################################
+ elif Level == 1:
+ tabWrite("specular 0\n")
+ elif Level == 3:
+ tabWrite("specular 1\n")
+ tabWrite("diffuse %.3g %.3g\n" % (frontDiffuse, backDiffuse))
+
+ tabWrite("ambient %.3g\n" % material.ambient)
+ # POV-Ray blends the global value
+ #tabWrite("ambient rgb <%.3g, %.3g, %.3g>\n" % \
+ # tuple([c*material.ambient for c in world.ambient_color]))
+ tabWrite("emission %.3g\n" % material.emit) # New in POV-Ray 3.7
+
+ #POV-Ray just ignores roughness if there's no specular keyword
+ #tabWrite("roughness %.3g\n" % roughness)
+
+ if material.pov.conserve_energy:
+ # added for more realistic shading. Needs some checking to see if it
+ # really works. --Maurice.
+ tabWrite("conserve_energy\n")
+
+ # 'phong 70.0 '
+ if Level != 1:
+ if material.raytrace_mirror.use:
+ raytrace_mirror = material.raytrace_mirror
+ if raytrace_mirror.reflect_factor:
+ tabWrite("reflection {\n")
+ tabWrite("rgb <%.3g, %.3g, %.3g>" % material.mirror_color[:])
+ if material.pov.mirror_metallic:
+ tabWrite("metallic %.3g" % (raytrace_mirror.reflect_factor))
+ if material.pov.mirror_use_IOR: # WORKING ?
+ # Removed from the line below: gives a more physically correct
+ # material but needs proper IOR. --Maurice
+ tabWrite("fresnel 1 ")
+ tabWrite("falloff %.3g exponent %.3g} " % \
+ (raytrace_mirror.fresnel, raytrace_mirror.fresnel_factor))
+
+ if material.subsurface_scattering.use:
+ subsurface_scattering = material.subsurface_scattering
+ tabWrite("subsurface { <%.3g, %.3g, %.3g>, <%.3g, %.3g, %.3g> }\n" % (
+ sqrt(subsurface_scattering.radius[0]) * 1.5,
+ sqrt(subsurface_scattering.radius[1]) * 1.5,
+ sqrt(subsurface_scattering.radius[2]) * 1.5,
+ 1.0 - subsurface_scattering.color[0],
+ 1.0 - subsurface_scattering.color[1],
+ 1.0 - subsurface_scattering.color[2])
+ )
+
+ if material.pov.irid_enable:
+ tabWrite("irid { %.4g thickness %.4g turbulence %.4g }" % \
+ (material.pov.irid_amount, material.pov.irid_thickness,
+ material.pov.irid_turbulence))
+
+ else:
+ tabWrite("diffuse 0.8\n")
+ tabWrite("phong 70.0\n")
+
+ #tabWrite("specular 0.2\n")
+
+ # This is written into the object
+ '''
+ if material and material.transparency_method=='RAYTRACE':
+ 'interior { ior %.3g} ' % material.raytrace_transparency.ior
+ '''
+
+ #tabWrite("crand 1.0\n") # Sand graininess
+ #tabWrite("metallic %.6f\n" % material.spec)
+ #tabWrite("phong %.6f\n" % material.spec)
+ #tabWrite("phong_size %.6f\n" % material.spec)
+ #tabWrite("brilliance %.6f " % (material.specular_hardness/256.0) # Like hardness
+
+ tabWrite("}\n\n")
+
+ # Level=2 Means translation of spec and mir levels for when no map influences them
+ povHasnoSpecularMaps(Level=2)
+
+ if material:
+ special_texture_found = False
+ for t in material.texture_slots:
+ if(t and t.texture.type == 'IMAGE' and t.use and t.texture.image and
+ (t.use_map_specular or t.use_map_raymir or t.use_map_normal or t.use_map_alpha)):
+ special_texture_found = True
+ continue # Some texture found
+
+ if special_texture_found:
+ # Level=1 Means No specular nor Mirror reflection
+ povHasnoSpecularMaps(Level=1)
+
+ # Level=3 Means Maximum Spec and Mirror
+ povHasnoSpecularMaps(Level=3)
+
+ def exportCamera():
+ camera = scene.camera
+
+ # DH disabled for now, this isn't the correct context
+ active_object = None # bpy.context.active_object # does not always work MR
+ matrix = global_matrix * camera.matrix_world
+ focal_point = camera.data.dof_distance
+
+ # compute resolution
+ Qsize = float(render.resolution_x) / float(render.resolution_y)
+ tabWrite("#declare camLocation = <%.6f, %.6f, %.6f>;\n" % \
+ (matrix[3][0], matrix[3][1], matrix[3][2]))
+ tabWrite("#declare camLookAt = <%.6f, %.6f, %.6f>;\n" % \
+ tuple([degrees(e) for e in matrix.to_3x3().to_euler()]))
+
+ tabWrite("camera {\n")
+ if scene.pov.baking_enable and active_object and active_object.type == 'MESH':
+ tabWrite("mesh_camera{ 1 3\n") # distribution 3 is what we want here
+ tabWrite("mesh{%s}\n" % active_object.name)
+ tabWrite("}\n")
+ tabWrite("location <0,0,.01>")
+ tabWrite("direction <0,0,-1>")
+ # Using standard camera otherwise
+ else:
+ tabWrite("location <0, 0, 0>\n")
+ tabWrite("look_at <0, 0, -1>\n")
+ tabWrite("right <%s, 0, 0>\n" % - Qsize)
+ tabWrite("up <0, 1, 0>\n")
+ tabWrite("angle %f\n" % (360.0 * atan(16.0 / camera.data.lens) / pi))
+
+ tabWrite("rotate <%.6f, %.6f, %.6f>\n" % \
+ tuple([degrees(e) for e in matrix.to_3x3().to_euler()]))
+ tabWrite("translate <%.6f, %.6f, %.6f>\n" % (matrix[3][0], matrix[3][1], matrix[3][2]))
+ if camera.data.pov.dof_enable and focal_point != 0:
+ tabWrite("aperture %.3g\n" % camera.data.pov.dof_aperture)
+ tabWrite("blur_samples %d %d\n" % \
+ (camera.data.pov.dof_samples_min, camera.data.pov.dof_samples_max))
+ tabWrite("variance 1/%d\n" % camera.data.pov.dof_variance)
+ tabWrite("confidence %.3g\n" % camera.data.pov.dof_confidence)
+ tabWrite("focal_point <0, 0, %f>\n" % focal_point)
+ tabWrite("}\n")
+
+ def exportLamps(lamps):
+ # Incremented after each lamp export to declare its target
+ # currently used for Fresnel diffuse shader as their slope vector:
+ global lampCount
+ lampCount = 0
+ # Get all lamps
+ for ob in lamps:
+ lamp = ob.data
+
+ matrix = global_matrix * ob.matrix_world
+
+ # Colour is modified by energy # multiplied by 2 for a better match --Maurice
+ color = tuple([c * lamp.energy * 2.0 for c in lamp.color])
+
+ tabWrite("light_source {\n")
+ tabWrite("< 0,0,0 >\n")
+ tabWrite("color rgb<%.3g, %.3g, %.3g>\n" % color)
+
+ if lamp.type == 'POINT':
+ pass
+ elif lamp.type == 'SPOT':
+ tabWrite("spotlight\n")
+
+ # Falloff is the main radius from the centre line
+ tabWrite("falloff %.2f\n" % (degrees(lamp.spot_size) / 2.0)) # 1 TO 179 FOR BOTH
+ tabWrite("radius %.6f\n" % \
+ ((degrees(lamp.spot_size) / 2.0) * (1.0 - lamp.spot_blend)))
+
+ # Blender does not have a tightness equivalent, 0 is most like blender default.
+ tabWrite("tightness 0\n") # 0:10f
+
+ tabWrite("point_at <0, 0, -1>\n")
+ elif lamp.type == 'SUN':
+ tabWrite("parallel\n")
+ tabWrite("point_at <0, 0, -1>\n") # *must* be after 'parallel'
+
+ elif lamp.type == 'AREA':
+ tabWrite("fade_distance %.6f\n" % (lamp.distance / 5.0))
+ # Area lights have no falloff type, so always use blenders lamp quad equivalent
+ # for those?
+ tabWrite("fade_power %d\n" % 2)
+ size_x = lamp.size
+ samples_x = lamp.shadow_ray_samples_x
+ if lamp.shape == 'SQUARE':
+ size_y = size_x
+ samples_y = samples_x
+ else:
+ size_y = lamp.size_y
+ samples_y = lamp.shadow_ray_samples_y
+
+ tabWrite("area_light <%d,0,0>,<0,0,%d> %d, %d\n" % \
+ (size_x, size_y, samples_x, samples_y))
+ if lamp.shadow_ray_sample_method == 'CONSTANT_JITTERED':
+ if lamp.jitter:
+ tabWrite("jitter\n")
+ else:
+ tabWrite("adaptive 1\n")
+ tabWrite("jitter\n")
+
+ # HEMI never has any shadow_method attribute
+ if(not scene.render.use_shadows or lamp.type == 'HEMI' or
+ (lamp.type != 'HEMI' and lamp.shadow_method == 'NOSHADOW')):
+ tabWrite("shadowless\n")
+
+ # Sun shouldn't be attenuated. Hemi and area lights have no falloff attribute so they
+ # are put to type 2 attenuation a little higher above.
+ if lamp.type not in ('SUN', 'AREA', 'HEMI'):
+ tabWrite("fade_distance %.6f\n" % (lamp.distance / 5.0))
+ if lamp.falloff_type == 'INVERSE_SQUARE':
+ tabWrite("fade_power %d\n" % 2) # Use blenders lamp quad equivalent
+ elif lamp.falloff_type == 'INVERSE_LINEAR':
+ tabWrite("fade_power %d\n" % 1) # Use blenders lamp linear
+ # upposing using no fade power keyword would default to constant, no attenuation.
+ elif lamp.falloff_type == 'CONSTANT':
+ pass
+ # Using Custom curve for fade power 3 for now.
+ elif lamp.falloff_type == 'CUSTOM_CURVE':
+ tabWrite("fade_power %d\n" % 4)
+
+ writeMatrix(matrix)
+
+ tabWrite("}\n")
+
+ lampCount += 1
+
+ # v(A,B) rotates vector A about origin by vector B.
+ file.write("#declare lampTarget%s= vrotate(<%.4g,%.4g,%.4g>,<%.4g,%.4g,%.4g>);\n" % \
+ (lampCount, -(ob.location.x), -(ob.location.y), -(ob.location.z),
+ ob.rotation_euler.x, ob.rotation_euler.y, ob.rotation_euler.z))
+
+####################################################################################################
+
def exportMeta(metas):
    """Write each metaball object of *metas* as one POV-Ray ``blob {}`` block.

    Only 'BALL' and 'ELLIPSOID' elements are exported; an ellipsoid is
    emitted as a sphere component with counter-scaled location plus a blob
    level ``scale``.  Relies on the enclosing exporter's closure state
    (``file``, ``tabWrite``, ``scene``, ``materialNames``, ``safety``, ...).
    NOTE(review): indentation reconstructed from a whitespace-mangled dump —
    confirm against the original revision.
    """
    # TODO - blenders 'motherball' naming is not supported.

    if not scene.pov.tempfiles_enable and scene.pov.comments_enable and len(metas) >= 1:
        file.write("//--Blob objects--\n\n")

    for ob in metas:
        meta = ob.data

        # important because no elements will break parsing.
        elements = [elem for elem in meta.elements if elem.type in ('BALL', 'ELLIPSOID')]

        if elements:
            tabWrite("blob {\n")
            tabWrite("threshold %.4g\n" % meta.threshold)
            importance = ob.pov.importance_value

            # Metaballs carry at most one usable material slot; an empty
            # collection raises IndexError (was a silent bare 'except:').
            try:
                material = meta.materials[0]  # lame! - blender cant do anything else.
            except IndexError:
                material = None

            for elem in elements:
                loc = elem.co

                stiffness = elem.stiffness
                if elem.use_negative:
                    stiffness = - stiffness

                if elem.type == 'BALL':
                    tabWrite("sphere { <%.6g, %.6g, %.6g>, %.4g, %.4g }\n" % \
                             (loc.x, loc.y, loc.z, elem.radius, stiffness))
                    # After this we could do something simple like...
                    # "pigment {Blue} }"
                    # except we'll write the color

                elif elem.type == 'ELLIPSOID':
                    # location is modified by scale
                    tabWrite("sphere { <%.6g, %.6g, %.6g>, %.4g, %.4g }\n" % \
                             (loc.x / elem.size_x, loc.y / elem.size_y, loc.z / elem.size_z,
                              elem.radius, stiffness))
                    tabWrite("scale <%.6g, %.6g, %.6g> \n" % \
                             (elem.size_x, elem.size_y, elem.size_z))

            if material:
                diffuse_color = material.diffuse_color
                trans = 1.0 - material.alpha
                if material.use_transparency and material.transparency_method == 'RAYTRACE':
                    povFilter = material.raytrace_transparency.filter * (1.0 - material.alpha)
                    trans = (1.0 - material.alpha) - povFilter
                else:
                    povFilter = 0.0

                material_finish = materialNames[material.name]

                tabWrite("pigment {rgbft<%.3g, %.3g, %.3g, %.3g, %.3g>} \n" % \
                         (diffuse_color[0], diffuse_color[1], diffuse_color[2],
                          povFilter, trans))
                tabWrite("finish {%s}\n" % safety(material_finish, Level=2))
            else:
                tabWrite("pigment {rgb<1 1 1>} \n")
                # Write the finish last.
                tabWrite("finish {%s}\n" % (safety(DEF_MAT_NAME, Level=2)))

            writeObjectMaterial(material, ob)

            writeMatrix(global_matrix * ob.matrix_world)
            #Importance for radiosity sampling added here:
            tabWrite("radiosity { \n")
            tabWrite("importance %3g \n" % importance)
            tabWrite("}\n")

            tabWrite("}\n")  # End of Metaball block

    if not scene.pov.tempfiles_enable and scene.pov.comments_enable and len(metas) >= 1:
        file.write("\n")
+
# objectNames = {}
# Fallback POV identifier for objects/data when no usable name is available.
DEF_OBJ_NAME = "Default"
+
def exportMeshes(scene, sel):
    """Write every exportable object of *sel* as a named POV-Ray ``mesh2``.

    Objects sharing the same mesh data AND exactly the same materials in the
    same slots share one ``#declare``'d mesh2; each placement is then emitted
    at the end as a lightweight ``object {}`` instance.  Relies on the
    enclosing exporter's closure state (``file``, ``tabWrite``, ``tab``,
    ``tabLevel``, ``materialNames``, ``safety``, ``imageFormat``, ``imgMap``,
    ``path_image``, ``splitHyphen``, ``global_matrix``, ``info_callback``).
    NOTE(review): indentation reconstructed from a whitespace-mangled dump;
    the nesting of the texture-writing section was inferred from POV-Ray
    brace balance — confirm against the original revision.
    """
    # obmatslist = []
    # def hasUniqueMaterial():
    #     # Grab materials attached to object instances ...
    #     if hasattr(ob, 'material_slots'):
    #         for ms in ob.material_slots:
    #             if ms.material != None and ms.link == 'OBJECT':
    #                 if ms.material in obmatslist:
    #                     return False
    #                 else:
    #                     obmatslist.append(ms.material)
    #         return True
    # def hasObjectMaterial(ob):
    #     # Grab materials attached to object instances ...
    #     if hasattr(ob, 'material_slots'):
    #         for ms in ob.material_slots:
    #             if ms.material != None and ms.link == 'OBJECT':
    #                 # If there is at least one material slot linked to the object
    #                 # and not the data (mesh), always create a new, “private” data instance.
    #                 return True
    #     return False

    # For objects using local material(s) only!
    # This is a mapping between a tuple (dataname, materialnames, …), and the POV dataname.
    # As only objects using:
    # * The same data.
    # * EXACTLY the same materials, in EXACTLY the same sockets.
    # … can share a same instance in POV export.
    obmats2data = {}

    def checkObjectMaterials(ob, name, dataname):
        """Return the POV dataname to use for *ob* when it has object-linked
        (local) materials, else None (data may then be shared as-is)."""
        if hasattr(ob, 'material_slots'):
            has_local_mats = False
            key = [dataname]
            for ms in ob.material_slots:
                if ms.material != None:
                    key.append(ms.material.name)
                    if ms.link == 'OBJECT' and not has_local_mats:
                        has_local_mats = True
                else:
                    # Even if the slot is empty, it is important to grab it…
                    key.append("")
            if has_local_mats:
                # If this object uses local material(s), lets find if another object
                # using the same data and exactly the same list of materials
                # (in the same slots) has already been processed…
                # Note that here also, we use object name as new, unique dataname for Pov.
                key = tuple(key)  # Lists are not hashable…
                if key not in obmats2data:
                    obmats2data[key] = name
                return obmats2data[key]
        return None

    data_ref = {}

    def store(scene, ob, name, dataname, matrix):
        """Record one placement of *ob*; return the POV name of the data if
        it still has to be written, or None when it is already declared.

        The Object needs to be written at least once but if its data is
        already in data_ref this has already been done.
        """
        if ob.is_modified(scene, 'RENDER'):
            # Data modified.
            # Create unique entry in data_ref by using object name
            # (always unique in Blender) as data name.
            data_ref[name] = [(name, MatrixAsPovString(matrix))]
            return name
        # Here, we replace dataname by the value returned by checkObjectMaterials, only if
        # it is not evaluated to False (i.e. only if the object uses some local material(s)).
        dataname = checkObjectMaterials(ob, name, dataname) or dataname
        if dataname in data_ref:
            # Data already known, just add the object instance.
            data_ref[dataname].append((name, MatrixAsPovString(matrix)))
            # No need to write data
            return None
        else:
            # Data not yet processed, create a new entry in data_ref.
            data_ref[dataname] = [(name, MatrixAsPovString(matrix))]
            return dataname

    ob_num = 0
    for ob in sel:
        ob_num += 1

        # XXX I moved all those checks here, as there is no need to compute names
        # for object we won’t export here!
        if ob.type in ('LAMP', 'CAMERA', 'EMPTY', 'META', 'ARMATURE', 'LATTICE'):
            continue

        try:
            me = ob.to_mesh(scene, True, 'RENDER')
        except:
            # happens when curves cant be made into meshes because of no-data
            continue

        importance = ob.pov.importance_value
        me_materials = me.materials
        me_faces = me.faces[:]

        if not me or not me_faces:
            continue

        #############################################
        # Generating a name for object just like materials to be able to use it
        # (baking for now or anything else).
        # XXX I don’t understand that – if we are here, sel if a non-empty iterable,
        # so this condition is always True, IMO -- mont29
        if sel:
            name_orig = "OB" + ob.name
            dataname_orig = "DATA" + ob.data.name
        else:
            name_orig = DEF_OBJ_NAME
            dataname_orig = DEF_OBJ_NAME
        name = splitHyphen(bpy.path.clean_name(name_orig))
        dataname = splitHyphen(bpy.path.clean_name(dataname_orig))
        ## for slot in ob.material_slots:
        ##     if slot.material != None and slot.link == 'OBJECT':
        ##         obmaterial = slot.material

        #############################################

        if info_callback:
            info_callback("Object %2.d of %2.d (%s)" % (ob_num, len(sel), ob.name))

        #if ob.type != 'MESH':
        #    continue
        # me = ob.data

        matrix = global_matrix * ob.matrix_world
        povdataname = store(scene, ob, name, dataname, matrix)
        if povdataname is None:
            print("This is an instance")
            continue

        print("Writing Down First Occurence")

        try:
            uv_layer = me.uv_textures.active.data
        except AttributeError:
            uv_layer = None

        try:
            vcol_layer = me.vertex_colors.active.data
        except AttributeError:
            vcol_layer = None

        faces_verts = [f.vertices[:] for f in me_faces]
        faces_normals = [f.normal[:] for f in me_faces]
        verts_normals = [v.normal[:] for v in me.vertices]

        # quads incur an extra face
        quadCount = sum(1 for f in faces_verts if len(f) == 4)

        # Use named declaration to allow reference e.g. for baking. MR
        file.write("\n")
        tabWrite("#declare %s =\n" % povdataname)
        tabWrite("mesh2 {\n")
        tabWrite("vertex_vectors {\n")
        tabWrite("%d" % len(me.vertices))  # vert count

        tabStr = tab * tabLevel
        for v in me.vertices:
            if not scene.pov.tempfiles_enable and scene.pov.list_lf_enable:
                file.write(",\n")
                file.write(tabStr + "<%.6f, %.6f, %.6f>" % v.co[:])  # vert count
            else:
                file.write(", ")
                file.write("<%.6f, %.6f, %.6f>" % v.co[:])  # vert count
            #tabWrite("<%.6f, %.6f, %.6f>" % v.co[:]) # vert count
        file.write("\n")
        tabWrite("}\n")

        # Build unique Normal list
        uniqueNormals = {}
        for fi, f in enumerate(me_faces):
            fv = faces_verts[fi]
            # [-1] is a dummy index, use a list so we can modify in place
            if f.use_smooth:  # Use vertex normals
                for v in fv:
                    key = verts_normals[v]
                    uniqueNormals[key] = [-1]
            else:  # Use face normal
                key = faces_normals[fi]
                uniqueNormals[key] = [-1]

        tabWrite("normal_vectors {\n")
        tabWrite("%d" % len(uniqueNormals))  # vert count
        idx = 0
        tabStr = tab * tabLevel
        for no, index in uniqueNormals.items():
            if not scene.pov.tempfiles_enable and scene.pov.list_lf_enable:
                file.write(",\n")
                file.write(tabStr + "<%.6f, %.6f, %.6f>" % no)  # vert count
            else:
                file.write(", ")
                file.write("<%.6f, %.6f, %.6f>" % no)  # vert count
            index[0] = idx
            idx += 1
        file.write("\n")
        tabWrite("}\n")

        # Vertex colours
        vertCols = {}  # Use for material colours also.

        if uv_layer:
            # Generate unique UV's
            uniqueUVs = {}

            for fi, uv in enumerate(uv_layer):
                if len(faces_verts[fi]) == 4:
                    uvs = uv.uv1, uv.uv2, uv.uv3, uv.uv4
                else:
                    uvs = uv.uv1, uv.uv2, uv.uv3

                for uv in uvs:
                    uniqueUVs[uv[:]] = [-1]

            tabWrite("uv_vectors {\n")
            #print unique_uvs
            tabWrite("%d" % len(uniqueUVs))  # vert count
            idx = 0
            tabStr = tab * tabLevel
            for uv, index in uniqueUVs.items():
                if not scene.pov.tempfiles_enable and scene.pov.list_lf_enable:
                    file.write(",\n")
                    file.write(tabStr + "<%.6f, %.6f>" % uv)
                else:
                    file.write(", ")
                    file.write("<%.6f, %.6f>" % uv)
                index[0] = idx
                idx += 1
            '''
            else:
                # Just add 1 dummy vector, no real UV's
                tabWrite('1') # vert count
                file.write(',\n\t\t<0.0, 0.0>')
            '''
            file.write("\n")
            tabWrite("}\n")

        if me.vertex_colors:
            for fi, f in enumerate(me_faces):
                material_index = f.material_index
                material = me_materials[material_index]

                if material and material.use_vertex_color_paint:
                    col = vcol_layer[fi]

                    if len(faces_verts[fi]) == 4:
                        cols = col.color1, col.color2, col.color3, col.color4
                    else:
                        cols = col.color1, col.color2, col.color3

                    for col in cols:
                        key = col[0], col[1], col[2], material_index  # Material index!
                        vertCols[key] = [-1]
                else:
                    if material:
                        diffuse_color = material.diffuse_color[:]
                        key = diffuse_color[0], diffuse_color[1], diffuse_color[2], \
                              material_index
                        vertCols[key] = [-1]
        else:
            # No vertex colours, so write material colours as vertex colours
            for i, material in enumerate(me_materials):
                if material:
                    diffuse_color = material.diffuse_color[:]
                    key = diffuse_color[0], diffuse_color[1], diffuse_color[2], i  # i == f.mat
                    vertCols[key] = [-1]

        # Vert Colours
        tabWrite("texture_list {\n")
        file.write(tabStr + "%s" % (len(vertCols)))  # vert count
        idx = 0

        for col, index in vertCols.items():
            if me_materials:
                material = me_materials[col[3]]
                material_finish = materialNames[material.name]

                if material.use_transparency:
                    trans = 1.0 - material.alpha
                else:
                    trans = 0.0

                if material.use_transparency and material.transparency_method == 'RAYTRACE':
                    povFilter = material.raytrace_transparency.filter * (1.0 - material.alpha)
                    trans = (1.0 - material.alpha) - povFilter
                else:
                    povFilter = 0.0
            else:
                material_finish = DEF_MAT_NAME  # not working properly,
                trans = 0.0

            ##############SF
            texturesDif = ""
            texturesSpec = ""
            texturesNorm = ""
            texturesAlpha = ""
            # NOTE(review): if me_materials is empty, 'material' is unbound here —
            # presumably this path is unreachable in practice; confirm.
            for t in material.texture_slots:
                if t and t.texture.type == 'IMAGE' and t.use and t.texture.image:
                    image_filename = path_image(t.texture.image.filepath)
                    imgGamma = ""
                    if image_filename:
                        if t.use_map_color_diffuse:
                            texturesDif = image_filename
                            colvalue = t.default_value
                            t_dif = t
                            if t_dif.texture.pov.tex_gamma_enable:
                                imgGamma = (" gamma %.3g " % t_dif.texture.pov.tex_gamma_value)
                        if t.use_map_specular or t.use_map_raymir:
                            texturesSpec = image_filename
                            colvalue = t.default_value
                            t_spec = t
                        if t.use_map_normal:
                            texturesNorm = image_filename
                            colvalue = t.normal_factor * 10.0
                            #textNormName=t.texture.image.name + ".normal"
                            #was the above used? --MR
                            t_nor = t
                        if t.use_map_alpha:
                            texturesAlpha = image_filename
                            colvalue = t.alpha_factor * 10.0
                            #textDispName=t.texture.image.name + ".displ"
                            #was the above used? --MR
                            t_alpha = t

            ####################################################################################
            if material.pov.replacement_text != "":
                file.write("\n")
                file.write(" texture{%s}\n" % material.pov.replacement_text)

            else:
                file.write("\n")
                # THIS AREA NEEDS TO LEAVE THE TEXTURE OPEN UNTIL ALL MAPS ARE WRITTEN DOWN.
                # --MR
                tabWrite("texture {\n")

                ################################################################################
                if material.diffuse_shader == 'MINNAERT':
                    tabWrite("\n")
                    tabWrite("aoi\n")
                    tabWrite("texture_map {\n")
                    tabWrite("[%.3g finish {diffuse %.3g}]\n" % \
                             (material.darkness / 2.0, 2.0 - material.darkness))
                    tabWrite("[%.3g\n" % (1.0 - (material.darkness / 2.0)))

                if material.diffuse_shader == 'FRESNEL':
                    # For FRESNEL diffuse in POV, we'll layer slope patterned textures
                    # with lamp vector as the slope vector and nest one slope per lamp
                    # into each texture map's entry.
                    c = 1
                    while (c <= lampCount):
                        tabWrite("slope { lampTarget%s }\n" % (c))
                        tabWrite("texture_map {\n")
                        # Diffuse Fresnel value and factor go up to five,
                        # other kind of values needed: used the number 5 below to remap
                        tabWrite("[%.3g finish {diffuse %.3g}]\n" % \
                                 ((5.0 - material.diffuse_fresnel) / 5,
                                  (material.diffuse_intensity *
                                   ((5.0 - material.diffuse_fresnel_factor) / 5))))
                        tabWrite("[%.3g\n" % ((material.diffuse_fresnel_factor / 5) *
                                              (material.diffuse_fresnel / 5.0)))
                        c += 1

                # if shader is a 'FRESNEL' or 'MINNAERT': slope pigment pattern or aoi
                # and texture map above, the rest below as one of its entry
                if texturesSpec != "" or texturesAlpha != "":
                    if texturesSpec != "":
                        # tabWrite("\n")
                        tabWrite("pigment_pattern {\n")
                        # POV-Ray "scale" is not a number of repetitions factor, but its
                        # inverse, a standard scale factor.
                        # Offset seems needed relatively to scale so probably center of the
                        # scale is not the same in blender and POV
                        mappingSpec = "translate <%.4g,%.4g,%.4g> scale <%.4g,%.4g,%.4g>\n" % \
                                      (-t_spec.offset.x, t_spec.offset.y, t_spec.offset.z,
                                       1.0 / t_spec.scale.x, 1.0 / t_spec.scale.y,
                                       1.0 / t_spec.scale.z)
                        tabWrite("uv_mapping image_map{%s \"%s\" %s}\n" % \
                                 (imageFormat(texturesSpec), texturesSpec, imgMap(t_spec)))
                        tabWrite("%s\n" % mappingSpec)
                        tabWrite("}\n")
                        tabWrite("texture_map {\n")
                        tabWrite("[0 \n")

                    if texturesDif == "":
                        if texturesAlpha != "":
                            tabWrite("\n")
                            # POV-Ray "scale" is not a number of repetitions factor, but its
                            # inverse, a standard scale factor.
                            # Offset seems needed relatively to scale so probably center of the
                            # scale is not the same in blender and POV
                            mappingAlpha = " translate <%.4g, %.4g, %.4g> " \
                                           "scale <%.4g, %.4g, %.4g>\n" % \
                                           (-t_alpha.offset.x, -t_alpha.offset.y,
                                            t_alpha.offset.z, 1.0 / t_alpha.scale.x,
                                            1.0 / t_alpha.scale.y, 1.0 / t_alpha.scale.z)
                            tabWrite("pigment {pigment_pattern {uv_mapping image_map" \
                                     "{%s \"%s\" %s}%s" % \
                                     (imageFormat(texturesAlpha), texturesAlpha,
                                      imgMap(t_alpha), mappingAlpha))
                            tabWrite("}\n")
                            tabWrite("pigment_map {\n")
                            tabWrite("[0 color rgbft<0,0,0,1,1>]\n")
                            tabWrite("[1 color rgbft<%.3g, %.3g, %.3g, %.3g, %.3g>]\n" % \
                                     (col[0], col[1], col[2], povFilter, trans))
                            tabWrite("}\n")
                            tabWrite("}\n")

                        else:
                            tabWrite("pigment {rgbft<%.3g, %.3g, %.3g, %.3g, %.3g>}\n" % \
                                     (col[0], col[1], col[2], povFilter, trans))

                        if texturesSpec != "":
                            # Level 1 is no specular
                            tabWrite("finish {%s}\n" % (safety(material_finish, Level=1)))

                        else:
                            # Level 2 is translated spec
                            tabWrite("finish {%s}\n" % (safety(material_finish, Level=2)))

                    else:
                        # POV-Ray "scale" is not a number of repetitions factor, but its
                        # inverse, a standard scale factor.
                        # Offset seems needed relatively to scale so probably center of the
                        # scale is not the same in blender and POV
                        mappingDif = ("translate <%.4g,%.4g,%.4g> scale <%.4g,%.4g,%.4g>" % \
                                      (-t_dif.offset.x, -t_dif.offset.y, t_dif.offset.z,
                                       1.0 / t_dif.scale.x, 1.0 / t_dif.scale.y,
                                       1.0 / t_dif.scale.z))
                        if texturesAlpha != "":
                            # POV-Ray "scale" is not a number of repetitions factor, but its
                            # inverse, a standard scale factor.
                            # Offset seems needed relatively to scale so probably center of the
                            # scale is not the same in blender and POV
                            mappingAlpha = " translate <%.4g,%.4g,%.4g> " \
                                           "scale <%.4g,%.4g,%.4g>" % \
                                           (-t_alpha.offset.x, -t_alpha.offset.y,
                                            t_alpha.offset.z, 1.0 / t_alpha.scale.x,
                                            1.0 / t_alpha.scale.y, 1.0 / t_alpha.scale.z)
                            tabWrite("pigment {\n")
                            tabWrite("pigment_pattern {\n")
                            tabWrite("uv_mapping image_map{%s \"%s\" %s}%s}\n" % \
                                     (imageFormat(texturesAlpha), texturesAlpha,
                                      imgMap(t_alpha), mappingAlpha))
                            tabWrite("pigment_map {\n")
                            tabWrite("[0 color rgbft<0,0,0,1,1>]\n")
                            tabWrite("[1 uv_mapping image_map {%s \"%s\" %s} %s]\n" % \
                                     (imageFormat(texturesDif), texturesDif,
                                      (imgGamma + imgMap(t_dif)), mappingDif))
                            tabWrite("}\n")
                            tabWrite("}\n")

                        else:
                            tabWrite("pigment {uv_mapping image_map {%s \"%s\" %s}%s}\n" % \
                                     (imageFormat(texturesDif), texturesDif,
                                      (imgGamma + imgMap(t_dif)), mappingDif))

                        if texturesSpec != "":
                            # Level 1 is no specular
                            tabWrite("finish {%s}\n" % (safety(material_finish, Level=1)))

                        else:
                            # Level 2 is translated specular
                            tabWrite("finish {%s}\n" % (safety(material_finish, Level=2)))

                        ## scale 1 rotate y*0
                        #imageMap = ("{image_map {%s \"%s\" %s }\n" % \
                        #            (imageFormat(textures),textures,imgMap(t_dif)))
                        #tabWrite("uv_mapping pigment %s} %s finish {%s}\n" % \
                        #         (imageMap,mapping,safety(material_finish)))
                        #tabWrite("pigment {uv_mapping image_map {%s \"%s\" %s}%s} " \
                        #         "finish {%s}\n" % \
                        #         (imageFormat(texturesDif), texturesDif, imgMap(t_dif),
                        #          mappingDif, safety(material_finish)))
                    if texturesNorm != "":
                        ## scale 1 rotate y*0
                        # POV-Ray "scale" is not a number of repetitions factor, but its
                        # inverse, a standard scale factor.
                        # Offset seems needed relatively to scale so probably center of the
                        # scale is not the same in blender and POV
                        mappingNor = " translate <%.4g,%.4g,%.4g> scale <%.4g,%.4g,%.4g>" % \
                                     (-t_nor.offset.x, -t_nor.offset.y, t_nor.offset.z,
                                      1.0 / t_nor.scale.x, 1.0 / t_nor.scale.y,
                                      1.0 / t_nor.scale.z)
                        #imageMapNor = ("{bump_map {%s \"%s\" %s mapping}" % \
                        #               (imageFormat(texturesNorm),texturesNorm,imgMap(t_nor)))
                        #We were not using the above maybe we should?
                        tabWrite("normal {uv_mapping bump_map " \
                                 "{%s \"%s\" %s bump_size %.4g }%s}\n" % \
                                 (imageFormat(texturesNorm), texturesNorm, imgMap(t_nor),
                                  t_nor.normal_factor * 10, mappingNor))
                    if texturesSpec != "":
                        tabWrite("]\n")
                        ##################Second index for mapping specular max value###############
                        tabWrite("[1 \n")

            # Below: the second texture_map entry (full specular) — or the only
            # pigment/finish when no spec/alpha map exists.  These run outside
            # the else above, hence the explicit replacement_text guards.
            if texturesDif == "" and material.pov.replacement_text == "":
                if texturesAlpha != "":
                    # POV-Ray "scale" is not a number of repetitions factor, but its inverse,
                    # a standard scale factor.
                    # Offset seems needed relatively to scale so probably center of the scale
                    # is not the same in blender and POV
                    # Strange that the translation factor for scale is not the same as for
                    # translate.
                    # TODO: verify both matches with blender internal.
                    mappingAlpha = " translate <%.4g,%.4g,%.4g> scale <%.4g,%.4g,%.4g>\n" % \
                                   (-t_alpha.offset.x, -t_alpha.offset.y, t_alpha.offset.z,
                                    1.0 / t_alpha.scale.x, 1.0 / t_alpha.scale.y,
                                    1.0 / t_alpha.scale.z)
                    tabWrite("pigment {pigment_pattern {uv_mapping image_map" \
                             "{%s \"%s\" %s}%s}\n" % \
                             (imageFormat(texturesAlpha), texturesAlpha, imgMap(t_alpha),
                              mappingAlpha))
                    tabWrite("pigment_map {\n")
                    tabWrite("[0 color rgbft<0,0,0,1,1>]\n")
                    tabWrite("[1 color rgbft<%.3g, %.3g, %.3g, %.3g, %.3g>]\n" % \
                             (col[0], col[1], col[2], povFilter, trans))
                    tabWrite("}\n")
                    tabWrite("}\n")

                else:
                    tabWrite("pigment {rgbft<%.3g, %.3g, %.3g, %.3g, %.3g>}\n" % \
                             (col[0], col[1], col[2], povFilter, trans))

                if texturesSpec != "":
                    # Level 3 is full specular
                    tabWrite("finish {%s}\n" % (safety(material_finish, Level=3)))

                else:
                    # Level 2 is translated specular
                    tabWrite("finish {%s}\n" % (safety(material_finish, Level=2)))

            elif material.pov.replacement_text == "":
                # POV-Ray "scale" is not a number of repetitions factor, but its inverse,
                # a standard scale factor.
                # Offset seems needed relatively to scale so probably center of the scale is
                # not the same in blender and POV
                # Strange that the translation factor for scale is not the same as for
                # translate.
                # TODO: verify both matches with blender internal.
                mappingDif = ("translate <%.4g,%.4g,%.4g> scale <%.4g,%.4g,%.4g>" % \
                              (-t_dif.offset.x, -t_dif.offset.y, t_dif.offset.z,
                               1.0 / t_dif.scale.x, 1.0 / t_dif.scale.y, 1.0 / t_dif.scale.z))
                if texturesAlpha != "":
                    # Strange that the translation factor for scale is not the same as for
                    # translate.
                    # TODO: verify both matches with blender internal.
                    mappingAlpha = "translate <%.4g,%.4g,%.4g> scale <%.4g,%.4g,%.4g>" % \
                                   (-t_alpha.offset.x, -t_alpha.offset.y, t_alpha.offset.z,
                                    1.0 / t_alpha.scale.x, 1.0 / t_alpha.scale.y,
                                    1.0 / t_alpha.scale.z)
                    tabWrite("pigment {pigment_pattern {uv_mapping image_map" \
                             "{%s \"%s\" %s}%s}\n" % \
                             (imageFormat(texturesAlpha), texturesAlpha, imgMap(t_alpha),
                              mappingAlpha))
                    tabWrite("pigment_map {\n")
                    tabWrite("[0 color rgbft<0,0,0,1,1>]\n")
                    tabWrite("[1 uv_mapping image_map {%s \"%s\" %s} %s]\n" % \
                             (imageFormat(texturesDif), texturesDif,
                              (imgMap(t_dif) + imgGamma), mappingDif))
                    tabWrite("}\n")
                    tabWrite("}\n")

                else:
                    tabWrite("pigment {\n")
                    tabWrite("uv_mapping image_map {\n")
                    #tabWrite("%s \"%s\" %s}%s\n" % \
                    #         (imageFormat(texturesDif), texturesDif,
                    #          (imgGamma + imgMap(t_dif)),mappingDif))
                    tabWrite("%s \"%s\" \n" % (imageFormat(texturesDif), texturesDif))
                    tabWrite("%s\n" % (imgGamma + imgMap(t_dif)))
                    tabWrite("}\n")
                    tabWrite("%s\n" % mappingDif)
                    tabWrite("}\n")
                if texturesSpec != "":
                    # Level 3 is full specular
                    tabWrite("finish {%s}\n" % (safety(material_finish, Level=3)))
                else:
                    # Level 2 is translated specular
                    tabWrite("finish {%s}\n" % (safety(material_finish, Level=2)))

                ## scale 1 rotate y*0
                #imageMap = ("{image_map {%s \"%s\" %s }" % \
                #            (imageFormat(textures), textures,imgMap(t_dif)))
                #file.write("\n\t\t\tuv_mapping pigment %s} %s finish {%s}" % \
                #           (imageMap, mapping, safety(material_finish)))
                #file.write("\n\t\t\tpigment {uv_mapping image_map " \
                #           "{%s \"%s\" %s}%s} finish {%s}" % \
                #           (imageFormat(texturesDif), texturesDif,imgMap(t_dif),
                #            mappingDif, safety(material_finish)))
            if texturesNorm != "" and material.pov.replacement_text == "":
                ## scale 1 rotate y*0
                # POV-Ray "scale" is not a number of repetitions factor, but its inverse,
                # a standard scale factor.
                # Offset seems needed relatively to scale so probably center of the scale is
                # not the same in blender and POV
                mappingNor = (" translate <%.4g,%.4g,%.4g> scale <%.4g,%.4g,%.4g>" % \
                              (-t_nor.offset.x, -t_nor.offset.y, t_nor.offset.z,
                               1.0 / t_nor.scale.x, 1.0 / t_nor.scale.y, 1.0 / t_nor.scale.z))
                #imageMapNor = ("{bump_map {%s \"%s\" %s mapping}" % \
                #               (imageFormat(texturesNorm),texturesNorm,imgMap(t_nor)))
                #We were not using the above maybe we should?
                tabWrite("normal {uv_mapping bump_map {%s \"%s\" %s bump_size %.4g }%s}\n" % \
                         (imageFormat(texturesNorm), texturesNorm, imgMap(t_nor),
                          t_nor.normal_factor * 10.0, mappingNor))
            if texturesSpec != "" and material.pov.replacement_text == "":
                tabWrite("]\n")

                tabWrite("}\n")

            #End of slope/ior texture_map
            if material.diffuse_shader == 'MINNAERT' and material.pov.replacement_text == "":
                tabWrite("]\n")
                tabWrite("}\n")
            if material.diffuse_shader == 'FRESNEL' and material.pov.replacement_text == "":
                c = 1
                while (c <= lampCount):
                    tabWrite("]\n")
                    tabWrite("}\n")
                    c += 1

            if material.pov.replacement_text == "":
                tabWrite("}\n")  # THEN IT CAN CLOSE IT --MR

            ####################################################################################
            index[0] = idx
            idx += 1

        tabWrite("}\n")

        # Face indices
        tabWrite("face_indices {\n")
        tabWrite("%d" % (len(me_faces) + quadCount))  # faces count
        tabStr = tab * tabLevel

        for fi, f in enumerate(me_faces):
            fv = faces_verts[fi]
            material_index = f.material_index
            if len(fv) == 4:
                indices = (0, 1, 2), (0, 2, 3)
            else:
                indices = ((0, 1, 2),)

            if vcol_layer:
                col = vcol_layer[fi]

                if len(fv) == 4:
                    cols = col.color1, col.color2, col.color3, col.color4
                else:
                    cols = col.color1, col.color2, col.color3

            if not me_materials or me_materials[material_index] is None:  # No materials
                for i1, i2, i3 in indices:
                    if not scene.pov.tempfiles_enable and scene.pov.list_lf_enable:
                        file.write(",\n")
                        # vert count
                        file.write(tabStr + "<%d,%d,%d>" % (fv[i1], fv[i2], fv[i3]))
                    else:
                        file.write(", ")
                        file.write("<%d,%d,%d>" % (fv[i1], fv[i2], fv[i3]))  # vert count
            else:
                material = me_materials[material_index]
                for i1, i2, i3 in indices:
                    if me.vertex_colors and material.use_vertex_color_paint:
                        # Colour per vertex - vertex colour

                        col1 = cols[i1]
                        col2 = cols[i2]
                        col3 = cols[i3]

                        ci1 = vertCols[col1[0], col1[1], col1[2], material_index][0]
                        ci2 = vertCols[col2[0], col2[1], col2[2], material_index][0]
                        ci3 = vertCols[col3[0], col3[1], col3[2], material_index][0]
                    else:
                        # Colour per material - flat material colour
                        diffuse_color = material.diffuse_color
                        ci1 = ci2 = ci3 = vertCols[diffuse_color[0], diffuse_color[1],
                                                   diffuse_color[2], f.material_index][0]

                    if not scene.pov.tempfiles_enable and scene.pov.list_lf_enable:
                        file.write(",\n")
                        file.write(tabStr + "<%d,%d,%d>, %d,%d,%d" % \
                                   (fv[i1], fv[i2], fv[i3], ci1, ci2, ci3))  # vert count
                    else:
                        file.write(", ")
                        file.write("<%d,%d,%d>, %d,%d,%d" % \
                                   (fv[i1], fv[i2], fv[i3], ci1, ci2, ci3))  # vert count

        file.write("\n")
        tabWrite("}\n")

        # normal_indices indices
        tabWrite("normal_indices {\n")
        tabWrite("%d" % (len(me_faces) + quadCount))  # faces count
        tabStr = tab * tabLevel
        for fi, fv in enumerate(faces_verts):

            if len(fv) == 4:
                indices = (0, 1, 2), (0, 2, 3)
            else:
                indices = ((0, 1, 2),)

            for i1, i2, i3 in indices:
                if me_faces[fi].use_smooth:
                    if not scene.pov.tempfiles_enable and scene.pov.list_lf_enable:
                        file.write(",\n")
                        file.write(tabStr + "<%d,%d,%d>" %
                                   (uniqueNormals[verts_normals[fv[i1]]][0],
                                    uniqueNormals[verts_normals[fv[i2]]][0],
                                    uniqueNormals[verts_normals[fv[i3]]][0]))  # vert count
                    else:
                        file.write(", ")
                        file.write("<%d,%d,%d>" %
                                   (uniqueNormals[verts_normals[fv[i1]]][0],
                                    uniqueNormals[verts_normals[fv[i2]]][0],
                                    uniqueNormals[verts_normals[fv[i3]]][0]))  # vert count
                else:
                    idx = uniqueNormals[faces_normals[fi]][0]
                    if not scene.pov.tempfiles_enable and scene.pov.list_lf_enable:
                        file.write(",\n")
                        file.write(tabStr + "<%d,%d,%d>" % (idx, idx, idx))  # vert count
                    else:
                        file.write(", ")
                        file.write("<%d,%d,%d>" % (idx, idx, idx))  # vert count

        file.write("\n")
        tabWrite("}\n")

        if uv_layer:
            tabWrite("uv_indices {\n")
            tabWrite("%d" % (len(me_faces) + quadCount))  # faces count
            tabStr = tab * tabLevel
            for fi, fv in enumerate(faces_verts):

                if len(fv) == 4:
                    indices = (0, 1, 2), (0, 2, 3)
                else:
                    indices = ((0, 1, 2),)

                uv = uv_layer[fi]
                if len(faces_verts[fi]) == 4:
                    uvs = uv.uv1[:], uv.uv2[:], uv.uv3[:], uv.uv4[:]
                else:
                    uvs = uv.uv1[:], uv.uv2[:], uv.uv3[:]

                for i1, i2, i3 in indices:
                    if not scene.pov.tempfiles_enable and scene.pov.list_lf_enable:
                        file.write(",\n")
                        file.write(tabStr + "<%d,%d,%d>" % (
                                   uniqueUVs[uvs[i1]][0],
                                   uniqueUVs[uvs[i2]][0],
                                   uniqueUVs[uvs[i3]][0]))
                    else:
                        file.write(", ")
                        file.write("<%d,%d,%d>" % (
                                   uniqueUVs[uvs[i1]][0],
                                   uniqueUVs[uvs[i2]][0],
                                   uniqueUVs[uvs[i3]][0]))

            file.write("\n")
            tabWrite("}\n")

        if me.materials:
            try:
                material = me.materials[0]  # dodgy
                writeObjectMaterial(material, ob)
            except IndexError:
                print(me)

        #Importance for radiosity sampling added here:
        tabWrite("radiosity { \n")
        tabWrite("importance %3g \n" % importance)
        tabWrite("}\n")

        tabWrite("}\n")  # End of mesh block

        bpy.data.meshes.remove(me)

    # One lightweight instance per recorded placement of each declared mesh.
    for data_name, inst in data_ref.items():
        for ob_name, matrix_str in inst:
            tabWrite("//----Blender Object Name:%s----\n" % ob_name)
            tabWrite("object { \n")
            tabWrite("%s\n" % data_name)
            tabWrite("%s\n" % matrix_str)
            tabWrite("}\n")
+
def exportWorld(world):
    """Write the POV background for *world*: flat background or image/gradient
    sky_sphere, plus fog (from mist) and atmospheric media.

    Uses names from the enclosing write_pov scope: tabWrite, scene,
    path_image, imageFormat, imgMapBG.
    """
    render = scene.render
    # BUGFIX: removed unused camera/matrix locals; they crashed when the
    # scene had no camera and were never read.
    if not world:
        return

    # ----- Simple flat background (visible e.g. with PNG output) -----
    if not world.use_sky_blend:
        # Non fully transparent background could premultiply alpha and avoid
        # anti-aliasing display issue:
        if render.alpha_mode == 'PREMUL':
            tabWrite("background {rgbt<%.3g, %.3g, %.3g, 0.75>}\n" % \
                     (world.horizon_color[:]))
        # Currently using no alpha with Sky option:
        elif render.alpha_mode == 'SKY':
            tabWrite("background {rgbt<%.3g, %.3g, %.3g, 0>}\n" % (world.horizon_color[:]))
        # StraightAlpha:
        else:
            tabWrite("background {rgbt<%.3g, %.3g, %.3g, 1>}\n" % (world.horizon_color[:]))

    worldTexCount = 0
    # ----- Background image textures -----
    for t in world.texture_slots:  # risk to write several sky_spheres but maybe ok.
        # BUGFIX: texture.type is an enum *string* and is never None, so the
        # original "is not None" test counted every filled slot; compare with
        # 'NONE' so empty textures do not suppress the gradient sky below.
        if t and t.texture.type != 'NONE':
            worldTexCount += 1
        # XXX No enable checkbox for world textures yet (report it?)
        # if t and t.texture.type == 'IMAGE' and t.use:
        if t and t.texture.type == 'IMAGE':
            image_filename = path_image(t.texture.image.filepath)
            if t.texture.image.filepath != image_filename:
                t.texture.image.filepath = image_filename
            # BUGFIX: the sky_sphere below uses t_blend, which is only bound
            # when this condition holds; an IMAGE texture without blend
            # mapping previously raised NameError.  The write is now guarded.
            if image_filename != "" and t.use_map_blend:
                texturesBlend = image_filename
                # colvalue = t.default_value
                t_blend = t

                # An idea to orient the background image like the camera is
                # discussed here (replace 4/3 by each image's actual ratio):
                # http://news.povray.org/povray.newusers/thread/%3Cweb.4a5cddf4e9c9822ba2f93e20@news.povray.org%3E/
                # Using camera rotation values directly from Blender seems
                # much easier.
                if t_blend.texture_coords == 'ANGMAP':
                    mappingBlend = ""
                else:
                    mappingBlend = " translate <%.4g-0.5,%.4g-0.5,%.4g-0.5> rotate<0,0,0> " \
                                   "scale <%.4g,%.4g,%.4g>" % \
                                   (t_blend.offset.x / 10.0, t_blend.offset.y / 10.0,
                                    t_blend.offset.z / 10.0, t_blend.scale.x * 0.85,
                                    t_blend.scale.y * 0.85, t_blend.scale.z * 0.85)

                    # The initial position and rotation of the pov camera is
                    # probably creating the rotation offset; should look into
                    # it someday but at least the background won't rotate with
                    # the camera now.  Putting the map on a plane would avoid
                    # the skysphere distortion and allow better image scale
                    # matching, but also needs some way to choose depth and
                    # size of the plane relative to camera.
                tabWrite("sky_sphere {\n")
                tabWrite("pigment {\n")
                tabWrite("image_map{%s \"%s\" %s}\n" % \
                         (imageFormat(texturesBlend), texturesBlend, imgMapBG(t_blend)))
                tabWrite("}\n")
                tabWrite("%s\n" % (mappingBlend))
                # The following layered pigment opacifies to black over the
                # texture for transmit below 1, or otherwise adds to itself.
                tabWrite("pigment {rgb 0 transmit %s}\n" % (t.texture.intensity))
                tabWrite("}\n")
                # tabWrite("scale 2\n")
                # tabWrite("translate -1\n")

    # ----- Gradient background, only when no world texture was counted -----
    if worldTexCount == 0:
        if world.use_sky_blend:
            tabWrite("sky_sphere {\n")
            tabWrite("pigment {\n")
            # Maybe should follow the advice of POV doc about replacing
            # gradient for skysphere..5.5
            tabWrite("gradient y\n")
            tabWrite("color_map {\n")
            if render.alpha_mode == 'STRAIGHT':
                tabWrite("[0.0 rgbt<%.3g, %.3g, %.3g, 1>]\n" % (world.horizon_color[:]))
                tabWrite("[1.0 rgbt<%.3g, %.3g, %.3g, 1>]\n" % (world.zenith_color[:]))
            elif render.alpha_mode == 'PREMUL':
                tabWrite("[0.0 rgbt<%.3g, %.3g, %.3g, 0.99>]\n" % (world.horizon_color[:]))
                # aa premult not solved with transmit 1
                tabWrite("[1.0 rgbt<%.3g, %.3g, %.3g, 0.99>]\n" % (world.zenith_color[:]))
            else:
                tabWrite("[0.0 rgbt<%.3g, %.3g, %.3g, 0>]\n" % (world.horizon_color[:]))
                tabWrite("[1.0 rgbt<%.3g, %.3g, %.3g, 0>]\n" % (world.zenith_color[:]))
            tabWrite("}\n")
            tabWrite("}\n")
            tabWrite("}\n")
            # Sky_sphere alpha (transmit) is not translating into image alpha
            # the same way as 'background'.

    # if world.light_settings.use_indirect_light:
    #     scene.pov.radio_enable = 1
    # Maybe change the above to a function copying internal renderer settings
    # when the user pushes a button, then:
    # scene.pov.radio_enable = world.light_settings.use_indirect_light
    # and other such translations, but maybe this would not be allowed either?

    # ----- Blender mist -> POV fog -----
    mist = world.mist_settings

    if mist.use_mist:
        tabWrite("fog {\n")
        tabWrite("distance %.6f\n" % mist.depth)
        tabWrite("color rgbt<%.3g, %.3g, %.3g, %.3g>\n" % \
                 (world.horizon_color[:] + (1.0 - mist.intensity,)))
        # tabWrite("fog_offset %.6f\n" % mist.start)
        # tabWrite("fog_alt 5\n")
        # tabWrite("turbulence 0.2\n")
        # tabWrite("turb_depth 0.3\n")
        tabWrite("fog_type 1\n")
        tabWrite("}\n")

    # ----- Atmospheric scattering media -----
    if scene.pov.media_enable:
        tabWrite("media {\n")
        tabWrite("scattering { 1, rgb <%.4g, %.4g, %.4g>}\n" % scene.pov.media_color[:])
        tabWrite("samples %.d\n" % scene.pov.media_samples)
        tabWrite("}\n")
+
def exportGlobalSettings(scene):
    """Write the POV global_settings block: gamma, trace depth, radiosity,
    ambient light, SSS scale and global photon settings.

    Uses 'tabWrite', 'world' and 'bpy' from the enclosing write_pov scope.
    """
    tabWrite("global_settings {\n")
    tabWrite("assumed_gamma 1.0\n")
    tabWrite("max_trace_level %d\n" % scene.pov.max_trace_level)

    if scene.pov.radio_enable:
        tabWrite("radiosity {\n")
        tabWrite("adc_bailout %.4g\n" % scene.pov.radio_adc_bailout)
        tabWrite("always_sample %d\n" % scene.pov.radio_always_sample)
        tabWrite("brightness %.4g\n" % scene.pov.radio_brightness)
        tabWrite("count %d\n" % scene.pov.radio_count)
        tabWrite("error_bound %.4g\n" % scene.pov.radio_error_bound)
        tabWrite("gray_threshold %.4g\n" % scene.pov.radio_gray_threshold)
        tabWrite("low_error_factor %.4g\n" % scene.pov.radio_low_error_factor)
        tabWrite("media %d\n" % scene.pov.radio_media)
        tabWrite("minimum_reuse %.4g\n" % scene.pov.radio_minimum_reuse)
        tabWrite("nearest_count %d\n" % scene.pov.radio_nearest_count)
        tabWrite("normal %d\n" % scene.pov.radio_normal)
        tabWrite("pretrace_start %.3g\n" % scene.pov.radio_pretrace_start)
        tabWrite("pretrace_end %.3g\n" % scene.pov.radio_pretrace_end)
        tabWrite("recursion_limit %d\n" % scene.pov.radio_recursion_limit)
        tabWrite("}\n")

    # BUGFIX: ambient_light used to be written inside the materials loop,
    # so it was silently skipped when the file contained no materials.
    # Write it once, unconditionally on materials.
    if world:
        tabWrite("ambient_light rgb<%.3g, %.3g, %.3g>\n" % world.ambient_color[:])

    onceSss = 1
    oncePhotons = 1
    for material in bpy.data.materials:
        if material.subsurface_scattering.use and onceSss:
            # In POV, the scale has reversed influence compared to Blender;
            # these numbers should correct that.  The scale factor for all
            # subsurface shaders needs to be the same in POV-Ray.
            tabWrite("mm_per_unit %.6f\n" % \
                     (material.subsurface_scattering.scale * (-100.0) + 15.0))
            onceSss = 0

        if (material.pov.photons_refraction or material.pov.photons_reflection) and oncePhotons:
            tabWrite("photons {\n")
            tabWrite("spacing %.6f\n" % scene.pov.photon_spacing)
            tabWrite("max_trace_level %d\n" % scene.pov.photon_max_trace_level)
            tabWrite("adc_bailout %.3g\n" % scene.pov.photon_adc_bailout)
            tabWrite("gather %d, %d\n" % (scene.pov.photon_gather_min,
                                          scene.pov.photon_gather_max))
            tabWrite("}\n")
            oncePhotons = 0

    tabWrite("}\n")
+
def exportCustomCode():
    """Append the raw contents of every text datablock flagged as POV
    custom code to the exported file ('file' from the enclosing scope)."""
    for text_block in bpy.data.texts:
        if not text_block.pov.custom_code:
            continue
        # Why are the newlines needed?
        file.write("\n")
        file.write(text_block.as_string())
        file.write("\n")
+
+ sel = renderable_objects(scene)
+ comments = scene.pov.comments_enable
+ if not scene.pov.tempfiles_enable and comments:
+ file.write("//----------------------------------------------\n" \
+ "//--Exported with POV-Ray exporter for Blender--\n" \
+ "//----------------------------------------------\n\n")
+ file.write("#version 3.7;\n")
+
+ if not scene.pov.tempfiles_enable and comments:
+ file.write("\n//--CUSTOM CODE--\n\n")
+ exportCustomCode()
+
+ if not scene.pov.tempfiles_enable and comments:
+ file.write("\n//--Global settings and background--\n\n")
+
+ exportGlobalSettings(scene)
+
+ if not scene.pov.tempfiles_enable and comments:
+ file.write("\n")
+
+ exportWorld(scene.world)
+
+ if not scene.pov.tempfiles_enable and comments:
+ file.write("\n//--Cameras--\n\n")
+
+ exportCamera()
+
+ if not scene.pov.tempfiles_enable and comments:
+ file.write("\n//--Lamps--\n\n")
+
+ exportLamps([l for l in sel if l.type == 'LAMP'])
+
+ if not scene.pov.tempfiles_enable and comments:
+ file.write("\n//--Material Definitions--\n\n")
+
+ # Convert all materials to strings we can access directly per vertex.
+ #exportMaterials()
+ writeMaterial(None) # default material
+ for material in bpy.data.materials:
+ if material.users > 0:
+ writeMaterial(material)
+ if not scene.pov.tempfiles_enable and comments:
+ file.write("\n")
+
+ exportMeta([l for l in sel if l.type == 'META'])
+
+ if not scene.pov.tempfiles_enable and comments:
+ file.write("//--Mesh objects--\n")
+
+ exportMeshes(scene, sel)
+ #What follow used to happen here:
+ #exportCamera()
+ #exportWorld(scene.world)
+ #exportGlobalSettings(scene)
+ # MR:..and the order was important for an attempt to implement pov 3.7 baking
+ # (mesh camera) comment for the record
+ # CR: Baking should be a special case than. If "baking", than we could change the order.
+
+ #print("pov file closed %s" % file.closed)
+ file.close()
+ #print("pov file closed %s" % file.closed)
+
+
def write_pov_ini(scene, filename_ini, filename_pov, filename_image):
    """Write the POV-Ray 3.7 INI file that drives the render.

    scene           -- Blender scene; reads scene.render and scene.pov.*
    filename_ini    -- path of the INI file to create
    filename_pov    -- input .pov scene file referenced from the INI
    filename_image  -- output image path referenced from the INI
    """
    render = scene.render

    # Effective resolution after the percentage slider.
    x = int(render.resolution_x * render.resolution_percentage * 0.01)
    y = int(render.resolution_y * render.resolution_percentage * 0.01)

    # BUGFIX: use a context manager so the handle is closed even if a write
    # raises (the original open()/close() pair leaked it on exceptions).
    with open(filename_ini, "w") as file:
        file.write("Version=3.7\n")
        file.write("Input_File_Name='%s'\n" % filename_pov)
        file.write("Output_File_Name='%s'\n" % filename_image)

        file.write("Width=%d\n" % x)
        file.write("Height=%d\n" % y)

        # Border render: POV takes fractions of the image; rows are counted
        # from the top, hence the 1.0 - y inversions.
        if render.use_border:
            file.write("Start_Column=%4g\n" % render.border_min_x)
            file.write("End_Column=%4g\n" % (render.border_max_x))

            file.write("Start_Row=%4g\n" % (1.0 - render.border_max_y))
            file.write("End_Row=%4g\n" % (1.0 - render.border_min_y))

        file.write("Bounding_Method=2\n")  # The new automatic BSP is faster in most scenes

        # Activated (turn this back off when better live exchange is done
        # between the two programs -- see next comment).
        file.write("Display=1\n")
        file.write("Pause_When_Done=0\n")
        # PNG, with POV-Ray 3.7, can show background color with alpha.  In the
        # long run using the POV-Ray interactive preview like bishop 3D could
        # solve the preview for all formats.
        file.write("Output_File_Type=N\n")
        # file.write("Output_File_Type=T\n")  # TGA, best progressive loading
        file.write("Output_Alpha=1\n")

        if scene.pov.antialias_enable:
            # Method 2 (recursive) with higher max subdiv forced because no
            # mipmapping in POV-Ray needs higher sampling.
            # aa_mapping = {"5": 2, "8": 3, "11": 4, "16": 5}
            method = {"0": 1, "1": 2}
            file.write("Antialias=on\n")
            file.write("Sampling_Method=%s\n" % method[scene.pov.antialias_method])
            file.write("Antialias_Depth=%d\n" % scene.pov.antialias_depth)
            file.write("Antialias_Threshold=%.3g\n" % scene.pov.antialias_threshold)
            file.write("Antialias_Gamma=%.3g\n" % scene.pov.antialias_gamma)
            if scene.pov.jitter_enable:
                file.write("Jitter=on\n")
                file.write("Jitter_Amount=%3g\n" % scene.pov.jitter_amount)
            else:
                file.write("Jitter=off\n")  # prevent animation flicker

        else:
            file.write("Antialias=off\n")
+
+
class PovrayRender(bpy.types.RenderEngine):
    """RenderEngine driving an external POV-Ray 3.7 process."""
    bl_idname = 'POVRAY_RENDER'
    bl_label = "POV-Ray 3.7"
    DELAY = 0.5  # seconds between process polls / unlink retries

    def _export(self, scene, povPath, renderImagePath):
        """Choose the .pov/.ini/output file locations and export the scene.

        Sets self._temp_file_in/_out/_ini as a side effect, then writes the
        .pov file via write_pov().
        """
        import tempfile

        if scene.pov.tempfiles_enable:
            # Throw-away OS temp files (delete=False: POV-Ray must reopen them).
            self._temp_file_in = tempfile.NamedTemporaryFile(suffix=".pov", delete=False).name
            # PNG with POV 3.7 can show the background color with alpha.  In
            # the long run using the POV-Ray interactive preview like bishop
            # 3D could solve the preview for all formats.
            self._temp_file_out = tempfile.NamedTemporaryFile(suffix=".png", delete=False).name
            # self._temp_file_out = tempfile.NamedTemporaryFile(suffix=".tga", delete=False).name
            self._temp_file_ini = tempfile.NamedTemporaryFile(suffix=".ini", delete=False).name
        else:
            # Persistent files next to the user-chosen scene path.
            self._temp_file_in = povPath + ".pov"
            self._temp_file_out = renderImagePath + ".png"
            # self._temp_file_out = renderImagePath + ".tga"
            self._temp_file_ini = povPath + ".ini"

        def info_callback(txt):
            self.update_stats("", "POV-Ray 3.7: " + txt)

        write_pov(self._temp_file_in, scene, info_callback)
+
def _render(self, scene):
    """Spawn the POV-Ray process for the current INI file.

    Returns True when a process was started (stored in self._process),
    False when no usable POV-Ray binary could be found/executed.
    Also sets self._is_windows.
    """

    try:
        os.remove(self._temp_file_out)  # so as not to load the old file
    except OSError:
        pass

    write_pov_ini(scene, self._temp_file_ini, self._temp_file_in, self._temp_file_out)

    print("***-STARTING-***")

    pov_binary = "povray"

    extra_args = []

    if scene.pov.command_line_switches != "":
        for newArg in scene.pov.command_line_switches.split(" "):
            extra_args.append(newArg)

    self._is_windows = False
    if sys.platform[:3] == "win":
        self._is_windows = True
        #extra_args.append("/EXIT")

        import winreg
        import platform as pltfrm
        if pltfrm.architecture()[0] == "64bit":
            bitness = 64
        else:
            bitness = 32

        regKey = winreg.OpenKey(winreg.HKEY_CURRENT_USER,
                                "Software\\POV-Ray\\v3.7\\Windows")

        # TODO, report api

        # 64 bits blender
        if bitness == 64:
            try:
                pov_binary = winreg.QueryValueEx(regKey, "Home")[0] + "\\bin\\pvengine64"
                self._process = subprocess.Popen(
                        [pov_binary, self._temp_file_ini] + extra_args,
                        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                # This would work too but means we have to wait until its done:
                # os.system("%s %s" % (pov_binary, self._temp_file_ini))

            except OSError:
                # someone might run povray 32 bits on a 64 bits blender machine
                try:
                    pov_binary = winreg.QueryValueEx(regKey, "Home")[0] + "\\bin\\pvengine"
                    self._process = subprocess.Popen(
                            [pov_binary, self._temp_file_ini] + extra_args,
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

                except OSError:
                    # TODO, report api
                    print("POV-Ray 3.7: could not execute '%s', possibly POV-Ray isn't " \
                          "installed" % pov_binary)
                    import traceback
                    traceback.print_exc()
                    print("***-DONE-***")
                    return False

                else:
                    print("POV-Ray 3.7 64 bits could not execute, running 32 bits instead")
                    print("Command line arguments passed: " + str(extra_args))
                    return True

            else:
                print("POV-Ray 3.7 64 bits found")
                print("Command line arguments passed: " + str(extra_args))
                return True

        # 32 bits blender
        else:
            try:
                pov_binary = winreg.QueryValueEx(regKey, "Home")[0] + "\\bin\\pvengine"
                self._process = subprocess.Popen(
                        [pov_binary, self._temp_file_ini] + extra_args,
                        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

            # someone might also run povray 64 bits with a 32 bits build of blender.
            except OSError:
                try:
                    pov_binary = winreg.QueryValueEx(regKey, "Home")[0] + "\\bin\\pvengine64"
                    self._process = subprocess.Popen(
                            [pov_binary, self._temp_file_ini] + extra_args,
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

                except OSError:
                    # TODO, report api
                    print("POV-Ray 3.7: could not execute '%s', possibly POV-Ray isn't " \
                          "installed" % pov_binary)
                    import traceback
                    traceback.print_exc()
                    print("***-DONE-***")
                    return False

                else:
                    print("Running POV-Ray 3.7 64 bits build with 32 bits Blender,\n" \
                          "You might want to run Blender 64 bits as well.")
                    print("Command line arguments passed: " + str(extra_args))
                    return True

            else:
                print("POV-Ray 3.7 32 bits found")
                print("Command line arguments passed: " + str(extra_args))
                return True

    else:
        # DH - added -d option to prevent render window popup which leads to
        # segfault on linux
        extra_args.append("-d")

        isExists = False
        # NOTE(review): ':' separator is POSIX-only, which is fine since this
        # branch never runs on Windows; os.getenv("PATH") is assumed to be
        # set -- TODO confirm.
        sysPathList = os.getenv("PATH").split(':')
        sysPathList.append("")

        for dirName in sysPathList:
            if (os.path.exists(os.path.join(dirName, pov_binary))):
                isExists = True
                break

        if not isExists:
            # BUGFIX: message typo -- was "could not found execute '%s' - not if PATH"
            print("POV-Ray 3.7: could not find executable '%s' - not in PATH" % pov_binary)
            import traceback
            traceback.print_exc()
            print("***-DONE-***")
            return False

        try:
            self._process = subprocess.Popen([pov_binary, self._temp_file_ini] + extra_args,
                                             stdout=subprocess.PIPE,
                                             stderr=subprocess.STDOUT)

        except OSError:
            # TODO, report api
            print("POV-Ray 3.7: could not execute '%s'" % pov_binary)
            import traceback
            traceback.print_exc()
            print("***-DONE-***")
            return False

        else:
            print("POV-Ray 3.7 found")
            print("Command line arguments passed: " + str(extra_args))
            return True

    # Now that we have a valid process
+
def _cleanup(self):
    """Best-effort removal of the temporary .pov/.ini/output files."""
    for path in (self._temp_file_in, self._temp_file_ini, self._temp_file_out):
        # Retry a few times: Blender may still hold the file open, and
        # Windows does not know how to delete a file in use!
        for _ in range(5):
            try:
                os.unlink(path)
            except OSError:
                time.sleep(self.DELAY)
            else:
                break
+
def render(self, scene):
    """Main entry point: export the scene, launch POV-Ray, poll its output
    for progress, then load the produced image into the render result."""
    import tempfile

    print("***INITIALIZING***")

    blendSceneName = bpy.data.filepath.split(os.path.sep)[-1].split(".")[0]
    povSceneName = ""
    povPath = ""
    renderImagePath = ""

    # has to be called to update the frame on exporting animations
    scene.frame_set(scene.frame_current)

    if not scene.pov.tempfiles_enable:

        # --- Resolve the directory the .pov/.ini files go into. ---
        povPath = bpy.path.abspath(scene.pov.scene_path).replace('\\', '/')
        if povPath == "":
            if bpy.path.abspath("//") != "":
                povPath = bpy.path.abspath("//")
            else:
                povPath = tempfile.gettempdir()
        elif povPath.endswith("/"):
            if povPath == "/":
                povPath = bpy.path.abspath("//")
            else:
                povPath = bpy.path.abspath(scene.pov.scene_path)

        if not os.path.exists(povPath):
            try:
                os.makedirs(povPath)
            except:
                import traceback
                traceback.print_exc()

                print("POV-Ray 3.7: Cannot create scenes directory: %r" % povPath)
                self.update_stats("", "POV-Ray 3.7: Cannot create scenes directory %r" % \
                                  povPath)
                time.sleep(2.0)
                return

        # --- Resolve the scene (file) name. ---
        if scene.pov.scene_name == "":
            povSceneName = blendSceneName if blendSceneName != "" else "untitled"
        else:
            povSceneName = scene.pov.scene_name
            if os.path.isfile(povSceneName):
                povSceneName = os.path.basename(povSceneName)
            povSceneName = povSceneName.split('/')[-1].split('\\')[-1]
            if not povSceneName:
                print("POV-Ray 3.7: Invalid scene name")
                self.update_stats("", "POV-Ray 3.7: Invalid scene name")
                time.sleep(2.0)
                return
            povSceneName = os.path.splitext(povSceneName)[0]

        print("Scene name: " + povSceneName)
        print("Export path: " + povPath)
        povPath = os.path.realpath(os.path.join(povPath, povSceneName))

        # For now this has to be the same as the pov output -- bug in
        # POV-Ray RC3, so renderimage_path handling stays disabled.
        renderImagePath = povPath

    # --- Export, then start POV-Ray. ---
    self.update_stats("", "POV-Ray 3.7: Exporting data from Blender")
    self._export(scene, povPath, renderImagePath)
    self.update_stats("", "POV-Ray 3.7: Parsing File")

    if not self._render(scene):
        self.update_stats("", "POV-Ray 3.7: Not found")
        return

    r = scene.render
    # compute resolution
    x = int(r.resolution_x * r.resolution_percentage * 0.01)
    y = int(r.resolution_y * r.resolution_percentage * 0.01)

    def _test_wait():
        """Sleep one DELAY, then report whether POV-Ray is still running.

        Returns False on user interruption or process exit (logging a
        failure when the process died with a signal)."""
        time.sleep(self.DELAY)

        # User interrupts the rendering
        if self.test_break():
            try:
                self._process.terminate()
                print("***POV INTERRUPTED***")
            except OSError:
                pass
            return False

        poll_result = self._process.poll()
        # POV process is finished, one way or the other
        if poll_result is not None:
            if poll_result < 0:
                print("***POV PROCESS FAILED : %s ***" % poll_result)
                self.update_stats("", "POV-Ray 3.7: Failed")
            return False

        return True

    # Wait for the file to be created
    # XXX This is no more valid, as 3.7 always creates output file once
    # render is finished!
    parsing = re.compile(br"= \[Parsing\.\.\.\] =")
    rendering = re.compile(br"= \[Rendering\.\.\.\] =")
    percent = re.compile(r"\(([0-9]{1,3})%\)")

    data = b""
    last_line = ""
    while _test_wait():
        # POV in Windows does not output its stdout/stderr, it displays them
        # in its GUI
        if self._is_windows:
            self.update_stats("", "POV-Ray 3.7: Rendering File")
        else:
            t_data = self._process.stdout.read(10000)
            if not t_data:
                continue

            data += t_data
            # XXX This is working for UNIX, not sure whether it might need
            # adjustments for other OSs
            # First replace is for windows
            t_data = str(t_data).replace('\\r\\n', '\\n').replace('\\r', '\r')
            lines = t_data.split('\\n')
            last_line += lines[0]
            lines[0] = last_line
            print('\n'.join(lines), end="")
            last_line = lines[-1]

            if rendering.search(data):
                _pov_rendering = True
                match = percent.findall(str(data))
                if match:
                    self.update_stats("", "POV-Ray 3.7: Rendering File (%s%%)" % match[-1])
                else:
                    self.update_stats("", "POV-Ray 3.7: Rendering File")

            elif parsing.search(data):
                self.update_stats("", "POV-Ray 3.7: Parsing File")

    if os.path.exists(self._temp_file_out):
        xmin = int(r.border_min_x * x)
        ymin = int(r.border_min_y * y)
        xmax = int(r.border_max_x * x)
        ymax = int(r.border_max_y * y)

        result = self.begin_result(0, 0, x, y)
        # XXX, tests for border render.
        #result = self.begin_result(xmin, ymin, xmax - xmin, ymax - ymin)
        #result = self.begin_result(0, 0, xmax - xmin, ymax - ymin)
        lay = result.layers[0]

        # This assumes the file has been fully written -- we wait a bit,
        # just in case!
        time.sleep(self.DELAY)
        try:
            lay.load_from_file(self._temp_file_out)
            # XXX, tests for border render.
            #lay.load_from_file(self._temp_file_out, xmin, ymin)
        except RuntimeError:
            print("***POV ERROR WHILE READING OUTPUT FILE***")

        self.end_result(result)

    else:
        print("***POV FILE NOT FOUND***")

    print("***POV FINISHED***")

    self.update_stats("", "")

    if scene.pov.tempfiles_enable or scene.pov.deletefiles_enable:
        self._cleanup()
diff --git a/render_povray/ui.py b/render_povray/ui.py
new file mode 100644
index 00000000..336424bd
--- /dev/null
+++ b/render_povray/ui.py
@@ -0,0 +1,639 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+import bpy
+
# Use some of the existing buttons.
from bl_ui import properties_render
properties_render.RENDER_PT_render.COMPAT_ENGINES.add('POVRAY_RENDER')
properties_render.RENDER_PT_dimensions.COMPAT_ENGINES.add('POVRAY_RENDER')
# properties_render.RENDER_PT_antialiasing.COMPAT_ENGINES.add('POVRAY_RENDER')
properties_render.RENDER_PT_shading.COMPAT_ENGINES.add('POVRAY_RENDER')
properties_render.RENDER_PT_output.COMPAT_ENGINES.add('POVRAY_RENDER')
del properties_render

# Use only a subset of the world panels
from bl_ui import properties_world
properties_world.WORLD_PT_preview.COMPAT_ENGINES.add('POVRAY_RENDER')
properties_world.WORLD_PT_context_world.COMPAT_ENGINES.add('POVRAY_RENDER')
properties_world.WORLD_PT_world.COMPAT_ENGINES.add('POVRAY_RENDER')
properties_world.WORLD_PT_mist.COMPAT_ENGINES.add('POVRAY_RENDER')
del properties_world

# For these modules every panel is wrapped 'as is': each member that has a
# COMPAT_ENGINES set gets the POV engine added; everything else is skipped.
from bl_ui import (properties_material,
                   properties_data_mesh,
                   properties_texture,
                   properties_data_camera,
                   properties_data_lamp)

for module in (properties_material,
               properties_data_mesh,
               properties_texture,
               properties_data_camera,
               properties_data_lamp):
    for member in dir(module):
        subclass = getattr(module, member)
        try:
            subclass.COMPAT_ENGINES.add('POVRAY_RENDER')
        except:
            pass

del (properties_material, properties_data_mesh, properties_texture,
     properties_data_camera, properties_data_lamp, module)
+
+
class RenderButtonsPanel():
    """Mixin placing a panel in Properties > Render for the POV engine."""
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "render"
    # COMPAT_ENGINES must be defined in each subclass, external engines can
    # add themselves here

    @classmethod
    def poll(cls, context):
        rd = context.scene.render
        engine_ok = rd.engine in cls.COMPAT_ENGINES
        return not rd.use_game_engine and engine_ok
+
+
class MaterialButtonsPanel():
    """Mixin placing a panel in Properties > Material for the POV engine."""
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "material"
    # COMPAT_ENGINES must be defined in each subclass, external engines can
    # add themselves here

    @classmethod
    def poll(cls, context):
        mat = context.material
        rd = context.scene.render
        # Only show when a material exists and the POV engine is active.
        return mat and not rd.use_game_engine and rd.engine in cls.COMPAT_ENGINES
+
+
class TextureButtonsPanel():
    """Mixin placing a panel in Properties > Texture for the POV engine."""
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "texture"
    # COMPAT_ENGINES must be defined in each subclass, external engines can
    # add themselves here

    @classmethod
    def poll(cls, context):
        tex = context.texture
        rd = context.scene.render
        # Only show when a texture exists and the POV engine is active.
        return tex and not rd.use_game_engine and rd.engine in cls.COMPAT_ENGINES
+
+
class ObjectButtonsPanel():
    """Mixin placing a panel in Properties > Object for the POV engine."""
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "object"
    # COMPAT_ENGINES must be defined in each subclass, external engines can
    # add themselves here

    @classmethod
    def poll(cls, context):
        obj = context.object
        rd = context.scene.render
        # Only show when an object is active and the POV engine is selected.
        return obj and not rd.use_game_engine and rd.engine in cls.COMPAT_ENGINES
+
+
class CameraDataButtonsPanel():
    """Mixin placing a panel in Properties > Data (camera) for the POV engine."""
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "data"
    # COMPAT_ENGINES must be defined in each subclass, external engines can
    # add themselves here

    @classmethod
    def poll(cls, context):
        cam = context.camera
        rd = context.scene.render
        # Only show when a camera is active and the POV engine is selected.
        return cam and not rd.use_game_engine and rd.engine in cls.COMPAT_ENGINES
+
+
class TextButtonsPanel():
    """Mixin placing a panel in the Text Editor sidebar for the POV engine."""
    bl_space_type = 'TEXT_EDITOR'
    bl_region_type = 'UI'
    bl_label = "P.O.V-Ray"
    # COMPAT_ENGINES must be defined in each subclass, external engines can
    # add themselves here

    @classmethod
    def poll(cls, context):
        text = context.space_data
        rd = context.scene.render
        # Only show in a valid text space with the POV engine selected.
        return text and not rd.use_game_engine and rd.engine in cls.COMPAT_ENGINES
+
+
class RENDER_PT_povray_export_settings(RenderButtonsPanel, bpy.types.Panel):
    """Panel controlling where and how the .pov/.ini files are written."""
    bl_label = "Export Settings"
    COMPAT_ENGINES = {'POVRAY_RENDER'}

    def draw(self, context):
        layout = self.layout

        scene = context.scene

        # BUGFIX: removed "layout.active = scene.pov.max_trace_level" -- an
        # apparent copy-paste from the render-settings panel; it assigned an
        # unrelated int property and greyed this panel out whenever the ray
        # depth was 0.

        split = layout.split()

        col = split.column()
        col.label(text="Command line switches:")
        col.prop(scene.pov, "command_line_switches", text="")
        split = layout.split()
        col = split.column()
        col.prop(scene.pov, "tempfiles_enable", text="OS Tempfiles")
        if not scene.pov.tempfiles_enable:
            col = split.column()
            col.prop(scene.pov, "deletefiles_enable", text="Delete files")
        else:
            # Keep the layout balanced with an empty column.
            col = split.column()

        split = layout.split()
        if not scene.pov.tempfiles_enable:
            col = split.column()
            col.prop(scene.pov, "scene_name", text="Name")
            split = layout.split()
            col = split.column()
            col.prop(scene.pov, "scene_path", text="Path to files")
            #col.prop(scene.pov, "scene_path", text="Path to POV-file")
            split = layout.split()
            #col = split.column() # Bug in POV-Ray RC3
            #col.prop(scene.pov, "renderimage_path", text="Path to image")
            #split = layout.split()

        col = split.column()
        col.prop(scene.pov, "indentation_character", text="Indent")
        col = split.column()
        if scene.pov.indentation_character == "2":
            col.prop(scene.pov, "indentation_spaces", text="Spaces")
        row = layout.row()
        row.prop(scene.pov, "comments_enable", text="Comments")
        row.prop(scene.pov, "list_lf_enable", text="Line breaks in lists")
+
+
class RENDER_PT_povray_render_settings(RenderButtonsPanel, bpy.types.Panel):
    """Panel for global ray depth and photon settings."""
    bl_label = "Render Settings"
    COMPAT_ENGINES = {'POVRAY_RENDER'}

    def draw(self, context):
        layout = self.layout

        scene = context.scene

        # NOTE(review): 'active' receives an int property -- the panel is
        # greyed out whenever max_trace_level is 0; confirm this is intended.
        layout.active = scene.pov.max_trace_level

        split = layout.split()
        col = split.column()

        col.label(text="Global Settings")
        col.prop(scene.pov, "max_trace_level", text="Ray Depth")

        col.label(text="Global Photons")
        col.prop(scene.pov, "photon_max_trace_level", text="Photon Depth")

        split = layout.split()
        col = split.column()
        col.prop(scene.pov, "photon_spacing", text="Spacing")
        col.prop(scene.pov, "photon_gather_min")

        col = split.column()
        col.prop(scene.pov, "photon_adc_bailout", text="Photon ADC")
        col.prop(scene.pov, "photon_gather_max")
+
+
class RENDER_PT_povray_antialias(RenderButtonsPanel, bpy.types.Panel):
    """Panel for POV-Ray anti-aliasing options."""
    bl_label = "Anti-Aliasing"
    COMPAT_ENGINES = {'POVRAY_RENDER'}

    def draw_header(self, context):
        # Checkbox in the panel header toggles AA as a whole.
        self.layout.prop(context.scene.pov, "antialias_enable", text="")

    def draw(self, context):
        layout = self.layout

        scene = context.scene

        layout.active = scene.pov.antialias_enable

        row = layout.row()
        row.prop(scene.pov, "antialias_method", text="")
        row.prop(scene.pov, "jitter_enable", text="Jitter")

        split = layout.split()
        split.column().prop(scene.pov, "antialias_depth", text="AA Depth")
        sub = split.column()
        sub.prop(scene.pov, "jitter_amount", text="Jitter Amount")
        # Grey out the jitter amount when jitter itself is disabled.
        sub.enabled = bool(scene.pov.jitter_enable)

        row = layout.row()
        row.prop(scene.pov, "antialias_threshold", text="AA Threshold")
        row.prop(scene.pov, "antialias_gamma", text="AA Gamma")
+
+
+class RENDER_PT_povray_radiosity(RenderButtonsPanel, bpy.types.Panel):
+ bl_label = "Radiosity"
+ COMPAT_ENGINES = {'POVRAY_RENDER'}
+
+ def draw_header(self, context):
+ scene = context.scene
+
+ self.layout.prop(scene.pov, "radio_enable", text="")
+
+ def draw(self, context):
+ layout = self.layout
+
+ scene = context.scene
+ rd = scene.render
+
+ layout.active = scene.pov.radio_enable
+
+ split = layout.split()
+
+ col = split.column()
+ col.prop(scene.pov, "radio_count", text="Rays")
+ col.prop(scene.pov, "radio_recursion_limit", text="Recursions")
+
+ split.prop(scene.pov, "radio_error_bound", text="Error Bound")
+
+ layout.prop(scene.pov, "radio_display_advanced")
+
+ if scene.pov.radio_display_advanced:
+ split = layout.split()
+
+ col = split.column()
+ col.prop(scene.pov, "radio_adc_bailout", slider=True)
+ col.prop(scene.pov, "radio_gray_threshold", slider=True)
+ col.prop(scene.pov, "radio_low_error_factor", slider=True)
+ col.prop(scene.pov, "radio_pretrace_start", slider=True)
+
+ col = split.column()
+ col.prop(scene.pov, "radio_brightness")
+ col.prop(scene.pov, "radio_minimum_reuse", text="Min Reuse")
+ col.prop(scene.pov, "radio_nearest_count")
+ col.prop(scene.pov, "radio_pretrace_end", slider=True)
+
+ split = layout.split()
+
+ col = split.column()
+ col.label(text="Estimation Influence:")
+ col.prop(scene.pov, "radio_media")
+ col.prop(scene.pov, "radio_normal")
+
+ split.prop(scene.pov, "radio_always_sample")
+
+
+class RENDER_PT_povray_media(RenderButtonsPanel, bpy.types.Panel):
+ bl_label = "Atmosphere Media"
+ COMPAT_ENGINES = {'POVRAY_RENDER'}
+
+ def draw_header(self, context):
+ scene = context.scene
+
+ self.layout.prop(scene.pov, "media_enable", text="")
+
+ def draw(self, context):
+ layout = self.layout
+
+ scene = context.scene
+ rd = scene.render
+
+ layout.active = scene.pov.media_enable
+
+ row = layout.row()
+ row.prop(scene.pov, "media_samples", text="Samples")
+ row.prop(scene.pov, "media_color", text="")
+
+##class RENDER_PT_povray_baking(RenderButtonsPanel, bpy.types.Panel):
+## bl_label = "Baking"
+## COMPAT_ENGINES = {'POVRAY_RENDER'}
+##
+## def draw_header(self, context):
+## scene = context.scene
+##
+## self.layout.prop(scene.pov, "baking_enable", text="")
+##
+## def draw(self, context):
+## layout = self.layout
+##
+## scene = context.scene
+## rd = scene.render
+##
+## layout.active = scene.pov.baking_enable
+
+
+class MATERIAL_PT_povray_mirrorIOR(MaterialButtonsPanel, bpy.types.Panel):
+ bl_label = "IOR Mirror"
+ COMPAT_ENGINES = {'POVRAY_RENDER'}
+
+ def draw_header(self, context):
+ scene = context.material
+
+ self.layout.prop(scene.pov, "mirror_use_IOR", text="")
+
+ def draw(self, context):
+ layout = self.layout
+
+ mat = context.material
+ layout.active = mat.pov.mirror_use_IOR
+
+ if mat.pov.mirror_use_IOR:
+ split = layout.split()
+ col = split.column()
+ row = col.row()
+ row.alignment = 'CENTER'
+ row.label(text="The current Raytrace ")
+ row = col.row()
+ row.alignment = 'CENTER'
+ row.label(text="Transparency IOR is: " + str(mat.raytrace_transparency.ior))
+
+
+class MATERIAL_PT_povray_metallic(MaterialButtonsPanel, bpy.types.Panel):
+ bl_label = "metallic Mirror"
+ COMPAT_ENGINES = {'POVRAY_RENDER'}
+
+ def draw_header(self, context):
+ scene = context.material
+
+ self.layout.prop(scene.pov, "mirror_metallic", text="")
+
+ def draw(self, context):
+ layout = self.layout
+
+ mat = context.material
+ layout.active = mat.pov.mirror_metallic
+
+
+class MATERIAL_PT_povray_fade_color(MaterialButtonsPanel, bpy.types.Panel):
+ bl_label = "Interior Fade Color"
+ COMPAT_ENGINES = {'POVRAY_RENDER'}
+
+ def draw_header(self, context):
+ mat = context.material
+
+ self.layout.prop(mat.pov, "interior_fade_color", text="")
+
+ def draw(self, context):
+ layout = self.layout
+
+ mat = context.material
+ #layout.active = mat.pov.interior_fade_color
+
+
+class MATERIAL_PT_povray_conserve_energy(MaterialButtonsPanel, bpy.types.Panel):
+ bl_label = "conserve energy"
+ COMPAT_ENGINES = {'POVRAY_RENDER'}
+
+ def draw_header(self, context):
+ mat = context.material
+
+ self.layout.prop(mat.pov, "conserve_energy", text="")
+
+ def draw(self, context):
+ layout = self.layout
+
+ mat = context.material
+ layout.active = mat.pov.conserve_energy
+
+
+class MATERIAL_PT_povray_iridescence(MaterialButtonsPanel, bpy.types.Panel):
+ bl_label = "iridescence"
+ COMPAT_ENGINES = {'POVRAY_RENDER'}
+
+ def draw_header(self, context):
+ mat = context.material
+
+ self.layout.prop(mat.pov, "irid_enable", text="")
+
+ def draw(self, context):
+ layout = self.layout
+
+ mat = context.material
+ layout.active = mat.pov.irid_enable
+
+ if mat.pov.irid_enable:
+ split = layout.split()
+
+ col = split.column()
+ col.prop(mat.pov, "irid_amount", slider=True)
+ col.prop(mat.pov, "irid_thickness", slider=True)
+ col.prop(mat.pov, "irid_turbulence", slider=True)
+
+
+class MATERIAL_PT_povray_caustics(MaterialButtonsPanel, bpy.types.Panel):
+ bl_label = "Caustics"
+ COMPAT_ENGINES = {'POVRAY_RENDER'}
+
+ def draw_header(self, context):
+ mat = context.material
+
+ self.layout.prop(mat.pov, "caustics_enable", text="")
+
+ def draw(self, context):
+
+ layout = self.layout
+
+ mat = context.material
+ layout.active = mat.pov.caustics_enable
+
+ if mat.pov.caustics_enable:
+ split = layout.split()
+
+ col = split.column()
+ col.prop(mat.pov, "refraction_type")
+
+ if mat.pov.refraction_type == "1":
+ col.prop(mat.pov, "fake_caustics_power", slider=True)
+ elif mat.pov.refraction_type == "2":
+ col.prop(mat.pov, "photons_dispersion", slider=True)
+ col.prop(mat.pov, "photons_dispersion_samples", slider=True)
+ col.prop(mat.pov, "photons_reflection")
+
+ if mat.pov.refraction_type == "0" and not mat.pov.photons_reflection:
+ split = layout.split()
+ col = split.column()
+ row = col.row()
+ row.alignment = 'CENTER'
+ row.label(text="Caustics override is on, ")
+ row = col.row()
+ row.alignment = 'CENTER'
+ row.label(text="but you didn't chose any !")
+
+
+class MATERIAL_PT_povray_replacement_text(MaterialButtonsPanel, bpy.types.Panel):
+ bl_label = "Custom POV Code"
+ COMPAT_ENGINES = {'POVRAY_RENDER'}
+
+ def draw(self, context):
+ layout = self.layout
+
+ mat = context.material
+
+ col = layout.column()
+ col.label(text="Replace properties with:")
+ col.prop(mat.pov, "replacement_text", text="")
+
+
+class TEXTURE_PT_povray_tex_gamma(TextureButtonsPanel, bpy.types.Panel):
+ bl_label = "Image Gamma"
+ COMPAT_ENGINES = {'POVRAY_RENDER'}
+
+ def draw_header(self, context):
+ tex = context.texture
+
+ self.layout.prop(tex.pov, "tex_gamma_enable", text="")
+
+ def draw(self, context):
+ layout = self.layout
+
+ tex = context.texture
+
+ layout.active = tex.pov.tex_gamma_enable
+ layout.prop(tex.pov, "tex_gamma_value", text="Gamma Value")
+
+
+class TEXTURE_PT_povray_replacement_text(TextureButtonsPanel, bpy.types.Panel):
+ bl_label = "Custom POV Code"
+ COMPAT_ENGINES = {'POVRAY_RENDER'}
+
+ def draw(self, context):
+ layout = self.layout
+
+ tex = context.texture
+
+ col = layout.column()
+ col.label(text="Replace properties with:")
+ col.prop(tex.pov, "replacement_text", text="")
+
+
+class OBJECT_PT_povray_obj_importance(ObjectButtonsPanel, bpy.types.Panel):
+ bl_label = "POV-Ray"
+ COMPAT_ENGINES = {'POVRAY_RENDER'}
+
+ def draw(self, context):
+ layout = self.layout
+
+ obj = context.object
+
+ col = layout.column()
+ col.label(text="Radiosity:")
+ col.prop(obj.pov, "importance_value", text="Importance")
+ col.label(text="Photons:")
+ col.prop(obj.pov, "collect_photons", text="Receive Photon Caustics")
+ if obj.pov.collect_photons:
+ col.prop(obj.pov, "spacing_multiplier", text="Photons Spacing Multiplier")
+
+
+class OBJECT_PT_povray_replacement_text(ObjectButtonsPanel, bpy.types.Panel):
+ bl_label = "Custom POV Code"
+ COMPAT_ENGINES = {'POVRAY_RENDER'}
+
+ def draw(self, context):
+ layout = self.layout
+
+ obj = context.object
+
+ col = layout.column()
+ col.label(text="Replace properties with:")
+ col.prop(obj.pov, "replacement_text", text="")
+
+
+class CAMERA_PT_povray_cam_dof(CameraDataButtonsPanel, bpy.types.Panel):
+ bl_label = "POV-Ray Depth Of Field"
+ COMPAT_ENGINES = {'POVRAY_RENDER'}
+
+ def draw_header(self, context):
+ cam = context.camera
+
+ self.layout.prop(cam.pov, "dof_enable", text="")
+
+ def draw(self, context):
+ layout = self.layout
+
+ cam = context.camera
+
+ layout.active = cam.pov.dof_enable
+
+ layout.prop(cam.pov, "dof_aperture")
+
+ split = layout.split()
+
+ col = split.column()
+ col.prop(cam.pov, "dof_samples_min")
+ col.prop(cam.pov, "dof_variance")
+
+ col = split.column()
+ col.prop(cam.pov, "dof_samples_max")
+ col.prop(cam.pov, "dof_confidence")
+
+
+class CAMERA_PT_povray_replacement_text(CameraDataButtonsPanel, bpy.types.Panel):
+ bl_label = "Custom POV Code"
+ COMPAT_ENGINES = {'POVRAY_RENDER'}
+
+ def draw(self, context):
+ layout = self.layout
+
+ cam = context.camera
+
+ col = layout.column()
+ col.label(text="Replace properties with:")
+ col.prop(cam.pov, "replacement_text", text="")
+
+
+class TEXT_PT_povray_custom_code(TextButtonsPanel, bpy.types.Panel):
+ bl_label = "P.O.V-Ray"
+ COMPAT_ENGINES = {'POVRAY_RENDER'}
+
+ def draw(self, context):
+ layout = self.layout
+
+ text = context.space_data.text
+ if text:
+ layout.prop(text.pov, "custom_code", text="Add as POV code")
diff --git a/render_povray/update_files.py b/render_povray/update_files.py
new file mode 100644
index 00000000..2d8cf6fb
--- /dev/null
+++ b/render_povray/update_files.py
@@ -0,0 +1,595 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+
+import bpy
+from bpy.props import StringProperty, BoolProperty, IntProperty, FloatProperty, \
+ FloatVectorProperty, EnumProperty
+
+
+def update2_0_0_9():
+ # Temporarily register old props, so we can access their values.
+ register()
+
+ # Mapping old names → old default values
+ # XXX We could also store the new name, but as it is just the same without leading pov_ …
+ # Get default values of pov scene props.
+ old_sce_props = {}
+ for k in ["pov_tempfiles_enable", "pov_deletefiles_enable", "pov_scene_name", "pov_scene_path",
+ "pov_renderimage_path", "pov_list_lf_enable", "pov_radio_enable",
+ "pov_radio_display_advanced", "pov_media_enable", "pov_media_samples", "pov_media_color",
+ "pov_baking_enable", "pov_indentation_character", "pov_indentation_spaces",
+ "pov_comments_enable", "pov_command_line_switches", "pov_antialias_enable",
+ "pov_antialias_method", "pov_antialias_depth", "pov_antialias_threshold",
+ "pov_jitter_enable", "pov_jitter_amount", "pov_antialias_gamma", "pov_max_trace_level",
+ "pov_photon_spacing", "pov_photon_max_trace_level", "pov_photon_adc_bailout",
+ "pov_photon_gather_min", "pov_photon_gather_max", "pov_radio_adc_bailout",
+ "pov_radio_always_sample", "pov_radio_brightness", "pov_radio_count",
+ "pov_radio_error_bound", "pov_radio_gray_threshold", "pov_radio_low_error_factor",
+ "pov_radio_media", "pov_radio_minimum_reuse", "pov_radio_nearest_count",
+ "pov_radio_normal", "pov_radio_recursion_limit", "pov_radio_pretrace_start",
+ "pov_radio_pretrace_end"]:
+ old_sce_props[k] = getattr(bpy.types.Scene, k)[1].get('default', None)
+
+ # Get default values of pov material props.
+ old_mat_props = {}
+ for k in ["pov_irid_enable", "pov_mirror_use_IOR", "pov_mirror_metallic", "pov_conserve_energy",
+ "pov_irid_amount", "pov_irid_thickness", "pov_irid_turbulence", "pov_interior_fade_color",
+ "pov_caustics_enable", "pov_fake_caustics", "pov_fake_caustics_power",
+ "pov_photons_refraction", "pov_photons_dispersion", "pov_photons_reflection",
+ "pov_refraction_type", "pov_replacement_text"]:
+ old_mat_props[k] = getattr(bpy.types.Material, k)[1].get('default', None)
+
+ # Get default values of pov texture props.
+ old_tex_props = {}
+ for k in ["pov_tex_gamma_enable", "pov_tex_gamma_value", "pov_replacement_text"]:
+ old_tex_props[k] = getattr(bpy.types.Texture, k)[1].get('default', None)
+
+ # Get default values of pov object props.
+ old_obj_props = {}
+ for k in ["pov_importance_value", "pov_collect_photons", "pov_replacement_text"]:
+ old_obj_props[k] = getattr(bpy.types.Object, k)[1].get('default', None)
+
+ # Get default values of pov camera props.
+ old_cam_props = {}
+ for k in ["pov_dof_enable", "pov_dof_aperture", "pov_dof_samples_min", "pov_dof_samples_max",
+ "pov_dof_variance", "pov_dof_confidence", "pov_replacement_text"]:
+ old_cam_props[k] = getattr(bpy.types.Camera, k)[1].get('default', None)
+
+ # Get default values of pov text props.
+ old_txt_props = {}
+ for k in ["pov_custom_code"]:
+ old_txt_props[k] = getattr(bpy.types.Text, k)[1].get('default', None)
+
+ ################################################################################################
+ # Now, update !
+ # For each old pov property of each scene, if its value is not equal to the default one,
+ # copy it to relevant new prop…
+ for sce in bpy.data.scenes:
+ for k, d in old_sce_props.items():
+ val = getattr(sce, k, d)
+ if val != d:
+ setattr(sce.pov, k[4:], val)
+ # The same goes for materials, textures, etc.
+ for mat in bpy.data.materials:
+ for k, d in old_mat_props.items():
+ val = getattr(mat, k, d)
+ if val != d:
+ setattr(mat.pov, k[4:], val)
+ for tex in bpy.data.textures:
+ for k, d in old_tex_props.items():
+ val = getattr(tex, k, d)
+ if val != d:
+ setattr(tex.pov, k[4:], val)
+ for obj in bpy.data.objects:
+ for k, d in old_obj_props.items():
+ val = getattr(obj, k, d)
+ if val != d:
+ setattr(obj.pov, k[4:], val)
+ for cam in bpy.data.cameras:
+ for k, d in old_cam_props.items():
+ val = getattr(cam, k, d)
+ if val != d:
+ setattr(cam.pov, k[4:], val)
+ for txt in bpy.data.texts:
+ for k, d in old_txt_props.items():
+ val = getattr(txt, k, d)
+ if val != d:
+ setattr(txt.pov, k[4:], val)
+ # Finally, unregister old props !
+ unregister()
+
+
+class RenderCopySettings(bpy.types.Operator):
+ '''
+ Update old POV properties to new ones.
+ '''
+ bl_idname = "scene.pov_update_properties"
+ bl_label = "PovRay render: Update to script v0.0.9"
+ bl_option = {'REGISTER'}
+
+ @classmethod
+ def poll(cls, context):
+ return True
+
+ def execute(self, context):
+ update2_0_0_9()
+ return {'FINISHED'}
+
+
+def register():
+ Scene = bpy.types.Scene
+ Mat = bpy.types.Material
+ Tex = bpy.types.Texture
+ Obj = bpy.types.Object
+ Cam = bpy.types.Camera
+ Text = bpy.types.Text
+ ###########################SCENE##################################
+
+ # File Options
+ Scene.pov_tempfiles_enable = BoolProperty(
+ name="Enable Tempfiles",
+ description="Enable the OS-Tempfiles. Otherwise set the path where to save the files.",
+ default=True)
+ Scene.pov_deletefiles_enable = BoolProperty(
+ name="Delete files",
+ description="Delete files after rendering. Doesn't work with the image.",
+ default=True)
+ Scene.pov_scene_name = StringProperty(
+ name="Scene Name",
+ description="Name of POV-Ray scene to create. Empty name will use the name of the blend file.",
+ default="", maxlen=1024)
+ Scene.pov_scene_path = StringProperty(
+ name="Export scene path",
+ # description="Path to directory where the exported scene (POV and INI) is created", # Bug in POV-Ray RC3
+ description="Path to directory where the files are created",
+ default="", maxlen=1024, subtype="DIR_PATH")
+ Scene.pov_renderimage_path = StringProperty(
+ name="Rendered image path",
+ description="Full path to directory where the rendered image is saved.",
+ default="", maxlen=1024, subtype="DIR_PATH")
+ Scene.pov_list_lf_enable = BoolProperty(
+ name="LF in lists",
+ description="Enable line breaks in lists (vectors and indices). Disabled: lists are exported in one line.",
+ default=True)
+
+ # Not a real pov option, just to know whether radiosity settings should be written at all
+ Scene.pov_radio_enable = BoolProperty(
+ name="Enable Radiosity",
+ description="Enable POV-Rays radiosity calculation",
+ default=False)
+ Scene.pov_radio_display_advanced = BoolProperty(
+ name="Advanced Options",
+ description="Show advanced options",
+ default=False)
+ Scene.pov_media_enable = BoolProperty(
+ name="Enable Media",
+ description="Enable POV-Rays atmospheric media",
+ default=False)
+ Scene.pov_media_samples = IntProperty(
+ name="Samples", description="Number of samples taken from camera to first object encountered along ray path for media calculation",
+ min=1, max=100, default=35)
+
+ Scene.pov_media_color = FloatVectorProperty(
+ name="Media Color",
+ description="The atmospheric media color.",
+ subtype='COLOR',
+ precision=4,
+ step=0.01,
+ min=0,
+ soft_max=1,
+ default=(0.001, 0.001, 0.001),
+ options={'ANIMATABLE'})
+
+ Scene.pov_baking_enable = BoolProperty(
+ name="Enable Baking",
+ description="Enable POV-Rays texture baking",
+ default=False)
+ Scene.pov_indentation_character = EnumProperty(
+ name="Indentation",
+ description="Select the indentation type",
+ items=(("0", "None", "No indentation"),
+ ("1", "Tabs", "Indentation with tabs"),
+ ("2", "Spaces", "Indentation with spaces")),
+ default="2")
+ Scene.pov_indentation_spaces = IntProperty(
+ name="Quantity of spaces",
+ description="The number of spaces for indentation",
+ min=1, max=10, default=4)
+
+ Scene.pov_comments_enable = BoolProperty(
+ name="Enable Comments",
+ description="Add comments to pov file",
+ default=True)
+
+ # Real pov options
+ Scene.pov_command_line_switches = StringProperty(name="Command Line Switches",
+ description="Command line switches consist of a + (plus) or - (minus) sign, followed by one or more alphabetic characters and possibly a numeric value.",
+ default="", maxlen=500)
+
+ Scene.pov_antialias_enable = BoolProperty(
+ name="Anti-Alias", description="Enable Anti-Aliasing",
+ default=True)
+
+ Scene.pov_antialias_method = EnumProperty(
+ name="Method",
+ description="AA-sampling method. Type 1 is an adaptive, non-recursive, super-sampling method. Type 2 is an adaptive and recursive super-sampling method.",
+ items=(("0", "non-recursive AA", "Type 1 Sampling in POV-Ray"),
+ ("1", "recursive AA", "Type 2 Sampling in POV-Ray")),
+ default="1")
+
+ Scene.pov_antialias_depth = IntProperty(
+ name="Antialias Depth", description="Depth of pixel for sampling",
+ min=1, max=9, default=3)
+
+ Scene.pov_antialias_threshold = FloatProperty(
+ name="Antialias Threshold", description="Tolerance for sub-pixels",
+ min=0.0, max=1.0, soft_min=0.05, soft_max=0.5, default=0.1)
+
+ Scene.pov_jitter_enable = BoolProperty(
+ name="Jitter", description="Enable Jittering. Adds noise into the sampling process (it should be avoided to use jitter in animation).",
+ default=True)
+
+ Scene.pov_jitter_amount = FloatProperty(
+ name="Jitter Amount", description="Amount of jittering.",
+ min=0.0, max=1.0, soft_min=0.01, soft_max=1.0, default=1.0)
+
+ Scene.pov_antialias_gamma = FloatProperty(
+ name="Antialias Gamma", description="POV-Ray compares gamma-adjusted values for super sampling. Antialias Gamma sets the Gamma before comparison.",
+ min=0.0, max=5.0, soft_min=0.01, soft_max=2.5, default=2.5)
+
+ Scene.pov_max_trace_level = IntProperty(
+ name="Max Trace Level", description="Number of reflections/refractions allowed on ray path",
+ min=1, max=256, default=5)
+
+ Scene.pov_photon_spacing = FloatProperty(
+ name="Spacing", description="Average distance between photons on surfaces. half this get four times as many surface photons",
+ min=0.001, max=1.000, soft_min=0.001, soft_max=1.000, default=0.005, precision=3)
+
+ Scene.pov_photon_max_trace_level = IntProperty(
+ name="Max Trace Level", description="Number of reflections/refractions allowed on ray path",
+ min=1, max=256, default=5)
+
+ Scene.pov_photon_adc_bailout = FloatProperty(
+ name="ADC Bailout", description="The adc_bailout for photons. Use adc_bailout = 0.01 / brightest_ambient_object for good results",
+ min=0.0, max=1000.0, soft_min=0.0, soft_max=1.0, default=0.1, precision=3)
+
+ Scene.pov_photon_gather_min = IntProperty(
+ name="Gather Min", description="Minimum number of photons gathered for each point",
+ min=1, max=256, default=20)
+
+ Scene.pov_photon_gather_max = IntProperty(
+ name="Gather Max", description="Maximum number of photons gathered for each point",
+ min=1, max=256, default=100)
+
+ Scene.pov_radio_adc_bailout = FloatProperty(
+ name="ADC Bailout", description="The adc_bailout for radiosity rays. Use adc_bailout = 0.01 / brightest_ambient_object for good results",
+ min=0.0, max=1000.0, soft_min=0.0, soft_max=1.0, default=0.01, precision=3)
+
+ Scene.pov_radio_always_sample = BoolProperty(
+ name="Always Sample", description="Only use the data from the pretrace step and not gather any new samples during the final radiosity pass",
+ default=True)
+
+ Scene.pov_radio_brightness = FloatProperty(
+ name="Brightness", description="Amount objects are brightened before being returned upwards to the rest of the system",
+ min=0.0, max=1000.0, soft_min=0.0, soft_max=10.0, default=1.0)
+
+ Scene.pov_radio_count = IntProperty(
+ name="Ray Count", description="Number of rays for each new radiosity value to be calculated (halton sequence over 1600)",
+ min=1, max=10000, soft_max=1600, default=35)
+
+ Scene.pov_radio_error_bound = FloatProperty(
+ name="Error Bound", description="One of the two main speed/quality tuning values, lower values are more accurate",
+ min=0.0, max=1000.0, soft_min=0.1, soft_max=10.0, default=1.8)
+
+ Scene.pov_radio_gray_threshold = FloatProperty(
+ name="Gray Threshold", description="One of the two main speed/quality tuning values, lower values are more accurate",
+ min=0.0, max=1.0, soft_min=0, soft_max=1, default=0.0)
+
+ Scene.pov_radio_low_error_factor = FloatProperty(
+ name="Low Error Factor", description="Just enough samples is slightly blotchy. Low error changes error tolerance for less critical last refining pass",
+ min=0.0, max=1.0, soft_min=0.0, soft_max=1.0, default=0.5)
+
+ # max_sample - not available yet
+ Scene.pov_radio_media = BoolProperty(
+ name="Media", description="Radiosity estimation can be affected by media",
+ default=False)
+
+ Scene.pov_radio_minimum_reuse = FloatProperty(
+ name="Minimum Reuse", description="Fraction of the screen width which sets the minimum radius of reuse for each sample point (At values higher than 2% expect errors)",
+ min=0.0, max=1.0, soft_min=0.1, soft_max=0.1, default=0.015, precision=3)
+
+ Scene.pov_radio_nearest_count = IntProperty(
+ name="Nearest Count", description="Number of old ambient values blended together to create a new interpolated value",
+ min=1, max=20, default=5)
+
+ Scene.pov_radio_normal = BoolProperty(
+ name="Normals", description="Radiosity estimation can be affected by normals",
+ default=False)
+
+ Scene.pov_radio_recursion_limit = IntProperty(
+ name="Recursion Limit", description="how many recursion levels are used to calculate the diffuse inter-reflection",
+ min=1, max=20, default=3)
+
+ Scene.pov_radio_pretrace_start = FloatProperty(
+ name="Pretrace Start", description="Fraction of the screen width which sets the size of the blocks in the mosaic preview first pass",
+ min=0.01, max=1.00, soft_min=0.02, soft_max=1.0, default=0.08)
+
+ Scene.pov_radio_pretrace_end = FloatProperty(
+ name="Pretrace End", description="Fraction of the screen width which sets the size of the blocks in the mosaic preview last pass",
+ min=0.001, max=1.00, soft_min=0.01, soft_max=1.00, default=0.04, precision=3)
+
+ #############################MATERIAL######################################
+
+ Mat.pov_irid_enable = BoolProperty(
+ name="Enable Iridescence",
+ description="Newton's thin film interference (like an oil slick on a puddle of water or the rainbow hues of a soap bubble.)",
+ default=False)
+
+ Mat.pov_mirror_use_IOR = BoolProperty(
+ name="Correct Reflection",
+ description="Use same IOR as raytrace transparency to calculate mirror reflections. More physically correct",
+ default=False)
+
+ Mat.pov_mirror_metallic = BoolProperty(
+ name="Metallic Reflection",
+ description="mirror reflections get colored as diffuse (for metallic materials)",
+ default=False)
+
+ Mat.pov_conserve_energy = BoolProperty(
+ name="Conserve Energy",
+ description="Light transmitted is more correctly reduced by mirror reflections, also the sum of diffuse and translucency gets reduced below one ",
+ default=True)
+
+ Mat.pov_irid_amount = FloatProperty(
+ name="amount",
+ description="Contribution of the iridescence effect to the overall surface color. As a rule of thumb keep to around 0.25 (25% contribution) or less, but experiment. If the surface is coming out too white, try lowering the diffuse and possibly the ambient values of the surface.",
+ min=0.0, max=1.0, soft_min=0.01, soft_max=1.0, default=0.25)
+
+ Mat.pov_irid_thickness = FloatProperty(
+ name="thickness",
+ description="A very thin film will have a high frequency of color changes while a thick film will have large areas of color.",
+ min=0.0, max=1000.0, soft_min=0.1, soft_max=10.0, default=1)
+
+ Mat.pov_irid_turbulence = FloatProperty(
+ name="turbulence",
+ description="This parameter varies the thickness.",
+ min=0.0, max=10.0, soft_min=0.000, soft_max=1.0, default=0)
+
+ Mat.pov_interior_fade_color = FloatVectorProperty(
+ name="Fade Color",
+ description="Color of filtered attenuation for transparent materials",
+ subtype='COLOR',
+ precision=4,
+ step=0.01,
+ min=0.0,
+ soft_max=1.0,
+ default=(0, 0, 0),
+ options={'ANIMATABLE'})
+
+ Mat.pov_caustics_enable = BoolProperty(
+ name="Caustics",
+ description="use only fake refractive caustics (default) or photon based reflective/refractive caustics",
+ default=True)
+
+ Mat.pov_fake_caustics = BoolProperty(
+ name="Fake Caustics",
+ description="use only (Fast) fake refractive caustics",
+ default=True)
+
+ Mat.pov_fake_caustics_power = FloatProperty(
+ name="Fake caustics power",
+ description="Values typically range from 0.0 to 1.0 or higher. Zero is no caustics. Low, non-zero values give broad hot-spots while higher values give tighter, smaller simulated focal points",
+ min=0.00, max=10.0, soft_min=0.00, soft_max=1.10, default=0.1)
+
+ Mat.pov_photons_refraction = BoolProperty(
+ name="Refractive Photon Caustics",
+ description="more physically correct",
+ default=False)
+
+ Mat.pov_photons_dispersion = FloatProperty(
+ name="chromatic dispersion",
+ description="Light passing through will be separated according to wavelength. This ratio of refractive indices for violet to red controls how much the colors are spread out 1 = no dispersion, good values are 1.01 to 1.1",
+ min=1.0000, max=10.000, soft_min=1.0000, soft_max=1.1000, precision=4, default=1.0000)
+
+ Mat.pov_photons_reflection = BoolProperty(
+ name="Reflective Photon Caustics",
+ description="Use this to make your Sauron's ring ;-P",
+ default=False)
+
+ Mat.pov_refraction_type = EnumProperty(
+ items=[("0", "None", "use only reflective caustics"),
+ ("1", "Fake Caustics", "use fake caustics"),
+ ("2", "Photons Caustics", "use photons for refractive caustics"),
+ ],
+ name="Refractive",
+ description="use fake caustics (fast) or true photons for refractive Caustics",
+ default="1")
+ ##################################CustomPOV Code############################
+ Mat.pov_replacement_text = StringProperty(
+ name="Declared name:",
+ description="Type the declared name in custom POV code or an external .inc it points at. texture {} expected",
+ default="")
+
+ #Only DUMMIES below for now:
+ Tex.pov_replacement_text = StringProperty(
+ name="Declared name:",
+ description="Type the declared name in custom POV code or an external .inc it points at. pigment {} expected",
+ default="")
+
+ Obj.pov_replacement_text = StringProperty(
+ name="Declared name:",
+ description="Type the declared name in custom POV code or an external .inc it points at. Any POV shape expected e.g: isosurface {}",
+ default="")
+
+ Cam.pov_replacement_text = StringProperty(
+ name="Texts in blend file",
+ description="Type the declared name in custom POV code or an external .inc it points at. camera {} expected",
+ default="")
+ ##############################TEXTURE######################################
+
+ #Custom texture gamma
+ Tex.pov_tex_gamma_enable = BoolProperty(
+ name="Enable custom texture gamma",
+ description="Notify some custom gamma for which texture has been precorrected without the file format carrying it and only if it differs from your OS expected standard (see pov doc)",
+ default=False)
+
+ Tex.pov_tex_gamma_value = FloatProperty(
+ name="Custom texture gamma",
+ description="value for which the file was issued e.g. a Raw photo is gamma 1.0",
+ min=0.45, max=5.00, soft_min=1.00, soft_max=2.50, default=1.00)
+
+ #################################OBJECT####################################
+
+ #Importance sampling
+ Obj.pov_importance_value = FloatProperty(
+ name="Radiosity Importance",
+ description="Priority value relative to other objects for sampling radiosity rays. Increase to get more radiosity rays at comparatively small yet bright objects",
+ min=0.01, max=1.00, default=1.00)
+
+ #Collect photons
+ Obj.pov_collect_photons = BoolProperty(
+ name="Receive Photon Caustics",
+ description="Enable object to collect photons from other objects caustics. Turn off for objects that don't really need to receive caustics (e.g. objects that generate caustics often don't need to show any on themselves) ",
+ default=True)
+
+ ##################################CAMERA###################################
+
+ #DOF Toggle
+ Cam.pov_dof_enable = BoolProperty(
+ name="Depth Of Field",
+ description="Enable POV-Ray Depth Of Field ",
+ default=True)
+
+ #Aperture (Intensity of the Blur)
+ Cam.pov_dof_aperture = FloatProperty(
+ name="Aperture",
+ description="Similar to a real camera's aperture effect over focal blur (though not in physical units and independant of focal length).Increase to get more blur",
+ min=0.01, max=1.00, default=0.25)
+
+ #Aperture adaptive sampling
+ Cam.pov_dof_samples_min = IntProperty(
+ name="Samples Min",
+ description="Minimum number of rays to use for each pixel",
+ min=1, max=128, default=96)
+
+ Cam.pov_dof_samples_max = IntProperty(
+ name="Samples Max",
+ description="Maximum number of rays to use for each pixel",
+ min=1, max=128, default=128)
+
+ Cam.pov_dof_variance = IntProperty(
+ name="Variance",
+ description="Minimum threshold (fractional value) for adaptive DOF sampling (up increases quality and render time). The value for the variance should be in the range of the smallest displayable color difference",
+ min=1, max=100000, soft_max=10000, default=256)
+
+ Cam.pov_dof_confidence = FloatProperty(
+ name="Confidence",
+ description="Probability to reach the real color value. Larger confidence values will lead to more samples, slower traces and better images.",
+ min=0.01, max=0.99, default=0.90)
+
+ ###################################TEXT####################################
+
+ Text.pov_custom_code = BoolProperty(
+ name="Custom Code",
+ description="Add this text at the top of the exported POV-Ray file",
+ default=False)
+
+
+def unregister():
+ Scene = bpy.types.Scene
+ Mat = bpy.types.Material
+ Tex = bpy.types.Texture
+ Obj = bpy.types.Object
+ Cam = bpy.types.Camera
+ Text = bpy.types.Text
+ del Scene.pov_tempfiles_enable # CR
+ del Scene.pov_scene_name # CR
+ del Scene.pov_deletefiles_enable # CR
+ del Scene.pov_scene_path # CR
+ del Scene.pov_renderimage_path # CR
+ del Scene.pov_list_lf_enable # CR
+ del Scene.pov_radio_enable
+ del Scene.pov_radio_display_advanced
+ del Scene.pov_radio_adc_bailout
+ del Scene.pov_radio_always_sample
+ del Scene.pov_radio_brightness
+ del Scene.pov_radio_count
+ del Scene.pov_radio_error_bound
+ del Scene.pov_radio_gray_threshold
+ del Scene.pov_radio_low_error_factor
+ del Scene.pov_radio_media
+ del Scene.pov_radio_minimum_reuse
+ del Scene.pov_radio_nearest_count
+ del Scene.pov_radio_normal
+ del Scene.pov_radio_recursion_limit
+ del Scene.pov_radio_pretrace_start # MR
+ del Scene.pov_radio_pretrace_end # MR
+ del Scene.pov_media_enable # MR
+ del Scene.pov_media_samples # MR
+ del Scene.pov_media_color # MR
+ del Scene.pov_baking_enable # MR
+ del Scene.pov_max_trace_level # MR
+ del Scene.pov_photon_spacing # MR
+ del Scene.pov_photon_max_trace_level # MR
+ del Scene.pov_photon_adc_bailout # MR
+ del Scene.pov_photon_gather_min # MR
+ del Scene.pov_photon_gather_max # MR
+ del Scene.pov_antialias_enable # CR
+ del Scene.pov_antialias_method # CR
+ del Scene.pov_antialias_depth # CR
+ del Scene.pov_antialias_threshold # CR
+ del Scene.pov_antialias_gamma # CR
+ del Scene.pov_jitter_enable # CR
+ del Scene.pov_jitter_amount # CR
+ del Scene.pov_command_line_switches # CR
+ del Scene.pov_indentation_character # CR
+ del Scene.pov_indentation_spaces # CR
+ del Scene.pov_comments_enable # CR
+ del Mat.pov_irid_enable # MR
+ del Mat.pov_mirror_use_IOR # MR
+ del Mat.pov_mirror_metallic # MR
+ del Mat.pov_conserve_energy # MR
+ del Mat.pov_irid_amount # MR
+ del Mat.pov_irid_thickness # MR
+ del Mat.pov_irid_turbulence # MR
+ del Mat.pov_interior_fade_color # MR
+ del Mat.pov_caustics_enable # MR
+ del Mat.pov_fake_caustics # MR
+ del Mat.pov_fake_caustics_power # MR
+ del Mat.pov_photons_refraction # MR
+ del Mat.pov_photons_dispersion # MR
+ del Mat.pov_photons_reflection # MR
+ del Mat.pov_refraction_type # MR
+ del Mat.pov_replacement_text # MR
+ del Tex.pov_tex_gamma_enable # MR
+ del Tex.pov_tex_gamma_value # MR
+ del Tex.pov_replacement_text # MR
+ del Obj.pov_importance_value # MR
+ del Obj.pov_collect_photons # MR
+ del Obj.pov_replacement_text # MR
+ del Cam.pov_dof_enable # MR
+ del Cam.pov_dof_aperture # MR
+ del Cam.pov_dof_samples_min # MR
+ del Cam.pov_dof_samples_max # MR
+ del Cam.pov_dof_variance # MR
+ del Cam.pov_dof_confidence # MR
+ del Cam.pov_replacement_text # MR
+ del Text.pov_custom_code # MR
diff --git a/render_renderfarmfi.py b/render_renderfarmfi.py
new file mode 100644
index 00000000..8ed14c10
--- /dev/null
+++ b/render_renderfarmfi.py
@@ -0,0 +1,999 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+bl_info = {
+ "name": "Renderfarm.fi",
+ "author": "Nathan Letwory <nathan@letworyinteractive.com>, Jesse Kaukonen <jesse.kaukonen@gmail.com>",
+ "version": (8,),
+ "blender": (2, 5, 7),
+ "api": 36487,
+ "location": "Render > Engine > Renderfarm.fi",
+ "description": "Send .blend as session to http://www.renderfarm.fi to render",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/Render/Renderfarm.fi",
+ "tracker_url": "https://projects.blender.org/tracker/index.php?"\
+ "func=detail&aid=22927",
+ "category": "Render"}
+
+"""
+Copyright 2009-2011 Laurea University of Applied Sciences
+Authors: Nathan Letwory, Jesse Kaukonen
+"""
+
+import bpy
+import hashlib
+import http.client
+import xmlrpc.client
+import math
+from os.path import abspath, isabs, join, isfile
+
+from bpy.props import PointerProperty, StringProperty, BoolProperty, EnumProperty, IntProperty, CollectionProperty
+
+bpy.CURRENT_VERSION = bl_info["version"][0]
+bpy.found_newer_version = False
+bpy.up_to_date = False
+bpy.download_location = 'http://www.renderfarm.fi/blender'
+
+bpy.errorMessages = {
+ 'missing_desc': 'You need to enter a title, short and long description',
+ 'missing_creds': 'You haven\'t entered your credentials yet'
+}
+
+bpy.statusMessage = {
+ 'title': 'TRIA_RIGHT',
+ 'shortdesc': 'TRIA_RIGHT',
+ 'longdesc': 'TRIA_RIGHT',
+ 'username': 'TRIA_RIGHT',
+ 'password': 'TRIA_RIGHT'
+}
+
+bpy.errors = []
+bpy.ore_sessions = []
+bpy.queue_selected = -1
+
+def renderEngine(render_engine):
+ bpy.utils.register_class(render_engine)
+ return render_engine
+
+licenses = (
+ ('1', 'CC by-nc-nd', 'Creative Commons: Attribution Non-Commercial No Derivatives'),
+ ('2', 'CC by-nc-sa', 'Creative Commons: Attribution Non-Commercial Share Alike'),
+ ('3', 'CC by-nd', 'Creative Commons: Attribution No Derivatives'),
+ ('4', 'CC by-nc', 'Creative Commons: Attribution Non-Commercial'),
+ ('5', 'CC by-sa', 'Creative Commons: Attribution Share Alike'),
+ ('6', 'CC by', 'Creative Commons: Attribution'),
+ ('7', 'Copyright', 'Copyright, no license specified'),
+ )
+
+class ORESession(bpy.types.PropertyGroup):
+ name = StringProperty(name='Name', description='Name of the session', maxlen=128, default='[session]')
+
+class ORESettings(bpy.types.PropertyGroup):
+ username = StringProperty(name='E-mail', description='E-mail for Renderfarm.fi', maxlen=256, default='')
+ password = StringProperty(name='Password', description='Renderfarm.fi password', maxlen=256, default='')
+ hash = StringProperty(name='Hash', description='hash calculated out of credentials', maxlen=33, default='')
+
+ shortdesc = StringProperty(name='Short description', description='A short description of the scene (100 characters)', maxlen=101, default='')
+ longdesc = StringProperty(name='Long description', description='A more elaborate description of the scene (2k)', maxlen=2048, default='')
+ title = StringProperty(name='Title', description='Title for this session (128 characters)', maxlen=128, default='')
+ url = StringProperty(name='Project URL', description='Project URL. Leave empty if not applicable', maxlen=256, default='')
+
+ parts = IntProperty(name='Parts/Frame', description='', min=1, max=1000, soft_min=1, soft_max=64, default=1)
+ resox = IntProperty(name='Resolution X', description='X of render', min=1, max=10000, soft_min=1, soft_max=10000, default=1920)
+ resoy = IntProperty(name='Resolution Y', description='Y of render', min=1, max=10000, soft_min=1, soft_max=10000, default=1080)
+ memusage = IntProperty(name='Memory Usage', description='Estimated maximum memory usage during rendering in MB', min=1, max=6*1024, soft_min=1, soft_max=3*1024, default=256)
+ start = IntProperty(name='Start Frame', description='Start Frame', default=1)
+ end = IntProperty(name='End Frame', description='End Frame', default=250)
+ fps = IntProperty(name='FPS', description='FPS', min=1, max=256, default=25)
+
+ prepared = BoolProperty(name='Prepared', description='Set to True if preparation has been run', default=False)
+ debug = BoolProperty(name='Debug', description='Verbose output in console', default=False)
+ selected_session = IntProperty(name='Selected Session', description='The selected session', default=0)
+ hasUnsupportedSimulation = BoolProperty(name='HasSimulation', description='Set to True if therea re unsupported simulations', default=False)
+
+ inlicense = EnumProperty(items=licenses, name='source license', description='license speficied for the source files', default='1')
+ outlicense = EnumProperty(items=licenses, name='output license', description='license speficied for the output files', default='1')
+ sessions = CollectionProperty(type=ORESession, name='Sessions', description='Sessions on Renderfarm.fi')
+
+# session struct
+
+# all panels, except render panel
+# Example of wrapping every class 'as is'
+from bl_ui import properties_scene
+for member in dir(properties_scene):
+ subclass = getattr(properties_scene, member)
+ try: subclass.COMPAT_ENGINES.add('RENDERFARMFI_RENDER')
+ except: pass
+del properties_scene
+
+from bl_ui import properties_world
+for member in dir(properties_world):
+ subclass = getattr(properties_world, member)
+ try: subclass.COMPAT_ENGINES.add('RENDERFARMFI_RENDER')
+ except: pass
+del properties_world
+
+from bl_ui import properties_material
+for member in dir(properties_material):
+ subclass = getattr(properties_material, member)
+ try: subclass.COMPAT_ENGINES.add('RENDERFARMFI_RENDER')
+ except: pass
+del properties_material
+
+from bl_ui import properties_object
+for member in dir(properties_object):
+ subclass = getattr(properties_object, member)
+ try: subclass.COMPAT_ENGINES.add('RENDERFARMFI_RENDER')
+ except: pass
+del properties_object
+
+class RenderButtonsPanel():
+ bl_space_type = 'PROPERTIES'
+ bl_region_type = 'WINDOW'
+ bl_context = "render"
+ # COMPAT_ENGINES must be defined in each subclass, external engines can add themselves here
+
+class SUMMARY_PT_RenderfarmFi(RenderButtonsPanel, bpy.types.Panel):
+ # Prints a summary to the panel before uploading. If scene settings differ from ore settings, then display a warning icon
+ bl_label = 'Summary'
+ bl_options = {'DEFAULT_CLOSED'}
+ COMPAT_ENGINES = set(['RENDERFARMFI_RENDER'])
+
+ @classmethod
+ def poll(cls, context):
+ rd = context.scene.render
+ return (rd.use_game_engine==False) and (rd.engine in cls.COMPAT_ENGINES)
+
+ def draw(self, context):
+ sce = context.scene
+ rd = context.scene.render
+ ore = context.scene.ore_render
+ layout = self.layout
+ problems = False
+
+ # Check if correct resolution is set
+ if rd.resolution_x != ore.resox:
+ layout.label(text='Resolution X: ' + str(ore.resox), icon='ERROR')
+ problems = True
+ else:
+ layout.label(text='Resolution X: ' + str(ore.resox), icon='FILE_TICK')
+ if rd.resolution_y != ore.resoy:
+ layout.label(text='Resolution Y: ' + str(ore.resoy), icon='ERROR')
+ problems = True
+ else:
+ layout.label(text='Resolution Y: ' + str(ore.resoy), icon='FILE_TICK')
+
+ # Check if correct number of frames is specified
+ if (sce.frame_start != ore.start) and not ( (sce.frame_start == ore.start and sce.frame_end == ore.end) and sce.frame_start == sce.frame_end):
+ layout.label(text='Start frame: ' + str(ore.start), icon='ERROR')
+ layout.label(text='.blend Start frame is different to the one specified in the uploader script. Please verify!')
+ problems = True
+ else:
+ layout.label(text='Start frame: ' + str(ore.start), icon='FILE_TICK')
+ if (sce.frame_end != ore.end) and not ( (sce.frame_start == ore.start and sce.frame_end == ore.end) and sce.frame_start == sce.frame_end):
+ layout.label(text='End frame: ' + str(ore.end), icon='ERROR')
+ layout.label(text='.blend End frame is different to the one specified in the uploader script. Please verify!')
+ problems = True
+ else:
+ layout.label(text='End frame: ' + str(ore.end), icon='FILE_TICK')
+
+ # Check if more than 1 frame is specified
+ if (sce.frame_start == ore.start and sce.frame_end == ore.end) and (sce.frame_start == sce.frame_end):
+ layout.label(text='Only one frame specified to be rendered!')
+ layout.label(text='This is highly ineffective when using distributed rendering')
+ problems = True
+
+ if rd.resolution_percentage != 100:
+ layout.label(text='Resolution percentage: ' + str(rd.resolution_percentage), icon='ERROR')
+ problems = True
+ else:
+ layout.label(text='Resolution percentage: ' + str(rd.resolution_percentage), icon='FILE_TICK')
+
+ if rd.file_format != 'PNG':
+ layout.label(text='Output format: ' + rd.file_format, icon='ERROR')
+ layout.label(text='Output format must be set to PNG')
+ problems = True
+ else:
+ layout.label(text='Output format: ' + rd.file_format, icon='FILE_TICK')
+
+ if ore.parts > 1 and rd.use_sss == True:
+ layout.label(text='Subsurface Scattering: ' + str(rd.use_sss), icon='ERROR')
+ layout.label(text='If you want to use SSS, parts must be set to 1')
+ problems = True
+ else:
+ layout.label(text='Subsurface Scattering: ' + str(rd.use_sss), icon='FILE_TICK')
+
+ if rd.use_compositing == False:
+ layout.label(text='Composite nodes: ' + str(rd.use_compositing), icon='ERROR')
+ layout.label(text='Composite nodes are disabled.')
+ layout.label(text='The script automatically disables them if: ')
+ layout.label(text='- Filter type nodes are used and parts are more than 1')
+ layout.label(text='- There is an output node')
+ problems = True
+ else:
+ layout.label(text='Composite nodes: ' + str(rd.use_compositing), icon='FILE_TICK')
+
+ if rd.use_save_buffers:
+ layout.label(text='Save buffers: ' + str(rd.use_save_buffers), icon='ERROR')
+ layout.label(text='Save buffers must be disabled')
+ layout.label(text='Can only disabled if Full Sample is turned off')
+ problems = True
+ else:
+ layout.label(text='Save buffers: ' + str(rd.use_save_buffers), icon='FILE_TICK')
+
+ if rd.use_border:
+ layout.label(text='Border render: ' + str(rd.use_border), icon='ERROR')
+ layout.label(text='Border render must be disabled')
+ else:
+ layout.label(text='Border render: ' + str(rd.use_border), icon='FILE_TICK')
+
+ if rd.threads_mode != 'FIXED' or rd.threads > 1:
+ layout.label(text='Threads: ' + rd.threads_mode + ' ' + str(rd.threads), icon='ERROR')
+ layout.label(text='Threads must be set to Fixed, 1')
+ problems = True
+ else:
+ layout.label(text='Threads: ' + rd.threads_mode + ' ' + str(rd.threads), icon='FILE_TICK')
+
+ if ore.hasUnsupportedSimulation == True:
+ layout.label(text='There is an unsupported simulation', icon='ERROR')
+ layout.label(text='Fluid/smoke/cloth/collision/softbody simulations are not supported')
+ problems = True
+ else:
+ layout.label(text='No unsupported simulations found', icon='FILE_TICK')
+
+ if ore.prepared == False:
+ layout.label(text='The script reports "not ready".', icon='ERROR')
+ layout.label(text='Please review the settings above')
+ layout.label(text='If everything is in order, click Check Scene again')
+ layout.label(text='The script automatically changes settings, so make sure they are correct')
+ else:
+ layout.label(text='The script reports "All settings ok!"', icon='FILE_TICK')
+ layout.label(text='Please render one frame using Blender Render first')
+ row = layout.row()
+ row.operator('ore.test_render')
+ layout.label(text='If you are sure that the render works, click Render on Renderfarm.fi')
+
+class RENDERFARM_MT_Session(bpy.types.Menu):
+ bl_label = "Show Session"
+
+ def draw(self, context):
+ layout = self.layout
+
+ layout.operator('ore.completed_sessions')
+ layout.operator('ore.accept_sessions')
+ layout.operator('ore.active_sessions')
+ layout.separator()
+ layout.operator('ore.cancelled_sessions')
+
+class LOGIN_PT_RenderfarmFi(RenderButtonsPanel, bpy.types.Panel):
+ bl_label = 'Login to Renderfarm.fi'
+ COMPAT_ENGINES = set(['RENDERFARMFI_RENDER'])
+
+ @classmethod
+ def poll(cls, context):
+ rd = context.scene.render
+ return (rd.use_game_engine==False) and (rd.engine in cls.COMPAT_ENGINES)
+
+ def draw(self, context):
+ layout = self.layout
+ # XXX layout.operator('ore.check_update')
+ ore = context.scene.ore_render
+ checkStatus(ore)
+
+ if ore.hash=='':
+ col = layout.column()
+ if ore.hash=='':
+ col.prop(ore, 'username', icon=bpy.statusMessage['username'])
+ col.prop(ore, 'password', icon=bpy.statusMessage['password'])
+ layout.operator('ore.login')
+ else:
+ layout.label(text='E-mail and password entered.', icon='INFO')
+ layout.operator('ore.change_user')
+
+class CHECK_PT_RenderfarmFi(RenderButtonsPanel, bpy.types.Panel):
+ bl_label = 'Check for updates'
+ COMPAT_ENGINES = set(['RENDERFARMFI_RENDER'])
+
+ @classmethod
+ def poll(cls, context):
+ rd = context.scene.render
+ return (rd.use_game_engine==False) and (rd.engine in cls.COMPAT_ENGINES)
+
+ def draw(self, context):
+ layout = self.layout
+ ore = context.scene.ore_render
+
+ if bpy.found_newer_version == True:
+ layout.operator('ore.open_download_location')
+ else:
+ if bpy.up_to_date == True:
+ layout.label(text='You have the latest version')
+ layout.operator('ore.check_update')
+
+class SESSIONS_PT_RenderfarmFi(RenderButtonsPanel, bpy.types.Panel):
+ bl_label = 'Sessions'
+ COMPAT_ENGINES = set(['RENDERFARMFI_RENDER'])
+
+ @classmethod
+ def poll(cls, context):
+ rd = context.scene.render
+ return (rd.use_game_engine==False) and (rd.engine in cls.COMPAT_ENGINES)
+
+ def draw(self, context):
+ layout = self.layout
+ ore = context.scene.ore_render
+
+ layout.menu("RENDERFARM_MT_Session")
+ if bpy.queue_selected == 1:
+ layout.label(text='Completed Sessions')
+ elif bpy.queue_selected == 2:
+ layout.label(text='Rendering Sessions')
+ elif bpy.queue_selected == 3:
+ layout.label(text='Pending Sessions')
+ elif bpy.queue_selected == 4:
+ layout.label(text='Cancelled and Rejected Sessions')
+ layout.template_list(ore, 'sessions', ore, 'selected_session', rows=2)
+ if bpy.queue_selected == 3:
+ layout.operator('ore.cancel_session')
+
+class CONDITIONS_PT_RenderfarmFi(RenderButtonsPanel, bpy.types.Panel):
+ bl_label = "IMPORTANT: Rendering on Renderfarm.fi"
+ COMPAT_ENGINES = set(['RENDERFARMFI_RENDER'])
+
+ @classmethod
+ def poll(cls, context):
+ rd = context.scene.render
+ return (rd.use_game_engine==False) and (rd.engine in cls.COMPAT_ENGINES)
+
+ def draw(self, context):
+ layout = self.layout
+
+ layout.label(text='- The render must take more than 50 seconds / frame')
+ layout.label(text='- The animation must be at least 20 frames long')
+ layout.label(text='- No still renders')
+ layout.label(text='- All external data must be included:')
+ layout.label(text=' * Linked files: L in object mode')
+ layout.label(text=' * Textures: File menu -> External Data')
+ layout.label(text='- No Python scripts')
+ layout.label(text='- Memory usage max 3GB')
+ layout.label(text='- If your render takes more than an hour / frame:')
+ layout.label(text=' * No filter type composite nodes (blur, glare etc.)')
+ layout.label(text=' * No SSS')
+ layout.label(text=' * No Motion Blur')
+
+class RENDER_PT_RenderfarmFi(RenderButtonsPanel, bpy.types.Panel):
+ bl_label = "Scene Settings"
+ COMPAT_ENGINES = set(['RENDERFARMFI_RENDER'])
+
+ @classmethod
+ def poll(cls, context):
+ rd = context.scene.render
+ return (rd.use_game_engine==False) and (rd.engine in cls.COMPAT_ENGINES)
+
+ def draw(self, context):
+ layout = self.layout
+ sce = context.scene
+ ore = sce.ore_render
+
+ if ore.prepared and ore.hash!='':
+ layout.prop(ore, 'memusage')
+ layout.separator()
+ row = layout.row()
+
+ layout.separator()
+ row = layout.row()
+ row.prop(ore, 'inlicense')
+ row.prop(ore, 'outlicense')
+
+ layout.separator()
+ row = layout.row()
+ row.operator('ore.upload')
+ layout.label(text='Blender may seem frozen during the upload!')
+ row.operator('ore.reset', icon='FILE_REFRESH')
+ else:
+ layout.prop(ore, 'title', icon=bpy.statusMessage['title'])
+ layout.prop(ore, 'shortdesc', icon=bpy.statusMessage['shortdesc'])
+ layout.prop(ore, 'longdesc', icon=bpy.statusMessage['longdesc'])
+ layout.prop(ore, 'url')
+ layout.separator()
+ layout.operator('ore.use_scene_settings', icon='HAND')
+ row = layout.row()
+ row.prop(ore, 'resox')
+ row.prop(ore, 'resoy')
+ layout.separator()
+ layout.prop(ore, 'parts')
+ row = layout.row()
+ row.prop(ore, 'start')
+ row.prop(ore, 'end')
+ layout.prop(ore, 'fps')
+
+ layout.separator()
+ layout.operator('ore.prepare', icon='INFO')
+
+def random_string(length):
+ import string
+ import random
+ return ''.join(random.choice(string.ascii_letters) for ii in range(length + 1))
+
+def encode_multipart_data(data, files):
+ boundary = random_string(30)
+
+ def get_content_type(filename):
+ return 'application/octet-stream' # default this
+
+ def encode_field(field_name):
+ return ('--' + boundary,
+ 'Content-Disposition: form-data; name="%s"' % field_name,
+ '', str(data[field_name]))
+
+ def encode_file(field_name):
+ import codecs
+ filename = files [field_name]
+ return ('--' + boundary,
+ 'Content-Disposition: form-data; name="%s"; filename="%s"' % (field_name, filename),
+ 'Content-Type: %s' % get_content_type(filename),
+ '', str(open(filename, 'rb').read(), encoding='iso-8859-1'))
+
+ lines = []
+ for name in data:
+ lines.extend(encode_field(name))
+ for name in files:
+ lines.extend(encode_file(name))
+ lines.extend(('--%s--' % boundary, ''))
+ body = '\r\n'.join(lines)
+
+ headers = {'content-type': 'multipart/form-data; boundary=' + boundary,
+ 'content-length': str(len(body))}
+
+ return body, headers
+
+def send_post(url, data, files):
+ connection = http.client.HTTPConnection('xmlrpc.renderfarm.fi')
+ connection.request('POST', '/file', *encode_multipart_data(data, files))
+ response = connection.getresponse()
+ res = response.read()
+ return res
+
+def md5_for_file(filepath):
+ md5hash = hashlib.md5()
+ blocksize = 0x10000
+ f = open(filepath, "rb")
+ while True:
+ data = f.read(blocksize)
+ if not data:
+ break
+ md5hash.update(data)
+ return md5hash.hexdigest()
+
+def upload_file(key, userid, sessionid, server, path):
+ assert isabs(path)
+ assert isfile(path)
+ data = {
+ 'userId': str(userid),
+ 'sessionKey': key,
+ 'sessionId': sessionid,
+ 'md5sum': md5_for_file(path)
+ }
+ files = {
+ 'blenderfile': path
+ }
+
+ r = send_post(server, data, files)
+
+ #print 'Uploaded %r' % (path)
+
+ return r
+
+def run_upload(key, userid, sessionid, path):
+ #print('Upload', path)
+ r = upload_file(key, userid, sessionid, r'http://xmlrpc.renderfarm.fi/file', path)
+ o = xmlrpc.client.loads(r)
+ #print('Done!')
+ return o[0][0]
+
+def ore_upload(op, context):
+ sce = context.scene
+ ore = sce.ore_render
+ if not ore.prepared:
+ op.report(set(['ERROR']), 'Your user or scene information is not complete')
+ return {'CANCELLED'}
+ try:
+ authproxy = xmlrpc.client.ServerProxy(r'https://xmlrpc.renderfarm.fi/auth')
+ res = authproxy.auth.getSessionKey(ore.username, ore.hash)
+ key = res['key']
+ userid = res['userId']
+ proxy = xmlrpc.client.ServerProxy(r'http://xmlrpc.renderfarm.fi/session')
+ proxy._ServerProxy__transport.user_agent = 'Renderfarm.fi Uploader/%s' % (bpy.CURRENT_VERSION)
+ res = proxy.session.createSession(userid, key)
+ sessionid = res['sessionId']
+ key = res['key']
+ res = run_upload(key, userid, sessionid, bpy.data.filepath)
+ fileid = int(res['fileId'])
+ res = proxy.session.setTitle(userid, res['key'], sessionid, ore.title)
+ res = proxy.session.setLongDescription(userid, res['key'], sessionid, ore.longdesc)
+ res = proxy.session.setShortDescription(userid, res['key'], sessionid, ore.shortdesc)
+ if len(ore.url)>0:
+ res = proxy.session.setExternalURLs(userid, res['key'], sessionid, ore.url)
+ res = proxy.session.setStartFrame(userid, res['key'], sessionid, ore.start)
+ res = proxy.session.setEndFrame(userid, res['key'], sessionid, ore.end)
+ res = proxy.session.setSplit(userid, res['key'], sessionid, ore.parts)
+ res = proxy.session.setMemoryLimit(userid, res['key'], sessionid, ore.memusage)
+ res = proxy.session.setXSize(userid, res['key'], sessionid, ore.resox)
+ res = proxy.session.setYSize(userid, res['key'], sessionid, ore.resoy)
+ res = proxy.session.setFrameRate(userid, res['key'], sessionid, ore.fps)
+ res = proxy.session.setOutputLicense(userid, res['key'], sessionid, int(ore.outlicense))
+ res = proxy.session.setInputLicense(userid, res['key'], sessionid, int(ore.inlicense))
+ res = proxy.session.setPrimaryInputFile(userid, res['key'], sessionid, fileid)
+ res = proxy.session.submit(userid, res['key'], sessionid)
+ op.report(set(['INFO']), 'Submission sent to Renderfarm.fi')
+ except xmlrpc.client.Error as v:
+ print('ERROR:', v)
+ op.report(set(['ERROR']), 'An error occurred while sending submission to Renderfarm.fi')
+ except Exception as e:
+ print('Unhandled error:', e)
+ op.report(set(['ERROR']), 'An error occurred while sending submission to Renderfarm.fi')
+
+ return {'FINISHED'}
+
+def setStatus(property, status):
+ if status:
+ bpy.statusMessage[property] = 'ERROR'
+ else:
+ bpy.statusMessage[property] = 'TRIA_RIGHT'
+
+def showStatus(layoutform, property, message):
+ if bpy.statusMessage[property] == 'ERROR':
+ layoutform.label(text='', icon='ERROR')
+
+def checkStatus(ore):
+ bpy.errors = []
+
+ if ore.hash=='' and (ore.username=='' or ore.password==''):
+ bpy.errors.append('missing_creds')
+
+ if '' in (ore.title, ore.longdesc, ore.shortdesc):
+ bpy.errors.append('missing_desc')
+
+ setStatus('username', ore.hash=='' and ore.username=='')
+ setStatus('password', ore.hash=='' and ore.password=='')
+
+ setStatus('title', ore.title=='')
+ setStatus('longdesc', ore.longdesc=='')
+ setStatus('shortdesc', ore.shortdesc=='')
+
+class OreSession:
+
+ def __init__(self, id, title):
+ self.id = id
+ self.title = title
+ self.frames = 0
+ self.startframe = 0
+ self.endframe = 0
+ self.rendertime = 0
+ self.percentage = 0
+
+ def percentageComplete(self):
+ totFrames = self.endframe - self.startframe
+ if totFrames != 0:
+ done = math.floor((self.frames / totFrames)*100)
+ else:
+ done = math.floor((self.frames / (totFrames+0.01))*100)
+
+ if done > 100:
+ done = 100
+ return done
+
+def xmlSessionsToOreSessions(sessions, queue):
+ bpy.ore_sessions = []
+ completed = sessions[queue]
+ for sid in completed:
+ s = completed[sid]['title']
+ t = completed[sid]['timestamps']
+ sinfo = OreSession(sid, s)
+ if queue in ('completed', 'active'):
+ sinfo.frames = completed[sid]['framesRendered']
+ sinfo.startframe = completed[sid]['startFrame']
+ sinfo.endframe = completed[sid]['endFrame']
+ bpy.ore_sessions.append(sinfo)
+
+def updateSessionList(ore):
+ while(len(ore.sessions) > 0):
+ ore.sessions.remove(0)
+
+ for s in bpy.ore_sessions:
+ ore.sessions.add()
+ session = ore.sessions[-1]
+ session.name = s.title + ' [' + str(s.percentageComplete()) + '% complete]'
+
+class ORE_OpenDownloadLocation(bpy.types.Operator):
+ bl_idname = 'ore.open_download_location'
+ bl_label = 'Download new version for your platform'
+
+ def execute(self, context):
+ import webbrowser
+ webbrowser.open(bpy.download_location)
+ return {'FINISHED'}
+
+class ORE_CancelSession(bpy.types.Operator):
+ bl_idname = 'ore.cancel_session'
+ bl_label = 'Cancel Session'
+
+ def execute(self, context):
+ sce = context.scene
+ ore = sce.ore_render
+ userproxy = xmlrpc.client.ServerProxy(r'https://xmlrpc.renderfarm.fi/user')
+ if len(bpy.ore_sessions)>0:
+ s = bpy.ore_sessions[ore.selected_session]
+ try:
+ res = userproxy.user.cancelSession(ore.username, ore.hash, int(s.id))
+ self.report(set(['INFO']), 'Session ' + s.title + ' with id ' + s.id + ' cancelled')
+ except:
+ self.report(set(['ERROR']), 'Could not cancel session ' + s.title + ' with id ' + s.id)
+
+ return {'FINISHED'}
+
+class ORE_GetCompletedSessions(bpy.types.Operator):
+ bl_idname = 'ore.completed_sessions'
+ bl_label = 'Completed sessions'
+
+ def execute(self, context):
+ sce = context.scene
+ ore = sce.ore_render
+ bpy.queue_selected = 1
+ userproxy = xmlrpc.client.ServerProxy(r'https://xmlrpc.renderfarm.fi/user')
+
+ sessions = userproxy.user.getAllSessions(ore.username, ore.hash, 'completed')
+
+ xmlSessionsToOreSessions(sessions, 'completed')
+
+ updateSessionList(ore)
+
+ return {'FINISHED'}
+
+class ORE_GetCancelledSessions(bpy.types.Operator):
+ bl_idname = 'ore.cancelled_sessions'
+ bl_label = 'Cancelled sessions'
+
+ def execute(self, context):
+ sce = context.scene
+ ore = sce.ore_render
+ bpy.queue_selected = 4
+ userproxy = xmlrpc.client.ServerProxy(r'https://xmlrpc.renderfarm.fi/user')
+
+ sessions = userproxy.user.getAllSessions(ore.username, ore.hash, 'completed')
+
+ xmlSessionsToOreSessions(sessions, 'canceled')
+
+ updateSessionList(ore)
+
+ return {'FINISHED'}
+
+class ORE_GetActiveSessions(bpy.types.Operator):
+ bl_idname = 'ore.active_sessions'
+ bl_label = 'Rendering sessions'
+
+ def execute(self, context):
+ sce = context.scene
+ ore = sce.ore_render
+ bpy.queue_selected = 2
+ userproxy = xmlrpc.client.ServerProxy(r'https://xmlrpc.renderfarm.fi/user')
+
+ sessions = userproxy.user.getAllSessions(ore.username, ore.hash, 'active')
+
+ xmlSessionsToOreSessions(sessions, 'active')
+
+ updateSessionList(ore)
+
+ return {'FINISHED'}
+
+class ORE_GetPendingSessions(bpy.types.Operator):
+ bl_idname = 'ore.accept_sessions' # using ORE lingo in API. acceptQueue is session waiting for admin approval
+ bl_label = 'Pending sessions'
+
+ def execute(self, context):
+ sce = context.scene
+ ore = sce.ore_render
+ bpy.queue_selected = 3
+ userproxy = xmlrpc.client.ServerProxy(r'https://xmlrpc.renderfarm.fi/user')
+
+ sessions = userproxy.user.getAllSessions(ore.username, ore.hash, 'accept')
+
+ xmlSessionsToOreSessions(sessions, 'accept')
+
+ updateSessionList(ore)
+
+ return {'FINISHED'}
+
+class ORE_CheckUpdate(bpy.types.Operator):
+ bl_idname = 'ore.check_update'
+ bl_label = 'Check for new version'
+
+ def execute(self, context):
+ sce = context.scene
+ ore = sce.ore_render
+ blenderproxy = xmlrpc.client.ServerProxy(r'http://xmlrpc.renderfarm.fi/blender')
+ try:
+ self.report(set(['INFO']), 'Checking for newer version on Renderfarm.fi')
+ dl_url = blenderproxy.blender.getCurrentVersion(bpy.CURRENT_VERSION)
+ if len(dl_url['url']) > 0:
+ self.report(set(['INFO']), 'Found a newer version on Renderfarm.fi ' + dl_url['url'])
+ bpy.download_location = dl_url['url']
+ bpy.found_newer_version = True
+ else:
+ bpy.up_to_date = True
+ self.report(set(['INFO']), 'Done checking for newer version on Renderfarm.fi')
+ except xmlrpc.client.Fault as f:
+ print('ERROR:', f)
+ self.report(set(['ERROR']), 'An error occurred while checking for newer version on Renderfarm.fi')
+
+ return {'FINISHED'}
+
+class ORE_LoginOp(bpy.types.Operator):
+ bl_idname = 'ore.login'
+ bl_label = 'Confirm credentials'
+
+ def execute(self, context):
+ sce = context.scene
+ ore = sce.ore_render
+
+ if ore.hash=='':
+ if ore.password != '' and ore.username != '':
+ ore.hash = hashlib.md5(ore.password.encode() + ore.username.encode()).hexdigest()
+ ore.password = ''
+
+ checkStatus(ore)
+
+ if len(bpy.errors) > 0:
+ ore.prepared = False
+ return {'CANCELLED'}
+
+ return {'FINISHED'}
+
+class ORE_PrepareOp(bpy.types.Operator):
+ '''Checking the scene will also save to the current file when successful!'''
+ bl_idname = 'ore.prepare'
+ bl_label = 'Check scene'
+
+ def execute(self, context):
+ def hasSSSMaterial():
+ for m in bpy.data.materials:
+ if m.subsurface_scattering.use:
+ return True
+ return False
+
+ def hasParticleSystem():
+ if len(bpy.data.particles) > 0:
+ self.report({'WARNING'}, "Found particle system")
+ print("Found particle system")
+ return True
+ return False
+
+ def hasSimulation(t):
+ for o in bpy.data.objects:
+ for m in o.modifiers:
+ if isinstance(m, t):
+ self.report({'WARNING'}, "Found simulation: " + str(t))
+ print("Found simulation: " + str(t))
+ return True
+ return False
+
+ def hasFluidSimulation():
+ return hasSimulation(bpy.types.FluidSimulationModifier)
+
+ def hasSmokeSimulation():
+ return hasSimulation(bpy.types.SmokeModifier)
+
+ def hasClothSimulation():
+ return hasSimulation(bpy.types.ClothModifier)
+
+ def hasCollisionSimulation():
+ return hasSimulation(bpy.types.CollisionModifier)
+
+ def hasSoftbodySimulation():
+ return hasSimulation(bpy.types.SoftBodyModifier)
+
+ def hasUnsupportedSimulation():
+ return hasSoftbodySimulation() or hasCollisionSimulation() or hasClothSimulation() or hasSmokeSimulation() or hasFluidSimulation()
+
+ def isFilterNode(node):
+ t = type(node)
+ return t==bpy.types.CompositorNodeBlur or t==bpy.types.CompositorNodeDBlur
+
+ def hasCompositingErrors(use_nodes, nodetree, parts):
+ if not use_nodes: # no nodes in use, ignore check
+ return False
+
+ for node in nodetree.nodes:
+ # output file absolutely forbidden
+ if type(node)==bpy.types.CompositorNodeOutputFile:
+ self.report({'ERROR'}, 'File output node is disallowed, remove them from your compositing nodetrees.')
+ return True
+ # blur et al are problematic when rendering ore.parts>1
+ if isFilterNode(node) and parts>1:
+ self.report({'WARNING'}, 'A filtering node found and parts > 1. This combination will give bad output.')
+ return True
+
+ return False
+
+ sce = context.scene
+ ore = sce.ore_render
+
+ errors = False
+
+ checkStatus(ore)
+
+ if len(bpy.errors) > 0:
+ ore.prepared = False
+ return {'CANCELLED'}
+
+ rd = sce.render
+
+ bpy.ops.file.pack_all()
+ print("=============================================")
+ rd.threads_mode = 'FIXED'
+ rd.threads = 1
+ rd.resolution_x = ore.resox
+ rd.resolution_y = ore.resoy
+ if (rd.resolution_percentage != 100):
+ print("Resolution percentage is not 100. Changing to 100%")
+ self.report({'WARNING'}, "Resolution percentage is not 100. Changing to 100%")
+ rd.resolution_percentage = 100
+ if rd.file_format != 'PNG':
+ print("Renderfarm.fi always uses PNG for output. Changing to PNG.")
+ self.report({'WARNING'}, "Renderfarm.fi always uses PNG for output. Changing to PNG.")
+ rd.file_format = 'PNG'
+ if (rd.use_sss == True or hasSSSMaterial()) and ore.parts > 1:
+ print("Subsurface Scattering is not supported when rendering with parts > 1. Disabling")
+ self.report({'WARNING'}, "Subsurface Scattering is not supported when rendering with parts > 1. Disabling")
+ rd.use_sss = False # disabling because ore.parts > 1. It's ok to use SSS with 1part/frame
+ if hasUnsupportedSimulation() == True:
+ print("An unsupported simulation was detected. Please check your settings and remove them")
+ self.report({'WARNING'}, "An unsupported simulation was detected. Please check your settings and remove them")
+ ore.hasUnsupportedSimulation = True
+ errors = True
+ else:
+ ore.hasUnsupportedSimulation = False
+ if (rd.use_full_sample == True and rd.use_save_buffers == True):
+ print("Save Buffers is not supported. As you also have Full Sample on, I'm turning both of the settings off")
+ self.report({'WARNING'}, "Save Buffers is not supported. As you also have Full Sample on, I'm turning both of the settings off")
+ if (rd.use_full_sample == False and rd.use_save_buffers == True):
+ print("Save buffers needs to be turned off. Changing to off")
+ self.report({'WARNING'}, "Save buffers needs to be turned off. Changing to off")
+ rd.use_full_sample = False
+ rd.use_save_buffers = False
+ rd.use_free_image_textures = True
+ if (rd.use_border == True):
+ print("Border render is not supported. Turning it off")
+ self.report({'WARNING'}, "Border render is not supported. Turning it off")
+ rd.use_border = False
+ if rd.use_compositing:
+ if hasCompositingErrors(sce.use_nodes, sce.node_tree, ore.parts):
+ print("Found disallowed nodes or problematic setup")
+ rd.use_compositing = False
+ self.report({'WARNING'}, "Found disallowed nodes or problematic setup")
+ print("Done checking the scene. Now do a test render")
+ self.report({'INFO'}, "Done checking the scene. Now do a test render")
+ print("=============================================")
+
+ # if errors found, don't allow to upload, instead have user
+ # go through this until everything is ok
+ # Errors is only True if there is a setting that could not be changed to the correct setting
+ # In short, unsupported simulations
+ if errors:
+ self.report({'WARNING'}, "Some issues found. Check console and do a test render to make sure everything works.")
+ ore.prepared = False
+ else:
+ ore.prepared = True
+ rd.engine = 'BLENDER_RENDER'
+ bpy.ops.wm.save_mainfile()
+ rd.engine = 'RENDERFARMFI_RENDER'
+
+ return {'FINISHED'}
+
+class ORE_ResetOp(bpy.types.Operator):
+ bl_idname = "ore.reset"
+ bl_label = "Reset Preparation"
+
+ def execute(self, context):
+ sce = context.scene
+ sce.ore_render.prepared = False
+ sce.render.threads_mode = 'AUTO'
+ return {'FINISHED'}
+
+class ORE_TestRenderOp(bpy.types.Operator):
+ bl_idname = "ore.test_render"
+ bl_label = "Run a test render"
+
+ def execute(self, context):
+ rd = context.scene.render
+ rd.engine = 'BLENDER_RENDER'
+ rd.threads_mode = 'AUTO'
+ rd.threads = 1
+ bpy.ops.render.render()
+ rd.threads_mode = 'FIXED'
+ rd.threads = 1
+ rd.engine = 'RENDERFARMFI_RENDER'
+ return {'FINISHED'}
+
+class ORE_UploaderOp(bpy.types.Operator):
+ bl_idname = "ore.upload"
+ bl_label = "Render on Renderfarm.fi"
+
+ def execute(self, context):
+ rd = context.scene.render
+ rd.engine = 'BLENDER_RENDER'
+ bpy.ops.wm.save_mainfile()
+ return ore_upload(self, context)
+
+class ORE_UseBlenderReso(bpy.types.Operator):
+ bl_idname = "ore.use_scene_settings"
+ bl_label = "Use Scene settings"
+
+ def execute(self, context):
+ sce = context.scene
+ ore = sce.ore_render
+ rd = context.scene.render
+
+ ore.resox = rd.resolution_x
+ ore.resoy = rd.resolution_y
+ ore.start = sce.frame_start
+ ore.end = sce.frame_end
+ ore.fps = rd.fps
+
+ return {'FINISHED'}
+
+class ORE_ChangeUser(bpy.types.Operator):
+ bl_idname = "ore.change_user"
+ bl_label = "Change user"
+
+ def execute(self, context):
+ ore = context.scene.ore_render
+ ore.password = ''
+ ore.hash = ''
+
+ return {'FINISHED'}
+
+class RenderfarmFi(bpy.types.RenderEngine):
+ bl_idname = 'RENDERFARMFI_RENDER'
+ bl_label = "Renderfarm.fi"
+
+ def render(self, scene):
+ print('Do test renders with Blender Render')
+
+
+#~ def menu_export(self, context):
+ #~ import os
+ #~ default_path = os.path.splitext(bpy.data.filepath)[0] + ".py"
+ #~ self.layout.operator(RenderfarmFi.bl_idname, text=RenderfarmFi.bl_label)
+
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ bpy.types.Scene.ore_render = PointerProperty(type=ORESettings, name='ORE Render', description='ORE Render Settings')
+
+ #~ bpy.types.INFO_MT_render.append(menu_export)
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+ #~ bpy.types.INFO_MT_render.remove(menu_export)
+
+if __name__ == "__main__":
+ register()
diff --git a/rigify/CREDITS b/rigify/CREDITS
new file mode 100644
index 00000000..dd517fb1
--- /dev/null
+++ b/rigify/CREDITS
@@ -0,0 +1,17 @@
+A big thank you to all the people listed here for supporting Rigify.
+
+Original prototyping and development, and Python API support:
+- Campbell Barton
+
+General financial support:
+- Benjamin Tolputt
+- Nesterenko Viktoriya
+- Jeff Hogan
+
+IK/FK snapping financial support:
+- Benjamin Tolputt
+- Nesterenko Viktoriya
+- Leslie Chih
+- Isaac Ah-Loe
+- Casey "TheLorax" Jones
+
diff --git a/rigify/README b/rigify/README
new file mode 100644
index 00000000..790942f9
--- /dev/null
+++ b/rigify/README
@@ -0,0 +1,252 @@
+INTRODUCTION
+------------
+Rigify is an auto-rigging system based on a "building blocks" paradigm. The
+user can create a rig by putting together any combination of rig types, in any
+configuration that they want.
+
+A rig type is something like "biped arm" or "spine" or "finger".
+
+The input to the Rigify system is something called a "metarig". It is an
+armature that contains data about how to construct the rig. In particular, it
+contains bones in the basic configuration of the rig, with some bones tagged
+to indicate the rig type.
+
+For example, a metarig might contain a chain of three bones, the root-most of
+which is tagged as being a biped arm. When given as input to Rigify, Rigify
+will then generate a fully-featured biped arm rig in the same position and
+proportions as the 3-bone chain.
+
+One could also have another chain of bones, the root-most of which is tagged as
+being a spine. And the root-most bone of the arm chain could be the child of
+any of those spine bones. Then the rig that Rigify generates would be a
+spine rig with an arm rig attached to it.
+
+
+THE GUTS OF RIGIFY, SUMMARIZED
+------------------------------
+The concept behind Rigify is fairly simple. It receives an armature as input
+with some of the bones tagged as being certain rig types (arm, leg, etc.)
+
+When Rigify receives that armature as input, the first thing it does is
+duplicate the armature. From here on out, the original armature is totally
+ignored. Only the duplicate is used. And this duplicate armature object will
+become the generated rig.
+
+Rigify next prepends "ORG-" to all of the bones. These are the "original"
+bones of the metarig, and they are used as the glue between rig types, as I
+will explain later.
+
+Rigify then generates the rig in two passes. The first pass is the
+"information gathering" stage.
+
+The information gathering stage doesn't modify the armature at all. It simply
+gathers information about it. Or, rather, it lets the rig types gather
+information about it.
+It traverses the bones in a root-most to leaf-most order, and whenever it
+stumbles upon a bone that has a rig type tagged on it, it creates a rig-type
+python object (rig types will be explained further down) for that rig type,
+and executes the resulting python object's information gathering code.
+
+At the end of the information gathering stage, Rigify has a collection of
+python objects, each of which know all the information they need to generate
+their own bit of the rig.
+
+The next stage is the rig generation stage. This part is pretty simple. All
+Rigify does is it loops over all of the rig-type python objects that it created
+in the previous stage (also in root-most to leaf-most order), and executes
+their rig-generate code. All of the actual rig generation happens in the
+rig-type python objects.
+
+And that's pretty much it. As you can see, most of the important code is
+actually in the rig types themselves, not in Rigify. Rigify is pretty sparse
+when it comes right down to it.
+
+There is one final stage to rig generation. Rigify checks all of the bones
+for "DEF-", "MCH-", and "ORG-" prefixes, and moves those bones to their own
+layers. It also sets all of the "DEF-" bones to deform, and sets all other
+bones to _not_ deform. And finally, it looks for any bone that does not have
+a parent, and sets the root bone (which Rigify creates) as their parent.
+
+
+THE GUTS OF A RIG TYPE, BASIC
+-----------------------------
+A rig type is simply a python module containing a class named "Rig". The Rig
+class is only required to have two methods: __init__() and generate()
+
+__init__() is the "information gathering" code for the rig type. When Rigify
+loops through the bones and finds a tagged bone, it will create a python
+object from the Rig class, executing this method.
+In addition to the default "self" parameter, __init__() needs to take the
+armature object, the name of the bone that was tagged, and a parameters object.
+
+A proper rig-type __init__() will look like this:
+
+ def __init__(self, obj, bone_name, params):
+ # code goes here
+
+At the bare minimum, you are going to want to store the object and bone name
+in the rig type object for later reference in the generate method. So:
+
+ def __init__(self, obj, bone_name, params):
+ self.obj = obj
+ self.org_bone = bone_name
+
+Most rig types involve more than just that one bone, though, so you will also
+want to store the names of any other relevant bones. For example, maybe the
+parent of the tagged bone is important to the rig type:
+
+ def __init__(self, obj, bone_name, params):
+ self.obj = obj
+ self.org_bone = bone_name
+ self.org_parent = obj.data.bones[bone_name].parent.name
+
+It is important that you store the _names_ of the bones, and not direct
+references. Due to the inner workings of Blender's armature system, direct
+edit-bone and pose-bone references are lost when flipping in and out of
+armature edit mode. (Arg...)
+
+Remember that it is critical that the information-gathering method does _not_
+modify the armature in any way. This way all of the rig type's info-gathering
+methods can execute on a clean armature. Many rig types depend on traversing
+parent-child relationships to figure out what bones are relevant to them, for
+example.
+
+
+Next is the generate() method. This is the method that Rigify calls to
+actually generate the rig. It takes the form:
+
+ def generate(self):
+ # code goes here
+
+It doesn't take any parameters beyond "self". So you have to store any
+information you need with the __init__() method.
+
+generate() pretty much has free rein to do whatever it wants, with the exception
+of two simple rules:
+1. Other than the "ORG-" bones, do not touch anything that is not created by
+the rig type (this prevents rig types from messing each other up).
+2. Even with "ORG-" bones, the only thing you are allowed to do is add children
+and add constraints. Do not rename them, do not remove children or
+constraints, and especially do not change their parents. (Adding constraints
+and adding children are encouraged, though. ;-)) This is because the "ORG-"
+bones are the glue that holds everything together, and changing them beyond
+adding children/constraints ruins the glue, so to speak.
+
+In short: with the exception of adding children/constraints to "ORG-"
+bones, only mess with things that you yourself create.
+
+It is also generally a good idea (though not strictly required) that the rig
+type add constraints to the "ORG-" bones it was generated from so that the
+"ORG-" bones move with the animation controls.
+For example, if I make a simple arm rig type, the controls that the animator
+uses should also move the "ORG-" bones. That way, any other rig-types that are
+children of those "ORG-" bones will move along with them. For example, any
+fingers on the end of the arm.
+
+Also, any bones that the animator should not directly animate with should have
+their names prefixed with "DEF-" or "MCH-". The former if it is a bone that
+is intended to deform the mesh, the latter if it is not.
+It should be obvious, then, that a bone cannot be both an animation control and
+a deforming bone in Rigify. This is on purpose.
+
+Also note that there are convenience functions in utils.py for prepending
+"DEF-" and "MCH-" to bone names: deformer() and mch()
+There is also a convenience function for stripping "ORG-" from a bone name:
+strip_org()
+Which is useful for removing "ORG-" from bones you create by duplicating
+the "ORG-" bones.
+I recommend you use these functions instead of manually adding/stripping
+these prefixes. That way if the prefixes are changed, it can be changed in
+one place (those functions) and all the rig types will still work.
+
+
+THE GUTS OF A RIG TYPE, ADVANCED
+--------------------------------
+If you look at any of the rig types included with Rigify, you'll note that they
+have several more methods than just __init__() and generate().
+THESE ADDITIONAL METHODS ARE _NOT_ REQUIRED for a rig type to function. But
+they can add some nifty functionality to your rig.
+
+Not all of the additional methods you see in the included rig types have any
+special purpose for Rigify, however. For example, I often create separate
+methods for generating the deformation and control rigs, and then call them
+both from the main generate() method. But that is just for organization, and
+has nothing to do with Rigify itself.
+
+Here are the additional methods relevant to Rigify, with brief descriptions of
+what they are for:
+
+
+RIG PARAMETERS
+--------------
+For many rig types, it is handy for the user to be able to tweak how they are
+generated. For example, the included biped arm rig allows the user to specify
+the axis of rotation for the elbow.
+
+There are two methods necessary to give a rig type user-tweakable parameters,
+both of which must be class methods:
+add_parameters()
+parameters_ui()
+
+add_parameters() takes an IDPropertyGroup as input, and adds its parameters
+to that group as RNA properties. For example:
+
+ @classmethod
+ def add_parameters(self, group):
+ group.toggle_param = bpy.props.BoolProperty(name="Test toggle:", default=False, description="Just a test, not really used for anything.")
+
+parameters_ui() receives a Blender UILayout object, the metarig object, and the
+tagged bone name. It creates a GUI in the UILayout for the user to tweak the
+parameters. For example:
+
+ @classmethod
+ def parameters_ui(self, layout, obj, bone):
+ params = obj.pose.bones[bone].rigify_parameters[0]
+ r = layout.row()
+ r.prop(params, "toggle_param")
+
+
+SAMPLE METARIG
+--------------
+It is a good idea for all rig types to have a sample metarig that the user can
+add to their own metarig. This is what the create_sample() method is for.
+Like the parameter methods above, create_sample() must be a class method.
+
+create_sample() takes the current armature object as input, and adds the bones
+for its rig-type's metarig. For example:
+
+ @classmethod
+ def create_sample(self, obj):
+ bpy.ops.object.mode_set(mode='EDIT')
+ arm = obj.data
+
+ bone = arm.edit_bones.new('Bone')
+ bone.head[:] = 0.0000, 0.0000, 0.0000
+ bone.tail[:] = 0.0000, 0.0000, 1.0000
+ bone.roll = 0.0000
+ bone.use_connect = False
+
+ bpy.ops.object.mode_set(mode='OBJECT')
+ pbone = obj.pose.bones[bone]
+ pbone.rigify_type = 'copy'
+ pbone.rigify_parameters.add()
+
+Obviously, this isn't something that you generally want to hand-code,
+especially with more complex samples. There is a function in utils.py
+that will generate the code for create_sample() for you, based on a selected
+armature. The function is called write_metarig()
+
+
+GENERATING A PYTHON UI
+----------------------
+The generate() method can also, optionally, return python code as a string.
+This python code is added to the "rig properties" panel that gets
+auto-generated along with the rig. This is useful for exposing things like
+IK/FK switches in a nice way to the animator.
+
+The string must be returned in a list:
+
+return ["my python code"]
+
+Otherwise it won't work.
+
diff --git a/rigify/__init__.py b/rigify/__init__.py
new file mode 100644
index 00000000..1de834aa
--- /dev/null
+++ b/rigify/__init__.py
@@ -0,0 +1,166 @@
+#====================== BEGIN GPL LICENSE BLOCK ======================
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+#======================= END GPL LICENSE BLOCK ========================
+
# Addon metadata shown in Blender's addon manager.
# BUG FIX: "location" appeared twice in this dict; the first value
# ("View3D > Add > Armature") was silently overwritten by the second.
# Only the effective value is kept.
bl_info = {
    "name": "Rigify",
    "author": "Nathan Vegdahl",
    "blender": (2, 5, 7),
    "api": 35622,
    "location": "Armature properties",
    "description": "Adds various Rig Templates",
    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
        "Scripts/Rigging/Rigify",
    "tracker_url": "http://projects.blender.org/tracker/index.php?"\
        "func=detail&aid=25546",
    "category": "Rigging"}
+
+
+if "bpy" in locals():
+ import imp
+ imp.reload(generate)
+ imp.reload(ui)
+ imp.reload(utils)
+ imp.reload(metarig_menu)
+else:
+ from . import generate, ui, utils, metarig_menu
+
+import bpy
+import bpy_types
+import os
+
+
def get_rig_list(path):
    """ Recursively searches for rig types, and returns a list.

    path -- search path relative to the rigs directory ("" for the top
    level); when recursing it is passed with a trailing separator.
    Returns a sorted list of rig module names; rigs found inside a
    sub-directory are returned as "subdir.rigname".
    """
    rigs = []
    MODULE_DIR = os.path.dirname(__file__)
    RIG_DIR_ABS = os.path.join(MODULE_DIR, utils.RIG_DIR)
    SEARCH_DIR_ABS = os.path.join(RIG_DIR_ABS, path)
    files = os.listdir(SEARCH_DIR_ABS)
    files.sort()

    for f in files:
        is_dir = os.path.isdir(os.path.join(SEARCH_DIR_ABS, f))  # Whether the file is a directory
        if f[0] in (".", "_"):
            # Skip hidden and private (underscore-prefixed) entries.
            pass
        elif f.count(".") >= 2 or (is_dir and "." in f):
            # A dot in a module/package name would break dotted imports.
            print("Warning: %r, filename contains a '.', skipping" % os.path.join(SEARCH_DIR_ABS, f))
        else:
            if is_dir:
                # Check directories
                module_name = os.path.join(path, f).replace(os.sep, ".")
                try:
                    rig = utils.get_rig_type(module_name)
                except ImportError as e:
                    print("Rigify: " + str(e))
                else:
                    # Check if it's a rig itself
                    if not hasattr(rig, "Rig"):
                        # Check for sub-rigs
                        ls = get_rig_list(os.path.join(path, f, ""))  # "" adds a final slash
                        rigs.extend(["%s.%s" % (f, l) for l in ls])
                    else:
                        rigs += [f]

            elif f.endswith(".py"):
                # Check straight-up python files
                t = f[:-3]
                module_name = os.path.join(path, t).replace(os.sep, ".")
                try:
                    # Only modules that actually expose a Rig class count.
                    utils.get_rig_type(module_name).Rig
                except (ImportError, AttributeError):
                    pass
                else:
                    rigs += [t]
    rigs.sort()
    return rigs
+
+
# Build the master list of available rig types once, at import time.
rig_list = get_rig_list("")


# Names of rig collections (sub-directories), in first-seen order,
# without duplicates.
collection_list = []
for entry in rig_list:
    parts = entry.split(".")
    if len(parts) >= 2 and parts[0] not in collection_list:
        collection_list.append(parts[0])


# Enum items for the collection selector; the two pseudo-entries come first.
col_enum_list = [("All", "All", ""), ("None", "None", "")]
col_enum_list += [(c, c, "") for c in collection_list]
+
+
class RigifyName(bpy.types.PropertyGroup):
    # Minimal name wrapper, used for the WindowManager rig-type collection.
    name = bpy.props.StringProperty()
+
+
class RigifyParameters(bpy.types.PropertyGroup):
    # Per-bone rig parameters; each rig type's add_parameters() injects its
    # own RNA properties into this group at register() time.
    name = bpy.props.StringProperty()
+
+
class RigifyArmatureLayer(bpy.types.PropertyGroup):
    # Per-armature layer description: a display name plus a row number —
    # presumably the button row in the generated rig_ui layer panel
    # (see layers_ui usage in generate.py); confirm against rig_ui_template.
    name = bpy.props.StringProperty(name="Layer Name", default=" ")
    row = bpy.props.IntProperty(name="Layer Row", default=1, min=1, max=32)
+
+
+##### REGISTER #####
+
def register():
    """Register Rigify's UI, metarig menu, property groups, and the RNA
    properties it attaches to PoseBone, Armature and WindowManager."""
    ui.register()
    metarig_menu.register()

    bpy.utils.register_class(RigifyName)
    bpy.utils.register_class(RigifyParameters)
    bpy.utils.register_class(RigifyArmatureLayer)

    # Per-bone data: the tagged rig type and its parameters.
    bpy.types.PoseBone.rigify_type = bpy.props.StringProperty(name="Rigify Type", description="Rig type for this bone.")
    bpy.types.PoseBone.rigify_parameters = bpy.props.CollectionProperty(type=RigifyParameters)

    bpy.types.Armature.rigify_layers = bpy.props.CollectionProperty(type=RigifyArmatureLayer)

    # Session-wide (not per-file) UI state lives on the WindowManager.
    IDStore = bpy.types.WindowManager
    IDStore.rigify_collection = bpy.props.EnumProperty(items=col_enum_list, default="All", name="Rigify Active Collection", description="The selected rig collection")
    IDStore.rigify_types = bpy.props.CollectionProperty(type=RigifyName)
    IDStore.rigify_active_type = bpy.props.IntProperty(name="Rigify Active Type", description="The selected rig type.")

    # Add rig parameters
    # Rig types without an add_parameters() classmethod are simply skipped.
    for rig in rig_list:
        r = utils.get_rig_type(rig).Rig
        try:
            r.add_parameters(RigifyParameters)
        except AttributeError:
            pass
+
+
def unregister():
    """Remove the RNA properties added in register() and unregister the
    property groups, metarig menu and UI (reverse order of register())."""
    del bpy.types.PoseBone.rigify_type
    del bpy.types.PoseBone.rigify_parameters

    # BUG FIX: rigify_layers is added to Armature in register() but was
    # never removed here, leaving a stale property after disabling the addon.
    del bpy.types.Armature.rigify_layers

    IDStore = bpy.types.WindowManager
    del IDStore.rigify_collection
    del IDStore.rigify_types
    del IDStore.rigify_active_type

    bpy.utils.unregister_class(RigifyName)
    bpy.utils.unregister_class(RigifyParameters)
    bpy.utils.unregister_class(RigifyArmatureLayer)

    metarig_menu.unregister()
    ui.unregister()
diff --git a/rigify/generate.py b/rigify/generate.py
new file mode 100644
index 00000000..153162bc
--- /dev/null
+++ b/rigify/generate.py
@@ -0,0 +1,428 @@
+#====================== BEGIN GPL LICENSE BLOCK ======================
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+#======================= END GPL LICENSE BLOCK ========================
+
+import bpy
+import re
+import time
+import traceback
+import sys
+from rna_prop_ui import rna_idprop_ui_prop_get
+from rigify.utils import MetarigError, new_bone, get_rig_type
+from rigify.utils import ORG_PREFIX, MCH_PREFIX, DEF_PREFIX, WGT_PREFIX, ROOT_NAME, make_original_name
+from rigify.utils import RIG_DIR
+from rigify.utils import create_root_widget
+from rigify.utils import random_id
+from rigify.utils import copy_attributes
+from rigify.rig_ui_template import UI_SLIDERS, layers_ui, UI_REGISTER
+from rigify import rigs
+
+RIG_MODULE = "rigs"
+ORG_LAYER = [n == 31 for n in range(0, 32)] # Armature layer that original bones should be moved to.
+MCH_LAYER = [n == 30 for n in range(0, 32)] # Armature layer that mechanism bones should be moved to.
+DEF_LAYER = [n == 29 for n in range(0, 32)] # Armature layer that deformation bones should be moved to.
+ROOT_LAYER = [n == 28 for n in range(0, 32)] # Armature layer that root bone should be moved to.
+
+
class Timer:
    """Tiny wall-clock stopwatch for reporting rig-generation stage times."""

    def __init__(self):
        # Timestamp of the previous tick (initially: construction time).
        self.timez = time.time()

    def tick(self, string):
        """Print `string` followed by seconds elapsed since the last tick,
        then restart the clock."""
        now = time.time()
        print("%s%.3f" % (string, now - self.timez))
        self.timez = now
+
+
+# TODO: generalize to take a group as input instead of an armature.
def generate_rig(context, metarig):
    """ Generates a rig from a metarig.

    context -- the active Blender context.
    metarig -- armature object whose pose bones are tagged with rigify_type.

    Side effects: creates (or reuses) an object named by the metarig's
    "rig_object_name" custom property (default "rig"), rebuilds its armature
    from a duplicate of the metarig, and rewrites the "rig_ui.py" text block.
    """
    t = Timer()

    # Random string with time appended so that
    # different rigs don't collide id's
    rig_id = random_id(16)

    # Initial configuration
    mode_orig = context.mode  # NOTE(review): unused; the original mode is never restored
    rest_backup = metarig.data.pose_position
    metarig.data.pose_position = 'REST'

    bpy.ops.object.mode_set(mode='OBJECT')

    scene = context.scene

    #------------------------------------------
    # Create/find the rig object and set it up

    # Check if the generated rig already exists, so we can
    # regenerate in the same object. If not, create a new
    # object to generate the rig in.
    print("Fetch rig.")
    try:
        name = metarig["rig_object_name"]
    except KeyError:
        name = "rig"

    try:
        obj = scene.objects[name]
    except KeyError:
        obj = bpy.data.objects.new(name, bpy.data.armatures.new(name))
        obj.draw_type = 'WIRE'
        scene.objects.link(obj)

    obj.data.pose_position = 'POSE'

    # Get rid of anim data in case the rig already existed
    print("Clear rig animation data.")
    obj.animation_data_clear()

    # Select generated rig object
    metarig.select = False
    obj.select = True
    scene.objects.active = obj

    # Remove all bones from the generated rig armature.
    bpy.ops.object.mode_set(mode='EDIT')
    for bone in obj.data.edit_bones:
        obj.data.edit_bones.remove(bone)
    bpy.ops.object.mode_set(mode='OBJECT')

    # Create temporary duplicates for merging.  Joining temp_rig_1 (a copy
    # of the metarig data) into temp_rig_2 (which shares obj's armature)
    # effectively copies the metarig's bones into obj's armature.
    temp_rig_1 = metarig.copy()
    temp_rig_1.data = metarig.data.copy()
    scene.objects.link(temp_rig_1)

    temp_rig_2 = metarig.copy()
    temp_rig_2.data = obj.data
    scene.objects.link(temp_rig_2)

    # Select the temp rigs for merging
    for objt in scene.objects:
        objt.select = False  # deselect all objects
    temp_rig_1.select = True
    temp_rig_2.select = True
    scene.objects.active = temp_rig_2

    # Merge the temporary rigs
    bpy.ops.object.join()

    # Delete the second temp rig
    bpy.ops.object.delete()

    # Select the generated rig
    for objt in scene.objects:
        objt.select = False  # deselect all objects
    obj.select = True
    scene.objects.active = obj

    # Copy over bone properties
    for bone in metarig.data.bones:
        bone_gen = obj.data.bones[bone.name]

        # B-bone stuff
        bone_gen.bbone_segments = bone.bbone_segments
        bone_gen.bbone_in = bone.bbone_in
        bone_gen.bbone_out = bone.bbone_out

    # Copy over the pose_bone properties
    for bone in metarig.pose.bones:
        bone_gen = obj.pose.bones[bone.name]

        # Rotation mode and transform locks
        bone_gen.rotation_mode = bone.rotation_mode
        bone_gen.lock_rotation = tuple(bone.lock_rotation)
        bone_gen.lock_rotation_w = bone.lock_rotation_w
        bone_gen.lock_rotations_4d = bone.lock_rotations_4d
        bone_gen.lock_location = tuple(bone.lock_location)
        bone_gen.lock_scale = tuple(bone.lock_scale)

        # Custom properties
        for prop in bone.keys():
            bone_gen[prop] = bone[prop]

        # Constraints
        for con1 in bone.constraints:
            con2 = bone_gen.constraints.new(type=con1.type)
            copy_attributes(con1, con2)

            # Set metarig target to rig target
            if "target" in dir(con2):
                if con2.target == metarig:
                    con2.target = obj

    # Copy drivers
    if metarig.animation_data:
        for d1 in metarig.animation_data.drivers:
            d2 = obj.driver_add(d1.data_path)
            copy_attributes(d1, d2)
            copy_attributes(d1.driver, d2.driver)

            # Remove default modifiers, variables, etc.
            for m in d2.modifiers:
                d2.modifiers.remove(m)
            for v in d2.driver.variables:
                d2.driver.variables.remove(v)

            # Copy modifiers
            for m1 in d1.modifiers:
                m2 = d2.modifiers.new(type=m1.type)
                copy_attributes(m1, m2)

            # Copy variables
            for v1 in d1.driver.variables:
                v2 = d2.driver.variables.new()
                copy_attributes(v1, v2)
                for i in range(len(v1.targets)):
                    copy_attributes(v1.targets[i], v2.targets[i])
                    # Switch metarig targets to rig targets
                    if v2.targets[i].id == metarig:
                        v2.targets[i].id = obj

                    # Mark targets that may need to be altered after rig generation
                    tar = v2.targets[i]
                    # If a custom property
                    if v2.type == 'SINGLE_PROP' \
                    and re.match('^pose.bones\["[^"\]]*"\]\["[^"\]]*"\]$', tar.data_path):
                        # "RIGIFY-" marker is resolved after generation, below.
                        tar.data_path = "RIGIFY-" + tar.data_path

            # Copy key frames
            for i in range(len(d1.keyframe_points)):
                d2.keyframe_points.add()
                k1 = d1.keyframe_points[i]
                k2 = d2.keyframe_points[i]
                copy_attributes(k1, k2)

    t.tick("Duplicate rig: ")
    #----------------------------------
    # Make a list of the original bones so we can keep track of them.
    original_bones = [bone.name for bone in obj.data.bones]

    # Add the ORG_PREFIX to the original bones.
    bpy.ops.object.mode_set(mode='OBJECT')
    for i in range(0, len(original_bones)):
        obj.data.bones[original_bones[i]].name = make_original_name(original_bones[i])
        original_bones[i] = make_original_name(original_bones[i])

    # Create a sorted list of the original bones, sorted in the order we're
    # going to traverse them for rigging.
    # (root-most -> leaf-most, alphabetical)
    bones_sorted = []
    for name in original_bones:
        bones_sorted += [name]
    bones_sorted.sort()  # first sort by names
    bones_sorted.sort(key=lambda bone: len(obj.pose.bones[bone].parent_recursive))  # then parents before children

    t.tick("Make list of org bones: ")
    #----------------------------------
    # Create the root bone.
    bpy.ops.object.mode_set(mode='EDIT')
    root_bone = new_bone(obj, ROOT_NAME)
    obj.data.edit_bones[root_bone].head = (0, 0, 0)
    obj.data.edit_bones[root_bone].tail = (0, 1, 0)
    obj.data.edit_bones[root_bone].roll = 0
    bpy.ops.object.mode_set(mode='OBJECT')
    obj.data.bones[root_bone].layers = ROOT_LAYER
    # Put the rig_name in the armature custom properties
    rna_idprop_ui_prop_get(obj.data, "rig_id", create=True)
    obj.data["rig_id"] = rig_id

    t.tick("Create root bone: ")
    #----------------------------------
    try:
        # Collect/initialize all the rigs.
        rigs = []
        deformation_rigs = []  # NOTE(review): appears unused — confirm
        for bone in bones_sorted:
            bpy.ops.object.mode_set(mode='EDIT')
            rigs += get_bone_rigs(obj, bone)
        t.tick("Initialize rigs: ")

        # Generate all the rigs.
        ui_scripts = []
        for rig in rigs:
            # Go into editmode in the rig armature
            bpy.ops.object.mode_set(mode='OBJECT')
            context.scene.objects.active = obj
            obj.select = True
            bpy.ops.object.mode_set(mode='EDIT')
            scripts = rig.generate()
            if scripts != None:
                # A rig type may return [python_code] for the rig UI panel.
                ui_scripts += [scripts[0]]
        t.tick("Generate rigs: ")
    except Exception as e:
        # Cleanup if something goes wrong
        print("Rigify: failed to generate rig.")
        metarig.data.pose_position = rest_backup
        obj.data.pose_position = 'POSE'
        bpy.ops.object.mode_set(mode='OBJECT')

        # Continue the exception
        raise e

    #----------------------------------
    bpy.ops.object.mode_set(mode='OBJECT')

    # Get a list of all the bones in the armature
    bones = [bone.name for bone in obj.data.bones]

    # Parent any free-floating bones to the root.
    bpy.ops.object.mode_set(mode='EDIT')
    for bone in bones:
        if obj.data.edit_bones[bone].parent is None:
            obj.data.edit_bones[bone].use_connect = False
            obj.data.edit_bones[bone].parent = obj.data.edit_bones[root_bone]
    bpy.ops.object.mode_set(mode='OBJECT')

    # Every bone that has a name starting with "DEF-" make deforming. All the
    # others make non-deforming.
    for bone in bones:
        if obj.data.bones[bone].name.startswith(DEF_PREFIX):
            obj.data.bones[bone].use_deform = True
        else:
            obj.data.bones[bone].use_deform = False

    # Alter marked driver targets
    # (resolves the "RIGIFY-" markers added during driver copying above)
    for d in obj.animation_data.drivers:
        for v in d.driver.variables:
            for tar in v.targets:
                if tar.data_path.startswith("RIGIFY-"):
                    temp, bone, prop = tuple([x.strip('"]') for x in tar.data_path.split('["')])
                    if bone in obj.data.bones \
                    and prop in obj.pose.bones[bone].keys():
                        # Strip the 7-character "RIGIFY-" marker.
                        tar.data_path = tar.data_path[7:]
                    else:
                        tar.data_path = 'pose.bones["%s"]["%s"]' % (make_original_name(bone), prop)

    # Move all the original bones to their layer.
    for bone in original_bones:
        obj.data.bones[bone].layers = ORG_LAYER

    # Move all the bones with names starting with "MCH-" to their layer.
    for bone in bones:
        if obj.data.bones[bone].name.startswith(MCH_PREFIX):
            obj.data.bones[bone].layers = MCH_LAYER

    # Move all the bones with names starting with "DEF-" to their layer.
    for bone in bones:
        if obj.data.bones[bone].name.startswith(DEF_PREFIX):
            obj.data.bones[bone].layers = DEF_LAYER

    # Create root bone widget
    create_root_widget(obj, "root")

    # Assign shapes to bones
    # Object's with name WGT-<bone_name> get used as that bone's shape.
    for bone in bones:
        wgt_name = (WGT_PREFIX + obj.data.bones[bone].name)[:21]  # Object names are limited to 21 characters... arg
        if wgt_name in context.scene.objects:
            # Weird temp thing because it won't let me index by object name
            for ob in context.scene.objects:
                if ob.name == wgt_name:
                    obj.pose.bones[bone].custom_shape = ob
                    break
            # This is what it should do:
            # obj.pose.bones[bone].custom_shape = context.scene.objects[wgt_name]
    # Reveal all the layers with control bones on them
    vis_layers = [False for n in range(0, 32)]
    for bone in bones:
        for i in range(0, 32):
            vis_layers[i] = vis_layers[i] or obj.data.bones[bone].layers[i]
    for i in range(0, 32):
        vis_layers[i] = vis_layers[i] and not (ORG_LAYER[i] or MCH_LAYER[i] or DEF_LAYER[i])
    obj.data.layers = vis_layers

    # Ensure the collection of layer names exists
    for i in range(1 + len(metarig.data.rigify_layers), 29):
        layer = metarig.data.rigify_layers.add()

    # Create list of layer name/row pairs
    layer_layout = []
    for l in metarig.data.rigify_layers:
        layer_layout += [(l.name, l.row)]

    # Generate the UI script
    if "rig_ui.py" in bpy.data.texts:
        script = bpy.data.texts["rig_ui.py"]
        script.clear()
    else:
        script = bpy.data.texts.new("rig_ui.py")
    script.write(UI_SLIDERS % rig_id)
    for s in ui_scripts:
        # Indent each rig's UI snippet to fit inside the panel's draw().
        script.write("\n    " + s.replace("\n", "\n    ") + "\n")
    script.write(layers_ui(vis_layers, layer_layout))
    script.write(UI_REGISTER)
    script.use_module = True

    # Run UI script
    exec(script.as_string(), {})

    t.tick("The rest: ")
    #----------------------------------
    # Deconfigure
    bpy.ops.object.mode_set(mode='OBJECT')
    metarig.data.pose_position = rest_backup
    obj.data.pose_position = 'POSE'
+
+
def get_bone_rigs(obj, bone_name, halt_on_missing=False):
    """ Fetch all the rigs specified on a bone.

    obj -- the armature object being generated.
    bone_name -- pose bone whose rigify_type tag is inspected.
    halt_on_missing -- if True, raise MetarigError when the tagged rig
    type's python module cannot be imported; otherwise just report it.
    Returns a list with the instantiated Rig object, or an empty list.
    """
    rigs = []
    rig_type = obj.pose.bones[bone_name].rigify_type
    rig_type = rig_type.replace(" ", "")

    if rig_type == "":
        pass
    else:
        # Gather parameters (may legitimately be absent).
        try:
            params = obj.pose.bones[bone_name].rigify_parameters[0]
        except (KeyError, IndexError):
            params = None

        # Get the rig
        try:
            rig = get_rig_type(rig_type).Rig(obj, bone_name, params)
        except ImportError:
            # BUG FIX: this message previously interpolated an undefined
            # name 't', raising a NameError instead of reporting the
            # missing rig type.
            message = "Rig Type Missing: python module for type '%s' not found (bone: %s)" % (rig_type, bone_name)
            if halt_on_missing:
                raise MetarigError(message)
            else:
                print(message)
                print('print_exc():')
                traceback.print_exc(file=sys.stdout)
        else:
            rigs += [rig]
    return rigs
+
+
def param_matches_type(param_name, rig_type):
    """ Returns True if the parameter name is consistent with the rig type.
    """
    # A parameter belongs to a rig type when everything before its last
    # "." equals the rig type name.
    return param_name.rsplit(".", 1)[0] == rig_type
+
+
def param_name(param_name, rig_type):
    """ Get the actual parameter name, sans-rig-type.
    """
    # Drop the "<rig_type>." prefix (type name plus the dot).
    prefix_length = len(rig_type) + 1
    return param_name[prefix_length:]
diff --git a/rigify/metarig_menu.py b/rigify/metarig_menu.py
new file mode 100644
index 00000000..56b82370
--- /dev/null
+++ b/rigify/metarig_menu.py
@@ -0,0 +1,58 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+import bpy
+import mathutils
+from rigify.metarigs import human
+from math import cos, sin, pi
+
+
class AddHuman(bpy.types.Operator):
    '''Add an advanced human metarig base'''
    bl_idname = "object.armature_human_advanced_add"
    bl_label = "Add Humanoid (advanced metarig)"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        # Start from Blender's default single-bone armature, then delete
        # that bone so human.create() can build the metarig from scratch.
        bpy.ops.object.armature_add()
        obj = context.active_object
        mode_orig = obj.mode
        bpy.ops.object.mode_set(mode='EDIT')  # grr, remove bone
        bones = context.active_object.data.edit_bones
        bones.remove(bones[0])
        # Populate the armature with the human metarig bones and layers.
        human.create(obj)
        # Restore whatever mode the object was in before.
        bpy.ops.object.mode_set(mode=mode_orig)
        return {'FINISHED'}
+
+
+# Add to a menu
# Menu callback adding the human metarig entry to the Add > Armature menu.
# IDIOM FIX: was an assigned lambda (PEP 8 E731); a plain def is
# call-compatible and easier to debug.
def menu_func(self, context):
    self.layout.operator(AddHuman.bl_idname,
                         icon='OUTLINER_OB_ARMATURE', text="Human (Meta-Rig)")
+
+
def register():
    """Register the metarig operator and add its entry to Add > Armature."""
    bpy.utils.register_class(AddHuman)

    bpy.types.INFO_MT_armature_add.append(menu_func)
+
+
def unregister():
    """Remove the menu entry and unregister the operator (reverse of register())."""
    bpy.utils.unregister_class(AddHuman)

    bpy.types.INFO_MT_armature_add.remove(menu_func)
+
diff --git a/rigify/metarigs/__init__.py b/rigify/metarigs/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rigify/metarigs/__init__.py
diff --git a/rigify/metarigs/human.py b/rigify/metarigs/human.py
new file mode 100644
index 00000000..cfc9f038
--- /dev/null
+++ b/rigify/metarigs/human.py
@@ -0,0 +1,1149 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+import bpy
+
+
+def create(obj):
+ # generated by rigify.utils.write_metarig
+ bpy.ops.object.mode_set(mode='EDIT')
+ arm = obj.data
+
+ for i in range(28):
+ arm.rigify_layers.add()
+
+ arm.rigify_layers[0].name = "Torso"
+ arm.rigify_layers[0].row = 2
+ arm.rigify_layers[2].name = "Head"
+ arm.rigify_layers[2].row = 1
+ arm.rigify_layers[4].name = "Fingers"
+ arm.rigify_layers[4].row = 3
+ arm.rigify_layers[5].name = "(Tweak)"
+ arm.rigify_layers[5].row = 3
+ arm.rigify_layers[6].name = "Arm.L (FK)"
+ arm.rigify_layers[6].row = 4
+ arm.rigify_layers[7].name = "Arm.L (IK)"
+ arm.rigify_layers[7].row = 5
+ arm.rigify_layers[8].name = "Arm.R (FK)"
+ arm.rigify_layers[8].row = 4
+ arm.rigify_layers[9].name = "Arm.R (IK)"
+ arm.rigify_layers[9].row = 5
+ arm.rigify_layers[10].name = "Leg.L (FK)"
+ arm.rigify_layers[10].row = 6
+ arm.rigify_layers[11].name = "Leg.L (IK)"
+ arm.rigify_layers[11].row = 7
+ arm.rigify_layers[12].name = "Leg.R (FK)"
+ arm.rigify_layers[12].row = 6
+ arm.rigify_layers[13].name = "Leg.R (IK)"
+ arm.rigify_layers[13].row = 7
+
+ bones = {}
+
+ bone = arm.edit_bones.new('hips')
+ bone.head[:] = 0.0000, 0.0552, 1.0099
+ bone.tail[:] = 0.0000, 0.0172, 1.1837
+ bone.roll = 0.0000
+ bone.use_connect = False
+ bones['hips'] = bone.name
+ bone = arm.edit_bones.new('spine')
+ bone.head[:] = 0.0000, 0.0172, 1.1837
+ bone.tail[:] = 0.0000, 0.0004, 1.3418
+ bone.roll = 0.0000
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['hips']]
+ bones['spine'] = bone.name
+ bone = arm.edit_bones.new('thigh.L')
+ bone.head[:] = 0.0980, 0.0124, 1.0720
+ bone.tail[:] = 0.0980, -0.0286, 0.5372
+ bone.roll = 0.0000
+ bone.use_connect = False
+ bone.parent = arm.edit_bones[bones['hips']]
+ bones['thigh.L'] = bone.name
+ bone = arm.edit_bones.new('thigh.R')
+ bone.head[:] = -0.0980, 0.0124, 1.0720
+ bone.tail[:] = -0.0980, -0.0286, 0.5372
+ bone.roll = 0.0000
+ bone.use_connect = False
+ bone.parent = arm.edit_bones[bones['hips']]
+ bones['thigh.R'] = bone.name
+ bone = arm.edit_bones.new('ribs')
+ bone.head[:] = 0.0000, 0.0004, 1.3418
+ bone.tail[:] = 0.0000, 0.0114, 1.6582
+ bone.roll = 0.0000
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['spine']]
+ bones['ribs'] = bone.name
+ bone = arm.edit_bones.new('shin.L')
+ bone.head[:] = 0.0980, -0.0286, 0.5372
+ bone.tail[:] = 0.0980, 0.0162, 0.0852
+ bone.roll = 0.0000
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['thigh.L']]
+ bones['shin.L'] = bone.name
+ bone = arm.edit_bones.new('shin.R')
+ bone.head[:] = -0.0980, -0.0286, 0.5372
+ bone.tail[:] = -0.0980, 0.0162, 0.0852
+ bone.roll = 0.0000
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['thigh.R']]
+ bones['shin.R'] = bone.name
+ bone = arm.edit_bones.new('neck')
+ bone.head[:] = 0.0000, 0.0114, 1.6582
+ bone.tail[:] = 0.0000, -0.0247, 1.7813
+ bone.roll = 0.0000
+ bone.use_connect = False
+ bone.parent = arm.edit_bones[bones['ribs']]
+ bones['neck'] = bone.name
+ bone = arm.edit_bones.new('shoulder.L')
+ bone.head[:] = 0.0183, -0.0684, 1.6051
+ bone.tail[:] = 0.1694, 0.0205, 1.6050
+ bone.roll = 0.0004
+ bone.use_connect = False
+ bone.parent = arm.edit_bones[bones['ribs']]
+ bones['shoulder.L'] = bone.name
+ bone = arm.edit_bones.new('shoulder.R')
+ bone.head[:] = -0.0183, -0.0684, 1.6051
+ bone.tail[:] = -0.1694, 0.0205, 1.6050
+ bone.roll = -0.0004
+ bone.use_connect = False
+ bone.parent = arm.edit_bones[bones['ribs']]
+ bones['shoulder.R'] = bone.name
+ bone = arm.edit_bones.new('foot.L')
+ bone.head[:] = 0.0980, 0.0162, 0.0852
+ bone.tail[:] = 0.0980, -0.0934, 0.0167
+ bone.roll = 0.0000
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['shin.L']]
+ bones['foot.L'] = bone.name
+ bone = arm.edit_bones.new('heel.L')
+ bone.head[:] = 0.0980, 0.0162, 0.0852
+ bone.tail[:] = 0.0980, 0.0882, -0.0000
+ bone.roll = -3.1416
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['shin.L']]
+ bones['heel.L'] = bone.name
+ bone = arm.edit_bones.new('heel.02.L')
+ bone.head[:] = 0.0600, 0.0000, 0.0000
+ bone.tail[:] = 0.1400, 0.0000, 0.0000
+ bone.roll = 0.0000
+ bone.use_connect = False
+ bone.parent = arm.edit_bones[bones['heel.L']]
+ bones['heel.02.L'] = bone.name
+ bone = arm.edit_bones.new('foot.R')
+ bone.head[:] = -0.0980, 0.0162, 0.0852
+ bone.tail[:] = -0.0980, -0.0934, 0.0167
+ bone.roll = -0.0000
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['shin.R']]
+ bones['foot.R'] = bone.name
+ bone = arm.edit_bones.new('heel.R')
+ bone.head[:] = -0.0980, 0.0162, 0.0852
+ bone.tail[:] = -0.0980, 0.0882, -0.0000
+ bone.roll = 3.1416
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['shin.R']]
+ bones['heel.R'] = bone.name
+ bone = arm.edit_bones.new('heel.02.R')
+ bone.head[:] = -0.0600, 0.0000, 0.0000
+ bone.tail[:] = -0.1400, 0.0000, 0.0000
+ bone.roll = 0.0000
+ bone.use_connect = False
+ bone.parent = arm.edit_bones[bones['heel.R']]
+ bones['heel.02.R'] = bone.name
+ bone = arm.edit_bones.new('head')
+ bone.head[:] = 0.0000, -0.0247, 1.7813
+ bone.tail[:] = 0.0000, -0.0247, 1.9347
+ bone.roll = 0.0000
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['neck']]
+ bones['head'] = bone.name
+ bone = arm.edit_bones.new('upper_arm.L')
+ bone.head[:] = 0.1953, 0.0267, 1.5846
+ bone.tail[:] = 0.4424, 0.0885, 1.4491
+ bone.roll = 2.0691
+ bone.use_connect = False
+ bone.parent = arm.edit_bones[bones['shoulder.L']]
+ bones['upper_arm.L'] = bone.name
+ bone = arm.edit_bones.new('upper_arm.R')
+ bone.head[:] = -0.1953, 0.0267, 1.5846
+ bone.tail[:] = -0.4424, 0.0885, 1.4491
+ bone.roll = -2.0691
+ bone.use_connect = False
+ bone.parent = arm.edit_bones[bones['shoulder.R']]
+ bones['upper_arm.R'] = bone.name
+ bone = arm.edit_bones.new('toe.L')
+ bone.head[:] = 0.0980, -0.0934, 0.0167
+ bone.tail[:] = 0.0980, -0.1606, 0.0167
+ bone.roll = -0.0000
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['foot.L']]
+ bones['toe.L'] = bone.name
+ bone = arm.edit_bones.new('toe.R')
+ bone.head[:] = -0.0980, -0.0934, 0.0167
+ bone.tail[:] = -0.0980, -0.1606, 0.0167
+ bone.roll = 0.0000
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['foot.R']]
+ bones['toe.R'] = bone.name
+ bone = arm.edit_bones.new('forearm.L')
+ bone.head[:] = 0.4424, 0.0885, 1.4491
+ bone.tail[:] = 0.6594, 0.0492, 1.3061
+ bone.roll = 2.1459
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['upper_arm.L']]
+ bones['forearm.L'] = bone.name
+ bone = arm.edit_bones.new('forearm.R')
+ bone.head[:] = -0.4424, 0.0885, 1.4491
+ bone.tail[:] = -0.6594, 0.0492, 1.3061
+ bone.roll = -2.1459
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['upper_arm.R']]
+ bones['forearm.R'] = bone.name
+ bone = arm.edit_bones.new('hand.L')
+ bone.head[:] = 0.6594, 0.0492, 1.3061
+ bone.tail[:] = 0.7234, 0.0412, 1.2585
+ bone.roll = -2.4946
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['forearm.L']]
+ bones['hand.L'] = bone.name
+ bone = arm.edit_bones.new('hand.R')
+ bone.head[:] = -0.6594, 0.0492, 1.3061
+ bone.tail[:] = -0.7234, 0.0412, 1.2585
+ bone.roll = 2.4946
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['forearm.R']]
+ bones['hand.R'] = bone.name
+ bone = arm.edit_bones.new('palm.01.L')
+ bone.head[:] = 0.6921, 0.0224, 1.2882
+ bone.tail[:] = 0.7464, 0.0051, 1.2482
+ bone.roll = -2.4928
+ bone.use_connect = False
+ bone.parent = arm.edit_bones[bones['hand.L']]
+ bones['palm.01.L'] = bone.name
+ bone = arm.edit_bones.new('palm.02.L')
+ bone.head[:] = 0.6970, 0.0389, 1.2877
+ bone.tail[:] = 0.7518, 0.0277, 1.2487
+ bone.roll = -2.5274
+ bone.use_connect = False
+ bone.parent = arm.edit_bones[bones['hand.L']]
+ bones['palm.02.L'] = bone.name
+ bone = arm.edit_bones.new('palm.03.L')
+ bone.head[:] = 0.6963, 0.0545, 1.2874
+ bone.tail[:] = 0.7540, 0.0521, 1.2482
+ bone.roll = -2.5843
+ bone.use_connect = False
+ bone.parent = arm.edit_bones[bones['hand.L']]
+ bones['palm.03.L'] = bone.name
+ bone = arm.edit_bones.new('palm.04.L')
+ bone.head[:] = 0.6929, 0.0696, 1.2871
+ bone.tail[:] = 0.7528, 0.0763, 1.2428
+ bone.roll = -2.5155
+ bone.use_connect = False
+ bone.parent = arm.edit_bones[bones['hand.L']]
+ bones['palm.04.L'] = bone.name
+ bone = arm.edit_bones.new('palm.01.R')
+ bone.head[:] = -0.6921, 0.0224, 1.2882
+ bone.tail[:] = -0.7464, 0.0051, 1.2482
+ bone.roll = 2.4928
+ bone.use_connect = False
+ bone.parent = arm.edit_bones[bones['hand.R']]
+ bones['palm.01.R'] = bone.name
+ bone = arm.edit_bones.new('palm.02.R')
+ bone.head[:] = -0.6970, 0.0389, 1.2877
+ bone.tail[:] = -0.7518, 0.0277, 1.2487
+ bone.roll = 2.5274
+ bone.use_connect = False
+ bone.parent = arm.edit_bones[bones['hand.R']]
+ bones['palm.02.R'] = bone.name
+ bone = arm.edit_bones.new('palm.03.R')
+ bone.head[:] = -0.6963, 0.0544, 1.2874
+ bone.tail[:] = -0.7540, 0.0521, 1.2482
+ bone.roll = 2.5843
+ bone.use_connect = False
+ bone.parent = arm.edit_bones[bones['hand.R']]
+ bones['palm.03.R'] = bone.name
+ bone = arm.edit_bones.new('palm.04.R')
+ bone.head[:] = -0.6929, 0.0696, 1.2871
+ bone.tail[:] = -0.7528, 0.0763, 1.2428
+ bone.roll = 2.5155
+ bone.use_connect = False
+ bone.parent = arm.edit_bones[bones['hand.R']]
+ bones['palm.04.R'] = bone.name
+ bone = arm.edit_bones.new('finger_index.01.L')
+ bone.head[:] = 0.7464, 0.0051, 1.2482
+ bone.tail[:] = 0.7718, 0.0013, 1.2112
+ bone.roll = -2.0315
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['palm.01.L']]
+ bones['finger_index.01.L'] = bone.name
+ bone = arm.edit_bones.new('thumb.01.L')
+ bone.head[:] = 0.6705, 0.0214, 1.2738
+ bone.tail[:] = 0.6857, 0.0015, 1.2404
+ bone.roll = -0.1587
+ bone.use_connect = False
+ bone.parent = arm.edit_bones[bones['palm.01.L']]
+ bones['thumb.01.L'] = bone.name
+ bone = arm.edit_bones.new('finger_middle.01.L')
+ bone.head[:] = 0.7518, 0.0277, 1.2487
+ bone.tail[:] = 0.7762, 0.0234, 1.2058
+ bone.roll = -2.0067
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['palm.02.L']]
+ bones['finger_middle.01.L'] = bone.name
+ bone = arm.edit_bones.new('finger_ring.01.L')
+ bone.head[:] = 0.7540, 0.0521, 1.2482
+ bone.tail[:] = 0.7715, 0.0499, 1.2070
+ bone.roll = -2.0082
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['palm.03.L']]
+ bones['finger_ring.01.L'] = bone.name
+ bone = arm.edit_bones.new('finger_pinky.01.L')
+ bone.head[:] = 0.7528, 0.0763, 1.2428
+ bone.tail[:] = 0.7589, 0.0765, 1.2156
+ bone.roll = -1.9749
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['palm.04.L']]
+ bones['finger_pinky.01.L'] = bone.name
+ bone = arm.edit_bones.new('finger_index.01.R')
+ bone.head[:] = -0.7464, 0.0051, 1.2482
+ bone.tail[:] = -0.7718, 0.0012, 1.2112
+ bone.roll = 2.0315
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['palm.01.R']]
+ bones['finger_index.01.R'] = bone.name
+ bone = arm.edit_bones.new('thumb.01.R')
+ bone.head[:] = -0.6705, 0.0214, 1.2738
+ bone.tail[:] = -0.6857, 0.0015, 1.2404
+ bone.roll = 0.1587
+ bone.use_connect = False
+ bone.parent = arm.edit_bones[bones['palm.01.R']]
+ bones['thumb.01.R'] = bone.name
+ bone = arm.edit_bones.new('finger_middle.01.R')
+ bone.head[:] = -0.7518, 0.0277, 1.2487
+ bone.tail[:] = -0.7762, 0.0233, 1.2058
+ bone.roll = 2.0067
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['palm.02.R']]
+ bones['finger_middle.01.R'] = bone.name
+ bone = arm.edit_bones.new('finger_ring.01.R')
+ bone.head[:] = -0.7540, 0.0521, 1.2482
+ bone.tail[:] = -0.7715, 0.0499, 1.2070
+ bone.roll = 2.0082
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['palm.03.R']]
+ bones['finger_ring.01.R'] = bone.name
+ bone = arm.edit_bones.new('finger_pinky.01.R')
+ bone.head[:] = -0.7528, 0.0763, 1.2428
+ bone.tail[:] = -0.7589, 0.0765, 1.2156
+ bone.roll = 1.9749
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['palm.04.R']]
+ bones['finger_pinky.01.R'] = bone.name
+ bone = arm.edit_bones.new('finger_index.02.L')
+ bone.head[:] = 0.7718, 0.0013, 1.2112
+ bone.tail[:] = 0.7840, -0.0003, 1.1858
+ bone.roll = -1.8799
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['finger_index.01.L']]
+ bones['finger_index.02.L'] = bone.name
+ bone = arm.edit_bones.new('thumb.02.L')
+ bone.head[:] = 0.6857, 0.0015, 1.2404
+ bone.tail[:] = 0.7056, -0.0057, 1.2145
+ bone.roll = -0.4798
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['thumb.01.L']]
+ bones['thumb.02.L'] = bone.name
+ bone = arm.edit_bones.new('finger_middle.02.L')
+ bone.head[:] = 0.7762, 0.0234, 1.2058
+ bone.tail[:] = 0.7851, 0.0218, 1.1749
+ bone.roll = -1.8283
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['finger_middle.01.L']]
+ bones['finger_middle.02.L'] = bone.name
+ bone = arm.edit_bones.new('finger_ring.02.L')
+ bone.head[:] = 0.7715, 0.0499, 1.2070
+ bone.tail[:] = 0.7794, 0.0494, 1.1762
+ bone.roll = -1.8946
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['finger_ring.01.L']]
+ bones['finger_ring.02.L'] = bone.name
+ bone = arm.edit_bones.new('finger_pinky.02.L')
+ bone.head[:] = 0.7589, 0.0765, 1.2156
+ bone.tail[:] = 0.7618, 0.0770, 1.1932
+ bone.roll = -1.9059
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['finger_pinky.01.L']]
+ bones['finger_pinky.02.L'] = bone.name
+ bone = arm.edit_bones.new('finger_index.02.R')
+ bone.head[:] = -0.7718, 0.0012, 1.2112
+ bone.tail[:] = -0.7840, -0.0003, 1.1858
+ bone.roll = 1.8799
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['finger_index.01.R']]
+ bones['finger_index.02.R'] = bone.name
+ bone = arm.edit_bones.new('thumb.02.R')
+ bone.head[:] = -0.6857, 0.0015, 1.2404
+ bone.tail[:] = -0.7056, -0.0057, 1.2145
+ bone.roll = 0.4798
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['thumb.01.R']]
+ bones['thumb.02.R'] = bone.name
+ bone = arm.edit_bones.new('finger_middle.02.R')
+ bone.head[:] = -0.7762, 0.0233, 1.2058
+ bone.tail[:] = -0.7851, 0.0218, 1.1749
+ bone.roll = 1.8283
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['finger_middle.01.R']]
+ bones['finger_middle.02.R'] = bone.name
+ bone = arm.edit_bones.new('finger_ring.02.R')
+ bone.head[:] = -0.7715, 0.0499, 1.2070
+ bone.tail[:] = -0.7794, 0.0494, 1.1762
+ bone.roll = 1.8946
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['finger_ring.01.R']]
+ bones['finger_ring.02.R'] = bone.name
+ bone = arm.edit_bones.new('finger_pinky.02.R')
+ bone.head[:] = -0.7589, 0.0765, 1.2156
+ bone.tail[:] = -0.7618, 0.0770, 1.1932
+ bone.roll = 1.9059
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['finger_pinky.01.R']]
+ bones['finger_pinky.02.R'] = bone.name
+ bone = arm.edit_bones.new('finger_index.03.L')
+ bone.head[:] = 0.7840, -0.0003, 1.1858
+ bone.tail[:] = 0.7892, 0.0006, 1.1636
+ bone.roll = -1.6760
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['finger_index.02.L']]
+ bones['finger_index.03.L'] = bone.name
+ bone = arm.edit_bones.new('thumb.03.L')
+ bone.head[:] = 0.7056, -0.0057, 1.2145
+ bone.tail[:] = 0.7194, -0.0098, 1.1995
+ bone.roll = -0.5826
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['thumb.02.L']]
+ bones['thumb.03.L'] = bone.name
+ bone = arm.edit_bones.new('finger_middle.03.L')
+ bone.head[:] = 0.7851, 0.0218, 1.1749
+ bone.tail[:] = 0.7888, 0.0216, 1.1525
+ bone.roll = -1.7483
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['finger_middle.02.L']]
+ bones['finger_middle.03.L'] = bone.name
+ bone = arm.edit_bones.new('finger_ring.03.L')
+ bone.head[:] = 0.7794, 0.0494, 1.1762
+ bone.tail[:] = 0.7781, 0.0498, 1.1577
+ bone.roll = -1.6582
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['finger_ring.02.L']]
+ bones['finger_ring.03.L'] = bone.name
+ bone = arm.edit_bones.new('finger_pinky.03.L')
+ bone.head[:] = 0.7618, 0.0770, 1.1932
+ bone.tail[:] = 0.7611, 0.0772, 1.1782
+ bone.roll = -1.7639
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['finger_pinky.02.L']]
+ bones['finger_pinky.03.L'] = bone.name
+ bone = arm.edit_bones.new('finger_index.03.R')
+ bone.head[:] = -0.7840, -0.0003, 1.1858
+ bone.tail[:] = -0.7892, 0.0006, 1.1636
+ bone.roll = 1.6760
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['finger_index.02.R']]
+ bones['finger_index.03.R'] = bone.name
+ bone = arm.edit_bones.new('thumb.03.R')
+ bone.head[:] = -0.7056, -0.0057, 1.2145
+ bone.tail[:] = -0.7194, -0.0098, 1.1995
+ bone.roll = 0.5826
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['thumb.02.R']]
+ bones['thumb.03.R'] = bone.name
+ bone = arm.edit_bones.new('finger_middle.03.R')
+ bone.head[:] = -0.7851, 0.0218, 1.1749
+ bone.tail[:] = -0.7888, 0.0216, 1.1525
+ bone.roll = 1.7483
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['finger_middle.02.R']]
+ bones['finger_middle.03.R'] = bone.name
+ bone = arm.edit_bones.new('finger_ring.03.R')
+ bone.head[:] = -0.7794, 0.0494, 1.1762
+ bone.tail[:] = -0.7781, 0.0498, 1.1577
+ bone.roll = 1.6582
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['finger_ring.02.R']]
+ bones['finger_ring.03.R'] = bone.name
+ bone = arm.edit_bones.new('finger_pinky.03.R')
+ bone.head[:] = -0.7618, 0.0770, 1.1932
+ bone.tail[:] = -0.7611, 0.0772, 1.1782
+ bone.roll = 1.7639
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['finger_pinky.02.R']]
+ bones['finger_pinky.03.R'] = bone.name
+
+ bpy.ops.object.mode_set(mode='OBJECT')
+ pbone = obj.pose.bones[bones['hips']]
+ pbone.rigify_type = 'spine'
+ pbone.rigify_parameters.add()
+ pbone.rigify_parameters[0].chain_bone_controls = "1, 2, 3"
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['spine']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['thigh.L']]
+ pbone.rigify_type = 'biped.leg'
+ pbone.lock_location = (True, True, True)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone.rigify_parameters.add()
+ try:
+ pbone.rigify_parameters[0].separate_ik_layers = True
+ except AttributeError:
+ pass
+ try:
+ pbone.rigify_parameters[0].ik_layers = [False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ except AttributeError:
+ pass
+ pbone = obj.pose.bones[bones['thigh.R']]
+ pbone.rigify_type = 'biped.leg'
+ pbone.lock_location = (True, True, True)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone.rigify_parameters.add()
+ try:
+ pbone.rigify_parameters[0].separate_ik_layers = True
+ except AttributeError:
+ pass
+ try:
+ pbone.rigify_parameters[0].ik_layers = [False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ except AttributeError:
+ pass
+ pbone = obj.pose.bones[bones['ribs']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['shin.L']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['shin.R']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['neck']]
+ pbone.rigify_type = 'neck_short'
+ pbone.lock_location = (True, True, True)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['shoulder.L']]
+ pbone.rigify_type = 'basic.copy'
+ pbone.lock_location = (True, True, True)
+ pbone.lock_rotation = (False, True, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'YXZ'
+ pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone.rigify_parameters.add()
+ pbone = obj.pose.bones[bones['shoulder.R']]
+ pbone.rigify_type = 'basic.copy'
+ pbone.lock_location = (True, True, True)
+ pbone.lock_rotation = (False, True, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'YXZ'
+ pbone.bone.layers = [True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone.rigify_parameters.add()
+ pbone = obj.pose.bones[bones['foot.L']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['heel.L']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['heel.02.L']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['foot.R']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['heel.R']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['heel.02.R']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['head']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['upper_arm.L']]
+ pbone.rigify_type = 'biped.arm'
+ pbone.lock_location = (True, True, True)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone.rigify_parameters.add()
+ try:
+ pbone.rigify_parameters[0].separate_ik_layers = True
+ except AttributeError:
+ pass
+ try:
+ pbone.rigify_parameters[0].ik_layers = [False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ except AttributeError:
+ pass
+ pbone = obj.pose.bones[bones['upper_arm.R']]
+ pbone.rigify_type = 'biped.arm'
+ pbone.lock_location = (True, True, True)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone.rigify_parameters.add()
+ try:
+ pbone.rigify_parameters[0].separate_ik_layers = True
+ except AttributeError:
+ pass
+ try:
+ pbone.rigify_parameters[0].ik_layers = [False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ except AttributeError:
+ pass
+ pbone = obj.pose.bones[bones['toe.L']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['toe.R']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['forearm.L']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['forearm.R']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['hand.L']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['hand.R']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['palm.01.L']]
+ pbone.rigify_type = 'palm'
+ pbone.lock_location = (True, True, True)
+ pbone.lock_rotation = (False, True, True)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'YXZ'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone.rigify_parameters.add()
+ pbone = obj.pose.bones[bones['palm.02.L']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (True, True, True)
+ pbone.lock_rotation = (False, True, True)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'YXZ'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['palm.03.L']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (True, True, True)
+ pbone.lock_rotation = (False, True, True)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'YXZ'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['palm.04.L']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (True, True, True)
+ pbone.lock_rotation = (False, True, True)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'YXZ'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['palm.01.R']]
+ pbone.rigify_type = 'palm'
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, True, True)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'YXZ'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone.rigify_parameters.add()
+ pbone = obj.pose.bones[bones['palm.02.R']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, True, True)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'YXZ'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['palm.03.R']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, True, True)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'YXZ'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['palm.04.R']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, True, True)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'YXZ'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['finger_index.01.L']]
+ pbone.rigify_type = 'finger'
+ pbone.lock_location = (True, True, True)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone.rigify_parameters.add()
+ try:
+ pbone.rigify_parameters[0].separate_extra_layers = True
+ except AttributeError:
+ pass
+ try:
+ pbone.rigify_parameters[0].extra_layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ except AttributeError:
+ pass
+ pbone = obj.pose.bones[bones['thumb.01.L']]
+ pbone.rigify_type = 'finger'
+ pbone.lock_location = (True, True, True)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone.rigify_parameters.add()
+ try:
+ pbone.rigify_parameters[0].extra_layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ except AttributeError:
+ pass
+ try:
+ pbone.rigify_parameters[0].separate_extra_layers = True
+ except AttributeError:
+ pass
+ pbone = obj.pose.bones[bones['finger_middle.01.L']]
+ pbone.rigify_type = 'finger'
+ pbone.lock_location = (True, True, True)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone.rigify_parameters.add()
+ try:
+ pbone.rigify_parameters[0].separate_extra_layers = True
+ except AttributeError:
+ pass
+ try:
+ pbone.rigify_parameters[0].extra_layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ except AttributeError:
+ pass
+ pbone = obj.pose.bones[bones['finger_ring.01.L']]
+ pbone.rigify_type = 'finger'
+ pbone.lock_location = (True, True, True)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone.rigify_parameters.add()
+ try:
+ pbone.rigify_parameters[0].separate_extra_layers = True
+ except AttributeError:
+ pass
+ try:
+ pbone.rigify_parameters[0].extra_layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ except AttributeError:
+ pass
+ pbone = obj.pose.bones[bones['finger_pinky.01.L']]
+ pbone.rigify_type = 'finger'
+ pbone.lock_location = (True, True, True)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone.rigify_parameters.add()
+ try:
+ pbone.rigify_parameters[0].separate_extra_layers = True
+ except AttributeError:
+ pass
+ try:
+ pbone.rigify_parameters[0].extra_layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ except AttributeError:
+ pass
+ pbone = obj.pose.bones[bones['finger_index.01.R']]
+ pbone.rigify_type = 'finger'
+ pbone.lock_location = (True, True, True)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone.rigify_parameters.add()
+ try:
+ pbone.rigify_parameters[0].extra_layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ except AttributeError:
+ pass
+ try:
+ pbone.rigify_parameters[0].separate_extra_layers = True
+ except AttributeError:
+ pass
+ pbone = obj.pose.bones[bones['thumb.01.R']]
+ pbone.rigify_type = 'finger'
+ pbone.lock_location = (True, True, True)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone.rigify_parameters.add()
+ try:
+ pbone.rigify_parameters[0].separate_extra_layers = True
+ except AttributeError:
+ pass
+ try:
+ pbone.rigify_parameters[0].extra_layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ except AttributeError:
+ pass
+ pbone = obj.pose.bones[bones['finger_middle.01.R']]
+ pbone.rigify_type = 'finger'
+ pbone.lock_location = (True, True, True)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone.rigify_parameters.add()
+ try:
+ pbone.rigify_parameters[0].separate_extra_layers = True
+ except AttributeError:
+ pass
+ try:
+ pbone.rigify_parameters[0].extra_layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ except AttributeError:
+ pass
+ pbone = obj.pose.bones[bones['finger_ring.01.R']]
+ pbone.rigify_type = 'finger'
+ pbone.lock_location = (True, True, True)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone.rigify_parameters.add()
+ try:
+ pbone.rigify_parameters[0].separate_extra_layers = True
+ except AttributeError:
+ pass
+ try:
+ pbone.rigify_parameters[0].extra_layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ except AttributeError:
+ pass
+ pbone = obj.pose.bones[bones['finger_pinky.01.R']]
+ pbone.rigify_type = 'finger'
+ pbone.lock_location = (True, True, True)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone.rigify_parameters.add()
+ try:
+ pbone.rigify_parameters[0].separate_extra_layers = True
+ except AttributeError:
+ pass
+ try:
+ pbone.rigify_parameters[0].extra_layers = [False, False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ except AttributeError:
+ pass
+ pbone = obj.pose.bones[bones['finger_index.02.L']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['thumb.02.L']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['finger_middle.02.L']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['finger_ring.02.L']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['finger_pinky.02.L']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['finger_index.02.R']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['thumb.02.R']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['finger_middle.02.R']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['finger_ring.02.R']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['finger_pinky.02.R']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['finger_index.03.L']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['thumb.03.L']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['finger_middle.03.L']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['finger_ring.03.L']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['finger_pinky.03.L']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['finger_index.03.R']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['thumb.03.R']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['finger_middle.03.R']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['finger_ring.03.R']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+ pbone = obj.pose.bones[bones['finger_pinky.03.R']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.bone.layers = [False, False, False, False, True, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False]
+
+ bpy.ops.object.mode_set(mode='EDIT')
+ for bone in arm.edit_bones:
+ bone.select = False
+ bone.select_head = False
+ bone.select_tail = False
+ for b in bones:
+ bone = arm.edit_bones[bones[b]]
+ bone.select = True
+ bone.select_head = True
+ bone.select_tail = True
+ arm.edit_bones.active = bone
+
+ arm.layers = [(x in [0, 2, 4, 6, 8, 10, 12]) for x in range(0, 32)]
+
diff --git a/rigify/rig_ui_template.py b/rigify/rig_ui_template.py
new file mode 100644
index 00000000..a54ab175
--- /dev/null
+++ b/rigify/rig_ui_template.py
@@ -0,0 +1,570 @@
+#====================== BEGIN GPL LICENSE BLOCK ======================
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+#======================= END GPL LICENSE BLOCK ========================
+
+UI_SLIDERS = '''
+import bpy
+from mathutils import Matrix, Vector
+from math import acos
+
+rig_id = "%s"
+
+
+#########################################
+## "Visual Transform" helper functions ##
+#########################################
+
+def get_pose_matrix_in_other_space(mat, pose_bone):
+ """ Returns the transform matrix relative to pose_bone's current
+ transform space. In other words, presuming that mat is in
+ armature space, slapping the returned matrix onto pose_bone
+ should give it the armature-space transforms of mat.
+ TODO: try to handle cases with axis-scaled parents better.
+ """
+ rest = pose_bone.bone.matrix_local.copy()
+ rest_inv = rest.inverted()
+ if pose_bone.parent:
+ par_mat = pose_bone.parent.matrix.copy()
+ par_inv = par_mat.inverted()
+ par_rest = pose_bone.parent.bone.matrix_local.copy()
+ else:
+ par_mat = Matrix()
+ par_inv = Matrix()
+ par_rest = Matrix()
+
+ # Get matrix in bone's current transform space
+ smat = rest_inv * (par_rest * (par_inv * mat))
+
+ # Compensate for non-local location
+ #if not pose_bone.bone.use_local_location:
+ # loc = smat.to_translation() * (par_rest.inverted() * rest).to_quaternion()
+ # smat[3][0] = loc[0]
+ # smat[3][1] = loc[1]
+ # smat[3][2] = loc[2]
+
+ return smat
+
+
+def get_local_pose_matrix(pose_bone):
+ """ Returns the local transform matrix of the given pose bone.
+ """
+ return get_pose_matrix_in_other_space(pose_bone.matrix, pose_bone)
+
+
+def set_pose_translation(pose_bone, mat):
+ """ Sets the pose bone's translation to the same translation as the given matrix.
+ Matrix should be given in bone's local space.
+ """
+ if pose_bone.bone.use_local_location == True:
+ pose_bone.location = mat.to_translation()
+ else:
+ loc = mat.to_translation()
+
+ rest = pose_bone.bone.matrix_local.copy()
+ if pose_bone.bone.parent:
+ par_rest = pose_bone.bone.parent.matrix_local.copy()
+ else:
+ par_rest = Matrix()
+
+ q = (par_rest.inverted() * rest).to_quaternion()
+ pose_bone.location = loc * q
+
+
+def set_pose_rotation(pose_bone, mat):
+ """ Sets the pose bone's rotation to the same rotation as the given matrix.
+ Matrix should be given in bone's local space.
+ """
+ q = mat.to_quaternion()
+
+ if pose_bone.rotation_mode == 'QUATERNION':
+ pose_bone.rotation_quaternion = q
+ elif pose_bone.rotation_mode == 'AXIS_ANGLE':
+ pose_bone.rotation_axis_angle[0] = q.angle
+ pose_bone.rotation_axis_angle[1] = q.axis[0]
+ pose_bone.rotation_axis_angle[2] = q.axis[1]
+ pose_bone.rotation_axis_angle[3] = q.axis[2]
+ else:
+ pose_bone.rotation_euler = q.to_euler(pose_bone.rotation_mode)
+
+
+def set_pose_scale(pose_bone, mat):
+ """ Sets the pose bone's scale to the same scale as the given matrix.
+ Matrix should be given in bone's local space.
+ """
+ pose_bone.scale = mat.to_scale()
+
+
+def match_pose_translation(pose_bone, target_bone):
+ """ Matches pose_bone's visual translation to target_bone's visual
+ translation.
+ This function assumes you are in pose mode on the relevant armature.
+ """
+ mat = get_pose_matrix_in_other_space(target_bone.matrix, pose_bone)
+ set_pose_translation(pose_bone, mat)
+ bpy.ops.object.mode_set(mode='OBJECT')
+ bpy.ops.object.mode_set(mode='POSE')
+
+
+def match_pose_rotation(pose_bone, target_bone):
+ """ Matches pose_bone's visual rotation to target_bone's visual
+ rotation.
+ This function assumes you are in pose mode on the relevant armature.
+ """
+ mat = get_pose_matrix_in_other_space(target_bone.matrix, pose_bone)
+ set_pose_rotation(pose_bone, mat)
+ bpy.ops.object.mode_set(mode='OBJECT')
+ bpy.ops.object.mode_set(mode='POSE')
+
+
+def match_pose_scale(pose_bone, target_bone):
+ """ Matches pose_bone's visual scale to target_bone's visual
+ scale.
+ This function assumes you are in pose mode on the relevant armature.
+ """
+ mat = get_pose_matrix_in_other_space(target_bone.matrix, pose_bone)
+ set_pose_scale(pose_bone, mat)
+ bpy.ops.object.mode_set(mode='OBJECT')
+ bpy.ops.object.mode_set(mode='POSE')
+
+
+##############################
+## IK/FK snapping functions ##
+##############################
+
+def match_pole_target(ik_first, ik_last, pole, match_bone, length):
+    """ Places an IK chain's pole target so that ik_first's
+        transforms match match_bone's. All bones should be given as pose bones.
+ You need to be in pose mode on the relevant armature object.
+ ik_first: first bone in the IK chain
+ ik_last: last bone in the IK chain
+ pole: pole target bone for the IK chain
+ match_bone: bone to match ik_first to (probably first bone in a matching FK chain)
+ length: distance pole target should be placed from the chain center
+ """
+ a = ik_first.matrix.to_translation()
+ b = ik_last.matrix.to_translation() + ik_last.vector
+
+ # Vector from the head of ik_first to the
+ # tip of ik_last
+ ikv = b - a
+
+ # Create a vector that is not aligned with ikv.
+ # It doesn't matter what vector. Just any vector
+ # that's guaranteed to not be pointing in the same
+ # direction. In this case, we create a unit vector
+ # on the axis of the smallest component of ikv.
+ if abs(ikv[0]) < abs(ikv[1]) and abs(ikv[0]) < abs(ikv[2]):
+ v = Vector((1,0,0))
+ elif abs(ikv[1]) < abs(ikv[2]):
+ v = Vector((0,1,0))
+ else:
+ v = Vector((0,0,1))
+
+ # Get a vector perpendicular to ikv
+ pv = v.cross(ikv).normalized() * length
+
+ def set_pole(pvi):
+ """ Set pole target's position based on a vector
+ from the arm center line.
+ """
+ # Translate pvi into armature space
+ ploc = a + (ikv/2) + pvi
+
+ # Set pole target to location
+ mat = get_pose_matrix_in_other_space(Matrix.Translation(ploc), pole)
+ set_pose_translation(pole, mat)
+
+ bpy.ops.object.mode_set(mode='OBJECT')
+ bpy.ops.object.mode_set(mode='POSE')
+
+ set_pole(pv)
+
+ # Get the rotation difference between ik_first and match_bone
+ q1 = ik_first.matrix.to_quaternion()
+ q2 = match_bone.matrix.to_quaternion()
+ angle = acos(min(1,max(-1,q1.dot(q2)))) * 2
+
+ # Compensate for the rotation difference
+ if angle > 0.0001:
+ pv *= Matrix.Rotation(angle, 4, ikv).to_quaternion()
+ set_pole(pv)
+
+ # Get rotation difference again, to see if we
+ # compensated in the right direction
+ q1 = ik_first.matrix.to_quaternion()
+ q2 = match_bone.matrix.to_quaternion()
+ angle2 = acos(min(1,max(-1,q1.dot(q2)))) * 2
+ if angle2 > 0.0001:
+ # Compensate in the other direction
+ pv *= Matrix.Rotation((angle*(-2)), 4, ikv).to_quaternion()
+ set_pole(pv)
+
+
+def fk2ik_arm(obj, fk, ik):
+ """ Matches the fk bones in an arm rig to the ik bones.
+ obj: armature object
+ fk: list of fk bone names
+ ik: list of ik bone names
+ """
+ uarm = obj.pose.bones[fk[0]]
+ farm = obj.pose.bones[fk[1]]
+ hand = obj.pose.bones[fk[2]]
+ uarmi = obj.pose.bones[ik[0]]
+ farmi = obj.pose.bones[ik[1]]
+ handi = obj.pose.bones[ik[2]]
+
+ # Upper arm position
+ match_pose_rotation(uarm, uarmi)
+ match_pose_scale(uarm, uarmi)
+
+ # Forearm position
+ match_pose_rotation(farm, farmi)
+ match_pose_scale(farm, farmi)
+
+ # Hand position
+ match_pose_rotation(hand, handi)
+ match_pose_scale(hand, handi)
+
+
+def ik2fk_arm(obj, fk, ik):
+ """ Matches the ik bones in an arm rig to the fk bones.
+ obj: armature object
+ fk: list of fk bone names
+ ik: list of ik bone names
+ """
+ uarm = obj.pose.bones[fk[0]]
+ farm = obj.pose.bones[fk[1]]
+ hand = obj.pose.bones[fk[2]]
+ uarmi = obj.pose.bones[ik[0]]
+ farmi = obj.pose.bones[ik[1]]
+ handi = obj.pose.bones[ik[2]]
+ pole = obj.pose.bones[ik[3]]
+
+ # Hand position
+ match_pose_translation(handi, hand)
+ match_pose_rotation(handi, hand)
+ match_pose_scale(handi, hand)
+
+ # Pole target position
+ match_pole_target(uarmi, farmi, pole, uarm, (uarmi.length + farmi.length))
+
+
+def fk2ik_leg(obj, fk, ik):
+ """ Matches the fk bones in a leg rig to the ik bones.
+ obj: armature object
+ fk: list of fk bone names
+ ik: list of ik bone names
+ """
+ thigh = obj.pose.bones[fk[0]]
+ shin = obj.pose.bones[fk[1]]
+ foot = obj.pose.bones[fk[2]]
+ mfoot = obj.pose.bones[fk[3]]
+ thighi = obj.pose.bones[ik[0]]
+ shini = obj.pose.bones[ik[1]]
+ mfooti = obj.pose.bones[ik[2]]
+
+ # Thigh position
+ match_pose_rotation(thigh, thighi)
+ match_pose_scale(thigh, thighi)
+
+ # Shin position
+ match_pose_rotation(shin, shini)
+ match_pose_scale(shin, shini)
+
+ # Foot position
+ mat = mfoot.bone.matrix_local.inverted() * foot.bone.matrix_local
+ footmat = get_pose_matrix_in_other_space(mfooti.matrix, foot) * mat
+ set_pose_rotation(foot, footmat)
+ set_pose_scale(foot, footmat)
+ bpy.ops.object.mode_set(mode='OBJECT')
+ bpy.ops.object.mode_set(mode='POSE')
+
+
+def ik2fk_leg(obj, fk, ik):
+ """ Matches the ik bones in a leg rig to the fk bones.
+ obj: armature object
+ fk: list of fk bone names
+ ik: list of ik bone names
+ """
+ thigh = obj.pose.bones[fk[0]]
+ shin = obj.pose.bones[fk[1]]
+ mfoot = obj.pose.bones[fk[2]]
+ thighi = obj.pose.bones[ik[0]]
+ shini = obj.pose.bones[ik[1]]
+ footi = obj.pose.bones[ik[2]]
+ footroll = obj.pose.bones[ik[3]]
+ pole = obj.pose.bones[ik[4]]
+ mfooti = obj.pose.bones[ik[5]]
+
+ # Clear footroll
+ set_pose_rotation(footroll, Matrix())
+
+ # Foot position
+ mat = mfooti.bone.matrix_local.inverted() * footi.bone.matrix_local
+ footmat = get_pose_matrix_in_other_space(mfoot.matrix, footi) * mat
+ set_pose_translation(footi, footmat)
+ set_pose_rotation(footi, footmat)
+ set_pose_scale(footi, footmat)
+ bpy.ops.object.mode_set(mode='OBJECT')
+ bpy.ops.object.mode_set(mode='POSE')
+
+ # Pole target position
+ match_pole_target(thighi, shini, pole, thigh, (thighi.length + shini.length))
+
+
+##############################
+## IK/FK snapping operators ##
+##############################
+
+class Rigify_Arm_FK2IK(bpy.types.Operator):
+ """ Snaps an FK arm to an IK arm.
+ """
+ bl_idname = "pose.rigify_arm_fk2ik_" + rig_id
+ bl_label = "Rigify Snap FK arm to IK"
+ bl_options = {'UNDO'}
+
+ uarm_fk = bpy.props.StringProperty(name="Upper Arm FK Name")
+    farm_fk = bpy.props.StringProperty(name="Forearm FK Name")
+ hand_fk = bpy.props.StringProperty(name="Hand FK Name")
+
+ uarm_ik = bpy.props.StringProperty(name="Upper Arm IK Name")
+ farm_ik = bpy.props.StringProperty(name="Forearm IK Name")
+ hand_ik = bpy.props.StringProperty(name="Hand IK Name")
+
+ @classmethod
+ def poll(cls, context):
+ return (context.active_object != None and context.mode == 'POSE')
+
+ def execute(self, context):
+ use_global_undo = context.user_preferences.edit.use_global_undo
+ context.user_preferences.edit.use_global_undo = False
+ try:
+ fk2ik_arm(context.active_object, fk=[self.uarm_fk, self.farm_fk, self.hand_fk], ik=[self.uarm_ik, self.farm_ik, self.hand_ik])
+ finally:
+ context.user_preferences.edit.use_global_undo = use_global_undo
+ return {'FINISHED'}
+
+
+class Rigify_Arm_IK2FK(bpy.types.Operator):
+ """ Snaps an IK arm to an FK arm.
+ """
+ bl_idname = "pose.rigify_arm_ik2fk_" + rig_id
+ bl_label = "Rigify Snap IK arm to FK"
+ bl_options = {'UNDO'}
+
+ uarm_fk = bpy.props.StringProperty(name="Upper Arm FK Name")
+    farm_fk = bpy.props.StringProperty(name="Forearm FK Name")
+ hand_fk = bpy.props.StringProperty(name="Hand FK Name")
+
+ uarm_ik = bpy.props.StringProperty(name="Upper Arm IK Name")
+ farm_ik = bpy.props.StringProperty(name="Forearm IK Name")
+ hand_ik = bpy.props.StringProperty(name="Hand IK Name")
+ pole = bpy.props.StringProperty(name="Pole IK Name")
+
+ @classmethod
+ def poll(cls, context):
+ return (context.active_object != None and context.mode == 'POSE')
+
+ def execute(self, context):
+ use_global_undo = context.user_preferences.edit.use_global_undo
+ context.user_preferences.edit.use_global_undo = False
+ try:
+ ik2fk_arm(context.active_object, fk=[self.uarm_fk, self.farm_fk, self.hand_fk], ik=[self.uarm_ik, self.farm_ik, self.hand_ik, self.pole])
+ finally:
+ context.user_preferences.edit.use_global_undo = use_global_undo
+ return {'FINISHED'}
+
+
+class Rigify_Leg_FK2IK(bpy.types.Operator):
+ """ Snaps an FK leg to an IK leg.
+ """
+ bl_idname = "pose.rigify_leg_fk2ik_" + rig_id
+ bl_label = "Rigify Snap FK leg to IK"
+ bl_options = {'UNDO'}
+
+ thigh_fk = bpy.props.StringProperty(name="Thigh FK Name")
+ shin_fk = bpy.props.StringProperty(name="Shin FK Name")
+ foot_fk = bpy.props.StringProperty(name="Foot FK Name")
+ mfoot_fk = bpy.props.StringProperty(name="MFoot FK Name")
+
+ thigh_ik = bpy.props.StringProperty(name="Thigh IK Name")
+ shin_ik = bpy.props.StringProperty(name="Shin IK Name")
+ mfoot_ik = bpy.props.StringProperty(name="MFoot IK Name")
+
+ @classmethod
+ def poll(cls, context):
+ return (context.active_object != None and context.mode == 'POSE')
+
+ def execute(self, context):
+ use_global_undo = context.user_preferences.edit.use_global_undo
+ context.user_preferences.edit.use_global_undo = False
+ try:
+ fk2ik_leg(context.active_object, fk=[self.thigh_fk, self.shin_fk, self.foot_fk, self.mfoot_fk], ik=[self.thigh_ik, self.shin_ik, self.mfoot_ik])
+ finally:
+ context.user_preferences.edit.use_global_undo = use_global_undo
+ return {'FINISHED'}
+
+
+class Rigify_Leg_IK2FK(bpy.types.Operator):
+ """ Snaps an IK leg to an FK leg.
+ """
+ bl_idname = "pose.rigify_leg_ik2fk_" + rig_id
+ bl_label = "Rigify Snap IK leg to FK"
+ bl_options = {'UNDO'}
+
+ thigh_fk = bpy.props.StringProperty(name="Thigh FK Name")
+ shin_fk = bpy.props.StringProperty(name="Shin FK Name")
+ mfoot_fk = bpy.props.StringProperty(name="MFoot FK Name")
+
+ thigh_ik = bpy.props.StringProperty(name="Thigh IK Name")
+ shin_ik = bpy.props.StringProperty(name="Shin IK Name")
+ foot_ik = bpy.props.StringProperty(name="Foot IK Name")
+ footroll = bpy.props.StringProperty(name="Foot Roll Name")
+ pole = bpy.props.StringProperty(name="Pole IK Name")
+ mfoot_ik = bpy.props.StringProperty(name="MFoot IK Name")
+
+ @classmethod
+ def poll(cls, context):
+ return (context.active_object != None and context.mode == 'POSE')
+
+ def execute(self, context):
+ use_global_undo = context.user_preferences.edit.use_global_undo
+ context.user_preferences.edit.use_global_undo = False
+ try:
+ ik2fk_leg(context.active_object, fk=[self.thigh_fk, self.shin_fk, self.mfoot_fk], ik=[self.thigh_ik, self.shin_ik, self.foot_ik, self.footroll, self.pole, self.mfoot_ik])
+ finally:
+ context.user_preferences.edit.use_global_undo = use_global_undo
+ return {'FINISHED'}
+
+
+###################
+## Rig UI Panels ##
+###################
+
+class RigUI(bpy.types.Panel):
+ bl_space_type = 'VIEW_3D'
+ bl_region_type = 'UI'
+ bl_label = "Rig Main Properties"
+ bl_idname = rig_id + "_PT_rig_ui"
+
+ @classmethod
+ def poll(self, context):
+ if context.mode != 'POSE':
+ return False
+ try:
+ return (context.active_object.data.get("rig_id") == rig_id)
+ except (AttributeError, KeyError, TypeError):
+ return False
+
+ def draw(self, context):
+ layout = self.layout
+ pose_bones = context.active_object.pose.bones
+ try:
+ selected_bones = [bone.name for bone in context.selected_pose_bones]
+ selected_bones += [context.active_pose_bone.name]
+ except (AttributeError, TypeError):
+ return
+
+ def is_selected(names):
+ # Returns whether any of the named bones are selected.
+        if isinstance(names, list):
+ for name in names:
+ if name in selected_bones:
+ return True
+ elif names in selected_bones:
+ return True
+ return False
+
+
+'''
+
+
+def layers_ui(layers, layout):
+ """ Turn a list of booleans + a list of names into a layer UI.
+ """
+
+ code = '''
+class RigLayers(bpy.types.Panel):
+ bl_space_type = 'VIEW_3D'
+ bl_region_type = 'UI'
+ bl_label = "Rig Layers"
+ bl_idname = rig_id + "_PT_rig_layers"
+
+ @classmethod
+ def poll(self, context):
+ try:
+ return (context.active_object.data.get("rig_id") == rig_id)
+ except (AttributeError, KeyError, TypeError):
+ return False
+
+ def draw(self, context):
+ layout = self.layout
+ col = layout.column()
+'''
+ rows = {}
+ for i in range(28):
+ if layers[i]:
+ if layout[i][1] not in rows:
+ rows[layout[i][1]] = []
+ rows[layout[i][1]] += [(layout[i][0], i)]
+
+ keys = list(rows.keys())
+ keys.sort()
+
+ for key in keys:
+ code += "\n row = col.row()\n"
+ i = 0
+ for l in rows[key]:
+ if i > 3:
+ code += "\n row = col.row()\n"
+ i = 0
+ code += " row.prop(context.active_object.data, 'layers', index=%s, toggle=True, text='%s')\n" % (str(l[1]), l[0])
+ i += 1
+
+ # Root layer
+ code += "\n row = col.row()"
+ code += "\n row.separator()"
+ code += "\n row = col.row()"
+ code += "\n row.separator()\n"
+ code += "\n row = col.row()\n"
+ code += " row.prop(context.active_object.data, 'layers', index=28, toggle=True, text='Root')\n"
+
+
+ return code
+
+
+UI_REGISTER = '''
+
+def register():
+ bpy.utils.register_class(Rigify_Arm_FK2IK)
+ bpy.utils.register_class(Rigify_Arm_IK2FK)
+ bpy.utils.register_class(Rigify_Leg_FK2IK)
+ bpy.utils.register_class(Rigify_Leg_IK2FK)
+ bpy.utils.register_class(RigUI)
+ bpy.utils.register_class(RigLayers)
+
+def unregister():
+ bpy.utils.unregister_class(Rigify_Arm_FK2IK)
+ bpy.utils.unregister_class(Rigify_Arm_IK2FK)
+ bpy.utils.unregister_class(Rigify_Leg_FK2IK)
+ bpy.utils.unregister_class(Rigify_Leg_IK2FK)
+ bpy.utils.unregister_class(RigUI)
+ bpy.utils.unregister_class(RigLayers)
+
+register()
+'''
+
diff --git a/rigify/rigs/__init__.py b/rigify/rigs/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rigify/rigs/__init__.py
diff --git a/rigify/rigs/basic/__init__.py b/rigify/rigs/basic/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rigify/rigs/basic/__init__.py
diff --git a/rigify/rigs/basic/copy.py b/rigify/rigs/basic/copy.py
new file mode 100644
index 00000000..22beedf1
--- /dev/null
+++ b/rigify/rigs/basic/copy.py
@@ -0,0 +1,142 @@
+#====================== BEGIN GPL LICENSE BLOCK ======================
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+#======================= END GPL LICENSE BLOCK ========================
+
+import bpy
+from rigify.utils import MetarigError
+from rigify.utils import copy_bone
+from rigify.utils import strip_org, make_deformer_name
+from rigify.utils import create_bone_widget
+
+
+class Rig:
+ """ A "copy" rig. All it does is duplicate the original bone and
+ constrain it.
+ This is a control and deformation rig.
+
+ """
+ def __init__(self, obj, bone, params):
+ """ Gather and validate data about the rig.
+ """
+ self.obj = obj
+ self.org_bone = bone
+ self.org_name = strip_org(bone)
+ self.params = params
+ self.make_control = params.make_control
+ self.make_deform = params.make_deform
+
+ def generate(self):
+ """ Generate the rig.
+ Do NOT modify any of the original bones, except for adding constraints.
+ The main armature should be selected and active before this is called.
+
+ """
+ bpy.ops.object.mode_set(mode='EDIT')
+
+ # Make a control bone (copy of original).
+ if self.make_control:
+ bone = copy_bone(self.obj, self.org_bone, self.org_name)
+
+ # Make a deformation bone (copy of original, child of original).
+ if self.make_deform:
+ def_bone = copy_bone(self.obj, self.org_bone, make_deformer_name(self.org_name))
+
+ # Get edit bones
+ eb = self.obj.data.edit_bones
+ if self.make_control:
+ bone_e = eb[bone]
+ if self.make_deform:
+ def_bone_e = eb[def_bone]
+
+ # Parent
+ if self.make_deform:
+ def_bone_e.use_connect = False
+ def_bone_e.parent = eb[self.org_bone]
+
+ bpy.ops.object.mode_set(mode='OBJECT')
+ pb = self.obj.pose.bones
+
+ if self.make_control:
+ # Constrain the original bone.
+ con = pb[self.org_bone].constraints.new('COPY_TRANSFORMS')
+ con.name = "copy_transforms"
+ con.target = self.obj
+ con.subtarget = bone
+
+ # Create control widget
+ create_bone_widget(self.obj, bone)
+
+ @classmethod
+ def add_parameters(self, group):
+ """ Add the parameters of this rig type to the
+ RigifyParameters PropertyGroup
+ """
+ group.make_control = bpy.props.BoolProperty(name="Control", default=True, description="Create a control bone for the copy.")
+ group.make_deform = bpy.props.BoolProperty(name="Deform", default=True, description="Create a deform bone for the copy.")
+
+
+ @classmethod
+ def parameters_ui(self, layout, obj, bone):
+ """ Create the ui for the rig parameters.
+ """
+ params = obj.pose.bones[bone].rigify_parameters[0]
+
+ r = layout.row()
+ r.prop(params, "make_control")
+ r = layout.row()
+ r.prop(params, "make_deform")
+
+ @classmethod
+ def create_sample(self, obj):
+ """ Create a sample metarig for this rig type.
+
+ """
+ # generated by rigify.utils.write_metarig
+ bpy.ops.object.mode_set(mode='EDIT')
+ arm = obj.data
+
+ bones = {}
+
+ bone = arm.edit_bones.new('Bone')
+ bone.head[:] = 0.0000, 0.0000, 0.0000
+ bone.tail[:] = 0.0000, 0.0000, 0.2000
+ bone.roll = 0.0000
+ bone.use_connect = False
+ bones['Bone'] = bone.name
+
+ bpy.ops.object.mode_set(mode='OBJECT')
+ pbone = obj.pose.bones[bones['Bone']]
+        pbone.rigify_type = 'basic.copy'
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.rigify_parameters.add()
+
+ bpy.ops.object.mode_set(mode='EDIT')
+ for bone in arm.edit_bones:
+ bone.select = False
+ bone.select_head = False
+ bone.select_tail = False
+ for b in bones:
+ bone = arm.edit_bones[bones[b]]
+ bone.select = True
+ bone.select_head = True
+ bone.select_tail = True
+ arm.edit_bones.active = bone
+
diff --git a/rigify/rigs/basic/copy_chain.py b/rigify/rigs/basic/copy_chain.py
new file mode 100644
index 00000000..4910ef82
--- /dev/null
+++ b/rigify/rigs/basic/copy_chain.py
@@ -0,0 +1,210 @@
+#====================== BEGIN GPL LICENSE BLOCK ======================
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+#======================= END GPL LICENSE BLOCK ========================
+
+import bpy
+from rigify.utils import MetarigError
+from rigify.utils import copy_bone
+from rigify.utils import connected_children_names
+from rigify.utils import strip_org, make_deformer_name
+from rigify.utils import create_bone_widget
+
+
+class Rig:
+ """ A "copy_chain" rig. All it does is duplicate the original bone chain
+ and constrain it.
+ This is a control and deformation rig.
+
+ """
+ def __init__(self, obj, bone_name, params):
+ """ Gather and validate data about the rig.
+ """
+ self.obj = obj
+ self.org_bones = [bone_name] + connected_children_names(obj, bone_name)
+ self.params = params
+ self.make_controls = params.make_controls
+ self.make_deforms = params.make_deforms
+
+ if len(self.org_bones) <= 1:
+            raise MetarigError("RIGIFY ERROR: Bone '%s': input to rig type must be a chain of 2 or more bones." % (strip_org(bone_name)))
+
+ def generate(self):
+ """ Generate the rig.
+ Do NOT modify any of the original bones, except for adding constraints.
+ The main armature should be selected and active before this is called.
+
+ """
+ bpy.ops.object.mode_set(mode='EDIT')
+
+ # Create the deformation and control bone chains.
+ # Just copies of the original chain.
+ def_chain = []
+ ctrl_chain = []
+ for i in range(len(self.org_bones)):
+ name = self.org_bones[i]
+
+ # Control bone
+ if self.make_controls:
+ # Copy
+ ctrl_bone = copy_bone(self.obj, name)
+ eb = self.obj.data.edit_bones
+ ctrl_bone_e = eb[ctrl_bone]
+ # Name
+ ctrl_bone_e.name = strip_org(name)
+ # Parenting
+ if i == 0:
+ # First bone
+ ctrl_bone_e.parent = eb[self.org_bones[0]].parent
+ else:
+ # The rest
+ ctrl_bone_e.parent = eb[ctrl_chain[-1]]
+ # Add to list
+ ctrl_chain += [ctrl_bone_e.name]
+ else:
+ ctrl_chain += [None]
+
+ # Deformation bone
+ if self.make_deforms:
+ # Copy
+ def_bone = copy_bone(self.obj, name)
+ eb = self.obj.data.edit_bones
+ def_bone_e = eb[def_bone]
+ # Name
+ def_bone_e.name = make_deformer_name(strip_org(name))
+ # Parenting
+ if i == 0:
+ # First bone
+ def_bone_e.parent = eb[self.org_bones[0]].parent
+ else:
+ # The rest
+ def_bone_e.parent = eb[def_chain[-1]]
+ # Add to list
+ def_chain += [def_bone_e.name]
+ else:
+ def_chain += [None]
+
+ bpy.ops.object.mode_set(mode='OBJECT')
+ pb = self.obj.pose.bones
+
+ # Constraints for org and def
+ for org, ctrl, defrm in zip(self.org_bones, ctrl_chain, def_chain):
+ if self.make_controls:
+ con = pb[org].constraints.new('COPY_TRANSFORMS')
+ con.name = "copy_transforms"
+ con.target = self.obj
+ con.subtarget = ctrl
+
+ if self.make_deforms:
+ con = pb[defrm].constraints.new('COPY_TRANSFORMS')
+ con.name = "copy_transforms"
+ con.target = self.obj
+ con.subtarget = org
+
+ # Create control widgets
+ if self.make_controls:
+ for bone in ctrl_chain:
+ create_bone_widget(self.obj, bone)
+
+ @classmethod
+ def add_parameters(self, group):
+ """ Add the parameters of this rig type to the
+ RigifyParameters PropertyGroup
+ """
+ group.make_controls = bpy.props.BoolProperty(name="Controls", default=True, description="Create control bones for the copy.")
+ group.make_deforms = bpy.props.BoolProperty(name="Deform", default=True, description="Create deform bones for the copy.")
+
+
+ @classmethod
+ def parameters_ui(self, layout, obj, bone):
+ """ Create the ui for the rig parameters.
+ """
+ params = obj.pose.bones[bone].rigify_parameters[0]
+
+ r = layout.row()
+ r.prop(params, "make_controls")
+ r = layout.row()
+ r.prop(params, "make_deforms")
+
+ @classmethod
+ def create_sample(self, obj):
+ """ Create a sample metarig for this rig type.
+
+ """
+ # generated by rigify.utils.write_metarig
+ bpy.ops.object.mode_set(mode='EDIT')
+ arm = obj.data
+
+ bones = {}
+
+ bone = arm.edit_bones.new('bone.01')
+ bone.head[:] = 0.0000, 0.0000, 0.0000
+ bone.tail[:] = 0.0000, 0.0000, 0.3333
+ bone.roll = 0.0000
+ bone.use_connect = False
+ bones['bone.01'] = bone.name
+ bone = arm.edit_bones.new('bone.02')
+ bone.head[:] = 0.0000, 0.0000, 0.3333
+ bone.tail[:] = 0.0000, 0.0000, 0.6667
+ bone.roll = 3.1416
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['bone.01']]
+ bones['bone.02'] = bone.name
+ bone = arm.edit_bones.new('bone.03')
+ bone.head[:] = 0.0000, 0.0000, 0.6667
+ bone.tail[:] = 0.0000, 0.0000, 1.0000
+ bone.roll = 3.1416
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['bone.02']]
+ bones['bone.03'] = bone.name
+
+ bpy.ops.object.mode_set(mode='OBJECT')
+ pbone = obj.pose.bones[bones['bone.01']]
+ pbone.rigify_type = 'basic.copy_chain'
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.rigify_parameters.add()
+ pbone = obj.pose.bones[bones['bone.02']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone = obj.pose.bones[bones['bone.03']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+
+ bpy.ops.object.mode_set(mode='EDIT')
+ for bone in arm.edit_bones:
+ bone.select = False
+ bone.select_head = False
+ bone.select_tail = False
+ for b in bones:
+ bone = arm.edit_bones[bones[b]]
+ bone.select = True
+ bone.select_head = True
+ bone.select_tail = True
+ arm.edit_bones.active = bone
+
+
diff --git a/rigify/rigs/biped/__init__.py b/rigify/rigs/biped/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rigify/rigs/biped/__init__.py
diff --git a/rigify/rigs/biped/arm/__init__.py b/rigify/rigs/biped/arm/__init__.py
new file mode 100644
index 00000000..93e757f4
--- /dev/null
+++ b/rigify/rigs/biped/arm/__init__.py
@@ -0,0 +1,235 @@
+#====================== BEGIN GPL LICENSE BLOCK ======================
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+#======================= END GPL LICENSE BLOCK ========================
+
+import bpy
+import imp
+from . import fk, ik, deform
+from rigify.utils import MetarigError, get_layers
+
+imp.reload(fk)
+imp.reload(ik)
+imp.reload(deform)
+
+script = """
+fk_arm = ["%s", "%s", "%s"]
+ik_arm = ["%s", "%s", "%s", "%s"]
+if is_selected(fk_arm+ik_arm):
+ layout.prop(pose_bones[ik_arm[2]], '["ikfk_switch"]', text="FK / IK (" + ik_arm[2] + ")", slider=True)
+if is_selected(fk_arm):
+ try:
+ pose_bones[fk_arm[0]]["isolate"]
+ layout.prop(pose_bones[fk_arm[0]], '["isolate"]', text="Isolate Rotation (" + fk_arm[0] + ")", slider=True)
+ except KeyError:
+ pass
+if is_selected(fk_arm+ik_arm):
+ p = layout.operator("pose.rigify_arm_fk2ik_" + rig_id, text="Snap FK->IK (" + fk_arm[0] + ")")
+ p.uarm_fk = fk_arm[0]
+ p.farm_fk = fk_arm[1]
+ p.hand_fk = fk_arm[2]
+ p.uarm_ik = ik_arm[0]
+ p.farm_ik = ik_arm[1]
+ p.hand_ik = ik_arm[2]
+ p = layout.operator("pose.rigify_arm_ik2fk_" + rig_id, text="Snap IK->FK (" + fk_arm[0] + ")")
+ p.uarm_fk = fk_arm[0]
+ p.farm_fk = fk_arm[1]
+ p.hand_fk = fk_arm[2]
+ p.uarm_ik = ik_arm[0]
+ p.farm_ik = ik_arm[1]
+ p.hand_ik = ik_arm[2]
+ p.pole = ik_arm[3]
+
+"""
+
+
+class Rig:
+ """ An arm rig, with IK/FK switching and hinge switch.
+
+ """
+ def __init__(self, obj, bone, params):
+ """ Gather and validate data about the rig.
+ Store any data or references to data that will be needed later on.
+ In particular, store names of bones that will be needed.
+ Do NOT change any data in the scene. This is a gathering phase only.
+
+ """
+ # Gather deform rig
+ self.deform_rig = deform.Rig(obj, bone, params)
+
+ # Gather FK rig
+ self.fk_rig = fk.Rig(obj, bone, params)
+
+ # Gather IK rig
+ self.ik_rig = ik.Rig(obj, bone, params, ikfk_switch=True)
+
+ def generate(self):
+ """ Generate the rig.
+ Do NOT modify any of the original bones, except for adding constraints.
+ The main armature should be selected and active before this is called.
+
+ """
+ self.deform_rig.generate()
+ fk_controls = self.fk_rig.generate()
+ ik_controls = self.ik_rig.generate()
+ return [script % (fk_controls[0], fk_controls[1], fk_controls[2], ik_controls[0], ik_controls[1], ik_controls[2], ik_controls[3])]
+
+ @classmethod
+ def add_parameters(self, group):
+ """ Add the parameters of this rig type to the
+ RigifyParameters PropertyGroup
+
+ """
+ items = [('X', 'X', ''), ('Y', 'Y', ''), ('Z', 'Z', ''), ('-X', '-X', ''), ('-Y', '-Y', ''), ('-Z', '-Z', '')]
+ group.primary_rotation_axis = bpy.props.EnumProperty(items=items, name="Primary Rotation Axis", default='X')
+
+ group.bend_hint = bpy.props.BoolProperty(name="Bend Hint", default=True, description="Give IK chain a hint about which way to bend. Useful for perfectly straight chains.")
+
+ group.separate_ik_layers = bpy.props.BoolProperty(name="Separate IK Control Layers:", default=False, description="Enable putting the ik controls on a separate layer from the fk controls.")
+ group.ik_layers = bpy.props.BoolVectorProperty(size=32, description="Layers for the ik controls to be on.")
+
+ group.use_upper_arm_twist = bpy.props.BoolProperty(name="Upper Arm Twist", default=True, description="Generate the dual-bone twist setup for the upper arm.")
+ group.use_forearm_twist = bpy.props.BoolProperty(name="Forearm Twist", default=True, description="Generate the dual-bone twist setup for the forearm.")
+
+ @classmethod
+ def parameters_ui(self, layout, obj, bone):
+ """ Create the ui for the rig parameters.
+
+ """
+ params = obj.pose.bones[bone].rigify_parameters[0]
+
+ r = layout.row()
+ r.prop(params, "separate_ik_layers")
+
+ r = layout.row()
+ r.active = params.separate_ik_layers
+
+ col = r.column(align=True)
+ row = col.row(align=True)
+ row.prop(params, "ik_layers", index=0, toggle=True, text="")
+ row.prop(params, "ik_layers", index=1, toggle=True, text="")
+ row.prop(params, "ik_layers", index=2, toggle=True, text="")
+ row.prop(params, "ik_layers", index=3, toggle=True, text="")
+ row.prop(params, "ik_layers", index=4, toggle=True, text="")
+ row.prop(params, "ik_layers", index=5, toggle=True, text="")
+ row.prop(params, "ik_layers", index=6, toggle=True, text="")
+ row.prop(params, "ik_layers", index=7, toggle=True, text="")
+ row = col.row(align=True)
+ row.prop(params, "ik_layers", index=16, toggle=True, text="")
+ row.prop(params, "ik_layers", index=17, toggle=True, text="")
+ row.prop(params, "ik_layers", index=18, toggle=True, text="")
+ row.prop(params, "ik_layers", index=19, toggle=True, text="")
+ row.prop(params, "ik_layers", index=20, toggle=True, text="")
+ row.prop(params, "ik_layers", index=21, toggle=True, text="")
+ row.prop(params, "ik_layers", index=22, toggle=True, text="")
+ row.prop(params, "ik_layers", index=23, toggle=True, text="")
+
+ col = r.column(align=True)
+ row = col.row(align=True)
+ row.prop(params, "ik_layers", index=8, toggle=True, text="")
+ row.prop(params, "ik_layers", index=9, toggle=True, text="")
+ row.prop(params, "ik_layers", index=10, toggle=True, text="")
+ row.prop(params, "ik_layers", index=11, toggle=True, text="")
+ row.prop(params, "ik_layers", index=12, toggle=True, text="")
+ row.prop(params, "ik_layers", index=13, toggle=True, text="")
+ row.prop(params, "ik_layers", index=14, toggle=True, text="")
+ row.prop(params, "ik_layers", index=15, toggle=True, text="")
+ row = col.row(align=True)
+ row.prop(params, "ik_layers", index=24, toggle=True, text="")
+ row.prop(params, "ik_layers", index=25, toggle=True, text="")
+ row.prop(params, "ik_layers", index=26, toggle=True, text="")
+ row.prop(params, "ik_layers", index=27, toggle=True, text="")
+ row.prop(params, "ik_layers", index=28, toggle=True, text="")
+ row.prop(params, "ik_layers", index=29, toggle=True, text="")
+ row.prop(params, "ik_layers", index=30, toggle=True, text="")
+ row.prop(params, "ik_layers", index=31, toggle=True, text="")
+
+ r = layout.row()
+ r.label(text="Elbow rotation axis:")
+ r.prop(params, "primary_rotation_axis", text="")
+
+ r = layout.row()
+ r.prop(params, "bend_hint")
+
+ col = layout.column()
+ col.prop(params, "use_upper_arm_twist")
+ col.prop(params, "use_forearm_twist")
+
+ @classmethod
+ def create_sample(self, obj):
+ # generated by rigify.utils.write_meta_rig
+ bpy.ops.object.mode_set(mode='EDIT')
+ arm = obj.data
+
+ bones = {}
+
+ bone = arm.edit_bones.new('upper_arm')
+ bone.head[:] = 0.0000, 0.0000, 0.0000
+ bone.tail[:] = 0.3000, 0.0300, 0.0000
+ bone.roll = 1.5708
+ bone.use_connect = False
+ bones['upper_arm'] = bone.name
+ bone = arm.edit_bones.new('forearm')
+ bone.head[:] = 0.3000, 0.0300, 0.0000
+ bone.tail[:] = 0.6000, 0.0000, 0.0000
+ bone.roll = 1.5708
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['upper_arm']]
+ bones['forearm'] = bone.name
+ bone = arm.edit_bones.new('hand')
+ bone.head[:] = 0.6000, 0.0000, 0.0000
+ bone.tail[:] = 0.7000, 0.0000, 0.0000
+ bone.roll = 3.1416
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['forearm']]
+ bones['hand'] = bone.name
+
+ bpy.ops.object.mode_set(mode='OBJECT')
+ pbone = obj.pose.bones[bones['upper_arm']]
+ pbone.rigify_type = 'biped.arm'
+ pbone.lock_location = (True, True, True)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.rigify_parameters.add()
+ pbone = obj.pose.bones[bones['forearm']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone = obj.pose.bones[bones['hand']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+
+ bpy.ops.object.mode_set(mode='EDIT')
+ for bone in arm.edit_bones:
+ bone.select = False
+ bone.select_head = False
+ bone.select_tail = False
+ for b in bones:
+ bone = arm.edit_bones[bones[b]]
+ bone.select = True
+ bone.select_head = True
+ bone.select_tail = True
+ arm.edit_bones.active = bone
+
diff --git a/rigify/rigs/biped/arm/deform.py b/rigify/rigs/biped/arm/deform.py
new file mode 100644
index 00000000..2a7b3109
--- /dev/null
+++ b/rigify/rigs/biped/arm/deform.py
@@ -0,0 +1,230 @@
+#====================== BEGIN GPL LICENSE BLOCK ======================
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+#======================= END GPL LICENSE BLOCK ========================
+
+import bpy
+from math import acos, degrees
+from mathutils import Vector, Matrix
+from rigify.utils import MetarigError
+from rigify.utils import copy_bone, flip_bone, put_bone
+from rigify.utils import connected_children_names
+from rigify.utils import strip_org, make_mechanism_name, make_deformer_name
+
+
+def align_roll(obj, bone1, bone2):
+ bone1_e = obj.data.edit_bones[bone1]
+ bone2_e = obj.data.edit_bones[bone2]
+
+ bone1_e.roll = 0.0
+
+ # Get the directions the bones are pointing in, as vectors
+ y1 = bone1_e.y_axis
+ x1 = bone1_e.x_axis
+ y2 = bone2_e.y_axis
+ x2 = bone2_e.x_axis
+
+ # Get the shortest axis to rotate bone1 on to point in the same direction as bone2
+ axis = y1.cross(y2)
+ axis.normalize()
+
+ # Angle to rotate on that shortest axis
+ angle = y1.angle(y2)
+
+ # Create rotation matrix to make bone1 point in the same direction as bone2
+ rot_mat = Matrix.Rotation(angle, 3, axis)
+
+ # Roll factor
+ x3 = x1 * rot_mat
+ dot = x2 * x3
+ if dot > 1.0:
+ dot = 1.0
+ elif dot < -1.0:
+ dot = -1.0
+ roll = acos(dot)
+
+ # Set the roll
+ bone1_e.roll = roll
+
+ # Check if we rolled in the right direction
+ x3 = bone1_e.x_axis * rot_mat
+ check = x2 * x3
+
+ # If not, reverse
+ if check < 0.9999:
+ bone1_e.roll = -roll
+
+
+class Rig:
+ """ An FK arm rig, with hinge switch.
+
+ """
+ def __init__(self, obj, bone, params):
+ """ Gather and validate data about the rig.
+ Store any data or references to data that will be needed later on.
+ In particular, store references to bones that will be needed, and
+ store names of bones that will be needed.
+ Do NOT change any data in the scene. This is a gathering phase only.
+
+ """
+ self.obj = obj
+ self.params = params
+
+ # Get the chain of 3 connected bones
+ self.org_bones = [bone] + connected_children_names(self.obj, bone)[:2]
+
+ if len(self.org_bones) != 3:
+ raise MetarigError("RIGIFY ERROR: Bone '%s': input to rig type must be a chain of 3 bones." % (strip_org(bone)))
+
+ # Get rig parameters
+ self.use_upper_arm_twist = params.use_upper_arm_twist
+ self.use_forearm_twist = params.use_forearm_twist
+
+ def generate(self):
+ """ Generate the rig.
+ Do NOT modify any of the original bones, except for adding constraints.
+ The main armature should be selected and active before this is called.
+
+ """
+ bpy.ops.object.mode_set(mode='EDIT')
+
+ # Create upper arm bones
+ if self.use_upper_arm_twist:
+ uarm1 = copy_bone(self.obj, self.org_bones[0], make_deformer_name(strip_org(self.org_bones[0] + ".01")))
+ uarm2 = copy_bone(self.obj, self.org_bones[0], make_deformer_name(strip_org(self.org_bones[0] + ".02")))
+ utip = copy_bone(self.obj, self.org_bones[0], make_mechanism_name(strip_org(self.org_bones[0] + ".tip")))
+ else:
+ uarm = copy_bone(self.obj, self.org_bones[0], make_deformer_name(strip_org(self.org_bones[0])))
+
+ # Create forearm bones
+ if self.use_forearm_twist:
+ farm1 = copy_bone(self.obj, self.org_bones[1], make_deformer_name(strip_org(self.org_bones[1] + ".01")))
+ farm2 = copy_bone(self.obj, self.org_bones[1], make_deformer_name(strip_org(self.org_bones[1] + ".02")))
+ ftip = copy_bone(self.obj, self.org_bones[1], make_mechanism_name(strip_org(self.org_bones[1] + ".tip")))
+ else:
+ farm = copy_bone(self.obj, self.org_bones[1], make_deformer_name(strip_org(self.org_bones[1])))
+
+ # Create hand bone
+ hand = copy_bone(self.obj, self.org_bones[2], make_deformer_name(strip_org(self.org_bones[2])))
+
+ # Get edit bones
+ eb = self.obj.data.edit_bones
+
+ org_uarm_e = eb[self.org_bones[0]]
+ if self.use_upper_arm_twist:
+ uarm1_e = eb[uarm1]
+ uarm2_e = eb[uarm2]
+ utip_e = eb[utip]
+ else:
+ uarm_e = eb[uarm]
+
+ org_farm_e = eb[self.org_bones[1]]
+ if self.use_forearm_twist:
+ farm1_e = eb[farm1]
+ farm2_e = eb[farm2]
+ ftip_e = eb[ftip]
+ else:
+ farm_e = eb[farm]
+
+ org_hand_e = eb[self.org_bones[2]]
+ hand_e = eb[hand]
+
+ # Parent and position upper arm bones
+ if self.use_upper_arm_twist:
+ uarm1_e.use_connect = False
+ uarm2_e.use_connect = False
+ utip_e.use_connect = False
+
+ uarm1_e.parent = org_uarm_e.parent
+ uarm2_e.parent = org_uarm_e
+ utip_e.parent = org_uarm_e
+
+ center = Vector((org_uarm_e.head + org_uarm_e.tail) / 2)
+
+ uarm1_e.tail = center
+ uarm2_e.head = center
+ put_bone(self.obj, utip, org_uarm_e.tail)
+ utip_e.length = org_uarm_e.length / 8
+ else:
+ uarm_e.use_connect = False
+ uarm_e.parent = org_uarm_e
+
+ # Parent and position forearm bones
+ if self.use_forearm_twist:
+ farm1_e.use_connect = False
+ farm2_e.use_connect = False
+ ftip_e.use_connect = False
+
+ farm1_e.parent = org_farm_e
+ farm2_e.parent = org_farm_e
+ ftip_e.parent = org_farm_e
+
+ center = Vector((org_farm_e.head + org_farm_e.tail) / 2)
+
+ farm1_e.tail = center
+ farm2_e.head = center
+ put_bone(self.obj, ftip, org_farm_e.tail)
+ ftip_e.length = org_farm_e.length / 8
+
+ # Align roll of farm2 with hand
+ align_roll(self.obj, farm2, hand)
+ else:
+ farm_e.use_connect = False
+ farm_e.parent = org_farm_e
+
+ # Parent hand
+ hand_e.use_connect = False
+ hand_e.parent = org_hand_e
+
+ # Object mode, get pose bones
+ bpy.ops.object.mode_set(mode='OBJECT')
+ pb = self.obj.pose.bones
+
+ if self.use_upper_arm_twist:
+ uarm1_p = pb[uarm1]
+ if self.use_forearm_twist:
+ farm2_p = pb[farm2]
+ hand_p = pb[hand]
+
+ # Upper arm constraints
+ if self.use_upper_arm_twist:
+ con = uarm1_p.constraints.new('COPY_LOCATION')
+ con.name = "copy_location"
+ con.target = self.obj
+ con.subtarget = self.org_bones[0]
+
+ con = uarm1_p.constraints.new('COPY_SCALE')
+ con.name = "copy_scale"
+ con.target = self.obj
+ con.subtarget = self.org_bones[0]
+
+ con = uarm1_p.constraints.new('DAMPED_TRACK')
+ con.name = "track_to"
+ con.target = self.obj
+ con.subtarget = utip
+
+ # Forearm constraints
+ if self.use_forearm_twist:
+ con = farm2_p.constraints.new('COPY_ROTATION')
+ con.name = "copy_rotation"
+ con.target = self.obj
+ con.subtarget = hand
+
+ con = farm2_p.constraints.new('DAMPED_TRACK')
+ con.name = "track_to"
+ con.target = self.obj
+ con.subtarget = ftip
+
diff --git a/rigify/rigs/biped/arm/fk.py b/rigify/rigs/biped/arm/fk.py
new file mode 100644
index 00000000..2c634b86
--- /dev/null
+++ b/rigify/rigs/biped/arm/fk.py
@@ -0,0 +1,217 @@
+#====================== BEGIN GPL LICENSE BLOCK ======================
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+#======================= END GPL LICENSE BLOCK ========================
+
+import bpy
+import math
+from rigify.utils import MetarigError
+from rigify.utils import copy_bone, flip_bone, put_bone
+from rigify.utils import connected_children_names
+from rigify.utils import strip_org, make_mechanism_name, make_deformer_name
+from rigify.utils import get_layers
+from rigify.utils import create_widget, create_limb_widget
+from rna_prop_ui import rna_idprop_ui_prop_get
+
+
+class Rig:
+ """ An FK arm rig, with hinge switch.
+
+ """
+ def __init__(self, obj, bone, params):
+ """ Gather and validate data about the rig.
+ Store any data or references to data that will be needed later on.
+ In particular, store references to bones that will be needed, and
+ store names of bones that will be needed.
+ Do NOT change any data in the scene. This is a gathering phase only.
+
+ """
+ self.obj = obj
+ self.params = params
+
+ # Get the chain of 3 connected bones
+ self.org_bones = [bone] + connected_children_names(self.obj, bone)[:2]
+
+ if len(self.org_bones) != 3:
+ raise MetarigError("RIGIFY ERROR: Bone '%s': input to rig type must be a chain of 3 bones." % (strip_org(bone)))
+
+ # Get (optional) parent
+ if self.obj.data.bones[bone].parent is None:
+ self.org_parent = None
+ else:
+ self.org_parent = self.obj.data.bones[bone].parent.name
+
+ # Get the rig parameters
+ if "layers" in params:
+ self.layers = get_layers(params["layers"])
+ else:
+ self.layers = None
+
+ self.primary_rotation_axis = params.primary_rotation_axis
+
+ def generate(self):
+ """ Generate the rig.
+ Do NOT modify any of the original bones, except for adding constraints.
+ The main armature should be selected and active before this is called.
+
+ """
+ bpy.ops.object.mode_set(mode='EDIT')
+
+ # Create the control bones
+ uarm = copy_bone(self.obj, self.org_bones[0], strip_org(self.org_bones[0]))
+ farm = copy_bone(self.obj, self.org_bones[1], strip_org(self.org_bones[1]))
+ hand = copy_bone(self.obj, self.org_bones[2], strip_org(self.org_bones[2]))
+
+ # Create the hinge bones
+ if self.org_parent != None:
+ hinge = copy_bone(self.obj, self.org_parent, make_mechanism_name(uarm + ".hinge"))
+ socket1 = copy_bone(self.obj, uarm, make_mechanism_name(uarm + ".socket1"))
+ socket2 = copy_bone(self.obj, uarm, make_mechanism_name(uarm + ".socket2"))
+
+ # Get edit bones
+ eb = self.obj.data.edit_bones
+
+ uarm_e = eb[uarm]
+ farm_e = eb[farm]
+ hand_e = eb[hand]
+
+ if self.org_parent != None:
+ hinge_e = eb[hinge]
+ socket1_e = eb[socket1]
+ socket2_e = eb[socket2]
+
+ # Parenting
+ farm_e.parent = uarm_e
+ hand_e.parent = farm_e
+
+ if self.org_parent != None:
+ hinge_e.use_connect = False
+ socket1_e.use_connect = False
+ socket2_e.use_connect = False
+
+ uarm_e.parent = hinge_e
+ hinge_e.parent = socket2_e
+ socket2_e.parent = None
+
+ # Positioning
+ if self.org_parent != None:
+ center = (hinge_e.head + hinge_e.tail) / 2
+ hinge_e.head = center
+ socket1_e.length /= 4
+ socket2_e.length /= 3
+
+ # Object mode, get pose bones
+ bpy.ops.object.mode_set(mode='OBJECT')
+ pb = self.obj.pose.bones
+
+ uarm_p = pb[uarm]
+ farm_p = pb[farm]
+ hand_p = pb[hand]
+ if self.org_parent != None:
+ hinge_p = pb[hinge]
+
+ if self.org_parent != None:
+ socket1_p = pb[socket1]
+ socket2_p = pb[socket2]
+
+ # Set the elbow to only bend on the x-axis.
+ farm_p.rotation_mode = 'XYZ'
+ if 'X' in self.primary_rotation_axis:
+ farm_p.lock_rotation = (False, True, True)
+ elif 'Y' in self.primary_rotation_axis:
+ farm_p.lock_rotation = (True, False, True)
+ else:
+ farm_p.lock_rotation = (True, True, False)
+
+ # Hinge transforms are locked, for auto-ik
+ if self.org_parent != None:
+ hinge_p.lock_location = True, True, True
+ hinge_p.lock_rotation = True, True, True
+ hinge_p.lock_rotation_w = True
+ hinge_p.lock_scale = True, True, True
+
+ # Set up custom properties
+ if self.org_parent != None:
+ prop = rna_idprop_ui_prop_get(uarm_p, "isolate", create=True)
+ uarm_p["isolate"] = 0.0
+ prop["soft_min"] = prop["min"] = 0.0
+ prop["soft_max"] = prop["max"] = 1.0
+
+ # Hinge constraints / drivers
+ if self.org_parent != None:
+ con = socket2_p.constraints.new('COPY_LOCATION')
+ con.name = "copy_location"
+ con.target = self.obj
+ con.subtarget = socket1
+
+ con = socket2_p.constraints.new('COPY_TRANSFORMS')
+ con.name = "isolate_off"
+ con.target = self.obj
+ con.subtarget = socket1
+
+ # Driver
+ fcurve = con.driver_add("influence")
+ driver = fcurve.driver
+ var = driver.variables.new()
+ driver.type = 'AVERAGE'
+ var.name = "var"
+ var.targets[0].id_type = 'OBJECT'
+ var.targets[0].id = self.obj
+ var.targets[0].data_path = uarm_p.path_from_id() + '["isolate"]'
+ mod = fcurve.modifiers[0]
+ mod.poly_order = 1
+ mod.coefficients[0] = 1.0
+ mod.coefficients[1] = -1.0
+
+ # Constrain org bones to controls
+ con = pb[self.org_bones[0]].constraints.new('COPY_TRANSFORMS')
+ con.name = "fk"
+ con.target = self.obj
+ con.subtarget = uarm
+
+ con = pb[self.org_bones[1]].constraints.new('COPY_TRANSFORMS')
+ con.name = "fk"
+ con.target = self.obj
+ con.subtarget = farm
+
+ con = pb[self.org_bones[2]].constraints.new('COPY_TRANSFORMS')
+ con.name = "fk"
+ con.target = self.obj
+ con.subtarget = hand
+
+ # Set layers if specified
+ if self.layers:
+ uarm_p.bone.layers = self.layers
+ farm_p.bone.layers = self.layers
+ hand_p.bone.layers = self.layers
+
+ # Create control widgets
+ create_limb_widget(self.obj, uarm)
+ create_limb_widget(self.obj, farm)
+
+ ob = create_widget(self.obj, hand)
+ if ob != None:
+ verts = [(0.7, 1.5, 0.0), (0.7, -0.25, 0.0), (-0.7, -0.25, 0.0), (-0.7, 1.5, 0.0), (0.7, 0.723, 0.0), (-0.7, 0.723, 0.0), (0.7, 0.0, 0.0), (-0.7, 0.0, 0.0)]
+ edges = [(1, 2), (0, 3), (0, 4), (3, 5), (4, 6), (1, 6), (5, 7), (2, 7)]
+ mesh = ob.data
+ mesh.from_pydata(verts, edges, [])
+ mesh.update()
+
+ mod = ob.modifiers.new("subsurf", 'SUBSURF')
+ mod.levels = 2
+
+ return [uarm, farm, hand]
+
diff --git a/rigify/rigs/biped/arm/ik.py b/rigify/rigs/biped/arm/ik.py
new file mode 100644
index 00000000..0ecf70e7
--- /dev/null
+++ b/rigify/rigs/biped/arm/ik.py
@@ -0,0 +1,339 @@
+#====================== BEGIN GPL LICENSE BLOCK ======================
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+#======================= END GPL LICENSE BLOCK ========================
+
+import bpy
+from mathutils import Vector
+from math import pi, acos
+from rigify.utils import MetarigError
+from rigify.utils import copy_bone
+from rigify.utils import connected_children_names
+from rigify.utils import strip_org, make_mechanism_name, insert_before_lr
+from rigify.utils import get_layers
+from rigify.utils import create_widget, create_line_widget, create_sphere_widget
+from rna_prop_ui import rna_idprop_ui_prop_get
+
+
+def angle_on_plane(plane, vec1, vec2):
+ """ Return the angle between two vectors projected onto a plane.
+ """
+ plane.normalize()
+ vec1 = vec1 - (plane * (vec1.dot(plane)))
+ vec2 = vec2 - (plane * (vec2.dot(plane)))
+ vec1.normalize()
+ vec2.normalize()
+
+ # Determine the angle
+ angle = acos(max(-1.0, min(1.0, vec1.dot(vec2))))
+
+ if angle < 0.00001: # close enough to zero that sign doesn't matter
+ return angle
+
+ # Determine the sign of the angle
+ vec3 = vec2.cross(vec1)
+ vec3.normalize()
+ sign = vec3.dot(plane)
+ if sign >= 0:
+ sign = 1
+ else:
+ sign = -1
+
+ return angle * sign
+
+
+class Rig:
+ """ An IK arm rig, with an optional ik/fk switch.
+
+ """
+ def __init__(self, obj, bone, params, ikfk_switch=False):
+ """ Gather and validate data about the rig.
+ Store any data or references to data that will be needed later on.
+ In particular, store references to bones that will be needed, and
+ store names of bones that will be needed.
+ Do NOT change any data in the scene. This is a gathering phase only.
+
+ ikfk_switch: if True, create an ik/fk switch slider
+ """
+ self.obj = obj
+ self.params = params
+ self.switch = ikfk_switch
+
+ # Get the chain of 3 connected bones
+ self.org_bones = [bone] + connected_children_names(self.obj, bone)[:2]
+
+ if len(self.org_bones) != 3:
+ raise MetarigError("RIGIFY ERROR: Bone '%s': input to rig type must be a chain of 3 bones." % (strip_org(bone)))
+
+ # Get the rig parameters
+ if params.separate_ik_layers:
+ self.layers = list(params.ik_layers)
+ else:
+ self.layers = None
+
+ self.bend_hint = params.bend_hint
+
+ self.primary_rotation_axis = params.primary_rotation_axis
+
+ def generate(self):
+ """ Generate the rig.
+ Do NOT modify any of the original bones, except for adding constraints.
+ The main armature should be selected and active before this is called.
+
+ """
+ bpy.ops.object.mode_set(mode='EDIT')
+
+ # Create the bones
+ uarm = copy_bone(self.obj, self.org_bones[0], make_mechanism_name(strip_org(insert_before_lr(self.org_bones[0], "_ik"))))
+ farm = copy_bone(self.obj, self.org_bones[1], make_mechanism_name(strip_org(insert_before_lr(self.org_bones[1], "_ik"))))
+
+ hand = copy_bone(self.obj, self.org_bones[2], strip_org(insert_before_lr(self.org_bones[2], "_ik")))
+ pole = copy_bone(self.obj, self.org_bones[0], strip_org(insert_before_lr(self.org_bones[0], "_pole")))
+
+ vishand = copy_bone(self.obj, self.org_bones[2], "VIS-" + strip_org(insert_before_lr(self.org_bones[2], "_ik")))
+ vispole = copy_bone(self.obj, self.org_bones[1], "VIS-" + strip_org(insert_before_lr(self.org_bones[0], "_pole")))
+
+ # Get edit bones
+ eb = self.obj.data.edit_bones
+
+ uarm_e = eb[uarm]
+ farm_e = eb[farm]
+ hand_e = eb[hand]
+ pole_e = eb[pole]
+ vishand_e = eb[vishand]
+ vispole_e = eb[vispole]
+
+ # Parenting
+ farm_e.parent = uarm_e
+
+ hand_e.use_connect = False
+ hand_e.parent = None
+
+ pole_e.use_connect = False
+
+ vishand_e.use_connect = False
+ vishand_e.parent = None
+
+ vispole_e.use_connect = False
+ vispole_e.parent = None
+
+ # Misc
+ hand_e.use_local_location = False
+
+ vishand_e.hide_select = True
+ vispole_e.hide_select = True
+
+ # Positioning
+ v1 = farm_e.tail - uarm_e.head
+ if 'X' in self.primary_rotation_axis or 'Y' in self.primary_rotation_axis:
+ v2 = v1.cross(farm_e.x_axis)
+ if (v2 * farm_e.z_axis) > 0.0:
+ v2 *= -1.0
+ else:
+ v2 = v1.cross(farm_e.z_axis)
+ if (v2 * farm_e.x_axis) < 0.0:
+ v2 *= -1.0
+ v2.normalize()
+ v2 *= v1.length
+
+ if '-' in self.primary_rotation_axis:
+ v2 *= -1
+
+ pole_e.head = farm_e.head + v2
+ pole_e.tail = pole_e.head + (Vector((0, 1, 0)) * (v1.length / 8))
+ pole_e.roll = 0.0
+
+ vishand_e.tail = vishand_e.head + Vector((0, 0, v1.length / 32))
+ vispole_e.tail = vispole_e.head + Vector((0, 0, v1.length / 32))
+
+ # Determine the pole offset value
+ plane = (farm_e.tail - uarm_e.head).normalized()
+ vec1 = uarm_e.x_axis.normalized()
+ vec2 = (pole_e.head - uarm_e.head).normalized()
+ pole_offset = angle_on_plane(plane, vec1, vec2)
+
+ # Object mode, get pose bones
+ bpy.ops.object.mode_set(mode='OBJECT')
+ pb = self.obj.pose.bones
+
+ uarm_p = pb[uarm]
+ farm_p = pb[farm]
+ hand_p = pb[hand]
+ pole_p = pb[pole]
+ vishand_p = pb[vishand]
+ vispole_p = pb[vispole]
+
+ # Set the elbow to only bend on the primary axis
+ if 'X' in self.primary_rotation_axis:
+ farm_p.lock_ik_y = True
+ farm_p.lock_ik_z = True
+ elif 'Y' in self.primary_rotation_axis:
+ farm_p.lock_ik_x = True
+ farm_p.lock_ik_z = True
+ else:
+ farm_p.lock_ik_x = True
+ farm_p.lock_ik_y = True
+
+ # Pole target only translates
+ pole_p.lock_location = False, False, False
+ pole_p.lock_rotation = True, True, True
+ pole_p.lock_rotation_w = True
+ pole_p.lock_scale = True, True, True
+
+ # Set up custom properties
+ if self.switch == True:
+ prop = rna_idprop_ui_prop_get(hand_p, "ikfk_switch", create=True)
+ hand_p["ikfk_switch"] = 0.0
+ prop["soft_min"] = prop["min"] = 0.0
+ prop["soft_max"] = prop["max"] = 1.0
+
+ # Bend direction hint
+ if self.bend_hint:
+ con = farm_p.constraints.new('LIMIT_ROTATION')
+ con.name = "bend_hint"
+ con.owner_space = 'LOCAL'
+ if self.primary_rotation_axis == 'X':
+ con.use_limit_x = True
+ con.min_x = pi / 10
+ con.max_x = pi / 10
+ elif self.primary_rotation_axis == '-X':
+ con.use_limit_x = True
+ con.min_x = -pi / 10
+ con.max_x = -pi / 10
+ elif self.primary_rotation_axis == 'Y':
+ con.use_limit_y = True
+ con.min_y = pi / 10
+ con.max_y = pi / 10
+ elif self.primary_rotation_axis == '-Y':
+ con.use_limit_y = True
+ con.min_y = -pi / 10
+ con.max_y = -pi / 10
+ elif self.primary_rotation_axis == 'Z':
+ con.use_limit_z = True
+ con.min_z = pi / 10
+ con.max_z = pi / 10
+ elif self.primary_rotation_axis == '-Z':
+ con.use_limit_z = True
+ con.min_z = -pi / 10
+ con.max_z = -pi / 10
+
+ # IK Constraint
+ con = farm_p.constraints.new('IK')
+ con.name = "ik"
+ con.target = self.obj
+ con.subtarget = hand
+ con.pole_target = self.obj
+ con.pole_subtarget = pole
+ con.pole_angle = pole_offset
+ con.chain_count = 2
+
+ # Constrain org bones to controls
+ con = pb[self.org_bones[0]].constraints.new('COPY_TRANSFORMS')
+ con.name = "ik"
+ con.target = self.obj
+ con.subtarget = uarm
+ if self.switch == True:
+ # IK/FK switch driver
+ fcurve = con.driver_add("influence")
+ driver = fcurve.driver
+ var = driver.variables.new()
+ driver.type = 'AVERAGE'
+ var.name = "var"
+ var.targets[0].id_type = 'OBJECT'
+ var.targets[0].id = self.obj
+ var.targets[0].data_path = hand_p.path_from_id() + '["ikfk_switch"]'
+
+ con = pb[self.org_bones[1]].constraints.new('COPY_TRANSFORMS')
+ con.name = "ik"
+ con.target = self.obj
+ con.subtarget = farm
+ if self.switch == True:
+ # IK/FK switch driver
+ fcurve = con.driver_add("influence")
+ driver = fcurve.driver
+ var = driver.variables.new()
+ driver.type = 'AVERAGE'
+ var.name = "var"
+ var.targets[0].id_type = 'OBJECT'
+ var.targets[0].id = self.obj
+ var.targets[0].data_path = hand_p.path_from_id() + '["ikfk_switch"]'
+
+ con = pb[self.org_bones[2]].constraints.new('COPY_TRANSFORMS')
+ con.name = "ik"
+ con.target = self.obj
+ con.subtarget = hand
+ if self.switch == True:
+ # IK/FK switch driver
+ fcurve = con.driver_add("influence")
+ driver = fcurve.driver
+ var = driver.variables.new()
+ driver.type = 'AVERAGE'
+ var.name = "var"
+ var.targets[0].id_type = 'OBJECT'
+ var.targets[0].id = self.obj
+ var.targets[0].data_path = hand_p.path_from_id() + '["ikfk_switch"]'
+
+ # VIS hand constraints
+ con = vishand_p.constraints.new('COPY_LOCATION')
+ con.name = "copy_loc"
+ con.target = self.obj
+ con.subtarget = self.org_bones[2]
+
+ con = vishand_p.constraints.new('STRETCH_TO')
+ con.name = "stretch_to"
+ con.target = self.obj
+ con.subtarget = hand
+ con.volume = 'NO_VOLUME'
+ con.rest_length = vishand_p.length
+
+ # VIS pole constraints
+ con = vispole_p.constraints.new('COPY_LOCATION')
+ con.name = "copy_loc"
+ con.target = self.obj
+ con.subtarget = self.org_bones[1]
+
+ con = vispole_p.constraints.new('STRETCH_TO')
+ con.name = "stretch_to"
+ con.target = self.obj
+ con.subtarget = pole
+ con.volume = 'NO_VOLUME'
+ con.rest_length = vispole_p.length
+
+ # Set layers if specified
+ if self.layers:
+ hand_p.bone.layers = self.layers
+ pole_p.bone.layers = self.layers
+ vishand_p.bone.layers = self.layers
+ vispole_p.bone.layers = self.layers
+
+ # Create widgets
+ create_line_widget(self.obj, vispole)
+ create_line_widget(self.obj, vishand)
+ create_sphere_widget(self.obj, pole)
+
+ ob = create_widget(self.obj, hand)
+ if ob != None:
+ verts = [(0.7, 1.5, 0.0), (0.7, -0.25, 0.0), (-0.7, -0.25, 0.0), (-0.7, 1.5, 0.0), (0.7, 0.723, 0.0), (-0.7, 0.723, 0.0), (0.7, 0.0, 0.0), (-0.7, 0.0, 0.0)]
+ edges = [(1, 2), (0, 3), (0, 4), (3, 5), (4, 6), (1, 6), (5, 7), (2, 7)]
+ mesh = ob.data
+ mesh.from_pydata(verts, edges, [])
+ mesh.update()
+
+ mod = ob.modifiers.new("subsurf", 'SUBSURF')
+ mod.levels = 2
+
+ return [uarm, farm, hand, pole]
+
diff --git a/rigify/rigs/biped/leg/__init__.py b/rigify/rigs/biped/leg/__init__.py
new file mode 100644
index 00000000..8f9286c7
--- /dev/null
+++ b/rigify/rigs/biped/leg/__init__.py
@@ -0,0 +1,272 @@
+#====================== BEGIN GPL LICENSE BLOCK ======================
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+#======================= END GPL LICENSE BLOCK ========================
+
+import bpy
+import imp
+from . import fk, ik, deform
+from rigify.utils import MetarigError, get_layers
+
+imp.reload(fk)
+imp.reload(ik)
+imp.reload(deform)
+
+# Template for the per-rig UI panel script.  Rig.generate() fills it in with
+# the names of the 4 FK controls followed by the 6 IK controls; the filled-in
+# text is returned to the generator, which runs it where rig_id, is_selected,
+# pose_bones and layout are in scope.
+script = """
+fk_leg = ["%s", "%s", "%s", "%s"]
+ik_leg = ["%s", "%s", "%s", "%s", "%s", "%s"]
+if is_selected(fk_leg+ik_leg):
+    layout.prop(pose_bones[ik_leg[2]], '["ikfk_switch"]', text="FK / IK (" + ik_leg[2] + ")", slider=True)
+if is_selected(fk_leg):
+    try:
+        pose_bones[fk_leg[0]]["isolate"]
+        layout.prop(pose_bones[fk_leg[0]], '["isolate"]', text="Isolate Rotation (" + fk_leg[0] + ")", slider=True)
+    except KeyError:
+        pass
+if is_selected(fk_leg+ik_leg):
+    p = layout.operator("pose.rigify_leg_fk2ik_" + rig_id, text="Snap FK->IK (" + fk_leg[0] + ")")
+    p.thigh_fk = fk_leg[0]
+    p.shin_fk = fk_leg[1]
+    p.foot_fk = fk_leg[2]
+    p.mfoot_fk = fk_leg[3]
+    p.thigh_ik = ik_leg[0]
+    p.shin_ik = ik_leg[1]
+    p.mfoot_ik = ik_leg[5]
+    p = layout.operator("pose.rigify_leg_ik2fk_" + rig_id, text="Snap IK->FK (" + fk_leg[0] + ")")
+    p.thigh_fk = fk_leg[0]
+    p.shin_fk = fk_leg[1]
+    p.mfoot_fk = fk_leg[3]
+    p.thigh_ik = ik_leg[0]
+    p.shin_ik = ik_leg[1]
+    p.foot_ik = ik_leg[2]
+    p.pole = ik_leg[3]
+    p.footroll = ik_leg[4]
+    p.mfoot_ik = ik_leg[5]
+"""
+
+
+class Rig:
+    """ A leg rig, with IK/FK switching, a hinge switch, and foot roll.
+
+        Composes the three sub-rigs defined in this package: deform, fk and ik.
+    """
+    def __init__(self, obj, bone, params):
+        """ Gather and validate data about the rig.
+            Store any data or references to data that will be needed later on.
+            In particular, store names of bones that will be needed.
+            Do NOT change any data in the scene. This is a gathering phase only.
+
+        """
+        # Gather deform rig
+        self.deform_rig = deform.Rig(obj, bone, params)
+
+        # Gather FK rig
+        self.fk_rig = fk.Rig(obj, bone, params)
+
+        # Gather IK rig
+        self.ik_rig = ik.Rig(obj, bone, params, ikfk_switch=True)
+
+    def generate(self):
+        """ Generate the rig.
+            Do NOT modify any of the original bones, except for adding constraints.
+            The main armature should be selected and active before this is called.
+
+        """
+        self.deform_rig.generate()
+        fk_controls = self.fk_rig.generate()
+        ik_controls = self.ik_rig.generate()
+        # Fill the module-level UI `script` template: 4 FK control names
+        # followed by 6 IK control names, in the order the template expects.
+        return [script % (fk_controls[0], fk_controls[1], fk_controls[2], fk_controls[3], ik_controls[0], ik_controls[1], ik_controls[2], ik_controls[3], ik_controls[4], ik_controls[5])]
+
+    @classmethod
+    def add_parameters(self, group):
+        """ Add the parameters of this rig type to the
+            RigifyParameters PropertyGroup
+
+        """
+        items = [('X', 'X', ''), ('Y', 'Y', ''), ('Z', 'Z', ''), ('-X', '-X', ''), ('-Y', '-Y', ''), ('-Z', '-Z', '')]
+        group.primary_rotation_axis = bpy.props.EnumProperty(items=items, name="Primary Rotation Axis", default='X')
+
+        group.bend_hint = bpy.props.BoolProperty(name="Bend Hint", default=True, description="Give IK chain a hint about which way to bend. Useful for perfectly straight chains.")
+
+        group.separate_ik_layers = bpy.props.BoolProperty(name="Separate IK Control Layers:", default=False, description="Enable putting the ik controls on a separate layer from the fk controls.")
+        group.ik_layers = bpy.props.BoolVectorProperty(size=32, description="Layers for the ik controls to be on.")
+
+        group.use_thigh_twist = bpy.props.BoolProperty(name="Thigh Twist", default=True, description="Generate the dual-bone twist setup for the thigh.")
+        group.use_shin_twist = bpy.props.BoolProperty(name="Shin Twist", default=True, description="Generate the dual-bone twist setup for the shin.")
+
+    @classmethod
+    def parameters_ui(self, layout, obj, bone):
+        """ Create the ui for the rig parameters.
+
+        """
+        params = obj.pose.bones[bone].rigify_parameters[0]
+
+        r = layout.row()
+        r.prop(params, "separate_ik_layers")
+
+        # The 32-layer grid below is only active when separate layers are enabled
+        r = layout.row()
+        r.active = params.separate_ik_layers
+
+        # Left block of the layer grid: layers 0-7 over 16-23
+        col = r.column(align=True)
+        row = col.row(align=True)
+        row.prop(params, "ik_layers", index=0, toggle=True, text="")
+        row.prop(params, "ik_layers", index=1, toggle=True, text="")
+        row.prop(params, "ik_layers", index=2, toggle=True, text="")
+        row.prop(params, "ik_layers", index=3, toggle=True, text="")
+        row.prop(params, "ik_layers", index=4, toggle=True, text="")
+        row.prop(params, "ik_layers", index=5, toggle=True, text="")
+        row.prop(params, "ik_layers", index=6, toggle=True, text="")
+        row.prop(params, "ik_layers", index=7, toggle=True, text="")
+        row = col.row(align=True)
+        row.prop(params, "ik_layers", index=16, toggle=True, text="")
+        row.prop(params, "ik_layers", index=17, toggle=True, text="")
+        row.prop(params, "ik_layers", index=18, toggle=True, text="")
+        row.prop(params, "ik_layers", index=19, toggle=True, text="")
+        row.prop(params, "ik_layers", index=20, toggle=True, text="")
+        row.prop(params, "ik_layers", index=21, toggle=True, text="")
+        row.prop(params, "ik_layers", index=22, toggle=True, text="")
+        row.prop(params, "ik_layers", index=23, toggle=True, text="")
+
+        # Right block of the layer grid: layers 8-15 over 24-31
+        col = r.column(align=True)
+        row = col.row(align=True)
+        row.prop(params, "ik_layers", index=8, toggle=True, text="")
+        row.prop(params, "ik_layers", index=9, toggle=True, text="")
+        row.prop(params, "ik_layers", index=10, toggle=True, text="")
+        row.prop(params, "ik_layers", index=11, toggle=True, text="")
+        row.prop(params, "ik_layers", index=12, toggle=True, text="")
+        row.prop(params, "ik_layers", index=13, toggle=True, text="")
+        row.prop(params, "ik_layers", index=14, toggle=True, text="")
+        row.prop(params, "ik_layers", index=15, toggle=True, text="")
+        row = col.row(align=True)
+        row.prop(params, "ik_layers", index=24, toggle=True, text="")
+        row.prop(params, "ik_layers", index=25, toggle=True, text="")
+        row.prop(params, "ik_layers", index=26, toggle=True, text="")
+        row.prop(params, "ik_layers", index=27, toggle=True, text="")
+        row.prop(params, "ik_layers", index=28, toggle=True, text="")
+        row.prop(params, "ik_layers", index=29, toggle=True, text="")
+        row.prop(params, "ik_layers", index=30, toggle=True, text="")
+        row.prop(params, "ik_layers", index=31, toggle=True, text="")
+
+        r = layout.row()
+        r.label(text="Knee rotation axis:")
+        r.prop(params, "primary_rotation_axis", text="")
+
+        r = layout.row()
+        r.prop(params, "bend_hint")
+
+        col = layout.column()
+        col.prop(params, "use_thigh_twist")
+        col.prop(params, "use_shin_twist")
+
+    @classmethod
+    def create_sample(self, obj):
+        # generated by rigify.utils.write_meta_rig
+        bpy.ops.object.mode_set(mode='EDIT')
+        arm = obj.data
+
+        bones = {}
+
+        bone = arm.edit_bones.new('thigh')
+        bone.head[:] = -0.0000, 0.0000, 1.0000
+        bone.tail[:] = -0.0000, -0.0500, 0.5000
+        bone.roll = -0.0000
+        bone.use_connect = False
+        bones['thigh'] = bone.name
+        bone = arm.edit_bones.new('shin')
+        bone.head[:] = -0.0000, -0.0500, 0.5000
+        bone.tail[:] = -0.0000, 0.0000, 0.1000
+        bone.roll = -0.0000
+        bone.use_connect = True
+        bone.parent = arm.edit_bones[bones['thigh']]
+        bones['shin'] = bone.name
+        bone = arm.edit_bones.new('foot')
+        bone.head[:] = -0.0000, 0.0000, 0.1000
+        bone.tail[:] = 0.0000, -0.1200, 0.0300
+        bone.roll = 0.0000
+        bone.use_connect = True
+        bone.parent = arm.edit_bones[bones['shin']]
+        bones['foot'] = bone.name
+        bone = arm.edit_bones.new('heel')
+        bone.head[:] = -0.0000, 0.0000, 0.1000
+        bone.tail[:] = -0.0000, 0.0600, 0.0000
+        bone.roll = -0.0000
+        bone.use_connect = True
+        bone.parent = arm.edit_bones[bones['shin']]
+        bones['heel'] = bone.name
+        bone = arm.edit_bones.new('heel.02')
+        bone.head[:] = -0.0500, -0.0200, 0.0000
+        bone.tail[:] = 0.0500, -0.0200, 0.0000
+        bone.roll = 0.0000
+        bone.use_connect = False
+        bone.parent = arm.edit_bones[bones['heel']]
+        bones['heel.02'] = bone.name
+        bone = arm.edit_bones.new('toe')
+        bone.head[:] = 0.0000, -0.1200, 0.0300
+        bone.tail[:] = 0.0000, -0.2000, 0.0300
+        bone.roll = 3.1416
+        bone.use_connect = True
+        bone.parent = arm.edit_bones[bones['foot']]
+        bones['toe'] = bone.name
+
+        bpy.ops.object.mode_set(mode='OBJECT')
+        pbone = obj.pose.bones[bones['thigh']]
+        pbone.rigify_type = 'biped.leg'
+        pbone.lock_location = (True, True, True)
+        pbone.lock_rotation = (False, False, False)
+        pbone.lock_rotation_w = False
+        pbone.lock_scale = (False, False, False)
+        pbone.rotation_mode = 'QUATERNION'
+        pbone.rigify_parameters.add()
+        pbone = obj.pose.bones[bones['shin']]
+        pbone.rigify_type = ''
+        pbone.lock_location = (False, False, False)
+        pbone.lock_rotation = (False, False, False)
+        pbone.lock_rotation_w = False
+        pbone.lock_scale = (False, False, False)
+        pbone.rotation_mode = 'QUATERNION'
+        pbone = obj.pose.bones[bones['foot']]
+        pbone.rigify_type = ''
+        pbone.lock_location = (False, False, False)
+        pbone.lock_rotation = (False, False, False)
+        pbone.lock_rotation_w = False
+        pbone.lock_scale = (False, False, False)
+        pbone.rotation_mode = 'QUATERNION'
+        pbone = obj.pose.bones[bones['heel']]
+        pbone.rigify_type = ''
+        pbone.lock_location = (False, False, False)
+        pbone.lock_rotation = (False, False, False)
+        pbone.lock_rotation_w = False
+        pbone.lock_scale = (False, False, False)
+        pbone.rotation_mode = 'QUATERNION'
+        pbone = obj.pose.bones[bones['toe']]
+        pbone.rigify_type = ''
+        pbone.lock_location = (False, False, False)
+        pbone.lock_rotation = (False, False, False)
+        pbone.lock_rotation_w = False
+        pbone.lock_scale = (False, False, False)
+        pbone.rotation_mode = 'QUATERNION'
+
+        # Select all the new bones, leaving the last one active
+        bpy.ops.object.mode_set(mode='EDIT')
+        for bone in arm.edit_bones:
+            bone.select = False
+            bone.select_head = False
+            bone.select_tail = False
+        for b in bones:
+            bone = arm.edit_bones[bones[b]]
+            bone.select = True
+            bone.select_head = True
+            bone.select_tail = True
+            arm.edit_bones.active = bone
+
diff --git a/rigify/rigs/biped/leg/deform.py b/rigify/rigs/biped/leg/deform.py
new file mode 100644
index 00000000..bb6b6f39
--- /dev/null
+++ b/rigify/rigs/biped/leg/deform.py
@@ -0,0 +1,263 @@
+#====================== BEGIN GPL LICENSE BLOCK ======================
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+#======================= END GPL LICENSE BLOCK ========================
+
+import bpy
+from math import acos, degrees
+from mathutils import Vector, Matrix
+from rigify.utils import MetarigError
+from rigify.utils import copy_bone, flip_bone, put_bone
+from rigify.utils import connected_children_names, has_connected_children
+from rigify.utils import strip_org, make_mechanism_name, make_deformer_name
+
+
+def align_roll(obj, bone1, bone2):
+    """ Set bone1's roll so that, after rotating bone1's y-axis onto
+        bone2's, the two bones' x-axes line up as closely as possible.
+        Must be called while obj's armature is in edit mode.
+
+    """
+    bone1_e = obj.data.edit_bones[bone1]
+    bone2_e = obj.data.edit_bones[bone2]
+
+    # Start from zero so the roll computed below is absolute
+    bone1_e.roll = 0.0
+
+    # Get the directions the bones are pointing in, as vectors
+    y1 = bone1_e.y_axis
+    x1 = bone1_e.x_axis
+    y2 = bone2_e.y_axis
+    x2 = bone2_e.x_axis
+
+    # Get the shortest axis to rotate bone1 on to point in the same direction as bone2
+    # NOTE(review): if the bones point the same way, y1.cross(y2) is the zero
+    # vector and cannot be normalized -- confirm callers avoid parallel bones.
+    axis = y1.cross(y2)
+    axis.normalize()
+
+    # Angle to rotate on that shortest axis
+    angle = y1.angle(y2)
+
+    # Create rotation matrix to make bone1 point in the same direction as bone2
+    rot_mat = Matrix.Rotation(angle, 3, axis)
+
+    # Roll factor: angle between bone2's x-axis and bone1's rotated x-axis
+    x3 = x1 * rot_mat
+    dot = x2 * x3
+    # Clamp into acos's valid domain to guard against float error
+    if dot > 1.0:
+        dot = 1.0
+    elif dot < -1.0:
+        dot = -1.0
+    roll = acos(dot)
+
+    # Set the roll
+    bone1_e.roll = roll
+
+    # Check if we rolled in the right direction
+    x3 = bone1_e.x_axis * rot_mat
+    check = x2 * x3
+
+    # If not, reverse
+    if check < 0.9999:
+        bone1_e.roll = -roll
+
+
+class Rig:
+    """ A leg deform-bone setup.
+
+    """
+    def __init__(self, obj, bone, params):
+        """ Gather and validate data about the rig.
+            Store any data or references to data that will be needed later on.
+            In particular, store references to bones that will be needed, and
+            store names of bones that will be needed.
+            Do NOT change any data in the scene. This is a gathering phase only.
+
+        """
+        self.obj = obj
+        self.params = params
+
+        # Get the chain of 2 connected bones
+        leg_bones = [bone] + connected_children_names(self.obj, bone)[:2]
+
+        if len(leg_bones) != 2:
+            raise MetarigError("RIGIFY ERROR: Bone '%s': incorrect bone configuration for rig type." % (strip_org(bone)))
+
+        # Get the foot and heel: the connected child with connected children
+        # of its own is the foot; the other connected child is the heel.
+        foot = None
+        heel = None
+        for b in self.obj.data.bones[leg_bones[1]].children:
+            if b.use_connect == True:
+                if len(b.children) >= 1 and has_connected_children(b):
+                    foot = b.name
+                else:
+                    heel = b.name
+
+        if foot is None or heel is None:
+            raise MetarigError("RIGIFY ERROR: Bone '%s': incorrect bone configuration for rig type." % (strip_org(bone)))
+
+        # Get the toe
+        toe = None
+        for b in self.obj.data.bones[foot].children:
+            if b.use_connect == True:
+                toe = b.name
+
+        if toe is None:
+            raise MetarigError("RIGIFY ERROR: Bone '%s': incorrect bone configuration for rig type." % (strip_org(bone)))
+
+        self.org_bones = leg_bones + [foot, toe, heel]
+
+        # Get rig parameters
+        self.use_thigh_twist = params.use_thigh_twist
+        self.use_shin_twist = params.use_shin_twist
+
+    def generate(self):
+        """ Generate the rig.
+            Do NOT modify any of the original bones, except for adding constraints.
+            The main armature should be selected and active before this is called.
+
+        """
+        bpy.ops.object.mode_set(mode='EDIT')
+
+        # Create thigh bones (two half-bones plus a tip target when twist is on)
+        # NOTE(review): the utip/stip names appear carried over from the arm
+        # version of this rig; here they mark the thigh/shin tips.
+        if self.use_thigh_twist:
+            thigh1 = copy_bone(self.obj, self.org_bones[0], make_deformer_name(strip_org(self.org_bones[0] + ".01")))
+            thigh2 = copy_bone(self.obj, self.org_bones[0], make_deformer_name(strip_org(self.org_bones[0] + ".02")))
+            utip = copy_bone(self.obj, self.org_bones[0], make_mechanism_name(strip_org(self.org_bones[0] + ".tip")))
+        else:
+            thigh = copy_bone(self.obj, self.org_bones[0], make_deformer_name(strip_org(self.org_bones[0])))
+
+        # Create shin bones (two half-bones plus a tip target when twist is on)
+        if self.use_shin_twist:
+            shin1 = copy_bone(self.obj, self.org_bones[1], make_deformer_name(strip_org(self.org_bones[1] + ".01")))
+            shin2 = copy_bone(self.obj, self.org_bones[1], make_deformer_name(strip_org(self.org_bones[1] + ".02")))
+            stip = copy_bone(self.obj, self.org_bones[1], make_mechanism_name(strip_org(self.org_bones[1] + ".tip")))
+        else:
+            shin = copy_bone(self.obj, self.org_bones[1], make_deformer_name(strip_org(self.org_bones[1])))
+
+        # Create foot bone
+        foot = copy_bone(self.obj, self.org_bones[2], make_deformer_name(strip_org(self.org_bones[2])))
+
+        # Create toe bone
+        toe = copy_bone(self.obj, self.org_bones[3], make_deformer_name(strip_org(self.org_bones[3])))
+
+        # Get edit bones
+        eb = self.obj.data.edit_bones
+
+        org_thigh_e = eb[self.org_bones[0]]
+        if self.use_thigh_twist:
+            thigh1_e = eb[thigh1]
+            thigh2_e = eb[thigh2]
+            utip_e = eb[utip]
+        else:
+            thigh_e = eb[thigh]
+
+        org_shin_e = eb[self.org_bones[1]]
+        if self.use_shin_twist:
+            shin1_e = eb[shin1]
+            shin2_e = eb[shin2]
+            stip_e = eb[stip]
+        else:
+            shin_e = eb[shin]
+
+        org_foot_e = eb[self.org_bones[2]]
+        foot_e = eb[foot]
+
+        org_toe_e = eb[self.org_bones[3]]
+        toe_e = eb[toe]
+
+        # Parent and position thigh bones
+        if self.use_thigh_twist:
+            thigh1_e.use_connect = False
+            thigh2_e.use_connect = False
+            utip_e.use_connect = False
+
+            thigh1_e.parent = org_thigh_e.parent
+            thigh2_e.parent = org_thigh_e
+            utip_e.parent = org_thigh_e
+
+            # Split the thigh at its midpoint into the two half-bones
+            center = Vector((org_thigh_e.head + org_thigh_e.tail) / 2)
+
+            thigh1_e.tail = center
+            thigh2_e.head = center
+            put_bone(self.obj, utip, org_thigh_e.tail)
+            utip_e.length = org_thigh_e.length / 8
+        else:
+            thigh_e.use_connect = False
+            thigh_e.parent = org_thigh_e
+
+        # Parent and position shin bones
+        if self.use_shin_twist:
+            shin1_e.use_connect = False
+            shin2_e.use_connect = False
+            stip_e.use_connect = False
+
+            shin1_e.parent = org_shin_e
+            shin2_e.parent = org_shin_e
+            stip_e.parent = org_shin_e
+
+            # Split the shin at its midpoint into the two half-bones
+            center = Vector((org_shin_e.head + org_shin_e.tail) / 2)
+
+            shin1_e.tail = center
+            shin2_e.head = center
+            put_bone(self.obj, stip, org_shin_e.tail)
+            stip_e.length = org_shin_e.length / 8
+
+            # Align roll of shin2 with foot
+            align_roll(self.obj, shin2, foot)
+        else:
+            shin_e.use_connect = False
+            shin_e.parent = org_shin_e
+
+        # Parent foot
+        foot_e.use_connect = False
+        foot_e.parent = org_foot_e
+
+        # Parent toe
+        toe_e.use_connect = False
+        toe_e.parent = org_toe_e
+
+        # Object mode, get pose bones
+        bpy.ops.object.mode_set(mode='OBJECT')
+        pb = self.obj.pose.bones
+
+        if self.use_thigh_twist:
+            thigh1_p = pb[thigh1]
+        if self.use_shin_twist:
+            shin2_p = pb[shin2]
+        # NOTE(review): foot_p is looked up but never used below.
+        foot_p = pb[foot]
+
+        # Thigh constraints
+        if self.use_thigh_twist:
+            con = thigh1_p.constraints.new('COPY_LOCATION')
+            con.name = "copy_location"
+            con.target = self.obj
+            con.subtarget = self.org_bones[0]
+
+            con = thigh1_p.constraints.new('COPY_SCALE')
+            con.name = "copy_scale"
+            con.target = self.obj
+            con.subtarget = self.org_bones[0]
+
+            con = thigh1_p.constraints.new('DAMPED_TRACK')
+            con.name = "track_to"
+            con.target = self.obj
+            con.subtarget = utip
+
+        # Shin constraints
+        if self.use_shin_twist:
+            con = shin2_p.constraints.new('COPY_ROTATION')
+            con.name = "copy_rotation"
+            con.target = self.obj
+            con.subtarget = foot
+
+            con = shin2_p.constraints.new('DAMPED_TRACK')
+            con.name = "track_to"
+            con.target = self.obj
+            con.subtarget = stip
diff --git a/rigify/rigs/biped/leg/fk.py b/rigify/rigs/biped/leg/fk.py
new file mode 100644
index 00000000..f2ce6653
--- /dev/null
+++ b/rigify/rigs/biped/leg/fk.py
@@ -0,0 +1,255 @@
+#====================== BEGIN GPL LICENSE BLOCK ======================
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+#======================= END GPL LICENSE BLOCK ========================
+
+import bpy
+import math
+from mathutils import Vector
+from rigify.utils import MetarigError
+from rigify.utils import copy_bone, flip_bone, put_bone
+from rigify.utils import connected_children_names, has_connected_children
+from rigify.utils import strip_org, make_mechanism_name, make_deformer_name
+from rigify.utils import get_layers
+from rigify.utils import create_widget, create_limb_widget
+from rna_prop_ui import rna_idprop_ui_prop_get
+
+
+class Rig:
+    """ An FK leg rig, with hinge switch.
+
+    """
+    def __init__(self, obj, bone, params):
+        """ Gather and validate data about the rig.
+            Store any data or references to data that will be needed later on.
+            In particular, store references to bones that will be needed, and
+            store names of bones that will be needed.
+            Do NOT change any data in the scene. This is a gathering phase only.
+
+        """
+        self.obj = obj
+        self.params = params
+
+        # Get the chain of 2 connected bones
+        leg_bones = [bone] + connected_children_names(self.obj, bone)[:2]
+
+        if len(leg_bones) != 2:
+            raise MetarigError("RIGIFY ERROR: Bone '%s': incorrect bone configuration for rig type." % (strip_org(bone)))
+
+        # Get the foot and heel: the connected child with connected children
+        # of its own is the foot; the other connected child is the heel.
+        foot = None
+        heel = None
+        for b in self.obj.data.bones[leg_bones[1]].children:
+            if b.use_connect == True:
+                if len(b.children) >= 1 and has_connected_children(b):
+                    foot = b.name
+                else:
+                    heel = b.name
+
+        if foot is None or heel is None:
+            raise MetarigError("RIGIFY ERROR: Bone '%s': incorrect bone configuration for rig type." % (strip_org(bone)))
+
+        # Get the toe
+        toe = None
+        for b in self.obj.data.bones[foot].children:
+            if b.use_connect == True:
+                toe = b.name
+
+        # A toe is required for this rig type
+        if toe is None:
+            raise MetarigError("RIGIFY ERROR: Bone '%s': incorrect bone configuration for rig type." % (strip_org(bone)))
+
+        self.org_bones = leg_bones + [foot, toe, heel]
+
+        # Get (optional) parent
+        if self.obj.data.bones[bone].parent is None:
+            self.org_parent = None
+        else:
+            self.org_parent = self.obj.data.bones[bone].parent.name
+
+        # Get rig parameters
+        if "layers" in params:
+            self.layers = get_layers(params["layers"])
+        else:
+            self.layers = None
+
+        self.primary_rotation_axis = params.primary_rotation_axis
+
+    def generate(self):
+        """ Generate the rig.
+            Do NOT modify any of the original bones, except for adding constraints.
+            The main armature should be selected and active before this is called.
+
+        """
+        bpy.ops.object.mode_set(mode='EDIT')
+
+        # Create the control bones
+        thigh = copy_bone(self.obj, self.org_bones[0], strip_org(self.org_bones[0]))
+        shin = copy_bone(self.obj, self.org_bones[1], strip_org(self.org_bones[1]))
+        foot = copy_bone(self.obj, self.org_bones[2], strip_org(self.org_bones[2]))
+
+        # Create the foot mechanism bone
+        foot_mch = copy_bone(self.obj, self.org_bones[2], make_mechanism_name(strip_org(self.org_bones[2])))
+
+        # Create the hinge bones (only when the thigh has a parent to hinge from)
+        if self.org_parent != None:
+            hinge = copy_bone(self.obj, self.org_parent, make_mechanism_name(thigh + ".hinge"))
+            socket1 = copy_bone(self.obj, thigh, make_mechanism_name(thigh + ".socket1"))
+            socket2 = copy_bone(self.obj, thigh, make_mechanism_name(thigh + ".socket2"))
+
+        # Get edit bones
+        eb = self.obj.data.edit_bones
+
+        thigh_e = eb[thigh]
+        shin_e = eb[shin]
+        foot_e = eb[foot]
+        foot_mch_e = eb[foot_mch]
+
+        if self.org_parent != None:
+            hinge_e = eb[hinge]
+            socket1_e = eb[socket1]
+            socket2_e = eb[socket2]
+
+        # Parenting
+        shin_e.parent = thigh_e
+        foot_e.parent = shin_e
+
+        foot_mch_e.use_connect = False
+        foot_mch_e.parent = foot_e
+
+        if self.org_parent != None:
+            hinge_e.use_connect = False
+            socket1_e.use_connect = False
+            socket2_e.use_connect = False
+
+            thigh_e.parent = hinge_e
+            hinge_e.parent = socket2_e
+            socket2_e.parent = None
+
+        # Positioning: aim the foot control along the toe's direction
+        vec = Vector(eb[self.org_bones[3]].vector)
+        vec.normalize()
+        foot_e.tail = foot_e.head + (vec * foot_e.length)
+        foot_e.roll = eb[self.org_bones[3]].roll
+
+        if self.org_parent != None:
+            center = (hinge_e.head + hinge_e.tail) / 2
+            hinge_e.head = center
+            socket1_e.length /= 4
+            socket2_e.length /= 3
+
+        # Object mode, get pose bones
+        bpy.ops.object.mode_set(mode='OBJECT')
+        pb = self.obj.pose.bones
+
+        thigh_p = pb[thigh]
+        shin_p = pb[shin]
+        foot_p = pb[foot]
+        if self.org_parent != None:
+            hinge_p = pb[hinge]
+
+        if self.org_parent != None:
+            socket1_p = pb[socket1]
+            socket2_p = pb[socket2]
+
+        # Lock the knee to bend only on the chosen primary rotation axis.
+        shin_p.rotation_mode = 'XYZ'
+        if 'X' in self.primary_rotation_axis:
+            shin_p.lock_rotation = (False, True, True)
+        elif 'Y' in self.primary_rotation_axis:
+            shin_p.lock_rotation = (True, False, True)
+        else:
+            shin_p.lock_rotation = (True, True, False)
+
+        # Hinge transforms are locked, for auto-ik
+        if self.org_parent != None:
+            hinge_p.lock_location = True, True, True
+            hinge_p.lock_rotation = True, True, True
+            hinge_p.lock_rotation_w = True
+            hinge_p.lock_scale = True, True, True
+
+        # Set up custom properties
+        if self.org_parent != None:
+            prop = rna_idprop_ui_prop_get(thigh_p, "isolate", create=True)
+            thigh_p["isolate"] = 0.0
+            prop["soft_min"] = prop["min"] = 0.0
+            prop["soft_max"] = prop["max"] = 1.0
+
+        # Hinge constraints / drivers
+        if self.org_parent != None:
+            con = socket2_p.constraints.new('COPY_LOCATION')
+            con.name = "copy_location"
+            con.target = self.obj
+            con.subtarget = socket1
+
+            con = socket2_p.constraints.new('COPY_TRANSFORMS')
+            con.name = "isolate_off"
+            con.target = self.obj
+            con.subtarget = socket1
+
+            # Driver
+            fcurve = con.driver_add("influence")
+            driver = fcurve.driver
+            var = driver.variables.new()
+            driver.type = 'AVERAGE'
+            var.name = "var"
+            var.targets[0].id_type = 'OBJECT'
+            var.targets[0].id = self.obj
+            var.targets[0].data_path = thigh_p.path_from_id() + '["isolate"]'
+            # Invert via the generator modifier: influence = 1.0 - isolate
+            mod = fcurve.modifiers[0]
+            mod.poly_order = 1
+            mod.coefficients[0] = 1.0
+            mod.coefficients[1] = -1.0
+
+        # Constrain org bones to controls
+        con = pb[self.org_bones[0]].constraints.new('COPY_TRANSFORMS')
+        con.name = "fk"
+        con.target = self.obj
+        con.subtarget = thigh
+
+        con = pb[self.org_bones[1]].constraints.new('COPY_TRANSFORMS')
+        con.name = "fk"
+        con.target = self.obj
+        con.subtarget = shin
+
+        con = pb[self.org_bones[2]].constraints.new('COPY_TRANSFORMS')
+        con.name = "fk"
+        con.target = self.obj
+        con.subtarget = foot_mch
+
+        # Set layers if specified
+        if self.layers:
+            thigh_p.bone.layers = self.layers
+            shin_p.bone.layers = self.layers
+            foot_p.bone.layers = self.layers
+
+        # Create control widgets
+        create_limb_widget(self.obj, thigh)
+        create_limb_widget(self.obj, shin)
+
+        # Custom foot widget: a subdivided rectangle outline
+        ob = create_widget(self.obj, foot)
+        if ob != None:
+            verts = [(0.7, 1.5, 0.0), (0.7, -0.25, 0.0), (-0.7, -0.25, 0.0), (-0.7, 1.5, 0.0), (0.7, 0.723, 0.0), (-0.7, 0.723, 0.0), (0.7, 0.0, 0.0), (-0.7, 0.0, 0.0)]
+            edges = [(1, 2), (0, 3), (0, 4), (3, 5), (4, 6), (1, 6), (5, 7), (2, 7)]
+            mesh = ob.data
+            mesh.from_pydata(verts, edges, [])
+            mesh.update()
+
+            mod = ob.modifiers.new("subsurf", 'SUBSURF')
+            mod.levels = 2
+
+        return [thigh, shin, foot, foot_mch]
+
diff --git a/rigify/rigs/biped/leg/ik.py b/rigify/rigs/biped/leg/ik.py
new file mode 100644
index 00000000..54869b8e
--- /dev/null
+++ b/rigify/rigs/biped/leg/ik.py
@@ -0,0 +1,608 @@
+#====================== BEGIN GPL LICENSE BLOCK ======================
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+#======================= END GPL LICENSE BLOCK ========================
+
+import bpy
+from mathutils import Vector
+from math import pi, acos
+from rigify.utils import MetarigError
+from rigify.utils import copy_bone, flip_bone, put_bone
+from rigify.utils import connected_children_names, has_connected_children
+from rigify.utils import strip_org, make_mechanism_name, insert_before_lr
+from rigify.utils import get_layers
+from rigify.utils import create_widget, create_line_widget, create_sphere_widget, create_circle_widget
+from rna_prop_ui import rna_idprop_ui_prop_get
+
+
+def align_x_axis(obj, bone, vec):
+    """ Aligns the x-axis of a bone to the given vector. This only works if it
+        can be an exact match.
+        Must be in edit mode.
+
+    """
+    vec.normalize()
+    bone_e = obj.data.edit_bones[bone]
+    # Clamp the dot product to [-1, 1] so acos() never raises a domain
+    # error from floating-point drift.
+    dot = max(-1.0, min(1.0, bone_e.x_axis.dot(vec)))
+    angle = acos(dot)
+
+    # The angle alone does not say which direction to roll, so try both
+    # directions and keep whichever brings the x-axis closest to vec.
+    bone_e.roll += angle
+
+    dot1 = bone_e.x_axis.dot(vec)
+
+    bone_e.roll -= angle * 2
+
+    dot2 = bone_e.x_axis.dot(vec)
+
+    if dot1 > dot2:
+        bone_e.roll += angle * 2
+
+
+def angle_on_plane(plane, vec1, vec2):
+ """ Return the angle between two vectors projected onto a plane.
+ """
+ plane.normalize()
+ vec1 = vec1 - (plane * (vec1.dot(plane)))
+ vec2 = vec2 - (plane * (vec2.dot(plane)))
+ vec1.normalize()
+ vec2.normalize()
+
+ # Determine the angle
+ angle = acos(max(-1.0, min(1.0, vec1.dot(vec2))))
+
+ if angle < 0.00001: # close enough to zero that sign doesn't matter
+ return angle
+
+ # Determine the sign of the angle
+ vec3 = vec2.cross(vec1)
+ vec3.normalize()
+ sign = vec3.dot(plane)
+ if sign >= 0:
+ sign = 1
+ else:
+ sign = -1
+
+ return angle * sign
+
+
+class Rig:
+ """ An IK leg rig, with an optional ik/fk switch.
+
+ """
+ def __init__(self, obj, bone, params, ikfk_switch=False):
+ """ Gather and validate data about the rig.
+ Store any data or references to data that will be needed later on.
+ In particular, store references to bones that will be needed, and
+ store names of bones that will be needed.
+ Do NOT change any data in the scene. This is a gathering phase only.
+ """
+ self.obj = obj
+ self.params = params
+ self.switch = ikfk_switch
+
+ # Get the chain of 2 connected bones
+ leg_bones = [bone] + connected_children_names(self.obj, bone)[:2]
+
+ if len(leg_bones) != 2:
+ raise MetarigError("RIGIFY ERROR: Bone '%s': incorrect bone configuration for rig type." % (strip_org(bone)))
+
+ # Get the foot and heel
+ foot = None
+ heel = None
+ rocker = None
+ for b in self.obj.data.bones[leg_bones[1]].children:
+ if b.use_connect == True:
+ if len(b.children) >= 1 and has_connected_children(b):
+ foot = b.name
+ else:
+ heel = b.name
+ if len(b.children) > 0:
+ rocker = b.children[0].name
+
+ if foot is None or heel is None:
+ print("blah")
+ raise MetarigError("RIGIFY ERROR: Bone '%s': incorrect bone configuration for rig type." % (strip_org(bone)))
+
+ # Get the toe
+ toe = None
+ for b in self.obj.data.bones[foot].children:
+ if b.use_connect == True:
+ toe = b.name
+
+ # Get toe
+ if toe is None:
+ raise MetarigError("RIGIFY ERROR: Bone '%s': incorrect bone configuration for rig type." % (strip_org(bone)))
+
+ self.org_bones = leg_bones + [foot, toe, heel, rocker]
+
+ # Get rig parameters
+ if params.separate_ik_layers:
+ self.layers = list(params.ik_layers)
+ else:
+ self.layers = None
+
+ self.bend_hint = params.bend_hint
+
+ self.primary_rotation_axis = params.primary_rotation_axis
+
+ def generate(self):
+ """ Generate the rig.
+ Do NOT modify any of the original bones, except for adding constraints.
+ The main armature should be selected and active before this is called.
+
+ """
+ bpy.ops.object.mode_set(mode='EDIT')
+
+ make_rocker = False
+ if self.org_bones[5] is not None:
+ make_rocker = True
+
+ # Create the bones
+ thigh = copy_bone(self.obj, self.org_bones[0], make_mechanism_name(strip_org(insert_before_lr(self.org_bones[0], "_ik"))))
+ shin = copy_bone(self.obj, self.org_bones[1], make_mechanism_name(strip_org(insert_before_lr(self.org_bones[1], "_ik"))))
+
+ foot = copy_bone(self.obj, self.org_bones[2], strip_org(insert_before_lr(self.org_bones[2], "_ik")))
+ foot_ik_target = copy_bone(self.obj, self.org_bones[2], make_mechanism_name(strip_org(insert_before_lr(self.org_bones[2], "_ik_target"))))
+ pole = copy_bone(self.obj, self.org_bones[0], strip_org(insert_before_lr(self.org_bones[0], "_pole")))
+
+ toe = copy_bone(self.obj, self.org_bones[3], strip_org(self.org_bones[3]))
+ toe_parent = copy_bone(self.obj, self.org_bones[2], make_mechanism_name(strip_org(self.org_bones[3] + ".parent")))
+ toe_parent_socket1 = copy_bone(self.obj, self.org_bones[2], make_mechanism_name(strip_org(self.org_bones[3] + ".socket1")))
+ toe_parent_socket2 = copy_bone(self.obj, self.org_bones[2], make_mechanism_name(strip_org(self.org_bones[3] + ".socket2")))
+
+ foot_roll = copy_bone(self.obj, self.org_bones[4], strip_org(insert_before_lr(self.org_bones[2], "_roll")))
+ roll1 = copy_bone(self.obj, self.org_bones[4], make_mechanism_name(strip_org(self.org_bones[2] + ".roll.01")))
+ roll2 = copy_bone(self.obj, self.org_bones[4], make_mechanism_name(strip_org(self.org_bones[2] + ".roll.02")))
+
+ if make_rocker:
+ rocker1 = copy_bone(self.obj, self.org_bones[5], make_mechanism_name(strip_org(self.org_bones[2] + ".rocker.01")))
+ rocker2 = copy_bone(self.obj, self.org_bones[5], make_mechanism_name(strip_org(self.org_bones[2] + ".rocker.02")))
+
+ visfoot = copy_bone(self.obj, self.org_bones[2], "VIS-" + strip_org(insert_before_lr(self.org_bones[2], "_ik")))
+ vispole = copy_bone(self.obj, self.org_bones[1], "VIS-" + strip_org(insert_before_lr(self.org_bones[0], "_pole")))
+
+ # Get edit bones
+ eb = self.obj.data.edit_bones
+
+ org_foot_e = eb[self.org_bones[2]]
+ thigh_e = eb[thigh]
+ shin_e = eb[shin]
+ foot_e = eb[foot]
+ foot_ik_target_e = eb[foot_ik_target]
+ pole_e = eb[pole]
+ toe_e = eb[toe]
+ toe_parent_e = eb[toe_parent]
+ toe_parent_socket1_e = eb[toe_parent_socket1]
+ toe_parent_socket2_e = eb[toe_parent_socket2]
+ foot_roll_e = eb[foot_roll]
+ roll1_e = eb[roll1]
+ roll2_e = eb[roll2]
+ if make_rocker:
+ rocker1_e = eb[rocker1]
+ rocker2_e = eb[rocker2]
+ visfoot_e = eb[visfoot]
+ vispole_e = eb[vispole]
+
+ # Parenting
+ shin_e.parent = thigh_e
+
+ foot_e.use_connect = False
+ foot_e.parent = None
+ foot_ik_target_e.use_connect = False
+ foot_ik_target_e.parent = roll2_e
+
+ pole_e.use_connect = False
+ pole_e.parent = foot_e
+
+ toe_e.parent = toe_parent_e
+ toe_parent_e.use_connect = False
+ toe_parent_e.parent = toe_parent_socket1_e
+ toe_parent_socket1_e.use_connect = False
+ toe_parent_socket1_e.parent = roll1_e
+ toe_parent_socket2_e.use_connect = False
+ toe_parent_socket2_e.parent = eb[self.org_bones[2]]
+
+ foot_roll_e.use_connect = False
+ foot_roll_e.parent = foot_e
+
+ roll1_e.use_connect = False
+ roll1_e.parent = foot_e
+
+ roll2_e.use_connect = False
+ roll2_e.parent = roll1_e
+
+ visfoot_e.use_connect = False
+ visfoot_e.parent = None
+
+ vispole_e.use_connect = False
+ vispole_e.parent = None
+
+ if make_rocker:
+ rocker1_e.use_connect = False
+ rocker2_e.use_connect = False
+
+ roll1_e.parent = rocker2_e
+ rocker2_e.parent = rocker1_e
+ rocker1_e.parent = foot_e
+
+ # Misc
+ foot_e.use_local_location = False
+
+ visfoot_e.hide_select = True
+ vispole_e.hide_select = True
+
+ # Positioning
+ vec = Vector(toe_e.vector)
+ vec.normalize()
+ foot_e.tail = foot_e.head + (vec * foot_e.length)
+ foot_e.roll = toe_e.roll
+
+ v1 = shin_e.tail - thigh_e.head
+
+ if 'X' in self.primary_rotation_axis or 'Y' in self.primary_rotation_axis:
+ v2 = v1.cross(shin_e.x_axis)
+ if (v2 * shin_e.z_axis) > 0.0:
+ v2 *= -1.0
+ else:
+ v2 = v1.cross(shin_e.z_axis)
+ if (v2 * shin_e.x_axis) < 0.0:
+ v2 *= -1.0
+ v2.normalize()
+ v2 *= v1.length
+
+ if '-' in self.primary_rotation_axis:
+ v2 *= -1
+
+ pole_e.head = shin_e.head + v2
+ pole_e.tail = pole_e.head + (Vector((0, 1, 0)) * (v1.length / 8))
+ pole_e.roll = 0.0
+
+ flip_bone(self.obj, toe_parent_socket1)
+ flip_bone(self.obj, toe_parent_socket2)
+ toe_parent_socket1_e.head = Vector(org_foot_e.tail)
+ toe_parent_socket2_e.head = Vector(org_foot_e.tail)
+ toe_parent_socket1_e.tail = Vector(org_foot_e.tail) + (Vector((0, 0, 1)) * foot_e.length / 2)
+ toe_parent_socket2_e.tail = Vector(org_foot_e.tail) + (Vector((0, 0, 1)) * foot_e.length / 3)
+ toe_parent_socket2_e.roll = toe_parent_socket1_e.roll
+
+ tail = Vector(roll1_e.tail)
+ roll1_e.tail = Vector(org_foot_e.tail)
+ roll1_e.tail = Vector(org_foot_e.tail)
+ roll1_e.head = tail
+ roll2_e.head = Vector(org_foot_e.tail)
+ foot_roll_e.head = Vector(org_foot_e.tail)
+ put_bone(self.obj, foot_roll, roll1_e.head)
+ foot_roll_e.length /= 2
+
+ roll_axis = roll1_e.vector.cross(org_foot_e.vector)
+ align_x_axis(self.obj, roll1, roll_axis)
+ align_x_axis(self.obj, roll2, roll_axis)
+ foot_roll_e.roll = roll2_e.roll
+
+ visfoot_e.tail = visfoot_e.head + Vector((0, 0, v1.length / 32))
+ vispole_e.tail = vispole_e.head + Vector((0, 0, v1.length / 32))
+
+ if make_rocker:
+ d = toe_e.y_axis.dot(rocker1_e.x_axis)
+ if d >= 0.0:
+ flip_bone(self.obj, rocker2)
+ else:
+ flip_bone(self.obj, rocker1)
+
+ # Weird alignment issues. Fix.
+ toe_parent_e.head = Vector(org_foot_e.head)
+ toe_parent_e.tail = Vector(org_foot_e.tail)
+ toe_parent_e.roll = org_foot_e.roll
+
+ foot_e.head = Vector(org_foot_e.head)
+
+ foot_ik_target_e.head = Vector(org_foot_e.head)
+ foot_ik_target_e.tail = Vector(org_foot_e.tail)
+
+ # Determine the pole offset value
+ plane = (shin_e.tail - thigh_e.head).normalized()
+ vec1 = thigh_e.x_axis.normalized()
+ vec2 = (pole_e.head - thigh_e.head).normalized()
+ pole_offset = angle_on_plane(plane, vec1, vec2)
+
+ # Object mode, get pose bones
+ bpy.ops.object.mode_set(mode='OBJECT')
+ pb = self.obj.pose.bones
+
+ thigh_p = pb[thigh]
+ shin_p = pb[shin]
+ foot_p = pb[foot]
+ pole_p = pb[pole]
+ foot_roll_p = pb[foot_roll]
+ roll1_p = pb[roll1]
+ roll2_p = pb[roll2]
+ if make_rocker:
+ rocker1_p = pb[rocker1]
+ rocker2_p = pb[rocker2]
+ toe_p = pb[toe]
+ toe_parent_p = pb[toe_parent]
+ toe_parent_socket1_p = pb[toe_parent_socket1]
+ visfoot_p = pb[visfoot]
+ vispole_p = pb[vispole]
+
+ # Set the knee to only bend on the primary axis.
+ if 'X' in self.primary_rotation_axis:
+ shin_p.lock_ik_y = True
+ shin_p.lock_ik_z = True
+ elif 'Y' in self.primary_rotation_axis:
+ shin_p.lock_ik_x = True
+ shin_p.lock_ik_z = True
+ else:
+ shin_p.lock_ik_x = True
+ shin_p.lock_ik_y = True
+
+ # Foot roll control only rotates on x-axis, or x and y if rocker.
+ foot_roll_p.rotation_mode = 'XYZ'
+ if make_rocker:
+ foot_roll_p.lock_rotation = False, False, True
+ else:
+ foot_roll_p.lock_rotation = False, True, True
+ foot_roll_p.lock_location = True, True, True
+ foot_roll_p.lock_scale = True, True, True
+
+ # roll and rocker bones set to euler rotation
+ roll1_p.rotation_mode = 'XYZ'
+ roll2_p.rotation_mode = 'XYZ'
+ if make_rocker:
+ rocker1_p.rotation_mode = 'XYZ'
+ rocker2_p.rotation_mode = 'XYZ'
+
+ # Pole target only translates
+ pole_p.lock_location = False, False, False
+ pole_p.lock_rotation = True, True, True
+ pole_p.lock_rotation_w = True
+ pole_p.lock_scale = True, True, True
+
+ # Set up custom properties
+ if self.switch == True:
+ prop = rna_idprop_ui_prop_get(foot_p, "ikfk_switch", create=True)
+ foot_p["ikfk_switch"] = 0.0
+ prop["soft_min"] = prop["min"] = 0.0
+ prop["soft_max"] = prop["max"] = 1.0
+
+ # Bend direction hint
+ if self.bend_hint:
+ con = shin_p.constraints.new('LIMIT_ROTATION')
+ con.name = "bend_hint"
+ con.owner_space = 'LOCAL'
+ if self.primary_rotation_axis == 'X':
+ con.use_limit_x = True
+ con.min_x = pi / 10
+ con.max_x = pi / 10
+ elif self.primary_rotation_axis == '-X':
+ con.use_limit_x = True
+ con.min_x = -pi / 10
+ con.max_x = -pi / 10
+ elif self.primary_rotation_axis == 'Y':
+ con.use_limit_y = True
+ con.min_y = pi / 10
+ con.max_y = pi / 10
+ elif self.primary_rotation_axis == '-Y':
+ con.use_limit_y = True
+ con.min_y = -pi / 10
+ con.max_y = -pi / 10
+ elif self.primary_rotation_axis == 'Z':
+ con.use_limit_z = True
+ con.min_z = pi / 10
+ con.max_z = pi / 10
+ elif self.primary_rotation_axis == '-Z':
+ con.use_limit_z = True
+ con.min_z = -pi / 10
+ con.max_z = -pi / 10
+
+ # IK Constraint
+ con = shin_p.constraints.new('IK')
+ con.name = "ik"
+ con.target = self.obj
+ con.subtarget = foot_ik_target
+ con.pole_target = self.obj
+ con.pole_subtarget = pole
+ con.pole_angle = pole_offset
+ con.chain_count = 2
+
+ # toe_parent constraint
+ con = toe_parent_socket1_p.constraints.new('COPY_LOCATION')
+ con.name = "copy_location"
+ con.target = self.obj
+ con.subtarget = toe_parent_socket2
+
+ con = toe_parent_socket1_p.constraints.new('COPY_SCALE')
+ con.name = "copy_scale"
+ con.target = self.obj
+ con.subtarget = toe_parent_socket2
+
+ con = toe_parent_socket1_p.constraints.new('COPY_TRANSFORMS') # drive with IK switch
+ con.name = "fk"
+ con.target = self.obj
+ con.subtarget = toe_parent_socket2
+
+ fcurve = con.driver_add("influence")
+ driver = fcurve.driver
+ var = driver.variables.new()
+ driver.type = 'AVERAGE'
+ var.name = "var"
+ var.targets[0].id_type = 'OBJECT'
+ var.targets[0].id = self.obj
+ var.targets[0].data_path = foot_p.path_from_id() + '["ikfk_switch"]'
+ mod = fcurve.modifiers[0]
+ mod.poly_order = 1
+ mod.coefficients[0] = 1.0
+ mod.coefficients[1] = -1.0
+
+ # Foot roll drivers
+ fcurve = roll1_p.driver_add("rotation_euler", 0)
+ driver = fcurve.driver
+ var = driver.variables.new()
+ driver.type = 'SCRIPTED'
+ driver.expression = "min(0,var)"
+ var.name = "var"
+ var.targets[0].id_type = 'OBJECT'
+ var.targets[0].id = self.obj
+ var.targets[0].data_path = foot_roll_p.path_from_id() + '.rotation_euler[0]'
+
+ fcurve = roll2_p.driver_add("rotation_euler", 0)
+ driver = fcurve.driver
+ var = driver.variables.new()
+ driver.type = 'SCRIPTED'
+ driver.expression = "max(0,var)"
+ var.name = "var"
+ var.targets[0].id_type = 'OBJECT'
+ var.targets[0].id = self.obj
+ var.targets[0].data_path = foot_roll_p.path_from_id() + '.rotation_euler[0]'
+
+ if make_rocker:
+ fcurve = rocker1_p.driver_add("rotation_euler", 0)
+ driver = fcurve.driver
+ var = driver.variables.new()
+ driver.type = 'SCRIPTED'
+ driver.expression = "max(0,-var)"
+ var.name = "var"
+ var.targets[0].id_type = 'OBJECT'
+ var.targets[0].id = self.obj
+ var.targets[0].data_path = foot_roll_p.path_from_id() + '.rotation_euler[1]'
+
+ fcurve = rocker2_p.driver_add("rotation_euler", 0)
+ driver = fcurve.driver
+ var = driver.variables.new()
+ driver.type = 'SCRIPTED'
+ driver.expression = "max(0,var)"
+ var.name = "var"
+ var.targets[0].id_type = 'OBJECT'
+ var.targets[0].id = self.obj
+ var.targets[0].data_path = foot_roll_p.path_from_id() + '.rotation_euler[1]'
+
+ # Constrain org bones to controls
+ con = pb[self.org_bones[0]].constraints.new('COPY_TRANSFORMS')
+ con.name = "ik"
+ con.target = self.obj
+ con.subtarget = thigh
+ if self.switch == True:
+ # IK/FK switch driver
+ fcurve = con.driver_add("influence")
+ driver = fcurve.driver
+ var = driver.variables.new()
+ driver.type = 'AVERAGE'
+ var.name = "var"
+ var.targets[0].id_type = 'OBJECT'
+ var.targets[0].id = self.obj
+ var.targets[0].data_path = foot_p.path_from_id() + '["ikfk_switch"]'
+
+ con = pb[self.org_bones[1]].constraints.new('COPY_TRANSFORMS')
+ con.name = "ik"
+ con.target = self.obj
+ con.subtarget = shin
+ if self.switch == True:
+ # IK/FK switch driver
+ fcurve = con.driver_add("influence")
+ driver = fcurve.driver
+ var = driver.variables.new()
+ driver.type = 'AVERAGE'
+ var.name = "var"
+ var.targets[0].id_type = 'OBJECT'
+ var.targets[0].id = self.obj
+ var.targets[0].data_path = foot_p.path_from_id() + '["ikfk_switch"]'
+
+ con = pb[self.org_bones[2]].constraints.new('COPY_TRANSFORMS')
+ con.name = "ik"
+ con.target = self.obj
+ con.subtarget = foot_ik_target
+ if self.switch == True:
+ # IK/FK switch driver
+ fcurve = con.driver_add("influence")
+ driver = fcurve.driver
+ var = driver.variables.new()
+ driver.type = 'AVERAGE'
+ var.name = "var"
+ var.targets[0].id_type = 'OBJECT'
+ var.targets[0].id = self.obj
+ var.targets[0].data_path = foot_p.path_from_id() + '["ikfk_switch"]'
+
+ con = pb[self.org_bones[3]].constraints.new('COPY_TRANSFORMS')
+ con.name = "copy_transforms"
+ con.target = self.obj
+ con.subtarget = toe
+
+ # VIS foot constraints
+ con = visfoot_p.constraints.new('COPY_LOCATION')
+ con.name = "copy_loc"
+ con.target = self.obj
+ con.subtarget = self.org_bones[2]
+
+ con = visfoot_p.constraints.new('STRETCH_TO')
+ con.name = "stretch_to"
+ con.target = self.obj
+ con.subtarget = foot
+ con.volume = 'NO_VOLUME'
+ con.rest_length = visfoot_p.length
+
+ # VIS pole constraints
+ con = vispole_p.constraints.new('COPY_LOCATION')
+ con.name = "copy_loc"
+ con.target = self.obj
+ con.subtarget = self.org_bones[1]
+
+ con = vispole_p.constraints.new('STRETCH_TO')
+ con.name = "stretch_to"
+ con.target = self.obj
+ con.subtarget = pole
+ con.volume = 'NO_VOLUME'
+ con.rest_length = vispole_p.length
+
+ # Set layers if specified
+ if self.layers:
+ foot_p.bone.layers = self.layers
+ pole_p.bone.layers = self.layers
+ foot_roll_p.bone.layers = self.layers
+ visfoot_p.bone.layers = self.layers
+ vispole_p.bone.layers = self.layers
+
+ toe_p.bone.layers = [(i[0] or i[1]) for i in zip(toe_p.bone.layers, self.layers)] # Both FK and IK layers
+
+ # Create widgets
+ create_line_widget(self.obj, vispole)
+ create_line_widget(self.obj, visfoot)
+ create_sphere_widget(self.obj, pole)
+ create_circle_widget(self.obj, toe, radius=0.7, head_tail=0.5)
+
+ ob = create_widget(self.obj, foot)
+ if ob != None:
+ verts = [(0.7, 1.5, 0.0), (0.7, -0.25, 0.0), (-0.7, -0.25, 0.0), (-0.7, 1.5, 0.0), (0.7, 0.723, 0.0), (-0.7, 0.723, 0.0), (0.7, 0.0, 0.0), (-0.7, 0.0, 0.0)]
+ edges = [(1, 2), (0, 3), (0, 4), (3, 5), (4, 6), (1, 6), (5, 7), (2, 7)]
+ mesh = ob.data
+ mesh.from_pydata(verts, edges, [])
+ mesh.update()
+
+ mod = ob.modifiers.new("subsurf", 'SUBSURF')
+ mod.levels = 2
+
+ ob = create_widget(self.obj, foot_roll)
+ if ob != None:
+ verts = [(0.3999999761581421, 0.766044557094574, 0.6427875757217407), (0.17668449878692627, 3.823702598992895e-08, 3.2084670920085046e-08), (-0.17668461799621582, 9.874240447516058e-08, 8.285470443070153e-08), (-0.39999961853027344, 0.7660449147224426, 0.6427879333496094), (0.3562471270561218, 0.6159579753875732, 0.5168500542640686), (-0.35624682903289795, 0.6159582138061523, 0.5168502926826477), (0.20492683351039886, 0.09688037633895874, 0.0812922865152359), (-0.20492687821388245, 0.0968804731965065, 0.08129236847162247)]
+ edges = [(1, 2), (0, 3), (0, 4), (3, 5), (1, 6), (4, 6), (2, 7), (5, 7)]
+ mesh = ob.data
+ mesh.from_pydata(verts, edges, [])
+ mesh.update()
+
+ mod = ob.modifiers.new("subsurf", 'SUBSURF')
+ mod.levels = 2
+
+ return [thigh, shin, foot, pole, foot_roll, foot_ik_target]
+
diff --git a/rigify/rigs/finger.py b/rigify/rigs/finger.py
new file mode 100644
index 00000000..0bcea44b
--- /dev/null
+++ b/rigify/rigs/finger.py
@@ -0,0 +1,412 @@
+#====================== BEGIN GPL LICENSE BLOCK ======================
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+#======================= END GPL LICENSE BLOCK ========================
+
+import bpy
+from mathutils import Vector
+from rigify.utils import MetarigError
+from rigify.utils import copy_bone
+from rigify.utils import connected_children_names
+from rigify.utils import strip_org, make_mechanism_name, make_deformer_name
+from rigify.utils import get_layers
+from rigify.utils import create_widget, create_line_widget, create_limb_widget
+from rna_prop_ui import rna_idprop_ui_prop_get
+import re
+
+
+class Rig:
+    """ A finger rig. It takes a single chain of bones.
+        This is a control and deformation rig.
+    """
+    def __init__(self, obj, bone, params):
+        """ Gather and validate data about the rig.
+        """
+        self.obj = obj
+        # The rig operates on the given bone plus its chain of connected children.
+        self.org_bones = [bone] + connected_children_names(obj, bone)
+        self.params = params
+
+        if len(self.org_bones) <= 1:
+            raise MetarigError("RIGIFY ERROR: Bone '%s': input to rig type must be a chain of 2 or more bones." % (strip_org(bone)))
+
+        # Get user-specified layers, if they exist
+        if params.separate_extra_layers:
+            self.ex_layers = list(params.extra_layers)
+        else:
+            self.ex_layers = None
+
+        # Get other rig parameters
+        self.primary_rotation_axis = params.primary_rotation_axis
+        self.use_digit_twist = params.use_digit_twist
+
+    def deform(self):
+        """ Generate the deformation rig.
+            Just a copy of the original bones, except the first digit which is a twist bone.
+        """
+        bpy.ops.object.mode_set(mode='EDIT')
+
+        # Create the bones
+        # First bone is a twist bone
+        if self.use_digit_twist:
+            b1a = copy_bone(self.obj, self.org_bones[0], make_deformer_name(strip_org(self.org_bones[0] + ".01")))
+            b1b = copy_bone(self.obj, self.org_bones[0], make_deformer_name(strip_org(self.org_bones[0] + ".02")))
+            b1tip = copy_bone(self.obj, self.org_bones[0], make_mechanism_name(strip_org(self.org_bones[0] + ".tip")))
+        else:
+            b1 = copy_bone(self.obj, self.org_bones[0], make_deformer_name(strip_org(self.org_bones[0])))
+
+        # The rest are normal
+        bones = []
+        for bone in self.org_bones[1:]:
+            bones += [copy_bone(self.obj, bone, make_deformer_name(strip_org(bone)))]
+
+        # Position bones
+        eb = self.obj.data.edit_bones
+        if self.use_digit_twist:
+            b1a_e = eb[b1a]
+            b1b_e = eb[b1b]
+            b1tip_e = eb[b1tip]
+
+            # Small tip bone at the end of the digit for the twist to track.
+            b1tip_e.use_connect = False
+            b1tip_e.tail += Vector((0.1, 0, 0))
+            b1tip_e.head = b1b_e.tail
+            b1tip_e.length = b1a_e.length / 4
+
+            # Split the first digit in half: b1a is the base half, b1b the
+            # outer half.
+            center = (b1a_e.head + b1a_e.tail) / 2
+            b1a_e.tail = center
+            b1b_e.use_connect = False
+            b1b_e.head = center
+
+        # Parenting
+        if self.use_digit_twist:
+            b1b_e.parent = eb[self.org_bones[0]]
+            b1tip_e.parent = eb[self.org_bones[0]]
+        else:
+            eb[b1].use_connect = False
+            eb[b1].parent = eb[self.org_bones[0]]
+
+        for (ba, bb) in zip(bones, self.org_bones[1:]):
+            eb[ba].use_connect = False
+            eb[ba].parent = eb[bb]
+
+        # Constraints
+        if self.use_digit_twist:
+            bpy.ops.object.mode_set(mode='OBJECT')
+            pb = self.obj.pose.bones
+
+            b1a_p = pb[b1a]
+
+            con = b1a_p.constraints.new('COPY_LOCATION')
+            con.name = "copy_location"
+            con.target = self.obj
+            con.subtarget = self.org_bones[0]
+
+            con = b1a_p.constraints.new('COPY_SCALE')
+            con.name = "copy_scale"
+            con.target = self.obj
+            con.subtarget = self.org_bones[0]
+
+            # The base half follows position but tracks the tip, which
+            # distributes the twist along the digit.
+            con = b1a_p.constraints.new('DAMPED_TRACK')
+            con.name = "track_to"
+            con.target = self.obj
+            con.subtarget = b1tip
+
+    def control(self):
+        """ Generate the control rig.
+        """
+        bpy.ops.object.mode_set(mode='EDIT')
+
+        # Figure out the name for the control bone (remove the last .##)
+        # The name is reversed so that only the final ".NN" suffix matches.
+        ctrl_name = re.sub("([0-9]+\.)", "", strip_org(self.org_bones[0])[::-1], count=1)[::-1]
+
+        # Create the bones
+        ctrl = copy_bone(self.obj, self.org_bones[0], ctrl_name)
+
+        helpers = []
+        bones = []
+        for bone in self.org_bones:
+            bones += [copy_bone(self.obj, bone, strip_org(bone))]
+            helpers += [copy_bone(self.obj, bone, make_mechanism_name(strip_org(bone)))]
+
+        # Position bones
+        eb = self.obj.data.edit_bones
+
+        length = 0.0
+        for bone in helpers:
+            length += eb[bone].length
+            eb[bone].length /= 2
+
+        # The master control spans the whole finger.
+        eb[ctrl].length = length * 1.5
+
+        # Parent bones
+        prev = eb[self.org_bones[0]].parent
+        for (b, h) in zip(bones, helpers):
+            b_e = eb[b]
+            h_e = eb[h]
+            b_e.use_connect = False
+            h_e.use_connect = False
+
+            b_e.parent = h_e
+            h_e.parent = prev
+
+            prev = b_e
+
+        # Transform locks and rotation mode
+        bpy.ops.object.mode_set(mode='OBJECT')
+        pb = self.obj.pose.bones
+
+        for bone in bones[1:]:
+            pb[bone].lock_location = True, True, True
+
+        if pb[self.org_bones[0]].bone.use_connect == True:
+            pb[bones[0]].lock_location = True, True, True
+
+        # Only y-scale on the master control; it drives the curl amount.
+        pb[ctrl].lock_scale = True, False, True
+
+        for bone in helpers:
+            pb[bone].rotation_mode = 'XYZ'
+
+        # Drivers
+        i = 1
+        val = 1.2 / (len(self.org_bones) - 1)
+        for bone in helpers:
+            # Add custom prop
+            prop_name = "bend_%02d" % i
+            prop = rna_idprop_ui_prop_get(pb[ctrl], prop_name, create=True)
+            prop["min"] = 0.0
+            prop["max"] = 1.0
+            prop["soft_min"] = 0.0
+            prop["soft_max"] = 1.0
+            if i == 1:
+                pb[ctrl][prop_name] = 0.0
+            else:
+                pb[ctrl][prop_name] = val
+
+            # Add driver
+            if 'X' in self.primary_rotation_axis:
+                fcurve = pb[bone].driver_add("rotation_euler", 0)
+            elif 'Y' in self.primary_rotation_axis:
+                fcurve = pb[bone].driver_add("rotation_euler", 1)
+            else:
+                fcurve = pb[bone].driver_add("rotation_euler", 2)
+
+            driver = fcurve.driver
+            driver.type = 'SCRIPTED'
+
+            var = driver.variables.new()
+            var.name = "ctrl_y"
+            var.targets[0].id_type = 'OBJECT'
+            var.targets[0].id = self.obj
+            var.targets[0].data_path = pb[ctrl].path_from_id() + '.scale[1]'
+
+            var = driver.variables.new()
+            var.name = "bend"
+            var.targets[0].id_type = 'OBJECT'
+            var.targets[0].id = self.obj
+            var.targets[0].data_path = pb[ctrl].path_from_id() + '["' + prop_name + '"]'
+
+            # Squashing the control (y-scale < 1) curls the finger.
+            if '-' in self.primary_rotation_axis:
+                driver.expression = "-(1.0-ctrl_y) * bend * 3.14159 * 2"
+            else:
+                driver.expression = "(1.0-ctrl_y) * bend * 3.14159 * 2"
+
+            i += 1
+
+        # Constraints
+        con = pb[helpers[0]].constraints.new('COPY_LOCATION')
+        con.name = "copy_location"
+        con.target = self.obj
+        con.subtarget = ctrl
+
+        con = pb[helpers[0]].constraints.new('COPY_ROTATION')
+        con.name = "copy_rotation"
+        con.target = self.obj
+        con.subtarget = ctrl
+
+        # Constrain org bones to the control bones
+        for (bone, org) in zip(bones, self.org_bones):
+            con = pb[org].constraints.new('COPY_TRANSFORMS')
+            con.name = "copy_transforms"
+            con.target = self.obj
+            con.subtarget = bone
+
+        # Set layers for extra control bones
+        if self.ex_layers:
+            for bone in bones:
+                pb[bone].bone.layers = self.ex_layers
+
+        # Create control widgets
+        w = create_widget(self.obj, ctrl)
+        if w != None:
+            mesh = w.data
+            verts = [(0, 0, 0), (0, 1, 0), (0.05, 1, 0), (0.05, 1.1, 0), (-0.05, 1.1, 0), (-0.05, 1, 0)]
+            if 'Z' in self.primary_rotation_axis:
+                # Flip x/z coordinates
+                temp = []
+                for v in verts:
+                    temp += [(v[2], v[1], v[0])]
+                verts = temp
+            edges = [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 1)]
+            mesh.from_pydata(verts, edges, [])
+            mesh.update()
+
+        for bone in bones:
+            create_limb_widget(self.obj, bone)
+
+    def generate(self):
+        """ Generate the rig.
+            Do NOT modify any of the original bones, except for adding constraints.
+            The main armature should be selected and active before this is called.
+        """
+        # Build the deformation setup first, then the controls.
+        self.deform()
+        self.control()
+
+    @classmethod
+    def add_parameters(self, group):
+        """ Add the parameters of this rig type to the
+            RigifyParameters PropertyGroup
+        """
+        items = [('X', 'X', ''), ('Y', 'Y', ''), ('Z', 'Z', ''), ('-X', '-X', ''), ('-Y', '-Y', ''), ('-Z', '-Z', '')]
+        group.primary_rotation_axis = bpy.props.EnumProperty(items=items, name="Primary Rotation Axis", default='X')
+
+        # Optional separate layer set for the per-digit (secondary) controls.
+        group.separate_extra_layers = bpy.props.BoolProperty(name="Separate Secondary Control Layers:", default=False, description="Enable putting the secondary controls on a separate layer from the primary controls.")
+        group.extra_layers = bpy.props.BoolVectorProperty(size=32, description="Layers for the secondary controls to be on.")
+
+        group.use_digit_twist = bpy.props.BoolProperty(name="Digit Twist", default=True, description="Generate the dual-bone twist setup for the first finger digit.")
+
+ @classmethod
+ def parameters_ui(self, layout, obj, bone):
+ """ Create the ui for the rig parameters.
+ """
+ params = obj.pose.bones[bone].rigify_parameters[0]
+
+ r = layout.row()
+ r.prop(params, "separate_extra_layers")
+
+ r = layout.row()
+ r.active = params.separate_extra_layers
+
+ col = r.column(align=True)
+ row = col.row(align=True)
+ row.prop(params, "extra_layers", index=0, toggle=True, text="")
+ row.prop(params, "extra_layers", index=1, toggle=True, text="")
+ row.prop(params, "extra_layers", index=2, toggle=True, text="")
+ row.prop(params, "extra_layers", index=3, toggle=True, text="")
+ row.prop(params, "extra_layers", index=4, toggle=True, text="")
+ row.prop(params, "extra_layers", index=5, toggle=True, text="")
+ row.prop(params, "extra_layers", index=6, toggle=True, text="")
+ row.prop(params, "extra_layers", index=7, toggle=True, text="")
+ row = col.row(align=True)
+ row.prop(params, "extra_layers", index=16, toggle=True, text="")
+ row.prop(params, "extra_layers", index=17, toggle=True, text="")
+ row.prop(params, "extra_layers", index=18, toggle=True, text="")
+ row.prop(params, "extra_layers", index=19, toggle=True, text="")
+ row.prop(params, "extra_layers", index=20, toggle=True, text="")
+ row.prop(params, "extra_layers", index=21, toggle=True, text="")
+ row.prop(params, "extra_layers", index=22, toggle=True, text="")
+ row.prop(params, "extra_layers", index=23, toggle=True, text="")
+
+ col = r.column(align=True)
+ row = col.row(align=True)
+ row.prop(params, "ik_layers", index=8, toggle=True, text="")
+ row.prop(params, "ik_layers", index=9, toggle=True, text="")
+ row.prop(params, "ik_layers", index=10, toggle=True, text="")
+ row.prop(params, "ik_layers", index=11, toggle=True, text="")
+ row.prop(params, "ik_layers", index=12, toggle=True, text="")
+ row.prop(params, "ik_layers", index=13, toggle=True, text="")
+ row.prop(params, "ik_layers", index=14, toggle=True, text="")
+ row.prop(params, "ik_layers", index=15, toggle=True, text="")
+ row = col.row(align=True)
+ row.prop(params, "ik_layers", index=24, toggle=True, text="")
+ row.prop(params, "ik_layers", index=25, toggle=True, text="")
+ row.prop(params, "ik_layers", index=26, toggle=True, text="")
+ row.prop(params, "ik_layers", index=27, toggle=True, text="")
+ row.prop(params, "ik_layers", index=28, toggle=True, text="")
+ row.prop(params, "ik_layers", index=29, toggle=True, text="")
+ row.prop(params, "ik_layers", index=30, toggle=True, text="")
+ row.prop(params, "ik_layers", index=31, toggle=True, text="")
+
+ r = layout.row()
+ r.label(text="Bend rotation axis:")
+ r.prop(params, "primary_rotation_axis", text="")
+
+ col = layout.column()
+ col.prop(params, "use_digit_twist")
+
+    @classmethod
+    def create_sample(self, obj):
+        """ Create a sample metarig (a 3-bone finger chain) for this rig type.
+        """
+        # generated by rigify.utils.write_metarig
+        bpy.ops.object.mode_set(mode='EDIT')
+        arm = obj.data
+
+        bones = {}
+
+        bone = arm.edit_bones.new('finger.01')
+        bone.head[:] = 0.0000, 0.0000, 0.0000
+        bone.tail[:] = 0.2529, 0.0000, 0.0000
+        bone.roll = 3.1416
+        bone.use_connect = False
+        bones['finger.01'] = bone.name
+        bone = arm.edit_bones.new('finger.02')
+        bone.head[:] = 0.2529, 0.0000, 0.0000
+        bone.tail[:] = 0.4024, 0.0000, -0.0264
+        bone.roll = -2.9671
+        bone.use_connect = True
+        bone.parent = arm.edit_bones[bones['finger.01']]
+        bones['finger.02'] = bone.name
+        bone = arm.edit_bones.new('finger.03')
+        bone.head[:] = 0.4024, 0.0000, -0.0264
+        bone.tail[:] = 0.4975, -0.0000, -0.0610
+        bone.roll = -2.7925
+        bone.use_connect = True
+        bone.parent = arm.edit_bones[bones['finger.02']]
+        bones['finger.03'] = bone.name
+
+        # Pose-bone settings; only the chain root carries the rig type.
+        bpy.ops.object.mode_set(mode='OBJECT')
+        pbone = obj.pose.bones[bones['finger.01']]
+        pbone.rigify_type = 'finger'
+        pbone.lock_location = (True, True, True)
+        pbone.lock_rotation = (False, False, False)
+        pbone.lock_rotation_w = False
+        pbone.lock_scale = (False, False, False)
+        pbone.rotation_mode = 'YZX'
+        pbone.rigify_parameters.add()
+        pbone = obj.pose.bones[bones['finger.02']]
+        pbone.rigify_type = ''
+        pbone.lock_location = (False, False, False)
+        pbone.lock_rotation = (False, False, False)
+        pbone.lock_rotation_w = False
+        pbone.lock_scale = (False, False, False)
+        pbone.rotation_mode = 'YZX'
+        pbone = obj.pose.bones[bones['finger.03']]
+        pbone.rigify_type = ''
+        pbone.lock_location = (False, False, False)
+        pbone.lock_rotation = (False, False, False)
+        pbone.lock_rotation_w = False
+        pbone.lock_scale = (False, False, False)
+        pbone.rotation_mode = 'YZX'
+
+        # Leave the new bones selected, with the last one active.
+        bpy.ops.object.mode_set(mode='EDIT')
+        for bone in arm.edit_bones:
+            bone.select = False
+            bone.select_head = False
+            bone.select_tail = False
+        for b in bones:
+            bone = arm.edit_bones[bones[b]]
+            bone.select = True
+            bone.select_head = True
+            bone.select_tail = True
+            arm.edit_bones.active = bone
+
diff --git a/rigify/rigs/misc/__init__.py b/rigify/rigs/misc/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/rigify/rigs/misc/__init__.py
diff --git a/rigify/rigs/misc/delta.py b/rigify/rigs/misc/delta.py
new file mode 100644
index 00000000..dc8edd41
--- /dev/null
+++ b/rigify/rigs/misc/delta.py
@@ -0,0 +1,161 @@
+#====================== BEGIN GPL LICENSE BLOCK ======================
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+#======================= END GPL LICENSE BLOCK ========================
+
+import bpy
+from math import acos
+from rigify.utils import MetarigError
+from rigify.utils import copy_bone
+from rigify.utils import org_name, make_mechanism_name
+
+
+class Rig:
+ """ A delta rig.
+ Creates a setup that will place its child at its position in pose mode,
+ but will not modify its child's position in edit mode.
+ This is a mechanism-only rig (no control or deformation bones).
+
+ """
+ def __init__(self, obj, bone, params):
+ """ Gather and validate data about the rig.
+ Store any data or references to data that will be needed later on.
+ In particular, store references to bones that will be needed.
+ Do NOT change any data in the scene. This is a gathering phase only.
+
+ """
+ bb = obj.data.bones
+
+ if bb[bone].children is None:
+ raise MetarigError("RIGIFY ERROR: bone '%s': rig type requires one child." % org_name(bone.name))
+ if bb[bone].use_connect == True:
+ raise MetarigError("RIGIFY ERROR: bone '%s': rig type cannot be connected to parent." % org_name(bone.name))
+
+ self.obj = obj
+ self.org_bones = {"delta": bone, "child": bb[bone].children[0].name}
+ self.org_names = [org_name(bone), org_name(bb[bone].children[0].name)]
+
+ def generate(self):
+ """ Generate the rig.
+ Do NOT modify any of the original bones, except for adding constraints.
+ The main armature should be selected and active before this is called.
+
+ """
+ bpy.ops.object.mode_set(mode='EDIT')
+ eb = self.obj.data.edit_bones
+
+ org_delta = self.org_bones["delta"]
+ org_delta_e = eb[self.org_bones["delta"]]
+ org_child = self.org_bones["child"]
+ org_child_e = eb[self.org_bones["child"]]
+
+ # Calculate the matrix for achieving the delta
+ child_mat = org_delta_e.matrix.invert() * org_child_e.matrix
+ mat = org_delta_e.matrix * child_mat.invert()
+
+ # Create the delta bones.
+ delta_e = eb[copy_bone(self.obj, self.org_bones["delta"])]
+ delta_e.name = make_mechanism_name(self.org_names[0])
+ delta = delta_e.name
+
+ # Set the delta to the matrix's transforms
+ set_mat(self.obj, delta, mat)
+
+ bpy.ops.object.mode_set(mode='OBJECT')
+
+ # Constrain org_delta to delta
+ con = self.obj.pose.bones[org_delta].constraints.new('COPY_TRANSFORMS')
+ con.name = "delta"
+ con.target = self.obj
+ con.subtarget = delta
+
+ @classmethod
+ def create_sample(self, obj):
+ # generated by rigify.utils.write_metarig
+ bpy.ops.object.mode_set(mode='EDIT')
+ arm = obj.data
+
+ bones = {}
+
+ bone = arm.edit_bones.new('delta')
+ bone.head[:] = 0.0000, -0.1198, 0.1253
+ bone.tail[:] = -0.0000, -0.2483, 0.2785
+ bone.roll = -0.0000
+ bone.use_connect = False
+ bones['delta'] = bone.name
+ bone = arm.edit_bones.new('Bone')
+ bone.head[:] = -0.0000, 0.0000, 0.0000
+ bone.tail[:] = -0.0000, 0.0000, 0.2000
+ bone.roll = 0.0000
+ bone.use_connect = False
+ bone.parent = arm.edit_bones[bones['delta']]
+ bones['Bone'] = bone.name
+
+ bpy.ops.object.mode_set(mode='OBJECT')
+ pbone = obj.pose.bones[bones['delta']]
+ pbone.rigify_type = 'misc.delta'
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.rigify_parameters.add()
+ pbone = obj.pose.bones[bones['Bone']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+
+ bpy.ops.object.mode_set(mode='EDIT')
+ for bone in arm.edit_bones:
+ bone.select = False
+ bone.select_head = False
+ bone.select_tail = False
+ for b in bones:
+ bone = arm.edit_bones[bones[b]]
+ bone.select = True
+ bone.select_head = True
+ bone.select_tail = True
+ arm.edit_bones.active = bone
+
+
def set_mat(obj, bone_name, matrix):
    """ Sets the bone to have the given transform matrix.

        Edit bones cannot take a roll directly from a matrix, so the roll is
        recovered by deriving the angular difference between the achieved and
        requested orientations, then testing both candidate rolls and keeping
        the one that aligns better.
    """
    a = obj.data.edit_bones[bone_name]

    # Reset to a canonical rest pose before applying the matrix.
    a.head = (0, 0, 0)
    a.tail = (0, 1, 0)

    a.transform(matrix)

    # Angle between the achieved and requested orientations.
    # The dot product of two unit quaternions can drift fractionally outside
    # [-1, 1] through floating point error, which would make acos() raise
    # ValueError; clamp it to the valid domain first.
    dot = a.matrix.to_quaternion().dot(matrix.to_quaternion())
    d = acos(max(-1.0, min(1.0, dot))) * 2.0

    roll_1 = a.roll + d
    roll_2 = a.roll - d

    # Try both candidate rolls; a larger quaternion dot means closer alignment.
    a.roll = roll_1
    d1 = a.matrix.to_quaternion().dot(matrix.to_quaternion())
    a.roll = roll_2
    d2 = a.matrix.to_quaternion().dot(matrix.to_quaternion())

    if d1 > d2:
        a.roll = roll_1
    else:
        a.roll = roll_2
+
diff --git a/rigify/rigs/neck_short.py b/rigify/rigs/neck_short.py
new file mode 100644
index 00000000..58191e6a
--- /dev/null
+++ b/rigify/rigs/neck_short.py
@@ -0,0 +1,392 @@
+#====================== BEGIN GPL LICENSE BLOCK ======================
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+#======================= END GPL LICENSE BLOCK ========================
+
+import bpy
+from mathutils import Vector
+from rigify.utils import MetarigError
+from rigify.utils import copy_bone, new_bone, flip_bone, put_bone
+from rigify.utils import connected_children_names
+from rigify.utils import strip_org, make_mechanism_name, make_deformer_name
+from rigify.utils import obj_to_bone, create_circle_widget
+from rna_prop_ui import rna_idprop_ui_prop_get
+
+script1 = """
+head_neck = ["%s", "%s"]
+"""
+
+script2 = """
+if is_selected(head_neck[0]):
+ layout.prop(pose_bones[head_neck[0]], '["isolate"]', text="Isolate (" + head_neck[0] + ")", slider=True)
+"""
+
+script3 = """
+if is_selected(head_neck):
+ layout.prop(pose_bones[head_neck[0]], '["neck_follow"]', text="Neck Follow Head (" + head_neck[0] + ")", slider=True)
+"""
+
+
+class Rig:
+ """ A "neck" rig. It turns a chain of bones into a rig with two controls:
+ One for the head, and one for the neck.
+
+ """
+ def __init__(self, obj, bone_name, params):
+ """ Gather and validate data about the rig.
+
+ """
+ self.obj = obj
+ self.org_bones = [bone_name] + connected_children_names(obj, bone_name)
+ self.params = params
+
+ if len(self.org_bones) <= 1:
+ raise MetarigError("RIGIFY ERROR: Bone '%s': input to rig type must be a chain of 2 or more bones." % (strip_org(bone)))
+
+ self.isolate = False
+ if self.obj.data.bones[bone_name].parent:
+ self.isolate = True
+
+ def gen_deform(self):
+ """ Generate the deformation rig.
+
+ """
+ for name in self.org_bones:
+ bpy.ops.object.mode_set(mode='EDIT')
+ eb = self.obj.data.edit_bones
+
+ # Create deform bone
+ bone_e = eb[copy_bone(self.obj, name)]
+
+ # Change its name
+ bone_e.name = make_deformer_name(strip_org(name))
+ bone_name = bone_e.name
+
+ # Leave edit mode
+ bpy.ops.object.mode_set(mode='OBJECT')
+
+ # Get the pose bone
+ bone = self.obj.pose.bones[bone_name]
+
+ # Constrain to the original bone
+ con = bone.constraints.new('COPY_TRANSFORMS')
+ con.name = "copy_transforms"
+ con.target = self.obj
+ con.subtarget = name
+
+ def gen_control(self):
+ """ Generate the control rig.
+
+ """
+ #---------------------------------
+ # Create the neck and head controls
+ bpy.ops.object.mode_set(mode='EDIT')
+
+ # Create bones
+ neck_ctrl = copy_bone(self.obj, self.org_bones[0], strip_org(self.org_bones[0]))
+ neck_follow = copy_bone(self.obj, self.org_bones[-1], make_mechanism_name(strip_org(self.org_bones[0] + ".follow")))
+ neck_child = new_bone(self.obj, make_mechanism_name(strip_org(self.org_bones[0] + ".child")))
+
+ head_ctrl = copy_bone(self.obj, self.org_bones[-1], strip_org(self.org_bones[-1]))
+ head_mch = new_bone(self.obj, make_mechanism_name(strip_org(self.org_bones[-1])))
+ if self.isolate:
+ head_socket1 = copy_bone(self.obj, self.org_bones[-1], make_mechanism_name(strip_org(self.org_bones[-1] + ".socket1")))
+ head_socket2 = copy_bone(self.obj, self.org_bones[-1], make_mechanism_name(strip_org(self.org_bones[-1] + ".socket2")))
+
+ # Create neck chain bones
+ neck = []
+ helpers = []
+ for name in self.org_bones:
+ neck += [copy_bone(self.obj, name, make_mechanism_name(strip_org(name)))]
+ helpers += [copy_bone(self.obj, neck_child, make_mechanism_name(strip_org(name + ".02")))]
+
+ # Fetch edit bones
+ eb = self.obj.data.edit_bones
+
+ neck_ctrl_e = eb[neck_ctrl]
+ neck_follow_e = eb[neck_follow]
+ neck_child_e = eb[neck_child]
+ head_ctrl_e = eb[head_ctrl]
+ head_mch_e = eb[head_mch]
+ if self.isolate:
+ head_socket1_e = eb[head_socket1]
+ head_socket2_e = eb[head_socket2]
+
+ # Parenting
+ head_ctrl_e.use_connect = False
+ head_ctrl_e.parent = neck_ctrl_e.parent
+ head_mch_e.use_connect = False
+ head_mch_e.parent = head_ctrl_e
+
+ if self.isolate:
+ head_socket1_e.use_connect = False
+ head_socket1_e.parent = neck_ctrl_e.parent
+
+ head_socket2_e.use_connect = False
+ head_socket2_e.parent = None
+
+ head_ctrl_e.parent = head_socket2_e
+
+ for (name1, name2) in zip(neck, helpers):
+ eb[name1].use_connect = False
+ eb[name1].parent = eb[name2]
+ eb[name2].use_connect = False
+ eb[name2].parent = neck_ctrl_e.parent
+
+ neck_follow_e.use_connect = False
+ neck_follow_e.parent = neck_ctrl_e.parent
+ neck_child_e.use_connect = False
+ neck_child_e.parent = neck_ctrl_e
+ neck_ctrl_e.parent = neck_follow_e
+
+ # Position
+ put_bone(self.obj, neck_follow, neck_ctrl_e.head)
+ put_bone(self.obj, neck_child, neck_ctrl_e.head)
+ put_bone(self.obj, head_ctrl, neck_ctrl_e.head)
+ put_bone(self.obj, head_mch, neck_ctrl_e.head)
+ head_mch_e.length = head_ctrl_e.length / 2
+ neck_child_e.length = neck_ctrl_e.length / 2
+
+ if self.isolate:
+ put_bone(self.obj, head_socket1, neck_ctrl_e.head)
+ head_mch_e.length /= 2
+
+ put_bone(self.obj, head_socket2, neck_ctrl_e.head)
+ head_mch_e.length /= 3
+
+ for (name1, name2) in zip(neck, helpers):
+ put_bone(self.obj, name2, eb[name1].head)
+ eb[name2].length = eb[name1].length / 2
+
+ # Switch to object mode
+ bpy.ops.object.mode_set(mode='OBJECT')
+ pb = self.obj.pose.bones
+ neck_ctrl_p = pb[neck_ctrl]
+ neck_follow_p = pb[neck_follow]
+ neck_child_p = pb[neck_child]
+ head_ctrl_p = pb[head_ctrl]
+ if self.isolate:
+ head_socket1_p = pb[head_socket1]
+ head_socket2_p = pb[head_socket2]
+
+ # Custom bone appearance
+ neck_ctrl_p.custom_shape_transform = pb[self.org_bones[(len(self.org_bones) - 1) // 2]]
+ head_ctrl_p.custom_shape_transform = pb[self.org_bones[-1]]
+
+ # Custom properties
+ prop = rna_idprop_ui_prop_get(head_ctrl_p, "inf_extent", create=True)
+ head_ctrl_p["inf_extent"] = 0.5
+ prop["min"] = 0.0
+ prop["max"] = 1.0
+ prop["soft_min"] = 0.0
+ prop["soft_max"] = 1.0
+
+ prop = rna_idprop_ui_prop_get(head_ctrl_p, "neck_follow", create=True)
+ head_ctrl_p["neck_follow"] = 1.0
+ prop["min"] = 0.0
+ prop["max"] = 2.0
+ prop["soft_min"] = 0.0
+ prop["soft_max"] = 1.0
+
+ if self.isolate:
+ prop = rna_idprop_ui_prop_get(head_ctrl_p, "isolate", create=True)
+ head_ctrl_p["isolate"] = 0.0
+ prop["min"] = 0.0
+ prop["max"] = 1.0
+ prop["soft_min"] = 0.0
+ prop["soft_max"] = 1.0
+
+ # Constraints
+
+ # Neck follow
+ con = neck_follow_p.constraints.new('COPY_ROTATION')
+ con.name = "copy_rotation"
+ con.target = self.obj
+ con.subtarget = head_ctrl
+
+ fcurve = con.driver_add("influence")
+ driver = fcurve.driver
+ var = driver.variables.new()
+ driver.type = 'SCRIPTED'
+ var.name = "follow"
+ var.targets[0].id_type = 'OBJECT'
+ var.targets[0].id = self.obj
+ var.targets[0].data_path = head_ctrl_p.path_from_id() + '["neck_follow"]'
+ driver.expression = "follow / 2"
+
+ # Isolate
+ if self.isolate:
+ con = head_socket2_p.constraints.new('COPY_LOCATION')
+ con.name = "copy_location"
+ con.target = self.obj
+ con.subtarget = head_socket1
+
+ con = head_socket2_p.constraints.new('COPY_TRANSFORMS')
+ con.name = "copy_transforms"
+ con.target = self.obj
+ con.subtarget = head_socket1
+
+ fcurve = con.driver_add("influence")
+ driver = fcurve.driver
+ var = driver.variables.new()
+ driver.type = 'SCRIPTED'
+ var.name = "isolate"
+ var.targets[0].id_type = 'OBJECT'
+ var.targets[0].id = self.obj
+ var.targets[0].data_path = head_ctrl_p.path_from_id() + '["isolate"]'
+ driver.expression = "1.0 - isolate"
+
+ # Neck chain
+ first = True
+ prev = None
+ i = 0
+ l = len(neck)
+ for (name1, name2, org_name) in zip(neck, helpers, self.org_bones):
+ con = pb[org_name].constraints.new('COPY_TRANSFORMS')
+ con.name = "copy_transforms"
+ con.target = self.obj
+ con.subtarget = name1
+
+ n_con = pb[name2].constraints.new('COPY_TRANSFORMS')
+ n_con.name = "neck"
+ n_con.target = self.obj
+ n_con.subtarget = neck_child
+
+ h_con = pb[name2].constraints.new('COPY_TRANSFORMS')
+ h_con.name = "head"
+ h_con.target = self.obj
+ h_con.subtarget = head_mch
+
+ con = pb[name2].constraints.new('COPY_LOCATION')
+ con.name = "anchor"
+ con.target = self.obj
+ if first:
+ con.subtarget = neck_ctrl
+ else:
+ con.subtarget = prev
+ con.head_tail = 1.0
+
+ # Drivers
+ n = (i + 1) / l
+
+ # Neck influence
+ fcurve = n_con.driver_add("influence")
+ driver = fcurve.driver
+ var = driver.variables.new()
+ driver.type = 'SCRIPTED'
+ var.name = "ext"
+ var.targets[0].id_type = 'OBJECT'
+ var.targets[0].id = self.obj
+ var.targets[0].data_path = head_ctrl_p.path_from_id() + '["inf_extent"]'
+ driver.expression = "1.0 if (%.4f > (1.0-ext) or (1.0-ext) == 0.0) else (%.4f / (1.0-ext))" % (n, n)
+
+ # Head influence
+ if (i + 1) == l:
+ h_con.influence = 1.0
+ else:
+ fcurve = h_con.driver_add("influence")
+ driver = fcurve.driver
+ var = driver.variables.new()
+ driver.type = 'SCRIPTED'
+ var.name = "ext"
+ var.targets[0].id_type = 'OBJECT'
+ var.targets[0].id = self.obj
+ var.targets[0].data_path = head_ctrl_p.path_from_id() + '["inf_extent"]'
+ driver.expression = "0.0 if (%.4f <= (1.0-ext)) else ((%.4f - (1.0-ext)) / ext)" % (n, n)
+
+ first = False
+ prev = name1
+ i += 1
+
+ # Create control widgets
+ w1 = create_circle_widget(self.obj, neck_ctrl, radius=1.0, head_tail=0.5)
+ w2 = create_circle_widget(self.obj, head_ctrl, radius=1.0, head_tail=0.5)
+
+ if w1 != None:
+ obj_to_bone(w1, self.obj, self.org_bones[(len(self.org_bones) - 1) // 2])
+ if w2 != None:
+ obj_to_bone(w2, self.obj, self.org_bones[-1])
+
+ # Return control bones
+ return (head_ctrl, neck_ctrl)
+
+ def generate(self):
+ """ Generate the rig.
+ Do NOT modify any of the original bones, except for adding constraints.
+ The main armature should be selected and active before this is called.
+
+ """
+ self.gen_deform()
+ (head, neck) = self.gen_control()
+
+ script = script1 % (head, neck)
+ if self.isolate:
+ script += script2
+ script += script3
+
+ return [script]
+
+ @classmethod
+ def create_sample(self, obj):
+ # generated by rigify.utils.write_metarig
+ bpy.ops.object.mode_set(mode='EDIT')
+ arm = obj.data
+
+ bones = {}
+
+ bone = arm.edit_bones.new('neck')
+ bone.head[:] = 0.0000, 0.0000, 0.0000
+ bone.tail[:] = 0.0000, -0.0500, 0.1500
+ bone.roll = 0.0000
+ bone.use_connect = False
+ bones['neck'] = bone.name
+ bone = arm.edit_bones.new('head')
+ bone.head[:] = 0.0000, -0.0500, 0.1500
+ bone.tail[:] = 0.0000, -0.0500, 0.4000
+ bone.roll = 3.1416
+ bone.use_connect = True
+ bone.parent = arm.edit_bones[bones['neck']]
+ bones['head'] = bone.name
+
+ bpy.ops.object.mode_set(mode='OBJECT')
+ pbone = obj.pose.bones[bones['neck']]
+ pbone.rigify_type = 'neck_short'
+ pbone.lock_location = (True, True, True)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone.rigify_parameters.add()
+ pbone = obj.pose.bones[bones['head']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+
+ bpy.ops.object.mode_set(mode='EDIT')
+ for bone in arm.edit_bones:
+ bone.select = False
+ bone.select_head = False
+ bone.select_tail = False
+ for b in bones:
+ bone = arm.edit_bones[bones[b]]
+ bone.select = True
+ bone.select_head = True
+ bone.select_tail = True
+ arm.edit_bones.active = bone
+
diff --git a/rigify/rigs/palm.py b/rigify/rigs/palm.py
new file mode 100644
index 00000000..d1a3cf61
--- /dev/null
+++ b/rigify/rigs/palm.py
@@ -0,0 +1,273 @@
+#====================== BEGIN GPL LICENSE BLOCK ======================
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+#======================= END GPL LICENSE BLOCK ========================
+
+import bpy
+from math import sin, cos, pi
+from rigify.utils import MetarigError
+from rigify.utils import copy_bone
+from rigify.utils import strip_org, deformer, mch
+from rigify.utils import create_widget
+import re
+
+
def bone_siblings(obj, bone):
    """ Return a list of the names of the given bone's siblings.
        A bone without a parent has no siblings.

    """
    parent = obj.data.bones[bone].parent

    if parent is None:
        return []

    # Every other child of the shared parent is a sibling.
    return [b.name for b in parent.children if b.name != bone]
+
+
def bone_distance(obj, bone1, bone2):
    """ Return the distance between the heads of two bones.

    """
    bones = obj.data.bones
    return (bones[bone1].head - bones[bone2].head).length
+
+
+class Rig:
+ """ A "palm" rig. A set of sibling bones that bend with each other.
+ This is a control and deformation rig.
+
+ """
+ def __init__(self, obj, bone, params):
+ """ Gather and validate data about the rig.
+ """
+ self.obj = obj
+ self.params = params
+
+ siblings = bone_siblings(obj, bone)
+
+ if len(siblings) == 0:
+ raise MetarigError("RIGIFY ERROR: Bone '%s': must have a parent and at least one sibling." % (strip_org(bone)))
+
+ # Sort list by name and distance
+ siblings.sort()
+ siblings.sort(key=lambda b: bone_distance(obj, bone, b))
+
+ self.org_bones = [bone] + siblings
+
+ # Get rig parameters
+ self.palm_rotation_axis = params.palm_rotation_axis
+
+ def generate(self):
+ """ Generate the rig.
+ Do NOT modify any of the original bones, except for adding constraints.
+ The main armature should be selected and active before this is called.
+
+ """
+ bpy.ops.object.mode_set(mode='EDIT')
+
+ # Figure out the name for the control bone (remove the last .##)
+ last_bone = self.org_bones[-1:][0]
+ ctrl_name = re.sub("([0-9]+\.)", "", strip_org(last_bone)[::-1], count=1)[::-1]
+
+ # Make control bone
+ ctrl = copy_bone(self.obj, last_bone, ctrl_name)
+
+ # Make deformation bones
+ def_bones = []
+ for bone in self.org_bones:
+ b = copy_bone(self.obj, bone, deformer(strip_org(bone)))
+ def_bones += [b]
+
+ # Parenting
+ eb = self.obj.data.edit_bones
+
+ for d, b in zip(def_bones, self.org_bones):
+ eb[d].use_connect = False
+ eb[d].parent = eb[b]
+
+ # Constraints
+ bpy.ops.object.mode_set(mode='OBJECT')
+ pb = self.obj.pose.bones
+
+ i = 0
+ div = len(self.org_bones) - 1
+ for b in self.org_bones:
+ con = pb[b].constraints.new('COPY_TRANSFORMS')
+ con.name = "copy_transforms"
+ con.target = self.obj
+ con.subtarget = ctrl
+ con.target_space = 'LOCAL'
+ con.owner_space = 'LOCAL'
+ con.influence = i / div
+
+ con = pb[b].constraints.new('COPY_ROTATION')
+ con.name = "copy_rotation"
+ con.target = self.obj
+ con.subtarget = ctrl
+ con.target_space = 'LOCAL'
+ con.owner_space = 'LOCAL'
+ if 'X' in self.palm_rotation_axis:
+ con.invert_x = True
+ con.use_x = True
+ con.use_z = False
+ else:
+ con.invert_z = True
+ con.use_x = False
+ con.use_z = True
+ con.use_y = False
+
+ con.influence = (i / div) - (1 - cos((i * pi / 2) / div))
+
+ i += 1
+
+ # Create control widget
+ w = create_widget(self.obj, ctrl)
+ if w != None:
+ mesh = w.data
+ verts = [(0.15780271589756012, 2.086162567138672e-07, -0.30000004172325134), (0.15780259668827057, 1.0, -0.2000001072883606), (-0.15780280530452728, 0.9999999403953552, -0.20000004768371582), (-0.15780259668827057, -2.086162567138672e-07, -0.29999998211860657), (-0.15780256688594818, -2.7089754439657554e-07, 0.30000004172325134), (-0.1578027755022049, 0.9999998807907104, 0.19999995827674866), (0.15780262649059296, 0.9999999403953552, 0.19999989867210388), (0.1578027456998825, 1.4633496903115883e-07, 0.29999998211860657), (0.15780268609523773, 0.2500001788139343, -0.27500003576278687), (-0.15780264139175415, 0.24999985098838806, -0.2749999761581421), (0.15780262649059296, 0.7500000596046448, -0.22500008344650269), (-0.1578027606010437, 0.7499998807907104, -0.2250000238418579), (0.15780265629291534, 0.75, 0.22499991953372955), (0.15780271589756012, 0.2500000596046448, 0.2749999761581421), (-0.15780261158943176, 0.2499997615814209, 0.27500003576278687), (-0.1578027307987213, 0.7499998807907104, 0.22499997913837433)]
+ if 'Z' in self.palm_rotation_axis:
+ # Flip x/z coordinates
+ temp = []
+ for v in verts:
+ temp += [(v[2], v[1], v[0])]
+ verts = temp
+ edges = [(1, 2), (0, 3), (4, 7), (5, 6), (8, 0), (9, 3), (10, 1), (11, 2), (12, 6), (13, 7), (4, 14), (15, 5), (10, 8), (11, 9), (15, 14), (12, 13)]
+ mesh.from_pydata(verts, edges, [])
+ mesh.update()
+
+ mod = w.modifiers.new("subsurf", 'SUBSURF')
+ mod.levels = 2
+
+ @classmethod
+ def add_parameters(self, group):
+ """ Add the parameters of this rig type to the
+ RigifyParameters PropertyGroup
+
+ """
+ items = [('X', 'X', ''), ('Z', 'Z', '')]
+ group.palm_rotation_axis = bpy.props.EnumProperty(items=items, name="Palm Rotation Axis", default='X')
+
+ @classmethod
+ def parameters_ui(self, layout, obj, bone):
+ """ Create the ui for the rig parameters.
+
+ """
+ params = obj.pose.bones[bone].rigify_parameters[0]
+
+ r = layout.row()
+ r.label(text="Primary rotation axis:")
+ r.prop(params, "palm_rotation_axis", text="")
+
+ @classmethod
+ def create_sample(self, obj):
+ # generated by rigify.utils.write_metarig
+ bpy.ops.object.mode_set(mode='EDIT')
+ arm = obj.data
+
+ bones = {}
+
+ bone = arm.edit_bones.new('palm.parent')
+ bone.head[:] = 0.0000, 0.0000, 0.0000
+ bone.tail[:] = 0.0577, 0.0000, -0.0000
+ bone.roll = 3.1416
+ bone.use_connect = False
+ bones['palm.parent'] = bone.name
+ bone = arm.edit_bones.new('palm.04')
+ bone.head[:] = 0.0577, 0.0315, -0.0000
+ bone.tail[:] = 0.1627, 0.0315, -0.0000
+ bone.roll = 3.1416
+ bone.use_connect = False
+ bone.parent = arm.edit_bones[bones['palm.parent']]
+ bones['palm.04'] = bone.name
+ bone = arm.edit_bones.new('palm.03')
+ bone.head[:] = 0.0577, 0.0105, -0.0000
+ bone.tail[:] = 0.1627, 0.0105, -0.0000
+ bone.roll = 3.1416
+ bone.use_connect = False
+ bone.parent = arm.edit_bones[bones['palm.parent']]
+ bones['palm.03'] = bone.name
+ bone = arm.edit_bones.new('palm.02')
+ bone.head[:] = 0.0577, -0.0105, -0.0000
+ bone.tail[:] = 0.1627, -0.0105, -0.0000
+ bone.roll = 3.1416
+ bone.use_connect = False
+ bone.parent = arm.edit_bones[bones['palm.parent']]
+ bones['palm.02'] = bone.name
+ bone = arm.edit_bones.new('palm.01')
+ bone.head[:] = 0.0577, -0.0315, -0.0000
+ bone.tail[:] = 0.1627, -0.0315, -0.0000
+ bone.roll = 3.1416
+ bone.use_connect = False
+ bone.parent = arm.edit_bones[bones['palm.parent']]
+ bones['palm.01'] = bone.name
+
+ bpy.ops.object.mode_set(mode='OBJECT')
+ pbone = obj.pose.bones[bones['palm.parent']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, False, False)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'QUATERNION'
+ pbone = obj.pose.bones[bones['palm.04']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, True, True)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'YXZ'
+ pbone = obj.pose.bones[bones['palm.03']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, True, True)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'YXZ'
+ pbone = obj.pose.bones[bones['palm.02']]
+ pbone.rigify_type = ''
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, True, True)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'YXZ'
+ pbone = obj.pose.bones[bones['palm.01']]
+ pbone.rigify_type = 'palm'
+ pbone.lock_location = (False, False, False)
+ pbone.lock_rotation = (False, True, True)
+ pbone.lock_rotation_w = False
+ pbone.lock_scale = (False, False, False)
+ pbone.rotation_mode = 'YXZ'
+ pbone.rigify_parameters.add()
+
+ bpy.ops.object.mode_set(mode='EDIT')
+ for bone in arm.edit_bones:
+ bone.select = False
+ bone.select_head = False
+ bone.select_tail = False
+ for b in bones:
+ bone = arm.edit_bones[bones[b]]
+ bone.select = True
+ bone.select_head = True
+ bone.select_tail = True
+ arm.edit_bones.active = bone
+
diff --git a/rigify/rigs/spine.py b/rigify/rigs/spine.py
new file mode 100644
index 00000000..86d9ba6c
--- /dev/null
+++ b/rigify/rigs/spine.py
@@ -0,0 +1,617 @@
+#====================== BEGIN GPL LICENSE BLOCK ======================
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+#======================= END GPL LICENSE BLOCK ========================
+
+""" TODO:
+ - Add parameters for bone transform alphas.
+"""
+
+from math import floor
+
+import bpy
+from mathutils import Vector
+from rigify.utils import MetarigError
+from rigify.utils import copy_bone, new_bone, flip_bone, put_bone
+from rigify.utils import connected_children_names
+from rigify.utils import strip_org, make_mechanism_name, make_deformer_name
+from rigify.utils import obj_to_bone, create_circle_widget, create_compass_widget
+from rna_prop_ui import rna_idprop_ui_prop_get
+
+
# Template for the rig's generated UI script.  Rig.generate() fills the two
# %s placeholders with the main control's name and a comma-separated list of
# the chain control names.  The resulting code runs inside the rig UI, where
# is_selected(), layout and pose_bones are provided by the generated script.
script = """
main = "%s"
spine = [%s]
if is_selected([main]+ spine):
    layout.prop(pose_bones[main], '["pivot_slide"]', text="Pivot Slide (" + main + ")", slider=True)

for name in spine[1:-1]:
    if is_selected(name):
        layout.prop(pose_bones[name], '["auto_rotate"]', text="Auto Rotate (" + name + ")", slider=True)
"""
+
+
class Rig:
    """ A "spine" rig. It turns a chain of bones into a rig with two controls:
        One for the hips, and one for the rib cage.

        Optional intermediate controls, a sliding pivot and one deformation
        bone per original bone are generated as well.
    """
    def __init__(self, obj, bone_name, params):
        """ Gather and validate data about the rig.

            obj: the armature object being rigged
            bone_name: name of the first bone of the connected chain
            params: the RigifyParameters entry for this bone
            Raises MetarigError if the chain has fewer than 2 bones.
        """
        self.obj = obj
        self.org_bones = [bone_name] + connected_children_names(obj, bone_name)
        self.params = params

        # Validate the chain length before it is used in any arithmetic below.
        # BUGFIX: the original error message used the undefined name 'bone',
        # so a short chain raised NameError instead of the intended
        # MetarigError.
        if len(self.org_bones) <= 1:
            raise MetarigError("RIGIFY ERROR: Bone '%s': input to rig type must be a chain of 2 or more bones." % (strip_org(bone_name)))

        # Collect control bone indices.  The user-supplied list is 1-based;
        # the two chain ends are always controls.
        self.control_indices = [0, len(self.org_bones) - 1]
        temp = self.params.chain_bone_controls.split(",")
        for i in temp:
            try:
                j = int(i) - 1
            except ValueError:
                pass  # silently skip non-numeric entries in the user string
            else:
                if (j > 0) and (j < len(self.org_bones)) and (j not in self.control_indices):
                    self.control_indices += [j]
        self.control_indices.sort()

        # Clamp the rest pivot position away from the chain ends by one
        # bone's normalized span.
        self.pivot_rest = self.params.rest_pivot_slide
        self.pivot_rest = max(self.pivot_rest, 1.0/len(self.org_bones))
        self.pivot_rest = min(self.pivot_rest, 1.0-(1.0/len(self.org_bones)))

    def gen_deform(self):
        """ Generate the deformation rig: one DEF- bone per original bone,
            copy-transforms constrained to it.
        """
        for name in self.org_bones:
            bpy.ops.object.mode_set(mode='EDIT')
            eb = self.obj.data.edit_bones

            # Create deform bone
            bone_e = eb[copy_bone(self.obj, name)]

            # Change its name
            bone_e.name = make_deformer_name(strip_org(name))
            bone_name = bone_e.name

            # Leave edit mode
            bpy.ops.object.mode_set(mode='OBJECT')

            # Get the pose bone
            bone = self.obj.pose.bones[bone_name]

            # Constrain to the original bone
            con = bone.constraints.new('COPY_TRANSFORMS')
            con.name = "copy_transforms"
            con.target = self.obj
            con.subtarget = name

    def gen_control(self):
        """ Generate the control rig.
            Returns the list of control bone names: [main_control] + chain
            controls (hips first, ribs last).
        """
        bpy.ops.object.mode_set(mode='EDIT')
        eb = self.obj.data.edit_bones
        #-------------------------
        # Get rest slide position
        # Map pivot_rest (0..1 along the chain) to bone index i + fraction a.
        a = self.pivot_rest * len(self.org_bones)
        i = floor(a)
        a -= i
        if i == len(self.org_bones):
            i -= 1
            a = 1.0

        pivot_rest_pos = eb[self.org_bones[i]].head.copy()
        pivot_rest_pos += eb[self.org_bones[i]].vector * a

        #----------------------
        # Create controls

        # Create control bones
        controls = []
        for i in self.control_indices:
            name = copy_bone(self.obj, self.org_bones[i], strip_org(self.org_bones[i]))
            controls += [name]

        # Create control parents
        control_parents = []
        for i in self.control_indices[1:-1]:
            name = new_bone(self.obj, make_mechanism_name("par_" + strip_org(self.org_bones[i])))
            control_parents += [name]

        # Create sub-control bones
        subcontrols = []
        for i in self.control_indices:
            name = new_bone(self.obj, make_mechanism_name("sub_" + strip_org(self.org_bones[i])))
            subcontrols += [name]

        # Create main control bone
        main_control = new_bone(self.obj, self.params.spine_main_control_name)

        # Create main control WGT bones
        main_wgt1 = new_bone(self.obj, make_mechanism_name(self.params.spine_main_control_name + ".01"))
        main_wgt2 = new_bone(self.obj, make_mechanism_name(self.params.spine_main_control_name + ".02"))

        # Re-fetch: bone creation above may have toggled modes.
        eb = self.obj.data.edit_bones

        # Parent the main control
        eb[main_control].use_connect = False
        eb[main_control].parent = eb[self.org_bones[0]].parent

        # Parent the main WGTs
        eb[main_wgt1].use_connect = False
        eb[main_wgt1].parent = eb[main_control]
        eb[main_wgt2].use_connect = False
        eb[main_wgt2].parent = eb[main_wgt1]

        # Parent the controls and sub-controls
        for name, subname in zip(controls, subcontrols):
            eb[name].use_connect = False
            eb[name].parent = eb[main_control]
            eb[subname].use_connect = False
            eb[subname].parent = eb[name]

        # Parent the control parents
        for name, par_name in zip(controls[1:-1], control_parents):
            eb[par_name].use_connect = False
            eb[par_name].parent = eb[main_control]
            eb[name].parent = eb[par_name]

        # Position the main bone
        put_bone(self.obj, main_control, pivot_rest_pos)
        eb[main_control].length = sum([eb[b].length for b in self.org_bones]) / 2

        # Position the main WGTs
        eb[main_wgt1].tail = (0.0, 0.0, sum([eb[b].length for b in self.org_bones]) / 4)
        eb[main_wgt2].length = sum([eb[b].length for b in self.org_bones]) / 4
        put_bone(self.obj, main_wgt1, pivot_rest_pos)
        put_bone(self.obj, main_wgt2, pivot_rest_pos)

        # Position the controls and sub-controls
        # (removed unused local: pos = eb[controls[0]].head.copy())
        for name, subname in zip(controls, subcontrols):
            put_bone(self.obj, name, pivot_rest_pos)
            put_bone(self.obj, subname, pivot_rest_pos)
            eb[subname].length = eb[name].length / 3

        # Position the control parents
        for name, par_name in zip(controls[1:-1], control_parents):
            put_bone(self.obj, par_name, pivot_rest_pos)
            eb[par_name].length = eb[name].length / 2

        #-----------------------------------------
        # Control bone constraints and properties
        bpy.ops.object.mode_set(mode='OBJECT')
        pb = self.obj.pose.bones

        # Lock control locations
        for name in controls:
            bone = pb[name]
            bone.lock_location = True, True, True

        # Main control doesn't use local location
        pb[main_control].bone.use_local_location = False

        # Intermediate controls follow hips and spine
        for name, par_name, i in zip(controls[1:-1], control_parents, self.control_indices[1:-1]):
            bone = pb[par_name]

            # Custom bend_alpha property: 0 at the hips end, 1 at the ribs end.
            prop = rna_idprop_ui_prop_get(pb[name], "bend_alpha", create=True)
            pb[name]["bend_alpha"] = i / (len(self.org_bones) - 1)  # set bend alpha
            prop["min"] = 0.0
            prop["max"] = 1.0
            prop["soft_min"] = 0.0
            prop["soft_max"] = 1.0

            # Custom auto_rotate
            prop = rna_idprop_ui_prop_get(pb[name], "auto_rotate", create=True)
            pb[name]["auto_rotate"] = 1.0
            prop["min"] = 0.0
            prop["max"] = 1.0
            prop["soft_min"] = 0.0
            prop["soft_max"] = 1.0

            # Constraints: blend between the first and last sub-control.
            con1 = bone.constraints.new('COPY_TRANSFORMS')
            con1.name = "copy_transforms"
            con1.target = self.obj
            con1.subtarget = subcontrols[0]

            con2 = bone.constraints.new('COPY_TRANSFORMS')
            con2.name = "copy_transforms"
            con2.target = self.obj
            con2.subtarget = subcontrols[-1]

            # Drivers
            fcurve = con1.driver_add("influence")
            driver = fcurve.driver
            driver.type = 'AVERAGE'
            var = driver.variables.new()
            var.name = "auto"
            var.targets[0].id_type = 'OBJECT'
            var.targets[0].id = self.obj
            var.targets[0].data_path = pb[name].path_from_id() + '["auto_rotate"]'

            fcurve = con2.driver_add("influence")
            driver = fcurve.driver
            driver.type = 'SCRIPTED'
            driver.expression = "alpha * auto"
            var = driver.variables.new()
            var.name = "alpha"
            var.targets[0].id_type = 'OBJECT'
            var.targets[0].id = self.obj
            var.targets[0].data_path = pb[name].path_from_id() + '["bend_alpha"]'
            var = driver.variables.new()
            var.name = "auto"
            var.targets[0].id_type = 'OBJECT'
            var.targets[0].id = self.obj
            var.targets[0].data_path = pb[name].path_from_id() + '["auto_rotate"]'

        #-------------------------
        # Create flex spine chain
        bpy.ops.object.mode_set(mode='EDIT')
        flex_bones = []
        flex_subs = []
        prev_bone = None
        for b in self.org_bones:
            # Create bones
            bone = copy_bone(self.obj, b, make_mechanism_name(strip_org(b) + ".flex"))
            sub = new_bone(self.obj, make_mechanism_name(strip_org(b) + ".flex_s"))
            flex_bones += [bone]
            flex_subs += [sub]

            eb = self.obj.data.edit_bones
            bone_e = eb[bone]
            sub_e = eb[sub]

            # Parenting
            bone_e.use_connect = False
            sub_e.use_connect = False
            if prev_bone is None:
                sub_e.parent = eb[controls[0]]
            else:
                sub_e.parent = eb[prev_bone]
            bone_e.parent = sub_e

            # Position
            put_bone(self.obj, sub, bone_e.head)
            sub_e.length = bone_e.length / 4
            if prev_bone is not None:
                sub_e.use_connect = True

            prev_bone = bone

        #----------------------------
        # Create reverse spine chain

        # Create bones/parenting/positioning
        bpy.ops.object.mode_set(mode='EDIT')
        rev_bones = []
        prev_bone = None
        for b in zip(flex_bones, self.org_bones):
            # Create bones
            bone = copy_bone(self.obj, b[1], make_mechanism_name(strip_org(b[1]) + ".reverse"))
            rev_bones += [bone]
            eb = self.obj.data.edit_bones
            bone_e = eb[bone]

            # Parenting
            bone_e.use_connect = False
            bone_e.parent = eb[b[0]]

            # Position: reversed bones run tail-to-head down the chain,
            # starting from the rest pivot.
            flip_bone(self.obj, bone)
            bone_e.tail = Vector(eb[b[0]].head)
            #bone_e.head = Vector(eb[b[0]].tail)
            if prev_bone is None:
                put_bone(self.obj, bone, pivot_rest_pos)
            else:
                put_bone(self.obj, bone, eb[prev_bone].tail)

            prev_bone = bone

        # Constraints
        bpy.ops.object.mode_set(mode='OBJECT')
        pb = self.obj.pose.bones
        prev_bone = None
        for bone in rev_bones:
            bone_p = pb[bone]

            con = bone_p.constraints.new('COPY_LOCATION')
            con.name = "copy_location"
            con.target = self.obj
            if prev_bone is None:
                con.subtarget = main_control
            else:
                con.subtarget = prev_bone
                con.head_tail = 1.0
            prev_bone = bone

        #----------------------------------------
        # Constrain original bones to flex spine
        bpy.ops.object.mode_set(mode='OBJECT')
        pb = self.obj.pose.bones

        for obone, fbone in zip(self.org_bones, flex_bones):
            con = pb[obone].constraints.new('COPY_TRANSFORMS')
            con.name = "copy_transforms"
            con.target = self.obj
            con.subtarget = fbone

        #---------------------------
        # Create pivot slide system
        pb = self.obj.pose.bones
        bone_p = pb[self.org_bones[0]]
        main_control_p = pb[main_control]

        # Custom pivot_slide property
        prop = rna_idprop_ui_prop_get(main_control_p, "pivot_slide", create=True)
        main_control_p["pivot_slide"] = self.pivot_rest
        prop["min"] = 0.0
        prop["max"] = 1.0
        prop["soft_min"] = 1.0 / len(self.org_bones)
        prop["soft_max"] = 1.0 - (1.0 / len(self.org_bones))

        # Anchor constraints
        con = bone_p.constraints.new('COPY_LOCATION')
        con.name = "copy_location"
        con.target = self.obj
        con.subtarget = rev_bones[0]

        con = pb[main_wgt1].constraints.new('COPY_ROTATION')
        con.name = "copy_rotation"
        con.target = self.obj
        con.subtarget = rev_bones[0]

        # Slide constraints: one per reversed bone, with a polynomial
        # f-modifier mapping pivot_slide into each constraint's influence.
        i = 1
        tot = len(rev_bones)
        for rb in rev_bones:
            con = bone_p.constraints.new('COPY_LOCATION')
            con.name = "slide." + str(i)
            con.target = self.obj
            con.subtarget = rb
            con.head_tail = 1.0

            # Driver
            fcurve = con.driver_add("influence")
            driver = fcurve.driver
            var = driver.variables.new()
            driver.type = 'AVERAGE'
            var.name = "slide"
            var.targets[0].id_type = 'OBJECT'
            var.targets[0].id = self.obj
            var.targets[0].data_path = main_control_p.path_from_id() + '["pivot_slide"]'
            mod = fcurve.modifiers[0]
            mod.poly_order = 1
            mod.coefficients[0] = 1 - i
            mod.coefficients[1] = tot

            # Main WGT
            con = pb[main_wgt1].constraints.new('COPY_ROTATION')
            con.name = "slide." + str(i)
            con.target = self.obj
            con.subtarget = rb

            # Driver (offset by half a bone so the widget rotation tracks
            # the segment the pivot currently sits in)
            fcurve = con.driver_add("influence")
            driver = fcurve.driver
            var = driver.variables.new()
            driver.type = 'AVERAGE'
            var.name = "slide"
            var.targets[0].id_type = 'OBJECT'
            var.targets[0].id = self.obj
            var.targets[0].data_path = main_control_p.path_from_id() + '["pivot_slide"]'
            mod = fcurve.modifiers[0]
            mod.poly_order = 1
            mod.coefficients[0] = 1.5 - i
            mod.coefficients[1] = tot

            i += 1

        #----------------------------------
        # Constrain flex spine to controls
        bpy.ops.object.mode_set(mode='OBJECT')
        pb = self.obj.pose.bones

        # Constrain the bones that correspond exactly to the controls
        for i, name in zip(self.control_indices, subcontrols):
            con = pb[flex_subs[i]].constraints.new('COPY_TRANSFORMS')
            con.name = "copy_transforms"
            con.target = self.obj
            con.subtarget = name

        # Constrain the bones in-between the controls
        for i, j, name1, name2 in zip(self.control_indices, self.control_indices[1:], subcontrols, subcontrols[1:]):
            if (i + 1) < j:
                for n in range(i + 1, j):
                    bone = pb[flex_subs[n]]
                    # Custom bend_alpha property: fraction of the way from
                    # control i to control j.
                    prop = rna_idprop_ui_prop_get(bone, "bend_alpha", create=True)
                    bone["bend_alpha"] = (n - i) / (j - i)  # set bend alpha
                    prop["min"] = 0.0
                    prop["max"] = 1.0
                    prop["soft_min"] = 0.0
                    prop["soft_max"] = 1.0

                    con = bone.constraints.new('COPY_TRANSFORMS')
                    con.name = "copy_transforms"
                    con.target = self.obj
                    con.subtarget = name1

                    con = bone.constraints.new('COPY_TRANSFORMS')
                    con.name = "copy_transforms"
                    con.target = self.obj
                    con.subtarget = name2

                    # Driver
                    fcurve = con.driver_add("influence")
                    driver = fcurve.driver
                    var = driver.variables.new()
                    driver.type = 'AVERAGE'
                    var.name = "alpha"
                    var.targets[0].id_type = 'OBJECT'
                    var.targets[0].id = self.obj
                    var.targets[0].data_path = bone.path_from_id() + '["bend_alpha"]'

        #-------------
        # Final stuff
        bpy.ops.object.mode_set(mode='OBJECT')
        pb = self.obj.pose.bones

        # Control appearance
        # Main
        pb[main_control].custom_shape_transform = pb[main_wgt2]
        w = create_compass_widget(self.obj, main_control)
        if w != None:
            obj_to_bone(w, self.obj, main_wgt2)

        # Spines
        for name, i in zip(controls[1:-1], self.control_indices[1:-1]):
            pb[name].custom_shape_transform = pb[self.org_bones[i]]
            # Create control widgets
            w = create_circle_widget(self.obj, name, radius=1.0, head_tail=0.5, with_line=True)
            if w != None:
                obj_to_bone(w, self.obj, self.org_bones[i])
        # Hips
        pb[controls[0]].custom_shape_transform = pb[self.org_bones[0]]
        # Create control widgets
        w = create_circle_widget(self.obj, controls[0], radius=1.0, head_tail=0.5, with_line=True)
        if w != None:
            obj_to_bone(w, self.obj, self.org_bones[0])

        # Ribs
        pb[controls[-1]].custom_shape_transform = pb[self.org_bones[-1]]
        # Create control widgets
        w = create_circle_widget(self.obj, controls[-1], radius=1.0, head_tail=0.5, with_line=True)
        if w != None:
            obj_to_bone(w, self.obj, self.org_bones[-1])

        # Layers
        pb[main_control].bone.layers = pb[self.org_bones[0]].bone.layers

        return [main_control] + controls

    def generate(self):
        """ Generate the rig.
            Do NOT modify any of the original bones, except for adding constraints.
            The main armature should be selected and active before this is called.

        """
        self.gen_deform()
        controls = self.gen_control()

        # Fill the UI script template with the generated control names.
        controls_string = ", ".join(["'" + x + "'" for x in controls[1:]])
        return [script % (controls[0], controls_string)]

    @classmethod
    def add_parameters(self, group):
        """ Add the parameters of this rig type to the
            RigifyParameters PropertyGroup
        """
        group.spine_main_control_name = bpy.props.StringProperty(name="Main control name", default="torso", description="Name that the main control bone should be given.")
        group.rest_pivot_slide = bpy.props.FloatProperty(name="Rest Pivot Slide", default=0.0, min=0.0, max=1.0, soft_min=0.0, soft_max=1.0, description="The pivot slide value in the rest pose.")
        group.chain_bone_controls = bpy.props.StringProperty(name="Control bone list", default="", description="Define which bones have controls.")

    @classmethod
    def parameters_ui(self, layout, obj, bone):
        """ Create the ui for the rig parameters.
        """
        params = obj.pose.bones[bone].rigify_parameters[0]

        r = layout.row()
        r.prop(params, "spine_main_control_name")

        r = layout.row()
        r.prop(params, "rest_pivot_slide", slider=True)

        r = layout.row()
        r.prop(params, "chain_bone_controls")

    @classmethod
    def create_sample(self, obj):
        """ Create a sample metarig (hips/spine/ribs chain) for this rig type.
        """
        # generated by rigify.utils.write_metarig
        bpy.ops.object.mode_set(mode='EDIT')
        arm = obj.data

        bones = {}

        bone = arm.edit_bones.new('hips')
        bone.head[:] = 0.0000, 0.0000, 0.0000
        bone.tail[:] = -0.0000, -0.0590, 0.2804
        bone.roll = -0.0000
        bone.use_connect = False
        bones['hips'] = bone.name
        bone = arm.edit_bones.new('spine')
        bone.head[:] = -0.0000, -0.0590, 0.2804
        bone.tail[:] = 0.0000, 0.0291, 0.5324
        bone.roll = 0.0000
        bone.use_connect = True
        bone.parent = arm.edit_bones[bones['hips']]
        bones['spine'] = bone.name
        bone = arm.edit_bones.new('ribs')
        bone.head[:] = 0.0000, 0.0291, 0.5324
        bone.tail[:] = -0.0000, 0.0000, 1.0000
        bone.roll = -0.0000
        bone.use_connect = True
        bone.parent = arm.edit_bones[bones['spine']]
        bones['ribs'] = bone.name

        bpy.ops.object.mode_set(mode='OBJECT')
        pbone = obj.pose.bones[bones['hips']]
        pbone.rigify_type = 'spine'
        pbone.lock_location = (False, False, False)
        pbone.lock_rotation = (False, False, False)
        pbone.lock_rotation_w = False
        pbone.lock_scale = (False, False, False)
        pbone.rotation_mode = 'QUATERNION'
        pbone = obj.pose.bones[bones['spine']]
        pbone.rigify_type = ''
        pbone.lock_location = (False, False, False)
        pbone.lock_rotation = (False, False, False)
        pbone.lock_rotation_w = False
        pbone.lock_scale = (False, False, False)
        pbone.rotation_mode = 'QUATERNION'
        pbone = obj.pose.bones[bones['ribs']]
        pbone.rigify_type = ''
        pbone.lock_location = (False, False, False)
        pbone.lock_rotation = (False, False, False)
        pbone.lock_rotation_w = False
        pbone.lock_scale = (False, False, False)
        pbone.rotation_mode = 'QUATERNION'
        pbone = obj.pose.bones[bones['hips']]
        # NOTE(review): rigify_type was already set via the RNA property
        # above; this ID-property assignment is redundant generated output.
        pbone['rigify_type'] = 'spine'
        pbone.rigify_parameters.add()
        pbone.rigify_parameters[0].chain_bone_controls = "1, 2, 3"

        bpy.ops.object.mode_set(mode='EDIT')
        for bone in arm.edit_bones:
            bone.select = False
            bone.select_head = False
            bone.select_tail = False
        for b in bones:
            bone = arm.edit_bones[bones[b]]
            bone.select = True
            bone.select_head = True
            bone.select_tail = True
            arm.edit_bones.active = bone
diff --git a/rigify/ui.py b/rigify/ui.py
new file mode 100644
index 00000000..3a2e89e3
--- /dev/null
+++ b/rigify/ui.py
@@ -0,0 +1,300 @@
+#====================== BEGIN GPL LICENSE BLOCK ======================
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+#======================= END GPL LICENSE BLOCK ========================
+
+import bpy
+from bpy.props import *
+import rigify
+from rigify.utils import get_rig_type
+from rigify import generate
+from rna_prop_ui import rna_idprop_ui_prop_get
+
+
class DATA_PT_rigify_buttons(bpy.types.Panel):
    """ Armature-data panel: shows the Generate button in object/pose mode,
        or the rig-type list plus 'Add sample' button in edit mode.
    """
    bl_label = "Rigify Buttons"
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "data"
    #bl_options = {'DEFAULT_OPEN'}

    @classmethod
    def poll(cls, context):
        # Only relevant when an armature is being edited/posed.
        if not context.armature:
            return False
        return True

    def draw(self, context):
        C = context
        layout = self.layout
        obj = context.object
        id_store = C.window_manager

        if obj.mode in ('POSE', 'OBJECT'):
            row = layout.row()
            row.operator("pose.rigify_generate", text="Generate")
        elif obj.mode == 'EDIT':
            # Rebuild the window-manager rig-type list from scratch,
            # filtered by the selected collection name.
            collection_name = str(id_store.rigify_collection).replace(" ", "")

            for i in range(0, len(id_store.rigify_types)):
                id_store.rigify_types.remove(0)

            # (removed unused local: collection = r.split('.')[0])
            for r in rigify.rig_list:
                if collection_name == "All":
                    a = id_store.rigify_types.add()
                    a.name = r
                elif r.startswith(collection_name + '.'):
                    a = id_store.rigify_types.add()
                    a.name = r
                elif collection_name == "None" and len(r.split('.')) == 1:
                    a = id_store.rigify_types.add()
                    a.name = r

            ## Rig collection field
            #row = layout.row()
            #row.prop(id_store, 'rigify_collection', text="Category")

            # Rig type list
            row = layout.row()
            row.template_list(id_store, "rigify_types", id_store, 'rigify_active_type')
            row = layout.row()
            op = row.operator("armature.metarig_sample_add", text="Add sample")
            op.metarig_type = id_store.rigify_types[id_store.rigify_active_type].name
+
+
class DATA_PT_rigify_layer_names(bpy.types.Panel):
    """ Armature-data panel for naming the 28 rigify bone layers and
        assigning each one a row in the generated rig's layer UI.
    """
    bl_label = "Rigify Layer Names"
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "data"
    bl_options = {'DEFAULT_CLOSED'}

    @classmethod
    def poll(cls, context):
        # Only show for armatures.
        if not context.armature:
            return False
        return True

    def draw(self, context):
        C = context
        layout = self.layout
        obj = context.object

        # Ensure that the layers exist
        # (pads the collection up to 28 entries so indices 0..27 used below
        # are always valid)
        for i in range(1 + len(obj.data.rigify_layers), 29):
            layer = obj.data.rigify_layers.add()

        # UI
        for i in range(28):
            # New headed column every 16 layers, mirroring the armature
            # layer widget's top/bottom halves.
            if (i % 16) == 0:
                col = layout.column()
                if i == 0:
                    col.label(text="Top Row:")
                else:
                    col.label(text="Bottom Row:")
            # Align in groups of 8, like the layer buttons.
            if (i % 8) == 0:
                col = layout.column(align=True)
            row = col.row()
            row.prop(obj.data, "layers", index=i, text="", toggle=True)
            split = row.split(percentage=0.8)
            split.prop(obj.data.rigify_layers[i], "name", text="Layer %d" % (i + 1))
            split.prop(obj.data.rigify_layers[i], "row", text="")
            #split.prop(obj.data.rigify_layers[i], "column", text="")
+ #split.prop(obj.data.rigify_layers[i], "column", text="")
+
+
class BONE_PT_rigify_buttons(bpy.types.Panel):
    """ Bone panel: pick a rig type for the active pose bone and edit that
        type's parameters.
    """
    bl_label = "Rigify Type"
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "bone"
    #bl_options = {'DEFAULT_OPEN'}

    @classmethod
    def poll(cls, context):
        if not context.armature or not context.active_pose_bone:
            return False
        obj = context.object
        if obj:
            # BUGFIX: was "obj.mode in ('POSE')" -- ('POSE') is just the
            # string 'POSE', so this tested substring membership by accident.
            return obj.mode == 'POSE'
        return False

    def draw(self, context):
        C = context
        id_store = C.window_manager
        bone = context.active_pose_bone
        collection_name = str(id_store.rigify_collection).replace(" ", "")
        rig_name = str(context.active_pose_bone.rigify_type).replace(" ", "")

        layout = self.layout

        # Rebuild the rig-type list, filtered by the selected collection.
        # (removed unused local: collection = r.split('.')[0])
        for i in range(0, len(id_store.rigify_types)):
            id_store.rigify_types.remove(0)

        for r in rigify.rig_list:
            if collection_name == "All":
                a = id_store.rigify_types.add()
                a.name = r
            elif r.startswith(collection_name + '.'):
                a = id_store.rigify_types.add()
                a.name = r
            elif collection_name == "None" and len(r.split('.')) == 1:
                a = id_store.rigify_types.add()
                a.name = r

        # Rig type field
        row = layout.row()
        row.prop_search(bone, "rigify_type", id_store, "rigify_types", text="Rig type:")

        # Rig type parameters / Rig type non-exist alert
        if rig_name != "":
            # Make sure a parameters entry exists for the UI to edit.
            if len(bone.rigify_parameters) < 1:
                bone.rigify_parameters.add()

            try:
                rig = get_rig_type(rig_name)
                rig.Rig
            except (ImportError, AttributeError):
                row = layout.row()
                box = row.box()
                box.label(text="ALERT: type \"%s\" does not exist!" % rig_name)
            else:
                try:
                    rig.Rig.parameters_ui
                except AttributeError:
                    # Type exists but defines no parameter UI; nothing to draw.
                    pass
                else:
                    col = layout.column()
                    col.label(text="Options:")
                    box = layout.box()

                    rig.Rig.parameters_ui(box, C.active_object, bone.name)
+
+
+#class INFO_MT_armature_metarig_add(bpy.types.Menu):
+# bl_idname = "INFO_MT_armature_metarig_add"
+# bl_label = "Meta-Rig"
+
+# def draw(self, context):
+ #import rigify
+
+ #layout = self.layout
+ #layout.operator_context = 'INVOKE_REGION_WIN'
+
+ #for submodule_type in rigify.get_submodule_types():
+ # text = bpy.path.display_name(submodule_type)
+ # layout.operator("pose.metarig_sample_add", text=text, icon='OUTLINER_OB_ARMATURE').metarig_type = submodule_type
+
+
def rigify_report_exception(operator, exception):
    """ Report a rig-generation error through the given operator, prefixed
        with the rig-type module the error came from.

        operator: the bpy operator doing the reporting (uses .report())
        exception: the caught exception (normally a MetarigError)
    """
    import traceback
    import sys
    import os
    # find the module name where the error happened
    # hint, this is the metarig type!
    exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
    fn = traceback.extract_tb(exceptionTraceback)[-1][0]
    fn = os.path.basename(fn)
    fn = os.path.splitext(fn)[0]
    message = []
    if fn.startswith("__"):
        message.append("Incorrect armature...")
    else:
        message.append("Incorrect armature for type '%s'" % fn)
    # Robust if a non-MetarigError slips through: fall back to str() when
    # the exception has no .message attribute.
    message.append(getattr(exception, "message", str(exception)))

    message.reverse()  # XXX - stupid! menu's are upside down!

    operator.report({'INFO'}, '\n'.join(message))
+
+
class Generate(bpy.types.Operator):
    '''Generates a rig from the active metarig armature'''

    bl_idname = "pose.rigify_generate"
    bl_label = "Rigify Generate Rig"
    bl_options = {'UNDO'}

    def execute(self, context):
        # Reload so edits to generate.py are picked up without restarting
        # Blender.  (imp is the 2.x-era reload API.)
        import imp
        imp.reload(generate)

        # Disable global undo during generation so the many edit-mode
        # operations don't flood the undo stack; restored in finally.
        use_global_undo = context.user_preferences.edit.use_global_undo
        context.user_preferences.edit.use_global_undo = False
        try:
            generate.generate_rig(context, context.object)
        except rigify.utils.MetarigError as rig_exception:
            # Report metarig problems to the user instead of raising.
            rigify_report_exception(self, rig_exception)
        finally:
            context.user_preferences.edit.use_global_undo = use_global_undo

        return {'FINISHED'}
+
+
class Sample(bpy.types.Operator):
    '''Create a sample metarig to be modified before generating the final rig.'''

    bl_idname = "armature.metarig_sample_add"
    bl_label = "Add a sample metarig for a rig type"
    bl_options = {'UNDO'}

    # Name of the rig type whose create_sample() will be invoked.
    metarig_type = StringProperty(name="Type", description="Name of the rig type to generate a sample of", maxlen=128, default="")

    def execute(self, context):
        # Only meaningful while editing an armature with a type selected.
        if context.mode == 'EDIT_ARMATURE' and self.metarig_type != "":
            # Suppress global undo while the sample bones are created;
            # restored in finally.
            use_global_undo = context.user_preferences.edit.use_global_undo
            context.user_preferences.edit.use_global_undo = False
            try:
                rig = get_rig_type(self.metarig_type).Rig
                create_sample = rig.create_sample
            except (ImportError, AttributeError):
                # Rig type missing or has no sample generator; nothing to add.
                print("Rigify: rig type has no sample.")
            else:
                create_sample(context.active_object)
            finally:
                context.user_preferences.edit.use_global_undo = use_global_undo
                # create_sample() may leave the armature in object mode.
                bpy.ops.object.mode_set(mode='EDIT')

        return {'FINISHED'}
+
+
+#menu_func = (lambda self, context: self.layout.menu("INFO_MT_armature_metarig_add", icon='OUTLINER_OB_ARMATURE'))
+
+#from bl_ui import space_info # ensure the menu is loaded first
+
def register():
    # Register every Rigify panel and operator, in declaration order.
    rigify_classes = (
        DATA_PT_rigify_layer_names,
        DATA_PT_rigify_buttons,
        BONE_PT_rigify_buttons,
        Generate,
        Sample,
    )
    for cls in rigify_classes:
        bpy.utils.register_class(cls)

    #space_info.INFO_MT_armature_add.append(ui.menu_func)
+
+
def unregister():
    # Unregister every Rigify panel and operator registered by register().
    rigify_classes = (
        DATA_PT_rigify_layer_names,
        DATA_PT_rigify_buttons,
        BONE_PT_rigify_buttons,
        Generate,
        Sample,
    )
    for cls in rigify_classes:
        bpy.utils.unregister_class(cls)
diff --git a/rigify/utils.py b/rigify/utils.py
new file mode 100644
index 00000000..311ef24d
--- /dev/null
+++ b/rigify/utils.py
@@ -0,0 +1,552 @@
+#====================== BEGIN GPL LICENSE BLOCK ======================
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+#======================= END GPL LICENSE BLOCK ========================
+
+import bpy
+import imp
+import random
+import time
+from mathutils import Vector
+from math import ceil, floor
+from rna_prop_ui import rna_idprop_ui_prop_get
+
RIG_DIR = "rigs"  # Name of the directory where rig types are kept

ORG_PREFIX = "ORG-"  # Prefix of original bones.
MCH_PREFIX = "MCH-"  # Prefix of mechanism bones.
DEF_PREFIX = "DEF-"  # Prefix of deformation bones.
WGT_PREFIX = "WGT-"  # Prefix for widget objects
ROOT_NAME = "root"  # Name of the root bone.

# Layer mask for widget objects: 20 booleans, True only at index 19,
# so widgets go on the last scene layer.
WGT_LAYERS = [x == 19 for x in range(0, 20)]

# Hard-coded package name because __package__ is unreliable on
# Windows/Mac Blender builds (per the original author's note).
MODULE_NAME = "rigify"
+
+
+#=======================================================================
+# Error handling
+#=======================================================================
class MetarigError(Exception):
    """ Exception raised for metarig/rig-generation errors.

    The message is stored on the exception both via the standard
    ``args`` tuple (by calling the base class initializer, which the
    original implementation skipped — leaving ``args`` empty and
    breaking pickling) and as the ``message`` attribute kept for
    backward compatibility.
    """
    def __init__(self, message):
        # Initialize the base Exception so args/str/pickle behave normally.
        super().__init__(message)
        self.message = message

    def __str__(self):
        return repr(self.message)
+
+
+#=======================================================================
+# Name manipulation
+#=======================================================================
def org_name(name):
    """ Return *name* with ORG_PREFIX removed, if it is present.
    """
    has_prefix = name.startswith(ORG_PREFIX)
    return name[len(ORG_PREFIX):] if has_prefix else name
+
+
def strip_org(name):
    """ Return *name* with ORG_PREFIX removed, if it is present.
    """
    has_prefix = name.startswith(ORG_PREFIX)
    return name[len(ORG_PREFIX):] if has_prefix else name
org_name = strip_org
+
+
def org(name):
    """ Return *name* with ORG_PREFIX prepended; a no-op if the
    prefix is already there.
    """
    return name if name.startswith(ORG_PREFIX) else ORG_PREFIX + name
make_original_name = org
+
+
def mch(name):
    """ Return *name* with MCH_PREFIX prepended; a no-op if the
    prefix is already there.
    """
    return name if name.startswith(MCH_PREFIX) else MCH_PREFIX + name
make_mechanism_name = mch
+
+
def deformer(name):
    """ Return *name* with DEF_PREFIX prepended; a no-op if the
    prefix is already there.
    """
    return name if name.startswith(DEF_PREFIX) else DEF_PREFIX + name
make_deformer_name = deformer
+
+
def insert_before_lr(name, text):
    """ Insert *text* into a bone name just before a trailing
    left/right suffix such as ".L", "_R" or "-l"; otherwise append it.

    Fixes an IndexError in the original for names shorter than two
    characters (e.g. "L"), where name[-2] was accessed unconditionally.
    """
    if len(name) >= 2 and name[-1] in ('l', 'L', 'r', 'R') and name[-2] in ('.', '-', '_'):
        return name[:-2] + text + name[-2:]
    return name + text
+
+
+#=======================
+# Bone manipulation
+#=======================
def new_bone(obj, bone_name):
    """ Adds a new bone to the given armature object.
    Returns the resulting bone's name.

    obj must be the active object and Blender must be in armature edit
    mode; otherwise a MetarigError is raised.  The returned name may
    differ from bone_name if Blender had to uniquify it.
    """
    if obj == bpy.context.active_object and bpy.context.mode == 'EDIT_ARMATURE':
        edit_bone = obj.data.edit_bones.new(bone_name)
        name = edit_bone.name
        edit_bone.head = (0, 0, 0)
        edit_bone.tail = (0, 1, 0)
        edit_bone.roll = 0
        # Toggle out of and back into edit mode — presumably to force
        # Blender to flush the new edit bone into the armature data so
        # it is visible outside edit_bones; TODO confirm.
        bpy.ops.object.mode_set(mode='OBJECT')
        bpy.ops.object.mode_set(mode='EDIT')
        return name
    else:
        raise MetarigError("Can't add new bone '%s' outside of edit mode." % bone_name)
+
+
def copy_bone(obj, bone_name, assign_name=''):
    """ Makes a copy of the given bone in the given armature object.
    Returns the resulting bone's name.

    obj: armature object; must be the active object, in edit mode.
    bone_name: name of the existing bone to copy.
    assign_name: desired name for the copy; '' reuses bone_name
        (Blender will uniquify it, e.g. "Bone.001").
    Raises MetarigError if the bone is missing or not in edit mode.
    """
    if bone_name not in obj.data.bones:
        raise MetarigError("copy_bone(): bone '%s' not found, cannot copy it." % bone_name)

    if obj == bpy.context.active_object and bpy.context.mode == 'EDIT_ARMATURE':
        if assign_name == '':
            assign_name = bone_name
        # Copy the edit bone
        edit_bone_1 = obj.data.edit_bones[bone_name]
        edit_bone_2 = obj.data.edit_bones.new(assign_name)
        bone_name_1 = bone_name
        # The actual name Blender assigned (may differ from assign_name).
        bone_name_2 = edit_bone_2.name

        edit_bone_2.parent = edit_bone_1.parent
        edit_bone_2.use_connect = edit_bone_1.use_connect

        # Copy edit bone attributes
        edit_bone_2.layers = list(edit_bone_1.layers)

        edit_bone_2.head = Vector(edit_bone_1.head)
        edit_bone_2.tail = Vector(edit_bone_1.tail)
        edit_bone_2.roll = edit_bone_1.roll

        edit_bone_2.use_inherit_rotation = edit_bone_1.use_inherit_rotation
        edit_bone_2.use_inherit_scale = edit_bone_1.use_inherit_scale
        edit_bone_2.use_local_location = edit_bone_1.use_local_location

        edit_bone_2.use_deform = edit_bone_1.use_deform
        edit_bone_2.bbone_segments = edit_bone_1.bbone_segments
        edit_bone_2.bbone_in = edit_bone_1.bbone_in
        edit_bone_2.bbone_out = edit_bone_1.bbone_out

        # Leave edit mode — presumably so the new bone gets a pose bone
        # entry before we copy pose-level data; TODO confirm.
        bpy.ops.object.mode_set(mode='OBJECT')

        # Get the pose bones
        pose_bone_1 = obj.pose.bones[bone_name_1]
        pose_bone_2 = obj.pose.bones[bone_name_2]

        # Copy pose bone attributes
        pose_bone_2.rotation_mode = pose_bone_1.rotation_mode
        pose_bone_2.rotation_axis_angle = tuple(pose_bone_1.rotation_axis_angle)
        pose_bone_2.rotation_euler = tuple(pose_bone_1.rotation_euler)
        pose_bone_2.rotation_quaternion = tuple(pose_bone_1.rotation_quaternion)

        pose_bone_2.lock_location = tuple(pose_bone_1.lock_location)
        pose_bone_2.lock_scale = tuple(pose_bone_1.lock_scale)
        pose_bone_2.lock_rotation = tuple(pose_bone_1.lock_rotation)
        pose_bone_2.lock_rotation_w = pose_bone_1.lock_rotation_w
        pose_bone_2.lock_rotations_4d = pose_bone_1.lock_rotations_4d

        # Copy custom properties (and their RNA UI metadata), skipping
        # rigify's own bookkeeping properties.
        for key in pose_bone_1.keys():
            if key != "_RNA_UI" \
            and key != "rigify_parameters" \
            and key != "rigify_type":
                prop1 = rna_idprop_ui_prop_get(pose_bone_1, key, create=False)
                prop2 = rna_idprop_ui_prop_get(pose_bone_2, key, create=True)
                pose_bone_2[key] = pose_bone_1[key]
                # NOTE(review): this inner loop shadows the outer 'key';
                # harmless since the outer value is not used again this
                # iteration, but worth renaming someday.
                for key in prop1.keys():
                    prop2[key] = prop1[key]

        bpy.ops.object.mode_set(mode='EDIT')

        return bone_name_2
    else:
        raise MetarigError("Cannot copy bones outside of edit mode.")
+
+
def flip_bone(obj, bone_name):
    """ Flips an edit bone: its head and tail positions are swapped.

    Requires obj to be the active object, in armature edit mode;
    raises MetarigError otherwise.
    """
    if bone_name not in obj.data.bones:
        # NOTE(review): message says "copy" but this is flip_bone — the
        # wording was likely pasted from copy_bone.
        raise MetarigError("flip_bone(): bone '%s' not found, cannot copy it." % bone_name)

    if obj == bpy.context.active_object and bpy.context.mode == 'EDIT_ARMATURE':
        bone = obj.data.edit_bones[bone_name]
        head = Vector(bone.head)
        tail = Vector(bone.tail)
        # Three-step swap: the tail is first moved out of the way
        # (head + tail) — presumably so head and tail never coincide,
        # which would create a zero-length bone Blender might discard;
        # TODO confirm that motivation.
        bone.tail = head + tail
        bone.head = tail
        bone.tail = head
    else:
        raise MetarigError("Cannot flip bones outside of edit mode.")
+
+
def put_bone(obj, bone_name, pos):
    """ Translate the bone so that its head sits at *pos*, keeping
    its length and orientation unchanged.
    """
    if bone_name not in obj.data.bones:
        raise MetarigError("put_bone(): bone '%s' not found, cannot copy it." % bone_name)
    if obj != bpy.context.active_object or bpy.context.mode != 'EDIT_ARMATURE':
        raise MetarigError("Cannot 'put' bones outside of edit mode.")

    edit_bone = obj.data.edit_bones[bone_name]
    edit_bone.translate(pos - edit_bone.head)
+
+
+#=============================================
+# Widget creation
+#=============================================
+
def obj_to_bone(obj, rig, bone_name):
    """ Places an object at the location/rotation/scale of the given bone.

    obj: the object to transform (e.g. a widget mesh object).
    rig: the armature object owning the bone.
    bone_name: name of the bone to match.
    Raises MetarigError when called in edit mode (bone.matrix_local is
    not reliable there).
    """
    if bpy.context.mode == 'EDIT_ARMATURE':
        raise MetarigError("obj_to_bone(): does not work while in edit mode.")

    bone = rig.data.bones[bone_name]

    # World-space bone matrix (Blender 2.5x matrix multiplication syntax).
    mat = rig.matrix_world * bone.matrix_local

    obj.location = mat.to_translation()

    obj.rotation_mode = 'XYZ'
    obj.rotation_euler = mat.to_euler()

    # Uniform scale: average the armature's scale axes and multiply by
    # bone length, so the widget's size tracks the bone it decorates.
    scl = mat.to_scale()
    scl_avg = (scl[0] + scl[1] + scl[2]) / 3
    obj.scale = (bone.length * scl_avg), (bone.length * scl_avg), (bone.length * scl_avg)
+
+
def create_widget(rig, bone_name):
    """ Create an empty widget mesh object for a bone and return it,
    or return None when a widget of that name already exists.
    """
    obj_name = WGT_PREFIX + bone_name
    scene = bpy.context.scene

    if obj_name in scene.objects:
        # A widget for this bone already exists; don't make a duplicate.
        return None

    mesh = bpy.data.meshes.new(obj_name)
    widget = bpy.data.objects.new(obj_name, mesh)
    scene.objects.link(widget)

    obj_to_bone(widget, rig, bone_name)
    widget.layers = WGT_LAYERS
    return widget
+
+
+# Common Widgets
+
def create_line_widget(rig, bone_name):
    """ Create a line widget spanning the bone's length (a single edge
    from the head to the tail).
    """
    widget = create_widget(rig, bone_name)
    if widget is None:
        return
    widget.data.from_pydata([(0, 0, 0), (0, 1, 0)], [(0, 1)], [])
    widget.data.update()
+
+
def create_circle_widget(rig, bone_name, radius=1.0, head_tail=0.0, with_line=False):
    """ Creates a basic circle widget, a circle around the y-axis.
    radius: the radius of the circle
    head_tail: where along the length of the bone the circle is (0.0=head, 1.0=tail)
    with_line: if True, also add a diameter edge across the circle.
    Returns the widget object, or None if it already existed.
    """
    obj = create_widget(rig, bone_name)
    if obj != None:
        # Pre-baked unit circle of 32 points in the XZ plane.
        v = [(0.7071068286895752, 2.980232238769531e-07, -0.7071065306663513), (0.8314696550369263, 2.980232238769531e-07, -0.5555699467658997), (0.9238795042037964, 2.682209014892578e-07, -0.3826831877231598), (0.9807852506637573, 2.5331974029541016e-07, -0.19509011507034302), (1.0, 2.365559055306221e-07, 1.6105803979371558e-07), (0.9807853698730469, 2.2351741790771484e-07, 0.19509044289588928), (0.9238796234130859, 2.086162567138672e-07, 0.38268351554870605), (0.8314696550369263, 1.7881393432617188e-07, 0.5555704236030579), (0.7071068286895752, 1.7881393432617188e-07, 0.7071070075035095), (0.5555702447891235, 1.7881393432617188e-07, 0.8314698934555054), (0.38268327713012695, 1.7881393432617188e-07, 0.923879861831665), (0.19509008526802063, 1.7881393432617188e-07, 0.9807855486869812), (-3.2584136988589307e-07, 1.1920928955078125e-07, 1.000000238418579), (-0.19509072601795197, 1.7881393432617188e-07, 0.9807854294776917), (-0.3826838731765747, 1.7881393432617188e-07, 0.9238795638084412), (-0.5555707216262817, 1.7881393432617188e-07, 0.8314695358276367), (-0.7071071863174438, 1.7881393432617188e-07, 0.7071065902709961), (-0.8314700126647949, 1.7881393432617188e-07, 0.5555698871612549), (-0.923879861831665, 2.086162567138672e-07, 0.3826829195022583), (-0.9807853698730469, 2.2351741790771484e-07, 0.1950896978378296), (-1.0, 2.365559907957504e-07, -7.290432222362142e-07), (-0.9807850122451782, 2.5331974029541016e-07, -0.195091113448143), (-0.9238790273666382, 2.682209014892578e-07, -0.38268423080444336), (-0.831468939781189, 2.980232238769531e-07, -0.5555710196495056), (-0.7071058750152588, 2.980232238769531e-07, -0.707107424736023), (-0.555569052696228, 2.980232238769531e-07, -0.8314701318740845), (-0.38268208503723145, 2.980232238769531e-07, -0.923879861831665), (-0.19508881866931915, 2.980232238769531e-07, -0.9807853102684021), (1.6053570561780361e-06, 2.980232238769531e-07, -0.9999997615814209), (0.19509197771549225, 2.980232238769531e-07, -0.9807847142219543), (0.3826850652694702, 2.980232238769531e-07, -0.9238786101341248), (0.5555717945098877, 2.980232238769531e-07, -0.8314683437347412)]
        # Scale by radius and slide along the bone's Y axis by head_tail.
        verts = [(a[0] * radius, head_tail, a[2] * radius) for a in v]
        if with_line:
            # Extra (28, 12) edge draws a line across the circle.
            edges = [(28, 12), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12), (12, 13), (13, 14), (14, 15), (15, 16), (16, 17), (17, 18), (18, 19), (19, 20), (20, 21), (21, 22), (22, 23), (23, 24), (24, 25), (25, 26), (26, 27), (27, 28), (28, 29), (29, 30), (30, 31), (0, 31)]
        else:
            edges = [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12), (12, 13), (13, 14), (14, 15), (15, 16), (16, 17), (17, 18), (18, 19), (19, 20), (20, 21), (21, 22), (22, 23), (23, 24), (24, 25), (25, 26), (26, 27), (27, 28), (28, 29), (29, 30), (30, 31), (0, 31)]
        mesh = obj.data
        mesh.from_pydata(verts, edges, [])
        mesh.update()
        return obj
    else:
        return None
+
+
def create_sphere_widget(rig, bone_name):
    """ Creates a basic sphere widget, three perpendicular overlapping circles.
    Does nothing if the widget already exists (create_widget returned None).
    """
    obj = create_widget(rig, bone_name)
    if obj != None:
        # Pre-baked vertices: three 16-point circles in the XY, YZ and XZ planes.
        verts = [(0.3535533845424652, 0.3535533845424652, 0.0), (0.4619397521018982, 0.19134171307086945, 0.0), (0.5, -2.1855694143368964e-08, 0.0), (0.4619397521018982, -0.19134175777435303, 0.0), (0.3535533845424652, -0.3535533845424652, 0.0), (0.19134174287319183, -0.4619397521018982, 0.0), (7.549790126404332e-08, -0.5, 0.0), (-0.1913416087627411, -0.46193981170654297, 0.0), (-0.35355329513549805, -0.35355350375175476, 0.0), (-0.4619397521018982, -0.19134178757667542, 0.0), (-0.5, 5.962440319251527e-09, 0.0), (-0.4619397222995758, 0.1913418024778366, 0.0), (-0.35355326533317566, 0.35355350375175476, 0.0), (-0.19134148955345154, 0.46193987131118774, 0.0), (3.2584136988589307e-07, 0.5, 0.0), (0.1913420855998993, 0.46193960309028625, 0.0), (7.450580596923828e-08, 0.46193960309028625, 0.19134199619293213), (5.9254205098113744e-08, 0.5, 2.323586443253589e-07), (4.470348358154297e-08, 0.46193987131118774, -0.1913415789604187), (2.9802322387695312e-08, 0.35355350375175476, -0.3535533547401428), (2.9802322387695312e-08, 0.19134178757667542, -0.46193981170654297), (5.960464477539063e-08, -1.1151834122813398e-08, -0.5000000596046448), (5.960464477539063e-08, -0.1913418024778366, -0.46193984150886536), (5.960464477539063e-08, -0.35355350375175476, -0.3535533845424652), (7.450580596923828e-08, -0.46193981170654297, -0.19134166836738586), (9.348272556053416e-08, -0.5, 1.624372103492533e-08), (1.043081283569336e-07, -0.4619397521018982, 0.19134168326854706), (1.1920928955078125e-07, -0.3535533845424652, 0.35355329513549805), (1.1920928955078125e-07, -0.19134174287319183, 0.46193966269493103), (1.1920928955078125e-07, -4.7414250303745575e-09, 0.49999991059303284), (1.1920928955078125e-07, 0.19134172797203064, 0.46193966269493103), (8.940696716308594e-08, 0.3535533845424652, 0.35355329513549805), (0.3535534739494324, 0.0, 0.35355329513549805), (0.1913418173789978, -2.9802322387695312e-08, 0.46193966269493103), (8.303572940349113e-08, -5.005858838558197e-08, 0.49999991059303284), (-0.19134165346622467, -5.960464477539063e-08, 0.46193966269493103), (-0.35355329513549805, -8.940696716308594e-08, 0.35355329513549805), (-0.46193963289260864, -5.960464477539063e-08, 0.19134168326854706), (-0.49999991059303284, -5.960464477539063e-08, 1.624372103492533e-08), (-0.4619397521018982, -2.9802322387695312e-08, -0.19134166836738586), (-0.3535534143447876, -2.9802322387695312e-08, -0.3535533845424652), (-0.19134171307086945, 0.0, -0.46193984150886536), (7.662531942287387e-08, 9.546055501630235e-09, -0.5000000596046448), (0.19134187698364258, 5.960464477539063e-08, -0.46193981170654297), (0.3535535931587219, 5.960464477539063e-08, -0.3535533547401428), (0.4619399905204773, 5.960464477539063e-08, -0.1913415789604187), (0.5000000596046448, 5.960464477539063e-08, 2.323586443253589e-07), (0.4619396924972534, 2.9802322387695312e-08, 0.19134199619293213)]
        edges = [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12), (12, 13), (13, 14), (14, 15), (0, 15), (16, 31), (16, 17), (17, 18), (18, 19), (19, 20), (20, 21), (21, 22), (22, 23), (23, 24), (24, 25), (25, 26), (26, 27), (27, 28), (28, 29), (29, 30), (30, 31), (32, 33), (33, 34), (34, 35), (35, 36), (36, 37), (37, 38), (38, 39), (39, 40), (40, 41), (41, 42), (42, 43), (43, 44), (44, 45), (45, 46), (46, 47), (32, 47)]
        mesh = obj.data
        mesh.from_pydata(verts, edges, [])
        mesh.update()
+
+
def create_limb_widget(rig, bone_name):
    """ Creates a basic limb widget, a line that spans the length of the
    bone, with a circle around the center.
    Does nothing if the widget already exists (create_widget returned None).
    """
    obj = create_widget(rig, bone_name)
    if obj != None:
        # First two verts are the line (head to tail); the rest form the
        # circle at the bone's midpoint (y = 0.5).
        verts = [(-1.1920928955078125e-07, 1.7881393432617188e-07, 0.0), (3.5762786865234375e-07, 1.0000004768371582, 0.0), (0.1767769455909729, 0.5000001192092896, 0.17677664756774902), (0.20786768198013306, 0.5000001192092896, 0.1388925313949585), (0.23097014427185059, 0.5000001192092896, 0.09567084908485413), (0.24519658088684082, 0.5000001192092896, 0.048772573471069336), (0.2500002384185791, 0.5000001192092896, -2.545945676502015e-09), (0.24519658088684082, 0.5000001192092896, -0.048772573471069336), (0.23097014427185059, 0.5000001192092896, -0.09567084908485413), (0.20786768198013306, 0.5000001192092896, -0.13889259099960327), (0.1767769455909729, 0.5000001192092896, -0.1767767071723938), (0.13889282941818237, 0.5000001192092896, -0.20786744356155396), (0.09567105770111084, 0.5000001192092896, -0.23096990585327148), (0.04877278208732605, 0.5000001192092896, -0.24519634246826172), (1.7279069197684294e-07, 0.5000000596046448, -0.25), (-0.0487724244594574, 0.5000001192092896, -0.24519634246826172), (-0.09567070007324219, 0.5000001192092896, -0.2309698462486267), (-0.13889241218566895, 0.5000001192092896, -0.20786738395690918), (-0.17677652835845947, 0.5000001192092896, -0.17677664756774902), (-0.20786726474761963, 0.5000001192092896, -0.13889244198799133), (-0.23096972703933716, 0.5000001192092896, -0.09567070007324219), (-0.24519610404968262, 0.5000001192092896, -0.04877239465713501), (-0.2499997615814209, 0.5000001192092896, 2.1997936983098043e-07), (-0.24519598484039307, 0.5000001192092896, 0.04877282679080963), (-0.23096948862075806, 0.5000001192092896, 0.09567108750343323), (-0.20786696672439575, 0.5000001192092896, 0.1388927698135376), (-0.1767762303352356, 0.5000001192092896, 0.17677688598632812), (-0.13889199495315552, 0.5000001192092896, 0.2078675627708435), (-0.09567028284072876, 0.5000001192092896, 0.23097002506256104), (-0.048771947622299194, 0.5000001192092896, 0.24519634246826172), (6.555903269145347e-07, 0.5000001192092896, 0.25), (0.04877324402332306, 0.5000001192092896, 0.24519622325897217), (0.09567153453826904, 0.5000001192092896, 0.23096966743469238), (0.13889318704605103, 0.5000001192092896, 0.20786714553833008)]
        edges = [(0, 1), (2, 3), (4, 3), (5, 4), (5, 6), (6, 7), (8, 7), (8, 9), (10, 9), (10, 11), (11, 12), (13, 12), (14, 13), (14, 15), (16, 15), (16, 17), (17, 18), (19, 18), (19, 20), (21, 20), (21, 22), (22, 23), (24, 23), (25, 24), (25, 26), (27, 26), (27, 28), (29, 28), (29, 30), (30, 31), (32, 31), (32, 33), (2, 33)]
        mesh = obj.data
        mesh.from_pydata(verts, edges, [])
        mesh.update()
+
+
def create_bone_widget(rig, bone_name):
    """ Create a simple bone-shaped (obelisk-like) wireframe widget:
    a tapered box running from the bone's head to its tail.
    """
    widget = create_widget(rig, bone_name)
    if widget is None:
        return
    verts = [(0.04, 1.0, -0.04), (0.1, 0.0, -0.1), (-0.1, 0.0, -0.1), (-0.04, 1.0, -0.04), (0.04, 1.0, 0.04), (0.1, 0.0, 0.1), (-0.1, 0.0, 0.1), (-0.04, 1.0, 0.04)]
    edges = [(1, 2), (0, 1), (0, 3), (2, 3), (4, 5), (5, 6), (6, 7), (4, 7), (1, 5), (0, 4), (2, 6), (3, 7)]
    widget.data.from_pydata(verts, edges, [])
    widget.data.update()
+
+
def create_compass_widget(rig, bone_name):
    """ Creates a compass-shaped widget: a circle in the XY plane with
    elongated points along the axes.
    Does nothing if the widget already exists (create_widget returned None).
    """
    obj = create_widget(rig, bone_name)
    if obj != None:
        verts = [(0.0, 1.2000000476837158, 0.0), (0.19509032368659973, 0.9807852506637573, 0.0), (0.3826834559440613, 0.9238795042037964, 0.0), (0.5555702447891235, 0.8314695954322815, 0.0), (0.7071067690849304, 0.7071067690849304, 0.0), (0.8314696550369263, 0.5555701851844788, 0.0), (0.9238795042037964, 0.3826834261417389, 0.0), (0.9807852506637573, 0.19509035348892212, 0.0), (1.2000000476837158, 7.549790126404332e-08, 0.0), (0.9807853102684021, -0.19509020447731018, 0.0), (0.9238795638084412, -0.38268327713012695, 0.0), (0.8314696550369263, -0.5555701851844788, 0.0), (0.7071067690849304, -0.7071067690849304, 0.0), (0.5555701851844788, -0.8314696550369263, 0.0), (0.38268327713012695, -0.9238796234130859, 0.0), (0.19509008526802063, -0.9807853102684021, 0.0), (-3.2584136988589307e-07, -1.2999999523162842, 0.0), (-0.19509072601795197, -0.9807851910591125, 0.0), (-0.3826838731765747, -0.9238793253898621, 0.0), (-0.5555707216262817, -0.8314692974090576, 0.0), (-0.7071072459220886, -0.707106351852417, 0.0), (-0.8314700126647949, -0.5555696487426758, 0.0), (-0.923879861831665, -0.3826826810836792, 0.0), (-0.9807854294776917, -0.1950894594192505, 0.0), (-1.2000000476837158, 9.655991561885457e-07, 0.0), (-0.980785071849823, 0.1950913518667221, 0.0), (-0.923879086971283, 0.38268446922302246, 0.0), (-0.831468939781189, 0.5555712580680847, 0.0), (-0.7071058750152588, 0.707107663154602, 0.0), (-0.5555691123008728, 0.8314703702926636, 0.0), (-0.38268208503723145, 0.9238801002502441, 0.0), (-0.19508881866931915, 0.9807855486869812, 0.0)]
        edges = [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11), (11, 12), (12, 13), (13, 14), (14, 15), (15, 16), (16, 17), (17, 18), (18, 19), (19, 20), (20, 21), (21, 22), (22, 23), (23, 24), (24, 25), (25, 26), (26, 27), (27, 28), (28, 29), (29, 30), (30, 31), (0, 31)]
        mesh = obj.data
        mesh.from_pydata(verts, edges, [])
        mesh.update()
+
def create_root_widget(rig, bone_name):
    """ Creates a widget for the root bone: a dashed circle with four
    arrow heads pointing along +X/-X/+Y/-Y.
    Does nothing if the widget already exists (create_widget returned None).
    """
    obj = create_widget(rig, bone_name)
    if obj != None:
        verts = [(0.7071067690849304, 0.7071067690849304, 0.0), (0.7071067690849304, -0.7071067690849304, 0.0), (-0.7071067690849304, 0.7071067690849304, 0.0), (-0.7071067690849304, -0.7071067690849304, 0.0), (0.8314696550369263, 0.5555701851844788, 0.0), (0.8314696550369263, -0.5555701851844788, 0.0), (-0.8314696550369263, 0.5555701851844788, 0.0), (-0.8314696550369263, -0.5555701851844788, 0.0), (0.9238795042037964, 0.3826834261417389, 0.0), (0.9238795042037964, -0.3826834261417389, 0.0), (-0.9238795042037964, 0.3826834261417389, 0.0), (-0.9238795042037964, -0.3826834261417389, 0.0), (0.9807852506637573, 0.19509035348892212, 0.0), (0.9807852506637573, -0.19509035348892212, 0.0), (-0.9807852506637573, 0.19509035348892212, 0.0), (-0.9807852506637573, -0.19509035348892212, 0.0), (0.19509197771549225, 0.9807849526405334, 0.0), (0.19509197771549225, -0.9807849526405334, 0.0), (-0.19509197771549225, 0.9807849526405334, 0.0), (-0.19509197771549225, -0.9807849526405334, 0.0), (0.3826850652694702, 0.9238788485527039, 0.0), (0.3826850652694702, -0.9238788485527039, 0.0), (-0.3826850652694702, 0.9238788485527039, 0.0), (-0.3826850652694702, -0.9238788485527039, 0.0), (0.5555717945098877, 0.8314685821533203, 0.0), (0.5555717945098877, -0.8314685821533203, 0.0), (-0.5555717945098877, 0.8314685821533203, 0.0), (-0.5555717945098877, -0.8314685821533203, 0.0), (0.19509197771549225, 1.2807848453521729, 0.0), (0.19509197771549225, -1.2807848453521729, 0.0), (-0.19509197771549225, 1.2807848453521729, 0.0), (-0.19509197771549225, -1.2807848453521729, 0.0), (1.280785322189331, 0.19509035348892212, 0.0), (1.280785322189331, -0.19509035348892212, 0.0), (-1.280785322189331, 0.19509035348892212, 0.0), (-1.280785322189331, -0.19509035348892212, 0.0), (0.3950919806957245, 1.2807848453521729, 0.0), (0.3950919806957245, -1.2807848453521729, 0.0), (-0.3950919806957245, 1.2807848453521729, 0.0), (-0.3950919806957245, -1.2807848453521729, 0.0), (1.280785322189331, 0.39509034156799316, 0.0), (1.280785322189331, -0.39509034156799316, 0.0), (-1.280785322189331, 0.39509034156799316, 0.0), (-1.280785322189331, -0.39509034156799316, 0.0), (0.0, 1.5807849168777466, 0.0), (0.0, -1.5807849168777466, 0.0), (1.5807852745056152, 0.0, 0.0), (-1.5807852745056152, 0.0, 0.0)]
        edges = [(0, 4), (1, 5), (2, 6), (3, 7), (4, 8), (5, 9), (6, 10), (7, 11), (8, 12), (9, 13), (10, 14), (11, 15), (16, 20), (17, 21), (18, 22), (19, 23), (20, 24), (21, 25), (22, 26), (23, 27), (0, 24), (1, 25), (2, 26), (3, 27), (16, 28), (17, 29), (18, 30), (19, 31), (12, 32), (13, 33), (14, 34), (15, 35), (28, 36), (29, 37), (30, 38), (31, 39), (32, 40), (33, 41), (34, 42), (35, 43), (36, 44), (37, 45), (38, 44), (39, 45), (40, 46), (41, 46), (42, 47), (43, 47)]
        mesh = obj.data
        mesh.from_pydata(verts, edges, [])
        mesh.update()
+
+
+#=============================================
+# Misc
+#=============================================
+
def copy_attributes(a, b):
    """ Copy all public, writable attributes from *a* onto *b*.

    Skips private ("_"-prefixed) names, Blender's read-only error_*
    reports, and a few known problematic RNA attributes.  Attributes
    that are read-only on *b* are silently skipped.

    Improvements over the original: the skip list is a set (single
    membership test instead of a chain of comparisons) and the unused
    exception binding ``as e`` is removed.
    """
    skipped = {"group", "is_valid", "rna_type", "bl_rna"}
    for key in dir(a):
        if key.startswith("_") or key.startswith("error_") or key in skipped:
            continue
        try:
            setattr(b, key, getattr(a, key))
        except AttributeError:
            # Attribute is read-only on b — best-effort copy, skip it.
            pass
+
+
def get_rig_type(rig_type):
    """ Fetches a rig module by name, and returns it.

    rig_type: name of a module inside the rigify "rigs" directory.
    Raises ImportError if no such rig module exists.
    """
    #print("%s.%s.%s" % (__package__,RIG_DIR,rig_type))
    submod = __import__(name="%s.%s.%s" % (MODULE_NAME, RIG_DIR, rig_type), fromlist=[rig_type])
    # Reload so edits to rig scripts are picked up without restarting Blender.
    imp.reload(submod)
    return submod
+
+
def connected_children_names(obj, bone_name):
    """ Return the names (in order) of the bones forming a single
    connected chain below *bone_name*.  The walk stops as soon as a
    bone has zero or more than one connected child (a branch).
    """
    names = []
    bone = obj.data.bones[bone_name]

    while True:
        connected = [child.name for child in bone.children if child.use_connect]
        if len(connected) != 1:
            # Chain end or branch point — stop here.
            break
        names.append(connected[0])
        bone = obj.data.bones[connected[0]]

    return names
+
+
def has_connected_children(bone):
    """ Return whether any child of *bone* is connected to it.
    """
    return any(child.use_connect for child in bone.children)
+
+
def get_layers(layers):
    """ Do its best to extract a 32-element boolean layer list from any
    data thrown at it: an int (single layer index), a comma-separated
    string of indices, a tuple/list of indices, or any other iterable.
    Returns None for inputs it cannot interpret.
    """
    # Exact type checks on purpose: bool must not be treated as int here.
    if type(layers) is int:
        return [x == layers for x in range(0, 32)]

    if type(layers) is str:
        indices = []
        for token in layers.split(","):
            try:
                indices.append(int(float(token)))
            except ValueError:
                # Ignore tokens that aren't numbers.
                pass
        return [x in indices for x in range(0, 32)]

    if type(layers) in (tuple, list):
        return [x in layers for x in range(0, 32)]

    # Last resort: anything iterable.
    try:
        list(layers)
    except TypeError:
        pass
    else:
        return [x in layers for x in range(0, 32)]
+
+
def write_metarig(obj, layers=False, func_name="create_sample"):
    '''
    Write a metarig as a python script, this rig is to have all info needed for
    generating the real rig with rigify.

    obj: the armature object to serialize; the function switches it in
        and out of edit mode, so it must be the active object.
    layers: when True, also emit each bone's layer assignment.
    func_name: name of the function in the generated script.
    Returns the generated script as a single string.
    '''
    code = []

    code.append("def %s(obj):" % func_name)
    code.append("    # generated by rigify.utils.write_metarig")
    bpy.ops.object.mode_set(mode='EDIT')
    code.append("    bpy.ops.object.mode_set(mode='EDIT')")
    code.append("    arm = obj.data")

    arm = obj.data
    # write parents first: sorting by the depth of the parent chain
    # guarantees a parent is created before any of its children.
    bones = [(len(bone.parent_recursive), bone.name) for bone in arm.edit_bones]
    bones.sort(key=lambda item: item[0])
    bones = [item[1] for item in bones]

    code.append("\n    bones = {}\n")

    for bone_name in bones:
        bone = arm.edit_bones[bone_name]
        code.append("    bone = arm.edit_bones.new('%s')" % bone.name)
        code.append("    bone.head[:] = %.4f, %.4f, %.4f" % bone.head.to_tuple(4))
        code.append("    bone.tail[:] = %.4f, %.4f, %.4f" % bone.tail.to_tuple(4))
        code.append("    bone.roll = %.4f" % bone.roll)
        code.append("    bone.use_connect = %s" % str(bone.use_connect))
        if bone.parent:
            code.append("    bone.parent = arm.edit_bones[bones['%s']]" % bone.parent.name)
        # Record the actual assigned name, in case Blender uniquified it.
        code.append("    bones['%s'] = bone.name" % bone.name)

    bpy.ops.object.mode_set(mode='OBJECT')
    code.append("")
    code.append("    bpy.ops.object.mode_set(mode='OBJECT')")

    # Rig type and other pose properties
    for bone_name in bones:
        pbone = obj.pose.bones[bone_name]
        pbone_written = False

        code.append("    pbone = obj.pose.bones[bones['%s']]" % bone_name)
        code.append("    pbone.rigify_type = '%s'" % pbone.rigify_type)
        code.append("    pbone.lock_location = %s" % str(tuple(pbone.lock_location)))
        code.append("    pbone.lock_rotation = %s" % str(tuple(pbone.lock_rotation)))
        code.append("    pbone.lock_rotation_w = %s" % str(pbone.lock_rotation_w))
        code.append("    pbone.lock_scale = %s" % str(tuple(pbone.lock_scale)))
        code.append("    pbone.rotation_mode = '%s'" % str(pbone.rotation_mode))
        if layers:
            code.append("    pbone.bone.layers = %s" % str(list(pbone.bone.layers)))
        # Rig type parameters
        if len(pbone.rigify_parameters) > 0:
            code.append("    pbone.rigify_parameters.add()")
            for param_name in pbone.rigify_parameters[0].keys():
                param = getattr(pbone.rigify_parameters[0], param_name)
                if str(type(param)) == "<class 'bpy_prop_array'>":
                    param = list(param)
                # Emitted inside try/except so a parameter unknown to a
                # newer/older rigify doesn't abort the generated script.
                code.append("    try:")
                code.append("        pbone.rigify_parameters[0].%s = %s" % (param_name, str(param)))
                code.append("    except AttributeError:")
                code.append("        pass")

    # Finally: deselect everything, then select exactly the created bones.
    code.append("\n    bpy.ops.object.mode_set(mode='EDIT')")
    code.append("    for bone in arm.edit_bones:")
    code.append("        bone.select = False")
    code.append("        bone.select_head = False")
    code.append("        bone.select_tail = False")

    code.append("    for b in bones:")
    code.append("        bone = arm.edit_bones[bones[b]]")
    code.append("        bone.select = True")
    code.append("        bone.select_head = True")
    code.append("        bone.select_tail = True")
    code.append("        arm.edit_bones.active = bone")

    return "\n".join(code)
+
+
def random_id(length=8):
    """ Generates a random alphanumeric id string of exactly *length*
    characters: a random half followed by a reversed, zero-padded slice
    of the current time in hex.

    Fixes the original's length == 1 case, where the time-slice length
    was 0 and the slice ``[-0:]`` took the ENTIRE hex timestamp instead
    of nothing, returning a string much longer than requested.
    """
    tlength = int(length / 2)
    rlength = int(length / 2) + int(length % 2)

    # NOTE: '0' appears twice in the original table (digits plus a stray
    # duplicate); kept for output compatibility.
    chars = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
    text = ""
    for i in range(0, rlength):
        text += random.choice(chars)
    if tlength > 0:
        # Last tlength hex digits of the timestamp, zero-padded, reversed.
        text += str(hex(int(time.time())))[2:][-tlength:].rjust(tlength, '0')[::-1]
    return text
+
diff --git a/space_view3d_3d_navigation.py b/space_view3d_3d_navigation.py
new file mode 100644
index 00000000..ba3ba292
--- /dev/null
+++ b/space_view3d_3d_navigation.py
@@ -0,0 +1,101 @@
+# 3D NAVIGATION TOOLBAR v1.2 - 3Dview Addon - Blender 2.5x
+#
+# THIS SCRIPT IS LICENSED UNDER GPL,
+# please read the license block.
+#
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
# Add-on metadata read by Blender's add-on manager.
bl_info = {
    "name": "3D Navigation",
    "author": "Demohero, uriel",
    "version": (1, 2),
    "blender": (2, 5, 7),
    "api": 35853,
    "location": "View3D > Tool Shelf > 3D Nav",
    "description": "Navigate the Camera & 3D View from the Toolshelf",
    "warning": "",
    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
        "Scripts/3D_interaction/3D_Navigation",
    "tracker_url": "http://projects.blender.org/tracker/index.php?"\
        "func=detail&aid=23530",
    "category": "3D View"}
+
+# import the basic library
+import bpy
+
+# main class of this toolbar
# Main panel of this add-on: View3D tool-shelf tab "3D Nav".
class VIEW3D_PT_3dnavigationPanel(bpy.types.Panel):
    bl_space_type = "VIEW_3D"
    bl_region_type = "TOOLS"
    bl_label = "3D Nav"

    def draw(self, context):
        """Draw the navigation buttons into the tool shelf."""
        layout = self.layout
        view = context.space_data

        # Three stacked view-toggle buttons ("Triple boutons" in the
        # original French comment): camera, local/global, persp/ortho.
        col = layout.column(align=True)
        row = col.row()
        row.operator("view3d.viewnumpad", text="View Camera", icon='CAMERA_DATA').type='CAMERA'
        row = col.row()
        row.operator("view3d.localview", text="View Global/Local")
        row = col.row()
        row.operator("view3d.view_persportho", text="View Persp/Ortho")

        # Group of 6 buttons: align the view to each axis direction.
        col = layout.column(align=True)
        col.label(text="Align view from:")
        row = col.row()
        row.operator("view3d.viewnumpad", text="Front").type='FRONT'
        row.operator("view3d.viewnumpad", text="Back").type='BACK'
        row = col.row()
        row.operator("view3d.viewnumpad", text="Left").type='LEFT'
        row.operator("view3d.viewnumpad", text="Right").type='RIGHT'
        row = col.row()
        row.operator("view3d.viewnumpad", text="Top").type='TOP'
        row.operator("view3d.viewnumpad", text="Bottom").type='BOTTOM'
        row = col.row()

        # Lock-to-object selector plus view/cursor helper buttons.
        col = layout.column(align=True)
        col.label(text="View to Object:")
        col.prop(view, "lock_object", text="")
        row = col.row()
        row.operator("view3d.view_selected", text="View to Selected")
        col = layout.column(align=True)
        col.label(text="Cursor:")
        row = col.row()
        row.operator("view3d.snap_cursor_to_center", text="Center")
        row.operator("view3d.view_center_cursor", text="View")
        row = col.row()
        row.operator("view3d.snap_cursor_to_selected", text="Cursor to Selected")
+
+# register the class
def register():
    """Register every class in this module with Blender.

    Removed the dead trailing ``pass`` statement from the original.
    """
    bpy.utils.register_module(__name__)
+
def unregister():
    """Unregister every class in this module from Blender.

    Removed the dead trailing ``pass`` statement from the original.
    """
    bpy.utils.unregister_module(__name__)
+
+if __name__ == "__main__":
+ register()
diff --git a/space_view3d_align_tools.py b/space_view3d_align_tools.py
new file mode 100644
index 00000000..11ff8b98
--- /dev/null
+++ b/space_view3d_align_tools.py
@@ -0,0 +1,342 @@
+# AlignTools.py (c) 2009, 2010 Gabriel Beaudin (gabhead)
+#
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
# Add-on metadata shown in Blender's add-on manager.
bl_info = {
    "name": "Align Tools",
    "author": "Gabriel Beaudin (gabhead)",
    "version": (0, 1),
    "blender": (2, 5, 7),
    "api": 35853,
    "location": "View3D > Tool Shelf > Align Tools Panel",
    "description": "Align Selected Objects to Active Object",
    "warning": "",
    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"
                "Scripts/3D interaction/Align_Tools",
    # Fixed malformed query string: "aid==22389" -> "aid=22389".
    "tracker_url": "https://projects.blender.org/tracker/index.php?"
                   "func=detail&aid=22389",
    "category": "3D View"}
+
+"""Align Selected Objects"""
+
+import bpy
+
+
class AlignUi(bpy.types.Panel):
    """Tool Shelf panel exposing the align operators in object mode."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'TOOLS'

    bl_label = "Align Tools"
    bl_context = "objectmode"

    def draw(self, context):
        layout = self.layout
        obj = context.object

        # Show which object the others will be aligned to.
        if obj is not None:
            row = layout.row()
            row.label(text="Active object is: ", icon='OBJECT_DATA')
            row = layout.row()
            row.label(obj.name, icon='EDITMODE_HLT')

        box = layout.separator()

        col = layout.column()
        col.label(text="Align Loc + Rot:", icon='MANIPUL')

        col = layout.column(align=False)
        col.operator("object.align", text="XYZ")

        # One X/Y/Z/All button row per transform channel.
        for heading, icon, prefix in (
                ("Align Location:", 'MAN_TRANS', "object.align_location_"),
                ("Align Rotation:", 'MAN_ROT', "object.align_rotation_"),
                ("Align Scale:", 'MAN_SCALE', "object.align_objects_scale_")):
            col = layout.column()
            col.label(text=heading, icon=icon)

            col = layout.column_flow(columns=5, align=True)
            col.operator(prefix + "x", text="X")
            col.operator(prefix + "y", text="Y")
            col.operator(prefix + "z", text="Z")
            col.operator(prefix + "all", text="All")
+
+
+##Align all
+def main(context):
+ for i in bpy.context.selected_objects:
+ i.location = bpy.context.active_object.location
+ i.rotation_euler = bpy.context.active_object.rotation_euler
+
## Align Location

def LocAll(context):
    """Copy the whole location vector from the active object."""
    source = bpy.context.active_object
    for ob in bpy.context.selected_objects:
        ob.location = source.location


def LocX(context):
    """Copy only the X location component."""
    source = bpy.context.active_object
    for ob in bpy.context.selected_objects:
        ob.location.x = source.location.x


def LocY(context):
    """Copy only the Y location component."""
    source = bpy.context.active_object
    for ob in bpy.context.selected_objects:
        ob.location.y = source.location.y


def LocZ(context):
    """Copy only the Z location component."""
    source = bpy.context.active_object
    for ob in bpy.context.selected_objects:
        ob.location.z = source.location.z
+
## Align Rotation

def RotAll(context):
    """Copy the whole Euler rotation from the active object."""
    source = bpy.context.active_object
    for ob in bpy.context.selected_objects:
        ob.rotation_euler = source.rotation_euler


def RotX(context):
    """Copy only the X rotation component."""
    source = bpy.context.active_object
    for ob in bpy.context.selected_objects:
        ob.rotation_euler.x = source.rotation_euler.x


def RotY(context):
    """Copy only the Y rotation component."""
    source = bpy.context.active_object
    for ob in bpy.context.selected_objects:
        ob.rotation_euler.y = source.rotation_euler.y


def RotZ(context):
    """Copy only the Z rotation component."""
    source = bpy.context.active_object
    for ob in bpy.context.selected_objects:
        ob.rotation_euler.z = source.rotation_euler.z
## Align Scale

def ScaleAll(context):
    """Copy the whole scale vector from the active object."""
    source = bpy.context.active_object
    for ob in bpy.context.selected_objects:
        ob.scale = source.scale


def ScaleX(context):
    """Copy only the X scale component."""
    source = bpy.context.active_object
    for ob in bpy.context.selected_objects:
        ob.scale.x = source.scale.x


def ScaleY(context):
    """Copy only the Y scale component."""
    source = bpy.context.active_object
    for ob in bpy.context.selected_objects:
        ob.scale.y = source.scale.y


def ScaleZ(context):
    """Copy only the Z scale component."""
    source = bpy.context.active_object
    for ob in bpy.context.selected_objects:
        ob.scale.z = source.scale.z
+
## Classes

## Align All Rotation And Location
class AlignOperator(bpy.types.Operator):
    ''''''
    bl_idname = "object.align"
    bl_label = "Align Selected To Active"

    # Needs an active object to serve as the alignment source.
    @classmethod
    def poll(cls, context):
        return context.active_object is not None

    def execute(self, context):
        main(context)
        return {'FINISHED'}
+
####################### Align Location ########################
# One thin operator per worker function; poll requires an active object.

class AlignLocationOperator(bpy.types.Operator):
    ''''''
    bl_idname = "object.align_location_all"
    bl_label = "Align Selected Location To Active"

    @classmethod
    def poll(cls, context):
        return context.active_object is not None

    def execute(self, context):
        LocAll(context)
        return {'FINISHED'}


class AlignLocationXOperator(bpy.types.Operator):
    ''''''
    bl_idname = "object.align_location_x"
    bl_label = "Align Selected Location X To Active"

    @classmethod
    def poll(cls, context):
        return context.active_object is not None

    def execute(self, context):
        LocX(context)
        return {'FINISHED'}


class AlignLocationYOperator(bpy.types.Operator):
    ''''''
    bl_idname = "object.align_location_y"
    bl_label = "Align Selected Location Y To Active"

    @classmethod
    def poll(cls, context):
        return context.active_object is not None

    def execute(self, context):
        LocY(context)
        return {'FINISHED'}


class AlignLocationZOperator(bpy.types.Operator):
    ''''''
    bl_idname = "object.align_location_z"
    bl_label = "Align Selected Location Z To Active"

    @classmethod
    def poll(cls, context):
        return context.active_object is not None

    def execute(self, context):
        LocZ(context)
        return {'FINISHED'}
+
####################### Align Rotation ########################
# Operators wrapping the Rot* workers above.

class AlignRotationOperator(bpy.types.Operator):
    ''''''
    bl_idname = "object.align_rotation_all"
    bl_label = "Align Selected Rotation To Active"

    @classmethod
    def poll(cls, context):
        return context.active_object is not None

    def execute(self, context):
        RotAll(context)
        return {'FINISHED'}


class AlignRotationXOperator(bpy.types.Operator):
    ''''''
    bl_idname = "object.align_rotation_x"
    bl_label = "Align Selected Rotation X To Active"

    @classmethod
    def poll(cls, context):
        return context.active_object is not None

    def execute(self, context):
        RotX(context)
        return {'FINISHED'}


class AlignRotationYOperator(bpy.types.Operator):
    ''''''
    bl_idname = "object.align_rotation_y"
    bl_label = "Align Selected Rotation Y To Active"

    @classmethod
    def poll(cls, context):
        return context.active_object is not None

    def execute(self, context):
        RotY(context)
        return {'FINISHED'}


class AlignRotationZOperator(bpy.types.Operator):
    ''''''
    bl_idname = "object.align_rotation_z"
    bl_label = "Align Selected Rotation Z To Active"

    @classmethod
    def poll(cls, context):
        return context.active_object is not None

    def execute(self, context):
        RotZ(context)
        return {'FINISHED'}
####################### Align Scale ########################
# Operators wrapping the Scale* workers above.

class AlignScaleOperator(bpy.types.Operator):
    ''''''
    bl_idname = "object.align_objects_scale_all"
    bl_label = "Align Selected Scale To Active"

    @classmethod
    def poll(cls, context):
        return context.active_object is not None

    def execute(self, context):
        ScaleAll(context)
        return {'FINISHED'}


class AlignScaleXOperator(bpy.types.Operator):
    ''''''
    bl_idname = "object.align_objects_scale_x"
    bl_label = "Align Selected Scale X To Active"

    @classmethod
    def poll(cls, context):
        return context.active_object is not None

    def execute(self, context):
        ScaleX(context)
        return {'FINISHED'}


class AlignScaleYOperator(bpy.types.Operator):
    ''''''
    bl_idname = "object.align_objects_scale_y"
    bl_label = "Align Selected Scale Y To Active"

    @classmethod
    def poll(cls, context):
        return context.active_object is not None

    def execute(self, context):
        ScaleY(context)
        return {'FINISHED'}


class AlignScaleZOperator(bpy.types.Operator):
    ''''''
    bl_idname = "object.align_objects_scale_z"
    bl_label = "Align Selected Scale Z To Active"

    @classmethod
    def poll(cls, context):
        return context.active_object is not None

    def execute(self, context):
        ScaleZ(context)
        return {'FINISHED'}
+
## registering
def register():
    """Register all add-on classes."""
    bpy.utils.register_module(__name__)


def unregister():
    """Unregister all add-on classes."""
    bpy.utils.unregister_module(__name__)


if __name__ == "__main__":
    register()
diff --git a/space_view3d_copy_attributes.py b/space_view3d_copy_attributes.py
new file mode 100644
index 00000000..3a35ed1f
--- /dev/null
+++ b/space_view3d_copy_attributes.py
@@ -0,0 +1,817 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
# Add-on metadata shown in Blender's add-on manager.
bl_info = {
    'name': 'Copy Attributes Menu',
    'author': 'Bassam Kurdali, Fabian Fricke, wiseman303',
    'version': (0, 4, 4),
    'blender': (2, 5, 7),
    'api': 36695,
    'location': 'View3D > Ctrl-C',
    'description': 'Copy Attributes Menu from Blender 2.4',
    'wiki_url': 'http://wiki.blender.org/index.php/Extensions:2.5/Py/'
                'Scripts/3D_interaction/Copy_Attributes_Menu',
    'tracker_url': 'https://projects.blender.org/tracker/index.php?'
                   'func=detail&aid=22588',
    'category': '3D View'}
+
+import bpy
+from mathutils import Matrix, Vector
+
+
def build_exec(loopfunc, func):
    """Build an operator ``execute`` callback.

    The returned callable delegates to *loopfunc*, which applies *func*
    to each relevant datablock, then reports success to Blender.
    """
    def _execute(self, context):
        loopfunc(self, context, func)
        return {'FINISHED'}

    return _execute
+
+
def build_invoke(loopfunc, func):
    """Build an operator ``invoke`` callback mirroring build_exec."""
    def _invoke(self, context, event):
        loopfunc(self, context, func)
        return {'FINISHED'}

    return _invoke
+
+
def build_op(idname, label, description, fpoll, fexec, finvoke):
    """Assemble a bare Operator subclass from the supplied callbacks."""
    # The callbacks are plugged in as plain class attributes; Blender
    # picks them up when the class is registered.
    class myopic(bpy.types.Operator):
        bl_idname = idname
        bl_label = label
        bl_description = description

        poll = fpoll
        execute = fexec
        invoke = finvoke

    return myopic
+
+
def genops(copylist, oplist, prefix, poll_func, loopfunc):
    """Create one operator per *copylist* entry and collect it in *oplist*."""
    for opname, label, description, funk in copylist:
        oplist.append(build_op(prefix + opname, "Copy " + label, description,
                               poll_func, build_exec(loopfunc, funk),
                               build_invoke(loopfunc, funk)))
+
+
def generic_copy(source, target, string=""):
    """Copy every attribute of *source* whose name contains *string* to *target*.

    Copying is best effort: attributes that cannot be set (e.g. read-only
    RNA properties) are skipped silently, which callers rely on when
    cloning constraint/modifier settings.
    """
    for attr in dir(source):
        if string in attr:
            try:
                setattr(target, attr, getattr(source, attr))
            # Narrowed from a bare except: never swallow SystemExit or
            # KeyboardInterrupt; failed property writes are still ignored.
            except Exception:
                pass
    return
+
+
def getmat(bone, active, context, ignoreparent):
    '''Helper function for visual transform copy,
    gets the active transform in bone space
    '''
    # NOTE(review): matrix conventions follow the 2.5x mathutils API
    # (M1 * M2 composition, .inverted()/.copy() methods).
    data_bone = context.active_object.data.bones[bone.name]
    #all matrices are in armature space unless commented otherwise
    otherloc = active.matrix  # final 4x4 mat of target, location.
    bonemat_local = Matrix(data_bone.matrix_local)  # self rest matrix
    if data_bone.parent:
        # Parent's current pose matrix and rest matrix.
        parentposemat = Matrix(
            context.active_object.pose.bones[data_bone.parent.name].matrix)
        parentbonemat = Matrix(data_bone.parent.matrix_local)
    else:
        parentposemat = bonemat_local.copy()
        parentbonemat = bonemat_local.copy()

        # FIXME! why copy from the parent if setting identity ?, Campbell
        parentposemat.identity()
        parentbonemat.identity()

    # With no (effective) parent, or when told to ignore it, convert the
    # target transform straight into this bone's rest space.
    if parentbonemat == parentposemat or ignoreparent:
        newmat = bonemat_local.inverted() * otherloc
    else:
        bonemat = parentbonemat.inverted() * bonemat_local

        newmat = bonemat.inverted() * parentposemat.inverted() * otherloc
    return newmat
+
+
def rotcopy(item, mat):
    """Write the rotation of matrix *mat* onto *item*, honouring its rotation_mode."""
    rot3 = mat.to_3x3()
    if item.rotation_mode == 'QUATERNION':
        item.rotation_quaternion = rot3.to_quaternion()
    elif item.rotation_mode == 'AXIS_ANGLE':
        quat = rot3.to_quaternion()
        # Stored as a 4-vector: axis x, y, z then angle.
        item.rotation_axis_angle = Vector([quat.axis[0],
                                           quat.axis[1],
                                           quat.axis[2],
                                           quat.angle])
    else:
        # Any remaining mode string is an Euler order ('XYZ', 'ZXY', ...).
        item.rotation_euler = rot3.to_euler(item.rotation_mode)
+
+
def pLoopExec(self, context, funk):
    '''Loop over selected bones and execute funk on them'''
    active = context.active_pose_bone
    # Work on a copy: the original code removed the active bone from the
    # list returned by the context itself, mutating shared state
    # (obLoopExec already copies with [:] for exactly this reason).
    selected = context.selected_pose_bones[:]
    selected.remove(active)
    for bone in selected:
        funk(bone, active, context)
+
# The following functions copy one attribute group from the active pose
# bone onto another selected bone.


def pLocLocExec(bone, active, context):
    bone.location = active.location


def pLocRotExec(bone, active, context):
    rotcopy(bone, active.matrix_basis.to_3x3())


def pLocScaExec(bone, active, context):
    bone.scale = active.scale


def pVisLocExec(bone, active, context):
    bone.location = getmat(bone, active, context, False).to_translation()


def pVisRotExec(bone, active, context):
    ignore = not context.active_object.data.bones[bone.name].use_inherit_rotation
    rotcopy(bone, getmat(bone, active, context, ignore))


def pVisScaExec(bone, active, context):
    ignore = not context.active_object.data.bones[bone.name].use_inherit_scale
    bone.scale = getmat(bone, active, context, ignore).to_scale()


def pDrwExec(bone, active, context):
    bone.custom_shape = active.custom_shape


def pLokExec(bone, active, context):
    # Per-channel lock flags are boolean vectors; copy them element-wise.
    for index, state in enumerate(active.lock_location):
        bone.lock_location[index] = state
    for index, state in enumerate(active.lock_rotation):
        bone.lock_rotation[index] = state
    bone.lock_rotations_4d = active.lock_rotations_4d
    bone.lock_rotation_w = active.lock_rotation_w
    for index, state in enumerate(active.lock_scale):
        bone.lock_scale[index] = state


def pConExec(bone, active, context):
    for old_constraint in active.constraints.values():
        new_constraint = bone.constraints.new(old_constraint.type)
        generic_copy(old_constraint, new_constraint)


def pIKsExec(bone, active, context):
    generic_copy(active, bone, "ik_")


# (idname suffix, menu label, tooltip, worker) — consumed by genops().
pose_copies = (('pose_loc_loc', "Local Location",
                "Copy Location from Active to Selected", pLocLocExec),
               ('pose_loc_rot', "Local Rotation",
                "Copy Rotation from Active to Selected", pLocRotExec),
               ('pose_loc_sca', "Local Scale",
                "Copy Scale from Active to Selected", pLocScaExec),
               ('pose_vis_loc', "Visual Location",
                "Copy Location from Active to Selected", pVisLocExec),
               ('pose_vis_rot', "Visual Rotation",
                "Copy Rotation from Active to Selected", pVisRotExec),
               ('pose_vis_sca', "Visual Scale",
                "Copy Scale from Active to Selected", pVisScaExec),
               ('pose_drw', "Bone Shape",
                "Copy Bone Shape from Active to Selected", pDrwExec),
               ('pose_lok', "Protected Transform",
                "Copy Protected Tranforms from Active to Selected", pLokExec),
               ('pose_con', "Bone Constraints",
                "Copy Object Constraints from Active to Selected", pConExec),
               ('pose_iks', "IK Limits",
                "Copy IK Limits from Active to Selected", pIKsExec))
+
+
@classmethod
def pose_poll_func(cls, context):
    """The pose-copy operators are only available in pose mode."""
    return context.mode == 'POSE'


def pose_invoke_func(self, context, event):
    """Open the operator as a props dialog so options can be picked first."""
    context.window_manager.invoke_props_dialog(self)
    return {'RUNNING_MODAL'}
+
+
class CopySelectedPoseConstraints(bpy.types.Operator):
    ''' Copy Chosen constraints from active to selected'''
    bl_idname = "pose.copy_selected_constraints"
    bl_label = "Copy Selected Constraints"
    # One toggle per constraint slot shown in the dialog.
    selection = bpy.props.BoolVectorProperty(size=32)

    poll = pose_poll_func
    invoke = pose_invoke_func

    def draw(self, context):
        layout = self.layout
        # One checkbox per constraint on the active bone.
        for idx, const in enumerate(context.active_pose_bone.constraints):
            layout.prop(self, "selection", index=idx, text=const.name,
                        toggle=True)

    def execute(self, context):
        active = context.active_pose_bone
        targets = [b for b in context.selected_pose_bones if b != active]
        for bone in targets:
            for index, flag in enumerate(self.selection):
                if not flag:
                    continue
                source = active.constraints[index]
                generic_copy(source, bone.constraints.new(source.type))
        return {'FINISHED'}
+
pose_ops = []  # list of pose mode copy operators

# Instantiate one operator class per pose_copies entry.
genops(pose_copies, pose_ops, "pose.copy_", pose_poll_func, pLoopExec)
+
+
class VIEW3D_MT_posecopypopup(bpy.types.Menu):
    """Popup menu bound to Ctrl-C while in pose mode."""
    bl_label = "Copy Attributes"

    def draw(self, context):
        layout = self.layout
        layout.operator_context = 'INVOKE_REGION_WIN'
        for entry in pose_copies:
            layout.operator("pose.copy_" + entry[0])
        layout.operator("pose.copy_selected_constraints")
        layout.operator("pose.copy", text="copy pose")
+
+
def obLoopExec(self, context, funk):
    '''Loop over selected objects and execute funk on them'''
    active = context.active_object
    targets = context.selected_objects[:]
    targets.remove(active)
    for obj in targets:
        feedback = funk(obj, active, context)
        # Workers may return an (icon, message) pair for the info bar.
        if feedback:
            self.report({feedback[0]}, feedback[1])
+
+
def world_to_basis(active, ob, context):
    '''put world coords of active as basis coords of ob'''
    # Express active's world matrix in ob's parent space, then strip the
    # parent-relative part so it can be assigned as a basis transform.
    local = ob.parent.matrix_world.inverted() * active.matrix_world
    return ob.matrix_basis * ob.matrix_local.inverted() * local
+
+# The following functions are used to copy attributes from
+# the active object to the selected objects
+
+
def obLoc(ob, active, context):
    ob.location = active.location


def obRot(ob, active, context):
    rotcopy(ob, active.matrix_local.to_3x3())


def obSca(ob, active, context):
    ob.scale = active.scale


def obVisLoc(ob, active, context):
    # Parented objects need the world transform re-expressed in basis space.
    if ob.parent:
        ob.location = world_to_basis(active, ob, context).to_translation()
    else:
        ob.location = active.matrix_world.to_translation()


def obVisRot(ob, active, context):
    if ob.parent:
        rotcopy(ob, world_to_basis(active, ob, context).to_3x3())
    else:
        rotcopy(ob, active.matrix_world.to_3x3())


def obVisSca(ob, active, context):
    if ob.parent:
        ob.scale = world_to_basis(active, ob, context).to_scale()
    else:
        ob.scale = active.matrix_world.to_scale()
+
+
def obDrw(ob, active, context):
    """Mirror all draw/display toggles from the active object."""
    ob.draw_type = active.draw_type
    ob.show_axis = active.show_axis
    ob.show_bounds = active.show_bounds
    ob.draw_bounds_type = active.draw_bounds_type
    ob.show_name = active.show_name
    ob.show_texture_space = active.show_texture_space
    ob.show_transparent = active.show_transparent
    ob.show_wire = active.show_wire
    ob.show_x_ray = active.show_x_ray
    ob.empty_draw_type = active.empty_draw_type
    ob.empty_draw_size = active.empty_draw_size


def obOfs(ob, active, context):
    ob.time_offset = active.time_offset
    return('INFO', "time offset copied")


def obDup(ob, active, context):
    generic_copy(active, ob, "dupli")
    return('INFO', "duplication method copied")


def obCol(ob, active, context):
    ob.color = active.color


def obMas(ob, active, context):
    ob.game.mass = active.game.mass
    return('INFO', "mass copied")


def obLok(ob, active, context):
    """Copy the transform-lock flags from the active object."""
    for index, state in enumerate(active.lock_location):
        ob.lock_location[index] = state
    for index, state in enumerate(active.lock_rotation):
        ob.lock_rotation[index] = state
    # BUGFIX: lock_rotations_4d is a single boolean, not a vector — the old
    # code iterated it and raised TypeError (pLokExec assigns it directly).
    ob.lock_rotations_4d = active.lock_rotations_4d
    ob.lock_rotation_w = active.lock_rotation_w
    for index, state in enumerate(active.lock_scale):
        ob.lock_scale[index] = state
    return('INFO', "transform locks copied")
+
+
def obCon(ob, active, context):
    """Replace ob's constraints with copies of the active object's."""
    # For consistency with 2.49, delete old constraints first.  Iterate a
    # snapshot: removing from the live collection while iterating it
    # skips every other element.
    for removeconst in list(ob.constraints):
        ob.constraints.remove(removeconst)
    for old_constraint in active.constraints.values():
        new_constraint = ob.constraints.new(old_constraint.type)
        generic_copy(old_constraint, new_constraint)
    return('INFO', "constraints copied")


def obTex(ob, active, context):
    """Copy texture-space settings where both datablocks expose them."""
    if 'texspace_location' in dir(ob.data) and 'texspace_location' in dir(
            active.data):
        ob.data.texspace_location[:] = active.data.texspace_location[:]
    if 'texspace_size' in dir(ob.data) and 'texspace_size' in dir(active.data):
        ob.data.texspace_size[:] = active.data.texspace_size[:]
    return('INFO', "texture space copied")


def obIdx(ob, active, context):
    ob.pass_index = active.pass_index
    return('INFO', "pass index copied")
+
+
def obMod(ob, active, context):
    """Replace ob's modifier stack with copies of the active object's."""
    # Remove existing modifiers first; iterate a snapshot because removing
    # from the live collection while iterating it skips elements.
    for modifier in list(ob.modifiers):
        ob.modifiers.remove(modifier)
    for old_modifier in active.modifiers.values():
        new_modifier = ob.modifiers.new(name=old_modifier.name,
                                        type=old_modifier.type)
        generic_copy(old_modifier, new_modifier)
    return('INFO', "modifiers copied")
+
+
def obWei(ob, active, context):
    """Copy vertex-group weights from active to ob, matched by vertex index."""
    me_source = active.data
    me_target = ob.data
    # sanity check: do source and target have the same amount of verts?
    if len(me_source.vertices) != len(me_target.vertices):
        return('ERROR', "objects have different vertex counts, doing nothing")
    # source group index -> group name
    vgroups_IndexName = {g.index: g.name for g in active.vertex_groups}
    # vert index -> [(vgroup_index, weight), ...]
    data = {}
    for v in me_source.vertices:
        if len(v.groups) > 0:
            data[v.index] = [(g.group, g.weight) for g in v.groups]
    # write data to target
    if ob != active:
        # add missing vertex groups
        existing = {g.name for g in ob.vertex_groups}
        for vgroup_name in vgroups_IndexName.values():
            if vgroup_name not in existing:
                ob.vertex_groups.new(name=vgroup_name)
        # write weights — direct lookups replace the old O(n^2) rescan of
        # all source vertices / all groups for every target vertex.
        groups_by_name = {}
        for g in ob.vertex_groups:
            groups_by_name[g.name] = g
        for v in me_target.vertices:
            for group_index, weight in data.get(v.index, ()):
                group = groups_by_name.get(vgroups_IndexName[group_index])
                if group is not None:
                    group.add((v.index,), weight, "REPLACE")
    return('INFO', "weights copied")
+
# (idname suffix, menu label, tooltip, worker function) — consumed by
# genops() below.  Commented-out rows are 2.4x features not yet ported.
object_copies = (('obj_loc', "Location",
                "Copy Location from Active to Selected", obLoc),
                ('obj_rot', "Rotation",
                "Copy Rotation from Active to Selected", obRot),
                ('obj_sca', "Scale",
                "Copy Scale from Active to Selected", obSca),
                ('obj_vis_loc', "Visual Location",
                "Copy Visual Location from Active to Selected", obVisLoc),
                ('obj_vis_rot', "Visual Rotation",
                "Copy Visual Rotation from Active to Selected", obVisRot),
                ('obj_vis_sca', "Visual Scale",
                "Copy Visual Scale from Active to Selected", obVisSca),
                ('obj_drw', "Draw Options",
                "Copy Draw Options from Active to Selected", obDrw),
                ('obj_ofs', "Time Offset",
                "Copy Time Offset from Active to Selected", obOfs),
                ('obj_dup', "Dupli",
                "Copy Dupli from Active to Selected", obDup),
                ('obj_col', "Object Color",
                "Copy Object Color from Active to Selected", obCol),
                ('obj_mas', "Mass",
                "Copy Mass from Active to Selected", obMas),
                #('obj_dmp', "Damping",
                #"Copy Damping from Active to Selected"),
                #('obj_all', "All Physical Attributes",
                #"Copy Physical Atributes from Active to Selected"),
                #('obj_prp', "Properties",
                #"Copy Properties from Active to Selected"),
                #('obj_log', "Logic Bricks",
                #"Copy Logic Bricks from Active to Selected"),
                ('obj_lok', "Protected Transform",
                "Copy Protected Tranforms from Active to Selected", obLok),
                ('obj_con', "Object Constraints",
                "Copy Object Constraints from Active to Selected", obCon),
                #('obj_nla', "NLA Strips",
                #"Copy NLA Strips from Active to Selected"),
                #('obj_tex', "Texture Space",
                #"Copy Texture Space from Active to Selected", obTex),
                #('obj_sub', "Subsurf Settings",
                #"Copy Subsurf Setings from Active to Selected"),
                #('obj_smo', "AutoSmooth",
                #"Copy AutoSmooth from Active to Selected"),
                ('obj_idx', "Pass Index",
                "Copy Pass Index from Active to Selected", obIdx),
                ('obj_mod', "Modifiers",
                "Copy Modifiers from Active to Selected", obMod),
                ('obj_wei', "Vertex Weights",
                "Copy vertex weights based on indices", obWei))
+
+
@classmethod
def object_poll_func(cls, context):
    """Need at least two selected objects: the active source plus targets."""
    return len(context.selected_objects) > 1


def object_invoke_func(self, context, event):
    """Show the operator's options in a popup dialog before running."""
    context.window_manager.invoke_props_dialog(self)
    return {'RUNNING_MODAL'}
+
+
class CopySelectedObjectConstraints(bpy.types.Operator):
    ''' Copy Chosen constraints from active to selected'''
    bl_idname = "object.copy_selected_constraints"
    bl_label = "Copy Selected Constraints"
    # One toggle per constraint slot shown in the dialog.
    selection = bpy.props.BoolVectorProperty(size=32)

    poll = object_poll_func

    invoke = object_invoke_func

    def draw(self, context):
        layout = self.layout
        for idx, const in enumerate(context.active_object.constraints):
            layout.prop(self, "selection", index=idx, text=const.name,
                        toggle=True)

    def execute(self, context):
        active = context.active_object
        targets = [o for o in context.selected_objects if o != active]
        for obj in targets:
            for index, flag in enumerate(self.selection):
                if not flag:
                    continue
                source = active.constraints[index]
                generic_copy(source, obj.constraints.new(source.type))
        return {'FINISHED'}
+
+
class CopySelectedObjectModifiers(bpy.types.Operator):
    ''' Copy Chosen modifiers from active to selected'''
    bl_idname = "object.copy_selected_modifiers"
    bl_label = "Copy Selected Modifiers"
    # One toggle per modifier slot shown in the dialog.
    selection = bpy.props.BoolVectorProperty(size=32)

    poll = object_poll_func

    invoke = object_invoke_func

    def draw(self, context):
        layout = self.layout
        for idx, const in enumerate(context.active_object.modifiers):
            layout.prop(self, 'selection', index=idx, text=const.name,
                        toggle=True)

    def execute(self, context):
        active = context.active_object
        targets = [o for o in context.selected_objects if o != active]
        for obj in targets:
            for index, flag in enumerate(self.selection):
                if not flag:
                    continue
                source = active.modifiers[index]
                generic_copy(source, obj.modifiers.new(type=source.type,
                                                       name=source.name))
        return {'FINISHED'}
+
object_ops = []  # list of object mode copy operators
# Instantiate one operator class per object_copies entry.
genops(object_copies, object_ops, "object.copy_", object_poll_func, obLoopExec)
+
+
class VIEW3D_MT_copypopup(bpy.types.Menu):
    """Popup menu bound to Ctrl-C while in object mode."""
    bl_label = "Copy Attributes"

    def draw(self, context):
        layout = self.layout
        layout.operator_context = 'INVOKE_REGION_WIN'
        for entry in object_copies:
            layout.operator("object.copy_" + entry[0])
        layout.operator("object.copy_selected_constraints")
        layout.operator("object.copy_selected_modifiers")
+
+#Begin Mesh copy settings:
+
+
class MESH_MT_CopyFaceSettings(bpy.types.Menu):
    """Edit-mode menu listing the per-face copy operations."""
    bl_label = "Copy Face Settings"

    @classmethod
    def poll(cls, context):
        return context.mode == 'EDIT_MESH'

    def draw(self, context):
        mesh = context.object.data
        # "FromLayer" submenus only make sense with more than one layer.
        uv = len(mesh.uv_textures) > 1
        vc = len(mesh.vertex_colors) > 1
        layout = self.layout

        layout.operator(MESH_OT_CopyFaceSettings.bl_idname,
                        text="Copy Material")['mode'] = 'MAT'
        if mesh.uv_textures.active:
            for text, mode in (("Copy Mode", 'MODE'),
                               ("Copy Transp", 'TRANSP'),
                               ("Copy Image", 'IMAGE'),
                               ("Copy UV Coords", 'UV')):
                layout.operator(MESH_OT_CopyFaceSettings.bl_idname,
                                text=text)['mode'] = mode
        if mesh.vertex_colors.active:
            layout.operator(MESH_OT_CopyFaceSettings.bl_idname,
                            text="Copy Vertex Colors")['mode'] = 'VCOL'
        if uv or vc:
            layout.separator()
            if uv:
                layout.menu("MESH_MT_CopyModeFromLayer")
                layout.menu("MESH_MT_CopyTranspFromLayer")
                layout.menu("MESH_MT_CopyImagesFromLayer")
                layout.menu("MESH_MT_CopyUVCoordsFromLayer")
            if vc:
                layout.menu("MESH_MT_CopyVertexColorsFromLayer")
+
+
def _buildmenu(self, mesh, mode):
    """Add one menu entry per non-active layer that suits *mode*."""
    layout = self.layout
    layers = mesh.vertex_colors if mode == 'VCOL' else mesh.uv_textures
    for layer in layers:
        if layer.active:
            continue
        op = layout.operator(MESH_OT_CopyFaceSettings.bl_idname,
                             text=layer.name)
        op['layer'] = layer.name
        op['mode'] = mode
+
+
+@classmethod
+def _poll_layer_uvs(cls, context):
+ return context.mode == "EDIT_MESH" and len(
+ context.object.data.uv_layers) > 1
+
+
+@classmethod
+def _poll_layer_vcols(cls, context):
+ return context.mode == "EDIT_MESH" and len(
+ context.object.data.vertex_colors) > 1
+
+
def _build_draw(mode):
    """Return a Menu.draw callback bound to one copy *mode*."""
    return (lambda self, context: _buildmenu(self, context.object.data, mode))

# (menu label, draw callback, poll) for the generated "...FromLayer" menus.
_layer_menu_data = (("UV Coords", _build_draw("UV"), _poll_layer_uvs),
                    ("Images", _build_draw("IMAGE"), _poll_layer_uvs),
                    ("Mode", _build_draw("MODE"), _poll_layer_uvs),
                    ("Transp", _build_draw("TRANSP"), _poll_layer_uvs),
                    ("Vertex Colors", _build_draw("VCOL"), _poll_layer_vcols))
_layer_menus = []
for name, draw_func, poll_func in _layer_menu_data:
    # e.g. "UV Coords" -> MESH_MT_CopyUVCoordsFromLayer
    classname = "MESH_MT_Copy" + "".join(name.split()) + "FromLayer"
    menuclass = type(classname, (bpy.types.Menu,),
                     dict(bl_label="Copy " + name + " from layer",
                          bl_idname=classname,
                          draw=draw_func,
                          poll=poll_func))
    _layer_menus.append(menuclass)
+
+
class MESH_OT_CopyFaceSettings(bpy.types.Operator):
    """Copy settings from active face to all selected faces."""
    bl_idname = 'mesh.copy_face_settings'
    bl_label = "Copy Face Settings"
    bl_options = {'REGISTER', 'UNDO'}

    # 'mode' selects what to copy; 'layer' optionally names a source layer.
    mode = bpy.props.StringProperty(name="mode")
    layer = bpy.props.StringProperty(name="layer")

    @classmethod
    def poll(cls, context):
        return context.mode == 'EDIT_MESH'

    def execute(self, context):
        mesh = context.object.data
        mode = getattr(self, 'mode', 'MODE')
        layername = getattr(self, 'layer', None)

        # Switching out of edit mode updates the selected state of faces and
        # makes the data from the uv texture and vertex color layers available.
        bpy.ops.object.editmode_toggle()

        if mode == 'MAT':
            # Material index lives on the faces themselves.
            from_data = mesh.faces
            to_data = from_data
        else:
            if mode == 'VCOL':
                layers = mesh.vertex_colors
                act_layer = mesh.vertex_colors.active
            else:
                layers = mesh.uv_textures
                act_layer = mesh.uv_textures.active
            if not layers or (layername and not layername in layers):
                return _end({'CANCELLED'})
            # Source may be a named layer; target is always the active one.
            from_data = layers[layername or act_layer.name].data
            to_data = act_layer.data
        # Default source: the active face's data.
        from_face = from_data[mesh.faces.active]

        for f in mesh.faces:
            if f.select:
                if to_data != from_data:
                    # Cross-layer copy: read from the same face index in
                    # the source layer instead of the active face.
                    from_face = from_data[f.index]
                if mode == 'MAT':
                    f.material_index = from_face.material_index
                    continue
                to_face = to_data[f.index]
                if to_face is from_face:
                    continue
                if mode == 'VCOL':
                    to_face.color1 = from_face.color1
                    to_face.color2 = from_face.color2
                    to_face.color3 = from_face.color3
                    to_face.color4 = from_face.color4
                elif mode == 'MODE':
                    to_face.use_alpha_sort = from_face.use_alpha_sort
                    to_face.use_billboard = from_face.use_billboard
                    to_face.use_collision = from_face.use_collision
                    to_face.use_halo = from_face.use_halo
                    to_face.hide = from_face.hide
                    to_face.use_light = from_face.use_light
                    to_face.use_object_color = from_face.use_object_color
                    to_face.use_shadow_cast = from_face.use_shadow_cast
                    to_face.use_blend_shared = from_face.use_blend_shared
                    to_face.use_image = from_face.use_image
                    to_face.use_bitmap_text = from_face.use_bitmap_text
                    to_face.use_twoside = from_face.use_twoside
                elif mode == 'TRANSP':
                    to_face.blend_type = from_face.blend_type
                elif mode in ('UV', 'IMAGE'):
                    attr = mode.lower()
                    setattr(to_face, attr, getattr(from_face, attr))
        return _end({'FINISHED'})
+
+
def _end(retval):
    """Return to edit mode (undoing execute's toggle) and pass *retval* through."""
    bpy.ops.object.editmode_toggle()
    return retval
+
+
def _add_tface_buttons(self, context):
    """Append copy-mode/transp buttons to the texture-face panel."""
    row = self.layout.row()
    for text, mode in (("Copy Mode", 'MODE'), ("Copy Transp", 'TRANSP')):
        row.operator(MESH_OT_CopyFaceSettings.bl_idname, text=text)['mode'] = mode
+
+
def register():
    bpy.utils.register_module(__name__)

    ''' mostly to get the keymap working '''
    kc = bpy.context.window_manager.keyconfigs['Blender']
    km = kc.keymaps.get("Object Mode")
    if km is None:
        km = kc.keymaps.new(name="Object Mode")
    # Ctrl-C in object mode opens the copy-attributes popup.
    kmi = km.keymap_items.new('wm.call_menu', 'C', 'PRESS', ctrl=True)
    kmi.properties.name = 'VIEW3D_MT_copypopup'
    km = kc.keymaps.get("Pose")
    if km is None:
        km = kc.keymaps.new(name="Pose")

    # Re-purpose the stock pose.copy shortcut to open our menu instead;
    # unregister() restores it.
    kmi = km.keymap_items.get("pose.copy")
    if kmi is not None:
        kmi.idname = 'wm.call_menu'
    else:
        kmi = km.keymap_items.new('wm.call_menu', 'C', 'PRESS', ctrl=True)
    kmi.properties.name = 'VIEW3D_MT_posecopypopup'
    # The dynamically built "...FromLayer" menus are not module-level
    # classes, so register them individually.
    for menu in _layer_menus:
        bpy.utils.register_class(menu)
    bpy.types.DATA_PT_texface.append(_add_tface_buttons)
    km = kc.keymaps.get("Mesh")
    if km is None:
        km = kc.keymaps.new(name="Mesh")
    kmi = km.keymap_items.new('wm.call_menu', 'C', 'PRESS')
    kmi.ctrl = True
    kmi.properties.name = 'MESH_MT_CopyFaceSettings'
+
+
def unregister():
    bpy.utils.unregister_module(__name__)

    ''' mostly to remove the keymap '''
    # Restore the stock pose.copy binding that register() re-purposed.
    kms = bpy.context.window_manager.keyconfigs['Blender'].keymaps['Pose']
    for item in kms.keymap_items:
        if item.name == 'Call Menu' and item.idname == 'wm.call_menu' and \
           item.properties.name == 'VIEW3D_MT_posecopypopup':
            item.idname = 'pose.copy'
            break
    for menu in _layer_menus:
        bpy.utils.unregister_class(menu)
    bpy.types.DATA_PT_texface.remove(_add_tface_buttons)
    # Drop the Ctrl-C binding added to the Mesh keymap.
    km = bpy.context.window_manager.keyconfigs.active.keymaps['Mesh']
    for kmi in km.keymap_items:
        if kmi.idname == 'wm.call_menu':
            if kmi.properties.name == 'MESH_MT_CopyFaceSettings':
                km.keymap_items.remove(kmi)

if __name__ == "__main__":
    register()
diff --git a/space_view3d_materials_utils.py b/space_view3d_materials_utils.py
new file mode 100644
index 00000000..3c0fa523
--- /dev/null
+++ b/space_view3d_materials_utils.py
@@ -0,0 +1,713 @@
+#(c) 2010 Michael Williamson (michaelw)
+#ported from original by Michael Williamson
+#
+#tested r28370
+#
+#
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+bl_info = {
+ "name": "Material Utils",
+ "author": "michaelw",
+ "version": (1, 3),
+ "blender": (2, 5, 6),
+ "api": 35324,
+ "location": "View3D > Q key",
+ "description": "Menu of material tools (assign, select by etc) in the 3D View",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/3D interaction/Materials Utils",
+ "tracker_url": "https://projects.blender.org/tracker/index.php?"\
+ "func=detail&aid=22140",
+ "category": "3D View"}
+
+"""
+This script has several functions and operators... grouped for convenience
+* assign material:
+ offers the user a list of ALL the materials in the blend file and an additional "new" entry
+ the chosen material will be assigned to all the selected objects in object mode.
+
+ in edit mode the selected faces get the selected material applied.
+
+ if the user chose "new" the new material can be renamed using the "last operator" section of the toolbox
+ After assigning the material "clean material slots" and "material to texface" are auto run to keep things tidy (see description below)
+
+
+* select by material
+ in object mode this offers the user a menu of all materials in the blend file
+ any objects using the selected material will become selected, any objects without the material will be removed from selection.
+
+ in edit mode: the menu offers only the materials attached to the current object. It will select the faces that use the material and deselect those that do not.
+
+* clean material slots
+ for all selected objects any empty material slots or material slots with materials that are not used by the mesh faces will be removed.
+
+* Any un-used materials and slots will be removed
+"""
+
+
+import bpy
+from bpy.props import*
+
+
+def replace_material(m1 , m2, all_objects = False):
+ #replace material named m1 with material named m2
+ #m1 is the name of original material
+ #m2 is the name of the material to replace it with
+ #'all' will replace throughout the blend file
+ try:
+ matorg = bpy.data.materials[m1]
+ matrep = bpy.data.materials[m2]
+
+
+ #store active object
+ scn = bpy.context.scene
+ ob_active = bpy.context.active_object
+
+ if all_objects:
+ objs = bpy.data.objects
+
+ else:
+ objs = bpy.context.selected_editable_objects
+
+ for ob in objs:
+ if ob.type == 'MESH':
+ scn.objects.active = ob
+ print(ob.name)
+ ms = ob.material_slots.values()
+
+ for m in ms:
+ if m.material == matorg:
+ m.material = matrep
+ #don't break the loop as the material can be
+ # ref'd more than once
+
+ #restore active object
+ scn.objects.active = ob_active
+ except:
+ print('no match to replace')
+
+def select_material_by_name(find_mat):
+ #in object mode selects all objects with material find_mat
+ #in edit mode selects all faces with material find_mat
+
+ #check for editmode
+ editmode = False
+
+ scn = bpy.context.scene
+
+ #set selection mode to faces
+ scn.tool_settings.mesh_select_mode =[False,False,True]
+
+ actob = bpy.context.active_object
+ if actob.mode == 'EDIT':
+ editmode =True
+ bpy.ops.object.mode_set()
+
+
+ if not editmode:
+ objs = bpy.data.objects
+ for ob in objs:
+ typ = ['MESH','CURVE', 'SURFACE', 'FONT', 'META']
+ if ob.type in typ:
+ ms = ob.material_slots.values()
+ for m in ms:
+ if m.material.name == find_mat:
+ ob.select = True
+ #the active object may not have the mat!
+ #set it to one that does!
+ scn.objects.active = ob
+ break
+ else:
+ ob.select = False
+
+ #deselect non-meshes
+ else:
+ ob.select = False
+
+ else:
+ #it's editmode, so select the faces
+ ob = actob
+ ms = ob.material_slots.values()
+
+ #same material can be on multiple slots
+ slot_indeces =[]
+ i = 0
+ found = False
+ for m in ms:
+ if m.material.name == find_mat:
+ slot_indeces.append(i)
+ found = True
+ i += 1
+ me = ob.data
+ for f in me.faces:
+ if f.material_index in slot_indeces:
+ f.select = True
+ else:
+ f.select = False
+ me.update()
+ if editmode:
+ bpy.ops.object.mode_set(mode = 'EDIT')
+
+def mat_to_texface():
+ #assigns the first image in each material to the faces in the active uvlayer
+ #for all selected objects
+
+ #check for editmode
+ editmode = False
+
+ actob = bpy.context.active_object
+ if actob.mode == 'EDIT':
+ editmode =True
+ bpy.ops.object.mode_set()
+
+ for ob in bpy.context.selected_editable_objects:
+ if ob.type == 'MESH':
+ #get the materials from slots
+ ms = ob.material_slots.values()
+
+ #build a list of images, one per material
+ images=[]
+ #get the textures from the mats
+ for m in ms:
+ gotimage = False
+ textures = m.material.texture_slots.values()
+ if len(textures) >= 1:
+ for t in textures:
+ if t != None:
+ tex = t.texture
+ if tex.type == 'IMAGE':
+ img = tex.image
+ images.append(img)
+ gotimage =True
+ break
+
+ if not gotimage:
+ print('noimage on', m.name)
+ images.append(None)
+
+ #now we have the images
+ #apply them to the uvlayer
+
+
+ me = ob.data
+ #got uvs?
+ if not me.uv_textures:
+ scn = bpy.context.scene
+ scn.objects.active = ob
+ bpy.ops.mesh.uv_texture_add()
+ scn.objects.active = actob
+
+ #get active uvlayer
+ for t in me.uv_textures:
+ if t.active:
+ uvtex = t.data.values()
+ for f in me.faces:
+ #check that material had an image!
+ if images[f.material_index] != None:
+ uvtex[f.index].image = images[f.material_index]
+ uvtex[f.index].use_image = True
+ else:
+ uvtex[f.index].use_image = False
+
+ me.update()
+
+
+ if editmode:
+ bpy.ops.object.mode_set(mode = 'EDIT')
+
+
+
+def assignmatslots(ob, matlist):
+ #given an object and a list of material names
+ #removes all material slots from the object
+ #adds new ones for each material in matlist
+ #adds the materials to the slots as well.
+
+ scn = bpy.context.scene
+ ob_active = bpy.context.active_object
+ scn.objects.active = ob
+
+ for s in ob.material_slots:
+ bpy.ops.object.material_slot_remove()
+
+
+ #re-add them and assign material
+ i = 0
+ for m in matlist:
+ mat = bpy.data.materials[m]
+ ob.data.materials.append(mat)
+ i += 1
+
+ #restore active object:
+ scn.objects.active = ob_active
+
+
+def cleanmatslots():
+ #check for edit mode
+ editmode = False
+ actob = bpy.context.active_object
+ if actob.mode == 'EDIT':
+ editmode =True
+ bpy.ops.object.mode_set()
+
+
+ objs = bpy.context.selected_editable_objects
+
+ for ob in objs:
+ if ob.type == 'MESH':
+ mats = ob.material_slots.keys()
+
+ #check the faces on the mesh to build a list of used materials
+ usedMatIndex =[] #we'll store used materials indices here
+ faceMats =[]
+ me = ob.data
+ for f in me.faces:
+ #get the material index for this face...
+ faceindex = f.material_index
+
+ #indices will be lost: Store face mat use by name
+ currentfacemat = mats[faceindex]
+ faceMats.append(currentfacemat)
+
+
+ #check if index is already listed as used or not
+ found = 0
+ for m in usedMatIndex:
+ if m == faceindex:
+ found = 1
+ #break
+
+ if found == 0:
+ #add this index to the list
+ usedMatIndex.append(faceindex)
+
+ #re-assign the used mats to the mesh and leave out the unused
+ ml = []
+ mnames = []
+ for u in usedMatIndex:
+ ml.append( mats[u] )
+ #we'll need a list of names to get the face indices...
+ mnames.append(mats[u])
+
+ assignmatslots(ob, ml)
+
+
+ #restore face indices:
+ i = 0
+ for f in me.faces:
+ matindex = mnames.index(faceMats[i])
+ f.material_index = matindex
+ i += 1
+
+ if editmode:
+ bpy.ops.object.mode_set(mode = 'EDIT')
+
+
+
+
+
+def assign_mat(matname="Default"):
+ #get active object so we can restore it later
+ actob = bpy.context.active_object
+
+ #check if material exists, if it doesn't then create it
+ mats =bpy.data.materials
+ found = False
+ for m in mats:
+ if m.name == matname:
+ target = m
+ found = True
+ break
+ if not found:
+ target = bpy.data.materials.new(matname)
+
+
+ #if objectmode then set all faces
+ editmode = False
+ allfaces = True
+ if actob.mode == 'EDIT':
+ editmode =True
+ allfaces = False
+ bpy.ops.object.mode_set()
+
+ objs = bpy.context.selected_editable_objects
+
+ for ob in objs:
+ #set the active object to our object
+ scn = bpy.context.scene
+ scn.objects.active = ob
+
+
+ other = ['CURVE', 'SURFACE', 'FONT', 'META']
+ if ob.type in other:
+ found=False
+ i = 0
+ mats = bpy.data.materials
+ for m in mats:
+ if m.name == matname:
+ found =True
+ index = i
+ break
+ i += 1
+ if not found:
+ index = i-1
+ targetlist =[index]
+ assignmatslots(ob, targetlist)
+
+ elif ob.type =='MESH':
+ #check material slots for matname material
+ found=False
+ i = 0
+ mats = ob.material_slots
+ for m in mats:
+ if m.name == matname:
+ found =True
+ index = i
+ #make slot active
+ ob.active_material_index = i
+ break
+ i += 1
+
+ if not found:
+ index=i
+ #the material is not attached to the object
+ ob.data.materials.append(target)
+
+ #now assign the material:
+ me =ob.data
+ if allfaces:
+ for f in me.faces:
+ f.material_index = index
+ elif allfaces == False:
+ for f in me.faces:
+ if f.select:
+ f.material_index = index
+ me.update()
+
+
+
+ #restore the active object
+ bpy.context.scene.objects.active = actob
+ if editmode:
+ bpy.ops.object.mode_set(mode = 'EDIT')
+
+
+
+def check_texture(img,mat):
+ #finds a texture from an image
+ #makes a texture if needed
+ #adds it to the material if it isn't there already
+
+ tex = bpy.data.textures.get(img.name)
+
+ if tex is None:
+ tex = bpy.data.textures.new(name=img.name, type='IMAGE')
+
+ tex.image = img
+
+ #see if the material already uses this tex
+ #add it if needed
+ found = False
+ for m in mat.texture_slots:
+ if m and m.texture == tex:
+ found = True
+ break
+ if not found and mat:
+ mtex = mat.texture_slots.add()
+ mtex.texture = tex
+ mtex.texture_coords = 'UV'
+ mtex.use_map_color_diffuse = True
+
+def texface_to_mat():
+ # editmode check here!
+ editmode = False
+ ob = bpy.context.object
+ if ob.mode =='EDIT':
+ editmode = True
+ bpy.ops.object.mode_set()
+
+ for ob in bpy.context.selected_editable_objects:
+
+ faceindex = []
+ unique_images = []
+
+ # get the texface images and store indices
+ if (ob.data.uv_textures):
+ for f in ob.data.uv_textures.active.data:
+ if f.image:
+ img = f.image
+ #build list of unique images
+ if img not in unique_images:
+ unique_images.append(img)
+ faceindex.append(unique_images.index(img))
+
+ else:
+ img = None
+ faceindex.append(None)
+
+
+
+ #check materials for images exist; create if needed
+ matlist = []
+ for i in unique_images:
+ if i:
+ print(i.name)
+ try:
+ m = bpy.data.materials[i.name]
+
+ except:
+ m = bpy.data.materials.new(name = i.name)
+ continue
+
+ finally:
+ matlist.append(m.name)
+ # add textures if needed
+ check_texture(i,m)
+
+ #set up the object material slots
+ assignmatslots(ob, matlist)
+
+ #set texface indices to material slot indices..
+ me = ob.data
+
+ i = 0
+ for f in faceindex:
+ if f != None:
+ me.faces[i].material_index = f
+ i += 1
+ if editmode:
+ bpy.ops.object.mode_set(mode = 'EDIT')
+
+
+#operator classes:
+#---------------------------------------------------------------------
+
+class VIEW3D_OT_texface_to_material(bpy.types.Operator):
+ ''''''
+ bl_idname = "view3d.texface_to_material"
+ bl_label = "MW Texface Images to Material/Texture"
+ bl_options = {'REGISTER', 'UNDO'}
+
+ @classmethod
+ def poll(cls, context):
+ return context.active_object != None
+
+ def execute(self, context):
+ if context.selected_editable_objects:
+ texface_to_mat()
+ return {'FINISHED'}
+ else:
+ self.report({'WARNING'}, "No editable selected objects, could not finish")
+ return {'CANCELLED'}
+
+class VIEW3D_OT_assign_material(bpy.types.Operator):
+ '''assign a material to the selection'''
+ bl_idname = "view3d.assign_material"
+ bl_label = "MW Assign Material"
+ bl_options = {'REGISTER', 'UNDO'}
+
+ matname = StringProperty(name = 'Material Name',
+ description = 'Name of Material to Assign',
+ default = "", maxlen = 21)
+
+ @classmethod
+ def poll(cls, context):
+ return context.active_object != None
+
+ def execute(self, context):
+ mn = self.matname
+ print(mn)
+ assign_mat(mn)
+ cleanmatslots()
+ mat_to_texface()
+ return {'FINISHED'}
+
+class VIEW3D_OT_clean_material_slots(bpy.types.Operator):
+ '''removes any material slots from the
+ selected objects that are not used by the mesh'''
+ bl_idname = "view3d.clean_material_slots"
+ bl_label = "MW Clean Material Slots"
+ bl_options = {'REGISTER', 'UNDO'}
+
+ @classmethod
+ def poll(cls, context):
+ return context.active_object != None
+
+ def execute(self, context):
+ cleanmatslots()
+ return {'FINISHED'}
+
+class VIEW3D_OT_material_to_texface(bpy.types.Operator):
+ ''''''
+ bl_idname = "view3d.material_to_texface"
+ bl_label = "MW Material Images to Texface"
+ bl_options = {'REGISTER', 'UNDO'}
+
+ @classmethod
+ def poll(cls, context):
+ return context.active_object != None
+
+ def execute(self, context):
+ mat_to_texface()
+ return {'FINISHED'}
+
+class VIEW3D_OT_select_material_by_name(bpy.types.Operator):
+ ''''''
+ bl_idname = "view3d.select_material_by_name"
+ bl_label = "MW Select Material By Name"
+ bl_options = {'REGISTER', 'UNDO'}
+ matname = StringProperty(name = 'Material Name',
+ description = 'Name of Material to Select',
+ default = "", maxlen = 21)
+
+ @classmethod
+ def poll(cls, context):
+ return context.active_object != None
+
+ def execute(self, context):
+ mn = self.matname
+ select_material_by_name(mn)
+ return {'FINISHED'}
+
+
+class VIEW3D_OT_replace_material(bpy.types.Operator):
+ '''assign a material to the selection'''
+ bl_idname = "view3d.replace_material"
+ bl_label = "MW Replace Material"
+ bl_options = {'REGISTER', 'UNDO'}
+
+ matorg = StringProperty(name = 'Material to Replace',
+ description = 'Name of Material to Assign',
+ default = "", maxlen = 21)
+
+ matrep = StringProperty(name = 'Replacement material',
+ description = 'Name of Material to Assign',
+ default = "", maxlen = 21)
+
+ all_objects = BoolProperty(name ='all_objects',
+ description="replace for all objects in this blend file",
+ default = True)
+
+ @classmethod
+ def poll(cls, context):
+ return context.active_object != None
+
+ def execute(self, context):
+ m1 = self.matorg
+ m2 = self.matrep
+ all = self.all_objects
+ replace_material(m1,m2,all)
+ return {'FINISHED'}
+
+#menu classes
+#-------------------------------------------------------------------------------
+class VIEW3D_MT_master_material(bpy.types.Menu):
+ bl_label = "Master Material Menu"
+
+ def draw(self, context):
+ layout = self.layout
+ layout.operator_context = 'INVOKE_REGION_WIN'
+
+ layout.menu("VIEW3D_MT_assign_material", icon='ZOOMIN')
+ layout.menu("VIEW3D_MT_select_material", icon='HAND')
+ layout.separator()
+ layout.operator("view3d.clean_material_slots",
+ text = 'Clean Material Slots', icon='CANCEL')
+ layout.operator("view3d.material_to_texface",
+ text = 'Material to Texface',icon='FACESEL_HLT')
+ layout.operator("view3d.texface_to_material",
+ text = 'Texface to Material',icon='FACESEL_HLT')
+
+ layout.separator()
+ layout.operator("view3d.replace_material",
+ text = 'Replace Material', icon='ARROW_LEFTRIGHT')
+
+
+
+class VIEW3D_MT_assign_material(bpy.types.Menu):
+ bl_label = "Assign Material"
+
+ def draw(self, context):
+ layout = self.layout
+ layout.operator_context = 'INVOKE_REGION_WIN'
+
+ ob = context
+ layout.label
+ for i in range (len(bpy.data.materials)):
+
+ layout.operator("view3d.assign_material",
+ text=bpy.data.materials[i].name,
+ icon='MATERIAL_DATA').matname = bpy.data.materials[i].name
+
+ layout.operator("view3d.assign_material",text="Add New",
+ icon='ZOOMIN')
+
+class VIEW3D_MT_select_material(bpy.types.Menu):
+ bl_label = "Select by Material"
+
+ def draw(self, context):
+ layout = self.layout
+ layout.operator_context = 'INVOKE_REGION_WIN'
+
+ ob = context.object
+ layout.label
+ if ob.mode == 'OBJECT':
+ #show all used materials in entire blend file
+ for i in range (len(bpy.data.materials)):
+ if bpy.data.materials[i].users > 0:
+ layout.operator("view3d.select_material_by_name",
+ text=bpy.data.materials[i].name,
+ icon='MATERIAL_DATA').matname = bpy.data.materials[i].name
+
+
+ elif ob.mode == 'EDIT':
+ #show only the materials on this object
+ mats = ob.material_slots.keys()
+ for m in mats:
+ layout.operator("view3d.select_material_by_name",
+ text=m,
+ icon='MATERIAL_DATA').matname = m
+
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ kc = bpy.data.window_managers[0].keyconfigs.default
+ km = kc.keymaps.get("3D View")
+ if km is None:
+ km = kc.keymaps.new(name="3D View")
+
+ kmi = km.keymap_items.new('wm.call_menu', 'Q', 'PRESS')
+ kmi.properties.name = "VIEW3D_MT_master_material"
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+ kc = bpy.data.window_managers[0].keyconfigs.default
+ km = kc.keymaps["3D View"]
+ for kmi in km.keymap_items:
+ if kmi.idname == 'wm.call_menu':
+ if kmi.properties.name == "VIEW3D_MT_master_material":
+ km.keymap_items.remove(kmi)
+ break
+
+if __name__ == "__main__":
+ register()
+
diff --git a/space_view3d_math_vis/__init__.py b/space_view3d_math_vis/__init__.py
new file mode 100644
index 00000000..a79f1eee
--- /dev/null
+++ b/space_view3d_math_vis/__init__.py
@@ -0,0 +1,108 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+bl_info = {
+ "name": "Math Vis (Console)",
+ "author": "Campbell Barton",
+ "version": (0, 1),
+ "blender": (2, 5, 7),
+ "api": 35622,
+ "location": "View3D > Tool Shelf or Console",
+ "description": "Display console defined mathutils variables in the 3D view",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/3D_interaction/Math_Viz",
+ "tracker_url": "http://projects.blender.org/tracker/index.php?"\
+ "func=detail&aid=25545",
+ "support": "OFFICIAL",
+ "category": "3D View"}
+
+if "bpy" in locals():
+ import imp
+ imp.reload(utils)
+ imp.reload(draw)
+else:
+ from . import utils, draw
+
+import bpy
+
+
+class VIEW3D_PT_math_vis(bpy.types.Panel):
+ bl_space_type = "VIEW_3D"
+ bl_region_type = "TOOLS"
+ bl_label = "Math View"
+
+ def draw(self, context):
+ layout = self.layout
+ view = context.space_data
+
+ col = layout.column(align=True)
+
+ callbacks = draw.callbacks
+ ok = False
+ for region in context.area.regions:
+ if callbacks.get(hash(region)):
+ ok = True
+ break
+
+ col.operator("view3d.math_vis_toggle", emboss=False, icon='CHECKBOX_HLT' if ok else 'CHECKBOX_DEHLT')
+
+
+class SetupMathView(bpy.types.Operator):
+ '''Visualize mathutils type python variables from the interactive console, see addon docs'''
+ bl_idname = "view3d.math_vis_toggle"
+ bl_label = "Use Math Vis"
+
+ def execute(self, context):
+ callbacks = draw.callbacks
+ region = context.region
+ region_id = hash(region)
+ cb_data = callbacks.get(region_id)
+ if cb_data is None:
+ handle_pixel = region.callback_add(draw.draw_callback_px, (self, context), 'POST_PIXEL')
+ handle_view = region.callback_add(draw.draw_callback_view, (self, context), 'POST_VIEW')
+ callbacks[region_id] = region, handle_pixel, handle_view
+ else:
+ region.callback_remove(cb_data[1])
+ region.callback_remove(cb_data[2])
+ del callbacks[region_id]
+
+ context.area.tag_redraw()
+ return {'FINISHED'}
+
+
+def console_hook():
+ for region, handle_pixel, handle_view in draw.callbacks.values():
+ region.tag_redraw()
+
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ import console_python
+ console_python.execute.hooks.append((console_hook, ()))
+
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+ import console_python
+ console_python.execute.hooks.remove((console_hook, ()))
+
+ draw.callbacks_clear()
diff --git a/space_view3d_math_vis/draw.py b/space_view3d_math_vis/draw.py
new file mode 100644
index 00000000..9d3b436e
--- /dev/null
+++ b/space_view3d_math_vis/draw.py
@@ -0,0 +1,232 @@
+#====================== BEGIN GPL LICENSE BLOCK ======================
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+#======================= END GPL LICENSE BLOCK ========================
+
+import bpy
+import blf
+
+from . import utils
+from mathutils import Vector, Matrix
+
+callbacks = {}
+
+
+def callbacks_clear():
+ for region, handle_pixel, handle_view in callbacks.values():
+ region.callback_remove(handle_pixel)
+ region.callback_remove(handle_view)
+ callbacks.clear()
+
+
+def draw_callback_px(self, context):
+ from bgl import glColor3f
+ font_id = 0 # XXX, need to find out how best to get this.
+ blf.size(font_id, 12, 72)
+
+ data_matrix, data_quat, data_euler, data_vector, data_vector_array = utils.console_math_data()
+
+ if not data_matrix and not data_quat and not data_euler and not data_vector and not data_vector_array:
+
+ # draw some text
+ glColor3f(1.0, 0.0, 0.0)
+ blf.position(font_id, 180, 10, 0)
+ blf.draw(font_id, "Python Console has no mathutils definitions")
+ return
+
+ glColor3f(1.0, 1.0, 1.0)
+
+ region = context.region
+ region3d = context.space_data.region_3d
+
+ region_mid_width = region.width / 2.0
+ region_mid_height = region.height / 2.0
+
+ # vars for projection
+ perspective_matrix = region3d.perspective_matrix.copy()
+
+ def draw_text(text, vec):
+ vec_4d = vec.to_4d()
+ vec_4d *= perspective_matrix
+ if vec_4d.w > 0.0:
+ x = region_mid_width + region_mid_width * (vec_4d.x / vec_4d.w)
+ y = region_mid_height + region_mid_height * (vec_4d.y / vec_4d.w)
+
+ blf.position(font_id, x + 3.0, y - 4.0, 0.0)
+ blf.draw(font_id, text)
+
+ # points
+ if data_vector:
+ for key, vec in data_vector.items():
+ draw_text(key, vec)
+
+ # lines
+ if data_vector_array:
+ for key, vec in data_vector_array.items():
+ draw_text(key, vec[0])
+
+ # matrix
+ if data_matrix:
+ for key, mat in data_matrix.items():
+ draw_text(key, mat[3])
+
+ if data_quat:
+ loc = context.scene.cursor_location.copy()
+ for key, mat in data_quat.items():
+ draw_text(key, loc)
+
+ if data_euler:
+ loc = context.scene.cursor_location.copy()
+ for key, mat in data_euler.items():
+ draw_text(key, loc)
+
+
+def draw_callback_view(self, context):
+ from bgl import glEnable, glDisable, glColor3f, glVertex3f, glPointSize, glLineWidth, glBegin, glEnd, glLineStipple, GL_POINTS, GL_LINE_STRIP, GL_LINES, GL_LINE_STIPPLE
+
+ data_matrix, data_quat, data_euler, data_vector, data_vector_array = utils.console_math_data()
+
+ # draw_matrix vars
+ zero = Vector((0.0, 0.0, 0.0))
+ x_p = Vector((1.0, 0.0, 0.0))
+ x_n = Vector((-1.0, 0.0, 0.0))
+ y_p = Vector((0.0, 1.0, 0.0))
+ y_n = Vector((0.0, -1.0, 0.0))
+ z_p = Vector((0.0, 0.0, 1.0))
+ z_n = Vector((0.0, 0.0, -1.0))
+ bb = [Vector() for i in range(8)]
+
+ def draw_matrix(mat):
+ zero_tx = zero * mat
+
+ glLineWidth(2.0)
+
+ # x
+ glColor3f(1.0, 0.2, 0.2)
+ glBegin(GL_LINES)
+ glVertex3f(*(zero_tx))
+ glVertex3f(*(x_p * mat))
+ glEnd()
+
+ glColor3f(0.6, 0.0, 0.0)
+ glBegin(GL_LINES)
+ glVertex3f(*(zero_tx))
+ glVertex3f(*(x_n * mat))
+ glEnd()
+
+ # y
+ glColor3f(0.2, 1.0, 0.2)
+ glBegin(GL_LINES)
+ glVertex3f(*(zero_tx))
+ glVertex3f(*(y_p * mat))
+ glEnd()
+
+ glColor3f(0.0, 0.6, 0.0)
+ glBegin(GL_LINES)
+ glVertex3f(*(zero_tx))
+ glVertex3f(*(y_n * mat))
+ glEnd()
+
+ # z
+ glColor3f(0.2, 0.2, 1.0)
+ glBegin(GL_LINES)
+ glVertex3f(*(zero_tx))
+ glVertex3f(*(z_p * mat))
+ glEnd()
+
+ glColor3f(0.0, 0.0, 0.6)
+ glBegin(GL_LINES)
+ glVertex3f(*(zero_tx))
+ glVertex3f(*(z_n * mat))
+ glEnd()
+
+ # bounding box
+ i = 0
+ glColor3f(1.0, 1.0, 1.0)
+ for x in (-1.0, 1.0):
+ for y in (-1.0, 1.0):
+ for z in (-1.0, 1.0):
+ bb[i][:] = x, y, z
+ bb[i] *= mat
+ i += 1
+
+ # strip
+ glLineWidth(1.0)
+ glLineStipple(1, 0xAAAA)
+ glEnable(GL_LINE_STIPPLE)
+
+ glBegin(GL_LINE_STRIP)
+ for i in 0, 1, 3, 2, 0, 4, 5, 7, 6, 4:
+ glVertex3f(*bb[i])
+ glEnd()
+
+ # not done by the strip
+ glBegin(GL_LINES)
+ glVertex3f(*bb[1])
+ glVertex3f(*bb[5])
+
+ glVertex3f(*bb[2])
+ glVertex3f(*bb[6])
+
+ glVertex3f(*bb[3])
+ glVertex3f(*bb[7])
+ glEnd()
+ glDisable(GL_LINE_STIPPLE)
+
+ ########
+ # points
+ if data_vector:
+ glPointSize(3.0)
+ glBegin(GL_POINTS)
+ glColor3f(0.5, 0.5, 1)
+ for key, vec in data_vector.items():
+ glVertex3f(*vec.to_3d())
+ glEnd()
+ glPointSize(1.0)
+
+ #######
+ # lines
+ if data_vector_array:
+ glColor3f(0.5, 0.5, 1)
+ glLineWidth(2.0)
+
+ for line in data_vector_array.values():
+ glBegin(GL_LINE_STRIP)
+ for vec in line:
+ glVertex3f(*vec)
+ glEnd()
+ glPointSize(1.0)
+
+ glLineWidth(1.0)
+
+ # matrix
+ if data_matrix:
+ for mat in data_matrix.values():
+ draw_matrix(mat)
+
+ if data_quat:
+ loc = context.scene.cursor_location.copy()
+ for quat in data_quat.values():
+ mat = quat.to_matrix().to_4x4()
+ mat[3][0:3] = loc
+ draw_matrix(mat)
+
+ if data_euler:
+ loc = context.scene.cursor_location.copy()
+ for eul in data_euler.values():
+ mat = eul.to_matrix().to_4x4()
+ mat[3][0:3] = loc
+ draw_matrix(mat)
diff --git a/space_view3d_math_vis/utils.py b/space_view3d_math_vis/utils.py
new file mode 100644
index 00000000..b0a7757d
--- /dev/null
+++ b/space_view3d_math_vis/utils.py
@@ -0,0 +1,68 @@
+#====================== BEGIN GPL LICENSE BLOCK ======================
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+#======================= END GPL LICENSE BLOCK ========================
+
+import sys
+
+def console_namespace():
+ import console_python
+ get_consoles = console_python.get_console
+ consoles = getattr(get_consoles, "consoles", None)
+ if consoles:
+ for console, stdout, stderr in get_consoles.consoles.values():
+ return console.locals
+ return {}
+
+def console_math_data():
+ from mathutils import Matrix, Vector, Quaternion, Euler
+
+ data_matrix = {}
+ data_quat = {}
+ data_euler = {}
+ data_vector = {}
+ data_vector_array = {}
+
+ for key, var in console_namespace().items():
+ if key[0] == "_":
+ continue
+
+ var_type = type(var)
+
+ if var_type is Matrix:
+ if var.col_size != 4 or var.row_size != 4:
+ var = var.to_4x4()
+ data_matrix[key] = var
+ elif var_type is Vector:
+ if len(var) < 3:
+ var = var.to_3d()
+ data_vector[key] = var
+ elif var_type is Quaternion:
+ data_quat[key] = var
+ elif var_type is Euler:
+ data_euler[key] = var
+ elif var_type in (list, tuple):
+ if var:
+ ok = True
+ for item in var:
+ if type(item) is not Vector:
+ ok = False
+ break
+ if ok:
+ data_vector_array[key] = var
+
+ return data_matrix, data_quat, data_euler, data_vector, data_vector_array
+
diff --git a/space_view3d_panel_measure.py b/space_view3d_panel_measure.py
new file mode 100644
index 00000000..5dc6029f
--- /dev/null
+++ b/space_view3d_panel_measure.py
@@ -0,0 +1,1136 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
# Add-on metadata displayed in Blender's Add-ons preferences.
bl_info = {
    "name": "Measure Panel",
    "author": "Buerbaum Martin (Pontiac)",
    "version": (0, 7, 13),
    "blender": (2, 5, 7),
    "api": 35864,
    "location": "View3D > Properties > Measure Panel",
    "description": "Measure distances between objects",
    "warning": "",
    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/" \
        "Scripts/3D_interaction/Panel_Measure",
    "tracker_url": "https://projects.blender.org/tracker/index.php?" \
        "func=detail&aid=21445",
    "category": "3D View"}
+
+"""
+Measure panel
+
+This script displays in OBJECT MODE:
+* The distance of the 3D cursor to the origin of the
+ 3D space (if NOTHING is selected).
+* The distance of the 3D cursor to the center of an object
+ (if exactly ONE object is selected).
+* The distance between 2 object centers
+ (if exactly TWO objects are selected).
+* The surface area of any selected mesh object.
+
+Display in EDIT MODE (Local and Global space supported):
+* The distance of the 3D cursor to the origin
+ (in Local space it is the object center instead).
+* The distance of the 3D cursor to a selected vertex.
+* The distance between 2 selected vertices.
+
+Usage:
+
+This functionality can be accessed via the
+"Properties" panel in 3D View ([N] key).
+
+It's very helpful to use one or two "Empty" objects with
+"Snap during transform" enabled for fast measurement.
+
+Version history:
+v0.7.13 - Moved property definitions to registration function.
+ Changed automatic callback adding to manual,
        the current API doesn't seem to allow this to be automatic yet.
+ Various API fixes.
+v0.7.12 - Moved setting of properties to callback function
+ (it is bad practise to set it in the draw code).
+ Fixed distance calculation of parented objects.
+ API change: add_modal_handler -> modal_handler_add
+ Regression: Had to disable area display for selection with
+ more than 2 meshes.
+ Fixed Local/Global vert-loc calculations in EditMode.
+v0.7.11 - Applied patch by Filiciss Muhgue that fixes the text in quad view.
+v0.7.10 - Applied patch by Filiciss Muhgue that (mostly) fixes the quad view.
+ Patch link: https://projects.blender.org/tracker/?func=
+ detail&atid=127&aid=24932&group_id=9
+ Thanks for that!
+ Removed (now) unneeded "attr" setting for properties.
+v0.7.9 - Updated scene properties for changes in property API.
+ See http://lists.blender.org/pipermail/bf-committers/
+ 2010-September/028654.html
+ Synced API changes in/from local copy.
+v0.7.8 - Various Py API changes by Campbell ...
+ bl_default_closed -> bl_options = {'DEFAULT_CLOSED'}
+ x.verts -> x.vertices
+ @classmethod def poll(cls, context)
+ No "location" in bl_info->name
+ bl_info->api
+v0.7.7 - One more change to the callback registration code.
+ Now it should finally work as intended.
+v0.7.6 - API changes (r885, r886) - register & unregister function
+v0.7.5.3 - Small fix for bug in v0.7.5.1
+ (location was off when object was moved)
+v0.7.5.2 - Changed callback registration back to original code &
+ fixed bug in there (use bl_idname instead of bl_label)
+v0.7.5.1 - Global mode is now taking rotation into account properly.
+v0.7.5 - Fixed lagging and drawing issues.
+v0.7.4 - Fixed the modal_handler_add and callback_add code.
+ Thanks to jesterKing for pointing that out :-)
+v0.7.3.1 - Fixed bug that made all lines in Blender stippled :-)
+v0.7.3 - Added display of delta x/y/z value in 3d view.
+ * Inspired by warpi's patch here:
+ http://blenderartists.org/forum/showpost.php?p=1671033&postcount=47
+ * Also added display of dx,dy,dz lines
+ * Changed the "dist" colors to something not already used
+ by x/y/z axes.
+v0.7.2 - Merged changes from trunk (scripts_addons r847):
+ * obj.matrix -> obj.matrix_world
+ * vert.selected -> vert.select
+ * face.selected -> face.select
+ * bl_info: warning, wiki_url, tracker_url
+ * removed __bpydoc__
+ * Use fontid=0 for blf functions. 0 is the default font.
+v0.7.1 - Merged changes by Campbell:
+ * Fix for API change: Collections like context.selected_objects
+ no longer return None for empty lists.
+ * Update for mathutils, also stripped some redundant
+ conversions (Mostly "Vector()" stuff)
+v0.7 - Initial support for drawing lines.
+ (Thanks to Algorith for applying my perspective_matrix patch.)
+ The distance value (in BUs) is also drawn in the 3D view now.
+ Also fixed some wrong calculations of global/local distances.
+ Now it's really "what you see is what is calculated".
+ Use bl_info for Add-On information.
+ Use "3D View" in category & name
+ Renamed reenter_editmode to view3d.reenter_editmode.
+ Renamed panel_measure.py into space_view3d_panel_measure.py
+ Active object is only used for edit-mode now. Measurement
+ with exactly one sel. (but not necessarily active) object
+ now gets the obj via the sel-object array.
+ API change Mathutils -> mathutils (r557)
+ Deselecting 1 of 2 objects now works correctly (active object is ignored).
+ Force a redraw of the area so disabling the "measure_panel_draw"
+ checkbox will clear the line/text.
+ Only calculate area (CPU heavy) if a "area" checkbox is enabled.
+v0.6.4 - Fixed unneeded meshdata duplication (sometimes crashes Blender).
+ The script now correctly calculated the surface area (faceAreaGlobal)
+ of scaled meshes.
+ http://projects.blender.org/tracker/
+ ?func=detail&atid=453&aid=21913&group_id=153
+v0.6.3 - Added register & unregister functions.
+v0.6.2 - Fixed precision of second area property.
+ Reduced display precision to 5 (instead of 6).
+ Added (commented out code) for shortcut [F5] for
+ updating EditMode selection & calculation.
+ Changed the script so it can be managed from the "Add-Ons" tab
+ in the user preferences.
+ Corrected FSF address.
+v0.6.1 - Updated reenter_editmode operator description.
+ Fixed search for selected mesh objects.
+ Added "BU^2" after values that are not yet translated via "unit".
+v0.6
+ *) Fix: Removed EditMode/ObjectMode toggle stuff. This causes all the
+ crashes and is generally not stable.
+ Instead I've added a manual "refresh" button.
+ I registered a new operator OBJECT_OT_reenter_editmode for this.
+ *) Use "unit" settings (i.e. none/metric/imperial)
+ *) Fix: Only display surface area (>=3 objects) if return value is >=0.
+ *) Minor: Renamed objectFaceArea to objectSurfaceArea
+ *) Updated Vector() and tuple() usage.
+ *) Fixed some comments.
+v0.5 - Global surface area (object mode) is now calculated as well.
+ Support area calculation for face selection.
+ Also made measurement panel closed by default. (Area calculation
+ may use up a lot of CPU/RAM in extreme cases)
+v0.4.1 - Various cleanups.
+ Using the shorter "scene" instead of "context.scene"
+ New functions measureGlobal() and measureLocal() for
+ user-friendly access to the "space" setting.
+v0.4 - Calculate & display the surface area of mesh
+ objects (local space only right now).
+ Expanded global/local switch.
+ Made "local" option for 3Dcursor-only in edit mode actually work.
+ Fixed local/global calculation for 3Dcursor<->vertex in edit mode.
+v0.3.2 - Fixed calculation & display of local/global coordinates.
+ The user can now select via dropdown which space is wanted/needed
+ Basically this is a bugfix and new feature at the same time :-)
+v0.3.1 - Fixed bug where "measure_panel_dist" wasn't defined
+ before it was used.
+ Also added the distance calculation "origin -> 3D cursor" for edit mode.
+v0.3 - Support for mesh edit mode (1 or 2 selected vertices)
+v0.2.1 - Small fix (selecting nothing didn't calculate the distance
+ of the cursor from the origin anymore)
+v0.2 - Distance value is now displayed via a FloatProperty widget (and
+ therefore saved to file too right now [according to ideasman42].
+ The value is save inside the scene right now.)
+ Thanks goes to ideasman42 (Campbell Barton) for helping me out on this.
+v0.1 - Initial revision. Seems to work fine for most purposes.
+
+More links:
+http://gitorious.org/blender-scripts/blender-measure-panel-script
+http://blenderartists.org/forum/showthread.php?t=177800
+"""
+
+import bpy
+from bpy.props import *
+from mathutils import Vector, Matrix
+import bgl
+import blf
+
+
# Precision for display of float values.
+PRECISION = 4
+
# Colors used for drawing the measurement overlay (RGBA).
+COLOR_LOCAL = (1.0, 0.5, 0.0, 0.8)
+COLOR_GLOBAL = (0.5, 0.0, 1.0, 0.8)
+
+
+# Returns a single selected object.
+# Returns None if more than one (or nothing) is selected.
+# Note: Ignores the active object.
def getSingleObject(context):
    """Return the single selected object, or None.

    None is returned when zero or more than one objects are selected.
    The active object is deliberately ignored.
    """
    selection = context.selected_objects
    return selection[0] if len(selection) == 1 else None
+
+
+# Returns a list with 2 3D points (Vector) and a color (RGBA)
+# depending on the current view mode and the selection.
def getMeasurePoints(context):
    """Return the two points to measure between, plus a draw color.

    Returns a tuple (p1, p2, color) where p1/p2 are 3D points and
    color is one of the RGBA tuples COLOR_LOCAL/COLOR_GLOBAL, or None
    when the current selection is not supported (e.g. more than two
    objects or more than two vertices selected).
    """
    sce = context.scene

    # Get a single selected object (or nothing).
    obj = getSingleObject(context)

    if (context.mode == 'EDIT_MESH'):
        obj = context.active_object

        if (obj and obj.type == 'MESH' and obj.data):
            # Get mesh data from Object.
            mesh = obj.data

            # Get the selected vertices.
            # @todo: Better (more efficient) way to do this?
            verts_selected = [v for v in mesh.vertices if v.select == 1]

            if len(verts_selected) == 0:
                # Nothing selected.
                # We measure the distance from...
                # local ... the object center to the 3D cursor.
                # global ... the origin to the 3D cursor.
                cur_loc = sce.cursor_location
                obj_loc = obj.matrix_world.to_translation()

                # Convert to local space, if needed.
                if measureLocal(sce):
                    # NOTE(review): this "local" branch returns
                    # COLOR_GLOBAL and performs no actual conversion to
                    # local space, unlike the vertex branches below which
                    # return COLOR_LOCAL -- confirm whether intended.
                    p1 = cur_loc
                    p2 = obj_loc
                    return (p1, p2, COLOR_GLOBAL)

                else:
                    p1 = Vector((0.0, 0.0, 0.0))
                    p2 = cur_loc
                    return (p1, p2, COLOR_GLOBAL)

            elif len(verts_selected) == 1:
                # One vertex selected.
                # We measure the distance from the
                # selected vertex object to the 3D cursor.
                cur_loc = sce.cursor_location
                vert_loc = verts_selected[0].co.copy()

                # Convert to local or global space.
                if measureLocal(sce):
                    p1 = vert_loc
                    p2 = cur_loc
                    return (p1, p2, COLOR_LOCAL)

                else:
                    # NOTE: pre-2.62 mathutils convention (vector * matrix).
                    p1 = vert_loc * obj.matrix_world
                    p2 = cur_loc
                    return (p1, p2, COLOR_GLOBAL)

            elif len(verts_selected) == 2:
                # Two vertices selected.
                # We measure the distance between the
                # two selected vertices.
                obj_loc = obj.matrix_world.to_translation()
                vert1_loc = verts_selected[0].co.copy()
                vert2_loc = verts_selected[1].co.copy()

                # Convert to local or global space.
                if measureLocal(sce):
                    p1 = vert1_loc
                    p2 = vert2_loc
                    return (p1, p2, COLOR_LOCAL)

                else:
                    p1 = vert1_loc * obj.matrix_world
                    p2 = vert2_loc * obj.matrix_world
                    return (p1, p2, COLOR_GLOBAL)

            else:
                # More than 2 vertices selected -- not supported.
                return None

    elif (context.mode == 'OBJECT'):
        # We are working in object mode.

        if len(context.selected_objects) > 2:
            return None
        elif len(context.selected_objects) == 2:
            # 2 objects selected.
            # We measure the distance between the 2 selected objects.
            obj1, obj2 = context.selected_objects
            obj1_loc = obj1.matrix_world.to_translation()
            obj2_loc = obj2.matrix_world.to_translation()
            return (obj1_loc, obj2_loc, COLOR_GLOBAL)

        elif (obj):
            # One object selected.
            # We measure the distance from the object to the 3D cursor.
            cur_loc = sce.cursor_location
            obj_loc = obj.matrix_world.to_translation()
            return (obj_loc, cur_loc, COLOR_GLOBAL)

        elif not context.selected_objects:
            # Nothing selected.
            # We measure the distance from the origin to the 3D cursor.
            p1 = Vector((0.0, 0.0, 0.0))
            p2 = sce.cursor_location
            return (p1, p2, COLOR_GLOBAL)

        else:
            return None
+
+
+# Return the area of a face (in global space).
+# @note Copies the functionality of the following functions,
+# but also respects the scaling (via the "obj.matrix_world" parameter):
+# @sa: rna_mesh.c:rna_MeshFace_area_get
+# @sa: math_geom.c:area_quad_v3
+# @sa: math_geom.c:area_tri_v3
def faceAreaGlobal(face, obj):
    """Return the world-space area of a mesh face (triangle or quad).

    Unlike the face's cached local-space area this respects the
    object's transform (scaling) by applying obj.matrix_world to every
    vertex before measuring. Faces that are neither tris nor quads
    yield 0.0.
    """
    mat = obj.matrix_world
    verts = obj.data.vertices

    def world_co(index):
        # NOTE: pre-2.62 mathutils convention -- row vector * matrix.
        return verts[index].co * mat

    def tri_area(a, b, c):
        # Half the magnitude of the cross product of two edges.
        return (b - a).cross(c - a).length / 2.0

    if len(face.vertices) == 4:
        # Quad: sum of the triangles (v1,v2,v4) and (v3,v4,v2),
        # mirroring math_geom.c:area_quad_v3.
        v1, v2, v3, v4 = [world_co(i) for i in face.vertices]
        return tri_area(v1, v2, v4) + tri_area(v3, v4, v2)

    if len(face.vertices) == 3:
        # Triangle, mirroring math_geom.c:area_tri_v3.
        v1, v2, v3 = [world_co(i) for i in face.vertices]
        return tri_area(v2, v3, v1)

    return 0.0
+
+
+# Calculate the surface area of a mesh object.
+# *) Set selectedOnly=1 if you only want to count selected faces.
+# *) Set globalSpace=1 if you want to calculate
+# the global surface area (object mode).
+# Note: Be sure you have updated the mesh data before
+# running this with selectedOnly=1!
+# @todo Support other object types (surfaces, etc...)?
def objectSurfaceArea(obj, selectedOnly, globalSpace):
    """Sum the face areas of a mesh object.

    selectedOnly -- only count selected faces (make sure the mesh data
        is up to date, i.e. EditMode was exited since selecting).
    globalSpace  -- respect object scaling via faceAreaGlobal();
        otherwise the cached local-space face areas are used.
    Returns -1 when no area can be calculated (non-mesh object).
    """
    if not (obj and obj.type == 'MESH' and obj.data):
        # We can not calculate an area for this object.
        return -1

    total = 0
    for f in obj.data.faces:
        if selectedOnly and not f.select:
            continue
        total += faceAreaGlobal(f, obj) if globalSpace else f.area
    return total
+
+
+# User friendly access to the "space" setting.
def measureGlobal(sce):
    """Convenience accessor: is the panel's "Space" option set to global?"""
    mode = sce.measure_panel_transform
    return mode == "measure_global"
+
+
+# User friendly access to the "space" setting.
def measureLocal(sce):
    """Convenience accessor: is the panel's "Space" option set to local?"""
    mode = sce.measure_panel_transform
    return mode == "measure_local"
+
+
+# Converts 3D coordinates in a 3DRegion
+# into 2D screen coordinates for that region.
def region3d_get_2d_coordinates(context, loc_3d):
    """Project a 3D point into 2D pixel coordinates of the region.

    Returns a Vector((x, y, 0)) suitable for positioning blf text.
    """
    region = context.region
    half_w = region.width / 2.0
    half_h = region.height / 2.0

    # NOTE: pre-2.62 mathutils convention -- row vector * matrix.
    persp_mat = context.region_data.perspective_matrix
    clip = Vector((loc_3d[0], loc_3d[1], loc_3d[2], 1.0)) * persp_mat

    # Perspective divide (dehomogenise).
    ndc_x = clip[0] / clip[3]
    ndc_y = clip[1] / clip[3]

    # Map normalized device coordinates to region pixels.
    x = int(half_w + ndc_x * region.width / 2.0)
    y = int(half_h + ndc_y * region.height / 2.0)

    return Vector((x, y, 0))
+
+
def draw_measurements_callback(self, context):
    """Region draw callback: draw the measurement overlay in the 3D view.

    Draws the axis-colored dx/dy/dz helper lines, the measured distance
    line and the distance text, then (if enabled) recalculates surface
    areas and stores the results in the scene properties so the panel
    can display them.
    """
    sce = context.scene

    # Fall back to "don't draw" when the scene property is not defined yet.
    draw = 0
    if hasattr(sce, "measure_panel_draw"):
        draw = sce.measure_panel_draw

    # 2D drawing code example
    #bgl.glBegin(bgl.GL_LINE_STRIP)
    #bgl.glVertex2i(0, 0)
    #bgl.glVertex2i(80, 100)
    #bgl.glEnd()

    # Get measured 3D points and colors.
    line = getMeasurePoints(context)
    if (line and draw):
        p1, p2, color = line

        # Get and convert the Perspective Matrix of the current view/region.
        view3d = bpy.context
        region = view3d.region_data
        perspMatrix = region.perspective_matrix
        # Flatten the 4x4 matrix into a 16-float GL buffer.
        tempMat = [perspMatrix[i][j] for i in range(4) for j in range(4)]
        perspBuff = bgl.Buffer(bgl.GL_FLOAT, 16, tempMat)

        # ---
        # Store previous OpenGL settings.
        # Store MatrixMode
        MatrixMode_prev = bgl.Buffer(bgl.GL_INT, [1])
        bgl.glGetIntegerv(bgl.GL_MATRIX_MODE, MatrixMode_prev)
        MatrixMode_prev = MatrixMode_prev[0]

        # Store projection matrix
        # NOTE(review): the buffer is GL_DOUBLE but it is filled with
        # glGetFloatv and later restored with glLoadMatrixf --
        # glGetDoublev/glLoadMatrixd (or a GL_FLOAT buffer) would be
        # consistent; confirm against the bgl API.
        ProjMatrix_prev = bgl.Buffer(bgl.GL_DOUBLE, [16])
        bgl.glGetFloatv(bgl.GL_PROJECTION_MATRIX, ProjMatrix_prev)

        # Store Line width
        lineWidth_prev = bgl.Buffer(bgl.GL_FLOAT, [1])
        bgl.glGetFloatv(bgl.GL_LINE_WIDTH, lineWidth_prev)
        lineWidth_prev = lineWidth_prev[0]

        # Store GL_BLEND
        # NOTE(review): boolean states are read with glGetFloatv into
        # GL_BYTE buffers here and below; glGetBooleanv would match the
        # buffer type -- confirm against the bgl API.
        blend_prev = bgl.Buffer(bgl.GL_BYTE, [1])
        bgl.glGetFloatv(bgl.GL_BLEND, blend_prev)
        blend_prev = blend_prev[0]

        line_stipple_prev = bgl.Buffer(bgl.GL_BYTE, [1])
        bgl.glGetFloatv(bgl.GL_LINE_STIPPLE, line_stipple_prev)
        line_stipple_prev = line_stipple_prev[0]

        # Store glColor4f
        color_prev = bgl.Buffer(bgl.GL_FLOAT, [4])
        bgl.glGetFloatv(bgl.GL_COLOR, color_prev)

        # ---
        # Prepare for 3D drawing
        bgl.glLoadIdentity()
        bgl.glMatrixMode(bgl.GL_PROJECTION)
        bgl.glLoadMatrixf(perspBuff)

        bgl.glEnable(bgl.GL_BLEND)
        bgl.glEnable(bgl.GL_LINE_STIPPLE)

        # ---
        # Draw 3D stuff.
        width = 1
        bgl.glLineWidth(width)
        # X (delta-x helper line, axis-colored red)
        bgl.glColor4f(1, 0, 0, 0.8)
        bgl.glBegin(bgl.GL_LINE_STRIP)
        bgl.glVertex3f(p1[0], p1[1], p1[2])
        bgl.glVertex3f(p2[0], p1[1], p1[2])
        bgl.glEnd()
        # Y (delta-y helper line, green)
        bgl.glColor4f(0, 1, 0, 0.8)
        bgl.glBegin(bgl.GL_LINE_STRIP)
        bgl.glVertex3f(p1[0], p1[1], p1[2])
        bgl.glVertex3f(p1[0], p2[1], p1[2])
        bgl.glEnd()
        # Z (delta-z helper line, blue)
        bgl.glColor4f(0, 0, 1, 0.8)
        bgl.glBegin(bgl.GL_LINE_STRIP)
        bgl.glVertex3f(p1[0], p1[1], p1[2])
        bgl.glVertex3f(p1[0], p1[1], p2[2])
        bgl.glEnd()

        # Dist -- the actual measurement line, thicker and colored by space.
        width = 2
        bgl.glLineWidth(width)
        bgl.glColor4f(color[0], color[1], color[2], color[3])
        bgl.glBegin(bgl.GL_LINE_STRIP)
        bgl.glVertex3f(p1[0], p1[1], p1[2])
        bgl.glVertex3f(p2[0], p2[1], p2[2])
        bgl.glEnd()

        # ---
        # Restore previous OpenGL settings
        bgl.glLoadIdentity()
        bgl.glMatrixMode(MatrixMode_prev)
        bgl.glLoadMatrixf(ProjMatrix_prev)
        bgl.glLineWidth(lineWidth_prev)
        if not blend_prev:
            bgl.glDisable(bgl.GL_BLEND)
        if not line_stipple_prev:
            bgl.glDisable(bgl.GL_LINE_STIPPLE)
        bgl.glColor4f(color_prev[0],
            color_prev[1],
            color_prev[2],
            color_prev[3])

        # ---
        # Draw (2D) text
        # We do this after drawing the lines so
        # we can draw it OVER the line.
        coord_2d = region3d_get_2d_coordinates(context, p2 + (p1 - p2) * 0.5)
        OFFSET_LINE = 10   # Offset the text a bit to the right.
        OFFSET_Y = 15      # Offset of the lines.
        OFFSET_VALUE = 30  # Offset of value(s) from the text.
        dist = (p1 - p2).length

        # Write distance value into the scene property,
        # so we can display it in the panel & refresh the panel.
        if hasattr(sce, "measure_panel_dist"):
            sce.measure_panel_dist = dist
            context.area.tag_redraw()

        texts = [("Dist:", round(dist, PRECISION)),
            ("X:", round(abs(p1[0] - p2[0]), PRECISION)),
            ("Y:", round(abs(p1[1] - p2[1]), PRECISION)),
            ("Z:", round(abs(p1[2] - p2[2]), PRECISION))]

        # Draw all texts
        # @todo Get user pref for text color in 3D View
        bgl.glColor4f(1.0, 1.0, 1.0, 1.0)
        blf.size(0, 12, 72)  # Prevent font size to randomly change.

        loc_x = coord_2d[0] + OFFSET_LINE
        loc_y = coord_2d[1]
        for t in texts:
            text = t[0]
            value = str(t[1]) + " BU"

            blf.position(0, loc_x, loc_y, 0)
            blf.draw(0, text)
            blf.position(0, loc_x + OFFSET_VALUE, loc_y, 0)
            blf.draw(0, value)

            loc_y -= OFFSET_Y

    # Handle mesh surface area calculations
    if (sce.measure_panel_calc_area):
        # Get a single selected object (or nothing).
        obj = getSingleObject(context)

        if (context.mode == 'EDIT_MESH'):
            obj = context.active_object

            if (obj and obj.type == 'MESH' and obj.data):
                # "Note: a Mesh will return the selection state of the mesh
                # when EditMode was last exited. A Python script operating
                # in EditMode must exit EditMode before getting the current
                # selection state of the mesh."
                # http://www.blender.org/documentation/249PythonDoc/
                # /Mesh.MVert-class.html#sel
                # We can only provide this by exiting & re-entering EditMode.
                # @todo: Better way to do this?

                # Get mesh data from Object.
                mesh = obj.data

                # Get transformation matrix from object.
                ob_mat = obj.matrix_world
                # Also make an inversed copy! of the matrix.
                # NOTE(review): ob_mat_inv appears unused below -- confirm.
                ob_mat_inv = ob_mat.copy()
                Matrix.invert(ob_mat_inv)

                # Get the selected vertices.
                # @todo: Better (more efficient) way to do this?
                verts_selected = [v for v in mesh.vertices if v.select == 1]

                if len(verts_selected) >= 3:
                    # Get selected faces
                    # @todo: Better (more efficient) way to do this?
                    faces_selected = [f for f in mesh.faces
                        if f.select == 1]

                    if len(faces_selected) > 0:
                        area = objectSurfaceArea(obj, True,
                            measureGlobal(sce))
                        if (area >= 0):
                            sce.measure_panel_area1 = area

        elif (context.mode == 'OBJECT'):
            # We are working in object mode.

            if len(context.selected_objects) > 2:
                return
# @todo Make this work again.
#                # We have more than 2 objects selected...
#
#                mesh_objects = [o for o in context.selected_objects
#                    if (o.type == 'MESH')]

#                if (len(mesh_objects) > 0):
#                    # ... and at least one of them is a mesh.
#
#                    for o in mesh_objects:
#                        area = objectSurfaceArea(o, False,
#                            measureGlobal(sce))
#                        if (area >= 0):
#                            #row.label(text=o.name, icon='OBJECT_DATA')
#                            #row.label(text=str(round(area, PRECISION))
#                            #    + " BU^2")

            elif len(context.selected_objects) == 2:
                # 2 objects selected.

                obj1, obj2 = context.selected_objects

                # Calculate surface area of the objects.
                area1 = objectSurfaceArea(obj1, False, measureGlobal(sce))
                area2 = objectSurfaceArea(obj2, False, measureGlobal(sce))
                sce.measure_panel_area1 = area1
                sce.measure_panel_area2 = area2

            elif (obj):
                # One object selected.

                # Calculate surface area of the object.
                area = objectSurfaceArea(obj, False, measureGlobal(sce))
                if (area >= 0):
                    sce.measure_panel_area1 = area
+
+
class VIEW3D_OT_display_measurements(bpy.types.Operator):
    '''Display the measurements made in the 'Measure' panel'''
    bl_idname = "view3d.display_measurements"
    bl_label = "Display the measurements made in the" \
        " 'Measure' panel in the 3D View."
    bl_options = {'REGISTER'}

    def modal(self, context, event):
        # NOTE(review): returning {'FINISHED'} ends the modal handler on
        # the first event; the region draw callback added in execute()
        # keeps running regardless -- confirm this is intended.
        context.area.tag_redraw()

        return {'FINISHED'}

    def execute(self, context):
        # Install the draw callback once per session: skip when an
        # operator with our bl_idname is already in the window manager.
        if context.area.type == 'VIEW_3D':
            mgr_ops = context.window_manager.operators.values()
            if not self.bl_idname in [op.bl_idname for op in mgr_ops]:
                # Add the region OpenGL drawing callback
                for WINregion in context.area.regions:
                    if WINregion.type == 'WINDOW':
                        context.window_manager.modal_handler_add(self)
                        self._handle = WINregion.callback_add(
                            draw_measurements_callback,
                            (self, context),
                            'POST_PIXEL')

                print("Measure panel display callback added")

                return {'RUNNING_MODAL'}

            return {'CANCELLED'}

        else:
            self.report({'WARNING'}, "View3D not found, cannot run operator")
            return {'CANCELLED'}
+
+
class VIEW3D_OT_activate_measure_panel(bpy.types.Operator):
    bl_label = "Activate"
    bl_idname = "view3d.activate_measure_panel"
    bl_description = "Activate the callback needed to draw the lines."
    bl_options = {'REGISTER'}

    def invoke(self, context, event):
        """Install the measurement draw callback (if not installed yet)."""
        # Running display_measurements adds the region callback; it
        # cancels itself when the callback is already registered.
        bpy.ops.view3d.display_measurements()
        return {'FINISHED'}
+
+
class VIEW3D_OT_reenter_editmode(bpy.types.Operator):
    bl_label = "Re-enter EditMode"
    bl_idname = "view3d.reenter_editmode"
    bl_description = "Update mesh data of an active mesh object." \
        " This is done by exiting and re-entering mesh edit mode."
    bl_options = {'REGISTER'}

    def invoke(self, context, event):
        """Toggle the active mesh out of and back into EditMode.

        Leaving EditMode syncs the mesh data (selection state), which
        is otherwise only updated on exit.
        """
        obj = context.active_object

        # Guard: only applies to a mesh that is currently being edited.
        if not (obj and obj.type == 'MESH' and context.mode == 'EDIT_MESH'):
            return {'CANCELLED'}

        # Exit and re-enter mesh EditMode.
        bpy.ops.object.mode_set(mode='OBJECT')
        bpy.ops.object.mode_set(mode='EDIT')
        return {'FINISHED'}
+
+
class VIEW3D_PT_measure(bpy.types.Panel):
    """Properties-region panel showing distances and surface areas."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_label = "Measure"
    bl_options = {'DEFAULT_CLOSED'}

    @classmethod
    def poll(cls, context):
        # Only display this panel in the object and edit mode 3D view.
        if (context.area.type == 'VIEW_3D' and
            (context.mode == 'EDIT_MESH'
            or context.mode == 'OBJECT')):
            return 1

        return 0

    def draw_header(self, context):
        """Show either the "Activate" button (callback not yet installed)
        or the draw-toggle checkbox in the panel header."""
        layout = self.layout
        sce = context.scene

        # Force a redraw.
        # This prevents the lines still be drawn after
        # disabling the "measure_panel_draw" checkbox.
        # @todo Better solution?
        context.area.tag_redraw()

        mgr_ops = context.window_manager.operators.values()
        if (not "VIEW3D_OT_display_measurements"
            in [op.bl_idname for op in mgr_ops]):
            layout.operator("view3d.activate_measure_panel",
                text="Activate")
        else:
            layout.prop(sce, "measure_panel_draw")

        context.area.tag_redraw()

    def draw(self, context):
        """Lay out the panel depending on mode and current selection."""
        layout = self.layout
        sce = context.scene

        # Get a single selected object (or nothing).
        obj = getSingleObject(context)

        if (context.mode == 'EDIT_MESH'):
            obj = context.active_object

            if (obj and obj.type == 'MESH' and obj.data):
                # "Note: a Mesh will return the selection state of the mesh
                # when EditMode was last exited. A Python script operating
                # in EditMode must exit EditMode before getting the current
                # selection state of the mesh."
                # http://www.blender.org/documentation/249PythonDoc/
                # /Mesh.MVert-class.html#sel
                # We can only provide this by exiting & re-entering EditMode.
                # @todo: Better way to do this?

                # Get mesh data from Object.
                mesh = obj.data

                # Get transformation matrix from object.
                ob_mat = obj.matrix_world
                # Also make an inversed copy! of the matrix.
                # NOTE(review): ob_mat_inv appears unused below -- confirm.
                ob_mat_inv = ob_mat.copy()
                Matrix.invert(ob_mat_inv)

                # Get the selected vertices.
                # @todo: Better (more efficient) way to do this?
                verts_selected = [v for v in mesh.vertices if v.select == 1]

                if len(verts_selected) == 0:
                    # Nothing selected.
                    # We measure the distance from...
                    # local ... the object center to the 3D cursor.
                    # global ... the origin to the 3D cursor.

                    row = layout.row()
                    row.prop(sce, "measure_panel_dist")

                    row = layout.row()
                    row.label(text="", icon='CURSOR')
                    row.label(text="", icon='ARROW_LEFTRIGHT')
                    if measureLocal(sce):
                        row.label(text="Obj. Center")
                    else:
                        row.label(text="Origin [0,0,0]")

                    row = layout.row()
                    row.operator("view3d.reenter_editmode",
                        text="Update selection & distance")
# @todo
#                        description="The surface area value can" \
#                            " not be updated in mesh edit mode" \
#                            " automatically. Press this button" \
#                            " to do this manually, after you changed" \
#                            " the selection.")

                    row = layout.row()
                    row.prop(sce,
                        "measure_panel_transform",
                        expand=True)

                elif len(verts_selected) == 1:
                    # One vertex selected.
                    # We measure the distance from the
                    # selected vertex object to the 3D cursor.

                    row = layout.row()
                    row.prop(sce, "measure_panel_dist")

                    row = layout.row()
                    row.label(text="", icon='CURSOR')
                    row.label(text="", icon='ARROW_LEFTRIGHT')
                    row.label(text="", icon='VERTEXSEL')

                    row = layout.row()
                    row.operator("view3d.reenter_editmode",
                        text="Update selection & distance")

                    row = layout.row()
                    row.prop(sce,
                        "measure_panel_transform",
                        expand=True)

                elif len(verts_selected) == 2:
                    # Two vertices selected.
                    # We measure the distance between the
                    # two selected vertices.

                    row = layout.row()
                    row.prop(sce, "measure_panel_dist")

                    row = layout.row()
                    row.label(text="", icon='VERTEXSEL')
                    row.label(text="", icon='ARROW_LEFTRIGHT')
                    row.label(text="", icon='VERTEXSEL')

                    row = layout.row()
                    row.operator("view3d.reenter_editmode",
                        text="Update selection & distance")

                    row = layout.row()
                    row.prop(sce,
                        "measure_panel_transform",
                        expand=True)

                else:
                    # Three or more vertices selected -- offer the
                    # (opt-in, CPU-heavy) surface area calculation.
                    row = layout.row()
                    row.prop(sce, "measure_panel_calc_area",
                        text="Surface area (selected faces):")

                    if (sce.measure_panel_calc_area):
                        # Get selected faces
                        # @todo: Better (more efficient) way to do this?
                        faces_selected = [f for f in mesh.faces
                            if f.select == 1]

                        if len(faces_selected) > 0:
                            if (sce.measure_panel_area1 >= 0):
                                row = layout.row()
                                row.label(
                                    text=str(len(faces_selected)),
                                    icon='FACESEL')
                                row.prop(sce, "measure_panel_area1")

                                row = layout.row()
                                row.operator("view3d.reenter_editmode",
                                    text="Update selection & area")

                                row = layout.row()
                                row.prop(sce,
                                    "measure_panel_transform",
                                    expand=True)

                        else:
                            row = layout.row()
                            row.label(text="Selection not supported.",
                                icon='INFO')

                            row = layout.row()
                            row.operator("view3d.reenter_editmode",
                                text="Update selection")

                    else:
                        row = layout.row()
                        row.operator("view3d.reenter_editmode",
                            text="Update selection")

        elif (context.mode == 'OBJECT'):
            # We are working in object mode.

            if len(context.selected_objects) > 2:
                # We have more than 2 objects selected...

                row = layout.row()
                row.prop(sce, "measure_panel_calc_area",
                    text="Surface area (selected faces):")

                if (sce.measure_panel_calc_area):
                    mesh_objects = [o for o in context.selected_objects
                        if (o.type == 'MESH')]

                    if (len(mesh_objects) > 0):
                        # ... and at least one of them is a mesh.

                        # Calculate and display surface area of the objects.
                        # @todo: Convert to scene units! We do not have a
                        # FloatProperty field here for automatic conversion.

                        row = layout.row()
                        row.label(text="Multiple objects not yet supported",
                            icon='INFO')
                        row = layout.row()
                        row.label(text="(= More than two meshes)",
                            icon='INFO')
# @todo Make this work again.
#                        for o in mesh_objects:
#                            area = objectSurfaceArea(o, False,
#                                measureGlobal(sce))
#                            if (area >= 0):
#                                row = layout.row()
#                                row.label(text=o.name, icon='OBJECT_DATA')
#                                row.label(text=str(round(area, PRECISION))
#                                    + " BU^2")

                        row = layout.row()
                        row.prop(sce,
                            "measure_panel_transform",
                            expand=True)

            elif len(context.selected_objects) == 2:
                # 2 objects selected.
                # We measure the distance between the 2 selected objects.

                obj1, obj2 = context.selected_objects

                row = layout.row()
                row.prop(sce, "measure_panel_dist")

                row = layout.row()
                row.label(text="", icon='OBJECT_DATA')
                row.prop(obj1, "name", text="")

                row.label(text="", icon='ARROW_LEFTRIGHT')

                row.label(text="", icon='OBJECT_DATA')
                row.prop(obj2, "name", text="")

                row = layout.row()
                row.prop(sce, "measure_panel_calc_area",
                    text="Surface area:")

                if (sce.measure_panel_calc_area):
                    # Display surface area of the objects.
                    if (sce.measure_panel_area1 >= 0
                        or sce.measure_panel_area2 >= 0):
                        if (sce.measure_panel_area1 >= 0):
                            row = layout.row()
                            row.label(text=obj1.name, icon='OBJECT_DATA')
                            row.prop(sce, "measure_panel_area1")

                        if (sce.measure_panel_area2 >= 0):
                            row = layout.row()
                            row.label(text=obj2.name, icon='OBJECT_DATA')
                            row.prop(sce, "measure_panel_area2")

                        row = layout.row()
                        row.prop(sce,
                            "measure_panel_transform",
                            expand=True)

            elif (obj):
                # One object selected.
                # We measure the distance from the object to the 3D cursor.

                row = layout.row()
                row.prop(sce, "measure_panel_dist")

                row = layout.row()
                row.label(text="", icon='CURSOR')

                row.label(text="", icon='ARROW_LEFTRIGHT')

                row.label(text="", icon='OBJECT_DATA')
                row.prop(obj, "name", text="")

                row = layout.row()
                row.prop(sce, "measure_panel_calc_area",
                    text="Surface area:")

                if (sce.measure_panel_calc_area):
                    # Display surface area of the object.

                    if (sce.measure_panel_area1 >= 0):
                        row = layout.row()
                        row.label(text=obj.name, icon='OBJECT_DATA')
                        row.prop(sce, "measure_panel_area1")

                        row = layout.row()
                        row.prop(sce,
                            "measure_panel_transform",
                            expand=True)

            elif not context.selected_objects:
                # Nothing selected.
                # We measure the distance from the origin to the 3D cursor.

                row = layout.row()
                row.prop(sce, "measure_panel_dist")

                row = layout.row()
                row.label(text="", icon='CURSOR')
                row.label(text="", icon='ARROW_LEFTRIGHT')
                row.label(text="Origin [0,0,0]")

            else:
                row = layout.row()
                row.label(text="Selection not supported.",
                    icon='INFO')
+
+
def register():
    """Register all classes and define the scene properties the panel uses."""
    bpy.utils.register_module(__name__)

    scene = bpy.types.Scene

    # Distance readout (shown read-only in the panel and the 3D view).
    scene.measure_panel_dist = bpy.props.FloatProperty(
        name="Distance",
        precision=PRECISION,
        unit="LENGTH")

    # Surface-area readouts for up to two selected objects.
    scene.measure_panel_area1 = bpy.props.FloatProperty(
        precision=PRECISION,
        unit="AREA")
    scene.measure_panel_area2 = bpy.props.FloatProperty(
        precision=PRECISION,
        unit="AREA")

    # Global/local space switch (expanded as radio buttons in the UI).
    transform_items = [
        ("measure_global", "Global",
            "Calculate values in global space."),
        ("measure_local", "Local",
            "Calculate values inside the local object space.")]
    scene.measure_panel_transform = bpy.props.EnumProperty(
        name="Space",
        description="Choose in which space you want to measure.",
        items=transform_items,
        default='measure_global')

    # Whether the measurement overlay is drawn in the 3D view.
    scene.measure_panel_draw = bpy.props.BoolProperty(
        description="Draw distances in 3D View",
        default=1)

    # Area calculation is opt-in -- it is CPU heavy on big meshes.
    # @todo prevent double calculations for each refresh automatically?
    scene.measure_panel_calc_area = bpy.props.BoolProperty(
        description="Calculate mesh surface area (heavy CPU"
            " usage on bigger meshes)",
        default=0)
+
+
def unregister():
    """Unregister every class this module registered."""
    bpy.utils.unregister_module(__name__)
+
+if __name__ == "__main__":
+ register()
diff --git a/space_view3d_spacebar_menu.py b/space_view3d_spacebar_menu.py
new file mode 100644
index 00000000..3a0b4a8f
--- /dev/null
+++ b/space_view3d_spacebar_menu.py
@@ -0,0 +1,1528 @@
+#3d_cursor_menu.py (c) 2011 Jonathan Smith (JayDez)
+#
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
# Add-on metadata read by Blender's add-on manager.
bl_info = {
    "name": "Dynamic Spacebar Menu",
    "author": "JayDez, sim88, meta-androcto, sam",
    # The version history in the module docstring already documents a
    # v1.7.3 entry; the tuple here still said (1, 7, 2) — kept in sync.
    "version": (1, 7, 3),
    "blender": (2, 5, 7),
    "api": 35853,
    "location": "View3D > Spacebar Key",
    "description": "Context Sensitive Spacebar Menu",
    "warning": "",
    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"
                "Scripts/3D_interaction/Dynamic_Spacebar_Menu",
    "tracker_url": "https://projects.blender.org/tracker/index.php?"
                   "func=detail&aid=22060",
    "category": "3D View"}
+
+"""
+Dynamic Menu
+This adds a the Dynamic Spacebar Menu in the View3D.
+
+Usage:
+* This script gives a basic menu with common simple tools for easy access.
+* Very similar to the Spacebar menu in 2.49
+* Context sensitive for Object, Edit, Sculpt, Pose, Weight/Texture/Vertex
+ Paint modes.
+* Object sensitive based on object selected in edit mode.
+
+Version history:
+v1.7.3 - (JayDez) - Wrong operator names for deleting in editmode
+v1.7.2 - (JayDez) - Adding proportional editing menu to where it was missing
+v1.7.1 - (JayDez) - Fixing up lattice menu and a wrong operator in curve menu
+v1.7 - (JayDez) - Fixing up animation menu and Metaball Add Menu
+v1.6.1 - (JayDez) - Added Add Menu to Curve and Surface (respectively)
+v1.6 - (JayDez) - Fixed a couple wrong names. (Thanks Bao2 and Dennis)
+v1.5.1 - (JayDez) - Changing formatting to be more uniform.
+v1.5 - (meta-androcto) - adding context sensitive menus.
+v1.3 - (JayDez) - Changed toggle editmode to an if statement, so that
+ if you are in editmode it will show change to object mode but
+ otherwise it shows change to edit mode. Also added separate icons
+ for change to edit mode and to object mode.
+v1.2 - (JayDez) - Editing docs, changing 3D cursor to dynamic menu,
+ reorganizing menu.
+v1.1 - (meta-androcto) - added editmode menu
+v1.0 - (meta-androcto) - initial final revision (commited to contrib)
+v0.1 through 0.9 - various tests/contributions by various people and scripts
+ Devs: JayDez, Crouch, sim88, meta-androcto, Sam
+ Scripts: 3D Cursor Menu, Original Dynamic Menu
+"""
+
import bpy
# NOTE(review): the wildcard import below duplicates "import bpy" and is
# discouraged; kept as-is because parts of the file outside this view may
# rely on names it brings into the module namespace.
from bpy import *
from mathutils import Vector, Matrix
import math
+
+# Dynamic Menu
# Dynamic Menu
class VIEW3D_MT_Space_Dynamic_Menu(bpy.types.Menu):
    """Context-sensitive spacebar menu: one branch per interaction mode.

    NOTE(review): the mode tests below mix ``elif`` with fresh ``if``
    statements.  This is harmless only because ``context.mode`` matches
    at most one branch per redraw, but it would be cleaner as one chain.
    """
    bl_label = "Dynamic Spacebar Menu"

    def draw(self, context):
        """Build the menu for the current mode of the active context."""
        layout = self.layout
        settings = context.tool_settings
        layout.operator_context = 'INVOKE_REGION_WIN'

        # 'ob' is the whole context (not an object); only .mode is read.
        ob = context
        if ob.mode == 'OBJECT':
            # Object mode

            # Search Menu
            layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
            layout.separator()

            # Add Menu block
            layout.menu("VIEW3D_MT_AddMenu", icon='OBJECT_DATAMODE')
            layout.separator()

            # Transform block
            layout.menu("VIEW3D_MT_TransformMenu", icon='MANIPUL')

            # Mirror block
            layout.menu("VIEW3D_MT_MirrorMenu", icon='MOD_MIRROR')

            # Cursor Block
            layout.menu("VIEW3D_MT_CursorMenu", icon='CURSOR')
            layout.separator()

            # Parent block
            layout.menu("VIEW3D_MT_ParentMenu", icon='ROTACTIVE')

            # Group block
            layout.menu("VIEW3D_MT_GroupMenu", icon='GROUP')
            layout.separator()

            # Modifier block
            layout.operator_menu_enum("object.modifier_add", "type",
                icon='MODIFIER')
            layout.separator()

            # Align block
            layout.menu("VIEW3D_MT_AlignMenu", icon='ALIGN')
            layout.separator()

            # Select block
            layout.menu("VIEW3D_MT_SelectMenu", icon='RESTRICT_SELECT_OFF')

            # Toolshelf block
            layout.operator("view3d.toolshelf", icon='MENU_PANEL')
            layout.separator()

            # Properties block
            layout.operator("view3d.properties", icon='MENU_PANEL')
            layout.separator()

            #TODO: Add if statement to test whether editmode switch needs to
            #be added to the menu, since certain object can't enter edit mode
            #In which case we don't need the toggle
            # Toggle Editmode
            layout.operator("object.editmode_toggle", text="Enter Edit Mode",
                icon='EDITMODE_HLT')

            # Delete block
            layout.operator("object.delete", text="Delete Object",
                icon='CANCEL')


        elif ob.mode == 'EDIT_MESH':
            # Edit mode

            # Search Menu
            layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
            layout.separator()

            # Add block
            layout.menu("INFO_MT_mesh_add", text="Add Mesh",
                icon='OUTLINER_OB_MESH')
            layout.separator()

            # Transform block
            layout.menu("VIEW3D_MT_TransformMenu", icon='MANIPUL')

            # Mirror block
            layout.menu("VIEW3D_MT_MirrorMenu", icon='MOD_MIRROR')

            # Cursor block
            layout.menu("VIEW3D_MT_EditCursorMenu", icon='CURSOR')
            layout.separator()

            # Proportional block
            layout.prop_menu_enum(settings, "proportional_edit",
                icon="PROP_CON")
            layout.prop_menu_enum(settings, "proportional_edit_falloff",
                icon="SMOOTHCURVE")
            layout.separator()

            # Edit block
            layout.menu("VIEW3D_MT_edit_TK", icon='EDITMODE_HLT')
            layout.separator()

            # Multi Select
            layout.menu("VIEW3D_MT_edit_multi", icon='VERTEXSEL')
            layout.separator()

            # Extrude block
            layout.menu("VIEW3D_MT_edit_mesh_extrude", icon='EDIT_VEC')
            layout.separator()

            # Tools block
            layout.menu("VIEW3D_MT_edit_mesh_specials", icon='MODIFIER')
            layout.menu("VIEW3D_MT_uv_map", icon='MOD_UVPROJECT')

            # Select block
            layout.menu("VIEW3D_MT_SelectEditMenu",
                icon='RESTRICT_SELECT_OFF')
            layout.separator()

            # Toolshelf block
            layout.operator("view3d.toolshelf", icon='MENU_PANEL')
            layout.separator()

            # Properties block
            layout.operator("view3d.properties", icon='MENU_PANEL')
            layout.separator()

            # Toggle Object Mode
            layout.operator("object.editmode_toggle",
                text="Enter Object Mode", icon='OBJECT_DATAMODE')

            # Delete Block
            layout.operator("mesh.delete", icon='CANCEL')

        if ob.mode == 'EDIT_CURVE':
            # Curve menu

            # Search Menu
            layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
            layout.separator()

            # Add block
            layout.menu("INFO_MT_curve_add", text="Add Curve",
                icon='OUTLINER_OB_CURVE')
            layout.separator()

            # Transform block
            layout.menu("VIEW3D_MT_TransformMenu", icon='MANIPUL')

            # Mirror block
            layout.menu("VIEW3D_MT_MirrorMenu", icon='MOD_MIRROR')

            # Cursor block
            layout.menu("VIEW3D_MT_CursorMenu", icon='CURSOR')
            layout.separator()

            # Proportional block
            layout.prop_menu_enum(settings, "proportional_edit",
                icon="PROP_CON")
            layout.prop_menu_enum(settings, "proportional_edit_falloff",
                icon="SMOOTHCURVE")
            layout.separator()

            # Edit Control Points
            layout.menu("VIEW3D_MT_EditCurveCtrlpoints",
                icon='CURVE_BEZCURVE')
            layout.separator()

            # Edit Curve Specials
            layout.menu("VIEW3D_MT_EditCurveSpecials",
                icon='MODIFIER')
            layout.separator()

            # Select Curve Block
            #Could use: VIEW3D_MT_select_edit_curve
            #Which is the default, instead of a hand written one, left it alone
            #for now though.
            layout.menu("VIEW3D_MT_SelectCurveMenu",
                icon='RESTRICT_SELECT_OFF')

            # Toolshelf block
            layout.operator("view3d.toolshelf", icon='MENU_PANEL')
            layout.separator()

            # Properties block
            layout.operator("view3d.properties", icon='MENU_PANEL')
            layout.separator()

            # Toggle Objectmode
            layout.operator("object.editmode_toggle", text="Enter Object Mode",
                icon='OBJECT_DATA')

            # Delete block
            layout.operator("curve.delete", text="Delete Object",
                icon='CANCEL')

        if ob.mode == 'EDIT_SURFACE':
            # Surface menu

            # Search Menu
            layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
            layout.separator()

            # Add block
            layout.menu("INFO_MT_surface_add", text="Add Surface",
                icon='OUTLINER_OB_SURFACE')
            layout.separator()

            # Transform block
            layout.menu("VIEW3D_MT_TransformMenu", icon='MANIPUL')

            # Mirror block
            layout.menu("VIEW3D_MT_MirrorMenu", icon='MOD_MIRROR')

            # Cursor block
            layout.menu("VIEW3D_MT_CursorMenu", icon='CURSOR')
            layout.separator()

            # Proportional block
            layout.prop_menu_enum(settings, "proportional_edit",
                icon="PROP_CON")
            layout.prop_menu_enum(settings, "proportional_edit_falloff",
                icon="SMOOTHCURVE")
            layout.separator()

            # Edit Curve Specials
            layout.menu("VIEW3D_MT_EditCurveSpecials",
                icon='MODIFIER')
            layout.separator()

            # Select Surface
            layout.menu("VIEW3D_MT_SelectSurface", icon='RESTRICT_SELECT_OFF')

            # Toolshelf block
            layout.operator("view3d.toolshelf", icon='MENU_PANEL')
            layout.separator()

            # Properties block
            layout.operator("view3d.properties", icon='MENU_PANEL')
            layout.separator()

            # Toggle Objectmode
            layout.operator("object.editmode_toggle", text="Enter Object Mode",
                icon='OBJECT_DATA')

            # Delete block
            # (Surfaces share the curve edit operators, hence curve.delete.)
            layout.operator("curve.delete", text="Delete Object",
                icon='CANCEL')

        if ob.mode == 'EDIT_METABALL':
            # Metaball menu

            #Search Menu
            layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
            layout.separator()

            # Add block
            #layout.menu("INFO_MT_metaball_add", text="Add Metaball",
            #    icon='OUTLINER_OB_META')
            layout.operator_menu_enum("object.metaball_add", "type",
                text="Add Metaball", icon='OUTLINER_OB_META')
            layout.separator()

            # Transform block
            layout.menu("VIEW3D_MT_TransformMenu", icon='MANIPUL')

            # Mirror block
            layout.menu("VIEW3D_MT_MirrorMenu", icon='MOD_MIRROR')

            # Cursor block
            layout.menu("VIEW3D_MT_CursorMenu", icon='CURSOR')
            layout.separator()

            # Proportional block
            layout.prop_menu_enum(settings, "proportional_edit",
                icon="PROP_CON")
            layout.prop_menu_enum(settings, "proportional_edit_falloff",
                icon="SMOOTHCURVE")
            layout.separator()

            #Select Metaball
            layout.menu("VIEW3D_MT_SelectMetaball", icon='RESTRICT_SELECT_OFF')

            # Toolshelf block
            layout.operator("view3d.toolshelf", icon='MENU_PANEL')
            layout.separator()

            # Properties block
            layout.operator("view3d.properties", icon='MENU_PANEL')
            layout.separator()

            # Toggle Objectmode
            layout.operator("object.editmode_toggle", text="Enter Object Mode",
                icon='OBJECT_DATA')

            # Delete block
            layout.operator("mball.delete_metaelems", text="Delete Object",
                icon='CANCEL')

        elif ob.mode == 'EDIT_LATTICE':
            # Lattice menu

            #Search Menu
            layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
            layout.separator()

            # Transform block
            layout.menu("VIEW3D_MT_TransformMenu", icon='MANIPUL')

            # Mirror block
            layout.menu("VIEW3D_MT_MirrorMenu", icon='MOD_MIRROR')

            # Cursor block
            layout.menu("VIEW3D_MT_CursorMenu", icon='CURSOR')
            layout.separator()

            # Proportional block
            layout.prop_menu_enum(settings, "proportional_edit",
                icon= "PROP_CON")
            layout.prop_menu_enum(settings, "proportional_edit_falloff",
                icon= "SMOOTHCURVE")
            layout.separator()

            layout.operator("lattice.make_regular")
            layout.separator()

            #Select Lattice
            layout.menu("VIEW3D_MT_select_edit_lattice",
                icon='RESTRICT_SELECT_OFF')

            # Toolshelf block
            layout.operator("view3d.toolshelf", icon='MENU_PANEL')
            layout.separator()

            # Properties block
            layout.operator("view3d.properties", icon='MENU_PANEL')
            layout.separator()

            # Toggle Objectmode
            layout.operator("object.editmode_toggle", text="Enter Object Mode",
                icon='OBJECT_DATA')

            # Delete block - Can't delete any lattice stuff so not needed
            #layout.operator("object.delete", text="Delete Object",
            #    icon='CANCEL')

        if context.mode == 'PARTICLE':
            # Particle menu

            #Search Menu
            layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
            layout.separator()

            # Transform block
            layout.menu("VIEW3D_MT_TransformMenu", icon='MANIPUL')

            # Mirror block
            layout.menu("VIEW3D_MT_MirrorMenu", icon='MOD_MIRROR')

            # Cursor block
            layout.menu("VIEW3D_MT_CursorMenu", icon='CURSOR')
            layout.separator()

            # Proportional block
            layout.prop_menu_enum(settings, "proportional_edit",
                icon= "PROP_CON")
            layout.prop_menu_enum(settings, "proportional_edit_falloff",
                icon= "SMOOTHCURVE")
            layout.separator()

            # Particle block
            layout.menu("VIEW3D_MT_particle", icon='PARTICLEMODE')
            layout.separator()

            #Select Particle
            layout.menu("VIEW3D_MT_select_particle",
                icon='RESTRICT_SELECT_OFF')

            # History/Cursor Block
            layout.menu("VIEW3D_MT_undoS", icon='ARROW_LEFTRIGHT')

            # Toolshelf block
            layout.operator("view3d.toolshelf", icon='MENU_PANEL')
            layout.separator()

            # Properties block
            layout.operator("view3d.properties", icon='MENU_PANEL')
            layout.separator()

            # Toggle Objectmode
            layout.operator("object.mode_set", text="Enter Object Mode",
                icon='OBJECT_DATA')

            # Delete block
            layout.operator("object.delete", text="Delete Object",
                icon='CANCEL')

        # Redundant re-binding of the same context; kept as in original.
        ob = context
        if ob.mode == 'PAINT_WEIGHT':
            # Weight paint menu

            # Search Menu
            layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
            layout.separator()

            # Transform block
            layout.menu("VIEW3D_MT_TransformMenu", icon='MANIPUL')

            # Cursor block
            layout.menu("VIEW3D_MT_CursorMenu", icon='CURSOR')
            layout.separator()

            # Weight Paint block
            layout.menu("VIEW3D_MT_paint_weight", icon='WPAINT_HLT')
            layout.separator()

            # History/Cursor Block
            layout.menu("VIEW3D_MT_undoS", icon='ARROW_LEFTRIGHT')

            # Toolshelf block
            layout.operator("view3d.toolshelf", icon='MENU_PANEL')
            layout.separator()

            # Properties block
            layout.operator("view3d.properties", icon='MENU_PANEL')
            layout.separator()

            # Toggle Objectmode
            layout.operator("object.mode_set", text="Enter Object Mode",
                icon='OBJECT_DATA')

        elif ob.mode == 'PAINT_VERTEX':
            # Vertex paint menu

            # Search Menu
            layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
            layout.separator()

            # Transform block
            layout.menu("VIEW3D_MT_TransformMenu", icon='MANIPUL')

            # Cursor block
            layout.menu("VIEW3D_MT_CursorMenu", icon='CURSOR')
            layout.separator()

            # Vertex Paint block
            layout.operator("paint.vertex_color_set", icon='VPAINT_HLT')
            layout.separator()

            # History/Cursor Block
            layout.menu("VIEW3D_MT_undoS", icon='ARROW_LEFTRIGHT')

            # Toolshelf block
            layout.operator("view3d.toolshelf", icon='MENU_PANEL')
            layout.separator()

            # Properties block
            layout.operator("view3d.properties", icon='MENU_PANEL')
            layout.separator()

            # Toggle Objectmode
            layout.operator("object.mode_set", text="Enter Object Mode",
                icon='OBJECT_DATA')

        elif ob.mode == 'PAINT_TEXTURE':
            # Texture paint menu

            # Search Menu
            layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
            layout.separator()

            # Transform block
            layout.menu("VIEW3D_MT_TransformMenu", icon='MANIPUL')

            # Cursor block
            layout.menu("VIEW3D_MT_CursorMenu", icon='CURSOR')
            layout.separator()

            # History/Cursor Block
            layout.menu("VIEW3D_MT_undoS", icon='ARROW_LEFTRIGHT')

            # Toolshelf block
            layout.operator("view3d.toolshelf", icon='MENU_PANEL')
            layout.separator()

            # Properties block
            layout.operator("view3d.properties", icon='MENU_PANEL')
            layout.separator()

            # Toggle Objectmode
            layout.operator("object.mode_set", text="Enter Object Mode",
                icon='OBJECT_DATA')

        elif ob.mode == 'SCULPT':
            # Sculpt menu

            # Search Menu
            layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
            layout.separator()

            # Transform block
            layout.menu("VIEW3D_MT_TransformMenu", icon='MANIPUL')

            # Mirror block
            layout.menu("VIEW3D_MT_MirrorMenu", icon='MOD_MIRROR')

            # Cursor block
            layout.menu("VIEW3D_MT_CursorMenu", icon='CURSOR')
            layout.separator()

            # Sculpt block
            layout.menu("VIEW3D_MT_sculpt", icon='SCULPTMODE_HLT')
            layout.separator()

            # History/Cursor Block
            layout.menu("VIEW3D_MT_undoS", icon='ARROW_LEFTRIGHT')

            # Toolshelf block
            layout.operator("view3d.toolshelf", icon='MENU_PANEL')
            layout.separator()

            # Properties block
            layout.operator("view3d.properties", icon='MENU_PANEL')
            layout.separator()

            # Toggle Objectmode
            layout.operator("object.mode_set", text="Enter Object Mode",
                icon='OBJECT_DATA')

        elif ob.mode == 'EDIT_ARMATURE':
            # Armature menu

            # Search Menu
            layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
            layout.separator()

            # Transform block
            layout.menu("VIEW3D_MT_TransformMenu", icon='MANIPUL')

            # Mirror block
            layout.menu("VIEW3D_MT_MirrorMenu", icon='MOD_MIRROR')

            # Cursor block
            layout.menu("VIEW3D_MT_CursorMenu", icon='CURSOR')
            layout.separator()

            # Proportional block
            layout.prop_menu_enum(settings, "proportional_edit",
                icon="PROP_CON")
            layout.prop_menu_enum(settings, "proportional_edit_falloff",
                icon="SMOOTHCURVE")
            layout.separator()

            # Edit Armature roll
            layout.menu("VIEW3D_MT_edit_armature_roll",
                icon='BONE_DATA')
            layout.separator()

            # Edit Armature Toolkit
            layout.menu("VIEW3D_MT_EditArmatureTK",
                icon='ARMATURE_DATA')
            layout.separator()

            # Edit Armature Name
            layout.menu("VIEW3D_MT_ArmatureName",
                icon='NEW')
            layout.separator()

            # Parent block
            layout.menu("VIEW3D_MT_ParentMenu", icon='ROTACTIVE')

            layout.separator()

            layout.menu("VIEW3D_MT_bone_options_toggle",
                text="Bone Settings")

            # Edit Armature Specials
            layout.menu("VIEW3D_MT_armature_specials", icon='MODIFIER')
            layout.separator()

            # Edit Armature Select
            layout.menu("VIEW3D_MT_SelectArmatureMenu",
                icon='RESTRICT_SELECT_OFF')

            # Toolshelf block
            layout.operator("view3d.toolshelf", icon='MENU_PANEL')
            layout.separator()

            # Properties block
            layout.operator("view3d.properties", icon='MENU_PANEL')
            layout.separator()

            # Toggle Posemode
            layout.operator("object.posemode_toggle", text="Enter Pose Mode",
                icon='POSE_HLT')

            # Toggle Posemode
            layout.operator("object.editmode_toggle", text="Enter Object Mode",
                icon='OBJECT_DATA')

            # Delete block
            layout.operator("armature.delete", text="Delete Object",
                icon='CANCEL')


        if context.mode == 'POSE':
            # Pose mode menu
            arm = context.active_object.data

            # Search Menu
            layout.operator("wm.search_menu", text="Search", icon='VIEWZOOM')
            layout.separator()

            # Transform Menu
            layout.menu("VIEW3D_MT_TransformMenu", icon='MANIPUL')

            # Clear Transform
            layout.menu("VIEW3D_MT_pose_transform")

            # Cursor Menu
            layout.menu("VIEW3D_MT_CursorMenu", icon='CURSOR')
            layout.separator()

            # Pose Copy Block
            layout.menu("VIEW3D_MT_PoseCopy", icon='FILE')
            layout.separator()


            if arm.draw_type in ('BBONE', 'ENVELOPE'):
                layout.operator("transform.transform",
                    text="Scale Envelope Distance").mode = 'BONE_SIZE'

            layout.menu("VIEW3D_MT_pose_apply")
            layout.separator()

            layout.operator("pose.relax")
            layout.separator()

            layout.menu("VIEW3D_MT_KeyframeMenu")
            layout.separator()

            layout.menu("VIEW3D_MT_pose_pose")
            layout.menu("VIEW3D_MT_pose_motion")
            layout.menu("VIEW3D_MT_pose_group")
            layout.separator()

            layout.menu("VIEW3D_MT_pose_ik")
            layout.menu("VIEW3D_MT_PoseNames")
            layout.separator()

            layout.menu("VIEW3D_MT_pose_constraints")
            layout.separator()

            layout.operator("pose.quaternions_flip")
            layout.separator()

            layout.operator_context = 'INVOKE_AREA'
            layout.operator("pose.armature_layers",
                text="Change Armature Layers...")
            layout.operator("pose.bone_layers", text="Change Bone Layers...")
            layout.separator()

            layout.menu("VIEW3D_MT_pose_showhide")
            layout.menu("VIEW3D_MT_bone_options_toggle",
                text="Bone Settings")
            layout.separator()

            # Select Pose Block
            layout.menu("VIEW3D_MT_SelectPoseMenu", icon='RESTRICT_SELECT_OFF')

            # Toolshelf block
            layout.operator("view3d.toolshelf", icon='MENU_PANEL')
            layout.separator()

            # Properties block
            layout.operator("view3d.properties", icon='MENU_PANEL')
            layout.separator()

            # Toggle Editmode
            layout.operator("object.editmode_toggle", text="Enter Edit Mode",
                icon='EDITMODE_HLT')

            layout.operator("object.mode_set", text="Enter Object Mode",
                icon='OBJECT_DATA').mode='OBJECT'
+
+
class VIEW3D_MT_AddMenu(bpy.types.Menu):
    """Object-mode Add menu: creation entries for every object type."""
    bl_label = "Add Object Menu"

    def draw(self, context):
        layout = self.layout
        # Invoke in the region so interactive add operators see the mouse.
        layout.operator_context = 'INVOKE_REGION_WIN'

        layout.menu("INFO_MT_mesh_add", text="Add Mesh",
            icon='OUTLINER_OB_MESH')
        layout.menu("INFO_MT_curve_add", text="Add Curve",
            icon='OUTLINER_OB_CURVE')
        layout.menu("INFO_MT_surface_add", text="Add Surface",
            icon='OUTLINER_OB_SURFACE')
        layout.operator_menu_enum("object.metaball_add", "type",
            icon='OUTLINER_OB_META')
        layout.operator("object.text_add", text="Add Text",
            icon='OUTLINER_OB_FONT')
        layout.separator()
        layout.menu("INFO_MT_armature_add", text="Add Armature",
            icon='OUTLINER_OB_ARMATURE')
        layout.operator("object.add", text="Lattice",
            icon='OUTLINER_OB_LATTICE').type = 'LATTICE'
        layout.separator()
        layout.operator("object.add", text="Add Empty",
            icon='OUTLINER_OB_EMPTY')
        layout.separator()

        layout.operator("object.camera_add", text="Camera",
            icon='OUTLINER_OB_CAMERA')
        layout.operator_menu_enum("object.lamp_add", "type",
            icon="OUTLINER_OB_LAMP")
        layout.separator()

        layout.operator_menu_enum("object.effector_add", "type",
            text="Force Field",
            icon='OUTLINER_OB_EMPTY')
        layout.operator_menu_enum("object.group_instance_add", "group",
            text="Group Instance",
            icon='OUTLINER_OB_EMPTY')
+
+
class VIEW3D_MT_TransformMenu(bpy.types.Menu):
    """Transform operators plus origin-set helpers."""
    bl_label = "Transform Menu"

    # TODO: get rid of the custom text strings?
    def draw(self, context):
        layout = self.layout

        layout.operator("transform.translate", text="Grab/Move")
        # TODO: sub-menu for grab per axis
        layout.operator("transform.rotate", text="Rotate")
        # TODO: sub-menu for rot per axis
        layout.operator("transform.resize", text="Scale")
        # TODO: sub-menu for scale per axis
        layout.separator()

        layout.operator("transform.tosphere", text="To Sphere")
        layout.operator("transform.shear", text="Shear")
        layout.operator("transform.warp", text="Warp")
        layout.operator("transform.push_pull", text="Push/Pull")
        # Armature edit mode gets its own align operator.
        if context.edit_object and context.edit_object.type == 'ARMATURE':
            layout.operator("armature.align")
        else:
            layout.operator_context = 'EXEC_REGION_WIN'
            # @todo vvv See alignmenu() in edit.c of b2.4x to get this working.
            layout.operator("transform.transform",
                text="Align to Transform Orientation").mode = 'ALIGN'
        layout.separator()

        layout.operator_context = 'EXEC_AREA'

        layout.operator("object.origin_set",
            text="Geometry to Origin").type = 'GEOMETRY_ORIGIN'
        layout.operator("object.origin_set",
            text="Origin to Geometry").type = 'ORIGIN_GEOMETRY'
        layout.operator("object.origin_set",
            text="Origin to 3D Cursor").type = 'ORIGIN_CURSOR'
+
+
class VIEW3D_MT_MirrorMenu(bpy.types.Menu):
    """Mirror operators: interactive plus per-axis global/local entries."""
    bl_label = "Mirror Menu"

    # Constraint-axis flags for the X, Y and Z entries, in menu order.
    _AXES = (
        ("X", (True, False, False)),
        ("Y", (False, True, False)),
        ("Z", (False, False, True)),
    )

    def draw(self, context):
        layout = self.layout

        layout.operator("transform.mirror", text="Interactive Mirror")
        layout.separator()

        layout.operator_context = 'INVOKE_REGION_WIN'

        for label, axis in self._AXES:
            props = layout.operator("transform.mirror",
                text="%s Global" % label)
            props.constraint_axis = axis
            props.constraint_orientation = 'GLOBAL'

        # Local-space variants only make sense while editing an object.
        if context.edit_object:

            layout.separator()

            for label, axis in self._AXES:
                props = layout.operator("transform.mirror",
                    text="%s Local" % label)
                props.constraint_axis = axis
                props.constraint_orientation = 'LOCAL'

            layout.operator("object.vertex_group_mirror")
+
class VIEW3D_MT_ParentMenu(bpy.types.Menu):
    """Set or clear the parent of the selected objects."""
    bl_label = "Parent Menu"

    def draw(self, context):
        lay = self.layout
        for op, label in (("object.parent_set", "Set"),
                          ("object.parent_clear", "Clear")):
            lay.operator(op, text=label)
+
class VIEW3D_MT_GroupMenu(bpy.types.Menu):
    """Create groups and add/remove objects from them."""
    bl_label = "Group Menu"

    def draw(self, context):
        lay = self.layout

        for op in ("group.create", "group.objects_remove"):
            lay.operator(op)
        lay.separator()

        for op in ("group.objects_add_active",
                   "group.objects_remove_active"):
            lay.operator(op)
+
class VIEW3D_MT_AlignMenu(bpy.types.Menu):
    """View-alignment helpers: align view to selection, camera or cursor."""
    bl_label = "Align Menu"

    def draw(self, context):
        layout = self.layout

        layout.menu("VIEW3D_MT_view_align_selected")
        layout.separator()

        layout.operator("view3d.view_all",
            text="Center Cursor and View All").center = True
        layout.operator("view3d.camera_to_view",
            text="Align Active Camera to View")
        layout.operator("view3d.view_selected")
        layout.operator("view3d.view_center_cursor")
+
class VIEW3D_MT_SelectMenu(bpy.types.Menu):
    """Object-mode selection tools."""
    bl_label = "Select Menu"

    def draw(self, context):
        layout = self.layout
        layout.operator_context = 'INVOKE_REGION_WIN'

        layout.operator("view3d.select_border")
        layout.operator("view3d.select_circle")
        layout.separator()

        layout.operator("object.select_all", text="Select/Deselect All")
        layout.operator("object.select_inverse", text="Inverse")
        layout.operator("object.select_random", text="Random")
        layout.operator("object.select_mirror", text="Mirror")
        layout.operator("object.select_by_layer", text="Select All by Layer")
        layout.operator_menu_enum("object.select_by_type", "type",
            text="Select All by Type...")
        layout.operator("object.select_camera", text="Select Camera")
        layout.separator()

        layout.operator_menu_enum("object.select_grouped", "type",
            text="Grouped")
        layout.operator_menu_enum("object.select_linked", "type",
            text="Linked")
        layout.operator("object.select_pattern", text="Select Pattern...")
+
class VIEW3D_MT_SelectEditMenu(bpy.types.Menu):
    """Edit-mesh selection tools."""
    bl_label = "Select Menu"

    def draw(self, context):
        layout = self.layout

        layout.operator("view3d.select_border")
        layout.operator("view3d.select_circle")
        layout.separator()

        layout.operator("mesh.select_all", text="Select/Deselect All")
        layout.operator("mesh.select_inverse", text="Inverse")
        layout.separator()

        layout.operator("mesh.select_random", text="Random")
        layout.operator("mesh.select_nth", text="Every N Number of Verts")
        layout.operator("mesh.edges_select_sharp", text="Sharp Edges")
        layout.operator("mesh.faces_select_linked_flat",
            text="Linked Flat Faces")
        layout.operator("mesh.faces_select_interior", text="Interior Faces")
        layout.operator("mesh.select_axis", text="Side of Active")
        layout.separator()

        layout.operator("mesh.select_by_number_vertices",
            text="Triangles").type = 'TRIANGLES'
        layout.operator("mesh.select_by_number_vertices",
            text="Quads").type = 'QUADS'
        # Non-manifold selection is only meaningful outside face-select
        # mode (mesh_select_mode[2] is the face flag).
        # (Was "== False"; use the boolean directly.)
        if not context.scene.tool_settings.mesh_select_mode[2]:
            layout.operator("mesh.select_non_manifold",
                text="Non Manifold")
        layout.operator("mesh.select_by_number_vertices",
            text="Loose Verts/Edges").type = 'OTHER'
        layout.operator("mesh.select_similar", text="Similar")
        layout.separator()

        layout.operator("mesh.select_less", text="Less")
        layout.operator("mesh.select_more", text="More")
        layout.separator()

        layout.operator("mesh.select_mirror", text="Mirror")

        layout.operator("mesh.select_linked", text="Linked")
        layout.operator("mesh.select_vertex_path", text="Vertex Path")
        layout.operator("mesh.loop_multi_select", text="Edge Loop")
        layout.operator("mesh.loop_multi_select", text="Edge Ring").ring = True
        layout.separator()

        layout.operator("mesh.loop_to_region")
        layout.operator("mesh.region_to_loop")
+
class VIEW3D_MT_SelectCurveMenu(bpy.types.Menu):
    """Curve edit-mode selection tools."""
    bl_label = "Select Menu"

    def draw(self, context):
        lay = self.layout

        lay.operator("view3d.select_border")
        lay.operator("view3d.select_circle")
        lay.separator()

        lay.operator("curve.select_all", text="Select/Deselect All")
        for op in ("select_inverse", "select_random", "select_nth"):
            lay.operator("curve." + op)
        lay.separator()

        # First/last control point and stepping along the spline.
        for op in ("de_select_first", "de_select_last",
                   "select_next", "select_previous"):
            lay.operator("curve." + op)
        lay.separator()

        lay.operator("curve.select_more")
        lay.operator("curve.select_less")
+
class VIEW3D_MT_SelectArmatureMenu(bpy.types.Menu):
    """Armature edit-mode selection tools."""
    bl_label = "Select Menu"

    def draw(self, context):
        layout = self.layout

        layout.operator("view3d.select_border")
        layout.separator()

        layout.operator("armature.select_all", text="Select/Deselect All")
        layout.operator("armature.select_inverse", text="Inverse")
        layout.separator()

        layout.operator("armature.select_hierarchy",
            text="Parent").direction = 'PARENT'
        layout.operator("armature.select_hierarchy",
            text="Child").direction = 'CHILD'
        layout.separator()

        # "Extend" variants add to the current selection instead of
        # replacing it.
        props = layout.operator("armature.select_hierarchy",
            text="Extend Parent")
        props.extend = True
        props.direction = 'PARENT'

        props = layout.operator("armature.select_hierarchy",
            text="Extend Child")
        props.extend = True
        props.direction = 'CHILD'

        layout.operator("object.select_pattern", text="Select Pattern...")
+
+
class VIEW3D_MT_SelectPoseMenu(bpy.types.Menu):
    """Pose-mode selection tools."""
    bl_label = "Select Menu"

    def draw(self, context):
        layout = self.layout

        layout.operator("view3d.select_border")
        layout.separator()

        layout.operator("pose.select_all", text="Select/Deselect All")
        layout.operator("pose.select_inverse", text="Inverse")
        layout.operator("pose.select_constraint_target",
            text="Constraint Target")
        layout.operator("pose.select_linked", text="Linked")
        layout.separator()

        layout.operator("pose.select_hierarchy",
            text="Parent").direction = 'PARENT'
        layout.operator("pose.select_hierarchy",
            text="Child").direction = 'CHILD'
        layout.separator()

        # "Extend" variants add to the current selection instead of
        # replacing it.
        props = layout.operator("pose.select_hierarchy", text="Extend Parent")
        props.extend = True
        props.direction = 'PARENT'

        props = layout.operator("pose.select_hierarchy", text="Extend Child")
        props.extend = True
        props.direction = 'CHILD'
        layout.separator()

        layout.operator_menu_enum("pose.select_grouped", "type",
            text="Grouped")
        layout.operator("object.select_pattern", text="Select Pattern...")
+
class VIEW3D_MT_PoseCopy(bpy.types.Menu):
    """Copy/paste the current pose, including an X-flipped paste."""
    bl_label = "Pose Copy"

    def draw(self, context):
        lay = self.layout

        lay.operator("pose.copy")
        lay.operator("pose.paste")
        flipped = lay.operator("pose.paste", text="Paste X-Flipped Pose")
        flipped.flipped = True
        lay.separator()
+
class VIEW3D_MT_PoseNames(bpy.types.Menu):
    """Bone auto-naming helpers for pose mode."""
    # Was "Pose Copy" — a copy-paste slip from the class above; this menu
    # deals with bone names.
    bl_label = "Pose Names"

    def draw(self, context):
        layout = self.layout

        layout.operator_context = 'EXEC_AREA'
        layout.operator("pose.autoside_names",
            text="AutoName Left/Right").axis = 'XAXIS'
        layout.operator("pose.autoside_names",
            text="AutoName Front/Back").axis = 'YAXIS'
        layout.operator("pose.autoside_names",
            text="AutoName Top/Bottom").axis = 'ZAXIS'

        layout.operator("pose.flip_names")
+
+
class VIEW3D_MT_SelectSurface(bpy.types.Menu):
    """Surface edit-mode selection tools (surfaces reuse curve operators)."""
    bl_label = "Select Menu"

    def draw(self, context):
        lay = self.layout

        # Each inner tuple is one separator-delimited menu section;
        # entries are (operator idname, optional text override).
        sections = (
            (("view3d.select_border", None),
             ("view3d.select_circle", None)),
            (("curve.select_all", "Select/Deselect All"),
             ("curve.select_inverse", None),
             ("curve.select_random", None),
             ("curve.select_nth", None)),
            (("curve.select_row", None),),
            (("curve.select_more", None),
             ("curve.select_less", None)),
        )

        for index, section in enumerate(sections):
            if index:
                lay.separator()
            for op, text in section:
                if text is None:
                    lay.operator(op)
                else:
                    lay.operator(op, text=text)
+
+class VIEW3D_MT_SelectMetaball(bpy.types.Menu):
+ bl_label = "Select Menu"
+
+ def draw(self, context):
+ layout = self.layout
+
+ layout.operator("view3d.select_border")
+
+ layout.separator()
+
+ layout.operator("mball.select_all").action = 'TOGGLE'
+ layout.operator("mball.select_inverse_metaelems")
+ layout.operator("mball.select_random_metaelems")
+
+class VIEW3D_MT_edit_TK(bpy.types.Menu):
+ bl_label = "Edit Mesh Tools"
+
+ def draw(self, context):
+ layout = self.layout
+ row = layout.row()
+
+ layout.operator_context = 'INVOKE_REGION_WIN'
+
+ layout.menu("VIEW3D_MT_edit_mesh_vertices", icon='VERTEXSEL')
+ layout.menu("VIEW3D_MT_edit_mesh_edges", icon='EDGESEL')
+ layout.menu("VIEW3D_MT_edit_mesh_faces", icon='FACESEL')
+ layout.separator()
+ layout.menu("VIEW3D_MT_edit_mesh_normals", icon='META_DATA')
+ layout.operator("mesh.loopcut_slide",
+ text="Loopcut", icon='EDIT_VEC')
+
+
+
+class VIEW3D_MT_edit_multi(bpy.types.Menu):
+ bl_label = "Multi Select"
+
+ def draw(self, context):
+ layout = self.layout
+ layout.operator_context = 'INVOKE_REGION_WIN'
+
+ layout.separator()
+ prop = layout.operator("wm.context_set_value", text="Vertex Select",
+ icon='VERTEXSEL')
+ prop.value = "(True, False, False)"
+ prop.data_path = "tool_settings.mesh_select_mode"
+
+ prop = layout.operator("wm.context_set_value", text="Edge Select",
+ icon='EDGESEL')
+ prop.value = "(False, True, False)"
+ prop.data_path = "tool_settings.mesh_select_mode"
+
+ prop = layout.operator("wm.context_set_value", text="Face Select",
+ icon='FACESEL')
+ prop.value = "(False, False, True)"
+ prop.data_path = "tool_settings.mesh_select_mode"
+ layout.separator()
+
+ prop = layout.operator("wm.context_set_value",
+ text="Vertex & Edge Select", icon='EDITMODE_HLT')
+ prop.value = "(True, True, False)"
+ prop.data_path = "tool_settings.mesh_select_mode"
+
+ prop = layout.operator("wm.context_set_value",
+ text="Vertex & Face Select", icon='ORTHO')
+ prop.value = "(True, False, True)"
+ prop.data_path = "tool_settings.mesh_select_mode"
+
+ prop = layout.operator("wm.context_set_value",
+ text="Edge & Face Select", icon='SNAP_FACE')
+ prop.value = "(False, True, True)"
+ prop.data_path = "tool_settings.mesh_select_mode"
+ layout.separator()
+
+ prop = layout.operator("wm.context_set_value",
+ text="Vertex & Edge & Face Select", icon='SNAP_VOLUME')
+ prop.value = "(True, True, True)"
+ prop.data_path = "tool_settings.mesh_select_mode"
+
+class VIEW3D_MT_editM_Edge(bpy.types.Menu):
+ bl_label = "Edges"
+
+ def draw(self, context):
+ layout = self.layout
+ layout.operator_context = 'INVOKE_REGION_WIN'
+
+ layout.operator("mesh.mark_seam")
+ layout.operator("mesh.mark_seam", text="Clear Seam").clear = True
+ layout.separator()
+
+ layout.operator("mesh.mark_sharp")
+ layout.operator("mesh.mark_sharp", text="Clear Sharp").clear = True
+ layout.operator("mesh.extrude_move_along_normals", text="Extrude")
+ layout.separator()
+
+ layout.operator("mesh.edge_rotate",
+ text="Rotate Edge CW").direction = 'CW'
+ layout.operator("mesh.edge_rotate",
+ text="Rotate Edge CCW").direction = 'CCW'
+ layout.separator()
+
+ layout.operator("TFM_OT_edge_slide", text="Edge Slide")
+ layout.operator("mesh.loop_multi_select", text="Edge Loop")
+ layout.operator("mesh.loop_multi_select", text="Edge Ring").ring = True
+ layout.operator("mesh.loop_to_region")
+ layout.operator("mesh.region_to_loop")
+
+
+class VIEW3D_MT_EditCurveCtrlpoints(bpy.types.Menu):
+ bl_label = "Control Points"
+
+ def draw(self, context):
+ layout = self.layout
+
+ edit_object = context.edit_object
+
+ if edit_object.type == 'CURVE':
+ layout.operator("transform.transform").mode = 'TILT'
+ layout.operator("curve.tilt_clear")
+ layout.operator("curve.separate")
+
+ layout.separator()
+
+ layout.operator_menu_enum("curve.handle_type_set", "type")
+
+ layout.separator()
+
+ layout.menu("VIEW3D_MT_hook")
+
+
+class VIEW3D_MT_EditCurveSegments(bpy.types.Menu):
+ bl_label = "Curve Segments"
+
+ def draw(self, context):
+ layout = self.layout
+
+ layout.operator("curve.subdivide")
+ layout.operator("curve.switch_direction")
+
+class VIEW3D_MT_EditCurveSpecials(bpy.types.Menu):
+ bl_label = "Specials"
+
+ def draw(self, context):
+ layout = self.layout
+
+ layout.operator("curve.subdivide")
+ layout.operator("curve.switch_direction")
+ layout.operator("curve.spline_weight_set")
+ layout.operator("curve.radius_set")
+ layout.operator("curve.smooth")
+ layout.operator("curve.smooth_radius")
+
+class VIEW3D_MT_EditArmatureTK(bpy.types.Menu):
+ bl_label = "Armature Tools"
+
+ def draw(self, context):
+ layout = self.layout
+
+ # Edit Armature
+
+ layout.operator("transform.transform",
+ text="Scale Envelope Distance").mode = 'BONE_SIZE'
+
+ layout.operator("transform.transform",
+ text="Scale B-Bone Width").mode = 'BONE_SIZE'
+ layout.separator()
+
+ layout.operator("armature.extrude_move")
+
+ layout.operator("armature.extrude_forked")
+
+ layout.operator("armature.duplicate_move")
+ layout.operator("armature.merge")
+ layout.operator("armature.fill")
+ layout.operator("armature.delete")
+ layout.operator("armature.separate")
+
+ layout.separator()
+
+ layout.operator("armature.subdivide", text="Subdivide")
+ layout.operator("armature.switch_direction", text="Switch Direction")
+
+class VIEW3D_MT_ArmatureName(bpy.types.Menu):
+ bl_label = "Armature Name"
+
+ def draw(self, context):
+ layout = self.layout
+
+ layout.operator_context = 'EXEC_AREA'
+ layout.operator("armature.autoside_names",
+ text="AutoName Left/Right").type = 'XAXIS'
+ layout.operator("armature.autoside_names",
+ text="AutoName Front/Back").type = 'YAXIS'
+ layout.operator("armature.autoside_names",
+ text="AutoName Top/Bottom").type = 'ZAXIS'
+ layout.operator("armature.flip_names")
+ layout.separator()
+
+class VIEW3D_MT_KeyframeMenu(bpy.types.Menu):
+ bl_label = "Keyframe Menu"
+
+ def draw(self, context):
+ layout = self.layout
+
+ # Keyframe Bleck
+ layout.operator("anim.keyframe_insert_menu",
+ text="Insert Keyframe...")
+ layout.operator("anim.keyframe_delete_v3d",
+ text="Delete Keyframe...")
+ layout.operator("anim.keying_set_active_set",
+ text="Change Keying Set...")
+ layout.separator()
+
+# Classes for VIEW3D_MT_CursorMenu()
+class VIEW3D_OT_pivot_cursor(bpy.types.Operator):
+ "Cursor as Pivot Point"
+ bl_idname = "view3d.pivot_cursor"
+ bl_label = "Cursor as Pivot Point"
+
+ @classmethod
+ def poll(cls, context):
+ return bpy.context.space_data.pivot_point != 'CURSOR'
+
+ def execute(self, context):
+ bpy.context.space_data.pivot_point = 'CURSOR'
+ return {'FINISHED'}
+
+class VIEW3D_OT_revert_pivot(bpy.types.Operator):
+ "Revert Pivot Point"
+ bl_idname = "view3d.revert_pivot"
+ bl_label = "Reverts Pivot Point to median"
+
+ @classmethod
+ def poll(cls, context):
+ return bpy.context.space_data.pivot_point != 'MEDIAN_POINT'
+
+ def execute(self, context):
+ bpy.context.space_data.pivot_point = 'MEDIAN_POINT'
+ # @todo Change this to 'BOUDNING_BOX_CENTER' if needed...
+ return{'FINISHED'}
+
+class VIEW3D_MT_CursorMenu(bpy.types.Menu):
+ bl_label = "Snap Cursor Menu"
+
+ def draw(self, context):
+
+ layout = self.layout
+ layout.operator_context = 'INVOKE_REGION_WIN'
+ layout.operator("view3d.snap_cursor_to_selected",
+ text="Cursor to Selected")
+ layout.operator("view3d.snap_cursor_to_center",
+ text="Cursor to Center")
+ layout.operator("view3d.snap_cursor_to_grid",
+ text="Cursor to Grid")
+ layout.operator("view3d.snap_cursor_to_active",
+ text="Cursor to Active")
+ layout.separator()
+ layout.operator("view3d.snap_selected_to_cursor",
+ text="Selection to Cursor")
+ layout.operator("view3d.snap_selected_to_grid",
+ text="Selection to Grid")
+ layout.separator()
+ layout.operator("view3d.pivot_cursor",
+ text="Set Cursor as Pivot Point")
+ layout.operator("view3d.revert_pivot",
+ text="Revert Pivot Point")
+
+class VIEW3D_MT_EditCursorMenu(bpy.types.Menu):
+ bl_label = "Snap Cursor Menu"
+
+ def draw(self, context):
+
+ layout = self.layout
+ layout.operator_context = 'INVOKE_REGION_WIN'
+ layout.operator("view3d.snap_cursor_to_selected",
+ text="Cursor to Selected")
+ layout.operator("view3d.snap_cursor_to_center",
+ text="Cursor to Center")
+ layout.operator("view3d.snap_cursor_to_grid",
+ text="Cursor to Grid")
+ layout.operator("view3d.snap_cursor_to_active",
+ text="Cursor to Active")
+ layout.separator()
+ layout.operator("view3d.snap_selected_to_cursor",
+ text="Selection to Cursor")
+ layout.operator("view3d.snap_selected_to_grid",
+ text="Selection to Grid")
+ layout.separator()
+ layout.operator("view3d.pivot_cursor",
+ text="Set Cursor as Pivot Point")
+ layout.operator("view3d.revert_pivot",
+ text="Revert Pivot Point")
+ layout.operator("view3d.snap_cursor_to_edge_intersection",
+ text="Cursor to Edge Intersection")
+ layout.operator("transform.snap_type", text="Snap Tools",
+ icon='SNAP_ON')
+
+def abs(val):
+ if val > 0:
+ return val
+ return -val
+
+def edgeIntersect(context, operator):
+ from mathutils.geometry import intersect_line_line
+
+ obj = context.active_object
+
+ if (obj.type != "MESH"):
+ operator.report({'ERROR'}, "Object must be a mesh")
+ return None
+
+ edges = []
+ mesh = obj.data
+ verts = mesh.vertices
+
+ is_editmode = (obj.mode == 'EDIT')
+ if is_editmode:
+ bpy.ops.object.mode_set(mode='OBJECT')
+
+ for e in mesh.edges:
+ if e.select:
+ edges.append(e)
+
+ if len(edges) > 2:
+ break
+
+ if is_editmode:
+ bpy.ops.object.mode_set(mode='EDIT')
+
+ if len(edges) != 2:
+ operator.report({'ERROR'},
+ "Operator requires exactly 2 edges to be selected.")
+ return
+
+ line = intersect_line_line(verts[edges[0].vertices[0]].co,
+ verts[edges[0].vertices[1]].co,
+ verts[edges[1].vertices[0]].co,
+ verts[edges[1].vertices[1]].co)
+
+ if line is None:
+ operator.report({'ERROR'}, "Selected edges do not intersect.")
+ return
+
+ point = line[0].lerp(line[1], 0.5)
+ context.scene.cursor_location = point * obj.matrix_world
+
+class VIEW3D_OT_CursorToEdgeIntersection(bpy.types.Operator):
+ "Finds the mid-point of the shortest distance between two edges"
+
+ bl_idname = "view3d.snap_cursor_to_edge_intersection"
+ bl_label = "Cursor to Edge Intersection"
+
+ @classmethod
+ def poll(cls, context):
+ obj = context.active_object
+ return obj != None and obj.type == 'MESH'
+
+ def execute(self, context):
+ edgeIntersect(context, self)
+ return {'FINISHED'}
+
+class VIEW3D_MT_undoS(bpy.types.Menu):
+ bl_label = "Undo/Redo"
+
+ def draw(self, context):
+ layout = self.layout
+ layout.operator_context = 'INVOKE_REGION_WIN'
+ layout.operator("ed.undo", icon='TRIA_LEFT')
+ layout.operator("ed.redo", icon='TRIA_RIGHT')
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ km = bpy.context.window_manager.keyconfigs.default.keymaps['3D View']
+ kmi = km.keymap_items.new('wm.call_menu', 'SPACE', 'PRESS')
+ kmi.properties.name = "VIEW3D_MT_Space_Dynamic_Menu"
+
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+ km = bpy.context.window_manager.keyconfigs.default.keymaps['3D View']
+ for kmi in km.keymap_items:
+ if kmi.idname == 'wm.call_menu':
+ if kmi.properties.name == "VIEW3D_MT_Space_Dynamic_Menu":
+ km.keymap_items.remove(kmi)
+ break
+
+if __name__ == "__main__":
+ register()
diff --git a/system_blend_info.py b/system_blend_info.py
new file mode 100644
index 00000000..6ba16738
--- /dev/null
+++ b/system_blend_info.py
@@ -0,0 +1,211 @@
+# scene_blend_info.py Copyright (C) 2010, Mariano Hidalgo
+#
+# Show Information About the Blend.
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
+bl_info = {
+ "name": "Scene Information",
+ "author": "uselessdreamer",
+ "version": (0,3),
+ "blender": (2, 5, 7),
+ "api": 35853,
+ "location": "Properties > Scene > Blend Info Panel",
+ "description": "Show information about the .blend",
+ "warning": "",
+ "wiki_url": 'http://wiki.blender.org/index.php/Extensions:2.5/Py/' \
+ 'Scripts/System/Blend Info',
+ "tracker_url": "https://projects.blender.org/tracker/index.php?" \
+ "func=detail&aid=22102",
+ "category": "System"}
+
+import bpy
+
+
+def quantity_string(quantity, text_single, text_plural, text_none=None):
+ sep = " "
+
+ if not text_none:
+ text_none = text_plural
+
+ if quantity == 0:
+ string = str(quantity) + sep + text_none
+
+ if quantity == 1:
+ string = str(quantity) + sep + text_single
+
+ if quantity >= 2:
+ string = str(quantity) + sep + text_plural
+
+ if quantity < 0:
+ return None
+
+ return string
+
+
+class OBJECT_PT_blendinfo(bpy.types.Panel):
+ bl_label = "Blend Info"
+ bl_space_type = "PROPERTIES"
+ bl_region_type = "WINDOW"
+ bl_context = "scene"
+
+ def draw(self, context):
+ amount = 2
+ ob_cols = []
+ db_cols = []
+ etc_cols = []
+
+ objects = bpy.data.objects
+
+ layout = self.layout
+
+ # OBJECTS
+
+ l_row = layout.row()
+ num = len(bpy.data.objects)
+ l_row.label(text=quantity_string(num, "Object", "Objects")
+ + " in the scene:",
+ icon='OBJECT_DATA')
+
+ l_row = layout.row()
+ ob_cols.append(l_row.column())
+ ob_cols.append(l_row.column())
+
+ row = ob_cols[0].row()
+ meshes = [o for o in objects.values() if o.type == 'MESH']
+ num = len(meshes)
+ row.label(text=quantity_string(num, "Mesh", "Meshes"),
+ icon='MESH_DATA')
+
+ row = ob_cols[1].row()
+ curves = [o for o in objects.values() if o.type == 'CURVE']
+ num = len(curves)
+ row.label(text=quantity_string(num, "Curve", "Curves"),
+ icon='CURVE_DATA')
+
+ row = ob_cols[0].row()
+ cameras = [o for o in objects.values() if o.type == 'CAMERA']
+ num = len(cameras)
+ row.label(text=quantity_string(num, "Camera", "Cameras"),
+ icon='CAMERA_DATA')
+
+ row = ob_cols[1].row()
+ lamps = [o for o in objects.values() if o.type == 'LAMP']
+ num = len(lamps)
+ row.label(text=quantity_string(num, "Lamp", "Lamps"),
+ icon='LAMP_DATA')
+
+ row = ob_cols[0].row()
+ armatures = [o for o in objects.values() if o.type == 'ARMATURE']
+ num = len(armatures)
+ row.label(text=quantity_string(num, "Armature", "Armatures"),
+ icon='ARMATURE_DATA')
+
+ row = ob_cols[1].row()
+ lattices = [o for o in objects.values() if o.type == 'LATTICE']
+ num = len(lattices)
+ row.label(text=quantity_string(num, "Lattice", "Lattices"),
+ icon='LATTICE_DATA')
+
+ row = ob_cols[0].row()
+ empties = [o for o in objects.values() if o.type == 'EMPTY']
+ num = len(empties)
+ row.label(text=quantity_string(num, "Empty", "Empties"),
+ icon='EMPTY_DATA')
+
+ l_row_sep = layout.separator()
+
+ # DATABLOCKS
+
+ l_row = layout.row()
+ num = len(bpy.data.objects)
+ l_row.label(text="Datablocks in the scene:")
+
+ l_row = layout.row()
+ db_cols.append(l_row.column())
+ db_cols.append(l_row.column())
+
+ row = db_cols[0].row()
+ num = len(bpy.data.meshes)
+ row.label(text=quantity_string(num, "Mesh", "Meshes"),
+ icon='MESH_DATA')
+
+ row = db_cols[1].row()
+ num = len(bpy.data.curves)
+ row.label(text=quantity_string(num, "Curve", "Curves"),
+ icon='CURVE_DATA')
+
+ row = db_cols[0].row()
+ num = len(bpy.data.cameras)
+ row.label(text=quantity_string(num, "Camera", "Cameras"),
+ icon='CAMERA_DATA')
+
+ row = db_cols[1].row()
+ num = len(bpy.data.lamps)
+ row.label(text=quantity_string(num, "Lamp", "Lamps"),
+ icon='LAMP_DATA')
+
+ row = db_cols[0].row()
+ num = len(bpy.data.armatures)
+ row.label(text=quantity_string(num, "Armature", "Armatures"),
+ icon='ARMATURE_DATA')
+
+ row = db_cols[1].row()
+ num = len(bpy.data.lattices)
+ row.label(text=quantity_string(num, "Lattice", "Lattices"),
+ icon='LATTICE_DATA')
+
+ row = db_cols[0].row()
+ num = len(bpy.data.materials)
+ row.label(text=quantity_string(num, "Material", "Materials"),
+ icon='MATERIAL_DATA')
+
+ row = db_cols[1].row()
+ num = len(bpy.data.worlds)
+ row.label(text=quantity_string(num, "World", "Worlds"),
+ icon='WORLD_DATA')
+
+ row = db_cols[0].row()
+ num = len(bpy.data.textures)
+ row.label(text=quantity_string(num, "Texture", "Textures"),
+ icon='TEXTURE_DATA')
+
+ row = db_cols[1].row()
+ num = len(bpy.data.images)
+ row.label(text=quantity_string(num, "Image", "Images"),
+ icon='IMAGE_DATA')
+
+ row = db_cols[0].row()
+ num = len(bpy.data.texts)
+ row.label(text=quantity_string(num, "Text", "Texts"),
+ icon='TEXT')
+
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ pass
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+ pass
+
+if __name__ == "__main__":
+ register()
diff --git a/system_demo_mode/__init__.py b/system_demo_mode/__init__.py
new file mode 100644
index 00000000..902ed37b
--- /dev/null
+++ b/system_demo_mode/__init__.py
@@ -0,0 +1,206 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+bl_info = {
+ "name": "Demo Mode",
+ "author": "Campbell Barton",
+ "blender": (2, 5, 7),
+ "api": 35622,
+ "location": "Demo Menu",
+ "description": "Demo mode lets you select multiple blend files and loop over them.",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/System/Demo_Mode#Running_Demo_Mode",
+ "tracker_url": "",
+ "support": 'OFFICIAL',
+ "category": "System"}
+
+# To support reload properly, try to access a package var, if it's there, reload everything
+if "bpy" in locals():
+ import imp
+ if "config" in locals():
+ imp.reload(config)
+
+
+import bpy
+from bpy.props import StringProperty, BoolProperty, IntProperty, FloatProperty, EnumProperty
+from bpy_extras.io_utils import ImportHelper
+
+
+class DemoModeSetup(bpy.types.Operator):
+ '''Creates a demo script and optionally executes'''
+ bl_idname = "wm.demo_mode_setup"
+ bl_label = "Demo Mode (Setup)"
+ bl_options = {'PRESET'}
+
+ # List of operator properties, the attributes will be assigned
+ # to the class instance from the operator settings before calling.
+
+ # these are used to create the file list.
+ filepath = StringProperty(name="File Path", description="Filepath used for importing the file", maxlen=1024, default="", subtype='FILE_PATH')
+ random_order = BoolProperty(name="Random Order", description="Select files randomly", default=False)
+ mode = EnumProperty(items=(
+ ('AUTO', "Auto", ""),
+ ('PLAY', "Play", ""),
+ ('RENDER', "Render", ""),
+ ),
+ name="Method")
+
+ run = BoolProperty(name="Run Immediately!", description="Run demo immediately", default=True)
+
+ # these are mapped directly to the config!
+ #
+ # anim
+ # ====
+ anim_cycles = IntProperty(name="Cycles", description="Number of times to play the animation", min=1, max=1000, default=2)
+ anim_time_min = FloatProperty(name="Time Min", description="Minimum number of seconds to show the animation for (for small loops)", min=0.0, max=1000.0, soft_min=1.0, soft_max=1000.0, default=4.0)
+ anim_time_max = FloatProperty(name="Time Max", description="Maximum number of seconds to show the animation for (incase the end frame is very high for no reason)", min=0.0, max=100000000.0, soft_min=1.0, soft_max=100000000.0, default=8.0)
+ anim_screen_switch = FloatProperty(name="Screen Switch", description="Time between switching screens (in seconds) or 0 to disable", min=0.0, max=100000000.0, soft_min=1.0, soft_max=60.0, default=0.0)
+ #
+ # render
+ # ======
+ display_render = FloatProperty(name="Render Delay", description="Time to display the rendered image before moving on (in seconds)", min=0.0, max=60.0, default=4.0)
+ anim_render = BoolProperty(name="Render Anim", description="Render entire animation (render mode only)", default=False)
+
+ def execute(self, context):
+ from . import config
+
+ keywords = self.as_keywords(ignore=("filepath", "random_order", "run"))
+
+ from . import config
+ cfg_str, dirpath = config.as_string(self.filepath, self.random_order, **keywords)
+ text = bpy.data.texts.get("demo.py")
+ if text:
+ text.name += ".back"
+
+ text = bpy.data.texts.new("demo.py")
+ text.from_string(cfg_str)
+
+ if self.run:
+ extern_demo_mode_run()
+
+ return {'FINISHED'}
+
+ def invoke(self, context, event):
+ context.window_manager.fileselect_add(self)
+ return {'RUNNING_MODAL'}
+
+ def check(self, context):
+ return True # lazy
+
+ def draw(self, context):
+ layout = self.layout
+
+ box = layout.box()
+ box.label("Search *.blend recursively")
+ box.label("Writes: demo.py config text.")
+
+ col = layout.column()
+ col.prop(self, "run")
+
+ col.label("Generate Settings:")
+ row = col.row()
+ row.prop(self, "mode", expand=True)
+ col.prop(self, "random_order")
+
+ mode = self.mode
+
+ col.separator()
+ colsub = col.column()
+ colsub.active = (mode in ('AUTO', 'PLAY'))
+ colsub.label("Animate Settings:")
+ colsub.prop(self, "anim_cycles")
+ colsub.prop(self, "anim_time_min")
+ colsub.prop(self, "anim_time_max")
+ colsub.prop(self, "anim_screen_switch")
+
+ col.separator()
+ colsub = col.column()
+ colsub.active = (mode in ('AUTO', 'RENDER'))
+ colsub.label("Render Settings:")
+ colsub.prop(self, "display_render")
+
+
+class DemoModeRun(bpy.types.Operator):
+ bl_idname = "wm.demo_mode_run"
+ bl_label = "Demo Mode (Start)"
+
+ def execute(self, context):
+ if extern_demo_mode_run():
+ return {'FINISHED'}
+ else:
+ self.report({'ERROR'}, "Cant load demo.py config, run: File -> Demo Mode (Setup)")
+ return {'CANCELLED'}
+
+
+# --- call demo_mode.py funcs
+def extern_demo_mode_run():
+ # this accesses demo_mode.py which is kept standalone
+ # and can be run direct.
+ from . import demo_mode
+ if demo_mode.load_config():
+ demo_mode.demo_mode_load_file() # kick starts the modal operator
+ return True
+ else:
+ return False
+
+
+def extern_demo_mode_register():
+ # this accesses demo_mode.py which is kept standalone
+ # and can be run direct.
+ from . import demo_mode
+ demo_mode.register()
+
+
+def extern_demo_mode_unregister():
+ # this accesses demo_mode.py which is kept standalone
+ # and can be run direct.
+ from . import demo_mode
+ demo_mode.unregister()
+
+# --- intergration
+
+
+def menu_func(self, context):
+ layout = self.layout
+ layout.operator(DemoModeSetup.bl_idname, icon='PREFERENCES')
+ layout.operator(DemoModeRun.bl_idname, icon='PLAY')
+ layout.separator()
+
+
+def register():
+ bpy.utils.register_class(DemoModeSetup)
+ bpy.utils.register_class(DemoModeRun)
+
+ bpy.types.INFO_MT_file.prepend(menu_func)
+
+ extern_demo_mode_register()
+
+
+def unregister():
+ bpy.utils.unregister_class(DemoModeSetup)
+ bpy.utils.unregister_class(DemoModeRun)
+
+ bpy.types.INFO_MT_file.remove(menu_func)
+
+ extern_demo_mode_unregister()
+
+if __name__ == "__main__":
+ register()
diff --git a/system_demo_mode/config.py b/system_demo_mode/config.py
new file mode 100644
index 00000000..d39daae1
--- /dev/null
+++ b/system_demo_mode/config.py
@@ -0,0 +1,74 @@
+import os
+
+
+def blend_list(path):
+ for dirpath, dirnames, filenames in os.walk(path):
+
+ # skip '.svn'
+ if dirpath.startswith("."):
+ continue
+
+ for filename in filenames:
+ if filename.lower().endswith(".blend"):
+ filepath = os.path.join(dirpath, filename)
+ yield filepath
+
+
+def generate(dirpath, random_order, **kwargs):
+
+ # incase file is selected!
+ if not os.path.exists(dirpath) or not os.path.isdir(dirpath):
+ dirpath = os.path.dirname(dirpath)
+
+ files = list(blend_list(dirpath))
+ if random_order:
+ import random
+ random.shuffle(files)
+ else:
+ files.sort()
+
+ config = []
+ for f in files:
+ defaults = kwargs.copy()
+ defaults["file"] = f
+ config.append(defaults)
+
+ return config, dirpath
+
+
+def as_string(dirpath, random_order, **kwargs):
+ """ Config loader is in demo_mode.py
+ """
+ cfg, dirpath = generate(dirpath, random_order, **kwargs)
+
+ # hint for reader, can be used if files are not found.
+ cfg_str = []
+ cfg_str += ["# generated file\n"]
+ cfg_str += ["\n"]
+ cfg_str += ["# edit the search path so other systems may find the files below\n"]
+ cfg_str += ["# based on name only if the absolute paths cant be found\n"]
+ cfg_str += ["# Use '//' for current blend file path.\n"]
+ cfg_str += ["\n"]
+ cfg_str += ["search_path = %r\n" % dirpath]
+ cfg_str += ["\n"]
+
+ # All these work but use nicest formatting!
+ if 0: # works but not nice to edit.
+ cfg_str += ["config = %r" % cfg]
+ elif 0:
+ import pprint
+ cfg_str += ["config = %s" % pprint.pformat(cfg, indent=0, width=120)]
+ elif 0:
+ cfg_str += [("config = %r" % cfg).replace("{", "\n {")]
+ else:
+ import pprint
+
+ def dict_as_kw(d):
+ return "dict(%s)" % ", ".join(("%s=%s" % (k, pprint.pformat(v))) for k, v in sorted(d.items()))
+ ident = " "
+ cfg_str += ["config = [\n"]
+ for cfg_item in cfg:
+ cfg_str += ["%s%s,\n" % (ident, dict_as_kw(cfg_item))]
+ cfg_str += ["%s]\n\n" % ident]
+
+ return "".join(cfg_str), dirpath
diff --git a/system_demo_mode/demo_mode.py b/system_demo_mode/demo_mode.py
new file mode 100644
index 00000000..a2e64839
--- /dev/null
+++ b/system_demo_mode/demo_mode.py
@@ -0,0 +1,510 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+'''
+Even though this is in a package this can run as a standalone script.
+
+# --- example usage
+blender --python release/scripts/addons/system_demo_mode/demo_mode.py
+
+looks for demo.py textblock or file in the same path as the blend:
+# --- example
+config = [
+ dict(anim_cycles=1, anim_render=False, anim_screen_switch=0.0, anim_time_max=10.0, anim_time_min=4.0, mode='AUTO', display_render=4.0, file='/l/19534_simplest_mesh_2.blend'),
+ dict(anim_cycles=1, anim_render=False, anim_screen_switch=0.0, anim_time_max=10.0, anim_time_min=4.0, mode='AUTO', display_render=4.0, file='/l/252_pivotConstraint_01.blend'),
+ ]
+# ---
+/data/src/blender/lib/tests/rendering/
+'''
+
+import bpy
+import time
+import tempfile
+import os
+
+DEMO_CFG = "demo.py"
+
+# populate from script
+global_config_files = []
+
+
+global_config = dict(anim_cycles=1,
+ anim_render=False,
+ anim_screen_switch=0.0,
+ anim_time_max=60.0,
+ anim_time_min=4.0,
+ mode='AUTO',
+ display_render=4.0)
+
+# switch to the next file in 2 sec.
+global_config_fallback = dict(anim_cycles=1,
+ anim_render=False,
+ anim_screen_switch=0.0,
+ anim_time_max=60.0,
+ anim_time_min=4.0,
+ mode='AUTO',
+ display_render=4.0)
+
+
+global_state = {
+ "init_time": 0.0,
+ "last_switch": 0.0,
+ "reset_anim": False,
+ "anim_cycles": 0, # count how many times we played the anim
+ "last_frame": -1,
+ "render_out": "",
+ "render_time": "", # time render was finished.
+ "timer": None,
+ "basedir": "", # demo.py is stored here
+ "demo_index": 0,
+}
+
+
+def demo_mode_auto_select():
+
+ play_area = 0
+ render_area = 0
+
+ totimg = 0
+
+ for area in bpy.context.window.screen.areas:
+ size = area.width * area.height
+ if area.type in {'VIEW_3D', 'GRAPH_EDITOR', 'DOPESHEET_EDITOR', 'NLA_EDITOR', 'TIMELINE'}:
+ play_area += size
+ elif area.type in {'IMAGE_EDITOR', 'SEQUENCE_EDITOR', 'NODE_EDITOR'}:
+ render_area += size
+
+ if area.type == 'IMAGE_EDITOR':
+ totimg += 1
+
+    # since our test files have this as a de facto standard
+ scene = bpy.context.scene
+ if totimg >= 2 and (scene.camera or scene.render.use_sequencer):
+ mode = 'RENDER'
+ else:
+ if play_area >= render_area:
+ mode = 'PLAY'
+ else:
+ mode = 'RENDER'
+
+ if 0:
+ return 'PLAY'
+
+ return mode
+
+
+def demo_mode_next_file(step=1):
+ print(global_state["demo_index"])
+ global_state["demo_index"] = (global_state["demo_index"] + step) % len(global_config_files)
+ print(global_state["demo_index"], "....")
+ print("func:demo_mode_next_file", global_state["demo_index"])
+ filepath = global_config_files[global_state["demo_index"]]["file"]
+ bpy.ops.wm.open_mainfile(filepath=filepath)
+
+
+def demo_mode_timer_add():
+ global_state["timer"] = bpy.context.window_manager.event_timer_add(0.8, bpy.context.window)
+
+
+def demo_mode_timer_remove():
+ if global_state["timer"]:
+ bpy.context.window_manager.event_timer_remove(global_state["timer"])
+ global_state["timer"] = None
+
+
+def demo_mode_load_file():
+    """ Take care, this can only do limited functions since it's running
+ before the file is fully loaded.
+ Some operators will crash like playing an animation.
+ """
+ print("func:demo_mode_load_file")
+ DemoMode.first_run = True
+ bpy.ops.wm.demo_mode('EXEC_DEFAULT')
+
+
+def demo_mode_init():
+ print("func:demo_mode_init")
+ DemoKeepAlive.ensure()
+
+ if 1:
+ global_config.clear()
+ global_config.update(global_config_files[global_state["demo_index"]])
+
+ print(global_config)
+
+ demo_mode_timer_add()
+
+ if global_config["mode"] == 'AUTO':
+ global_config["mode"] = demo_mode_auto_select()
+
+ if global_config["mode"] == 'PLAY':
+ global_state["last_frame"] = -1
+ global_state["anim_cycles"] = 0
+ bpy.ops.screen.animation_play()
+
+ elif global_config["mode"] == 'RENDER':
+ print(" render")
+
+ # setup tempfile
+ global_state["render_out"] = tempfile.mkstemp()[1]
+ if os.path.exists(global_state["render_out"]):
+ print(" render!!!")
+ os.remove(global_state["render_out"])
+
+ # setup scene.
+ scene = bpy.context.scene
+ scene.render.filepath = global_state["render_out"]
+ scene.render.file_format = 'AVI_JPEG' if global_config["anim_render"] else 'PNG'
+ scene.render.use_file_extension = False
+ scene.render.use_placeholder = False
+ try:
+ if global_config["anim_render"]:
+ bpy.ops.render.render('INVOKE_DEFAULT', animation=True)
+ else:
+ bpy.ops.render.render('INVOKE_DEFAULT', write_still=True)
+ except RuntimeError: # no camera for eg:
+ import traceback
+ traceback.print_exc()
+
+ open(global_state["render_out"], 'w').close() # touch so we move on.
+
+ else:
+ raise Exception("Unsupported mode %r" % global_config["mode"])
+
+ global_state["init_time"] = global_state["last_switch"] = time.time()
+ global_state["render_time"] = -1.0
+
+
+def demo_mode_update():
+ time_current = time.time()
+ time_delta = time_current - global_state["last_switch"]
+ time_total = time_current - global_state["init_time"]
+
+ # --------------------------------------------------------------------------
+ # ANIMATE MODE
+ if global_config["mode"] == 'PLAY':
+ frame = bpy.context.scene.frame_current
+ # check for exit
+ if time_total > global_config["anim_time_max"]:
+ demo_mode_next_file()
+ return
+ # above cycles and minimum display time
+ if (time_total > global_config["anim_time_min"]) and \
+ (global_state["anim_cycles"] > global_config["anim_cycles"]):
+
+ # looped enough now.
+ demo_mode_next_file()
+ return
+
+ # run update funcs
+ if global_state["reset_anim"]:
+ global_state["reset_anim"] = False
+ bpy.ops.screen.animation_cancel(restore_frame=False)
+ bpy.ops.screen.animation_play()
+
+ # warning, switching the screen can switch the scene
+ # and mess with our last-frame/cycles counting.
+ if global_config["anim_screen_switch"]:
+ # print(time_delta, 1)
+ if time_delta > global_config["anim_screen_switch"]:
+
+ screen = bpy.context.window.screen
+ index = bpy.data.screens.keys().index(screen.name)
+ screen_new = bpy.data.screens[(index if index > 0 else len(bpy.data.screens)) - 1]
+ bpy.context.window.screen = screen_new
+
+ global_state["last_switch"] = time_current
+
+ # if we also switch scenes then reset last frame
+ # otherwise it could mess up cycle calc.
+ if screen.scene != screen_new.scene:
+ global_state["last_frame"] = -1
+
+ #if global_config["mode"] == 'PLAY':
+ if 1:
+ global_state["reset_anim"] = True
+
+ # did we loop?
+ if global_state["last_frame"] > frame:
+ print("Cycle!")
+ global_state["anim_cycles"] += 1
+
+ global_state["last_frame"] = frame
+
+ # --------------------------------------------------------------------------
+ # RENDER MODE
+ elif global_config["mode"] == 'RENDER':
+ if os.path.exists(global_state["render_out"]):
+ # wait until the time has passed
+ # XXX, todo, if rendering an anim we need some way to check its done.
+ if global_state["render_time"] == -1.0:
+ global_state["render_time"] = time.time()
+ else:
+ if time.time() - global_state["render_time"] > global_config["display_render"]:
+ os.remove(global_state["render_out"])
+ demo_mode_next_file()
+ return
+ else:
+ raise Exception("Unsupported mode %r" % global_config["mode"])
+
+# -----------------------------------------------------------------------------
+# modal operator
+
+
+class DemoKeepAlive:
+ secret_attr = "_keepalive"
+
+ @staticmethod
+ def ensure():
+ if DemoKeepAlive.secret_attr not in bpy.app.driver_namespace:
+ bpy.app.driver_namespace[DemoKeepAlive.secret_attr] = DemoKeepAlive()
+
+ @staticmethod
+ def remove():
+ if DemoKeepAlive.secret_attr in bpy.app.driver_namespace:
+ del bpy.app.driver_namespace[DemoKeepAlive.secret_attr]
+
+ def __del__(self):
+ """ Hack, when the file is loaded the drivers namespace is cleared.
+ """
+ if DemoMode.enabled:
+ demo_mode_load_file()
+
+
+class DemoMode(bpy.types.Operator):
+ bl_idname = "wm.demo_mode"
+ bl_label = "Demo"
+
+ enabled = False
+ first_run = True
+
+ def cleanup(self, disable=False):
+ demo_mode_timer_remove()
+ __class__.first_run = True
+
+ if disable:
+ __class__.enabled = False
+ DemoKeepAlive.remove()
+
+ def modal(self, context, event):
+ # print("DemoMode.modal", global_state["anim_cycles"])
+ if not __class__.enabled:
+ self.cleanup(disable=True)
+ return {'CANCELLED'}
+
+ if event.type == 'ESC':
+ self.cleanup(disable=True)
+ # disable here and not in cleanup because this is a user level disable.
+ # which should stay disabled until explicitly enabled again.
+ return {'CANCELLED'}
+
+ # print(event.type)
+ if __class__.first_run:
+ __class__.first_run = False
+
+ demo_mode_init()
+ else:
+ demo_mode_update()
+
+ return {'PASS_THROUGH'}
+
+ def execute(self, context):
+ print("func:DemoMode.execute:", len(global_config_files), "files")
+
+ # load config if not loaded
+ if not global_config_files:
+ load_config()
+ if not global_config_files:
+ self.report({'INFO'}, "No configuration found with text or file: %s. Run File -> Demo Mode Setup" % DEMO_CFG)
+ return {'CANCELLED'}
+
+ # toggle
+ if __class__.enabled and __class__.first_run == False:
+            # this actually cancels the previous running instance
+ # should never happen now, DemoModeControl is for this.
+ return {'CANCELLED'}
+ else:
+ __class__.enabled = True
+ context.window_manager.modal_handler_add(self)
+
+ return {'RUNNING_MODAL'}
+
+ def cancel(self, context):
+ print("func:DemoMode.cancel")
+ # disable here means no running on file-load.
+ self.cleanup()
+ return {'CANCELLED'}
+
+ # call from DemoModeControl
+ @classmethod
+ def disable(cls):
+ if cls.enabled and cls.first_run == False:
+            # this actually cancels the previous running instance
+ # should never happen now, DemoModeControl is for this.
+ cls.enabled = False
+
+
+class DemoModeControl(bpy.types.Operator):
+ bl_idname = "wm.demo_mode_control"
+ bl_label = "Control"
+
+ mode = bpy.props.EnumProperty(items=(
+ ('PREV', "Prev", ""),
+ ('PAUSE', "Pause", ""),
+ ('NEXT', "Next", ""),
+ ),
+ name="Mode")
+
+ def execute(self, context):
+ mode = self.mode
+ if mode == 'PREV':
+ demo_mode_next_file(-1)
+ elif mode == 'NEXT':
+ demo_mode_next_file(1)
+ else: # pause
+ DemoMode.disable()
+ return {'FINISHED'}
+
+
+def menu_func(self, context):
+ # print("func:menu_func - DemoMode.enabled:", DemoMode.enabled, "bpy.app.driver_namespace:", DemoKeepAlive.secret_attr not in bpy.app.driver_namespace, 'global_state["timer"]:', global_state["timer"])
+ layout = self.layout
+ layout.operator_context = 'EXEC_DEFAULT'
+ box = layout.row() # BOX messes layout
+ row = box.row(align=True)
+ row.label("Demo Mode:")
+ if not DemoMode.enabled:
+ row.operator("wm.demo_mode", icon='PLAY', text="")
+ else:
+ row.operator("wm.demo_mode_control", icon='REW', text="").mode = 'PREV'
+ row.operator("wm.demo_mode_control", icon='PAUSE', text="").mode = 'PAUSE'
+ row.operator("wm.demo_mode_control", icon='FF', text="").mode = 'NEXT'
+
+
+def register():
+ bpy.utils.register_class(DemoMode)
+ bpy.utils.register_class(DemoModeControl)
+ bpy.types.INFO_HT_header.append(menu_func)
+
+
+def unregister():
+ bpy.utils.unregister_class(DemoMode)
+ bpy.utils.unregister_class(DemoModeControl)
+ bpy.types.INFO_HT_header.remove(menu_func)
+
+
+# -----------------------------------------------------------------------------
+# parse args
+
+def load_config(cfg_name=DEMO_CFG):
+ namespace = {}
+ global_config_files[:] = []
+ basedir = os.path.dirname(bpy.data.filepath)
+
+ text = bpy.data.texts.get(cfg_name)
+ if text is None:
+ demo_path = os.path.join(basedir, cfg_name)
+ if os.path.exists(demo_path):
+ print("Using config file: %r" % demo_path)
+ demo_file = open(demo_path, "r")
+ demo_data = demo_file.read()
+ demo_file.close()
+ else:
+ demo_data = ""
+ else:
+ print("Using config textblock: %r" % cfg_name)
+ demo_data = text.as_string()
+ demo_path = os.path.join(bpy.data.filepath, cfg_name) # fake
+
+ if not demo_data:
+ print("Could not find %r textblock or %r file." % (DEMO_CFG, demo_path))
+ return False
+
+ namespace["__file__"] = demo_path
+
+ exec(demo_data, namespace, namespace)
+
+ demo_config = namespace["config"]
+ demo_search_path = namespace.get("search_path")
+
+    if demo_search_path is None:
+        print("reading: %r, no search_path found, missing files wont be searched." % demo_path)
+    elif demo_search_path.startswith("//"):
+        demo_search_path = os.path.relpath(demo_search_path)
+    if demo_search_path is not None and not os.path.exists(demo_search_path):
+        print("reading: %r, search_path %r does not exist." % (demo_path, demo_search_path))
+        demo_search_path = None
+
+ blend_lookup = {}
+ # initialize once, case insensitive dict
+
+ def lookup_file(filepath):
+ filename = os.path.basename(filepath).lower()
+
+ if not blend_lookup:
+ # ensure only ever run once.
+ blend_lookup[None] = None
+
+ def blend_dict_items(path):
+ for dirpath, dirnames, filenames in os.walk(path):
+ # skip '.svn'
+ if dirpath.startswith("."):
+ continue
+ for filename in filenames:
+ if filename.lower().endswith(".blend"):
+ filepath = os.path.join(dirpath, filename)
+ yield (filename.lower(), filepath)
+
+ blend_lookup.update(dict(blend_dict_items(demo_search_path)))
+
+        # fallback to original file
+ return blend_lookup.get(filename, filepath)
+ # done with search lookup
+
+ for filecfg in demo_config:
+ filepath_test = filecfg["file"]
+ if not os.path.exists(filepath_test):
+ filepath_test = os.path.join(basedir, filecfg["file"])
+ if not os.path.exists(filepath_test):
+ filepath_test = lookup_file(filepath_test) # attempt to get from searchpath
+ if not os.path.exists(filepath_test):
+                print("Cant find %r or %r, skipping!" % (filecfg["file"], filepath_test))
+ continue
+
+ filecfg["file"] = os.path.normpath(filepath_test)
+
+ # sanitize
+ filecfg["file"] = os.path.abspath(filecfg["file"])
+ filecfg["file"] = os.path.normpath(filecfg["file"])
+ print(" Adding: %r" % filecfg["file"])
+ global_config_files.append(filecfg)
+
+ print("found %d files" % len(global_config_files))
+
+ global_state["basedir"] = basedir
+
+ return bool(global_config_files)
+
+
+# support direct execution
+if __name__ == "__main__":
+ register()
+
+ demo_mode_load_file() # kick starts the modal operator
diff --git a/system_property_chart.py b/system_property_chart.py
new file mode 100644
index 00000000..045ead0d
--- /dev/null
+++ b/system_property_chart.py
@@ -0,0 +1,248 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+bl_info = {
+ "name": "Object Property Chart",
+ "author": "Campbell Barton (ideasman42)",
+ "version": (0, 1),
+ "blender": (2, 5, 7),
+ "api": 35622,
+ "location": "Tool Shelf",
+ "description": "Edit arbitrary selected properties for objects of the same type",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/"\
+ "Scripts/System/Object Property Chart",
+ "tracker_url": "https://projects.blender.org/tracker/index.php?"\
+ "func=detail&aid=22701",
+ "category": "System"}
+
+"""List properties of selected objects"""
+
+import bpy
+
+
+def _property_chart_data_get(self, context):
+ # eg. context.active_object
+ obj = eval("context.%s" % self.context_data_path_active)
+
+ if obj is None:
+ return None, None
+
+ # eg. context.selected_objects[:]
+ selected_objects = eval("context.%s" % self.context_data_path_selected)[:]
+
+ if not selected_objects:
+ return None, None
+
+ return obj, selected_objects
+
+
+def _property_chart_draw(self, context):
+ '''
+ This function can run for different types.
+ '''
+ obj, selected_objects = _property_chart_data_get(self, context)
+
+ if not obj:
+ return
+
+ # active first
+ try:
+ active_index = selected_objects.index(obj)
+ except ValueError:
+ active_index = -1
+
+    if active_index > 0:  # not the first already
+ selected_objects[0], selected_objects[active_index] = selected_objects[active_index], selected_objects[0]
+
+ id_storage = context.scene
+
+ strings = getattr(id_storage, self._PROP_STORAGE_ID)
+
+ # Collected all props, now display them all
+ layout = self.layout
+
+ if strings:
+
+ def obj_prop_get(obj, attr_string):
+ """return a pair (rna_base, "rna_property") to give to the rna UI property function"""
+ attrs = attr_string.split(".")
+ val_new = obj
+ for i, attr in enumerate(attrs):
+ val_old = val_new
+ val_new = getattr(val_old, attr, Ellipsis)
+
+ if val_new == Ellipsis:
+ return None, None
+ return val_old, attrs[-1]
+
+ strings = strings.split()
+
+ prop_all = []
+
+ for obj in selected_objects:
+ prop_pairs = []
+ prop_found = False
+ for attr_string in strings:
+ prop_pairs.append(obj_prop_get(obj, attr_string))
+ if prop_found == False and prop_pairs[-1] != (None, None):
+ prop_found = True
+
+ if prop_found:
+ prop_all.append((obj, prop_pairs))
+
+ row = layout.row(align=True)
+
+ col = row.column()
+ col.label(text="name")
+ for obj, prop_pairs in prop_all:
+ col.prop(obj, "name", text="")
+
+ for i in range(len(strings)):
+ col = row.column()
+
+ # name and copy button
+ rowsub = col.row(align=False)
+ rowsub.label(text=strings[i].rsplit(".", 1)[-1])
+ props = rowsub.operator("wm.chart_copy", text="", icon='PASTEDOWN', emboss=False)
+ props.data_path_active = self.context_data_path_active
+ props.data_path_selected = self.context_data_path_selected
+ props.data_path = strings[i]
+
+ for obj, prop_pairs in prop_all:
+ data, attr = prop_pairs[i]
+ if data:
+ col.prop(data, attr, text="") # , emboss=obj==active_object
+ else:
+ col.label(text="<missing>")
+
+ # edit the display props
+ col = layout.column()
+ col.label(text="Object Properties")
+ col.prop(id_storage, self._PROP_STORAGE_ID, text="")
+
+
+class View3DEditProps(bpy.types.Panel):
+ bl_space_type = 'VIEW_3D'
+ bl_region_type = 'UI'
+
+ bl_label = "Property Chart"
+ bl_context = "objectmode"
+
+ _PROP_STORAGE_ID = "view3d_edit_props"
+ _PROP_STORAGE_DEFAULT = "data data.name"
+
+ # _property_chart_draw needs these
+ context_data_path_active = "active_object"
+ context_data_path_selected = "selected_objects"
+
+ draw = _property_chart_draw
+
+
+class SequencerEditProps(bpy.types.Panel):
+ bl_space_type = 'SEQUENCE_EDITOR'
+ bl_region_type = 'UI'
+
+ bl_label = "Property Chart"
+
+ _PROP_STORAGE_ID = "sequencer_edit_props"
+ _PROP_STORAGE_DEFAULT = "blend_type blend_alpha"
+
+ # _property_chart_draw needs these
+ context_data_path_active = "scene.sequence_editor.active_strip"
+ context_data_path_selected = "selected_sequences"
+
+ draw = _property_chart_draw
+
+ @classmethod
+ def poll(cls, context):
+ return context.scene.sequence_editor is not None
+
+# Operator to copy properties
+
+
+def _property_chart_copy(self, context):
+ obj, selected_objects = _property_chart_data_get(self, context)
+
+ if not obj:
+ return
+
+ data_path = self.data_path
+
+ # quick & nasty method!
+ for obj_iter in selected_objects:
+ if obj != obj_iter:
+ try:
+ exec("obj_iter.%s = obj.%s" % (data_path, data_path))
+ except:
+                # just in case we need to know what went wrong!
+ import traceback
+ traceback.print_exc()
+
+from bpy.props import StringProperty
+
+
+class CopyPropertyChart(bpy.types.Operator):
+    "Copy properties from the active object to other selected objects"
+ bl_idname = "wm.chart_copy"
+ bl_label = "Copy properties from active to selected"
+
+ data_path_active = StringProperty()
+ data_path_selected = StringProperty()
+ data_path = StringProperty()
+
+ def execute(self, context):
+ # so attributes are found for '_property_chart_data_get()'
+ self.context_data_path_active = self.data_path_active
+ self.context_data_path_selected = self.data_path_selected
+
+ _property_chart_copy(self, context)
+
+ return {'FINISHED'}
+
+
+def register():
+ bpy.utils.register_module(__name__)
+
+ Scene = bpy.types.Scene
+
+ for cls in View3DEditProps, SequencerEditProps:
+ setattr(Scene,
+ cls._PROP_STORAGE_ID,
+ StringProperty(
+                name="Property Chart",
+                description="Space separated list of object property paths to display in the chart.",
+                default=cls._PROP_STORAGE_DEFAULT, maxlen=1024),
+ )
+
+
+def unregister():
+ bpy.utils.unregister_module(__name__)
+
+ Scene = bpy.types.Scene
+
+ for cls in View3DEditProps, SequencerEditProps:
+ delattr(Scene,
+ cls._PROP_STORAGE_ID,
+ )
+
+
+if __name__ == "__main__":
+ register()
diff --git a/texture_paint_layer_manager.py b/texture_paint_layer_manager.py
new file mode 100644
index 00000000..c431f03f
--- /dev/null
+++ b/texture_paint_layer_manager.py
@@ -0,0 +1,645 @@
+bl_info = {
+ "name": "Texture Paint Layer Manager",
+ "author": "Michael Wiliamson",
+ "version": (1, 0),
+ "blender": (2, 5, 7),
+ "api": 35964,
+ "location": "Texture Paint > Properties > Texture Paint Layers Panels",
+ "description": "Adds a layer manager for image based texture slots in paint and quick add layer tools",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.5/Py/Scripts/3D_interaction/Texture_paint_layers",
+ "tracker_url": "http://projects.blender.org/tracker/index.php?func=detail&aid=26789",
+ "category": "Paint"}
+
+
+import bpy
+from bpy.props import*
+import os
+from bpy_extras.io_utils import ImportHelper
+
+
+#-------------------------------------------
+
+def load_a_brush(context, filepath):
+ if os.path.isdir(filepath):
+ return
+
+ else:
+
+ try:
+ fn = bpy.path.display_name_from_filepath(filepath)
+ #create image and load...
+ img = bpy.data.images.load(filepath)
+ img.use_fake_user =True
+
+ #create a texture
+ tex = bpy.data.textures.new(name =fn, type='IMAGE')
+ tex.use_fake_user =True
+ #tex.use_calculate_alpha = True
+
+ #link the img to the texture
+ tex.image = img
+
+ except:
+            print(filepath, 'is not an image?')
+
+ return {'FINISHED'}
+
+
+
+
+class load_single_brush(bpy.types.Operator, ImportHelper):
+ ''' Load an image as a brush texture'''
+ bl_idname = "texture.load_single_brush"
+ bl_label = "Load Image as Brush"
+
+
+ @classmethod
+ def poll(cls, context):
+ return context.active_object != None
+
+ def execute(self, context):
+ return load_a_brush(context, self.filepath)
+
+#-------------------------------------------
+
+def loadbrushes(context, filepath):
+ if os.path.isdir(filepath):
+ directory = filepath
+
+ else:
+ #is a file, find parent directory
+ li = filepath.split(os.sep)
+ directory = filepath.rstrip(li[-1])
+
+
+ files = os.listdir(directory)
+ for f in files:
+ try:
+ fn = f[3:]
+ #create image and load...
+ img = bpy.data.images.load(filepath = directory +os.sep + f)
+ img.use_fake_user =True
+
+ #create a texture
+ tex = bpy.data.textures.new(name =fn, type='IMAGE')
+ tex.use_fake_user =True
+ #tex.use_calculate_alpha = True
+
+ #link the img to the texture
+ tex.image = img
+
+ except:
+ print(f,'is not image?')
+ continue
+ return {'FINISHED'}
+
+
+
+
+class ImportBrushes(bpy.types.Operator, ImportHelper):
+ ''' Load a directory of images as brush textures '''
+ bl_idname = "texture.load_brushes"
+ bl_label = "Load brushes directory"
+
+
+ @classmethod
+ def poll(cls, context):
+ return context.active_object != None
+
+ def execute(self, context):
+ return loadbrushes(context, self.filepath)
+
+#-------------------------------------------------------------------
+
+class OBJECT_PT_LoadBrushes(bpy.types.Panel):
+ bl_label = "Load Brush images"
+ bl_space_type = "VIEW_3D"
+ bl_region_type = "TOOLS"
+ #bl_context = "texturepaint"
+
+ @classmethod
+ def poll(cls, context):
+ return (context.sculpt_object or context.image_paint_object)
+
+ def draw(self, context):
+ layout = self.layout
+ row = layout.row()
+ row.operator('texture.load_brushes')
+ row = layout.row()
+ row.operator('texture.load_single_brush')
+
+
+#======================================================================
+
+
+
+
+
+class OBJECT_PT_Texture_paint_layers(bpy.types.Panel):
+ bl_label = "Texture Paint Layers"
+ bl_space_type = "VIEW_3D"
+ bl_region_type = "UI"
+ #bl_context = "texturepaint"
+
+ @classmethod
+ def poll(cls, context):
+ return (context.image_paint_object)
+
+ def draw(self, context):
+ layout = self.layout
+
+ ob = bpy.context.image_paint_object
+ if ob:
+ me = ob.data
+ mat = ob.active_material
+ if not mat:
+ row = layout.row()
+ row.label(' Add a Material first!', icon = 'ERROR')
+ else:
+ row = layout.row()
+ row.template_list(ob, "material_slots", ob,
+ "active_material_index", rows=2 )
+
+
+
+ #list Paintable textures
+ #TODO add filter for channel type
+ i = -1
+ for t in mat.texture_slots:
+ i+=1
+ try:
+ if t.texture.type =='IMAGE':
+ row = layout.row(align= True)
+ if t.texture == mat.active_texture:
+ ai = 'BRUSH_DATA'
+ else:
+ ai = 'BLANK1'
+ row.operator('object.set_active_paint_layer',
+ text = "", icon = ai).tex_index =i
+ row.prop(t.texture,'name', text = "")
+
+
+ #Visibility
+ if t.use:
+ ic = 'RESTRICT_VIEW_OFF'
+ else:
+ ic = 'RESTRICT_VIEW_ON'
+ row.prop(t,'use', text = "",icon = ic)
+ except:
+ continue
+
+
+
+
+
+
+ ts = mat.texture_slots[mat.active_texture_index]
+
+ if ts:
+ row = layout.row()
+
+
+
+
+ col = layout.column(align =True)
+ col.label('Active Properties:', icon = 'BRUSH_DATA')
+
+ #use if rather than elif... can be mapped to multiple things
+ if ts.use_map_diffuse:
+ col.prop(ts,'diffuse_factor', slider = True)
+ if ts.use_map_color_diffuse:
+ col.prop(ts,'diffuse_color_factor', slider = True)
+ if ts.use_map_alpha:
+ col.prop(ts,'alpha_factor', slider = True)
+ if ts.use_map_translucency:
+ col.prop(ts,'translucency_factor', slider = True)
+ if ts.use_map_specular:
+ col.prop(ts,'specular_factor', slider = True)
+ if ts.use_map_color_spec:
+ col.prop(ts,'specular_color_factor', slider = True)
+ if ts.use_map_hardness:
+ col.prop(ts,'hardness_factor', slider = True)
+
+ if ts.use_map_normal:
+ col.prop(ts,'normal_factor', slider = True)
+ if ts.use_map_warp:
+ col.prop(ts,'warp_factor', slider = True)
+ if ts.use_map_displacement:
+ col.prop(ts,'displacement_factor', slider = True)
+
+ if ts.use_map_ambient:
+ col.prop(ts,'ambient_factor', slider = True)
+ if ts.use_map_emit:
+ col.prop(ts,'emit_factor', slider = True)
+ if ts.use_map_mirror:
+ col.prop(ts,'mirror_factor', slider = True)
+ if ts.use_map_raymir:
+ col.prop(ts,'raymir_factor', slider = True)
+
+
+ col.prop(ts,'blend_type',text='')
+
+ else:
+ row=layout.row()
+ row.label('No paint layers in material', icon = 'ERROR')
+
+#
+# row = layout.row()
+# row.label('')
+# row = layout.row()
+# row.label('WIP: Use the X to delete!:')
+# row = layout.row()
+# row.template_ID(mat, "active_texture", new="texture.new")
+
+
+class OBJECT_PT_Texture_paint_add(bpy.types.Panel):
+ bl_label = "Add Paint Layers"
+ bl_space_type = "VIEW_3D"
+ bl_region_type = "UI"
+ #bl_context = "texturepaint"
+
+ @classmethod
+ def poll(cls, context):
+ return (context.image_paint_object)
+
+ def draw(self, context):
+ layout = self.layout
+
+ ob = bpy.context.image_paint_object
+ if ob:
+ me = ob.data
+ mat = ob.active_material
+
+ if mat:
+
+ #row = layout.row()
+ col = layout.column(align =True)
+
+
+ col.operator('object.add_paint_layer',
+ text = "Add Color").ttype = 'COLOR'
+ col.operator('object.add_paint_layer',
+ text = "Add Bump").ttype = 'NORMAL'
+
+ col = layout.column(align =True)
+ col.operator('object.add_paint_layer',
+ text = "Add Specular").ttype = 'SPECULAR'
+ col.operator('object.add_paint_layer',
+ text = "Add Spec Col").ttype = 'SPEC_COL'
+ col.operator('object.add_paint_layer',
+ text = "Add Hardness").ttype = 'HARDNESS'
+
+ col = layout.column(align =True)
+ col.operator('object.add_paint_layer',
+ text = "Add Alpha").ttype = 'ALPHA'
+ col.operator('object.add_paint_layer',
+ text = "Add Translucency").ttype = 'TRANSLUCENCY'
+
+# col = layout.column(align =True)
+# col.operator('object.add_paint_layer',
+# text = "Add Mirror").ttype = 'MIRROR'
+# col.operator('object.add_paint_layer',
+# text = "Add Ray Mirror").ttype = 'RAY_MIRROR'
+
+ col = layout.column(align =True)
+ col.operator('object.add_paint_layer',
+ text = "Add Emit").ttype = 'EMIT'
+ col.operator('object.add_paint_layer',
+ text = "Add Diffuse").ttype = 'DIFFUSE'
+ col.operator('object.add_paint_layer',
+ text = "Add Ambient").ttype = 'AMBIENT'
+
+ else:
+ row = layout.row()
+ row.label(' Add a Material first!', icon = 'ERROR')
+
+
+
+def main(context,tn):
+ #tn is the index of the texture in the active material
+ ob = context.active_object
+ me = ob.data
+ mat = ob.active_material
+ mat.active_texture_index = tn
+ ts = mat.texture_slots[tn]
+
+ #make sure it's visible
+ ts.use = True
+
+ #Mesh use UVs?
+ if not me.uv_textures:
+ bpy.ops.mesh.uv_texture_add()
+
+ # texture Slot uses UVs?
+ if ts.texture_coords == 'UV':
+ if ts.uv_layer:
+ uvtex = me.uv_textures[ts.uv_layer]
+
+ else:
+ uvtex = me.uv_textures.active
+ me.uv_textures.active= uvtex
+ else:
+ ts.texture_coords ='UV'
+ uvtex = me.uv_textures.active
+
+
+ uvtex = uvtex.data.values()
+
+
+ #get image from texture slot
+ img = ts.texture.image
+
+ #get material index
+ m_id = ob.active_material_index
+
+ if img:
+ for f in me.faces:
+ if f.material_index == m_id:
+ uvtex[f.index].select_uv
+ uvtex[f.index].image = img
+ uvtex[f.index].use_image = True
+
+
+ else:
+ for f in me.faces:
+ if f.material_index == m_id:
+ uvtex[f.index].image = img
+ #uvtex[f.index].use_image = False
+ me.update()
+
+
+
+
+
+
+
+class set_active_paint_layer(bpy.types.Operator):
+    '''Set the active texture paint layer of the active material'''
+ bl_idname = "object.set_active_paint_layer"
+ bl_label = "set_active_paint_layer"
+ tex_index = IntProperty(name = 'tex_index',
+ description = "", default = 0)
+
+ @classmethod
+ def poll(cls, context):
+ return context.active_object != None
+
+ def execute(self, context):
+ tn = self.tex_index
+ main(context, tn)
+ return {'FINISHED'}
+
+
+
+def add_image_kludge(iname = 'grey', iwidth = 256, iheight = 256,
+ icolor = (0.5,0.5,0.5,1.0)):
+ #evil kludge to get index of new image created using bpy.ops
+ #store current images
+ tl =[]
+ for i in bpy.data.images:
+ tl.append(i.name)
+
+
+ #create a new image
+
+ bpy.ops.image.new(name =iname,width =iwidth,height =iheight,
+ color = icolor)
+
+ #find its creation index
+ it = 0
+ for i in bpy.data.images:
+ if i.name not in tl:
+ return(bpy.data.images[it])
+ break
+ it += 1
+
+
def add_paint(context, size=2048, typ='NORMAL'):
    """Add a paintable image-texture slot of the given type to the
    active material of the active object, then make it the active
    paint layer.

    size -- width/height in pixels of the generated image
    typ  -- channel to create: 'NORMAL', 'COLOR', 'ALPHA', 'SPECULAR',
            'EMIT', 'SPEC_COL', 'HARDNESS', 'DIFFUSE', 'TRANSLUCENCY',
            'AMBIENT', 'MIRROR' or 'RAY_MIRROR'
    """
    ob = bpy.context.object
    mat = ob.active_material
    ts = mat.texture_slots.add()

    # Pick a fill color and base name appropriate for the channel.
    if typ == 'NORMAL':
        color = (0.5, 0.5, 0.5, 1.0)  # mid-grey == flat bump
        iname = 'Bump'
    elif typ == 'COLOR':
        iname = 'Color'
        color = (1.0, 1.0, 1.0, 0.0)
    elif typ == 'ALPHA':
        iname = 'Alpha'
        color = (1.0, 1.0, 1.0, 0.0)
    else:
        color = (0.0, 0.0, 0.0, 1.0)
        iname = typ.capitalize()

    # Prefix with the object name so layers from different objects stay
    # distinguishable in bpy.data.textures.
    iname = ob.name + '_' + iname

    tex = bpy.data.textures.new(name=iname, type='IMAGE')
    ts.texture = tex
    # NOTE(review): the image is created under the raw type name, not
    # the object-prefixed iname built above -- looks unintended, but is
    # kept as-is to preserve the existing naming behavior.
    img = add_image_kludge(iname=typ,
                           iwidth=size, iheight=size, icolor=color)
    tex.image = img

    # Configure which influence channels the new slot drives.
    if typ == 'COLOR':
        ts.use_map_color_diffuse = True

    elif typ == 'NORMAL':
        ts.use_map_normal = True
        ts.use_map_color_diffuse = False
        ts.normal_factor = -1
        ts.bump_method = 'BUMP_DEFAULT'
        ts.bump_objectspace = 'BUMP_OBJECTSPACE'

    elif typ == 'SPECULAR':
        ts.use_map_specular = True
        ts.use_map_color_diffuse = False
        ts.use_rgb_to_intensity = True
        #ts.blend_type = 'MULTIPLY'

    elif typ == 'EMIT':
        ts.use_map_emit = True
        ts.use_map_color_diffuse = False
        ts.use_rgb_to_intensity = True

    elif typ == 'ALPHA':
        mat.use_transparency = True
        ts.use_map_alpha = True
        ts.use_map_color_diffuse = False
        ts.use_rgb_to_intensity = True
        ts.blend_type = 'MULTIPLY'

    elif typ == 'SPEC_COL':
        ts.use_map_color_spec = True
        ts.use_map_color_diffuse = False
        ts.use_rgb_to_intensity = True

    elif typ == 'HARDNESS':
        ts.use_map_hardness = True
        ts.use_map_color_diffuse = False
        ts.use_rgb_to_intensity = True

    elif typ == 'DIFFUSE':
        ts.use_map_diffuse = True
        ts.use_map_color_diffuse = False
        ts.use_rgb_to_intensity = True

    elif typ == 'TRANSLUCENCY':
        ts.use_map_translucency = True
        ts.use_map_color_diffuse = False
        ts.use_rgb_to_intensity = True

    elif typ == 'AMBIENT':
        ts.use_map_ambient = True
        ts.use_map_color_diffuse = False
        ts.use_rgb_to_intensity = True

    elif typ == 'MIRROR':
        ts.use_map_mirror = True
        ts.use_map_color_diffuse = False
        ts.use_rgb_to_intensity = True

    elif typ == 'RAY_MIRROR':
        mat.raytrace_mirror.use = True
        ts.use_map_ray_mirror = True
        ts.use_map_color_diffuse = False
        ts.use_rgb_to_intensity = True

    # Make the new slot the active texture so painting targets it.
    ts_index = None
    for i, slot in enumerate(mat.texture_slots):
        if slot == ts:
            ts_index = i
            break
    if ts_index is not None:
        mat.active_texture_index = ts_index
        # Bug fix: only assign the texfaces when the slot was actually
        # found -- the original called main() unconditionally and could
        # pass ts_index=None.
        main(context, ts_index)
+
+
+
+
+
class add_paint_layer(bpy.types.Operator):
    '''Add a new paint layer of the channel type given by ttype'''
    bl_idname = "object.add_paint_layer"
    bl_label = "Add Paint Layer"
    # Channel to create, e.g. 'NORMAL', 'COLOR', 'ALPHA', 'SPECULAR'.
    ttype = StringProperty(name='ttype', default='NORMAL')

    @classmethod
    def poll(cls, context):
        return context.active_object is not None

    def execute(self, context):
        add_paint(context, typ=self.ttype)
        # Bug fix: operators must return a *set* of status flags; the
        # original returned the bare string 'FINISHED', which Blender
        # rejects as an invalid operator return value.
        return {'FINISHED'}
+
+
+
+
+#----------------------------------------------
def save_painted(ts):
    """Write a texture slot's generated image to //textures/ on disk.

    Generated (packed, path-less) images are skipped by
    bpy.ops.image.save_dirty(), so if *ts* holds a GENERATED image with
    unsaved strokes it is saved explicitly, converted to FILE source and
    re-pathed so future saves hit the same file. All other dirty painted
    images are then flushed with save_dirty().

    ts -- a material texture slot, or None (empty slots are ignored).
    """
    import os

    if ts and ts.texture.type == 'IMAGE':
        i = ts.texture.image
        # Robustness: an IMAGE texture may have no image assigned yet.
        if i is not None and i.source == 'GENERATED' and i.is_dirty:
            # Derive a filename from the slot name plus an extension
            # matching the image's file format (other formats keep the
            # bare name, as in the original).
            name = ts.name
            if i.file_format == 'PNG':
                name += '.png'
            elif i.file_format == 'TARGA':
                name += '.tga'

            bpy.context.scene.render.color_mode = 'RGBA'
            fp = bpy.path.abspath('//textures' + os.sep + name)
            try:
                i.save_render(fp)
                # Re-point the datablock at the saved file so it is no
                # longer GENERATED and save_dirty() can handle it later.
                i.source = 'FILE'
                if bpy.context.user_preferences.filepaths.use_relative_paths:
                    i.filepath = bpy.path.relpath(fp)
                else:
                    i.filepath = fp
                i.name = name
                i.use_premultiply = True
            except Exception:
                # Narrowed from a bare `except:` (which also swallowed
                # KeyboardInterrupt/SystemExit). save_render can fail if
                # //textures is missing or read-only; report and carry on.
                print("something wrong with", fp)
    # THAT'S THE GENERATED FILES saved, pathed and reloaded --
    # now flush every other painted (dirty) image.
    bpy.ops.image.save_dirty()
+
+
+
def save_active_paint():
    """Save the painted textures of every material on the active object.

    Returns {'FINISHED'} so the calling operator's execute() can return
    it directly (operators require a set of status flags, not a string).
    """
    ob = bpy.context.object
    for slot in ob.material_slots:
        # Robustness: material slots can be empty (material is None);
        # the original crashed on them.
        if slot.material:
            for ts in slot.material.texture_slots:
                save_painted(ts)
    return {'FINISHED'}
+
def save_all_paint():
    """Save the painted textures of every material in the blend file.

    Returns {'FINISHED'} as a valid operator status set, matching
    save_active_paint().
    """
    for m in bpy.data.materials:
        for ts in m.texture_slots:
            save_painted(ts)
    return {'FINISHED'}
+
+
class save_all_generated(bpy.types.Operator):
    '''Saves painted layers to disc'''
    bl_idname = "paint.save_all_generated"
    bl_label = "SAVE PAINT LAYERS"

    @classmethod
    def poll(cls, context):
        return context.active_object is not None

    def execute(self, context):
        save_active_paint()
        # Bug fix: the original returned save_active_paint()'s bare
        # string 'FINISHED'; operators must return a set of flags.
        return {'FINISHED'}
+
+#-----------------------------------
class OBJECT_PT_SavePainted(bpy.types.Panel):
    """3D View sidebar panel exposing the save-all-painted operator."""
    bl_label = "Save All Painted"
    bl_space_type = "VIEW_3D"
    bl_region_type = "UI"
    #bl_context = "texturepaint"

    @classmethod
    def poll(cls, context):
        # Visible only while an object is in texture-paint mode.
        return (context.image_paint_object)

    def draw(self, context):
        # Single row with the save button.
        self.layout.row().operator('paint.save_all_generated')
+
def register():
    # Register every operator/panel class in this module with Blender.
    bpy.utils.register_module(__name__)
+
def unregister():
    # Remove this module's classes from Blender on addon disable/reload.
    bpy.utils.unregister_module(__name__)
+
if __name__ == "__main__":
    # Allow running the script directly from Blender's text editor.
    register()