git.blender.org/blender.git

Diffstat (limited to 'release/scripts/io')
-rw-r--r--  release/scripts/io/engine_render_pov.py      912
-rw-r--r--  release/scripts/io/export_3ds.py             1130
-rw-r--r--  release/scripts/io/export_fbx.py             3453
-rw-r--r--  release/scripts/io/export_obj.py             996
-rw-r--r--  release/scripts/io/export_ply.py             279
-rw-r--r--  release/scripts/io/export_x3d.py             1240
-rw-r--r--  release/scripts/io/import_3ds.py             1167
-rw-r--r--  release/scripts/io/import_obj.py             1638
-rw-r--r--  release/scripts/io/netrender/__init__.py     19
-rw-r--r--  release/scripts/io/netrender/balancing.py    94
-rw-r--r--  release/scripts/io/netrender/client.py       203
-rw-r--r--  release/scripts/io/netrender/master.py       760
-rw-r--r--  release/scripts/io/netrender/master_html.py  135
-rw-r--r--  release/scripts/io/netrender/model.py        212
-rw-r--r--  release/scripts/io/netrender/operators.py    423
-rw-r--r--  release/scripts/io/netrender/slave.py        224
-rw-r--r--  release/scripts/io/netrender/ui.py           321
-rw-r--r--  release/scripts/io/netrender/utils.py        86
18 files changed, 13292 insertions, 0 deletions
diff --git a/release/scripts/io/engine_render_pov.py b/release/scripts/io/engine_render_pov.py
new file mode 100644
index 00000000000..f0247ce532a
--- /dev/null
+++ b/release/scripts/io/engine_render_pov.py
@@ -0,0 +1,912 @@
+import bpy
+
+from math import atan, pi, degrees
+import subprocess
+import os
+import sys
+import time
+
+import platform as pltfrm
+
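+# platform.architecture()[0] reports the pointer size of the running Python
+# interpreter ('32bit' or '64bit'); used later to pick the matching POV-Ray binary.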
+if pltfrm.architecture()[0] == '64bit':
+ bitness = 64
+else:
+ bitness = 32
+
+def write_pov(filename, scene=None, info_callback = None):
+ file = open(filename, 'w')
+
+ # Only for testing
+ if not scene:
+ scene = bpy.data.scenes[0]
+
+ render = scene.render_data
+ world = scene.world
+
+ # --- taken from fbx exporter
+ ## This was used to make V, but faster not to do all that
+ ##valid = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_,.()[]{}'
+ ##v = range(255)
+ ##for c in valid: v.remove(ord(c))
+ v = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,42,43,46,47,58,59,60,61,62,63,64,92,94,96,124,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254]
+ invalid = ''.join([chr(i) for i in v])
+ def cleanName(name):
+ for ch in invalid: name = name.replace(ch, '_')
+ return name
+ del v
+
+ # --- done with clean name.
+
+ def uniqueName(name, nameSeq):
+
+ if name not in nameSeq:
+ return name
+
+ name_orig = name
+ i = 1
+ while name in nameSeq:
+ name = '%s_%.3d' % (name_orig, i)
+ i+=1
+
+ return name
+
+
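+ # POV-Ray's 'matrix' keyword takes 12 floats: the rows of the 3x3
+ # rotation/scale part followed by the translation row (a 3x4 transform).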
+ def writeMatrix(matrix):
+ file.write('\tmatrix <%.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f>\n' %\
+ (matrix[0][0], matrix[0][1], matrix[0][2], matrix[1][0], matrix[1][1], matrix[1][2], matrix[2][0], matrix[2][1], matrix[2][2], matrix[3][0], matrix[3][1], matrix[3][2]) )
+
+ def writeObjectMaterial(material):
+ if material and material.transparency_method=='RAYTRACE':
+ file.write('\tinterior { ior %.6f }\n' % material.raytrace_transparency.ior)
+
+ # Other interior args
+ # fade_distance 2
+ # fade_power [Value]
+ # fade_color
+
+ # dispersion
+ # dispersion_samples
+
+ materialNames = {}
+ DEF_MAT_NAME = 'Default'
+ def writeMaterial(material):
+ # Assumes only called once on each material
+
+ if material:
+ name_orig = material.name
+ else:
+ name_orig = DEF_MAT_NAME
+
+ name = materialNames[name_orig] = uniqueName(cleanName(name_orig), materialNames)
+
+ file.write('#declare %s = finish {\n' % name)
+
+ if material:
+ file.write('\tdiffuse %.3g\n' % material.diffuse_intensity)
+ file.write('\tspecular %.3g\n' % material.specular_intensity)
+
+ file.write('\tambient %.3g\n' % material.ambient)
+ #file.write('\tambient rgb <%.3g, %.3g, %.3g>\n' % tuple([c*material.ambient for c in world.ambient_color])) # povray blends the global value
+
+ # map hardness between 0.0 and 1.0
+ roughness = 1.0 - ((material.specular_hardness - 1.0) / 510.0)
+ # scale from 0.0 to 0.1
+ roughness *= 0.1
+ # add a small value because 0.0 is invalid
+ roughness += (1/511.0)
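+ # e.g. hardness 1 gives roughness ~0.102 (dull), hardness 511 gives ~0.002 (tight highlight)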
+
+ file.write('\troughness %.3g\n' % roughness)
+
+ # 'phong 70.0 '
+
+ if material.raytrace_mirror.enabled:
+ raytrace_mirror= material.raytrace_mirror
+ if raytrace_mirror.reflect_factor:
+ file.write('\treflection {\n')
+ file.write('\t\trgb <%.3g, %.3g, %.3g>' % tuple(material.mirror_color))
+ file.write('\t\tfresnel 1 falloff %.3g exponent %.3g metallic %.3g} ' % (raytrace_mirror.fresnel, raytrace_mirror.fresnel_factor, raytrace_mirror.reflect_factor))
+
+ else:
+ file.write('\tdiffuse 0.8\n')
+ file.write('\tspecular 0.2\n')
+
+
+ # This is written into the object
+ '''
+ if material and material.transparency_method=='RAYTRACE':
+ 'interior { ior %.3g} ' % material.raytrace_transparency.ior
+ '''
+
+ #file.write('\t\t\tcrand 1.0\n') # Sand granyness
+ #file.write('\t\t\tmetallic %.6f\n' % material.spec)
+ #file.write('\t\t\tphong %.6f\n' % material.spec)
+ #file.write('\t\t\tphong_size %.6f\n' % material.spec)
+ #file.write('\t\t\tbrilliance %.6f ' % (material.specular_hardness/256.0) # Like hardness
+
+ file.write('}\n')
+
+ def exportCamera():
+ camera = scene.camera
+ matrix = camera.matrix
+
+ # compute resolution
+ Qsize=float(render.resolution_x)/float(render.resolution_y)
+
+ file.write('camera {\n')
+ file.write('\tlocation <0, 0, 0>\n')
+ file.write('\tlook_at <0, 0, -1>\n')
+ file.write('\tright <%s, 0, 0>\n' % -Qsize)
+ file.write('\tup <0, 1, 0>\n')
+ file.write('\tangle %f \n' % (360.0*atan(16.0/camera.data.lens)/pi))
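+ # 360*atan(16/lens)/pi is 2*atan(16/lens) in degrees; Blender's lens value
+ # is relative to a 32mm film width, so e.g. lens=35 gives roughly 49.1 degrees.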
+
+ file.write('\trotate <%.6f, %.6f, %.6f>\n' % tuple([degrees(e) for e in matrix.rotationPart().toEuler()]))
+ file.write('\ttranslate <%.6f, %.6f, %.6f>\n' % (matrix[3][0], matrix[3][1], matrix[3][2]))
+ file.write('}\n')
+
+ def exportLamps(lamps):
+ # Get all lamps
+ for ob in lamps:
+ lamp = ob.data
+
+ matrix = ob.matrix
+
+ color = tuple([c * lamp.energy for c in lamp.color]) # Colour is modified by energy
+
+ file.write('light_source {\n')
+ file.write('\t< 0,0,0 >\n')
+ file.write('\tcolor rgb<%.3g, %.3g, %.3g>\n' % color)
+
+ if lamp.type == 'POINT': # Point Lamp
+ pass
+ elif lamp.type == 'SPOT': # Spot
+ file.write('\tspotlight\n')
+
+ # Falloff is the main radius from the centre line
+ file.write('\tfalloff %.2f\n' % (lamp.spot_size/2.0) ) # 1 TO 179 FOR BOTH
+ file.write('\tradius %.6f\n' % ((lamp.spot_size/2.0) * (1-lamp.spot_blend)) )
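+ # e.g. spot_size 90 with spot_blend 0.15 gives falloff 45.00 and radius 38.25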
+
+ # Blender does not have a tightness equivalent; 0 is closest to the Blender default.
+ file.write('\ttightness 0\n') # 0:10f
+
+ file.write('\tpoint_at <0, 0, -1>\n')
+ elif lamp.type == 'SUN':
+ file.write('\tparallel\n')
+ file.write('\tpoint_at <0, 0, -1>\n') # *must* be after 'parallel'
+
+ elif lamp.type == 'AREA':
+
+ size_x = lamp.size
+ samples_x = lamp.shadow_ray_samples_x
+ if lamp.shape == 'SQUARE':
+ size_y = size_x
+ samples_y = samples_x
+ else:
+ size_y = lamp.size_y
+ samples_y = lamp.shadow_ray_samples_y
+
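+ # POV's area_light takes two edge vectors plus the number of sample points along each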
+ file.write('\tarea_light <%.6f,0,0>,<0,0,%.6f> %d, %d\n' % (size_x, size_y, samples_x, samples_y))
+ if lamp.shadow_ray_sampling_method == 'CONSTANT_JITTERED':
+ if lamp.jitter:
+ file.write('\tjitter\n')
+ else:
+ file.write('\tadaptive 1\n')
+ file.write('\tjitter\n')
+
+ if lamp.shadow_method == 'NOSHADOW':
+ file.write('\tshadowless\n')
+
+ file.write('\tfade_distance %.6f\n' % lamp.distance)
+ file.write('\tfade_power %d\n' % 1) # Could use blenders lamp quad?
+ writeMatrix(matrix)
+
+ file.write('}\n')
+
+ def exportMeta(metas):
+
+ # TODO - Blender's 'motherball' naming is not supported.
+
+ for ob in metas:
+ meta = ob.data
+
+ file.write('blob {\n')
+ file.write('\t\tthreshold %.4g\n' % meta.threshold)
+
+ try:
+ material= meta.materials[0] # lame! - blender can't do anything else.
+ except:
+ material= None
+
+ for elem in meta.elements:
+
+ if elem.type not in ('BALL', 'ELLIPSOID'):
+ continue # Not supported
+
+ loc = elem.location
+
+ stiffness= elem.stiffness
+ if elem.negative:
+ stiffness = -stiffness
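+ # POV blob components are written as 'sphere { <center>, radius, strength ... }';
+ # a negative strength subtracts from the blob's field.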
+
+ if elem.type == 'BALL':
+
+ file.write('\tsphere { <%.6g, %.6g, %.6g>, %.4g, %.4g ' % (loc.x, loc.y, loc.z, elem.radius, stiffness))
+
+ # After this we could do something simple like...
+ # "pigment {Blue} }"
+ # except we'll write the color
+
+ elif elem.type == 'ELLIPSOID':
+ # location is modified by scale
+ file.write('\tsphere { <%.6g, %.6g, %.6g>, %.4g, %.4g ' % (loc.x/elem.size_x, loc.y/elem.size_y, loc.z/elem.size_z, elem.radius, stiffness))
+ file.write( 'scale <%.6g, %.6g, %.6g> ' % (elem.size_x, elem.size_y, elem.size_z))
+
+ if material:
+ diffuse_color = material.diffuse_color
+
+ if material.transparency and material.transparency_method=='RAYTRACE': trans = 1-material.raytrace_transparency.filter
+ else: trans = 0.0
+
+ file.write(
+ 'pigment {rgbft<%.3g, %.3g, %.3g, %.3g, %.3g>} finish {%s} }\n' % \
+ (diffuse_color[0], diffuse_color[1], diffuse_color[2], 1-material.alpha, trans, materialNames[material.name])
+ )
+
+ else:
+ file.write('pigment {rgb<1 1 1>} finish {%s} }\n' % DEF_MAT_NAME) # Write the finish last.
+
+ writeObjectMaterial(material)
+
+ writeMatrix(ob.matrix)
+
+ file.write('}\n')
+
+ def exportMeshes(sel):
+
+ ob_num = 0
+
+ for ob in sel:
+ ob_num+= 1
+
+ if ob.type in ('LAMP', 'CAMERA', 'EMPTY', 'META'):
+ continue
+
+ me = ob.data
+ me_materials= me.materials
+
+ me = ob.create_mesh(True, 'RENDER')
+
+ if not me:
+ continue
+
+ if info_callback:
+ info_callback('Object %2.d of %2.d (%s)' % (ob_num, len(sel), ob.name))
+
+ #if ob.type!='MESH':
+ # continue
+ # me = ob.data
+
+ matrix = ob.matrix
+ try: uv_layer = me.active_uv_texture.data
+ except: uv_layer = None
+
+ try: vcol_layer = me.active_vertex_color.data
+ except: vcol_layer = None
+
+ faces_verts = [f.verts for f in me.faces]
+ faces_normals = [tuple(f.normal) for f in me.faces]
+ verts_normals = [tuple(v.normal) for v in me.verts]
+
+ # quads incur an extra face
+ quadCount = len([f for f in faces_verts if len(f)==4])
+
+ file.write('mesh2 {\n')
+ file.write('\tvertex_vectors {\n')
+ file.write('\t\t%s' % (len(me.verts))) # vert count
+ for v in me.verts:
+ file.write(',\n\t\t<%.6f, %.6f, %.6f>' % tuple(v.co)) # vert count
+ file.write('\n }\n')
+
+
+ # Build unique Normal list
+ uniqueNormals = {}
+ for fi, f in enumerate(me.faces):
+ fv = faces_verts[fi]
+ # [-1] is a dummy index, use a list so we can modify in place
+ if f.smooth: # Use vertex normals
+ for v in fv:
+ key = verts_normals[v]
+ uniqueNormals[key] = [-1]
+ else: # Use face normal
+ key = faces_normals[fi]
+ uniqueNormals[key] = [-1]
+
+ file.write('\tnormal_vectors {\n')
+ file.write('\t\t%d' % len(uniqueNormals)) # vert count
+ idx = 0
+ for no, index in uniqueNormals.items():
+ file.write(',\n\t\t<%.6f, %.6f, %.6f>' % no) # vert count
+ index[0] = idx
+ idx +=1
+ file.write('\n }\n')
+
+
+ # Vertex colours
+ vertCols = {} # Use for material colours also.
+
+ if uv_layer:
+ # Generate unique UV's
+ uniqueUVs = {}
+
+ for fi, uv in enumerate(uv_layer):
+
+ if len(faces_verts[fi])==4:
+ uvs = uv.uv1, uv.uv2, uv.uv3, uv.uv4
+ else:
+ uvs = uv.uv1, uv.uv2, uv.uv3
+
+ for uv in uvs:
+ uniqueUVs[tuple(uv)] = [-1]
+
+ file.write('\tuv_vectors {\n')
+ #print unique_uvs
+ file.write('\t\t%s' % (len(uniqueUVs))) # vert count
+ idx = 0
+ for uv, index in uniqueUVs.items():
+ file.write(',\n\t\t<%.6f, %.6f>' % uv)
+ index[0] = idx
+ idx +=1
+ '''
+ else:
+ # Just add 1 dummy vector, no real UV's
+ file.write('\t\t1') # vert count
+ file.write(',\n\t\t<0.0, 0.0>')
+ '''
+ file.write('\n }\n')
+
+
+ if me.vertex_colors:
+
+ for fi, f in enumerate(me.faces):
+ material_index = f.material_index
+ material = me_materials[material_index]
+
+ if material and material.vertex_color_paint:
+
+ col = vcol_layer[fi]
+
+ if len(faces_verts[fi])==4:
+ cols = col.color1, col.color2, col.color3, col.color4
+ else:
+ cols = col.color1, col.color2, col.color3
+
+ for col in cols:
+ key = col[0], col[1], col[2], material_index # Material index!
+ vertCols[key] = [-1]
+
+ else:
+ if material:
+ diffuse_color = tuple(material.diffuse_color)
+ key = diffuse_color[0], diffuse_color[1], diffuse_color[2], material_index
+ vertCols[key] = [-1]
+
+
+ else:
+ # No vertex colours, so write material colours as vertex colours
+ for i, material in enumerate(me_materials):
+
+ if material:
+ diffuse_color = tuple(material.diffuse_color)
+ key = diffuse_color[0], diffuse_color[1], diffuse_color[2], i # i == f.mat
+ vertCols[key] = [-1]
+
+
+ # Vert Colours
+ file.write('\ttexture_list {\n')
+ file.write('\t\t%s' % (len(vertCols))) # vert count
+ idx=0
+ for col, index in vertCols.items():
+
+ if me_materials:
+ material = me_materials[col[3]]
+ material_finish = materialNames[material.name]
+
+ if material.transparency and material.transparency_method=='RAYTRACE': trans = 1-material.raytrace_transparency.filter
+ else: trans = 0.0
+
+ else:
+ material = None
+ material_finish = DEF_MAT_NAME # not working properly,
+ trans = 0.0
+
+ #print material.apl
+ file.write( ',\n\t\ttexture { pigment {rgbft<%.3g, %.3g, %.3g, %.3g, %.3g>} finish {%s}}' %
+ (col[0], col[1], col[2], 1-material.alpha if material else 0.0, trans, material_finish) )
+
+ index[0] = idx
+ idx+=1
+
+ file.write( '\n }\n' )
+
+ # Face indices
+ file.write('\tface_indices {\n')
+ file.write('\t\t%d' % (len(me.faces) + quadCount)) # faces count
+ for fi, f in enumerate(me.faces):
+ fv = faces_verts[fi]
+ material_index= f.material_index
+ if len(fv) == 4: indicies = (0,1,2), (0,2,3)
+ else: indicies = ((0,1,2),)
+
+ if vcol_layer:
+ col = vcol_layer[fi]
+
+ if len(fv) == 4:
+ cols = col.color1, col.color2, col.color3, col.color4
+ else:
+ cols = col.color1, col.color2, col.color3
+
+
+ if not me_materials or me_materials[material_index] == None: # No materials
+ for i1, i2, i3 in indicies:
+ file.write(',\n\t\t<%d,%d,%d>' % (fv[i1], fv[i2], fv[i3])) # vert count
+ else:
+ material = me_materials[material_index]
+ for i1, i2, i3 in indicies:
+ if me.vertex_colors and material.vertex_color_paint:
+ # Colour per vertex - vertex colour
+
+ col1 = cols[i1]
+ col2 = cols[i2]
+ col3 = cols[i3]
+
+ ci1 = vertCols[col1[0], col1[1], col1[2], material_index][0]
+ ci2 = vertCols[col2[0], col2[1], col2[2], material_index][0]
+ ci3 = vertCols[col3[0], col3[1], col3[2], material_index][0]
+ else:
+ # Colour per material - flat material colour
+ diffuse_color= material.diffuse_color
+ ci1 = ci2 = ci3 = vertCols[diffuse_color[0], diffuse_color[1], diffuse_color[2], f.material_index][0]
+
+ file.write(',\n\t\t<%d,%d,%d>, %d,%d,%d' % (fv[i1], fv[i2], fv[i3], ci1, ci2, ci3)) # vert count
+
+
+ file.write('\n }\n')
+
+ # normal indices
+ file.write('\tnormal_indices {\n')
+ file.write('\t\t%d' % (len(me.faces) + quadCount)) # faces count
+ for fi, f in enumerate(me.faces):
+ fv = faces_verts[fi]
+
+ if len(fv) == 4: indicies = (0,1,2), (0,2,3)
+ else: indicies = ((0,1,2),)
+
+ for i1, i2, i3 in indicies:
+ if f.smooth: # use the per-vertex normals gathered earlier
+ file.write(',\n\t\t<%d,%d,%d>' %\
+ (uniqueNormals[verts_normals[fv[i1]]][0],\
+ uniqueNormals[verts_normals[fv[i2]]][0],\
+ uniqueNormals[verts_normals[fv[i3]]][0])) # vert count
+ else:
+ idx = uniqueNormals[faces_normals[fi]][0]
+ file.write(',\n\t\t<%d,%d,%d>' % (idx, idx, idx)) # vert count
+
+ file.write('\n }\n')
+
+ if uv_layer:
+ file.write('\tuv_indices {\n')
+ file.write('\t\t%d' % (len(me.faces) + quadCount)) # faces count
+ for fi, fv in enumerate(faces_verts):
+
+ if len(fv) == 4: indicies = (0,1,2), (0,2,3)
+ else: indicies = ((0,1,2),)
+
+ uv = uv_layer[fi]
+ if len(faces_verts[fi])==4:
+ uvs = tuple(uv.uv1), tuple(uv.uv2), tuple(uv.uv3), tuple(uv.uv4)
+ else:
+ uvs = tuple(uv.uv1), tuple(uv.uv2), tuple(uv.uv3)
+
+ for i1, i2, i3 in indicies:
+ file.write(',\n\t\t<%d,%d,%d>' %\
+ (uniqueUVs[uvs[i1]][0],\
+ uniqueUVs[uvs[i2]][0],\
+ uniqueUVs[uvs[i3]][0]))
+ file.write('\n }\n')
+
+ if me.materials:
+ material = me.materials[0] # dodgy
+ writeObjectMaterial(material)
+
+ writeMatrix(matrix)
+ file.write('}\n')
+
+ bpy.data.remove_mesh(me)
+
+ def exportWorld(world):
+ if not world:
+ return
+
+ mist = world.mist
+
+ if mist.enabled:
+ file.write('fog {\n')
+ file.write('\tdistance %.6f\n' % mist.depth)
+ file.write('\tcolor rgbt<%.3g, %.3g, %.3g, %.3g>\n' % (tuple(world.horizon_color) + (1-mist.intensity,)))
+ #file.write('\tfog_offset %.6f\n' % mist.start)
+ #file.write('\tfog_alt 5\n')
+ #file.write('\tturbulence 0.2\n')
+ #file.write('\tturb_depth 0.3\n')
+ file.write('\tfog_type 1\n')
+ file.write('}\n')
+
+ def exportGlobalSettings(scene):
+
+ file.write('global_settings {\n')
+
+ if scene.pov_radio_enable:
+ file.write('\tradiosity {\n')
+ file.write("\t\tadc_bailout %.4g\n" % scene.pov_radio_adc_bailout)
+ file.write("\t\talways_sample %d\n" % scene.pov_radio_always_sample)
+ file.write("\t\tbrightness %.4g\n" % scene.pov_radio_brightness)
+ file.write("\t\tcount %d\n" % scene.pov_radio_count)
+ file.write("\t\terror_bound %.4g\n" % scene.pov_radio_error_bound)
+ file.write("\t\tgray_threshold %.4g\n" % scene.pov_radio_gray_threshold)
+ file.write("\t\tlow_error_factor %.4g\n" % scene.pov_radio_low_error_factor)
+ file.write("\t\tmedia %d\n" % scene.pov_radio_media)
+ file.write("\t\tminimum_reuse %.4g\n" % scene.pov_radio_minimum_reuse)
+ file.write("\t\tnearest_count %d\n" % scene.pov_radio_nearest_count)
+ file.write("\t\tnormal %d\n" % scene.pov_radio_normal)
+ file.write("\t\trecursion_limit %d\n" % scene.pov_radio_recursion_limit)
+ file.write('\t}\n')
+
+ if world:
+ file.write("\tambient_light rgb<%.3g, %.3g, %.3g>\n" % tuple(world.ambient_color))
+
+ file.write('}\n')
+
+
+ # Convert all materials to strings we can access directly per vertex.
+ writeMaterial(None) # default material
+
+ for material in bpy.data.materials:
+ writeMaterial(material)
+
+ exportCamera()
+ #exportMaterials()
+ sel = scene.objects
+ exportLamps([l for l in sel if l.type == 'LAMP'])
+ exportMeta([l for l in sel if l.type == 'META'])
+ exportMeshes(sel)
+ exportWorld(scene.world)
+ exportGlobalSettings(scene)
+
+ file.close()
+
+def write_pov_ini(filename_ini, filename_pov, filename_image):
+ scene = bpy.data.scenes[0]
+ render = scene.render_data
+
+ x= int(render.resolution_x*render.resolution_percentage*0.01)
+ y= int(render.resolution_y*render.resolution_percentage*0.01)
+
+ file = open(filename_ini, 'w')
+
+ file.write('Input_File_Name="%s"\n' % filename_pov)
+ file.write('Output_File_Name="%s"\n' % filename_image)
+
+ file.write('Width=%d\n' % x)
+ file.write('Height=%d\n' % y)
+
+ # Needed for border render.
+ '''
+ file.write('Start_Column=%d\n' % part.x)
+ file.write('End_Column=%d\n' % (part.x+part.w))
+
+ file.write('Start_Row=%d\n' % (part.y))
+ file.write('End_Row=%d\n' % (part.y+part.h))
+ '''
+
+ file.write('Display=0\n')
+ file.write('Pause_When_Done=0\n')
+ file.write('Output_File_Type=T\n') # TGA, best progressive loading
+ file.write('Output_Alpha=1\n')
+
+ if render.antialiasing:
+ aa_mapping = {'OVERSAMPLE_5':2, 'OVERSAMPLE_8':3, 'OVERSAMPLE_11':4, 'OVERSAMPLE_16':5} # method 1 assumed
+ file.write('Antialias=1\n')
+ file.write('Antialias_Depth=%d\n' % aa_mapping[render.antialiasing_samples])
+ else:
+ file.write('Antialias=0\n')
+
+ file.close()
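+# Example use (hypothetical paths), mirroring what PovrayRender does below:
+# write_pov('/tmp/test.pov', bpy.data.scenes[0])
+# write_pov_ini('/tmp/test.ini', '/tmp/test.pov', '/tmp/test.tga')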
+
+# Radiosity panel, use in the scene for now.
+FloatProperty= bpy.types.Scene.FloatProperty
+IntProperty= bpy.types.Scene.IntProperty
+BoolProperty= bpy.types.Scene.BoolProperty
+
+# Not a real pov option, just to know if we should write
+BoolProperty( attr="pov_radio_enable",
+ name="Enable Radiosity",
+ description="Enable povrays radiosity calculation.",
+ default= False)
+BoolProperty( attr="pov_radio_display_advanced",
+ name="Advanced Options",
+ description="Show advanced options.",
+ default= False)
+
+# Real pov options
+FloatProperty( attr="pov_radio_adc_bailout",
+ name="ADC Bailout",
+ description="The adc_bailout for radiosity rays. Use adc_bailout = 0.01 / brightest_ambient_object for good results.",
+ min=0.0, max=1000.0, soft_min=0.0, soft_max=1.0, default= 0.01)
+
+BoolProperty( attr="pov_radio_always_sample",
+ name="Always Sample",
+ description="Only use the data from the pretrace step and not gather any new samples during the final radiosity pass..",
+ default= True)
+
+FloatProperty( attr="pov_radio_brightness",
+ name="Brightness",
+ description="Ammount objects are brightened before being returned upwards to the rest of the system.",
+ min=0.0, max=1000.0, soft_min=0.0, soft_max=10.0, default= 1.0)
+
+IntProperty( attr="pov_radio_count",
+ name="Ray Count",
+ description="number of rays that are sent out whenever a new radiosity value has to be calculated.",
+ min=1, max=1600, default= 35)
+
+FloatProperty( attr="pov_radio_error_bound",
+ name="Error Bound",
+ description="one of the two main speed/quality tuning values, lower values are more accurate.",
+ min=0.0, max=1000.0, soft_min=0.1, soft_max=10.0, default= 1.8)
+
+FloatProperty( attr="pov_radio_gray_threshold",
+ name="Gray Threshold",
+ description="one of the two main speed/quality tuning values, lower values are more accurate.",
+ min=0.0, max=1.0, soft_min=0, soft_max=1, default= 0.0)
+
+FloatProperty( attr="pov_radio_low_error_factor",
+ name="Low Error Factor",
+ description="If you calculate just enough samples, but no more, you will get an image which has slightly blotchy lighting.",
+ min=0.0, max=1.0, soft_min=0.0, soft_max=1.0, default= 0.5)
+
+# max_sample - not available yet
+BoolProperty( attr="pov_radio_media",
+ name="Media",
+ description="Radiosity estimation can be affected by media.",
+ default= False)
+
+FloatProperty( attr="pov_radio_minimum_reuse",
+ name="Minimum Reuse",
+ description="Fraction of the screen width which sets the minimum radius of reuse for each sample point (At values higher than 2% expect errors).",
+ min=0.0, max=1.0, soft_min=0.1, soft_max=0.1, default= 0.015)
+
+IntProperty( attr="pov_radio_nearest_count",
+ name="Nearest Count",
+ description="Number of old ambient values blended together to create a new interpolated value.",
+ min=1, max=20, default= 5)
+
+BoolProperty( attr="pov_radio_normal",
+ name="Normals",
+ description="Radiosity estimation can be affected by normals.",
+ default= False)
+
+IntProperty( attr="pov_radio_recursion_limit",
+ name="Recursion Limit",
+ description="how many recursion levels are used to calculate the diffuse inter-reflection.",
+ min=1, max=20, default= 3)
+
+
+class PovrayRender(bpy.types.RenderEngine):
+ __idname__ = 'POVRAY_RENDER'
+ __label__ = "Povray"
+ DELAY = 0.02
+
+ def _export(self, scene):
+ import tempfile
+
+ self.temp_file_in = tempfile.mktemp(suffix='.pov')
+ self.temp_file_out = tempfile.mktemp(suffix='.tga')
+ self.temp_file_ini = tempfile.mktemp(suffix='.ini')
+ '''
+ self.temp_file_in = '/test.pov'
+ self.temp_file_out = '/test.tga'
+ self.temp_file_ini = '/test.ini'
+ '''
+
+ def info_callback(txt):
+ self.update_stats("", "POVRAY: " + txt)
+
+ write_pov(self.temp_file_in, scene, info_callback)
+
+ def _render(self):
+
+ try: os.remove(self.temp_file_out) # so as not to load the old file
+ except: pass
+
+ write_pov_ini(self.temp_file_ini, self.temp_file_in, self.temp_file_out)
+
+ print ("***-STARTING-***")
+
+ pov_binary = "povray"
+
+ if sys.platform=='win32':
+ import winreg
+ regKey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, 'Software\\POV-Ray\\v3.6\\Windows')
+
+ if bitness == 64:
+ pov_binary = winreg.QueryValueEx(regKey, 'Home')[0] + '\\bin\\pvengine64'
+ else:
+ pov_binary = winreg.QueryValueEx(regKey, 'Home')[0] + '\\bin\\pvengine'
+
+ if 1:
+ self.process = subprocess.Popen([pov_binary, self.temp_file_ini]) # stdout=subprocess.PIPE, stderr=subprocess.PIPE
+ else:
+ # This works too but means we have to wait until its done
+ os.system('%s %s' % (pov_binary, self.temp_file_ini))
+
+ print ("***-DONE-***")
+
+ def _cleanup(self):
+ for f in (self.temp_file_in, self.temp_file_ini, self.temp_file_out):
+ try: os.remove(f)
+ except: pass
+
+ self.update_stats("", "")
+
+ def render(self, scene):
+
+ self.update_stats("", "POVRAY: Exporting data from Blender")
+ self._export(scene)
+ self.update_stats("", "POVRAY: Parsing File")
+ self._render()
+
+ r = scene.render_data
+
+ # compute resolution
+ x= int(r.resolution_x*r.resolution_percentage*0.01)
+ y= int(r.resolution_y*r.resolution_percentage*0.01)
+
+ # Wait for the file to be created
+ while not os.path.exists(self.temp_file_out):
+ if self.test_break():
+ try: self.process.terminate()
+ except: pass
+ break
+
+ if self.process.poll() != None:
+ self.update_stats("", "POVRAY: Failed")
+ break
+
+ time.sleep(self.DELAY)
+
+ if os.path.exists(self.temp_file_out):
+
+ self.update_stats("", "POVRAY: Rendering")
+
+ prev_size = -1
+
+ def update_image():
+ result = self.begin_result(0, 0, x, y)
+ lay = result.layers[0]
+ # possibly the image won't load early on.
+ try: lay.load_from_file(self.temp_file_out)
+ except: pass
+ self.end_result(result)
+
+ # Update while povray renders
+ while True:
+
+ # check if the povray process has finished
+ if self.process.poll() != None:
+ update_image()
+ break
+
+ # user exit
+ if self.test_break():
+ try: self.process.terminate()
+ except: pass
+
+ break
+
+ # Would be nice to redirect the output
+ # stdout_value, stderr_value = self.process.communicate() # locks
+
+
+ # check if the file updated
+ new_size = os.path.getsize(self.temp_file_out)
+
+ if new_size != prev_size:
+ update_image()
+ prev_size = new_size
+
+ time.sleep(self.DELAY)
+
+ self._cleanup()
+
+bpy.types.register(PovrayRender)
+
+# Use some of the existing buttons.
+import buttons_scene
+buttons_scene.SCENE_PT_render.COMPAT_ENGINES.add('POVRAY_RENDER')
+buttons_scene.SCENE_PT_dimensions.COMPAT_ENGINES.add('POVRAY_RENDER')
+buttons_scene.SCENE_PT_antialiasing.COMPAT_ENGINES.add('POVRAY_RENDER')
+buttons_scene.SCENE_PT_output.COMPAT_ENGINES.add('POVRAY_RENDER')
+del buttons_scene
+
+# Use only a subset of the world panels
+import buttons_world
+buttons_world.WORLD_PT_preview.COMPAT_ENGINES.add('POVRAY_RENDER')
+buttons_world.WORLD_PT_context_world.COMPAT_ENGINES.add('POVRAY_RENDER')
+buttons_world.WORLD_PT_world.COMPAT_ENGINES.add('POVRAY_RENDER')
+buttons_world.WORLD_PT_mist.COMPAT_ENGINES.add('POVRAY_RENDER')
+del buttons_world
+
+# Example of wrapping every class 'as is'
+import buttons_material
+for member in dir(buttons_material):
+ subclass = getattr(buttons_material, member)
+ try: subclass.COMPAT_ENGINES.add('POVRAY_RENDER')
+ except: pass
+del buttons_material
+
+class RenderButtonsPanel(bpy.types.Panel):
+ __space_type__ = 'PROPERTIES'
+ __region_type__ = 'WINDOW'
+ __context__ = "scene"
+ # COMPAT_ENGINES must be defined in each subclass, external engines can add themselves here
+
+ def poll(self, context):
+ rd = context.scene.render_data
+ return (rd.use_game_engine==False) and (rd.engine in self.COMPAT_ENGINES)
+
+class SCENE_PT_povray_radiosity(RenderButtonsPanel):
+ __label__ = "Radiosity"
+ COMPAT_ENGINES = set(['POVRAY_RENDER'])
+
+ def draw_header(self, context):
+ scene = context.scene
+
+ self.layout.itemR(scene, "pov_radio_enable", text="")
+
+ def draw(self, context):
+ layout = self.layout
+
+ scene = context.scene
+ rd = scene.render_data
+
+ layout.active = scene.pov_radio_enable
+
+ split = layout.split()
+
+ col = split.column()
+ col.itemR(scene, "pov_radio_count", text="Rays")
+ col.itemR(scene, "pov_radio_recursion_limit", text="Recursions")
+ col = split.column()
+ col.itemR(scene, "pov_radio_error_bound", text="Error")
+
+ layout.itemR(scene, "pov_radio_display_advanced")
+
+ if scene.pov_radio_display_advanced:
+ split = layout.split()
+
+ col = split.column()
+ col.itemR(scene, "pov_radio_adc_bailout", slider=True)
+ col.itemR(scene, "pov_radio_gray_threshold", slider=True)
+ col.itemR(scene, "pov_radio_low_error_factor", slider=True)
+
+ col = split.column()
+ col.itemR(scene, "pov_radio_brightness")
+ col.itemR(scene, "pov_radio_minimum_reuse", text="Min Reuse")
+ col.itemR(scene, "pov_radio_nearest_count")
+
+ split = layout.split()
+
+ col = split.column()
+ col.itemL(text="Estimation Influence:")
+ col.itemR(scene, "pov_radio_media")
+ col.itemR(scene, "pov_radio_normal")
+
+ col = split.column()
+ col.itemR(scene, "pov_radio_always_sample")
+
+bpy.types.register(SCENE_PT_povray_radiosity)
diff --git a/release/scripts/io/export_3ds.py b/release/scripts/io/export_3ds.py
new file mode 100644
index 00000000000..2c1999c3d45
--- /dev/null
+++ b/release/scripts/io/export_3ds.py
@@ -0,0 +1,1130 @@
+#!BPY
+# coding: utf-8
+"""
+Name: '3D Studio (.3ds)...'
+Blender: 243
+Group: 'Export'
+Tooltip: 'Export to 3DS file format (.3ds).'
+"""
+
+__author__ = ["Campbell Barton", "Bob Holcomb", "Richard Lärkäng", "Damien McGinnes", "Mark Stijnman"]
+__url__ = ("blenderartists.org", "www.blender.org", "www.gametutorials.com", "lib3ds.sourceforge.net/")
+__version__ = "0.90a"
+__bpydoc__ = """\
+
+3ds Exporter
+
+This script exports a 3ds file.
+
+Exporting is based on the 3ds loader from www.gametutorials.com (thanks DigiBen) and on information
+from the lib3ds project (http://lib3ds.sourceforge.net/) source code.
+"""
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# Script copyright (C) Bob Holcomb
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENSE BLOCK *****
+# --------------------------------------------------------------------------
+
+
+######################################################
+# Importing modules
+######################################################
+
+import struct
+import os
+import time
+
+import bpy
+
+# import Blender
+# from BPyMesh import getMeshFromObject
+# from BPyObject import getDerivedObjects
+# try:
+# import struct
+# except:
+# struct = None
+
+# also used by X3D exporter
+# return a tuple (free, object list), free is True if memory should be freed later with free_derived_objects()
+def create_derived_objects(ob):
+ if ob.parent and ob.parent.dupli_type != 'NONE':
+ return False, None
+
+ if ob.dupli_type != 'NONE':
+ ob.create_dupli_list()
+ return True, [(dob.object, dob.matrix) for dob in ob.dupli_list]
+ else:
+ return False, [(ob, ob.matrix)]
+
+# also used by X3D exporter
+def free_derived_objects(ob):
+ ob.free_dupli_list()
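+# Typical use, as in save_3ds() below:
+# free, derived = create_derived_objects(ob)
+# if derived is not None:
+# ... # export each (object, matrix) pair
+# if free:
+# free_derived_objects(ob)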
+
+# So 3ds Max can open files, limit names to 12 characters in length.
+# This is very annoying for filenames!
+name_unique = []
+name_mapping = {}
+def sane_name(name):
+ name_fixed = name_mapping.get(name)
+ if name_fixed != None:
+ return name_fixed
+
+ if len(name) > 12:
+ new_name = name[:12]
+ else:
+ new_name = name
+
+ i = 0
+
+ while new_name in name_unique:
+ new_name = new_name[:-4] + '.%.3d' % i
+ i+=1
+
+ name_unique.append(new_name)
+ name_mapping[name] = new_name
+ return new_name
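+# e.g. sane_name('MyLongMaterialName') gives 'MyLongMateri'; a second name
+# sharing those first 12 characters then becomes 'MyLongMa.000'.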
+
+######################################################
+# Data Structures
+######################################################
+
+#Some of the chunks that we will export
+#----- Primary Chunk, at the beginning of each file
+PRIMARY= int("0x4D4D",16)
+
+#------ Main Chunks
+OBJECTINFO = int("0x3D3D",16); #This gives the version of the mesh and is found right before the material and object information
+VERSION = int("0x0002",16); #This gives the version of the .3ds file
+KFDATA = int("0xB000",16); #This is the header for all of the key frame info
+
+#------ sub defines of OBJECTINFO
+MATERIAL=45055 #0xAFFF // This stores the texture info
+OBJECT=16384 #0x4000 // This stores the faces, vertices, etc...
+
+#>------ sub defines of MATERIAL
+MATNAME = int("0xA000",16); # This holds the material name
+MATAMBIENT = int("0xA010",16); # Ambient color of the object/material
+MATDIFFUSE = int("0xA020",16); # This holds the color of the object/material
+MATSPECULAR = int("0xA030",16); # Specular color of the object/material
+MATSHINESS = int("0xA040",16); # ??
+MATMAP = int("0xA200",16); # This is a header for a new material
+MATMAPFILE = int("0xA300",16); # This holds the file name of the texture
+
+RGB1= int("0x0011",16)
+RGB2= int("0x0012",16)
+
+#>------ sub defines of OBJECT
+OBJECT_MESH = int("0x4100",16); # This lets us know that we are reading a new object
+OBJECT_LIGHT = int("0x4600",16); # This lets us know we are reading a light object
+OBJECT_CAMERA= int("0x4700",16); # This lets us know we are reading a camera object
+
+#>------ sub defines of CAMERA
+OBJECT_CAM_RANGES= int("0x4720",16); # The camera range values
+
+#>------ sub defines of OBJECT_MESH
+OBJECT_VERTICES = int("0x4110",16); # The objects vertices
+OBJECT_FACES = int("0x4120",16); # The objects faces
+OBJECT_MATERIAL = int("0x4130",16); # This is found if the object has a material, either texture map or color
+OBJECT_UV = int("0x4140",16); # The UV texture coordinates
+OBJECT_TRANS_MATRIX = int("0x4160",16); # The Object Matrix
+
+#>------ sub defines of KFDATA
+KFDATA_KFHDR = int("0xB00A",16);
+KFDATA_KFSEG = int("0xB008",16);
+KFDATA_KFCURTIME = int("0xB009",16);
+KFDATA_OBJECT_NODE_TAG = int("0xB002",16);
+
+#>------ sub defines of OBJECT_NODE_TAG
+OBJECT_NODE_ID = int("0xB030",16);
+OBJECT_NODE_HDR = int("0xB010",16);
+OBJECT_PIVOT = int("0xB013",16);
+OBJECT_INSTANCE_NAME = int("0xB011",16);
+POS_TRACK_TAG = int("0xB020",16);
+ROT_TRACK_TAG = int("0xB021",16);
+SCL_TRACK_TAG = int("0xB022",16);
+
+def uv_key(uv):
+ return round(uv[0], 6), round(uv[1], 6)
+# return round(uv.x, 6), round(uv.y, 6)
+
+# size defines:
+SZ_SHORT = 2
+SZ_INT = 4
+SZ_FLOAT = 4
+
+class _3ds_short(object):
+ '''Class representing a short (2-byte integer) for a 3ds file.
+ *** This looks like an unsigned short; H is unsigned in the struct docs - Cam ***'''
+ __slots__ = 'value'
+ def __init__(self, val=0):
+ self.value=val
+
+ def get_size(self):
+ return SZ_SHORT
+
+ def write(self,file):
+ file.write(struct.pack("<H", self.value))
+
+ def __str__(self):
+ return str(self.value)
+
+class _3ds_int(object):
+ '''Class representing an int (4-byte integer) for a 3ds file.'''
+ __slots__ = 'value'
+ def __init__(self, val=0):
+ self.value=val
+
+ def get_size(self):
+ return SZ_INT
+
+ def write(self,file):
+ file.write(struct.pack("<I", self.value))
+
+ def __str__(self):
+ return str(self.value)
+
+class _3ds_float(object):
+ '''Class representing a 4-byte IEEE floating point number for a 3ds file.'''
+ __slots__ = 'value'
+ def __init__(self, val=0.0):
+ self.value=val
+
+ def get_size(self):
+ return SZ_FLOAT
+
+ def write(self,file):
+ file.write(struct.pack("<f", self.value))
+
+ def __str__(self):
+ return str(self.value)
+
+
+class _3ds_string(object):
+ '''Class representing a zero-terminated string for a 3ds file.'''
+ __slots__ = 'value'
+ def __init__(self, val=""):
+ self.value=val
+
+ def get_size(self):
+ return (len(self.value)+1)
+
+ def write(self,file):
+ binary_format = "<%ds" % (len(self.value)+1)
+ file.write(struct.pack(binary_format, self.value))
+
+ def __str__(self):
+ return self.value
+
+class _3ds_point_3d(object):
+ '''Class representing a three-dimensional point for a 3ds file.'''
+ __slots__ = 'x','y','z'
+ def __init__(self, point=(0.0,0.0,0.0)):
+ self.x, self.y, self.z = point
+
+ def get_size(self):
+ return 3*SZ_FLOAT
+
+ def write(self,file):
+ file.write(struct.pack('<3f', self.x, self.y, self.z))
+
+ def __str__(self):
+ return '(%f, %f, %f)' % (self.x, self.y, self.z)
+
+# Used for writing a track
+"""
+class _3ds_point_4d(object):
+ '''Class representing a four-dimensional point for a 3ds file, for instance a quaternion.'''
+ __slots__ = 'x','y','z','w'
+ def __init__(self, point=(0.0,0.0,0.0,0.0)):
+ self.x, self.y, self.z, self.w = point
+
+ def get_size(self):
+ return 4*SZ_FLOAT
+
+ def write(self,file):
+ data=struct.pack('<4f', self.x, self.y, self.z, self.w)
+ file.write(data)
+
+ def __str__(self):
+ return '(%f, %f, %f, %f)' % (self.x, self.y, self.z, self.w)
+"""
+
+class _3ds_point_uv(object):
+ '''Class representing a UV-coordinate for a 3ds file.'''
+ __slots__ = 'uv'
+ def __init__(self, point=(0.0,0.0)):
+ self.uv = point
+
+ def __eq__(self, other):
+ # Python 3 removed cmp()/__cmp__; plain equality is all that's needed here.
+ return self.uv == other.uv
+
+ def get_size(self):
+ return 2*SZ_FLOAT
+
+ def write(self,file):
+ data=struct.pack('<2f', self.uv[0], self.uv[1])
+ file.write(data)
+
+ def __str__(self):
+ return '(%g, %g)' % self.uv
+
+class _3ds_rgb_color(object):
+ '''Class representing a (24-bit) rgb color for a 3ds file.'''
+ __slots__ = 'r','g','b'
+ def __init__(self, col=(0,0,0)):
+ self.r, self.g, self.b = col
+
+ def get_size(self):
+ return 3
+
+ def write(self,file):
+ file.write( struct.pack('<3B', int(255*self.r), int(255*self.g), int(255*self.b) ) )
+# file.write( struct.pack('<3c', chr(int(255*self.r)), chr(int(255*self.g)), chr(int(255*self.b)) ) )
+
+ def __str__(self):
+ return '{%f, %f, %f}' % (self.r, self.g, self.b)
+
+class _3ds_face(object):
+ '''Class representing a face for a 3ds file.'''
+ __slots__ = 'vindex'
+ def __init__(self, vindex):
+ self.vindex = vindex
+
+ def get_size(self):
+ return 4*SZ_SHORT
+
+ def write(self,file):
+ # The last zero is only used by 3d studio
+ file.write(struct.pack("<4H", self.vindex[0],self.vindex[1], self.vindex[2], 0))
+
+ def __str__(self):
+ return '[%d %d %d]' % (self.vindex[0],self.vindex[1], self.vindex[2])
+
+class _3ds_array(object):
+ '''Class representing an array of variables for a 3ds file.
+
+ Consists of a _3ds_short to indicate the number of items, followed by the items themselves.
+ '''
+ __slots__ = 'values', 'size'
+ def __init__(self):
+ self.values=[]
+ self.size=SZ_SHORT
+
+ # add an item:
+ def add(self,item):
+ self.values.append(item)
+ self.size+=item.get_size()
+
+ def get_size(self):
+ return self.size
+
+ def write(self,file):
+ _3ds_short(len(self.values)).write(file)
+ #_3ds_int(len(self.values)).write(file)
+ for value in self.values:
+ value.write(file)
+
+ # To not overwhelm the output in a dump, a _3ds_array only
+ # outputs the number of items, not all of the actual items.
+ def __str__(self):
+ return '(%d items)' % len(self.values)
+
+class _3ds_named_variable(object):
+ '''Convenience class for named variables.'''
+
+ __slots__ = 'value', 'name'
+ def __init__(self, name, val=None):
+ self.name=name
+ self.value=val
+
+ def get_size(self):
+ if (self.value==None):
+ return 0
+ else:
+ return self.value.get_size()
+
+ def write(self, file):
+ if (self.value!=None):
+ self.value.write(file)
+
+ def dump(self,indent):
+ if (self.value!=None):
+ spaces=""
+ for i in range(indent):
+ spaces+=" ";
+ if (self.name!=""):
+ print(spaces, self.name, " = ", self.value)
+ else:
+ print(spaces, "[unnamed]", " = ", self.value)
+
+
+#the chunk class
+class _3ds_chunk(object):
+ '''Class representing a chunk in a 3ds file.
+
+ Chunks contain zero or more variables, followed by zero or more subchunks.
+ '''
+ __slots__ = 'ID', 'size', 'variables', 'subchunks'
+ def __init__(self, id=0):
+ self.ID=_3ds_short(id)
+ self.size=_3ds_int(0)
+ self.variables=[]
+ self.subchunks=[]
+
+ def set_ID(self, id):
+ self.ID=_3ds_short(id)
+
+ def add_variable(self, name, var):
+ '''Add a named variable.
+
+ The name is mostly for debugging purposes.'''
+ self.variables.append(_3ds_named_variable(name,var))
+
+ def add_subchunk(self, chunk):
+ '''Add a subchunk.'''
+ self.subchunks.append(chunk)
+
+ def get_size(self):
+ '''Calculate the size of the chunk and return it.
+
+ The sizes of the variables and subchunks are used to determine this chunk\'s size.'''
+ tmpsize=self.ID.get_size()+self.size.get_size()
+ for variable in self.variables:
+ tmpsize+=variable.get_size()
+ for subchunk in self.subchunks:
+ tmpsize+=subchunk.get_size()
+ self.size.value=tmpsize
+ return self.size.value
+
+ def write(self, file):
+ '''Write the chunk to a file.
+
+ Uses the write function of the variables and the subchunks to do the actual work.'''
+ #write header
+ self.ID.write(file)
+ self.size.write(file)
+ for variable in self.variables:
+ variable.write(file)
+ for subchunk in self.subchunks:
+ subchunk.write(file)
+
+
+ def dump(self, indent=0):
+ '''Dump the chunk to the standard output.
+
+ Dump is used for debugging purposes, to dump the contents of a chunk to the standard output.
+ Uses the dump function of the named variables and the subchunks to do the actual work.'''
+ spaces=""
+ for i in range(indent):
+ spaces+=" ";
+ print(spaces, "ID=", hex(self.ID.value), "size=", self.get_size())
+ for variable in self.variables:
+ variable.dump(indent+1)
+ for subchunk in self.subchunks:
+ subchunk.dump(indent+1)
+
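+# Illustration of how these classes nest (mirrors the start of save_3ds below):
+# primary = _3ds_chunk(PRIMARY)
+# version_chunk = _3ds_chunk(VERSION)
+# version_chunk.add_variable("version", _3ds_int(3))
+# primary.add_subchunk(version_chunk)
+# primary.get_size() # 16: two 6-byte chunk headers plus one 4-byte int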
+
+
+######################################################
+# EXPORT
+######################################################
+
+def get_material_images(material):
+ # blender utility func.
+ if material:
+ return [s.texture.image for s in material.textures if s and s.texture.type == 'IMAGE' and s.texture.image]
+
+ return []
+# images = []
+# if material:
+# for mtex in material.getTextures():
+# if mtex and mtex.tex.type == Blender.Texture.Types.IMAGE:
+# image = mtex.tex.image
+# if image:
+# images.append(image) # may want to include info like diffuse, spec here.
+# return images
+
+
+def make_material_subchunk(id, color):
+ '''Make a material subchunk.
+
+ Used for color subchunks, such as diffuse color or ambient color subchunks.'''
+ mat_sub = _3ds_chunk(id)
+ col1 = _3ds_chunk(RGB1)
+ col1.add_variable("color1", _3ds_rgb_color(color));
+ mat_sub.add_subchunk(col1)
+# optional:
+# col2 = _3ds_chunk(RGB1)
+# col2.add_variable("color2", _3ds_rgb_color(color));
+# mat_sub.add_subchunk(col2)
+ return mat_sub
+
+
+def make_material_texture_chunk(id, images):
+ """Make Material Map texture chunk
+ """
+ mat_sub = _3ds_chunk(id)
+
+ def add_image(img):
+ filename = os.path.basename(img.filename)
+# filename = image.filename.split('\\')[-1].split('/')[-1]
+ mat_sub_file = _3ds_chunk(MATMAPFILE)
+ mat_sub_file.add_variable("mapfile", _3ds_string(sane_name(filename)))
+ mat_sub.add_subchunk(mat_sub_file)
+
+ for image in images:
+ add_image(image)
+
+ return mat_sub
+
+def make_material_chunk(material, image):
+ '''Make a material chunk out of a blender material.'''
+ material_chunk = _3ds_chunk(MATERIAL)
+ name = _3ds_chunk(MATNAME)
+
+ if material: name_str = material.name
+ else: name_str = 'None'
+ if image: name_str += image.name
+
+ name.add_variable("name", _3ds_string(sane_name(name_str)))
+ material_chunk.add_subchunk(name)
+
+ if not material:
+ material_chunk.add_subchunk(make_material_subchunk(MATAMBIENT, (0,0,0) ))
+ material_chunk.add_subchunk(make_material_subchunk(MATDIFFUSE, (.8, .8, .8) ))
+ material_chunk.add_subchunk(make_material_subchunk(MATSPECULAR, (1,1,1) ))
+
+ else:
+ material_chunk.add_subchunk(make_material_subchunk(MATAMBIENT, [a*material.ambient for a in material.diffuse_color] ))
+# material_chunk.add_subchunk(make_material_subchunk(MATAMBIENT, [a*material.amb for a in material.rgbCol] ))
+ material_chunk.add_subchunk(make_material_subchunk(MATDIFFUSE, material.diffuse_color))
+# material_chunk.add_subchunk(make_material_subchunk(MATDIFFUSE, material.rgbCol))
+ material_chunk.add_subchunk(make_material_subchunk(MATSPECULAR, material.specular_color))
+# material_chunk.add_subchunk(make_material_subchunk(MATSPECULAR, material.specCol))
+
+ images = get_material_images(material) # can be None
+ if image: images.append(image)
+
+ if images:
+ material_chunk.add_subchunk(make_material_texture_chunk(MATMAP, images))
+
+ return material_chunk
+
+class tri_wrapper(object):
+ '''Class representing a triangle.
+
+ Used when converting faces to triangles'''
+
+ __slots__ = 'vertex_index', 'mat', 'image', 'faceuvs', 'offset'
+ def __init__(self, vindex=(0,0,0), mat=None, image=None, faceuvs=None):
+ self.vertex_index= vindex
+ self.mat= mat
+ self.image= image
+ self.faceuvs= faceuvs
+ self.offset= [0, 0, 0] # offset indices
+
+
+def extract_triangles(mesh):
+ '''Extract triangles from a mesh.
+
+ If the mesh contains quads, they will be split into triangles.'''
+ tri_list = []
+ do_uv = len(mesh.uv_textures)
+# do_uv = mesh.faceUV
+
+# if not do_uv:
+# face_uv = None
+
+ img = None
+ for i, face in enumerate(mesh.faces):
+ f_v = face.verts
+# f_v = face.v
+
+ uf = mesh.active_uv_texture.data[i] if do_uv else None
+
+ if do_uv:
+ f_uv = uf.uv
+ # f_uv = (uf.uv1, uf.uv2, uf.uv3, uf.uv4) if face.verts[3] else (uf.uv1, uf.uv2, uf.uv3)
+# f_uv = face.uv
+ img = uf.image if uf else None
+# img = face.image
+ if img: img = img.name
+
+ # if f_v[3] == 0:
+ if len(f_v)==3:
+ new_tri = tri_wrapper((f_v[0], f_v[1], f_v[2]), face.material_index, img)
+# new_tri = tri_wrapper((f_v[0].index, f_v[1].index, f_v[2].index), face.mat, img)
+ if (do_uv): new_tri.faceuvs= uv_key(f_uv[0]), uv_key(f_uv[1]), uv_key(f_uv[2])
+ tri_list.append(new_tri)
+
+ else: #it's a quad
+ new_tri = tri_wrapper((f_v[0], f_v[1], f_v[2]), face.material_index, img)
+# new_tri = tri_wrapper((f_v[0].index, f_v[1].index, f_v[2].index), face.mat, img)
+ new_tri_2 = tri_wrapper((f_v[0], f_v[2], f_v[3]), face.material_index, img)
+# new_tri_2 = tri_wrapper((f_v[0].index, f_v[2].index, f_v[3].index), face.mat, img)
+
+ if (do_uv):
+ new_tri.faceuvs= uv_key(f_uv[0]), uv_key(f_uv[1]), uv_key(f_uv[2])
+ new_tri_2.faceuvs= uv_key(f_uv[0]), uv_key(f_uv[2]), uv_key(f_uv[3])
+
+ tri_list.append( new_tri )
+ tri_list.append( new_tri_2 )
+
+ return tri_list
+
+
+def remove_face_uv(verts, tri_list):
+ '''Remove face UV coordinates from a list of triangles.
+
+ Since 3ds files only support one pair of uv coordinates for each vertex, face uv coordinates
+ need to be converted to vertex uv coordinates. That means that vertices need to be duplicated when
+ there are multiple uv coordinates per vertex.'''
+
+ # initialize a list of UniqueLists, one per vertex:
+ #uv_list = [UniqueList() for i in xrange(len(verts))]
+ unique_uvs= [{} for i in range(len(verts))]
+
+ # for each face uv coordinate, add it to the UniqueList of the vertex
+ for tri in tri_list:
+ for i in range(3):
+ # store the index into the UniqueList for future reference:
+ # offset.append(uv_list[tri.vertex_index[i]].add(_3ds_point_uv(tri.faceuvs[i])))
+
+ context_uv_vert= unique_uvs[tri.vertex_index[i]]
+ uvkey= tri.faceuvs[i]
+
+ offset_index__uv_3ds = context_uv_vert.get(uvkey)
+
+ if not offset_index__uv_3ds:
+ offset_index__uv_3ds = context_uv_vert[uvkey] = len(context_uv_vert), _3ds_point_uv(uvkey)
+
+ tri.offset[i] = offset_index__uv_3ds[0]
+
+
+
+ # At this point, each vertex has a UniqueList containing every uv coordinate that is associated with it
+ # only once.
+
+ # Now we need to duplicate every vertex as many times as it has uv coordinates and make sure the
+ # faces refer to the new face indices:
+ vert_index = 0
+ vert_array = _3ds_array()
+ uv_array = _3ds_array()
+ index_list = []
+ for i,vert in enumerate(verts):
+ index_list.append(vert_index)
+
+ pt = _3ds_point_3d(vert.co) # reuse, should be ok
+ uvmap = [None] * len(unique_uvs[i])
+ for ii, uv_3ds in unique_uvs[i].values():
+ # add a vertex duplicate to the vertex_array for every uv associated with this vertex:
+ vert_array.add(pt)
+ # add the uv coordinate to the uv array:
+ # This for loop does not give uv's ordered by ii, so we create a new map
+ # and add the uv's later
+ # uv_array.add(uv_3ds)
+ uvmap[ii] = uv_3ds
+
+ # Add the uv's in the correct order
+ for uv_3ds in uvmap:
+ # add the uv coordinate to the uv array:
+ uv_array.add(uv_3ds)
+
+ vert_index += len(unique_uvs[i])
+
+ # Make sure the triangle vertex indices now refer to the new vertex list:
+ for tri in tri_list:
+ for i in range(3):
+ tri.offset[i]+=index_list[tri.vertex_index[i]]
+ tri.vertex_index= tri.offset
+
+ return vert_array, uv_array, tri_list
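+# e.g. a vertex shared by faces with two different UVs is emitted twice, once
+# per unique UV, and each triangle's vertex_index is remapped to the duplicates.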
+
+def make_faces_chunk(tri_list, mesh, materialDict):
+ '''Make a chunk for the faces.
+
+ Also adds subchunks assigning materials to all faces.'''
+
+ materials = mesh.materials
+ if not materials:
+ mat = None
+
+ face_chunk = _3ds_chunk(OBJECT_FACES)
+ face_list = _3ds_array()
+
+
+ if len(mesh.uv_textures):
+# if mesh.faceUV:
+ # Gather materials used in this mesh - mat/image pairs
+ unique_mats = {}
+ for i,tri in enumerate(tri_list):
+
+ face_list.add(_3ds_face(tri.vertex_index))
+
+ if materials:
+ mat = materials[tri.mat]
+ if mat: mat = mat.name
+
+ img = tri.image
+
+ try:
+ context_mat_face_array = unique_mats[mat, img][1]
+ except:
+
+ if mat: name_str = mat
+ else: name_str = 'None'
+ if img: name_str += img
+
+ context_mat_face_array = _3ds_array()
+ unique_mats[mat, img] = _3ds_string(sane_name(name_str)), context_mat_face_array
+
+
+ context_mat_face_array.add(_3ds_short(i))
+ # obj_material_faces[tri.mat].add(_3ds_short(i))
+
+ face_chunk.add_variable("faces", face_list)
+ for mat_name, mat_faces in unique_mats.values():
+ obj_material_chunk=_3ds_chunk(OBJECT_MATERIAL)
+ obj_material_chunk.add_variable("name", mat_name)
+ obj_material_chunk.add_variable("face_list", mat_faces)
+ face_chunk.add_subchunk(obj_material_chunk)
+
+ else:
+
+ obj_material_faces=[]
+ obj_material_names=[]
+ for m in materials:
+ if m:
+ obj_material_names.append(_3ds_string(sane_name(m.name)))
+ obj_material_faces.append(_3ds_array())
+ n_materials = len(obj_material_names)
+
+ for i,tri in enumerate(tri_list):
+ face_list.add(_3ds_face(tri.vertex_index))
+ if (tri.mat < n_materials):
+ obj_material_faces[tri.mat].add(_3ds_short(i))
+
+ face_chunk.add_variable("faces", face_list)
+ for i in range(n_materials):
+ obj_material_chunk=_3ds_chunk(OBJECT_MATERIAL)
+ obj_material_chunk.add_variable("name", obj_material_names[i])
+ obj_material_chunk.add_variable("face_list", obj_material_faces[i])
+ face_chunk.add_subchunk(obj_material_chunk)
+
+ return face_chunk
+
+def make_vert_chunk(vert_array):
+ '''Make a vertex chunk out of an array of vertices.'''
+ vert_chunk = _3ds_chunk(OBJECT_VERTICES)
+ vert_chunk.add_variable("vertices",vert_array)
+ return vert_chunk
+
+def make_uv_chunk(uv_array):
+ '''Make a UV chunk out of an array of UVs.'''
+ uv_chunk = _3ds_chunk(OBJECT_UV)
+ uv_chunk.add_variable("uv coords", uv_array)
+ return uv_chunk
+
+def make_mesh_chunk(mesh, materialDict):
+ '''Make a chunk out of a Blender mesh.'''
+
+ # Extract the triangles from the mesh:
+ tri_list = extract_triangles(mesh)
+
+ if len(mesh.uv_textures):
+# if mesh.faceUV:
+ # Remove the face UVs and convert it to vertex UV:
+ vert_array, uv_array, tri_list = remove_face_uv(mesh.verts, tri_list)
+ else:
+ # Add the vertices to the vertex array:
+ vert_array = _3ds_array()
+ for vert in mesh.verts:
+ vert_array.add(_3ds_point_3d(vert.co))
+ # If the mesh has vertex UVs, create an array of UVs:
+ if len(mesh.sticky):
+# if mesh.vertexUV:
+ uv_array = _3ds_array()
+ for uv in mesh.sticky:
+# for vert in mesh.verts:
+ uv_array.add(_3ds_point_uv(uv.co))
+# uv_array.add(_3ds_point_uv(vert.uvco))
+ else:
+ # no UV at all:
+ uv_array = None
+
+ # create the chunk:
+ mesh_chunk = _3ds_chunk(OBJECT_MESH)
+
+ # add vertex chunk:
+ mesh_chunk.add_subchunk(make_vert_chunk(vert_array))
+ # add faces chunk:
+
+ mesh_chunk.add_subchunk(make_faces_chunk(tri_list, mesh, materialDict))
+
+ # if available, add uv chunk:
+ if uv_array:
+ mesh_chunk.add_subchunk(make_uv_chunk(uv_array))
+
+ return mesh_chunk
+
+""" # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX
+def make_kfdata(start=0, stop=0, curtime=0):
+ '''Make the basic keyframe data chunk'''
+ kfdata = _3ds_chunk(KFDATA)
+
+ kfhdr = _3ds_chunk(KFDATA_KFHDR)
+ kfhdr.add_variable("revision", _3ds_short(0))
+ # Not really sure what filename is used for, but it seems it is usually used
+ # to identify the program that generated the .3ds:
+ kfhdr.add_variable("filename", _3ds_string("Blender"))
+ kfhdr.add_variable("animlen", _3ds_int(stop-start))
+
+ kfseg = _3ds_chunk(KFDATA_KFSEG)
+ kfseg.add_variable("start", _3ds_int(start))
+ kfseg.add_variable("stop", _3ds_int(stop))
+
+ kfcurtime = _3ds_chunk(KFDATA_KFCURTIME)
+ kfcurtime.add_variable("curtime", _3ds_int(curtime))
+
+ kfdata.add_subchunk(kfhdr)
+ kfdata.add_subchunk(kfseg)
+ kfdata.add_subchunk(kfcurtime)
+ return kfdata
+"""
+
+"""
+def make_track_chunk(ID, obj):
+ '''Make a chunk for track data.
+
+ Depending on the ID, this will construct a position, rotation or scale track.'''
+ track_chunk = _3ds_chunk(ID)
+ track_chunk.add_variable("track_flags", _3ds_short())
+ track_chunk.add_variable("unknown", _3ds_int())
+ track_chunk.add_variable("unknown", _3ds_int())
+ track_chunk.add_variable("nkeys", _3ds_int(1))
+ # Next section should be repeated for every keyframe, but for now, animation is not actually supported.
+ track_chunk.add_variable("tcb_frame", _3ds_int(0))
+ track_chunk.add_variable("tcb_flags", _3ds_short())
+ if obj.type=='Empty':
+ if ID==POS_TRACK_TAG:
+ # position vector:
+ track_chunk.add_variable("position", _3ds_point_3d(obj.getLocation()))
+ elif ID==ROT_TRACK_TAG:
+ # rotation (quaternion, angle first, followed by axis):
+ q = obj.getEuler().toQuat()
+ track_chunk.add_variable("rotation", _3ds_point_4d((q.angle, q.axis[0], q.axis[1], q.axis[2])))
+ elif ID==SCL_TRACK_TAG:
+ # scale vector:
+ track_chunk.add_variable("scale", _3ds_point_3d(obj.getSize()))
+ else:
+ # meshes have their transformations applied before
+ # exporting, so write identity transforms here:
+ if ID==POS_TRACK_TAG:
+ # position vector:
+ track_chunk.add_variable("position", _3ds_point_3d((0.0,0.0,0.0)))
+ elif ID==ROT_TRACK_TAG:
+ # rotation (quaternion, angle first, followed by axis):
+ track_chunk.add_variable("rotation", _3ds_point_4d((0.0, 1.0, 0.0, 0.0)))
+ elif ID==SCL_TRACK_TAG:
+ # scale vector:
+ track_chunk.add_variable("scale", _3ds_point_3d((1.0, 1.0, 1.0)))
+
+ return track_chunk
+"""
+
+"""
+def make_kf_obj_node(obj, name_to_id):
+ '''Make a node chunk for a Blender object.
+
+ Takes the Blender object as a parameter. Object id's are taken from the dictionary name_to_id.
+ Blender Empty objects are converted to dummy nodes.'''
+
+ name = obj.name
+ # main object node chunk:
+ kf_obj_node = _3ds_chunk(KFDATA_OBJECT_NODE_TAG)
+ # chunk for the object id:
+ obj_id_chunk = _3ds_chunk(OBJECT_NODE_ID)
+ # object id is from the name_to_id dictionary:
+ obj_id_chunk.add_variable("node_id", _3ds_short(name_to_id[name]))
+
+ # object node header:
+ obj_node_header_chunk = _3ds_chunk(OBJECT_NODE_HDR)
+ # object name:
+ if obj.type == 'Empty':
+ # Empties are called "$$$DUMMY" and use the OBJECT_INSTANCE_NAME chunk
+ # for their name (see below):
+ obj_node_header_chunk.add_variable("name", _3ds_string("$$$DUMMY"))
+ else:
+ # Add the name:
+ obj_node_header_chunk.add_variable("name", _3ds_string(sane_name(name)))
+ # Add Flag variables (not sure what they do):
+ obj_node_header_chunk.add_variable("flags1", _3ds_short(0))
+ obj_node_header_chunk.add_variable("flags2", _3ds_short(0))
+
+ # Check parent-child relationships:
+ parent = obj.parent
+ if (parent == None) or (parent.name not in name_to_id):
+ # If no parent, or the parents name is not in the name_to_id dictionary,
+ # parent id becomes -1:
+ obj_node_header_chunk.add_variable("parent", _3ds_short(-1))
+ else:
+ # Get the parent's id from the name_to_id dictionary:
+ obj_node_header_chunk.add_variable("parent", _3ds_short(name_to_id[parent.name]))
+
+ # Add pivot chunk:
+ obj_pivot_chunk = _3ds_chunk(OBJECT_PIVOT)
+ obj_pivot_chunk.add_variable("pivot", _3ds_point_3d(obj.getLocation()))
+ kf_obj_node.add_subchunk(obj_pivot_chunk)
+
+ # add subchunks for object id and node header:
+ kf_obj_node.add_subchunk(obj_id_chunk)
+ kf_obj_node.add_subchunk(obj_node_header_chunk)
+
+ # Empty objects need to have an extra chunk for the instance name:
+ if obj.type == 'Empty':
+ obj_instance_name_chunk = _3ds_chunk(OBJECT_INSTANCE_NAME)
+ obj_instance_name_chunk.add_variable("name", _3ds_string(sane_name(name)))
+ kf_obj_node.add_subchunk(obj_instance_name_chunk)
+
+ # Add track chunks for position, rotation and scale:
+ kf_obj_node.add_subchunk(make_track_chunk(POS_TRACK_TAG, obj))
+ kf_obj_node.add_subchunk(make_track_chunk(ROT_TRACK_TAG, obj))
+ kf_obj_node.add_subchunk(make_track_chunk(SCL_TRACK_TAG, obj))
+
+ return kf_obj_node
+"""
+
+# import BPyMessages
+def save_3ds(filename, context):
+ '''Save the Blender scene to a 3ds file.'''
+ # Time the export
+
+ if not filename.lower().endswith('.3ds'):
+ filename += '.3ds'
+
+ # XXX
+# if not BPyMessages.Warning_SaveOver(filename):
+# return
+
+ # XXX
+ time1 = time.clock()
+# time1= Blender.sys.time()
+# Blender.Window.WaitCursor(1)
+
+ sce = context.scene
+# sce= bpy.data.scenes.active
+
+ # Initialize the main chunk (primary):
+ primary = _3ds_chunk(PRIMARY)
+ # Add version chunk:
+ version_chunk = _3ds_chunk(VERSION)
+ version_chunk.add_variable("version", _3ds_int(3))
+ primary.add_subchunk(version_chunk)
+
+ # init main object info chunk:
+ object_info = _3ds_chunk(OBJECTINFO)
+
+ ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX
+ # init main key frame data chunk:
+ kfdata = make_kfdata()
+ '''
+
+ # Get all the supported objects selected in this scene:
+ # ob_sel= list(sce.objects.context)
+ # mesh_objects = [ (ob, me) for ob in ob_sel for me in (BPyMesh.getMeshFromObject(ob, None, True, False, sce),) if me ]
+ # empty_objects = [ ob for ob in ob_sel if ob.type == 'Empty' ]
+
+ # Make a list of all materials used in the selected meshes (use a dictionary,
+ # each material is added once):
+ materialDict = {}
+ mesh_objects = []
+ for ob in [ob for ob in context.scene.objects if ob.is_visible()]:
+# for ob in sce.objects.context:
+
+ # get derived objects
+ free, derived = create_derived_objects(ob)
+
+ if derived == None: continue
+
+ for ob_derived, mat in derived:
+# for ob_derived, mat in getDerivedObjects(ob, False):
+
+ if ob.type not in ('MESH', 'CURVE', 'SURFACE', 'TEXT', 'META'):
+ continue
+
+ data = ob_derived.create_mesh(True, 'PREVIEW')
+# data = getMeshFromObject(ob_derived, None, True, False, sce)
+ if data:
+ data.transform(mat)
+# data.transform(mat, recalc_normals=False)
+ mesh_objects.append((ob_derived, data))
+ mat_ls = data.materials
+ mat_ls_len = len(mat_ls)
+
+ # get material/image tuples.
+ if len(data.uv_textures):
+# if data.faceUV:
+ if not mat_ls:
+ mat = mat_name = None
+
+ for f, uf in zip(data.faces, data.active_uv_texture.data):
+ if mat_ls:
+ mat_index = f.material_index
+# mat_index = f.mat
+ if mat_index >= mat_ls_len:
+ mat_index = f.mat = 0
+ mat = mat_ls[mat_index]
+ if mat: mat_name = mat.name
+ else: mat_name = None
+ # else they're already set to None
+
+ img = uf.image
+# img = f.image
+ if img: img_name = img.name
+ else: img_name = None
+
+ materialDict.setdefault((mat_name, img_name), (mat, img) )
+
+
+ else:
+ for mat in mat_ls:
+ if mat: # material may be None so check its not.
+ materialDict.setdefault((mat.name, None), (mat, None) )
+
+ # Why 0? Clamp any out-of-range material indices back to 0:
+ for f in data.faces:
+ if f.material_index >= mat_ls_len:
+# if f.mat >= mat_ls_len:
+ f.material_index = 0
+ # f.mat = 0
+
+ if free:
+ free_derived_objects(ob)
+
+
+ # Make material chunks for all materials used in the meshes:
+ for mat_and_image in materialDict.values():
+ object_info.add_subchunk(make_material_chunk(mat_and_image[0], mat_and_image[1]))
+
+ # Give all objects a unique ID and build a dictionary from object name to object id:
+ """
+ name_to_id = {}
+ for ob, data in mesh_objects:
+ name_to_id[ob.name]= len(name_to_id)
+ #for ob in empty_objects:
+ # name_to_id[ob.name]= len(name_to_id)
+ """
+
+ # Create object chunks for all meshes:
+ i = 0
+ for ob, blender_mesh in mesh_objects:
+ # create a new object chunk
+ object_chunk = _3ds_chunk(OBJECT)
+
+ # set the object name
+ object_chunk.add_variable("name", _3ds_string(sane_name(ob.name)))
+
+ # make a mesh chunk out of the mesh:
+ object_chunk.add_subchunk(make_mesh_chunk(blender_mesh, materialDict))
+ object_info.add_subchunk(object_chunk)
+
+ ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX
+ # make a kf object node for the object:
+ kfdata.add_subchunk(make_kf_obj_node(ob, name_to_id))
+ '''
+# if not blender_mesh.users:
+ bpy.data.remove_mesh(blender_mesh)
+# blender_mesh.verts = None
+
+ i += 1
+
+ # Create chunks for all empties:
+ ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX
+ for ob in empty_objects:
+ # Empties only require a kf object node:
+ kfdata.add_subchunk(make_kf_obj_node(ob, name_to_id))
+ pass
+ '''
+
+ # Add main object info chunk to primary chunk:
+ primary.add_subchunk(object_info)
+
+ ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX
+ # Add main keyframe data chunk to primary chunk:
+ primary.add_subchunk(kfdata)
+ '''
+
+ # At this point, the chunk hierarchy is completely built.
+
+ # Check the size:
+ primary.get_size()
+ # Open the file for writing:
+ file = open( filename, 'wb' )
+
+ # Recursively write the chunks to file:
+ primary.write(file)
+
+ # Close the file:
+ file.close()
+
+ # Debugging only: report the exporting time:
+# Blender.Window.WaitCursor(0)
+ print("3ds export time: %.2f" % (time.clock() - time1))
+# print("3ds export time: %.2f" % (Blender.sys.time() - time1))
+
+ # Debugging only: dump the chunk hierarchy:
+ #primary.dump()
+
+
+# if __name__=='__main__':
+# if struct:
+# Blender.Window.FileSelector(save_3ds, "Export 3DS", Blender.sys.makename(ext='.3ds'))
+# else:
+# Blender.Draw.PupMenu("Error%t|This script requires a full python installation")
+# # save_3ds('/test_b.3ds')
+
+class EXPORT_OT_3ds(bpy.types.Operator):
+ '''
+ 3DS Exporter
+ '''
+ __idname__ = "export.3ds"
+ __label__ = 'Export 3DS'
+
+ # List of operator properties, the attributes will be assigned
+ # to the class instance from the operator settings before calling.
+
+ __props__ = [
+ # bpy.props.StringProperty(attr="filename", name="File Name", description="File name used for exporting the 3DS file", maxlen= 1024, default= ""),
+ bpy.props.StringProperty(attr="path", name="File Path", description="File path used for exporting the 3DS file", maxlen= 1024, default= ""),
+ ]
+
+ def execute(self, context):
+ save_3ds(self.path, context)
+ return ('FINISHED',)
+
+ def invoke(self, context, event):
+ wm = context.manager
+ wm.add_fileselect(self.__operator__)
+ return ('RUNNING_MODAL',)
+
+ def poll(self, context): # Poll isn't working yet
+ print("Poll")
+ return context.active_object != None
+
+bpy.ops.add(EXPORT_OT_3ds)
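+
+# The operator registers under the idname "export.3ds"; invoking it opens a
+# file selector (see invoke above) and then runs save_3ds on the chosen path.
+# A minimal sketch of calling the exporter directly instead (assumes a loaded
+# scene): save_3ds('/tmp/test.3ds', bpy.context)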
diff --git a/release/scripts/io/export_fbx.py b/release/scripts/io/export_fbx.py
new file mode 100644
index 00000000000..21b1388ebfe
--- /dev/null
+++ b/release/scripts/io/export_fbx.py
@@ -0,0 +1,3453 @@
+#!BPY
+"""
+Name: 'Autodesk FBX (.fbx)...'
+Blender: 249
+Group: 'Export'
+Tooltip: 'Export selection to an ASCII Autodesk FBX file'
+"""
+__author__ = "Campbell Barton"
+__url__ = ['www.blender.org', 'blenderartists.org']
+__version__ = "1.2"
+
+__bpydoc__ = """\
+This script is an exporter to the FBX file format.
+
+http://wiki.blender.org/index.php/Scripts/Manual/Export/autodesk_fbx
+"""
+# --------------------------------------------------------------------------
+# FBX Export v0.1 by Campbell Barton (AKA Ideasman)
+# --------------------------------------------------------------------------
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+# --------------------------------------------------------------------------
+
+import os
+import time
+import math # math.pi
+import shutil # for file copying
+
+# try:
+# import time
+# # import os # only needed for batch export, not used yet
+# except:
+# time = None # use this to check if they have python modules installed
+
+# for python 2.3 support
+try:
+ set()
+except:
+ try:
+ from sets import Set as set
+ except:
+ set = None # so the script can complain that a full python installation is missing
+
+# # os is only needed for batch 'own dir' option
+# try:
+# import os
+# except:
+# os = None
+
+# import Blender
+import bpy
+import Mathutils
+# from Blender.Mathutils import Matrix, Vector, RotationMatrix
+
+# import BPyObject
+# import BPyMesh
+# import BPySys
+# import BPyMessages
+
+## This was used to make V, but faster not to do all that
+##valid = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_,.()[]{}'
+##v = range(255)
+##for c in valid: v.remove(ord(c))
+v = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,42,43,47,58,59,60,61,62,63,64,92,94,96,124,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254]
+invalid = ''.join([chr(i) for i in v])
+def cleanName(name):
+ for ch in invalid: name = name.replace(ch, '_')
+ return name
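+# e.g. (hypothetical name) cleanName('My Light #1') -> 'My_Light__1': the space
+# and '#' are not in the valid character set, so each becomes an underscore.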
+# del v, i
+
+
+def copy_file(source, dest):
+ file = open(source, 'rb')
+ data = file.read()
+ file.close()
+
+ file = open(dest, 'wb')
+ file.write(data)
+ file.close()
+
+
+# XXX not used anymore, images are copied one at a time
+def copy_images(dest_dir, textures):
+ if not dest_dir.endswith(os.sep):
+ dest_dir += os.sep
+
+ image_paths = set()
+ for tex in textures:
+ image_paths.add(Blender.sys.expandpath(tex.filename))
+
+ # Now copy images
+ copyCount = 0
+ for image_path in image_paths:
+ if Blender.sys.exists(image_path):
+ # Make a name for the target path.
+ dest_image_path = dest_dir + image_path.split('\\')[-1].split('/')[-1]
+ if not Blender.sys.exists(dest_image_path): # Image isn't already there
+ print('\tCopying "%s" > "%s"' % (image_path, dest_image_path))
+ try:
+ copy_file(image_path, dest_image_path)
+ copyCount+=1
+ except:
+ print('\t\tWarning, file failed to copy, skipping.')
+
+ print('\tCopied %d images' % copyCount)
+
+# I guess FBX uses degrees instead of radians (Arystan).
+# Call this function just before writing to FBX.
+def eulerRadToDeg(eul):
+ ret = Mathutils.Euler()
+
+ ret.x = 180 / math.pi * eul[0]
+ ret.y = 180 / math.pi * eul[1]
+ ret.z = 180 / math.pi * eul[2]
+
+ return ret
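+# e.g. an input Euler of (pi, pi/2, 0) radians comes back as (180.0, 90.0, 0.0).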
+
+mtx4_identity = Mathutils.Matrix()
+
+# testing
+mtx_x90 = Mathutils.RotationMatrix( math.pi/2, 3, 'x') # used
+#mtx_x90n = RotationMatrix(-90, 3, 'x')
+#mtx_y90 = RotationMatrix( 90, 3, 'y')
+#mtx_y90n = RotationMatrix(-90, 3, 'y')
+#mtx_z90 = RotationMatrix( 90, 3, 'z')
+#mtx_z90n = RotationMatrix(-90, 3, 'z')
+
+#mtx4_x90 = RotationMatrix( 90, 4, 'x')
+mtx4_x90n = Mathutils.RotationMatrix(-math.pi/2, 4, 'x') # used
+#mtx4_y90 = RotationMatrix( 90, 4, 'y')
+mtx4_y90n = Mathutils.RotationMatrix(-math.pi/2, 4, 'y') # used
+mtx4_z90 = Mathutils.RotationMatrix( math.pi/2, 4, 'z') # used
+mtx4_z90n = Mathutils.RotationMatrix(-math.pi/2, 4, 'z') # used
+
+# def strip_path(p):
+# return p.split('\\')[-1].split('/')[-1]
+
+# Used to add the scene name into the filename without using odd chars
+sane_name_mapping_ob = {}
+sane_name_mapping_mat = {}
+sane_name_mapping_tex = {}
+sane_name_mapping_take = {}
+sane_name_mapping_group = {}
+
+# Make sure reserved names are not used
+sane_name_mapping_ob['Scene'] = 'Scene_'
+sane_name_mapping_ob['blend_root'] = 'blend_root_'
+
+def increment_string(t):
+ name = t
+ num = ''
+ while name and name[-1].isdigit():
+ num = name[-1] + num
+ name = name[:-1]
+ if num: return '%s%d' % (name, int(num)+1)
+ else: return name + '_0'
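+# e.g. increment_string('Bone') -> 'Bone_0', increment_string('Bone_0') -> 'Bone_1',
+# and increment_string('Bone_9') -> 'Bone_10' (trailing digits parse as one number).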
+
+
+
+# todo - Disallow the name 'Scene' and 'blend_root' - it will bugger things up.
+def sane_name(data, dct):
+ #if not data: return None
+
+ if type(data)==tuple: # materials are paired up with images
+ data, other = data
+ use_other = True
+ else:
+ other = None
+ use_other = False
+
+ if data: name = data.name
+ else: name = None
+ orig_name = name
+
+ if other:
+ orig_name_other = other.name
+ name = '%s #%s' % (name, orig_name_other)
+ else:
+ orig_name_other = None
+
+ # don't cache; only ever call once for each data type now,
+ # so as to avoid namespace collisions between types - like with objects <-> bones
+ #try: return dct[name]
+ #except: pass
+
+ if not name:
+ name = 'unnamed' # blank string, ASKING FOR TROUBLE!
+ else:
+ #name = BPySys.cleanName(name)
+ name = cleanName(name) # use our own
+
+ while name in iter(dct.values()): name = increment_string(name)
+
+ if use_other: # even if other is None - orig_name_other will be a string or None
+ dct[orig_name, orig_name_other] = name
+ else:
+ dct[orig_name] = name
+
+ return name
+
+def sane_obname(data): return sane_name(data, sane_name_mapping_ob)
+def sane_matname(data): return sane_name(data, sane_name_mapping_mat)
+def sane_texname(data): return sane_name(data, sane_name_mapping_tex)
+def sane_takename(data): return sane_name(data, sane_name_mapping_take)
+def sane_groupname(data): return sane_name(data, sane_name_mapping_group)
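+
+# e.g. (hypothetical names) two objects both called 'Cube' come back as 'Cube'
+# and 'Cube_0'; a (material, image) pair named ('Mat', 'Tex') is joined into
+# 'Mat #Tex' and then cleaned to 'Mat__Tex' before the uniqueness check.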
+
+# def derived_paths(fname_orig, basepath, FORCE_CWD=False):
+# '''
+# fname_orig - blender path, can be relative
+# basepath - fname_rel will be relative to this
+# FORCE_CWD - don't use the basepath, just add a ./ to the filename.
+# use when we know the file will be in the basepath.
+# '''
+# fname = bpy.sys.expandpath(fname_orig)
+# # fname = Blender.sys.expandpath(fname_orig)
+# fname_strip = os.path.basename(fname)
+# # fname_strip = strip_path(fname)
+# if FORCE_CWD:
+# fname_rel = '.' + os.sep + fname_strip
+# else:
+# fname_rel = bpy.sys.relpath(fname, basepath)
+# # fname_rel = Blender.sys.relpath(fname, basepath)
+# if fname_rel.startswith('//'): fname_rel = '.' + os.sep + fname_rel[2:]
+# return fname, fname_strip, fname_rel
+
+
+def mat4x4str(mat):
+ return '%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f,%.15f' % tuple([ f for v in mat for f in v ])
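+# Flattens row-major: mat4x4str(mtx4_identity) yields sixteen comma-separated
+# values starting '1.000000000000000,0.000000000000000,...'.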
+
+# XXX not used
+# duplicated in OBJ exporter
+def getVertsFromGroup(me, group_index):
+ ret = []
+
+ for i, v in enumerate(me.verts):
+ for g in v.groups:
+ if g.group == group_index:
+ ret.append((i, g.weight))
+
+ return ret
+
+# ob must be OB_MESH
+def BPyMesh_meshWeight2List(ob):
+ ''' Takes a mesh object and returns its group names and a list of lists, one list per vertex,
+ aligned with the group names; each inner list contains the float weight for each group.
+ These 2 lists can be modified and then used with list2MeshWeight to apply the changes.
+ '''
+
+ me = ob.data
+
+ # Clear the vert group.
+ groupNames= [g.name for g in ob.vertex_groups]
+ len_groupNames= len(groupNames)
+
+ if not len_groupNames:
+ # no groups? return empty names and a vert-aligned list of empty lists
+ return [], [[] for i in range(len(me.verts))]
+ else:
+ vWeightList= [[0.0]*len_groupNames for i in range(len(me.verts))]
+
+ for i, v in enumerate(me.verts):
+ for g in v.groups:
+ vWeightList[i][g.group] = g.weight
+
+ return groupNames, vWeightList
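+# e.g. (hypothetical groups) a 2-vert mesh with groups ['Arm', 'Leg'] might return
+# (['Arm', 'Leg'], [[1.0, 0.0], [0.25, 0.5]]) - one weight per group, per vertex.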
+
+def meshNormalizedWeights(me):
+ try: # account for old bad BPyMesh
+ groupNames, vWeightList = BPyMesh_meshWeight2List(me)
+# groupNames, vWeightList = BPyMesh.meshWeight2List(me)
+ except:
+ return [],[]
+
+ if not groupNames:
+ return [],[]
+
+ for i, vWeights in enumerate(vWeightList):
+ tot = 0.0
+ for w in vWeights:
+ tot+=w
+
+ if tot:
+ for j, w in enumerate(vWeights):
+ vWeights[j] = w/tot
+
+ return groupNames, vWeightList
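+# e.g. raw per-vertex weights [0.5, 1.5] sum to 2.0 and normalize to [0.25, 0.75];
+# an all-zero row is left untouched since its total is 0.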
+
+header_comment = \
+'''; FBX 6.1.0 project file
+; Created by Blender FBX Exporter
+; for support mail: ideasman42@gmail.com
+; ----------------------------------------------------
+
+'''
+
+# This func can be called with just the filename
+def write(filename, batch_objects = None, \
+ context = None,
+ EXP_OBS_SELECTED = True,
+ EXP_MESH = True,
+ EXP_MESH_APPLY_MOD = True,
+# EXP_MESH_HQ_NORMALS = False,
+ EXP_ARMATURE = True,
+ EXP_LAMP = True,
+ EXP_CAMERA = True,
+ EXP_EMPTY = True,
+ EXP_IMAGE_COPY = False,
+ GLOBAL_MATRIX = Mathutils.Matrix(),
+ ANIM_ENABLE = True,
+ ANIM_OPTIMIZE = True,
+ ANIM_OPTIMIZE_PRECISSION = 6,
+ ANIM_ACTION_ALL = False,
+ BATCH_ENABLE = False,
+ BATCH_GROUP = True,
+ BATCH_FILE_PREFIX = '',
+ BATCH_OWN_DIR = False
+ ):
+
+ # ----------------- Batch support!
+ if BATCH_ENABLE:
+ if os == None: BATCH_OWN_DIR = False
+
+ fbxpath = filename
+
+ # get the path component of filename
+ tmp_exists = bpy.sys.exists(fbxpath)
+# tmp_exists = Blender.sys.exists(fbxpath)
+
+ if tmp_exists != 2: # a file, we want a path
+ fbxpath = os.path.dirname(fbxpath)
+# while fbxpath and fbxpath[-1] not in ('/', '\\'):
+# fbxpath = fbxpath[:-1]
+ if not fbxpath:
+# if not filename:
+ # XXX
+ print('Error%t|Directory does not exist!')
+# Draw.PupMenu('Error%t|Directory does not exist!')
+ return
+
+ tmp_exists = bpy.sys.exists(fbxpath)
+# tmp_exists = Blender.sys.exists(fbxpath)
+
+ if tmp_exists != 2:
+ # XXX
+ print('Error%t|Directory does not exist!')
+# Draw.PupMenu('Error%t|Directory does not exist!')
+ return
+
+ if not fbxpath.endswith(os.sep):
+ fbxpath += os.sep
+ del tmp_exists
+
+
+ if BATCH_GROUP:
+ data_seq = bpy.data.groups
+ else:
+ data_seq = bpy.data.scenes
+
+ # call this function within a loop with BATCH_ENABLE == False
+ orig_sce = context.scene
+# orig_sce = bpy.data.scenes.active
+
+
+ new_fbxpath = fbxpath # the 'own dir' option modifies this, so we need to keep the original
+ for data in data_seq: # scene or group
+ newname = BATCH_FILE_PREFIX + cleanName(data.name)
+# newname = BATCH_FILE_PREFIX + BPySys.cleanName(data.name)
+
+
+ if BATCH_OWN_DIR:
+ new_fbxpath = fbxpath + newname + os.sep
+ # path may already exist
+ # TODO - might exist but be a file. unlikely but should probably account for it.
+
+ if bpy.sys.exists(new_fbxpath) == 0:
+# if Blender.sys.exists(new_fbxpath) == 0:
+ os.mkdir(new_fbxpath)
+
+
+ filename = new_fbxpath + newname + '.fbx'
+
+ print('\nBatch exporting %s as...\n\t"%s"' % (data, filename))
+
+ # XXX don't know what to do with this, probably do the same? (Arystan)
+ if BATCH_GROUP: #group
+ # group, so objects update properly, add a dummy scene.
+ sce = bpy.data.scenes.new()
+ sce.Layers = (1<<20) -1
+ bpy.data.scenes.active = sce
+ for ob_base in data.objects:
+ sce.objects.link(ob_base)
+
+ sce.update(1)
+
+ # TODO - BUMMER! Armatures not in the group won't animate the mesh
+
+ else:# scene
+
+
+ data_seq.active = data
+
+
+ # Call self with modified args
+ # Don't pass batch options since we already used them
+ write(filename, data.objects,
+ context,
+ False,
+ EXP_MESH,
+ EXP_MESH_APPLY_MOD,
+# EXP_MESH_HQ_NORMALS,
+ EXP_ARMATURE,
+ EXP_LAMP,
+ EXP_CAMERA,
+ EXP_EMPTY,
+ EXP_IMAGE_COPY,
+ GLOBAL_MATRIX,
+ ANIM_ENABLE,
+ ANIM_OPTIMIZE,
+ ANIM_OPTIMIZE_PRECISSION,
+ ANIM_ACTION_ALL
+ )
+
+ if BATCH_GROUP:
+ # remove temp group scene
+ bpy.data.remove_scene(sce)
+# bpy.data.scenes.unlink(sce)
+
+ bpy.data.scenes.active = orig_sce
+
+ return # so the script won't run after we have batch exported.
+
+ # end batch support
+
+ # Use this for working out paths relative to the export location
+ basepath = os.path.dirname(filename) or '.'
+ basepath += os.sep
+# basepath = Blender.sys.dirname(filename)
+
+ # ----------------------------------------------
+ # storage classes
+ class my_bone_class:
+ __slots__ =(\
+ 'blenName',\
+ 'blenBone',\
+ 'blenMeshes',\
+ 'restMatrix',\
+ 'parent',\
+ 'fbxName',\
+ 'fbxArm',\
+ '__pose_bone',\
+ '__anim_poselist')
+
+ def __init__(self, blenBone, fbxArm):
+
+ # This is so 2 armatures don't have naming conflicts since FBX bones use the object namespace
+ self.fbxName = sane_obname(blenBone)
+
+ self.blenName = blenBone.name
+ self.blenBone = blenBone
+ self.blenMeshes = {} # fbxMeshObName : mesh
+ self.fbxArm = fbxArm
+ self.restMatrix = blenBone.armature_matrix
+# self.restMatrix = blenBone.matrix['ARMATURESPACE']
+
+ # not used yet
+ # self.restMatrixInv = self.restMatrix.copy().invert()
+ # self.restMatrixLocal = None # set later, need parent matrix
+
+ self.parent = None
+
+ # not public
+ pose = fbxArm.blenObject.pose
+# pose = fbxArm.blenObject.getPose()
+ self.__pose_bone = pose.pose_channels[self.blenName]
+# self.__pose_bone = pose.bones[self.blenName]
+
+ # store a dict of matrices here, keyed by frame:
+ # {frame: poseMatrix, frame: poseMatrix, ...}
+ self.__anim_poselist = {}
+
+ '''
+ def calcRestMatrixLocal(self):
+ if self.parent:
+ self.restMatrixLocal = self.restMatrix * self.parent.restMatrix.copy().invert()
+ else:
+ self.restMatrixLocal = self.restMatrix.copy()
+ '''
+ def setPoseFrame(self, f):
+ # cache pose info here, frame must be set beforehand
+
+ # Didn't end up needing head or tail, if we do - here it is.
+ '''
+ self.__anim_poselist[f] = (\
+ self.__pose_bone.poseMatrix.copy(),\
+ self.__pose_bone.head.copy(),\
+ self.__pose_bone.tail.copy() )
+ '''
+
+ self.__anim_poselist[f] = self.__pose_bone.pose_matrix.copy()
+# self.__anim_poselist[f] = self.__pose_bone.poseMatrix.copy()
+
+ # get pose from frame.
+ def getPoseMatrix(self, f):
+ return self.__anim_poselist[f]
+ '''
+ def getPoseHead(self, f):
+ #return self.__pose_bone.head.copy()
+ return self.__anim_poselist[f][1].copy()
+ def getPoseTail(self, f):
+ #return self.__pose_bone.tail.copy()
+ return self.__anim_poselist[f][2].copy()
+ '''
+ # end
+
+ def getAnimParRelMatrix(self, frame):
+ #arm_mat = self.fbxArm.matrixWorld
+ #arm_mat = self.fbxArm.parRelMatrix()
+ if not self.parent:
+ #return mtx4_z90 * (self.getPoseMatrix(frame) * arm_mat) # don't apply arm matrix anymore
+ return mtx4_z90 * self.getPoseMatrix(frame)
+ else:
+ #return (mtx4_z90 * ((self.getPoseMatrix(frame) * arm_mat))) * (mtx4_z90 * (self.parent.getPoseMatrix(frame) * arm_mat)).invert()
+ return (mtx4_z90 * (self.getPoseMatrix(frame))) * (mtx4_z90 * self.parent.getPoseMatrix(frame)).invert()
+
+ # we need these because cameras and lights have modified rotations
+ def getAnimParRelMatrixRot(self, frame):
+ return self.getAnimParRelMatrix(frame)
+
+ def flushAnimData(self):
+ self.__anim_poselist.clear()
+
+
+ class my_object_generic:
+ # Other settings can be applied for each type - mesh, armature etc.
+ def __init__(self, ob, matrixWorld = None):
+ self.fbxName = sane_obname(ob)
+ self.blenObject = ob
+ self.fbxGroupNames = []
+ self.fbxParent = None # set later on IF the parent is in the selection.
+ if matrixWorld: self.matrixWorld = matrixWorld * GLOBAL_MATRIX
+ else: self.matrixWorld = ob.matrix * GLOBAL_MATRIX
+# else: self.matrixWorld = ob.matrixWorld * GLOBAL_MATRIX
+ self.__anim_poselist = {} # private; only access this through the methods below
+
+ def parRelMatrix(self):
+ if self.fbxParent:
+ return self.matrixWorld * self.fbxParent.matrixWorld.copy().invert()
+ else:
+ return self.matrixWorld
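+ # i.e. local = world * parent_world^-1 in the old row-vector Mathutils
+ # convention, so applying the parent's world matrix after this result
+ # reproduces self.matrixWorld.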
+
+ def setPoseFrame(self, f):
+ self.__anim_poselist[f] = self.blenObject.matrix.copy()
+# self.__anim_poselist[f] = self.blenObject.matrixWorld.copy()
+
+ def getAnimParRelMatrix(self, frame):
+ if self.fbxParent:
+ #return (self.__anim_poselist[frame] * self.fbxParent.__anim_poselist[frame].copy().invert() ) * GLOBAL_MATRIX
+ return (self.__anim_poselist[frame] * GLOBAL_MATRIX) * (self.fbxParent.__anim_poselist[frame] * GLOBAL_MATRIX).invert()
+ else:
+ return self.__anim_poselist[frame] * GLOBAL_MATRIX
+
+ def getAnimParRelMatrixRot(self, frame):
+ type = self.blenObject.type
+ if self.fbxParent:
+ matrix_rot = (((self.__anim_poselist[frame] * GLOBAL_MATRIX) * (self.fbxParent.__anim_poselist[frame] * GLOBAL_MATRIX).invert())).rotationPart()
+ else:
+ matrix_rot = (self.__anim_poselist[frame] * GLOBAL_MATRIX).rotationPart()
+
+ # Lamps need to be rotated
+ if type =='LAMP':
+ matrix_rot = mtx_x90 * matrix_rot
+ elif type =='CAMERA':
+# elif ob and type =='Camera':
+ y = Mathutils.Vector(0,1,0) * matrix_rot
+ matrix_rot = matrix_rot * Mathutils.RotationMatrix(math.pi/2, 3, 'r', y)
+
+ return matrix_rot
+
+ # ----------------------------------------------
+
+
+
+
+
+ print('\nFBX export starting...', filename)
+ start_time = time.clock()
+# start_time = Blender.sys.time()
+ try:
+ file = open(filename, 'w')
+ except:
+ return False
+
+ sce = context.scene
+# sce = bpy.data.scenes.active
+ world = sce.world
+
+
+ # ---------------------------- Write the header first
+ file.write(header_comment)
+ if time:
+ curtime = time.localtime()[0:6]
+ else:
+ curtime = (0,0,0,0,0,0)
+ #
+ file.write(\
+'''FBXHeaderExtension: {
+ FBXHeaderVersion: 1003
+ FBXVersion: 6100
+ CreationTimeStamp: {
+ Version: 1000
+ Year: %.4i
+ Month: %.2i
+ Day: %.2i
+ Hour: %.2i
+ Minute: %.2i
+ Second: %.2i
+ Millisecond: 0
+ }
+ Creator: "FBX SDK/FBX Plugins build 20070228"
+ OtherFlags: {
+ FlagPLE: 0
+ }
+}''' % (curtime))
+
+ file.write('\nCreationTime: "%.4i-%.2i-%.2i %.2i:%.2i:%.2i:000"' % curtime)
+ file.write('\nCreator: "Blender3D version 2.5"')
+# file.write('\nCreator: "Blender3D version %.2f"' % Blender.Get('version'))
+
+ pose_items = [] # list of (fbxName, matrix) to write pose data for, easier to collect along the way
+
+ # --------------- funcs for exporting
+ def object_tx(ob, loc, matrix, matrix_mod = None):
+ '''
+ matrix_mod is so armature objects can modify their bone matrices
+ '''
+ if isinstance(ob, bpy.types.Bone):
+# if isinstance(ob, Blender.Types.BoneType):
+
+ # we know we have a matrix
+ # matrix = mtx4_z90 * (ob.matrix['ARMATURESPACE'] * matrix_mod)
+ matrix = mtx4_z90 * ob.armature_matrix # don't apply armature matrix anymore
+# matrix = mtx4_z90 * ob.matrix['ARMATURESPACE'] # don't apply armature matrix anymore
+
+ parent = ob.parent
+ if parent:
+ #par_matrix = mtx4_z90 * (parent.matrix['ARMATURESPACE'] * matrix_mod)
+ par_matrix = mtx4_z90 * parent.armature_matrix # don't apply armature matrix anymore
+# par_matrix = mtx4_z90 * parent.matrix['ARMATURESPACE'] # don't apply armature matrix anymore
+ matrix = matrix * par_matrix.copy().invert()
+
+ matrix_rot = matrix.rotationPart()
+
+ loc = tuple(matrix.translationPart())
+ scale = tuple(matrix.scalePart())
+ rot = tuple(matrix_rot.toEuler())
+
+ else:
+ # This is bad because we need the parent relative matrix from the fbx parent (if we have one), don't use anymore
+ #if ob and not matrix: matrix = ob.matrixWorld * GLOBAL_MATRIX
+ if ob and not matrix: raise Exception("error: this should never happen!")
+
+ matrix_rot = matrix
+ #if matrix:
+ # matrix = matrix_scale * matrix
+
+ if matrix:
+ loc = tuple(matrix.translationPart())
+ scale = tuple(matrix.scalePart())
+
+ matrix_rot = matrix.rotationPart()
+ # Lamps need to be rotated
+ if ob and ob.type =='Lamp':
+ matrix_rot = mtx_x90 * matrix_rot
+ rot = tuple(matrix_rot.toEuler())
+ elif ob and ob.type =='Camera':
+ y = Mathutils.Vector(0,1,0) * matrix_rot
+ matrix_rot = matrix_rot * Mathutils.RotationMatrix(math.pi/2, 3, 'r', y)
+ rot = tuple(matrix_rot.toEuler())
+ else:
+ rot = tuple(matrix_rot.toEuler())
+ else:
+ if not loc:
+ loc = 0,0,0
+ scale = 1,1,1
+ rot = 0,0,0
+
+ return loc, rot, scale, matrix, matrix_rot
+
+ def write_object_tx(ob, loc, matrix, matrix_mod= None):
+ '''
+ loc is used to set the location of non-Blender objects that have a location
+
+ matrix_mod is only used for bones at the moment
+ '''
+ loc, rot, scale, matrix, matrix_rot = object_tx(ob, loc, matrix, matrix_mod)
+
+ file.write('\n\t\t\tProperty: "Lcl Translation", "Lcl Translation", "A+",%.15f,%.15f,%.15f' % loc)
+ file.write('\n\t\t\tProperty: "Lcl Rotation", "Lcl Rotation", "A+",%.15f,%.15f,%.15f' % tuple(eulerRadToDeg(rot)))
+# file.write('\n\t\t\tProperty: "Lcl Rotation", "Lcl Rotation", "A+",%.15f,%.15f,%.15f' % rot)
+ file.write('\n\t\t\tProperty: "Lcl Scaling", "Lcl Scaling", "A+",%.15f,%.15f,%.15f' % scale)
+ return loc, rot, scale, matrix, matrix_rot
+
+ def write_object_props(ob=None, loc=None, matrix=None, matrix_mod=None):
+ # if the type is 0 it's an empty, otherwise it's a mesh;
+ # the only difference at the moment is that one has a color
+ file.write('''
+ Properties60: {
+ Property: "QuaternionInterpolate", "bool", "",0
+ Property: "Visibility", "Visibility", "A+",1''')
+
+ loc, rot, scale, matrix, matrix_rot = write_object_tx(ob, loc, matrix, matrix_mod)
+
+ # Rotation order; note, for the FBX files I've loaded the normal order is 1,
+ # but we set it to zero.
+ # eEULER_XYZ = 0
+ # eEULER_XZY
+ # eEULER_YZX
+ # eEULER_YXZ
+ # eEULER_ZXY
+ # eEULER_ZYX
+
+ file.write('''
+ Property: "RotationOffset", "Vector3D", "",0,0,0
+ Property: "RotationPivot", "Vector3D", "",0,0,0
+ Property: "ScalingOffset", "Vector3D", "",0,0,0
+ Property: "ScalingPivot", "Vector3D", "",0,0,0
+ Property: "TranslationActive", "bool", "",0
+ Property: "TranslationMin", "Vector3D", "",0,0,0
+ Property: "TranslationMax", "Vector3D", "",0,0,0
+ Property: "TranslationMinX", "bool", "",0
+ Property: "TranslationMinY", "bool", "",0
+ Property: "TranslationMinZ", "bool", "",0
+ Property: "TranslationMaxX", "bool", "",0
+ Property: "TranslationMaxY", "bool", "",0
+ Property: "TranslationMaxZ", "bool", "",0
+ Property: "RotationOrder", "enum", "",0
+ Property: "RotationSpaceForLimitOnly", "bool", "",0
+ Property: "AxisLen", "double", "",10
+ Property: "PreRotation", "Vector3D", "",0,0,0
+ Property: "PostRotation", "Vector3D", "",0,0,0
+ Property: "RotationActive", "bool", "",0
+ Property: "RotationMin", "Vector3D", "",0,0,0
+ Property: "RotationMax", "Vector3D", "",0,0,0
+ Property: "RotationMinX", "bool", "",0
+ Property: "RotationMinY", "bool", "",0
+ Property: "RotationMinZ", "bool", "",0
+ Property: "RotationMaxX", "bool", "",0
+ Property: "RotationMaxY", "bool", "",0
+ Property: "RotationMaxZ", "bool", "",0
+ Property: "RotationStiffnessX", "double", "",0
+ Property: "RotationStiffnessY", "double", "",0
+ Property: "RotationStiffnessZ", "double", "",0
+ Property: "MinDampRangeX", "double", "",0
+ Property: "MinDampRangeY", "double", "",0
+ Property: "MinDampRangeZ", "double", "",0
+ Property: "MaxDampRangeX", "double", "",0
+ Property: "MaxDampRangeY", "double", "",0
+ Property: "MaxDampRangeZ", "double", "",0
+ Property: "MinDampStrengthX", "double", "",0
+ Property: "MinDampStrengthY", "double", "",0
+ Property: "MinDampStrengthZ", "double", "",0
+ Property: "MaxDampStrengthX", "double", "",0
+ Property: "MaxDampStrengthY", "double", "",0
+ Property: "MaxDampStrengthZ", "double", "",0
+ Property: "PreferedAngleX", "double", "",0
+ Property: "PreferedAngleY", "double", "",0
+ Property: "PreferedAngleZ", "double", "",0
+ Property: "InheritType", "enum", "",0
+ Property: "ScalingActive", "bool", "",0
+ Property: "ScalingMin", "Vector3D", "",1,1,1
+ Property: "ScalingMax", "Vector3D", "",1,1,1
+ Property: "ScalingMinX", "bool", "",0
+ Property: "ScalingMinY", "bool", "",0
+ Property: "ScalingMinZ", "bool", "",0
+ Property: "ScalingMaxX", "bool", "",0
+ Property: "ScalingMaxY", "bool", "",0
+ Property: "ScalingMaxZ", "bool", "",0
+ Property: "GeometricTranslation", "Vector3D", "",0,0,0
+ Property: "GeometricRotation", "Vector3D", "",0,0,0
+ Property: "GeometricScaling", "Vector3D", "",1,1,1
+ Property: "LookAtProperty", "object", ""
+ Property: "UpVectorProperty", "object", ""
+ Property: "Show", "bool", "",1
+ Property: "NegativePercentShapeSupport", "bool", "",1
+ Property: "DefaultAttributeIndex", "int", "",0''')
+ if ob and not isinstance(ob, bpy.types.Bone):
+# if ob and type(ob) != Blender.Types.BoneType:
+ # Only mesh objects have color
+ file.write('\n\t\t\tProperty: "Color", "Color", "A",0.8,0.8,0.8')
+ file.write('\n\t\t\tProperty: "Size", "double", "",100')
+ file.write('\n\t\t\tProperty: "Look", "enum", "",1')
+
+ return loc, rot, scale, matrix, matrix_rot
+
+
+ # -------------------------------------------- Armatures
+ #def write_bone(bone, name, matrix_mod):
+ def write_bone(my_bone):
+ file.write('\n\tModel: "Model::%s", "Limb" {' % my_bone.fbxName)
+ file.write('\n\t\tVersion: 232')
+
+ #poseMatrix = write_object_props(my_bone.blenBone, None, None, my_bone.fbxArm.parRelMatrix())[3]
+ poseMatrix = write_object_props(my_bone.blenBone)[3] # don't apply bone matrices anymore
+ pose_items.append( (my_bone.fbxName, poseMatrix) )
+
+
+ # file.write('\n\t\t\tProperty: "Size", "double", "",%.6f' % ((my_bone.blenData.head['ARMATURESPACE'] - my_bone.blenData.tail['ARMATURESPACE']) * my_bone.fbxArm.parRelMatrix()).length)
+ file.write('\n\t\t\tProperty: "Size", "double", "",1')
+
+ #((my_bone.blenData.head['ARMATURESPACE'] * my_bone.fbxArm.matrixWorld) - (my_bone.blenData.tail['ARMATURESPACE'] * my_bone.fbxArm.parRelMatrix())).length)
+
+ """
+ file.write('\n\t\t\tProperty: "LimbLength", "double", "",%.6f' %\
+ ((my_bone.blenBone.head['ARMATURESPACE'] - my_bone.blenBone.tail['ARMATURESPACE']) * my_bone.fbxArm.parRelMatrix()).length)
+ """
+
+ file.write('\n\t\t\tProperty: "LimbLength", "double", "",%.6f' %
+ (my_bone.blenBone.armature_head - my_bone.blenBone.armature_tail).length)
+# (my_bone.blenBone.head['ARMATURESPACE'] - my_bone.blenBone.tail['ARMATURESPACE']).length)
+
+ #file.write('\n\t\t\tProperty: "LimbLength", "double", "",1')
+ file.write('\n\t\t\tProperty: "Color", "ColorRGB", "",0.8,0.8,0.8')
+ file.write('\n\t\t\tProperty: "Color", "Color", "A",0.8,0.8,0.8')
+ file.write('\n\t\t}')
+ file.write('\n\t\tMultiLayer: 0')
+ file.write('\n\t\tMultiTake: 1')
+ file.write('\n\t\tShading: Y')
+ file.write('\n\t\tCulling: "CullingOff"')
+ file.write('\n\t\tTypeFlags: "Skeleton"')
+ file.write('\n\t}')
+
+ def write_camera_switch():
+ file.write('''
+ Model: "Model::Camera Switcher", "CameraSwitcher" {
+ Version: 232''')
+
+ write_object_props()
+ file.write('''
+ Property: "Color", "Color", "A",0.8,0.8,0.8
+ Property: "Camera Index", "Integer", "A+",100
+ }
+ MultiLayer: 0
+ MultiTake: 1
+ Hidden: "True"
+ Shading: W
+ Culling: "CullingOff"
+ Version: 101
+ Name: "Model::Camera Switcher"
+ CameraId: 0
+ CameraName: 100
+ CameraIndexName:
+ }''')
+
+ def write_camera_dummy(name, loc, near, far, proj_type, up):
+ file.write('\n\tModel: "Model::%s", "Camera" {' % name )
+ file.write('\n\t\tVersion: 232')
+ write_object_props(None, loc)
+
+ file.write('\n\t\t\tProperty: "Color", "Color", "A",0.8,0.8,0.8')
+ file.write('\n\t\t\tProperty: "Roll", "Roll", "A+",0')
+ file.write('\n\t\t\tProperty: "FieldOfView", "FieldOfView", "A+",40')
+ file.write('\n\t\t\tProperty: "FieldOfViewX", "FieldOfView", "A+",1')
+ file.write('\n\t\t\tProperty: "FieldOfViewY", "FieldOfView", "A+",1')
+ file.write('\n\t\t\tProperty: "OpticalCenterX", "Real", "A+",0')
+ file.write('\n\t\t\tProperty: "OpticalCenterY", "Real", "A+",0')
+ file.write('\n\t\t\tProperty: "BackgroundColor", "Color", "A+",0.63,0.63,0.63')
+ file.write('\n\t\t\tProperty: "TurnTable", "Real", "A+",0')
+ file.write('\n\t\t\tProperty: "DisplayTurnTableIcon", "bool", "",1')
+ file.write('\n\t\t\tProperty: "Motion Blur Intensity", "Real", "A+",1')
+ file.write('\n\t\t\tProperty: "UseMotionBlur", "bool", "",0')
+ file.write('\n\t\t\tProperty: "UseRealTimeMotionBlur", "bool", "",1')
+ file.write('\n\t\t\tProperty: "ResolutionMode", "enum", "",0')
+ file.write('\n\t\t\tProperty: "ApertureMode", "enum", "",2')
+ file.write('\n\t\t\tProperty: "GateFit", "enum", "",0')
+ file.write('\n\t\t\tProperty: "FocalLength", "Real", "A+",21.3544940948486')
+ file.write('\n\t\t\tProperty: "CameraFormat", "enum", "",0')
+ file.write('\n\t\t\tProperty: "AspectW", "double", "",320')
+ file.write('\n\t\t\tProperty: "AspectH", "double", "",200')
+ file.write('\n\t\t\tProperty: "PixelAspectRatio", "double", "",1')
+ file.write('\n\t\t\tProperty: "UseFrameColor", "bool", "",0')
+ file.write('\n\t\t\tProperty: "FrameColor", "ColorRGB", "",0.3,0.3,0.3')
+ file.write('\n\t\t\tProperty: "ShowName", "bool", "",1')
+ file.write('\n\t\t\tProperty: "ShowGrid", "bool", "",1')
+ file.write('\n\t\t\tProperty: "ShowOpticalCenter", "bool", "",0')
+ file.write('\n\t\t\tProperty: "ShowAzimut", "bool", "",1')
+ file.write('\n\t\t\tProperty: "ShowTimeCode", "bool", "",0')
+ file.write('\n\t\t\tProperty: "NearPlane", "double", "",%.6f' % near)
+ file.write('\n\t\t\tProperty: "FarPlane", "double", "",%.6f' % far)
+ file.write('\n\t\t\tProperty: "FilmWidth", "double", "",0.816')
+ file.write('\n\t\t\tProperty: "FilmHeight", "double", "",0.612')
+ file.write('\n\t\t\tProperty: "FilmAspectRatio", "double", "",1.33333333333333')
+ file.write('\n\t\t\tProperty: "FilmSqueezeRatio", "double", "",1')
+ file.write('\n\t\t\tProperty: "FilmFormatIndex", "enum", "",4')
+ file.write('\n\t\t\tProperty: "ViewFrustum", "bool", "",1')
+ file.write('\n\t\t\tProperty: "ViewFrustumNearFarPlane", "bool", "",0')
+ file.write('\n\t\t\tProperty: "ViewFrustumBackPlaneMode", "enum", "",2')
+ file.write('\n\t\t\tProperty: "BackPlaneDistance", "double", "",100')
+ file.write('\n\t\t\tProperty: "BackPlaneDistanceMode", "enum", "",0')
+ file.write('\n\t\t\tProperty: "ViewCameraToLookAt", "bool", "",1')
+ file.write('\n\t\t\tProperty: "LockMode", "bool", "",0')
+ file.write('\n\t\t\tProperty: "LockInterestNavigation", "bool", "",0')
+ file.write('\n\t\t\tProperty: "FitImage", "bool", "",0')
+ file.write('\n\t\t\tProperty: "Crop", "bool", "",0')
+ file.write('\n\t\t\tProperty: "Center", "bool", "",1')
+ file.write('\n\t\t\tProperty: "KeepRatio", "bool", "",1')
+ file.write('\n\t\t\tProperty: "BackgroundMode", "enum", "",0')
+ file.write('\n\t\t\tProperty: "BackgroundAlphaTreshold", "double", "",0.5')
+ file.write('\n\t\t\tProperty: "ForegroundTransparent", "bool", "",1')
+ file.write('\n\t\t\tProperty: "DisplaySafeArea", "bool", "",0')
+ file.write('\n\t\t\tProperty: "SafeAreaDisplayStyle", "enum", "",1')
+ file.write('\n\t\t\tProperty: "SafeAreaAspectRatio", "double", "",1.33333333333333')
+ file.write('\n\t\t\tProperty: "Use2DMagnifierZoom", "bool", "",0')
+ file.write('\n\t\t\tProperty: "2D Magnifier Zoom", "Real", "A+",100')
+ file.write('\n\t\t\tProperty: "2D Magnifier X", "Real", "A+",50')
+ file.write('\n\t\t\tProperty: "2D Magnifier Y", "Real", "A+",50')
+ file.write('\n\t\t\tProperty: "CameraProjectionType", "enum", "",%i' % proj_type)
+ file.write('\n\t\t\tProperty: "UseRealTimeDOFAndAA", "bool", "",0')
+ file.write('\n\t\t\tProperty: "UseDepthOfField", "bool", "",0')
+ file.write('\n\t\t\tProperty: "FocusSource", "enum", "",0')
+ file.write('\n\t\t\tProperty: "FocusAngle", "double", "",3.5')
+ file.write('\n\t\t\tProperty: "FocusDistance", "double", "",200')
+ file.write('\n\t\t\tProperty: "UseAntialiasing", "bool", "",0')
+ file.write('\n\t\t\tProperty: "AntialiasingIntensity", "double", "",0.77777')
+ file.write('\n\t\t\tProperty: "UseAccumulationBuffer", "bool", "",0')
+ file.write('\n\t\t\tProperty: "FrameSamplingCount", "int", "",7')
+ file.write('\n\t\t}')
+ file.write('\n\t\tMultiLayer: 0')
+ file.write('\n\t\tMultiTake: 0')
+ file.write('\n\t\tHidden: "True"')
+ file.write('\n\t\tShading: Y')
+ file.write('\n\t\tCulling: "CullingOff"')
+ file.write('\n\t\tTypeFlags: "Camera"')
+ file.write('\n\t\tGeometryVersion: 124')
+ file.write('\n\t\tPosition: %.6f,%.6f,%.6f' % loc)
+ file.write('\n\t\tUp: %i,%i,%i' % up)
+ file.write('\n\t\tLookAt: 0,0,0')
+ file.write('\n\t\tShowInfoOnMoving: 1')
+ file.write('\n\t\tShowAudio: 0')
+ file.write('\n\t\tAudioColor: 0,1,0')
+ file.write('\n\t\tCameraOrthoZoom: 1')
+ file.write('\n\t}')
+
+ def write_camera_default():
+ # This sucks, but to match the FBX converter it's easier to
+ # write these cameras even though they are not needed.
+ write_camera_dummy('Producer Perspective', (0,71.3,287.5), 10, 4000, 0, (0,1,0))
+ write_camera_dummy('Producer Top', (0,4000,0), 1, 30000, 1, (0,0,-1))
+ write_camera_dummy('Producer Bottom', (0,-4000,0), 1, 30000, 1, (0,0,-1))
+ write_camera_dummy('Producer Front', (0,0,4000), 1, 30000, 1, (0,1,0))
+ write_camera_dummy('Producer Back', (0,0,-4000), 1, 30000, 1, (0,1,0))
+ write_camera_dummy('Producer Right', (4000,0,0), 1, 30000, 1, (0,1,0))
+ write_camera_dummy('Producer Left', (-4000,0,0), 1, 30000, 1, (0,1,0))
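+
+ # These seven fixed 'Producer' cameras carry no scene data; they are written
+ # purely to match the Autodesk FBX converter's output (see the note above).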
+
+ def write_camera(my_cam):
+ '''
+ Write a blender camera
+ '''
+ render = sce.render_data
+ width = render.resolution_x
+ height = render.resolution_y
+# render = sce.render
+# width = render.sizeX
+# height = render.sizeY
+ aspect = float(width)/height
+
+ data = my_cam.blenObject.data
+
+ file.write('\n\tModel: "Model::%s", "Camera" {' % my_cam.fbxName )
+ file.write('\n\t\tVersion: 232')
+ loc, rot, scale, matrix, matrix_rot = write_object_props(my_cam.blenObject, None, my_cam.parRelMatrix())
+
+ file.write('\n\t\t\tProperty: "Roll", "Roll", "A+",0')
+ file.write('\n\t\t\tProperty: "FieldOfView", "FieldOfView", "A+",%.6f' % data.angle)
+ file.write('\n\t\t\tProperty: "FieldOfViewX", "FieldOfView", "A+",1')
+ file.write('\n\t\t\tProperty: "FieldOfViewY", "FieldOfView", "A+",1')
+ file.write('\n\t\t\tProperty: "FocalLength", "Real", "A+",14.0323972702026')
+ file.write('\n\t\t\tProperty: "OpticalCenterX", "Real", "A+",%.6f' % data.shift_x) # not sure if this is in the correct units?
+# file.write('\n\t\t\tProperty: "OpticalCenterX", "Real", "A+",%.6f' % data.shiftX) # not sure if this is in the correct units?
+ file.write('\n\t\t\tProperty: "OpticalCenterY", "Real", "A+",%.6f' % data.shift_y) # ditto
+# file.write('\n\t\t\tProperty: "OpticalCenterY", "Real", "A+",%.6f' % data.shiftY) # ditto
+ file.write('\n\t\t\tProperty: "BackgroundColor", "Color", "A+",0,0,0')
+ file.write('\n\t\t\tProperty: "TurnTable", "Real", "A+",0')
+ file.write('\n\t\t\tProperty: "DisplayTurnTableIcon", "bool", "",1')
+ file.write('\n\t\t\tProperty: "Motion Blur Intensity", "Real", "A+",1')
+ file.write('\n\t\t\tProperty: "UseMotionBlur", "bool", "",0')
+ file.write('\n\t\t\tProperty: "UseRealTimeMotionBlur", "bool", "",1')
+ file.write('\n\t\t\tProperty: "ResolutionMode", "enum", "",0')
+ file.write('\n\t\t\tProperty: "ApertureMode", "enum", "",2')
+ file.write('\n\t\t\tProperty: "GateFit", "enum", "",0')
+ file.write('\n\t\t\tProperty: "CameraFormat", "enum", "",0')
+ file.write('\n\t\t\tProperty: "AspectW", "double", "",%i' % width)
+ file.write('\n\t\t\tProperty: "AspectH", "double", "",%i' % height)
+
+ '''Camera aspect ratio modes.
+ 0 If the ratio mode is eWINDOW_SIZE, both width and height values aren't relevant.
+ 1 If the ratio mode is eFIXED_RATIO, the height value is set to 1.0 and the width value is relative to the height value.
+ 2 If the ratio mode is eFIXED_RESOLUTION, both width and height values are in pixels.
+ 3 If the ratio mode is eFIXED_WIDTH, the width value is in pixels and the height value is relative to the width value.
+ 4 If the ratio mode is eFIXED_HEIGHT, the height value is in pixels and the width value is relative to the height value.
+
+ Definition at line 234 of file kfbxcamera.h. '''
+
+ file.write('\n\t\t\tProperty: "PixelAspectRatio", "double", "",2')
+
+ file.write('\n\t\t\tProperty: "UseFrameColor", "bool", "",0')
+ file.write('\n\t\t\tProperty: "FrameColor", "ColorRGB", "",0.3,0.3,0.3')
+ file.write('\n\t\t\tProperty: "ShowName", "bool", "",1')
+ file.write('\n\t\t\tProperty: "ShowGrid", "bool", "",1')
+ file.write('\n\t\t\tProperty: "ShowOpticalCenter", "bool", "",0')
+ file.write('\n\t\t\tProperty: "ShowAzimut", "bool", "",1')
+ file.write('\n\t\t\tProperty: "ShowTimeCode", "bool", "",0')
+ file.write('\n\t\t\tProperty: "NearPlane", "double", "",%.6f' % data.clip_start)
+# file.write('\n\t\t\tProperty: "NearPlane", "double", "",%.6f' % data.clipStart)
+ file.write('\n\t\t\tProperty: "FarPlane", "double", "",%.6f' % data.clip_end)
+# file.write('\n\t\t\tProperty: "FarPlane", "double", "",%.6f' % data.clipStart)
+ file.write('\n\t\t\tProperty: "FilmWidth", "double", "",1.0')
+ file.write('\n\t\t\tProperty: "FilmHeight", "double", "",1.0')
+ file.write('\n\t\t\tProperty: "FilmAspectRatio", "double", "",%.6f' % aspect)
+ file.write('\n\t\t\tProperty: "FilmSqueezeRatio", "double", "",1')
+ file.write('\n\t\t\tProperty: "FilmFormatIndex", "enum", "",0')
+ file.write('\n\t\t\tProperty: "ViewFrustum", "bool", "",1')
+ file.write('\n\t\t\tProperty: "ViewFrustumNearFarPlane", "bool", "",0')
+ file.write('\n\t\t\tProperty: "ViewFrustumBackPlaneMode", "enum", "",2')
+ file.write('\n\t\t\tProperty: "BackPlaneDistance", "double", "",100')
+ file.write('\n\t\t\tProperty: "BackPlaneDistanceMode", "enum", "",0')
+ file.write('\n\t\t\tProperty: "ViewCameraToLookAt", "bool", "",1')
+ file.write('\n\t\t\tProperty: "LockMode", "bool", "",0')
+ file.write('\n\t\t\tProperty: "LockInterestNavigation", "bool", "",0')
+ file.write('\n\t\t\tProperty: "FitImage", "bool", "",0')
+ file.write('\n\t\t\tProperty: "Crop", "bool", "",0')
+ file.write('\n\t\t\tProperty: "Center", "bool", "",1')
+ file.write('\n\t\t\tProperty: "KeepRatio", "bool", "",1')
+ file.write('\n\t\t\tProperty: "BackgroundMode", "enum", "",0')
+ file.write('\n\t\t\tProperty: "BackgroundAlphaTreshold", "double", "",0.5')
+ file.write('\n\t\t\tProperty: "ForegroundTransparent", "bool", "",1')
+ file.write('\n\t\t\tProperty: "DisplaySafeArea", "bool", "",0')
+ file.write('\n\t\t\tProperty: "SafeAreaDisplayStyle", "enum", "",1')
+ file.write('\n\t\t\tProperty: "SafeAreaAspectRatio", "double", "",%.6f' % aspect)
+ file.write('\n\t\t\tProperty: "Use2DMagnifierZoom", "bool", "",0')
+ file.write('\n\t\t\tProperty: "2D Magnifier Zoom", "Real", "A+",100')
+ file.write('\n\t\t\tProperty: "2D Magnifier X", "Real", "A+",50')
+ file.write('\n\t\t\tProperty: "2D Magnifier Y", "Real", "A+",50')
+ file.write('\n\t\t\tProperty: "CameraProjectionType", "enum", "",0')
+ file.write('\n\t\t\tProperty: "UseRealTimeDOFAndAA", "bool", "",0')
+ file.write('\n\t\t\tProperty: "UseDepthOfField", "bool", "",0')
+ file.write('\n\t\t\tProperty: "FocusSource", "enum", "",0')
+ file.write('\n\t\t\tProperty: "FocusAngle", "double", "",3.5')
+ file.write('\n\t\t\tProperty: "FocusDistance", "double", "",200')
+ file.write('\n\t\t\tProperty: "UseAntialiasing", "bool", "",0')
+ file.write('\n\t\t\tProperty: "AntialiasingIntensity", "double", "",0.77777')
+ file.write('\n\t\t\tProperty: "UseAccumulationBuffer", "bool", "",0')
+ file.write('\n\t\t\tProperty: "FrameSamplingCount", "int", "",7')
+
+ file.write('\n\t\t}')
+ file.write('\n\t\tMultiLayer: 0')
+ file.write('\n\t\tMultiTake: 0')
+ file.write('\n\t\tShading: Y')
+ file.write('\n\t\tCulling: "CullingOff"')
+ file.write('\n\t\tTypeFlags: "Camera"')
+ file.write('\n\t\tGeometryVersion: 124')
+ file.write('\n\t\tPosition: %.6f,%.6f,%.6f' % loc)
+ file.write('\n\t\tUp: %.6f,%.6f,%.6f' % tuple(Mathutils.Vector(0,1,0) * matrix_rot) )
+ file.write('\n\t\tLookAt: %.6f,%.6f,%.6f' % tuple(Mathutils.Vector(0,0,-1)*matrix_rot) )
+
+ #file.write('\n\t\tUp: 0,0,0' )
+ #file.write('\n\t\tLookAt: 0,0,0' )
+
+ file.write('\n\t\tShowInfoOnMoving: 1')
+ file.write('\n\t\tShowAudio: 0')
+ file.write('\n\t\tAudioColor: 0,1,0')
+ file.write('\n\t\tCameraOrthoZoom: 1')
+ file.write('\n\t}')
+
+ def write_light(my_light):
+ light = my_light.blenObject.data
+ file.write('\n\tModel: "Model::%s", "Light" {' % my_light.fbxName)
+ file.write('\n\t\tVersion: 232')
+
+ write_object_props(my_light.blenObject, None, my_light.parRelMatrix())
+
+ # Why are these values here twice?????? - oh well, follow the holy sdk's output
+
+ # Blender light types match FBX's, funny coincidence, we just need to
+ # be sure that all unsupported types are remapped (hemi and area become directional below)
+ #ePOINT,
+ #eDIRECTIONAL
+ #eSPOT
+ light_type_items = {'POINT': 0, 'SUN': 1, 'SPOT': 2, 'HEMI': 3, 'AREA': 4}
+ light_type = light_type_items[light.type]
+# light_type = light.type
+ if light_type > 2: light_type = 1 # hemi and area lights become directional
+
+# mode = light.mode
+ if light.shadow_method == 'RAY_SHADOW' or light.shadow_method == 'BUFFER_SHADOW':
+# if mode & Blender.Lamp.Modes.RayShadow or mode & Blender.Lamp.Modes.Shadows:
+ do_shadow = 1
+ else:
+ do_shadow = 0
+
+ if light.only_shadow or (not light.diffuse and not light.specular):
+# if mode & Blender.Lamp.Modes.OnlyShadow or (mode & Blender.Lamp.Modes.NoDiffuse and mode & Blender.Lamp.Modes.NoSpecular):
+ do_light = 0
+ else:
+ do_light = 1
+
+ scale = abs(GLOBAL_MATRIX.scalePart()[0]) # scale is always uniform in this case
+
+ file.write('\n\t\t\tProperty: "LightType", "enum", "",%i' % light_type)
+ file.write('\n\t\t\tProperty: "CastLightOnObject", "bool", "",1')
+ file.write('\n\t\t\tProperty: "DrawVolumetricLight", "bool", "",1')
+ file.write('\n\t\t\tProperty: "DrawGroundProjection", "bool", "",1')
+ file.write('\n\t\t\tProperty: "DrawFrontFacingVolumetricLight", "bool", "",0')
+ file.write('\n\t\t\tProperty: "GoboProperty", "object", ""')
+ file.write('\n\t\t\tProperty: "Color", "Color", "A+",1,1,1')
+ file.write('\n\t\t\tProperty: "Intensity", "Intensity", "A+",%.2f' % (min(light.energy*100, 200))) # clamp below 200
+ if light.type == 'SPOT':
+ file.write('\n\t\t\tProperty: "Cone angle", "Cone angle", "A+",%.2f' % (light.spot_size * scale))
+# file.write('\n\t\t\tProperty: "Cone angle", "Cone angle", "A+",%.2f' % (light.spotSize * scale))
+ file.write('\n\t\t\tProperty: "Fog", "Fog", "A+",50')
+ file.write('\n\t\t\tProperty: "Color", "Color", "A",%.2f,%.2f,%.2f' % tuple(light.color))
+# file.write('\n\t\t\tProperty: "Color", "Color", "A",%.2f,%.2f,%.2f' % tuple(light.col))
+ file.write('\n\t\t\tProperty: "Intensity", "Intensity", "A+",%.2f' % (min(light.energy*100, 200))) # clamp below 200
+#
+ # duplication? see ^ (Arystan)
+# file.write('\n\t\t\tProperty: "Cone angle", "Cone angle", "A+",%.2f' % (light.spotSize * scale))
+ file.write('\n\t\t\tProperty: "Fog", "Fog", "A+",50')
+ file.write('\n\t\t\tProperty: "LightType", "enum", "",%i' % light_type)
+ file.write('\n\t\t\tProperty: "CastLightOnObject", "bool", "",%i' % do_light)
+ file.write('\n\t\t\tProperty: "DrawGroundProjection", "bool", "",1')
+ file.write('\n\t\t\tProperty: "DrawFrontFacingVolumetricLight", "bool", "",0')
+ file.write('\n\t\t\tProperty: "DrawVolumetricLight", "bool", "",1')
+ file.write('\n\t\t\tProperty: "GoboProperty", "object", ""')
+ file.write('\n\t\t\tProperty: "DecayType", "enum", "",0')
+ file.write('\n\t\t\tProperty: "DecayStart", "double", "",%.2f' % light.distance)
+# file.write('\n\t\t\tProperty: "DecayStart", "double", "",%.2f' % light.dist)
+ file.write('\n\t\t\tProperty: "EnableNearAttenuation", "bool", "",0')
+ file.write('\n\t\t\tProperty: "NearAttenuationStart", "double", "",0')
+ file.write('\n\t\t\tProperty: "NearAttenuationEnd", "double", "",0')
+ file.write('\n\t\t\tProperty: "EnableFarAttenuation", "bool", "",0')
+ file.write('\n\t\t\tProperty: "FarAttenuationStart", "double", "",0')
+ file.write('\n\t\t\tProperty: "FarAttenuationEnd", "double", "",0')
+ file.write('\n\t\t\tProperty: "CastShadows", "bool", "",%i' % do_shadow)
+ file.write('\n\t\t\tProperty: "ShadowColor", "ColorRGBA", "",0,0,0,1')
+ file.write('\n\t\t}')
+ file.write('\n\t\tMultiLayer: 0')
+ file.write('\n\t\tMultiTake: 0')
+ file.write('\n\t\tShading: Y')
+ file.write('\n\t\tCulling: "CullingOff"')
+ file.write('\n\t\tTypeFlags: "Light"')
+ file.write('\n\t\tGeometryVersion: 124')
+ file.write('\n\t}')
+
+ # matrixOnly is not used at the moment
+ def write_null(my_null = None, fbxName = None, matrixOnly = None):
+ # ob can be null
+ if not fbxName: fbxName = my_null.fbxName
+
+ file.write('\n\tModel: "Model::%s", "Null" {' % fbxName)
+ file.write('\n\t\tVersion: 232')
+
+ # only use this for the root matrix at the moment
+ if matrixOnly:
+ poseMatrix = write_object_props(None, None, matrixOnly)[3]
+
+ else: # all other Null's
+ if my_null: poseMatrix = write_object_props(my_null.blenObject, None, my_null.parRelMatrix())[3]
+ else: poseMatrix = write_object_props()[3]
+
+ pose_items.append((fbxName, poseMatrix))
+
+ file.write('''
+ }
+ MultiLayer: 0
+ MultiTake: 1
+ Shading: Y
+ Culling: "CullingOff"
+ TypeFlags: "Null"
+ }''')
+
+ # Material Settings
+ if world: world_amb = tuple(world.ambient_color)
+# if world: world_amb = world.getAmb()
+ else: world_amb = (0,0,0) # Default value
+
+ def write_material(matname, mat):
+ file.write('\n\tMaterial: "Material::%s", "" {' % matname)
+
+ # Todo, add more material Properties.
+ if mat:
+ mat_cold = tuple(mat.diffuse_color)
+# mat_cold = tuple(mat.rgbCol)
+ mat_cols = tuple(mat.specular_color)
+# mat_cols = tuple(mat.specCol)
+ #mat_colm = tuple(mat.mirCol) # we wont use the mirror color
+ mat_colamb = world_amb
+# mat_colamb = tuple([c for c in world_amb])
+
+ mat_dif = mat.diffuse_intensity
+# mat_dif = mat.ref
+ mat_amb = mat.ambient
+# mat_amb = mat.amb
+ mat_hard = (float(mat.specular_hardness)-1)/5.10
+# mat_hard = (float(mat.hard)-1)/5.10
+ mat_spec = mat.specular_intensity/2.0
+# mat_spec = mat.spec/2.0
+ mat_alpha = mat.alpha
+ mat_emit = mat.emit
+ mat_shadeless = mat.shadeless
+# mat_shadeless = mat.mode & Blender.Material.Modes.SHADELESS
+ if mat_shadeless:
+ mat_shader = 'Lambert'
+ else:
+ if mat.diffuse_shader == 'LAMBERT':
+# if mat.diffuseShader == Blender.Material.Shaders.DIFFUSE_LAMBERT:
+ mat_shader = 'Lambert'
+ else:
+ mat_shader = 'Phong'
+ else:
+ mat_cols = mat_cold = 0.8, 0.8, 0.8
+ mat_colamb = 0.0,0.0,0.0
+ # mat_colm
+ mat_dif = 1.0
+ mat_amb = 0.5
+ mat_hard = 20.0
+ mat_spec = 0.2
+ mat_alpha = 1.0
+ mat_emit = 0.0
+ mat_shadeless = False
+ mat_shader = 'Phong'
+
+ file.write('\n\t\tVersion: 102')
+ file.write('\n\t\tShadingModel: "%s"' % mat_shader.lower())
+ file.write('\n\t\tMultiLayer: 0')
+
+ file.write('\n\t\tProperties60: {')
+ file.write('\n\t\t\tProperty: "ShadingModel", "KString", "", "%s"' % mat_shader)
+ file.write('\n\t\t\tProperty: "MultiLayer", "bool", "",0')
+ file.write('\n\t\t\tProperty: "EmissiveColor", "ColorRGB", "",%.4f,%.4f,%.4f' % mat_cold) # emit and diffuse color are the same in blender
+ file.write('\n\t\t\tProperty: "EmissiveFactor", "double", "",%.4f' % mat_emit)
+
+ file.write('\n\t\t\tProperty: "AmbientColor", "ColorRGB", "",%.4f,%.4f,%.4f' % mat_colamb)
+ file.write('\n\t\t\tProperty: "AmbientFactor", "double", "",%.4f' % mat_amb)
+ file.write('\n\t\t\tProperty: "DiffuseColor", "ColorRGB", "",%.4f,%.4f,%.4f' % mat_cold)
+ file.write('\n\t\t\tProperty: "DiffuseFactor", "double", "",%.4f' % mat_dif)
+ file.write('\n\t\t\tProperty: "Bump", "Vector3D", "",0,0,0')
+ file.write('\n\t\t\tProperty: "TransparentColor", "ColorRGB", "",1,1,1')
+ file.write('\n\t\t\tProperty: "TransparencyFactor", "double", "",%.4f' % (1.0 - mat_alpha))
+ if not mat_shadeless:
+ file.write('\n\t\t\tProperty: "SpecularColor", "ColorRGB", "",%.4f,%.4f,%.4f' % mat_cols)
+ file.write('\n\t\t\tProperty: "SpecularFactor", "double", "",%.4f' % mat_spec)
+ file.write('\n\t\t\tProperty: "ShininessExponent", "double", "",80.0')
+ file.write('\n\t\t\tProperty: "ReflectionColor", "ColorRGB", "",0,0,0')
+ file.write('\n\t\t\tProperty: "ReflectionFactor", "double", "",1')
+ file.write('\n\t\t\tProperty: "Emissive", "ColorRGB", "",0,0,0')
+ file.write('\n\t\t\tProperty: "Ambient", "ColorRGB", "",%.1f,%.1f,%.1f' % mat_colamb)
+ file.write('\n\t\t\tProperty: "Diffuse", "ColorRGB", "",%.1f,%.1f,%.1f' % mat_cold)
+ if not mat_shadeless:
+ file.write('\n\t\t\tProperty: "Specular", "ColorRGB", "",%.1f,%.1f,%.1f' % mat_cols)
+ file.write('\n\t\t\tProperty: "Shininess", "double", "",%.1f' % mat_hard)
+ file.write('\n\t\t\tProperty: "Opacity", "double", "",%.1f' % mat_alpha)
+ if not mat_shadeless:
+ file.write('\n\t\t\tProperty: "Reflectivity", "double", "",0')
+
+ file.write('\n\t\t}')
+ file.write('\n\t}')
+
+ def copy_image(image):
+
+ rel = image.get_export_path(basepath, True)
+ base = os.path.basename(rel)
+
+ if EXP_IMAGE_COPY:
+ absp = image.get_export_path(basepath, False)
+ if not os.path.exists(absp):
+ shutil.copy(image.get_abs_filename(), absp)
+
+ return (rel, base)
+
+ # tex is an Image (Arystan)
+ def write_video(texname, tex):
+ # Same as texture really!
+ file.write('\n\tVideo: "Video::%s", "Clip" {' % texname)
+
+ file.write('''
+ Type: "Clip"
+ Properties60: {
+ Property: "FrameRate", "double", "",0
+ Property: "LastFrame", "int", "",0
+ Property: "Width", "int", "",0
+ Property: "Height", "int", "",0''')
+ if tex:
+ fname_rel, fname_strip = copy_image(tex)
+# fname, fname_strip, fname_rel = derived_paths(tex.filename, basepath, EXP_IMAGE_COPY)
+ else:
+ fname = fname_strip = fname_rel = ''
+
+ file.write('\n\t\t\tProperty: "Path", "charptr", "", "%s"' % fname_strip)
+
+
+ file.write('''
+ Property: "StartFrame", "int", "",0
+ Property: "StopFrame", "int", "",0
+ Property: "PlaySpeed", "double", "",1
+ Property: "Offset", "KTime", "",0
+ Property: "InterlaceMode", "enum", "",0
+ Property: "FreeRunning", "bool", "",0
+ Property: "Loop", "bool", "",0
+ Property: "AccessMode", "enum", "",0
+ }
+ UseMipMap: 0''')
+
+ file.write('\n\t\tFilename: "%s"' % fname_strip)
+ if fname_strip: fname_strip = '/' + fname_strip
+ file.write('\n\t\tRelativeFilename: "%s"' % fname_rel) # make relative
+ file.write('\n\t}')
+
+
+ def write_texture(texname, tex, num):
+ # if tex == None then this is a dummy tex
+ file.write('\n\tTexture: "Texture::%s", "TextureVideoClip" {' % texname)
+ file.write('\n\t\tType: "TextureVideoClip"')
+ file.write('\n\t\tVersion: 202')
+ # TODO, rare case _empty_ exists as a name.
+ file.write('\n\t\tTextureName: "Texture::%s"' % texname)
+
+ file.write('''
+ Properties60: {
+ Property: "Translation", "Vector", "A+",0,0,0
+ Property: "Rotation", "Vector", "A+",0,0,0
+ Property: "Scaling", "Vector", "A+",1,1,1''')
+ file.write('\n\t\t\tProperty: "Texture alpha", "Number", "A+",%i' % num)
+
+
+ # WrapModeU/V 0==rep, 1==clamp, TODO add support
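+ # note: clamp_x/clamp_y are booleans, so False (0) maps to repeat and
+ # True (1) to clamp, which lines up with the FBX enum values used here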
+ file.write('''
+ Property: "TextureTypeUse", "enum", "",0
+ Property: "CurrentTextureBlendMode", "enum", "",1
+ Property: "UseMaterial", "bool", "",0
+ Property: "UseMipMap", "bool", "",0
+ Property: "CurrentMappingType", "enum", "",0
+ Property: "UVSwap", "bool", "",0''')
+
+ file.write('\n\t\t\tProperty: "WrapModeU", "enum", "",%i' % tex.clamp_x)
+# file.write('\n\t\t\tProperty: "WrapModeU", "enum", "",%i' % tex.clampX)
+ file.write('\n\t\t\tProperty: "WrapModeV", "enum", "",%i' % tex.clamp_y)
+# file.write('\n\t\t\tProperty: "WrapModeV", "enum", "",%i' % tex.clampY)
+
+ file.write('''
+ Property: "TextureRotationPivot", "Vector3D", "",0,0,0
+ Property: "TextureScalingPivot", "Vector3D", "",0,0,0
+ Property: "VideoProperty", "object", ""
+ }''')
+
+ file.write('\n\t\tMedia: "Video::%s"' % texname)
+
+ if tex:
+ fname_rel, fname_strip = copy_image(tex)
+# fname, fname_strip, fname_rel = derived_paths(tex.filename, basepath, EXP_IMAGE_COPY)
+ else:
+ fname = fname_strip = fname_rel = ''
+
+ file.write('\n\t\tFileName: "%s"' % fname_strip)
+ file.write('\n\t\tRelativeFilename: "%s"' % fname_rel) # need some make relative command
+
+ file.write('''
+ ModelUVTranslation: 0,0
+ ModelUVScaling: 1,1
+ Texture_Alpha_Source: "None"
+ Cropping: 0,0,0,0
+ }''')
+
+ def write_deformer_skin(obname):
+ '''
+ Each mesh has its own deformer
+ '''
+ file.write('\n\tDeformer: "Deformer::Skin %s", "Skin" {' % obname)
+ file.write('''
+ Version: 100
+ MultiLayer: 0
+ Type: "Skin"
+ Properties60: {
+ }
+ Link_DeformAcuracy: 50
+ }''')
+
+ # in the example was 'Bip01 L Thigh_2'
+ def write_sub_deformer_skin(my_mesh, my_bone, weights):
+
+ '''
+ Each subdeformer is specific to a mesh, but the bone it links to can be used by many sub-deformers,
+ so the SubDeformer needs the mesh-object name as a prefix to make it unique.
+
+ It's possible that there is no matching vgroup in this mesh; in that case no verts are in the subdeformer,
+ a bit silly but it doesn't really matter.
+ '''
+ file.write('\n\tDeformer: "SubDeformer::Cluster %s %s", "Cluster" {' % (my_mesh.fbxName, my_bone.fbxName))
+
+ file.write('''
+ Version: 100
+ MultiLayer: 0
+ Type: "Cluster"
+ Properties60: {
+ Property: "SrcModel", "object", ""
+ Property: "SrcModelReference", "object", ""
+ }
+ UserData: "", ""''')
+
+ # Support for bone parents
+ if my_mesh.fbxBoneParent:
+ if my_mesh.fbxBoneParent == my_bone:
+ # TODO - this is a bit lazy, we could have a simple write loop
+ # for this case because all weights are 1.0 but for now this is ok
+ # Parent Bones aren't used all that much anyway.
+ vgroup_data = [(j, 1.0) for j in range(len(my_mesh.blenData.verts))]
+ else:
+ # This bone is not a parent of this mesh object, no weights
+ vgroup_data = []
+
+ else:
+ # Normal weight painted mesh
+ if my_bone.blenName in weights[0]:
+ # Before, we used a normalized weight list
+ #vgroup_data = me.getVertsFromGroup(bone.name, 1)
+ group_index = weights[0].index(my_bone.blenName)
+ vgroup_data = [(j, weight[group_index]) for j, weight in enumerate(weights[1]) if weight[group_index]]
+ else:
+ vgroup_data = []
+
+ file.write('\n\t\tIndexes: ')
+
+ i = -1
+ for vg in vgroup_data:
+ if i == -1:
+ file.write('%i' % vg[0])
+ i=0
+ else:
+ if i==23:
+ file.write('\n\t\t')
+ i=0
+ file.write(',%i' % vg[0])
+ i+=1
+
+ file.write('\n\t\tWeights: ')
+ i = -1
+ for vg in vgroup_data:
+ if i == -1:
+ file.write('%.8f' % vg[1])
+ i=0
+ else:
+ if i==38:
+ file.write('\n\t\t')
+ i=0
+ file.write(',%.8f' % vg[1])
+ i+=1
+
+ if my_mesh.fbxParent:
+ # TODO FIXME, this case is broken in some situations; skinned meshes just shouldn't have parents where possible!
+ m = mtx4_z90 * (my_bone.restMatrix * my_bone.fbxArm.matrixWorld.copy() * my_mesh.matrixWorld.copy().invert() )
+ else:
+ # Yes! this is it... - but doesn't work when the mesh is a.
+ m = mtx4_z90 * (my_bone.restMatrix * my_bone.fbxArm.matrixWorld.copy() * my_mesh.matrixWorld.copy().invert() )
+
+ #m = mtx4_z90 * my_bone.restMatrix
+ matstr = mat4x4str(m)
+ matstr_i = mat4x4str(m.invert())
+
+ file.write('\n\t\tTransform: %s' % matstr_i) # THIS IS __NOT__ THE GLOBAL MATRIX AS DOCUMENTED :/
+ file.write('\n\t\tTransformLink: %s' % matstr)
+ file.write('\n\t}')
+
+ def write_mesh(my_mesh):
+
+ me = my_mesh.blenData
+
+ # if there are non-NULL materials on this mesh
+ if my_mesh.blenMaterials: do_materials = True
+ else: do_materials = False
+
+ if my_mesh.blenTextures: do_textures = True
+ else: do_textures = False
+
+ do_uvs = len(me.uv_textures) > 0
+# do_uvs = me.faceUV
+
+
+ file.write('\n\tModel: "Model::%s", "Mesh" {' % my_mesh.fbxName)
+ file.write('\n\t\tVersion: 232') # newline is added in write_object_props
+
+ poseMatrix = write_object_props(my_mesh.blenObject, None, my_mesh.parRelMatrix())[3]
+ pose_items.append((my_mesh.fbxName, poseMatrix))
+
+ file.write('\n\t\t}')
+ file.write('\n\t\tMultiLayer: 0')
+ file.write('\n\t\tMultiTake: 1')
+ file.write('\n\t\tShading: Y')
+ file.write('\n\t\tCulling: "CullingOff"')
+
+
+ # Write the Real Mesh data here
+ file.write('\n\t\tVertices: ')
+ i=-1
+
+ for v in me.verts:
+ if i==-1:
+ file.write('%.6f,%.6f,%.6f' % tuple(v.co)); i=0
+ else:
+ if i==7:
+ file.write('\n\t\t'); i=0
+ file.write(',%.6f,%.6f,%.6f'% tuple(v.co))
+ i+=1
+
+ file.write('\n\t\tPolygonVertexIndex: ')
+ i=-1
+ for f in me.faces:
+ fi = f.verts
+ # fi = [v_index for j, v_index in enumerate(f.verts) if v_index != 0 or j != 3]
+# fi = [v.index for v in f]
+
+ # flip the last index, odd but it looks like
+ # this is how fbx tells one face from another
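+ # e.g. a quad 0,1,2,3 is written as 0,1,2,-4, since -(3+1) == -4;
+ # a reader recovers the original index with -(value) - 1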
+ fi[-1] = -(fi[-1]+1)
+ fi = tuple(fi)
+ if i==-1:
+ if len(fi) == 3: file.write('%i,%i,%i' % fi )
+# if len(f) == 3: file.write('%i,%i,%i' % fi )
+ else: file.write('%i,%i,%i,%i' % fi )
+ i=0
+ else:
+ if i==13:
+ file.write('\n\t\t')
+ i=0
+ if len(fi) == 3: file.write(',%i,%i,%i' % fi )
+# if len(f) == 3: file.write(',%i,%i,%i' % fi )
+ else: file.write(',%i,%i,%i,%i' % fi )
+ i+=1
+
+ file.write('\n\t\tEdges: ')
+ i=-1
+ for ed in me.edges:
+ if i==-1:
+ file.write('%i,%i' % (ed.verts[0], ed.verts[1]))
+# file.write('%i,%i' % (ed.v1.index, ed.v2.index))
+ i=0
+ else:
+ if i==13:
+ file.write('\n\t\t')
+ i=0
+ file.write(',%i,%i' % (ed.verts[0], ed.verts[1]))
+# file.write(',%i,%i' % (ed.v1.index, ed.v2.index))
+ i+=1
+
+ file.write('\n\t\tGeometryVersion: 124')
+
+ file.write('''
+ LayerElementNormal: 0 {
+ Version: 101
+ Name: ""
+ MappingInformationType: "ByVertice"
+ ReferenceInformationType: "Direct"
+ Normals: ''')
+
+ i=-1
+ for v in me.verts:
+ if i==-1:
+ file.write('%.15f,%.15f,%.15f' % tuple(v.normal)); i=0
+# file.write('%.15f,%.15f,%.15f' % tuple(v.no)); i=0
+ else:
+ if i==2:
+ file.write('\n '); i=0
+ file.write(',%.15f,%.15f,%.15f' % tuple(v.normal))
+# file.write(',%.15f,%.15f,%.15f' % tuple(v.no))
+ i+=1
+ file.write('\n\t\t}')
+
+ # Write Face Smoothing
+ file.write('''
+ LayerElementSmoothing: 0 {
+ Version: 102
+ Name: ""
+ MappingInformationType: "ByPolygon"
+ ReferenceInformationType: "Direct"
+ Smoothing: ''')
+
+ i=-1
+ for f in me.faces:
+ if i==-1:
+ file.write('%i' % f.smooth); i=0
+ else:
+ if i==54:
+ file.write('\n '); i=0
+ file.write(',%i' % f.smooth)
+ i+=1
+
+ file.write('\n\t\t}')
+
+ # Write Edge Smoothing
+ file.write('''
+ LayerElementSmoothing: 0 {
+ Version: 101
+ Name: ""
+ MappingInformationType: "ByEdge"
+ ReferenceInformationType: "Direct"
+ Smoothing: ''')
+
+# SHARP = Blender.Mesh.EdgeFlags.SHARP
+ i=-1
+ for ed in me.edges:
+ if i==-1:
+ file.write('%i' % (ed.sharp)); i=0
+# file.write('%i' % ((ed.flag&SHARP)!=0)); i=0
+ else:
+ if i==54:
+ file.write('\n '); i=0
+ file.write(',%i' % (ed.sharp))
+# file.write(',%i' % ((ed.flag&SHARP)!=0))
+ i+=1
+
+ file.write('\n\t\t}')
+# del SHARP
+
+ # small utility function
+ # returns a slice of data depending on number of face verts
+ # data is either a MeshTextureFace or MeshColor
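+ # e.g. for a triangle only the first 3 of the 4 stored values are kept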
+ def face_data(data, face):
+ totvert = len(face.verts) # use the argument rather than a leaked loop variable
+
+ return data[:totvert]
+
+
+ # Write VertexColor Layers
+ # note, no programs seem to use this info :/
+ collayers = []
+ if len(me.vertex_colors):
+# if me.vertexColors:
+ collayers = me.vertex_colors
+# collayers = me.getColorLayerNames()
+ collayer_orig = me.active_vertex_color
+# collayer_orig = me.activeColorLayer
+ for colindex, collayer in enumerate(collayers):
+# me.activeColorLayer = collayer
+ file.write('\n\t\tLayerElementColor: %i {' % colindex)
+ file.write('\n\t\t\tVersion: 101')
+ file.write('\n\t\t\tName: "%s"' % collayer.name)
+# file.write('\n\t\t\tName: "%s"' % collayer)
+
+ file.write('''
+ MappingInformationType: "ByPolygonVertex"
+ ReferenceInformationType: "IndexToDirect"
+ Colors: ''')
+
+ i = -1
+ ii = 0 # Count how many Colors we write
+
+ for f, cf in zip(me.faces, collayer.data):
+ colors = [cf.color1, cf.color2, cf.color3, cf.color4]
+
+ # determine number of verts
+ colors = face_data(colors, f)
+
+ for col in colors:
+ if i==-1:
+ file.write('%.4f,%.4f,%.4f,1' % tuple(col))
+ i=0
+ else:
+ if i==7:
+ file.write('\n\t\t\t\t')
+ i=0
+ file.write(',%.4f,%.4f,%.4f,1' % tuple(col))
+ i+=1
+ ii+=1 # One more Color
+
+# for f in me.faces:
+# for col in f.col:
+# if i==-1:
+# file.write('%.4f,%.4f,%.4f,1' % (col[0]/255.0, col[1]/255.0, col[2]/255.0))
+# i=0
+# else:
+# if i==7:
+# file.write('\n\t\t\t\t')
+# i=0
+# file.write(',%.4f,%.4f,%.4f,1' % (col[0]/255.0, col[1]/255.0, col[2]/255.0))
+# i+=1
+# ii+=1 # One more Color
+
+ file.write('\n\t\t\tColorIndex: ')
+ i = -1
+ for j in range(ii):
+ if i == -1:
+ file.write('%i' % j)
+ i=0
+ else:
+ if i==55:
+ file.write('\n\t\t\t\t')
+ i=0
+ file.write(',%i' % j)
+ i+=1
+
+ file.write('\n\t\t}')
+
+
+
+ # Write UV and texture layers.
+ uvlayers = []
+ if do_uvs:
+ uvlayers = me.uv_textures
+# uvlayers = me.getUVLayerNames()
+ uvlayer_orig = me.active_uv_texture
+# uvlayer_orig = me.activeUVLayer
+ for uvindex, uvlayer in enumerate(me.uv_textures):
+# for uvindex, uvlayer in enumerate(uvlayers):
+# me.activeUVLayer = uvlayer
+ file.write('\n\t\tLayerElementUV: %i {' % uvindex)
+ file.write('\n\t\t\tVersion: 101')
+ file.write('\n\t\t\tName: "%s"' % uvlayer.name)
+# file.write('\n\t\t\tName: "%s"' % uvlayer)
+
+ file.write('''
+ MappingInformationType: "ByPolygonVertex"
+ ReferenceInformationType: "IndexToDirect"
+ UV: ''')
+
+ i = -1
+ ii = 0 # Count how many UVs we write
+
+ for uf in uvlayer.data:
+# for f in me.faces:
+ for uv in uf.uv:
+# for uv in f.uv:
+ if i==-1:
+ file.write('%.6f,%.6f' % tuple(uv))
+ i=0
+ else:
+ if i==7:
+ file.write('\n ')
+ i=0
+ file.write(',%.6f,%.6f' % tuple(uv))
+ i+=1
+ ii+=1 # One more UV
+
+ file.write('\n\t\t\tUVIndex: ')
+ i = -1
+ for j in range(ii):
+ if i == -1:
+ file.write('%i' % j)
+ i=0
+ else:
+ if i==55:
+ file.write('\n\t\t\t\t')
+ i=0
+ file.write(',%i' % j)
+ i+=1
+
+ file.write('\n\t\t}')
+
+ if do_textures:
+ file.write('\n\t\tLayerElementTexture: %i {' % uvindex)
+ file.write('\n\t\t\tVersion: 101')
+ file.write('\n\t\t\tName: "%s"' % uvlayer.name)
+# file.write('\n\t\t\tName: "%s"' % uvlayer)
+
+ if len(my_mesh.blenTextures) == 1:
+ file.write('\n\t\t\tMappingInformationType: "AllSame"')
+ else:
+ file.write('\n\t\t\tMappingInformationType: "ByPolygon"')
+
+ file.write('\n\t\t\tReferenceInformationType: "IndexToDirect"')
+ file.write('\n\t\t\tBlendMode: "Translucent"')
+ file.write('\n\t\t\tTextureAlpha: 1')
+ file.write('\n\t\t\tTextureId: ')
+
+ if len(my_mesh.blenTextures) == 1:
+ file.write('0')
+ else:
+ texture_mapping_local = {None:-1}
+
+ i = 0 # 1 for dummy
+ for tex in my_mesh.blenTextures:
+ if tex: # None is set above
+ texture_mapping_local[tex] = i
+ i+=1
+
+ i=-1
+ for f in uvlayer.data:
+# for f in me.faces:
+ img_key = f.image
+
+ if i==-1:
+ i=0
+ file.write( '%s' % texture_mapping_local[img_key])
+ else:
+ if i==55:
+ file.write('\n ')
+ i=0
+
+ file.write(',%s' % texture_mapping_local[img_key])
+ i+=1
+
+ else:
+ file.write('''
+ LayerElementTexture: 0 {
+ Version: 101
+ Name: ""
+ MappingInformationType: "NoMappingInformation"
+ ReferenceInformationType: "IndexToDirect"
+ BlendMode: "Translucent"
+ TextureAlpha: 1
+ TextureId: ''')
+ file.write('\n\t\t}')
+
+# me.activeUVLayer = uvlayer_orig
+
+ # Done with UV/textures.
+
+ if do_materials:
+ file.write('\n\t\tLayerElementMaterial: 0 {')
+ file.write('\n\t\t\tVersion: 101')
+ file.write('\n\t\t\tName: ""')
+
+ if len(my_mesh.blenMaterials) == 1:
+ file.write('\n\t\t\tMappingInformationType: "AllSame"')
+ else:
+ file.write('\n\t\t\tMappingInformationType: "ByPolygon"')
+
+ file.write('\n\t\t\tReferenceInformationType: "IndexToDirect"')
+ file.write('\n\t\t\tMaterials: ')
+
+ if len(my_mesh.blenMaterials) == 1:
+ file.write('0')
+ else:
+ # Build a material mapping for this
+ material_mapping_local = {} # local-mat & tex : global index.
+
+ for j, mat_tex_pair in enumerate(my_mesh.blenMaterials):
+ material_mapping_local[mat_tex_pair] = j
+
+ len_material_mapping_local = len(material_mapping_local)
+
+ mats = my_mesh.blenMaterialList
+
+ if me.active_uv_texture:
+ uv_faces = me.active_uv_texture.data
+ else:
+ uv_faces = [None] * len(me.faces)
+
+ i=-1
+ for f, uf in zip(me.faces, uv_faces):
+# for f in me.faces:
+ try: mat = mats[f.material_index]
+# try: mat = mats[f.mat]
+ except:mat = None
+
+ if do_uvs: tex = uf.image # WARNING - MULTI UV LAYER IMAGES NOT SUPPORTED :/
+# if do_uvs: tex = f.image # WARNING - MULTI UV LAYER IMAGES NOT SUPPORTED :/
+ else: tex = None
+
+ if i==-1:
+ i=0
+ file.write( '%s' % (material_mapping_local[mat, tex])) # None for mat or tex is ok
+ else:
+ if i==55:
+ file.write('\n\t\t\t\t')
+ i=0
+
+ file.write(',%s' % (material_mapping_local[mat, tex]))
+ i+=1
+
+ file.write('\n\t\t}')
+
+ file.write('''
+ Layer: 0 {
+ Version: 100
+ LayerElement: {
+ Type: "LayerElementNormal"
+ TypedIndex: 0
+ }''')
+
+ if do_materials:
+ file.write('''
+ LayerElement: {
+ Type: "LayerElementMaterial"
+ TypedIndex: 0
+ }''')
+
+ # Write the texture layer element (only when textures are used)
+ if do_textures:
+ file.write('''
+ LayerElement: {
+ Type: "LayerElementTexture"
+ TypedIndex: 0
+ }''')
+
+ if me.vertex_colors:
+# if me.vertexColors:
+ file.write('''
+ LayerElement: {
+ Type: "LayerElementColor"
+ TypedIndex: 0
+ }''')
+
+ if do_uvs: # same as me.faceUV
+ file.write('''
+ LayerElement: {
+ Type: "LayerElementUV"
+ TypedIndex: 0
+ }''')
+
+
+ file.write('\n\t\t}')
+
+ if len(uvlayers) > 1:
+ for i in range(1, len(uvlayers)):
+
+ file.write('\n\t\tLayer: %i {' % i)
+ file.write('\n\t\t\tVersion: 100')
+
+ file.write('''
+ LayerElement: {
+ Type: "LayerElementUV"''')
+
+ file.write('\n\t\t\t\tTypedIndex: %i' % i)
+ file.write('\n\t\t\t}')
+
+ if do_textures:
+
+ file.write('''
+ LayerElement: {
+ Type: "LayerElementTexture"''')
+
+ file.write('\n\t\t\t\tTypedIndex: %i' % i)
+ file.write('\n\t\t\t}')
+
+ file.write('\n\t\t}')
+
+ if len(collayers) > 1:
+ # Take into account any UV layers
+ layer_offset = 0
+ if uvlayers: layer_offset = len(uvlayers)-1
+
+ for i in range(layer_offset, len(collayers)+layer_offset):
+ file.write('\n\t\tLayer: %i {' % i)
+ file.write('\n\t\t\tVersion: 100')
+
+ file.write('''
+ LayerElement: {
+ Type: "LayerElementColor"''')
+
+ file.write('\n\t\t\t\tTypedIndex: %i' % i)
+ file.write('\n\t\t\t}')
+ file.write('\n\t\t}')
+ file.write('\n\t}')
+
+ def write_group(name):
+ file.write('\n\tGroupSelection: "GroupSelection::%s", "Default" {' % name)
+
+ file.write('''
+ Properties60: {
+ Property: "MultiLayer", "bool", "",0
+ Property: "Pickable", "bool", "",1
+ Property: "Transformable", "bool", "",1
+ Property: "Show", "bool", "",1
+ }
+ MultiLayer: 0
+ }''')
+
+
+ # temporary meshes are added here so they can be freed once the export is done.
+ meshes_to_clear = []
+
+ ob_meshes = []
+ ob_lights = []
+ ob_cameras = []
+ # in fbx we export bones as children of the mesh;
+ # armatures that are not part of a mesh will be added to ob_arms
+ ob_bones = []
+ ob_arms = []
+ ob_null = [] # empties
+
+ # List of types that have blender objects (not bones)
+ ob_all_typegroups = [ob_meshes, ob_lights, ob_cameras, ob_arms, ob_null]
+
+ groups = [] # blender groups, only add ones that have objects in the selections
+ materials = {} # (mat, image) keys, should be a set()
+ textures = {} # should be a set()
+
+ tmp_ob_type = ob_type = None # in case no objects are exported, so as not to raise an error
+
+ # if EXP_OBS_SELECTED is false, use the scene's objects
+ if not batch_objects:
+ if EXP_OBS_SELECTED: tmp_objects = context.selected_objects
+# if EXP_OBS_SELECTED: tmp_objects = sce.objects.context
+ else: tmp_objects = sce.objects
+ else:
+ tmp_objects = batch_objects
+
+ if EXP_ARMATURE:
+ # This is needed so applying modifiers doesn't apply the armature deformation; it's also needed
+ # ...so mesh objects return their rest worldspace matrix when bone-parents are exported as weighted meshes.
+ # set every armature to its rest pose, backing up the original values so we don't mess up the scene
+ ob_arms_orig_rest = [arm.rest_position for arm in bpy.data.armatures]
+# ob_arms_orig_rest = [arm.restPosition for arm in bpy.data.armatures]
+
+ for arm in bpy.data.armatures:
+ arm.rest_position = True
+# arm.restPosition = True
+
+ if ob_arms_orig_rest:
+ for ob_base in bpy.data.objects:
+ #if ob_base.type == 'Armature':
+ ob_base.make_display_list()
+# ob_base.makeDisplayList()
+
+ # This causes the makeDisplayList command to affect the mesh
+ sce.set_frame(sce.current_frame)
+# Blender.Set('curframe', Blender.Get('curframe'))
+
+
+ for ob_base in tmp_objects:
+
+ # ignore dupli children
+ if ob_base.parent and ob_base.parent.dupli_type != 'NONE':
+ continue
+
+ obs = [(ob_base, ob_base.matrix)]
+ if ob_base.dupli_type != 'NONE':
+ ob_base.create_dupli_list()
+ obs = [(dob.object, dob.matrix) for dob in ob_base.dupli_list]
+
+ for ob, mtx in obs:
+# for ob, mtx in BPyObject.getDerivedObjects(ob_base):
+ tmp_ob_type = ob.type
+ if tmp_ob_type == 'CAMERA':
+# if tmp_ob_type == 'Camera':
+ if EXP_CAMERA:
+ ob_cameras.append(my_object_generic(ob, mtx))
+ elif tmp_ob_type == 'LAMP':
+# elif tmp_ob_type == 'Lamp':
+ if EXP_LAMP:
+ ob_lights.append(my_object_generic(ob, mtx))
+ elif tmp_ob_type == 'ARMATURE':
+# elif tmp_ob_type == 'Armature':
+ if EXP_ARMATURE:
+ # TODO - armatures don't work in dupligroups!
+ if ob not in ob_arms: ob_arms.append(ob)
+ # ob_arms.append(ob) # replace later. was "ob_arms.append(sane_obname(ob), ob)"
+ elif tmp_ob_type == 'EMPTY':
+# elif tmp_ob_type == 'Empty':
+ if EXP_EMPTY:
+ ob_null.append(my_object_generic(ob, mtx))
+ elif EXP_MESH:
+ origData = True
+ if tmp_ob_type != 'MESH':
+# if tmp_ob_type != 'Mesh':
+# me = bpy.data.meshes.new()
+ try: me = ob.create_mesh(True, 'PREVIEW')
+# try: me.getFromObject(ob)
+ except: me = None
+ if me:
+ meshes_to_clear.append( me )
+ mats = me.materials
+ origData = False
+ else:
+ # Mesh Type!
+ if EXP_MESH_APPLY_MOD:
+# me = bpy.data.meshes.new()
+ me = ob.create_mesh(True, 'PREVIEW')
+# me.getFromObject(ob)
+
+ # so we keep the vert groups
+# if EXP_ARMATURE:
+# orig_mesh = ob.getData(mesh=1)
+# if orig_mesh.getVertGroupNames():
+# ob.copy().link(me)
+# # If new mesh has no vgroups we can try to add them if the verts are the same
+# if not me.getVertGroupNames(): # vgroups were not kept by the modifier
+# if len(me.verts) == len(orig_mesh.verts):
+# groupNames, vWeightDict = BPyMesh.meshWeight2Dict(orig_mesh)
+# BPyMesh.dict2MeshWeight(me, groupNames, vWeightDict)
+
+ # print ob, me, me.getVertGroupNames()
+ meshes_to_clear.append( me )
+ origData = False
+ mats = me.materials
+ else:
+ me = ob.data
+# me = ob.getData(mesh=1)
+ mats = me.materials
+
+# # Support object colors
+# tmp_colbits = ob.colbits
+# if tmp_colbits:
+# tmp_ob_mats = ob.getMaterials(1) # 1 so we get None's too.
+# for i in xrange(16):
+# if tmp_colbits & (1<<i):
+# mats[i] = tmp_ob_mats[i]
+# del tmp_ob_mats
+# del tmp_colbits
+
+
+ if me:
+# # This WILL modify meshes in blender if EXP_MESH_APPLY_MOD is disabled.
+# # so strictly this is bad. but only in rare cases would it have negative results
+# # say with dupliverts the objects would rotate a bit differently
+# if EXP_MESH_HQ_NORMALS:
+# BPyMesh.meshCalcNormals(me) # high quality normals nice for realtime engines.
+
+ texture_mapping_local = {}
+ material_mapping_local = {}
+ if len(me.uv_textures) > 0:
+# if me.faceUV:
+ uvlayer_orig = me.active_uv_texture
+# uvlayer_orig = me.activeUVLayer
+ for uvlayer in me.uv_textures:
+# for uvlayer in me.getUVLayerNames():
+# me.activeUVLayer = uvlayer
+ for f, uf in zip(me.faces, uvlayer.data):
+# for f in me.faces:
+ tex = uf.image
+# tex = f.image
+ textures[tex] = texture_mapping_local[tex] = None
+
+ try: mat = mats[f.material_index]
+# try: mat = mats[f.mat]
+ except: mat = None
+
+ materials[mat, tex] = material_mapping_local[mat, tex] = None # should use sets, wait for blender 2.5
+
+
+# me.activeUVLayer = uvlayer_orig
+ else:
+ for mat in mats:
+ # 2.44 use mat.lib too for uniqueness
+ materials[mat, None] = material_mapping_local[mat, None] = None
+ else:
+ materials[None, None] = None
+
+ if EXP_ARMATURE:
+ armob = ob.find_armature()
+ blenParentBoneName = None
+
+ # parent bone - special case
+ if (not armob) and ob.parent and ob.parent.type == 'ARMATURE' and \
+ ob.parent_type == 'BONE':
+# if (not armob) and ob.parent and ob.parent.type == 'Armature' and ob.parentType == Blender.Object.ParentTypes.BONE:
+ armob = ob.parent
+ blenParentBoneName = ob.parent_bone
+# blenParentBoneName = ob.parentbonename
+
+
+ if armob and armob not in ob_arms:
+ ob_arms.append(armob)
+
+ else:
+ blenParentBoneName = armob = None
+
+ my_mesh = my_object_generic(ob, mtx)
+ my_mesh.blenData = me
+ my_mesh.origData = origData
+ my_mesh.blenMaterials = list(material_mapping_local.keys())
+ my_mesh.blenMaterialList = mats
+ my_mesh.blenTextures = list(texture_mapping_local.keys())
+
+ # if only 1 null texture then empty the list
+ if len(my_mesh.blenTextures) == 1 and my_mesh.blenTextures[0] == None:
+ my_mesh.blenTextures = []
+
+ my_mesh.fbxArm = armob # replace with my_object_generic armature instance later
+ my_mesh.fbxBoneParent = blenParentBoneName # replace with my_bone instance later
+
+ ob_meshes.append( my_mesh )
+
+ # not forgetting to free dupli_list
+ if ob_base.dupli_list: ob_base.free_dupli_list()
+
+
+ if EXP_ARMATURE:
+ # now we have the meshes, restore the rest arm position
+ for i, arm in enumerate(bpy.data.armatures):
+ arm.rest_position = ob_arms_orig_rest[i]
+# arm.restPosition = ob_arms_orig_rest[i]
+
+ if ob_arms_orig_rest:
+ for ob_base in bpy.data.objects:
+ if ob_base.type == 'ARMATURE':
+# if ob_base.type == 'Armature':
+ ob_base.make_display_list()
+# ob_base.makeDisplayList()
+ # This causes the makeDisplayList command to affect the mesh
+ sce.set_frame(sce.current_frame)
+# Blender.Set('curframe', Blender.Get('curframe'))
+
+ del tmp_ob_type, tmp_objects
+
+ # now we have collected all armatures, add bones
+ for i, ob in enumerate(ob_arms):
+
+ ob_arms[i] = my_arm = my_object_generic(ob)
+
+ my_arm.fbxBones = []
+ my_arm.blenData = ob.data
+ if ob.animation_data:
+ my_arm.blenAction = ob.animation_data.action
+ else:
+ my_arm.blenAction = None
+# my_arm.blenAction = ob.action
+ my_arm.blenActionList = []
+
+ # fbxName, blenderObject, my_bones, blenderActions
+ #ob_arms[i] = fbxArmObName, ob, arm_my_bones, (ob.action, [])
+
+ for bone in my_arm.blenData.bones:
+# for bone in my_arm.blenData.bones.values():
+ my_bone = my_bone_class(bone, my_arm)
+ my_arm.fbxBones.append( my_bone )
+ ob_bones.append( my_bone )
+
+ # add the meshes to the bones and replace each mesh's armature with our own armature class
+ #for obname, ob, mtx, me, mats, arm, armname in ob_meshes:
+ for my_mesh in ob_meshes:
+ # Replace
+ # ...this could be sped up with dictionary mapping but it's unlikely for
+ # it ever to be a bottleneck - (would need 100+ meshes using armatures)
+ if my_mesh.fbxArm:
+ for my_arm in ob_arms:
+ if my_arm.blenObject == my_mesh.fbxArm:
+ my_mesh.fbxArm = my_arm
+ break
+
+ for my_bone in ob_bones:
+
+ # The mesh uses this bones armature!
+ if my_bone.fbxArm == my_mesh.fbxArm:
+ my_bone.blenMeshes[my_mesh.fbxName] = my_mesh.blenData
+
+
+ # parent bone: replace bone names with our class instances
+ # my_mesh.fbxBoneParent is None or a blender bone name initially; replace it when the names match.
+ if my_mesh.fbxBoneParent == my_bone.blenName:
+ my_mesh.fbxBoneParent = my_bone
+
+ bone_deformer_count = 0 # count how many bones deform a mesh
+ my_bone_blenParent = None
+ for my_bone in ob_bones:
+ my_bone_blenParent = my_bone.blenBone.parent
+ if my_bone_blenParent:
+ for my_bone_parent in ob_bones:
+ # Note 2.45rc2 you can compare bones normally
+ if my_bone_blenParent.name == my_bone_parent.blenName and my_bone.fbxArm == my_bone_parent.fbxArm:
+ my_bone.parent = my_bone_parent
+ break
+
+ # Not used at the moment
+ # my_bone.calcRestMatrixLocal()
+ bone_deformer_count += len(my_bone.blenMeshes)
+
+ del my_bone_blenParent
+
+
+ # Build blenObject -> fbxObject mapping
+ # this is needed for groups as well as fbxParenting
+# for ob in bpy.data.objects: ob.tag = False
+# bpy.data.objects.tag = False
+
+ # using a list of object names for tagging (Arystan)
+ tagged_objects = []
+
+ tmp_obmapping = {}
+ for ob_generic in ob_all_typegroups:
+ for ob_base in ob_generic:
+ tagged_objects.append(ob_base.blenObject.name)
+# ob_base.blenObject.tag = True
+ tmp_obmapping[ob_base.blenObject] = ob_base
+
+ # Build Groups from objects we export
+ for blenGroup in bpy.data.groups:
+ fbxGroupName = None
+ for ob in blenGroup.objects:
+ if ob.name in tagged_objects:
+# if ob.tag:
+ if fbxGroupName == None:
+ fbxGroupName = sane_groupname(blenGroup)
+ groups.append((fbxGroupName, blenGroup))
+
+ tmp_obmapping[ob].fbxGroupNames.append(fbxGroupName) # also adds to the objects fbxGroupNames
+
+ groups.sort() # not really needed
+
+ # Assign parents using this mapping
+ for ob_generic in ob_all_typegroups:
+ for my_ob in ob_generic:
+ parent = my_ob.blenObject.parent
+ if parent and parent.name in tagged_objects: # does it exist and is it in the mapping
+# if parent and parent.tag: # does it exist and is it in the mapping
+ my_ob.fbxParent = tmp_obmapping[parent]
+
+
+ del tmp_obmapping
+ # Finished finding groups we use
+
+
+ materials = [(sane_matname(mat_tex_pair), mat_tex_pair) for mat_tex_pair in materials.keys()]
+ textures = [(sane_texname(tex), tex) for tex in textures.keys() if tex]
+ materials.sort() # sort by name
+ textures.sort()
+
+ camera_count = 8
+ file.write('''
+
+; Object definitions
+;------------------------------------------------------------------
+
+Definitions: {
+ Version: 100
+ Count: %i''' % (\
+ 1+1+camera_count+\
+ len(ob_meshes)+\
+ len(ob_lights)+\
+ len(ob_cameras)+\
+ len(ob_arms)+\
+ len(ob_null)+\
+ len(ob_bones)+\
+ bone_deformer_count+\
+ len(materials)+\
+ (len(textures)*2))) # add 1 for the root model 1 for global settings
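+ # NOTE: this total should match the sum of the per-ObjectType counts written below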
+
+ del bone_deformer_count
+
+ file.write('''
+ ObjectType: "Model" {
+ Count: %i
+ }''' % (\
+ 1+camera_count+\
+ len(ob_meshes)+\
+ len(ob_lights)+\
+ len(ob_cameras)+\
+ len(ob_arms)+\
+ len(ob_null)+\
+ len(ob_bones))) # add 1 for the root model
+
+ file.write('''
+ ObjectType: "Geometry" {
+ Count: %i
+ }''' % len(ob_meshes))
+
+ if materials:
+ file.write('''
+ ObjectType: "Material" {
+ Count: %i
+ }''' % len(materials))
+
+ if textures:
+ file.write('''
+ ObjectType: "Texture" {
+ Count: %i
+ }''' % len(textures)) # one definition per texture
+ file.write('''
+ ObjectType: "Video" {
+ Count: %i
+ }''' % len(textures)) # each texture also gets a video
+
+ tmp = 0
+ # Add deformer nodes
+ for my_mesh in ob_meshes:
+ if my_mesh.fbxArm:
+ tmp+=1
+
+ # Add subdeformers
+ for my_bone in ob_bones:
+ tmp += len(my_bone.blenMeshes)
+
+ if tmp:
+ file.write('''
+ ObjectType: "Deformer" {
+ Count: %i
+ }''' % tmp)
+ del tmp
+
+ # we could avoid writing this possibly but for now just write it
+
+ file.write('''
+ ObjectType: "Pose" {
+ Count: 1
+ }''')
+
+ if groups:
+ file.write('''
+ ObjectType: "GroupSelection" {
+ Count: %i
+ }''' % len(groups))
+
+ file.write('''
+ ObjectType: "GlobalSettings" {
+ Count: 1
+ }
+}''')
+
+ file.write('''
+
+; Object properties
+;------------------------------------------------------------------
+
+Objects: {''')
+
+ # To comply with other FBX FILES
+ write_camera_switch()
+
+ # Write the null object
+ write_null(None, 'blend_root')# , GLOBAL_MATRIX)
+
+ for my_null in ob_null:
+ write_null(my_null)
+
+ for my_arm in ob_arms:
+ write_null(my_arm)
+
+ for my_cam in ob_cameras:
+ write_camera(my_cam)
+
+ for my_light in ob_lights:
+ write_light(my_light)
+
+ for my_mesh in ob_meshes:
+ write_mesh(my_mesh)
+
+ #for bonename, bone, obname, me, armob in ob_bones:
+ for my_bone in ob_bones:
+ write_bone(my_bone)
+
+ write_camera_default()
+
+ for matname, (mat, tex) in materials:
+ write_material(matname, mat) # We only need to have a material per image pair, but no need to write any image info into the material (dumb fbx standard)
+
+ # each texture uses a video, odd
+ for texname, tex in textures:
+ write_video(texname, tex)
+ i = 0
+ for texname, tex in textures:
+ write_texture(texname, tex, i)
+ i+=1
+
+ for groupname, group in groups:
+ write_group(groupname)
+
+ # NOTE - C4D and MotionBuilder don't need normalized weights, but Deep Exploration 5 does (and possibly Max).
+
+ # Write armature modifiers
+ # TODO - add another MODEL? - because of this skin definition.
+ for my_mesh in ob_meshes:
+ if my_mesh.fbxArm:
+ write_deformer_skin(my_mesh.fbxName)
+
+ # Get normalized weights for temporary use
+ if my_mesh.fbxBoneParent:
+ weights = None
+ else:
+ weights = meshNormalizedWeights(my_mesh.blenObject)
+# weights = meshNormalizedWeights(my_mesh.blenData)
+
+ #for bonename, bone, obname, bone_mesh, armob in ob_bones:
+ for my_bone in ob_bones:
+ if my_mesh.blenData in iter(my_bone.blenMeshes.values()):
+ write_sub_deformer_skin(my_mesh, my_bone, weights)
+
+ # Writing poses is really weird: it's only needed when an armature and mesh are used together;
+ # each by itself doesn't need pose data. For now, only pose meshes and bones.
+
+ file.write('''
+ Pose: "Pose::BIND_POSES", "BindPose" {
+ Type: "BindPose"
+ Version: 100
+ Properties60: {
+ }
+ NbPoseNodes: ''')
+ file.write(str(len(pose_items)))
+
+
+ for fbxName, matrix in pose_items:
+ file.write('\n\t\tPoseNode: {')
+ file.write('\n\t\t\tNode: "Model::%s"' % fbxName )
+ if matrix: file.write('\n\t\t\tMatrix: %s' % mat4x4str(matrix))
+ else: file.write('\n\t\t\tMatrix: %s' % mat4x4str(mtx4_identity))
+ file.write('\n\t\t}')
+
+ file.write('\n\t}')
+
+
+ # Finish Writing Objects
+ # Write global settings
+ file.write('''
+ GlobalSettings: {
+ Version: 1000
+ Properties60: {
+ Property: "UpAxis", "int", "",1
+ Property: "UpAxisSign", "int", "",1
+ Property: "FrontAxis", "int", "",2
+ Property: "FrontAxisSign", "int", "",1
+ Property: "CoordAxis", "int", "",0
+ Property: "CoordAxisSign", "int", "",1
+ Property: "UnitScaleFactor", "double", "",100
+ }
+ }
+''')
+ file.write('}')
+
+ file.write('''
+
+; Object relations
+;------------------------------------------------------------------
+
+Relations: {''')
+
+ file.write('\n\tModel: "Model::blend_root", "Null" {\n\t}')
+
+ for my_null in ob_null:
+ file.write('\n\tModel: "Model::%s", "Null" {\n\t}' % my_null.fbxName)
+
+ for my_arm in ob_arms:
+ file.write('\n\tModel: "Model::%s", "Null" {\n\t}' % my_arm.fbxName)
+
+ for my_mesh in ob_meshes:
+ file.write('\n\tModel: "Model::%s", "Mesh" {\n\t}' % my_mesh.fbxName)
+
+ # TODO - limbs can have the same name for multiple armatures, should prefix.
+ #for bonename, bone, obname, me, armob in ob_bones:
+ for my_bone in ob_bones:
+ file.write('\n\tModel: "Model::%s", "Limb" {\n\t}' % my_bone.fbxName)
+
+ for my_cam in ob_cameras:
+ file.write('\n\tModel: "Model::%s", "Camera" {\n\t}' % my_cam.fbxName)
+
+ for my_light in ob_lights:
+ file.write('\n\tModel: "Model::%s", "Light" {\n\t}' % my_light.fbxName)
+
+ file.write('''
+ Model: "Model::Producer Perspective", "Camera" {
+ }
+ Model: "Model::Producer Top", "Camera" {
+ }
+ Model: "Model::Producer Bottom", "Camera" {
+ }
+ Model: "Model::Producer Front", "Camera" {
+ }
+ Model: "Model::Producer Back", "Camera" {
+ }
+ Model: "Model::Producer Right", "Camera" {
+ }
+ Model: "Model::Producer Left", "Camera" {
+ }
+ Model: "Model::Camera Switcher", "CameraSwitcher" {
+ }''')
+
+ for matname, (mat, tex) in materials:
+ file.write('\n\tMaterial: "Material::%s", "" {\n\t}' % matname)
+
+ if textures:
+ for texname, tex in textures:
+ file.write('\n\tTexture: "Texture::%s", "TextureVideoClip" {\n\t}' % texname)
+ for texname, tex in textures:
+ file.write('\n\tVideo: "Video::%s", "Clip" {\n\t}' % texname)
+
+ # deformers - modifiers
+ for my_mesh in ob_meshes:
+ if my_mesh.fbxArm:
+ file.write('\n\tDeformer: "Deformer::Skin %s", "Skin" {\n\t}' % my_mesh.fbxName)
+
+ #for bonename, bone, obname, me, armob in ob_bones:
+ for my_bone in ob_bones:
+ for fbxMeshObName in my_bone.blenMeshes: # .keys() - fbxMeshObName
+ # is this bone affecting a mesh?
+ file.write('\n\tDeformer: "SubDeformer::Cluster %s %s", "Cluster" {\n\t}' % (fbxMeshObName, my_bone.fbxName))
+
+ # This should be at the end
+ # file.write('\n\tPose: "Pose::BIND_POSES", "BindPose" {\n\t}')
+
+ for groupname, group in groups:
+ file.write('\n\tGroupSelection: "GroupSelection::%s", "Default" {\n\t}' % groupname)
+
+ file.write('\n}')
+ file.write('''
+
+; Object connections
+;------------------------------------------------------------------
+
+Connections: {''')
+
+ # NOTE - The FBX SDK doesn't care about the order but some importers DO!
+ # for instance, defining the material->mesh connection
+ # before the mesh->blend_root crashes cinema4d
+
+
+ # write the fake root node
+ file.write('\n\tConnect: "OO", "Model::blend_root", "Model::Scene"')
+
+ for ob_generic in ob_all_typegroups: # all blender 'Object's we support
+ for my_ob in ob_generic:
+ if my_ob.fbxParent:
+ file.write('\n\tConnect: "OO", "Model::%s", "Model::%s"' % (my_ob.fbxName, my_ob.fbxParent.fbxName))
+ else:
+ file.write('\n\tConnect: "OO", "Model::%s", "Model::blend_root"' % my_ob.fbxName)
+
+ if materials:
+ for my_mesh in ob_meshes:
+ # Connect all materials to all objects, not good form but ok for now.
+ for mat, tex in my_mesh.blenMaterials:
+ if mat: mat_name = mat.name
+ else: mat_name = None
+
+ if tex: tex_name = tex.name
+ else: tex_name = None
+
+ file.write('\n\tConnect: "OO", "Material::%s", "Model::%s"' % (sane_name_mapping_mat[mat_name, tex_name], my_mesh.fbxName))
+
+ if textures:
+ for my_mesh in ob_meshes:
+ if my_mesh.blenTextures:
+ # file.write('\n\tConnect: "OO", "Texture::_empty_", "Model::%s"' % my_mesh.fbxName)
+ for tex in my_mesh.blenTextures:
+ if tex:
+ file.write('\n\tConnect: "OO", "Texture::%s", "Model::%s"' % (sane_name_mapping_tex[tex.name], my_mesh.fbxName))
+
+ for texname, tex in textures:
+ file.write('\n\tConnect: "OO", "Video::%s", "Texture::%s"' % (texname, texname))
+
+ for my_mesh in ob_meshes:
+ if my_mesh.fbxArm:
+ file.write('\n\tConnect: "OO", "Deformer::Skin %s", "Model::%s"' % (my_mesh.fbxName, my_mesh.fbxName))
+
+ #for bonename, bone, obname, me, armob in ob_bones:
+ for my_bone in ob_bones:
+ for fbxMeshObName in my_bone.blenMeshes: # .keys()
+ file.write('\n\tConnect: "OO", "SubDeformer::Cluster %s %s", "Deformer::Skin %s"' % (fbxMeshObName, my_bone.fbxName, fbxMeshObName))
+
+ # limbs -> deformers
+ # for bonename, bone, obname, me, armob in ob_bones:
+ for my_bone in ob_bones:
+ for fbxMeshObName in my_bone.blenMeshes: # .keys()
+ file.write('\n\tConnect: "OO", "Model::%s", "SubDeformer::Cluster %s %s"' % (my_bone.fbxName, fbxMeshObName, my_bone.fbxName))
+
+
+ #for bonename, bone, obname, me, armob in ob_bones:
+ for my_bone in ob_bones:
+ # Always parent to armature now
+ if my_bone.parent:
+ file.write('\n\tConnect: "OO", "Model::%s", "Model::%s"' % (my_bone.fbxName, my_bone.parent.fbxName) )
+ else:
+ # the armature object is written as an empty and all root level bones connect to it
+ file.write('\n\tConnect: "OO", "Model::%s", "Model::%s"' % (my_bone.fbxName, my_bone.fbxArm.fbxName) )
+
+ # groups
+ if groups:
+ for ob_generic in ob_all_typegroups:
+ for ob_base in ob_generic:
+ for fbxGroupName in ob_base.fbxGroupNames:
+ file.write('\n\tConnect: "OO", "Model::%s", "GroupSelection::%s"' % (ob_base.fbxName, fbxGroupName))
+
+ for my_arm in ob_arms:
+ file.write('\n\tConnect: "OO", "Model::%s", "Model::blend_root"' % my_arm.fbxName)
+
+ file.write('\n}')
+
+
+ # Needed for scene footer as well as animation
+ render = sce.render_data
+# render = sce.render
+
+ # from the FBX sdk
+ #define KTIME_ONE_SECOND KTime (K_LONGLONG(46186158000))
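+ # e.g. at 25 fps, fbx_time(25) == 46186158000 (exactly one second of KTime)
+ # and fbx_time(1) == 1847446320 (a 1/25th second step)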
+ def fbx_time(t):
+ # 0.5 + val is the same as rounding.
+ return int(0.5 + ((t/fps) * 46186158000))
+
+ fps = float(render.fps)
+ start = sce.start_frame
+# start = render.sFrame
+ end = sce.end_frame
+# end = render.eFrame
+ if end < start: start, end = end, start
+ if start==end: ANIM_ENABLE = False
+
+ # animations for these object types
+ ob_anim_lists = ob_bones, ob_meshes, ob_null, ob_cameras, ob_lights, ob_arms
+
+ if ANIM_ENABLE and [tmp for tmp in ob_anim_lists if tmp]:
+
+ frame_orig = sce.current_frame
+# frame_orig = Blender.Get('curframe')
+
+ if ANIM_OPTIMIZE:
+ ANIM_OPTIMIZE_PRECISSION_FLOAT = 0.1 ** ANIM_OPTIMIZE_PRECISSION
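+ # e.g. the default precision of 4 gives a tolerance of 0.1 ** 4 == 0.0001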
+
+ # default action, when no actions are available
+ tmp_actions = [None] # None is the default action
+ blenActionDefault = None
+ action_lastcompat = None
+
+ # instead of tagging
+ tagged_actions = []
+
+ if ANIM_ACTION_ALL:
+# bpy.data.actions.tag = False
+ tmp_actions = list(bpy.data.actions)
+
+
+ # find which actions are compatible with the armatures
+ # blenActions is not yet initialized so do it now.
+ tmp_act_count = 0
+ for my_arm in ob_arms:
+
+ # get the default name
+ if not blenActionDefault:
+ blenActionDefault = my_arm.blenAction
+
+ arm_bone_names = set([my_bone.blenName for my_bone in my_arm.fbxBones])
+
+ for action in tmp_actions:
+
+ action_chan_names = arm_bone_names.intersection( set([g.name for g in action.groups]) )
+# action_chan_names = arm_bone_names.intersection( set(action.getChannelNames()) )
+
+ if action_chan_names: # at least one channel matches.
+ my_arm.blenActionList.append(action)
+ tagged_actions.append(action.name)
+# action.tag = True
+ tmp_act_count += 1
+
+ # in case there are no actions applied to armatures
+ action_lastcompat = action
+
+ if tmp_act_count:
+ # unlikely to ever happen, but if no actions are applied to armatures, just use the last compatible action.
+ if not blenActionDefault:
+ blenActionDefault = action_lastcompat
+
+ del action_lastcompat
+
+ file.write('''
+;Takes and animation section
+;----------------------------------------------------
+
+Takes: {''')
+
+ if blenActionDefault:
+ file.write('\n\tCurrent: "%s"' % sane_takename(blenActionDefault))
+ else:
+ file.write('\n\tCurrent: "Default Take"')
+
+ for blenAction in tmp_actions:
+ # we have tagged all actions that are used by selected armatures
+ if blenAction:
+ if blenAction.name in tagged_actions:
+# if blenAction.tag:
+ print('\taction: "%s" exporting...' % blenAction.name)
+ else:
+ print('\taction: "%s" has no armature using it, skipping' % blenAction.name)
+ continue
+
+ if blenAction == None:
+ # Warning, this only accounts for tmp_actions being [None]
+ file.write('\n\tTake: "Default Take" {')
+ act_start = start
+ act_end = end
+ else:
+ # use existing name
+ if blenAction == blenActionDefault: # have we already got the name
+ file.write('\n\tTake: "%s" {' % sane_name_mapping_take[blenAction.name])
+ else:
+ file.write('\n\tTake: "%s" {' % sane_takename(blenAction))
+
+ act_start, act_end = blenAction.get_frame_range()
+# tmp = blenAction.getFrameNumbers()
+# if tmp:
+# act_start = min(tmp)
+# act_end = max(tmp)
+# del tmp
+# else:
+# # Fallback on this, theres not much else we can do? :/
+# # when an action has no length
+# act_start = start
+# act_end = end
+
+ # Set the action active
+ for my_bone in ob_arms:
+ if blenAction in my_bone.blenActionList:
+ my_bone.blenObject.action = blenAction # set the action on this armature object
+ # print '\t\tSetting Action!', blenAction
+ # sce.update(1)
+
+ file.write('\n\t\tFileName: "Default_Take.tak"') # ??? - not sure why this is needed
+ file.write('\n\t\tLocalTime: %i,%i' % (fbx_time(act_start-1), fbx_time(act_end-1))) # ??? - not sure why this is needed
+ file.write('\n\t\tReferenceTime: %i,%i' % (fbx_time(act_start-1), fbx_time(act_end-1))) # ??? - not sure why this is needed
+
+ file.write('''
+
+ ;Models animation
+ ;----------------------------------------------------''')
+
+
+ # set pose data for all bones
+ # do this here in case the action changes
+ '''
+ for my_bone in ob_bones:
+ my_bone.flushAnimData()
+ '''
+ i = act_start
+ while i <= act_end:
+ sce.set_frame(i)
+# Blender.Set('curframe', i)
+ for ob_generic in ob_anim_lists:
+ for my_ob in ob_generic:
+ #Blender.Window.RedrawAll()
+ if ob_generic == ob_meshes and my_ob.fbxArm:
+ # We can't animate armature meshes!
+ pass
+ else:
+ my_ob.setPoseFrame(i)
+
+ i+=1
+
+
+ #for bonename, bone, obname, me, armob in ob_bones:
+ for ob_generic in (ob_bones, ob_meshes, ob_null, ob_cameras, ob_lights, ob_arms):
+
+ for my_ob in ob_generic:
+
+ if ob_generic == ob_meshes and my_ob.fbxArm:
+ # do nothing,
+ pass
+ else:
+
+ file.write('\n\t\tModel: "Model::%s" {' % my_ob.fbxName) # ??? - not sure why this is needed
+ file.write('\n\t\t\tVersion: 1.1')
+ file.write('\n\t\t\tChannel: "Transform" {')
+
+ context_bone_anim_mats = [ (my_ob.getAnimParRelMatrix(frame), my_ob.getAnimParRelMatrixRot(frame)) for frame in range(act_start, act_end+1) ]
+
+ # ----------------
+ # ----------------
+ for TX_LAYER, TX_CHAN in enumerate('TRS'): # transform, rotate, scale
+
+ if TX_CHAN=='T': context_bone_anim_vecs = [mtx[0].translationPart() for mtx in context_bone_anim_mats]
+ elif TX_CHAN=='S': context_bone_anim_vecs = [mtx[0].scalePart() for mtx in context_bone_anim_mats]
+ elif TX_CHAN=='R':
+ # Was....
+ # elif TX_CHAN=='R': context_bone_anim_vecs = [mtx[1].toEuler() for mtx in context_bone_anim_mats]
+ #
+ # ...but we need to use the previous euler for compatible conversion.
+ context_bone_anim_vecs = []
+ prev_eul = None
+ for mtx in context_bone_anim_mats:
+ if prev_eul: prev_eul = mtx[1].toEuler(prev_eul)
+ else: prev_eul = mtx[1].toEuler()
+ context_bone_anim_vecs.append(eulerRadToDeg(prev_eul))
+# context_bone_anim_vecs.append(prev_eul)
+
+ file.write('\n\t\t\t\tChannel: "%s" {' % TX_CHAN) # translation
+
+ for i in range(3):
+ # Loop on each axis of the bone
+ file.write('\n\t\t\t\t\tChannel: "%s" {'% ('XYZ'[i])) # translation
+ file.write('\n\t\t\t\t\t\tDefault: %.15f' % context_bone_anim_vecs[0][i] )
+ file.write('\n\t\t\t\t\t\tKeyVer: 4005')
+
+ if not ANIM_OPTIMIZE:
+ # Just write all frames, simple but inefficient
+ file.write('\n\t\t\t\t\t\tKeyCount: %i' % (1 + act_end - act_start))
+ file.write('\n\t\t\t\t\t\tKey: ')
+ frame = act_start
+ while frame <= act_end:
+ if frame!=act_start:
+ file.write(',')
+
+ # Curve types are 'C,n' for constant, 'L' for linear
+ # C,n is for bezier? - linear is best for now so we can do simple keyframe removal
+ file.write('\n\t\t\t\t\t\t\t%i,%.15f,L' % (fbx_time(frame-1), context_bone_anim_vecs[frame-act_start][i] ))
+ frame+=1
+ else:
+ # remove unneeded keys; j indexes the key list, which is needed since keys are removed as we go.
+ context_bone_anim_keys = [ (vec[i], j) for j, vec in enumerate(context_bone_anim_vecs) ]
+
+ # last frame to first frame, skipping 1 frame on either side;
+ # removing in a backwards loop is faster
+ #for j in xrange( (act_end-act_start)-1, 0, -1 ):
+ # j = (act_end-act_start)-1
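+ # e.g. values 0.0, 0.5, 1.0 on consecutive frames: the middle key lies on the
+ # straight line between its neighbours (within the tolerance above), so it can
+ # be dropped without changing the linearly interpolated curve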
+ j = len(context_bone_anim_keys)-2
+ while j > 0 and len(context_bone_anim_keys) > 2:
+ # print j, len(context_bone_anim_keys)
+ # Is this key the same as the ones next to it?
+
+ # co-linear horizontal...
+ if abs(context_bone_anim_keys[j][0] - context_bone_anim_keys[j-1][0]) < ANIM_OPTIMIZE_PRECISSION_FLOAT and\
+ abs(context_bone_anim_keys[j][0] - context_bone_anim_keys[j+1][0]) < ANIM_OPTIMIZE_PRECISSION_FLOAT:
+
+ del context_bone_anim_keys[j]
+
+ else:
+ frame_range = float(context_bone_anim_keys[j+1][1] - context_bone_anim_keys[j-1][1])
+ frame_range_fac1 = (context_bone_anim_keys[j+1][1] - context_bone_anim_keys[j][1]) / frame_range
+ frame_range_fac2 = 1.0 - frame_range_fac1
+
+ if abs(((context_bone_anim_keys[j-1][0]*frame_range_fac1 + context_bone_anim_keys[j+1][0]*frame_range_fac2)) - context_bone_anim_keys[j][0]) < ANIM_OPTIMIZE_PRECISSION_FLOAT:
+ del context_bone_anim_keys[j]
+ else:
+ j-=1
+
+ # keep the index below the list length
+ if j > len(context_bone_anim_keys)-2:
+ j = len(context_bone_anim_keys)-2
+
+ if len(context_bone_anim_keys) == 2 and context_bone_anim_keys[0][0] == context_bone_anim_keys[1][0]:
+ # This axis has no motion, it's okay to skip KeyCount and Keys in this case
+ pass
+ else:
+ # We only need to write these if there is at least one
+ file.write('\n\t\t\t\t\t\tKeyCount: %i' % len(context_bone_anim_keys))
+ file.write('\n\t\t\t\t\t\tKey: ')
+ for val, frame in context_bone_anim_keys:
+ if frame != context_bone_anim_keys[0][1]: # not the first
+ file.write(',')
+ # frame is already one less than Blender's frame
+ file.write('\n\t\t\t\t\t\t\t%i,%.15f,L' % (fbx_time(frame), val ))
+
+ if i==0: file.write('\n\t\t\t\t\t\tColor: 1,0,0')
+ elif i==1: file.write('\n\t\t\t\t\t\tColor: 0,1,0')
+ elif i==2: file.write('\n\t\t\t\t\t\tColor: 0,0,1')
+
+ file.write('\n\t\t\t\t\t}')
+ file.write('\n\t\t\t\t\tLayerType: %i' % (TX_LAYER+1) )
+ file.write('\n\t\t\t\t}')
+
+ # ---------------
+
+ file.write('\n\t\t\t}')
+ file.write('\n\t\t}')
+
+ # end the take
+ file.write('\n\t}')
+
+ # end action loop. set original actions
+ # do this after every loop in case actions affect each other.
+ for my_bone in ob_arms:
+ my_bone.blenObject.action = my_bone.blenAction
+
+ file.write('\n}')
+
+ sce.set_frame(frame_orig)
+# Blender.Set('curframe', frame_orig)
+
+ else:
+ # no animation
+ file.write('\n;Takes and animation section')
+ file.write('\n;----------------------------------------------------')
+ file.write('\n')
+ file.write('\nTakes: {')
+ file.write('\n\tCurrent: ""')
+ file.write('\n}')
+
+
+ # write meshes animation
+ #for obname, ob, mtx, me, mats, arm, armname in ob_meshes:
+
+
+ # Clear mesh data, only needed when writing with modifiers applied
+ for me in meshes_to_clear:
+ bpy.data.remove_mesh(me)
+# me.verts = None
+
+ # --------------------------- Footer
+ if world:
+ m = world.mist
+ has_mist = m.enabled
+# has_mist = world.mode & 1
+ mist_intense = m.intensity
+ mist_start = m.start
+ mist_end = m.depth
+ mist_height = m.height
+# mist_intense, mist_start, mist_end, mist_height = world.mist
+ world_hor = world.horizon_color
+# world_hor = world.hor
+ else:
+ has_mist = mist_intense = mist_start = mist_end = mist_height = 0
+ world_hor = 0,0,0
+
+ file.write('\n;Version 5 settings')
+ file.write('\n;------------------------------------------------------------------')
+ file.write('\n')
+ file.write('\nVersion5: {')
+ file.write('\n\tAmbientRenderSettings: {')
+ file.write('\n\t\tVersion: 101')
+ file.write('\n\t\tAmbientLightColor: %.1f,%.1f,%.1f,0' % tuple(world_amb))
+ file.write('\n\t}')
+ file.write('\n\tFogOptions: {')
+ file.write('\n\t\tFogEnable: %i' % has_mist)
+ file.write('\n\t\tFogMode: 0')
+ file.write('\n\t\tFogDensity: %.3f' % mist_intense)
+ file.write('\n\t\tFogStart: %.3f' % mist_start)
+ file.write('\n\t\tFogEnd: %.3f' % mist_end)
+ file.write('\n\t\tFogColor: %.1f,%.1f,%.1f,1' % tuple(world_hor))
+ file.write('\n\t}')
+ file.write('\n\tSettings: {')
+ file.write('\n\t\tFrameRate: "%i"' % int(fps))
+ file.write('\n\t\tTimeFormat: 1')
+ file.write('\n\t\tSnapOnFrames: 0')
+ file.write('\n\t\tReferenceTimeIndex: -1')
+ file.write('\n\t\tTimeLineStartTime: %i' % fbx_time(start-1))
+ file.write('\n\t\tTimeLineStopTime: %i' % fbx_time(end-1))
+ file.write('\n\t}')
+ file.write('\n\tRendererSetting: {')
+ file.write('\n\t\tDefaultCamera: "Producer Perspective"')
+ file.write('\n\t\tDefaultViewingMode: 0')
+ file.write('\n\t}')
+ file.write('\n}')
+ file.write('\n')
+
+ # In case somebody imports this, clean up by clearing global dicts
+ sane_name_mapping_ob.clear()
+ sane_name_mapping_mat.clear()
+ sane_name_mapping_tex.clear()
+
+ ob_arms[:] = []
+ ob_bones[:] = []
+ ob_cameras[:] = []
+ ob_lights[:] = []
+ ob_meshes[:] = []
+ ob_null[:] = []
+
+
+ # copy images if enabled
+# if EXP_IMAGE_COPY:
+# # copy_images( basepath, [ tex[1] for tex in textures if tex[1] != None ])
+# bpy.util.copy_images( [ tex[1] for tex in textures if tex[1] != None ], basepath)
+
+ print('export finished in %.4f sec.' % (time.clock() - start_time))
+# print 'export finished in %.4f sec.' % (Blender.sys.time() - start_time)
+ return True
+
+
+# --------------------------------------------
+# UI Function - not a part of the exporter.
+# this is to separate the user interface from the rest of the exporter.
+# from Blender import Draw, Window
+EVENT_NONE = 0
+EVENT_EXIT = 1
+EVENT_REDRAW = 2
+EVENT_FILESEL = 3
+
+GLOBALS = {}
+
+# export opts
+
+def do_redraw(e,v): GLOBALS['EVENT'] = e
+
+# toggle between these 2, only allow one on at once
+def do_obs_sel(e,v):
+ GLOBALS['EVENT'] = e
+ GLOBALS['EXP_OBS_SCENE'].val = 0
+ GLOBALS['EXP_OBS_SELECTED'].val = 1
+
+def do_obs_sce(e,v):
+ GLOBALS['EVENT'] = e
+ GLOBALS['EXP_OBS_SCENE'].val = 1
+ GLOBALS['EXP_OBS_SELECTED'].val = 0
+
+def do_batch_type_grp(e,v):
+ GLOBALS['EVENT'] = e
+ GLOBALS['BATCH_GROUP'].val = 1
+ GLOBALS['BATCH_SCENE'].val = 0
+
+def do_batch_type_sce(e,v):
+ GLOBALS['EVENT'] = e
+ GLOBALS['BATCH_GROUP'].val = 0
+ GLOBALS['BATCH_SCENE'].val = 1
+
+def do_anim_act_all(e,v):
+ GLOBALS['EVENT'] = e
+ GLOBALS['ANIM_ACTION_ALL'][0].val = 1
+ GLOBALS['ANIM_ACTION_ALL'][1].val = 0
+
+def do_anim_act_cur(e,v):
+ if GLOBALS['BATCH_ENABLE'].val and GLOBALS['BATCH_GROUP'].val:
+ Draw.PupMenu("Warning%t|Can't use this with the batch export group option")
+ else:
+ GLOBALS['EVENT'] = e
+ GLOBALS['ANIM_ACTION_ALL'][0].val = 0
+ GLOBALS['ANIM_ACTION_ALL'][1].val = 1
+
+def fbx_ui_exit(e,v):
+ GLOBALS['EVENT'] = e
+
+def do_help(e,v):
+ url = 'http://wiki.blender.org/index.php/Scripts/Manual/Export/autodesk_fbx'
+ print('Trying to open web browser with documentation at this address...')
+ print('\t' + url)
+
+ try:
+ import webbrowser
+ webbrowser.open(url)
+ except:
+ Blender.Draw.PupMenu("Error%t|Opening a webbrowser requires a full python installation")
+ print('...could not open a browser window.')
+
+
+
+# run when export is pressed
+#def fbx_ui_write(e,v):
+def fbx_ui_write(filename, context):
+
+ # Don't allow overwriting files when saving normally
+ if not GLOBALS['BATCH_ENABLE'].val:
+ if not BPyMessages.Warning_SaveOver(filename):
+ return
+
+ GLOBALS['EVENT'] = EVENT_EXIT
+
+ # Keep the order the same as above for simplicity
+ # the [] is a dummy arg used for objects
+
+ Blender.Window.WaitCursor(1)
+
+ # Make the matrix
+ GLOBAL_MATRIX = mtx4_identity.copy() # work on a copy so the shared identity matrix isn't modified
+ GLOBAL_MATRIX[0][0] = GLOBAL_MATRIX[1][1] = GLOBAL_MATRIX[2][2] = GLOBALS['_SCALE'].val
+ if GLOBALS['_XROT90'].val: GLOBAL_MATRIX = GLOBAL_MATRIX * mtx4_x90n
+ if GLOBALS['_YROT90'].val: GLOBAL_MATRIX = GLOBAL_MATRIX * mtx4_y90n
+ if GLOBALS['_ZROT90'].val: GLOBAL_MATRIX = GLOBAL_MATRIX * mtx4_z90n
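+ # the result scales everything uniformly by _SCALE and then applies any of the
+ # optional 90 degree axis rotations (mtx4_*90n are presumably the negative
+ # 90 degree rotation matrices defined earlier in this file)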
+
+ ret = write(\
+ filename, None,\
+ context,
+ GLOBALS['EXP_OBS_SELECTED'].val,\
+ GLOBALS['EXP_MESH'].val,\
+ GLOBALS['EXP_MESH_APPLY_MOD'].val,\
+ GLOBALS['EXP_MESH_HQ_NORMALS'].val,\
+ GLOBALS['EXP_ARMATURE'].val,\
+ GLOBALS['EXP_LAMP'].val,\
+ GLOBALS['EXP_CAMERA'].val,\
+ GLOBALS['EXP_EMPTY'].val,\
+ GLOBALS['EXP_IMAGE_COPY'].val,\
+ GLOBAL_MATRIX,\
+ GLOBALS['ANIM_ENABLE'].val,\
+ GLOBALS['ANIM_OPTIMIZE'].val,\
+ GLOBALS['ANIM_OPTIMIZE_PRECISSION'].val,\
+ GLOBALS['ANIM_ACTION_ALL'][0].val,\
+ GLOBALS['BATCH_ENABLE'].val,\
+ GLOBALS['BATCH_GROUP'].val,\
+ GLOBALS['BATCH_SCENE'].val,\
+ GLOBALS['BATCH_FILE_PREFIX'].val,\
+ GLOBALS['BATCH_OWN_DIR'].val,\
+ )
+
+ Blender.Window.WaitCursor(0)
+ GLOBALS.clear()
+
+ if ret == False:
+ Draw.PupMenu('Error%t|Path cannot be written to!')
+
+
+def fbx_ui():
+ # Only to center the UI
+ x,y = GLOBALS['MOUSE']
+ x-=180; y-=0 # offset... just to get it centered
+
+ Draw.Label('Export Objects...', x+20,y+165, 200, 20)
+
+ if not GLOBALS['BATCH_ENABLE'].val:
+ Draw.BeginAlign()
+ GLOBALS['EXP_OBS_SELECTED'] = Draw.Toggle('Selected Objects', EVENT_REDRAW, x+20, y+145, 160, 20, GLOBALS['EXP_OBS_SELECTED'].val, 'Export selected objects on visible layers', do_obs_sel)
+ GLOBALS['EXP_OBS_SCENE'] = Draw.Toggle('Scene Objects', EVENT_REDRAW, x+180, y+145, 160, 20, GLOBALS['EXP_OBS_SCENE'].val, 'Export all objects in this scene', do_obs_sce)
+ Draw.EndAlign()
+
+ Draw.BeginAlign()
+ GLOBALS['_SCALE'] = Draw.Number('Scale:', EVENT_NONE, x+20, y+120, 140, 20, GLOBALS['_SCALE'].val, 0.01, 1000.0, "Scale all data (Note! some importers don't support scaled armatures)")
+ GLOBALS['_XROT90'] = Draw.Toggle('Rot X90', EVENT_NONE, x+160, y+120, 60, 20, GLOBALS['_XROT90'].val, 'Rotate all objects 90 degrees about the X axis')
+ GLOBALS['_YROT90'] = Draw.Toggle('Rot Y90', EVENT_NONE, x+220, y+120, 60, 20, GLOBALS['_YROT90'].val, 'Rotate all objects 90 degrees about the Y axis')
+ GLOBALS['_ZROT90'] = Draw.Toggle('Rot Z90', EVENT_NONE, x+280, y+120, 60, 20, GLOBALS['_ZROT90'].val, 'Rotate all objects 90 degrees about the Z axis')
+ Draw.EndAlign()
+
+ y -= 35
+
+ Draw.BeginAlign()
+ GLOBALS['EXP_EMPTY'] = Draw.Toggle('Empty', EVENT_NONE, x+20, y+120, 60, 20, GLOBALS['EXP_EMPTY'].val, 'Export empty objects')
+ GLOBALS['EXP_CAMERA'] = Draw.Toggle('Camera', EVENT_NONE, x+80, y+120, 60, 20, GLOBALS['EXP_CAMERA'].val, 'Export camera objects')
+ GLOBALS['EXP_LAMP'] = Draw.Toggle('Lamp', EVENT_NONE, x+140, y+120, 60, 20, GLOBALS['EXP_LAMP'].val, 'Export lamp objects')
+ GLOBALS['EXP_ARMATURE'] = Draw.Toggle('Armature', EVENT_NONE, x+200, y+120, 60, 20, GLOBALS['EXP_ARMATURE'].val, 'Export armature objects')
+ GLOBALS['EXP_MESH'] = Draw.Toggle('Mesh', EVENT_REDRAW, x+260, y+120, 80, 20, GLOBALS['EXP_MESH'].val, 'Export mesh objects', do_redraw) #, do_axis_z)
+ Draw.EndAlign()
+
+ if GLOBALS['EXP_MESH'].val:
+ # below mesh but
+ Draw.BeginAlign()
+ GLOBALS['EXP_MESH_APPLY_MOD'] = Draw.Toggle('Modifiers', EVENT_NONE, x+260, y+100, 80, 20, GLOBALS['EXP_MESH_APPLY_MOD'].val, 'Apply modifiers to mesh objects') #, do_axis_z)
+ GLOBALS['EXP_MESH_HQ_NORMALS'] = Draw.Toggle('HQ Normals', EVENT_NONE, x+260, y+80, 80, 20, GLOBALS['EXP_MESH_HQ_NORMALS'].val, 'Generate high quality normals') #, do_axis_z)
+ Draw.EndAlign()
+
+ GLOBALS['EXP_IMAGE_COPY'] = Draw.Toggle('Copy Image Files', EVENT_NONE, x+20, y+80, 160, 20, GLOBALS['EXP_IMAGE_COPY'].val, 'Copy image files to the destination path') #, do_axis_z)
+
+
+ Draw.Label('Export Armature Animation...', x+20,y+45, 300, 20)
+
+ GLOBALS['ANIM_ENABLE'] = Draw.Toggle('Enable Animation', EVENT_REDRAW, x+20, y+25, 160, 20, GLOBALS['ANIM_ENABLE'].val, 'Export keyframe animation', do_redraw)
+ if GLOBALS['ANIM_ENABLE'].val:
+ Draw.BeginAlign()
+ GLOBALS['ANIM_OPTIMIZE'] = Draw.Toggle('Optimize Keyframes', EVENT_REDRAW, x+20, y+0, 160, 20, GLOBALS['ANIM_OPTIMIZE'].val, 'Remove double keyframes', do_redraw)
+ if GLOBALS['ANIM_OPTIMIZE'].val:
+ GLOBALS['ANIM_OPTIMIZE_PRECISSION'] = Draw.Number('Precision: ', EVENT_NONE, x+180, y+0, 160, 20, GLOBALS['ANIM_OPTIMIZE_PRECISSION'].val, 1, 16, 'Tolerance for comparing double keyframes (higher for greater accuracy)')
+ Draw.EndAlign()
+
+ Draw.BeginAlign()
+ GLOBALS['ANIM_ACTION_ALL'][1] = Draw.Toggle('Current Action', EVENT_REDRAW, x+20, y-25, 160, 20, GLOBALS['ANIM_ACTION_ALL'][1].val, 'Use actions currently applied to the armatures (use scene start/end frame)', do_anim_act_cur)
+ GLOBALS['ANIM_ACTION_ALL'][0] = Draw.Toggle('All Actions', EVENT_REDRAW, x+180,y-25, 160, 20, GLOBALS['ANIM_ACTION_ALL'][0].val, 'Use all actions for armatures', do_anim_act_all)
+ Draw.EndAlign()
+
+
+ Draw.Label('Export Batch...', x+20,y-60, 300, 20)
+ GLOBALS['BATCH_ENABLE'] = Draw.Toggle('Enable Batch', EVENT_REDRAW, x+20, y-80, 160, 20, GLOBALS['BATCH_ENABLE'].val, 'Automate exporting multiple scenes or groups to files', do_redraw)
+
+ if GLOBALS['BATCH_ENABLE'].val:
+ Draw.BeginAlign()
+ GLOBALS['BATCH_GROUP'] = Draw.Toggle('Group > File', EVENT_REDRAW, x+20, y-105, 160, 20, GLOBALS['BATCH_GROUP'].val, 'Export each group as an FBX file', do_batch_type_grp)
+ GLOBALS['BATCH_SCENE'] = Draw.Toggle('Scene > File', EVENT_REDRAW, x+180, y-105, 160, 20, GLOBALS['BATCH_SCENE'].val, 'Export each scene as an FBX file', do_batch_type_sce)
+
+ # Own dir requires OS module
+ if os:
+ GLOBALS['BATCH_OWN_DIR'] = Draw.Toggle('Own Dir', EVENT_NONE, x+20, y-125, 80, 20, GLOBALS['BATCH_OWN_DIR'].val, 'Create a dir for each exported file')
+ GLOBALS['BATCH_FILE_PREFIX'] = Draw.String('Prefix: ', EVENT_NONE, x+100, y-125, 240, 20, GLOBALS['BATCH_FILE_PREFIX'].val, 64, 'Prefix each file with this name ')
+ else:
+ GLOBALS['BATCH_FILE_PREFIX'] = Draw.String('Prefix: ', EVENT_NONE, x+20, y-125, 320, 20, GLOBALS['BATCH_FILE_PREFIX'].val, 64, 'Prefix each file with this name ')
+
+
+ Draw.EndAlign()
+
+ #y+=80
+
+ '''
+ Draw.BeginAlign()
+ GLOBALS['FILENAME'] = Draw.String('path: ', EVENT_NONE, x+20, y-170, 300, 20, GLOBALS['FILENAME'].val, 64, 'Prefix each file with this name ')
+ Draw.PushButton('..', EVENT_FILESEL, x+320, y-170, 20, 20, 'Select the path', do_redraw)
+ '''
+ # Until batch is added
+ #
+
+
+ #Draw.BeginAlign()
+ Draw.PushButton('Online Help', EVENT_REDRAW, x+20, y-160, 100, 20, 'Open online help in a browser window', do_help)
+ Draw.PushButton('Cancel', EVENT_EXIT, x+130, y-160, 100, 20, 'Exit the exporter', fbx_ui_exit)
+ Draw.PushButton('Export', EVENT_FILESEL, x+240, y-160, 100, 20, 'Export the fbx file', do_redraw)
+
+ #Draw.PushButton('Export', EVENT_EXIT, x+180, y-160, 160, 20, 'Export the fbx file', fbx_ui_write)
+ #Draw.EndAlign()
+
+ # exit when mouse out of the view?
+ # GLOBALS['EVENT'] = EVENT_EXIT
+
+#def write_ui(filename):
+def write_ui():
+
+ # globals
+ GLOBALS['EVENT'] = EVENT_REDRAW
+ #GLOBALS['MOUSE'] = Window.GetMouseCoords()
+ GLOBALS['MOUSE'] = [i/2 for i in Window.GetScreenSize()]
+ GLOBALS['FILENAME'] = ''
+ '''
+ # IF called from the fileselector
+ if filename == None:
+ GLOBALS['FILENAME'] = filename # Draw.Create(Blender.sys.makename(ext='.fbx'))
+ else:
+ GLOBALS['FILENAME'].val = filename
+ '''
+	GLOBALS['EXP_OBS_SELECTED'] = Draw.Create(1) # don't need 2 variables, but do this for clarity
+ GLOBALS['EXP_OBS_SCENE'] = Draw.Create(0)
+
+ GLOBALS['EXP_MESH'] = Draw.Create(1)
+ GLOBALS['EXP_MESH_APPLY_MOD'] = Draw.Create(1)
+ GLOBALS['EXP_MESH_HQ_NORMALS'] = Draw.Create(0)
+ GLOBALS['EXP_ARMATURE'] = Draw.Create(1)
+ GLOBALS['EXP_LAMP'] = Draw.Create(1)
+ GLOBALS['EXP_CAMERA'] = Draw.Create(1)
+ GLOBALS['EXP_EMPTY'] = Draw.Create(1)
+ GLOBALS['EXP_IMAGE_COPY'] = Draw.Create(0)
+ # animation opts
+ GLOBALS['ANIM_ENABLE'] = Draw.Create(1)
+ GLOBALS['ANIM_OPTIMIZE'] = Draw.Create(1)
+ GLOBALS['ANIM_OPTIMIZE_PRECISSION'] = Draw.Create(4) # decimal places
+ GLOBALS['ANIM_ACTION_ALL'] = [Draw.Create(0), Draw.Create(1)] # not just the current action
+
+ # batch export options
+ GLOBALS['BATCH_ENABLE'] = Draw.Create(0)
+	GLOBALS['BATCH_GROUP'] = Draw.Create(1) # can't have both of these enabled at once.
+ GLOBALS['BATCH_SCENE'] = Draw.Create(0) # see above
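+	# default prefix below: the current .blend file's name with its extension replaced by '_', directory part stripped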
+ GLOBALS['BATCH_FILE_PREFIX'] = Draw.Create(Blender.sys.makename(ext='_').split('\\')[-1].split('/')[-1])
+ GLOBALS['BATCH_OWN_DIR'] = Draw.Create(0)
+ # done setting globals
+
+ # Used by the user interface
+ GLOBALS['_SCALE'] = Draw.Create(1.0)
+ GLOBALS['_XROT90'] = Draw.Create(True)
+ GLOBALS['_YROT90'] = Draw.Create(False)
+ GLOBALS['_ZROT90'] = Draw.Create(False)
+
+	# best not to move the cursor
+ # Window.SetMouseCoords(*[i/2 for i in Window.GetScreenSize()])
+
+ # hack so the toggle buttons redraw. this is not nice at all
+ while GLOBALS['EVENT'] != EVENT_EXIT:
+
+ if GLOBALS['BATCH_ENABLE'].val and GLOBALS['BATCH_GROUP'].val and GLOBALS['ANIM_ACTION_ALL'][1].val:
+ #Draw.PupMenu("Warning%t|Cant batch export groups with 'Current Action' ")
+ GLOBALS['ANIM_ACTION_ALL'][0].val = 1
+ GLOBALS['ANIM_ACTION_ALL'][1].val = 0
+
+ if GLOBALS['EVENT'] == EVENT_FILESEL:
+ if GLOBALS['BATCH_ENABLE'].val:
+ txt = 'Batch FBX Dir'
+ name = Blender.sys.expandpath('//')
+ else:
+ txt = 'Export FBX'
+ name = Blender.sys.makename(ext='.fbx')
+
+ Blender.Window.FileSelector(fbx_ui_write, txt, name)
+ #fbx_ui_write('/test.fbx')
+ break
+
+ Draw.UIBlock(fbx_ui, 0)
+
+
+ # GLOBALS.clear()
+
+class EXPORT_OT_fbx(bpy.types.Operator):
+ '''
+ Operator documentation text, will be used for the operator tooltip and python docs.
+ '''
+ __idname__ = "export.fbx"
+ __label__ = "Export FBX"
+
+ # List of operator properties, the attributes will be assigned
+ # to the class instance from the operator settings before calling.
+
+ __props__ = [
+ bpy.props.StringProperty(attr="path", name="File Path", description="File path used for exporting the FBX file", maxlen= 1024, default= ""),
+
+ bpy.props.BoolProperty(attr="EXP_OBS_SELECTED", name="Selected Objects", description="Export selected objects on visible layers", default=True),
+# bpy.props.BoolProperty(attr="EXP_OBS_SCENE", name="Scene Objects", description="Export all objects in this scene", default=True),
+		bpy.props.FloatProperty(attr="_SCALE", name="Scale", description="Scale all data (Note! some importers don't support scaled armatures)", min=0.01, max=1000.0, soft_min=0.01, soft_max=1000.0, default=1.0),
+		bpy.props.BoolProperty(attr="_XROT90", name="Rot X90", description="Rotate all objects 90 degrees about the X axis", default=True),
+		bpy.props.BoolProperty(attr="_YROT90", name="Rot Y90", description="Rotate all objects 90 degrees about the Y axis", default=False),
+		bpy.props.BoolProperty(attr="_ZROT90", name="Rot Z90", description="Rotate all objects 90 degrees about the Z axis", default=False),
+ bpy.props.BoolProperty(attr="EXP_EMPTY", name="Empties", description="Export empty objects", default=True),
+ bpy.props.BoolProperty(attr="EXP_CAMERA", name="Cameras", description="Export camera objects", default=True),
+ bpy.props.BoolProperty(attr="EXP_LAMP", name="Lamps", description="Export lamp objects", default=True),
+ bpy.props.BoolProperty(attr="EXP_ARMATURE", name="Armatures", description="Export armature objects", default=True),
+ bpy.props.BoolProperty(attr="EXP_MESH", name="Meshes", description="Export mesh objects", default=True),
+ bpy.props.BoolProperty(attr="EXP_MESH_APPLY_MOD", name="Modifiers", description="Apply modifiers to mesh objects", default=True),
+ bpy.props.BoolProperty(attr="EXP_MESH_HQ_NORMALS", name="HQ Normals", description="Generate high quality normals", default=True),
+ bpy.props.BoolProperty(attr="EXP_IMAGE_COPY", name="Copy Image Files", description="Copy image files to the destination path", default=False),
+ # armature animation
+ bpy.props.BoolProperty(attr="ANIM_ENABLE", name="Enable Animation", description="Export keyframe animation", default=True),
+ bpy.props.BoolProperty(attr="ANIM_OPTIMIZE", name="Optimize Keyframes", description="Remove double keyframes", default=True),
+		bpy.props.FloatProperty(attr="ANIM_OPTIMIZE_PRECISSION", name="Precision", description="Tolerance for comparing double keyframes (higher for greater accuracy)", min=1, max=16, soft_min=1, soft_max=16, default=6.0),
+# bpy.props.BoolProperty(attr="ANIM_ACTION_ALL", name="Current Action", description="Use actions currently applied to the armatures (use scene start/end frame)", default=True),
+ bpy.props.BoolProperty(attr="ANIM_ACTION_ALL", name="All Actions", description="Use all actions for armatures, if false, use current action", default=False),
+ # batch
+ bpy.props.BoolProperty(attr="BATCH_ENABLE", name="Enable Batch", description="Automate exporting multiple scenes or groups to files", default=False),
+ bpy.props.BoolProperty(attr="BATCH_GROUP", name="Group > File", description="Export each group as an FBX file, if false, export each scene as an FBX file", default=False),
+ bpy.props.BoolProperty(attr="BATCH_OWN_DIR", name="Own Dir", description="Create a dir for each exported file", default=True),
+ bpy.props.StringProperty(attr="BATCH_FILE_PREFIX", name="Prefix", description="Prefix each file with this name", maxlen= 1024, default=""),
+ ]
+
+ def poll(self, context):
+ print("Poll")
+		return context.active_object is not None
+
+ def execute(self, context):
+ if not self.path:
+ raise Exception("path not set")
+
+		GLOBAL_MATRIX = mtx4_identity.copy() # copy so we don't scale the shared identity matrix in place (assumes Mathutils' Matrix.copy())
+ GLOBAL_MATRIX[0][0] = GLOBAL_MATRIX[1][1] = GLOBAL_MATRIX[2][2] = self._SCALE
+ if self._XROT90: GLOBAL_MATRIX = GLOBAL_MATRIX * mtx4_x90n
+ if self._YROT90: GLOBAL_MATRIX = GLOBAL_MATRIX * mtx4_y90n
+ if self._ZROT90: GLOBAL_MATRIX = GLOBAL_MATRIX * mtx4_z90n
+
+ write(self.path,
+ None, # XXX
+ context,
+ self.EXP_OBS_SELECTED,
+ self.EXP_MESH,
+ self.EXP_MESH_APPLY_MOD,
+# self.EXP_MESH_HQ_NORMALS,
+ self.EXP_ARMATURE,
+ self.EXP_LAMP,
+ self.EXP_CAMERA,
+ self.EXP_EMPTY,
+ self.EXP_IMAGE_COPY,
+ GLOBAL_MATRIX,
+ self.ANIM_ENABLE,
+ self.ANIM_OPTIMIZE,
+ self.ANIM_OPTIMIZE_PRECISSION,
+ self.ANIM_ACTION_ALL,
+ self.BATCH_ENABLE,
+ self.BATCH_GROUP,
+ self.BATCH_FILE_PREFIX,
+ self.BATCH_OWN_DIR)
+
+ return ('FINISHED',)
+
+ def invoke(self, context, event):
+ wm = context.manager
+ wm.add_fileselect(self.__operator__)
+ return ('RUNNING_MODAL',)
+
+
+bpy.ops.add(EXPORT_OT_fbx)
+
+# if __name__ == "__main__":
+#	bpy.ops.EXPORT_OT_fbx(path="/tmp/test.fbx")
+
+
+# NOTES (all line numbers correspond to the original export_fbx.py under release/scripts)
+# - Draw.PupMenu alternative in 2.5?, temporarily replaced PupMenu with print
+# - get rid of cleanName somehow
+# + fixed: isinstance(inst, bpy.types.*) doesn't work on RNA objects: line 565
+# + get rid of BPyObject_getObjectArmature, move it in RNA?
+# - BATCH_ENABLE and BATCH_GROUP options: line 327
+# - implement all BPyMesh_* used here with RNA
+# - getDerivedObjects is not fully replicated with .dupli* funcs
+# - talk to Campbell, this code won't work? lines 1867-1875
+# - don't know what those colbits are, do we need them? they're said to be deprecated in DNA_object_types.h: 1886-1893
+# - no hq normals: 1900-1901
+
+# TODO
+
+# - bpy.data.remove_scene: line 366
+# - bpy.sys.time move to bpy.sys.util?
+# - new scene creation, activation: lines 327-342, 368
+# - uses bpy.sys.expandpath, *.relpath - replace at least relpath
+
+# SMALL or COSMETICAL
+# - find a way to get blender version, and put it in bpy.util?, old was Blender.Get('version')
diff --git a/release/scripts/io/export_obj.py b/release/scripts/io/export_obj.py
new file mode 100644
index 00000000000..83b400816e3
--- /dev/null
+++ b/release/scripts/io/export_obj.py
@@ -0,0 +1,996 @@
+#!BPY
+
+"""
+Name: 'Wavefront (.obj)...'
+Blender: 248
+Group: 'Export'
+Tooltip: 'Save a Wavefront OBJ File'
+"""
+
+__author__ = "Campbell Barton, Jiri Hnidek, Paolo Ciccone"
+__url__ = ['http://wiki.blender.org/index.php/Scripts/Manual/Export/wavefront_obj', 'www.blender.org', 'blenderartists.org']
+__version__ = "1.21"
+
+__bpydoc__ = """\
+This script is an exporter to OBJ file format.
+
+Usage:
+
+Select the objects you wish to export and run this script from "File->Export" menu.
+Selecting the default options from the popup box will be good in most cases.
+All objects that can be represented as a mesh (mesh, curve, metaball, surface, text3d)
+will be exported as mesh data.
+"""
+
+
+# --------------------------------------------------------------------------
+# OBJ Export v1.1 by Campbell Barton (AKA Ideasman)
+# --------------------------------------------------------------------------
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+# --------------------------------------------------------------------------
+
+# import math and other in functions that use them for the sake of fast Blender startup
+# import math
+import os
+import shutil # write_mtl() below uses shutil.copy when copying images
+import time
+
+import bpy
+import Mathutils
+
+
+# Returns a tuple - path,extension.
+# 'hello.obj' > ('hello', '.obj')
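+# (similar to os.path.splitext, except rfind scans the whole path, so a dot in a directory name could be mistaken for the extension)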
+def splitExt(path):
+ dotidx = path.rfind('.')
+ if dotidx == -1:
+ return path, ''
+ else:
+ return path[:dotidx], path[dotidx:]
+
+def fixName(name):
+	if name is None:
+ return 'None'
+ else:
+ return name.replace(' ', '_')
+
+
+# this used to be in BPySys module
+# frankly, I don't understand how it works
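+# (what it does: v lists the ordinal of every byte that is NOT one of [A-Za-z0-9_-],
+# and each such character found in the name is replaced with an underscore)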
+def BPySys_cleanName(name):
+
+ v = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,58,59,60,61,62,63,64,91,92,93,94,96,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254]
+
+ invalid = ''.join([chr(i) for i in v])
+
+ for ch in invalid:
+ name = name.replace(ch, '_')
+ return name
+
+# A Dict of Materials
+# (material.name, image.name): (matname_imagename, material, image) # matname_imagename has gaps removed.
+MTL_DICT = {}
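+# e.g. MTL_DICT[('Material', 'tex.png')] = ('Material_tex.png', <material>, <image>)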
+
+def write_mtl(scene, filename, copy_images):
+
+ world = scene.world
+ worldAmb = world.ambient_color
+
+ dest_dir = os.path.dirname(filename)
+
+ def copy_image(image):
+ rel = image.get_export_path(dest_dir, True)
+
+ if copy_images:
+			abs_path = image.get_export_path(dest_dir, False)
+ if not os.path.exists(abs_path):
+ shutil.copy(image.get_abs_filename(), abs_path)
+
+ return rel
+
+
+ file = open(filename, "w")
+ # XXX
+# file.write('# Blender3D MTL File: %s\n' % Blender.Get('filename').split('\\')[-1].split('/')[-1])
+ file.write('# Material Count: %i\n' % len(MTL_DICT))
+ # Write material/image combinations we have used.
+ for key, (mtl_mat_name, mat, img) in MTL_DICT.items():
+
+ # Get the Blender data for the material and the image.
+		# Having an image named None will cause a bug, don't do it :)
+
+ file.write('newmtl %s\n' % mtl_mat_name) # Define a new material: matname_imgname
+
+ if mat:
+			file.write('Ns %.6f\n' % ((mat.specular_hardness-1) * 1.9607843137254901) ) # Hardness, convert Blender's 1-511 to MTL's 0-1000 (factor = 1000/510)
+			file.write('Ka %.6f %.6f %.6f\n' % tuple([c*mat.ambient for c in worldAmb]) ) # Ambient, world ambient colour scaled by the material's ambient factor
+ file.write('Kd %.6f %.6f %.6f\n' % tuple([c*mat.diffuse_intensity for c in mat.diffuse_color]) ) # Diffuse
+ file.write('Ks %.6f %.6f %.6f\n' % tuple([c*mat.specular_intensity for c in mat.specular_color]) ) # Specular
+ if hasattr(mat, "ior"):
+ file.write('Ni %.6f\n' % mat.ior) # Refraction index
+ else:
+ file.write('Ni %.6f\n' % 1.0)
+ file.write('d %.6f\n' % mat.alpha) # Alpha (obj uses 'd' for dissolve)
+
+ # 0 to disable lighting, 1 for ambient & diffuse only (specular color set to black), 2 for full lighting.
+ if mat.shadeless:
+ file.write('illum 0\n') # ignore lighting
+ elif mat.specular_intensity == 0:
+ file.write('illum 1\n') # no specular.
+ else:
+				file.write('illum 2\n') # light normally
+
+ else:
+ #write a dummy material here?
+ file.write('Ns 0\n')
+ file.write('Ka %.6f %.6f %.6f\n' % tuple([c for c in worldAmb]) ) # Ambient, uses mirror colour,
+ file.write('Kd 0.8 0.8 0.8\n')
+ file.write('Ks 0.8 0.8 0.8\n')
+ file.write('d 1\n') # No alpha
+			file.write('illum 2\n') # light normally
+
+ # Write images!
+ if img: # We have an image on the face!
+ # write relative image path
+ rel = copy_image(img)
+ file.write('map_Kd %s\n' % rel) # Diffuse mapping image
+# file.write('map_Kd %s\n' % img.filename.split('\\')[-1].split('/')[-1]) # Diffuse mapping image
+
+		elif mat: # No face image. If we have a material, search for an MTex image.
+ for mtex in mat.textures:
+ if mtex and mtex.texture.type == 'IMAGE':
+ try:
+ filename = copy_image(mtex.texture.image)
+# filename = mtex.texture.image.filename.split('\\')[-1].split('/')[-1]
+ file.write('map_Kd %s\n' % filename) # Diffuse mapping image
+ break
+ except:
+						# Texture has no image though it's an image type; best ignore.
+ pass
+
+ file.write('\n\n')
+
+ file.close()
+
+# XXX not used
+def copy_file(source, dest):
+ file = open(source, 'rb')
+ data = file.read()
+ file.close()
+
+ file = open(dest, 'wb')
+ file.write(data)
+ file.close()
+
+
+# XXX not used
+def copy_images(dest_dir):
+ if dest_dir[-1] != os.sep:
+ dest_dir += os.sep
+# if dest_dir[-1] != sys.sep:
+# dest_dir += sys.sep
+
+ # Get unique image names
+ uniqueImages = {}
+ for matname, mat, image in MTL_DICT.values(): # Only use image name
+ # Get Texface images
+ if image:
+			uniqueImages[image] = image # should use a set here (dict kept from pre-Python-2.4 code)
+
+ # Get MTex images
+ if mat:
+ for mtex in mat.textures:
+ if mtex and mtex.texture.type == 'IMAGE':
+ image_tex = mtex.texture.image
+ if image_tex:
+ try:
+ uniqueImages[image_tex] = image_tex
+ except:
+ pass
+
+ # Now copy images
+ copyCount = 0
+
+# for bImage in uniqueImages.values():
+# image_path = bpy.sys.expandpath(bImage.filename)
+# if bpy.sys.exists(image_path):
+# # Make a name for the target path.
+# dest_image_path = dest_dir + image_path.split('\\')[-1].split('/')[-1]
+# if not bpy.sys.exists(dest_image_path): # Image isnt alredy there
+# print('\tCopying "%s" > "%s"' % (image_path, dest_image_path))
+# copy_file(image_path, dest_image_path)
+# copyCount+=1
+
+# paths= bpy.util.copy_images(uniqueImages.values(), dest_dir)
+
+ print('\tCopied %d images' % copyCount)
+# print('\tCopied %d images' % copyCount)
+
+# XXX not converted
+def test_nurbs_compat(ob):
+ if ob.type != 'Curve':
+ return False
+
+ for nu in ob.data:
+ if (not nu.knotsV) and nu.type != 1: # not a surface and not bezier
+ return True
+
+ return False
+
+
+# XXX not converted
+def write_nurb(file, ob, ob_mat):
+ tot_verts = 0
+ cu = ob.data
+
+ # use negative indices
+ Vector = Blender.Mathutils.Vector
+ for nu in cu:
+
+ if nu.type==0: DEG_ORDER_U = 1
+ else: DEG_ORDER_U = nu.orderU-1 # Tested to be correct
+
+ if nu.type==1:
+ print("\tWarning, bezier curve:", ob.name, "only poly and nurbs curves supported")
+ continue
+
+ if nu.knotsV:
+ print("\tWarning, surface:", ob.name, "only poly and nurbs curves supported")
+ continue
+
+ if len(nu) <= DEG_ORDER_U:
+			print("\tWarning, orderU is lower than vert count, skipping:", ob.name)
+ continue
+
+ pt_num = 0
+ do_closed = (nu.flagU & 1)
+ do_endpoints = (do_closed==0) and (nu.flagU & 2)
+
+ for pt in nu:
+ pt = Vector(pt[0], pt[1], pt[2]) * ob_mat
+ file.write('v %.6f %.6f %.6f\n' % (pt[0], pt[1], pt[2]))
+ pt_num += 1
+ tot_verts += pt_num
+
+ file.write('g %s\n' % (fixName(ob.name))) # fixName(ob.getData(1)) could use the data name too
+ file.write('cstype bspline\n') # not ideal, hard coded
+ file.write('deg %d\n' % DEG_ORDER_U) # not used for curves but most files have it still
+
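+		# negative OBJ indices count back from the verts most recently written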
+ curve_ls = [-(i+1) for i in range(pt_num)]
+
+ # 'curv' keyword
+ if do_closed:
+ if DEG_ORDER_U == 1:
+ pt_num += 1
+ curve_ls.append(-1)
+ else:
+ pt_num += DEG_ORDER_U
+ curve_ls = curve_ls + curve_ls[0:DEG_ORDER_U]
+
+ file.write('curv 0.0 1.0 %s\n' % (' '.join( [str(i) for i in curve_ls] ))) # Blender has no U and V values for the curve
+
+ # 'parm' keyword
+ tot_parm = (DEG_ORDER_U + 1) + pt_num
+ tot_parm_div = float(tot_parm-1)
+ parm_ls = [(i/tot_parm_div) for i in range(tot_parm)]
+
+ if do_endpoints: # end points, force param
+ for i in range(DEG_ORDER_U+1):
+ parm_ls[i] = 0.0
+ parm_ls[-(1+i)] = 1.0
+
+ file.write('parm u %s\n' % ' '.join( [str(i) for i in parm_ls] ))
+
+ file.write('end\n')
+
+ return tot_verts
+
+def write(filename, objects, scene,
+ EXPORT_TRI=False,
+ EXPORT_EDGES=False,
+ EXPORT_NORMALS=False,
+ EXPORT_NORMALS_HQ=False,
+ EXPORT_UV=True,
+ EXPORT_MTL=True,
+ EXPORT_COPY_IMAGES=False,
+ EXPORT_APPLY_MODIFIERS=True,
+ EXPORT_ROTX90=True,
+ EXPORT_BLEN_OBS=True,
+ EXPORT_GROUP_BY_OB=False,
+ EXPORT_GROUP_BY_MAT=False,
+ EXPORT_KEEP_VERT_ORDER=False,
+ EXPORT_POLYGROUPS=False,
+ EXPORT_CURVE_AS_NURBS=True):
+ '''
+	Basic write function. The context and options must be already set.
+	This can be accessed externally, e.g.:
+	write( 'c:\\test\\foobar.obj', Blender.Object.GetSelected() ) # Using default options.
+ '''
+
+ # XXX
+ import math
+
+ def veckey3d(v):
+ return round(v.x, 6), round(v.y, 6), round(v.z, 6)
+
+ def veckey2d(v):
+ return round(v[0], 6), round(v[1], 6)
+ # return round(v.x, 6), round(v.y, 6)
+
+ def findVertexGroupName(face, vWeightMap):
+ """
+		Searches the vertexDict to see what group is assigned to a given face.
+		We use a frequency system in order to sort out the name, because a given vertex can
+		belong to two or more groups at the same time. To find the right name for the face
+		we list all the possible vertex group names with their frequency, then sort by
+		frequency in descending order. The name at the top, shared by the highest number
+		of vertices, is the face's group.
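+		e.g. weights {'Arm': 2.1, 'Torso': 0.4} resolve to 'Arm'.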
+ """
+ weightDict = {}
+ for vert_index in face.verts:
+# for vert in face:
+ vWeights = vWeightMap[vert_index]
+# vWeights = vWeightMap[vert]
+ for vGroupName, weight in vWeights:
+ weightDict[vGroupName] = weightDict.get(vGroupName, 0) + weight
+
+ if weightDict:
+ alist = [(weight,vGroupName) for vGroupName, weight in weightDict.items()] # sort least to greatest amount of weight
+ alist.sort()
+ return(alist[-1][1]) # highest value last
+ else:
+ return '(null)'
+
+ # TODO: implement this in C? dunno how it should be called...
+ def getVertsFromGroup(me, group_index):
+ ret = []
+
+ for i, v in enumerate(me.verts):
+ for g in v.groups:
+ if g.group == group_index:
+ ret.append((i, g.weight))
+
+ return ret
+
+
+ print('OBJ Export path: "%s"' % filename)
+ temp_mesh_name = '~tmp-mesh'
+
+ time1 = time.clock()
+# time1 = sys.time()
+# scn = Scene.GetCurrent()
+
+ file = open(filename, "w")
+
+ # Write Header
+ version = "2.5"
+ file.write('# Blender3D v%s OBJ File: %s\n' % (version, bpy.data.filename.split('/')[-1].split('\\')[-1] ))
+ file.write('# www.blender3d.org\n')
+
+ # Tell the obj file what material file to use.
+ if EXPORT_MTL:
+ mtlfilename = '%s.mtl' % '.'.join(filename.split('.')[:-1])
+ file.write('mtllib %s\n' % ( mtlfilename.split('\\')[-1].split('/')[-1] ))
+
+ if EXPORT_ROTX90:
+ mat_xrot90= Mathutils.RotationMatrix(-math.pi/2, 4, 'x')
+
+	# Initialize totals; these are updated for each object (OBJ indices are 1-based, so the counters start at 1)
+ totverts = totuvco = totno = 1
+
+ face_vert_index = 1
+
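+	# maps a rounded normal vector to its global 'vn' index, so each unique normal is written once and shared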
+ globalNormals = {}
+
+ # Get all meshes
+ for ob_main in objects:
+
+ # ignore dupli children
+ if ob_main.parent and ob_main.parent.dupli_type != 'NONE':
+ # XXX
+ print(ob_main.name, 'is a dupli child - ignoring')
+ continue
+
+ obs = []
+ if ob_main.dupli_type != 'NONE':
+ # XXX
+ print('creating dupli_list on', ob_main.name)
+ ob_main.create_dupli_list()
+
+ obs = [(dob.object, dob.matrix) for dob in ob_main.dupli_list]
+
+ # XXX debug print
+ print(ob_main.name, 'has', len(obs), 'dupli children')
+ else:
+ obs = [(ob_main, ob_main.matrix)]
+
+ for ob, ob_mat in obs:
+
+ # XXX postponed
+# # Nurbs curve support
+# if EXPORT_CURVE_AS_NURBS and test_nurbs_compat(ob):
+# if EXPORT_ROTX90:
+# ob_mat = ob_mat * mat_xrot90
+
+# totverts += write_nurb(file, ob, ob_mat)
+
+# continue
+# end nurbs
+
+ if ob.type != 'MESH':
+ continue
+
+ me = ob.create_mesh(EXPORT_APPLY_MODIFIERS, 'PREVIEW')
+
+ if EXPORT_ROTX90:
+ me.transform(ob_mat * mat_xrot90)
+ else:
+ me.transform(ob_mat)
+
+# # Will work for non meshes now! :)
+# me= BPyMesh.getMeshFromObject(ob, containerMesh, EXPORT_APPLY_MODIFIERS, EXPORT_POLYGROUPS, scn)
+# if not me:
+# continue
+
+ if EXPORT_UV:
+ faceuv = len(me.uv_textures) > 0
+ else:
+ faceuv = False
+
+ # XXX - todo, find a better way to do triangulation
+ # ...removed convert_to_triface because it relies on editmesh
+ '''
+ # We have a valid mesh
+ if EXPORT_TRI and me.faces:
+ # Add a dummy object to it.
+ has_quads = False
+ for f in me.faces:
+ if f.verts[3] != 0:
+ has_quads = True
+ break
+
+ if has_quads:
+ newob = bpy.data.add_object('MESH', 'temp_object')
+ newob.data = me
+ # if we forget to set Object.data - crash
+ scene.add_object(newob)
+ newob.convert_to_triface(scene)
+ # mesh will still be there
+ scene.remove_object(newob)
+ '''
+
+ # Make our own list so it can be sorted to reduce context switching
+ face_index_pairs = [ (face, index) for index, face in enumerate(me.faces)]
+ # faces = [ f for f in me.faces ]
+
+ if EXPORT_EDGES:
+ edges = me.edges
+ else:
+ edges = []
+
+			if not (len(face_index_pairs)+len(edges)+len(me.verts)): # Make sure there is something to write
+
+ # clean up
+ bpy.data.remove_mesh(me)
+
+				continue # don't bother with this mesh.
+
+ # XXX
+ # High Quality Normals
+ if EXPORT_NORMALS and face_index_pairs:
+ me.calc_normals()
+# if EXPORT_NORMALS_HQ:
+# BPyMesh.meshCalcNormals(me)
+# else:
+# # transforming normals is incorrect
+# # when the matrix is scaled,
+# # better to recalculate them
+# me.calcNormals()
+
+ materials = me.materials
+
+ materialNames = []
+ materialItems = [m for m in materials]
+ if materials:
+ for mat in materials:
+ if mat: # !=None
+ materialNames.append(mat.name)
+ else:
+ materialNames.append(None)
+				# Can't use a list comprehension because some materials are None.
+				# materialNames = map(lambda mat: mat.name, materials) # Blender bug, doesn't account for null materials, still broken.
+
+			# Possibly there are null materials; they will mess up the indices,
+			# but at least it will export. Wait until Blender gets fixed.
+ materialNames.extend((16-len(materialNames)) * [None])
+ materialItems.extend((16-len(materialItems)) * [None])
+
+ # Sort by Material, then images
+			# so we don't over-context-switch in the obj file.
+ if EXPORT_KEEP_VERT_ORDER:
+ pass
+ elif faceuv:
+ # XXX update
+ tface = me.active_uv_texture.data
+
+				# exception only raised on Python 2.3 or lower (and the cmp fallback below won't work on Python 3)
+ try:
+ face_index_pairs.sort(key = lambda a: (a[0].material_index, tface[a[1]].image, a[0].smooth))
+ except:
+ face_index_pairs.sort(lambda a,b: cmp((a[0].material_index, tface[a[1]].image, a[0].smooth),
+ (b[0].material_index, tface[b[1]].image, b[0].smooth)))
+ elif len(materials) > 1:
+ try:
+ face_index_pairs.sort(key = lambda a: (a[0].material_index, a[0].smooth))
+ except:
+ face_index_pairs.sort(lambda a,b: cmp((a[0].material_index, a[0].smooth),
+ (b[0].material_index, b[0].smooth)))
+ else:
+ # no materials
+ try:
+ face_index_pairs.sort(key = lambda a: a[0].smooth)
+ except:
+ face_index_pairs.sort(lambda a,b: cmp(a[0].smooth, b[0].smooth))
+# if EXPORT_KEEP_VERT_ORDER:
+# pass
+# elif faceuv:
+# try: faces.sort(key = lambda a: (a.mat, a.image, a.smooth))
+# except: faces.sort(lambda a,b: cmp((a.mat, a.image, a.smooth), (b.mat, b.image, b.smooth)))
+# elif len(materials) > 1:
+# try: faces.sort(key = lambda a: (a.mat, a.smooth))
+# except: faces.sort(lambda a,b: cmp((a.mat, a.smooth), (b.mat, b.smooth)))
+# else:
+# # no materials
+# try: faces.sort(key = lambda a: a.smooth)
+# except: faces.sort(lambda a,b: cmp(a.smooth, b.smooth))
+
+ faces = [pair[0] for pair in face_index_pairs]
+
+ # Set the default mat to no material and no image.
+			contextMat = (0, 0) # Can never be this, so we will label a new material the first chance we get.
+ contextSmooth = None # Will either be true or false, set bad to force initialization switch.
+
+ if EXPORT_BLEN_OBS or EXPORT_GROUP_BY_OB:
+ name1 = ob.name
+ name2 = ob.data.name
+ if name1 == name2:
+ obnamestring = fixName(name1)
+ else:
+ obnamestring = '%s_%s' % (fixName(name1), fixName(name2))
+
+ if EXPORT_BLEN_OBS:
+ file.write('o %s\n' % obnamestring) # Write Object name
+ else: # if EXPORT_GROUP_BY_OB:
+ file.write('g %s\n' % obnamestring)
+
+
+ # Vert
+ for v in me.verts:
+ file.write('v %.6f %.6f %.6f\n' % tuple(v.co))
+
+ # UV
+ if faceuv:
+ uv_face_mapping = [[0,0,0,0] for f in faces] # a bit of a waste for tri's :/
+
+ uv_dict = {} # could use a set() here
+ uv_layer = me.active_uv_texture
+ for f, f_index in face_index_pairs:
+
+ tface = uv_layer.data[f_index]
+
+ uvs = tface.uv
+ # uvs = [tface.uv1, tface.uv2, tface.uv3]
+
+ # # add another UV if it's a quad
+ # if len(f.verts) == 4:
+ # uvs.append(tface.uv4)
+
+ for uv_index, uv in enumerate(uvs):
+ uvkey = veckey2d(uv)
+ try:
+ uv_face_mapping[f_index][uv_index] = uv_dict[uvkey]
+ except:
+ uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] = len(uv_dict)
+ file.write('vt %.6f %.6f\n' % tuple(uv))
+
+# uv_dict = {} # could use a set() here
+# for f_index, f in enumerate(faces):
+
+# for uv_index, uv in enumerate(f.uv):
+# uvkey = veckey2d(uv)
+# try:
+# uv_face_mapping[f_index][uv_index] = uv_dict[uvkey]
+# except:
+# uv_face_mapping[f_index][uv_index] = uv_dict[uvkey] = len(uv_dict)
+# file.write('vt %.6f %.6f\n' % tuple(uv))
+
+ uv_unique_count = len(uv_dict)
+# del uv, uvkey, uv_dict, f_index, uv_index
+ # Only need uv_unique_count and uv_face_mapping
+
+ # NORMAL, Smooth/Non smoothed.
+ if EXPORT_NORMALS:
+ for f in faces:
+ if f.smooth:
+						for v_idx in f.verts: # f.verts holds indices, so look the verts up in the mesh
+							noKey = veckey3d(me.verts[v_idx].normal)
+ if noKey not in globalNormals:
+ globalNormals[noKey] = totno
+ totno +=1
+ file.write('vn %.6f %.6f %.6f\n' % noKey)
+ else:
+ # Hard, 1 normal from the face.
+ noKey = veckey3d(f.normal)
+ if noKey not in globalNormals:
+ globalNormals[noKey] = totno
+ totno +=1
+ file.write('vn %.6f %.6f %.6f\n' % noKey)
+
+ if not faceuv:
+ f_image = None
+
+ # XXX
+ if EXPORT_POLYGROUPS:
+ # Retrieve the list of vertex groups
+# vertGroupNames = me.getVertGroupNames()
+
+ currentVGroup = ''
+ # Create a dictionary keyed by face id and listing, for each vertex, the vertex groups it belongs to
+ vgroupsMap = [[] for _i in range(len(me.verts))]
+# vgroupsMap = [[] for _i in xrange(len(me.verts))]
+ for g in ob.vertex_groups:
+# for vertexGroupName in vertGroupNames:
+ for vIdx, vWeight in getVertsFromGroup(me, g.index):
+# for vIdx, vWeight in me.getVertsFromGroup(vertexGroupName, 1):
+ vgroupsMap[vIdx].append((g.name, vWeight))
+
+ for f_index, f in enumerate(faces):
+ f_v = [{"index": index, "vertex": me.verts[index]} for index in f.verts]
+
+ # if f.verts[3] == 0:
+ # f_v.pop()
+
+# f_v= f.v
+ f_smooth= f.smooth
+ f_mat = min(f.material_index, len(materialNames)-1)
+# f_mat = min(f.mat, len(materialNames)-1)
+ if faceuv:
+
+ tface = me.active_uv_texture.data[face_index_pairs[f_index][1]]
+
+ f_image = tface.image
+ f_uv = tface.uv
+ # f_uv= [tface.uv1, tface.uv2, tface.uv3]
+ # if len(f.verts) == 4:
+ # f_uv.append(tface.uv4)
+# f_image = f.image
+# f_uv= f.uv
+
+ # MAKE KEY
+ if faceuv and f_image: # Object is always true.
+ key = materialNames[f_mat], f_image.name
+ else:
+ key = materialNames[f_mat], None # No image, use None instead.
+
+ # Write the vertex group
+ if EXPORT_POLYGROUPS:
+ if len(ob.vertex_groups):
+						# find what vertex group the face belongs to
+ theVGroup = findVertexGroupName(f,vgroupsMap)
+ if theVGroup != currentVGroup:
+ currentVGroup = theVGroup
+ file.write('g %s\n' % theVGroup)
+# # Write the vertex group
+# if EXPORT_POLYGROUPS:
+# if vertGroupNames:
+# # find what vertext group the face belongs to
+# theVGroup = findVertexGroupName(f,vgroupsMap)
+# if theVGroup != currentVGroup:
+# currentVGroup = theVGroup
+# file.write('g %s\n' % theVGroup)
+
+ # CHECK FOR CONTEXT SWITCH
+ if key == contextMat:
+					pass # Context already switched, don't do anything
+ else:
+ if key[0] == None and key[1] == None:
+ # Write a null material, since we know the context has changed.
+ if EXPORT_GROUP_BY_MAT:
+ # can be mat_image or (null)
+ file.write('g %s_%s\n' % (fixName(ob.name), fixName(ob.data.name)) ) # can be mat_image or (null)
+ file.write('usemtl (null)\n') # mat, image
+
+ else:
+ mat_data= MTL_DICT.get(key)
+ if not mat_data:
+ # First add to global dict so we can export to mtl
+ # Then write mtl
+
+						# Make a new name from the material and image names,
+						# converting any spaces to underscores with fixName.
+
+						# If there's no image, don't bother adding it to the name
+ if key[1] == None:
+ mat_data = MTL_DICT[key] = ('%s'%fixName(key[0])), materialItems[f_mat], f_image
+ else:
+ mat_data = MTL_DICT[key] = ('%s_%s' % (fixName(key[0]), fixName(key[1]))), materialItems[f_mat], f_image
+
+ if EXPORT_GROUP_BY_MAT:
+ file.write('g %s_%s_%s\n' % (fixName(ob.name), fixName(ob.data.name), mat_data[0]) ) # can be mat_image or (null)
+
+ file.write('usemtl %s\n' % mat_data[0]) # can be mat_image or (null)
+
+ contextMat = key
+ if f_smooth != contextSmooth:
+					if f_smooth: # was off, now on
+ file.write('s 1\n')
+ contextSmooth = f_smooth
+					else: # was on, now off
+ file.write('s off\n')
+ contextSmooth = f_smooth
+
+ file.write('f')
+ if faceuv:
+ if EXPORT_NORMALS:
+ if f_smooth: # Smoothed, use vertex normals
+ for vi, v in enumerate(f_v):
+ file.write( ' %d/%d/%d' % \
+ (v["index"] + totverts,
+ totuvco + uv_face_mapping[f_index][vi],
+ globalNormals[ veckey3d(v["vertex"].normal) ]) ) # vert, uv, normal
+
+ else: # No smoothing, face normals
+ no = globalNormals[ veckey3d(f.normal) ]
+ for vi, v in enumerate(f_v):
+ file.write( ' %d/%d/%d' % \
+ (v["index"] + totverts,
+ totuvco + uv_face_mapping[f_index][vi],
+ no) ) # vert, uv, normal
+ else: # No Normals
+ for vi, v in enumerate(f_v):
+ file.write( ' %d/%d' % (\
+ v["index"] + totverts,\
+ totuvco + uv_face_mapping[f_index][vi])) # vert, uv
+
+ face_vert_index += len(f_v)
+
+ else: # No UV's
+ if EXPORT_NORMALS:
+ if f_smooth: # Smoothed, use vertex normals
+ for v in f_v:
+ file.write( ' %d//%d' %
+ (v["index"] + totverts, globalNormals[ veckey3d(v["vertex"].normal) ]) )
+ else: # No smoothing, face normals
+ no = globalNormals[ veckey3d(f.normal) ]
+ for v in f_v:
+ file.write( ' %d//%d' % (v["index"] + totverts, no) )
+ else: # No Normals
+ for v in f_v:
+ file.write( ' %d' % (v["index"] + totverts) )
+
+ file.write('\n')
+
+ # Write edges.
+ if EXPORT_EDGES:
+ for ed in edges:
+ if ed.loose:
+ file.write('f %d %d\n' % (ed.verts[0] + totverts, ed.verts[1] + totverts))
+
+			# Make the indices global rather than per mesh
+ totverts += len(me.verts)
+ if faceuv:
+ totuvco += uv_unique_count
+
+ # clean up
+ bpy.data.remove_mesh(me)
+
+ if ob_main.dupli_type != 'NONE':
+ ob_main.free_dupli_list()
+
+ file.close()
+
+
+ # Now we have all our materials, save them
+ if EXPORT_MTL:
+ write_mtl(scene, mtlfilename, EXPORT_COPY_IMAGES)
+# if EXPORT_COPY_IMAGES:
+# dest_dir = os.path.basename(filename)
+# # dest_dir = filename
+# # # Remove chars until we are just the path.
+# # while dest_dir and dest_dir[-1] not in '\\/':
+# # dest_dir = dest_dir[:-1]
+# if dest_dir:
+# copy_images(dest_dir)
+# else:
+# print('\tError: "%s" could not be used as a base for an image path.' % filename)
+
+ print("OBJ Export time: %.2f" % (time.clock() - time1))
+# print "OBJ Export time: %.2f" % (sys.time() - time1)
+
+def do_export(filename, context,
+ EXPORT_APPLY_MODIFIERS = True, # not used
+ EXPORT_ROTX90 = True, # wrong
+ EXPORT_TRI = False, # ok
+ EXPORT_EDGES = False,
+ EXPORT_NORMALS = False, # not yet
+ EXPORT_NORMALS_HQ = False, # not yet
+ EXPORT_UV = True, # ok
+ EXPORT_MTL = True,
+ EXPORT_SEL_ONLY = True, # ok
+ EXPORT_ALL_SCENES = False, # XXX not working atm
+ EXPORT_ANIMATION = False,
+ EXPORT_COPY_IMAGES = False,
+ EXPORT_BLEN_OBS = True,
+ EXPORT_GROUP_BY_OB = False,
+ EXPORT_GROUP_BY_MAT = False,
+ EXPORT_KEEP_VERT_ORDER = False,
+ EXPORT_POLYGROUPS = False,
+ EXPORT_CURVE_AS_NURBS = True):
+ # Window.EditMode(0)
+ # Window.WaitCursor(1)
+
+ base_name, ext = splitExt(filename)
+ context_name = [base_name, '', '', ext] # Base name, scene name, frame number, extension
+
+ orig_scene = context.scene
+
+# if EXPORT_ALL_SCENES:
+# export_scenes = bpy.data.scenes
+# else:
+# export_scenes = [orig_scene]
+
+ # XXX only exporting one scene atm since changing
+ # current scene is not possible.
+ # Brecht says that ideally in 2.5 we won't need such a function,
+ # allowing multiple scenes open at once.
+ export_scenes = [orig_scene]
+
+ # Export all scenes.
+ for scn in export_scenes:
+ # scn.makeCurrent() # If already current, this is not slow.
+ # context = scn.getRenderingContext()
+ orig_frame = scn.current_frame
+
+ if EXPORT_ALL_SCENES: # Add scene name into the context_name
+			context_name[1] = '_%s' % BPySys_cleanName(scn.name) # WARNING, it's possible this could cause a name collision; we could fix it if we were feeling paranoid.
+
+ # Export an animation?
+ if EXPORT_ANIMATION:
+			scene_frames = range(scn.start_frame, scn.end_frame+1) # Up to and including the end frame.
+ else:
+			scene_frames = [orig_frame] # Don't export an animation.
+
+ # Loop through all frames in the scene and export.
+ for frame in scene_frames:
+ if EXPORT_ANIMATION: # Add frame to the filename.
+ context_name[2] = '_%.6d' % frame
+
+ scn.current_frame = frame
+ if EXPORT_SEL_ONLY:
+ export_objects = context.selected_objects
+ else:
+ export_objects = scn.objects
+
+ full_path= ''.join(context_name)
+
+ # erm... bit of a problem here, this can overwrite files when exporting frames. not too bad.
+ # EXPORT THE FILE.
+ write(full_path, export_objects, scn,
+ EXPORT_TRI, EXPORT_EDGES, EXPORT_NORMALS,
+ EXPORT_NORMALS_HQ, EXPORT_UV, EXPORT_MTL,
+ EXPORT_COPY_IMAGES, EXPORT_APPLY_MODIFIERS,
+ EXPORT_ROTX90, EXPORT_BLEN_OBS,
+ EXPORT_GROUP_BY_OB, EXPORT_GROUP_BY_MAT, EXPORT_KEEP_VERT_ORDER,
+ EXPORT_POLYGROUPS, EXPORT_CURVE_AS_NURBS)
+
+
+ scn.current_frame = orig_frame
+
+ # Restore old active scene.
+# orig_scene.makeCurrent()
+# Window.WaitCursor(0)
+
+
+class EXPORT_OT_obj(bpy.types.Operator):
+ '''
+ Currently the exporter lacks these features:
+ * nurbs
+ * multiple scene export (only active scene is written)
+ * particles
+ '''
+ __idname__ = "export.obj"
+ __label__ = 'Export OBJ'
+
+ # List of operator properties, the attributes will be assigned
+ # to the class instance from the operator settings before calling.
+
+ __props__ = [
+ bpy.props.StringProperty(attr="path", name="File Path", description="File path used for exporting the OBJ file", maxlen= 1024, default= ""),
+
+ # context group
+ bpy.props.BoolProperty(attr="use_selection", name="Selection Only", description="", default= False),
+ bpy.props.BoolProperty(attr="use_all_scenes", name="All Scenes", description="", default= False),
+ bpy.props.BoolProperty(attr="use_animation", name="All Animation", description="", default= False),
+
+ # object group
+ bpy.props.BoolProperty(attr="use_modifiers", name="Apply Modifiers", description="", default= True),
+ bpy.props.BoolProperty(attr="use_rotate90", name="Rotate X90", description="", default= True),
+
+ # extra data group
+ bpy.props.BoolProperty(attr="use_edges", name="Edges", description="", default= True),
+ bpy.props.BoolProperty(attr="use_normals", name="Normals", description="", default= False),
+ bpy.props.BoolProperty(attr="use_hq_normals", name="High Quality Normals", description="", default= True),
+ bpy.props.BoolProperty(attr="use_uvs", name="UVs", description="", default= True),
+ bpy.props.BoolProperty(attr="use_materials", name="Materials", description="", default= True),
+ bpy.props.BoolProperty(attr="copy_images", name="Copy Images", description="", default= False),
+ bpy.props.BoolProperty(attr="use_triangles", name="Triangulate", description="", default= False),
+ bpy.props.BoolProperty(attr="use_vertex_groups", name="Polygroups", description="", default= False),
+ bpy.props.BoolProperty(attr="use_nurbs", name="Nurbs", description="", default= False),
+
+ # grouping group
+ bpy.props.BoolProperty(attr="use_blen_objects", name="Objects as OBJ Objects", description="", default= True),
+		bpy.props.BoolProperty(attr="group_by_object", name="Objects as OBJ Groups", description="", default= False),
+ bpy.props.BoolProperty(attr="group_by_material", name="Material Groups", description="", default= False),
+ bpy.props.BoolProperty(attr="keep_vertex_order", name="Keep Vertex Order", description="", default= False)
+ ]
+
+ def execute(self, context):
+
+ do_export(self.path, context,
+ EXPORT_TRI=self.use_triangles,
+ EXPORT_EDGES=self.use_edges,
+ EXPORT_NORMALS=self.use_normals,
+ EXPORT_NORMALS_HQ=self.use_hq_normals,
+ EXPORT_UV=self.use_uvs,
+ EXPORT_MTL=self.use_materials,
+ EXPORT_COPY_IMAGES=self.copy_images,
+ EXPORT_APPLY_MODIFIERS=self.use_modifiers,
+ EXPORT_ROTX90=self.use_rotate90,
+ EXPORT_BLEN_OBS=self.use_blen_objects,
+ EXPORT_GROUP_BY_OB=self.group_by_object,
+ EXPORT_GROUP_BY_MAT=self.group_by_material,
+ EXPORT_KEEP_VERT_ORDER=self.keep_vertex_order,
+ EXPORT_POLYGROUPS=self.use_vertex_groups,
+ EXPORT_CURVE_AS_NURBS=self.use_nurbs,
+ EXPORT_SEL_ONLY=self.use_selection,
+ EXPORT_ALL_SCENES=self.use_all_scenes)
+
+ return ('FINISHED',)
+
+ def invoke(self, context, event):
+ wm = context.manager
+ wm.add_fileselect(self.__operator__)
+ return ('RUNNING_MODAL',)
+
+	def poll(self, context): # Poll isn't working yet
+		print("Poll")
+		return context.active_object is not None
+
+bpy.ops.add(EXPORT_OT_obj)
+
+if __name__ == "__main__":
+	bpy.ops.EXPORT_OT_obj(path="/tmp/test.obj") # the operator property is "path" (see __props__ above), not "filename"
+
+# CONVERSION ISSUES
+# - matrix problem
+# - duplis - only tested dupliverts
+# - NURBS - needs API additions
+# - all scenes export
+# + normals calculation
+# - get rid of cleanName somehow
diff --git a/release/scripts/io/export_ply.py b/release/scripts/io/export_ply.py
new file mode 100644
index 00000000000..8e79c3741bb
--- /dev/null
+++ b/release/scripts/io/export_ply.py
@@ -0,0 +1,279 @@
+import bpy
+
+__author__ = "Bruce Merry"
+__version__ = "0.93"
+__bpydoc__ = """\
+This script exports Stanford PLY files from Blender. It supports normals,
+colours, and texture coordinates per face or per vertex.
+Only one mesh can be exported at a time.
+"""
+
+# Copyright (C) 2004, 2005: Bruce Merry, bmerry@cs.uct.ac.za
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# Updated on Aug 11, 2008 by Campbell Barton
+# - added 'comment' prefix to comments - Needed to comply with the PLY spec.
+#
+# Updated on Jan 1, 2007 by Gabe Ghearing
+# - fixed normals so they are correctly smooth/flat
+# - fixed crash when the model doesn't have uv coords or vertex colors
+# - fixed crash when the model has vertex colors but doesn't have uv coords
+# - changed float32 to float and uint8 to uchar for compatibility
+# Errata/Notes as of Jan 1, 2007
+# - script exports texture coords if they exist even if TexFace isn't selected (not a big deal to me)
+# - ST(R) should probably be renamed UV(T) like in most PLY files (importer needs to be updated to take either)
+#
+# Updated on Jan 3, 2007 by Gabe Ghearing
+# - fixed "sticky" vertex UV exporting
+# - added pupmenu to enable/disable exporting normals, uv coords, and colors
+# Errata/Notes as of Jan 3, 2007
+# - ST(R) coords should probably be renamed UV(T) like in most PLY files (importer needs to be updated to take either)
+# - edges should be exported since PLY files support them
+# - code is getting spaghettish, it should be refactored...
+#
+
+
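+# vector rounding so the rounded tuples can be used as dict keys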
+def rvec3d(v): return round(v[0], 6), round(v[1], 6), round(v[2], 6)
+def rvec2d(v): return round(v[0], 6), round(v[1], 6)
+
+def write(filename, scene, ob, \
+ EXPORT_APPLY_MODIFIERS= True,\
+ EXPORT_NORMALS= True,\
+ EXPORT_UV= True,\
+ EXPORT_COLORS= True\
+ ):
+
+ if not filename.lower().endswith('.ply'):
+ filename += '.ply'
+
+ if not ob:
+ raise Exception("Error, Select 1 active object")
+
+ file = open(filename, 'w')
+
+
+ #EXPORT_EDGES = Draw.Create(0)
+ """
+ is_editmode = Blender.Window.EditMode()
+ if is_editmode:
+ Blender.Window.EditMode(0, '', 0)
+
+ Window.WaitCursor(1)
+ """
+
+ #mesh = BPyMesh.getMeshFromObject(ob, None, EXPORT_APPLY_MODIFIERS, False, scn) # XXX
+ if EXPORT_APPLY_MODIFIERS:
+ mesh = ob.create_mesh(True, 'PREVIEW')
+ else:
+ mesh = ob.data
+
+ if not mesh:
+		raise Exception("Error, could not get mesh data from active object")
+
+ # mesh.transform(ob.matrixWorld) # XXX
+
+ faceUV = len(mesh.uv_textures) > 0
+ vertexUV = len(mesh.sticky) > 0
+ vertexColors = len(mesh.vertex_colors) > 0
+
+ if (not faceUV) and (not vertexUV): EXPORT_UV = False
+ if not vertexColors: EXPORT_COLORS = False
+
+ if not EXPORT_UV: faceUV = vertexUV = False
+ if not EXPORT_COLORS: vertexColors = False
+
+ if faceUV:
+ active_uv_layer = None
+ for lay in mesh.uv_textures:
+ if lay.active:
+ active_uv_layer= lay.data
+ break
+ if not active_uv_layer:
+ EXPORT_UV = False
+ faceUV = None
+
+ if vertexColors:
+ active_col_layer = None
+ for lay in mesh.vertex_colors:
+ if lay.active:
+ active_col_layer= lay.data
+ if not active_col_layer:
+ EXPORT_COLORS = False
+ vertexColors = None
+
+	# in case these stay unset below
+ color = uvcoord = uvcoord_key = normal = normal_key = None
+
+ mesh_verts = mesh.verts # save a lookup
+	ply_verts = [] # list of (vertex index, normal, uvcoord, color) tuples
+ # vdict = {} # (index, normal, uv) -> new index
+ vdict = [{} for i in range(len(mesh_verts))]
+ ply_faces = [[] for f in range(len(mesh.faces))]
+ vert_count = 0
+ for i, f in enumerate(mesh.faces):
+
+
+ smooth = f.smooth
+ if not smooth:
+ normal = tuple(f.normal)
+ normal_key = rvec3d(normal)
+
+ if faceUV:
+ uv = active_uv_layer[i]
+ uv = uv.uv1, uv.uv2, uv.uv3, uv.uv4 # XXX - crufty :/
+ if vertexColors:
+ col = active_col_layer[i]
+ col = col.color1, col.color2, col.color3, col.color4
+
+ f_verts= f.verts
+
+ pf= ply_faces[i]
+ for j, vidx in enumerate(f_verts):
+ v = mesh_verts[vidx]
+
+ if smooth:
+ normal= tuple(v.normal)
+ normal_key = rvec3d(normal)
+
+ if faceUV:
+ uvcoord= uv[j][0], 1.0-uv[j][1]
+ uvcoord_key = rvec2d(uvcoord)
+ elif vertexUV:
+ uvcoord= v.uvco[0], 1.0-v.uvco[1]
+ uvcoord_key = rvec2d(uvcoord)
+
+ if vertexColors:
+ color= col[j]
+ color= int(color[0]*255.0), int(color[1]*255.0), int(color[2]*255.0)
+
+
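+			# a vertex is split whenever its (normal, uv, color) combination differs between faces, since PLY stores these per vertex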
+ key = normal_key, uvcoord_key, color
+
+ vdict_local = vdict[vidx]
+ pf_vidx = vdict_local.get(key) # Will be None initially
+
+			if pf_vidx is None: # this (normal, uv, color) key hasn't been seen for this vertex yet
+				pf_vidx = vdict_local[key] = vert_count
+ ply_verts.append((vidx, normal, uvcoord, color))
+ vert_count += 1
+
+ pf.append(pf_vidx)
+
+ file.write('ply\n')
+ file.write('format ascii 1.0\n')
+ version = "2.5" # Blender.Get('version')
+ file.write('comment Created by Blender3D %s - www.blender.org, source file: %s\n' % (version, bpy.data.filename.split('/')[-1].split('\\')[-1] ))
+
+ file.write('element vertex %d\n' % len(ply_verts))
+
+ file.write('property float x\n')
+ file.write('property float y\n')
+ file.write('property float z\n')
+
+ # XXX
+ """
+ if EXPORT_NORMALS:
+ file.write('property float nx\n')
+ file.write('property float ny\n')
+ file.write('property float nz\n')
+ """
+ if EXPORT_UV:
+ file.write('property float s\n')
+ file.write('property float t\n')
+ if EXPORT_COLORS:
+ file.write('property uchar red\n')
+ file.write('property uchar green\n')
+ file.write('property uchar blue\n')
+
+ file.write('element face %d\n' % len(mesh.faces))
+ file.write('property list uchar uint vertex_indices\n')
+ file.write('end_header\n')
+
+ for i, v in enumerate(ply_verts):
+ file.write('%.6f %.6f %.6f ' % tuple(mesh_verts[v[0]].co)) # co
+ """
+ if EXPORT_NORMALS:
+ file.write('%.6f %.6f %.6f ' % v[1]) # no
+ """
+ if EXPORT_UV: file.write('%.6f %.6f ' % v[2]) # uv
+ if EXPORT_COLORS: file.write('%u %u %u' % v[3]) # col
+ file.write('\n')
+
+ for pf in ply_faces:
+ if len(pf)==3: file.write('3 %d %d %d\n' % tuple(pf))
+ else: file.write('4 %d %d %d %d\n' % tuple(pf))
+
+ file.close()
+ print("writing", filename, "done")
+
+ if EXPORT_APPLY_MODIFIERS:
+ bpy.data.remove_mesh(mesh)
+
+ # XXX
+ """
+ if is_editmode:
+ Blender.Window.EditMode(1, '', 0)
+ """
+
+class EXPORT_OT_ply(bpy.types.Operator):
+	'''Export a single object as a Stanford PLY with normals, colours and texture coordinates.'''
+ __idname__ = "export.ply"
+ __label__ = "Export PLY"
+
+ # List of operator properties, the attributes will be assigned
+ # to the class instance from the operator settings before calling.
+
+ __props__ = [
+ bpy.props.StringProperty(attr="path", name="File Path", description="File path used for exporting the PLY file", maxlen= 1024, default= ""),
+ bpy.props.BoolProperty(attr="use_modifiers", name="Apply Modifiers", description="Apply Modifiers to the exported mesh", default= True),
+ bpy.props.BoolProperty(attr="use_normals", name="Export Normals", description="Export Normals for smooth and hard shaded faces", default= True),
+		bpy.props.BoolProperty(attr="use_uvs", name="Export UVs", description="Export the active UV layer", default= True),
+		bpy.props.BoolProperty(attr="use_colors", name="Export Vertex Colors", description="Export the active vertex color layer", default= True)
+ ]
+
+ def poll(self, context):
+		return context.active_object is not None
+
+ def execute(self, context):
+ # print("Selected: " + context.active_object.name)
+
+ if not self.path:
+			raise Exception("path not set")
+
+ write(self.path, context.scene, context.active_object,\
+ EXPORT_APPLY_MODIFIERS = self.use_modifiers,
+ EXPORT_NORMALS = self.use_normals,
+ EXPORT_UV = self.use_uvs,
+ EXPORT_COLORS = self.use_colors,
+ )
+
+ return ('FINISHED',)
+
+ def invoke(self, context, event):
+ wm = context.manager
+ wm.add_fileselect(self.__operator__)
+ return ('RUNNING_MODAL',)
+
+
+bpy.ops.add(EXPORT_OT_ply)
+
+if __name__ == "__main__":
+ bpy.ops.EXPORT_OT_ply(path="/tmp/test.ply")
+
+
diff --git a/release/scripts/io/export_x3d.py b/release/scripts/io/export_x3d.py
new file mode 100644
index 00000000000..db29afc7d6d
--- /dev/null
+++ b/release/scripts/io/export_x3d.py
@@ -0,0 +1,1240 @@
+#!BPY
+""" Registration info for Blender menus:
+Name: 'X3D Extensible 3D (.x3d)...'
+Blender: 245
+Group: 'Export'
+Tooltip: 'Export selection to Extensible 3D file (.x3d)'
+"""
+
+__author__ = ("Bart", "Campbell Barton")
+__email__ = ["Bart, bart:neeneenee*de"]
+__url__ = ["Author's (Bart) homepage, http://www.neeneenee.de/vrml"]
+__version__ = "2006/01/17"
+__bpydoc__ = """\
+This script exports to X3D format.
+
+Usage:
+
+Run this script from "File->Export" menu. A pop-up will ask whether you
+want to export only selected or all relevant objects.
+
+Known issues:<br>
+ Doesn't handle multiple materials (don't use material indices);<br>
+ Doesn't handle multiple UV textures on a single mesh (create a mesh for each texture);<br>
+ Can't get the texture array associated with material * not the UV ones;
+"""
+
+
+# $Id$
+#
+#------------------------------------------------------------------------
+# X3D exporter for blender 2.36 or above
+#
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+#
+
+####################################
+# Library dependencies
+####################################
+
+import math
+import os
+
+import bpy
+import Mathutils
+
+from export_3ds import create_derived_objects, free_derived_objects
+
+# import Blender
+# from Blender import Object, Lamp, Draw, Image, Text, sys, Mesh
+# from Blender.Scene import Render
+# import BPyObject
+# import BPyMesh
+
+#
+DEG2RAD=0.017453292519943295
+MATWORLD= Mathutils.RotationMatrix(-90, 4, 'x')
+
+####################################
+# Global Variables
+####################################
+
+filename = ""
+# filename = Blender.Get('filename')
+_safeOverwrite = True
+
+extension = ''
+
+##########################################################
+# Functions for writing output file
+##########################################################
+
+class x3d_class:
+
+ def __init__(self, filename):
+ #--- public you can change these ---
+ self.writingcolor = 0
+ self.writingtexture = 0
+ self.writingcoords = 0
+ self.proto = 1
+ self.matonly = 0
+ self.share = 0
+ self.billnode = 0
+ self.halonode = 0
+ self.collnode = 0
+ self.tilenode = 0
+ self.verbose=2 # level of verbosity in console 0-none, 1-some, 2-most
+ self.cp=3 # decimals for material color values 0.000 - 1.000
+ self.vp=3 # decimals for vertex coordinate values 0.000 - n.000
+ self.tp=3 # decimals for texture coordinate values 0.000 - 1.000
+ self.it=3
+
+ #--- class private don't touch ---
+ self.texNames={} # dictionary of textureNames
+		self.matNames={} # dictionary of materialNames
+ self.meshNames={} # dictionary of meshNames
+ self.indentLevel=0 # keeps track of current indenting
+ self.filename=filename
+ self.file = None
+ if filename.lower().endswith('.x3dz'):
+ try:
+ import gzip
+ self.file = gzip.open(filename, "w")
+ except:
+ print("failed to import compression modules, exporting uncompressed")
+ self.filename = filename[:-1] # remove trailing z
+
+		if self.file is None:
+ self.file = open(self.filename, "w")
+
+ self.bNav=0
+ self.nodeID=0
+ self.namesReserved=[ "Anchor","Appearance","Arc2D","ArcClose2D","AudioClip","Background","Billboard",
+ "BooleanFilter","BooleanSequencer","BooleanToggle","BooleanTrigger","Box","Circle2D",
+ "Collision","Color","ColorInterpolator","ColorRGBA","component","Cone","connect",
+ "Contour2D","ContourPolyline2D","Coordinate","CoordinateDouble","CoordinateInterpolator",
+ "CoordinateInterpolator2D","Cylinder","CylinderSensor","DirectionalLight","Disk2D",
+ "ElevationGrid","EspduTransform","EXPORT","ExternProtoDeclare","Extrusion","field",
+ "fieldValue","FillProperties","Fog","FontStyle","GeoCoordinate","GeoElevationGrid",
+ "GeoLocationLocation","GeoLOD","GeoMetadata","GeoOrigin","GeoPositionInterpolator",
+ "GeoTouchSensor","GeoViewpoint","Group","HAnimDisplacer","HAnimHumanoid","HAnimJoint",
+ "HAnimSegment","HAnimSite","head","ImageTexture","IMPORT","IndexedFaceSet",
+ "IndexedLineSet","IndexedTriangleFanSet","IndexedTriangleSet","IndexedTriangleStripSet",
+ "Inline","IntegerSequencer","IntegerTrigger","IS","KeySensor","LineProperties","LineSet",
+ "LoadSensor","LOD","Material","meta","MetadataDouble","MetadataFloat","MetadataInteger",
+ "MetadataSet","MetadataString","MovieTexture","MultiTexture","MultiTextureCoordinate",
+ "MultiTextureTransform","NavigationInfo","Normal","NormalInterpolator","NurbsCurve",
+ "NurbsCurve2D","NurbsOrientationInterpolator","NurbsPatchSurface",
+ "NurbsPositionInterpolator","NurbsSet","NurbsSurfaceInterpolator","NurbsSweptSurface",
+ "NurbsSwungSurface","NurbsTextureCoordinate","NurbsTrimmedSurface","OrientationInterpolator",
+ "PixelTexture","PlaneSensor","PointLight","PointSet","Polyline2D","Polypoint2D",
+ "PositionInterpolator","PositionInterpolator2D","ProtoBody","ProtoDeclare","ProtoInstance",
+ "ProtoInterface","ProximitySensor","ReceiverPdu","Rectangle2D","ROUTE","ScalarInterpolator",
+ "Scene","Script","Shape","SignalPdu","Sound","Sphere","SphereSensor","SpotLight","StaticGroup",
+ "StringSensor","Switch","Text","TextureBackground","TextureCoordinate","TextureCoordinateGenerator",
+ "TextureTransform","TimeSensor","TimeTrigger","TouchSensor","Transform","TransmitterPdu",
+ "TriangleFanSet","TriangleSet","TriangleSet2D","TriangleStripSet","Viewpoint","VisibilitySensor",
+ "WorldInfo","X3D","XvlShell","VertexShader","FragmentShader","MultiShaderAppearance","ShaderAppearance" ]
+ self.namesStandard=[ "Empty","Empty.000","Empty.001","Empty.002","Empty.003","Empty.004","Empty.005",
+ "Empty.006","Empty.007","Empty.008","Empty.009","Empty.010","Empty.011","Empty.012",
+ "Scene.001","Scene.002","Scene.003","Scene.004","Scene.005","Scene.06","Scene.013",
+ "Scene.006","Scene.007","Scene.008","Scene.009","Scene.010","Scene.011","Scene.012",
+ "World","World.000","World.001","World.002","World.003","World.004","World.005" ]
+ self.namesFog=[ "","LINEAR","EXPONENTIAL","" ]
+
+##########################################################
+# Writing nodes routines
+##########################################################
+
+ def writeHeader(self):
+ #bfile = sys.expandpath( Blender.Get('filename') ).replace('<', '&lt').replace('>', '&gt')
+ bfile = self.filename.replace('<', '&lt;').replace('>', '&gt;') # use outfile name
+ self.file.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
+ self.file.write("<!DOCTYPE X3D PUBLIC \"ISO//Web3D//DTD X3D 3.0//EN\" \"http://www.web3d.org/specifications/x3d-3.0.dtd\">\n")
+ self.file.write("<X3D version=\"3.0\" profile=\"Immersive\" xmlns:xsd=\"http://www.w3.org/2001/XMLSchema-instance\" xsd:noNamespaceSchemaLocation=\"http://www.web3d.org/specifications/x3d-3.0.xsd\">\n")
+ self.file.write("<head>\n")
+ self.file.write("\t<meta name=\"filename\" content=\"%s\" />\n" % os.path.basename(bfile))
+ # self.file.write("\t<meta name=\"filename\" content=\"%s\" />\n" % sys.basename(bfile))
+ self.file.write("\t<meta name=\"generator\" content=\"Blender %s\" />\n" % '2.5')
+ # self.file.write("\t<meta name=\"generator\" content=\"Blender %s\" />\n" % Blender.Get('version'))
+ self.file.write("\t<meta name=\"translator\" content=\"X3D exporter v1.55 (2006/01/17)\" />\n")
+ self.file.write("</head>\n")
+ self.file.write("<Scene>\n")
+
+ # This functionality is poorly defined, disabling for now - campbell
+ '''
+ def writeInline(self):
+ inlines = Blender.Scene.Get()
+ allinlines = len(inlines)
+ if scene != inlines[0]:
+ return
+ else:
+ for i in xrange(allinlines):
+ nameinline=inlines[i].name
+ if (nameinline not in self.namesStandard) and (i > 0):
+ self.file.write("<Inline DEF=\"%s\" " % (self.cleanStr(nameinline)))
+ nameinline = nameinline+".x3d"
+ self.file.write("url=\"%s\" />" % nameinline)
+ self.file.write("\n\n")
+
+
+ def writeScript(self):
+ textEditor = Blender.Text.Get()
+ alltext = len(textEditor)
+ for i in xrange(alltext):
+ nametext = textEditor[i].name
+ nlines = textEditor[i].getNLines()
+ if (self.proto == 1):
+ if (nametext == "proto" or nametext == "proto.js" or nametext == "proto.txt") and (nlines != None):
+ nalllines = len(textEditor[i].asLines())
+ alllines = textEditor[i].asLines()
+ for j in xrange(nalllines):
+ self.writeIndented(alllines[j] + "\n")
+ elif (self.proto == 0):
+ if (nametext == "route" or nametext == "route.js" or nametext == "route.txt") and (nlines != None):
+ nalllines = len(textEditor[i].asLines())
+ alllines = textEditor[i].asLines()
+ for j in xrange(nalllines):
+ self.writeIndented(alllines[j] + "\n")
+ self.writeIndented("\n")
+ '''
+
+ def writeViewpoint(self, ob, mat, scene):
+ context = scene.render_data
+ # context = scene.render
+ ratio = float(context.resolution_x)/float(context.resolution_y)
+ # ratio = float(context.imageSizeY())/float(context.imageSizeX())
+ lens = (360* (math.atan(ratio *16 / ob.data.lens) / math.pi))*(math.pi/180)
+ # lens = (360* (math.atan(ratio *16 / ob.data.getLens()) / math.pi))*(math.pi/180)
+ lens = min(lens, math.pi)
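+ # Editor's note (a sketch of the math above, not original code): the
+ # expression reduces to fov = 2*atan(ratio*16/lens), clamped to pi.
+ # E.g. assuming a 35mm lens and a 4:3 image:
+ # 2*math.atan((4.0/3.0)*16/35) # ~1.10 rad (~63 degrees)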
+
+ # get the camera location, subtract 90 degrees from X to orient like X3D does
+ # mat = ob.matrixWorld - mat is now passed!
+
+ loc = self.rotatePointForVRML(mat.translationPart())
+ rot = mat.toEuler()
+ rot = (((rot[0]-90)), rot[1], rot[2])
+ # rot = (((rot[0]-90)*DEG2RAD), rot[1]*DEG2RAD, rot[2]*DEG2RAD)
+ nRot = self.rotatePointForVRML( rot )
+ # convert to Quaternion and to Angle Axis
+ Q = self.eulerToQuaternions(nRot[0], nRot[1], nRot[2])
+ Q1 = self.multiplyQuaternions(Q[0], Q[1])
+ Qf = self.multiplyQuaternions(Q1, Q[2])
+ angleAxis = self.quaternionToAngleAxis(Qf)
+ self.file.write("<Viewpoint DEF=\"%s\" " % (self.cleanStr(ob.name)))
+ self.file.write("description=\"%s\" " % (ob.name))
+ self.file.write("centerOfRotation=\"0 0 0\" ")
+ self.file.write("position=\"%3.2f %3.2f %3.2f\" " % (loc[0], loc[1], loc[2]))
+ self.file.write("orientation=\"%3.2f %3.2f %3.2f %3.2f\" " % (angleAxis[0], angleAxis[1], -angleAxis[2], angleAxis[3]))
+ self.file.write("fieldOfView=\"%.3f\" />\n\n" % (lens))
+
+ def writeFog(self, world):
+ if world:
+ mtype = world.mist.falloff
+ # mtype = world.getMistype()
+ mparam = world.mist
+ # mparam = world.getMist()
+ grd = world.horizon_color
+ # grd = world.getHor()
+ grd0, grd1, grd2 = grd[0], grd[1], grd[2]
+ else:
+ return
+ if (mtype == 'LINEAR' or mtype == 'INVERSE_QUADRATIC'):
+ mtype = 1 if mtype == 'LINEAR' else 2
+ # if (mtype == 1 or mtype == 2):
+ self.file.write("<Fog fogType=\"%s\" " % self.namesFog[mtype])
+ self.file.write("color=\"%s %s %s\" " % (round(grd0,self.cp), round(grd1,self.cp), round(grd2,self.cp)))
+ self.file.write("visibilityRange=\"%s\" />\n\n" % round(mparam[2],self.cp))
+ else:
+ return
+
+ def writeNavigationInfo(self, scene):
+ self.file.write('<NavigationInfo headlight="FALSE" visibilityLimit="0.0" type=\'"EXAMINE","ANY"\' avatarSize="0.25, 1.75, 0.75" />\n')
+
+ def writeSpotLight(self, ob, mtx, lamp, world):
+ safeName = self.cleanStr(ob.name)
+ if world:
+ ambi = world.ambient_color
+ # ambi = world.amb
+ ambientIntensity = ((float(ambi[0] + ambi[1] + ambi[2]))/3)/2.5
+ else:
+ ambi = 0
+ ambientIntensity = 0
+
+ # compute cutoff and beamwidth
+ intensity=min(lamp.energy/1.75,1.0)
+ beamWidth=((lamp.spot_size*math.pi)/180.0)*.37;
+ # beamWidth=((lamp.spotSize*math.pi)/180.0)*.37;
+ cutOffAngle=beamWidth*1.3
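+ # Editor's worked example (illustrative, assuming spot_size is in degrees):
+ # spot_size = 45 gives beamWidth = (45*pi/180)*0.37 ~= 0.291 rad and
+ # cutOffAngle = 0.291*1.3 ~= 0.378 rad.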
+
+ dx,dy,dz=self.computeDirection(mtx)
+ # note -dx seems to equal om[3][0]
+ # note -dz seems to equal om[3][1]
+ # note dy seems to equal om[3][2]
+
+ #location=(ob.matrixWorld*MATWORLD).translationPart() # now passed
+ location=(mtx*MATWORLD).translationPart()
+
+ radius = lamp.distance*math.cos(beamWidth)
+ # radius = lamp.dist*math.cos(beamWidth)
+ self.file.write("<SpotLight DEF=\"%s\" " % safeName)
+ self.file.write("radius=\"%s\" " % (round(radius,self.cp)))
+ self.file.write("ambientIntensity=\"%s\" " % (round(ambientIntensity,self.cp)))
+ self.file.write("intensity=\"%s\" " % (round(intensity,self.cp)))
+ self.file.write("color=\"%s %s %s\" " % (round(lamp.color[0],self.cp), round(lamp.color[1],self.cp), round(lamp.color[2],self.cp)))
+ # self.file.write("color=\"%s %s %s\" " % (round(lamp.col[0],self.cp), round(lamp.col[1],self.cp), round(lamp.col[2],self.cp)))
+ self.file.write("beamWidth=\"%s\" " % (round(beamWidth,self.cp)))
+ self.file.write("cutOffAngle=\"%s\" " % (round(cutOffAngle,self.cp)))
+ self.file.write("direction=\"%s %s %s\" " % (round(dx,3),round(dy,3),round(dz,3)))
+ self.file.write("location=\"%s %s %s\" />\n\n" % (round(location[0],3), round(location[1],3), round(location[2],3)))
+
+
+ def writeDirectionalLight(self, ob, mtx, lamp, world):
+ safeName = self.cleanStr(ob.name)
+ if world:
+ ambi = world.ambient_color
+ # ambi = world.amb
+ ambientIntensity = ((float(ambi[0] + ambi[1] + ambi[2]))/3)/2.5
+ else:
+ ambi = 0
+ ambientIntensity = 0
+
+ intensity=min(lamp.energy/1.75,1.0)
+ (dx,dy,dz)=self.computeDirection(mtx)
+ self.file.write("<DirectionalLight DEF=\"%s\" " % safeName)
+ self.file.write("ambientIntensity=\"%s\" " % (round(ambientIntensity,self.cp)))
+ self.file.write("color=\"%s %s %s\" " % (round(lamp.color[0],self.cp), round(lamp.color[1],self.cp), round(lamp.color[2],self.cp)))
+ # self.file.write("color=\"%s %s %s\" " % (round(lamp.col[0],self.cp), round(lamp.col[1],self.cp), round(lamp.col[2],self.cp)))
+ self.file.write("intensity=\"%s\" " % (round(intensity,self.cp)))
+ self.file.write("direction=\"%s %s %s\" />\n\n" % (round(dx,4),round(dy,4),round(dz,4)))
+
+ def writePointLight(self, ob, mtx, lamp, world):
+ safeName = self.cleanStr(ob.name)
+ if world:
+ ambi = world.ambient_color
+ # ambi = world.amb
+ ambientIntensity = ((float(ambi[0] + ambi[1] + ambi[2]))/3)/2.5
+ else:
+ ambi = 0
+ ambientIntensity = 0
+
+ # location=(ob.matrixWorld*MATWORLD).translationPart() # now passed
+ location= (mtx*MATWORLD).translationPart()
+
+ self.file.write("<PointLight DEF=\"%s\" " % safeName)
+ self.file.write("ambientIntensity=\"%s\" " % (round(ambientIntensity,self.cp)))
+ self.file.write("color=\"%s %s %s\" " % (round(lamp.color[0],self.cp), round(lamp.color[1],self.cp), round(lamp.color[2],self.cp)))
+ # self.file.write("color=\"%s %s %s\" " % (round(lamp.col[0],self.cp), round(lamp.col[1],self.cp), round(lamp.col[2],self.cp)))
+ self.file.write("intensity=\"%s\" " % (round( min(lamp.energy/1.75,1.0) ,self.cp)))
+ self.file.write("radius=\"%s\" " % lamp.distance )
+ # self.file.write("radius=\"%s\" " % lamp.dist )
+ self.file.write("location=\"%s %s %s\" />\n\n" % (round(location[0],3), round(location[1],3), round(location[2],3)))
+ '''
+ def writeNode(self, ob, mtx):
+ obname=str(ob.name)
+ if obname in self.namesStandard:
+ return
+ else:
+ dx,dy,dz = self.computeDirection(mtx)
+ # location=(ob.matrixWorld*MATWORLD).translationPart()
+ location=(mtx*MATWORLD).translationPart()
+ self.writeIndented("<%s\n" % obname,1)
+ self.writeIndented("direction=\"%s %s %s\"\n" % (round(dx,3),round(dy,3),round(dz,3)))
+ self.writeIndented("location=\"%s %s %s\"\n" % (round(location[0],3), round(location[1],3), round(location[2],3)))
+ self.writeIndented("/>\n",-1)
+ self.writeIndented("\n")
+ '''
+ def secureName(self, name):
+ name = name + str(self.nodeID)
+ self.nodeID=self.nodeID+1
+ if len(name) <= 3:
+ newname = "_" + str(self.nodeID)
+ return "%s" % (newname)
+ else:
+ for bad in ['"','#',"'",',','.','[','\\',']','{','}']:
+ name=name.replace(bad,'_')
+ if name in self.namesReserved:
+ newname = name[0:3] + "_" + str(self.nodeID)
+ return "%s" % (newname)
+ elif name[0].isdigit():
+ newname = "_" + name + str(self.nodeID)
+ return "%s" % (newname)
+ else:
+ newname = name
+ return "%s" % (newname)
+
+ def writeIndexedFaceSet(self, ob, mesh, mtx, world, EXPORT_TRI = False):
+ imageMap={} # set of used images
+ sided={} # 'one':cnt , 'two':cnt
+ vColors={} # 'multi':1
+ meshName = self.cleanStr(ob.name)
+
+ meshME = self.cleanStr(ob.data.name) # We don't care if it's the mesh name or not
+ # meshME = self.cleanStr(ob.getData(mesh=1).name) # We dont care if its the mesh name or not
+ if len(mesh.faces) == 0: return
+ mode = []
+ # mode = 0
+ if mesh.active_uv_texture:
+ # if mesh.faceUV:
+ for face in mesh.active_uv_texture.data:
+ # for face in mesh.faces:
+ if face.halo and 'HALO' not in mode:
+ mode += ['HALO']
+ if face.billboard and 'BILLBOARD' not in mode:
+ mode += ['BILLBOARD']
+ if face.object_color and 'OBJECT_COLOR' not in mode:
+ mode += ['OBJECT_COLOR']
+ if face.collision and 'COLLISION' not in mode:
+ mode += ['COLLISION']
+ # mode |= face.mode
+
+ if 'HALO' in mode and self.halonode == 0:
+ # if mode & Mesh.FaceModes.HALO and self.halonode == 0:
+ self.writeIndented("<Billboard axisOfRotation=\"0 0 0\">\n",1)
+ self.halonode = 1
+ elif 'BILLBOARD' in mode and self.billnode == 0:
+ # elif mode & Mesh.FaceModes.BILLBOARD and self.billnode == 0:
+ self.writeIndented("<Billboard axisOfRotation=\"0 1 0\">\n",1)
+ self.billnode = 1
+ elif 'OBJECT_COLOR' in mode and self.matonly == 0:
+ # elif mode & Mesh.FaceModes.OBCOL and self.matonly == 0:
+ self.matonly = 1
+ # TF_TILES is marked as deprecated in DNA_meshdata_types.h
+ # elif mode & Mesh.FaceModes.TILES and self.tilenode == 0:
+ # self.tilenode = 1
+ elif 'COLLISION' not in mode and self.collnode == 0:
+ # elif not mode & Mesh.FaceModes.DYNAMIC and self.collnode == 0:
+ self.writeIndented("<Collision enabled=\"false\">\n",1)
+ self.collnode = 1
+
+ nIFSCnt=self.countIFSSetsNeeded(mesh, imageMap, sided, vColors)
+
+ if nIFSCnt > 1:
+ self.writeIndented("<Group DEF=\"%s%s\">\n" % ("G_", meshName),1)
+
+ if 'two' in sided and sided['two'] > 0:
+ bTwoSided=1
+ else:
+ bTwoSided=0
+
+ # mtx = ob.matrixWorld * MATWORLD # mtx is now passed
+ mtx = mtx * MATWORLD
+
+ loc= mtx.translationPart()
+ sca= mtx.scalePart()
+ quat = mtx.toQuat()
+ rot= quat.axis
+
+ self.writeIndented('<Transform DEF="%s" translation="%.6f %.6f %.6f" scale="%.6f %.6f %.6f" rotation="%.6f %.6f %.6f %.6f">\n' % \
+ (meshName, loc[0], loc[1], loc[2], sca[0], sca[1], sca[2], rot[0], rot[1], rot[2], quat.angle) )
+ # self.writeIndented('<Transform DEF="%s" translation="%.6f %.6f %.6f" scale="%.6f %.6f %.6f" rotation="%.6f %.6f %.6f %.6f">\n' % \
+ # (meshName, loc[0], loc[1], loc[2], sca[0], sca[1], sca[2], rot[0], rot[1], rot[2], quat.angle*DEG2RAD) )
+
+ self.writeIndented("<Shape>\n",1)
+ maters=mesh.materials
+ hasImageTexture=0
+ issmooth=0
+
+ if len(maters) > 0 or mesh.active_uv_texture:
+ # if len(maters) > 0 or mesh.faceUV:
+ self.writeIndented("<Appearance>\n", 1)
+ # right now this script can only handle a single material per mesh.
+ if len(maters) >= 1:
+ mat=maters[0]
+ # matFlags = mat.getMode()
+ if not mat.face_texture:
+ # if not matFlags & Blender.Material.Modes['TEXFACE']:
+ self.writeMaterial(mat, self.cleanStr(mat.name,''), world)
+ # self.writeMaterial(mat, self.cleanStr(maters[0].name,''), world)
+ if len(maters) > 1:
+ print("Warning: mesh named %s has multiple materials" % meshName)
+ print("Warning: only one material per object handled")
+
+ #-- textures
+ face = None
+ if mesh.active_uv_texture:
+ # if mesh.faceUV:
+ for face in mesh.active_uv_texture.data:
+ # for face in mesh.faces:
+ if face.image:
+ # if (hasImageTexture == 0) and (face.image):
+ self.writeImageTexture(face.image)
+ # hasImageTexture=1 # keep track of face texture
+ break
+ if self.tilenode == 1 and face and face.image:
+ # if self.tilenode == 1:
+ self.writeIndented("<TextureTransform scale=\"%s %s\" />\n" % (face.image.xrep, face.image.yrep))
+ self.tilenode = 0
+ self.writeIndented("</Appearance>\n", -1)
+
+ #-- IndexedFaceSet or IndexedLineSet
+
+ # user selected BOUNDS=1, SOLID=3, SHARED=4, or TEXTURE=5
+ ifStyle="IndexedFaceSet"
+ # look up mesh name, use it if available
+ if meshME in self.meshNames:
+ self.writeIndented("<%s USE=\"ME_%s\">" % (ifStyle, meshME), 1)
+ self.meshNames[meshME]+=1
+ else:
+ if int(mesh.users) > 1:
+ self.writeIndented("<%s DEF=\"ME_%s\" " % (ifStyle, meshME), 1)
+ self.meshNames[meshME]=1
+ else:
+ self.writeIndented("<%s " % ifStyle, 1)
+
+ if bTwoSided == 1:
+ self.file.write("solid=\"false\" ")
+ else:
+ self.file.write("solid=\"true\" ")
+
+ for face in mesh.faces:
+ if face.smooth:
+ issmooth=1
+ break
+ if issmooth==1:
+ creaseAngle=(mesh.autosmooth_angle)*(math.pi/180.0)
+ # creaseAngle=(mesh.degr)*(math.pi/180.0)
+ self.file.write("creaseAngle=\"%s\" " % (round(creaseAngle,self.cp)))
+
+ #--- output textureCoordinates if UV texture used
+ if mesh.active_uv_texture:
+ # if mesh.faceUV:
+ if self.matonly == 1 and self.share == 1:
+ self.writeFaceColors(mesh)
+ elif hasImageTexture == 1:
+ self.writeTextureCoordinates(mesh)
+ #--- output coordinates
+ self.writeCoordinates(ob, mesh, meshName, EXPORT_TRI)
+
+ self.writingcoords = 1
+ self.writingtexture = 1
+ self.writingcolor = 1
+ self.writeCoordinates(ob, mesh, meshName, EXPORT_TRI)
+
+ #--- output textureCoordinates if UV texture used
+ if mesh.active_uv_texture:
+ # if mesh.faceUV:
+ if hasImageTexture == 1:
+ self.writeTextureCoordinates(mesh)
+ elif self.matonly == 1 and self.share == 1:
+ self.writeFaceColors(mesh)
+ #--- output vertexColors
+ self.matonly = 0
+ self.share = 0
+
+ self.writingcoords = 0
+ self.writingtexture = 0
+ self.writingcolor = 0
+ #--- output closing braces
+ self.writeIndented("</%s>\n" % ifStyle, -1)
+ self.writeIndented("</Shape>\n", -1)
+ self.writeIndented("</Transform>\n", -1)
+
+ if self.halonode == 1:
+ self.writeIndented("</Billboard>\n", -1)
+ self.halonode = 0
+
+ if self.billnode == 1:
+ self.writeIndented("</Billboard>\n", -1)
+ self.billnode = 0
+
+ if self.collnode == 1:
+ self.writeIndented("</Collision>\n", -1)
+ self.collnode = 0
+
+ if nIFSCnt > 1:
+ self.writeIndented("</Group>\n", -1)
+
+ self.file.write("\n")
+
+ def writeCoordinates(self, ob, mesh, meshName, EXPORT_TRI = False):
+ # create vertex list and pre rotate -90 degrees X for VRML
+
+ if self.writingcoords == 0:
+ self.file.write('coordIndex="')
+ for face in mesh.faces:
+ fv = face.verts
+ # fv = face.v
+
+ if len(fv)==3:
+ # if len(face)==3:
+ self.file.write("%i %i %i -1, " % (fv[0], fv[1], fv[2]))
+ # self.file.write("%i %i %i -1, " % (fv[0].index, fv[1].index, fv[2].index))
+ else:
+ if EXPORT_TRI:
+ self.file.write("%i %i %i -1, " % (fv[0], fv[1], fv[2]))
+ # self.file.write("%i %i %i -1, " % (fv[0].index, fv[1].index, fv[2].index))
+ self.file.write("%i %i %i -1, " % (fv[0], fv[2], fv[3]))
+ # self.file.write("%i %i %i -1, " % (fv[0].index, fv[2].index, fv[3].index))
+ else:
+ self.file.write("%i %i %i %i -1, " % (fv[0], fv[1], fv[2], fv[3]))
+ # self.file.write("%i %i %i %i -1, " % (fv[0].index, fv[1].index, fv[2].index, fv[3].index))
+
+ self.file.write("\">\n")
+ else:
+ #-- vertices
+ # mesh.transform(ob.matrixWorld)
+ self.writeIndented("<Coordinate DEF=\"%s%s\" \n" % ("coord_",meshName), 1)
+ self.file.write("\t\t\t\tpoint=\"")
+ for v in mesh.verts:
+ self.file.write("%.6f %.6f %.6f, " % tuple(v.co))
+ self.file.write("\" />")
+ self.writeIndented("\n", -1)
+
+ def writeTextureCoordinates(self, mesh):
+ texCoordList=[]
+ texIndexList=[]
+ j=0
+
+ for face in mesh.active_uv_texture.data:
+ # for face in mesh.faces:
+ uvs = face.uv
+ # uvs = [face.uv1, face.uv2, face.uv3, face.uv4] if face.verts[3] else [face.uv1, face.uv2, face.uv3]
+
+ for uv in uvs:
+ # for uv in face.uv:
+ texIndexList.append(j)
+ texCoordList.append(uv)
+ j=j+1
+ texIndexList.append(-1)
+ if self.writingtexture == 0:
+ self.file.write("\n\t\t\ttexCoordIndex=\"")
+ texIndxStr=""
+ for i in range(len(texIndexList)):
+ texIndxStr = texIndxStr + "%d, " % texIndexList[i]
+ if texIndexList[i]==-1:
+ self.file.write(texIndxStr)
+ texIndxStr=""
+ self.file.write("\"\n\t\t\t")
+ else:
+ self.writeIndented("<TextureCoordinate point=\"", 1)
+ for i in range(len(texCoordList)):
+ self.file.write("%s %s, " % (round(texCoordList[i][0],self.tp), round(texCoordList[i][1],self.tp)))
+ self.file.write("\" />")
+ self.writeIndented("\n", -1)
+
+ def writeFaceColors(self, mesh):
+ if self.writingcolor == 0:
+ self.file.write("colorPerVertex=\"false\" ")
+ elif mesh.active_vertex_color:
+ # else:
+ self.writeIndented("<Color color=\"", 1)
+ for face in mesh.active_vertex_color.data:
+ c = face.color1
+ if self.verbose > 2:
+ print("Debug: face.col r=%d g=%d b=%d" % (c[0], c[1], c[2]))
+ # print("Debug: face.col r=%d g=%d b=%d" % (c.r, c.g, c.b))
+ aColor = self.rgbToFS(c)
+ self.file.write("%s, " % aColor)
+
+ # for face in mesh.faces:
+ # if face.col:
+ # c=face.col[0]
+ # if self.verbose > 2:
+ # print("Debug: face.col r=%d g=%d b=%d" % (c.r, c.g, c.b))
+ # aColor = self.rgbToFS(c)
+ # self.file.write("%s, " % aColor)
+ self.file.write("\" />")
+ self.writeIndented("\n",-1)
+
+ def writeMaterial(self, mat, matName, world):
+ # look up material name, use it if available
+ if matName in self.matNames:
+ self.writeIndented("<Material USE=\"MA_%s\" />\n" % matName)
+ self.matNames[matName]+=1
+ return
+
+ self.matNames[matName]=1
+
+ ambient = mat.ambient/3
+ # ambient = mat.amb/3
+ diffuseR, diffuseG, diffuseB = tuple(mat.diffuse_color)
+ # diffuseR, diffuseG, diffuseB = mat.rgbCol[0], mat.rgbCol[1],mat.rgbCol[2]
+ if world:
+ ambi = world.ambient_color
+ # ambi = world.getAmb()
+ ambi0, ambi1, ambi2 = (ambi[0]*mat.ambient)*2, (ambi[1]*mat.ambient)*2, (ambi[2]*mat.ambient)*2
+ # ambi0, ambi1, ambi2 = (ambi[0]*mat.amb)*2, (ambi[1]*mat.amb)*2, (ambi[2]*mat.amb)*2
+ else:
+ ambi0, ambi1, ambi2 = 0, 0, 0
+ emisR, emisG, emisB = (diffuseR*mat.emit+ambi0)/2, (diffuseG*mat.emit+ambi1)/2, (diffuseB*mat.emit+ambi2)/2
+
+ shininess = mat.specular_hardness/512.0
+ # shininess = mat.hard/512.0
+ specR = (mat.specular_color[0]+0.001)/(1.25/(mat.specular_intensity+0.001))
+ # specR = (mat.specCol[0]+0.001)/(1.25/(mat.spec+0.001))
+ specG = (mat.specular_color[1]+0.001)/(1.25/(mat.specular_intensity+0.001))
+ # specG = (mat.specCol[1]+0.001)/(1.25/(mat.spec+0.001))
+ specB = (mat.specular_color[2]+0.001)/(1.25/(mat.specular_intensity+0.001))
+ # specB = (mat.specCol[2]+0.001)/(1.25/(mat.spec+0.001))
+ transp = 1-mat.alpha
+ # matFlags = mat.getMode()
+ if mat.shadeless:
+ # if matFlags & Blender.Material.Modes['SHADELESS']:
+ ambient = 1
+ shininess = 1 # was 'shine', a name never read when writing the node below
+ specR = emisR = diffuseR # was 'emitR'; the emissive values written below are emisR/G/B
+ specG = emisG = diffuseG
+ specB = emisB = diffuseB
+ self.writeIndented("<Material DEF=\"MA_%s\" " % matName, 1)
+ self.file.write("diffuseColor=\"%s %s %s\" " % (round(diffuseR,self.cp), round(diffuseG,self.cp), round(diffuseB,self.cp)))
+ self.file.write("specularColor=\"%s %s %s\" " % (round(specR,self.cp), round(specG,self.cp), round(specB,self.cp)))
+ self.file.write("emissiveColor=\"%s %s %s\" \n" % (round(emisR,self.cp), round(emisG,self.cp), round(emisB,self.cp)))
+ self.writeIndented("ambientIntensity=\"%s\" " % (round(ambient,self.cp)))
+ self.file.write("shininess=\"%s\" " % (round(shininess,self.cp)))
+ self.file.write("transparency=\"%s\" />" % (round(transp,self.cp)))
+ self.writeIndented("\n",-1)
+
+ def writeImageTexture(self, image):
+ name = image.name
+ filename = image.filename.split('/')[-1].split('\\')[-1]
+ if name in self.texNames:
+ self.writeIndented("<ImageTexture USE=\"%s\" />\n" % self.cleanStr(name))
+ self.texNames[name] += 1
+ return
+ else:
+ self.writeIndented("<ImageTexture DEF=\"%s\" " % self.cleanStr(name), 1)
+ self.file.write("url=\"%s\" />" % name)
+ self.writeIndented("\n",-1)
+ self.texNames[name] = 1
+
+ def writeBackground(self, world, alltextures):
+ if world: worldname = world.name
+ else: return
+ blending = (world.blend_sky, world.paper_sky, world.real_sky)
+ # blending = world.getSkytype()
+ grd = world.horizon_color
+ # grd = world.getHor()
+ grd0, grd1, grd2 = grd[0], grd[1], grd[2]
+ sky = world.zenith_color
+ # sky = world.getZen()
+ sky0, sky1, sky2 = sky[0], sky[1], sky[2]
+ mix0, mix1, mix2 = grd[0]+sky[0], grd[1]+sky[1], grd[2]+sky[2]
+ mix0, mix1, mix2 = mix0/2, mix1/2, mix2/2
+ self.file.write("<Background ")
+ if worldname not in self.namesStandard:
+ self.file.write("DEF=\"%s\" " % self.secureName(worldname))
+ # No Skytype - just Hor color
+ if blending == (0, 0, 0):
+ # if blending == 0:
+ self.file.write("groundColor=\"%s %s %s\" " % (round(grd0,self.cp), round(grd1,self.cp), round(grd2,self.cp)))
+ self.file.write("skyColor=\"%s %s %s\" " % (round(grd0,self.cp), round(grd1,self.cp), round(grd2,self.cp)))
+ # Blend Gradient
+ elif blending == (1, 0, 0):
+ # elif blending == 1:
+ self.file.write("groundColor=\"%s %s %s, " % (round(grd0,self.cp), round(grd1,self.cp), round(grd2,self.cp)))
+ self.file.write("%s %s %s\" groundAngle=\"1.57, 1.57\" " %(round(mix0,self.cp), round(mix1,self.cp), round(mix2,self.cp)))
+ self.file.write("skyColor=\"%s %s %s, " % (round(sky0,self.cp), round(sky1,self.cp), round(sky2,self.cp)))
+ self.file.write("%s %s %s\" skyAngle=\"1.57, 1.57\" " %(round(mix0,self.cp), round(mix1,self.cp), round(mix2,self.cp)))
+ # Blend+Real Gradient Inverse
+ elif blending == (1, 0, 1):
+ # elif blending == 3:
+ self.file.write("groundColor=\"%s %s %s, " % (round(sky0,self.cp), round(sky1,self.cp), round(sky2,self.cp)))
+ self.file.write("%s %s %s\" groundAngle=\"1.57, 1.57\" " %(round(mix0,self.cp), round(mix1,self.cp), round(mix2,self.cp)))
+ self.file.write("skyColor=\"%s %s %s, " % (round(grd0,self.cp), round(grd1,self.cp), round(grd2,self.cp)))
+ self.file.write("%s %s %s\" skyAngle=\"1.57, 1.57\" " %(round(mix0,self.cp), round(mix1,self.cp), round(mix2,self.cp)))
+ # Paper - just Zen Color
+ elif blending == (0, 0, 1):
+ # elif blending == 4:
+ self.file.write("groundColor=\"%s %s %s\" " % (round(sky0,self.cp), round(sky1,self.cp), round(sky2,self.cp)))
+ self.file.write("skyColor=\"%s %s %s\" " % (round(sky0,self.cp), round(sky1,self.cp), round(sky2,self.cp)))
+ # Blend+Real+Paper - komplex gradient
+ elif blending == (1, 1, 1):
+ # elif blending == 7:
+ self.writeIndented("groundColor=\"%s %s %s, " % (round(sky0,self.cp), round(sky1,self.cp), round(sky2,self.cp)))
+ self.writeIndented("%s %s %s\" groundAngle=\"1.57, 1.57\" " %(round(grd0,self.cp), round(grd1,self.cp), round(grd2,self.cp)))
+ self.writeIndented("skyColor=\"%s %s %s, " % (round(sky0,self.cp), round(sky1,self.cp), round(sky2,self.cp)))
+ self.writeIndented("%s %s %s\" skyAngle=\"1.57, 1.57\" " %(round(grd0,self.cp), round(grd1,self.cp), round(grd2,self.cp)))
+ # Any Other two colors
+ else:
+ self.file.write("groundColor=\"%s %s %s\" " % (round(grd0,self.cp), round(grd1,self.cp), round(grd2,self.cp)))
+ self.file.write("skyColor=\"%s %s %s\" " % (round(sky0,self.cp), round(sky1,self.cp), round(sky2,self.cp)))
+
+ alltexture = len(alltextures)
+
+ for i in range(alltexture):
+ tex = alltextures[i]
+
+ if tex.type != 'IMAGE' or tex.image == None:
+ continue
+
+ namemat = tex.name
+ # namemat = alltextures[i].name
+
+ pic = tex.image
+
+ # using .expandpath just in case, os.path may not expect //
+ basename = os.path.basename(pic.get_abs_filename())
+
+ pic = alltextures[i].image
+ # pic = alltextures[i].getImage()
+ if (namemat == "back") and (pic != None):
+ self.file.write("\n\tbackUrl=\"%s\" " % basename)
+ # self.file.write("\n\tbackUrl=\"%s\" " % pic.filename.split('/')[-1].split('\\')[-1])
+ elif (namemat == "bottom") and (pic != None):
+ self.writeIndented("bottomUrl=\"%s\" " % basename)
+ # self.writeIndented("bottomUrl=\"%s\" " % pic.filename.split('/')[-1].split('\\')[-1])
+ elif (namemat == "front") and (pic != None):
+ self.writeIndented("frontUrl=\"%s\" " % basename)
+ # self.writeIndented("frontUrl=\"%s\" " % pic.filename.split('/')[-1].split('\\')[-1])
+ elif (namemat == "left") and (pic != None):
+ self.writeIndented("leftUrl=\"%s\" " % basename)
+ # self.writeIndented("leftUrl=\"%s\" " % pic.filename.split('/')[-1].split('\\')[-1])
+ elif (namemat == "right") and (pic != None):
+ self.writeIndented("rightUrl=\"%s\" " % basename)
+ # self.writeIndented("rightUrl=\"%s\" " % pic.filename.split('/')[-1].split('\\')[-1])
+ elif (namemat == "top") and (pic != None):
+ self.writeIndented("topUrl=\"%s\" " % basename)
+ # self.writeIndented("topUrl=\"%s\" " % pic.filename.split('/')[-1].split('\\')[-1])
+ self.writeIndented("/>\n\n")
+
+##########################################################
+# export routine
+##########################################################
+
+ def export(self, scene, world, alltextures,\
+ EXPORT_APPLY_MODIFIERS = False,\
+ EXPORT_TRI= False,\
+ ):
+
+ print("Info: starting X3D export to " + self.filename + "...")
+ self.writeHeader()
+ # self.writeScript()
+ self.writeNavigationInfo(scene)
+ self.writeBackground(world, alltextures)
+ self.writeFog(world)
+ self.proto = 0
+
+
+ # # COPIED FROM OBJ EXPORTER
+ # if EXPORT_APPLY_MODIFIERS:
+ # temp_mesh_name = '~tmp-mesh'
+
+ # # Get the container mesh. - used for applying modifiers and non mesh objects.
+ # containerMesh = meshName = tempMesh = None
+ # for meshName in Blender.NMesh.GetNames():
+ # if meshName.startswith(temp_mesh_name):
+ # tempMesh = Mesh.Get(meshName)
+ # if not tempMesh.users:
+ # containerMesh = tempMesh
+ # if not containerMesh:
+ # containerMesh = Mesh.New(temp_mesh_name)
+ # --------------------------
+
+
+ for ob_main in [o for o in scene.objects if o.is_visible()]:
+ # for ob_main in scene.objects.context:
+
+ free, derived = create_derived_objects(ob_main)
+
+ if derived == None: continue
+
+ for ob, ob_mat in derived:
+ # for ob, ob_mat in BPyObject.getDerivedObjects(ob_main):
+ objType=ob.type
+ objName=ob.name
+ self.matonly = 0
+ if objType == "CAMERA":
+ # if objType == "Camera":
+ self.writeViewpoint(ob, ob_mat, scene)
+ elif objType in ("MESH", "CURVE", "SURF", "TEXT") :
+ # elif objType in ("Mesh", "Curve", "Surf", "Text") :
+ if EXPORT_APPLY_MODIFIERS or objType != 'MESH':
+ # if EXPORT_APPLY_MODIFIERS or objType != 'Mesh':
+ me = ob.create_mesh(EXPORT_APPLY_MODIFIERS, 'PREVIEW')
+ # me= BPyMesh.getMeshFromObject(ob, containerMesh, EXPORT_APPLY_MODIFIERS, False, scene)
+ else:
+ me = ob.data
+ # me = ob.getData(mesh=1)
+
+ self.writeIndexedFaceSet(ob, me, ob_mat, world, EXPORT_TRI = EXPORT_TRI)
+
+ # free mesh created with create_mesh()
+ if me != ob.data:
+ bpy.data.remove_mesh(me)
+
+ elif objType == "LAMP":
+ # elif objType == "Lamp":
+ data= ob.data
+ datatype=data.type
+ if datatype == 'POINT':
+ # if datatype == Lamp.Types.Lamp:
+ self.writePointLight(ob, ob_mat, data, world)
+ elif datatype == 'SPOT':
+ # elif datatype == Lamp.Types.Spot:
+ self.writeSpotLight(ob, ob_mat, data, world)
+ elif datatype == 'SUN':
+ # elif datatype == Lamp.Types.Sun:
+ self.writeDirectionalLight(ob, ob_mat, data, world)
+ else:
+ self.writeDirectionalLight(ob, ob_mat, data, world)
+ # do you think x3d could document what to do with dummy objects?
+ #elif objType == "Empty" and objName != "Empty":
+ # self.writeNode(ob, ob_mat)
+ else:
+ #print "Info: Ignoring [%s], object type [%s] not handle yet" % (object.name,object.getType)
+ pass
+
+ if free:
+ free_derived_objects(ob_main)
+
+ self.file.write("\n</Scene>\n</X3D>")
+
+ # if EXPORT_APPLY_MODIFIERS:
+ # if containerMesh:
+ # containerMesh.verts = None
+
+ self.cleanup()
+
+##########################################################
+# Utility methods
+##########################################################
+
+ def cleanup(self):
+ self.file.close()
+ self.texNames={}
+ self.matNames={}
+ self.indentLevel=0
+ print("Info: finished X3D export to %s\n" % self.filename)
+
+ def cleanStr(self, name, prefix='rsvd_'):
+ """cleanStr(name,prefix) - try to create a valid VRML DEF name from object name"""
+
+ newName=name[:]
+ if len(newName) == 0:
+ self.nodeID+=1 # was self.nNodeID, an attribute that is never initialized
+ return "%s%d" % (prefix, self.nodeID)
+
+ if newName in self.namesReserved:
+ newName='%s%s' % (prefix,newName)
+
+ if newName[0].isdigit():
+ newName='%s%s' % ('_',newName)
+
+ for bad in [' ','"','#',"'",',','.','[','\\',']','{','}']:
+ newName=newName.replace(bad,'_')
+ return newName
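+ # Editor's usage sketch (hypothetical names, not executed):
+ # self.cleanStr('Cube.001') -> 'Cube_001' (bad chars become '_')
+ # self.cleanStr('Shape') -> 'rsvd_Shape' ('Shape' is reserved in X3D)
+ # self.cleanStr('1stMesh') -> '_1stMesh' (leading digit gets '_' prefixed)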
+
+ def countIFSSetsNeeded(self, mesh, imageMap, sided, vColors):
+ """
+ countIFSSetsNeeded() - should look at a blender mesh to determine
+ how many VRML IndexedFaceSets or IndexedLineSets are needed. A
+ new mesh is created under the following conditions:
+
+ o - split by UV Textures / one per mesh
+ o - split by face, one sided and two sided
+ o - split by smooth and flat faces
+ o - split when faces only have 2 vertices (these need to be an IndexedLineSet)
+ """
+
+ imageNameMap={}
+ faceMap={}
+ nFaceIndx=0
+
+ if mesh.active_uv_texture:
+ # if mesh.faceUV:
+ for face in mesh.active_uv_texture.data:
+ # for face in mesh.faces:
+ sidename='';
+ if face.twoside:
+ # if face.mode & Mesh.FaceModes.TWOSIDE:
+ sidename='two'
+ else:
+ sidename='one'
+
+ if sidename in sided:
+ sided[sidename]+=1
+ else:
+ sided[sidename]=1
+
+ image = face.image
+ if image:
+ faceName="%s_%s" % (face.image.name, sidename);
+ try:
+ imageMap[faceName].append(face)
+ except KeyError:
+ imageMap[faceName]=[face.image.name,sidename,face]
+
+ if self.verbose > 2:
+ for faceName in imageMap.keys():
+ ifs=imageMap[faceName]
+ print("Debug: faceName=%s image=%s, solid=%s facecnt=%d" % \
+ (faceName, ifs[0], ifs[1], len(ifs)-2))
+
+ return len(imageMap)
+
+ def faceToString(self,face):
+
+ print("Debug: face.flag=0x%x (bitflags)" % face.flag)
+ if face.sel:
+ print("Debug: face.sel=true")
+
+ print("Debug: face.mode=0x%x (bitflags)" % face.mode)
+ if face.mode & Mesh.FaceModes.TWOSIDE:
+ print("Debug: face.mode twosided")
+
+ print("Debug: face.transp=0x%x (enum)" % face.transp)
+ if face.transp == Mesh.FaceTranspModes.SOLID:
+ print("Debug: face.transp.SOLID")
+
+ if face.image:
+ print("Debug: face.image=%s" % face.image.name)
+ print("Debug: face.materialIndex=%d" % face.materialIndex)
+
+ # XXX not used
+ # def getVertexColorByIndx(self, mesh, indx):
+ # c = None
+ # for face in mesh.faces:
+ # j=0
+ # for vertex in face.v:
+ # if vertex.index == indx:
+ # c=face.col[j]
+ # break
+ # j=j+1
+ # if c: break
+ # return c
+
+ def meshToString(self,mesh):
+ # print("Debug: mesh.hasVertexUV=%d" % mesh.vertexColors)
+ print("Debug: mesh.faceUV=%d" % (len(mesh.uv_textures) > 0))
+ # print("Debug: mesh.faceUV=%d" % mesh.faceUV)
+ print("Debug: mesh.hasVertexColours=%d" % (len(mesh.vertex_colors) > 0))
+ # print("Debug: mesh.hasVertexColours=%d" % mesh.hasVertexColours())
+ print("Debug: mesh.verts=%d" % len(mesh.verts))
+ print("Debug: mesh.faces=%d" % len(mesh.faces))
+ print("Debug: mesh.materials=%d" % len(mesh.materials))
+
+ def rgbToFS(self, c):
+ s="%s %s %s" % (round(c[0]/255.0,self.cp),
+ round(c[1]/255.0,self.cp),
+ round(c[2]/255.0,self.cp))
+
+ # s="%s %s %s" % (
+ # round(c.r/255.0,self.cp),
+ # round(c.g/255.0,self.cp),
+ # round(c.b/255.0,self.cp))
+ return s
+
+ def computeDirection(self, mtx):
+ x,y,z=(0,-1.0,0) # point down
+
+ ax,ay,az = (mtx*MATWORLD).toEuler()
+
+ # ax *= DEG2RAD
+ # ay *= DEG2RAD
+ # az *= DEG2RAD
+
+ # rot X
+ x1=x
+ y1=y*math.cos(ax)-z*math.sin(ax)
+ z1=y*math.sin(ax)+z*math.cos(ax)
+
+ # rot Y
+ x2=x1*math.cos(ay)+z1*math.sin(ay)
+ y2=y1
+ z2=z1*math.cos(ay)-x1*math.sin(ay)
+
+ # rot Z
+ x3=x2*math.cos(az)-y2*math.sin(az)
+ y3=x2*math.sin(az)+y2*math.cos(az)
+ z3=z2
+
+ return [x3,y3,z3]
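+ # Editor's note: the three blocks above rotate the "down" vector (0,-1,0)
+ # about X, then Y, then Z, i.e. they return Rz(az)*Ry(ay)*Rx(ax) applied
+ # to (0,-1,0); with zero rotation the result is [0, -1.0, 0].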
+
+
+ # swap Y and Z to handle axis difference between Blender and VRML
+ #------------------------------------------------------------------------
+ def rotatePointForVRML(self, v):
+ x = v[0]
+ y = v[2]
+ z = -v[1]
+
+ vrmlPoint=[x, y, z]
+ return vrmlPoint
+
+ # For writing well formed VRML code
+ #------------------------------------------------------------------------
+ def writeIndented(self, s, inc=0):
+ if inc < 1:
+ self.indentLevel = self.indentLevel + inc
+
+ spaces=""
+ for x in range(self.indentLevel):
+ spaces = spaces + "\t"
+ self.file.write(spaces + s)
+
+ if inc > 0:
+ self.indentLevel = self.indentLevel + inc
+
+ # Converts a Euler to three new Quaternions
+ # Angles of Euler are passed in as radians
+ #------------------------------------------------------------------------
+ def eulerToQuaternions(self, x, y, z):
+ Qx = [math.cos(x/2), math.sin(x/2), 0, 0]
+ Qy = [math.cos(y/2), 0, math.sin(y/2), 0]
+ Qz = [math.cos(z/2), 0, 0, math.sin(z/2)]
+
+ quaternionVec=[Qx,Qy,Qz]
+ return quaternionVec
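+ # Editor's note: quaternions here are stored as [w, x, y, z], one per
+ # axis; e.g. x = pi gives Qx = [cos(pi/2), sin(pi/2), 0, 0] = [0, 1, 0, 0],
+ # a half-turn about X.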
+
+ # Multiply two Quaternions together to get a new Quaternion
+ #------------------------------------------------------------------------
+ def multiplyQuaternions(self, Q1, Q2):
+ result = [((Q1[0] * Q2[0]) - (Q1[1] * Q2[1]) - (Q1[2] * Q2[2]) - (Q1[3] * Q2[3])),
+ ((Q1[0] * Q2[1]) + (Q1[1] * Q2[0]) + (Q1[2] * Q2[3]) - (Q1[3] * Q2[2])),
+ ((Q1[0] * Q2[2]) + (Q1[2] * Q2[0]) + (Q1[3] * Q2[1]) - (Q1[1] * Q2[3])),
+ ((Q1[0] * Q2[3]) + (Q1[3] * Q2[0]) + (Q1[1] * Q2[2]) - (Q1[2] * Q2[1]))]
+
+ return result
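+ # Editor's sanity check (illustrative): squaring the half-turn about X,
+ # multiplyQuaternions([0,1,0,0], [0,1,0,0]), yields [-1, 0, 0, 0],
+ # i.e. w = -1, a full turn (angle 2*acos(-1) = 2*pi).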
+
+ # Convert a Quaternion to an Angle Axis (ax, ay, az, angle)
+ # angle is in radians
+ #------------------------------------------------------------------------
+ def quaternionToAngleAxis(self, Qf):
+ scale = math.sqrt(math.pow(Qf[1],2) + math.pow(Qf[2],2) + math.pow(Qf[3],2))
+ ax = Qf[1]
+ ay = Qf[2]
+ az = Qf[3]
+
+ if scale > .0001:
+ # normalize the axis by its length (the original divided by the
+ # squared length, which left a non-unit axis)
+ ax/=scale
+ ay/=scale
+ az/=scale
+
+ angle = 2 * math.acos(Qf[0])
+
+ result = [ax, ay, az, angle]
+ return result
+
+##########################################################
+# Callbacks, needed before Main
+##########################################################
+
+def x3d_export(filename,
+ context,
+ EXPORT_APPLY_MODIFIERS=False,
+ EXPORT_TRI=False,
+ EXPORT_GZIP=False):
+
+ if EXPORT_GZIP:
+ if not filename.lower().endswith('.x3dz'):
+ filename = '.'.join(filename.split('.')[:-1]) + '.x3dz'
+ else:
+ if not filename.lower().endswith('.x3d'):
+ filename = '.'.join(filename.split('.')[:-1]) + '.x3d'
+
+
+ scene = context.scene
+ # scene = Blender.Scene.GetCurrent()
+ world = scene.world
+
+ # XXX these are global textures while .Get() returned only scene's?
+ alltextures = bpy.data.textures
+ # alltextures = Blender.Texture.Get()
+
+ wrlexport=x3d_class(filename)
+ wrlexport.export(\
+ scene,\
+ world,\
+ alltextures,\
+ \
+ EXPORT_APPLY_MODIFIERS = EXPORT_APPLY_MODIFIERS,\
+ EXPORT_TRI = EXPORT_TRI,\
+ )
+
+
+def x3d_export_ui(filename):
+ if not filename.endswith(extension):
+ filename += extension
+ #if _safeOverwrite and sys.exists(filename):
+ # result = Draw.PupMenu("File Already Exists, Overwrite?%t|Yes%x1|No%x0")
+ #if(result != 1):
+ # return
+
+ # Get user options
+ EXPORT_APPLY_MODIFIERS = Draw.Create(1)
+ EXPORT_TRI = Draw.Create(0)
+ EXPORT_GZIP = Draw.Create( filename.lower().endswith('.x3dz') )
+
+ # Get USER Options
+ pup_block = [\
+ ('Apply Modifiers', EXPORT_APPLY_MODIFIERS, 'Use transformed mesh data from each object.'),\
+ ('Triangulate', EXPORT_TRI, 'Triangulate quads.'),\
+ ('Compress', EXPORT_GZIP, 'GZip the resulting file, requires a full python install'),\
+ ]
+
+ if not Draw.PupBlock('Export...', pup_block):
+ return
+
+ Blender.Window.EditMode(0)
+ Blender.Window.WaitCursor(1)
+
+ x3d_export(filename,\
+ EXPORT_APPLY_MODIFIERS = EXPORT_APPLY_MODIFIERS.val,\
+ EXPORT_TRI = EXPORT_TRI.val,\
+ EXPORT_GZIP = EXPORT_GZIP.val\
+ )
+
+ Blender.Window.WaitCursor(0)
+
+
+
+#########################################################
+# main routine
+#########################################################
+
+
+# if __name__ == '__main__':
+# Blender.Window.FileSelector(x3d_export_ui,"Export X3D", Blender.Get('filename').replace('.blend', '.x3d'))
+
+class EXPORT_OT_x3d(bpy.types.Operator):
+ '''
+ X3D Exporter
+ '''
+ __idname__ = "export.x3d"
+ __label__ = 'Export X3D'
+
+ # List of operator properties, the attributes will be assigned
+ # to the class instance from the operator settings before calling.
+
+ __props__ = [
+ bpy.props.StringProperty(attr="path", name="File Path", description="File path used for exporting the X3D file", maxlen= 1024, default= ""),
+
+ bpy.props.BoolProperty(attr="apply_modifiers", name="Apply Modifiers", description="Use transformed mesh data from each object.", default=True),
+ bpy.props.BoolProperty(attr="triangulate", name="Triangulate", description="Triangulate quads.", default=False),
+ bpy.props.BoolProperty(attr="compress", name="Compress", description="GZip the resulting file, requires a full python install.", default=False),
+ ]
+
+ def execute(self, context):
+ x3d_export(self.path, context, self.apply_modifiers, self.triangulate, self.compress)
+ return ('FINISHED',)
+
+ def invoke(self, context, event):
+ wm = context.manager
+ wm.add_fileselect(self.__operator__)
+ return ('RUNNING_MODAL',)
+
+ def poll(self, context): # Poll isn't working yet
+ print("Poll")
+ return context.active_object != None
+
+bpy.ops.add(EXPORT_OT_x3d)
+
+# NOTES
+# - blender version is hardcoded
diff --git a/release/scripts/io/import_3ds.py b/release/scripts/io/import_3ds.py
new file mode 100644
index 00000000000..339fac839ea
--- /dev/null
+++ b/release/scripts/io/import_3ds.py
@@ -0,0 +1,1167 @@
+#!BPY
+"""
+Name: '3D Studio (.3ds)...'
+Blender: 244
+Group: 'Import'
+Tooltip: 'Import from 3DS file format (.3ds)'
+"""
+
+__author__= ['Bob Holcomb', 'Richard Lärkäng', 'Damien McGinnes', 'Campbell Barton', 'Mario Lapin']
+__url__ = ("blenderartists.org", "www.blender.org", "www.gametutorials.com", "lib3ds.sourceforge.net/")
+__version__= '0.996'
+__bpydoc__= '''\
+
+3ds Importer
+
+This script imports a 3ds file and the materials into Blender for editing.
+
+Loader is based on 3ds loader from www.gametutorials.com (Thanks DigiBen).
+
+0.996 by Mario Lapin (mario.lapin@gmail.com) 13/04/200 <br>
+ - Implemented workaround to correct association between name, geometry and materials of
+ imported meshes.
+
+ Without this patch, version 0.995 of this importer would associate with each mesh object
+ the geometry and the materials of the previously parsed mesh object. As a result, the name
+ of the first mesh object would be thrown away, and the name of the last mesh object would
+ automatically get a '.001' suffix. No object would disappear, but object names and
+ materials would be completely jumbled.
+
+0.995 by Campbell Barton<br>
+- workaround for buggy mesh vert delete
+- minor tweaks
+
+0.99 by Bob Holcomb<br>
+- added support for floating point color values that previously broke on import.
+
+0.98 by Campbell Barton<br>
+- import faces and verts to lists instead of a mesh, convert to a mesh later
+- use new index mapping feature of mesh to re-map faces that were not added.
+
+0.97 by Campbell Barton<br>
+- Strip material names of spaces
+- Added import as instance to import the 3ds into its own
+ scene and add a group instance to the current scene
+- New option to scale down imported objects so they are within a limited bounding area.
+
+0.96 by Campbell Barton<br>
+- Added workaround for bug in setting UV's for Zero vert index UV faces.
+- Removed unique name function, let blender make the names unique.
+
+0.95 by Campbell Barton<br>
+- Removed workarounds for Blender 2.41
+- Mesh objects split by material - many 3ds objects used more than 16 per mesh.
+- Removed a lot of unneeded variable creation.
+
+0.94 by Campbell Barton<br>
+- Face import tested to be about overall 16x speedup over 0.93.
+- Material importing speedup.
+- Tested with more models.
+- Support some corrupt models.
+
+0.93 by Campbell Barton<br>
+- Tested with 400 3ds files from turbosquid and samples.
+- Tactfully ignore faces that used the same verts twice.
+- Rolled back to 0.83's sloppy, un-reorganized code; the reorganization had broken UV coord loading.
+- Converted from NMesh to Mesh.
+- Faster and cleaner new names.
+- Use external comprehensive image loader.
+- Re-integrated 0.92 and 0.9 changes
+- Fixes for 2.41 compat.
+- Non textured faces do not use a texture flag.
+
+0.92<br>
+- Added support for diffuse, alpha, spec, bump maps in a single material
+
+0.9<br>
+- Reorganized code into object/material block functions<br>
+- Use of Matrix() to copy matrix data<br>
+- added support for material transparency<br>
+
+0.83 2005-08-07: Campbell Barton
+- Aggressive image finding and case insensitivity for POSIX systems.
+
+0.82a 2005-07-22
+- image texture loading (both for face uv and renderer)
+
+0.82 - image texture loading (for face uv)
+
+0.81a (fork- not 0.9) Campbell Barton 2005-06-08
+- Simplified import code
+- Never overwrite data
+- Faster list handling
+- Leaves import selected
+
+0.81 Damien McGinnes 2005-01-09
+- handle missing images better
+
+0.8 Damien McGinnes 2005-01-08
+- copies sticky UV coords to face ones
+- handles images better
+- Recommend that you run 'RemoveDoubles' on each imported mesh after using this script
+
+'''
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# Script copyright (C) Bob Holcomb
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+# --------------------------------------------------------------------------
+
+# Importing modules
+
+import os
+import time
+import struct
+
+from import_obj import unpack_face_list, load_image
+
+import bpy
+import Mathutils
+
+# import Blender
+# from Blender import Mesh, Object, Material, Image, Texture, Lamp, Mathutils
+# from Blender.Mathutils import Vector
+# import BPyImage
+
+# import BPyMessages
+
+# try:
+# from struct import calcsize, unpack
+# except:
+# calcsize= unpack= None
+
+
+
+# # If python version is less than 2.4, try to get set stuff from module
+# try:
+# set
+# except:
+# from sets import Set as set
+
+BOUNDS_3DS = []
+
+
+#this script imports uvcoords as sticky vertex coords
+#this parameter enables copying these to face uv coords
+#which should be more useful.
+
+def createBlenderTexture(material, name, image):
+ texture = bpy.data.textures.new(name)
+ texture.setType('Image')
+ texture.image = image
+ material.setTexture(0, texture, Texture.TexCo.UV, Texture.MapTo.COL)
+
+
+
+######################################################
+# Data Structures
+######################################################
+
+#Some of the chunks that we will see
+#----- Primary Chunk, at the beginning of each file
+PRIMARY = int('0x4D4D',16)
+
+#------ Main Chunks
+OBJECTINFO = int('0x3D3D',16); #This gives the version of the mesh and is found right before the material and object information
+VERSION = int('0x0002',16); #This gives the version of the .3ds file
+EDITKEYFRAME= int('0xB000',16); #This is the header for all of the key frame info
+
+#------ sub defines of OBJECTINFO
+MATERIAL = 45055 #0xAFFF // This stored the texture info
+OBJECT = 16384 #0x4000 // This stores the faces, vertices, etc...
+
+#>------ sub defines of MATERIAL
+#------ sub defines of MATERIAL_BLOCK
+MAT_NAME = int('0xA000',16) # This holds the material name
+MAT_AMBIENT = int('0xA010',16) # Ambient color of the object/material
+MAT_DIFFUSE = int('0xA020',16) # This holds the color of the object/material
+MAT_SPECULAR = int('0xA030',16) # SPecular color of the object/material
+MAT_SHINESS = int('0xA040',16) # Shininess of the object/material
+MAT_TRANSPARENCY= int('0xA050',16) # Transparency value of material
+MAT_SELF_ILLUM = int('0xA080',16) # Self Illumination value of material
+MAT_WIRE = int('0xA085',16) # Only render's wireframe
+
+MAT_TEXTURE_MAP = int('0xA200',16) # This is a header for a new texture map
+MAT_SPECULAR_MAP= int('0xA204',16) # This is a header for a new specular map
+MAT_OPACITY_MAP = int('0xA210',16) # This is a header for a new opacity map
+MAT_REFLECTION_MAP= int('0xA220',16) # This is a header for a new reflection map
+MAT_BUMP_MAP = int('0xA230',16) # This is a header for a new bump map
+MAT_MAP_FILENAME = int('0xA300',16) # This holds the file name of the texture
+
+MAT_FLOAT_COLOR = int ('0x0010', 16) #color defined as 3 floats
+MAT_24BIT_COLOR = int ('0x0011', 16) #color defined as 3 bytes
+
+#>------ sub defines of OBJECT
+OBJECT_MESH = int('0x4100',16); # This lets us know that we are reading a new object
+OBJECT_LAMP = int('0x4600',16); # This lets us know we are reading a light object
+OBJECT_LAMP_SPOT = int('0x4610',16); # The light is a spotlight.
+OBJECT_LAMP_OFF = int('0x4620',16); # The light is off.
+OBJECT_LAMP_ATTENUATE = int('0x4625',16);
+OBJECT_LAMP_RAYSHADE = int('0x4627',16);
+OBJECT_LAMP_SHADOWED = int('0x4630',16);
+OBJECT_LAMP_LOCAL_SHADOW = int('0x4640',16);
+OBJECT_LAMP_LOCAL_SHADOW2 = int('0x4641',16);
+OBJECT_LAMP_SEE_CONE = int('0x4650',16);
+OBJECT_LAMP_SPOT_RECTANGULAR = int('0x4651',16);
+OBJECT_LAMP_SPOT_OVERSHOOT = int('0x4652',16);
+OBJECT_LAMP_SPOT_PROJECTOR = int('0x4653',16);
+OBJECT_LAMP_EXCLUDE = int('0x4654',16);
+OBJECT_LAMP_RANGE = int('0x4655',16);
+OBJECT_LAMP_ROLL = int('0x4656',16);
+OBJECT_LAMP_SPOT_ASPECT = int('0x4657',16);
+OBJECT_LAMP_RAY_BIAS = int('0x4658',16);
+OBJECT_LAMP_INNER_RANGE = int('0x4659',16);
+OBJECT_LAMP_OUTER_RANGE = int('0x465A',16);
+OBJECT_LAMP_MULTIPLIER = int('0x465B',16);
+OBJECT_LAMP_AMBIENT_LIGHT = int('0x4680',16);
+
+
+
+OBJECT_CAMERA= int('0x4700',16); # This lets us know we are reading a camera object
+
+#>------ sub defines of CAMERA
+OBJECT_CAM_RANGES= int('0x4720',16); # The camera range values
+
+#>------ sub defines of OBJECT_MESH
+OBJECT_VERTICES = int('0x4110',16); # The objects vertices
+OBJECT_FACES = int('0x4120',16); # The objects faces
+OBJECT_MATERIAL = int('0x4130',16); # This is found if the object has a material, either texture map or color
+OBJECT_UV = int('0x4140',16); # The UV texture coordinates
+OBJECT_TRANS_MATRIX = int('0x4160',16); # The Object Matrix
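+#Editor's sketch of the chunk hierarchy this importer walks (informative only,
+#derived from the constants above; indentation shows nesting):
+#
+# PRIMARY (0x4D4D)
+# VERSION (0x0002)
+# OBJECTINFO (0x3D3D)
+# MATERIAL (0xAFFF) -> MAT_NAME, MAT_DIFFUSE, MAT_TEXTURE_MAP, ...
+# OBJECT (0x4000) -> OBJECT_MESH (0x4100) -> OBJECT_VERTICES, OBJECT_FACES, OBJECT_UV, ...
+# EDITKEYFRAME (0xB000)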
+
+global scn
+scn = None
+
+#the chunk class
+class chunk:
+ ID = 0
+ length = 0
+ bytes_read = 0
+
+ #we don't read in the bytes_read, we compute that
+ binary_format='<HI'
+
+ def __init__(self):
+ self.ID = 0
+ self.length = 0
+ self.bytes_read = 0
+
+ def dump(self):
+ print('ID: ', self.ID)
+ print('ID in hex: ', hex(self.ID))
+ print('length: ', self.length)
+ print('bytes_read: ', self.bytes_read)
+
+def read_chunk(file, chunk):
+ temp_data = file.read(struct.calcsize(chunk.binary_format))
+ data = struct.unpack(chunk.binary_format, temp_data)
+ chunk.ID = data[0]
+ chunk.length = data[1]
+ #update the bytes read function
+ chunk.bytes_read = 6
+
+ #if debugging
+ #chunk.dump()
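+# Editor's illustrative example (not executed): the header is 6 bytes,
+# a little-endian uint16 ID plus a uint32 length, so for a hypothetical buffer:
+# struct.unpack('<HI', b'\x4d\x4d\x0a\x00\x00\x00') == (0x4D4D, 10)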
+
+def read_string(file):
+ #read in the characters till we get a null character
+ s = b''
+# s = ''
+ while not s.endswith(b'\x00'):
+# while not s.endswith('\x00'):
+ s += struct.unpack('<c', file.read(1))[0]
+# s += struct.unpack( '<c', file.read(1) )[0]
+ #print 'string: ',s
+
+ s = str(s[:-1], 'ASCII')
+# print("read string", s)
+
+ #remove the null character from the string
+ return s
+# return s[:-1]
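+# Editor's usage sketch (hypothetical in-memory buffer, not part of the
+# importer): read_string stops at the first NUL byte and drops it, e.g.
+# import io; read_string(io.BytesIO(b'Material01\x00rest')) -> 'Material01'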
+
+######################################################
+# IMPORT
+######################################################
+def process_next_object_chunk(file, previous_chunk):
+ new_chunk = chunk()
+ temp_chunk = chunk()
+
+ while (previous_chunk.bytes_read < previous_chunk.length):
+ #read the next chunk
+ read_chunk(file, new_chunk)
+
+def skip_to_end(file, skip_chunk):
+ buffer_size = skip_chunk.length - skip_chunk.bytes_read
+ binary_format='%ic' % buffer_size
+ temp_data = file.read(struct.calcsize(binary_format))
+ skip_chunk.bytes_read += buffer_size
+
+
+def add_texture_to_material(image, texture, material, mapto):
+# if mapto=='DIFFUSE':
+# map = Texture.MapTo.COL
+# elif mapto=='SPECULAR':
+# map = Texture.MapTo.SPEC
+# elif mapto=='OPACITY':
+# map = Texture.MapTo.ALPHA
+# elif mapto=='BUMP':
+# map = Texture.MapTo.NOR
+# else:
+ if mapto not in ("COLOR", "SPECULARITY", "ALPHA", "NORMAL"):
+ print('\tError: Cannot map to "%s"\n\tassuming diffuse color. modify material "%s" later.' % (mapto, material.name))
+ mapto = "COLOR"
+# map = Texture.MapTo.COL
+
+ if image: texture.image = image
+# if image: texture.setImage(image) # double check its an image.
+
+ material.add_texture(texture, "UV", mapto)
+# free_tex_slots = [i for i, tex in enumerate( material.getTextures() ) if tex == None]
+# if not free_tex_slots:
+# print('/tError: Cannot add "%s" map. 10 Texture slots alredy used.' % mapto)
+# else:
+# material.setTexture(free_tex_slots[0],texture,Texture.TexCo.UV,map)
+
+
+def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH):
+ #print previous_chunk.bytes_read, 'BYTES READ'
+ contextObName = None
+ contextLamp = [None, None] # object, Data
+ contextMaterial = None
+ contextMatrix_rot = None # Blender.Mathutils.Matrix(); contextMatrix.identity()
+ #contextMatrix_tx = None # Blender.Mathutils.Matrix(); contextMatrix.identity()
+ contextMesh_vertls = None
+ contextMesh_facels = None
+ contextMeshMaterials = {} # matname:[face_idxs]
+ contextMeshUV = None
+
+ TEXTURE_DICT = {}
+ MATDICT = {}
+# TEXMODE = Mesh.FaceModes['TEX']
+
+ # Localspace variable names, faster.
+ STRUCT_SIZE_1CHAR = struct.calcsize('c')
+ STRUCT_SIZE_2FLOAT = struct.calcsize('2f')
+ STRUCT_SIZE_3FLOAT = struct.calcsize('3f')
+ STRUCT_SIZE_UNSIGNED_SHORT = struct.calcsize('H')
+ STRUCT_SIZE_4UNSIGNED_SHORT = struct.calcsize('4H')
+ STRUCT_SIZE_4x3MAT = struct.calcsize('ffffffffffff')
+ _STRUCT_SIZE_4x3MAT = struct.calcsize('fffffffffffff')
+ # STRUCT_SIZE_4x3MAT = calcsize('ffffffffffff')
+ # print STRUCT_SIZE_4x3MAT, ' STRUCT_SIZE_4x3MAT'
+
+ def putContextMesh(myContextMesh_vertls, myContextMesh_facels, myContextMeshMaterials):
+
+ materialFaces = set() # faces that have a material. Can optimize?
+
+ # Now make copies with assigned materials.
+
+ def makeMeshMaterialCopy(matName, faces):
+ '''
+ Make a new mesh containing only the faces that use this material.
+ faces can be any iterable object containing ints.
+ '''
+
+ faceVertUsers = [False] * len(myContextMesh_vertls)
+ ok = 0
+ for fIdx in faces:
+ for vindex in myContextMesh_facels[fIdx]:
+ faceVertUsers[vindex] = True
+ if matName != None: # if matName is none then this is a set(), meaning we are using the untextured faces and do not need to store textured faces.
+ materialFaces.add(fIdx)
+ ok = 1
+
+ if not ok:
+ return
+
+ myVertMapping = {}
+ vertMappingIndex = 0
+
+ vertsToUse = [i for i in range(len(myContextMesh_vertls)) if faceVertUsers[i]]
+ myVertMapping = dict( [ (ii, i) for i, ii in enumerate(vertsToUse) ] )
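+ # Editor's sketch (hypothetical indices): if this material's faces touch
+ # only verts {0, 2, 5}, then vertsToUse == [0, 2, 5] and
+ # myVertMapping == {0: 0, 2: 1, 5: 2}, compacting the new mesh's verts.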
+
+ tempName= '%s_%s' % (contextObName, matName) # matName may be None.
+ bmesh = bpy.data.add_mesh(tempName)
+# bmesh = bpy.data.meshes.new(tempName)
+
+ if matName == None:
+ img = None
+ else:
+ bmat = MATDICT[matName][1]
+ bmesh.add_material(bmat)
+# bmesh.materials = [bmat]
+ try: img = TEXTURE_DICT[bmat.name]
+ except KeyError: img = None
+
+# bmesh_verts = bmesh.verts
+ if len(vertsToUse):
+ bmesh.add_geometry(len(vertsToUse), 0, len(faces))
+
+ # XXX why add extra vertex?
+# bmesh_verts.extend( [Vector()] )
+ bmesh.verts.foreach_set("co", [x for tup in [myContextMesh_vertls[i] for i in vertsToUse] for x in tup])
+# bmesh_verts.extend( [myContextMesh_vertls[i] for i in vertsToUse] )
+
+ # +1 because of DUMMYVERT
+ bmesh.faces.foreach_set("verts_raw", unpack_face_list([[myVertMapping[vindex] for vindex in myContextMesh_facels[fIdx]] for fIdx in faces]))
+# face_mapping = bmesh.faces.extend( [ [ bmesh_verts[ myVertMapping[vindex]+1] for vindex in myContextMesh_facels[fIdx]] for fIdx in faces ], indexList=True )
+
+ if bmesh.faces and (contextMeshUV or img):
+ bmesh.add_uv_texture()
+# bmesh.faceUV = 1
+ for ii, i in enumerate(faces):
+
+ # Mapped index- faces may have not been added- if so, then map to the correct index
+ # BUGGY API - face_mapping is not always the right length
+# map_index = face_mapping[ii]
+
+ if 1:
+# if map_index != None:
+ targetFace = bmesh.faces[ii]
+# targetFace = bmesh.faces[map_index]
+
+ uf = bmesh.active_uv_texture.data[ii]
+
+ if contextMeshUV:
+ # v.index-1 because of the DUMMYVERT
+ uvs = [contextMeshUV[vindex] for vindex in myContextMesh_facels[i]]
+
+ if len(myContextMesh_facels[i]) == 3:
+ uf.uv1, uf.uv2, uf.uv3, uf.uv4 = uvs + [(0.0, 0.0)]
+ else:
+ uf.uv1, uf.uv2, uf.uv3, uf.uv4 = uvs
+# targetFace.uv = [contextMeshUV[vindex] for vindex in myContextMesh_facels[i]]
+ if img:
+ uf.image = img
+# targetFace.image = img
+
+ # bmesh.transform(contextMatrix)
+ ob = bpy.data.add_object("MESH", tempName)
+ ob.data = bmesh
+ SCN.add_object(ob)
+# ob = SCN_OBJECTS.new(bmesh, tempName)
+ '''
+ if contextMatrix_tx:
+ ob.setMatrix(contextMatrix_tx)
+ '''
+
+ if contextMatrix_rot:
+ # ob.matrix = [x for row in contextMatrix_rot for x in row]
+ ob.matrix = contextMatrix_rot
+# ob.setMatrix(contextMatrix_rot)
+
+ importedObjects.append(ob)
+ bmesh.update()
+# bmesh.calcNormals()
+
+ for matName, faces in myContextMeshMaterials.items():
+ makeMeshMaterialCopy(matName, faces)
+
+ if len(materialFaces) != len(myContextMesh_facels):
+ # Invert material faces.
+ makeMeshMaterialCopy(None, set(range(len( myContextMesh_facels ))) - materialFaces)
+ #raise 'Some UnMaterialed faces', len(contextMesh.faces)
+
+ #a spare chunk
+ new_chunk = chunk()
+ temp_chunk = chunk()
+
+ CreateBlenderObject = False
+
+ def read_float_color(temp_chunk):
+ temp_data = file.read(struct.calcsize('3f'))
+ temp_chunk.bytes_read += 12
+ return [float(col) for col in struct.unpack('<3f', temp_data)]
+
+ def read_byte_color(temp_chunk):
+ temp_data = file.read(struct.calcsize('3B'))
+ temp_chunk.bytes_read += 3
+ return [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb
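+ # Editor's example (illustrative): bytes b'\xff\x80\x00' unpack to
+ # (255, 128, 0), giving roughly [1.0, 0.502, 0.0].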
+
+ def read_texture(new_chunk, temp_chunk, name, mapto):
+ new_texture = bpy.data.add_texture('Diffuse')
+ new_texture.type = 'IMAGE'
+
+ img = None
+ while (new_chunk.bytes_read < new_chunk.length):
+ #print 'MAT_TEXTURE_MAP..while', new_chunk.bytes_read, new_chunk.length
+ read_chunk(file, temp_chunk)
+
+ if (temp_chunk.ID == MAT_MAP_FILENAME):
+ texture_name = read_string(file)
+ img = TEXTURE_DICT[contextMaterial.name] = load_image(texture_name, dirname)
+ new_chunk.bytes_read += (len(texture_name)+1) #plus one for the null character that gets removed
+
+ else:
+ skip_to_end(file, temp_chunk)
+
+ new_chunk.bytes_read += temp_chunk.bytes_read
+
+ # add the map to the material in the right channel
+ if img:
+ add_texture_to_material(img, new_texture, contextMaterial, mapto)
+
+ dirname = os.path.dirname(FILENAME)
+
+ #loop through all the data for this chunk (previous chunk) and see what it is
+ while (previous_chunk.bytes_read < previous_chunk.length):
+ #print '\t', previous_chunk.bytes_read, 'keep going'
+ #read the next chunk
+ #print 'reading a chunk'
+ read_chunk(file, new_chunk)
+
+ #is it a Version chunk?
+ if (new_chunk.ID == VERSION):
+ #print 'if (new_chunk.ID == VERSION):'
+ #print 'found a VERSION chunk'
+ #read in the version of the file
+ #it's an unsigned int (I)
+ temp_data = file.read(struct.calcsize('I'))
+ version = struct.unpack('<I', temp_data)[0]
+ new_chunk.bytes_read += 4 #read the 4 bytes for the version number
+ #this loader works with version 3 and below, but may not with 4 and above
+ if (version > 3):
+ print('\tNon-Fatal Error: Version greater than 3, may not load correctly: ', version)
+
+ #is it an object info chunk?
+ elif (new_chunk.ID == OBJECTINFO):
+ #print 'elif (new_chunk.ID == OBJECTINFO):'
+ # print 'found an OBJECTINFO chunk'
+ process_next_chunk(file, new_chunk, importedObjects, IMAGE_SEARCH)
+
+ #keep track of how much we read in the main chunk
+ new_chunk.bytes_read += temp_chunk.bytes_read
+
+ #is it an object chunk?
+ elif (new_chunk.ID == OBJECT):
+
+ if CreateBlenderObject:
+ putContextMesh(contextMesh_vertls, contextMesh_facels, contextMeshMaterials)
+ contextMesh_vertls = []; contextMesh_facels = []
+
+ ## preparing to receive the next object
+ contextMeshMaterials = {} # matname:[face_idxs]
+ contextMeshUV = None
+ #contextMesh.vertexUV = 1 # Make sticky coords.
+ # Reset matrix
+ contextMatrix_rot = None
+ #contextMatrix_tx = None
+
+ CreateBlenderObject = True
+ tempName = read_string(file)
+ contextObName = tempName
+ new_chunk.bytes_read += len(tempName)+1
+
+ #is it a material chunk?
+ elif (new_chunk.ID == MATERIAL):
+
+# print("read material")
+
+ #print 'elif (new_chunk.ID == MATERIAL):'
+ contextMaterial = bpy.data.add_material('Material')
+# contextMaterial = bpy.data.materials.new('Material')
+
+ elif (new_chunk.ID == MAT_NAME):
+ #print 'elif (new_chunk.ID == MAT_NAME):'
+ material_name = read_string(file)
+
+# print("material name", material_name)
+
+ #plus one for the null character that ended the string
+ new_chunk.bytes_read += len(material_name)+1
+
+ contextMaterial.name = material_name.rstrip() # remove trailing whitespace
+ MATDICT[material_name]= (contextMaterial.name, contextMaterial)
+
+ elif (new_chunk.ID == MAT_AMBIENT):
+ #print 'elif (new_chunk.ID == MAT_AMBIENT):'
+ read_chunk(file, temp_chunk)
+ if (temp_chunk.ID == MAT_FLOAT_COLOR):
+ contextMaterial.mirror_color = read_float_color(temp_chunk)
+# temp_data = file.read(struct.calcsize('3f'))
+# temp_chunk.bytes_read += 12
+# contextMaterial.mirCol = [float(col) for col in struct.unpack('<3f', temp_data)]
+ elif (temp_chunk.ID == MAT_24BIT_COLOR):
+ contextMaterial.mirror_color = read_byte_color(temp_chunk)
+# temp_data = file.read(struct.calcsize('3B'))
+# temp_chunk.bytes_read += 3
+# contextMaterial.mirCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb
+ else:
+ skip_to_end(file, temp_chunk)
+ new_chunk.bytes_read += temp_chunk.bytes_read
+
+ elif (new_chunk.ID == MAT_DIFFUSE):
+ #print 'elif (new_chunk.ID == MAT_DIFFUSE):'
+ read_chunk(file, temp_chunk)
+ if (temp_chunk.ID == MAT_FLOAT_COLOR):
+ contextMaterial.diffuse_color = read_float_color(temp_chunk)
+# temp_data = file.read(struct.calcsize('3f'))
+# temp_chunk.bytes_read += 12
+# contextMaterial.rgbCol = [float(col) for col in struct.unpack('<3f', temp_data)]
+ elif (temp_chunk.ID == MAT_24BIT_COLOR):
+ contextMaterial.diffuse_color = read_byte_color(temp_chunk)
+# temp_data = file.read(struct.calcsize('3B'))
+# temp_chunk.bytes_read += 3
+# contextMaterial.rgbCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb
+ else:
+ skip_to_end(file, temp_chunk)
+
+# print("read material diffuse color", contextMaterial.diffuse_color)
+
+ new_chunk.bytes_read += temp_chunk.bytes_read
+
+ elif (new_chunk.ID == MAT_SPECULAR):
+ #print 'elif (new_chunk.ID == MAT_SPECULAR):'
+ read_chunk(file, temp_chunk)
+ if (temp_chunk.ID == MAT_FLOAT_COLOR):
+ contextMaterial.specular_color = read_float_color(temp_chunk)
+# temp_data = file.read(struct.calcsize('3f'))
+# temp_chunk.bytes_read += 12
+# contextMaterial.mirCol = [float(col) for col in struct.unpack('<3f', temp_data)]
+ elif (temp_chunk.ID == MAT_24BIT_COLOR):
+ contextMaterial.specular_color = read_byte_color(temp_chunk)
+# temp_data = file.read(struct.calcsize('3B'))
+# temp_chunk.bytes_read += 3
+# contextMaterial.mirCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb
+ else:
+ skip_to_end(file, temp_chunk)
+ new_chunk.bytes_read += temp_chunk.bytes_read
+
+ elif (new_chunk.ID == MAT_TEXTURE_MAP):
+ read_texture(new_chunk, temp_chunk, "Diffuse", "COLOR")
+# #print 'elif (new_chunk.ID==MAT_TEXTURE_MAP):'
+# new_texture= bpy.data.textures.new('Diffuse')
+# new_texture.setType('Image')
+# img = None
+# while (new_chunk.bytes_read<new_chunk.length):
+# #print 'MAT_TEXTURE_MAP..while', new_chunk.bytes_read, new_chunk.length
+# read_chunk(file, temp_chunk)
+
+# if (temp_chunk.ID==MAT_MAP_FILENAME):
+# texture_name=read_string(file)
+# #img= TEXTURE_DICT[contextMaterial.name]= BPyImage.comprehensiveImageLoad(texture_name, FILENAME)
+# img= TEXTURE_DICT[contextMaterial.name]= BPyImage.comprehensiveImageLoad(texture_name, FILENAME, PLACE_HOLDER= False, RECURSIVE= IMAGE_SEARCH)
+# new_chunk.bytes_read += (len(texture_name)+1) #plus one for the null character that gets removed
+
+# else:
+# skip_to_end(file, temp_chunk)
+
+# new_chunk.bytes_read+= temp_chunk.bytes_read
+
+# #add the map to the material in the right channel
+# if img:
+# add_texture_to_material(img, new_texture, contextMaterial, 'DIFFUSE')
+
+ elif (new_chunk.ID == MAT_SPECULAR_MAP):
+ read_texture(new_chunk, temp_chunk, "Specular", "SPECULARITY")
+# #print 'elif (new_chunk.ID == MAT_SPECULAR_MAP):'
+# new_texture = bpy.data.textures.new('Specular')
+# new_texture.setType('Image')
+# img = None
+# while (new_chunk.bytes_read < new_chunk.length):
+# read_chunk(file, temp_chunk)
+
+# if (temp_chunk.ID == MAT_MAP_FILENAME):
+# texture_name = read_string(file)
+# #img = BPyImage.comprehensiveImageLoad(texture_name, FILENAME)
+# img = BPyImage.comprehensiveImageLoad(texture_name, FILENAME, PLACE_HOLDER=False, RECURSIVE=IMAGE_SEARCH)
+# new_chunk.bytes_read+= (len(texture_name)+1) #plus one for the null character that gets removed
+# else:
+# skip_to_end(file, temp_chunk)
+
+# new_chunk.bytes_read += temp_chunk.bytes_read
+
+# #add the map to the material in the right channel
+# if img:
+# add_texture_to_material(img, new_texture, contextMaterial, 'SPECULAR')
+
+ elif (new_chunk.ID == MAT_OPACITY_MAP):
+ read_texture(new_chunk, temp_chunk, "Opacity", "ALPHA")
+# #print 'new_texture = Blender.Texture.New('Opacity')'
+# new_texture = bpy.data.textures.new('Opacity')
+# new_texture.setType('Image')
+# img = None
+# while (new_chunk.bytes_read < new_chunk.length):
+# read_chunk(file, temp_chunk)
+
+# if (temp_chunk.ID == MAT_MAP_FILENAME):
+# texture_name = read_string(file)
+# #img = BPyImage.comprehensiveImageLoad(texture_name, FILENAME)
+# img = BPyImage.comprehensiveImageLoad(texture_name, FILENAME, PLACE_HOLDER=False, RECURSIVE=IMAGE_SEARCH)
+# new_chunk.bytes_read += (len(texture_name)+1) #plus one for the null character that gets removed
+# else:
+# skip_to_end(file, temp_chunk)
+
+# new_chunk.bytes_read += temp_chunk.bytes_read
+# #add the map to the material in the right channel
+# if img:
+# add_texture_to_material(img, new_texture, contextMaterial, 'OPACITY')
+
+ elif (new_chunk.ID == MAT_BUMP_MAP):
+ read_texture(new_chunk, temp_chunk, "Bump", "NORMAL")
+# #print 'elif (new_chunk.ID == MAT_BUMP_MAP):'
+# new_texture = bpy.data.textures.new('Bump')
+# new_texture.setType('Image')
+# img = None
+# while (new_chunk.bytes_read < new_chunk.length):
+# read_chunk(file, temp_chunk)
+
+# if (temp_chunk.ID == MAT_MAP_FILENAME):
+# texture_name = read_string(file)
+# #img = BPyImage.comprehensiveImageLoad(texture_name, FILENAME)
+# img = BPyImage.comprehensiveImageLoad(texture_name, FILENAME, PLACE_HOLDER=False, RECURSIVE=IMAGE_SEARCH)
+# new_chunk.bytes_read += (len(texture_name)+1) #plus one for the null character that gets removed
+# else:
+# skip_to_end(file, temp_chunk)
+
+# new_chunk.bytes_read += temp_chunk.bytes_read
+
+# #add the map to the material in the right channel
+# if img:
+# add_texture_to_material(img, new_texture, contextMaterial, 'BUMP')
+
+ elif (new_chunk.ID == MAT_TRANSPARENCY):
+ #print 'elif (new_chunk.ID == MAT_TRANSPARENCY):'
+ read_chunk(file, temp_chunk)
+ temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+
+ temp_chunk.bytes_read += 2
+ contextMaterial.alpha = 1-(float(struct.unpack('<H', temp_data)[0])/100)
+ new_chunk.bytes_read += temp_chunk.bytes_read
+
+
+ elif (new_chunk.ID == OBJECT_LAMP): # Basic lamp support.
+
+ temp_data = file.read(STRUCT_SIZE_3FLOAT)
+
+ x,y,z = struct.unpack('<3f', temp_data)
+ new_chunk.bytes_read += STRUCT_SIZE_3FLOAT
+
+ ob = bpy.data.add_object("LAMP", "Lamp")
+ ob.data = bpy.data.add_lamp("Lamp")
+ SCN.add_object(ob)
+
+ contextLamp[1]= ob.data
+# contextLamp[1]= bpy.data.lamps.new()
+ contextLamp[0]= ob
+# contextLamp[0]= SCN_OBJECTS.new(contextLamp[1])
+ importedObjects.append(contextLamp[0])
+
+ #print 'number of faces: ', num_faces
+ #print x,y,z
+ contextLamp[0].location = (x, y, z)
+# contextLamp[0].setLocation(x,y,z)
+
+ # Reset matrix
+ contextMatrix_rot = None
+ #contextMatrix_tx = None
+ #print contextLamp.name,
+
+ elif (new_chunk.ID == OBJECT_MESH):
+ # print 'Found an OBJECT_MESH chunk'
+ pass
+ elif (new_chunk.ID == OBJECT_VERTICES):
+ '''
+ Worldspace vertex locations
+ '''
+ # print 'elif (new_chunk.ID == OBJECT_VERTICES):'
+ temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+ num_verts = struct.unpack('<H', temp_data)[0]
+ new_chunk.bytes_read += 2
+
+ # print 'number of verts: ', num_verts
+ def getvert():
+ temp_data = struct.unpack('<3f', file.read(STRUCT_SIZE_3FLOAT))
+ new_chunk.bytes_read += STRUCT_SIZE_3FLOAT #12: 3 floats x 4 bytes each
+ return temp_data
+
+ #contextMesh.verts.extend( [Vector(),] ) # DUMMYVERT! - remove when blenders internals are fixed.
+ contextMesh_vertls = [getvert() for i in range(num_verts)]
+
+ #print 'object verts: bytes read: ', new_chunk.bytes_read
+
+ elif (new_chunk.ID == OBJECT_FACES):
+ # print 'elif (new_chunk.ID == OBJECT_FACES):'
+ temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+ num_faces = struct.unpack('<H', temp_data)[0]
+ new_chunk.bytes_read += 2
+ #print 'number of faces: ', num_faces
+
+ def getface():
+ # print '\ngetting a face'
+ temp_data = file.read(STRUCT_SIZE_4UNSIGNED_SHORT)
+ new_chunk.bytes_read += STRUCT_SIZE_4UNSIGNED_SHORT #4 short ints x 2 bytes each
+ v1,v2,v3,dummy = struct.unpack('<4H', temp_data)
+ return v1, v2, v3
+
+ contextMesh_facels = [ getface() for i in range(num_faces) ]
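+ # Each 3DS face record is four uint16s: v1, v2, v3 and a flags word,
+ # which is why getface() unpacks '<4H' and discards the fourth value.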
+
+
+ elif (new_chunk.ID == OBJECT_MATERIAL):
+ # print 'elif (new_chunk.ID == OBJECT_MATERIAL):'
+ material_name = read_string(file)
+ new_chunk.bytes_read += len(material_name)+1 # plus one for the trailing null character
+
+ temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+ num_faces_using_mat = struct.unpack('<H', temp_data)[0]
+ new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
+
+ def getmat():
+ temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+ new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
+ return struct.unpack('<H', temp_data)[0]
+
+ contextMeshMaterials[material_name]= [ getmat() for i in range(num_faces_using_mat) ]
+
+ #look up the material in all the materials
+
+ elif (new_chunk.ID == OBJECT_UV):
+ temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
+ num_uv = struct.unpack('<H', temp_data)[0]
+ new_chunk.bytes_read += 2
+
+ def getuv():
+ temp_data = file.read(STRUCT_SIZE_2FLOAT)
+ new_chunk.bytes_read += STRUCT_SIZE_2FLOAT #2 float x 4 bytes each
+ return Mathutils.Vector( struct.unpack('<2f', temp_data) )
+# return Vector( struct.unpack('<2f', temp_data) )
+
+ contextMeshUV = [ getuv() for i in range(num_uv) ]
+
+ elif (new_chunk.ID == OBJECT_TRANS_MATRIX):
+ # How do we know the matrix size? the 48-byte payload read below is a 4x3 float matrix (54 bytes counting the 6-byte chunk header)
+ temp_data = file.read(STRUCT_SIZE_4x3MAT)
+ data = list( struct.unpack('<ffffffffffff', temp_data) )
+ new_chunk.bytes_read += STRUCT_SIZE_4x3MAT
+
+ contextMatrix_rot = Mathutils.Matrix(\
+# contextMatrix_rot = Blender.Mathutils.Matrix(\
+ data[:3] + [0],\
+ data[3:6] + [0],\
+ data[6:9] + [0],\
+ data[9:] + [1])
+
+
+ '''
+ contextMatrix_rot = Blender.Mathutils.Matrix(\
+ data[:3] + [0],\
+ data[3:6] + [0],\
+ data[6:9] + [0],\
+ [0,0,0,1])
+ '''
+
+ '''
+ contextMatrix_rot = Blender.Mathutils.Matrix(\
+ data[:3] ,\
+ data[3:6],\
+ data[6:9])
+ '''
+
+ '''
+ contextMatrix_rot = Blender.Mathutils.Matrix()
+ m = 0
+ for j in xrange(4):
+ for i in xrange(3):
+ contextMatrix_rot[j][i] = data[m]
+ m += 1
+
+ contextMatrix_rot[0][3]=0;
+ contextMatrix_rot[1][3]=0;
+ contextMatrix_rot[2][3]=0;
+ contextMatrix_rot[3][3]=1;
+ '''
+
+ #contextMatrix_rot.resize4x4()
+ #print "MTX"
+ #print contextMatrix_rot
+ contextMatrix_rot.invert()
+ #print contextMatrix_rot
+ #contextMatrix_tx = Blender.Mathutils.TranslationMatrix(0.5 * Blender.Mathutils.Vector(data[9:]))
+ #contextMatrix_tx.invert()
+
+ #tx.invert()
+
+ #contextMatrix = contextMatrix * tx
+ #contextMatrix = contextMatrix *tx
+
+ elif (new_chunk.ID == MAT_MAP_FILENAME):
+ texture_name = read_string(file)
+ try:
+ TEXTURE_DICT[contextMaterial.name]
+ except:
+ #img = TEXTURE_DICT[contextMaterial.name]= BPyImage.comprehensiveImageLoad(texture_name, FILENAME)
+ img = TEXTURE_DICT[contextMaterial.name] = load_image(texture_name, dirname)
+# img = TEXTURE_DICT[contextMaterial.name]= BPyImage.comprehensiveImageLoad(texture_name, FILENAME, PLACE_HOLDER=False, RECURSIVE=IMAGE_SEARCH)
+
+ new_chunk.bytes_read += len(texture_name)+1 #plus one for the null character that gets removed
+
+ else: #(new_chunk.ID!=VERSION or new_chunk.ID!=OBJECTINFO or new_chunk.ID!=OBJECT or new_chunk.ID!=MATERIAL):
+ # print 'skipping to end of this chunk'
+ buffer_size = new_chunk.length - new_chunk.bytes_read
+ binary_format='%ic' % buffer_size
+ temp_data = file.read(struct.calcsize(binary_format))
+ new_chunk.bytes_read += buffer_size
+
+
+ #update the previous chunk bytes read
+ # print 'previous_chunk.bytes_read += new_chunk.bytes_read'
+ # print previous_chunk.bytes_read, new_chunk.bytes_read
+ previous_chunk.bytes_read += new_chunk.bytes_read
+ ## print 'Bytes left in this chunk: ', previous_chunk.length - previous_chunk.bytes_read
+
+ # FINISHED LOOP
+ # There will be a number of objects still not added
+ if contextMesh_facels != None:
+ putContextMesh(contextMesh_vertls, contextMesh_facels, contextMeshMaterials)
+
+def load_3ds(filename, context, IMPORT_CONSTRAIN_BOUNDS=10.0, IMAGE_SEARCH=True, APPLY_MATRIX=False):
+ global FILENAME, SCN
+# global FILENAME, SCN_OBJECTS
+
+ # XXX
+# if BPyMessages.Error_NoFile(filename):
+# return
+
+ print('\n\nImporting 3DS: "%s"' % (filename))
+# print('\n\nImporting 3DS: "%s"' % (Blender.sys.expandpath(filename)))
+
+ time1 = time.clock()
+# time1 = Blender.sys.time()
+
+ FILENAME = filename
+ current_chunk = chunk()
+
+ file = open(filename,'rb')
+
+ #here we go!
+ # print 'reading the first chunk'
+ read_chunk(file, current_chunk)
+ if (current_chunk.ID!=PRIMARY):
+ print('\tFatal Error: Not a valid 3ds file: ', filename)
+ file.close()
+ return
+
+
+ # IMPORT_AS_INSTANCE = Blender.Draw.Create(0)
+# IMPORT_CONSTRAIN_BOUNDS = Blender.Draw.Create(10.0)
+# IMAGE_SEARCH = Blender.Draw.Create(1)
+# APPLY_MATRIX = Blender.Draw.Create(0)
+
+ # Get USER Options
+# pup_block = [\
+# ('Size Constraint:', IMPORT_CONSTRAIN_BOUNDS, 0.0, 1000.0, 'Scale the model by 10 until it reaches the size constraint. Zero disables.'),\
+# ('Image Search', IMAGE_SEARCH, 'Search subdirs for any associated images (Warning, may be slow)'),\
+# ('Transform Fix', APPLY_MATRIX, 'Workaround for object transformations importing incorrectly'),\
+# #('Group Instance', IMPORT_AS_INSTANCE, 'Import objects into a new scene and group, creating an instance in the current scene.'),\
+# ]
+
+# if PREF_UI:
+# if not Blender.Draw.PupBlock('Import 3DS...', pup_block):
+# return
+
+# Blender.Window.WaitCursor(1)
+
+# IMPORT_CONSTRAIN_BOUNDS = IMPORT_CONSTRAIN_BOUNDS.val
+# # IMPORT_AS_INSTANCE = IMPORT_AS_INSTANCE.val
+# IMAGE_SEARCH = IMAGE_SEARCH.val
+# APPLY_MATRIX = APPLY_MATRIX.val
+
+ if IMPORT_CONSTRAIN_BOUNDS:
+ BOUNDS_3DS[:]= [1<<30, 1<<30, 1<<30, -1<<30, -1<<30, -1<<30]
+ else:
+ BOUNDS_3DS[:]= []
+
+ ##IMAGE_SEARCH
+
+ scn = context.scene
+# scn = bpy.data.scenes.active
+ SCN = scn
+# SCN_OBJECTS = scn.objects
+# SCN_OBJECTS.selected = [] # de select all
+
+ importedObjects = [] # Fill this list with objects
+ process_next_chunk(file, current_chunk, importedObjects, IMAGE_SEARCH)
+
+
+ # Link the objects into this scene.
+ # Layers = scn.Layers
+
+ # REMOVE DUMMYVERT, - remove this in the next release when blenders internal are fixed.
+
+
+# for ob in importedObjects:
+# if ob.type == 'MESH':
+# # if ob.type=='Mesh':
+# me = ob.getData(mesh=1)
+# me.verts.delete([me.verts[0],])
+# if not APPLY_MATRIX:
+# me.transform(ob.matrixWorld.copy().invert())
+
+ # Done DUMMYVERT
+ """
+ if IMPORT_AS_INSTANCE:
+ name = filename.split('\\')[-1].split('/')[-1]
+ # Create a group for this import.
+ group_scn = Scene.New(name)
+ for ob in importedObjects:
+ group_scn.link(ob) # dont worry about the layers
+
+ grp = Blender.Group.New(name)
+ grp.objects = importedObjects
+
+ grp_ob = Object.New('Empty', name)
+ grp_ob.enableDupGroup = True
+ grp_ob.DupGroup = grp
+ scn.link(grp_ob)
+ grp_ob.Layers = Layers
+ grp_ob.sel = 1
+ else:
+ # Select all imported objects.
+ for ob in importedObjects:
+ scn.link(ob)
+ ob.Layers = Layers
+ ob.sel = 1
+ """
+
+ if 0:
+# if IMPORT_CONSTRAIN_BOUNDS!=0.0:
+ # Set bounds from object bounding box
+ for ob in importedObjects:
+ if ob.type == 'MESH':
+# if ob.type=='Mesh':
+ ob.makeDisplayList() # Why doesn't this update the bounds?
+ for v in ob.getBoundBox():
+ for i in (0,1,2):
+ if v[i] < BOUNDS_3DS[i]:
+ BOUNDS_3DS[i]= v[i] # min
+
+ if v[i] > BOUNDS_3DS[i + 3]:
+ BOUNDS_3DS[i + 3]= v[i] # max
+
+ # Get the max axis x/y/z
+ max_axis = max(BOUNDS_3DS[3]-BOUNDS_3DS[0], BOUNDS_3DS[4]-BOUNDS_3DS[1], BOUNDS_3DS[5]-BOUNDS_3DS[2])
+ # print max_axis
+ if max_axis < 1 << 30: # Should never be false but just make sure.
+
+ # Get a new scale factor if set as an option
+ SCALE = 1.0
+ while (max_axis * SCALE) > IMPORT_CONSTRAIN_BOUNDS:
+ SCALE/=10
+
+ # SCALE Matrix
+ SCALE_MAT = Mathutils.Matrix([SCALE,0,0,0],[0,SCALE,0,0],[0,0,SCALE,0],[0,0,0,1])
+# SCALE_MAT = Blender.Mathutils.Matrix([SCALE,0,0,0],[0,SCALE,0,0],[0,0,SCALE,0],[0,0,0,1])
+
+ for ob in importedObjects:
+ ob.setMatrix(ob.matrixWorld * SCALE_MAT)
+
+ # Done constraining to bounds.
+
+ # Select all new objects.
+ print('finished importing: "%s" in %.4f sec.' % (filename, (time.clock()-time1)))
+# print('finished importing: "%s" in %.4f sec.' % (filename, (Blender.sys.time()-time1)))
+ file.close()
+# Blender.Window.WaitCursor(0)
+
+
+DEBUG = False
+# if __name__=='__main__' and not DEBUG:
+# if calcsize == None:
+# Blender.Draw.PupMenu('Error%t|a full python installation not found')
+# else:
+# Blender.Window.FileSelector(load_3ds, 'Import 3DS', '*.3ds')
+
+# For testing compatibility
+#load_3ds('/metavr/convert/vehicle/truck_002/TruckTanker1.3DS', False)
+#load_3ds('/metavr/archive/convert/old/arranged_3ds_to_hpx-2/only-need-engine-trains/Engine2.3DS', False)
+'''
+
+else:
+ import os
+ # DEBUG ONLY
+ TIME = Blender.sys.time()
+ import os
+ print 'Searching for files'
+ os.system('find /metavr/ -iname "*.3ds" > /tmp/temp3ds_list')
+ # os.system('find /storage/ -iname "*.3ds" > /tmp/temp3ds_list')
+ print '...Done'
+ file = open('/tmp/temp3ds_list', 'r')
+ lines = file.readlines()
+ file.close()
+ # sort by filesize for faster testing
+ lines_size = [(os.path.getsize(f[:-1]), f[:-1]) for f in lines]
+ lines_size.sort()
+ lines = [f[1] for f in lines_size]
+
+
+ def between(v,a,b):
+ if v <= max(a,b) and v >= min(a,b):
+ return True
+ return False
+
+ for i, _3ds in enumerate(lines):
+ if between(i, 650,800):
+ #_3ds= _3ds[:-1]
+ print 'Importing', _3ds, '\nNUMBER', i, 'of', len(lines)
+ _3ds_file= _3ds.split('/')[-1].split('\\')[-1]
+ newScn = Blender.Scene.New(_3ds_file)
+ newScn.makeCurrent()
+ load_3ds(_3ds, False)
+
+ print 'TOTAL TIME: %.6f' % (Blender.sys.time() - TIME)
+
+'''
+
+class IMPORT_OT_3ds(bpy.types.Operator):
+ '''
+ 3DS Importer
+ '''
+ __idname__ = "import.3ds"
+ __label__ = 'Import 3DS'
+
+ # List of operator properties, the attributes will be assigned
+ # to the class instance from the operator settings before calling.
+
+ __props__ = [
+ bpy.props.StringProperty(attr="path", name="File Path", description="File path used for importing the 3DS file", maxlen= 1024, default= ""),
+
+# bpy.props.FloatProperty(attr="size_constraint", name="Size Constraint", description="Scale the model by 10 until it reaches the size constraint. Zero disables.", min=0.0, max=1000.0, soft_min=0.0, soft_max=1000.0, default=10.0),
+# bpy.props.BoolProperty(attr="search_images", name="Image Search", description="Search subdirectories for any associated images (Warning, may be slow)", default=True),
+# bpy.props.BoolProperty(attr="apply_matrix", name="Transform Fix", description="Workaround for object transformations importing incorrectly", default=False),
+ ]
+
+ def execute(self, context):
+ load_3ds(self.path, context, 0.0, False, False)
+ return ('FINISHED',)
+
+ def invoke(self, context, event):
+ wm = context.manager
+ wm.add_fileselect(self.__operator__)
+ return ('RUNNING_MODAL',)
+ '''
+ def poll(self, context):
+ print("Poll")
+ return context.active_object != None'''
+
+bpy.ops.add(IMPORT_OT_3ds)
+
+# NOTES:
+# why add 1 extra vertex? and remove it when done?
+# disabled scaling to size, this requires exposing bb (easy) and understanding how it works (needs some time)
diff --git a/release/scripts/io/import_obj.py b/release/scripts/io/import_obj.py
new file mode 100644
index 00000000000..a762005ae7d
--- /dev/null
+++ b/release/scripts/io/import_obj.py
@@ -0,0 +1,1638 @@
+#!BPY
+
+"""
+Name: 'Wavefront (.obj)...'
+Blender: 249
+Group: 'Import'
+Tooltip: 'Load a Wavefront OBJ File, Shift: batch import all dir.'
+"""
+
+__author__= "Campbell Barton", "Jiri Hnidek", "Paolo Ciccone"
+__url__= ['http://wiki.blender.org/index.php/Scripts/Manual/Import/wavefront_obj', 'blender.org', 'blenderartists.org']
+__version__= "2.11"
+
+__bpydoc__= """\
+This script imports Wavefront OBJ files into Blender.
+
+Usage:
+Run this script from "File->Import" menu and then load the desired OBJ file.
+Note: this loads mesh objects and materials only; NURBS and curves are not supported.
+"""
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# Script copyright (C) Campbell J Barton 2007
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+# --------------------------------------------------------------------------
+
+import os
+import time
+import bpy
+import Mathutils
+import Geometry
+
+# from Blender import Mesh, Draw, Window, Texture, Material, sys
+# # import BPyMesh
+# import BPyImage
+# import BPyMessages
+
+# try: import os
+# except: os= False
+
+# Generic path functions
+def stripFile(path):
+ '''Return the directory where the file is'''
+ lastSlash= max(path.rfind('\\'), path.rfind('/'))
+ if lastSlash != -1:
+ path= path[:lastSlash]
+ return '%s%s' % (path, os.sep)
+# return '%s%s' % (path, sys.sep)
+
+def stripPath(path):
+ '''Strips the slashes from the back of a string'''
+ return path.split('/')[-1].split('\\')[-1]
+
+def stripExt(name): # name is a string
+ '''Strips the extension off the name'''
+ index= name.rfind('.')
+ if index != -1:
+ return name[ : index ]
+ else:
+ return name
+# end path funcs
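+
+# Doctest-style illustration of the helpers above (hypothetical paths):
+# stripPath('C:\\models\\car.obj') -> 'car.obj'
+# stripExt('car.obj') -> 'car'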
+
+def unpack_list(list_of_tuples):
+ l = []
+ for t in list_of_tuples:
+ l.extend(t)
+ return l
+
+# same as above except that it adds 0 for triangle faces
+def unpack_face_list(list_of_tuples):
+ l = []
+ for t in list_of_tuples:
+ face = [i for i in t]
+
+ if len(face) != 3 and len(face) != 4:
+ raise RuntimeError("{0} vertices in face.".format(len(face)))
+
+ # rotate indices if the 4th is 0
+ if len(face) == 4 and face[3] == 0:
+ face = [face[3], face[0], face[1], face[2]]
+
+ if len(face) == 3:
+ face.append(0)
+
+ l.extend(face)
+
+ return l
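+
+# A quick sketch of what unpack_face_list produces: triangles are padded
+# with a trailing 0, and a quad whose 4th index is 0 is rotated so the 0
+# never sits in the last slot:
+# unpack_face_list([(1, 2, 3), (4, 5, 6, 0)]) -> [1, 2, 3, 0, 0, 4, 5, 6]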
+
+def BPyMesh_ngon(from_data, indices, PREF_FIX_LOOPS= True):
+ '''
+ Takes a polyline of indices (fgon)
+ and returns a list of face index lists.
+ Designed for importers that need indices to create an fgon from existing verts.
+
+ from_data: either a mesh, or a list/tuple of vectors.
+ indices: a list of indices to use; this list is the ordered closed polyline to fill, and can be a subset of the data given.
+ PREF_FIX_LOOPS: if enabled, polylines that loop back on themselves to form multiple polylines are dealt with correctly.
+ '''
+
+ if not set: # Need sets for this, otherwise do a normal fill.
+ PREF_FIX_LOOPS= False
+
+ Vector= Mathutils.Vector
+ if not indices:
+ return []
+
+ # return []
+ def rvec(co): return round(co.x, 6), round(co.y, 6), round(co.z, 6)
+ def mlen(co): return abs(co[0])+abs(co[1])+abs(co[2]) # Manhattan length of a vector, faster than the true length
+
+ def vert_treplet(v, i):
+ return v, rvec(v), i, mlen(v)
+
+ def ed_key_mlen(v1, v2):
+ if v1[3] > v2[3]:
+ return v2[1], v1[1]
+ else:
+ return v1[1], v2[1]
+
+
+ if not PREF_FIX_LOOPS:
+ '''
+ Normal single concave loop filling
+ '''
+ if type(from_data) in (tuple, list):
+ verts= [Vector(from_data[i]) for ii, i in enumerate(indices)]
+ else:
+ verts= [from_data.verts[i].co for ii, i in enumerate(indices)]
+
+ for i in range(len(verts)-1, 0, -1): # same as reversed(xrange(1, len(verts))):
+ if verts[i][1]==verts[i-1][0]:
+ verts.pop(i-1)
+
+ fill= Geometry.PolyFill([verts])
+
+ else:
+ '''
+ Separate this loop into multiple loops by finding edges that are used twice.
+ This is used a lot by LightWave LWO files.
+ '''
+
+ if type(from_data) in (tuple, list):
+ verts= [vert_treplet(Vector(from_data[i]), ii) for ii, i in enumerate(indices)]
+ else:
+ verts= [vert_treplet(from_data.verts[i].co, ii) for ii, i in enumerate(indices)]
+
+ edges= [(i, i-1) for i in range(len(verts))]
+ if edges:
+ edges[0]= (0,len(verts)-1)
+
+ if not verts:
+ return []
+
+
+ edges_used= set()
+ edges_doubles= set()
+ # We need to check if any edges are used twice location based.
+ for ed in edges:
+ edkey= ed_key_mlen(verts[ed[0]], verts[ed[1]])
+ if edkey in edges_used:
+ edges_doubles.add(edkey)
+ else:
+ edges_used.add(edkey)
+
+ # Store a list of unconnected loop segments split by double edges.
+ # will join later
+ loop_segments= []
+
+ v_prev= verts[0]
+ context_loop= [v_prev]
+ loop_segments= [context_loop]
+
+ for v in verts:
+ if v!=v_prev:
+ # Are we crossing an edge we removed?
+ if ed_key_mlen(v, v_prev) in edges_doubles:
+ context_loop= [v]
+ loop_segments.append(context_loop)
+ else:
+ if context_loop and context_loop[-1][1]==v[1]:
+ #raise "as"
+ pass
+ else:
+ context_loop.append(v)
+
+ v_prev= v
+ # Now join loop segments
+
+ def join_seg(s1,s2):
+ if s2[-1][1]==s1[0][1]: #
+ s1,s2= s2,s1
+ elif s1[-1][1]==s2[0][1]:
+ pass
+ else:
+ return False
+
+ # If we're still here, s1 and s2 are two segments of the same polyline
+ s1.pop() # remove the last vert from s1
+ s1.extend(s2) # add segment 2 to segment 1
+
+ if s1[0][1]==s1[-1][1]: # remove doubled endpoint
+ s1.pop()
+
+ s2[:]= [] # Empty this segment s2 so we don't use it again.
+ return True
+
+ joining_segments= True
+ while joining_segments:
+ joining_segments= False
+ segcount= len(loop_segments)
+
+ for j in range(segcount-1, -1, -1): #reversed(range(segcount)):
+ seg_j= loop_segments[j]
+ if seg_j:
+ for k in range(j-1, -1, -1): # reversed(range(j)):
+ if not seg_j:
+ break
+ seg_k= loop_segments[k]
+
+ if seg_k and join_seg(seg_j, seg_k):
+ joining_segments= True
+
+ loop_list= loop_segments
+
+ for verts in loop_list:
+ while verts and verts[0][1]==verts[-1][1]:
+ verts.pop()
+
+ loop_list= [verts for verts in loop_list if len(verts)>2]
+ # DONE DEALING WITH LOOP FIXING
+
+
+ # vert mapping
+ vert_map= [None]*len(indices)
+ ii=0
+ for verts in loop_list:
+ if len(verts)>2:
+ for i, vert in enumerate(verts):
+ vert_map[i+ii]= vert[2]
+ ii+=len(verts)
+
+ fill= Geometry.PolyFill([ [v[0] for v in loop] for loop in loop_list ])
+ #draw_loops(loop_list)
+ #raise 'done loop'
+ # map to original indices
+ fill= [[vert_map[i] for i in reversed(f)] for f in fill]
+
+
+ if not fill:
+ print('Warning: cannot scanfill, falling back on a triangle fan.')
+ fill= [ [0, i-1, i] for i in range(2, len(indices)) ]
+ else:
+ # Use real scanfill.
+ # See if it's flipped the wrong way.
+ flip= None
+ for fi in fill:
+ if flip != None:
+ break
+ for i, vi in enumerate(fi):
+ if vi==0 and fi[i-1]==1:
+ flip= False
+ break
+ elif vi==1 and fi[i-1]==0:
+ flip= True
+ break
+
+ if not flip:
+ for i, fi in enumerate(fill):
+ fill[i]= tuple([ii for ii in reversed(fi)])
+
+ return fill
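+
+# A minimal usage sketch with hypothetical data: filling a closed square
+# polyline. The exact triangulation comes from Geometry.PolyFill, so the
+# result is typically two tris such as [(0, 1, 2), (0, 2, 3)]:
+# BPyMesh_ngon([(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0)], [0, 1, 2, 3])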
+
+def line_value(line_split):
+ '''
+ Returns one string representing the value for this line.
+ None is returned if there's only one word.
+ '''
+ length= len(line_split)
+ if length == 1:
+ return None
+
+ elif length == 2:
+ return line_split[1]
+
+ elif length > 2:
+ return ' '.join( line_split[1:] )
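+
+# e.g. line_value('usemtl Steel Blue'.split()) -> 'Steel Blue' (hypothetical
+# material name), and line_value(['usemtl']) -> None.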
+
+# limited replacement for BPyImage.comprehensiveImageLoad
+def load_image(imagepath, dirname):
+
+ if os.path.exists(imagepath):
+ return bpy.data.add_image(imagepath)
+
+ variants = [os.path.join(dirname, imagepath), os.path.join(dirname, os.path.basename(imagepath))]
+
+ for path in variants:
+ if os.path.exists(path):
+ return bpy.data.add_image(path)
+ else:
+ print(path, "doesn't exist")
+
+ # TODO comprehensiveImageLoad also searched in bpy.config.textureDir
+ return None
+
+def obj_image_load(imagepath, DIR, IMAGE_SEARCH):
+
+ if '_' in imagepath:
+ image= load_image(imagepath.replace('_', ' '), DIR)
+ if image: return image
+
+ return load_image(imagepath, DIR)
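+
+# e.g. an OBJ written by Max may reference 'Stone_Wall.jpg' (hypothetical
+# name) for a file really called 'Stone Wall.jpg', so the space variant is
+# tried first and the literal path second.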
+
+# def obj_image_load(imagepath, DIR, IMAGE_SEARCH):
+# '''
+# Mainly uses comprehensiveImageLoad
+# but tries to replace '_' with ' ' because Max's exporter replaces spaces with underscores.
+# '''
+
+# if '_' in imagepath:
+# image= BPyImage.comprehensiveImageLoad(imagepath, DIR, PLACE_HOLDER= False, RECURSIVE= IMAGE_SEARCH)
+# if image: return image
+# # Did the exporter rename the image?
+# image= BPyImage.comprehensiveImageLoad(imagepath.replace('_', ' '), DIR, PLACE_HOLDER= False, RECURSIVE= IMAGE_SEARCH)
+# if image: return image
+
+# # Return an image, placeholder if it doesn't exist
+# image= BPyImage.comprehensiveImageLoad(imagepath, DIR, PLACE_HOLDER= True, RECURSIVE= IMAGE_SEARCH)
+# return image
+
+
+def create_materials(filepath, material_libs, unique_materials, unique_material_images, IMAGE_SEARCH):
+ '''
+ Create all the used materials in this obj,
+ assign colors and images to the materials from all referenced material libs
+ '''
+ DIR= stripFile(filepath)
+
+ #==================================================================================#
+ # This function sets textures defined in .mtl file #
+ #==================================================================================#
+ def load_material_image(blender_material, context_material_name, imagepath, type):
+
+ texture= bpy.data.add_texture(type)
+ texture.type= 'IMAGE'
+# texture= bpy.data.textures.new(type)
+# texture.setType('Image')
+
+ # Absolute path - c:\.. etc would work here
+ image= obj_image_load(imagepath, DIR, IMAGE_SEARCH)
+ has_data = image.has_data if image else False
+
+ if image:
+ texture.image = image
+
+ # Adds textures for materials (rendering)
+ if type == 'Kd':
+ if has_data and image.depth == 32:
+ # Image has alpha
+
+ # XXX bitmask won't work?
+ blender_material.add_texture(texture, "UV", ("COLOR", "ALPHA"))
+ texture.mipmap = True
+ texture.interpolation = True
+ texture.use_alpha = True
+ blender_material.z_transparency = True
+ blender_material.alpha = 0.0
+
+# blender_material.setTexture(0, texture, Texture.TexCo.UV, Texture.MapTo.COL | Texture.MapTo.ALPHA)
+# texture.setImageFlags('MipMap', 'InterPol', 'UseAlpha')
+# blender_material.mode |= Material.Modes.ZTRANSP
+# blender_material.alpha = 0.0
+ else:
+ blender_material.add_texture(texture, "UV", "COLOR")
+# blender_material.setTexture(0, texture, Texture.TexCo.UV, Texture.MapTo.COL)
+
+ # adds textures to faces (Textured/Alt-Z mode)
+ # Only apply the diffuse texture to the face if the image has not been set with the inline usemat func.
+ unique_material_images[context_material_name]= image, has_data # set the texface image
+
+ elif type == 'Ka':
+ blender_material.add_texture(texture, "UV", "AMBIENT")
+# blender_material.setTexture(1, texture, Texture.TexCo.UV, Texture.MapTo.CMIR) # TODO- Add AMB to BPY API
+
+ elif type == 'Ks':
+ blender_material.add_texture(texture, "UV", "SPECULARITY")
+# blender_material.setTexture(2, texture, Texture.TexCo.UV, Texture.MapTo.SPEC)
+
+ elif type == 'Bump':
+ blender_material.add_texture(texture, "UV", "NORMAL")
+# blender_material.setTexture(3, texture, Texture.TexCo.UV, Texture.MapTo.NOR)
+ elif type == 'D':
+ blender_material.add_texture(texture, "UV", "ALPHA")
+ blender_material.z_transparency = True
+ blender_material.alpha = 0.0
+# blender_material.setTexture(4, texture, Texture.TexCo.UV, Texture.MapTo.ALPHA)
+# blender_material.mode |= Material.Modes.ZTRANSP
+# blender_material.alpha = 0.0
+ # TODO: unset diffuse material alpha if it has an alpha channel
+
+ elif type == 'refl':
+ blender_material.add_texture(texture, "UV", "REFLECTION")
+# blender_material.setTexture(5, texture, Texture.TexCo.UV, Texture.MapTo.REF)
+
+
+ # Add an MTL with the same name as the obj if no MTLs are specified.
+ temp_mtl= stripExt(stripPath(filepath))+ '.mtl'
+
+ if os.path.exists(DIR + temp_mtl) and temp_mtl not in material_libs:
+# if sys.exists(DIR + temp_mtl) and temp_mtl not in material_libs:
+ material_libs.append( temp_mtl )
+ del temp_mtl
+
+ #Create new materials
+ for name in unique_materials: # .keys()
+ if name != None:
+ unique_materials[name]= bpy.data.add_material(name)
+# unique_materials[name]= bpy.data.materials.new(name)
+ unique_material_images[name]= None, False # assign None to all material images to start with, add to later.
+
+ unique_materials[None]= None
+ unique_material_images[None]= None, False
+
+ for libname in material_libs:
+ mtlpath= DIR + libname
+ if not os.path.exists(mtlpath):
+# if not sys.exists(mtlpath):
+ #print '\tError Missing MTL: "%s"' % mtlpath
+ pass
+ else:
+ #print '\t\tloading mtl: "%s"' % mtlpath
+ context_material= None
+ mtl= open(mtlpath, 'rU')
+ for line in mtl: #.xreadlines():
+ if line.startswith('newmtl'):
+ context_material_name= line_value(line.split())
+ if context_material_name in unique_materials:
+ context_material = unique_materials[ context_material_name ]
+ else:
+ context_material = None
+
+ elif context_material:
+ # we need to make a material to assign properties to it.
+ line_split= line.split()
+ line_lower= line.lower().lstrip()
+ if line_lower.startswith('ka'):
+ context_material.mirror_color = (float(line_split[1]), float(line_split[2]), float(line_split[3]))
+# context_material.setMirCol((float(line_split[1]), float(line_split[2]), float(line_split[3])))
+ elif line_lower.startswith('kd'):
+ context_material.diffuse_color = (float(line_split[1]), float(line_split[2]), float(line_split[3]))
+# context_material.setRGBCol((float(line_split[1]), float(line_split[2]), float(line_split[3])))
+ elif line_lower.startswith('ks'):
+ context_material.specular_color = (float(line_split[1]), float(line_split[2]), float(line_split[3]))
+# context_material.setSpecCol((float(line_split[1]), float(line_split[2]), float(line_split[3])))
+ elif line_lower.startswith('ns'):
+ context_material.specular_hardness = int((float(line_split[1])*0.51))
+# context_material.setHardness( int((float(line_split[1])*0.51)) )
+ elif line_lower.startswith('ni'): # Refraction index
+ context_material.ior = max(1, min(float(line_split[1]), 3))
+# context_material.setIOR( max(1, min(float(line_split[1]), 3))) # Between 1 and 3
+ elif line_lower.startswith('d') or line_lower.startswith('tr'):
+ context_material.alpha = float(line_split[1])
+# context_material.setAlpha(float(line_split[1]))
+ elif line_lower.startswith('map_ka'):
+ img_filepath= line_value(line.split())
+ if img_filepath:
+ load_material_image(context_material, context_material_name, img_filepath, 'Ka')
+ elif line_lower.startswith('map_ks'):
+ img_filepath= line_value(line.split())
+ if img_filepath:
+ load_material_image(context_material, context_material_name, img_filepath, 'Ks')
+ elif line_lower.startswith('map_kd'):
+ img_filepath= line_value(line.split())
+ if img_filepath:
+ load_material_image(context_material, context_material_name, img_filepath, 'Kd')
+ elif line_lower.startswith('map_bump'):
+ img_filepath= line_value(line.split())
+ if img_filepath:
+ load_material_image(context_material, context_material_name, img_filepath, 'Bump')
+ elif line_lower.startswith('map_d') or line_lower.startswith('map_tr'): # Alpha map - Dissolve
+ img_filepath= line_value(line.split())
+ if img_filepath:
+ load_material_image(context_material, context_material_name, img_filepath, 'D')
+
+ elif line_lower.startswith('refl'): # Reflectionmap
+ img_filepath= line_value(line.split())
+ if img_filepath:
+ load_material_image(context_material, context_material_name, img_filepath, 'refl')
+ mtl.close()
+
+
+
+
+def split_mesh(verts_loc, faces, unique_materials, filepath, SPLIT_OB_OR_GROUP, SPLIT_MATERIALS):
+ '''
+ Takes vert_loc and faces, and seperates into multiple sets of
+ (verts_loc, faces, unique_materials, dataname)
+ This is done so objects do not overload the 16 material limit.
+ '''
+
+ filename = stripExt(stripPath(filepath))
+
+ if not SPLIT_OB_OR_GROUP and not SPLIT_MATERIALS:
+ # use the filename for the object name since we aren't chopping up the mesh.
+ return [(verts_loc, faces, unique_materials, filename)]
+
+
+ def key_to_name(key):
+ # if the key is a tuple, join it to make a string
+ if type(key) == tuple:
+ return '%s_%s' % key
+ elif not key:
+ return filename # assume it's a string; make sure this stays true if the splitting code is changed
+ else:
+ return key
+
+ # Return a key that makes the faces unique.
+ if SPLIT_OB_OR_GROUP and not SPLIT_MATERIALS:
+ def face_key(face):
+ return face[4] # object
+
+ elif not SPLIT_OB_OR_GROUP and SPLIT_MATERIALS:
+ def face_key(face):
+ return face[2] # material
+
+ else: # Both
+ def face_key(face):
+ return face[4], face[2] # object,material
+
+
+ face_split_dict= {}
+
+ oldkey= -1 # initialize to a value that will never match the key
+
+ for face in faces:
+
+ key= face_key(face)
+
+ if oldkey != key:
+ # Check the key has changed.
+ try:
+ verts_split, faces_split, unique_materials_split, vert_remap= face_split_dict[key]
+ except KeyError:
+ faces_split= []
+ verts_split= []
+ unique_materials_split= {}
+ vert_remap= [-1]*len(verts_loc)
+
+ face_split_dict[key]= (verts_split, faces_split, unique_materials_split, vert_remap)
+
+ oldkey= key
+
+ face_vert_loc_indicies= face[0]
+
+ # Remap verts to new vert list and add where needed
+ for enum, i in enumerate(face_vert_loc_indicies):
+ if vert_remap[i] == -1:
+ new_index= len(verts_split)
+ vert_remap[i]= new_index # set the new remapped index so we only add once and can reference next time.
+ face_vert_loc_indicies[enum] = new_index # remap to the local index
+ verts_split.append( verts_loc[i] ) # add the vert to the local verts
+
+ else:
+ face_vert_loc_indicies[enum] = vert_remap[i] # remap to the local index
+
+ matname= face[2]
+ if matname and matname not in unique_materials_split:
+ unique_materials_split[matname] = unique_materials[matname]
+
+ faces_split.append(face)
+
+
+ # remove one of the items and reorder
+ return [(value[0], value[1], value[2], key_to_name(key)) for key, value in list(face_split_dict.items())]
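+
+# A sketch of the contract, assuming a hypothetical OBJ with two 'o'
+# objects and SPLIT_OB_OR_GROUP enabled: split_mesh returns two
+# (verts_loc, faces, unique_materials, dataname) tuples, each with its
+# verts renumbered to local indices via vert_remap.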
+
+
+def create_mesh(scn, new_objects, has_ngons, CREATE_FGONS, CREATE_EDGES, verts_loc, verts_tex, faces, unique_materials, unique_material_images, unique_smooth_groups, vertex_groups, dataname):
+ '''
+ Takes all the data gathered and generates a mesh, adding the new object to new_objects
+ deals with fgons, sharp edges and assigning materials
+ '''
+ if not has_ngons:
+ CREATE_FGONS= False
+
+ if unique_smooth_groups:
+ sharp_edges= {}
+ smooth_group_users= dict([ (context_smooth_group, {}) for context_smooth_group in list(unique_smooth_groups.keys()) ])
+ context_smooth_group_old= -1
+
+ # Split fgons into tri's
+ fgon_edges= {} # Used for storing fgon keys
+ if CREATE_EDGES:
+ edges= []
+
+ context_object= None
+
+ # reverse loop through face indices
+ for f_idx in range(len(faces)-1, -1, -1):
+
+ face_vert_loc_indicies,\
+ face_vert_tex_indicies,\
+ context_material,\
+ context_smooth_group,\
+ context_object= faces[f_idx]
+
+ len_face_vert_loc_indicies = len(face_vert_loc_indicies)
+
+ if len_face_vert_loc_indicies==1:
+ faces.pop(f_idx) # can't add single-vert faces
+
+ elif not face_vert_tex_indicies or len_face_vert_loc_indicies == 2: # faces that have no texture coords are lines
+ if CREATE_EDGES:
+ # generators are better in python 2.4+ but can't be used in 2.3
+ # edges.extend( (face_vert_loc_indicies[i], face_vert_loc_indicies[i+1]) for i in xrange(len_face_vert_loc_indicies-1) )
+ edges.extend( [(face_vert_loc_indicies[i], face_vert_loc_indicies[i+1]) for i in range(len_face_vert_loc_indicies-1)] )
+
+ faces.pop(f_idx)
+ else:
+
+ # Smooth Group
+ if unique_smooth_groups and context_smooth_group:
+ # Is part of a smooth group and is a face
+ if context_smooth_group_old is not context_smooth_group:
+ edge_dict= smooth_group_users[context_smooth_group]
+ context_smooth_group_old= context_smooth_group
+
+ for i in range(len_face_vert_loc_indicies):
+ i1= face_vert_loc_indicies[i]
+ i2= face_vert_loc_indicies[i-1]
+ if i1>i2: i1,i2= i2,i1
+
+ try:
+ edge_dict[i1,i2]+= 1
+ except KeyError:
+ edge_dict[i1,i2]= 1
+
+ # FGons into triangles
+ if has_ngons and len_face_vert_loc_indicies > 4:
+
+ ngon_face_indices= BPyMesh_ngon(verts_loc, face_vert_loc_indicies)
+ faces.extend(\
+ [(\
+ [face_vert_loc_indicies[ngon[0]], face_vert_loc_indicies[ngon[1]], face_vert_loc_indicies[ngon[2]] ],\
+ [face_vert_tex_indicies[ngon[0]], face_vert_tex_indicies[ngon[1]], face_vert_tex_indicies[ngon[2]] ],\
+ context_material,\
+ context_smooth_group,\
+ context_object)\
+ for ngon in ngon_face_indices]\
+ )
+
+ # edges to make fgons
+ if CREATE_FGONS:
+ edge_users= {}
+ for ngon in ngon_face_indices:
+ for i in (0,1,2):
+ i1= face_vert_loc_indicies[ngon[i ]]
+ i2= face_vert_loc_indicies[ngon[i-1]]
+ if i1>i2: i1,i2= i2,i1
+
+ try:
+ edge_users[i1,i2]+=1
+ except KeyError:
+ edge_users[i1,i2]= 1
+
+ for key, users in edge_users.items():
+ if users>1:
+ fgon_edges[key]= None
+
+ # remove all after 3, means we dont have to pop this one.
+ faces.pop(f_idx)
+
+
+ # Build sharp edges
+ if unique_smooth_groups:
+ for edge_dict in list(smooth_group_users.values()):
+ for key, users in list(edge_dict.items()):
+ if users==1: # This edge is on the boundary of a group
+ sharp_edges[key]= None
+
+
+ # map the material names to an index
+ material_mapping= dict([(name, i) for i, name in enumerate(unique_materials)]) # enumerate over unique_materials keys()
+
+ materials= [None] * len(unique_materials)
+
+ for name, index in list(material_mapping.items()):
+ materials[index]= unique_materials[name]
+
+ me= bpy.data.add_mesh(dataname)
+# me= bpy.data.meshes.new(dataname)
+
+ # make sure the list isn't too big
+ for material in materials[0:16]:
+ me.add_material(material)
+# me.materials= materials[0:16] # make sure the list isnt too big.
+ #me.verts.extend([(0,0,0)]) # dummy vert
+
+ me.add_geometry(len(verts_loc), 0, len(faces))
+
+ # verts_loc is a list of (x, y, z) tuples
+ me.verts.foreach_set("co", unpack_list(verts_loc))
+# me.verts.extend(verts_loc)
+
+ # faces is a list of (vert_indices, texco_indices, ...) tuples
+ # XXX faces should contain either 3 or 4 verts
+ # XXX no check for valid face indices
+ me.faces.foreach_set("verts_raw", unpack_face_list([f[0] for f in faces]))
+# face_mapping= me.faces.extend([f[0] for f in faces], indexList=True)
+
+ if verts_tex and me.faces:
+ me.add_uv_texture()
+# me.faceUV= 1
+ # TEXMODE= Mesh.FaceModes['TEX']
+
+ context_material_old= -1 # avoid a dict lookup
+ mat= 0 # rare case it may be un-initialized.
+ me_faces= me.faces
+# ALPHA= Mesh.FaceTranspModes.ALPHA
+
+ for i, face in enumerate(faces):
+ if len(face[0]) < 2:
+ pass #raise "bad face"
+ elif len(face[0])==2:
+ if CREATE_EDGES:
+ edges.append(face[0])
+ else:
+# face_index_map= face_mapping[i]
+
+ # since we use foreach_set to add faces, all of them are added
+ if 1:
+# if face_index_map!=None: # None means the face wasnt added
+
+ blender_face = me.faces[i]
+# blender_face= me_faces[face_index_map]
+
+ face_vert_loc_indicies,\
+ face_vert_tex_indicies,\
+ context_material,\
+ context_smooth_group,\
+ context_object= face
+
+
+
+ if context_smooth_group:
+ blender_face.smooth= True
+
+ if context_material:
+ if context_material_old is not context_material:
+ mat= material_mapping[context_material]
+ if mat>15:
+ mat= 15
+ context_material_old= context_material
+
+ blender_face.material_index= mat
+# blender_face.mat= mat
+
+
+ if verts_tex:
+
+ blender_tface= me.uv_textures[0].data[i]
+
+ if context_material:
+ image, has_data= unique_material_images[context_material]
+ if image: # Can be None if the material doesn't have an image.
+ blender_tface.image= image
+# blender_face.image= image
+ if has_data:
+# if has_data and image.depth == 32:
+ blender_tface.transp = 'ALPHA'
+# blender_face.transp |= ALPHA
+
+ # BUG - evil eekadoodle problem where faces whose vertex index 0 lands in slot 3 or 4 get shuffled.
+ if len(face_vert_loc_indicies)==4:
+ if face_vert_loc_indicies[2]==0 or face_vert_loc_indicies[3]==0:
+ face_vert_tex_indicies= face_vert_tex_indicies[2], face_vert_tex_indicies[3], face_vert_tex_indicies[0], face_vert_tex_indicies[1]
+ else: # length of 3
+ if face_vert_loc_indicies[2]==0:
+ face_vert_tex_indicies= face_vert_tex_indicies[1], face_vert_tex_indicies[2], face_vert_tex_indicies[0]
+ # END EEEKADOODLE FIX
+
+ # assign material, uv's and image
+ blender_tface.uv1= verts_tex[face_vert_tex_indicies[0]]
+ blender_tface.uv2= verts_tex[face_vert_tex_indicies[1]]
+ blender_tface.uv3= verts_tex[face_vert_tex_indicies[2]]
+
+ if blender_face.verts[3] != 0:
+ blender_tface.uv4= verts_tex[face_vert_tex_indicies[3]]
+
+# for ii, uv in enumerate(blender_face.uv):
+# uv.x, uv.y= verts_tex[face_vert_tex_indicies[ii]]
+ del me_faces
+# del ALPHA
+
+ if CREATE_EDGES:
+
+ me.add_geometry(0, len(edges), 0)
+
+ # edges should be a list of (a, b) tuples
+ me.edges.foreach_set("verts", unpack_list(edges))
+# me_edges.extend( edges )
+
+# del me_edges
+
+ # Add edge faces.
+# me_edges= me.edges
+
+ def edges_match(e1, e2):
+ return (e1[0] == e2[0] and e1[1] == e2[1]) or (e1[0] == e2[1] and e1[1] == e2[0])
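+ # e.g. edges_match((3, 7), (7, 3)) -> True; edge equality ignores
+ # vertex order.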
+
+ # XXX slow
+# if CREATE_FGONS and fgon_edges:
+# for fgon_edge in fgon_edges.keys():
+# for ed in me.edges:
+# if edges_match(fgon_edge, ed.verts):
+# ed.fgon = True
+
+# if CREATE_FGONS and fgon_edges:
+# FGON= Mesh.EdgeFlags.FGON
+# for ed in me.findEdges( fgon_edges.keys() ):
+# if ed!=None:
+# me_edges[ed].flag |= FGON
+# del FGON
+
+ # XXX slow
+# if unique_smooth_groups and sharp_edges:
+# for sharp_edge in sharp_edges.keys():
+# for ed in me.edges:
+# if edges_match(sharp_edge, ed.verts):
+# ed.sharp = True
+
+# if unique_smooth_groups and sharp_edges:
+# SHARP= Mesh.EdgeFlags.SHARP
+# for ed in me.findEdges( sharp_edges.keys() ):
+# if ed!=None:
+# me_edges[ed].flag |= SHARP
+# del SHARP
+
+ me.update()
+# me.calcNormals()
+
+ ob= bpy.data.add_object("MESH", "Mesh")
+ ob.data= me
+ scn.add_object(ob)
+# ob= scn.objects.new(me)
+ new_objects.append(ob)
+
+ # Create the vertex groups. No need to pass the flag here since we test
+ # the contents of vertex_groups: if the user chose NOT to import vertex
+ # groups, the dict is empty and the loop below never runs.
+ for group_name, group_indicies in vertex_groups.items():
+ group= ob.add_vertex_group(group_name)
+# me.addVertGroup(group_name)
+ for vertex_index in group_indicies:
+ ob.add_vertex_to_group(vertex_index, group, 1.0, 'REPLACE')
+# me.assignVertsToGroup(group_name, group_indicies, 1.00, Mesh.AssignModes.REPLACE)
+
+
+def create_nurbs(scn, context_nurbs, vert_loc, new_objects):
+ '''
+ Add a nurbs object to Blender; only one type is supported at the moment
+ '''
+ deg = context_nurbs.get('deg', (3,))
+ curv_range = context_nurbs.get('curv_range', None)
+ curv_idx = context_nurbs.get('curv_idx', [])
+ parm_u = context_nurbs.get('parm_u', [])
+ parm_v = context_nurbs.get('parm_v', [])
+ name = context_nurbs.get('name', 'ObjNurb')
+ cstype = context_nurbs.get('cstype', None)
+
+ if cstype == None:
+ print('\tWarning, cstype not found')
+ return
+ if cstype != 'bspline':
+ print('\tWarning, cstype is not supported (only bspline)')
+ return
+ if not curv_idx:
+ print('\tWarning, curv argument empty or not set')
+ return
+ if len(deg) > 1 or parm_v:
+ print('\tWarning, surfaces not supported')
+ return
+
+ cu = bpy.data.curves.new(name, 'Curve')
+ cu.flag |= 1 # 3D curve
+
+ nu = None
+ for pt in curv_idx:
+
+ pt = vert_loc[pt]
+ pt = (pt[0], pt[1], pt[2], 1.0)
+
+ if nu == None:
+ nu = cu.appendNurb(pt)
+ else:
+ nu.append(pt)
+
+ nu.orderU = deg[0]+1
+
+ # get the endpoint flag from the knot weighting
+ if curv_range and len(parm_u) > deg[0]+1:
+ do_endpoints = True
+ for i in range(deg[0]+1):
+
+ if abs(parm_u[i]-curv_range[0]) > 0.0001:
+ do_endpoints = False
+ break
+
+ if abs(parm_u[-(i+1)]-curv_range[1]) > 0.0001:
+ do_endpoints = False
+ break
+
+ else:
+ do_endpoints = False
+
+ if do_endpoints:
+ nu.flagU |= 2
+
+
+ # close
+ '''
+ do_closed = False
+ if len(parm_u) > deg[0]+1:
+ for i in xrange(deg[0]+1):
+ #print curv_idx[i], curv_idx[-(i+1)]
+
+ if curv_idx[i]==curv_idx[-(i+1)]:
+ do_closed = True
+ break
+
+ if do_closed:
+ nu.flagU |= 1
+ '''
+
+ ob = scn.objects.new(cu)
+ new_objects.append(ob)
+
+
+def strip_slash(line_split):
+ if line_split[-1][-1]== '\\':
+ if len(line_split[-1])==1:
+ line_split.pop() # remove the \ item
+ else:
+ line_split[-1]= line_split[-1][:-1] # remove the \ from the end last number
+ return True
+ return False
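+
+# e.g. given the tokens ['1', '2', '3', '\\'] from a line ending in a lone
+# backslash, strip_slash pops the backslash and returns True, flagging
+# that the element continues on the next line.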
+
+
+
+def get_float_func(filepath):
+ '''
+ find the float function for this obj file
+ - whether to replace commas or not
+ '''
+ file= open(filepath, 'rU')
+ for line in file: #.xreadlines():
+ line = line.lstrip()
+ if line.startswith('v'): # vn vt v
+ if ',' in line:
+ return lambda f: float(f.replace(',', '.'))
+ elif '.' in line:
+ return float
+
+ # in case all vert values were ints
+ return float
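+
+# e.g. for a file whose vertex lines look like 'v 1,0 2,5 0,0' (comma
+# decimals, as some localized exporters write), the returned function maps
+# '1,0' -> 1.0 by swapping the comma for a period; otherwise plain float.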
+
+def load_obj(filepath,
+ context,
+ CLAMP_SIZE= 0.0,
+ CREATE_FGONS= True,
+ CREATE_SMOOTH_GROUPS= True,
+ CREATE_EDGES= True,
+ SPLIT_OBJECTS= True,
+ SPLIT_GROUPS= True,
+ SPLIT_MATERIALS= True,
+ ROTATE_X90= True,
+ IMAGE_SEARCH=True,
+ POLYGROUPS=False):
+ '''
+ Called by the user interface or another script.
+ load_obj(path) - should give acceptable results.
+ This function passes the file and sends the data off
+ to be split into objects and then converted into mesh objects
+ '''
+ print('\nimporting obj "%s"' % filepath)
+
+ if SPLIT_OBJECTS or SPLIT_GROUPS or SPLIT_MATERIALS:
+ POLYGROUPS = False
+
+ time_main= time.time()
+# time_main= sys.time()
+
+ verts_loc= []
+ verts_tex= []
+ faces= [] # tuples of the faces
+ material_libs= [] # filenames of material libs this uses
+ vertex_groups = {} # when POLYGROUPS is true
+
+ # Get the string to float conversion func for this file- is 'float' for almost all files.
+ float_func= get_float_func(filepath)
+
+ # Context variables
+ context_material= None
+ context_smooth_group= None
+ context_object= None
+ context_vgroup = None
+
+ # Nurbs
+ context_nurbs = {}
+ nurbs = []
+ context_parm = '' # used by nurbs too but could be used elsewhere
+
+ has_ngons= False
+ # has_smoothgroups= False - is explicit with len(unique_smooth_groups) being > 0
+
+ # Until we can use sets
+ unique_materials= {}
+ unique_material_images= {}
+ unique_smooth_groups= {}
+ # unique_obects= {} - no use for this variable since the objects are stored in the face.
+
+ # When a face line ends with '\' it continues on the next line.
+ # Since we read the file line by line we can't skip ahead,
+ # so we need to track whether we are inside a multi-line element.
+ context_multi_line= ''
+
+ print('\tparsing obj file "%s"...' % filepath)
+ time_sub= time.time()
+# time_sub= sys.time()
+
+ file= open(filepath, 'rU')
+ for line in file: #.xreadlines():
+ line = line.lstrip() # in rare cases there is whitespace at the start of the line
+
+ if line.startswith('v '):
+ line_split= line.split()
+ # rotate X90: (x,-z,y)
+ verts_loc.append( (float_func(line_split[1]), -float_func(line_split[3]), float_func(line_split[2])) )
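+ # e.g. an OBJ vert 'v 1.0 2.0 3.0' (y up) lands at (1.0, -3.0, 2.0)
+ # in Blender's z-up coordinates.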
+
+ elif line.startswith('vn '):
+ pass
+
+ elif line.startswith('vt '):
+ line_split= line.split()
+ verts_tex.append( (float_func(line_split[1]), float_func(line_split[2])) )
+
+ # Handle face lines (as faces) and the second+ lines of a multi-line face here.
+ # Use 'f' not 'f ' because some (very rare) objs have 'fo ' for faces.
+ elif line.startswith('f') or context_multi_line == 'f':
+
+ if context_multi_line:
+ # reuse face_vert_loc_indicies and face_vert_tex_indicies defined when this face started
+ line_split= line.split()
+
+ else:
+ line_split= line[2:].split()
+ face_vert_loc_indicies= []
+ face_vert_tex_indicies= []
+
+ # Instance a face
+ faces.append((\
+ face_vert_loc_indicies,\
+ face_vert_tex_indicies,\
+ context_material,\
+ context_smooth_group,\
+ context_object\
+ ))
+
+ if strip_slash(line_split):
+ context_multi_line = 'f'
+ else:
+ context_multi_line = ''
+
+ for v in line_split:
+ obj_vert= v.split('/')
+
+ vert_loc_index= int(obj_vert[0])-1
+ # Add the vertex to the current group
+ # *warning*, this won't work for files that have groups defined around verts
+ if POLYGROUPS and context_vgroup:
+ vertex_groups[context_vgroup].append(vert_loc_index)
+
+ # Make relative negative vert indices absolute
+ if vert_loc_index < 0:
+ vert_loc_index= len(verts_loc) + vert_loc_index + 1
+
+ face_vert_loc_indicies.append(vert_loc_index)
+
+ if len(obj_vert)>1 and obj_vert[1]:
+ # formatting for faces with normals and textures is
+ # loc_index/tex_index/nor_index
+
+ vert_tex_index= int(obj_vert[1])-1
+ # Make relative negative vert indices absolute
+ if vert_tex_index < 0:
+ vert_tex_index= len(verts_tex) + vert_tex_index + 1
+
+ face_vert_tex_indicies.append(vert_tex_index)
+ else:
+ # dummy
+ face_vert_tex_indicies.append(0)
+
+ if len(face_vert_loc_indicies) > 4:
+ has_ngons= True
+
+ elif CREATE_EDGES and (line.startswith('l ') or context_multi_line == 'l'):
+ # very similar to the face load function above with some parts removed
+
+ if context_multi_line:
+ # reuse face_vert_loc_indicies and face_vert_tex_indicies defined when this element started
+ line_split= line.split()
+
+ else:
+ line_split= line[2:].split()
+ face_vert_loc_indicies= []
+ face_vert_tex_indicies= []
+
+ # Instance a face
+ faces.append((\
+ face_vert_loc_indicies,\
+ face_vert_tex_indicies,\
+ context_material,\
+ context_smooth_group,\
+ context_object\
+ ))
+
+ if strip_slash(line_split):
+ context_multi_line = 'l'
+ else:
+ context_multi_line = ''
+
+ isline= line.startswith('l')
+
+ for v in line_split:
+ vert_loc_index= int(v)-1
+
+ # Make relative negative vert indices absolute
+ if vert_loc_index < 0:
+ vert_loc_index= len(verts_loc) + vert_loc_index + 1
+
+ face_vert_loc_indicies.append(vert_loc_index)
+
+ elif line.startswith('s'):
+ if CREATE_SMOOTH_GROUPS:
+ context_smooth_group= line_value(line.split())
+ if context_smooth_group=='off':
+ context_smooth_group= None
+ elif context_smooth_group: # is not None
+ unique_smooth_groups[context_smooth_group]= None
+
+ elif line.startswith('o'):
+ if SPLIT_OBJECTS:
+ context_object= line_value(line.split())
+ # unique_objects[context_object]= None
+
+ elif line.startswith('g'):
+ if SPLIT_GROUPS:
+ context_object= line_value(line.split())
+ # print 'context_object', context_object
+ # unique_objects[context_object]= None
+ elif POLYGROUPS:
+ context_vgroup = line_value(line.split())
+ if context_vgroup and context_vgroup != '(null)':
+ vertex_groups.setdefault(context_vgroup, [])
+ else:
+ context_vgroup = None # don't assign a vgroup
+
+ elif line.startswith('usemtl'):
+ context_material= line_value(line.split())
+ unique_materials[context_material]= None
+ elif line.startswith('mtllib'): # material library
+ material_libs.extend( line.split()[1:] ) # can have multiple mtllib filenames per line
+
+
+ # Nurbs support
+ elif line.startswith('cstype '):
+ context_nurbs['cstype']= line_value(line.split()) # 'rat bspline' / 'bspline'
+ elif line.startswith('curv ') or context_multi_line == 'curv':
+ line_split= line.split()
+
+ curv_idx = context_nurbs['curv_idx'] = context_nurbs.get('curv_idx', []) # in case we're multiline
+
+ if not context_multi_line:
+ context_nurbs['curv_range'] = float_func(line_split[1]), float_func(line_split[2])
+ line_split[0:3] = [] # remove first 3 items
+
+ if strip_slash(line_split):
+ context_multi_line = 'curv'
+ else:
+ context_multi_line = ''
+
+
+ for i in line_split:
+ vert_loc_index = int(i)-1
+
+ if vert_loc_index < 0:
+ vert_loc_index= len(verts_loc) + vert_loc_index + 1
+
+ curv_idx.append(vert_loc_index)
+
+ elif line.startswith('parm') or context_multi_line == 'parm':
+ line_split= line.split()
+
+ if context_multi_line:
+ context_multi_line = ''
+ else:
+ context_parm = line_split[1]
+ line_split[0:2] = [] # remove first 2
+
+ if strip_slash(line_split):
+ context_multi_line = 'parm'
+ else:
+ context_multi_line = ''
+
+ if context_parm.lower() == 'u':
+ context_nurbs.setdefault('parm_u', []).extend( [float_func(f) for f in line_split] )
+ elif context_parm.lower() == 'v': # surfaces not supported yet
+ context_nurbs.setdefault('parm_v', []).extend( [float_func(f) for f in line_split] )
+ # else: # may want to support other parm's ?
+
+ elif line.startswith('deg '):
+ context_nurbs['deg']= [int(i) for i in line.split()[1:]]
+ elif line.startswith('end'):
+ # Add the nurbs curve
+ if context_object:
+ context_nurbs['name'] = context_object
+ nurbs.append(context_nurbs)
+ context_nurbs = {}
+ context_parm = ''
+
+ ''' # How to use usemap? deprecated?
+ elif line.startswith('usema'): # usemap or usemat
+ context_image= line_value(line.split())
+ '''
+
+ file.close()
+ time_new= time.time()
+# time_new= sys.time()
+ print('%.4f sec' % (time_new-time_sub))
+ time_sub= time_new
+
+
+ print('\tloading materials and images...')
+ create_materials(filepath, material_libs, unique_materials, unique_material_images, IMAGE_SEARCH)
+
+ time_new= time.time()
+# time_new= sys.time()
+ print('%.4f sec' % (time_new-time_sub))
+ time_sub= time_new
+
+ if not ROTATE_X90:
+ verts_loc[:] = [(v[0], v[2], -v[1]) for v in verts_loc]
+
+ # deselect all
+# if context.selected_objects:
+# bpy.ops.OBJECT_OT_select_all_toggle()
+
+ scene = context.scene
+# scn = bpy.data.scenes.active
+# scn.objects.selected = []
+ new_objects= [] # put new objects here
+
+ print('\tbuilding geometry...\n\tverts:%i faces:%i materials: %i smoothgroups:%i ...' % ( len(verts_loc), len(faces), len(unique_materials), len(unique_smooth_groups) ))
+ # Split the mesh by objects/materials
+ if SPLIT_OBJECTS or SPLIT_GROUPS: SPLIT_OB_OR_GROUP = True
+ else: SPLIT_OB_OR_GROUP = False
+
+ for verts_loc_split, faces_split, unique_materials_split, dataname in split_mesh(verts_loc, faces, unique_materials, filepath, SPLIT_OB_OR_GROUP, SPLIT_MATERIALS):
+ # Create meshes from the data, warning 'vertex_groups' wont support splitting
+ create_mesh(scene, new_objects, has_ngons, CREATE_FGONS, CREATE_EDGES, verts_loc_split, verts_tex, faces_split, unique_materials_split, unique_material_images, unique_smooth_groups, vertex_groups, dataname)
+
+ # nurbs support
+# for context_nurbs in nurbs:
+# create_nurbs(scn, context_nurbs, verts_loc, new_objects)
+
+
+ axis_min= [ 1000000000]*3
+ axis_max= [-1000000000]*3
+
+# if CLAMP_SIZE:
+# # Get all object bounds
+# for ob in new_objects:
+# for v in ob.getBoundBox():
+# for axis, value in enumerate(v):
+# if axis_min[axis] > value: axis_min[axis]= value
+# if axis_max[axis] < value: axis_max[axis]= value
+
+# # Scale objects
+# max_axis= max(axis_max[0]-axis_min[0], axis_max[1]-axis_min[1], axis_max[2]-axis_min[2])
+# scale= 1.0
+
+# while CLAMP_SIZE < max_axis * scale:
+# scale= scale/10.0
+
+# for ob in new_objects:
+# ob.setSize(scale, scale, scale)
+
+ # Better rotate the vert locations
+ #if not ROTATE_X90:
+ # for ob in new_objects:
+ # ob.RotX = -1.570796326794896558
+
+ time_new= time.time()
+# time_new= sys.time()
+
+ print('%.4f sec' % (time_new-time_sub))
+ print('finished importing: "%s" in %.4f sec.' % (filepath, (time_new-time_main)))
+
+
+DEBUG= True
+
+
+def load_obj_ui(filepath, BATCH_LOAD= False):
+ if BPyMessages.Error_NoFile(filepath):
+ return
+
+ global CREATE_SMOOTH_GROUPS, CREATE_FGONS, CREATE_EDGES, SPLIT_OBJECTS, SPLIT_GROUPS, SPLIT_MATERIALS, CLAMP_SIZE, IMAGE_SEARCH, POLYGROUPS, KEEP_VERT_ORDER, ROTATE_X90
+
+ CREATE_SMOOTH_GROUPS= Draw.Create(0)
+ CREATE_FGONS= Draw.Create(1)
+ CREATE_EDGES= Draw.Create(1)
+ SPLIT_OBJECTS= Draw.Create(0)
+ SPLIT_GROUPS= Draw.Create(0)
+ SPLIT_MATERIALS= Draw.Create(0)
+ CLAMP_SIZE= Draw.Create(10.0)
+ IMAGE_SEARCH= Draw.Create(1)
+ POLYGROUPS= Draw.Create(0)
+ KEEP_VERT_ORDER= Draw.Create(1)
+ ROTATE_X90= Draw.Create(1)
+
+
+ # Get USER Options
+ # Note: works but isn't pretty; the more complicated GUI below is used instead
+ '''
+ pup_block= [\
+ 'Import...',\
+ ('Smooth Groups', CREATE_SMOOTH_GROUPS, 'Surround smooth groups by sharp edges'),\
+ ('Create FGons', CREATE_FGONS, 'Import faces with more than 4 verts as fgons.'),\
+ ('Lines', CREATE_EDGES, 'Import lines and faces with 2 verts as edges'),\
+ 'Separate objects from obj...',\
+ ('Object', SPLIT_OBJECTS, 'Import OBJ Objects into Blender Objects'),\
+ ('Group', SPLIT_GROUPS, 'Import OBJ Groups into Blender Objects'),\
+ ('Material', SPLIT_MATERIALS, 'Import each material into a separate mesh (Avoids > 16 per mesh error)'),\
+ 'Options...',\
+ ('Keep Vert Order', KEEP_VERT_ORDER, 'Keep vert and face order, disables some other options.'),\
+ ('Clamp Scale:', CLAMP_SIZE, 0.0, 1000.0, 'Clamp the size to this maximum (Zero to Disable)'),\
+ ('Image Search', IMAGE_SEARCH, 'Search subdirs for any associated images (Warning, may be slow)'),\
+ ]
+
+ if not Draw.PupBlock('Import OBJ...', pup_block):
+ return
+
+ if KEEP_VERT_ORDER.val:
+ SPLIT_OBJECTS.val = False
+ SPLIT_GROUPS.val = False
+ SPLIT_MATERIALS.val = False
+ '''
+
+
+
+ # BEGIN ALTERNATIVE UI *******************
+ if True:
+
+ EVENT_NONE = 0
+ EVENT_EXIT = 1
+ EVENT_REDRAW = 2
+ EVENT_IMPORT = 3
+
+ GLOBALS = {}
+ GLOBALS['EVENT'] = EVENT_REDRAW
+ #GLOBALS['MOUSE'] = Window.GetMouseCoords()
+ GLOBALS['MOUSE'] = [i/2 for i in Window.GetScreenSize()]
+
+ def obj_ui_set_event(e,v):
+ GLOBALS['EVENT'] = e
+
+ def do_split(e,v):
+ global SPLIT_OBJECTS, SPLIT_GROUPS, SPLIT_MATERIALS, KEEP_VERT_ORDER, POLYGROUPS
+ if SPLIT_OBJECTS.val or SPLIT_GROUPS.val or SPLIT_MATERIALS.val:
+ KEEP_VERT_ORDER.val = 0
+ POLYGROUPS.val = 0
+ else:
+ KEEP_VERT_ORDER.val = 1
+
+ def do_vertorder(e,v):
+ global SPLIT_OBJECTS, SPLIT_GROUPS, SPLIT_MATERIALS, KEEP_VERT_ORDER
+ if KEEP_VERT_ORDER.val:
+ SPLIT_OBJECTS.val = SPLIT_GROUPS.val = SPLIT_MATERIALS.val = 0
+ else:
+ if not (SPLIT_OBJECTS.val or SPLIT_GROUPS.val or SPLIT_MATERIALS.val):
+ KEEP_VERT_ORDER.val = 1
+
+ def do_polygroups(e,v):
+ global SPLIT_OBJECTS, SPLIT_GROUPS, SPLIT_MATERIALS, KEEP_VERT_ORDER, POLYGROUPS
+ if POLYGROUPS.val:
+ SPLIT_OBJECTS.val = SPLIT_GROUPS.val = SPLIT_MATERIALS.val = 0
+
+ def do_help(e,v):
+ url = __url__[0]
+ print('Trying to open web browser with documentation at this address...')
+ print('\t' + url)
+
+ try:
+ import webbrowser
+ webbrowser.open(url)
+ except:
+ print('...could not open a browser window.')
+
+ def obj_ui():
+ ui_x, ui_y = GLOBALS['MOUSE']
+
+ # Center based on overall pup size
+ ui_x -= 165
+ ui_y -= 90
+
+ global CREATE_SMOOTH_GROUPS, CREATE_FGONS, CREATE_EDGES, SPLIT_OBJECTS, SPLIT_GROUPS, SPLIT_MATERIALS, CLAMP_SIZE, IMAGE_SEARCH, POLYGROUPS, KEEP_VERT_ORDER, ROTATE_X90
+
+ Draw.Label('Import...', ui_x+9, ui_y+159, 220, 21)
+ Draw.BeginAlign()
+ CREATE_SMOOTH_GROUPS = Draw.Toggle('Smooth Groups', EVENT_NONE, ui_x+9, ui_y+139, 110, 20, CREATE_SMOOTH_GROUPS.val, 'Surround smooth groups by sharp edges')
+ CREATE_FGONS = Draw.Toggle('NGons as FGons', EVENT_NONE, ui_x+119, ui_y+139, 110, 20, CREATE_FGONS.val, 'Import faces with more than 4 verts as fgons')
+ CREATE_EDGES = Draw.Toggle('Lines as Edges', EVENT_NONE, ui_x+229, ui_y+139, 110, 20, CREATE_EDGES.val, 'Import lines and faces with 2 verts as edges')
+ Draw.EndAlign()
+
+ Draw.Label('Separate objects by OBJ...', ui_x+9, ui_y+110, 220, 20)
+ Draw.BeginAlign()
+ SPLIT_OBJECTS = Draw.Toggle('Object', EVENT_REDRAW, ui_x+9, ui_y+89, 55, 21, SPLIT_OBJECTS.val, 'Import OBJ Objects into Blender Objects', do_split)
+ SPLIT_GROUPS = Draw.Toggle('Group', EVENT_REDRAW, ui_x+64, ui_y+89, 55, 21, SPLIT_GROUPS.val, 'Import OBJ Groups into Blender Objects', do_split)
+ SPLIT_MATERIALS = Draw.Toggle('Material', EVENT_REDRAW, ui_x+119, ui_y+89, 60, 21, SPLIT_MATERIALS.val, 'Import each material into a separate mesh (Avoids > 16 per mesh error)', do_split)
+ Draw.EndAlign()
+
+ # Only used for user feedback
+ KEEP_VERT_ORDER = Draw.Toggle('Keep Vert Order', EVENT_REDRAW, ui_x+184, ui_y+89, 113, 21, KEEP_VERT_ORDER.val, 'Keep vert and face order, disables split options, enable for morph targets', do_vertorder)
+
+ ROTATE_X90 = Draw.Toggle('-X90', EVENT_REDRAW, ui_x+302, ui_y+89, 38, 21, ROTATE_X90.val, 'Rotate X 90.')
+
+ Draw.Label('Options...', ui_x+9, ui_y+60, 211, 20)
+ CLAMP_SIZE = Draw.Number('Clamp Scale: ', EVENT_NONE, ui_x+9, ui_y+39, 130, 21, CLAMP_SIZE.val, 0.0, 1000.0, 'Clamp the size to this maximum (Zero to Disable)')
+ POLYGROUPS = Draw.Toggle('Poly Groups', EVENT_REDRAW, ui_x+144, ui_y+39, 90, 21, POLYGROUPS.val, 'Import OBJ groups as vertex groups.', do_polygroups)
+ IMAGE_SEARCH = Draw.Toggle('Image Search', EVENT_NONE, ui_x+239, ui_y+39, 100, 21, IMAGE_SEARCH.val, 'Search subdirs for any associated images (Warning, may be slow)')
+ Draw.BeginAlign()
+ Draw.PushButton('Online Help', EVENT_REDRAW, ui_x+9, ui_y+9, 110, 21, 'Load the wiki page for this script', do_help)
+ Draw.PushButton('Cancel', EVENT_EXIT, ui_x+119, ui_y+9, 110, 21, '', obj_ui_set_event)
+ Draw.PushButton('Import', EVENT_IMPORT, ui_x+229, ui_y+9, 110, 21, 'Import with these settings', obj_ui_set_event)
+ Draw.EndAlign()
+
+
+ # hack so the toggle buttons redraw. this is not nice at all
+ while GLOBALS['EVENT'] not in (EVENT_EXIT, EVENT_IMPORT):
+ Draw.UIBlock(obj_ui, 0)
+
+ if GLOBALS['EVENT'] != EVENT_IMPORT:
+ return
+
+ # END ALTERNATIVE UI *********************
+
+
+
+
+
+
+
+ Window.WaitCursor(1)
+
+ if BATCH_LOAD: # load the dir
+ try:
+ files= [ f for f in os.listdir(filepath) if f.lower().endswith('.obj') ]
+ except:
+ Window.WaitCursor(0)
+ Draw.PupMenu('Error%t|Could not open path ' + filepath)
+ return
+
+ if not files:
+ Window.WaitCursor(0)
+ Draw.PupMenu('Error%t|No files at path ' + filepath)
+ return
+
+ for f in files:
+ scn= bpy.data.scenes.new( stripExt(f) )
+ scn.makeCurrent()
+
+ load_obj(os.path.join(filepath, f),\
+ CLAMP_SIZE.val,\
+ CREATE_FGONS.val,\
+ CREATE_SMOOTH_GROUPS.val,\
+ CREATE_EDGES.val,\
+ SPLIT_OBJECTS.val,\
+ SPLIT_GROUPS.val,\
+ SPLIT_MATERIALS.val,\
+ ROTATE_X90.val,\
+ IMAGE_SEARCH.val,\
+ POLYGROUPS.val
+ )
+
+ else: # Normal load
+ load_obj(filepath,\
+ CLAMP_SIZE.val,\
+ CREATE_FGONS.val,\
+ CREATE_SMOOTH_GROUPS.val,\
+ CREATE_EDGES.val,\
+ SPLIT_OBJECTS.val,\
+ SPLIT_GROUPS.val,\
+ SPLIT_MATERIALS.val,\
+ ROTATE_X90.val,\
+ IMAGE_SEARCH.val,\
+ POLYGROUPS.val
+ )
+
+ Window.WaitCursor(0)
+
+
+def load_obj_ui_batch(file):
+ load_obj_ui(file, True)
+
+DEBUG= False
+
+# if __name__=='__main__' and not DEBUG:
+# if os and Window.GetKeyQualifiers() & Window.Qual.SHIFT:
+# Window.FileSelector(load_obj_ui_batch, 'Import OBJ Dir', '')
+# else:
+# Window.FileSelector(load_obj_ui, 'Import a Wavefront OBJ', '*.obj')
+
+ # For testing compatibility
+'''
+else:
+ # DEBUG ONLY
+ TIME= sys.time()
+ DIR = '/fe/obj'
+ import os
+ print 'Searching for files'
+ def fileList(path):
+ for dirpath, dirnames, filenames in os.walk(path):
+ for filename in filenames:
+ yield os.path.join(dirpath, filename)
+
+ files = [f for f in fileList(DIR) if f.lower().endswith('.obj')]
+ files.sort()
+
+ for i, obj_file in enumerate(files):
+ if 0 < i < 20:
+ print 'Importing', obj_file, '\nNUMBER', i, 'of', len(files)
+ newScn= bpy.data.scenes.new(os.path.basename(obj_file))
+ newScn.makeCurrent()
+ load_obj(obj_file, False, IMAGE_SEARCH=0)
+
+ print 'TOTAL TIME: %.6f' % (sys.time() - TIME)
+'''
+#load_obj('/test.obj')
+#load_obj('/fe/obj/mba1.obj')
+
+
+
+class IMPORT_OT_obj(bpy.types.Operator):
+ '''
+ Load a Wavefront OBJ file; this text is used for the operator tooltip and Python docs.
+ '''
+ __idname__ = "import.obj"
+ __label__ = "Import OBJ"
+
+ # List of operator properties, the attributes will be assigned
+ # to the class instance from the operator settings before calling.
+
+ __props__ = [
+ bpy.props.StringProperty(attr="path", name="File Path", description="File path used for importing the OBJ file", maxlen= 1024, default= ""),
+
+ bpy.props.BoolProperty(attr="CREATE_SMOOTH_GROUPS", name="Smooth Groups", description="Surround smooth groups by sharp edges", default= True),
+ bpy.props.BoolProperty(attr="CREATE_FGONS", name="NGons as FGons", description="Import faces with more then 4 verts as fgons", default= True),
+ bpy.props.BoolProperty(attr="CREATE_EDGES", name="Lines as Edges", description="Import lines and faces with 2 verts as edge", default= True),
+ bpy.props.BoolProperty(attr="SPLIT_OBJECTS", name="Object", description="Import OBJ Objects into Blender Objects", default= True),
+ bpy.props.BoolProperty(attr="SPLIT_GROUPS", name="Group", description="Import OBJ Groups into Blender Objects", default= True),
+ bpy.props.BoolProperty(attr="SPLIT_MATERIALS", name="Material", description="Import each material into a seperate mesh (Avoids > 16 per mesh error)", default= True),
+ # old comment: only used for user feedback
+ # disabled this option because in old code a handler for it disabled SPLIT* params, it's not passed to load_obj
+ # bpy.props.BoolProperty(attr="KEEP_VERT_ORDER", name="Keep Vert Order", description="Keep vert and face order, disables split options, enable for morph targets", default= True),
+ bpy.props.BoolProperty(attr="ROTATE_X90", name="-X90", description="Rotate X 90.", default= True),
+ bpy.props.FloatProperty(attr="CLAMP_SIZE", name="Clamp Scale", description="Clamp the size to this maximum (Zero to Disable)", min=0.01, max=1000.0, soft_min=0.0, soft_max=1000.0, default=0.0),
+ bpy.props.BoolProperty(attr="POLYGROUPS", name="Poly Groups", description="Import OBJ groups as vertex groups.", default= True),
+ bpy.props.BoolProperty(attr="IMAGE_SEARCH", name="Image Search", description="Search subdirs for any assosiated images (Warning, may be slow)", default= True),
+ ]
+
+ '''
+ def poll(self, context):
+ return True '''
+
+ def execute(self, context):
+ # print("Selected: " + context.active_object.name)
+
+ load_obj(self.path,
+ context,
+ self.CLAMP_SIZE,
+ self.CREATE_FGONS,
+ self.CREATE_SMOOTH_GROUPS,
+ self.CREATE_EDGES,
+ self.SPLIT_OBJECTS,
+ self.SPLIT_GROUPS,
+ self.SPLIT_MATERIALS,
+ self.ROTATE_X90,
+ self.IMAGE_SEARCH,
+ self.POLYGROUPS)
+
+ return ('FINISHED',)
+
+ def invoke(self, context, event):
+ wm = context.manager
+ wm.add_fileselect(self.__operator__)
+ return ('RUNNING_MODAL',)
+
+
+bpy.ops.add(IMPORT_OT_obj)
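+
+# A sketch of invoking the registered operator from Python (hypothetical path;
+# "import" is a Python keyword, so the ops category needs getattr):
+# getattr(bpy.ops, "import").obj(path="/path/to/model.obj")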
+
+
+# NOTES (all line numbers refer to 2.4x import_obj.py, not this file)
+# check later: line 489
+# can convert now: edge flags, edges: lines 508-528
+# ngon (uses python module BPyMesh): 384-414
+# nurbs: 947-
+# NEXT clamp size: get bound box with RNA
+# get back to l 140 (here)
+# search image in bpy.config.textureDir - load_image
+# replaced BPyImage.comprehensiveImageLoad with a simplified version that only checks additional directory specified, but doesn't search dirs recursively (obj_image_load)
+# bitmask won't work? - 132
+# uses operator bpy.ops.OBJECT_OT_select_all_toggle() to deselect all (not necessary?)
+# uses bpy.sys.time()
diff --git a/release/scripts/io/netrender/__init__.py b/release/scripts/io/netrender/__init__.py
new file mode 100644
index 00000000000..4a1dd2238e3
--- /dev/null
+++ b/release/scripts/io/netrender/__init__.py
@@ -0,0 +1,19 @@
+# This directory is a Python package.
+
+# use absolute imports; implicit relative imports are invalid in Python 3
+from netrender import model
+from netrender import operators
+from netrender import client
+from netrender import slave
+from netrender import master
+from netrender import master_html
+from netrender import utils
+from netrender import balancing
+from netrender import ui
+
+# store temp data in bpy module
+
+import bpy
+
+bpy.data.netrender_jobs = []
+bpy.data.netrender_slaves = []
+bpy.data.netrender_blacklist = [] \ No newline at end of file
diff --git a/release/scripts/io/netrender/balancing.py b/release/scripts/io/netrender/balancing.py
new file mode 100644
index 00000000000..637dd5ff92e
--- /dev/null
+++ b/release/scripts/io/netrender/balancing.py
@@ -0,0 +1,94 @@
+import time
+
+from netrender.utils import *
+import netrender.model
+
+class RatingRule:
+ def rate(self, job):
+ return 0
+
+class ExclusionRule:
+ def test(self, job):
+ return False
+
+class PriorityRule:
+ def test(self, job):
+ return False
+
+class Balancer:
+ def __init__(self):
+ self.rules = []
+ self.priorities = []
+ self.exceptions = []
+
+ def addRule(self, rule):
+ self.rules.append(rule)
+
+ def addPriority(self, priority):
+ self.priorities.append(priority)
+
+ def addException(self, exception):
+ self.exceptions.append(exception)
+
+ def applyRules(self, job):
+ return sum((rule.rate(job) for rule in self.rules))
+
+ def applyPriorities(self, job):
+ for priority in self.priorities:
+ if priority.test(job):
+ return True # priorities are first
+
+ return False
+
+ def applyExceptions(self, job):
+ for exception in self.exceptions:
+ if exception.test(job):
+ return True # exceptions are last
+
+ return False
+
+ def sortKey(self, job):
+ return (1 if self.applyExceptions(job) else 0, # exceptions after
+ 0 if self.applyPriorities(job) else 1, # priorities first
+ self.applyRules(job))
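+ # e.g. a prioritized job keys as (0, 0, rating) and sorts before a normal
+ # job's (0, 1, rating); excepted jobs key as (1, _, _) and sort last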
+
+ def balance(self, jobs):
+ if jobs:
+ jobs.sort(key=self.sortKey)
+ return jobs[0]
+ else:
+ return None
+
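+# A minimal usage sketch, mirroring how the master wires a balancer together
+# (see RenderMasterServer in master.py):
+#
+# balancer = Balancer()
+# balancer.addRule(RatingUsage())
+# balancer.addException(ExcludeQueuedEmptyJob())
+# balancer.addPriority(NewJobPriority())
+# best = balancer.balance(jobs) # sorts jobs and returns the best one, or None
+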
+# ==========================
+
+class RatingUsage(RatingRule):
+ def rate(self, job):
+ # less usage is better
+ return job.usage / job.priority
+
+class NewJobPriority(PriorityRule):
+ def __init__(self, limit = 1):
+ self.limit = limit
+
+ def test(self, job):
+ return job.countFrames(status = DONE) < self.limit
+
+class MinimumTimeBetweenDispatchPriority(PriorityRule):
+ def __init__(self, limit = 10):
+ self.limit = limit
+
+ def test(self, job):
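+ # nothing dispatched right now and more than `limit` minutes since the last dispatch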
+ return job.countFrames(status = DISPATCHED) == 0 and (time.time() - job.last_dispatched) / 60 > self.limit
+
+class ExcludeQueuedEmptyJob(ExclusionRule):
+ def test(self, job):
+ return job.status != JOB_QUEUED or job.countFrames(status = QUEUED) == 0
+
+class ExcludeSlavesLimit(ExclusionRule):
+ def __init__(self, count_jobs, count_slaves, limit = 0.75):
+ self.count_jobs = count_jobs
+ self.count_slaves = count_slaves
+ self.limit = limit
+
+ def test(self, job):
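+ # exclude the job when giving it one more slave would push its share of
+ # all slaves above `limit` (unless it is the only job or slaves <= 1)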
+ return not ( self.count_jobs() == 1 or self.count_slaves() <= 1 or float(job.countSlaves() + 1) / self.count_slaves() <= self.limit )
diff --git a/release/scripts/io/netrender/client.py b/release/scripts/io/netrender/client.py
new file mode 100644
index 00000000000..1897d1fd949
--- /dev/null
+++ b/release/scripts/io/netrender/client.py
@@ -0,0 +1,203 @@
+import bpy
+import sys, os, re
+import http, http.client, http.server, urllib
+import subprocess, shutil, time, hashlib
+
+import netrender.model
+import netrender.slave as slave
+import netrender.master as master
+from netrender.utils import *
+
+def clientSendJob(conn, scene, anim = False):
+ netsettings = scene.network_render
+ job = netrender.model.RenderJob()
+
+ if anim:
+ for f in range(scene.start_frame, scene.end_frame + 1):
+ job.addFrame(f)
+ else:
+ job.addFrame(scene.current_frame)
+
+ filename = bpy.data.filename
+ job.addFile(filename)
+
+ job_name = netsettings.job_name
+ path, name = os.path.split(filename)
+ if job_name == "[default]":
+ job_name = name
+
+ ###########################
+ # LIBRARIES
+ ###########################
+ for lib in bpy.data.libraries:
+ lib_path = lib.filename
+
+ if lib_path.startswith("//"):
+ lib_path = path + os.sep + lib_path[2:]
+
+ job.addFile(lib_path)
+
+ ###########################
+ # POINT CACHES
+ ###########################
+
+ root, ext = os.path.splitext(name)
+ cache_path = path + os.sep + "blendcache_" + root + os.sep # need an API call for that
+
+ if os.path.exists(cache_path):
+ caches = {}
+ pattern = re.compile("([a-zA-Z0-9]+)_([0-9]+)_[0-9]+\.bphys")
+ for cache_file in sorted(os.listdir(cache_path)):
+ match = pattern.match(cache_file)
+
+ if match:
+ cache_id = match.groups()[0]
+ cache_frame = int(match.groups()[1])
+
+ cache_files = caches.get(cache_id, [])
+ cache_files.append((cache_frame, cache_file))
+ caches[cache_id] = cache_files
+
+ for cache in caches.values():
+ cache.sort()
+
+ if len(cache) == 1:
+ cache_frame, cache_file = cache[0]
+ job.addFile(cache_path + cache_file, cache_frame, cache_frame)
+ else:
+ for i in range(len(cache)):
+ current_item = cache[i]
+ next_item = cache[i+1] if i + 1 < len(cache) else None
+ previous_item = cache[i - 1] if i > 0 else None
+
+ current_frame, current_file = current_item
+
+ if not next_item and not previous_item:
+ job.addFile(cache_path + current_file, current_frame, current_frame)
+ elif next_item and not previous_item:
+ next_frame = next_item[0]
+ job.addFile(cache_path + current_file, current_frame, next_frame - 1)
+ elif not next_item and previous_item:
+ previous_frame = previous_item[0]
+ job.addFile(cache_path + current_file, previous_frame + 1, current_frame)
+ else:
+ next_frame = next_item[0]
+ previous_frame = previous_item[0]
+ job.addFile(cache_path + current_file, previous_frame + 1, next_frame - 1)
+
+ ###########################
+ # IMAGES
+ ###########################
+ for image in bpy.data.images:
+ if image.source == "FILE" and not image.packed_file:
+ job.addFile(image.filename)
+
+ # print(job.files)
+
+ job.name = job_name
+
+ for slave in scene.network_render.slaves_blacklist:
+ job.blacklist.append(slave.id)
+
+ job.chunks = netsettings.chunks
+ job.priority = netsettings.priority
+
+ # try to send path first
+ conn.request("POST", "/job", repr(job.serialize()))
+ response = conn.getresponse()
+
+ job_id = response.getheader("job-id")
+
+ # if ACCEPTED (job received but files not yet present), send the files
+ if response.status == http.client.ACCEPTED:
+ for filepath, start, end in job.files:
+ f = open(filepath, "rb")
+ conn.request("PUT", "/file", f, headers={"job-id": job_id, "job-file": filepath})
+ f.close()
+ response = conn.getresponse()
+
+ # server will reply with NOT_FOUND until all files are found
+
+ return job_id
+
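+# Usage sketch; clientConnection() (from netrender.utils) is assumed to return
+# an http.client connection to the master, as in the operators that call this:
+#
+# conn = clientConnection(scene)
+# if conn:
+# job_id = clientSendJob(conn, scene, anim=True)
+# conn.close()
+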
+def requestResult(conn, job_id, frame):
+ conn.request("GET", "/render", headers={"job-id": job_id, "job-frame":str(frame)})
+
+@rnaType
+class NetworkRenderEngine(bpy.types.RenderEngine):
+ __idname__ = 'NET_RENDER'
+ __label__ = "Network Render"
+ def render(self, scene):
+ if scene.network_render.mode == "RENDER_CLIENT":
+ self.render_client(scene)
+ elif scene.network_render.mode == "RENDER_SLAVE":
+ self.render_slave(scene)
+ elif scene.network_render.mode == "RENDER_MASTER":
+ self.render_master(scene)
+ else:
+ print("UNKNOWN OPERATION MODE")
+
+ def render_master(self, scene):
+ netsettings = scene.network_render
+
+ address = "" if netsettings.server_address == "[default]" else netsettings.server_address
+
+ master.runMaster((address, netsettings.server_port), netsettings.server_broadcast, netsettings.path, self.update_stats, self.test_break)
+
+
+ def render_slave(self, scene):
+ slave.render_slave(self, scene)
+
+ def render_client(self, scene):
+ netsettings = scene.network_render
+ self.update_stats("", "Network render client initiation")
+
+
+ conn = clientConnection(scene)
+
+ if conn:
+ # Sending file
+
+ self.update_stats("", "Network render exporting")
+
+ job_id = netsettings.job_id
+
+ # reading back result
+
+ self.update_stats("", "Network render waiting for results")
+
+ requestResult(conn, job_id, scene.current_frame)
+ response = conn.getresponse()
+
+ if response.status == http.client.NO_CONTENT:
+ # keep the local job_id in sync and consume the new response
+ job_id = netsettings.job_id = clientSendJob(conn, scene)
+ requestResult(conn, job_id, scene.current_frame)
+ response = conn.getresponse()
+
+ while response.status == http.client.ACCEPTED and not self.test_break():
+ time.sleep(1)
+ requestResult(conn, job_id, scene.current_frame)
+ response = conn.getresponse()
+
+ if response.status != http.client.OK:
+ conn.close()
+ return
+
+ r = scene.render_data
+ x= int(r.resolution_x*r.resolution_percentage*0.01)
+ y= int(r.resolution_y*r.resolution_percentage*0.01)
+
+ f = open(netsettings.path + "output.exr", "wb")
+ buf = response.read(1024)
+
+ while buf:
+ f.write(buf)
+ buf = response.read(1024)
+
+ f.close()
+
+ result = self.begin_result(0, 0, x, y)
+ result.load_from_file(netsettings.path + "output.exr", 0, 0)
+ self.end_result(result)
+
+ conn.close()
+
diff --git a/release/scripts/io/netrender/master.py b/release/scripts/io/netrender/master.py
new file mode 100644
index 00000000000..be23fda7a91
--- /dev/null
+++ b/release/scripts/io/netrender/master.py
@@ -0,0 +1,760 @@
+import sys, os
+import http, http.client, http.server, urllib, socket
+import subprocess, shutil, time, hashlib
+
+from netrender.utils import *
+import netrender.model
+import netrender.balancing
+import netrender.master_html
+
+class MRenderFile:
+ def __init__(self, filepath, start, end):
+ self.filepath = filepath
+ self.start = start
+ self.end = end
+ self.found = False
+
+ def test(self):
+ self.found = os.path.exists(self.filepath)
+ return self.found
+
+
+class MRenderSlave(netrender.model.RenderSlave):
+ def __init__(self, name, address, stats):
+ super().__init__()
+ self.id = hashlib.md5(bytes(repr(name) + repr(address), encoding='utf8')).hexdigest()
+ self.name = name
+ self.address = address
+ self.stats = stats
+ self.last_seen = time.time()
+
+ self.job = None
+ self.job_frames = []
+
+ netrender.model.RenderSlave._slave_map[self.id] = self
+
+ def seen(self):
+ self.last_seen = time.time()
+
+ def finishedFrame(self, frame_number):
+ self.job_frames.remove(frame_number)
+ if not self.job_frames:
+ self.job = None
+
+class MRenderJob(netrender.model.RenderJob):
+ def __init__(self, job_id, job_type, name, files, chunks = 1, priority = 1, blacklist = None):
+ super().__init__()
+ self.id = job_id
+ self.type = job_type
+ self.name = name
+ self.files = files
+ self.frames = []
+ self.chunks = chunks
+ self.priority = priority
+ self.usage = 0.0
+ self.blacklist = blacklist if blacklist is not None else [] # avoid a shared mutable default
+ self.last_dispatched = time.time()
+
+ # force one chunk for process jobs
+ if self.type == netrender.model.JOB_PROCESS:
+ self.chunks = 1
+
+ # special server properties
+ self.last_update = 0
+ self.save_path = ""
+ self.files_map = {path: MRenderFile(path, start, end) for path, start, end in files}
+ self.status = JOB_WAITING
+
+ def save(self):
+ if self.save_path:
+ f = open(self.save_path + "job.txt", "w")
+ f.write(repr(self.serialize()))
+ f.close()
+
+ def testStart(self):
+ for f in self.files_map.values():
+ if not f.test():
+ return False
+
+ self.start()
+ return True
+
+ def testFinished(self):
+ for f in self.frames:
+ if f.status == QUEUED or f.status == DISPATCHED:
+ break
+ else:
+ self.status = JOB_FINISHED
+
+ def start(self):
+ self.status = JOB_QUEUED
+
+ def addLog(self, frames):
+ log_name = "_".join(("%04d" % f for f in frames)) + ".log"
+ log_path = self.save_path + log_name
+
+ for number in frames:
+ frame = self[number]
+ if frame:
+ frame.log_path = log_path
+
+ def addFrame(self, frame_number, command):
+ frame = MRenderFrame(frame_number, command)
+ self.frames.append(frame)
+ return frame
+
+ def reset(self, all):
+ for f in self.frames:
+ f.reset(all)
+
+ def getFrames(self):
+ frames = []
+ for f in self.frames:
+ if f.status == QUEUED:
+ self.last_dispatched = time.time()
+ frames.append(f)
+ if len(frames) >= self.chunks:
+ break
+
+ return frames
+
+class MRenderFrame(netrender.model.RenderFrame):
+ def __init__(self, frame, command):
+ super().__init__()
+ self.number = frame
+ self.slave = None
+ self.time = 0
+ self.status = QUEUED
+ self.command = command
+
+ self.log_path = None
+
+ def reset(self, all):
+ if all or self.status == ERROR:
+ self.slave = None
+ self.time = 0
+ self.status = QUEUED
+
+
+# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+
+class RenderHandler(http.server.BaseHTTPRequestHandler):
+ def send_head(self, code = http.client.OK, headers = {}, content = "application/octet-stream"):
+ self.send_response(code)
+ self.send_header("Content-type", content)
+
+ for key, value in headers.items():
+ self.send_header(key, value)
+
+ self.end_headers()
+
+ def do_HEAD(self):
+
+ if self.path == "/status":
+ job_id = self.headers.get('job-id', "")
+ job_frame = int(self.headers.get('job-frame', -1))
+
+ job = self.server.getJobID(job_id)
+ if job:
+ frame = job[job_frame]
+
+
+ if frame:
+ self.send_head(http.client.OK)
+ else:
+ # no such frame
+ self.send_head(http.client.NO_CONTENT)
+ else:
+ # no such job id
+ self.send_head(http.client.NO_CONTENT)
+
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+
+ def do_GET(self):
+
+ if self.path == "/version":
+ self.send_head()
+ self.server.stats("", "Version check")
+ self.wfile.write(VERSION)
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path == "/render":
+ job_id = self.headers['job-id']
+ job_frame = int(self.headers['job-frame'])
+
+ job = self.server.getJobID(job_id)
+
+ if job:
+ frame = job[job_frame]
+
+ if frame:
+ if frame.status in (QUEUED, DISPATCHED):
+ self.send_head(http.client.ACCEPTED)
+ elif frame.status == DONE:
+ self.server.stats("", "Sending result to client")
+ f = open(job.save_path + "%04d" % job_frame + ".exr", 'rb')
+
+ self.send_head()
+
+ shutil.copyfileobj(f, self.wfile)
+
+ f.close()
+ elif frame.status == ERROR:
+ self.send_head(http.client.PARTIAL_CONTENT)
+ else:
+ # no such frame
+ self.send_head(http.client.NO_CONTENT)
+ else:
+ # no such job id
+ self.send_head(http.client.NO_CONTENT)
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path == "/log":
+ job_id = self.headers['job-id']
+ job_frame = int(self.headers['job-frame'])
+
+ job = self.server.getJobID(job_id)
+
+ if job:
+ frame = job[job_frame]
+
+ if frame:
+ if not frame.log_path or frame.status in (QUEUED, DISPATCHED):
+ self.send_head(http.client.PROCESSING)
+ else:
+ self.server.stats("", "Sending log to client")
+ f = open(frame.log_path, 'rb')
+
+ self.send_head()
+
+ shutil.copyfileobj(f, self.wfile)
+
+ f.close()
+ else:
+ # no such frame
+ self.send_head(http.client.NO_CONTENT)
+ else:
+ # no such job id
+ self.send_head(http.client.NO_CONTENT)
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path == "/status":
+ job_id = self.headers.get('job-id', "")
+ job_frame = int(self.headers.get('job-frame', -1))
+
+ if job_id:
+
+ job = self.server.getJobID(job_id)
+ if job:
+ if job_frame != -1:
+ frame = job[job_frame]
+
+ if frame:
+ message = frame.serialize()
+ else:
+ # no such frame
+ self.send_head(http.client.NO_CONTENT)
+ return
+ else:
+ message = job.serialize()
+ else:
+ # no such job id
+ self.send_head(http.client.NO_CONTENT)
+ return
+ else: # status of all jobs
+ message = []
+
+ for job in self.server:
+ message.append(job.serialize())
+
+
+ self.server.stats("", "Sending status")
+ self.send_head()
+ self.wfile.write(bytes(repr(message), encoding='utf8'))
+
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path == "/job":
+ self.server.balance()
+
+ slave_id = self.headers['slave-id']
+
+ slave = self.server.getSeenSlave(slave_id)
+
+ if slave: # only if slave id is valid
+ job, frames = self.server.newDispatch(slave_id)
+
+ if job and frames:
+ for f in frames:
+ print("dispatch", f.number)
+ f.status = DISPATCHED
+ f.slave = slave
+
+ slave.job = job
+ slave.job_frames = [f.number for f in frames]
+
+ self.send_head(headers={"job-id": job.id})
+
+ message = job.serialize(frames)
+
+ self.wfile.write(bytes(repr(message), encoding='utf8'))
+
+ self.server.stats("", "Sending job to slave")
+ else:
+ # no job available, return error code
+ slave.job = None
+ slave.job_frames = []
+
+ self.send_head(http.client.ACCEPTED)
+ else: # invalid slave id
+ self.send_head(http.client.NO_CONTENT)
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path == "/file":
+ slave_id = self.headers['slave-id']
+
+ slave = self.server.getSeenSlave(slave_id)
+
+ if slave: # only if slave id is valid
+ job_id = self.headers['job-id']
+ job_file = self.headers['job-file']
+
+ job = self.server.getJobID(job_id)
+
+ if job:
+ render_file = job.files_map.get(job_file, None)
+
+ if render_file:
+ self.server.stats("", "Sending file to slave")
+ f = open(render_file.filepath, 'rb')
+
+ self.send_head()
+ shutil.copyfileobj(f, self.wfile)
+
+ f.close()
+ else:
+ # no such file
+ self.send_head(http.client.NO_CONTENT)
+ else:
+ # no such job id
+ self.send_head(http.client.NO_CONTENT)
+ else: # invalid slave id
+ self.send_head(http.client.NO_CONTENT)
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path == "/slaves":
+ message = []
+
+ self.server.stats("", "Sending slaves status")
+
+ for slave in self.server.slaves:
+ message.append(slave.serialize())
+
+ self.send_head()
+
+ self.wfile.write(bytes(repr(message), encoding='utf8'))
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ else:
+ # hand over the rest to the html section
+ netrender.master_html.get(self)
+
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ def do_POST(self):
+
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ if self.path == "/job":
+
+ length = int(self.headers['content-length'])
+
+ job_info = netrender.model.RenderJob.materialize(eval(str(self.rfile.read(length), encoding='utf8')))
+
+ job_id = self.server.nextJobID()
+
+ job = MRenderJob(job_id, job_info.type, job_info.name, job_info.files, chunks = job_info.chunks, priority = job_info.priority, blacklist = job_info.blacklist)
+
+ for frame in job_info.frames:
+ frame = job.addFrame(frame.number, frame.command)
+
+ self.server.addJob(job)
+
+ headers={"job-id": job_id}
+
+ if job.testStart():
+ self.server.stats("", "New job, started")
+ self.send_head(headers=headers)
+ else:
+ self.server.stats("", "New job, missing files")
+ self.send_head(http.client.ACCEPTED, headers=headers)
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path == "/cancel":
+ job_id = self.headers.get('job-id', "")
+
+ job = self.server.getJobID(job_id)
+
+ if job:
+ self.server.stats("", "Cancelling job")
+ self.server.removeJob(job)
+ self.send_head()
+ else:
+ # no such job id
+ self.send_head(http.client.NO_CONTENT)
+
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path == "/clear":
+ # cancel all jobs
+ self.server.stats("", "Clearing jobs")
+ self.server.clear()
+
+ self.send_head()
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path == "/reset":
+ job_id = self.headers.get('job-id', "")
+ job_frame = int(self.headers.get('job-frame', "-1"))
+ all = bool(self.headers.get('reset-all', "False"))
+
+ job = self.server.getJobID(job_id)
+
+ if job:
+ if job_frame != -1:
+
+ frame = job[job_frame]
+ if frame:
+ self.server.stats("", "Reset job frame")
+ frame.reset(all)
+ self.send_head()
+ else:
+ # no such frame
+ self.send_head(http.client.NO_CONTENT)
+
+ else:
+ self.server.stats("", "Reset job")
+ job.reset(all)
+ self.send_head()
+
+ else: # job not found
+ self.send_head(http.client.NO_CONTENT)
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path == "/slave":
+ length = int(self.headers['content-length'])
+ job_frame_string = self.headers['job-frame']
+
+ self.server.stats("", "New slave connected")
+
+ slave_info = netrender.model.RenderSlave.materialize(eval(str(self.rfile.read(length), encoding='utf8')))
+
+ slave_id = self.server.addSlave(slave_info.name, self.client_address, slave_info.stats)
+
+ self.send_head(headers = {"slave-id": slave_id})
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path == "/log":
+ slave_id = self.headers['slave-id']
+
+ slave = self.server.getSeenSlave(slave_id)
+
+ if slave: # only if slave id is valid
+ length = int(self.headers['content-length'])
+
+ log_info = netrender.model.LogFile.materialize(eval(str(self.rfile.read(length), encoding='utf8')))
+
+ job = self.server.getJobID(log_info.job_id)
+
+ if job:
+ self.server.stats("", "Log announcement")
+ job.addLog(log_info.frames)
+ self.send_head(http.client.OK)
+ else:
+ # no such job id
+ self.send_head(http.client.NO_CONTENT)
+ else: # invalid slave id
+ self.send_head(http.client.NO_CONTENT)
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ def do_PUT(self):
+
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ if self.path == "/file":
+ self.server.stats("", "Receiving job")
+
+ length = int(self.headers['content-length'])
+ job_id = self.headers['job-id']
+ job_file = self.headers['job-file']
+
+ job = self.server.getJobID(job_id)
+
+ if job:
+
+ render_file = job.files_map.get(job_file, None)
+
+ if render_file:
+ main_file = job.files[0][0] # filename of the first file
+
+ main_path, main_name = os.path.split(main_file)
+
+ if job_file != main_file:
+ file_path = prefixPath(job.save_path, job_file, main_path)
+ else:
+ file_path = job.save_path + main_name
+
+ buf = self.rfile.read(length)
+
+ # add same temp file + renames as slave
+
+ f = open(file_path, "wb")
+ f.write(buf)
+ f.close()
+ del buf
+
+ render_file.filepath = file_path # set the new path
+
+ if job.testStart():
+ self.server.stats("", "File upload, starting job")
+ self.send_head(http.client.OK)
+ else:
+ self.server.stats("", "File upload, file missings")
+ self.send_head(http.client.ACCEPTED)
+ else: # invalid file
+ self.send_head(http.client.NO_CONTENT)
+ else: # job not found
+ self.send_head(http.client.NO_CONTENT)
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path == "/render":
+ self.server.stats("", "Receiving render result")
+
+ slave_id = self.headers['slave-id']
+
+ slave = self.server.getSeenSlave(slave_id)
+
+ if slave: # only if slave id is valid
+ job_id = self.headers['job-id']
+
+ job = self.server.getJobID(job_id)
+
+ if job:
+ job_frame = int(self.headers['job-frame'])
+ job_result = int(self.headers['job-result'])
+ job_time = float(self.headers['job-time'])
+
+ frame = job[job_frame]
+
+ if frame:
+ if job.type == netrender.model.JOB_BLENDER:
+ if job_result == DONE:
+ length = int(self.headers['content-length'])
+ buf = self.rfile.read(length)
+ f = open(job.save_path + "%04d" % job_frame + ".exr", 'wb')
+ f.write(buf)
+ f.close()
+
+ del buf
+ elif job_result == ERROR:
+ # blacklist slave on this job on error
+ job.blacklist.append(slave.id)
+
+ self.server.stats("", "Receiving result")
+
+ slave.finishedFrame(job_frame)
+
+ frame.status = job_result
+ frame.time = job_time
+
+ job.testFinished()
+
+ self.send_head()
+ else: # frame not found
+ self.send_head(http.client.NO_CONTENT)
+ else: # job not found
+ self.send_head(http.client.NO_CONTENT)
+ else: # invalid slave id
+ self.send_head(http.client.NO_CONTENT)
+ # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+ elif self.path == "/log":
+ self.server.stats("", "Receiving log file")
+
+ job_id = self.headers['job-id']
+
+ job = self.server.getJobID(job_id)
+
+ if job:
+ job_frame = int(self.headers['job-frame'])
+
+ frame = job[job_frame]
+
+ if frame and frame.log_path:
+ length = int(self.headers['content-length'])
+ buf = self.rfile.read(length)
+ f = open(frame.log_path, 'ab')
+ f.write(buf)
+ f.close()
+
+ del buf
+
+ self.server.getSeenSlave(self.headers['slave-id'])
+
+ self.send_head()
+ else: # frame not found
+ self.send_head(http.client.NO_CONTENT)
+ else: # job not found
+ self.send_head(http.client.NO_CONTENT)
+
+class RenderMasterServer(http.server.HTTPServer):
+ def __init__(self, address, handler_class, path):
+ super().__init__(address, handler_class)
+ self.jobs = []
+ self.jobs_map = {}
+ self.slaves = []
+ self.slaves_map = {}
+ self.job_id = 0
+ self.path = path + "master_" + str(os.getpid()) + os.sep
+
+ self.slave_timeout = 2
+
+ self.balancer = netrender.balancing.Balancer()
+ self.balancer.addRule(netrender.balancing.RatingUsage())
+ self.balancer.addException(netrender.balancing.ExcludeQueuedEmptyJob())
+ self.balancer.addException(netrender.balancing.ExcludeSlavesLimit(self.countJobs, self.countSlaves, limit = 0.9))
+ self.balancer.addPriority(netrender.balancing.NewJobPriority())
+ self.balancer.addPriority(netrender.balancing.MinimumTimeBetweenDispatchPriority(limit = 2))
+
+ if not os.path.exists(self.path):
+ os.mkdir(self.path)
+
+ def nextJobID(self):
+ self.job_id += 1
+ return str(self.job_id)
+
+ def addSlave(self, name, address, stats):
+ slave = MRenderSlave(name, address, stats)
+ self.slaves.append(slave)
+ self.slaves_map[slave.id] = slave
+
+ return slave.id
+
+ def removeSlave(self, slave):
+ self.slaves.remove(slave)
+ self.slaves_map.pop(slave.id)
+
+ def getSlave(self, slave_id):
+ return self.slaves_map.get(slave_id, None)
+
+ def getSeenSlave(self, slave_id):
+ slave = self.getSlave(slave_id)
+ if slave:
+ slave.seen()
+
+ return slave
+
+ def timeoutSlaves(self):
+ removed = []
+
+ t = time.time()
+
+ for slave in self.slaves:
+ if (t - slave.last_seen) / 60 > self.slave_timeout:
+ removed.append(slave)
+
+ if slave.job:
+ for f in slave.job_frames:
+ slave.job[f].status = ERROR
+
+ for slave in removed:
+ self.removeSlave(slave)
+
+ def updateUsage(self):
+ blend = 0.5
+ for job in self.jobs:
+ job.usage *= (1 - blend)
+
+ if self.slaves:
+ slave_usage = blend / self.countSlaves()
+
+ for slave in self.slaves:
+ if slave.job:
+ slave.job.usage += slave_usage
+
+
+ def clear(self):
+ removed = self.jobs[:]
+
+ for job in removed:
+ self.removeJob(job)
+
+ def balance(self):
+ self.balancer.balance(self.jobs)
+
+ def countJobs(self, status = JOB_QUEUED):
+ total = 0
+ for j in self.jobs:
+ if j.status == status:
+ total += 1
+
+ return total
+
+ def countSlaves(self):
+ return len(self.slaves)
+
+ def removeJob(self, job):
+ self.jobs.remove(job)
+ self.jobs_map.pop(job.id)
+
+ for slave in self.slaves:
+ if slave.job == job:
+ slave.job = None
+ slave.job_frames = []
+
+ def addJob(self, job):
+ self.jobs.append(job)
+ self.jobs_map[job.id] = job
+
+ # create job directory
+ job.save_path = self.path + "job_" + job.id + os.sep
+ if not os.path.exists(job.save_path):
+ os.mkdir(job.save_path)
+
+ job.save()
+
+ def getJobID(self, id):
+ return self.jobs_map.get(id, None)
+
+ def __iter__(self):
+ for job in self.jobs:
+ yield job
+
+ def newDispatch(self, slave_id):
+ if self.jobs:
+ for job in self.jobs:
+ if not self.balancer.applyExceptions(job) and slave_id not in job.blacklist:
+ return job, job.getFrames()
+
+ return None, None
+
+def runMaster(address, broadcast, path, update_stats, test_break):
+ httpd = RenderMasterServer(address, RenderHandler, path)
+ httpd.timeout = 1
+ httpd.stats = update_stats
+
+ if broadcast:
+ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
+
+ start_time = time.time()
+
+ while not test_break():
+ httpd.handle_request()
+
+ if time.time() - start_time >= 10: # need constant here
+ httpd.timeoutSlaves()
+
+ httpd.updateUsage()
+
+ if broadcast:
+ print("broadcasting address")
+ s.sendto(bytes("%i" % address[1], encoding='utf8'), 0, ('<broadcast>', 8000))
+ start_time = time.time()
diff --git a/release/scripts/io/netrender/master_html.py b/release/scripts/io/netrender/master_html.py
new file mode 100644
index 00000000000..545659e8dc4
--- /dev/null
+++ b/release/scripts/io/netrender/master_html.py
@@ -0,0 +1,135 @@
+import re
+import shutil, time # used below; may also be pulled in via netrender.utils
+
+from netrender.utils import *
+
+
+def get(handler):
+ def output(text):
+ handler.wfile.write(bytes(text, encoding='utf8'))
+
+ def link(text, url):
+ return "<a href='%s'>%s</a>" % (url, text)
+
+ def startTable(border=1):
+ output("<table border='%i'>" % border)
+
+ def headerTable(*headers):
+ output("<thead><tr>")
+
+ for c in headers:
+ output("<td>" + c + "</td>")
+
+ output("</tr></thead>")
+
+ def rowTable(*data):
+ output("<tr>")
+
+ for c in data:
+ output("<td>" + str(c) + "</td>")
+
+ output("</tr>")
+
+ def endTable():
+ output("</table>")
+
+ if handler.path == "/html" or handler.path == "/":
+ handler.send_head(content = "text/html")
+ output("<html><head><title>NetRender</title></head><body>")
+
+ output("<h2>Master</h2>")
+
+ output("<h2>Slaves</h2>")
+
+ startTable()
+ headerTable("name", "address", "last seen", "stats", "job")
+
+ for slave in handler.server.slaves:
+ rowTable(slave.name, slave.address[0], time.ctime(slave.last_seen), slave.stats, link(slave.job.name, "/html/job" + slave.job.id) if slave.job else "None")
+
+ endTable()
+
+ output("<h2>Jobs</h2>")
+
+ startTable()
+ headerTable(
+ "name",
+ "priority",
+ "usage",
+ "wait",
+ "length",
+ "done",
+ "dispatched",
+ "error",
+ "first",
+ "exception"
+ )
+
+ handler.server.balance()
+
+ for job in handler.server.jobs:
+ results = job.framesStatus()
+ rowTable(
+ link(job.name, "/html/job" + job.id),
+ job.priority,
+ "%0.1f%%" % (job.usage * 100),
+ "%is" % int(time.time() - job.last_dispatched),
+ len(job),
+ results[DONE],
+ results[DISPATCHED],
+ results[ERROR],
+ handler.server.balancer.applyPriorities(job), handler.server.balancer.applyExceptions(job)
+ )
+
+ endTable()
+
+ output("</body></html>")
+
+ elif handler.path.startswith("/html/job"):
+ handler.send_head(content = "text/html")
+ job_id = handler.path[9:]
+
+ output("<html><head><title>NetRender</title></head><body>")
+
+ job = handler.server.getJobID(job_id)
+
+ if job:
+ output("<h2>Frames</h2>")
+
+ startTable()
+ headerTable("no", "status", "render time", "slave", "log")
+
+ for frame in job.frames:
+ rowTable(frame.number, frame.statusText(), "%.1fs" % frame.time, frame.slave.name if frame.slave else "&nbsp;", link("view log", "/html/log%s_%i" % (job_id, frame.number)) if frame.log_path else "&nbsp;")
+
+ endTable()
+ else:
+ output("no such job")
+
+ output("</body></html>")
+
+ elif handler.path.startswith("/html/log"):
+ handler.send_head(content = "text/plain")
+ pattern = re.compile("([a-zA-Z0-9]+)_([0-9]+)")
+
+ match = pattern.match(handler.path[9:])
+ if match:
+ job_id = match.groups()[0]
+ frame_number = int(match.groups()[1])
+
+ job = handler.server.getJobID(job_id)
+
+ if job:
+ frame = job[frame_number]
+
+ if frame:
+ f = open(frame.log_path, 'rb')
+
+ shutil.copyfileobj(f, handler.wfile)
+
+ f.close()
+ else:
+ output("no such frame")
+ else:
+ output("no such job")
+ else:
+ output("malformed url")
diff --git a/release/scripts/io/netrender/model.py b/release/scripts/io/netrender/model.py
new file mode 100644
index 00000000000..ca2a42d87f6
--- /dev/null
+++ b/release/scripts/io/netrender/model.py
@@ -0,0 +1,212 @@
+import sys, os
+import http, http.client, http.server, urllib
+import subprocess, shutil, time, hashlib
+
+from netrender.utils import *
+
+class LogFile:
+ def __init__(self, job_id = 0, frames = None):
+ self.job_id = job_id
+ self.frames = frames if frames is not None else [] # avoid a shared mutable default
+
+ def serialize(self):
+ return {
+ "job_id": self.job_id,
+ "frames": self.frames
+ }
+
+ @staticmethod
+ def materialize(data):
+ if not data:
+ return None
+
+ logfile = LogFile()
+ logfile.job_id = data["job_id"]
+ logfile.frames = data["frames"]
+
+ return logfile
+
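+# These model classes round-trip as repr()'d dicts over the wire; a sketch:
+#
+# data = eval(repr(logfile.serialize())) # what the HTTP payload amounts to
+# same = LogFile.materialize(data)
+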
+class RenderSlave:
+ _slave_map = {}
+
+ def __init__(self):
+ self.id = ""
+ self.name = ""
+ self.address = ("",0)
+ self.stats = ""
+ self.total_done = 0
+ self.total_error = 0
+ self.last_seen = 0.0
+
+ def serialize(self):
+ return {
+ "id": self.id,
+ "name": self.name,
+ "address": self.address,
+ "stats": self.stats,
+ "total_done": self.total_done,
+ "total_error": self.total_error,
+ "last_seen": self.last_seen
+ }
+
+ @staticmethod
+ def materialize(data):
+ if not data:
+ return None
+
+ slave_id = data["id"]
+
+ if slave_id in RenderSlave._slave_map:
+ return RenderSlave._slave_map[slave_id]
+ else:
+ slave = RenderSlave()
+ slave.id = slave_id
+ slave.name = data["name"]
+ slave.address = data["address"]
+ slave.stats = data["stats"]
+ slave.total_done = data["total_done"]
+ slave.total_error = data["total_error"]
+ slave.last_seen = data["last_seen"]
+
+ RenderSlave._slave_map[slave_id] = slave
+
+ return slave
+
+JOB_BLENDER = 1
+JOB_PROCESS = 2
+
+JOB_TYPES = {
+ JOB_BLENDER: "Blender",
+ JOB_PROCESS: "Process"
+ }
+
+class RenderJob:
+ def __init__(self):
+ self.id = ""
+ self.type = JOB_BLENDER
+ self.name = ""
+ self.files = []
+ self.frames = []
+ self.chunks = 0
+ self.priority = 0
+ self.usage = 0.0
+ self.blacklist = []
+ self.last_dispatched = 0.0
+
+ def addFile(self, file_path, start=-1, end=-1):
+ self.files.append((file_path, start, end))
+
+ def addFrame(self, frame_number, command = ""):
+ frame = RenderFrame(frame_number, command)
+ self.frames.append(frame)
+ return frame
+
+ def __len__(self):
+ return len(self.frames)
+
+ def countFrames(self, status=QUEUED):
+ total = 0
+ for f in self.frames:
+ if f.status == status:
+ total += 1
+
+ return total
+
+ def countSlaves(self):
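+ # number of distinct slaves currently holding dispatched frames of this job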
+ return len(set((frame.slave for frame in self.frames if frame.status == DISPATCHED)))
+
+ def framesStatus(self):
+ results = {
+ QUEUED: 0,
+ DISPATCHED: 0,
+ DONE: 0,
+ ERROR: 0
+ }
+
+ for frame in self.frames:
+ results[frame.status] += 1
+
+ return results
+
+ def __contains__(self, frame_number):
+ for f in self.frames:
+ if f.number == frame_number:
+ return True
+ else:
+ return False
+
+ def __getitem__(self, frame_number):
+ for f in self.frames:
+ if f.number == frame_number:
+ return f
+ else:
+ return None
+
+ def serialize(self, frames = None):
+ min_frame = min((f.number for f in frames)) if frames else -1
+ max_frame = max((f.number for f in frames)) if frames else -1
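+ # for a partial dispatch, keep only files whose (start, end) frame range
+ # overlaps the dispatched frames; start == -1 means "always send"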
+ return {
+ "id": self.id,
+ "type": self.type,
+ "name": self.name,
+ "files": [f for f in self.files if f[1] == -1 or not frames or (f[1] <= min_frame <= f[2] or f[1] <= max_frame <= f[2])],
+ "frames": [f.serialize() for f in self.frames if not frames or f in frames],
+ "chunks": self.chunks,
+ "priority": self.priority,
+ "usage": self.usage,
+ "blacklist": self.blacklist,
+ "last_dispatched": self.last_dispatched
+ }
+
+ @staticmethod
+ def materialize(data):
+ if not data:
+ return None
+
+ job = RenderJob()
+ job.id = data["id"]
+ job.type = data["type"]
+ job.name = data["name"]
+ job.files = data["files"]
+ job.frames = [RenderFrame.materialize(f) for f in data["frames"]]
+ job.chunks = data["chunks"]
+ job.priority = data["priority"]
+ job.usage = data["usage"]
+ job.blacklist = data["blacklist"]
+ job.last_dispatched = data["last_dispatched"]
+
+ return job
+
+class RenderFrame:
+ def __init__(self, number = 0, command = ""):
+ self.number = number
+ self.time = 0
+ self.status = QUEUED
+ self.slave = None
+ self.command = command
+
+ def statusText(self):
+ return STATUS_TEXT[self.status]
+
+ def serialize(self):
+ return {
+ "number": self.number,
+ "time": self.time,
+ "status": self.status,
+ "slave": None if not self.slave else self.slave.serialize(),
+ "command": self.command
+ }
+
+ @staticmethod
+ def materialize(data):
+ if not data:
+ return None
+
+ frame = RenderFrame()
+ frame.number = data["number"]
+ frame.time = data["time"]
+ frame.status = data["status"]
+ frame.slave = RenderSlave.materialize(data["slave"])
+ frame.command = data["command"]
+
+ return frame
diff --git a/release/scripts/io/netrender/operators.py b/release/scripts/io/netrender/operators.py
new file mode 100644
index 00000000000..42d1f6a0b86
--- /dev/null
+++ b/release/scripts/io/netrender/operators.py
@@ -0,0 +1,423 @@
+import bpy
+import sys, os
+import http, http.client, http.server, urllib, socket
+import webbrowser
+
+from netrender.utils import *
+import netrender.client as client
+import netrender.model
+
+@rnaOperator
+class RENDER_OT_netclientanim(bpy.types.Operator):
+ '''
+ Send the current animation to the render master as a job and start a network render; this text is used for the operator tooltip and Python docs.
+ '''
+ __idname__ = "render.netclientanim"
+ __label__ = "Net Render Client Anim"
+
+ # List of operator properties, the attributes will be assigned
+ # to the class instance from the operator settings before calling.
+
+ __props__ = []
+
+ def poll(self, context):
+ return True
+
+ def execute(self, context):
+ scene = context.scene
+
+ conn = clientConnection(scene)
+
+ if conn:
+ # Sending file
+ scene.network_render.job_id = client.clientSendJob(conn, scene, True)
+ conn.close()
+
+ bpy.ops.screen.render('INVOKE_AREA', animation=True)
+
+ return ('FINISHED',)
+
+ def invoke(self, context, event):
+ return self.execute(context)
+
+@rnaOperator
+class RENDER_OT_netclientsend(bpy.types.Operator):
+ '''
+ Send the current scene to the render master as a job; this text is used for the operator tooltip and Python docs.
+ '''
+ __idname__ = "render.netclientsend"
+ __label__ = "Net Render Client Send"
+
+ # List of operator properties, the attributes will be assigned
+ # to the class instance from the operator settings before calling.
+
+ __props__ = []
+
+ def poll(self, context):
+ return True
+
+ def execute(self, context):
+ scene = context.scene
+
+ conn = clientConnection(scene)
+
+ if conn:
+ # Sending file
+ scene.network_render.job_id = client.clientSendJob(conn, scene, True)
+ conn.close()
+
+ return ('FINISHED',)
+
+ def invoke(self, context, event):
+ return self.execute(context)
+
+@rnaOperator
+class RENDER_OT_netclientstatus(bpy.types.Operator):
+    '''Refresh the list of render jobs and their status from the master'''
+    __idname__ = "render.netclientstatus"
+    __label__ = "Net Render Client Status"
+
+    # List of operator properties; the attributes will be assigned
+    # to the class instance from the operator settings before calling.
+
+    __props__ = []
+
+ def poll(self, context):
+ return True
+
+ def execute(self, context):
+ netsettings = context.scene.network_render
+ conn = clientConnection(context.scene)
+
+ if conn:
+ conn.request("GET", "/status")
+
+ response = conn.getresponse()
+            print(response.status, response.reason)
+
+            jobs = (netrender.model.RenderJob.materialize(j) for j in eval(str(response.read(), encoding='utf8')))
+
+            while len(netsettings.jobs) > 0:
+                netsettings.jobs.remove(0)
+
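+            # full job objects are plain Python and live on bpy.data; only the
+            # names are mirrored into the RNA collection that feeds the UI list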
+ bpy.data.netrender_jobs = []
+
+ for j in jobs:
+ bpy.data.netrender_jobs.append(j)
+ netsettings.jobs.add()
+ job = netsettings.jobs[-1]
+
+ j.results = j.framesStatus() # cache frame status
+
+ job.name = j.name
+
+ return ('FINISHED',)
+
+ def invoke(self, context, event):
+ return self.execute(context)
+
+@rnaOperator
+class RENDER_OT_netclientblacklistslave(bpy.types.Operator):
+    '''Move the selected render slave to the blacklist'''
+    __idname__ = "render.netclientblacklistslave"
+    __label__ = "Net Render Client Blacklist Slave"
+
+    # List of operator properties; the attributes will be assigned
+    # to the class instance from the operator settings before calling.
+
+    __props__ = []
+
+ def poll(self, context):
+ return True
+
+ def execute(self, context):
+ netsettings = context.scene.network_render
+
+ if netsettings.active_slave_index >= 0:
+
+ # deal with data
+ slave = bpy.data.netrender_slaves.pop(netsettings.active_slave_index)
+ bpy.data.netrender_blacklist.append(slave)
+
+ # deal with rna
+ netsettings.slaves_blacklist.add()
+ netsettings.slaves_blacklist[-1].name = slave.name
+
+ netsettings.slaves.remove(netsettings.active_slave_index)
+ netsettings.active_slave_index = -1
+
+ return ('FINISHED',)
+
+ def invoke(self, context, event):
+ return self.execute(context)
+
+@rnaOperator
+class RENDER_OT_netclientwhitelistslave(bpy.types.Operator):
+    '''Move the selected render slave from the blacklist back to the working slaves'''
+    __idname__ = "render.netclientwhitelistslave"
+    __label__ = "Net Render Client Whitelist Slave"
+
+    # List of operator properties; the attributes will be assigned
+    # to the class instance from the operator settings before calling.
+
+    __props__ = []
+
+ def poll(self, context):
+ return True
+
+ def execute(self, context):
+ netsettings = context.scene.network_render
+
+ if netsettings.active_blacklisted_slave_index >= 0:
+
+ # deal with data
+ slave = bpy.data.netrender_blacklist.pop(netsettings.active_blacklisted_slave_index)
+ bpy.data.netrender_slaves.append(slave)
+
+ # deal with rna
+ netsettings.slaves.add()
+ netsettings.slaves[-1].name = slave.name
+
+ netsettings.slaves_blacklist.remove(netsettings.active_blacklisted_slave_index)
+ netsettings.active_blacklisted_slave_index = -1
+
+ return ('FINISHED',)
+
+ def invoke(self, context, event):
+ return self.execute(context)
+
+
+@rnaOperator
+class RENDER_OT_netclientslaves(bpy.types.Operator):
+    '''Refresh the list of render slaves from the master'''
+    __idname__ = "render.netclientslaves"
+    __label__ = "Net Render Client Slaves"
+
+    # List of operator properties; the attributes will be assigned
+    # to the class instance from the operator settings before calling.
+
+    __props__ = []
+
+ def poll(self, context):
+ return True
+
+ def execute(self, context):
+ netsettings = context.scene.network_render
+ conn = clientConnection(context.scene)
+
+ if conn:
+ conn.request("GET", "/slaves")
+
+ response = conn.getresponse()
+            print(response.status, response.reason)
+
+            slaves = (netrender.model.RenderSlave.materialize(s) for s in eval(str(response.read(), encoding='utf8')))
+
+            while len(netsettings.slaves) > 0:
+                netsettings.slaves.remove(0)
+
+ bpy.data.netrender_slaves = []
+
+ for s in slaves:
+ for i in range(len(bpy.data.netrender_blacklist)):
+ slave = bpy.data.netrender_blacklist[i]
+ if slave.id == s.id:
+ bpy.data.netrender_blacklist[i] = s
+ netsettings.slaves_blacklist[i].name = s.name
+ break
+ else:
+ bpy.data.netrender_slaves.append(s)
+
+ netsettings.slaves.add()
+ slave = netsettings.slaves[-1]
+ slave.name = s.name
+
+ return ('FINISHED',)
+
+ def invoke(self, context, event):
+ return self.execute(context)
+
+@rnaOperator
+class RENDER_OT_netclientcancel(bpy.types.Operator):
+    '''Cancel the selected render job on the master'''
+    __idname__ = "render.netclientcancel"
+    __label__ = "Net Render Client Cancel"
+
+    # List of operator properties; the attributes will be assigned
+    # to the class instance from the operator settings before calling.
+
+    __props__ = []
+
+ def poll(self, context):
+ netsettings = context.scene.network_render
+ return netsettings.active_job_index >= 0 and len(netsettings.jobs) > 0
+
+ def execute(self, context):
+ netsettings = context.scene.network_render
+ conn = clientConnection(context.scene)
+
+ if conn:
+ job = bpy.data.netrender_jobs[netsettings.active_job_index]
+
+ conn.request("POST", "/cancel", headers={"job-id":job.id})
+
+ response = conn.getresponse()
+            print(response.status, response.reason)
+
+ netsettings.jobs.remove(netsettings.active_job_index)
+
+ return ('FINISHED',)
+
+ def invoke(self, context, event):
+ return self.execute(context)
+
+@rnaOperator
+class RENDER_OT_netclientcancelall(bpy.types.Operator):
+    '''Cancel all render jobs on the master'''
+    __idname__ = "render.netclientcancelall"
+    __label__ = "Net Render Client Cancel All"
+
+    # List of operator properties; the attributes will be assigned
+    # to the class instance from the operator settings before calling.
+
+    __props__ = []
+
+ def poll(self, context):
+ return True
+
+ def execute(self, context):
+ netsettings = context.scene.network_render
+ conn = clientConnection(context.scene)
+
+ if conn:
+ conn.request("POST", "/clear")
+
+ response = conn.getresponse()
+            print(response.status, response.reason)
+
+            while len(netsettings.jobs) > 0:
+                netsettings.jobs.remove(0)
+
+ return ('FINISHED',)
+
+ def invoke(self, context, event):
+ return self.execute(context)
+
+@rnaOperator
+class netclientdownload(bpy.types.Operator):
+    '''Download the rendered frames of the selected job from the master'''
+    __idname__ = "render.netclientdownload"
+    __label__ = "Net Render Client Download"
+
+    # List of operator properties; the attributes will be assigned
+    # to the class instance from the operator settings before calling.
+
+    __props__ = []
+
+ def poll(self, context):
+ netsettings = context.scene.network_render
+ return netsettings.active_job_index >= 0 and len(netsettings.jobs) > 0
+
+ def execute(self, context):
+ netsettings = context.scene.network_render
+ rd = context.scene.render_data
+
+ conn = clientConnection(context.scene)
+
+ if conn:
+ job = bpy.data.netrender_jobs[netsettings.active_job_index]
+
+ for frame in job.frames:
+ client.requestResult(conn, job.id, frame.number)
+ response = conn.getresponse()
+
+ if response.status != http.client.OK:
+ print("missing", frame.number)
+ continue
+
+ print("got back", frame.number)
+
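+                # results are saved under the same zero-padded %06d.exr naming
+                # the slaves use when uploading rendered frames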
+ f = open(netsettings.path + "%06d" % frame.number + ".exr", "wb")
+ buf = response.read(1024)
+
+ while buf:
+ f.write(buf)
+ buf = response.read(1024)
+
+ f.close()
+
+ conn.close()
+
+ return ('FINISHED',)
+
+ def invoke(self, context, event):
+ return self.execute(context)
+
+@rnaOperator
+class netclientscan(bpy.types.Operator):
+    '''Listen on the local network for a master broadcasting its address and port'''
+    __idname__ = "render.netclientscan"
+    __label__ = "Net Render Client Scan"
+
+    # List of operator properties; the attributes will be assigned
+    # to the class instance from the operator settings before calling.
+
+    __props__ = []
+
+ def poll(self, context):
+ return True
+
+ def execute(self, context):
+ netsettings = context.scene.network_render
+
+ try:
+ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
+ s.settimeout(30)
+
+ s.bind(('', 8000))
+
+ buf, address = s.recvfrom(64)
+
+ print("received:", buf)
+
+ netsettings.server_address = address[0]
+ netsettings.server_port = int(str(buf, encoding='utf8'))
+ except socket.timeout:
+ print("no server info")
+
+ return ('FINISHED',)
+
+ def invoke(self, context, event):
+ return self.execute(context)
+
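+# A sketch of the announcing side this operator expects (the real master
+# implementation lives in netrender.master and may differ): broadcast the
+# server port, as a UTF-8 datagram, to UDP port 8000 on the local network at
+# a regular interval. server_port here stands for whatever port the master
+# serves on.
+#
+#   import socket, time
+#   s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+#   s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
+#   while True:
+#       s.sendto(bytes(str(server_port), encoding='utf8'), ('<broadcast>', 8000))
+#       time.sleep(10)
+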
+@rnaOperator
+class netclientweb(bpy.types.Operator):
+    '''Open the master's web monitor page in a browser'''
+    __idname__ = "render.netclientweb"
+    __label__ = "Net Render Client Web"
+
+    # List of operator properties; the attributes will be assigned
+    # to the class instance from the operator settings before calling.
+
+    __props__ = []
+
+ def poll(self, context):
+ return True
+
+ def execute(self, context):
+ netsettings = context.scene.network_render
+
+
+ # open connection to make sure server exists
+ conn = clientConnection(context.scene)
+
+ if conn:
+ conn.close()
+
+ webbrowser.open("http://%s:%i" % (netsettings.server_address, netsettings.server_port))
+
+ return ('FINISHED',)
+
+ def invoke(self, context, event):
+ return self.execute(context)
diff --git a/release/scripts/io/netrender/slave.py b/release/scripts/io/netrender/slave.py
new file mode 100644
index 00000000000..15ca6faf297
--- /dev/null
+++ b/release/scripts/io/netrender/slave.py
@@ -0,0 +1,224 @@
+import sys, os, platform
+import http, http.client, http.server, urllib
+import subprocess, time
+
+from netrender.utils import *
+import netrender.model
+
+CANCEL_POLL_SPEED = 2
+MAX_TIMEOUT = 10
+INCREMENT_TIMEOUT = 1
+
+# SetErrorMode is only available on Windows 2000 or newer; compare the major
+# version numerically (a plain string comparison would sort "10" before "5")
+if platform.system() == 'Windows' and int(platform.version().split('.')[0]) >= 5:
+ import ctypes
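+    # 0x0002 is SEM_NOGPFAULTERRORBOX; merging it into the current error mode
+    # keeps a crashing render subprocess from blocking on the crash dialog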
+ def SetErrorMode():
+ val = ctypes.windll.kernel32.SetErrorMode(0x0002)
+ ctypes.windll.kernel32.SetErrorMode(val | 0x0002)
+ return val
+
+ def RestoreErrorMode(val):
+ ctypes.windll.kernel32.SetErrorMode(val)
+else:
+ def SetErrorMode():
+ return 0
+
+ def RestoreErrorMode(val):
+ pass
+
+def slave_Info():
+ sysname, nodename, release, version, machine, processor = platform.uname()
+ slave = netrender.model.RenderSlave()
+ slave.name = nodename
+ slave.stats = sysname + " " + release + " " + machine + " " + processor
+ return slave
+
+def testCancel(conn, job_id, frame_number):
+ conn.request("HEAD", "/status", headers={"job-id":job_id, "job-frame": str(frame_number)})
+ response = conn.getresponse()
+
+ # cancelled if job isn't found anymore
+ if response.status == http.client.NO_CONTENT:
+ return True
+ else:
+ return False
+
+def testFile(conn, job_id, slave_id, JOB_PREFIX, file_path, main_path = None):
+ job_full_path = prefixPath(JOB_PREFIX, file_path, main_path)
+
+ if not os.path.exists(job_full_path):
+ temp_path = JOB_PREFIX + "slave.temp.blend"
+ conn.request("GET", "/file", headers={"job-id": job_id, "slave-id":slave_id, "job-file":file_path})
+ response = conn.getresponse()
+
+ if response.status != http.client.OK:
+ return None # file for job not returned by server, need to return an error code to server
+
+ f = open(temp_path, "wb")
+ buf = response.read(1024)
+
+ while buf:
+ f.write(buf)
+ buf = response.read(1024)
+
+ f.close()
+
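+        # fetch to a temporary name first, then rename: a partially downloaded
+        # file never appears under its final name (os.renames also creates any
+        # missing intermediate directories)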
+ os.renames(temp_path, job_full_path)
+
+ return job_full_path
+
+
+def render_slave(engine, scene):
+ netsettings = scene.network_render
+ timeout = 1
+
+    engine.update_stats("", "Network render node initialization")
+
+ conn = clientConnection(scene)
+
+ if conn:
+ conn.request("POST", "/slave", repr(slave_Info().serialize()))
+ response = conn.getresponse()
+
+ slave_id = response.getheader("slave-id")
+
+ NODE_PREFIX = netsettings.path + "slave_" + slave_id + os.sep
+ if not os.path.exists(NODE_PREFIX):
+ os.mkdir(NODE_PREFIX)
+
+ while not engine.test_break():
+
+ conn.request("GET", "/job", headers={"slave-id":slave_id})
+ response = conn.getresponse()
+
+ if response.status == http.client.OK:
+ timeout = 1 # reset timeout on new job
+
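+                # the job description arrives as repr() text and is rebuilt via
+                # eval(), so the slave implicitly trusts the master it connects to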
+ job = netrender.model.RenderJob.materialize(eval(str(response.read(), encoding='utf8')))
+
+ JOB_PREFIX = NODE_PREFIX + "job_" + job.id + os.sep
+ if not os.path.exists(JOB_PREFIX):
+ os.mkdir(JOB_PREFIX)
+
+
+ if job.type == netrender.model.JOB_BLENDER:
+                    job_path = job.files[0][0] # entries in job.files are (path, start, end) tuples
+ main_path, main_file = os.path.split(job_path)
+
+ job_full_path = testFile(conn, job.id, slave_id, JOB_PREFIX, job_path)
+ print("Fullpath", job_full_path)
+ print("File:", main_file, "and %i other files" % (len(job.files) - 1,))
+ engine.update_stats("", "Render File", main_file, "for job", job.id)
+
+ for file_path, start, end in job.files[1:]:
+ print("\t", file_path)
+ testFile(conn, job.id, slave_id, JOB_PREFIX, file_path, main_path)
+
+ # announce log to master
+ logfile = netrender.model.LogFile(job.id, [frame.number for frame in job.frames])
+ conn.request("POST", "/log", bytes(repr(logfile.serialize()), encoding='utf8'), headers={"slave-id":slave_id})
+ response = conn.getresponse()
+
+
+ first_frame = job.frames[0].number
+
+ # start render
+ start_t = time.time()
+
+ if job.type == netrender.model.JOB_BLENDER:
+ frame_args = []
+
+ for frame in job.frames:
+ print("frame", frame.number)
+ frame_args += ["-f", str(frame.number)]
+
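+                    # re-launch this same Blender binary (sys.argv[0]) in
+                    # background mode on the downloaded file, rendering the
+                    # requested frames as multilayer EXRs named ######.exr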
+ val = SetErrorMode()
+ process = subprocess.Popen([sys.argv[0], "-b", job_full_path, "-o", JOB_PREFIX + "######", "-E", "BLENDER_RENDER", "-F", "MULTILAYER"] + frame_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ RestoreErrorMode(val)
+ elif job.type == netrender.model.JOB_PROCESS:
+ command = job.frames[0].command
+ val = SetErrorMode()
+ process = subprocess.Popen(command.split(" "), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ RestoreErrorMode(val)
+
+ headers = {"job-id":job.id, "slave-id":slave_id}
+
+ cancelled = False
+ stdout = bytes()
+ run_t = time.time()
+                while process.poll() is None and not cancelled:
+ stdout += process.stdout.read(32)
+ current_t = time.time()
+ cancelled = engine.test_break()
+ if current_t - run_t > CANCEL_POLL_SPEED:
+
+ # update logs if needed
+ if stdout:
+                            # (the job's frames share a linked log, so only one frame's entry needs the update)
+ headers["job-frame"] = str(first_frame)
+ conn.request("PUT", "/log", stdout, headers=headers)
+ response = conn.getresponse()
+
+ stdout = bytes()
+
+ run_t = current_t
+ if testCancel(conn, job.id, first_frame):
+ cancelled = True
+
+ # read leftovers if needed
+ stdout += process.stdout.read()
+
+ if cancelled:
+ # kill process if needed
+                    if process.poll() is None:
+ process.terminate()
+ continue # to next frame
+
+ total_t = time.time() - start_t
+
+ avg_t = total_t / len(job.frames)
+
+ status = process.returncode
+
+ print("status", status)
+
+ # flush the rest of the logs
+ if stdout:
+                    # (the job's frames share a linked log, so only one frame's entry needs the update)
+ headers["job-frame"] = str(first_frame)
+ conn.request("PUT", "/log", stdout, headers=headers)
+ response = conn.getresponse()
+
+ headers = {"job-id":job.id, "slave-id":slave_id, "job-time":str(avg_t)}
+
+                if status == 0: # a non-zero exit status means the render failed
+ headers["job-result"] = str(DONE)
+ for frame in job.frames:
+ headers["job-frame"] = str(frame.number)
+
+ if job.type == netrender.model.JOB_BLENDER:
+ # send image back to server
+ f = open(JOB_PREFIX + "%06d" % frame.number + ".exr", 'rb')
+ conn.request("PUT", "/render", f, headers=headers)
+ f.close()
+ response = conn.getresponse()
+ elif job.type == netrender.model.JOB_PROCESS:
+ conn.request("PUT", "/render", headers=headers)
+ response = conn.getresponse()
+ else:
+ headers["job-result"] = str(ERROR)
+ for frame in job.frames:
+ headers["job-frame"] = str(frame.number)
+ # send error result back to server
+ conn.request("PUT", "/render", headers=headers)
+ response = conn.getresponse()
+ else:
+ if timeout < MAX_TIMEOUT:
+ timeout += INCREMENT_TIMEOUT
+
+ for i in range(timeout):
+ time.sleep(1)
+ if engine.test_break():
+ conn.close()
+ return
+
+ conn.close()
diff --git a/release/scripts/io/netrender/ui.py b/release/scripts/io/netrender/ui.py
new file mode 100644
index 00000000000..7681d4865e9
--- /dev/null
+++ b/release/scripts/io/netrender/ui.py
@@ -0,0 +1,321 @@
+import bpy
+import sys, os
+import http, http.client, http.server, urllib
+import subprocess, shutil, time, hashlib
+
+import netrender.slave as slave
+import netrender.master as master
+
+from netrender.utils import *
+
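+# netrender.utils (star-imported above) also defines VERSION and these frame
+# status constants; the local values below shadow them within this module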
+VERSION = b"0.3"
+
+PATH_PREFIX = "/tmp/"
+
+QUEUED = 0
+DISPATCHED = 1
+DONE = 2
+ERROR = 3
+
+class RenderButtonsPanel(bpy.types.Panel):
+ __space_type__ = "PROPERTIES"
+ __region_type__ = "WINDOW"
+ __context__ = "scene"
+ # COMPAT_ENGINES must be defined in each subclass, external engines can add themselves here
+
+ def poll(self, context):
+ rd = context.scene.render_data
+        return (not rd.use_game_engine) and (rd.engine in self.COMPAT_ENGINES)
+
+# Setting panel, use in the scene for now.
+@rnaType
+class SCENE_PT_network_settings(RenderButtonsPanel):
+ __label__ = "Network Settings"
+ COMPAT_ENGINES = set(['NET_RENDER'])
+
+ def draw_header(self, context):
+ layout = self.layout
+ scene = context.scene
+
+ def draw(self, context):
+ layout = self.layout
+
+ scene = context.scene
+ rd = scene.render_data
+
+ layout.active = True
+
+ split = layout.split()
+
+ col = split.column()
+
+ col.itemR(scene.network_render, "mode")
+ col.itemR(scene.network_render, "path")
+ col.itemR(scene.network_render, "server_address")
+ col.itemR(scene.network_render, "server_port")
+
+ if scene.network_render.mode == "RENDER_MASTER":
+ col.itemR(scene.network_render, "server_broadcast")
+ else:
+ col.itemO("render.netclientscan", icon="ICON_FILE_REFRESH", text="")
+
+@rnaType
+class SCENE_PT_network_job(RenderButtonsPanel):
+ __label__ = "Job Settings"
+ COMPAT_ENGINES = set(['NET_RENDER'])
+
+ def poll(self, context):
+ scene = context.scene
+ return super().poll(context) and scene.network_render.mode == "RENDER_CLIENT"
+
+ def draw(self, context):
+ layout = self.layout
+
+ scene = context.scene
+ rd = scene.render_data
+
+ layout.active = True
+
+ split = layout.split()
+
+ col = split.column()
+
+ col.itemO("render.netclientanim", icon='ICON_RENDER_ANIMATION', text="Animaton on network")
+ col.itemO("render.netclientsend", icon="ICON_FILE_BLEND", text="Send job")
+ col.itemO("render.netclientweb", icon="ICON_QUESTION", text="Open Master Monitor")
+ col.itemR(scene.network_render, "job_name")
+ col.itemR(scene.network_render, "priority")
+ col.itemR(scene.network_render, "chunks")
+
+@rnaType
+class SCENE_PT_network_slaves(RenderButtonsPanel):
+ __label__ = "Slaves Status"
+ COMPAT_ENGINES = set(['NET_RENDER'])
+
+ def poll(self, context):
+ scene = context.scene
+ return super().poll(context) and scene.network_render.mode == "RENDER_CLIENT"
+
+ def draw(self, context):
+ layout = self.layout
+
+ scene = context.scene
+ netsettings = scene.network_render
+
+ row = layout.row()
+ row.template_list(netsettings, "slaves", netsettings, "active_slave_index", rows=2)
+
+ col = row.column()
+
+ subcol = col.column(align=True)
+ subcol.itemO("render.netclientslaves", icon="ICON_FILE_REFRESH", text="")
+ subcol.itemO("render.netclientblacklistslave", icon="ICON_ZOOMOUT", text="")
+
+ if len(bpy.data.netrender_slaves) == 0 and len(netsettings.slaves) > 0:
+            while len(netsettings.slaves) > 0:
+                netsettings.slaves.remove(0)
+
+ if netsettings.active_slave_index >= 0 and len(netsettings.slaves) > 0:
+ layout.itemS()
+
+ slave = bpy.data.netrender_slaves[netsettings.active_slave_index]
+
+ layout.itemL(text="Name: " + slave.name)
+ layout.itemL(text="Address: " + slave.address[0])
+ layout.itemL(text="Seen: " + time.ctime(slave.last_seen))
+ layout.itemL(text="Stats: " + slave.stats)
+
+@rnaType
+class SCENE_PT_network_slaves_blacklist(RenderButtonsPanel):
+ __label__ = "Slaves Blacklist"
+ COMPAT_ENGINES = set(['NET_RENDER'])
+
+ def poll(self, context):
+ scene = context.scene
+ return super().poll(context) and scene.network_render.mode == "RENDER_CLIENT"
+
+ def draw(self, context):
+ layout = self.layout
+
+ scene = context.scene
+ netsettings = scene.network_render
+
+ row = layout.row()
+ row.template_list(netsettings, "slaves_blacklist", netsettings, "active_blacklisted_slave_index", rows=2)
+
+ col = row.column()
+
+ subcol = col.column(align=True)
+ subcol.itemO("render.netclientwhitelistslave", icon="ICON_ZOOMOUT", text="")
+
+ if len(bpy.data.netrender_blacklist) == 0 and len(netsettings.slaves_blacklist) > 0:
+            while len(netsettings.slaves_blacklist) > 0:
+                netsettings.slaves_blacklist.remove(0)
+
+ if netsettings.active_blacklisted_slave_index >= 0 and len(netsettings.slaves_blacklist) > 0:
+ layout.itemS()
+
+ slave = bpy.data.netrender_blacklist[netsettings.active_blacklisted_slave_index]
+
+ layout.itemL(text="Name: " + slave.name)
+ layout.itemL(text="Address: " + slave.address[0])
+ layout.itemL(text="Seen: " + slave.last_seen)
+ layout.itemL(text="Stats: " + time.ctime(slave.stats))
+
+@rnaType
+class SCENE_PT_network_jobs(RenderButtonsPanel):
+ __label__ = "Jobs"
+ COMPAT_ENGINES = set(['NET_RENDER'])
+
+ def poll(self, context):
+ scene = context.scene
+ return super().poll(context) and scene.network_render.mode == "RENDER_CLIENT"
+
+ def draw(self, context):
+ layout = self.layout
+
+ scene = context.scene
+ netsettings = scene.network_render
+
+ row = layout.row()
+ row.template_list(netsettings, "jobs", netsettings, "active_job_index", rows=2)
+
+ col = row.column()
+
+ subcol = col.column(align=True)
+ subcol.itemO("render.netclientstatus", icon="ICON_FILE_REFRESH", text="")
+ subcol.itemO("render.netclientcancel", icon="ICON_ZOOMOUT", text="")
+ subcol.itemO("render.netclientcancelall", icon="ICON_PANEL_CLOSE", text="")
+ subcol.itemO("render.netclientdownload", icon='ICON_RENDER_ANIMATION', text="")
+
+ if len(bpy.data.netrender_jobs) == 0 and len(netsettings.jobs) > 0:
+            while len(netsettings.jobs) > 0:
+                netsettings.jobs.remove(0)
+
+ if netsettings.active_job_index >= 0 and len(netsettings.jobs) > 0:
+ layout.itemS()
+
+ job = bpy.data.netrender_jobs[netsettings.active_job_index]
+
+ layout.itemL(text="Name: %s" % job.name)
+ layout.itemL(text="Length: %04i" % len(job))
+ layout.itemL(text="Done: %04i" % job.results[DONE])
+ layout.itemL(text="Error: %04i" % job.results[ERROR])
+
+@rnaType
+class NetRenderSettings(bpy.types.IDPropertyGroup):
+ pass
+
+@rnaType
+class NetRenderSlave(bpy.types.IDPropertyGroup):
+ pass
+
+@rnaType
+class NetRenderJob(bpy.types.IDPropertyGroup):
+ pass
+
+bpy.types.Scene.PointerProperty(attr="network_render", type=NetRenderSettings, name="Network Render", description="Network Render Settings")
+
+NetRenderSettings.StringProperty( attr="server_address",
+ name="Server address",
+ description="IP or name of the master render server",
+ maxlen = 128,
+ default = "[default]")
+
+NetRenderSettings.IntProperty( attr="server_port",
+ name="Server port",
+ description="port of the master render server",
+ default = 8000,
+ min=1,
+ max=65535)
+
+NetRenderSettings.BoolProperty( attr="server_broadcast",
+ name="Broadcast server address",
+ description="broadcast server address on local network",
+ default = True)
+
+if os.name == 'nt':
+ NetRenderSettings.StringProperty( attr="path",
+ name="Path",
+ description="Path for temporary files",
+ maxlen = 128,
+ default = "C:/tmp/")
+else:
+ NetRenderSettings.StringProperty( attr="path",
+ name="Path",
+ description="Path for temporary files",
+ maxlen = 128,
+ default = "/tmp/")
+
+NetRenderSettings.StringProperty( attr="job_name",
+ name="Job name",
+ description="Name of the job",
+ maxlen = 128,
+ default = "[default]")
+
+NetRenderSettings.IntProperty( attr="chunks",
+ name="Chunks",
+ description="Number of frame to dispatch to each slave in one chunk",
+ default = 5,
+ min=1,
+ max=65535)
+
+NetRenderSettings.IntProperty( attr="priority",
+ name="Priority",
+ description="Priority of the job",
+ default = 1,
+ min=1,
+ max=10)
+
+NetRenderSettings.StringProperty( attr="job_id",
+ name="Network job id",
+ description="id of the last sent render job",
+ maxlen = 64,
+ default = "")
+
+NetRenderSettings.IntProperty( attr="active_slave_index",
+ name="Index of the active slave",
+ description="",
+ default = -1,
+ min= -1,
+ max=65535)
+
+NetRenderSettings.IntProperty( attr="active_blacklisted_slave_index",
+ name="Index of the active slave",
+ description="",
+ default = -1,
+ min= -1,
+ max=65535)
+
+NetRenderSettings.IntProperty( attr="active_job_index",
+ name="Index of the active job",
+ description="",
+ default = -1,
+ min= -1,
+ max=65535)
+
+NetRenderSettings.EnumProperty(attr="mode",
+ items=(
+ ("RENDER_CLIENT", "Client", "Act as render client"),
+ ("RENDER_MASTER", "Master", "Act as render master"),
+ ("RENDER_SLAVE", "Slave", "Act as render slave"),
+ ),
+ name="network mode",
+ description="mode of operation of this instance",
+ default="RENDER_CLIENT")
+
+NetRenderSettings.CollectionProperty(attr="slaves", type=NetRenderSlave, name="Slaves", description="")
+NetRenderSettings.CollectionProperty(attr="slaves_blacklist", type=NetRenderSlave, name="Slaves Blacklist", description="")
+NetRenderSettings.CollectionProperty(attr="jobs", type=NetRenderJob, name="Job List", description="")
+
+NetRenderSlave.StringProperty( attr="name",
+ name="Name of the slave",
+ description="",
+ maxlen = 64,
+ default = "")
+
+NetRenderJob.StringProperty( attr="name",
+ name="Name of the job",
+ description="",
+ maxlen = 128,
+ default = "")
diff --git a/release/scripts/io/netrender/utils.py b/release/scripts/io/netrender/utils.py
new file mode 100644
index 00000000000..06393a738a0
--- /dev/null
+++ b/release/scripts/io/netrender/utils.py
@@ -0,0 +1,86 @@
+import bpy
+import sys, os
+import re
+import http, http.client, http.server, urllib
+import subprocess, shutil, time, hashlib
+
+import netrender.model
+
+VERSION = b"0.5"
+
+# Jobs status
+JOB_WAITING = 0 # before all data has been entered
+JOB_PAUSED = 1 # paused by user
+JOB_FINISHED = 2 # finished rendering
+JOB_QUEUED = 3 # ready to be dispatched
+
+# Frames status
+QUEUED = 0
+DISPATCHED = 1
+DONE = 2
+ERROR = 3
+
+STATUS_TEXT = {
+ QUEUED: "Queued",
+ DISPATCHED: "Dispatched",
+ DONE: "Done",
+ ERROR: "Error"
+ }
+
+def rnaType(rna_type):
+ bpy.types.register(rna_type)
+ return rna_type
+
+def rnaOperator(rna_op):
+ bpy.ops.add(rna_op)
+ return rna_op
+
+def clientConnection(scene):
+ netsettings = scene.network_render
+
+ if netsettings.server_address == "[default]":
+ bpy.ops.render.netclientscan()
+
+ conn = http.client.HTTPConnection(netsettings.server_address, netsettings.server_port)
+
+ if clientVerifyVersion(conn):
+ return conn
+ else:
+ conn.close()
+ return None
+
+def clientVerifyVersion(conn):
+ conn.request("GET", "/version")
+ response = conn.getresponse()
+
+ if response.status != http.client.OK:
+ conn.close()
+ return False
+
+ server_version = response.read()
+
+ if server_version != VERSION:
+ print("Incorrect server version!")
+ print("expected", VERSION, "received", server_version)
+ return False
+
+ return True
+
+def prefixPath(prefix_directory, file_path, prefix_path):
+ if os.path.isabs(file_path):
+ # if an absolute path, make sure path exists, if it doesn't, use relative local path
+ full_path = file_path
+ if not os.path.exists(full_path):
+ p, n = os.path.split(full_path)
+
+ if prefix_path and p.startswith(prefix_path):
+ directory = prefix_directory + p[len(prefix_path):]
+                # join with an explicit separator; plain string concatenation
+                # would glue the directory and the file name together
+                full_path = os.path.join(directory, n)
+ if not os.path.exists(directory):
+ os.mkdir(directory)
+ else:
+ full_path = prefix_directory + n
+ else:
+ full_path = prefix_directory + file_path
+
+ return full_path
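+
+# Illustrative only (assuming POSIX separators and that the absolute path does
+# not already exist on the local machine):
+#
+#   prefixPath("/tmp/job_1/", "scene.blend", None)
+#       -> "/tmp/job_1/scene.blend"       (relative paths are prefixed as-is)
+#   prefixPath("/tmp/job_1/", "/home/me/tex/wood.png", "/home/me")
+#       -> "/tmp/job_1//tex/wood.png"     (the layout under prefix_path is
+#                                          recreated under the job directory;
+#                                          the doubled separator is harmless)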