Welcome to mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender-addons.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
Diffstat (limited to 'io_scene_3ds')
-rw-r--r--io_scene_3ds/__init__.py167
-rw-r--r--io_scene_3ds/export_3ds.py1062
-rw-r--r--io_scene_3ds/import_3ds.py935
3 files changed, 2164 insertions, 0 deletions
diff --git a/io_scene_3ds/__init__.py b/io_scene_3ds/__init__.py
new file mode 100644
index 00000000..e7934afc
--- /dev/null
+++ b/io_scene_3ds/__init__.py
@@ -0,0 +1,167 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
# Add-on metadata consumed by Blender's add-on system.
bl_info = {
    "name": "Autodesk 3DS format",
    "author": "Bob Holcomb, Campbell Barton",
    "blender": (2, 5, 7),
    "api": 35622,
    "location": "File > Import-Export",
    "description": "Import-Export 3DS, meshes, uvs, materials, textures, cameras & lamps",
    "warning": "",
    "wiki_url": ("http://wiki.blender.org/index.php/Extensions:2.5/Py/"
                 "Scripts/Import-Export/Autodesk_3DS"),
    "tracker_url": "",
    "support": 'OFFICIAL',
    "category": "Import-Export",
}
+
# To support reload properly, try to access a package var, if it's there, reload everything
# (When Blender re-executes the add-on, "bpy" is already in locals(), so the
# submodules must be explicitly reloaded to pick up source changes.)
if "bpy" in locals():
    import imp
    if "import_3ds" in locals():
        imp.reload(import_3ds)
    if "export_3ds" in locals():
        imp.reload(export_3ds)
+
+
+import bpy
+from bpy.props import StringProperty, FloatProperty, BoolProperty, EnumProperty
+from bpy_extras.io_utils import ImportHelper, ExportHelper, axis_conversion
+
+
class Import3DS(bpy.types.Operator, ImportHelper):
    '''Import from 3DS file format (.3ds)'''
    bl_idname = "import_scene.autodesk_3ds"
    bl_label = 'Import 3DS'

    filename_ext = ".3ds"
    filter_glob = StringProperty(default="*.3ds", options={'HIDDEN'})

    # Typo fixes in the user-visible descriptions below:
    # "reacehs" -> "reaches", "assosiated" -> "associated".
    constrain_size = FloatProperty(name="Size Constraint", description="Scale the model by 10 until it reaches the size constraint. Zero Disables.", min=0.0, max=1000.0, soft_min=0.0, soft_max=1000.0, default=10.0)
    use_image_search = BoolProperty(name="Image Search", description="Search subdirectories for any associated images (Warning, may be slow)", default=True)
    use_apply_transform = BoolProperty(name="Apply Transform", description="Workaround for object transformations importing incorrectly", default=True)

    axis_forward = EnumProperty(
            name="Forward",
            items=(('X', "X Forward", ""),
                   ('Y', "Y Forward", ""),
                   ('Z', "Z Forward", ""),
                   ('-X', "-X Forward", ""),
                   ('-Y', "-Y Forward", ""),
                   ('-Z', "-Z Forward", ""),
                   ),
            default='Y',
            )

    axis_up = EnumProperty(
            name="Up",
            items=(('X', "X Up", ""),
                   ('Y', "Y Up", ""),
                   ('Z', "Z Up", ""),
                   ('-X', "-X Up", ""),
                   ('-Y', "-Y Up", ""),
                   ('-Z', "-Z Up", ""),
                   ),
            default='Z',
            )

    def execute(self, context):
        from . import import_3ds

        # Forward all operator properties except the UI-only ones.
        keywords = self.as_keywords(ignore=("axis_forward", "axis_up", "filter_glob"))

        # Build the axis-conversion matrix from the chosen forward/up axes.
        global_matrix = axis_conversion(from_forward=self.axis_forward, from_up=self.axis_up).to_4x4()
        keywords["global_matrix"] = global_matrix

        return import_3ds.load(self, context, **keywords)
+
+
class Export3DS(bpy.types.Operator, ExportHelper):
    '''Export to 3DS file format (.3ds)'''
    bl_idname = "export_scene.autodesk_3ds"
    bl_label = 'Export 3DS'

    filename_ext = ".3ds"
    filter_glob = StringProperty(default="*.3ds", options={'HIDDEN'})

    use_selection = BoolProperty(name="Selection Only", description="Export selected objects only", default=False)

    # Axis-conversion options; consumed in execute() and turned into a
    # matrix rather than being forwarded to export_3ds.save() directly.
    axis_forward = EnumProperty(
            name="Forward",
            items=(('X', "X Forward", ""),
                   ('Y', "Y Forward", ""),
                   ('Z', "Z Forward", ""),
                   ('-X', "-X Forward", ""),
                   ('-Y', "-Y Forward", ""),
                   ('-Z', "-Z Forward", ""),
                   ),
            default='Y',
            )

    axis_up = EnumProperty(
            name="Up",
            items=(('X', "X Up", ""),
                   ('Y', "Y Up", ""),
                   ('Z', "Z Up", ""),
                   ('-X', "-X Up", ""),
                   ('-Y', "-Y Up", ""),
                   ('-Z', "-Z Up", ""),
                   ),
            default='Z',
            )

    def execute(self, context):
        from . import export_3ds

        # Drop UI-only properties before forwarding; "check_existing" is
        # only used by the file-select dialog.
        keywords = self.as_keywords(ignore=("axis_forward", "axis_up", "filter_glob", "check_existing"))
        global_matrix = axis_conversion(to_forward=self.axis_forward, to_up=self.axis_up).to_4x4()
        keywords["global_matrix"] = global_matrix

        return export_3ds.save(self, context, **keywords)
+
+
+# Add to a menu
def menu_func_export(self, context):
    # Menu entry shown in File > Export.
    layout = self.layout
    layout.operator(Export3DS.bl_idname, text="3D Studio (.3ds)")
+
+
def menu_func_import(self, context):
    # Menu entry shown in File > Import.
    layout = self.layout
    layout.operator(Import3DS.bl_idname, text="3D Studio (.3ds)")
+
+
def register():
    """Register every class in this module and add the menu entries."""
    bpy.utils.register_module(__name__)

    bpy.types.INFO_MT_file_import.append(menu_func_import)
    bpy.types.INFO_MT_file_export.append(menu_func_export)
+
+
def unregister():
    """Unregister this module's classes and remove the menu entries."""
    bpy.utils.unregister_module(__name__)

    bpy.types.INFO_MT_file_import.remove(menu_func_import)
    bpy.types.INFO_MT_file_export.remove(menu_func_export)
+
+# NOTES:
+# why add 1 extra vertex? and remove it when done? - "Answer - eekadoodle - would need to re-order UV's without this since face order isnt always what we give blender, BMesh will solve :D"
+# disabled scaling to size, this requires exposing bb (easy) and understanding how it works (needs some time)
+
# Allow running this file directly (e.g. from Blender's text editor).
if __name__ == "__main__":
    register()
diff --git a/io_scene_3ds/export_3ds.py b/io_scene_3ds/export_3ds.py
new file mode 100644
index 00000000..b9f5d982
--- /dev/null
+++ b/io_scene_3ds/export_3ds.py
@@ -0,0 +1,1062 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+# Script copyright (C) Bob Holcomb
+# Contributors: Campbell Barton, Bob Holcomb, Richard Lärkäng, Damien McGinnes, Mark Stijnman
+
+"""
+Exporting is based on 3ds loader from www.gametutorials.com(Thanks DigiBen) and using information
+from the lib3ds project (http://lib3ds.sourceforge.net/) sourcecode.
+"""
+
+######################################################
+# Data Structures
+######################################################
+
#Some of the chunks that we will export
#----- Primary Chunk, at the beginning of each file
PRIMARY = 0x4D4D

#------ Main Chunks
OBJECTINFO = 0x3D3D  # This gives the version of the mesh and is found right before the material and object information
VERSION = 0x0002  # This gives the version of the .3ds file
KFDATA = 0xB000  # This is the header for all of the key frame info

#------ sub defines of OBJECTINFO
MATERIAL = 45055  # 0xAFFF // This stores the texture info
OBJECT = 16384  # 0x4000 // This stores the faces, vertices, etc...

#>------ sub defines of MATERIAL
MATNAME = 0xA000  # This holds the material name
MATAMBIENT = 0xA010  # Ambient color of the object/material
MATDIFFUSE = 0xA020  # This holds the color of the object/material
MATSPECULAR = 0xA030  # Specular color of the object/material
MATSHINESS = 0xA040  # presumably shininess/gloss -- not written by this exporter, confirm against the 3DS spec
MATMAP = 0xA200  # This is a header for a new material
MATMAPFILE = 0xA300  # This holds the file name of the texture

RGB1 = 0x0011
RGB2 = 0x0012

#>------ sub defines of OBJECT
OBJECT_MESH = 0x4100  # This lets us know that we are reading a new object
OBJECT_LIGHT = 0x4600  # This lets us know we are reading a light object
OBJECT_CAMERA = 0x4700  # This lets us know we are reading a camera object

#>------ sub defines of CAMERA
OBJECT_CAM_RANGES = 0x4720  # The camera range values

#>------ sub defines of OBJECT_MESH
OBJECT_VERTICES = 0x4110  # The objects vertices
OBJECT_FACES = 0x4120  # The objects faces
OBJECT_MATERIAL = 0x4130  # This is found if the object has a material, either texture map or color
OBJECT_UV = 0x4140  # The UV texture coordinates
OBJECT_TRANS_MATRIX = 0x4160  # The Object Matrix

#>------ sub defines of KFDATA
KFDATA_KFHDR = 0xB00A
KFDATA_KFSEG = 0xB008
KFDATA_KFCURTIME = 0xB009
KFDATA_OBJECT_NODE_TAG = 0xB002

#>------ sub defines of OBJECT_NODE_TAG
OBJECT_NODE_ID = 0xB030
OBJECT_NODE_HDR = 0xB010
OBJECT_PIVOT = 0xB013
OBJECT_INSTANCE_NAME = 0xB011
POS_TRACK_TAG = 0xB020
ROT_TRACK_TAG = 0xB021
SCL_TRACK_TAG = 0xB022

import struct

# So 3ds max can open files, limit names to 12 in length
# this is very annoying for filenames!
name_unique = []  # stores str, ascii only
name_mapping = {}  # stores {orig: bytes} mapping
+
+
def sane_name(name):
    """Return an ASCII bytes name, at most 12 chars plus a uniquifying
    suffix, caching the mapping so repeated calls are stable."""
    # Return the cached sanitized name if this one was seen before.
    fixed = name_mapping.get(name)
    if fixed is not None:
        return fixed

    # Strip non-ASCII characters and clamp to 12 chars (3ds max limit).
    base = name.encode("ASCII", "replace").decode("ASCII")[:12]
    candidate = base
    suffix = 0
    while candidate in name_unique:
        candidate = "%s.%.3d" % (base, suffix)
        suffix += 1

    # Remember the str form for uniqueness checks, the bytes form for writing.
    name_unique.append(candidate)
    encoded = candidate.encode("ASCII", "replace")
    name_mapping[name] = encoded
    return encoded
+
+
def uv_key(uv):
    """Quantize a UV pair to 6 decimals so near-identical coords hash equal."""
    u, v = uv
    return (round(u, 6), round(v, 6))
+
# size defines:
SZ_SHORT = 2  # bytes in a little-endian unsigned short ("<H")
SZ_INT = 4  # bytes in a little-endian unsigned int ("<I")
SZ_FLOAT = 4  # bytes in a little-endian IEEE float ("<f")
+
+
+class _3ds_short(object):
+ '''Class representing a short (2-byte integer) for a 3ds file.
+ *** This looks like an unsigned short H is unsigned from the struct docs - Cam***'''
+ __slots__ = ("value", )
+
+ def __init__(self, val=0):
+ self.value = val
+
+ def get_size(self):
+ return SZ_SHORT
+
+ def write(self, file):
+ file.write(struct.pack("<H", self.value))
+
+ def __str__(self):
+ return str(self.value)
+
+
+class _3ds_int(object):
+ '''Class representing an int (4-byte integer) for a 3ds file.'''
+ __slots__ = ("value", )
+
+ def __init__(self, val):
+ self.value = val
+
+ def get_size(self):
+ return SZ_INT
+
+ def write(self, file):
+ file.write(struct.pack("<I", self.value))
+
+ def __str__(self):
+ return str(self.value)
+
+
+class _3ds_float(object):
+ '''Class representing a 4-byte IEEE floating point number for a 3ds file.'''
+ __slots__ = ("value", )
+
+ def __init__(self, val):
+ self.value = val
+
+ def get_size(self):
+ return SZ_FLOAT
+
+ def write(self, file):
+ file.write(struct.pack("<f", self.value))
+
+ def __str__(self):
+ return str(self.value)
+
+
+class _3ds_string(object):
+ '''Class representing a zero-terminated string for a 3ds file.'''
+ __slots__ = ("value", )
+
+ def __init__(self, val):
+ assert(type(val) == bytes)
+ self.value = val
+
+ def get_size(self):
+ return (len(self.value) + 1)
+
+ def write(self, file):
+ binary_format = "<%ds" % (len(self.value) + 1)
+ file.write(struct.pack(binary_format, self.value))
+
+ def __str__(self):
+ return self.value
+
+
+class _3ds_point_3d(object):
+ '''Class representing a three-dimensional point for a 3ds file.'''
+ __slots__ = "x", "y", "z"
+
+ def __init__(self, point):
+ self.x, self.y, self.z = point
+
+ def get_size(self):
+ return 3 * SZ_FLOAT
+
+ def write(self, file):
+ file.write(struct.pack('<3f', self.x, self.y, self.z))
+
+ def __str__(self):
+ return '(%f, %f, %f)' % (self.x, self.y, self.z)
+
+# Used for writing a track
+"""
+class _3ds_point_4d(object):
+ '''Class representing a four-dimensional point for a 3ds file, for instance a quaternion.'''
+ __slots__ = "x","y","z","w"
+ def __init__(self, point=(0.0,0.0,0.0,0.0)):
+ self.x, self.y, self.z, self.w = point
+
+ def get_size(self):
+ return 4*SZ_FLOAT
+
+ def write(self,file):
+ data=struct.pack('<4f', self.x, self.y, self.z, self.w)
+ file.write(data)
+
+ def __str__(self):
+ return '(%f, %f, %f, %f)' % (self.x, self.y, self.z, self.w)
+"""
+
+
+class _3ds_point_uv(object):
+ '''Class representing a UV-coordinate for a 3ds file.'''
+ __slots__ = ("uv", )
+
+ def __init__(self, point):
+ self.uv = point
+
+ def __cmp__(self, other):
+ return cmp(self.uv, other.uv)
+
+ def get_size(self):
+ return 2 * SZ_FLOAT
+
+ def write(self, file):
+ data = struct.pack('<2f', self.uv[0], self.uv[1])
+ file.write(data)
+
+ def __str__(self):
+ return '(%g, %g)' % self.uv
+
+
+class _3ds_rgb_color(object):
+ '''Class representing a (24-bit) rgb color for a 3ds file.'''
+ __slots__ = "r", "g", "b"
+
+ def __init__(self, col):
+ self.r, self.g, self.b = col
+
+ def get_size(self):
+ return 3
+
+ def write(self, file):
+ file.write(struct.pack('<3B', int(255 * self.r), int(255 * self.g), int(255 * self.b)))
+# file.write(struct.pack('<3c', chr(int(255*self.r)), chr(int(255*self.g)), chr(int(255*self.b)) ) )
+
+ def __str__(self):
+ return '{%f, %f, %f}' % (self.r, self.g, self.b)
+
+
+class _3ds_face(object):
+ '''Class representing a face for a 3ds file.'''
+ __slots__ = ("vindex", )
+
+ def __init__(self, vindex):
+ self.vindex = vindex
+
+ def get_size(self):
+ return 4 * SZ_SHORT
+
+ def write(self, file):
+ # The last zero is only used by 3d studio
+ file.write(struct.pack("<4H", self.vindex[0], self.vindex[1], self.vindex[2], 0))
+
+ def __str__(self):
+ return "[%d %d %d]" % (self.vindex[0], self.vindex[1], self.vindex[2])
+
+
class _3ds_array(object):
    '''A counted array for a 3ds file.

    On disk: a _3ds_short item count, followed by the items themselves.'''
    __slots__ = "values", "size"

    def __init__(self):
        self.values = []
        self.size = SZ_SHORT  # the count field itself

    def add(self, item):
        '''Append *item* and grow the cached byte size.'''
        self.values.append(item)
        self.size += item.get_size()

    def get_size(self):
        return self.size

    def write(self, file):
        _3ds_short(len(self.values)).write(file)
        for value in self.values:
            value.write(file)

    def __str__(self):
        # Only report the item count so dump() output stays readable.
        return '(%d items)' % len(self.values)
+
+
+class _3ds_named_variable(object):
+ '''Convenience class for named variables.'''
+
+ __slots__ = "value", "name"
+
+ def __init__(self, name, val=None):
+ self.name = name
+ self.value = val
+
+ def get_size(self):
+ if self.value is None:
+ return 0
+ else:
+ return self.value.get_size()
+
+ def write(self, file):
+ if self.value is not None:
+ self.value.write(file)
+
+ def dump(self, indent):
+ if self.value is not None:
+ spaces = ""
+ for i in range(indent):
+ spaces += " "
+ if (self.name != ""):
+ print(spaces, self.name, " = ", self.value)
+ else:
+ print(spaces, "[unnamed]", " = ", self.value)
+
+
# The chunk class: the basic structural unit of a 3ds file.
class _3ds_chunk(object):
    '''A chunk in a 3ds file: a 2-byte ID, a 4-byte size, then the payload.

    Chunks contain zero or more variables, followed by zero or more subchunks.
    '''
    __slots__ = "ID", "size", "variables", "subchunks"

    def __init__(self, id=0):
        self.ID = _3ds_short(id)
        self.size = _3ds_int(0)
        self.variables = []
        self.subchunks = []

    def set_ID(self, id):
        # Bugfix: 'self' was missing from the original signature, so any
        # call obj.set_ID(x) bound the instance to 'id' and then raised
        # NameError on the undefined 'self'.
        self.ID = _3ds_short(id)

    def add_variable(self, name, var):
        '''Add a named variable.

        The name is mostly for debugging purposes.'''
        self.variables.append(_3ds_named_variable(name, var))

    def add_subchunk(self, chunk):
        '''Add a subchunk.'''
        self.subchunks.append(chunk)

    def get_size(self):
        '''Calculate the total byte size of the chunk (header + payload).

        The sizes of the variables and subchunks are used to determine this
        chunk's size; the result is also cached in self.size, which write()
        then emits.'''
        tmpsize = self.ID.get_size() + self.size.get_size()
        for variable in self.variables:
            tmpsize += variable.get_size()
        for subchunk in self.subchunks:
            tmpsize += subchunk.get_size()
        self.size.value = tmpsize
        return self.size.value

    def write(self, file):
        '''Write the chunk header, variables, then subchunks to a file.

        NOTE(review): self.size is only correct after get_size() has been
        called -- confirm callers always do so before writing.'''
        self.ID.write(file)
        self.size.write(file)
        for variable in self.variables:
            variable.write(file)
        for subchunk in self.subchunks:
            subchunk.write(file)

    def dump(self, indent=0):
        '''Print the chunk tree to standard output for debugging.

        (The original docstring wrongly said "Write the chunk to a file".)'''
        spaces = " " * indent
        print(spaces, "ID=", hex(self.ID.value), "size=", self.get_size())
        for variable in self.variables:
            variable.dump(indent + 1)
        for subchunk in self.subchunks:
            subchunk.dump(indent + 1)
+
+
+######################################################
+# EXPORT
+######################################################
+
def get_material_images(material):
    """Collect the images of all IMAGE-type texture slots on *material*.

    Returns an empty list when material is None/falsy."""
    if not material:
        return []
    images = []
    for slot in material.texture_slots:
        if slot and slot.texture.type == 'IMAGE' and slot.texture.image:
            images.append(slot.texture.image)
    return images
+
+
def make_material_subchunk(id, color):
    '''Build a colour subchunk (diffuse/ambient/specular) under chunk *id*.

    The format also allows a second RGB2 colour chunk; it is optional and
    not written here.'''
    sub = _3ds_chunk(id)
    rgb = _3ds_chunk(RGB1)
    rgb.add_variable("color1", _3ds_rgb_color(color))
    sub.add_subchunk(rgb)
    return sub
+
+
def make_material_texture_chunk(id, images):
    """Make a material map chunk holding one MATMAPFILE subchunk per image."""
    import os  # hoisted: was re-imported on every add_image() call
    mat_sub = _3ds_chunk(id)

    def add_image(img):
        # Bugfix: the original ignored its parameter and closed over the
        # loop variable "image", which only worked by accident.
        filename = os.path.basename(img.filepath)
        mat_sub_file = _3ds_chunk(MATMAPFILE)
        mat_sub_file.add_variable("mapfile", _3ds_string(sane_name(filename)))
        mat_sub.add_subchunk(mat_sub_file)

    for image in images:
        add_image(image)

    return mat_sub
+
+
def make_material_chunk(material, image):
    '''Make a material chunk out of a blender material.

    material may be None, in which case a default grey material named
    "None" is written.  When image is given its name is appended to the
    material name, so each material/image pair gets a distinct name.'''
    material_chunk = _3ds_chunk(MATERIAL)
    name = _3ds_chunk(MATNAME)

    name_str = material.name if material else "None"

    if image:
        name_str += image.name

    name.add_variable("name", _3ds_string(sane_name(name_str)))
    material_chunk.add_subchunk(name)

    if not material:
        # No material: neutral defaults (black ambient, light grey diffuse,
        # white specular).
        material_chunk.add_subchunk(make_material_subchunk(MATAMBIENT, (0, 0, 0)))
        material_chunk.add_subchunk(make_material_subchunk(MATDIFFUSE, (.8, .8, .8)))
        material_chunk.add_subchunk(make_material_subchunk(MATSPECULAR, (1, 1, 1)))

    else:
        # Blender stores ambient as a factor applied to the diffuse colour.
        material_chunk.add_subchunk(make_material_subchunk(MATAMBIENT, [a * material.ambient for a in material.diffuse_color]))
        material_chunk.add_subchunk(make_material_subchunk(MATDIFFUSE, material.diffuse_color))
        material_chunk.add_subchunk(make_material_subchunk(MATSPECULAR, material.specular_color))

        images = get_material_images(material)  # always a list (possibly empty)
        if image:
            images.append(image)

        if images:
            material_chunk.add_subchunk(make_material_texture_chunk(MATMAP, images))

    return material_chunk
+
+
class tri_wrapper(object):
    '''Class representing a triangle.

    Used when converting faces to triangles'''

    __slots__ = "vertex_index", "mat", "image", "faceuvs", "offset"

    def __init__(self, vindex=(0, 0, 0), mat=None, image=None, faceuvs=None):
        self.vertex_index = vindex  # 3-tuple of mesh vertex indices
        self.mat = mat  # material index of the source face
        self.image = image  # face image name (str) or None (see extract_triangles)
        self.faceuvs = faceuvs  # 3-tuple of uv_key() pairs, or None
        self.offset = [0, 0, 0]  # offset indices (filled in by remove_face_uv)
+
+
def extract_triangles(mesh):
    '''Extract triangles from a mesh.

    If the mesh contains quads, they will be split into triangles.

    Returns a list of tri_wrapper instances; when the mesh has a UV layer
    each triangle also carries per-corner uv_key() tuples and the name of
    the face image (if any).'''
    tri_list = []
    do_uv = len(mesh.uv_textures)  # truthy when the mesh has any UV layer

    img = None
    for i, face in enumerate(mesh.faces):
        f_v = face.vertices

        uf = mesh.uv_textures.active.data[i] if do_uv else None

        if do_uv:
            f_uv = uf.uv
            img = uf.image if uf else None
            if img is not None:
                img = img.name  # only the name is needed downstream

        # if f_v[3] == 0:
        if len(f_v) == 3:
            new_tri = tri_wrapper((f_v[0], f_v[1], f_v[2]), face.material_index, img)
            if (do_uv):
                new_tri.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[1]), uv_key(f_uv[2])
            tri_list.append(new_tri)

        else:  # it's a quad
            # Split the quad along the 0-2 diagonal: triangles 0-1-2 and 0-2-3.
            new_tri = tri_wrapper((f_v[0], f_v[1], f_v[2]), face.material_index, img)
            new_tri_2 = tri_wrapper((f_v[0], f_v[2], f_v[3]), face.material_index, img)

            if (do_uv):
                new_tri.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[1]), uv_key(f_uv[2])
                new_tri_2.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[2]), uv_key(f_uv[3])

            tri_list.append(new_tri)
            tri_list.append(new_tri_2)

    return tri_list
+
+
def remove_face_uv(verts, tri_list):
    '''Remove face UV coordinates from a list of triangles.

    Since 3ds files only support one pair of uv coordinates for each vertex, face uv coordinates
    need to be converted to vertex uv coordinates. That means that vertices need to be duplicated when
    there are multiple uv coordinates per vertex.

    Returns (vert_array, uv_array, tri_list), with the triangles re-indexed
    into the duplicated vertex list.'''

    # initialize a list of UniqueLists, one per vertex:
    #uv_list = [UniqueList() for i in xrange(len(verts))]
    unique_uvs = [{} for i in range(len(verts))]

    # for each face uv coordinate, add it to the UniqueList of the vertex
    for tri in tri_list:
        for i in range(3):
            # store the index into the UniqueList for future reference:
            # offset.append(uv_list[tri.vertex_index[i]].add(_3ds_point_uv(tri.faceuvs[i])))

            context_uv_vert = unique_uvs[tri.vertex_index[i]]
            uvkey = tri.faceuvs[i]

            offset_index__uv_3ds = context_uv_vert.get(uvkey)

            if not offset_index__uv_3ds:
                # First time this uv appears on this vertex: record the
                # (per-vertex offset, packed uv value) pair.
                offset_index__uv_3ds = context_uv_vert[uvkey] = len(context_uv_vert), _3ds_point_uv(uvkey)

            tri.offset[i] = offset_index__uv_3ds[0]

    # At this point, each vertex has a UniqueList containing every uv coordinate that is associated with it
    # only once.

    # Now we need to duplicate every vertex as many times as it has uv coordinates and make sure the
    # faces refer to the new face indices:
    vert_index = 0
    vert_array = _3ds_array()
    uv_array = _3ds_array()
    index_list = []
    for i, vert in enumerate(verts):
        index_list.append(vert_index)

        pt = _3ds_point_3d(vert.co)  # reuse, should be ok
        uvmap = [None] * len(unique_uvs[i])
        for ii, uv_3ds in unique_uvs[i].values():
            # add a vertex duplicate to the vertex_array for every uv associated with this vertex:
            vert_array.add(pt)
            # add the uv coordinate to the uv array:
            # This for loop does not give uv's ordered by ii, so we create a new map
            # and add the uv's later
            # uv_array.add(uv_3ds)
            uvmap[ii] = uv_3ds

        # Add the uv's in the correct order
        for uv_3ds in uvmap:
            # add the uv coordinate to the uv array:
            uv_array.add(uv_3ds)

        vert_index += len(unique_uvs[i])

    # Make sure the triangle vertex indices now refer to the new vertex list:
    for tri in tri_list:
        for i in range(3):
            tri.offset[i] += index_list[tri.vertex_index[i]]
        tri.vertex_index = tri.offset

    return vert_array, uv_array, tri_list
+
+
def make_faces_chunk(tri_list, mesh, materialDict):
    '''Make a chunk for the faces.

    Also adds subchunks assigning materials to all faces.'''

    materials = mesh.materials
    if not materials:
        mat = None  # referenced below when there are no material slots

    face_chunk = _3ds_chunk(OBJECT_FACES)
    face_list = _3ds_array()

    if mesh.uv_textures:
        # Gather materials used in this mesh - mat/image pairs
        unique_mats = {}
        for i, tri in enumerate(tri_list):

            face_list.add(_3ds_face(tri.vertex_index))

            if materials:
                mat = materials[tri.mat]
                if mat:
                    mat = mat.name

            img = tri.image

            try:
                context_mat_face_array = unique_mats[mat, img][1]
            except KeyError:
                # Bugfix: was a bare "except:", which would also swallow
                # unrelated errors; only a missing key is expected here.
                name_str = mat if mat else "None"
                if img:
                    name_str += img

                context_mat_face_array = _3ds_array()
                unique_mats[mat, img] = _3ds_string(sane_name(name_str)), context_mat_face_array

            context_mat_face_array.add(_3ds_short(i))

        face_chunk.add_variable("faces", face_list)
        for mat_name, mat_faces in unique_mats.values():
            obj_material_chunk = _3ds_chunk(OBJECT_MATERIAL)
            obj_material_chunk.add_variable("name", mat_name)
            obj_material_chunk.add_variable("face_list", mat_faces)
            face_chunk.add_subchunk(obj_material_chunk)

    else:
        # No UVs: one face-index array per non-empty material slot.
        obj_material_faces = []
        obj_material_names = []
        for m in materials:
            if m:
                obj_material_names.append(_3ds_string(sane_name(m.name)))
                obj_material_faces.append(_3ds_array())
        n_materials = len(obj_material_names)

        for i, tri in enumerate(tri_list):
            face_list.add(_3ds_face(tri.vertex_index))
            if (tri.mat < n_materials):
                obj_material_faces[tri.mat].add(_3ds_short(i))

        face_chunk.add_variable("faces", face_list)
        for i in range(n_materials):
            obj_material_chunk = _3ds_chunk(OBJECT_MATERIAL)
            obj_material_chunk.add_variable("name", obj_material_names[i])
            obj_material_chunk.add_variable("face_list", obj_material_faces[i])
            face_chunk.add_subchunk(obj_material_chunk)

    return face_chunk
+
+
def make_vert_chunk(vert_array):
    '''Wrap an array of vertices in an OBJECT_VERTICES chunk.'''
    chunk = _3ds_chunk(OBJECT_VERTICES)
    chunk.add_variable("vertices", vert_array)
    return chunk
+
+
def make_uv_chunk(uv_array):
    '''Wrap an array of UVs in an OBJECT_UV chunk.'''
    chunk = _3ds_chunk(OBJECT_UV)
    chunk.add_variable("uv coords", uv_array)
    return chunk
+
+
def make_mesh_chunk(mesh, materialDict):
    '''Make a chunk out of a Blender mesh.

    Chooses between face-UV conversion (which duplicates vertices, see
    remove_face_uv) and a plain vertex dump depending on whether the mesh
    has any UV layer.'''

    # Extract the triangles from the mesh:
    tri_list = extract_triangles(mesh)

    if len(mesh.uv_textures):
# if mesh.faceUV:
        # Remove the face UVs and convert it to vertex UV:
        vert_array, uv_array, tri_list = remove_face_uv(mesh.vertices, tri_list)
    else:
        # Add the vertices to the vertex array:
        vert_array = _3ds_array()
        for vert in mesh.vertices:
            vert_array.add(_3ds_point_3d(vert.co))
        # If the mesh has vertex UVs, create an array of UVs:
        if len(mesh.sticky):
# if mesh.vertexUV:
            uv_array = _3ds_array()
            for uv in mesh.sticky:
# for vert in mesh.vertices:
                uv_array.add(_3ds_point_uv(uv.co))
# uv_array.add(_3ds_point_uv(vert.uvco))
        else:
            # no UV at all:
            uv_array = None

    # create the chunk:
    mesh_chunk = _3ds_chunk(OBJECT_MESH)

    # add vertex chunk:
    mesh_chunk.add_subchunk(make_vert_chunk(vert_array))
    # add faces chunk:

    mesh_chunk.add_subchunk(make_faces_chunk(tri_list, mesh, materialDict))

    # if available, add uv chunk:
    if uv_array:
        mesh_chunk.add_subchunk(make_uv_chunk(uv_array))

    return mesh_chunk
+
+""" # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX
+def make_kfdata(start=0, stop=0, curtime=0):
+ '''Make the basic keyframe data chunk'''
+ kfdata = _3ds_chunk(KFDATA)
+
+ kfhdr = _3ds_chunk(KFDATA_KFHDR)
+ kfhdr.add_variable("revision", _3ds_short(0))
+ # Not really sure what filename is used for, but it seems it is usually used
+ # to identify the program that generated the .3ds:
+ kfhdr.add_variable("filename", _3ds_string("Blender"))
+ kfhdr.add_variable("animlen", _3ds_int(stop-start))
+
+ kfseg = _3ds_chunk(KFDATA_KFSEG)
+ kfseg.add_variable("start", _3ds_int(start))
+ kfseg.add_variable("stop", _3ds_int(stop))
+
+ kfcurtime = _3ds_chunk(KFDATA_KFCURTIME)
+ kfcurtime.add_variable("curtime", _3ds_int(curtime))
+
+ kfdata.add_subchunk(kfhdr)
+ kfdata.add_subchunk(kfseg)
+ kfdata.add_subchunk(kfcurtime)
+ return kfdata
+"""
+
+"""
+def make_track_chunk(ID, obj):
+ '''Make a chunk for track data.
+
+ Depending on the ID, this will construct a position, rotation or scale track.'''
+ track_chunk = _3ds_chunk(ID)
+ track_chunk.add_variable("track_flags", _3ds_short())
+ track_chunk.add_variable("unknown", _3ds_int())
+ track_chunk.add_variable("unknown", _3ds_int())
+ track_chunk.add_variable("nkeys", _3ds_int(1))
+ # Next section should be repeated for every keyframe, but for now, animation is not actually supported.
+ track_chunk.add_variable("tcb_frame", _3ds_int(0))
+ track_chunk.add_variable("tcb_flags", _3ds_short())
+ if obj.type=='Empty':
+ if ID==POS_TRACK_TAG:
+ # position vector:
+ track_chunk.add_variable("position", _3ds_point_3d(obj.getLocation()))
+ elif ID==ROT_TRACK_TAG:
+ # rotation (quaternion, angle first, followed by axis):
+ q = obj.getEuler().to_quaternion() # XXX, todo!
+ track_chunk.add_variable("rotation", _3ds_point_4d((q.angle, q.axis[0], q.axis[1], q.axis[2])))
+ elif ID==SCL_TRACK_TAG:
+ # scale vector:
+ track_chunk.add_variable("scale", _3ds_point_3d(obj.getSize()))
+ else:
+ # meshes have their transformations applied before
+ # exporting, so write identity transforms here:
+ if ID==POS_TRACK_TAG:
+ # position vector:
+ track_chunk.add_variable("position", _3ds_point_3d((0.0,0.0,0.0)))
+ elif ID==ROT_TRACK_TAG:
+ # rotation (quaternion, angle first, followed by axis):
+ track_chunk.add_variable("rotation", _3ds_point_4d((0.0, 1.0, 0.0, 0.0)))
+ elif ID==SCL_TRACK_TAG:
+ # scale vector:
+ track_chunk.add_variable("scale", _3ds_point_3d((1.0, 1.0, 1.0)))
+
+ return track_chunk
+"""
+
+"""
+def make_kf_obj_node(obj, name_to_id):
+ '''Make a node chunk for a Blender object.
+
+ Takes the Blender object as a parameter. Object id's are taken from the dictionary name_to_id.
+ Blender Empty objects are converted to dummy nodes.'''
+
+ name = obj.name
+ # main object node chunk:
+ kf_obj_node = _3ds_chunk(KFDATA_OBJECT_NODE_TAG)
+ # chunk for the object id:
+ obj_id_chunk = _3ds_chunk(OBJECT_NODE_ID)
+ # object id is from the name_to_id dictionary:
+ obj_id_chunk.add_variable("node_id", _3ds_short(name_to_id[name]))
+
+ # object node header:
+ obj_node_header_chunk = _3ds_chunk(OBJECT_NODE_HDR)
+ # object name:
+ if obj.type == 'Empty':
+ # Empties are called "$$$DUMMY" and use the OBJECT_INSTANCE_NAME chunk
+ # for their name (see below):
+ obj_node_header_chunk.add_variable("name", _3ds_string("$$$DUMMY"))
+ else:
+ # Add the name:
+ obj_node_header_chunk.add_variable("name", _3ds_string(sane_name(name)))
+ # Add Flag variables (not sure what they do):
+ obj_node_header_chunk.add_variable("flags1", _3ds_short(0))
+ obj_node_header_chunk.add_variable("flags2", _3ds_short(0))
+
+ # Check parent-child relationships:
+ parent = obj.parent
+ if (parent is None) or (parent.name not in name_to_id):
+ # If no parent, or the parents name is not in the name_to_id dictionary,
+ # parent id becomes -1:
+ obj_node_header_chunk.add_variable("parent", _3ds_short(-1))
+ else:
+ # Get the parent's id from the name_to_id dictionary:
+ obj_node_header_chunk.add_variable("parent", _3ds_short(name_to_id[parent.name]))
+
+ # Add pivot chunk:
+ obj_pivot_chunk = _3ds_chunk(OBJECT_PIVOT)
+ obj_pivot_chunk.add_variable("pivot", _3ds_point_3d(obj.getLocation()))
+ kf_obj_node.add_subchunk(obj_pivot_chunk)
+
+ # add subchunks for object id and node header:
+ kf_obj_node.add_subchunk(obj_id_chunk)
+ kf_obj_node.add_subchunk(obj_node_header_chunk)
+
+ # Empty objects need to have an extra chunk for the instance name:
+ if obj.type == 'Empty':
+ obj_instance_name_chunk = _3ds_chunk(OBJECT_INSTANCE_NAME)
+ obj_instance_name_chunk.add_variable("name", _3ds_string(sane_name(name)))
+ kf_obj_node.add_subchunk(obj_instance_name_chunk)
+
+ # Add track chunks for position, rotation and scale:
+ kf_obj_node.add_subchunk(make_track_chunk(POS_TRACK_TAG, obj))
+ kf_obj_node.add_subchunk(make_track_chunk(ROT_TRACK_TAG, obj))
+ kf_obj_node.add_subchunk(make_track_chunk(SCL_TRACK_TAG, obj))
+
+ return kf_obj_node
+"""
+
+
def save(operator,
         context, filepath="",
         use_selection=True,
         global_matrix=None,
         ):
    """Save the Blender scene to a 3ds file.

    Keyword Arguments:
        filepath -- destination path of the .3ds file.
        use_selection -- when True export only selected visible objects,
            otherwise all visible objects in the scene.
        global_matrix -- optional transform applied to all exported
            geometry; identity when None.

    Returns:
        {'FINISHED'} so this can be used directly from an operator.
    """

    import bpy
    import mathutils

    import time
    from bpy_extras.io_utils import create_derived_objects, free_derived_objects

    # Time the export
    time1 = time.clock()
#    Blender.Window.WaitCursor(1)

    if global_matrix is None:
        global_matrix = mathutils.Matrix()

    # Leave edit mode so the mesh data is up to date.
    if bpy.ops.object.mode_set.poll():
        bpy.ops.object.mode_set(mode='OBJECT')

    # Initialize the main chunk (primary):
    primary = _3ds_chunk(PRIMARY)
    # Add version chunk:
    version_chunk = _3ds_chunk(VERSION)
    version_chunk.add_variable("version", _3ds_int(3))
    primary.add_subchunk(version_chunk)

    # init main object info chunk:
    object_info = _3ds_chunk(OBJECTINFO)

    ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX
    # init main key frame data chunk:
    kfdata = make_kfdata()
    '''

    # Make a list of all materials used in the selected meshes (use a dictionary,
    # each (material, image) pair is added once):
    materialDict = {}
    mesh_objects = []

    scene = context.scene

    if use_selection:
        objects = (ob for ob in scene.objects if ob.is_visible(scene) and ob.select)
    else:
        objects = (ob for ob in scene.objects if ob.is_visible(scene))

    for ob in objects:
        # get derived objects (handles dupli-objects)
        free, derived = create_derived_objects(scene, ob)

        if derived is None:
            continue

        for ob_derived, mat in derived:
            if ob.type not in ('MESH', 'CURVE', 'SURFACE', 'FONT', 'META'):
                continue

            # to_mesh can fail (e.g. curves with no geometry); treat any
            # failure as "no exportable mesh" -- deliberate best-effort.
            try:
                data = ob_derived.to_mesh(scene, True, 'PREVIEW')
            except:
                data = None

            if data:
                data.transform(global_matrix * mat)
                mesh_objects.append((ob_derived, data))
                mat_ls = data.materials
                mat_ls_len = len(mat_ls)

                # get material/image tuples.
                if len(data.uv_textures):
                    if not mat_ls:
                        mat = mat_name = None

                    for f, uf in zip(data.faces, data.uv_textures.active.data):
                        if mat_ls:
                            mat_index = f.material_index
                            if mat_index >= mat_ls_len:
                                # bug fix: was "mat_index = f.mat = 0" --
                                # MeshFace has no 'mat' attribute in the 2.5x
                                # API; clamp material_index as done below.
                                mat_index = f.material_index = 0
                            mat = mat_ls[mat_index]
                            mat_name = None if mat is None else mat.name
                        # else there already set to none

                        img = uf.image
                        img_name = None if img is None else img.name

                        materialDict.setdefault((mat_name, img_name), (mat, img))

                else:
                    for mat in mat_ls:
                        if mat:  # material may be None so check its not.
                            materialDict.setdefault((mat.name, None), (mat, None))

                    # Clamp out-of-range material indices to 0.
                    for f in data.faces:
                        if f.material_index >= mat_ls_len:
                            f.material_index = 0

        if free:
            free_derived_objects(ob)

    # Make material chunks for all materials used in the meshes:
    for mat_and_image in materialDict.values():
        object_info.add_subchunk(make_material_chunk(mat_and_image[0], mat_and_image[1]))

    # Give all objects a unique ID and build a dictionary from object name to object id:
    """
    name_to_id = {}
    for ob, data in mesh_objects:
        name_to_id[ob.name]= len(name_to_id)
    #for ob in empty_objects:
    #    name_to_id[ob.name]= len(name_to_id)
    """

    # Create object chunks for all meshes:
    for ob, blender_mesh in mesh_objects:
        # create a new object chunk
        object_chunk = _3ds_chunk(OBJECT)

        # set the object name
        object_chunk.add_variable("name", _3ds_string(sane_name(ob.name)))

        # make a mesh chunk out of the mesh:
        object_chunk.add_subchunk(make_mesh_chunk(blender_mesh, materialDict))
        object_info.add_subchunk(object_chunk)

        ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX
        # make a kf object node for the object:
        kfdata.add_subchunk(make_kf_obj_node(ob, name_to_id))
        '''
        # Free the temporary mesh created by to_mesh() once it is written out.
        if not blender_mesh.users:
            bpy.data.meshes.remove(blender_mesh)

    # Create chunks for all empties:
    ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX
    for ob in empty_objects:
        # Empties only require a kf object node:
        kfdata.add_subchunk(make_kf_obj_node(ob, name_to_id))
        pass
    '''

    # Add main object info chunk to primary chunk:
    primary.add_subchunk(object_info)

    ''' # COMMENTED OUT FOR 2.42 RELEASE!! CRASHES 3DS MAX
    # Add main keyframe data chunk to primary chunk:
    primary.add_subchunk(kfdata)
    '''

    # At this point, the chunk hierarchy is completely built.

    # Compute (and cache) all chunk sizes before writing:
    primary.get_size()

    # Recursively write the chunks to file (closed even if write raises):
    with open(filepath, 'wb') as file:
        primary.write(file)

    # Clear name mapping vars, could make locals too
    name_unique[:] = []
    name_mapping.clear()

    # Debugging only: report the exporting time:
#    Blender.Window.WaitCursor(0)
    print("3ds export time: %.2f" % (time.clock() - time1))

    # Debugging only: dump the chunk hierarchy:
    #primary.dump()

    return {'FINISHED'}
diff --git a/io_scene_3ds/import_3ds.py b/io_scene_3ds/import_3ds.py
new file mode 100644
index 00000000..803b8e53
--- /dev/null
+++ b/io_scene_3ds/import_3ds.py
@@ -0,0 +1,935 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+# Script copyright (C) Bob Holcomb
+# Contributors: Bob Holcomb, Richard L?rk?ng, Damien McGinnes, Campbell Barton, Mario Lapin, Dominique Lorre
+
+import os
+import time
+import struct
+
+import bpy
+import mathutils
+
+BOUNDS_3DS = []
+
+
+######################################################
+# Data Structures
+######################################################
+
+#Some of the chunks that we will see
+#----- Primary Chunk, at the beginning of each file
+PRIMARY = 0x4D4D
+
+#------ Main Chunks
+OBJECTINFO = 0x3D3D # This gives the version of the mesh and is found right before the material and object information
+VERSION = 0x0002 # This gives the version of the .3ds file
+EDITKEYFRAME = 0xB000 # This is the header for all of the key frame info
+
+#------ sub defines of OBJECTINFO
+MATERIAL = 0xAFFF # This stored the texture info
+OBJECT = 0x4000 # This stores the faces, vertices, etc...
+
+#>------ sub defines of MATERIAL
+#------ sub defines of MATERIAL_BLOCK
+MAT_NAME = 0xA000 # This holds the material name
+MAT_AMBIENT = 0xA010 # Ambient color of the object/material
+MAT_DIFFUSE = 0xA020 # This holds the color of the object/material
+MAT_SPECULAR = 0xA030 # SPecular color of the object/material
+MAT_SHINESS = 0xA040 # ??
+MAT_TRANSPARENCY = 0xA050 # Transparency value of material
+MAT_SELF_ILLUM = 0xA080 # Self Illumination value of material
+MAT_WIRE = 0xA085 # Only render's wireframe
+
+MAT_TEXTURE_MAP = 0xA200 # This is a header for a new texture map
+MAT_SPECULAR_MAP = 0xA204 # This is a header for a new specular map
+MAT_OPACITY_MAP = 0xA210 # This is a header for a new opacity map
+MAT_REFLECTION_MAP = 0xA220 # This is a header for a new reflection map
+MAT_BUMP_MAP = 0xA230 # This is a header for a new bump map
+MAT_MAP_FILEPATH = 0xA300 # This holds the file name of the texture
+
+MAT_FLOAT_COLOR = 0x0010 # color defined as 3 floats
+MAT_24BIT_COLOR = 0x0011 # color defined as 3 bytes
+
+#>------ sub defines of OBJECT
+OBJECT_MESH = 0x4100 # This lets us know that we are reading a new object
+OBJECT_LAMP = 0x4600 # This lets un know we are reading a light object
+OBJECT_LAMP_SPOT = 0x4610 # The light is a spotloght.
+OBJECT_LAMP_OFF = 0x4620 # The light off.
+OBJECT_LAMP_ATTENUATE = 0x4625
+OBJECT_LAMP_RAYSHADE = 0x4627
+OBJECT_LAMP_SHADOWED = 0x4630
+OBJECT_LAMP_LOCAL_SHADOW = 0x4640
+OBJECT_LAMP_LOCAL_SHADOW2 = 0x4641
+OBJECT_LAMP_SEE_CONE = 0x4650
+OBJECT_LAMP_SPOT_RECTANGULAR = 0x4651
+OBJECT_LAMP_SPOT_OVERSHOOT = 0x4652
+OBJECT_LAMP_SPOT_PROJECTOR = 0x4653
+OBJECT_LAMP_EXCLUDE = 0x4654
+OBJECT_LAMP_RANGE = 0x4655
+OBJECT_LAMP_ROLL = 0x4656
+OBJECT_LAMP_SPOT_ASPECT = 0x4657
+OBJECT_LAMP_RAY_BIAS = 0x4658
+OBJECT_LAMP_INNER_RANGE = 0x4659
+OBJECT_LAMP_OUTER_RANGE = 0x465A
+OBJECT_LAMP_MULTIPLIER = 0x465B
+OBJECT_LAMP_AMBIENT_LIGHT = 0x4680
+
+OBJECT_CAMERA = 0x4700 # This lets un know we are reading a camera object
+
+#>------ sub defines of CAMERA
+OBJECT_CAM_RANGES = 0x4720 # The camera range values
+
+#>------ sub defines of OBJECT_MESH
+OBJECT_VERTICES = 0x4110 # The objects vertices
+OBJECT_FACES = 0x4120 # The objects faces
+OBJECT_MATERIAL = 0x4130 # This is found if the object has a material, either texture map or color
+OBJECT_UV = 0x4140 # The UV texture coordinates
+OBJECT_TRANS_MATRIX = 0x4160 # The Object Matrix
+
+#>------ sub defines of EDITKEYFRAME
+ED_KEY_AMBIENT_NODE = 0xB001
+ED_KEY_OBJECT_NODE = 0xB002
+ED_KEY_CAMERA_NODE = 0xB003
+ED_KEY_TARGET_NODE = 0xB004
+ED_KEY_LIGHT_NODE = 0xB005
+ED_KEY_L_TARGET_NODE = 0xB006
+ED_KEY_SPOTLIGHT_NODE = 0xB007
+#>------ sub defines of ED_KEY_OBJECT_NODE
+# EK_OB_KEYFRAME_SEG = 0xB008
+# EK_OB_KEYFRAME_CURTIME = 0xB009
+# EK_OB_KEYFRAME_HEADER = 0xB00A
+EK_OB_NODE_HEADER = 0xB010
+EK_OB_INSTANCE_NAME = 0xB011
+# EK_OB_PRESCALE = 0xB012
+EK_OB_PIVOT = 0xB013
+# EK_OB_BOUNDBOX = 0xB014
+# EK_OB_MORPH_SMOOTH = 0xB015
+EK_OB_POSITION_TRACK = 0xB020
+EK_OB_ROTATION_TRACK = 0xB021
+EK_OB_SCALE_TRACK = 0xB022
+# EK_OB_CAMERA_FOV_TRACK = 0xB023
+# EK_OB_CAMERA_ROLL_TRACK = 0xB024
+# EK_OB_COLOR_TRACK = 0xB025
+# EK_OB_MORPH_TRACK = 0xB026
+# EK_OB_HOTSPOT_TRACK = 0xB027
+# EK_OB_FALLOF_TRACK = 0xB028
+# EK_OB_HIDE_TRACK = 0xB029
+# EK_OB_NODE_ID = 0xB030
+
+ROOT_OBJECT = 0xFFFF
+
+global scn
+scn = None
+
+object_dictionary = {}
+object_matrix = {}
+
+
class chunk:
    """Header record for a single 3ds chunk: id, byte length and a running
    count of how many bytes of the chunk have been consumed so far."""

    # Class-level defaults, mirrored per-instance in __init__.
    ID = 0
    length = 0
    bytes_read = 0

    # On-disk layout of a chunk header: unsigned short id + unsigned int
    # length.  bytes_read is bookkeeping only and never read from the file.
    binary_format = "<HI"

    def __init__(self):
        """Start with an empty header."""
        self.ID = self.length = self.bytes_read = 0

    def dump(self):
        """Print all header fields (debugging aid)."""
        for label, value in (('ID: ', self.ID),
                             ('ID in hex: ', hex(self.ID)),
                             ('length: ', self.length),
                             ('bytes_read: ', self.bytes_read)):
            print(label, value)
+
+
def read_chunk(file, chunk):
    """Fill *chunk* with the next chunk header (ID + length) read from *file*."""
    fmt = chunk.binary_format
    chunk.ID, chunk.length = struct.unpack(fmt, file.read(struct.calcsize(fmt)))
    # A header is always 2 (id) + 4 (length) = 6 bytes.
    chunk.bytes_read = 6
    # uncomment for debugging:
    #chunk.dump()
+
+
def read_string(file):
    """Read a NUL-terminated byte string from *file*.

    Returns (decoded_text, bytes_consumed) where bytes_consumed includes
    the terminating NUL.  Raises struct.error on premature end-of-file,
    matching the rest of this parser.
    """
    pieces = []
    while True:
        ch = struct.unpack('<c', file.read(1))[0]
        if ch == b'\x00':
            break
        pieces.append(ch)
    raw = b''.join(pieces)
    # +1 accounts for the NUL that was consumed but dropped.
    return raw.decode("utf-8", "replace"), len(raw) + 1
+
+######################################################
+# IMPORT
+######################################################
+
+
def process_next_object_chunk(file, previous_chunk):
    # NOTE(review): this helper looks unused/incomplete -- the loop reads
    # chunk headers from *file* but never advances previous_chunk.bytes_read
    # (and temp_chunk is never used), so as written it can only terminate
    # via a struct.error at end-of-file.  Left byte-identical on purpose.
    new_chunk = chunk()
    temp_chunk = chunk()

    while (previous_chunk.bytes_read < previous_chunk.length):
        #read the next chunk
        read_chunk(file, new_chunk)
+
+
def skip_to_end(file, skip_chunk):
    """Consume and discard the unread remainder of *skip_chunk* from *file*."""
    remaining = skip_chunk.length - skip_chunk.bytes_read
    # Size the read through a struct format, as the original parser does
    # everywhere ("Nc" has size N; a negative remainder raises struct.error).
    file.read(struct.calcsize("%ic" % remaining))
    skip_chunk.bytes_read += remaining
+
+
def add_texture_to_material(image, texture, material, mapto):
    """Attach *texture* (optionally backed by *image*) to *material*.

    mapto selects the influence channel and must be one of "COLOR",
    "SPECULARITY", "ALPHA" or "NORMAL"; any other value falls back to
    "COLOR" with a console warning.
    """
    #print('assigning %s to %s' % (texture, material))

    if mapto not in ("COLOR", "SPECULARITY", "ALPHA", "NORMAL"):
        # bug fix: the message printed a literal '/t' instead of a tab.
        print('\tError: Cannot map to "%s"\n\tassuming diffuse color. modify material "%s" later.' % (mapto, material.name))
        mapto = "COLOR"

    if image:
        texture.image = image

    mtex = material.texture_slots.add()
    mtex.texture = texture
    mtex.texture_coords = 'UV'
    mtex.use_map_color_diffuse = False

    # Enable exactly the influence channel that was requested.
    if mapto == 'COLOR':
        mtex.use_map_color_diffuse = True
    elif mapto == 'SPECULARITY':
        mtex.use_map_specular = True
    elif mapto == 'ALPHA':
        mtex.use_map_alpha = True
    elif mapto == 'NORMAL':
        mtex.use_map_normal = True
+
+
def process_next_chunk(file, previous_chunk, importedObjects, IMAGE_SEARCH):
    """Parse every sub-chunk of *previous_chunk*, building Blender data.

    This is the workhorse of the importer: it is called once with the
    OBJECTINFO chunk (and recurses into itself for nested OBJECTINFO
    chunks), accumulating mesh/material/lamp/keyframe state in local
    "context*" variables and flushing a finished mesh via putContextMesh()
    each time a new OBJECT chunk begins.  Created objects are linked into
    the module-global scene SCN and appended to *importedObjects*.
    """
    from bpy_extras.image_utils import load_image

    #print previous_chunk.bytes_read, 'BYTES READ'
    contextObName = None
    contextLamp = [None, None]  # object, Data
    contextMaterial = None
    contextMatrix_rot = None  # Blender.mathutils.Matrix(); contextMatrix.identity()
    #contextMatrix_tx = None  # Blender.mathutils.Matrix(); contextMatrix.identity()
    contextMesh_vertls = None  # flat array: (verts * 3)
    contextMesh_facels = None
    contextMeshMaterials = []  # (matname, [face_idxs])
    contextMeshUV = None  # flat array (verts * 2)

    TEXTURE_DICT = {}  # material name -> loaded image
    MATDICT = {}  # material name -> Blender material
#    TEXMODE = Mesh.FaceModes['TEX']

    # Localspace variable names, faster.
    STRUCT_SIZE_1CHAR = struct.calcsize('c')
    STRUCT_SIZE_2FLOAT = struct.calcsize('2f')
    STRUCT_SIZE_3FLOAT = struct.calcsize('3f')
    STRUCT_SIZE_4FLOAT = struct.calcsize('4f')
    STRUCT_SIZE_UNSIGNED_SHORT = struct.calcsize('H')
    STRUCT_SIZE_4UNSIGNED_SHORT = struct.calcsize('4H')
    STRUCT_SIZE_4x3MAT = struct.calcsize('ffffffffffff')
    # NOTE(review): 13 floats -- looks like a leftover, it is never used.
    _STRUCT_SIZE_4x3MAT = struct.calcsize('fffffffffffff')
    # STRUCT_SIZE_4x3MAT = calcsize('ffffffffffff')
    # print STRUCT_SIZE_4x3MAT, ' STRUCT_SIZE_4x3MAT'
    # only init once
    object_list = []  # for hierarchy
    object_parent = []  # index of parent in hierarchy, 0xFFFF = no parent
    pivot_list = []  # pivots with hierarchy handling

    def putContextMesh(myContextMesh_vertls, myContextMesh_facels, myContextMeshMaterials):
        """Turn the accumulated context vertex/face/material data into a
        Blender mesh + object and link it into the scene."""
        bmesh = bpy.data.meshes.new(contextObName)

        if myContextMesh_facels is None:
            myContextMesh_facels = []

        if myContextMesh_vertls:

            bmesh.vertices.add(len(myContextMesh_vertls) // 3)
            bmesh.faces.add(len(myContextMesh_facels))
            bmesh.vertices.foreach_set("co", myContextMesh_vertls)

            # Blender treats a 0 in the last slot of vertices_raw as "tri";
            # rotate indices so a real vertex 0 never lands there (the
            # so-called eekadoodle problem).
            eekadoodle_faces = []
            for v1, v2, v3 in myContextMesh_facels:
                eekadoodle_faces.extend([v3, v1, v2, 0] if v3 == 0 else [v1, v2, v3, 0])
            bmesh.faces.foreach_set("vertices_raw", eekadoodle_faces)

            if bmesh.faces and contextMeshUV:
                bmesh.uv_textures.new()
                uv_faces = bmesh.uv_textures.active.data[:]
            else:
                uv_faces = None

            for mat_idx, (matName, faces) in enumerate(myContextMeshMaterials):
                if matName is None:
                    bmat = None
                else:
                    bmat = MATDICT.get(matName)
                    # in rare cases no materials defined.
                    if bmat:
                        img = TEXTURE_DICT.get(bmat.name)
                    else:
                        print("    warning: material %r not defined!" % matName)
                        bmat = MATDICT[matName] = bpy.data.materials.new(matName)
                        img = None

                bmesh.materials.append(bmat)  # can be None

                if uv_faces and img:
                    for fidx in faces:
                        bmesh.faces[fidx].material_index = mat_idx
                        uf = uv_faces[fidx]
                        uf.image = img
                        uf.use_image = True
                else:
                    for fidx in faces:
                        bmesh.faces[fidx].material_index = mat_idx

            if uv_faces:
                for fidx, uf in enumerate(uv_faces):
                    face = myContextMesh_facels[fidx]
                    v1, v2, v3 = face

                    # eekadoodle: same index rotation as applied above.
                    if v3 == 0:
                        v1, v2, v3 = v3, v1, v2

                    uf.uv1 = contextMeshUV[v1 * 2:(v1 * 2) + 2]
                    uf.uv2 = contextMeshUV[v2 * 2:(v2 * 2) + 2]
                    uf.uv3 = contextMeshUV[v3 * 2:(v3 * 2) + 2]
                    # always a tri

        bmesh.validate()
        bmesh.update()

        ob = bpy.data.objects.new(contextObName, bmesh)
        object_dictionary[contextObName] = ob
        SCN.objects.link(ob)
        importedObjects.append(ob)

        if contextMatrix_rot:
            ob.matrix_local = contextMatrix_rot
            object_matrix[ob] = contextMatrix_rot.copy()

    #a spare chunk
    new_chunk = chunk()
    temp_chunk = chunk()

    CreateBlenderObject = False

    def read_float_color(temp_chunk):
        """Read an RGB color stored as 3 little-endian floats."""
        temp_data = file.read(struct.calcsize('3f'))
        temp_chunk.bytes_read += 12
        return [float(col) for col in struct.unpack('<3f', temp_data)]

    def read_byte_color(temp_chunk):
        """Read an RGB color stored as 3 bytes, scaled to 0..1 floats."""
        temp_data = file.read(struct.calcsize('3B'))
        temp_chunk.bytes_read += 3
        return [float(col) / 255 for col in struct.unpack('<3B', temp_data)]  # data [0,1,2] == rgb

    def read_texture(new_chunk, temp_chunk, name, mapto):
        """Read a texture-map sub-chunk, load its image file and attach the
        resulting texture to the current material's *mapto* channel."""
        new_texture = bpy.data.textures.new(name, type='IMAGE')

        img = None
        while (new_chunk.bytes_read < new_chunk.length):
            #print 'MAT_TEXTURE_MAP..while', new_chunk.bytes_read, new_chunk.length
            read_chunk(file, temp_chunk)

            if (temp_chunk.ID == MAT_MAP_FILEPATH):
                texture_name, read_str_len = read_string(file)
                img = TEXTURE_DICT[contextMaterial.name] = load_image(texture_name, dirname)
                new_chunk.bytes_read += read_str_len  # plus one for the null character that gets removed

            else:
                skip_to_end(file, temp_chunk)

            new_chunk.bytes_read += temp_chunk.bytes_read

        # add the map to the material in the right channel
        if img:
            add_texture_to_material(img, new_texture, contextMaterial, mapto)

    # Textures are looked up relative to the .3ds file's directory.
    dirname = os.path.dirname(file.name)

    #loop through all the data for this chunk (previous chunk) and see what it is
    while (previous_chunk.bytes_read < previous_chunk.length):
        #print '\t', previous_chunk.bytes_read, 'keep going'
        #read the next chunk
        #print 'reading a chunk'
        read_chunk(file, new_chunk)

        #is it a Version chunk?
        if (new_chunk.ID == VERSION):
            #print 'if (new_chunk.ID == VERSION):'
            #print 'found a VERSION chunk'
            #read in the version of the file
            #it's an unsigned short (H)
            temp_data = file.read(struct.calcsize('I'))
            version = struct.unpack('<I', temp_data)[0]
            new_chunk.bytes_read += 4  # read the 4 bytes for the version number
            #this loader works with version 3 and below, but may not with 4 and above
            if (version > 3):
                print('\tNon-Fatal Error:  Version greater than 3, may not load correctly: ', version)

        #is it an object info chunk?
        elif (new_chunk.ID == OBJECTINFO):
            #print 'elif (new_chunk.ID == OBJECTINFO):'
            # print 'found an OBJECTINFO chunk'
            process_next_chunk(file, new_chunk, importedObjects, IMAGE_SEARCH)

            #keep track of how much we read in the main chunk
            # NOTE(review): temp_chunk was not read in this branch, so this
            # adds whatever bytes_read was left over from a previous
            # iteration -- looks suspicious, confirm before relying on it.
            new_chunk.bytes_read += temp_chunk.bytes_read

        #is it an object chunk?
        elif (new_chunk.ID == OBJECT):

            # A new OBJECT chunk starts: flush the mesh accumulated for the
            # previous object before resetting the context state.
            if CreateBlenderObject:
                putContextMesh(contextMesh_vertls, contextMesh_facels, contextMeshMaterials)
                contextMesh_vertls = []
                contextMesh_facels = []

                # preparing to receive the next object
                contextMeshMaterials = []  # matname:[face_idxs]
                contextMeshUV = None
                #contextMesh.vertexUV = 1 # Make sticky coords.
                # Reset matrix
                contextMatrix_rot = None
                #contextMatrix_tx = None

            CreateBlenderObject = True
            contextObName, read_str_len = read_string(file)
            new_chunk.bytes_read += read_str_len

        #is it a material chunk?
        elif (new_chunk.ID == MATERIAL):

#            print("read material")

            #print 'elif (new_chunk.ID == MATERIAL):'
            contextMaterial = bpy.data.materials.new('Material')

        elif (new_chunk.ID == MAT_NAME):
            #print 'elif (new_chunk.ID == MAT_NAME):'
            material_name, read_str_len = read_string(file)

#            print("material name", material_name)

            #plus one for the null character that ended the string
            new_chunk.bytes_read += read_str_len

            contextMaterial.name = material_name.rstrip()  # remove trailing whitespace
            MATDICT[material_name] = contextMaterial

        elif (new_chunk.ID == MAT_AMBIENT):
            #print 'elif (new_chunk.ID == MAT_AMBIENT):'
            read_chunk(file, temp_chunk)
            if (temp_chunk.ID == MAT_FLOAT_COLOR):
                contextMaterial.mirror_color = read_float_color(temp_chunk)
#                temp_data = file.read(struct.calcsize('3f'))
#                temp_chunk.bytes_read += 12
#                contextMaterial.mirCol = [float(col) for col in struct.unpack('<3f', temp_data)]
            elif (temp_chunk.ID == MAT_24BIT_COLOR):
                contextMaterial.mirror_color = read_byte_color(temp_chunk)
#                temp_data = file.read(struct.calcsize('3B'))
#                temp_chunk.bytes_read += 3
#                contextMaterial.mirCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb
            else:
                skip_to_end(file, temp_chunk)
            new_chunk.bytes_read += temp_chunk.bytes_read

        elif (new_chunk.ID == MAT_DIFFUSE):
            #print 'elif (new_chunk.ID == MAT_DIFFUSE):'
            read_chunk(file, temp_chunk)
            if (temp_chunk.ID == MAT_FLOAT_COLOR):
                contextMaterial.diffuse_color = read_float_color(temp_chunk)
#                temp_data = file.read(struct.calcsize('3f'))
#                temp_chunk.bytes_read += 12
#                contextMaterial.rgbCol = [float(col) for col in struct.unpack('<3f', temp_data)]
            elif (temp_chunk.ID == MAT_24BIT_COLOR):
                contextMaterial.diffuse_color = read_byte_color(temp_chunk)
#                temp_data = file.read(struct.calcsize('3B'))
#                temp_chunk.bytes_read += 3
#                contextMaterial.rgbCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb
            else:
                skip_to_end(file, temp_chunk)

#            print("read material diffuse color", contextMaterial.diffuse_color)

            new_chunk.bytes_read += temp_chunk.bytes_read

        elif (new_chunk.ID == MAT_SPECULAR):
            #print 'elif (new_chunk.ID == MAT_SPECULAR):'
            read_chunk(file, temp_chunk)
            if (temp_chunk.ID == MAT_FLOAT_COLOR):
                contextMaterial.specular_color = read_float_color(temp_chunk)
#                temp_data = file.read(struct.calcsize('3f'))
#                temp_chunk.bytes_read += 12
#                contextMaterial.mirCol = [float(col) for col in struct.unpack('<3f', temp_data)]
            elif (temp_chunk.ID == MAT_24BIT_COLOR):
                contextMaterial.specular_color = read_byte_color(temp_chunk)
#                temp_data = file.read(struct.calcsize('3B'))
#                temp_chunk.bytes_read += 3
#                contextMaterial.mirCol = [float(col)/255 for col in struct.unpack('<3B', temp_data)] # data [0,1,2] == rgb
            else:
                skip_to_end(file, temp_chunk)
            new_chunk.bytes_read += temp_chunk.bytes_read

        elif (new_chunk.ID == MAT_TEXTURE_MAP):
            read_texture(new_chunk, temp_chunk, "Diffuse", "COLOR")

        elif (new_chunk.ID == MAT_SPECULAR_MAP):
            read_texture(new_chunk, temp_chunk, "Specular", "SPECULARITY")

        elif (new_chunk.ID == MAT_OPACITY_MAP):
            read_texture(new_chunk, temp_chunk, "Opacity", "ALPHA")

        elif (new_chunk.ID == MAT_BUMP_MAP):
            read_texture(new_chunk, temp_chunk, "Bump", "NORMAL")

        elif (new_chunk.ID == MAT_TRANSPARENCY):
            #print 'elif (new_chunk.ID == MAT_TRANSPARENCY):'
            read_chunk(file, temp_chunk)
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)

            temp_chunk.bytes_read += 2
            # 3ds stores transparency percent; Blender stores alpha.
            contextMaterial.alpha = 1 - (float(struct.unpack('<H', temp_data)[0]) / 100)
            new_chunk.bytes_read += temp_chunk.bytes_read

        elif (new_chunk.ID == OBJECT_LAMP):  # Basic lamp support.

            temp_data = file.read(STRUCT_SIZE_3FLOAT)

            x, y, z = struct.unpack('<3f', temp_data)
            new_chunk.bytes_read += STRUCT_SIZE_3FLOAT

            # no lamp in dict that would be confusing
            contextLamp[1] = bpy.data.lamps.new("Lamp", 'POINT')
            contextLamp[0] = ob = bpy.data.objects.new("Lamp", contextLamp[1])

            SCN.objects.link(ob)
            importedObjects.append(contextLamp[0])

            #print 'number of faces: ', num_faces
            #print x,y,z
            contextLamp[0].location = (x, y, z)
#            contextLamp[0].setLocation(x,y,z)

            # Reset matrix
            contextMatrix_rot = None
            #contextMatrix_tx = None
            #print contextLamp.name,

        elif (new_chunk.ID == OBJECT_MESH):
            # print 'Found an OBJECT_MESH chunk'
            pass
        elif (new_chunk.ID == OBJECT_VERTICES):
            '''
            Worldspace vertex locations
            '''
            # print 'elif (new_chunk.ID == OBJECT_VERTICES):'
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            num_verts = struct.unpack('<H', temp_data)[0]
            new_chunk.bytes_read += 2

            # print 'number of verts: ', num_verts
            contextMesh_vertls = struct.unpack('<%df' % (num_verts * 3), file.read(STRUCT_SIZE_3FLOAT * num_verts))
            new_chunk.bytes_read += STRUCT_SIZE_3FLOAT * num_verts
            # dummyvert is not used atm!

            #print 'object verts: bytes read: ', new_chunk.bytes_read

        elif (new_chunk.ID == OBJECT_FACES):
            # print 'elif (new_chunk.ID == OBJECT_FACES):'
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            num_faces = struct.unpack('<H', temp_data)[0]
            new_chunk.bytes_read += 2
            #print 'number of faces: ', num_faces

            # print '\ngetting a face'
            temp_data = file.read(STRUCT_SIZE_4UNSIGNED_SHORT * num_faces)
            new_chunk.bytes_read += STRUCT_SIZE_4UNSIGNED_SHORT * num_faces  # 4 short ints x 2 bytes each
            contextMesh_facels = struct.unpack('<%dH' % (num_faces * 4), temp_data)
            # Each face record is (v1, v2, v3, flags); drop the flags word.
            contextMesh_facels = [contextMesh_facels[i - 3:i] for i in range(3, (num_faces * 4) + 3, 4)]

        elif (new_chunk.ID == OBJECT_MATERIAL):
            # print 'elif (new_chunk.ID == OBJECT_MATERIAL):'
            material_name, read_str_len = read_string(file)
            new_chunk.bytes_read += read_str_len  # remove 1 null character.

            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            num_faces_using_mat = struct.unpack('<H', temp_data)[0]
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT

            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * num_faces_using_mat)
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * num_faces_using_mat

            temp_data = struct.unpack("<%dH" % (num_faces_using_mat), temp_data)

            contextMeshMaterials.append((material_name, temp_data))

            #look up the material in all the materials

        elif (new_chunk.ID == OBJECT_UV):
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            num_uv = struct.unpack('<H', temp_data)[0]
            new_chunk.bytes_read += 2

            temp_data = file.read(STRUCT_SIZE_2FLOAT * num_uv)
            new_chunk.bytes_read += STRUCT_SIZE_2FLOAT * num_uv
            contextMeshUV = struct.unpack('<%df' % (num_uv * 2), temp_data)

        elif (new_chunk.ID == OBJECT_TRANS_MATRIX):
            # How do we know the matrix size? 54 == 4x4 48 == 4x3
            temp_data = file.read(STRUCT_SIZE_4x3MAT)
            data = list(struct.unpack('<ffffffffffff', temp_data))
            new_chunk.bytes_read += STRUCT_SIZE_4x3MAT

            # 4x3 on disk, expanded to 4x4 row-major with an implicit
            # (0, 0, 0, 1) last column.
            contextMatrix_rot = mathutils.Matrix((data[:3] + [0], \
                                                  data[3:6] + [0], \
                                                  data[6:9] + [0], \
                                                  data[9:] + [1], \
                                                  ))

        elif (new_chunk.ID == MAT_MAP_FILEPATH):
            texture_name, read_str_len = read_string(file)
            # Only load the image the first time this material sees a path.
            try:
                TEXTURE_DICT[contextMaterial.name]
            except:
                #img = TEXTURE_DICT[contextMaterial.name]= BPyImage.comprehensiveImageLoad(texture_name, FILEPATH)
                img = TEXTURE_DICT[contextMaterial.name] = load_image(texture_name, dirname)
#                img = TEXTURE_DICT[contextMaterial.name]= BPyImage.comprehensiveImageLoad(texture_name, FILEPATH, PLACE_HOLDER=False, RECURSIVE=IMAGE_SEARCH)

            new_chunk.bytes_read += read_str_len  # plus one for the null character that gets removed
        elif new_chunk.ID == EDITKEYFRAME:
            pass

        # including these here means their EK_OB_NODE_HEADER are scanned
        elif new_chunk.ID in {ED_KEY_AMBIENT_NODE,
                              ED_KEY_OBJECT_NODE,
                              ED_KEY_CAMERA_NODE,
                              ED_KEY_TARGET_NODE,
                              ED_KEY_LIGHT_NODE,
                              ED_KEY_L_TARGET_NODE,
                              ED_KEY_SPOTLIGHT_NODE}:  # another object is being processed
            child = None

        elif new_chunk.ID == EK_OB_NODE_HEADER:
            object_name, read_str_len = read_string(file)
            new_chunk.bytes_read += read_str_len
            # Skip the two flag words, then read the hierarchy index.
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2)
            new_chunk.bytes_read += 4
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            hierarchy = struct.unpack('<H', temp_data)[0]
            new_chunk.bytes_read += 2

            child = object_dictionary.get(object_name)

            if child is None:
                child = bpy.data.objects.new(object_name, None)  # create an empty object
                SCN.objects.link(child)
                importedObjects.append(child)

            object_list.append(child)
            object_parent.append(hierarchy)
            pivot_list.append(mathutils.Vector((0.0, 0.0, 0.0)))

        elif new_chunk.ID == EK_OB_INSTANCE_NAME:
            object_name, read_str_len = read_string(file)
            # child.name = object_name
            child.name += "." + object_name
            object_dictionary[object_name] = child
            new_chunk.bytes_read += read_str_len
            # print("new instance object:", object_name)

        elif new_chunk.ID == EK_OB_PIVOT:  # translation
            temp_data = file.read(STRUCT_SIZE_3FLOAT)
            pivot = struct.unpack('<3f', temp_data)
            new_chunk.bytes_read += STRUCT_SIZE_3FLOAT
            pivot_list[len(pivot_list) - 1] = mathutils.Vector(pivot)

        elif new_chunk.ID == EK_OB_POSITION_TRACK:  # translation
            # Skip the 5-short track header, then read the key count.
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 5
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 5)
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            nkeys = struct.unpack('<H', temp_data)[0]
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
            for i in range(nkeys):
                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
                nframe = struct.unpack('<H', temp_data)[0]
                new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2)
                new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
                temp_data = file.read(STRUCT_SIZE_3FLOAT)
                loc = struct.unpack('<3f', temp_data)
                new_chunk.bytes_read += STRUCT_SIZE_3FLOAT
                # Only frame 0 is applied; animation keys are ignored.
                if nframe == 0:
                    child.location = loc

        elif new_chunk.ID == EK_OB_ROTATION_TRACK:  # rotation
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 5
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 5)
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            nkeys = struct.unpack('<H', temp_data)[0]
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
            for i in range(nkeys):
                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
                nframe = struct.unpack('<H', temp_data)[0]
                new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2)
                new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
                temp_data = file.read(STRUCT_SIZE_4FLOAT)
                rad, axis_x, axis_y, axis_z = struct.unpack("<4f", temp_data)
                new_chunk.bytes_read += STRUCT_SIZE_4FLOAT
                if nframe == 0:
                    child.rotation_euler = mathutils.Quaternion((axis_x, axis_y, axis_z), -rad).to_euler()  # why negative?

        elif new_chunk.ID == EK_OB_SCALE_TRACK:  # scale
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 5
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 5)
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            nkeys = struct.unpack('<H', temp_data)[0]
            temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
            new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
            for i in range(nkeys):
                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT)
                nframe = struct.unpack('<H', temp_data)[0]
                new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT
                temp_data = file.read(STRUCT_SIZE_UNSIGNED_SHORT * 2)
                new_chunk.bytes_read += STRUCT_SIZE_UNSIGNED_SHORT * 2
                temp_data = file.read(STRUCT_SIZE_3FLOAT)
                sca = struct.unpack('<3f', temp_data)
                new_chunk.bytes_read += STRUCT_SIZE_3FLOAT
                if nframe == 0:
                    child.scale = sca

        else:  # (new_chunk.ID!=VERSION or new_chunk.ID!=OBJECTINFO or new_chunk.ID!=OBJECT or new_chunk.ID!=MATERIAL):
            # print 'skipping to end of this chunk'
            #print("unknown chunk: "+hex(new_chunk.ID))
            buffer_size = new_chunk.length - new_chunk.bytes_read
            binary_format = "%ic" % buffer_size
            temp_data = file.read(struct.calcsize(binary_format))
            new_chunk.bytes_read += buffer_size

        #update the previous chunk bytes read
        # print 'previous_chunk.bytes_read += new_chunk.bytes_read'
        # print previous_chunk.bytes_read, new_chunk.bytes_read
        previous_chunk.bytes_read += new_chunk.bytes_read
        ## print 'Bytes left in this chunk: ', previous_chunk.length - previous_chunk.bytes_read

    # FINISHED LOOP
    # There will be a number of objects still not added
    if CreateBlenderObject:
        putContextMesh(contextMesh_vertls, contextMesh_facels, contextMeshMaterials)

    # Assign parents to objects
    for ind, ob in enumerate(object_list):
        parent = object_parent[ind]
        if parent == ROOT_OBJECT:
            ob.parent = None
        else:
            ob.parent = object_list[parent]
            # pivot_list[ind] += pivot_list[parent]  # XXX, not sure this is correct, should parent space matrix be applied before combining?
    # fix pivots
    for ind, ob in enumerate(object_list):
        if ob.type == 'MESH':
            pivot = pivot_list[ind]
            pivot_matrix = object_matrix.get(ob, mathutils.Matrix())  # unlikely to fail
            pivot_matrix = mathutils.Matrix.Translation(-pivot * pivot_matrix.to_3x3())
            ob.data.transform(pivot_matrix)
+
+
+def load_3ds(filepath,
+ context,
+ IMPORT_CONSTRAIN_BOUNDS=10.0,
+ IMAGE_SEARCH=True,
+ APPLY_MATRIX=True,
+ global_matrix=None):
+ global SCN
+
+ # XXX
+# if BPyMessages.Error_NoFile(filepath):
+# return
+
+ print("importing 3DS: %r..." % (filepath), end="")
+
+ if bpy.ops.object.select_all.poll():
+ bpy.ops.object.select_all(action='DESELECT')
+
+ time1 = time.clock()
+# time1 = Blender.sys.time()
+
+ current_chunk = chunk()
+
+ file = open(filepath, 'rb')
+
+ #here we go!
+ # print 'reading the first chunk'
+ read_chunk(file, current_chunk)
+ if (current_chunk.ID != PRIMARY):
+ print('\tFatal Error: Not a valid 3ds file: %r' % filepath)
+ file.close()
+ return
+
+ if IMPORT_CONSTRAIN_BOUNDS:
+ BOUNDS_3DS[:] = [1 << 30, 1 << 30, 1 << 30, -1 << 30, -1 << 30, -1 << 30]
+ else:
+ BOUNDS_3DS[:] = []
+
+ ##IMAGE_SEARCH
+
+ # fixme, make unglobal, clear incase
+ object_dictionary.clear()
+ object_matrix.clear()
+
+ scn = context.scene
+# scn = bpy.data.scenes.active
+ SCN = scn
+# SCN_OBJECTS = scn.objects
+# SCN_OBJECTS.selected = [] # de select all
+
+ importedObjects = [] # Fill this list with objects
+ process_next_chunk(file, current_chunk, importedObjects, IMAGE_SEARCH)
+
+ # fixme, make unglobal
+ object_dictionary.clear()
+ object_matrix.clear()
+
+ # Link the objects into this scene.
+ # Layers = scn.Layers
+
+ # REMOVE DUMMYVERT, - remove this in the next release when blenders internal are fixed.
+
+ if APPLY_MATRIX:
+ for ob in importedObjects:
+ if ob.type == 'MESH':
+ me = ob.data
+ me.transform(ob.matrix_local.inverted())
+
+ # print(importedObjects)
+ if global_matrix:
+ for ob in importedObjects:
+ if ob.parent is None:
+ ob.matrix_world = ob.matrix_world * global_matrix
+
+ for ob in importedObjects:
+ ob.select = True
+
+ # Done DUMMYVERT
+ """
+ if IMPORT_AS_INSTANCE:
+ name = filepath.split('\\')[-1].split('/')[-1]
+ # Create a group for this import.
+ group_scn = Scene.New(name)
+ for ob in importedObjects:
+ group_scn.link(ob) # dont worry about the layers
+
+ grp = Blender.Group.New(name)
+ grp.objects = importedObjects
+
+ grp_ob = Object.New('Empty', name)
+ grp_ob.enableDupGroup = True
+ grp_ob.DupGroup = grp
+ scn.link(grp_ob)
+ grp_ob.Layers = Layers
+ grp_ob.sel = 1
+ else:
+ # Select all imported objects.
+ for ob in importedObjects:
+ scn.link(ob)
+ ob.Layers = Layers
+ ob.sel = 1
+ """
+
+ if 0:
+# if IMPORT_CONSTRAIN_BOUNDS!=0.0:
+ # Set bounds from objecyt bounding box
+ for ob in importedObjects:
+ if ob.type == 'MESH':
+# if ob.type=='Mesh':
+ ob.makeDisplayList() # Why dosnt this update the bounds?
+ for v in ob.getBoundBox():
+ for i in (0, 1, 2):
+ if v[i] < BOUNDS_3DS[i]:
+ BOUNDS_3DS[i] = v[i] # min
+
+ if v[i] > BOUNDS_3DS[i + 3]:
+ BOUNDS_3DS[i + 3] = v[i] # min
+
+ # Get the max axis x/y/z
+ max_axis = max(BOUNDS_3DS[3] - BOUNDS_3DS[0], BOUNDS_3DS[4] - BOUNDS_3DS[1], BOUNDS_3DS[5] - BOUNDS_3DS[2])
+ # print max_axis
+ if max_axis < 1 << 30: # Should never be false but just make sure.
+
+ # Get a new scale factor if set as an option
+ SCALE = 1.0
+ while (max_axis * SCALE) > IMPORT_CONSTRAIN_BOUNDS:
+ SCALE /= 10.0
+
+ # SCALE Matrix
+ SCALE_MAT = mathutils.Matrix.Scale(SCALE, 4)
+
+ for ob in importedObjects:
+ if ob.parent is None:
+ ob.matrix_world = ob.matrix_world * SCALE_MAT
+
+ # Done constraining to bounds.
+
+ # Select all new objects.
+ print(" done in %.4f sec." % (time.clock() - time1))
+ file.close()
+
+
def load(operator,
         context,
         filepath="",
         constrain_size=0.0,
         use_image_search=True,
         use_apply_transform=True,
         global_matrix=None,
         ):
    """Operator entry point: map the UI option names onto load_3ds keywords.

    The *operator* argument is accepted for the standard import-operator
    calling convention but is not used here.  Always reports success to
    Blender by returning {'FINISHED'}.
    """
    options = {
        "IMPORT_CONSTRAIN_BOUNDS": constrain_size,
        "IMAGE_SEARCH": use_image_search,
        "APPLY_MATRIX": use_apply_transform,
        "global_matrix": global_matrix,
    }
    load_3ds(filepath, context, **options)

    return {'FINISHED'}