git.blender.org/blender-addons.git

author     Campbell Barton <ideasman42@gmail.com>  2016-01-28 05:29:04 +0300
committer  Campbell Barton <ideasman42@gmail.com>  2016-01-29 10:44:34 +0300
commit     c3f24386bb4ec7c91949a1f024bb1d6464cfd5a2 (patch)
tree       557e16b142f3122feac9e81e868fc114ab810664
parent     4c669445b322dfa9be567542679463f2e84818c3 (diff)
New blend-file addon
Currently this only exposes packing functionality: packing a blend file and all its dependencies into a ZIP (shared code with the cloud). It can be run directly, or from the command line (without Blender) via `blendfile_pack`. This also adds a subprocess_helper module, which we may want to re-use elsewhere; it allows running external processes that don't lock Blender and can be cancelled by pressing Esc.
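
For reference, a minimal sketch of invoking the packer without Blender, based on the `--input`, `--output` and `--temp` arguments the operator passes below; the interpreter name and paths are hypothetical examples:

    import subprocess
    import tempfile

    # Run blendfile_pack.py standalone, mirroring the flags that the
    # ExportBlendPack operator constructs in __init__.py below.
    with tempfile.TemporaryDirectory() as temp_dir:
        subprocess.check_call([
            "python3", "io_blend_utils/blendfile_pack.py",
            "--input", "/path/to/scene.blend",   # blend file to pack
            "--output", "/path/to/scene.zip",    # archive to write
            "--temp", temp_dir,                  # scratch directory
        ])
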
-rw-r--r--  io_blend_utils/__init__.py                    |  110
-rw-r--r--  io_blend_utils/bl_utils/pipe_non_blocking.py  |  100
-rw-r--r--  io_blend_utils/bl_utils/subprocess_helper.py  |  172
-rw-r--r--  io_blend_utils/blend/blendfile.py             |  917
-rw-r--r--  io_blend_utils/blend/blendfile_path_walker.py |  939
-rwxr-xr-x  io_blend_utils/blendfile_pack.py              |  601
-rw-r--r--  io_blend_utils/utils/system.py                |  105
7 files changed, 2944 insertions(+), 0 deletions(-)
diff --git a/io_blend_utils/__init__.py b/io_blend_utils/__init__.py
new file mode 100644
index 00000000..adbd2d04
--- /dev/null
+++ b/io_blend_utils/__init__.py
@@ -0,0 +1,110 @@
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENSE BLOCK *****
+
+bl_info = {
+ "name": "Blend File Utils",
+ "author": "Campbell Barton",
+ "version": (0, 1),
+ "blender": (2, 76, 0),
+ "location": "File > External Data > Blend Utils",
+ "description": "Utility for packing blend files",
+ "warning": "",
+ "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/Scripts/Import-Export/BlendFile_Utils",
+ "support": 'OFFICIAL',
+ "category": "Import-Export",
+ }
+
+
+import bpy
+from bpy.types import Operator
+from bpy_extras.io_utils import ExportHelper
+
+from .bl_utils.subprocess_helper import SubprocessHelper
+
+
+class ExportBlendPack(Operator, ExportHelper, SubprocessHelper):
+ """Packs a blend file and all its dependencies into an archive for easy redistribution"""
+ bl_idname = "export_blend.pack"
+ bl_label = "Pack Blend to Archive"
+
+ # ExportHelper
+ filename_ext = ".zip"
+
+ # SubprocessHelper
+ report_interval = 0.25
+
+ temp_dir = None
+
+ @classmethod
+ def poll(cls, context):
+ return bpy.data.is_saved
+
+ def process_pre(self):
+ import os
+ import tempfile
+
+ self.temp_dir = tempfile.TemporaryDirectory()
+
+ filepath_blend = bpy.data.filepath
+
+ self.command = (
+ bpy.app.binary_path_python,
+ os.path.join(os.path.dirname(__file__), "blendfile_pack.py"),
+ # file to pack
+ "--input", filepath_blend,
+ # file to write
+ "--output", bpy.path.ensure_ext(self.filepath, ".zip"),
+ "--temp", self.temp_dir.name,
+ )
+
+ def process_post(self, returncode):
+ if self.temp_dir is not None:
+ try:
+ self.temp_dir.cleanup()
+ except:
+ import traceback
+ traceback.print_exc()
+
+
+def menu_func(self, context):
+ layout = self.layout
+ layout.separator()
+ layout.operator(ExportBlendPack.bl_idname)
+
+
+classes = (
+ ExportBlendPack,
+ )
+
+
+def register():
+ for cls in classes:
+ bpy.utils.register_class(cls)
+
+ bpy.types.INFO_MT_file_external_data.append(menu_func)
+
+
+def unregister():
+ for cls in classes:
+ bpy.utils.unregister_class(cls)
+
+ bpy.types.INFO_MT_file_external_data.remove(menu_func)
+
+
+if __name__ == "__main__":
+ register()
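
Once registered, the operator can also be invoked from Python; a minimal sketch (the output path is a hypothetical example):

    import bpy

    # Requires the current .blend to be saved (see poll() above).
    bpy.ops.export_blend.pack(filepath="/path/to/output.zip")
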
diff --git a/io_blend_utils/bl_utils/pipe_non_blocking.py b/io_blend_utils/bl_utils/pipe_non_blocking.py
new file mode 100644
index 00000000..ead0a738
--- /dev/null
+++ b/io_blend_utils/bl_utils/pipe_non_blocking.py
@@ -0,0 +1,100 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+"""
+Example use:
+
+ p = subprocess.Popen(
+ command,
+ stdout=subprocess.PIPE,
+ )
+
+ pipe_non_blocking_set(p.stdout.fileno())
+
+ try:
+ data = os.read(p.stdout.fileno(), 1)
+ except PortableBlockingIOError as ex:
+ if not pipe_non_blocking_is_error_blocking(ex):
+ raise ex
+"""
+
+
+__all__ = (
+ "pipe_non_blocking_set",
+ "pipe_non_blocking_is_error_blocking",
+ "PortableBlockingIOError",
+ )
+
+import os
+
+
+if os.name == "nt":
+ # MS-Windows Version
+ def pipe_non_blocking_set(fd):
+ # Constant could define globally but avoid polluting the name-space
+ # thanks to: http://stackoverflow.com/questions/34504970
+ import msvcrt
+
+ from ctypes import windll, byref, wintypes, WinError, POINTER
+ from ctypes.wintypes import HANDLE, DWORD, BOOL
+
+ LPDWORD = POINTER(DWORD)
+
+ PIPE_NOWAIT = wintypes.DWORD(0x00000001)
+
+ def pipe_no_wait(pipefd):
+ SetNamedPipeHandleState = windll.kernel32.SetNamedPipeHandleState
+ SetNamedPipeHandleState.argtypes = [HANDLE, LPDWORD, LPDWORD, LPDWORD]
+ SetNamedPipeHandleState.restype = BOOL
+
+ h = msvcrt.get_osfhandle(pipefd)
+
+ res = windll.kernel32.SetNamedPipeHandleState(h, byref(PIPE_NOWAIT), None, None)
+ if res == 0:
+ print(WinError())
+ return False
+ return True
+
+ return pipe_no_wait(fd)
+
+ def pipe_non_blocking_is_error_blocking(ex):
+ if not isinstance(ex, PortableBlockingIOError):
+ return False
+ from ctypes import GetLastError
+ ERROR_NO_DATA = 232
+
+ return (GetLastError() == ERROR_NO_DATA)
+
+ PortableBlockingIOError = OSError
+else:
+ # Posix Version
+ def pipe_non_blocking_set(fd):
+ import fcntl
+ fl = fcntl.fcntl(fd, fcntl.F_GETFL)
+ fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
+ return True
+
+ # only for compatibility with 'nt' version.
+ def pipe_non_blocking_is_error_blocking(ex):
+ if not isinstance(ex, PortableBlockingIOError):
+ return False
+ return True
+
+ PortableBlockingIOError = BlockingIOError
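
Taken together, these functions support a portable non-blocking read loop; a minimal sketch (the command is a hypothetical example, and the import assumes the module is on sys.path):

    import os
    import subprocess

    from pipe_non_blocking import (
        pipe_non_blocking_set,
        pipe_non_blocking_is_error_blocking,
        PortableBlockingIOError,
    )

    p = subprocess.Popen(["echo", "hello"], stdout=subprocess.PIPE)
    pipe_non_blocking_set(p.stdout.fileno())

    while p.poll() is None:
        try:
            data = os.read(p.stdout.fileno(), 64)
        except PortableBlockingIOError as ex:
            if not pipe_non_blocking_is_error_blocking(ex):
                raise
            continue  # no data available yet, keep polling
        if data:
            print(data.decode(errors="replace"), end="")
    # a full implementation would also drain any remaining buffered output here
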
diff --git a/io_blend_utils/bl_utils/subprocess_helper.py b/io_blend_utils/bl_utils/subprocess_helper.py
new file mode 100644
index 00000000..2d289d37
--- /dev/null
+++ b/io_blend_utils/bl_utils/subprocess_helper.py
@@ -0,0 +1,172 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+"""
+Defines an operator mix-in to use for non-blocking command line access.
+"""
+
+class SubprocessHelper:
+ """
+ Mix-in class for operators to run commands in a non-blocking way.
+
+ This uses a modal operator to manage an external process.
+
+ Subclass must define:
+ ``command``:
+ List of arguments to pass to subprocess.Popen
+ ``report_interval``:
+ Time in seconds between updating reports.
+
+ ``process_pre()``:
+ Callback that runs before the process executes.
+
+ ``process_post(returncode)``:
+ Callback that runs when the process has ended.
+ returncode is -1 if the process was terminated.
+ """
+
+ @staticmethod
+ def _non_blocking_readlines(f, chunk=64):
+ """
+ Iterate over lines, yielding b'' when nothing is left
+ or when new data is not yet available.
+ """
+ import os
+
+ from .pipe_non_blocking import (
+ pipe_non_blocking_set,
+ pipe_non_blocking_is_error_blocking,
+ PortableBlockingIOError,
+ )
+
+ fd = f.fileno()
+ pipe_non_blocking_set(fd)
+
+ blocks = []
+
+ while True:
+ try:
+ data = os.read(fd, chunk)
+ if not data:
+ # case where reading finishes with no trailing newline
+ yield b''.join(blocks)
+ blocks.clear()
+ except PortableBlockingIOError as ex:
+ if not pipe_non_blocking_is_error_blocking(ex):
+ raise ex
+
+ yield b''
+ continue
+
+ while True:
+ n = data.find(b'\n')
+ if n == -1:
+ break
+
+ yield b''.join(blocks) + data[:n + 1]
+ data = data[n + 1:]
+ blocks.clear()
+ blocks.append(data)
+
+ def _report_output(self):
+ stdout_line_iter, stderr_line_iter = self._buffer_iter
+ for line_iter, report_type in (
+ (stdout_line_iter, {'INFO'}),
+ (stderr_line_iter, {'WARNING'})
+ ):
+ while True:
+ line = next(line_iter).rstrip() # rstrip all, to include \r on windows
+ if not line:
+ break
+ self.report(report_type, line.decode(encoding='utf-8', errors='surrogateescape'))
+
+ def _wm_enter(self, context):
+ wm = context.window_manager
+ window = context.window
+
+ self._timer = wm.event_timer_add(self.report_interval, window)
+ window.cursor_set('WAIT')
+
+ def _wm_exit(self, context):
+ wm = context.window_manager
+ window = context.window
+
+ wm.event_timer_remove(self._timer)
+ window.cursor_set('DEFAULT')
+
+ def process_pre(self):
+ pass
+
+ def process_post(self, returncode):
+ pass
+
+ def modal(self, context, event):
+ wm = context.window_manager
+ p = self._process
+
+ if event.type == 'ESC':
+ self.cancel(context)
+ self.report({'INFO'}, "Operation aborted by user.")
+ return {'CANCELLED'}
+
+ elif event.type == 'TIMER':
+ if p.poll() is not None:
+ self._report_output()
+ self._wm_exit(context)
+ self.process_post(p.returncode)
+ return {'FINISHED'}
+
+ self._report_output()
+
+ return {'PASS_THROUGH'}
+
+ def execute(self, context):
+ import subprocess
+
+ self.process_pre()
+
+ try:
+ p = subprocess.Popen(
+ self.command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ except FileNotFoundError as ex:
+ # Command not found
+ self.report({'ERROR'}, str(ex))
+ return {'CANCELLED'}
+
+ self._process = p
+ self._buffer_iter = (
+ iter(self._non_blocking_readlines(p.stdout)),
+ iter(self._non_blocking_readlines(p.stderr)),
+ )
+
+ wm = context.window_manager
+ wm.modal_handler_add(self)
+
+ self._wm_enter(context)
+
+ return {'RUNNING_MODAL'}
+
+ def cancel(self, context):
+ self._wm_exit(context)
+ self._process.kill()
+ self.process_post(-1)
+
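A minimal sketch of an operator built on this mix-in, assuming it lives in the same package as the mix-in; the idname and the external command are hypothetical examples:

    import bpy
    from bpy.types import Operator

    from .subprocess_helper import SubprocessHelper

    class WM_OT_run_command(Operator, SubprocessHelper):
        """Run an external command without blocking Blender"""
        bl_idname = "wm.run_command"
        bl_label = "Run Command"

        report_interval = 0.25

        def process_pre(self):
            # any external command works, 'sleep' is only an example
            self.command = ("sleep", "2")

        def process_post(self, returncode):
            print("process finished, returncode:", returncode)

    # registration omitted; bpy.utils.register_class(WM_OT_run_command)
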
diff --git a/io_blend_utils/blend/blendfile.py b/io_blend_utils/blend/blendfile.py
new file mode 100644
index 00000000..0739a1bc
--- /dev/null
+++ b/io_blend_utils/blend/blendfile.py
@@ -0,0 +1,917 @@
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENSE BLOCK *****
+#
+# (c) 2009, At Mind B.V. - Jeroen Bakker
+# (c) 2014, Blender Foundation - Campbell Barton
+
+import os
+import struct
+import logging
+import gzip
+import tempfile
+
+log = logging.getLogger("blendfile")
+log.setLevel(logging.ERROR)
+
+FILE_BUFFER_SIZE = 1024 * 1024
+
+
+# -----------------------------------------------------------------------------
+# module global routines
+#
+# read routines
+# open a filename
+# determine if the file is compressed
+# and returns a handle
+def open_blend(filename, access="rb"):
+ """Opens a blend file for reading or writing pending on the access
+ supports 2 kind of blend files. Uncompressed and compressed.
+ Known issue: does not support packaged blend files
+ """
+ handle = open(filename, access)
+ magic_test = b"BLENDER"
+ magic = handle.read(len(magic_test))
+ if magic == magic_test:
+ log.debug("normal blendfile detected")
+ handle.seek(0, os.SEEK_SET)
+ bfile = BlendFile(handle)
+ bfile.is_compressed = False
+ bfile.filepath_orig = filename
+ return bfile
+ elif magic[:2] == b'\x1f\x8b':
+ log.debug("gzip blendfile detected")
+ handle.close()
+ log.debug("decompressing started")
+ fs = gzip.open(filename, "rb")
+ data = fs.read(FILE_BUFFER_SIZE)
+ magic = data[:len(magic_test)]
+ if magic == magic_test:
+ handle = tempfile.TemporaryFile()
+ while data:
+ handle.write(data)
+ data = fs.read(FILE_BUFFER_SIZE)
+ log.debug("decompressing finished")
+ fs.close()
+ log.debug("resetting decompressed file")
+ handle.seek(0, os.SEEK_SET)
+ bfile = BlendFile(handle)
+ bfile.is_compressed = True
+ bfile.filepath_orig = filename
+ return bfile
+ else:
+ raise Exception("filetype inside gzip not a blend")
+ else:
+ raise Exception("filetype not a blend or a gzip blend")
+
+
+def align(offset, by):
+ n = by - 1
+ return (offset + n) & ~n
+
+
+# -----------------------------------------------------------------------------
+# module classes
+
+
+class BlendFile:
+ """
+ Blend file.
+ """
+ __slots__ = (
+ # file (result of open())
+ "handle",
+ # str (original name of the file path)
+ "filepath_orig",
+ # BlendFileHeader
+ "header",
+ # struct.Struct
+ "block_header_struct",
+ # BlendFileBlock
+ "blocks",
+ # [DNAStruct, ...]
+ "structs",
+ # dict {b'StructName': sdna_index}
+ # (where the index is an index into 'structs')
+ "sdna_index_from_id",
+ # dict {addr_old: block}
+ "block_from_offset",
+ # int
+ "code_index",
+ # bool (did we make a change)
+ "is_modified",
+ # bool (is file gzipped)
+ "is_compressed",
+ )
+
+ def __init__(self, handle):
+ log.debug("initializing reading blend-file")
+ self.handle = handle
+ self.header = BlendFileHeader(handle)
+ self.block_header_struct = self.header.create_block_header_struct()
+ self.blocks = []
+ self.code_index = {}
+
+ block = BlendFileBlock(handle, self)
+ while block.code != b'ENDB':
+ if block.code == b'DNA1':
+ (self.structs,
+ self.sdna_index_from_id,
+ ) = BlendFile.decode_structs(self.header, block, handle)
+ else:
+ handle.seek(block.size, os.SEEK_CUR)
+
+ self.blocks.append(block)
+ self.code_index.setdefault(block.code, []).append(block)
+
+ block = BlendFileBlock(handle, self)
+ self.is_modified = False
+ self.blocks.append(block)
+
+ # cache (could lazy-init, in case we never use it?)
+ self.block_from_offset = {block.addr_old: block for block in self.blocks if block.code != b'ENDB'}
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, traceback):
+ self.close()
+
+ def find_blocks_from_code(self, code):
+ assert(type(code) == bytes)
+ if code not in self.code_index:
+ return []
+ return self.code_index[code]
+
+ def find_block_from_offset(self, offset):
+ # same as looping over all blocks,
+ # then checking ``block.addr_old == offset``
+ assert(type(offset) is int)
+ return self.block_from_offset.get(offset)
+
+ def close(self):
+ """
+ Close the blend file,
+ writing the blend file back to disk if changes have happened.
+ """
+ if not self.is_modified:
+ self.handle.close()
+ else:
+ handle = self.handle
+ if self.is_compressed:
+ log.debug("close compressed blend file")
+ handle.seek(0, os.SEEK_SET)
+ log.debug("compressing started")
+ fs = gzip.open(self.filepath_orig, "wb")
+ data = handle.read(FILE_BUFFER_SIZE)
+ while data:
+ fs.write(data)
+ data = handle.read(FILE_BUFFER_SIZE)
+ fs.close()
+ log.debug("compressing finished")
+
+ handle.close()
+
+ def ensure_subtype_smaller(self, sdna_index_curr, sdna_index_next):
+ # never refine to a smaller type
+ if (self.structs[sdna_index_curr].size >
+ self.structs[sdna_index_next].size):
+
+ raise RuntimeError("cant refine to smaller type (%s -> %s)" %
+ (self.structs[sdna_index_curr].dna_type_id.decode('ascii'),
+ self.structs[sdna_index_next].dna_type_id.decode('ascii')))
+
+ @staticmethod
+ def decode_structs(header, block, handle):
+ """
+ DNACatalog is a catalog of all information in the DNA1 file-block
+ """
+ log.debug("building DNA catalog")
+ shortstruct = DNA_IO.USHORT[header.endian_index]
+ shortstruct2 = struct.Struct(header.endian_str + b'HH')
+ intstruct = DNA_IO.UINT[header.endian_index]
+
+ data = handle.read(block.size)
+ types = []
+ names = []
+
+ structs = []
+ sdna_index_from_id = {}
+
+ offset = 8
+ names_len = intstruct.unpack_from(data, offset)[0]
+ offset += 4
+
+ log.debug("building #%d names" % names_len)
+ for i in range(names_len):
+ tName = DNA_IO.read_data0_offset(data, offset)
+ offset = offset + len(tName) + 1
+ names.append(DNAName(tName))
+ del names_len
+
+ offset = align(offset, 4)
+ offset += 4
+ types_len = intstruct.unpack_from(data, offset)[0]
+ offset += 4
+ log.debug("building #%d types" % types_len)
+ for i in range(types_len):
+ dna_type_id = DNA_IO.read_data0_offset(data, offset)
+ # None will be replaced by the DNAStruct, below
+ types.append(DNAStruct(dna_type_id))
+ offset += len(dna_type_id) + 1
+
+ offset = align(offset, 4)
+ offset += 4
+ log.debug("building #%d type-lengths" % types_len)
+ for i in range(types_len):
+ tLen = shortstruct.unpack_from(data, offset)[0]
+ offset = offset + 2
+ types[i].size = tLen
+ del types_len
+
+ offset = align(offset, 4)
+ offset += 4
+
+ structs_len = intstruct.unpack_from(data, offset)[0]
+ offset += 4
+ log.debug("building #%d structures" % structs_len)
+ for sdna_index in range(structs_len):
+ d = shortstruct2.unpack_from(data, offset)
+ struct_type_index = d[0]
+ offset += 4
+ dna_struct = types[struct_type_index]
+ sdna_index_from_id[dna_struct.dna_type_id] = sdna_index
+ structs.append(dna_struct)
+
+ fields_len = d[1]
+ dna_offset = 0
+
+ for field_index in range(fields_len):
+ d2 = shortstruct2.unpack_from(data, offset)
+ field_type_index = d2[0]
+ field_name_index = d2[1]
+ offset += 4
+ dna_type = types[field_type_index]
+ dna_name = names[field_name_index]
+ if dna_name.is_pointer or dna_name.is_method_pointer:
+ dna_size = header.pointer_size * dna_name.array_size
+ else:
+ dna_size = dna_type.size * dna_name.array_size
+
+ field = DNAField(dna_type, dna_name, dna_size, dna_offset)
+ dna_struct.fields.append(field)
+ dna_struct.field_from_name[dna_name.name_only] = field
+ dna_offset += dna_size
+
+ return structs, sdna_index_from_id
+
+
+class BlendFileBlock:
+ """
+ Instance of a struct.
+ """
+ __slots__ = (
+ # BlendFile
+ "file",
+ "code",
+ "size",
+ "addr_old",
+ "sdna_index",
+ "count",
+ "file_offset",
+ "user_data",
+ )
+
+ def __str__(self):
+ return ("<%s.%s (%s), size=%d at %s>" %
+ # fields=[%s]
+ (self.__class__.__name__,
+ self.dna_type.dna_type_id.decode('ascii'),
+ self.code.decode(),
+ self.size,
+ # b", ".join(f.dna_name.name_only for f in self.dna_type.fields).decode('ascii'),
+ hex(self.addr_old),
+ ))
+
+ def __init__(self, handle, bfile):
+ OLDBLOCK = struct.Struct(b'4sI')
+
+ self.file = bfile
+ self.user_data = None
+
+ data = handle.read(bfile.block_header_struct.size)
+ # header size can be 8, 20, or 24 bytes long
+ # 8: old blend files ENDB block (exception)
+ # 20: normal headers 32 bit platform
+ # 24: normal headers 64 bit platform
+ if len(data) > 15:
+
+ blockheader = bfile.block_header_struct.unpack(data)
+ self.code = blockheader[0].partition(b'\0')[0]
+ if self.code != b'ENDB':
+ self.size = blockheader[1]
+ self.addr_old = blockheader[2]
+ self.sdna_index = blockheader[3]
+ self.count = blockheader[4]
+ self.file_offset = handle.tell()
+ else:
+ self.size = 0
+ self.addr_old = 0
+ self.sdna_index = 0
+ self.count = 0
+ self.file_offset = 0
+ else:
+ blockheader = OLDBLOCK.unpack(data)
+ self.code = blockheader[0].partition(b'\0')[0]
+ self.code = DNA_IO.read_data0(blockheader[0])
+ self.size = 0
+ self.addr_old = 0
+ self.sdna_index = 0
+ self.count = 0
+ self.file_offset = 0
+
+ @property
+ def dna_type(self):
+ return self.file.structs[self.sdna_index]
+
+ def refine_type_from_index(self, sdna_index_next):
+ assert(type(sdna_index_next) is int)
+ sdna_index_curr = self.sdna_index
+ self.file.ensure_subtype_smaller(sdna_index_curr, sdna_index_next)
+ self.sdna_index = sdna_index_next
+
+ def refine_type(self, dna_type_id):
+ assert(type(dna_type_id) is bytes)
+ self.refine_type_from_index(self.file.sdna_index_from_id[dna_type_id])
+
+ def get_file_offset(self, path,
+ default=...,
+ sdna_index_refine=None,
+ base_index=0,
+ ):
+ """
+ Return (offset, length)
+ """
+ assert(type(path) is bytes)
+
+ ofs = self.file_offset
+ if base_index != 0:
+ assert(base_index < self.count)
+ ofs += (self.size // self.count) * base_index
+ self.file.handle.seek(ofs, os.SEEK_SET)
+
+ if sdna_index_refine is None:
+ sdna_index_refine = self.sdna_index
+ else:
+ self.file.ensure_subtype_smaller(self.sdna_index, sdna_index_refine)
+
+ dna_struct = self.file.structs[sdna_index_refine]
+ field = dna_struct.field_from_path(
+ self.file.header, self.file.handle, path)
+
+ return (self.file.handle.tell(), field.dna_name.array_size)
+
+ def get(self, path,
+ default=...,
+ sdna_index_refine=None,
+ use_nil=True, use_str=True,
+ base_index=0,
+ ):
+
+ ofs = self.file_offset
+ if base_index != 0:
+ assert(base_index < self.count)
+ ofs += (self.size // self.count) * base_index
+ self.file.handle.seek(ofs, os.SEEK_SET)
+
+ if sdna_index_refine is None:
+ sdna_index_refine = self.sdna_index
+ else:
+ self.file.ensure_subtype_smaller(self.sdna_index, sdna_index_refine)
+
+ dna_struct = self.file.structs[sdna_index_refine]
+ return dna_struct.field_get(
+ self.file.header, self.file.handle, path,
+ default=default,
+ use_nil=use_nil, use_str=use_str,
+ )
+
+ def get_recursive_iter(self, path, path_root=b"",
+ default=...,
+ sdna_index_refine=None,
+ use_nil=True, use_str=True,
+ base_index=0,
+ ):
+ if path_root:
+ path_full = (
+ (path_root if type(path_root) is tuple else (path_root, )) +
+ (path if type(path) is tuple else (path, )))
+ else:
+ path_full = path
+
+ try:
+ yield (path_full, self.get(path_full, default, sdna_index_refine, use_nil, use_str, base_index))
+ except NotImplementedError as ex:
+ msg, dna_name, dna_type = ex.args
+ struct_index = self.file.sdna_index_from_id.get(dna_type.dna_type_id, None)
+ if struct_index is None:
+ yield (path_full, "<%s>" % dna_type.dna_type_id.decode('ascii'))
+ else:
+ struct = self.file.structs[struct_index]
+ for f in struct.fields:
+ yield from self.get_recursive_iter(
+ f.dna_name.name_only, path_full, default, None, use_nil, use_str, 0)
+
+ def items_recursive_iter(self):
+ for k in self.keys():
+ yield from self.get_recursive_iter(k, use_str=False)
+
+ def get_data_hash(self):
+ """
+ Generates a 'hash' that can be used instead of addr_old as block id, and that should be 'stable' across .blend
+ file load & save (i.e. it does not changes due to pointer addresses variations).
+ """
+ # TODO: This implementation is most likely far from optimal... and CRC32 is not renowned as the best hashing
+ # algo either. But for now it does the job!
+ import zlib
+ def _is_pointer(self, k):
+ return self.file.structs[self.sdna_index].field_from_path(
+ self.file.header, self.file.handle, k).dna_name.is_pointer
+
+ hsh = 1
+ for k, v in self.items_recursive_iter():
+ if not _is_pointer(self, k):
+ hsh = zlib.adler32(str(v).encode(), hsh)
+ return hsh
+
+ def set(self, path, value,
+ sdna_index_refine=None,
+ ):
+
+ if sdna_index_refine is None:
+ sdna_index_refine = self.sdna_index
+ else:
+ self.file.ensure_subtype_smaller(self.sdna_index, sdna_index_refine)
+
+ dna_struct = self.file.structs[sdna_index_refine]
+ self.file.handle.seek(self.file_offset, os.SEEK_SET)
+ self.file.is_modified = True
+ return dna_struct.field_set(
+ self.file.header, self.file.handle, path, value)
+
+ # ---------------
+ # Utility get/set
+ #
+ # avoid inline pointer casting
+ def get_pointer(
+ self, path,
+ default=...,
+ sdna_index_refine=None,
+ base_index=0,
+ ):
+ if sdna_index_refine is None:
+ sdna_index_refine = self.sdna_index
+ result = self.get(path, default, sdna_index_refine=sdna_index_refine, base_index=base_index)
+
+ # default
+ if type(result) is not int:
+ return result
+
+ assert(self.file.structs[sdna_index_refine].field_from_path(
+ self.file.header, self.file.handle, path).dna_name.is_pointer)
+ if result != 0:
+ # possible (but unlikely)
+ # that this fails and returns None
+ # maybe we want to raise some exception in this case
+ return self.file.find_block_from_offset(result)
+ else:
+ return None
+
+ # ----------------------
+ # Python convenience API
+
+ # dict like access
+ def __getitem__(self, item):
+ return self.get(item, use_str=False)
+
+ def __setitem__(self, item, value):
+ self.set(item, value)
+
+ def keys(self):
+ return (f.dna_name.name_only for f in self.dna_type.fields)
+
+ def values(self):
+ for k in self.keys():
+ try:
+ yield self[k]
+ except NotImplementedError as ex:
+ msg, dna_name, dna_type = ex.args
+ yield "<%s>" % dna_type.dna_type_id.decode('ascii')
+
+ def items(self):
+ for k in self.keys():
+ try:
+ yield (k, self[k])
+ except NotImplementedError as ex:
+ msg, dna_name, dna_type = ex.args
+ yield (k, "<%s>" % dna_type.dna_type_id.decode('ascii'))
+
+
+# -----------------------------------------------------------------------------
+# Read Magic
+#
+# magic = str
+# pointer_size = int
+# is_little_endian = bool
+# version = int
+
+
+class BlendFileHeader:
+ """
+ BlendFileHeader parses the first 12 bytes of a blend file;
+ it contains information about the hardware architecture.
+ """
+ __slots__ = (
+ # str
+ "magic",
+ # int 4/8
+ "pointer_size",
+ # bool
+ "is_little_endian",
+ # int
+ "version",
+ # str, used to pass to 'struct'
+ "endian_str",
+ # int, used to index common types
+ "endian_index",
+ )
+
+ def __init__(self, handle):
+ FILEHEADER = struct.Struct(b'7s1s1s3s')
+
+ log.debug("reading blend-file-header")
+ values = FILEHEADER.unpack(handle.read(FILEHEADER.size))
+ self.magic = values[0]
+ pointer_size_id = values[1]
+ if pointer_size_id == b'-':
+ self.pointer_size = 8
+ elif pointer_size_id == b'_':
+ self.pointer_size = 4
+ else:
+ assert(0)
+ endian_id = values[2]
+ if endian_id == b'v':
+ self.is_little_endian = True
+ self.endian_str = b'<'
+ self.endian_index = 0
+ elif endian_id == b'V':
+ self.is_little_endian = False
+ self.endian_index = 1
+ self.endian_str = b'>'
+ else:
+ assert(0)
+
+ version_id = values[3]
+ self.version = int(version_id)
+
+ def create_block_header_struct(self):
+ return struct.Struct(b''.join((
+ self.endian_str,
+ b'4sI',
+ b'I' if self.pointer_size == 4 else b'Q',
+ b'II',
+ )))
+
+
+class DNAName:
+ """
+ DNAName is a C-type name stored in the DNA
+ """
+ __slots__ = (
+ "name_full",
+ "name_only",
+ "is_pointer",
+ "is_method_pointer",
+ "array_size",
+ )
+
+ def __init__(self, name_full):
+ self.name_full = name_full
+ self.name_only = self.calc_name_only()
+ self.is_pointer = self.calc_is_pointer()
+ self.is_method_pointer = self.calc_is_method_pointer()
+ self.array_size = self.calc_array_size()
+
+ def as_reference(self, parent):
+ if parent is None:
+ result = b''
+ else:
+ result = parent + b'.'
+
+ result = result + self.name_only
+ return result
+
+ def calc_name_only(self):
+ result = self.name_full.strip(b'*()')
+ index = result.find(b'[')
+ if index != -1:
+ result = result[:index]
+ return result
+
+ def calc_is_pointer(self):
+ return (b'*' in self.name_full)
+
+ def calc_is_method_pointer(self):
+ return (b'(*' in self.name_full)
+
+ def calc_array_size(self):
+ result = 1
+ temp = self.name_full
+ index = temp.find(b'[')
+
+ while index != -1:
+ index_2 = temp.find(b']')
+ result *= int(temp[index + 1:index_2])
+ temp = temp[index_2 + 1:]
+ index = temp.find(b'[')
+
+ return result
+
+
+class DNAField:
+ """
+ DNAField is a coupled DNAStruct and DNAName
+ and cache offset for reuse
+ """
+ __slots__ = (
+ # DNAName
+ "dna_name",
+ # DNAStruct
+ "dna_type",
+ # size on-disk
+ "dna_size",
+ # cached info (avoid looping over fields each time)
+ "dna_offset",
+ )
+
+ def __init__(self, dna_type, dna_name, dna_size, dna_offset):
+ self.dna_type = dna_type
+ self.dna_name = dna_name
+ self.dna_size = dna_size
+ self.dna_offset = dna_offset
+
+
+class DNAStruct:
+ """
+ DNAStruct is a C-type structure stored in the DNA
+ """
+ __slots__ = (
+ "dna_type_id",
+ "size",
+ "fields",
+ "field_from_name",
+ "user_data",
+ )
+
+ def __init__(self, dna_type_id):
+ self.dna_type_id = dna_type_id
+ self.fields = []
+ self.field_from_name = {}
+ self.user_data = None
+
+ def field_from_path(self, header, handle, path):
+ """
+ Support lookups as bytes or a tuple of bytes and optional index.
+
+ C style 'id.name' --> (b'id', b'name')
+ C style 'array[4]' --> ('array', 4)
+ """
+ if type(path) is tuple:
+ name = path[0]
+ if len(path) >= 2 and type(path[1]) is not bytes:
+ name_tail = path[2:]
+ index = path[1]
+ assert(type(index) is int)
+ else:
+ name_tail = path[1:]
+ index = 0
+ else:
+ name = path
+ name_tail = None
+ index = 0
+
+ assert(type(name) is bytes)
+
+ field = self.field_from_name.get(name)
+
+ if field is not None:
+ handle.seek(field.dna_offset, os.SEEK_CUR)
+ if index != 0:
+ if field.dna_name.is_pointer:
+ index_offset = header.pointer_size * index
+ else:
+ index_offset = field.dna_type.size * index
+ assert(index_offset < field.dna_size)
+ handle.seek(index_offset, os.SEEK_CUR)
+ if not name_tail: # None or ()
+ return field
+ else:
+ return field.dna_type.field_from_path(header, handle, name_tail)
+
+ def field_get(self, header, handle, path,
+ default=...,
+ use_nil=True, use_str=True,
+ ):
+ field = self.field_from_path(header, handle, path)
+ if field is None:
+ if default is not ...:
+ return default
+ else:
+ raise KeyError("%r not found in %r (%r)" %
+ (path, [f.dna_name.name_only for f in self.fields], self.dna_type_id))
+
+ dna_type = field.dna_type
+ dna_name = field.dna_name
+
+ if dna_name.is_pointer:
+ return DNA_IO.read_pointer(handle, header)
+ elif dna_type.dna_type_id == b'int':
+ if dna_name.array_size > 1:
+ return [DNA_IO.read_int(handle, header) for i in range(dna_name.array_size)]
+ return DNA_IO.read_int(handle, header)
+ elif dna_type.dna_type_id == b'short':
+ if dna_name.array_size > 1:
+ return [DNA_IO.read_short(handle, header) for i in range(dna_name.array_size)]
+ return DNA_IO.read_short(handle, header)
+ elif dna_type.dna_type_id == b'uint64_t':
+ if dna_name.array_size > 1:
+ return [DNA_IO.read_ulong(handle, header) for i in range(dna_name.array_size)]
+ return DNA_IO.read_ulong(handle, header)
+ elif dna_type.dna_type_id == b'float':
+ if dna_name.array_size > 1:
+ return [DNA_IO.read_float(handle, header) for i in range(dna_name.array_size)]
+ return DNA_IO.read_float(handle, header)
+ elif dna_type.dna_type_id == b'char':
+ if use_str:
+ if use_nil:
+ return DNA_IO.read_string0(handle, dna_name.array_size)
+ else:
+ return DNA_IO.read_string(handle, dna_name.array_size)
+ else:
+ if use_nil:
+ return DNA_IO.read_bytes0(handle, dna_name.array_size)
+ else:
+ return DNA_IO.read_bytes(handle, dna_name.array_size)
+ else:
+ raise NotImplementedError("%r exists but isn't pointer, can't resolve field %r" %
+ (path, dna_name.name_only), dna_name, dna_type)
+
+ def field_set(self, header, handle, path, value):
+ assert(type(path) == bytes)
+
+ field = self.field_from_path(header, handle, path)
+ if field is None:
+ raise KeyError("%r not found in %r" %
+ (path, [f.dna_name.name_only for f in self.fields]))
+
+ dna_type = field.dna_type
+ dna_name = field.dna_name
+
+ if dna_type.dna_type_id == b'char':
+ if type(value) is str:
+ return DNA_IO.write_string(handle, value, dna_name.array_size)
+ else:
+ return DNA_IO.write_bytes(handle, value, dna_name.array_size)
+ else:
+ raise NotImplementedError("Setting %r is not yet supported" %
+ dna_type.dna_type_id, dna_name, dna_type)
+
+
+class DNA_IO:
+ """
+ Module like class, for read-write utility functions.
+
+ Only stores static methods & constants.
+ """
+
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwargs):
+ raise RuntimeError("%s should not be instantiated" % cls)
+
+ @staticmethod
+ def write_string(handle, astring, fieldlen):
+ assert(isinstance(astring, str))
+ if len(astring) >= fieldlen:
+ stringw = astring[0:fieldlen]
+ else:
+ stringw = astring + '\0'
+ handle.write(stringw.encode('utf-8'))
+
+ @staticmethod
+ def write_bytes(handle, astring, fieldlen):
+ assert(isinstance(astring, (bytes, bytearray)))
+ if len(astring) >= fieldlen:
+ stringw = astring[0:fieldlen]
+ else:
+ stringw = astring + b'\0'
+
+ handle.write(stringw)
+
+ @staticmethod
+ def read_bytes(handle, length):
+ data = handle.read(length)
+ return data
+
+ @staticmethod
+ def read_bytes0(handle, length):
+ data = handle.read(length)
+ return DNA_IO.read_data0(data)
+
+ @staticmethod
+ def read_string(handle, length):
+ return DNA_IO.read_bytes(handle, length).decode('utf-8')
+
+ @staticmethod
+ def read_string0(handle, length):
+ return DNA_IO.read_bytes0(handle, length).decode('utf-8')
+
+ @staticmethod
+ def read_data0_offset(data, offset):
+ add = data.find(b'\0', offset) - offset
+ return data[offset:offset + add]
+
+ @staticmethod
+ def read_data0(data):
+ add = data.find(b'\0')
+ return data[:add]
+
+ USHORT = struct.Struct(b'<H'), struct.Struct(b'>H')
+
+ @staticmethod
+ def read_ushort(handle, fileheader):
+ st = DNA_IO.USHORT[fileheader.endian_index]
+ return st.unpack(handle.read(st.size))[0]
+
+ SSHORT = struct.Struct(b'<h'), struct.Struct(b'>h')
+
+ @staticmethod
+ def read_short(handle, fileheader):
+ st = DNA_IO.SSHORT[fileheader.endian_index]
+ return st.unpack(handle.read(st.size))[0]
+
+ UINT = struct.Struct(b'<I'), struct.Struct(b'>I')
+
+ @staticmethod
+ def read_uint(handle, fileheader):
+ st = DNA_IO.UINT[fileheader.endian_index]
+ return st.unpack(handle.read(st.size))[0]
+
+ SINT = struct.Struct(b'<i'), struct.Struct(b'>i')
+
+ @staticmethod
+ def read_int(handle, fileheader):
+ st = DNA_IO.SINT[fileheader.endian_index]
+ return st.unpack(handle.read(st.size))[0]
+
+ FLOAT = struct.Struct(b'<f'), struct.Struct(b'>f')
+
+ @staticmethod
+ def read_float(handle, fileheader):
+ st = DNA_IO.FLOAT[fileheader.endian_index]
+ return st.unpack(handle.read(st.size))[0]
+
+ ULONG = struct.Struct(b'<Q'), struct.Struct(b'>Q')
+
+ @staticmethod
+ def read_ulong(handle, fileheader):
+ st = DNA_IO.ULONG[fileheader.endian_index]
+ return st.unpack(handle.read(st.size))[0]
+
+ @staticmethod
+ def read_pointer(handle, header):
+ """
+ reads a pointer from a file handle,
+ the pointer size is given by the header (BlendFileHeader)
+ """
+ if header.pointer_size == 4:
+ st = DNA_IO.UINT[header.endian_index]
+ return st.unpack(handle.read(st.size))[0]
+ if header.pointer_size == 8:
+ st = DNA_IO.ULONG[header.endian_index]
+ return st.unpack(handle.read(st.size))[0]
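
A minimal sketch of reading with this module (the file path is a hypothetical example, and the import assumes the blend package is on sys.path):

    from blendfile import open_blend

    with open_blend("/path/to/scene.blend") as blend:
        # list the names of all object (b'OB') blocks;
        # every ID block starts with an 'id' struct holding its name
        for block in blend.find_blocks_from_code(b'OB'):
            print(block[b'id', b'name'])
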
diff --git a/io_blend_utils/blend/blendfile_path_walker.py b/io_blend_utils/blend/blendfile_path_walker.py
new file mode 100644
index 00000000..9c6c800f
--- /dev/null
+++ b/io_blend_utils/blend/blendfile_path_walker.py
@@ -0,0 +1,939 @@
+#!/usr/bin/env python3
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENSE BLOCK *****
+
+import os
+# gives problems with scripts that use stdout, e.g. when testing 'bam deps'.
+VERBOSE = False # os.environ.get('BAM_VERBOSE', False)
+TIMEIT = False
+
+USE_ALEMBIC_BRANCH = False
+
+
+class C_defs:
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwargs):
+ raise RuntimeError("%s should not be instantiated" % cls)
+
+ # DNA_sequence_types.h (Sequence.type)
+ SEQ_TYPE_IMAGE = 0
+ SEQ_TYPE_META = 1
+ SEQ_TYPE_SCENE = 2
+ SEQ_TYPE_MOVIE = 3
+ SEQ_TYPE_SOUND_RAM = 4
+ SEQ_TYPE_SOUND_HD = 5
+ SEQ_TYPE_MOVIECLIP = 6
+ SEQ_TYPE_MASK = 7
+ SEQ_TYPE_EFFECT = 8
+
+ IMA_SRC_FILE = 1
+ IMA_SRC_SEQUENCE = 2
+ IMA_SRC_MOVIE = 3
+
+ # DNA_modifier_types.h
+ eModifierType_MeshCache = 46
+
+ # DNA_particle_types.h
+ PART_DRAW_OB = 7
+ PART_DRAW_GR = 8
+
+ # DNA_object_types.h
+ # Object.transflag
+ OB_DUPLIGROUP = 1 << 8
+
+ if USE_ALEMBIC_BRANCH:
+ CACHE_LIBRARY_SOURCE_CACHE = 1
+
+
+if VERBOSE:
+ import logging
+ log_deps = logging.getLogger("path_walker")
+ del logging
+
+ def set_as_str(s):
+ if s is None:
+ return "None"
+ else:
+ return (", ".join(sorted(i.decode('ascii') for i in sorted(s))))
+
+
+class FPElem:
+ """
+ Tiny filepath class to hide blendfile.
+ """
+
+ __slots__ = (
+ "basedir",
+
+ # library link level
+ "level",
+
+ # True when this is a part of a sequence (image or movieclip)
+ "is_sequence",
+
+ "userdata",
+ )
+
+ def __init__(self, basedir, level,
+ # subclasses get/set functions should use
+ userdata):
+ self.basedir = basedir
+ self.level = level
+ self.is_sequence = False
+
+ # subclass must call
+ self.userdata = userdata
+
+ def files_siblings(self):
+ return ()
+
+ # --------
+ # filepath
+
+ def filepath_absolute_resolve(self, basedir=None):
+ """
+ Resolve the filepath, with the option to override the basedir.
+ """
+ filepath = self.filepath
+ if filepath.startswith(b'//'):
+ if basedir is None:
+ basedir = self.basedir
+ return os.path.normpath(os.path.join(
+ basedir,
+ utils.compatpath(filepath[2:]),
+ ))
+ else:
+ return utils.compatpath(filepath)
+
+ def filepath_assign_edits(self, filepath, binary_edits):
+ self._set_cb_edits(filepath, binary_edits)
+
+ @staticmethod
+ def _filepath_assign_edits(block, path, filepath, binary_edits):
+ """
+ Record the write to a separate entry (binary file-like object),
+ this lets us replay the edits later.
+ (so we can replay them onto the client's local cache without a file transfer).
+ """
+ import struct
+ assert(type(filepath) is bytes)
+ assert(type(path) is bytes)
+ ofs, size = block.get_file_offset(path)
+ # ensure we don't write past the field size & allow for \0
+ filepath = filepath[:size - 1]
+ binary_edits.append((ofs, filepath + b'\0'))
+
+ @property
+ def filepath(self):
+ return self._get_cb()
+
+ @filepath.setter
+ def filepath(self, filepath):
+ self._set_cb(filepath)
+
+ @property
+ def filepath_absolute(self):
+ return self.filepath_absolute_resolve()
+
+
+class FPElem_block_path(FPElem):
+ """
+ Simple block-path:
+ userdata = (block, path)
+ """
+ __slots__ = ()
+
+ def _get_cb(self):
+ block, path = self.userdata
+ return block[path]
+
+ def _set_cb(self, filepath):
+ block, path = self.userdata
+ block[path] = filepath
+
+ def _set_cb_edits(self, filepath, binary_edits):
+ block, path = self.userdata
+ self._filepath_assign_edits(block, path, filepath, binary_edits)
+
+
+class FPElem_sequence_single(FPElem):
+ """
+ Movie sequence
+ userdata = (block, path, sub_block, sub_path)
+ """
+ __slots__ = ()
+
+ def _get_cb(self):
+ block, path, sub_block, sub_path = self.userdata
+ return block[path] + sub_block[sub_path]
+
+ def _set_cb(self, filepath):
+ block, path, sub_block, sub_path = self.userdata
+ head, sep, tail = utils.splitpath(filepath)
+
+ block[path] = head + sep
+ sub_block[sub_path] = tail
+
+ def _set_cb_edits(self, filepath, binary_edits):
+ block, path, sub_block, sub_path = self.userdata
+ head, sep, tail = utils.splitpath(filepath)
+
+ self._filepath_assign_edits(block, path, head + sep, binary_edits)
+ self._filepath_assign_edits(sub_block, sub_path, tail, binary_edits)
+
+
+class FPElem_sequence_image_seq(FPElem_sequence_single):
+ """
+ Image sequence
+ userdata = (block, path, sub_block, sub_path)
+ """
+ __slots__ = ()
+
+ def files_siblings(self):
+ block, path, sub_block, sub_path = self.userdata
+
+ array = block.get_pointer(b'stripdata')
+ files = [array.get(b'name', use_str=False, base_index=i) for i in range(array.count)]
+ return files
+
+
+class FilePath:
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwargs):
+ raise RuntimeError("%s should not be instantiated" % cls)
+
+ # ------------------------------------------------------------------------
+ # Main function to visit paths
+ @staticmethod
+ def visit_from_blend(
+ filepath,
+
+ # never modify the blend
+ readonly=True,
+ # callback that creates a temp file and returns its path.
+ temp_remap_cb=None,
+
+ # recursive options
+ recursive=False,
+ # recurse all indirectly linked data
+ # (not just from the initially referenced blend file)
+ recursive_all=False,
+ # list of ID block names we want to load, or None to load all
+ block_codes=None,
+ # root when we're loading libs indirectly
+ rootdir=None,
+ level=0,
+ # dict of id's used so we don't follow these links again
+ # prevents cyclic references too!
+ # {lib_path: set([block id's ...])}
+ lib_visit=None,
+
+ # optional blendfile callbacks
+ # These callbacks run on enter-exit blend files
+ # so you can keep track of what file and level you're at.
+ blendfile_level_cb=(None, None),
+ ):
+ # print(level, block_codes)
+ import os
+
+ filepath = os.path.abspath(filepath)
+
+ if VERBOSE:
+ indent_str = " " * level
+ # print(indent_str + "Opening:", filepath)
+ # print(indent_str + "... blocks:", block_codes)
+
+ log_deps.info("~")
+ log_deps.info("%s%s" % (indent_str, filepath.decode('utf-8')))
+ log_deps.info("%s%s" % (indent_str, set_as_str(block_codes)))
+
+ blendfile_level_cb_enter, blendfile_level_cb_exit = blendfile_level_cb
+
+ if blendfile_level_cb_enter is not None:
+ blendfile_level_cb_enter(filepath)
+
+ basedir = os.path.dirname(filepath)
+ if rootdir is None:
+ rootdir = basedir
+
+ if lib_visit is None:
+ lib_visit = {}
+
+
+
+ if recursive and (level > 0) and (block_codes is not None) and (recursive_all is False):
+ # prevent expanding the
+ # same datablock more than once
+ # note: we could *almost* use id_name, however this isn't unique for libraries.
+ expand_addr_visit = set()
+ # {lib_id: {block_ids... }}
+ expand_codes_idlib = {}
+
+ # libraries used by this blend
+ block_codes_idlib = set()
+
+ # XXX, checking 'block_codes' isn't 100% reliable,
+ # but at least don't touch the same blocks twice.
+ # whereas block_codes is intended to only operate on blocks we requested.
+ lib_block_codes_existing = lib_visit.setdefault(filepath, set())
+
+ # only for this block
+ def _expand_codes_add_test(block, code):
+ # return True, if the ID should be searched further
+ #
+ # we could investigate a better way...
+ # Not to be accessing ID blocks at this point, but it's harmless
+ if code == b'ID':
+ assert(code == block.code)
+ if recursive:
+ expand_codes_idlib.setdefault(block[b'lib'], set()).add(block[b'name'])
+ return False
+ else:
+ id_name = block[b'id', b'name']
+
+ # if we touched this already, don't touch again
+ # (else we may modify the same path multiple times)
+ #
+ # FIXME, works in some cases but not others
+ # keep, without this we get errors
+ # Gooseberry r668
+ # bam pack scenes/01_island/01_meet_franck/01_01_01_A/01_01_01_A.comp.blend
+ # gives strange errors
+ '''
+ if id_name not in block_codes:
+ return False
+ '''
+
+ # instead just don't operate on blocks multiple times
+ # ... rather than attempt to check on what we need or not.
+ len_prev = len(lib_block_codes_existing)
+ lib_block_codes_existing.add(id_name)
+ if len_prev == len(lib_block_codes_existing):
+ return False
+
+ len_prev = len(expand_addr_visit)
+ expand_addr_visit.add(block.addr_old)
+ return (len_prev != len(expand_addr_visit))
+
+ def block_expand(block, code):
+ assert(block.code == code)
+ if _expand_codes_add_test(block, code):
+ yield block
+
+ assert(block.code == code)
+ fn = ExpandID.expand_funcs.get(code)
+ if fn is not None:
+ for sub_block in fn(block):
+ if sub_block is not None:
+ yield from block_expand(sub_block, sub_block.code)
+ else:
+ if code == b'ID':
+ yield block
+ else:
+ expand_addr_visit = None
+
+ # set below
+ expand_codes_idlib = None
+
+ # never set
+ block_codes_idlib = None
+
+ def block_expand(block, code):
+ assert(block.code == code)
+ yield block
+
+ # ------
+ # Define
+ #
+ # - iter_blocks_id(code)
+ # - iter_blocks_idlib()
+ if block_codes is None:
+ def iter_blocks_id(code):
+ return blend.find_blocks_from_code(code)
+
+ def iter_blocks_idlib():
+ return blend.find_blocks_from_code(b'LI')
+ else:
+ def iter_blocks_id(code):
+ for block in blend.find_blocks_from_code(code):
+ if block[b'id', b'name'] in block_codes:
+ yield from block_expand(block, code)
+
+ if block_codes_idlib is not None:
+ def iter_blocks_idlib():
+ for block in blend.find_blocks_from_code(b'LI'):
+ # TODO, this should work but in fact makes some libs not link correctly.
+ if block[b'name'] in block_codes_idlib:
+ yield from block_expand(block, b'LI')
+ else:
+ def iter_blocks_idlib():
+ return blend.find_blocks_from_code(b'LI')
+
+ if temp_remap_cb is not None:
+ filepath_tmp = temp_remap_cb(filepath, rootdir)
+ else:
+ filepath_tmp = filepath
+
+ # store info to pass along with each iteration
+ extra_info = rootdir, os.path.basename(filepath)
+
+ from blend import blendfile
+ with blendfile.open_blend(filepath_tmp, "rb" if readonly else "r+b") as blend:
+
+ for code in blend.code_index.keys():
+ # handle library blocks as special case
+ if ((len(code) != 2) or
+ (code in {
+ # libraries handled below
+ b'LI',
+ b'ID',
+ # unneeded
+ b'WM',
+ b'SN', # bScreen
+ })):
+
+ continue
+
+ # if VERBOSE:
+ # print(" Scanning", code)
+
+ for block in iter_blocks_id(code):
+ yield from FilePath.from_block(block, basedir, extra_info, level)
+
+ # print("A:", expand_addr_visit)
+ # print("B:", block_codes)
+ if VERBOSE:
+ log_deps.info("%s%s" % (indent_str, set_as_str(expand_addr_visit)))
+
+ if recursive:
+
+ if expand_codes_idlib is None:
+ expand_codes_idlib = {}
+ for block in blend.find_blocks_from_code(b'ID'):
+ expand_codes_idlib.setdefault(block[b'lib'], set()).add(block[b'name'])
+
+ # look into libraries
+ lib_all = []
+
+ for lib_id, lib_block_codes in sorted(expand_codes_idlib.items()):
+ lib = blend.find_block_from_offset(lib_id)
+ lib_path = lib[b'name']
+
+ # get all data needed to read the blend files here (it will be freed!)
+ # lib is an address at the moment, we only use as a way to group
+
+ lib_all.append((lib_path, lib_block_codes))
+ # import IPython; IPython.embed()
+
+ # ensure we expand indirect linked libs
+ if block_codes_idlib is not None:
+ block_codes_idlib.add(lib_path)
+
+ # do this after, in case we mangle names above
+ for block in iter_blocks_idlib():
+ yield from FilePath.from_block(block, basedir, extra_info, level)
+ del blend
+
+
+ # ----------------
+ # Handle Recursive
+ if recursive:
+ # now we've closed the file, loop on other files
+
+ # note, sorting - isn't needed, it just gives predictable load-order.
+ for lib_path, lib_block_codes in lib_all:
+ lib_path_abs = os.path.normpath(utils.compatpath(utils.abspath(lib_path, basedir)))
+
+ # if we visited this before,
+ # check we don't follow the same links more than once
+ lib_block_codes_existing = lib_visit.setdefault(lib_path_abs, set())
+ lib_block_codes -= lib_block_codes_existing
+
+ # don't touch them again
+ # XXX, this is now maintained in "_expand_generic_material"
+ # lib_block_codes_existing.update(lib_block_codes)
+
+ # print("looking for", lib_block_codes)
+
+ if not lib_block_codes:
+ if VERBOSE:
+ print((indent_str + " "), "Library Skipped (visited): ", filepath, " -> ", lib_path_abs, sep="")
+ continue
+
+ if not os.path.exists(lib_path_abs):
+ if VERBOSE:
+ print((indent_str + " "), "Library Missing: ", filepath, " -> ", lib_path_abs, sep="")
+ continue
+
+ # import IPython; IPython.embed()
+ if VERBOSE:
+ print((indent_str + " "), "Library: ", filepath, " -> ", lib_path_abs, sep="")
+ # print((indent_str + " "), lib_block_codes)
+ yield from FilePath.visit_from_blend(
+ lib_path_abs,
+ readonly=readonly,
+ temp_remap_cb=temp_remap_cb,
+ recursive=True,
+ block_codes=lib_block_codes,
+ rootdir=rootdir,
+ level=level + 1,
+ lib_visit=lib_visit,
+ blendfile_level_cb=blendfile_level_cb,
+ )
+
+ if blendfile_level_cb_exit is not None:
+ blendfile_level_cb_exit(filepath)
+
+ # ------------------------------------------------------------------------
+ # Direct filepaths from Blocks
+ #
+ # (no expanding or following references)
+
+ @staticmethod
+ def from_block(block, basedir, extra_info, level):
+ assert(block.code != b'DATA')
+ fn = FilePath._from_block_dict.get(block.code)
+ if fn is not None:
+ yield from fn(block, basedir, extra_info, level)
+
+ @staticmethod
+ def _from_block_OB(block, basedir, extra_info, level):
+ # 'ob->modifiers[...].filepath'
+ for block_mod in bf_utils.iter_ListBase(
+ block.get_pointer((b'modifiers', b'first')),
+ next_item=(b'modifier', b'next')):
+ item_md_type = block_mod[b'modifier', b'type']
+ if item_md_type == C_defs.eModifierType_MeshCache:
+ yield FPElem_block_path(basedir, level, (block_mod, b'filepath')), extra_info
+
+ @staticmethod
+ def _from_block_MC(block, basedir, extra_info, level):
+ # TODO, image sequence
+ fp = FPElem_block_path(basedir, level, (block, b'name'))
+ fp.is_sequence = True
+ yield fp, extra_info
+
+ @staticmethod
+ def _from_block_IM(block, basedir, extra_info, level):
+ # old files miss this
+ image_source = block.get(b'source', C_defs.IMA_SRC_FILE)
+ if image_source not in {C_defs.IMA_SRC_FILE, C_defs.IMA_SRC_SEQUENCE, C_defs.IMA_SRC_MOVIE}:
+ return
+ if block[b'packedfile']:
+ return
+
+ fp = FPElem_block_path(basedir, level, (block, b'name'))
+ if image_source == C_defs.IMA_SRC_SEQUENCE:
+ fp.is_sequence = True
+ yield fp, extra_info
+
+ @staticmethod
+ def _from_block_VF(block, basedir, extra_info, level):
+ if block[b'packedfile']:
+ return
+ if block[b'name'] != b'<builtin>': # builtin font
+ yield FPElem_block_path(basedir, level, (block, b'name')), extra_info
+
+ @staticmethod
+ def _from_block_SO(block, basedir, extra_info, level):
+ if block[b'packedfile']:
+ return
+ yield FPElem_block_path(basedir, level, (block, b'name')), extra_info
+
+ @staticmethod
+ def _from_block_ME(block, basedir, extra_info, level):
+ block_external = block.get_pointer((b'ldata', b'external'), None)
+ if block_external is None:
+ block_external = block.get_pointer((b'fdata', b'external'), None)
+
+ if block_external is not None:
+ yield FPElem_block_path(basedir, level, (block_external, b'filename')), extra_info
+
+ if USE_ALEMBIC_BRANCH:
+ @staticmethod
+ def _from_block_CL(block, basedir, extra_info, level):
+ if block[b'source_mode'] == C_defs.CACHE_LIBRARY_SOURCE_CACHE:
+ yield FPElem_block_path(basedir, level, (block, b'input_filepath')), extra_info
+
+ @staticmethod
+ def _from_block_SC(block, basedir, extra_info, level):
+ block_ed = block.get_pointer(b'ed')
+ if block_ed is not None:
+ sdna_index_Sequence = block.file.sdna_index_from_id[b'Sequence']
+
+ def seqbase(someseq):
+ for item in someseq:
+ item_type = item.get(b'type', sdna_index_refine=sdna_index_Sequence)
+
+ if item_type >= C_defs.SEQ_TYPE_EFFECT:
+ pass
+ elif item_type == C_defs.SEQ_TYPE_META:
+ yield from seqbase(bf_utils.iter_ListBase(
+ item.get_pointer((b'seqbase', b'first'), sdna_index_refine=sdna_index_Sequence)))
+ else:
+ item_strip = item.get_pointer(b'strip', sdna_index_refine=sdna_index_Sequence)
+ if item_strip is None: # unlikely!
+ continue
+ item_stripdata = item_strip.get_pointer(b'stripdata')
+
+ if item_type == C_defs.SEQ_TYPE_IMAGE:
+ yield FPElem_sequence_image_seq(
+ basedir, level, (item_strip, b'dir', item_stripdata, b'name')), extra_info
+ elif item_type in {C_defs.SEQ_TYPE_MOVIE, C_defs.SEQ_TYPE_SOUND_RAM, C_defs.SEQ_TYPE_SOUND_HD}:
+ yield FPElem_sequence_single(
+ basedir, level, (item_strip, b'dir', item_stripdata, b'name')), extra_info
+
+ yield from seqbase(bf_utils.iter_ListBase(block_ed.get_pointer((b'seqbase', b'first'))))
+
+ @staticmethod
+ def _from_block_LI(block, basedir, extra_info, level):
+ if block.get(b'packedfile', None):
+ return
+
+ yield FPElem_block_path(basedir, level, (block, b'name')), extra_info
+
+ # _from_block_IM --> {b'IM': _from_block_IM, ...}
+ _from_block_dict = {
+ k.rpartition("_")[2].encode('ascii'): s_fn.__func__ for k, s_fn in locals().items()
+ if isinstance(s_fn, staticmethod)
+ if k.startswith("_from_block_")
+ }
+
+
+class bf_utils:
+ @staticmethod
+ def iter_ListBase(block, next_item=b'next'):
+ while block:
+ yield block
+ block = block.file.find_block_from_offset(block[next_item])
+
+ def iter_array(block, length=-1):
+ assert(block.code == b'DATA')
+ import blendfile
+ import os
+ handle = block.file.handle
+ header = block.file.header
+
+ for i in range(length):
+ block.file.handle.seek(block.file_offset + (header.pointer_size * i), os.SEEK_SET)
+ offset = blendfile.DNA_IO.read_pointer(handle, header)
+ sub_block = block.file.find_block_from_offset(offset)
+ yield sub_block
+
+
+# -----------------------------------------------------------------------------
+# ID Expand
+
+class ExpandID:
+ # fake module
+ #
+ # TODO:
+ #
+ # Array lookups here are _WAY_ too complicated,
+ # we need some nicer way to represent pointer indirection (easy like in C!)
+ # but for now, use what we have.
+ #
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwargs):
+ raise RuntimeError("%s should not be instantiated" % cls)
+
+ @staticmethod
+ def _expand_generic_material(block):
+ array_len = block.get(b'totcol')
+ if array_len != 0:
+ array = block.get_pointer(b'mat')
+ for sub_block in bf_utils.iter_array(array, array_len):
+ yield sub_block
+
+ @staticmethod
+ def _expand_generic_mtex(block):
+ field = block.dna_type.field_from_name[b'mtex']
+ array_len = field.dna_size // block.file.header.pointer_size
+
+ for i in range(array_len):
+ item = block.get_pointer((b'mtex', i))
+ if item:
+ yield item.get_pointer(b'tex')
+ yield item.get_pointer(b'object')
+
+ @staticmethod
+ def _expand_generic_nodetree(block):
+ assert(block.dna_type.dna_type_id == b'bNodeTree')
+
+ sdna_index_bNode = block.file.sdna_index_from_id[b'bNode']
+ for item in bf_utils.iter_ListBase(block.get_pointer((b'nodes', b'first'))):
+ item_type = item.get(b'type', sdna_index_refine=sdna_index_bNode)
+
+ if item_type != 221: # CMP_NODE_R_LAYERS
+ yield item.get_pointer(b'id', sdna_index_refine=sdna_index_bNode)
+
+ def _expand_generic_nodetree_id(block):
+ block_ntree = block.get_pointer(b'nodetree', None)
+ if block_ntree is not None:
+ yield from ExpandID._expand_generic_nodetree(block_ntree)
+
+ @staticmethod
+ def _expand_generic_animdata(block):
+ block_adt = block.get_pointer(b'adt')
+ if block_adt:
+ yield block_adt.get_pointer(b'action')
+ # TODO, NLA
+
+ @staticmethod
+ def expand_OB(block): # 'Object'
+ yield from ExpandID._expand_generic_animdata(block)
+ yield from ExpandID._expand_generic_material(block)
+
+ has_dup_group = False
+ yield block.get_pointer(b'data')
+ if block[b'transflag'] & C_defs.OB_DUPLIGROUP:
+ dup_group = block.get_pointer(b'dup_group')
+ if dup_group is not None:
+ has_dup_group = True
+ yield dup_group
+ del dup_group
+
+ yield block.get_pointer(b'proxy')
+ yield block.get_pointer(b'proxy_group')
+
+ if USE_ALEMBIC_BRANCH:
+ if has_dup_group:
+ sdna_index_CacheLibrary = block.file.sdna_index_from_id.get(b'CacheLibrary')
+ if sdna_index_CacheLibrary is not None:
+ yield block.get_pointer(b'cache_library')
+
+ # 'ob->pose->chanbase[...].custom'
+ block_pose = block.get_pointer(b'pose')
+ if block_pose is not None:
+ assert(block_pose.dna_type.dna_type_id == b'bPose')
+ sdna_index_bPoseChannel = block_pose.file.sdna_index_from_id[b'bPoseChannel']
+ for item in bf_utils.iter_ListBase(block_pose.get_pointer((b'chanbase', b'first'))):
+ item_custom = item.get_pointer(b'custom', sdna_index_refine=sdna_index_bPoseChannel)
+ if item_custom is not None:
+ yield item_custom
+        # Expand the object's 'ParticleSettings' via:
+ # 'ob->particlesystem[...].part'
+ sdna_index_ParticleSystem = block.file.sdna_index_from_id.get(b'ParticleSystem')
+ if sdna_index_ParticleSystem is not None:
+ for item in bf_utils.iter_ListBase(
+ block.get_pointer((b'particlesystem', b'first'))):
+ item_part = item.get_pointer(b'part', sdna_index_refine=sdna_index_ParticleSystem)
+ if item_part is not None:
+ yield item_part
+
+ @staticmethod
+ def expand_ME(block): # 'Mesh'
+ yield from ExpandID._expand_generic_animdata(block)
+ yield from ExpandID._expand_generic_material(block)
+ yield block.get_pointer(b'texcomesh')
+ # TODO, TexFace? - it will be slow, we could simply ignore :S
+
+ @staticmethod
+ def expand_CU(block): # 'Curve'
+ yield from ExpandID._expand_generic_animdata(block)
+ yield from ExpandID._expand_generic_material(block)
+
+ sub_block = block.get_pointer(b'vfont')
+ if sub_block is not None:
+ yield sub_block
+ yield block.get_pointer(b'vfontb')
+ yield block.get_pointer(b'vfonti')
+ yield block.get_pointer(b'vfontbi')
+
+ yield block.get_pointer(b'bevobj')
+ yield block.get_pointer(b'taperobj')
+ yield block.get_pointer(b'textoncurve')
+
+ @staticmethod
+ def expand_MB(block): # 'MBall'
+ yield from ExpandID._expand_generic_animdata(block)
+ yield from ExpandID._expand_generic_material(block)
+
+ @staticmethod
+ def expand_AR(block): # 'bArmature'
+ yield from ExpandID._expand_generic_animdata(block)
+
+ @staticmethod
+ def expand_LA(block): # 'Lamp'
+ yield from ExpandID._expand_generic_animdata(block)
+ yield from ExpandID._expand_generic_nodetree_id(block)
+ yield from ExpandID._expand_generic_mtex(block)
+
+ @staticmethod
+ def expand_MA(block): # 'Material'
+ yield from ExpandID._expand_generic_animdata(block)
+ yield from ExpandID._expand_generic_nodetree_id(block)
+ yield from ExpandID._expand_generic_mtex(block)
+
+ yield block.get_pointer(b'group')
+
+ @staticmethod
+ def expand_TE(block): # 'Tex'
+ yield from ExpandID._expand_generic_animdata(block)
+ yield from ExpandID._expand_generic_nodetree_id(block)
+ yield block.get_pointer(b'ima')
+
+ @staticmethod
+ def expand_WO(block): # 'World'
+ yield from ExpandID._expand_generic_animdata(block)
+ yield from ExpandID._expand_generic_nodetree_id(block)
+ yield from ExpandID._expand_generic_mtex(block)
+
+ @staticmethod
+ def expand_NT(block): # 'bNodeTree'
+ yield from ExpandID._expand_generic_animdata(block)
+ yield from ExpandID._expand_generic_nodetree(block)
+
+ @staticmethod
+ def expand_PA(block): # 'ParticleSettings'
+ yield from ExpandID._expand_generic_animdata(block)
+ block_ren_as = block[b'ren_as']
+ if block_ren_as == C_defs.PART_DRAW_GR:
+ yield block.get_pointer(b'dup_group')
+ elif block_ren_as == C_defs.PART_DRAW_OB:
+ yield block.get_pointer(b'dup_ob')
+
+ @staticmethod
+ def expand_SC(block): # 'Scene'
+ yield from ExpandID._expand_generic_animdata(block)
+ yield from ExpandID._expand_generic_nodetree_id(block)
+ yield block.get_pointer(b'camera')
+ yield block.get_pointer(b'world')
+ yield block.get_pointer(b'set', None)
+ yield block.get_pointer(b'clip', None)
+
+ sdna_index_Base = block.file.sdna_index_from_id[b'Base']
+ for item in bf_utils.iter_ListBase(block.get_pointer((b'base', b'first'))):
+ yield item.get_pointer(b'object', sdna_index_refine=sdna_index_Base)
+
+ block_ed = block.get_pointer(b'ed')
+ if block_ed is not None:
+ sdna_index_Sequence = block.file.sdna_index_from_id[b'Sequence']
+
+ def seqbase(someseq):
+ for item in someseq:
+ item_type = item.get(b'type', sdna_index_refine=sdna_index_Sequence)
+
+ if item_type >= C_defs.SEQ_TYPE_EFFECT:
+ pass
+ elif item_type == C_defs.SEQ_TYPE_META:
+ yield from seqbase(bf_utils.iter_ListBase(
+                        item.get_pointer((b'seqbase', b'first'), sdna_index_refine=sdna_index_Sequence)))
+ else:
+ if item_type == C_defs.SEQ_TYPE_SCENE:
+ yield item.get_pointer(b'scene')
+ elif item_type == C_defs.SEQ_TYPE_MOVIECLIP:
+ yield item.get_pointer(b'clip')
+ elif item_type == C_defs.SEQ_TYPE_MASK:
+ yield item.get_pointer(b'mask')
+ elif item_type == C_defs.SEQ_TYPE_SOUND_RAM:
+ yield item.get_pointer(b'sound')
+
+ yield from seqbase(bf_utils.iter_ListBase(
+ block_ed.get_pointer((b'seqbase', b'first'))))
+
+ @staticmethod
+ def expand_GR(block): # 'Group'
+ sdna_index_GroupObject = block.file.sdna_index_from_id[b'GroupObject']
+ for item in bf_utils.iter_ListBase(block.get_pointer((b'gobject', b'first'))):
+ yield item.get_pointer(b'ob', sdna_index_refine=sdna_index_GroupObject)
+
+ # expand_GR --> {b'GR': expand_GR, ...}
+ expand_funcs = {
+ k.rpartition("_")[2].encode('ascii'): s_fn.__func__ for k, s_fn in locals().items()
+ if isinstance(s_fn, staticmethod)
+ if k.startswith("expand_")
+ }
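+
+    # Usage sketch: dispatch on a block's two-character ID code,
+    # e.g. b'OB' for objects (yields may include None for unset pointers):
+    #
+    #     expand_fn = ExpandID.expand_funcs.get(b'OB')
+    #     if expand_fn is not None:
+    #         for sub_block in expand_fn(block):
+    #             ...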
+
+
+# -----------------------------------------------------------------------------
+# Packing Utility
+
+
+class utils:
+ # fake module
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwargs):
+ raise RuntimeError("%s should not be instantiated" % cls)
+
+ @staticmethod
+ def abspath(path, start, library=None):
+ import os
+ if path.startswith(b'//'):
+ # if library:
+ # start = os.path.dirname(abspath(library.filepath))
+ return os.path.join(start, path[2:])
+ return path
+
+ if __import__("os").sep == '/':
+ @staticmethod
+ def compatpath(path):
+ return path.replace(b'\\', b'/')
+ else:
+ @staticmethod
+ def compatpath(path):
+ # keep '//'
+ return path[:2] + path[2:].replace(b'/', b'\\')
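+
+    # For example, on a unix system a stored win32 path such as
+    #     b'//textures\\wood.png'
+    # becomes
+    #     b'//textures/wood.png'
+    # (the blend-relative b'//' prefix is preserved either way).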
+
+ @staticmethod
+ def splitpath(path):
+ """
+ Splits the path using either slashes
+ """
+ split1 = path.rpartition(b'/')
+ split2 = path.rpartition(b'\\')
+ if len(split1[0]) > len(split2[0]):
+ return split1
+ else:
+ return split2
+
+    @staticmethod
+    def find_sequence_paths(filepath, use_fullpath=True):
+        # supports str, byte paths
+        import os
+        basedir, filename = os.path.split(filepath)
+ if not os.path.exists(basedir):
+ return []
+
+ filename_noext, ext = os.path.splitext(filename)
+
+ from string import digits
+ if isinstance(filepath, bytes):
+ digits = digits.encode()
+ filename_nodigits = filename_noext.rstrip(digits)
+
+ if len(filename_nodigits) == len(filename_noext):
+ # input isn't from a sequence
+ return []
+
+ files = os.listdir(basedir)
+ files[:] = [
+ f for f in files
+ if f.startswith(filename_nodigits) and
+ f.endswith(ext) and
+ f[len(filename_nodigits):-len(ext) if ext else -1].isdigit()
+ ]
+ if use_fullpath:
+ files[:] = [
+ os.path.join(basedir, f) for f in files
+ ]
+
+ return files
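+
+    # Behaviour sketch (hypothetical filenames): given b'/seq/frame_0001.png'
+    # in a directory also containing b'frame_0002.png' .. b'frame_0010.png',
+    # this returns all ten paths (absolute when 'use_fullpath' is True),
+    # and [] when the input filename doesn't end in digits.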
diff --git a/io_blend_utils/blendfile_pack.py b/io_blend_utils/blendfile_pack.py
new file mode 100755
index 00000000..225a941d
--- /dev/null
+++ b/io_blend_utils/blendfile_pack.py
@@ -0,0 +1,601 @@
+#!/usr/bin/env python3
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
+from blend import blendfile_path_walker
+
+TIMEIT = False
+
+
+# ----------------------
+# debug low level output
+#
+# ... when internals _really_ fail & we want to know why
+def _dbg(text):
+ import sys
+ from utils.system import colorize
+ if type(text) is bytes:
+ text = text.decode('utf-8')
+ sys.__stdout__.write(colorize(text, color='red') + "\n")
+ sys.__stdout__.flush()
+
+
+def _relpath_remap(
+ path_src,
+ base_dir_src,
+ fp_basedir,
+ blendfile_src_dir_fakeroot,
+ ):
+
+ import os
+
+ if not os.path.isabs(path_src):
+        # 'os.path.isabs' misses absolute win32 paths (b'C:\\...')
+        # on a unix system, so accept those explicitly.
+ if len(path_src) >= 2:
+ if path_src[0] != b'/'[0] and path_src[1] == b':'[0]:
+ pass
+ else:
+ raise Exception("Internal error 'path_src' -> %r must be absolute" % path_src)
+
+ path_src = os.path.normpath(path_src)
+ path_dst = os.path.relpath(path_src, base_dir_src)
+
+ if blendfile_src_dir_fakeroot is None:
+ # /foo/../bar.png --> /foo/__/bar.png
+ path_dst = path_dst.replace(b'..', b'__')
+ path_dst = os.path.normpath(path_dst)
+ else:
+ if b'..' in path_dst:
+ # remap, relative to project root
+
+ # paths
+ path_dst = os.path.join(blendfile_src_dir_fakeroot, path_dst)
+ path_dst = os.path.normpath(path_dst)
+            # If there are still paths outside the root,
+            # they point outside the project directory. We don't support this,
+            # so name them accordingly.
+ if b'..' in path_dst:
+ # SHOULD NEVER HAPPEN
+ path_dst = path_dst.replace(b'..', b'__nonproject__')
+ path_dst = b'_' + path_dst
+
+ # _dbg(b"FINAL A: " + path_dst)
+ path_dst_final = os.path.join(os.path.relpath(base_dir_src, fp_basedir), path_dst)
+ path_dst_final = os.path.normpath(path_dst_final)
+ # _dbg(b"FINAL B: " + path_dst_final)
+
+ return path_dst, path_dst_final
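+
+# Example (hypothetical paths), with no fakeroot:
+#     _relpath_remap(b'/proj/tex/../img.png', b'/proj', b'/proj', None)
+# normalizes the source to b'/proj/img.png' and returns
+# (b'img.png', b'img.png'); paths escaping 'base_dir_src' get their
+# b'..' components renamed to b'__' so the packed layout stays self-contained.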
+
+
+def pack(
+ # store the blendfile relative to this directory, can be:
+ # os.path.dirname(blendfile_src)
+        # but in some cases we want to use a path higher up.
+ # base_dir_src,
+ blendfile_src, blendfile_dst,
+ mode='ZIP',
+ # optionally pass in the temp dir
+ base_dir_dst_temp=None,
+ paths_remap_relbase=None,
+ deps_remap=None, paths_remap=None, paths_uuid=None,
+        # load every lib's dependencies, not just the used ones.
+ all_deps=False,
+ compress_level=-1,
+ # yield reports
+ report=None,
+
+ # The project path, eg:
+ # /home/me/myproject/mysession/path/to/blend/file.blend
+ # the path would be: b'path/to/blend'
+ #
+ # This is needed so we can choose to store paths
+ # relative to project or relative to the current file.
+ #
+        # When None, _all_ paths are mapped relative to the current blend,
+ # converting: '../../bar' --> '__/__/bar'
+ # so all paths are nested and not moved outside the session path.
+ blendfile_src_dir_fakeroot=None,
+
+ # Read variations from json files.
+ use_variations=False,
+
+        # do _everything_ except writing the paths.
+ # useful if we want to calculate deps to remap but postpone applying them.
+ readonly=False,
+ # dict of binary_edits:
+ # {file: [(ofs, bytes), ...], ...}
+ # ... where the file is the relative 'packed' location.
+ binary_edits=None,
+
+        # Filename filter allowing files to be excluded from the pack;
+        # the function takes a path and returns True if the file should be included.
+ filename_filter=None,
+ ):
+ """
+ :param deps_remap: Store path deps_remap info as follows.
+ {"file.blend": {"path_new": "path_old", ...}, ...}
+
+ :type deps_remap: dict or None
+ """
+
+ # Internal details:
+ # - we copy to a temp path before operating on the blend file
+ # so we can modify in-place.
+ # - temp files are only created once, (if we never touched them before),
+ # this way, for linked libraries - a single blend file may be used
+ # multiple times, each access will apply new edits on top of the old ones.
+ # - we track which libs we have touched (using 'lib_visit' arg),
+    #   this means the same libs won't be touched many times to modify the same data,
+    #   and it also prevents cyclic loops from crashing.
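+    #
+    # Minimal usage sketch (hypothetical paths); 'pack' is a generator,
+    # so it must be iterated for any work to happen:
+    #
+    #     for msg in pack(b'/work/scene.blend', b'/tmp/scene.zip', mode='ZIP'):
+    #         print(msg, end='')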
+
+ import os
+ import sys
+
+ if sys.stdout.isatty():
+ from utils.system import colorize
+ else:
+ from utils.system import colorize_dummy as colorize
+
+ # in case this is directly from the command line or user-input
+ blendfile_src = os.path.normpath(os.path.abspath(blendfile_src))
+ blendfile_dst = os.path.normpath(os.path.abspath(blendfile_dst))
+
+ # first check args are OK
+    # fakeroot _can't_ start with a separator, since we prepend chars to it.
+ assert((blendfile_src_dir_fakeroot is None) or
+ (not blendfile_src_dir_fakeroot.startswith(os.sep.encode('ascii'))))
+
+ path_temp_files = set()
+ path_copy_files = set()
+
+ # path_temp_files --> original-location
+ path_temp_files_orig = {}
+
+ TEMP_SUFFIX = b'@'
+
+ if report is None:
+ def report(msg):
+ return msg
+
+ yield report("%s: %r...\n" % (colorize("\nscanning deps", color='bright_green'), blendfile_src))
+
+ if TIMEIT:
+ import time
+ t = time.time()
+
+ base_dir_src = os.path.dirname(blendfile_src)
+ base_dir_dst = os.path.dirname(blendfile_dst)
+ # _dbg(blendfile_src)
+ # _dbg(blendfile_dst)
+
+ if base_dir_dst_temp is None:
+ base_dir_dst_temp = base_dir_dst
+
+ if mode == 'ZIP':
+ base_dir_dst_temp = os.path.join(base_dir_dst_temp, b'__blendfile_temp__')
+ else:
+ base_dir_dst_temp = os.path.join(base_dir_dst_temp, b'__blendfile_pack__')
+
+ def temp_remap_cb(filepath, rootdir):
+ """
+ Create temp files in the destination path.
+ """
+ filepath = blendfile_path_walker.utils.compatpath(filepath)
+
+ if use_variations:
+ if blendfile_levels_dict_curr:
+ filepath = blendfile_levels_dict_curr.get(filepath, filepath)
+
+ # ...
+
+ # first remap this blend file to the location it will end up (so we can get images relative to _that_)
+ # TODO(cam) cache the results
+ fp_basedir_conv = _relpath_remap(os.path.join(rootdir, b'dummy'), base_dir_src, base_dir_src, blendfile_src_dir_fakeroot)[0]
+ fp_basedir_conv = os.path.join(base_dir_src, os.path.dirname(fp_basedir_conv))
+
+ # then get the file relative to the new location
+ filepath_tmp = _relpath_remap(filepath, base_dir_src, fp_basedir_conv, blendfile_src_dir_fakeroot)[0]
+ filepath_tmp = os.path.normpath(os.path.join(base_dir_dst_temp, filepath_tmp)) + TEMP_SUFFIX
+
+ # only overwrite once (so we can write into a path already containing files)
+ if filepath_tmp not in path_temp_files:
+ if mode != 'NONE':
+ import shutil
+ os.makedirs(os.path.dirname(filepath_tmp), exist_ok=True)
+ shutil.copy(filepath, filepath_tmp)
+ path_temp_files.add(filepath_tmp)
+ path_temp_files_orig[filepath_tmp] = filepath
+ if mode != 'NONE':
+ return filepath_tmp
+ else:
+ return filepath
+
+ # -----------------
+ # Variation Support
+ #
+ # Use a json file to allow recursive-remapping of variations.
+ #
+ # file_a.blend
+ # file_a.json '{"variations": ["tree.blue.blend", ...]}'
+ # file_a.blend -> file_b.blend
+ # file_b.blend --> tree.blend
+ #
+ # the variation of `file_a.blend` causes `file_b.blend`
+ # to link in `tree.blue.blend`
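+    #
+    # Internally this builds a reverse lookup of absolute paths, roughly
+    # (hypothetical paths):
+    #     {b'/proj/tree.blend': b'/proj/tree.blue.blend', ...}
+    # mapping each non-variation name to the variation that replaces it.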
+
+ if use_variations:
+ blendfile_levels = []
+ blendfile_levels_dict = []
+ blendfile_levels_dict_curr = {}
+
+ def blendfile_levels_rebuild():
+ # after changing blend file configurations,
+ # re-create current variation lookup table
+ blendfile_levels_dict_curr.clear()
+ for d in blendfile_levels_dict:
+ if d is not None:
+ blendfile_levels_dict_curr.update(d)
+
+ # use variations!
+ def blendfile_level_cb_enter(filepath):
+ import json
+
+ filepath_json = os.path.splitext(filepath)[0] + b".json"
+ if os.path.exists(filepath_json):
+ with open(filepath_json, encoding='utf-8') as f_handle:
+ variations = [f.encode("utf-8") for f in json.load(f_handle).get("variations")]
+ # convert to absolute paths
+ basepath = os.path.dirname(filepath)
+ variations = {
+ # Reverse lookup, from non-variation to variation we specify in this file.
+ # {"/abs/path/foo.png": "/abs/path/foo.variation.png", ...}
+ # .. where the input _is_ the variation,
+ # we just make it absolute and use the non-variation as
+ # the key to the variation value.
+ b".".join(f.rsplit(b".", 2)[0::2]): f for f_ in variations
+ for f in (os.path.normpath(os.path.join(basepath, f_)),)
+ }
+ else:
+ variations = None
+
+ blendfile_levels.append(filepath)
+ blendfile_levels_dict.append(variations)
+
+ if variations:
+ blendfile_levels_rebuild()
+
+ def blendfile_level_cb_exit(filepath):
+ blendfile_levels.pop()
+ blendfile_levels_dict.pop()
+
+ if blendfile_levels_dict_curr:
+ blendfile_levels_rebuild()
+ else:
+ blendfile_level_cb_enter = blendfile_level_cb_exit = None
+ blendfile_levels_dict_curr = None
+
+ lib_visit = {}
+ fp_blend_basename_last = b''
+
+ for fp, (rootdir, fp_blend_basename) in blendfile_path_walker.FilePath.visit_from_blend(
+ blendfile_src,
+ readonly=readonly,
+ temp_remap_cb=temp_remap_cb,
+ recursive=True,
+ recursive_all=all_deps,
+ lib_visit=lib_visit,
+ blendfile_level_cb=(
+ blendfile_level_cb_enter,
+ blendfile_level_cb_exit,
+ )
+ ):
+
+ # we could pass this in!
+ fp_blend = os.path.join(fp.basedir, fp_blend_basename)
+
+ if fp_blend_basename_last != fp_blend_basename:
+ yield report(" %s: %s\n" % (colorize("blend", color='blue'), fp_blend))
+ fp_blend_basename_last = fp_blend_basename
+
+ if binary_edits is not None:
+ # TODO, temp_remap_cb makes paths, this isn't ideal,
+ # in this case we only want to remap!
+ if mode == 'NONE':
+ tmp = temp_remap_cb(fp_blend, base_dir_src)
+ tmp = os.path.relpath(tmp, base_dir_src)
+ else:
+ tmp = temp_remap_cb(fp_blend, base_dir_src)
+ tmp = os.path.relpath(tmp[:-len(TEMP_SUFFIX)], base_dir_dst_temp)
+ binary_edits_curr = binary_edits.setdefault(tmp, [])
+ del tmp
+
+ # assume the path might be relative
+ path_src_orig = fp.filepath
+ path_rel = blendfile_path_walker.utils.compatpath(path_src_orig)
+ path_src = blendfile_path_walker.utils.abspath(path_rel, fp.basedir)
+ path_src = os.path.normpath(path_src)
+
+ if filename_filter and not filename_filter(path_src):
+ yield report(" %s: %r\n" % (colorize("exclude", color='yellow'), path_src))
+ continue
+
+ # apply variation (if available)
+ if use_variations:
+ if blendfile_levels_dict_curr:
+ path_src_variation = blendfile_levels_dict_curr.get(path_src)
+ if path_src_variation is not None:
+ path_src = path_src_variation
+ path_rel = os.path.join(os.path.dirname(path_rel), os.path.basename(path_src))
+ del path_src_variation
+
+            # destination path relative to the root
+ # assert(b'..' not in path_src)
+ assert(b'..' not in base_dir_src)
+
+ # first remap this blend file to the location it will end up (so we can get images relative to _that_)
+ # TODO(cam) cache the results
+ fp_basedir_conv = _relpath_remap(fp_blend, base_dir_src, base_dir_src, blendfile_src_dir_fakeroot)[0]
+ fp_basedir_conv = os.path.join(base_dir_src, os.path.dirname(fp_basedir_conv))
+
+ # then get the file relative to the new location
+ path_dst, path_dst_final = _relpath_remap(path_src, base_dir_src, fp_basedir_conv, blendfile_src_dir_fakeroot)
+
+ path_dst = os.path.join(base_dir_dst, path_dst)
+
+ path_dst_final = b'//' + path_dst_final
+
+ # Assign direct or add to edit-list (to apply later)
+ if not readonly:
+ fp.filepath = path_dst_final
+ if binary_edits is not None:
+ fp.filepath_assign_edits(path_dst_final, binary_edits_curr)
+
+ # add to copy-list
+ # never copy libs (handled separately)
+ if not isinstance(fp, blendfile_path_walker.FPElem_block_path) or fp.userdata[0].code != b'LI':
+ path_copy_files.add((path_src, path_dst))
+
+ for file_list in (
+ blendfile_path_walker.utils.find_sequence_paths(path_src) if fp.is_sequence else (),
+ fp.files_siblings(),
+ ):
+
+ _src_dir = os.path.dirname(path_src)
+ _dst_dir = os.path.dirname(path_dst)
+ path_copy_files.update(
+ {(os.path.join(_src_dir, f), os.path.join(_dst_dir, f))
+ for f in file_list
+ })
+ del _src_dir, _dst_dir
+
+ if deps_remap is not None:
+ # this needs to become JSON later... ugh, need to use strings
+ deps_remap.setdefault(
+ fp_blend_basename.decode('utf-8'),
+ {})[path_dst_final.decode('utf-8')] = path_src_orig.decode('utf-8')
+
+ del lib_visit, fp_blend_basename_last
+
+ if TIMEIT:
+ print(" Time: %.4f\n" % (time.time() - t))
+
+ yield report(("%s: %d files\n") %
+ (colorize("\narchiving", color='bright_green'), len(path_copy_files) + 1))
+
+ # handle deps_remap and file renaming
+ if deps_remap is not None:
+ blendfile_src_basename = os.path.basename(blendfile_src).decode('utf-8')
+ blendfile_dst_basename = os.path.basename(blendfile_dst).decode('utf-8')
+
+ if blendfile_src_basename != blendfile_dst_basename:
+ if mode == 'FILE':
+ deps_remap[blendfile_dst_basename] = deps_remap[blendfile_src_basename]
+ del deps_remap[blendfile_src_basename]
+ del blendfile_src_basename, blendfile_dst_basename
+
+ # store path mapping {dst: src}
+ if paths_remap is not None:
+
+ if paths_remap_relbase is not None:
+ def relbase(fn):
+ return os.path.relpath(fn, paths_remap_relbase)
+ else:
+ def relbase(fn):
+ return fn
+
+ for src, dst in path_copy_files:
+ # TODO. relative to project-basepath
+ paths_remap[os.path.relpath(dst, base_dir_dst).decode('utf-8')] = relbase(src).decode('utf-8')
+ # main file XXX, should have better way!
+ paths_remap[os.path.basename(blendfile_src).decode('utf-8')] = relbase(blendfile_src).decode('utf-8')
+
+ # blend libs
+ for dst in path_temp_files:
+ src = path_temp_files_orig[dst]
+ k = os.path.relpath(dst[:-len(TEMP_SUFFIX)], base_dir_dst_temp).decode('utf-8')
+ paths_remap[k] = relbase(src).decode('utf-8')
+ del k
+
+ del relbase
+
+ if paths_uuid is not None:
+ from utils.system import uuid_from_file
+
+ for src, dst in path_copy_files:
+ # reports are handled again, later on.
+ if os.path.exists(src):
+ paths_uuid[os.path.relpath(dst, base_dir_dst).decode('utf-8')] = uuid_from_file(src)
+ # XXX, better way to store temp target
+ blendfile_dst_tmp = temp_remap_cb(blendfile_src, base_dir_src)
+ paths_uuid[os.path.basename(blendfile_src).decode('utf-8')] = uuid_from_file(blendfile_dst_tmp)
+
+ # blend libs
+ for dst in path_temp_files:
+ k = os.path.relpath(dst[:-len(TEMP_SUFFIX)], base_dir_dst_temp).decode('utf-8')
+ if k not in paths_uuid:
+ if mode == 'NONE':
+ dst = path_temp_files_orig[dst]
+ paths_uuid[k] = uuid_from_file(dst)
+ del k
+
+ del blendfile_dst_tmp
+ del uuid_from_file
+
+ # --------------------
+ # Handle File Copy/Zip
+
+ if mode == 'FILE':
+ import shutil
+ blendfile_dst_tmp = temp_remap_cb(blendfile_src, base_dir_src)
+
+ shutil.move(blendfile_dst_tmp, blendfile_dst)
+ path_temp_files.remove(blendfile_dst_tmp)
+
+ # strip TEMP_SUFFIX
+ for fn in path_temp_files:
+ shutil.move(fn, fn[:-len(TEMP_SUFFIX)])
+
+ for src, dst in path_copy_files:
+            assert(not dst.endswith(b'.blend'))
+
+ # in rare cases a filepath could point to a directory
+ if (not os.path.exists(src)) or os.path.isdir(src):
+ yield report(" %s: %r\n" % (colorize("source missing", color='red'), src))
+ else:
+ yield report(" %s: %r -> %r\n" % (colorize("copying", color='blue'), src, dst))
+ shutil.copy(src, dst)
+
+ yield report(" %s: %r\n" % (colorize("written", color='green'), blendfile_dst))
+
+ elif mode == 'ZIP':
+ import shutil
+ import zipfile
+
+ # not awesome!
+ import zlib
+ assert(compress_level in range(-1, 10))
+ _compress_level_orig = zlib.Z_DEFAULT_COMPRESSION
+ zlib.Z_DEFAULT_COMPRESSION = compress_level
+ _compress_mode = zipfile.ZIP_STORED if (compress_level == 0) else zipfile.ZIP_DEFLATED
+ if _compress_mode == zipfile.ZIP_STORED:
+ def is_compressed_filetype(fn):
+ return False
+ else:
+ from utils.system import is_compressed_filetype
+
+ with zipfile.ZipFile(blendfile_dst.decode('utf-8'), 'w', _compress_mode) as zip_handle:
+ for fn in path_temp_files:
+ yield report(" %s: %r -> <archive>\n" % (colorize("copying", color='blue'), fn))
+ zip_handle.write(
+ fn.decode('utf-8'),
+                    arcname=os.path.relpath(fn[:-len(TEMP_SUFFIX)], base_dir_dst_temp).decode('utf-8'),
+ )
+ os.remove(fn)
+
+ shutil.rmtree(base_dir_dst_temp)
+
+ for src, dst in path_copy_files:
+ assert(not dst.endswith(b'.blend'))
+
+ # in rare cases a filepath could point to a directory
+ if (not os.path.exists(src)) or os.path.isdir(src):
+ yield report(" %s: %r\n" % (colorize("source missing", color='red'), src))
+ else:
+ yield report(" %s: %r -> <archive>\n" % (colorize("copying", color='blue'), src))
+ zip_handle.write(
+ src.decode('utf-8'),
+ arcname=os.path.relpath(dst, base_dir_dst).decode('utf-8'),
+ compress_type=zipfile.ZIP_STORED if is_compressed_filetype(dst) else _compress_mode,
+ )
+
+ zlib.Z_DEFAULT_COMPRESSION = _compress_level_orig
+ del _compress_level_orig, _compress_mode
+
+ yield report(" %s: %r\n" % (colorize("written", color='green'), blendfile_dst))
+ elif mode == 'NONE':
+ pass
+ else:
+ raise Exception("%s not a known mode" % mode)
+
+
+def create_argparse():
+ import os
+ import argparse
+
+ usage_text = (
+ "Run this script to extract blend-files(s) to a destination path: " +
+ os.path.basename(__file__) +
+ " --input=FILE --output=FILE [options]")
+
+ parser = argparse.ArgumentParser(description=usage_text)
+
+ parser.add_argument(
+ "-i", "--input", dest="path_src", metavar='FILE', required=True,
+ help="Input blend file",
+ )
+ parser.add_argument(
+ "-o", "--output", dest="path_dst", metavar='DIR', required=True,
+ help="Output file",
+ )
+ parser.add_argument(
+ "-m", "--mode", dest="mode", metavar='MODE', required=False,
+ choices=('FILE', 'ZIP'), default='ZIP',
+ help="Type of archive to write into",
+ )
+ parser.add_argument(
+ "-q", "--quiet", dest="use_quiet", action='store_true', required=False,
+ help="Suppress status output",
+ )
+ parser.add_argument(
+ "-t", "--temp", dest="temp_path", metavar='DIR', required=False,
+ help="Override the default temp directory",
+ )
+
+ return parser
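+
+# Example invocation (hypothetical paths), matching the arguments above:
+#
+#     ./blendfile_pack.py --input=/work/scene.blend --output=/tmp/scene.zip --mode=ZIP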
+
+
+def main():
+ import sys
+
+ parser = create_argparse()
+ args = parser.parse_args(sys.argv[1:])
+
+ if args.use_quiet:
+ def report(msg):
+ pass
+ else:
+ def report(msg):
+ sys.stdout.write(msg)
+ sys.stdout.flush()
+
+ for msg in pack(
+ args.path_src.encode('utf-8'),
+ args.path_dst.encode('utf-8'),
+ mode=args.mode,
+ base_dir_dst_temp=(
+ args.temp_path.encode('utf-8')
+ if args.temp_path else None),
+ ):
+ report(msg)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/io_blend_utils/utils/system.py b/io_blend_utils/utils/system.py
new file mode 100644
index 00000000..970a6464
--- /dev/null
+++ b/io_blend_utils/utils/system.py
@@ -0,0 +1,105 @@
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
+def colorize_dummy(msg, color=None):
+ return msg
+
+_USE_COLOR = True
+if _USE_COLOR:
+ color_codes = {
+ 'black': '\033[0;30m',
+ 'bright_gray': '\033[0;37m',
+ 'blue': '\033[0;34m',
+ 'white': '\033[1;37m',
+ 'green': '\033[0;32m',
+ 'bright_blue': '\033[1;34m',
+ 'cyan': '\033[0;36m',
+ 'bright_green': '\033[1;32m',
+ 'red': '\033[0;31m',
+ 'bright_cyan': '\033[1;36m',
+ 'purple': '\033[0;35m',
+ 'bright_red': '\033[1;31m',
+ 'yellow': '\033[0;33m',
+        'bright_purple': '\033[1;35m',
+        'dark_gray': '\033[1;30m',
+        'bright_yellow': '\033[1;33m',
+ 'normal': '\033[0m',
+ }
+
+ def colorize(msg, color=None):
+ return (color_codes[color] + msg + color_codes['normal'])
+else:
+ colorize = colorize_dummy
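+
+# Usage sketch:
+#     colorize("done", color='green')  # -> '\033[0;32mdone\033[0m'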
+
+
+def uuid_from_file(fn, block_size=1 << 20):
+ """
+    Returns an arbitrarily sized unique ASCII string based on the file contents.
+ (exact hashing method may change).
+ """
+ with open(fn, 'rb') as f:
+ # first get the size
+ import os
+ f.seek(0, os.SEEK_END)
+ size = f.tell()
+ f.seek(0, os.SEEK_SET)
+ del os
+ # done!
+
+ import hashlib
+        sha = hashlib.new('sha512')
+        while True:
+            data = f.read(block_size)
+            if not data:
+                break
+            sha.update(data)
+    # skip the '0x' prefix
+    return hex(size)[2:] + sha.hexdigest()
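+
+# Usage sketch (hypothetical path); identical contents give identical strings:
+#     uuid_from_file('/tmp/img.png')  # -> '<size-hex><sha512-hexdigest>'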
+
+
+def is_compressed_filetype(filepath):
+ """
+ Use to check if we should compress files in a zip.
+ """
+ # for now, only include files which Blender is likely to reference
+ import os
+ assert(isinstance(filepath, bytes))
+ return os.path.splitext(filepath)[1].lower() in {
+ # images
+ b'.exr',
+ b'.jpg', b'.jpeg',
+ b'.png',
+
+ # audio
+ b'.aif', b'.aiff',
+ b'.mp3',
+ b'.ogg', b'.ogv',
+ b'.wav',
+
+ # video
+ b'.avi',
+ b'.mkv',
+ b'.mov',
+ b'.mpg', b'.mpeg',
+
+ # archives
+ # '.bz2', '.tbz',
+ # '.gz', '.tgz',
+ # '.zip',
+ }
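+
+# For example (hypothetical paths):
+#     is_compressed_filetype(b'/tex/wood.jpg')  # True  -> store in the zip as-is
+#     is_compressed_filetype(b'/tex/wood.tga')  # False -> deflate in the zip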
+