git.blender.org/blender-addons.git
author    Sybren A. Stüvel <sybren@stuvel.eu>  2017-06-10 11:42:03 +0300
committer Sybren A. Stüvel <sybren@stuvel.eu>  2017-06-10 11:42:03 +0300
commit    6e38b99641004ed1ef1f87c937750e8774a37c50 (patch)
tree      71c7ef2dfd68d3939d91e30ea55d257f1632cd96
parent    940bd28434a27f00eaeac9ab9503cbf804e1087f (diff)
Unpacked BAM wheel into a directory.
This avoids having binary data in the addons repository. I've also added a script that automates this task. It:

- wipes any pre-existing unpacked BAM wheel,
- unpacks the new BAM wheel,
- copies some files from the extracted wheel into the addon directly,
- updates the version number of the add-on so that it matches the version of BAM, and
- updates the path where the add-on searches for the unpacked wheel.
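The script referred to above is io_blend_utils/install_whl.py (added by this commit but not reproduced in this excerpt). As rough orientation only, the wipe/unpack/repath steps listed above could be sketched like this; the helper name and the `BAM_WHEEL_PATH` rewrite are assumptions, not the actual script:

    # Hypothetical sketch of the steps listed above -- not the real install_whl.py.
    import pathlib
    import re
    import shutil
    import zipfile

    def install_wheel(wheel: pathlib.Path, addon_dir: pathlib.Path) -> None:
        # wipe any pre-existing unpacked BAM wheel (directories named like the wheel)
        for old in addon_dir.glob('blender_bam-*.whl'):
            if old.is_dir():
                shutil.rmtree(old)

        # unpack the new wheel into a directory that keeps the .whl name
        target = addon_dir / wheel.name
        with zipfile.ZipFile(wheel) as zf:
            zf.extractall(target)

        # point the add-on at the unpacked wheel (rewrite of BAM_WHEEL_PATH is assumed)
        init_py = addon_dir / '__init__.py'
        text = init_py.read_text()
        text = re.sub(r"BAM_WHEEL_PATH = '.*'", "BAM_WHEEL_PATH = %r" % wheel.name, text)
        init_py.write_text(text)

The version-number sync and the file copies mentioned above are omitted from this sketch.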
-rw-r--r--  io_blend_utils/README.md | 17
-rw-r--r--  io_blend_utils/__init__.py | 8
-rw-r--r--  io_blend_utils/blender_bam-1.1.7-py3-none-any.whl | bin 49953 -> 0 bytes
-rw-r--r--  io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/__init__.py | 8
-rw-r--r--  io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/__main__.py | 8
-rw-r--r--  io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/blend/__init__.py | 0
-rw-r--r--  io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/blend/blendfile.py | 956
-rw-r--r--  io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/blend/blendfile_copy.py | 114
-rw-r--r--  io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/blend/blendfile_pack.py | 673
-rw-r--r--  io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/blend/blendfile_pack_restore.py | 143
-rw-r--r--  io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/blend/blendfile_path_remap.py | 280
-rw-r--r--  io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/blend/blendfile_path_walker.py | 953
-rw-r--r--  io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/cli.py | 2018
-rw-r--r--  io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/pack.py | 10
-rw-r--r--  io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/utils/__init__.py | 0
-rw-r--r--  io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/utils/system.py | 143
-rw-r--r--  io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/blender_bam-1.1.7.dist-info/DESCRIPTION.rst | 3
-rw-r--r--  io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/blender_bam-1.1.7.dist-info/METADATA | 24
-rw-r--r--  io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/blender_bam-1.1.7.dist-info/RECORD | 20
-rw-r--r--  io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/blender_bam-1.1.7.dist-info/WHEEL | 5
-rw-r--r--  io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/blender_bam-1.1.7.dist-info/entry_points.txt | 3
-rw-r--r--  io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/blender_bam-1.1.7.dist-info/metadata.json | 1
-rw-r--r--  io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/blender_bam-1.1.7.dist-info/top_level.txt | 1
-rwxr-xr-x  io_blend_utils/install_whl.py | 129
24 files changed, 5507 insertions, 10 deletions
diff --git a/io_blend_utils/README.md b/io_blend_utils/README.md
index 436ec684..e3f3b0a0 100644
--- a/io_blend_utils/README.md
+++ b/io_blend_utils/README.md
@@ -10,17 +10,23 @@ Bundling BAM with Blender
-------------------------
Blender is bundled with a version of [BAM](https://pypi.python.org/pypi/blender-bam/).
-To update this version, first build a new `wheel <http://pythonwheels.com/>`_ file in
+To update this version, first build a new [wheel](http://pythonwheels.com/) file in
BAM itself:
python3 setup.py bdist_wheel
-Then copy this wheel to Blender:
+Since we do not want to have binaries in the addons repository, unpack this wheel to Blender
+by running:
- cp dist/blender_bam-xxx.whl /path/to/blender/release/scripts/addons/io_blend_utils/
+ python3 install_whl.py /path/to/blender-asset-manager/dist/blender_bam-xxx.whl
-Remove old wheels that are still in `/path/to/blender/release/scripts/addons/io_blend_utils/`
-before committing.
+This script also updates `__init__.py` to update the version number and path of the extracted
+wheel, and removes any pre-existing older versions of the BAM wheels.
+
+The version number and `.whl` extension are maintained in the directory name on purpose.
+This way it is clear that it is not a directory to import directly into Blender itself.
+Furthermore, I (Sybren) hope that it helps to get changes made in the addons repository
+back into the BAM repository.
Running bam-pack from the wheel
@@ -29,4 +35,3 @@ Running bam-pack from the wheel
This is the way that Blender runs bam-pack:
PYTHONPATH=./path/to/blender_bam-xxx.whl python3 -m bam.pack
-
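The same PYTHONPATH mechanism can also be driven from Python. A minimal sketch, assuming the -i/-o/-m flags defined by create_argparse() in blendfile_pack.py (shown further below) are what `bam.pack` ultimately parses; all paths are placeholders:

    # Sketch: run bam-pack with the unpacked wheel directory on PYTHONPATH.
    import os
    import subprocess
    import sys

    env = os.environ.copy()
    env['PYTHONPATH'] = '/path/to/blender_bam-xxx.whl'  # the unpacked wheel directory

    subprocess.check_call(
        [sys.executable, '-m', 'bam.pack',
         '-i', '/project/shot.blend', '-o', '/tmp/shot.zip', '-m', 'ZIP'],
        env=env)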
diff --git a/io_blend_utils/__init__.py b/io_blend_utils/__init__.py
index a00366ac..03cae55c 100644
--- a/io_blend_utils/__init__.py
+++ b/io_blend_utils/__init__.py
@@ -29,7 +29,7 @@ bl_info = {
"category": "Import-Export",
}
-BAM_WHEEL_FILE = 'blender_bam-1.1.7-py3-none-any.whl'
+BAM_WHEEL_PATH = 'blender_bam-1.1.7-py3-none-any.whl'
import logging
@@ -117,11 +117,11 @@ def pythonpath() -> str:
log = logging.getLogger('%s.pythonpath' % __name__)
# Find the wheel to run.
- wheelpath = pathlib.Path(__file__).with_name(BAM_WHEEL_FILE)
+ wheelpath = pathlib.Path(__file__).with_name(BAM_WHEEL_PATH)
if not wheelpath.exists():
- raise EnvironmentError('Wheel file %s does not exist!' % wheelpath)
+ raise EnvironmentError('Wheel %s does not exist!' % wheelpath)
- log.info('Using wheel file %s to run BAM-Pack', wheelpath)
+ log.info('Using wheel %s to run BAM-Pack', wheelpath)
# Update the PYTHONPATH to include that wheel.
existing_pypath = os.environ.get('PYTHONPATH', '')
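The remainder of pythonpath() falls outside this hunk; a plausible sketch of how the wheel directory and any pre-existing PYTHONPATH are combined (an assumption, not the committed function body):

    # Sketch (assumed): prepend the wheel directory to any existing PYTHONPATH.
    import os

    def build_pythonpath(wheelpath: str) -> str:
        existing_pypath = os.environ.get('PYTHONPATH', '')
        if existing_pypath:
            return os.pathsep.join((wheelpath, existing_pypath))
        return wheelpath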
diff --git a/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl
deleted file mode 100644
index 72fad694..00000000
--- a/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl
+++ /dev/null
Binary files differ
diff --git a/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/__init__.py b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/__init__.py
new file mode 100644
index 00000000..cd0a5014
--- /dev/null
+++ b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/__init__.py
@@ -0,0 +1,8 @@
+# -*- coding: utf-8 -*-
+
+__version__ = '1.1.7'
+
+if __name__ == '__main__':
+ from .cli import main
+
+ main()
diff --git a/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/__main__.py b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/__main__.py
new file mode 100644
index 00000000..c5f166b0
--- /dev/null
+++ b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/__main__.py
@@ -0,0 +1,8 @@
+"""Main module for running python -m bam.
+
+Doesn't do much, except for printing general usage information.
+"""
+
+print("The 'bam' module cannot be run directly. The following subcommand is available:")
+print()
+print("python -m bam.pack")
diff --git a/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/blend/__init__.py b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/blend/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/blend/__init__.py
diff --git a/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/blend/blendfile.py b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/blend/blendfile.py
new file mode 100644
index 00000000..e471beae
--- /dev/null
+++ b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/blend/blendfile.py
@@ -0,0 +1,956 @@
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+#
+# (c) 2009, At Mind B.V. - Jeroen Bakker
+# (c) 2014, Blender Foundation - Campbell Barton
+
+import gzip
+import logging
+import os
+import struct
+import tempfile
+
+log = logging.getLogger("blendfile")
+
+FILE_BUFFER_SIZE = 1024 * 1024
+
+
+class BlendFileError(Exception):
+ """Raised when there was an error reading/parsing a blend file."""
+
+
+# -----------------------------------------------------------------------------
+# module global routines
+#
+# read routines
+# open a filename
+# determine if the file is compressed
+# and returns a handle
+def open_blend(filename, access="rb"):
+ """Opens a blend file for reading or writing pending on the access
+ supports 2 kind of blend files. Uncompressed and compressed.
+ Known issue: does not support packaged blend files
+ """
+ handle = open(filename, access)
+ magic_test = b"BLENDER"
+ magic = handle.read(len(magic_test))
+ if magic == magic_test:
+ log.debug("normal blendfile detected")
+ handle.seek(0, os.SEEK_SET)
+ bfile = BlendFile(handle)
+ bfile.is_compressed = False
+ bfile.filepath_orig = filename
+ return bfile
+ elif magic[:2] == b'\x1f\x8b':
+ log.debug("gzip blendfile detected")
+ handle.close()
+ log.debug("decompressing started")
+ fs = gzip.open(filename, "rb")
+ data = fs.read(FILE_BUFFER_SIZE)
+ magic = data[:len(magic_test)]
+ if magic == magic_test:
+ handle = tempfile.TemporaryFile()
+ while data:
+ handle.write(data)
+ data = fs.read(FILE_BUFFER_SIZE)
+ log.debug("decompressing finished")
+ fs.close()
+ log.debug("resetting decompressed file")
+ handle.seek(os.SEEK_SET, 0)
+ bfile = BlendFile(handle)
+ bfile.is_compressed = True
+ bfile.filepath_orig = filename
+ return bfile
+ else:
+ raise BlendFileError("filetype inside gzip not a blend")
+ else:
+ raise BlendFileError("filetype not a blend or a gzip blend")
+
+
+def pad_up_4(offset):
+ return (offset + 3) & ~3
+
+
+# -----------------------------------------------------------------------------
+# module classes
+
+
+class BlendFile:
+ """
+ Blend file.
+ """
+ __slots__ = (
+ # file (result of open())
+ "handle",
+ # str (original name of the file path)
+ "filepath_orig",
+ # BlendFileHeader
+ "header",
+ # struct.Struct
+ "block_header_struct",
+ # BlendFileBlock
+ "blocks",
+ # [DNAStruct, ...]
+ "structs",
+ # dict {b'StructName': sdna_index}
+ # (where the index is an index into 'structs')
+ "sdna_index_from_id",
+ # dict {addr_old: block}
+ "block_from_offset",
+ # int
+ "code_index",
+ # bool (did we make a change)
+ "is_modified",
+ # bool (is file gzipped)
+ "is_compressed",
+ )
+
+ def __init__(self, handle):
+ log.debug("initializing reading blend-file")
+ self.handle = handle
+ self.header = BlendFileHeader(handle)
+ self.block_header_struct = self.header.create_block_header_struct()
+ self.blocks = []
+ self.code_index = {}
+ self.structs = []
+ self.sdna_index_from_id = {}
+
+ block = BlendFileBlock(handle, self)
+ while block.code != b'ENDB':
+ if block.code == b'DNA1':
+ (self.structs,
+ self.sdna_index_from_id,
+ ) = BlendFile.decode_structs(self.header, block, handle)
+ else:
+ handle.seek(block.size, os.SEEK_CUR)
+
+ self.blocks.append(block)
+ self.code_index.setdefault(block.code, []).append(block)
+
+ block = BlendFileBlock(handle, self)
+ self.is_modified = False
+ self.blocks.append(block)
+
+ if not self.structs:
+ raise BlendFileError("No DNA1 block in file, this is not a valid .blend file!")
+
+ # cache (could lazy init, in case we never use?)
+ self.block_from_offset = {block.addr_old: block for block in self.blocks if block.code != b'ENDB'}
+
+ def __repr__(self):
+ return '<%s %r>' % (self.__class__.__qualname__, self.handle)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, traceback):
+ self.close()
+
+ def find_blocks_from_code(self, code):
+ assert(type(code) == bytes)
+ if code not in self.code_index:
+ return []
+ return self.code_index[code]
+
+ def find_block_from_offset(self, offset):
+ # same as looking looping over all blocks,
+ # then checking ``block.addr_old == offset``
+ assert(type(offset) is int)
+ return self.block_from_offset.get(offset)
+
+ def close(self):
+ """
+ Close the blend file.
+ Writes the blend file to disk if changes have happened.
+ """
+ handle = self.handle
+
+ if self.is_modified:
+ if self.is_compressed:
+ log.debug("close compressed blend file")
+ handle.seek(os.SEEK_SET, 0)
+ log.debug("compressing started")
+ fs = gzip.open(self.filepath_orig, "wb")
+ data = handle.read(FILE_BUFFER_SIZE)
+ while data:
+ fs.write(data)
+ data = handle.read(FILE_BUFFER_SIZE)
+ fs.close()
+ log.debug("compressing finished")
+
+ handle.close()
+
+ def ensure_subtype_smaller(self, sdna_index_curr, sdna_index_next):
+ # never refine to a smaller type
+ if (self.structs[sdna_index_curr].size >
+ self.structs[sdna_index_next].size):
+
+ raise RuntimeError("cant refine to smaller type (%s -> %s)" %
+ (self.structs[sdna_index_curr].dna_type_id.decode('ascii'),
+ self.structs[sdna_index_next].dna_type_id.decode('ascii')))
+
+ @staticmethod
+ def decode_structs(header, block, handle):
+ """
+ DNACatalog is a catalog of all information in the DNA1 file-block
+ """
+ log.debug("building DNA catalog")
+ shortstruct = DNA_IO.USHORT[header.endian_index]
+ shortstruct2 = struct.Struct(header.endian_str + b'HH')
+ intstruct = DNA_IO.UINT[header.endian_index]
+
+ data = handle.read(block.size)
+ types = []
+ names = []
+
+ structs = []
+ sdna_index_from_id = {}
+
+ offset = 8
+ names_len = intstruct.unpack_from(data, offset)[0]
+ offset += 4
+
+ log.debug("building #%d names" % names_len)
+ for i in range(names_len):
+ tName = DNA_IO.read_data0_offset(data, offset)
+ offset = offset + len(tName) + 1
+ names.append(DNAName(tName))
+ del names_len
+
+ offset = pad_up_4(offset)
+ offset += 4
+ types_len = intstruct.unpack_from(data, offset)[0]
+ offset += 4
+ log.debug("building #%d types" % types_len)
+ for i in range(types_len):
+ dna_type_id = DNA_IO.read_data0_offset(data, offset)
+ # None will be replaced by the DNAStruct, below
+ types.append(DNAStruct(dna_type_id))
+ offset += len(dna_type_id) + 1
+
+ offset = pad_up_4(offset)
+ offset += 4
+ log.debug("building #%d type-lengths" % types_len)
+ for i in range(types_len):
+ tLen = shortstruct.unpack_from(data, offset)[0]
+ offset = offset + 2
+ types[i].size = tLen
+ del types_len
+
+ offset = pad_up_4(offset)
+ offset += 4
+
+ structs_len = intstruct.unpack_from(data, offset)[0]
+ offset += 4
+ log.debug("building #%d structures" % structs_len)
+ for sdna_index in range(structs_len):
+ d = shortstruct2.unpack_from(data, offset)
+ struct_type_index = d[0]
+ offset += 4
+ dna_struct = types[struct_type_index]
+ sdna_index_from_id[dna_struct.dna_type_id] = sdna_index
+ structs.append(dna_struct)
+
+ fields_len = d[1]
+ dna_offset = 0
+
+ for field_index in range(fields_len):
+ d2 = shortstruct2.unpack_from(data, offset)
+ field_type_index = d2[0]
+ field_name_index = d2[1]
+ offset += 4
+ dna_type = types[field_type_index]
+ dna_name = names[field_name_index]
+ if dna_name.is_pointer or dna_name.is_method_pointer:
+ dna_size = header.pointer_size * dna_name.array_size
+ else:
+ dna_size = dna_type.size * dna_name.array_size
+
+ field = DNAField(dna_type, dna_name, dna_size, dna_offset)
+ dna_struct.fields.append(field)
+ dna_struct.field_from_name[dna_name.name_only] = field
+ dna_offset += dna_size
+
+ return structs, sdna_index_from_id
+
+
+class BlendFileBlock:
+ """
+ Instance of a struct.
+ """
+ __slots__ = (
+ # BlendFile
+ "file",
+ "code",
+ "size",
+ "addr_old",
+ "sdna_index",
+ "count",
+ "file_offset",
+ "user_data",
+ )
+
+ def __str__(self):
+ return ("<%s.%s (%s), size=%d at %s>" %
+ # fields=[%s]
+ (self.__class__.__name__,
+ self.dna_type_name,
+ self.code.decode(),
+ self.size,
+ # b", ".join(f.dna_name.name_only for f in self.dna_type.fields).decode('ascii'),
+ hex(self.addr_old),
+ ))
+
+ def __init__(self, handle, bfile):
+ OLDBLOCK = struct.Struct(b'4sI')
+
+ self.file = bfile
+ self.user_data = None
+
+ data = handle.read(bfile.block_header_struct.size)
+
+ if len(data) != bfile.block_header_struct.size:
+ print("WARNING! Blend file seems to be badly truncated!")
+ self.code = b'ENDB'
+ self.size = 0
+ self.addr_old = 0
+ self.sdna_index = 0
+ self.count = 0
+ self.file_offset = 0
+ return
+ # header size can be 8, 20, or 24 bytes long
+ # 8: old blend files ENDB block (exception)
+ # 20: normal headers 32 bit platform
+ # 24: normal headers 64 bit platform
+ if len(data) > 15:
+ blockheader = bfile.block_header_struct.unpack(data)
+ self.code = blockheader[0].partition(b'\0')[0]
+ if self.code != b'ENDB':
+ self.size = blockheader[1]
+ self.addr_old = blockheader[2]
+ self.sdna_index = blockheader[3]
+ self.count = blockheader[4]
+ self.file_offset = handle.tell()
+ else:
+ self.size = 0
+ self.addr_old = 0
+ self.sdna_index = 0
+ self.count = 0
+ self.file_offset = 0
+ else:
+ blockheader = OLDBLOCK.unpack(data)
+ self.code = blockheader[0].partition(b'\0')[0]
+ self.code = DNA_IO.read_data0(blockheader[0])
+ self.size = 0
+ self.addr_old = 0
+ self.sdna_index = 0
+ self.count = 0
+ self.file_offset = 0
+
+ @property
+ def dna_type(self):
+ return self.file.structs[self.sdna_index]
+
+ @property
+ def dna_type_name(self):
+ return self.dna_type.dna_type_id.decode('ascii')
+
+ def refine_type_from_index(self, sdna_index_next):
+ assert(type(sdna_index_next) is int)
+ sdna_index_curr = self.sdna_index
+ self.file.ensure_subtype_smaller(sdna_index_curr, sdna_index_next)
+ self.sdna_index = sdna_index_next
+
+ def refine_type(self, dna_type_id):
+ assert(type(dna_type_id) is bytes)
+ self.refine_type_from_index(self.file.sdna_index_from_id[dna_type_id])
+
+ def get_file_offset(self, path,
+ default=...,
+ sdna_index_refine=None,
+ base_index=0,
+ ):
+ """
+ Return (offset, length)
+ """
+ assert(type(path) is bytes)
+
+ ofs = self.file_offset
+ if base_index != 0:
+ assert(base_index < self.count)
+ ofs += (self.size // self.count) * base_index
+ self.file.handle.seek(ofs, os.SEEK_SET)
+
+ if sdna_index_refine is None:
+ sdna_index_refine = self.sdna_index
+ else:
+ self.file.ensure_subtype_smaller(self.sdna_index, sdna_index_refine)
+
+ dna_struct = self.file.structs[sdna_index_refine]
+ field = dna_struct.field_from_path(
+ self.file.header, self.file.handle, path)
+
+ return (self.file.handle.tell(), field.dna_name.array_size)
+
+ def get(self, path,
+ default=...,
+ sdna_index_refine=None,
+ use_nil=True, use_str=True,
+ base_index=0,
+ ):
+
+ ofs = self.file_offset
+ if base_index != 0:
+ assert(base_index < self.count)
+ ofs += (self.size // self.count) * base_index
+ self.file.handle.seek(ofs, os.SEEK_SET)
+
+ if sdna_index_refine is None:
+ sdna_index_refine = self.sdna_index
+ else:
+ self.file.ensure_subtype_smaller(self.sdna_index, sdna_index_refine)
+
+ dna_struct = self.file.structs[sdna_index_refine]
+ return dna_struct.field_get(
+ self.file.header, self.file.handle, path,
+ default=default,
+ use_nil=use_nil, use_str=use_str,
+ )
+
+ def get_recursive_iter(self, path, path_root=b"",
+ default=...,
+ sdna_index_refine=None,
+ use_nil=True, use_str=True,
+ base_index=0,
+ ):
+ if path_root:
+ path_full = (
+ (path_root if type(path_root) is tuple else (path_root, )) +
+ (path if type(path) is tuple else (path, )))
+ else:
+ path_full = path
+
+ try:
+ yield (path_full, self.get(path_full, default, sdna_index_refine, use_nil, use_str, base_index))
+ except NotImplementedError as ex:
+ msg, dna_name, dna_type = ex.args
+ struct_index = self.file.sdna_index_from_id.get(dna_type.dna_type_id, None)
+ if struct_index is None:
+ yield (path_full, "<%s>" % dna_type.dna_type_id.decode('ascii'))
+ else:
+ struct = self.file.structs[struct_index]
+ for f in struct.fields:
+ yield from self.get_recursive_iter(
+ f.dna_name.name_only, path_full, default, None, use_nil, use_str, 0)
+
+ def items_recursive_iter(self):
+ for k in self.keys():
+ yield from self.get_recursive_iter(k, use_str=False)
+
+ def get_data_hash(self):
+ """
+ Generates a 'hash' that can be used instead of addr_old as block id, and that should be 'stable' across .blend
+ file load & save (i.e. it does not changes due to pointer addresses variations).
+ """
+ # TODO This implementation is most likely far from optimal... and CRC32 is not renowned as the best hashing
+ # algo either. But for now it does the job!
+ import zlib
+ def _is_pointer(self, k):
+ return self.file.structs[self.sdna_index].field_from_path(
+ self.file.header, self.file.handle, k).dna_name.is_pointer
+
+ hsh = 1
+ for k, v in self.items_recursive_iter():
+ if not _is_pointer(self, k):
+ hsh = zlib.adler32(str(v).encode(), hsh)
+ return hsh
+
+ def set(self, path, value,
+ sdna_index_refine=None,
+ ):
+
+ if sdna_index_refine is None:
+ sdna_index_refine = self.sdna_index
+ else:
+ self.file.ensure_subtype_smaller(self.sdna_index, sdna_index_refine)
+
+ dna_struct = self.file.structs[sdna_index_refine]
+ self.file.handle.seek(self.file_offset, os.SEEK_SET)
+ self.file.is_modified = True
+ return dna_struct.field_set(
+ self.file.header, self.file.handle, path, value)
+
+ # ---------------
+ # Utility get/set
+ #
+ # avoid inline pointer casting
+ def get_pointer(
+ self, path,
+ default=...,
+ sdna_index_refine=None,
+ base_index=0,
+ ):
+ if sdna_index_refine is None:
+ sdna_index_refine = self.sdna_index
+ result = self.get(path, default, sdna_index_refine=sdna_index_refine, base_index=base_index)
+
+ # default
+ if type(result) is not int:
+ return result
+
+ assert(self.file.structs[sdna_index_refine].field_from_path(
+ self.file.header, self.file.handle, path).dna_name.is_pointer)
+ if result != 0:
+ # possible (but unlikely)
+ # that this fails and returns None
+ # maybe we want to raise some exception in this case
+ return self.file.find_block_from_offset(result)
+ else:
+ return None
+
+ # ----------------------
+ # Python convenience API
+
+ # dict like access
+ def __getitem__(self, item):
+ return self.get(item, use_str=False)
+
+ def __setitem__(self, item, value):
+ self.set(item, value)
+
+ def keys(self):
+ return (f.dna_name.name_only for f in self.dna_type.fields)
+
+ def values(self):
+ for k in self.keys():
+ try:
+ yield self[k]
+ except NotImplementedError as ex:
+ msg, dna_name, dna_type = ex.args
+ yield "<%s>" % dna_type.dna_type_id.decode('ascii')
+
+ def items(self):
+ for k in self.keys():
+ try:
+ yield (k, self[k])
+ except NotImplementedError as ex:
+ msg, dna_name, dna_type = ex.args
+ yield (k, "<%s>" % dna_type.dna_type_id.decode('ascii'))
+
+
+# -----------------------------------------------------------------------------
+# Read Magic
+#
+# magic = str
+# pointer_size = int
+# is_little_endian = bool
+# version = int
+
+
+class BlendFileHeader:
+ """
+ BlendFileHeader allocates the first 12 bytes of a blend file
+ it contains information about the hardware architecture
+ """
+ __slots__ = (
+ # str
+ "magic",
+ # int 4/8
+ "pointer_size",
+ # bool
+ "is_little_endian",
+ # int
+ "version",
+ # str, used to pass to 'struct'
+ "endian_str",
+ # int, used to index common types
+ "endian_index",
+ )
+
+ def __init__(self, handle):
+ FILEHEADER = struct.Struct(b'7s1s1s3s')
+
+ log.debug("reading blend-file-header")
+ values = FILEHEADER.unpack(handle.read(FILEHEADER.size))
+ self.magic = values[0]
+ pointer_size_id = values[1]
+ if pointer_size_id == b'-':
+ self.pointer_size = 8
+ elif pointer_size_id == b'_':
+ self.pointer_size = 4
+ else:
+ assert(0)
+ endian_id = values[2]
+ if endian_id == b'v':
+ self.is_little_endian = True
+ self.endian_str = b'<'
+ self.endian_index = 0
+ elif endian_id == b'V':
+ self.is_little_endian = False
+ self.endian_index = 1
+ self.endian_str = b'>'
+ else:
+ assert(0)
+
+ version_id = values[3]
+ self.version = int(version_id)
+
+ def create_block_header_struct(self):
+ return struct.Struct(b''.join((
+ self.endian_str,
+ b'4sI',
+ b'I' if self.pointer_size == 4 else b'Q',
+ b'II',
+ )))
+
+
+class DNAName:
+ """
+ DNAName is a C-type name stored in the DNA
+ """
+ __slots__ = (
+ "name_full",
+ "name_only",
+ "is_pointer",
+ "is_method_pointer",
+ "array_size",
+ )
+
+ def __init__(self, name_full):
+ self.name_full = name_full
+ self.name_only = self.calc_name_only()
+ self.is_pointer = self.calc_is_pointer()
+ self.is_method_pointer = self.calc_is_method_pointer()
+ self.array_size = self.calc_array_size()
+
+ def __repr__(self):
+ return '%s(%r)' % (type(self).__qualname__, self.name_full)
+
+ def as_reference(self, parent):
+ if parent is None:
+ result = b''
+ else:
+ result = parent + b'.'
+
+ result = result + self.name_only
+ return result
+
+ def calc_name_only(self):
+ result = self.name_full.strip(b'*()')
+ index = result.find(b'[')
+ if index != -1:
+ result = result[:index]
+ return result
+
+ def calc_is_pointer(self):
+ return (b'*' in self.name_full)
+
+ def calc_is_method_pointer(self):
+ return (b'(*' in self.name_full)
+
+ def calc_array_size(self):
+ result = 1
+ temp = self.name_full
+ index = temp.find(b'[')
+
+ while index != -1:
+ index_2 = temp.find(b']')
+ result *= int(temp[index + 1:index_2])
+ temp = temp[index_2 + 1:]
+ index = temp.find(b'[')
+
+ return result
+
+
+class DNAField:
+ """
+ DNAField is a coupled DNAStruct and DNAName
+ and cache offset for reuse
+ """
+ __slots__ = (
+ # DNAName
+ "dna_name",
+ # tuple of 3 items
+ # [bytes (struct name), int (struct size), DNAStruct]
+ "dna_type",
+ # size on-disk
+ "dna_size",
+ # cached info (avoid looping over fields each time)
+ "dna_offset",
+ )
+
+ def __init__(self, dna_type, dna_name, dna_size, dna_offset):
+ self.dna_type = dna_type
+ self.dna_name = dna_name
+ self.dna_size = dna_size
+ self.dna_offset = dna_offset
+
+
+class DNAStruct:
+ """
+ DNAStruct is a C-type structure stored in the DNA
+ """
+ __slots__ = (
+ "dna_type_id",
+ "size",
+ "fields",
+ "field_from_name",
+ "user_data",
+ )
+
+ def __init__(self, dna_type_id):
+ self.dna_type_id = dna_type_id
+ self.fields = []
+ self.field_from_name = {}
+ self.user_data = None
+
+ def __repr__(self):
+ return '%s(%r)' % (type(self).__qualname__, self.dna_type_id)
+
+ def field_from_path(self, header, handle, path):
+ """
+ Support lookups as bytes or a tuple of bytes and optional index.
+
+ C style 'id.name' --> (b'id', b'name')
+ C style 'array[4]' --> ('array', 4)
+ """
+ if type(path) is tuple:
+ name = path[0]
+ if len(path) >= 2 and type(path[1]) is not bytes:
+ name_tail = path[2:]
+ index = path[1]
+ assert(type(index) is int)
+ else:
+ name_tail = path[1:]
+ index = 0
+ else:
+ name = path
+ name_tail = None
+ index = 0
+
+ assert(type(name) is bytes)
+
+ field = self.field_from_name.get(name)
+
+ if field is not None:
+ handle.seek(field.dna_offset, os.SEEK_CUR)
+ if index != 0:
+ if field.dna_name.is_pointer:
+ index_offset = header.pointer_size * index
+ else:
+ index_offset = field.dna_type.size * index
+ assert(index_offset < field.dna_size)
+ handle.seek(index_offset, os.SEEK_CUR)
+ if not name_tail: # None or ()
+ return field
+ else:
+ return field.dna_type.field_from_path(header, handle, name_tail)
+
+ def field_get(self, header, handle, path,
+ default=...,
+ use_nil=True, use_str=True,
+ ):
+ field = self.field_from_path(header, handle, path)
+ if field is None:
+ if default is not ...:
+ return default
+ else:
+ raise KeyError("%r not found in %r (%r)" %
+ (path, [f.dna_name.name_only for f in self.fields], self.dna_type_id))
+
+ dna_type = field.dna_type
+ dna_name = field.dna_name
+ dna_size = field.dna_size
+
+ if dna_name.is_pointer:
+ return DNA_IO.read_pointer(handle, header)
+ elif dna_type.dna_type_id == b'int':
+ if dna_name.array_size > 1:
+ return [DNA_IO.read_int(handle, header) for i in range(dna_name.array_size)]
+ return DNA_IO.read_int(handle, header)
+ elif dna_type.dna_type_id == b'short':
+ if dna_name.array_size > 1:
+ return [DNA_IO.read_short(handle, header) for i in range(dna_name.array_size)]
+ return DNA_IO.read_short(handle, header)
+ elif dna_type.dna_type_id == b'uint64_t':
+ if dna_name.array_size > 1:
+ return [DNA_IO.read_ulong(handle, header) for i in range(dna_name.array_size)]
+ return DNA_IO.read_ulong(handle, header)
+ elif dna_type.dna_type_id == b'float':
+ if dna_name.array_size > 1:
+ return [DNA_IO.read_float(handle, header) for i in range(dna_name.array_size)]
+ return DNA_IO.read_float(handle, header)
+ elif dna_type.dna_type_id == b'char':
+ if dna_size == 1:
+ # Single char, assume it's bitflag or int value, and not a string/bytes data...
+ return DNA_IO.read_char(handle, header)
+ if use_str:
+ if use_nil:
+ return DNA_IO.read_string0(handle, dna_name.array_size)
+ else:
+ return DNA_IO.read_string(handle, dna_name.array_size)
+ else:
+ if use_nil:
+ return DNA_IO.read_bytes0(handle, dna_name.array_size)
+ else:
+ return DNA_IO.read_bytes(handle, dna_name.array_size)
+ else:
+ raise NotImplementedError("%r exists but isn't pointer, can't resolve field %r" %
+ (path, dna_name.name_only), dna_name, dna_type)
+
+ def field_set(self, header, handle, path, value):
+ assert(type(path) == bytes)
+
+ field = self.field_from_path(header, handle, path)
+ if field is None:
+ raise KeyError("%r not found in %r" %
+ (path, [f.dna_name.name_only for f in self.fields]))
+
+ dna_type = field.dna_type
+ dna_name = field.dna_name
+
+ if dna_type.dna_type_id == b'char':
+ if type(value) is str:
+ return DNA_IO.write_string(handle, value, dna_name.array_size)
+ else:
+ return DNA_IO.write_bytes(handle, value, dna_name.array_size)
+ else:
+ raise NotImplementedError("Setting %r is not yet supported for %r" %
+ (dna_type, dna_name), dna_name, dna_type)
+
+
+class DNA_IO:
+ """
+ Module like class, for read-write utility functions.
+
+ Only stores static methods & constants.
+ """
+
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwargs):
+ raise RuntimeError("%s should not be instantiated" % cls)
+
+ @staticmethod
+ def write_string(handle, astring, fieldlen):
+ assert(isinstance(astring, str))
+ if len(astring) >= fieldlen:
+ stringw = astring[0:fieldlen]
+ else:
+ stringw = astring + '\0'
+ handle.write(stringw.encode('utf-8'))
+
+ @staticmethod
+ def write_bytes(handle, astring, fieldlen):
+ assert(isinstance(astring, (bytes, bytearray)))
+ if len(astring) >= fieldlen:
+ stringw = astring[0:fieldlen]
+ else:
+ stringw = astring + b'\0'
+
+ handle.write(stringw)
+
+ @staticmethod
+ def read_bytes(handle, length):
+ data = handle.read(length)
+ return data
+
+ @staticmethod
+ def read_bytes0(handle, length):
+ data = handle.read(length)
+ return DNA_IO.read_data0(data)
+
+ @staticmethod
+ def read_string(handle, length):
+ return DNA_IO.read_bytes(handle, length).decode('utf-8')
+
+ @staticmethod
+ def read_string0(handle, length):
+ return DNA_IO.read_bytes0(handle, length).decode('utf-8')
+
+ @staticmethod
+ def read_data0_offset(data, offset):
+ add = data.find(b'\0', offset) - offset
+ return data[offset:offset + add]
+
+ @staticmethod
+ def read_data0(data):
+ add = data.find(b'\0')
+ return data[:add]
+
+ UCHAR = struct.Struct(b'<b'), struct.Struct(b'>b')
+
+ @staticmethod
+ def read_char(handle, fileheader):
+ st = DNA_IO.UCHAR[fileheader.endian_index]
+ return st.unpack(handle.read(st.size))[0]
+
+ USHORT = struct.Struct(b'<H'), struct.Struct(b'>H')
+
+ @staticmethod
+ def read_ushort(handle, fileheader):
+ st = DNA_IO.USHORT[fileheader.endian_index]
+ return st.unpack(handle.read(st.size))[0]
+
+ SSHORT = struct.Struct(b'<h'), struct.Struct(b'>h')
+
+ @staticmethod
+ def read_short(handle, fileheader):
+ st = DNA_IO.SSHORT[fileheader.endian_index]
+ return st.unpack(handle.read(st.size))[0]
+
+ UINT = struct.Struct(b'<I'), struct.Struct(b'>I')
+
+ @staticmethod
+ def read_uint(handle, fileheader):
+ st = DNA_IO.UINT[fileheader.endian_index]
+ return st.unpack(handle.read(st.size))[0]
+
+ SINT = struct.Struct(b'<i'), struct.Struct(b'>i')
+
+ @staticmethod
+ def read_int(handle, fileheader):
+ st = DNA_IO.SINT[fileheader.endian_index]
+ return st.unpack(handle.read(st.size))[0]
+
+ FLOAT = struct.Struct(b'<f'), struct.Struct(b'>f')
+
+ @staticmethod
+ def read_float(handle, fileheader):
+ st = DNA_IO.FLOAT[fileheader.endian_index]
+ return st.unpack(handle.read(st.size))[0]
+
+ ULONG = struct.Struct(b'<Q'), struct.Struct(b'>Q')
+
+ @staticmethod
+ def read_ulong(handle, fileheader):
+ st = DNA_IO.ULONG[fileheader.endian_index]
+ return st.unpack(handle.read(st.size))[0]
+
+ @staticmethod
+ def read_pointer(handle, header):
+ """
+ reads a pointer from a file handle
+ the pointer size is given by the header (BlendFileHeader)
+ """
+ if header.pointer_size == 4:
+ st = DNA_IO.UINT[header.endian_index]
+ return st.unpack(handle.read(st.size))[0]
+ if header.pointer_size == 8:
+ st = DNA_IO.ULONG[header.endian_index]
+ return st.unpack(handle.read(st.size))[0]
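For orientation, a minimal usage sketch of the blendfile module added above; the file path is a placeholder:

    # Sketch: list object datablocks ('OB' blocks) in a .blend file.
    from bam.blend.blendfile import open_blend

    with open_blend('/path/to/some.blend') as bfile:
        for block in bfile.find_blocks_from_code(b'OB'):
            # field paths are bytes; nested fields are addressed with a tuple
            print(block.dna_type_name, block.get((b'id', b'name')))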
diff --git a/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/blend/blendfile_copy.py b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/blend/blendfile_copy.py
new file mode 100644
index 00000000..595f2b0f
--- /dev/null
+++ b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/blend/blendfile_copy.py
@@ -0,0 +1,114 @@
+#!/usr/bin/env python3
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
+"""
+A simple utility to copy blend files and their deps to a new location.
+
+Similar to packing, but doesn't attempt any path remapping.
+"""
+
+from bam.blend import blendfile_path_walker
+
+TIMEIT = False
+
+# ------------------
+# Ensure module path
+import os
+import sys
+path = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "modules"))
+if path not in sys.path:
+ sys.path.append(path)
+del os, sys, path
+# --------
+
+
+def copy_paths(
+ paths,
+ output,
+ base,
+
+ # load every libs dep, not just used deps.
+ all_deps=False,
+ # yield reports
+ report=None,
+
+ # Filename filter, allows excluding files from the pack;
+ # the function takes a string and returns True if the file should be included.
+ filename_filter=None,
+ ):
+
+ import os
+ import shutil
+
+ from bam.utils.system import colorize, is_subdir
+
+ path_copy_files = set(paths)
+
+ # Avoid walking over same libs many times
+ lib_visit = {}
+
+ yield report("Reading %d blend file(s)\n" % len(paths))
+ for blendfile_src in paths:
+ yield report(" %s: %r\n" % (colorize("blend", color='blue'), blendfile_src))
+ for fp, (rootdir, fp_blend_basename) in blendfile_path_walker.FilePath.visit_from_blend(
+ blendfile_src,
+ readonly=True,
+ recursive=True,
+ recursive_all=all_deps,
+ lib_visit=lib_visit,
+ ):
+
+ f_abs = os.path.normpath(fp.filepath_absolute)
+ path_copy_files.add(f_abs)
+
+ # Source -> Dest Map
+ path_src_dst_map = {}
+
+ for path_src in sorted(path_copy_files):
+
+ if filename_filter and not filename_filter(path_src):
+ yield report(" %s: %r\n" % (colorize("exclude", color='yellow'), path_src))
+ continue
+
+ if not os.path.exists(path_src):
+ yield report(" %s: %r\n" % (colorize("missing path", color='red'), path_src))
+ continue
+
+ if not is_subdir(path_src, base):
+ yield report(" %s: %r\n" % (colorize("external path ignored", color='red'), path_src))
+ continue
+
+ path_rel = os.path.relpath(path_src, base)
+ path_dst = os.path.join(output, path_rel)
+
+ path_src_dst_map[path_src] = path_dst
+
+ # Create directories
+ path_dst_dir = {os.path.dirname(path_dst) for path_dst in path_src_dst_map.values()}
+ yield report("Creating %d directories in %r\n" % (len(path_dst_dir), output))
+ for path_dir in sorted(path_dst_dir):
+ os.makedirs(path_dir, exist_ok=True)
+ del path_dst_dir
+
+ # Copy files
+ yield report("Copying %d files to %r\n" % (len(path_src_dst_map), output))
+ for path_src, path_dst in sorted(path_src_dst_map.items()):
+ yield report(" %s: %r -> %r\n" % (colorize("copying", color='blue'), path_src, path_dst))
+ shutil.copy(path_src, path_dst)
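A minimal usage sketch of copy_paths() as added above; the paths are placeholders, and BAM passes byte-string paths, so the sketch does too:

    # Sketch: copy one blend file plus its dependencies below /project into /backup.
    import sys
    from bam.blend.blendfile_copy import copy_paths

    def report(msg):
        sys.stdout.write(msg)
        return msg

    for _ in copy_paths([b'/project/shots/010.blend'], b'/backup', b'/project', report=report):
        pass  # iterating the generator drives the copy and prints the reports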
diff --git a/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/blend/blendfile_pack.py b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/blend/blendfile_pack.py
new file mode 100644
index 00000000..39c97ea1
--- /dev/null
+++ b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/blend/blendfile_pack.py
@@ -0,0 +1,673 @@
+#!/usr/bin/env python3
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
+import os
+import sys
+import shutil
+from bam.blend import blendfile_path_walker
+
+TIMEIT = False
+
+# ------------------
+# Ensure module path
+path = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "modules"))
+if path not in sys.path:
+ sys.path.append(path)
+del path
+# --------
+
+
+# ----------------------
+# debug low level output
+#
+# ... when internals _really_ fail & we want to know why
+def _dbg(text):
+ from bam.utils.system import colorize
+ if type(text) is bytes:
+ text = text.decode('utf-8')
+ sys.__stdout__.write(colorize(text, color='red') + "\n")
+ sys.__stdout__.flush()
+
+
+def _relpath_remap(
+ path_src,
+ base_dir_src,
+ fp_basedir,
+ blendfile_src_dir_fakeroot=None,
+ ):
+
+ if not os.path.isabs(path_src):
+ # Absolute win32 paths on a unix system
+ # cause bad issues!
+ if len(path_src) >= 2:
+ if path_src[0] != b'/'[0] and path_src[1] == b':'[0]:
+ pass
+ else:
+ raise Exception("Internal error 'path_src' -> %r must be absolute" % path_src)
+
+ path_src = os.path.normpath(path_src)
+ if os.name != "nt":
+ path_dst = os.path.relpath(path_src, base_dir_src)
+ else:
+ # exception for windows, we need to support mapping between drives
+ try:
+ path_dst = os.path.relpath(path_src, base_dir_src)
+ except ValueError:
+ # include the absolute path when the file is on a different drive.
+ path_dst = os.path.relpath(
+ os.path.join(base_dir_src, b'__' + path_src.replace(b':', b'\\')),
+ base_dir_src,
+ )
+
+ if blendfile_src_dir_fakeroot is None:
+ # /foo/../bar.png --> /foo/__/bar.png
+ path_dst = path_dst.replace(b'..', b'__')
+ path_dst = os.path.normpath(path_dst)
+ else:
+ if b'..' in path_dst:
+ # remap, relative to project root
+
+ # paths
+ path_dst = os.path.join(blendfile_src_dir_fakeroot, path_dst)
+ path_dst = os.path.normpath(path_dst)
+ # if there are paths outside the root still...
+ # This means they are outside the project directory, We dont support this,
+ # so name accordingly
+ if b'..' in path_dst:
+ # SHOULD NEVER HAPPEN
+ path_dst = path_dst.replace(b'..', b'__nonproject__')
+ path_dst = b'_' + path_dst
+
+ # _dbg(b"FINAL A: " + path_dst)
+ path_dst_final = os.path.join(os.path.relpath(base_dir_src, fp_basedir), path_dst)
+ path_dst_final = os.path.normpath(path_dst_final)
+ # _dbg(b"FINAL B: " + path_dst_final)
+
+ return path_dst, path_dst_final
+
+
+def pack(
+ # store the blendfile relative to this directory, can be:
+ # os.path.dirname(blendfile_src)
+ # but in some cases we want to use a path higher up.
+ # base_dir_src,
+ blendfile_src, blendfile_dst,
+
+ # the path to the top directory of the project's repository.
+ # the packed archive will reproduce the exact same hierarchy below that base path
+ # if set to None, it defaults to the given blendfile_src's directory.
+ # especially useful when used together with the warn_remap_externals option.
+ repository_base_path=None,
+
+ # type of archive to produce (either ZIP or plain usual directory).
+ mode='ZIP',
+
+ # optionally pass in the temp dir
+ base_dir_dst_temp=None,
+ paths_remap_relbase=None,
+ deps_remap=None, paths_remap=None, paths_uuid=None,
+ # load every libs dep, not just used deps.
+ all_deps=False,
+ compress_level=-1,
+ # yield reports
+ report=None,
+
+ # The project path, eg:
+ # /home/me/myproject/mysession/path/to/blend/file.blend
+ # the path would be: b'path/to/blend'
+ #
+ # This is needed so we can choose to store paths
+ # relative to project or relative to the current file.
+ #
+ # When None, _all_ paths are mapped relative to the current blend.
+ # converting: '../../bar' --> '__/__/bar'
+ # so all paths are nested and not moved outside the session path.
+ blendfile_src_dir_fakeroot=None,
+
+ # Read variations from json files.
+ use_variations=False,
+
+ # do _everything_ except to write the paths.
+ # useful if we want to calculate deps to remap but postpone applying them.
+ readonly=False,
+ # Warn when we found a dependency external to given repository_base_path.
+ warn_remap_externals=False,
+ # dict of binary_edits:
+ # {file: [(ofs, bytes), ...], ...}
+ # ... where the file is the relative 'packed' location.
+ binary_edits=None,
+
+ # Filename filter, allows excluding files from the pack;
+ # the function takes a string and returns True if the file should be included.
+ filename_filter=None,
+ ):
+ """
+ :param deps_remap: Store path deps_remap info as follows.
+ {"file.blend": {"path_new": "path_old", ...}, ...}
+
+ :type deps_remap: dict or None
+ """
+
+ # Internal details:
+ # - we copy to a temp path before operating on the blend file
+ # so we can modify in-place.
+ # - temp files are only created once, (if we never touched them before),
+ # this way, for linked libraries - a single blend file may be used
+ # multiple times, each access will apply new edits on top of the old ones.
+ # - we track which libs we have touched (using 'lib_visit' arg),
+ # this means that the same libs wont be touched many times to modify the same data
+ # also prevents cyclic loops from crashing.
+
+ if sys.stdout.isatty():
+ from bam.utils.system import colorize
+ else:
+ from bam.utils.system import colorize_dummy as colorize
+
+ assert isinstance(blendfile_src, bytes)
+ assert isinstance(blendfile_dst, bytes)
+
+ # in case this is directly from the command line or user-input
+ blendfile_src = os.path.normpath(os.path.abspath(blendfile_src))
+ blendfile_dst = os.path.normpath(os.path.abspath(blendfile_dst))
+ assert blendfile_src != blendfile_dst
+
+ # first check args are OK
+ # fakeroot _cant_ start with a separator, since we prepend chars to it.
+ if blendfile_src_dir_fakeroot is not None:
+ assert isinstance(blendfile_src_dir_fakeroot, bytes)
+ assert not blendfile_src_dir_fakeroot.startswith(os.sep.encode('ascii'))
+
+ path_temp_files = set()
+ path_copy_files = set()
+
+ # path_temp_files --> original-location
+ path_temp_files_orig = {}
+
+ TEMP_SUFFIX = b'@'
+
+ if report is None:
+ def report(msg):
+ return msg
+
+ yield report("%s: %r...\n" % (colorize("\nscanning deps", color='bright_green'), blendfile_src))
+
+ if TIMEIT:
+ import time
+ t = time.time()
+
+ base_dir_src = os.path.dirname(blendfile_src) if repository_base_path is None \
+ else os.path.normpath(os.path.abspath(repository_base_path))
+ base_dir_dst = os.path.dirname(blendfile_dst)
+ # _dbg(blendfile_src)
+ # _dbg(blendfile_dst)
+ assert base_dir_src != base_dir_dst
+
+ if base_dir_dst_temp is None:
+ # Always try to pack using a unique folder name.
+ import uuid
+
+ suf = 'temp' if mode == 'ZIP' else 'pack'
+
+ while True:
+ unique = uuid.uuid4().hex
+ name = '__blendfile_%s_%s__' % (unique, suf)
+ base_dir_dst_temp = os.path.join(base_dir_dst, name.encode('ascii'))
+
+ if not os.path.exists(base_dir_dst_temp):
+ break
+
+ def temp_remap_cb(filepath, rootdir):
+ """
+ Create temp files in the destination path.
+ """
+ filepath = blendfile_path_walker.utils.compatpath(filepath)
+
+ if use_variations:
+ if blendfile_levels_dict_curr:
+ filepath = blendfile_levels_dict_curr.get(filepath, filepath)
+
+ # ...
+
+ # first remap this blend file to the location it will end up (so we can get images relative to _that_)
+ # TODO(cam) cache the results
+ fp_basedir_conv = _relpath_remap(os.path.join(rootdir, b'dummy'), base_dir_src, base_dir_src, blendfile_src_dir_fakeroot)[0]
+ fp_basedir_conv = os.path.join(base_dir_src, os.path.dirname(fp_basedir_conv))
+
+ # then get the file relative to the new location
+ filepath_tmp = _relpath_remap(filepath, base_dir_src, fp_basedir_conv, blendfile_src_dir_fakeroot)[0]
+ filepath_tmp = os.path.normpath(os.path.join(base_dir_dst_temp, filepath_tmp)) + TEMP_SUFFIX
+
+ # only overwrite once (so we can write into a path already containing files)
+ if filepath_tmp not in path_temp_files:
+ if mode != 'NONE':
+ import shutil
+ os.makedirs(os.path.dirname(filepath_tmp), exist_ok=True)
+ shutil.copy(filepath, filepath_tmp)
+ path_temp_files.add(filepath_tmp)
+ path_temp_files_orig[filepath_tmp] = filepath
+ if mode != 'NONE':
+ return filepath_tmp
+ else:
+ return filepath
+
+ # -----------------
+ # Variation Support
+ #
+ # Use a json file to allow recursive-remapping of variations.
+ #
+ # file_a.blend
+ # file_a.json '{"variations": ["tree.blue.blend", ...]}'
+ # file_a.blend -> file_b.blend
+ # file_b.blend --> tree.blend
+ #
+ # the variation of `file_a.blend` causes `file_b.blend`
+ # to link in `tree.blue.blend`
+
+ if use_variations:
+ blendfile_levels = []
+ blendfile_levels_dict = []
+ blendfile_levels_dict_curr = {}
+
+ def blendfile_levels_rebuild():
+ # after changing blend file configurations,
+ # re-create current variation lookup table
+ blendfile_levels_dict_curr.clear()
+ for d in blendfile_levels_dict:
+ if d is not None:
+ blendfile_levels_dict_curr.update(d)
+
+ # use variations!
+ def blendfile_level_cb_enter(filepath):
+ import json
+
+ filepath_json = os.path.splitext(filepath)[0] + b".json"
+ if os.path.exists(filepath_json):
+ with open(filepath_json, encoding='utf-8') as f_handle:
+ variations = [f.encode("utf-8") for f in json.load(f_handle).get("variations")]
+ # convert to absolute paths
+ basepath = os.path.dirname(filepath)
+ variations = {
+ # Reverse lookup, from non-variation to variation we specify in this file.
+ # {"/abs/path/foo.png": "/abs/path/foo.variation.png", ...}
+ # .. where the input _is_ the variation,
+ # we just make it absolute and use the non-variation as
+ # the key to the variation value.
+ b".".join(f.rsplit(b".", 2)[0::2]): f for f_ in variations
+ for f in (os.path.normpath(os.path.join(basepath, f_)),)
+ }
+ else:
+ variations = None
+
+ blendfile_levels.append(filepath)
+ blendfile_levels_dict.append(variations)
+
+ if variations:
+ blendfile_levels_rebuild()
+
+ def blendfile_level_cb_exit(filepath):
+ blendfile_levels.pop()
+ blendfile_levels_dict.pop()
+
+ if blendfile_levels_dict_curr:
+ blendfile_levels_rebuild()
+ else:
+ blendfile_level_cb_enter = blendfile_level_cb_exit = None
+ blendfile_levels_dict_curr = None
+
+ lib_visit = {}
+ fp_blend_basename_last = b''
+
+ for fp, (rootdir, fp_blend_basename) in blendfile_path_walker.FilePath.visit_from_blend(
+ blendfile_src,
+ readonly=readonly,
+ temp_remap_cb=temp_remap_cb,
+ recursive=True,
+ recursive_all=all_deps,
+ lib_visit=lib_visit,
+ blendfile_level_cb=(
+ blendfile_level_cb_enter,
+ blendfile_level_cb_exit,
+ )
+ ):
+
+ # we could pass this in!
+ fp_blend = os.path.join(fp.basedir, fp_blend_basename)
+
+ if fp_blend_basename_last != fp_blend_basename:
+ yield report(" %s: %r\n" % (colorize("blend", color='blue'), fp_blend))
+ fp_blend_basename_last = fp_blend_basename
+
+ if binary_edits is not None:
+ # TODO, temp_remap_cb makes paths, this isn't ideal,
+ # in this case we only want to remap!
+ if mode == 'NONE':
+ tmp = temp_remap_cb(fp_blend, base_dir_src)
+ tmp = os.path.relpath(tmp, base_dir_src)
+ else:
+ tmp = temp_remap_cb(fp_blend, base_dir_src)
+ tmp = os.path.relpath(tmp[:-len(TEMP_SUFFIX)], base_dir_dst_temp)
+ binary_edits_curr = binary_edits.setdefault(tmp, [])
+ del tmp
+
+ # assume the path might be relative
+ path_src_orig = fp.filepath
+ path_rel = blendfile_path_walker.utils.compatpath(path_src_orig)
+ path_src = blendfile_path_walker.utils.abspath(path_rel, fp.basedir)
+ path_src = os.path.normpath(path_src)
+
+ if warn_remap_externals and b".." in os.path.relpath(path_src, base_dir_src):
+ yield report(" %s: %r\n" % (colorize("non-local", color='bright_yellow'), path_src))
+
+ if filename_filter and not filename_filter(path_src):
+ yield report(" %s: %r\n" % (colorize("exclude", color='yellow'), path_src))
+ continue
+
+ # apply variation (if available)
+ if use_variations:
+ if blendfile_levels_dict_curr:
+ path_src_variation = blendfile_levels_dict_curr.get(path_src)
+ if path_src_variation is not None:
+ path_src = path_src_variation
+ path_rel = os.path.join(os.path.dirname(path_rel), os.path.basename(path_src))
+ del path_src_variation
+
+ # destination path relative to the root
+ # assert(b'..' not in path_src)
+ assert(b'..' not in base_dir_src)
+
+ # first remap this blend file to the location it will end up (so we can get images relative to _that_)
+ # TODO(cam) cache the results
+ fp_basedir_conv = _relpath_remap(fp_blend, base_dir_src, base_dir_src, blendfile_src_dir_fakeroot)[0]
+ fp_basedir_conv = os.path.join(base_dir_src, os.path.dirname(fp_basedir_conv))
+
+ # then get the file relative to the new location
+ path_dst, path_dst_final = _relpath_remap(path_src, base_dir_src, fp_basedir_conv, blendfile_src_dir_fakeroot)
+
+ path_dst = os.path.join(base_dir_dst, path_dst)
+
+ path_dst_final = b'//' + path_dst_final
+
+ # Assign direct or add to edit-list (to apply later)
+ if not readonly:
+ fp.filepath = path_dst_final
+ if binary_edits is not None:
+ fp.filepath_assign_edits(path_dst_final, binary_edits_curr)
+
+ # add to copy-list
+ # never copy libs (handled separately)
+ if not isinstance(fp, blendfile_path_walker.FPElem_block_path) or fp.userdata[0].code != b'LI':
+ assert path_src != path_dst
+ path_copy_files.add((path_src, path_dst))
+
+ for file_list in (
+ blendfile_path_walker.utils.find_sequence_paths(path_src) if fp.is_sequence else (),
+ fp.files_siblings(),
+ ):
+
+ _src_dir = os.path.dirname(path_src)
+ _dst_dir = os.path.dirname(path_dst)
+ path_copy_files.update(
+ {(os.path.join(_src_dir, f), os.path.join(_dst_dir, f))
+ for f in file_list
+ })
+ del _src_dir, _dst_dir
+
+ if deps_remap is not None:
+ # this needs to become JSON later... ugh, need to use strings
+ deps_remap.setdefault(
+ fp_blend_basename.decode('utf-8'),
+ {})[path_dst_final.decode('utf-8')] = path_src_orig.decode('utf-8')
+
+ del lib_visit, fp_blend_basename_last
+
+ if TIMEIT:
+ print(" Time: %.4f\n" % (time.time() - t))
+
+ yield report(("%s: %d files\n") %
+ (colorize("\narchiving", color='bright_green'), len(path_copy_files) + 1))
+
+ # handle deps_remap and file renaming
+ if deps_remap is not None:
+ blendfile_src_basename = os.path.basename(blendfile_src).decode('utf-8')
+ blendfile_dst_basename = os.path.basename(blendfile_dst).decode('utf-8')
+
+ if blendfile_src_basename != blendfile_dst_basename:
+ if mode == 'FILE':
+ deps_remap[blendfile_dst_basename] = deps_remap[blendfile_src_basename]
+ del deps_remap[blendfile_src_basename]
+ del blendfile_src_basename, blendfile_dst_basename
+
+ # store path mapping {dst: src}
+ if paths_remap is not None:
+
+ if paths_remap_relbase is not None:
+ def relbase(fn):
+ return os.path.relpath(fn, paths_remap_relbase)
+ else:
+ def relbase(fn):
+ return fn
+
+ for src, dst in path_copy_files:
+ # TODO. relative to project-basepath
+ paths_remap[os.path.relpath(dst, base_dir_dst).decode('utf-8')] = relbase(src).decode('utf-8')
+ # main file XXX, should have better way!
+ paths_remap[os.path.basename(blendfile_src).decode('utf-8')] = relbase(blendfile_src).decode('utf-8')
+
+ # blend libs
+ for dst in path_temp_files:
+ src = path_temp_files_orig[dst]
+ k = os.path.relpath(dst[:-len(TEMP_SUFFIX)], base_dir_dst_temp).decode('utf-8')
+ paths_remap[k] = relbase(src).decode('utf-8')
+ del k
+
+ del relbase
+
+ if paths_uuid is not None:
+ from bam.utils.system import uuid_from_file
+
+ for src, dst in path_copy_files:
+ # reports are handled again, later on.
+ if os.path.exists(src):
+ paths_uuid[os.path.relpath(dst, base_dir_dst).decode('utf-8')] = uuid_from_file(src)
+ # XXX, better way to store temp target
+ blendfile_dst_tmp = temp_remap_cb(blendfile_src, base_dir_src)
+ paths_uuid[os.path.basename(blendfile_src).decode('utf-8')] = uuid_from_file(blendfile_dst_tmp)
+
+ # blend libs
+ for dst in path_temp_files:
+ k = os.path.relpath(dst[:-len(TEMP_SUFFIX)], base_dir_dst_temp).decode('utf-8')
+ if k not in paths_uuid:
+ if mode == 'NONE':
+ dst = path_temp_files_orig[dst]
+ paths_uuid[k] = uuid_from_file(dst)
+ del k
+
+ del blendfile_dst_tmp
+ del uuid_from_file
+
+ # --------------------
+ # Handle File Copy/Zip
+
+ if mode == 'FILE':
+ blendfile_dst_tmp = temp_remap_cb(blendfile_src, base_dir_src)
+
+ shutil.move(blendfile_dst_tmp, blendfile_dst)
+ path_temp_files.remove(blendfile_dst_tmp)
+
+ # strip TEMP_SUFFIX and move to the destination directory.
+ for fn in path_temp_files:
+ dst_rel, _ = _relpath_remap(fn[:-len(TEMP_SUFFIX)], base_dir_dst_temp, base_dir_dst, None)
+ dst = os.path.join(base_dir_dst, dst_rel)
+ yield report(" %s: %r -> %r\n" % (colorize("moving", color='blue'), fn, dst))
+ os.makedirs(os.path.dirname(dst), exist_ok=True)
+ shutil.move(fn, dst)
+
+ for src, dst in path_copy_files:
+ assert(b'.blend' not in dst)
+ assert src != dst
+
+ # in rare cases a filepath could point to a directory
+ if (not os.path.exists(src)) or os.path.isdir(src):
+ yield report(" %s: %r\n" % (colorize("source missing", color='red'), src))
+ else:
+ yield report(" %s: %r -> %r\n" % (colorize("copying", color='blue'), src, dst))
+ os.makedirs(os.path.dirname(dst), exist_ok=True)
+ shutil.copy(src, dst)
+
+ shutil.rmtree(base_dir_dst_temp)
+
+ yield report(" %s: %r\n" % (colorize("written", color='green'), blendfile_dst))
+
+ elif mode == 'ZIP':
+ import zipfile
+
+ # not awesome!
+ import zlib
+ assert(compress_level in range(-1, 10))
+ _compress_level_orig = zlib.Z_DEFAULT_COMPRESSION
+ zlib.Z_DEFAULT_COMPRESSION = compress_level
+ _compress_mode = zipfile.ZIP_STORED if (compress_level == 0) else zipfile.ZIP_DEFLATED
+ if _compress_mode == zipfile.ZIP_STORED:
+ def is_compressed_filetype(fn):
+ return False
+ else:
+ from bam.utils.system import is_compressed_filetype
+
+ with zipfile.ZipFile(blendfile_dst.decode('utf-8'), 'w', _compress_mode) as zip_handle:
+ for fn in path_temp_files:
+ yield report(" %s: %r -> <archive>\n" % (colorize("copying", color='blue'), fn))
+ zip_handle.write(
+ fn.decode('utf-8'),
+ arcname=os.path.relpath(fn[:-1], base_dir_dst_temp).decode('utf-8'),
+ )
+ os.remove(fn)
+
+ shutil.rmtree(base_dir_dst_temp)
+
+ for src, dst in path_copy_files:
+ assert(not dst.endswith(b'.blend'))
+
+ # in rare cases a filepath could point to a directory
+ if (not os.path.exists(src)) or os.path.isdir(src):
+ yield report(" %s: %r\n" % (colorize("source missing", color='red'), src))
+ else:
+ yield report(" %s: %r -> <archive>\n" % (colorize("copying", color='blue'), src))
+ zip_handle.write(
+ src.decode('utf-8'),
+ arcname=os.path.relpath(dst, base_dir_dst).decode('utf-8'),
+ compress_type=zipfile.ZIP_STORED if is_compressed_filetype(dst) else _compress_mode,
+ )
+
+ zlib.Z_DEFAULT_COMPRESSION = _compress_level_orig
+ del _compress_level_orig, _compress_mode
+
+ yield report(" %s: %r\n" % (colorize("written", color='green'), blendfile_dst))
+ elif mode == 'NONE':
+ pass
+ else:
+ raise Exception("%s not a known mode" % mode)
+
+
+def create_argparse():
+ import argparse
+
+ parser = argparse.ArgumentParser(
+ description="Run this script to extract blend-files(s) and their dependencies "
+ "to a destination path.")
+
+ # for main_render() only, but validate args.
+ parser.add_argument(
+ "-i", "--input", dest="path_src", metavar='FILE', required=True,
+ help="Input path(s) or a wildcard to glob many files",
+ )
+ parser.add_argument(
+ "-e", "--exclude", dest="exclude", metavar='PATTERN', required=False,
+ help='Exclusion pattern, such as "*.abc;*.mov;*.mkv"')
+ parser.add_argument(
+ "-o", "--output", dest="path_dst", metavar='DIR', required=True,
+ help="Output file (must be a .blend for --mode=FILE or a .zip when --mode=ZIP), "
+ "or a directory when multiple inputs are passed",
+ )
+ parser.add_argument(
+ "-m", "--mode", dest="mode", metavar='MODE', required=False,
+ choices=('FILE', 'ZIP'), default='ZIP',
+ help="FILE copies the blend file(s) + dependencies to a directory, ZIP to an archive.",
+ )
+ parser.add_argument(
+ "-q", "--quiet", dest="use_quiet", action='store_true', required=False,
+ help="Suppress status output",
+ )
+ parser.add_argument(
+ "-t", "--temp", dest="temp_path", metavar='DIR', required=False,
+ help="Temporary directory to use. When not supplied, a unique directory is used.",
+ )
+
+ return parser
+
+
+def exclusion_filter(exclude: str):
+ """Converts a filter string "*.abc;*.def" to a function that can be passed to pack().
+
+ If 'exclude' is None or an empty string, returns None (which means "no filtering").
+ """
+
+ if not exclude:
+ return None
+
+ import re
+ import fnmatch
+
+ # convert the pattern string into a single bytes regex via fnmatch.translate(),
+ # e.g. "*.txt;*.png;*.rst" becomes one pattern matching any of those extensions
+ pattern = b'|'.join(fnmatch.translate(f).encode('utf-8')
+ for f in exclude.split(';')
+ if f)
+ compiled_pattern = re.compile(pattern, re.IGNORECASE)
+
+ def filename_filter(fname: bytes):
+ return not compiled_pattern.match(fname)
+
+ return filename_filter
+
+
+def main():
+ parser = create_argparse()
+ args = parser.parse_args()
+
+ if args.use_quiet:
+ def report(msg):
+ pass
+ else:
+ def report(msg):
+ sys.stdout.write(msg)
+ sys.stdout.flush()
+
+ for msg in pack(
+ args.path_src.encode('utf8'),
+ args.path_dst.encode('utf8'),
+ mode=args.mode,
+ base_dir_dst_temp=args.temp_path,
+ filename_filter=exclusion_filter(args.exclude),
+ ):
+ report(msg)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/blend/blendfile_pack_restore.py b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/blend/blendfile_pack_restore.py
new file mode 100644
index 00000000..653d362f
--- /dev/null
+++ b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/blend/blendfile_pack_restore.py
@@ -0,0 +1,143 @@
+#!/usr/bin/env python3
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
+"""
+This script takes blend-file(s) and remaps their paths to the original locations.
+
+(needed for uploading to the server)
+"""
+
+VERBOSE = 1
+
+from bam.blend import blendfile_path_walker
+
+
+def blendfile_remap(
+ blendfile_src, blendpath_dst,
+ deps_remap=None, deps_remap_cb=None,
+ deps_remap_cb_userdata=None,
+ ):
+ import os
+
+ def temp_remap_cb(filepath, level):
+ """
+ Simply point to the output dir.
+ """
+ basename = os.path.basename(blendfile_src)
+ filepath_tmp = os.path.join(blendpath_dst, basename)
+
+ # ideally we could avoid copying _ALL_ blends
+ # TODO(cam)
+ import shutil
+ shutil.copy(filepath, filepath_tmp)
+
+ return filepath_tmp
+
+ for fp, (rootdir, fp_blend_basename) in blendfile_path_walker.FilePath.visit_from_blend(
+ blendfile_src,
+ readonly=False,
+ temp_remap_cb=temp_remap_cb,
+ recursive=False,
+ ):
+
+ # path_dst_final - current path in blend.
+ # path_src_orig - original path from JSON.
+
+ path_dst_final_b = fp.filepath
+
+ # support 2 modes, callback or dictionary
+ if deps_remap_cb is not None:
+ path_src_orig = deps_remap_cb(path_dst_final_b, deps_remap_cb_userdata)
+ if path_src_orig is not None:
+ fp.filepath = path_src_orig
+ if VERBOSE:
+ print(" Remapping:", path_dst_final_b, "->", path_src_orig)
+ else:
+ path_dst_final = path_dst_final_b.decode('utf-8')
+ path_src_orig = deps_remap.get(path_dst_final)
+ if path_src_orig is not None:
+ fp.filepath = path_src_orig.encode('utf-8')
+ if VERBOSE:
+ print(" Remapping:", path_dst_final, "->", path_src_orig)
+
+
+def pack_restore(blendfile_dir_src, blendfile_dir_dst, pathmap):
+ import os
+
+ for dirpath, dirnames, filenames in os.walk(blendfile_dir_src):
+ if dirpath.startswith(b"."):
+ continue
+
+ for filename in filenames:
+ if os.path.splitext(filename)[1].lower() == b".blend":
+ remap = pathmap.get(filename.decode('utf-8'))
+ if remap is not None:
+ filepath = os.path.join(dirpath, filename)
+
+ # main function call
+ blendfile_remap(filepath, blendfile_dir_dst, remap)
+
+
+def create_argparse():
+ import os
+ import argparse
+
+ usage_text = (
+ "Run this script to remap blend-file(s) paths using a JSON file created by 'packer.py':" +
+ os.path.basename(__file__) +
+ "--input=DIR --remap=JSON [options]")
+
+ parser = argparse.ArgumentParser(description=usage_text)
+
+ # for main_render() only, but validate args.
+ parser.add_argument(
+ "-i", "--input", dest="path_src", metavar='DIR', required=True,
+ help="Input path(s) or a wildcard to glob many files")
+ parser.add_argument(
+ "-o", "--output", dest="path_dst", metavar='DIR', required=True,
+ help="Output directory ")
+ parser.add_argument(
+ "-r", "--deps_remap", dest="deps_remap", metavar='JSON', required=True,
+ help="JSON file containing the path remapping info")
+
+ return parser
+
+
+def main():
+ import sys
+ import json
+
+ parser = create_argparse()
+ args = parser.parse_args(sys.argv[1:])
+
+ encoding = sys.getfilesystemencoding()
+
+ with open(args.deps_remap, 'r', encoding='utf-8') as f:
+ pathmap = json.load(f)
+
+ pack_restore(
+ args.path_src.encode(encoding),
+ args.path_dst.encode(encoding),
+ pathmap,
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/blend/blendfile_path_remap.py b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/blend/blendfile_path_remap.py
new file mode 100644
index 00000000..3b792c3a
--- /dev/null
+++ b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/blend/blendfile_path_remap.py
@@ -0,0 +1,280 @@
+#!/usr/bin/env python3
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
+"""
+Module for remapping paths from one directory to another.
+"""
+
+import os
+
+
+# ----------------------------------------------------------------------------
+# private utility functions
+
+def _is_blend(f):
+ return f.lower().endswith(b'.blend')
+
+
+def _warn__ascii(msg):
+ print(" warning: %s" % msg)
+
+
+def _info__ascii(msg):
+ print(msg)
+
+
+def _warn__json(msg):
+ import json
+ print(json.dumps(("warning", msg)), end=",\n")
+
+def _info__json(msg):
+ import json
+ print(json.dumps(("info", msg)), end=",\n")
+
+
+def _uuid_from_file(fn, block_size=1 << 20):
+ with open(fn, 'rb') as f:
+ # first get the size
+ f.seek(0, os.SEEK_END)
+ size = f.tell()
+ f.seek(0, os.SEEK_SET)
+ # done!
+
+ import hashlib
+ sha1 = hashlib.new('sha512')
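+ # note: despite the variable name, this is a SHA-512 digest;
+ # the returned "uuid" is the file size in hex followed by that digest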
+ while True:
+ data = f.read(block_size)
+ if not data:
+ break
+ sha1.update(data)
+ return (hex(size)[2:] + sha1.hexdigest()).encode()
+
+
+def _iter_files(paths, check_ext=None):
+ # note, sorting isn't needed
+ # just gives predictable output
+ for p in paths:
+ p = os.path.abspath(p)
+ for dirpath, dirnames, filenames in sorted(os.walk(p)):
+ # skip '.svn'
+ if dirpath.startswith(b'.') and dirpath != b'.':
+ continue
+
+ for filename in sorted(filenames):
+ if check_ext is None or check_ext(filename):
+ filepath = os.path.join(dirpath, filename)
+ yield filepath
+
+
+# ----------------------------------------------------------------------------
+# Public Functions
+
+def start(
+ paths,
+ is_quiet=False,
+ dry_run=False,
+ use_json=False,
+ ):
+
+ if use_json:
+ warn = _warn__json
+ info = _info__json
+ else:
+ warn = _warn__ascii
+ info = _info__ascii
+
+ if use_json:
+ print("[")
+
+ # {(sha1, length): "filepath"}
+ remap_uuid = {}
+
+ # relative paths which don't exist,
+ # don't complain when they're missing on remap.
+ # {f_src: [relative path deps, ...]}
+ remap_lost = {}
+
+ # all files we need to map
+ # absolute paths
+ files_to_map = set()
+
+ # TODO, validate paths aren't nested! ["/foo", "/foo/bar"]
+ # it will cause problems touching files twice!
+
+ # ------------------------------------------------------------------------
+ # First walk over all blends
+ from bam.blend import blendfile_path_walker
+
+ for blendfile_src in _iter_files(paths, check_ext=_is_blend):
+ if not is_quiet:
+ info("blend read: %r" % blendfile_src)
+
+ remap_lost[blendfile_src] = remap_lost_blendfile_src = set()
+
+ for fp, (rootdir, fp_blend_basename) in blendfile_path_walker.FilePath.visit_from_blend(
+ blendfile_src,
+ readonly=True,
+ recursive=False,
+ ):
+ # TODO. warn when referencing files outside 'paths'
+
+ # so we can update the reference
+ f_abs = fp.filepath_absolute
+ f_abs = os.path.normpath(f_abs)
+ if os.path.exists(f_abs):
+ files_to_map.add(f_abs)
+ else:
+ if not is_quiet:
+ warn("file %r not found!" % f_abs)
+
+ # don't complain about this file being missing on remap
+ remap_lost_blendfile_src.add(fp.filepath)
+
+ # so we know where it's moved to
+ files_to_map.add(blendfile_src)
+ del blendfile_path_walker
+
+ # ------------------------------------------------------------------------
+ # Store UUID
+ #
+ # note, sorting is only to give predictable warnings/behavior
+ for f in sorted(files_to_map):
+ f_uuid = _uuid_from_file(f)
+
+ f_match = remap_uuid.get(f_uuid)
+ if f_match is not None:
+ if not is_quiet:
+ warn("duplicate file found! (%r, %r)" % (f_match, f))
+
+ remap_uuid[f_uuid] = f
+
+ # now find all deps
+ remap_data_args = (
+ remap_uuid,
+ remap_lost,
+ )
+
+ if use_json:
+ if not remap_uuid:
+ print("\"nothing to remap!\"")
+ else:
+ print("\"complete\"")
+ print("]")
+ else:
+ if not remap_uuid:
+ print("Nothing to remap!")
+
+ return remap_data_args
+
+
+def finish(
+ paths, remap_data_args,
+ is_quiet=False,
+ force_relative=False,
+ dry_run=False,
+ use_json=False,
+ ):
+
+ if use_json:
+ warn = _warn__json
+ info = _info__json
+ else:
+ warn = _warn__ascii
+ info = _info__ascii
+
+ if use_json:
+ print("[")
+
+ (remap_uuid,
+ remap_lost,
+ ) = remap_data_args
+
+ remap_src_to_dst = {}
+ remap_dst_to_src = {}
+
+ for f_dst in _iter_files(paths):
+ f_uuid = _uuid_from_file(f_dst)
+ f_src = remap_uuid.get(f_uuid)
+ if f_src is not None:
+ remap_src_to_dst[f_src] = f_dst
+ remap_dst_to_src[f_dst] = f_src
+
+ # now the fun begins, remap _all_ paths
+ from bam.blend import blendfile_path_walker
+
+ for blendfile_dst in _iter_files(paths, check_ext=_is_blend):
+ blendfile_src = remap_dst_to_src.get(blendfile_dst)
+ if blendfile_src is None:
+ if not is_quiet:
+ warn("new blendfile added since beginning 'remap': %r" % blendfile_dst)
+ continue
+
+ # not essential, just so we can give more meaningful errors
+ remap_lost_blendfile_src = remap_lost[blendfile_src]
+
+ if not is_quiet:
+ info("blend write: %r -> %r" % (blendfile_src, blendfile_dst))
+
+ blendfile_src_basedir = os.path.dirname(blendfile_src)
+ blendfile_dst_basedir = os.path.dirname(blendfile_dst)
+ for fp, (rootdir, fp_blend_basename) in blendfile_path_walker.FilePath.visit_from_blend(
+ blendfile_dst,
+ readonly=False,
+ recursive=False,
+ ):
+ # TODO. warn when referencing files outside 'paths'
+
+ # so we can update the reference
+ f_src_orig = fp.filepath
+
+ if f_src_orig in remap_lost_blendfile_src:
+ # this file never existed, so we can't remap it
+ continue
+
+ is_relative = f_src_orig.startswith(b'//')
+ if is_relative:
+ f_src_abs = fp.filepath_absolute_resolve(basedir=blendfile_src_basedir)
+ else:
+ f_src_abs = f_src_orig
+
+ f_src_abs = os.path.normpath(f_src_abs)
+ f_dst_abs = remap_src_to_dst.get(f_src_abs)
+
+ if f_dst_abs is None:
+ if not is_quiet:
+ warn("file %r not found in map!" % f_src_abs)
+ continue
+
+ # now remap!
+ if is_relative or force_relative:
+ f_dst_final = b'//' + os.path.relpath(f_dst_abs, blendfile_dst_basedir)
+ else:
+ f_dst_final = f_dst_abs
+
+ if f_dst_final != f_src_orig:
+ if not dry_run:
+ fp.filepath = f_dst_final
+ if not is_quiet:
+ info("remap %r -> %r" % (f_src_abs, f_dst_abs))
+
+ del blendfile_path_walker
+
+ if use_json:
+ print("\"complete\"\n]")
diff --git a/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/blend/blendfile_path_walker.py b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/blend/blendfile_path_walker.py
new file mode 100644
index 00000000..df07235e
--- /dev/null
+++ b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/blend/blendfile_path_walker.py
@@ -0,0 +1,953 @@
+#!/usr/bin/env python3
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
+import os
+import logging
+
+from . import blendfile
+
+# debug output on stdout gives problems for scripts that parse it, e.g. when testing 'bam deps'.
+DEBUG = False
+VERBOSE = DEBUG or False # os.environ.get('BAM_VERBOSE', False)
+TIMEIT = False
+
+USE_ALEMBIC_BRANCH = True
+
+
+class C_defs:
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwargs):
+ raise RuntimeError("%s should not be instantiated" % cls)
+
+ # DNA_sequence_types.h (Sequence.type)
+ SEQ_TYPE_IMAGE = 0
+ SEQ_TYPE_META = 1
+ SEQ_TYPE_SCENE = 2
+ SEQ_TYPE_MOVIE = 3
+ SEQ_TYPE_SOUND_RAM = 4
+ SEQ_TYPE_SOUND_HD = 5
+ SEQ_TYPE_MOVIECLIP = 6
+ SEQ_TYPE_MASK = 7
+ SEQ_TYPE_EFFECT = 8
+
+ IMA_SRC_FILE = 1
+ IMA_SRC_SEQUENCE = 2
+ IMA_SRC_MOVIE = 3
+
+ # DNA_modifier_types.h
+ eModifierType_MeshCache = 46
+
+ # DNA_particle_types.h
+ PART_DRAW_OB = 7
+ PART_DRAW_GR = 8
+
+ # DNA_object_types.h
+ # Object.transflag
+ OB_DUPLIGROUP = 1 << 8
+
+ if USE_ALEMBIC_BRANCH:
+ CACHE_LIBRARY_SOURCE_CACHE = 1
+
+log_deps = logging.getLogger("path_walker")
+log_deps.setLevel({
+ (True, True): logging.DEBUG,
+ (False, True): logging.INFO,
+ (False, False): logging.WARNING
+}[DEBUG, VERBOSE])
+
+if VERBOSE:
+ def set_as_str(s):
+ if s is None:
+ return "None"
+ return ", ".join(sorted(str(i) for i in s))
+
+
+class FPElem:
+ """
+ Tiny filepath class to hide blendfile.
+ """
+
+ __slots__ = (
+ "basedir",
+
+ # library link level
+ "level",
+
+ # True when this is part of a sequence (image or movieclip)
+ "is_sequence",
+
+ "userdata",
+ )
+
+ def __init__(self, basedir, level,
+ # subclasses get/set functions should use
+ userdata):
+ self.basedir = basedir
+ self.level = level
+ self.is_sequence = False
+
+ # subclass must call
+ self.userdata = userdata
+
+ def files_siblings(self):
+ return ()
+
+ # --------
+ # filepath
+
+ def filepath_absolute_resolve(self, basedir=None):
+ """
+ Resolve the filepath, with the option to override the basedir.
+ """
+ filepath = self.filepath
+ if filepath.startswith(b'//'):
+ if basedir is None:
+ basedir = self.basedir
+ return os.path.normpath(os.path.join(
+ basedir,
+ utils.compatpath(filepath[2:]),
+ ))
+ else:
+ return utils.compatpath(filepath)
+
+ def filepath_assign_edits(self, filepath, binary_edits):
+ self._set_cb_edits(filepath, binary_edits)
+
+ @staticmethod
+ def _filepath_assign_edits(block, path, filepath, binary_edits):
+ """
+ Record the write to a separate entry (binary file-like object),
+ this lets us replay the edits later.
+ (so we can replay them onto the clients local cache without a file transfer).
+ """
+ import struct
+ assert(type(filepath) is bytes)
+ assert(type(path) is bytes)
+ ofs, size = block.get_file_offset(path)
+ # ensure we don't write past the field size & allow for \0
+ filepath = filepath[:size - 1]
+ binary_edits.append((ofs, filepath + b'\0'))
+
+ @property
+ def filepath(self):
+ return self._get_cb()
+
+ @filepath.setter
+ def filepath(self, filepath):
+ self._set_cb(filepath)
+
+ @property
+ def filepath_absolute(self):
+ return self.filepath_absolute_resolve()
+
+
+class FPElem_block_path(FPElem):
+ """
+ Simple block-path:
+ userdata = (block, path)
+ """
+ __slots__ = ()
+
+ def _get_cb(self):
+ block, path = self.userdata
+ return block[path]
+
+ def _set_cb(self, filepath):
+ block, path = self.userdata
+ block[path] = filepath
+
+ def _set_cb_edits(self, filepath, binary_edits):
+ block, path = self.userdata
+ self._filepath_assign_edits(block, path, filepath, binary_edits)
+
+
+class FPElem_sequence_single(FPElem):
+ """
+ Movie sequence
+ userdata = (block, path, sub_block, sub_path)
+ """
+ __slots__ = ()
+
+ def _get_cb(self):
+ block, path, sub_block, sub_path = self.userdata
+ return block[path] + sub_block[sub_path]
+
+ def _set_cb(self, filepath):
+ block, path, sub_block, sub_path = self.userdata
+ head, sep, tail = utils.splitpath(filepath)
+
+ block[path] = head + sep
+ sub_block[sub_path] = tail
+
+ def _set_cb_edits(self, filepath, binary_edits):
+ block, path, sub_block, sub_path = self.userdata
+ head, sep, tail = utils.splitpath(filepath)
+
+ self._filepath_assign_edits(block, path, head + sep, binary_edits)
+ self._filepath_assign_edits(sub_block, sub_path, tail, binary_edits)
+
+
+class FPElem_sequence_image_seq(FPElem_sequence_single):
+ """
+ Image sequence
+ userdata = (block, path, sub_block, sub_path)
+ """
+ __slots__ = ()
+
+ def files_siblings(self):
+ block, path, sub_block, sub_path = self.userdata
+
+ array = block.get_pointer(b'stripdata')
+ files = [array.get(b'name', use_str=False, base_index=i) for i in range(array.count)]
+ return files
+
+
+class FilePath:
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwargs):
+ raise RuntimeError("%s should not be instantiated" % cls)
+
+ # ------------------------------------------------------------------------
+ # Main function to visit paths
+ @staticmethod
+ def visit_from_blend(
+ filepath,
+
+ # never modify the blend
+ readonly=True,
+ # callback that creates a temp file and returns its path.
+ temp_remap_cb=None,
+
+ # recursive options
+ recursive=False,
+ # recurse all indirectly linked data
+ # (not just from the initially referenced blend file)
+ recursive_all=False,
+ # list of ID block names we want to load, or None to load all
+ block_codes=None,
+ # root when we're loading libs indirectly
+ rootdir=None,
+ level=0,
+ # dict of id's used so we don't follow these links again
+ # prevents cyclic references too!
+ # {lib_path: set([block id's ...])}
+ lib_visit=None,
+
+ # optional blendfile callbacks
+ # These callbacks run on enter-exit blend files
+ # so you can keep track of what file and level you're at.
+ blendfile_level_cb=(None, None),
+ ):
+ # print(level, block_codes)
+ import os
+
+ filepath = os.path.abspath(filepath)
+
+ indent_str = " " * level
+ # print(indent_str + "Opening:", filepath)
+ # print(indent_str + "... blocks:", block_codes)
+
+ log = log_deps.getChild('visit_from_blend')
+ log.info("~")
+ log.info("%sOpening: %s", indent_str, filepath)
+ if VERBOSE:
+ log.info("%s blocks: %s", indent_str, set_as_str(block_codes))
+
+ blendfile_level_cb_enter, blendfile_level_cb_exit = blendfile_level_cb
+
+ if blendfile_level_cb_enter is not None:
+ blendfile_level_cb_enter(filepath)
+
+ basedir = os.path.dirname(filepath)
+ if rootdir is None:
+ rootdir = basedir
+
+ if lib_visit is None:
+ lib_visit = {}
+
+
+
+ if recursive and (level > 0) and (block_codes is not None) and (recursive_all is False):
+ # prevent expanding the
+ # same datablock more than once
+ # note: we could *almost* use id_name, however this isn't unique for libraries.
+ expand_addr_visit = set()
+ # {lib_id: {block_ids... }}
+ expand_codes_idlib = {}
+
+ # libraries used by this blend
+ block_codes_idlib = set()
+
+ # XXX, checking 'block_codes' isn't 100% reliable,
+ # but at least don't touch the same blocks twice.
+ # whereas block_codes is intended to only operate on blocks we requested.
+ lib_block_codes_existing = lib_visit.setdefault(filepath, set())
+
+ # only for this block
+ def _expand_codes_add_test(block, code):
+ # return True, if the ID should be searched further
+ #
+ # we could investigate a better way...
+ # we shouldn't really be accessing ID blocks at this point, but it's harmless
+ if code == b'ID':
+ assert(code == block.code)
+ if recursive:
+ expand_codes_idlib.setdefault(block[b'lib'], set()).add(block[b'name'])
+ return False
+ else:
+ id_name = block[b'id', b'name']
+
+ # if we touched this already, don't touch again
+ # (else we may modify the same path multiple times)
+ #
+ # FIXME, works in some cases but not others
+ # keep, without this we get errors
+ # Gooseberry r668
+ # bam pack scenes/01_island/01_meet_franck/01_01_01_A/01_01_01_A.comp.blend
+ # gives strange errors
+ '''
+ if id_name not in block_codes:
+ return False
+ '''
+
+ # instead just don't operate on blocks multiple times
+ # ... rather than attempt to check on what we need or not.
+ len_prev = len(lib_block_codes_existing)
+ lib_block_codes_existing.add(id_name)
+ if len_prev == len(lib_block_codes_existing):
+ return False
+
+ len_prev = len(expand_addr_visit)
+ expand_addr_visit.add(block.addr_old)
+ return (len_prev != len(expand_addr_visit))
+
+ def block_expand(block, code):
+ assert(block.code == code)
+ if _expand_codes_add_test(block, code):
+ yield block
+
+ assert(block.code == code)
+ fn = ExpandID.expand_funcs.get(code)
+ if fn is not None:
+ for sub_block in fn(block):
+ if sub_block is not None:
+ yield from block_expand(sub_block, sub_block.code)
+ else:
+ if code == b'ID':
+ yield block
+ else:
+ expand_addr_visit = None
+
+ # set below
+ expand_codes_idlib = None
+
+ # never set
+ block_codes_idlib = None
+
+ def block_expand(block, code):
+ assert(block.code == code)
+ yield block
+
+ # ------
+ # Define
+ #
+ # - iter_blocks_id(code)
+ # - iter_blocks_idlib()
+ if block_codes is None:
+ def iter_blocks_id(code):
+ return blend.find_blocks_from_code(code)
+
+ def iter_blocks_idlib():
+ return blend.find_blocks_from_code(b'LI')
+ else:
+ def iter_blocks_id(code):
+ for block in blend.find_blocks_from_code(code):
+ if block[b'id', b'name'] in block_codes:
+ yield from block_expand(block, code)
+
+ if block_codes_idlib is not None:
+ def iter_blocks_idlib():
+ for block in blend.find_blocks_from_code(b'LI'):
+ # TODO, this should work but in fact makes some libs not link correctly.
+ if block[b'name'] in block_codes_idlib:
+ yield from block_expand(block, b'LI')
+ else:
+ def iter_blocks_idlib():
+ return blend.find_blocks_from_code(b'LI')
+
+ if temp_remap_cb is not None:
+ filepath_tmp = temp_remap_cb(filepath, rootdir)
+ else:
+ filepath_tmp = filepath
+
+ # store info to pass along with each iteration
+ extra_info = rootdir, os.path.basename(filepath)
+
+ with blendfile.open_blend(filepath_tmp, "rb" if readonly else "r+b") as blend:
+
+ for code in blend.code_index.keys():
+ # handle library blocks as special case
+ if ((len(code) != 2) or
+ (code in {
+ # libraries handled below
+ b'LI',
+ b'ID',
+ # unneeded
+ b'WM',
+ b'SN', # bScreen
+ })):
+
+ continue
+
+ # if VERBOSE:
+ # print(" Scanning", code)
+
+ for block in iter_blocks_id(code):
+ yield from FilePath.from_block(block, basedir, extra_info, level)
+
+ # print("A:", expand_addr_visit)
+ # print("B:", block_codes)
+ if VERBOSE:
+ log.info("%s expand_addr_visit=%s", indent_str, set_as_str(expand_addr_visit))
+
+ if recursive:
+
+ if expand_codes_idlib is None:
+ expand_codes_idlib = {}
+ for block in blend.find_blocks_from_code(b'ID'):
+ expand_codes_idlib.setdefault(block[b'lib'], set()).add(block[b'name'])
+
+ # look into libraries
+ lib_all = []
+
+ for lib_id, lib_block_codes in sorted(expand_codes_idlib.items()):
+ lib = blend.find_block_from_offset(lib_id)
+ lib_path = lib[b'name']
+
+ # get all data needed to read the blend files here (it will be freed!)
+ # lib is an address at the moment, we only use as a way to group
+
+ lib_all.append((lib_path, lib_block_codes))
+ # import IPython; IPython.embed()
+
+ # ensure we expand indirect linked libs
+ if block_codes_idlib is not None:
+ block_codes_idlib.add(lib_path)
+
+ # do this after, in case we mangle names above
+ for block in iter_blocks_idlib():
+ yield from FilePath.from_block(block, basedir, extra_info, level)
+ del blend
+
+
+ # ----------------
+ # Handle Recursive
+ if recursive:
+ # now we've closed the file, loop on other files
+
+ # note, sorting - isn't needed, it just gives predictable load-order.
+ for lib_path, lib_block_codes in lib_all:
+ lib_path_abs = os.path.normpath(utils.compatpath(utils.abspath(lib_path, basedir)))
+
+ # if we visited this before,
+ # check we don't follow the same links more than once
+ lib_block_codes_existing = lib_visit.setdefault(lib_path_abs, set())
+ lib_block_codes -= lib_block_codes_existing
+
+ # don't touch them again
+ # XXX, this is now maintained in "_expand_generic_material"
+ # lib_block_codes_existing.update(lib_block_codes)
+
+ # print("looking for", lib_block_codes)
+
+ if not lib_block_codes:
+ if VERBOSE:
+ print((indent_str + " "), "Library Skipped (visited): ", filepath, " -> ", lib_path_abs, sep="")
+ continue
+
+ if not os.path.exists(lib_path_abs):
+ if VERBOSE:
+ print((indent_str + " "), "Library Missing: ", filepath, " -> ", lib_path_abs, sep="")
+ continue
+
+ # import IPython; IPython.embed()
+ if VERBOSE:
+ print((indent_str + " "), "Library: ", filepath, " -> ", lib_path_abs, sep="")
+ # print((indent_str + " "), lib_block_codes)
+ yield from FilePath.visit_from_blend(
+ lib_path_abs,
+ readonly=readonly,
+ temp_remap_cb=temp_remap_cb,
+ recursive=True,
+ block_codes=lib_block_codes,
+ rootdir=rootdir,
+ level=level + 1,
+ lib_visit=lib_visit,
+ blendfile_level_cb=blendfile_level_cb,
+ )
+
+ if blendfile_level_cb_exit is not None:
+ blendfile_level_cb_exit(filepath)
+
+ # ------------------------------------------------------------------------
+ # Direct filepaths from Blocks
+ #
+ # (no expanding or following references)
+
+ @staticmethod
+ def from_block(block: blendfile.BlendFileBlock, basedir, extra_info, level):
+ assert(block.code != b'DATA')
+ fn = FilePath._from_block_dict.get(block.code)
+ if fn is None:
+ return
+
+ yield from fn(block, basedir, extra_info, level)
+
+ @staticmethod
+ def _from_block_OB(block, basedir, extra_info, level):
+ # 'ob->modifiers[...].filepath'
+ for block_mod in bf_utils.iter_ListBase(
+ block.get_pointer((b'modifiers', b'first')),
+ next_item=(b'modifier', b'next')):
+ item_md_type = block_mod[b'modifier', b'type']
+ if item_md_type == C_defs.eModifierType_MeshCache:
+ yield FPElem_block_path(basedir, level, (block_mod, b'filepath')), extra_info
+
+ @staticmethod
+ def _from_block_MC(block, basedir, extra_info, level):
+ # TODO, image sequence
+ fp = FPElem_block_path(basedir, level, (block, b'name'))
+ fp.is_sequence = True
+ yield fp, extra_info
+
+ @staticmethod
+ def _from_block_IM(block, basedir, extra_info, level):
+ # old files miss this
+ image_source = block.get(b'source', C_defs.IMA_SRC_FILE)
+ if image_source not in {C_defs.IMA_SRC_FILE, C_defs.IMA_SRC_SEQUENCE, C_defs.IMA_SRC_MOVIE}:
+ return
+ if block[b'packedfile']:
+ return
+
+ fp = FPElem_block_path(basedir, level, (block, b'name'))
+ if image_source == C_defs.IMA_SRC_SEQUENCE:
+ fp.is_sequence = True
+ yield fp, extra_info
+
+ @staticmethod
+ def _from_block_VF(block, basedir, extra_info, level):
+ if block[b'packedfile']:
+ return
+ if block[b'name'] != b'<builtin>': # builtin font
+ yield FPElem_block_path(basedir, level, (block, b'name')), extra_info
+
+ @staticmethod
+ def _from_block_SO(block, basedir, extra_info, level):
+ if block[b'packedfile']:
+ return
+ yield FPElem_block_path(basedir, level, (block, b'name')), extra_info
+
+ @staticmethod
+ def _from_block_ME(block, basedir, extra_info, level):
+ block_external = block.get_pointer((b'ldata', b'external'), None)
+ if block_external is None:
+ block_external = block.get_pointer((b'fdata', b'external'), None)
+
+ if block_external is not None:
+ yield FPElem_block_path(basedir, level, (block_external, b'filename')), extra_info
+
+ if USE_ALEMBIC_BRANCH:
+ @staticmethod
+ def _from_block_CL(block, basedir, extra_info, level):
+ if block[b'source_mode'] == C_defs.CACHE_LIBRARY_SOURCE_CACHE:
+ yield FPElem_block_path(basedir, level, (block, b'input_filepath')), extra_info
+
+ @staticmethod
+ def _from_block_CF(block, basedir, extra_info, level):
+ yield FPElem_block_path(basedir, level, (block, b'filepath')), extra_info
+
+
+ @staticmethod
+ def _from_block_SC(block, basedir, extra_info, level):
+ block_ed = block.get_pointer(b'ed')
+ if block_ed is not None:
+ sdna_index_Sequence = block.file.sdna_index_from_id[b'Sequence']
+
+ def seqbase(someseq):
+ for item in someseq:
+ item_type = item.get(b'type', sdna_index_refine=sdna_index_Sequence)
+
+ if item_type >= C_defs.SEQ_TYPE_EFFECT:
+ pass
+ elif item_type == C_defs.SEQ_TYPE_META:
+ yield from seqbase(bf_utils.iter_ListBase(
+ item.get_pointer((b'seqbase', b'first'), sdna_index_refine=sdna_index_Sequence)))
+ else:
+ item_strip = item.get_pointer(b'strip', sdna_index_refine=sdna_index_Sequence)
+ if item_strip is None: # unlikely!
+ continue
+ item_stripdata = item_strip.get_pointer(b'stripdata')
+
+ if item_type == C_defs.SEQ_TYPE_IMAGE:
+ yield FPElem_sequence_image_seq(
+ basedir, level, (item_strip, b'dir', item_stripdata, b'name')), extra_info
+ elif item_type in {C_defs.SEQ_TYPE_MOVIE, C_defs.SEQ_TYPE_SOUND_RAM, C_defs.SEQ_TYPE_SOUND_HD}:
+ yield FPElem_sequence_single(
+ basedir, level, (item_strip, b'dir', item_stripdata, b'name')), extra_info
+
+ yield from seqbase(bf_utils.iter_ListBase(block_ed.get_pointer((b'seqbase', b'first'))))
+
+ @staticmethod
+ def _from_block_LI(block, basedir, extra_info, level):
+ if block.get(b'packedfile', None):
+ return
+
+ yield FPElem_block_path(basedir, level, (block, b'name')), extra_info
+
+ # _from_block_IM --> {b'IM': _from_block_IM, ...}
+ _from_block_dict = {
+ k.rpartition("_")[2].encode('ascii'): s_fn.__func__ for k, s_fn in locals().items()
+ if isinstance(s_fn, staticmethod)
+ if k.startswith("_from_block_")
+ }
+
+
+class bf_utils:
+ @staticmethod
+ def iter_ListBase(block, next_item=b'next'):
+ while block:
+ yield block
+ block = block.file.find_block_from_offset(block[next_item])
+
+ @staticmethod
+ def iter_array(block, length=-1):
+ assert(block.code == b'DATA')
+ from . import blendfile
+ import os
+ handle = block.file.handle
+ header = block.file.header
+
+ for i in range(length):
+ block.file.handle.seek(block.file_offset + (header.pointer_size * i), os.SEEK_SET)
+ offset = blendfile.DNA_IO.read_pointer(handle, header)
+ sub_block = block.file.find_block_from_offset(offset)
+ yield sub_block
+
+
+# -----------------------------------------------------------------------------
+# ID Expand
+
+class ExpandID:
+ # fake module
+ #
+ # TODO:
+ #
+ # Array lookups here are _WAY_ too complicated,
+ # we need some nicer way to represent pointer indirection (easy like in C!)
+ # but for now, use what we have.
+ #
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwargs):
+ raise RuntimeError("%s should not be instantiated" % cls)
+
+ @staticmethod
+ def _expand_generic_material(block):
+ array_len = block.get(b'totcol')
+ if array_len != 0:
+ array = block.get_pointer(b'mat')
+ for sub_block in bf_utils.iter_array(array, array_len):
+ yield sub_block
+
+ @staticmethod
+ def _expand_generic_mtex(block):
+ field = block.dna_type.field_from_name[b'mtex']
+ array_len = field.dna_size // block.file.header.pointer_size
+
+ for i in range(array_len):
+ item = block.get_pointer((b'mtex', i))
+ if item:
+ yield item.get_pointer(b'tex')
+ yield item.get_pointer(b'object')
+
+ @staticmethod
+ def _expand_generic_nodetree(block):
+ assert(block.dna_type.dna_type_id == b'bNodeTree')
+
+ sdna_index_bNode = block.file.sdna_index_from_id[b'bNode']
+ for item in bf_utils.iter_ListBase(block.get_pointer((b'nodes', b'first'))):
+ item_type = item.get(b'type', sdna_index_refine=sdna_index_bNode)
+
+ if item_type != 221: # CMP_NODE_R_LAYERS
+ yield item.get_pointer(b'id', sdna_index_refine=sdna_index_bNode)
+
+ @staticmethod
+ def _expand_generic_nodetree_id(block):
+ block_ntree = block.get_pointer(b'nodetree', None)
+ if block_ntree is not None:
+ yield from ExpandID._expand_generic_nodetree(block_ntree)
+
+ @staticmethod
+ def _expand_generic_animdata(block):
+ block_adt = block.get_pointer(b'adt')
+ if block_adt:
+ yield block_adt.get_pointer(b'action')
+ # TODO, NLA
+
+ @staticmethod
+ def expand_OB(block): # 'Object'
+ yield from ExpandID._expand_generic_animdata(block)
+ yield from ExpandID._expand_generic_material(block)
+
+ has_dup_group = False
+ yield block.get_pointer(b'data')
+ if block[b'transflag'] & C_defs.OB_DUPLIGROUP:
+ dup_group = block.get_pointer(b'dup_group')
+ if dup_group is not None:
+ has_dup_group = True
+ yield dup_group
+ del dup_group
+
+ yield block.get_pointer(b'proxy')
+ yield block.get_pointer(b'proxy_group')
+
+ if USE_ALEMBIC_BRANCH:
+ if has_dup_group:
+ sdna_index_CacheLibrary = block.file.sdna_index_from_id.get(b'CacheLibrary')
+ if sdna_index_CacheLibrary is not None:
+ yield block.get_pointer(b'cache_library')
+
+ # 'ob->pose->chanbase[...].custom'
+ block_pose = block.get_pointer(b'pose')
+ if block_pose is not None:
+ assert(block_pose.dna_type.dna_type_id == b'bPose')
+ sdna_index_bPoseChannel = block_pose.file.sdna_index_from_id[b'bPoseChannel']
+ for item in bf_utils.iter_ListBase(block_pose.get_pointer((b'chanbase', b'first'))):
+ item_custom = item.get_pointer(b'custom', sdna_index_refine=sdna_index_bPoseChannel)
+ if item_custom is not None:
+ yield item_custom
+ # Expand the objects 'ParticleSettings' via:
+ # 'ob->particlesystem[...].part'
+ sdna_index_ParticleSystem = block.file.sdna_index_from_id.get(b'ParticleSystem')
+ if sdna_index_ParticleSystem is not None:
+ for item in bf_utils.iter_ListBase(
+ block.get_pointer((b'particlesystem', b'first'))):
+ item_part = item.get_pointer(b'part', sdna_index_refine=sdna_index_ParticleSystem)
+ if item_part is not None:
+ yield item_part
+
+ @staticmethod
+ def expand_ME(block): # 'Mesh'
+ yield from ExpandID._expand_generic_animdata(block)
+ yield from ExpandID._expand_generic_material(block)
+ yield block.get_pointer(b'texcomesh')
+ # TODO, TexFace? - it will be slow, we could simply ignore :S
+
+ @staticmethod
+ def expand_CU(block): # 'Curve'
+ yield from ExpandID._expand_generic_animdata(block)
+ yield from ExpandID._expand_generic_material(block)
+
+ sub_block = block.get_pointer(b'vfont')
+ if sub_block is not None:
+ yield sub_block
+ yield block.get_pointer(b'vfontb')
+ yield block.get_pointer(b'vfonti')
+ yield block.get_pointer(b'vfontbi')
+
+ yield block.get_pointer(b'bevobj')
+ yield block.get_pointer(b'taperobj')
+ yield block.get_pointer(b'textoncurve')
+
+ @staticmethod
+ def expand_MB(block): # 'MBall'
+ yield from ExpandID._expand_generic_animdata(block)
+ yield from ExpandID._expand_generic_material(block)
+
+ @staticmethod
+ def expand_AR(block): # 'bArmature'
+ yield from ExpandID._expand_generic_animdata(block)
+
+ @staticmethod
+ def expand_LA(block): # 'Lamp'
+ yield from ExpandID._expand_generic_animdata(block)
+ yield from ExpandID._expand_generic_nodetree_id(block)
+ yield from ExpandID._expand_generic_mtex(block)
+
+ @staticmethod
+ def expand_MA(block): # 'Material'
+ yield from ExpandID._expand_generic_animdata(block)
+ yield from ExpandID._expand_generic_nodetree_id(block)
+ yield from ExpandID._expand_generic_mtex(block)
+
+ yield block.get_pointer(b'group')
+
+ @staticmethod
+ def expand_TE(block): # 'Tex'
+ yield from ExpandID._expand_generic_animdata(block)
+ yield from ExpandID._expand_generic_nodetree_id(block)
+ yield block.get_pointer(b'ima')
+
+ @staticmethod
+ def expand_WO(block): # 'World'
+ yield from ExpandID._expand_generic_animdata(block)
+ yield from ExpandID._expand_generic_nodetree_id(block)
+ yield from ExpandID._expand_generic_mtex(block)
+
+ @staticmethod
+ def expand_NT(block): # 'bNodeTree'
+ yield from ExpandID._expand_generic_animdata(block)
+ yield from ExpandID._expand_generic_nodetree(block)
+
+ @staticmethod
+ def expand_PA(block): # 'ParticleSettings'
+ yield from ExpandID._expand_generic_animdata(block)
+ block_ren_as = block[b'ren_as']
+ if block_ren_as == C_defs.PART_DRAW_GR:
+ yield block.get_pointer(b'dup_group')
+ elif block_ren_as == C_defs.PART_DRAW_OB:
+ yield block.get_pointer(b'dup_ob')
+ yield from ExpandID._expand_generic_mtex(block)
+
+ @staticmethod
+ def expand_SC(block): # 'Scene'
+ yield from ExpandID._expand_generic_animdata(block)
+ yield from ExpandID._expand_generic_nodetree_id(block)
+ yield block.get_pointer(b'camera')
+ yield block.get_pointer(b'world')
+ yield block.get_pointer(b'set', None)
+ yield block.get_pointer(b'clip', None)
+
+ sdna_index_Base = block.file.sdna_index_from_id[b'Base']
+ for item in bf_utils.iter_ListBase(block.get_pointer((b'base', b'first'))):
+ yield item.get_pointer(b'object', sdna_index_refine=sdna_index_Base)
+
+ block_ed = block.get_pointer(b'ed')
+ if block_ed is not None:
+ sdna_index_Sequence = block.file.sdna_index_from_id[b'Sequence']
+
+ def seqbase(someseq):
+ for item in someseq:
+ item_type = item.get(b'type', sdna_index_refine=sdna_index_Sequence)
+
+ if item_type >= C_defs.SEQ_TYPE_EFFECT:
+ pass
+ elif item_type == C_defs.SEQ_TYPE_META:
+ yield from seqbase(bf_utils.iter_ListBase(
+ item.get_pointer((b'seqbase', b'first'), sdna_index_refine=sdna_index_Sequence)))
+ else:
+ if item_type == C_defs.SEQ_TYPE_SCENE:
+ yield item.get_pointer(b'scene')
+ elif item_type == C_defs.SEQ_TYPE_MOVIECLIP:
+ yield item.get_pointer(b'clip')
+ elif item_type == C_defs.SEQ_TYPE_MASK:
+ yield item.get_pointer(b'mask')
+ elif item_type == C_defs.SEQ_TYPE_SOUND_RAM:
+ yield item.get_pointer(b'sound')
+
+ yield from seqbase(bf_utils.iter_ListBase(
+ block_ed.get_pointer((b'seqbase', b'first'))))
+
+ @staticmethod
+ def expand_GR(block): # 'Group'
+ sdna_index_GroupObject = block.file.sdna_index_from_id[b'GroupObject']
+ for item in bf_utils.iter_ListBase(block.get_pointer((b'gobject', b'first'))):
+ yield item.get_pointer(b'ob', sdna_index_refine=sdna_index_GroupObject)
+
+ # expand_GR --> {b'GR': expand_GR, ...}
+ expand_funcs = {
+ k.rpartition("_")[2].encode('ascii'): s_fn.__func__ for k, s_fn in locals().items()
+ if isinstance(s_fn, staticmethod)
+ if k.startswith("expand_")
+ }
+
+
+# -----------------------------------------------------------------------------
+# Packing Utility
+
+
+class utils:
+ # fake module
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwargs):
+ raise RuntimeError("%s should not be instantiated" % cls)
+
+ @staticmethod
+ def abspath(path, start, library=None):
+ import os
+ if path.startswith(b'//'):
+ # if library:
+ # start = os.path.dirname(abspath(library.filepath))
+ return os.path.join(start, path[2:])
+ return path
+
+ if __import__("os").sep == '/':
+ @staticmethod
+ def compatpath(path):
+ return path.replace(b'\\', b'/')
+ else:
+ @staticmethod
+ def compatpath(path):
+ # keep '//'
+ return path[:2] + path[2:].replace(b'/', b'\\')
+
+ @staticmethod
+ def splitpath(path):
+ """
+ Split the path at the last separator, accepting either forward or back slashes.
+ """
+ split1 = path.rpartition(b'/')
+ split2 = path.rpartition(b'\\')
+ if len(split1[0]) > len(split2[0]):
+ return split1
+ else:
+ return split2
+
+ @staticmethod
+ def find_sequence_paths(filepath, use_fullpath=True):
+ # supports str, byte paths
+ basedir, filename = os.path.split(filepath)
+ if not os.path.exists(basedir):
+ return []
+
+ filename_noext, ext = os.path.splitext(filename)
+
+ from string import digits
+ if isinstance(filepath, bytes):
+ digits = digits.encode()
+ filename_nodigits = filename_noext.rstrip(digits)
+
+ if len(filename_nodigits) == len(filename_noext):
+ # input isn't from a sequence
+ return []
+
+ files = os.listdir(basedir)
+ files[:] = [
+ f for f in files
+ if f.startswith(filename_nodigits) and
+ f.endswith(ext) and
+ f[len(filename_nodigits):-len(ext) if ext else -1].isdigit()
+ ]
+ if use_fullpath:
+ files[:] = [
+ os.path.join(basedir, f) for f in files
+ ]
+
+ return files
diff --git a/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/cli.py b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/cli.py
new file mode 100644
index 00000000..ef7dfe47
--- /dev/null
+++ b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/cli.py
@@ -0,0 +1,2018 @@
+#!/usr/bin/env python3
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
+"""
+This is the entry point for command line access.
+"""
+
+import os
+import sys
+import json
+
+# ------------------
+# Ensure module path
+path = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..", "modules"))
+if path not in sys.path:
+ sys.path.append(path)
+del path
+# --------
+
+import logging
+log = logging.getLogger("bam_cli")
+
+
+def fatal(msg):
+ if __name__ == "__main__":
+ sys.stderr.write("fatal: ")
+ sys.stderr.write(msg)
+ sys.stderr.write("\n")
+ sys.exit(1)
+ else:
+ raise RuntimeError(msg)
+
+
+class bam_config:
+ # fake module
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwargs):
+ raise RuntimeError("%s should not be instantiated" % cls)
+
+ CONFIG_DIR = ".bam"
+ # can in fact be any file in the session
+ SESSION_FILE = ".bam_paths_remap.json"
+
+ @staticmethod
+ def find_basedir(cwd=None, path_suffix=None, abort=False, test_subpath=CONFIG_DIR, descr="<unknown>"):
+ """
+ Return the config path (or None when not found)
+ Actually should raise an error?
+ """
+
+ if cwd is None:
+ cwd = os.getcwd()
+
+ parent = (os.path.normpath(
+ os.path.abspath(
+ cwd)))
+
+ parent_prev = None
+
+ while parent != parent_prev:
+ test_dir = os.path.join(parent, test_subpath)
+ if os.path.exists(test_dir):
+ if path_suffix is not None:
+ test_dir = os.path.join(test_dir, path_suffix)
+ return test_dir
+
+ parent_prev = parent
+ parent = os.path.dirname(parent)
+
+ if abort is True:
+ fatal("Not a %s (or any of the parent directories): %s" % (descr, test_subpath))
+
+ return None
+
+ @staticmethod
+ def find_rootdir(cwd=None, path_suffix=None, abort=False, test_subpath=CONFIG_DIR, descr="<unknown>"):
+ """
+ find_basedir(), without '.bam' suffix
+ """
+ path = bam_config.find_basedir(
+ cwd=cwd,
+ path_suffix=path_suffix,
+ abort=abort,
+ test_subpath=test_subpath,
+ )
+
+ return path[:-(len(test_subpath) + 1)]
+
+ @staticmethod
+ def find_sessiondir(cwd=None, abort=False):
+ """
+ from: my_project/my_session/some/subdir
+ to: my_project/my_session
+ where: my_project/.bam/ (is the basedir)
+ """
+ session_rootdir = bam_config.find_basedir(
+ cwd=cwd,
+ test_subpath=bam_config.SESSION_FILE,
+ abort=abort,
+ descr="bam session"
+ )
+
+ if session_rootdir is not None:
+ return session_rootdir[:-len(bam_config.SESSION_FILE)]
+ else:
+ if abort:
+ if not os.path.isdir(session_rootdir):
+ fatal("Expected a directory (%r)" % session_rootdir)
+ return None
+
+ @staticmethod
+ def load(id_="config", cwd=None, abort=False):
+ filepath = bam_config.find_basedir(
+ cwd=cwd,
+ path_suffix=id_,
+ descr="bam repository",
+ )
+ if abort is True:
+ if filepath is None:
+ fatal("Not a bam repository (or any of the parent directories): .bam")
+
+ with open(filepath, 'r') as f:
+ return json.load(f)
+
+ @staticmethod
+ def write(id_="config", data=None, cwd=None):
+ filepath = bam_config.find_basedir(
+ cwd=cwd,
+ path_suffix=id_,
+ descr="bam repository",
+ )
+
+ from bam.utils.system import write_json_to_file
+ write_json_to_file(filepath, data)
+
+ @staticmethod
+ def write_bamignore(cwd=None):
+ path = bam_config.find_rootdir(cwd=cwd)
+ if path:
+ filepath = os.path.join(path, ".bamignore")
+ with open(filepath, 'w') as f:
+ f.write(r".*\.blend\d+$")
+
+ @staticmethod
+ def create_bamignore_filter(id_=".bamignore", cwd=None):
+ path = bam_config.find_rootdir()
+ bamignore = os.path.join(path, id_)
+ if os.path.isfile(bamignore):
+ with open(bamignore, 'r', encoding='utf-8') as f:
+ compiled_patterns = []
+
+ import re
+ for i, l in enumerate(f):
+ l = l.rstrip()
+ if l:
+ try:
+ p = re.compile(l)
+ except re.error as e:
+ fatal("%s:%d file contains an invalid regular expression, %s" %
+ (bamignore, i + 1, str(e)))
+ compiled_patterns.append(p)
+
+ if compiled_patterns:
+ def filter_ignore(f):
+ for pattern in filter_ignore.compiled_patterns:
+ if re.match(pattern, f):
+ return False
+ return True
+ filter_ignore.compiled_patterns = compiled_patterns
+
+ return filter_ignore
+
+ return None
+
+
+class bam_session:
+ # fake module
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwargs):
+ raise RuntimeError("%s should not be instantiated" % cls)
+
+ @staticmethod
+ def session_path_to_cache(
+ path,
+ cachedir=None,
+ session_rootdir=None,
+ paths_remap_relbase=None,
+ abort=True):
+ """
+ Given an absolute path, give us the cache-path on disk.
+ """
+
+ if session_rootdir is None:
+ session_rootdir = bam_config.find_sessiondir(path, abort=abort)
+
+ if paths_remap_relbase is None:
+ with open(os.path.join(session_rootdir, ".bam_paths_remap.json")) as fp:
+ paths_remap = json.load(fp)
+ paths_remap_relbase = paths_remap.get(".", "")
+ del fp, paths_remap
+
+ cachedir = os.path.join(bam_config.find_rootdir(cwd=session_rootdir, abort=True), ".cache")
+ path_rel = os.path.relpath(path, session_rootdir)
+ if path_rel[0] == "_":
+ path_cache = os.path.join(cachedir, path_rel[1:])
+ else:
+ path_cache = os.path.join(cachedir, paths_remap_relbase, path_rel)
+ path_cache = os.path.normpath(path_cache)
+ return path_cache
+
+ @staticmethod
+ def request_url(req_path):
+ cfg = bam_config.load()
+ result = "%s/%s" % (cfg['url'], req_path)
+ return result
+
+ @staticmethod
+ def status(session_rootdir,
+ paths_uuid_update=None):
+
+ paths_add = {}
+ paths_remove = {}
+ paths_modified = {}
+
+ from bam.utils.system import uuid_from_file
+
+ session_rootdir = os.path.abspath(session_rootdir)
+
+ # don't commit metadata
+ paths_used = {
+ os.path.join(session_rootdir, ".bam_paths_uuid.json"),
+ os.path.join(session_rootdir, ".bam_paths_remap.json"),
+ os.path.join(session_rootdir, ".bam_deps_remap.json"),
+ os.path.join(session_rootdir, ".bam_paths_edit.data"),
+ os.path.join(session_rootdir, ".bam_tmp.zip"),
+ }
+
+ paths_uuid = bam_session.load_paths_uuid(session_rootdir)
+
+ for f_rel, sha1 in paths_uuid.items():
+ f_abs = os.path.join(session_rootdir, f_rel)
+ if os.path.exists(f_abs):
+ sha1_modified = uuid_from_file(f_abs)
+ if sha1_modified != sha1:
+ paths_modified[f_rel] = f_abs
+ if paths_uuid_update is not None:
+ paths_uuid_update[f_rel] = sha1_modified
+ paths_used.add(f_abs)
+ else:
+ paths_remove[f_rel] = f_abs
+
+ # ----
+ # find new files
+ def iter_files(path, filename_check=None):
+ for dirpath, dirnames, filenames in os.walk(path):
+
+ # skip '.svn'
+ if dirpath.startswith(".") and dirpath != ".":
+ continue
+
+ for filename in filenames:
+ filepath = os.path.join(dirpath, filename)
+ if filename_check is None or filename_check(filepath):
+ yield filepath
+
+ bamignore_filter = bam_config.create_bamignore_filter()
+
+ for f_abs in iter_files(session_rootdir, bamignore_filter):
+ if f_abs not in paths_used:
+ # we should be clever - add the file to a useful location based on some rules
+ # (category, filetype & tags?)
+
+ f_rel = os.path.relpath(f_abs, session_rootdir)
+
+ paths_add[f_rel] = f_abs
+
+ if paths_uuid_update is not None:
+ paths_uuid_update[f_rel] = uuid_from_file(f_abs)
+
+ return paths_add, paths_remove, paths_modified
+
+ @staticmethod
+ def load_paths_uuid(session_rootdir):
+ with open(os.path.join(session_rootdir, ".bam_paths_uuid.json")) as f:
+ return json.load(f)
+
+ @staticmethod
+ def is_dirty(session_rootdir):
+ paths_add, paths_remove, paths_modified = bam_session.status(session_rootdir)
+ return any((paths_add, paths_modified, paths_remove))
+
+ @staticmethod
+ def binary_edits_apply_single(
+ blendfile_abs, # str
+ blendfile, # bytes
+ binary_edits,
+ session_rootdir,
+ paths_uuid_update=None,
+ ):
+
+ sys.stdout.write(" operating on: %r\n" % blendfile_abs)
+ sys.stdout.flush()
+ # we don't want to read, just edit what's there.
+ with open(blendfile_abs, 'rb+') as fh_blend:
+ for ofs, data in binary_edits:
+ # sys.stdout.write("\n%r\n" % data)
+ sys.stdout.flush()
+ # ensure we're writing to the correct location.
+ # fh_blend.seek(ofs)
+ # sys.stdout.write(repr(b'existing data: ' + fh_blend.read(len(data) + 1)))
+ fh_blend.seek(ofs)
+ fh_blend.write(data)
+ sys.stdout.write("\n")
+ sys.stdout.flush()
+
+ if paths_uuid_update is not None:
+ # update hash!
+ # we could do later, but the file is fresh in cache, so do now
+ from bam.utils.system import uuid_from_file
+ f_rel = os.path.relpath(blendfile_abs, session_rootdir)
+ paths_uuid_update[f_rel] = uuid_from_file(blendfile_abs)
+ del uuid_from_file
+
+ @staticmethod
+ def binary_edits_apply_all(
+ session_rootdir,
+ # collection of local paths or None (to apply all binary edits)
+ paths=None,
+ update_uuid=False,
+ ):
+
+ # sanity check
+ if paths is not None:
+ for path in paths:
+ assert(type(path) is bytes)
+ assert(not os.path.isabs(path))
+ assert(os.path.exists(os.path.join(session_rootdir, path.decode('utf-8'))))
+
+ with open(os.path.join(session_rootdir, ".bam_paths_remap.json")) as fp:
+ paths_remap = json.load(fp)
+ paths_remap_relbase = paths_remap.get(".", "")
+ paths_remap_reverse = {v: k for k, v in paths_remap.items()}
+ del paths_remap
+
+ with open(os.path.join(session_rootdir, ".bam_paths_edit.data"), 'rb') as fh:
+ import pickle
+ binary_edits_all = pickle.load(fh)
+ paths_uuid_update = {} if update_uuid else None
+ for blendfile, binary_edits in binary_edits_all.items():
+ if binary_edits:
+ if paths is not None and blendfile not in paths:
+ continue
+
+ # get the absolute path as it is in the main repo
+ # then remap back to our local checkout
+ blendfile_abs_remote = os.path.normpath(os.path.join(paths_remap_relbase, blendfile.decode('utf-8')))
+ blendfile_abs = os.path.join(session_rootdir, paths_remap_reverse[blendfile_abs_remote])
+
+ bam_session.binary_edits_apply_single(
+ blendfile_abs,
+ blendfile,
+ binary_edits,
+ session_rootdir,
+ paths_uuid_update,
+ )
+ del pickle
+ del binary_edits_all
+
+ if update_uuid and paths_uuid_update:
+ # freshen the UUID's based on the replayed binary_edits
+ from bam.utils.system import write_json_to_file
+ paths_uuid = bam_session.load_paths_uuid(session_rootdir)
+ paths_uuid.update(paths_uuid_update)
+ write_json_to_file(os.path.join(session_rootdir, ".bam_paths_uuid.json"), paths_uuid)
+ del write_json_to_file
+ del paths_uuid
+
+ @staticmethod
+ def binary_edits_update_single(
+ blendfile_abs,
+ binary_edits,
+ # callback, takes a filepath
+ remap_filepath_cb,
+ ):
+ """
+ After committing a blend file, we need to re-create the binary edits.
+ """
+ from bam.blend import blendfile_path_walker
+ for fp, (rootdir, fp_blend_basename) in blendfile_path_walker.FilePath.visit_from_blend(
+ blendfile_abs,
+ readonly=True,
+ recursive=False,
+ ):
+ f_rel_orig = fp.filepath
+ f_rel = remap_filepath_cb(f_rel_orig)
+ fp.filepath_assign_edits(f_rel, binary_edits)
+
+
+class bam_commands:
+ """
+ Sub-commands from the command-line map directly to these methods.
+ """
+ # fake module
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwargs):
+ raise RuntimeError("%s should not be instantiated" % cls)
+
+ @staticmethod
+ def init(url, directory_name=None):
+ import urllib.parse
+
+ if "@" in url:
+ # first & last :)
+ username, url = url.rpartition('@')[0::2]
+ else:
+ import getpass
+ username = getpass.getuser()
+ print("Using username:", username)
+ del getpass
+
+ parsed_url = urllib.parse.urlsplit(url)
+
+ proj_dirname = os.path.basename(parsed_url.path)
+ if directory_name:
+ proj_dirname = directory_name
+ proj_dirname_abs = os.path.join(os.getcwd(), proj_dirname)
+
+ if os.path.exists(proj_dirname_abs):
+ fatal("Cannot create project %r already exists" % proj_dirname_abs)
+
+ # Create the project directory inside the current directory
+ os.mkdir(proj_dirname_abs)
+ # Create the .bam directory
+ bam_basedir = os.path.join(proj_dirname_abs, bam_config.CONFIG_DIR)
+ os.mkdir(bam_basedir)
+
+ # Add a config file with project url, username and password
+ bam_config.write(
+ data={
+ "url": url,
+ "user": username,
+ "password": "",
+ "config_version": 1
+ },
+ cwd=proj_dirname_abs)
+
+ # Create the default .bamignore
+ # TODO (fsiddi) get this data from the project config on the server
+ bam_config.write_bamignore(cwd=proj_dirname_abs)
+
+ print("Project %r initialized" % proj_dirname)
+
+ @staticmethod
+ def create(session_name):
+ rootdir = bam_config.find_rootdir(abort=True)
+
+ session_rootdir = os.path.join(rootdir, session_name)
+
+ if os.path.exists(session_rootdir):
+ fatal("session path exists %r" % session_rootdir)
+ if rootdir != bam_config.find_rootdir(cwd=session_rootdir):
+ fatal("session is located outside %r" % rootdir)
+
+ def write_empty(f, data):
+ with open(os.path.join(session_rootdir, f), 'wb') as f:
+ f.write(data)
+
+ os.makedirs(session_rootdir)
+
+ write_empty(".bam_paths_uuid.json", b'{}')
+ write_empty(".bam_paths_remap.json", b'{}')
+ write_empty(".bam_deps_remap.json", b'{}')
+
+ print("Session %r created" % session_name)
+
+ @staticmethod
+ def checkout(
+ path,
+ output_dir=None,
+ session_rootdir_partial=None,
+ all_deps=False,
+ ):
+
+ # ---------
+ # constants
+ CHUNK_SIZE = 1024
+
+ cfg = bam_config.load(abort=True)
+
+ if output_dir is None:
+ # fallback to the basename
+ session_rootdir = os.path.splitext(os.path.basename(path))[0]
+ else:
+ output_dir = os.path.realpath(output_dir)
+ if os.sep in output_dir.rstrip(os.sep):
+ # are we a subdirectory?
+ # (we know this exists, since we have config already)
+ project_rootdir = bam_config.find_rootdir(abort=True)
+ if ".." in os.path.relpath(output_dir, project_rootdir).split(os.sep):
+ fatal("Output %r is outside the project path %r" % (output_dir, project_rootdir))
+ del project_rootdir
+ session_rootdir = output_dir
+ del output_dir
+
+ if bam_config.find_sessiondir(cwd=session_rootdir):
+ fatal("Can't checkout in existing session. Use update.")
+
+ payload = {
+ "filepath": path,
+ "command": "checkout",
+ "arguments": json.dumps({
+ "all_deps": all_deps,
+ }),
+ }
+
+ # --------------------------------------------------------------------
+        # First request: simply get a list of files to download
+ #
+ import requests
+ r = requests.get(
+ bam_session.request_url("file"),
+ params=payload,
+ auth=(cfg['user'], cfg['password']),
+ stream=True,
+ )
+
+ if r.status_code not in {200, }:
+ # TODO(cam), make into reusable function?
+ print("Error %d:\n%s" % (r.status_code, next(r.iter_content(chunk_size=1024)).decode('utf-8')))
+ return
+
+ # TODO(cam) how to tell if we get back a message payload? or real data???
+ dst_dir_data = payload['filepath'].split('/')[-1]
+
+ if 1:
+ dst_dir_data += ".zip"
+
+ with open(dst_dir_data, 'wb') as f:
+ import struct
+ ID_MESSAGE = 1
+ ID_PAYLOAD = 2
+ head = r.raw.read(4)
+ if head != b'BAM\0':
+ fatal("bad header from server")
+
+ while True:
+ msg_type, msg_size = struct.unpack("<II", r.raw.read(8))
+ if msg_type == ID_MESSAGE:
+ sys.stdout.write(r.raw.read(msg_size).decode('utf-8'))
+ sys.stdout.flush()
+ elif msg_type == ID_PAYLOAD:
+ # payload
+ break
+
+ tot_size = 0
+ for chunk in r.iter_content(chunk_size=CHUNK_SIZE):
+ if chunk: # filter out keep-alive new chunks
+ tot_size += len(chunk)
+ f.write(chunk)
+ f.flush()
+
+ sys.stdout.write("\rdownload: [%03d%%]" % ((100 * tot_size) // msg_size))
+ sys.stdout.flush()
+ del struct
+
+ # ---------------
+ # extract the zip
+ import zipfile
+ with open(dst_dir_data, 'rb') as zip_file:
+ zip_handle = zipfile.ZipFile(zip_file)
+ zip_handle.extractall(session_rootdir)
+ del zipfile, zip_file
+
+ os.remove(dst_dir_data)
+ sys.stdout.write("\nwritten: %r\n" % session_rootdir)
+
+ # ----
+ # Update cache
+ cachedir = os.path.join(bam_config.find_rootdir(cwd=session_rootdir, abort=True), ".cache")
+ # os.makedirs(cachedir, exist_ok=True)
+
+ # --------------------------------------------------------------------
+        # Second request: download the files we don't have in cache.
+        #
+        # Note that it's possible everything is already cached,
+        # in which case no second request is needed at all.
+ files = []
+ with open(os.path.join(session_rootdir, ".bam_paths_remap.json")) as fp:
+ from bam.utils.system import uuid_from_file
+ paths_remap = json.load(fp)
+
+ paths_uuid = bam_session.load_paths_uuid(session_rootdir)
+
+ for f_src, f_dst in paths_remap.items():
+ if f_src == ".":
+ continue
+
+ uuid = paths_uuid.get(f_src)
+ if uuid is not None:
+ f_dst_abs = os.path.join(cachedir, f_dst)
+ if os.path.exists(f_dst_abs):
+ # check if we need to download this file?
+ uuid_exists = uuid_from_file(f_dst_abs)
+ assert(type(uuid) is type(uuid_exists))
+ if uuid == uuid_exists:
+ continue
+
+ files.append(f_dst)
+
+ del uuid_from_file
+
+ if files:
+ payload = {
+ "command": "checkout_download",
+ "arguments": json.dumps({
+ "files": files,
+ }),
+ }
+ import requests
+ r = requests.get(
+ bam_session.request_url("file"),
+ params=payload,
+ auth=(cfg['user'], cfg['password']),
+ stream=True,
+ )
+
+ if r.status_code not in {200, }:
+ # TODO(cam), make into reusable function?
+ print("Error %d:\n%s" % (r.status_code, next(r.iter_content(chunk_size=1024)).decode('utf-8')))
+ return
+
+ # TODO(cam) how to tell if we get back a message payload? or real data???
+ # needed so we don't read past buffer bounds
+ def iter_content_size(r, size, chunk_size=CHUNK_SIZE):
+ while size >= chunk_size:
+ size -= chunk_size
+ yield r.raw.read(chunk_size)
+ if size:
+ yield r.raw.read(size)
+
+
+ import struct
+ ID_MESSAGE = 1
+ ID_PAYLOAD = 2
+ ID_PAYLOAD_APPEND = 3
+ ID_PAYLOAD_EMPTY = 4
+ ID_DONE = 5
+ head = r.raw.read(4)
+ if head != b'BAM\0':
+ fatal("bad header from server")
+
+ file_index = 0
+ is_header_read = True
+ while True:
+ if is_header_read:
+ msg_type, msg_size = struct.unpack("<II", r.raw.read(8))
+ else:
+ is_header_read = True
+
+ if msg_type == ID_MESSAGE:
+ sys.stdout.write(r.raw.read(msg_size).decode('utf-8'))
+ sys.stdout.flush()
+ elif msg_type == ID_PAYLOAD_EMPTY:
+ file_index += 1
+ elif msg_type == ID_PAYLOAD:
+ f_rel = files[file_index]
+ f_abs = os.path.join(cachedir, files[file_index])
+ file_index += 1
+
+ # server also prints... we could do this a bit different...
+ sys.stdout.write("file: %r" % f_rel)
+ sys.stdout.flush()
+
+ os.makedirs(os.path.dirname(f_abs), exist_ok=True)
+
+ with open(f_abs, "wb") as f:
+ while True:
+ tot_size = 0
+ # No need to worry about filling memory,
+ # total chunk size is capped by the server
+ chunks = []
+ # for chunk in r.iter_content(chunk_size=CHUNK_SIZE):
+ for chunk in iter_content_size(r, msg_size, chunk_size=CHUNK_SIZE):
+ if chunk: # filter out keep-alive new chunks
+ tot_size += len(chunk)
+ # f.write(chunk)
+ # f.flush()
+ chunks.append(chunk)
+
+ sys.stdout.write("\rdownload: [%03d%%]" % ((100 * tot_size) // msg_size))
+ sys.stdout.flush()
+ assert(tot_size == msg_size)
+
+ # decompress all chunks
+ import lzma
+ f.write(lzma.decompress(b''.join(chunks)))
+ f.flush()
+ del chunks
+
+ # take care! - re-reading the next header to see if
+ # we're appending to this file or not
+ msg_type, msg_size = struct.unpack("<II", r.raw.read(8))
+ if msg_type == ID_PAYLOAD_APPEND:
+ continue
+ # otherwise continue the outer loop, without re-reading the header
+
+ # don't re-read the header next iteration
+ is_header_read = False
+ break
+
+ elif msg_type == ID_DONE:
+ break
+ elif msg_type == ID_PAYLOAD_APPEND:
+                    # Should only be handled in the read-loop above
+ raise Exception("Invalid state for message-type %d" % msg_type)
+ else:
+ raise Exception("Unknown message-type %d" % msg_type)
+ del struct
+
+
+ del files
+
+ # ------------
+ # Update Cache
+ #
+ # TODO, remove stale cache
+ # we need this to map to project level paths
+ #
+ # Copy cache into our session before applying binary edits.
+ with open(os.path.join(session_rootdir, ".bam_paths_remap.json")) as fp:
+ paths_remap = json.load(fp)
+ for f_dst, f_src in paths_remap.items():
+ if f_dst == ".":
+ continue
+
+ f_src_abs = os.path.join(cachedir, f_src)
+
+ # this should 'almost' always be true
+ if os.path.exists(f_src_abs):
+
+ f_dst_abs = os.path.join(session_rootdir, f_dst)
+ os.makedirs(os.path.dirname(f_dst_abs), exist_ok=True)
+
+ import shutil
+ # print("from ", f_dst_abs, os.path.exists(f_dst_abs))
+ # print("to ", f_src_abs, os.path.exists(f_src_abs))
+ # print("CREATING: ", f_src_abs)
+ shutil.copyfile(f_src_abs, f_dst_abs)
+ del shutil
+ # import time
+ # time.sleep(10000)
+
+ del paths_remap, cachedir
+ # ...done updating cache
+ # ----------------------
+
+ # -------------------
+ # replay binary edits
+ #
+ # We've downloaded the files pristine from their repo.
+ # This means we can use local cache and avoid re-downloading.
+ #
+ # But for files to work locally we have to apply binary edits given to us by the server.
+
+ sys.stdout.write("replaying edits...\n")
+ bam_session.binary_edits_apply_all(session_rootdir, paths=None, update_uuid=True)
+
+ # ...done with binary edits
+ # -------------------------
+
+ @staticmethod
+ def update(paths):
+ # Load project configuration
+ # cfg = bam_config.load(abort=True)
+
+ # TODO(cam) multiple paths
+ session_rootdir = bam_config.find_sessiondir(paths[0], abort=True)
+        # strip any trailing separator to avoid off-by-one errors in the string mangling below
+ session_rootdir = session_rootdir.rstrip(os.sep)
+
+ paths_uuid = bam_session.load_paths_uuid(session_rootdir)
+
+ if not paths_uuid:
+ print("Nothing to update!")
+ return
+
+ if bam_session.is_dirty(session_rootdir):
+ fatal("Local changes detected, commit before checking out!")
+
+ # -------------------------------------------------------------------------------
+ # TODO(cam) don't guess this important info
+ files = [f for f in os.listdir(session_rootdir) if not f.startswith(".")]
+ files_blend = [f for f in files if f.endswith(".blend")]
+ if files_blend:
+ f = files_blend[0]
+ else:
+ f = files[0]
+ with open(os.path.join(session_rootdir, ".bam_paths_remap.json")) as fp:
+ paths_remap = json.load(fp)
+ paths_remap_relbase = paths_remap.get(".", "")
+ path = os.path.join(paths_remap_relbase, f)
+ # -------------------------------------------------------------------------------
+
+ # merge sessions
+ session_tmp = session_rootdir + ".tmp"
+ bam_commands.checkout(
+ path,
+ output_dir=session_tmp,
+ session_rootdir_partial=session_rootdir,
+ )
+
+ for dirpath, dirnames, filenames in os.walk(session_tmp):
+ for filename in filenames:
+ filepath = os.path.join(dirpath, filename)
+ f_src = filepath
+ f_dst = session_rootdir + filepath[len(session_tmp):]
+ os.rename(f_src, f_dst)
+ import shutil
+ shutil.rmtree(session_tmp)
+
+ @staticmethod
+ def revert(paths):
+ # Copy files back from the cache
+ # a relatively lightweight operation
+
+ def _get_from_path(session_rootdir, cachedir, paths_remap, path_abs):
+ print("====================")
+ print(path_abs)
+ path_abs = os.path.normpath(path_abs)
+ print(paths_remap)
+ for f_src, f_dst in paths_remap.items():
+ if f_src == ".":
+ continue
+ print("-----------------")
+ f_src_abs = os.path.join(session_rootdir, f_src)
+ #if os.path.samefile(f_src_abs, path_abs):
+ print(f_src_abs)
+ print(f_src)
+ print(f_dst)
+ if f_src_abs == path_abs:
+ f_dst_abs = os.path.join(cachedir, f_dst)
+ return f_src, f_src_abs, f_dst_abs
+ return None, None, None
+
+ # 2 passes, once to check, another to execute
+ for pass_ in range(2):
+ for path in paths:
+ path = os.path.normpath(os.path.abspath(path))
+ if os.path.isdir(path):
+ fatal("Reverting a directory not yet supported (%r)" % path)
+
+ # possible we try revert different session's files
+ session_rootdir = bam_config.find_sessiondir(path, abort=True)
+ cachedir = os.path.join(bam_config.find_rootdir(cwd=session_rootdir, abort=True), ".cache")
+ if not os.path.exists(cachedir):
+ fatal("Local cache missing (%r)" %
+ cachedir)
+
+ path_rel = os.path.relpath(path, session_rootdir)
+
+ with open(os.path.join(session_rootdir, ".bam_paths_uuid.json")) as fp:
+ paths_uuid = json.load(fp)
+ if paths_uuid.get(path_rel) is None:
+ fatal("Given path isn't in the session, skipping (%s)" %
+ path_abs)
+
+ # first pass is sanity check only
+ if pass_ == 0:
+ continue
+
+ with open(os.path.join(session_rootdir, ".bam_paths_remap.json")) as fp:
+ paths_remap = json.load(fp)
+ paths_remap_relbase = paths_remap.get(".", "")
+ del fp, paths_remap
+
+ path_cache = bam_session.session_path_to_cache(
+ path,
+ cachedir=cachedir,
+ session_rootdir=session_rootdir,
+ paths_remap_relbase=paths_remap_relbase,
+ )
+
+ if not os.path.exists(path_cache):
+ fatal("Given path missing cache disk (%s)" %
+ path_cache)
+
+ if pass_ == 1:
+ # for real
+ print(" Reverting %r" % path)
+ os.makedirs(os.path.dirname(path), exist_ok=True)
+ import shutil
+ shutil.copyfile(path_cache, path)
+
+ bam_session.binary_edits_apply_all(
+ session_rootdir,
+ paths={path_rel.encode('utf-8')},
+ update_uuid=False,
+ )
+
+ @staticmethod
+ def commit(paths, message):
+ from bam.utils.system import write_json_to_file, write_json_to_zip
+ import requests
+
+ # Load project configuration
+ cfg = bam_config.load(abort=True)
+
+ session_rootdir = bam_config.find_sessiondir(paths[0], abort=True)
+
+ cachedir = os.path.join(bam_config.find_rootdir(cwd=session_rootdir, abort=True), ".cache")
+ basedir = bam_config.find_basedir(
+ cwd=session_rootdir,
+ descr="bam repository",
+ )
+ basedir_temp = os.path.join(basedir, "tmp")
+
+ if os.path.isdir(basedir_temp):
+ fatal("Path found, "
+ "another commit in progress, or remove with path! (%r)" %
+ basedir_temp)
+
+ if not os.path.exists(os.path.join(session_rootdir, ".bam_paths_uuid.json")):
+ fatal("Path not a project session, (%r)" %
+ session_rootdir)
+
+
+ # make a zipfile from session
+ paths_uuid = bam_session.load_paths_uuid(session_rootdir)
+
+ # No longer used
+ """
+ with open(os.path.join(session_rootdir, ".bam_deps_remap.json")) as f:
+ deps_remap = json.load(f)
+ """
+
+ paths_uuid_update = {}
+
+ paths_add, paths_remove, paths_modified = bam_session.status(session_rootdir, paths_uuid_update)
+
+ if not any((paths_add, paths_modified, paths_remove)):
+ print("Nothing to commit!")
+ return
+
+ # we need to update paths_remap as we go
+ with open(os.path.join(session_rootdir, ".bam_paths_remap.json")) as f:
+ paths_remap = json.load(f)
+ paths_remap_relbase = paths_remap.get(".", "")
+ paths_remap_relbase_bytes = paths_remap_relbase.encode("utf-8")
+
+ def remap_filepath_bytes(f_rel):
+ assert(type(f_rel) is bytes)
+ f_rel_in_proj = paths_remap.get(f_rel.decode("utf-8"))
+ if f_rel_in_proj is None:
+ if paths_remap_relbase_bytes:
+ if f_rel.startswith(b'_'):
+ f_rel_in_proj = f_rel[1:]
+ else:
+ f_rel_in_proj = os.path.join(paths_remap_relbase_bytes, f_rel)
+ else:
+ if f_rel.startswith(b'_'):
+ # we're already project relative
+ f_rel_in_proj = f_rel[1:]
+ else:
+ f_rel_in_proj = f_rel
+ else:
+ f_rel_in_proj = f_rel_in_proj.encode("utf-8")
+ return f_rel_in_proj
+
+ def remap_filepath(f_rel):
+ assert(type(f_rel) is str)
+ f_rel_in_proj = paths_remap.get(f_rel)
+ if f_rel_in_proj is None:
+ if paths_remap_relbase:
+ if f_rel.startswith("_"):
+ f_rel_in_proj = f_rel[1:]
+ else:
+ f_rel_in_proj = os.path.join(paths_remap_relbase, f_rel)
+ else:
+ if f_rel.startswith("_"):
+ # we're already project relative
+ f_rel_in_proj = f_rel[1:]
+ else:
+ f_rel_in_proj = f_rel
+
+ return f_rel_in_proj
+
+ def remap_cb(f, data):
+ # check for the absolute path hint
+ if f.startswith(b'//_'):
+ proj_base_b = data
+ return b'//' + os.path.relpath(f[3:], proj_base_b)
+ return None
+
+ def remap_file(f_rel, f_abs):
+ f_abs_remap = os.path.join(basedir_temp, f_rel)
+ dir_remap = os.path.dirname(f_abs_remap)
+ os.makedirs(dir_remap, exist_ok=True)
+
+ # final location in the project
+ f_rel_in_proj = remap_filepath(f_rel)
+ proj_base_b = os.path.dirname(f_rel_in_proj).encode("utf-8")
+
+ from bam.blend import blendfile_pack_restore
+ blendfile_pack_restore.blendfile_remap(
+ f_abs.encode('utf-8'),
+ dir_remap.encode('utf-8'),
+ deps_remap_cb=remap_cb,
+ deps_remap_cb_userdata=proj_base_b,
+ )
+ return f_abs_remap
+
+ for f_rel, f_abs in list(paths_modified.items()):
+ if f_abs.endswith(".blend"):
+ f_abs_remap = remap_file(f_rel, f_abs)
+ if os.path.exists(f_abs_remap):
+ paths_modified[f_rel] = f_abs_remap
+
+ for f_rel, f_abs in list(paths_add.items()):
+ if f_abs.endswith(".blend"):
+ f_abs_remap = remap_file(f_rel, f_abs)
+ if os.path.exists(f_abs_remap):
+ paths_add[f_rel] = f_abs_remap
+
+ """
+ deps = deps_remap.get(f_rel)
+ if deps:
+ # ----
+ # remap!
+ f_abs_remap = os.path.join(basedir_temp, f_rel)
+ dir_remap = os.path.dirname(f_abs_remap)
+ os.makedirs(dir_remap, exist_ok=True)
+ import blendfile_pack_restore
+ blendfile_pack_restore.blendfile_remap(
+ f_abs.encode('utf-8'),
+ dir_remap.encode('utf-8'),
+ deps,
+ )
+ if os.path.exists(f_abs_remap):
+ f_abs = f_abs_remap
+ paths_modified[f_rel] = f_abs
+ """
+
+ # -------------------------
+ print("Now make a zipfile")
+ import zipfile
+ temp_zip = os.path.join(session_rootdir, ".bam_tmp.zip")
+ with zipfile.ZipFile(temp_zip, 'w', zipfile.ZIP_DEFLATED) as zip_handle:
+ for paths_dict, op in ((paths_modified, 'M'), (paths_add, 'A')):
+ for (f_rel, f_abs) in paths_dict.items():
+ print(" packing (%s): %r" % (op, f_abs))
+ zip_handle.write(f_abs, arcname=f_rel)
+
+ # make a paths remap that only includes modified files
+ # TODO(cam), from 'packer.py'
+
+ paths_remap_subset = {
+ f_rel: f_rel_in_proj
+ for f_rel, f_rel_in_proj in paths_remap.items() if f_rel in paths_modified}
+ paths_remap_subset.update({
+ f_rel: remap_filepath(f_rel)
+ for f_rel in paths_add})
+
+ # paths_remap_subset.update(paths_remap_subset_add)
+ write_json_to_zip(zip_handle, ".bam_paths_remap.json", paths_remap_subset)
+
+ # build a list of path manipulation operations
+ paths_ops = {}
+ # paths_remove ...
+ for f_rel, f_abs in paths_remove.items():
+ # TODO
+ f_abs_remote = paths_remap[f_rel]
+ paths_ops[f_abs_remote] = 'D'
+
+ write_json_to_zip(zip_handle, ".bam_paths_ops.json", paths_ops)
+ log.debug(paths_ops)
+
+
+ # --------------
+ # Commit Request
+ payload = {
+ "command": "commit",
+ "arguments": json.dumps({
+ 'message': message,
+ }),
+ }
+ files = {
+ "file": open(temp_zip, 'rb'),
+ }
+
+ with files["file"]:
+ r = requests.put(
+ bam_session.request_url("file"),
+ params=payload,
+ auth=(cfg["user"], cfg["password"]),
+ files=files)
+
+ os.remove(temp_zip)
+
+ try:
+ r_json = r.json()
+ print(r_json.get("message", "<empty>"))
+ except Exception:
+ print(r.text)
+
+ # TODO, handle error cases
+ ok = True
+ if ok:
+
+ # ----------
+ # paths_uuid
+ paths_uuid.update(paths_uuid_update)
+ write_json_to_file(os.path.join(session_rootdir, ".bam_paths_uuid.json"), paths_uuid_update)
+
+ # -----------
+ # paths_remap
+ paths_remap.update(paths_remap_subset)
+ for k in paths_remove:
+ del paths_remap[k]
+ write_json_to_file(os.path.join(session_rootdir, ".bam_paths_remap.json"), paths_remap)
+ del write_json_to_file
+
+ # ------------------
+ # Update Local Cache
+ #
+            # We now have 'pristine' files in basedir_temp and the commit went fine,
+            # so move these into the local cache AND remake the binary_edit data.
+            # Since files were modified, skipping this would mean we couldn't revert
+            # or avoid re-downloading the files later.
+ binary_edits_all_update = {}
+ binary_edits_all_remove = set()
+ for paths_dict, op in ((paths_modified, 'M'), (paths_add, 'A')):
+ for f_rel, f_abs in paths_dict.items():
+ print(" caching (%s): %r" % (op, f_abs))
+ f_dst_abs = os.path.join(cachedir, f_rel)
+ os.makedirs(os.path.dirname(f_dst_abs), exist_ok=True)
+ if f_abs.startswith(basedir_temp):
+ os.rename(f_abs, f_dst_abs)
+ else:
+ import shutil
+ shutil.copyfile(f_abs, f_dst_abs)
+ del shutil
+ binary_edits = binary_edits_all_update[f_rel.encode('utf-8')] = []
+
+ # update binary_edits
+ if f_rel.endswith(".blend"):
+ bam_session.binary_edits_update_single(
+ f_dst_abs,
+ binary_edits,
+ remap_filepath_cb=remap_filepath_bytes,
+ )
+ for f_rel, f_abs in paths_remove.items():
+                # keys of binary_edits_all are bytes, so encode to match
+                binary_edits_all_remove.add(f_rel.encode('utf-8'))
+
+ paths_edit_abs = os.path.join(session_rootdir, ".bam_paths_edit.data")
+ if binary_edits_all_update or binary_edits_all_remove:
+ if os.path.exists(paths_edit_abs):
+ with open(paths_edit_abs, 'rb') as fh:
+ import pickle
+ binary_edits_all = pickle.load(fh)
+ del pickle
+ else:
+ binary_edits_all = {}
+
+                if binary_edits_all_remove and binary_edits_all:
+                    for f_rel in binary_edits_all_remove:
+                        binary_edits_all.pop(f_rel, None)
+ if binary_edits_all_update:
+ binary_edits_all.update(binary_edits_all_update)
+
+ import pickle
+ with open(paths_edit_abs, 'wb') as fh:
+ print()
+ pickle.dump(binary_edits_all, fh, pickle.HIGHEST_PROTOCOL)
+ del binary_edits_all
+ del paths_edit_abs
+ del pickle
+
+ # ------------------------------
+ # Cleanup temp dir to finish off
+ if os.path.exists(basedir_temp):
+ import shutil
+ shutil.rmtree(basedir_temp)
+ del shutil
+
+ @staticmethod
+ def status(paths, use_json=False):
+ # TODO(cam) multiple paths
+ path = paths[0]
+ del paths
+
+ session_rootdir = bam_config.find_sessiondir(path, abort=True)
+ paths_add, paths_remove, paths_modified = bam_session.status(session_rootdir)
+
+ if not use_json:
+ for f in sorted(paths_add):
+ print(" A: %s" % f)
+ for f in sorted(paths_modified):
+ print(" M: %s" % f)
+ for f in sorted(paths_remove):
+ print(" D: %s" % f)
+ else:
+ ret = []
+ for f in sorted(paths_add):
+ ret.append(("A", f))
+ for f in sorted(paths_modified):
+ ret.append(("M", f))
+ for f in sorted(paths_remove):
+ ret.append(("D", f))
+
+ print(json.dumps(ret))
+
+ @staticmethod
+ def list_dir(paths, use_full=False, use_json=False):
+ import requests
+
+ # Load project configuration
+ cfg = bam_config.load(abort=True)
+
+ # TODO(cam) multiple paths
+ path = paths[0]
+ del paths
+
+ payload = {
+ "path": path,
+ }
+ r = requests.get(
+ bam_session.request_url("file_list"),
+ params=payload,
+ auth=(cfg['user'], cfg['password']),
+ stream=True,
+ )
+
+ r_json = r.json()
+ items = r_json.get("items_list")
+ if items is None:
+ fatal(r_json.get("message", "<empty>"))
+
+ items.sort()
+
+ if use_json:
+ ret = []
+ for (name_short, name_full, file_type) in items:
+ ret.append((name_short, file_type))
+
+ print(json.dumps(ret))
+ else:
+ def strip_dot_slash(f):
+ return f[2:] if f.startswith("./") else f
+
+ for (name_short, name_full, file_type) in items:
+ if file_type == "dir":
+ print(" %s/" % (strip_dot_slash(name_full) if use_full else name_short))
+ for (name_short, name_full, file_type) in items:
+ if file_type != "dir":
+ print(" %s" % (strip_dot_slash(name_full) if use_full else name_short))
+
+ @staticmethod
+ def deps(paths, recursive=False, use_json=False):
+
+ def deps_path_walker():
+ from bam.blend import blendfile_path_walker
+ for blendfile_src in paths:
+ blendfile_src = blendfile_src.encode('utf-8')
+ yield from blendfile_path_walker.FilePath.visit_from_blend(
+ blendfile_src,
+ readonly=True,
+ recursive=recursive,
+ )
+
+ def status_walker():
+ for fp, (rootdir, fp_blend_basename) in deps_path_walker():
+ f_rel = fp.filepath
+ f_abs = fp.filepath_absolute
+
+ yield (
+ # blendfile-src
+ os.path.join(fp.basedir, fp_blend_basename).decode('utf-8'),
+                # filepath-dst
+ f_rel.decode('utf-8'),
+ f_abs.decode('utf-8'),
+ # filepath-status
+ "OK" if os.path.exists(f_abs) else "MISSING FILE",
+ )
+
+ if use_json:
+ is_first = True
+ # print in parts, so we don't block the output
+ print("[")
+ for f_src, f_dst, f_dst_abs, f_status in status_walker():
+ if is_first:
+ is_first = False
+ else:
+ print(",")
+
+ print(json.dumps((f_src, f_dst, f_dst_abs, f_status)), end="")
+ print("]")
+ else:
+ for f_src, f_dst, f_dst_abs, f_status in status_walker():
+ print(" %r -> (%r = %r) %s" % (f_src, f_dst, f_dst_abs, f_status))
+
+ @staticmethod
+ def pack(
+ paths,
+ output,
+ mode,
+ repository_base_path=None,
+ all_deps=False,
+ use_quiet=False,
+ warn_remap_externals=False,
+ compress_level=-1,
+ filename_filter=None,
+ ):
+ # Local packing (don't use any project/session stuff)
+ from .blend import blendfile_pack
+
+ # TODO(cam) multiple paths
+ path = paths[0]
+ del paths
+
+ if output is None:
+ fatal("Output path must be given when packing with: --mode=FILE")
+
+ if os.path.isdir(output):
+ if mode == "ZIP":
+ output = os.path.join(output, os.path.splitext(path)[0] + ".zip")
+ else: # FILE
+ output = os.path.join(output, os.path.basename(path))
+
+ if use_quiet:
+ report = lambda msg: None
+ else:
+ report = lambda msg: print(msg, end="")
+
+ if repository_base_path is not None:
+ repository_base_path = repository_base_path.encode('utf-8')
+
+ # replace var with a pattern matching callback
+ filename_filter_cb = blendfile_pack.exclusion_filter(filename_filter)
+
+ for msg in blendfile_pack.pack(
+ path.encode('utf-8'),
+ output.encode('utf-8'),
+ mode=mode,
+ all_deps=all_deps,
+ repository_base_path=repository_base_path,
+ compress_level=compress_level,
+ report=report,
+ warn_remap_externals=warn_remap_externals,
+ use_variations=True,
+ filename_filter=filename_filter_cb,
+ ):
+ pass
+
+ @staticmethod
+ def copy(
+ paths,
+ output,
+ base,
+ all_deps=False,
+ use_quiet=False,
+ filename_filter=None,
+ ):
+ # Local packing (don't use any project/session stuff)
+ from .blend import blendfile_copy
+ from bam.utils.system import is_subdir
+
+ paths = [os.path.abspath(path) for path in paths]
+ base = os.path.abspath(base)
+ output = os.path.abspath(output)
+
+ # check all blends are in the base path
+ for path in paths:
+ if not is_subdir(path, base):
+ fatal("Input blend file %r is not a sub directory of %r" % (path, base))
+
+ if use_quiet:
+ report = lambda msg: None
+ else:
+ report = lambda msg: print(msg, end="")
+
+ # replace var with a pattern matching callback
+ if filename_filter:
+ # convert string into regex callback
+ # "*.txt;*.png;*.rst" --> r".*\.txt$|.*\.png$|.*\.rst$"
+ import re
+ import fnmatch
+
+ compiled_pattern = re.compile(
+ b'|'.join(fnmatch.translate(f).encode('utf-8')
+ for f in filename_filter.split(";") if f),
+ re.IGNORECASE,
+ )
+
+ def filename_filter(f):
+ return (not filename_filter.compiled_pattern.match(f))
+ filename_filter.compiled_pattern = compiled_pattern
+
+ del compiled_pattern
+ del re, fnmatch
+
+ for msg in blendfile_copy.copy_paths(
+ [path.encode('utf-8') for path in paths],
+ output.encode('utf-8'),
+ base.encode('utf-8'),
+ all_deps=all_deps,
+ report=report,
+ filename_filter=filename_filter,
+ ):
+ pass
+
+ @staticmethod
+ def remap_start(
+ paths,
+ use_json=False,
+ ):
+ filepath_remap = "bam_remap.data"
+
+ for p in paths:
+ if not os.path.exists(p):
+ fatal("Path %r not found!" % p)
+ paths = [p.encode('utf-8') for p in paths]
+
+ if os.path.exists(filepath_remap):
+ fatal("Remap in progress, run with 'finish' or remove %r" % filepath_remap)
+
+ from bam.blend import blendfile_path_remap
+ remap_data = blendfile_path_remap.start(
+ paths,
+ use_json=use_json,
+ )
+
+ with open(filepath_remap, 'wb') as fh:
+ import pickle
+ pickle.dump(remap_data, fh, pickle.HIGHEST_PROTOCOL)
+ del pickle
+
+ @staticmethod
+ def remap_finish(
+ paths,
+ force_relative=False,
+ dry_run=False,
+ use_json=False,
+ ):
+ filepath_remap = "bam_remap.data"
+
+ for p in paths:
+ if not os.path.exists(p):
+ fatal("Path %r not found!" % p)
+ # bytes needed for blendfile_path_remap API
+ paths = [p.encode('utf-8') for p in paths]
+
+ if not os.path.exists(filepath_remap):
+ fatal("Remap not started, run with 'start', (%r not found)" % filepath_remap)
+
+ with open(filepath_remap, 'rb') as fh:
+ import pickle
+ remap_data = pickle.load(fh)
+ del pickle
+
+ from bam.blend import blendfile_path_remap
+ blendfile_path_remap.finish(
+ paths, remap_data,
+ force_relative=force_relative,
+ dry_run=dry_run,
+ use_json=use_json,
+ )
+
+ if not dry_run:
+ os.remove(filepath_remap)
+
+ @staticmethod
+ def remap_reset(
+ use_json=False,
+ ):
+ filepath_remap = "bam_remap.data"
+ if os.path.exists(filepath_remap):
+ os.remove(filepath_remap)
+ else:
+ fatal("remapping not started, nothing to do!")
+
+
+# -----------------------------------------------------------------------------
+# Argument Parser
+
+def init_argparse_common(
+ subparse,
+ use_json=False,
+ use_all_deps=False,
+ use_quiet=False,
+ use_compress_level=False,
+ use_exclude=False,
+ ):
+ import argparse
+
+ if use_json:
+ subparse.add_argument(
+ "-j", "--json", dest="json", action='store_true',
+ help="Generate JSON output",
+ )
+ if use_all_deps:
+ subparse.add_argument(
+ "-a", "--all-deps", dest="all_deps", action='store_true',
+ help="Follow all dependencies (unused indirect dependencies too)",
+ )
+ if use_quiet:
+ subparse.add_argument(
+ "-q", "--quiet", dest="use_quiet", action='store_true',
+ help="Suppress status output",
+ )
+ if use_compress_level:
+ class ChoiceToZlibLevel(argparse.Action):
+ def __call__(self, parser, namespace, value, option_string=None):
+ setattr(namespace, self.dest, {"default": -1, "fast": 1, "best": 9, "store": 0}[value[0]])
+
+ subparse.add_argument(
+ "-c", "--compress", dest="compress_level", nargs=1, default=-1, metavar='LEVEL',
+ action=ChoiceToZlibLevel,
+ choices=('default', 'fast', 'best', 'store'),
+ help="Compression level for resulting archive",
+ )
+ if use_exclude:
+ subparse.add_argument(
+ "-e", "--exclude", dest="exclude", metavar='PATTERN(S)', required=False,
+ default="",
+ help="""
+ Optionally exclude files from the pack.
+
+ Using Unix shell-style wildcards *(case insensitive)*.
+ ``--exclude="*.png"``
+
+ Multiple patterns can be passed using the ``;`` separator.
+ ``--exclude="*.txt;*.avi;*.wav"``
+ """
+ )
+
+
+def create_argparse_init(subparsers):
+ subparse = subparsers.add_parser("init",
+ help="Initialize a new project directory")
+ subparse.add_argument(
+ dest="url",
+ help="Project repository url",
+ )
+ subparse.add_argument(
+ dest="directory_name", nargs="?",
+ help="Directory name",
+ )
+ subparse.set_defaults(
+ func=lambda args:
+ bam_commands.init(args.url, args.directory_name),
+ )
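
Example invocations (the server URL, user and directory names are made up); prefixing the URL with ``user@`` overrides the user name that ``init`` would otherwise take from ``getpass``:

    bam init http://bam.example.org:5000/myproject
    bam init artist@http://bam.example.org:5000/myproject my_local_dir
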
+
+
+def create_argparse_create(subparsers):
+ subparse = subparsers.add_parser(
+ "create", aliases=("cr",),
+ help="Create a new empty session directory",
+ )
+ subparse.add_argument(
+ dest="session_name", nargs=1,
+ help="Name of session directory",
+ )
+ subparse.set_defaults(
+ func=lambda args:
+ bam_commands.create(args.session_name[0]),
+ )
+
+
+def create_argparse_checkout(subparsers):
+ subparse = subparsers.add_parser(
+ "checkout", aliases=("co",),
+ help="Checkout a remote path in an existing project",
+ )
+ subparse.add_argument(
+ dest="path", type=str, metavar='REMOTE_PATH',
+ help="Path to checkout on the server",
+ )
+ subparse.add_argument(
+ "-o", "--output", dest="output", type=str, metavar='DIRNAME',
+ help="Local name to checkout the session into (optional, falls back to path name)",
+ )
+
+ init_argparse_common(subparse, use_all_deps=True)
+
+ subparse.set_defaults(
+ func=lambda args:
+        bam_commands.checkout(args.path, args.output, all_deps=args.all_deps),
+ )
+
+
+def create_argparse_update(subparsers):
+ subparse = subparsers.add_parser(
+ "update", aliases=("up",),
+ help="Update a local session with changes from the remote project",
+ )
+ subparse.add_argument(
+ dest="paths", nargs="*",
+ help="Path(s) to operate on",
+ )
+ subparse.set_defaults(
+ func=lambda args:
+ bam_commands.update(args.paths or ["."]),
+ )
+
+
+def create_argparse_revert(subparsers):
+ subparse = subparsers.add_parser(
+ "revert", aliases=("rv",),
+ help="Reset local changes back to the state at time of checkout",
+ )
+ subparse.add_argument(
+ dest="paths", nargs="+",
+ help="Path(s) to operate on",
+ )
+ subparse.set_defaults(
+ func=lambda args:
+ bam_commands.revert(args.paths or ["."]),
+ )
+
+
+def create_argparse_commit(subparsers):
+ subparse = subparsers.add_parser(
+ "commit", aliases=("ci",),
+ help="Commit changes from a session to the remote project",
+ )
+ subparse.add_argument(
+ "-m", "--message", dest="message", metavar='MESSAGE',
+ required=True,
+ help="Commit message",
+ )
+ subparse.add_argument(
+ dest="paths", nargs="*",
+ help="paths to commit",
+ )
+ subparse.set_defaults(
+ func=lambda args:
+ bam_commands.commit(args.paths or ["."], args.message),
+ )
+
+
+def create_argparse_status(subparsers):
+ subparse = subparsers.add_parser(
+ "status", aliases=("st",),
+ help="Show any edits made in the local session",
+ )
+ subparse.add_argument(
+ dest="paths", nargs="*",
+ help="Path(s) to operate on",
+ )
+
+ init_argparse_common(subparse, use_json=True)
+
+ subparse.set_defaults(
+ func=lambda args:
+ bam_commands.status(args.paths or ["."], use_json=args.json),
+ )
+
+
+def create_argparse_list(subparsers):
+ subparse = subparsers.add_parser(
+ "list", aliases=("ls",),
+ help="List the contents of a remote directory",
+ )
+ subparse.add_argument(
+ dest="paths", nargs="*",
+ help="Path(s) to operate on",
+ )
+ subparse.add_argument(
+ "-f", "--full", dest="full", action='store_true',
+ help="Show the full paths",
+ )
+
+ init_argparse_common(subparse, use_json=True)
+
+ subparse.set_defaults(
+ func=lambda args:
+ bam_commands.list_dir(
+ args.paths or ["."],
+ use_full=args.full,
+ use_json=args.json,
+ ),
+ )
+
+
+def create_argparse_deps(subparsers):
+ subparse = subparsers.add_parser(
+ "deps", aliases=("dp",),
+ help="List dependencies for file(s)",
+ )
+ subparse.add_argument(
+ dest="paths", nargs="+",
+ help="Path(s) to operate on",
+ )
+ subparse.add_argument(
+ "-r", "--recursive", dest="recursive", action='store_true',
+ help="Scan dependencies recursively",
+ )
+
+ init_argparse_common(subparse, use_json=True)
+
+ subparse.set_defaults(
+ func=lambda args:
+ bam_commands.deps(
+ args.paths, args.recursive,
+ use_json=args.json),
+ )
+
+
+def create_argparse_pack(subparsers):
+ import argparse
+ subparse = subparsers.add_parser(
+ "pack", aliases=("pk",),
+ help="Pack a blend file and its dependencies into an archive",
+ description=
+ """
+ You can simply pack a blend file like this to create a zip-file of the same name.
+
+ .. code-block:: sh
+
+ bam pack /path/to/scene.blend
+
+ You may also want to give an explicit output directory.
+
+ This command is used for packing a ``.blend`` file into a ``.zip`` file for redistribution.
+
+ .. code-block:: sh
+
+ # pack a blend with maximum compression for online downloads
+ bam pack /path/to/scene.blend --output my_scene.zip --compress=best
+
+ You may also pack a .blend while keeping your whole repository hierarchy by passing
+        the path to the top directory of the repository, and ask to be warned about dependency paths
+ outside of that base path:
+
+ .. code-block:: sh
+
+ bam pack --repo="/path/to/repo" --warn-external /path/to/repo/path/to/scene.blend
+
+ """,
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ )
+ subparse.add_argument(
+ dest="paths", nargs="+",
+ help="Path(s) to operate on",
+ )
+ subparse.add_argument(
+ "-o", "--output", dest="output", metavar='FILE', required=False,
+ help="Output file or a directory when multiple inputs are passed",
+ )
+ subparse.add_argument(
+ "-m", "--mode", dest="mode", metavar='MODE', required=False,
+ default='ZIP',
+ choices=('ZIP', 'FILE'),
+ help="Output file or a directory when multiple inputs are passed",
+ )
+ subparse.add_argument(
+ "--repo", dest="repository_base_path", metavar='DIR', required=False,
+ help="Base directory from which you want to keep existing hierarchy (usually to repository directory),"
+ "will default to packed blend file's directory if not specified",
+ )
+ subparse.add_argument(
+ "--warn-external", dest="warn_remap_externals", action='store_true',
+ help="Warn for every dependency outside of given repository base path",
+ )
+
+ init_argparse_common(subparse, use_all_deps=True, use_quiet=True, use_compress_level=True, use_exclude=True)
+
+ subparse.set_defaults(
+ func=lambda args:
+ bam_commands.pack(
+ args.paths,
+ args.output or
+ ((os.path.splitext(args.paths[0])[0] + ".zip")
+ if args.mode == 'ZIP' else None),
+ args.mode,
+ repository_base_path=args.repository_base_path or None,
+ all_deps=args.all_deps,
+ use_quiet=args.use_quiet,
+ warn_remap_externals=args.warn_remap_externals,
+ compress_level=args.compress_level,
+ filename_filter=args.exclude,
+ ),
+ )
+
+
+def create_argparse_copy(subparsers):
+ import argparse
+ subparse = subparsers.add_parser(
+ "copy", aliases=("cp",),
+ help="Copy blend file(s) and their dependencies to a new location (maintaining the directory structure).",
+ description=
+ """
+ The line below will copy ``scene.blend`` to ``/destination/to/scene.blend``.
+
+ .. code-block:: sh
+
+ bam copy /path/to/scene.blend --base=/path --output=/destination
+
+ .. code-block:: sh
+
+ # you can also copy multiple files
+ bam copy /path/to/scene.blend /path/other/file.blend --base=/path --output /other/destination
+ """,
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ )
+ subparse.add_argument(
+ dest="paths", nargs="+",
+ help="Path(s) to blend files to operate on",
+ )
+ subparse.add_argument(
+ "-o", "--output", dest="output", metavar='DIR', required=True,
+ help="Output directory where where files will be copied to",
+ )
+ subparse.add_argument(
+ "-b", "--base", dest="base", metavar='DIR', required=True,
+ help="Base directory for input paths (files outside this path will be omitted)",
+ )
+
+ init_argparse_common(subparse, use_all_deps=True, use_quiet=True, use_exclude=True)
+
+ subparse.set_defaults(
+ func=lambda args:
+ bam_commands.copy(
+ args.paths,
+ args.output,
+ args.base,
+ all_deps=args.all_deps,
+ use_quiet=args.use_quiet,
+ filename_filter=args.exclude,
+ ),
+ )
+
+
+def create_argparse_remap(subparsers):
+ import argparse
+
+ subparse = subparsers.add_parser(
+ "remap",
+ help="Remap blend file paths",
+ description=
+ """
+ This command is a 3 step process:
+
+ - first run ``bam remap start .`` which stores the current state of your project (recursively).
+ - then re-arrange the files on the filesystem (rename, relocate).
+        - finally run ``bam remap finish`` to apply the changes, updating the ``.blend`` files' internal paths.
+
+
+ .. code-block:: sh
+
+ cd /my/project
+
+ bam remap start .
+ mv photos textures
+            mv house_v14_library.blend house_library.blend
+ bam remap finish
+
+ .. note::
+
+ Remapping creates a file called ``bam_remap.data`` in the current directory.
+ You can relocate the entire project to a new location but on executing ``finish``,
+ this file must be accessible from the current directory.
+
+ .. note::
+
+        This command depends on the files' unique contents,
+        so take care not to modify the files once a remap has started.
+ """,
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ )
+
+ subparse_remap_commands = subparse.add_subparsers(
+ title="Remap commands",
+ description='valid subcommands',
+ help='additional help',
+ )
+ sub_subparse = subparse_remap_commands.add_parser(
+ "start",
+ help="Start remapping the blend files",
+ )
+
+ sub_subparse.add_argument(
+ dest="paths", nargs="*",
+ help="Path(s) to operate on",
+ )
+ init_argparse_common(sub_subparse, use_json=True)
+
+ sub_subparse.set_defaults(
+ func=lambda args:
+ bam_commands.remap_start(
+ args.paths or ["."],
+ use_json=args.json,
+ ),
+ )
+
+ sub_subparse = subparse_remap_commands.add_parser(
+ "finish",
+ help="Finish remapping the blend files",
+ )
+ sub_subparse.add_argument(
+ dest="paths", nargs="*",
+ help="Path(s) to operate on",
+ )
+ sub_subparse.add_argument(
+ "-r", "--force-relative", dest="force_relative", action='store_true',
+ help="Make all remapped paths relative (even if they were originally absolute)",
+ )
+ sub_subparse.add_argument(
+ "-d", "--dry-run", dest="dry_run", action='store_true',
+ help="Just print output as if the paths are being run",
+ )
+ init_argparse_common(sub_subparse, use_json=True)
+
+ sub_subparse.set_defaults(
+ func=lambda args:
+ bam_commands.remap_finish(
+ args.paths or ["."],
+ force_relative=args.force_relative,
+ dry_run=args.dry_run,
+ use_json=args.json,
+ ),
+ )
+
+ sub_subparse = subparse_remap_commands.add_parser(
+ "reset",
+ help="Cancel path remapping",
+ )
+ init_argparse_common(sub_subparse, use_json=True)
+
+ sub_subparse.set_defaults(
+ func=lambda args:
+ bam_commands.remap_reset(
+ use_json=args.json,
+ ),
+ )
+
+
+def create_argparse():
+ import argparse
+
+ usage_text = (
+ "BAM!\n" +
+ __doc__
+ )
+
+ parser = argparse.ArgumentParser(
+ prog="bam",
+ description=usage_text,
+ )
+
+ subparsers = parser.add_subparsers(
+ title='subcommands',
+ description='valid subcommands',
+ help='additional help',
+ )
+
+ create_argparse_init(subparsers)
+ create_argparse_create(subparsers)
+ create_argparse_checkout(subparsers)
+ create_argparse_commit(subparsers)
+ create_argparse_update(subparsers)
+ create_argparse_revert(subparsers)
+ create_argparse_status(subparsers)
+ create_argparse_list(subparsers)
+
+ # non-bam project commands
+ create_argparse_deps(subparsers)
+ create_argparse_pack(subparsers)
+ create_argparse_copy(subparsers)
+ create_argparse_remap(subparsers)
+
+ return parser
+
+
+def main(argv=None):
+ if argv is None:
+ argv = sys.argv[1:]
+
+ logging.basicConfig(
+ level=logging.INFO,
+ format='%(asctime)-15s %(levelname)8s %(name)s %(message)s',
+ )
+
+ parser = create_argparse()
+ args = parser.parse_args(argv)
+
+ # call subparser callback
+ if not hasattr(args, "func"):
+ parser.print_help()
+ return
+
+ args.func(args)
+
+
+if __name__ == "__main__":
+ raise Exception("This module can't be executed directly, Call '../bam_cli.py'")
diff --git a/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/pack.py b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/pack.py
new file mode 100644
index 00000000..8df5b9d4
--- /dev/null
+++ b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/pack.py
@@ -0,0 +1,10 @@
+"""CLI interface to BAM-pack.
+
+Run this using:
+
+python -m bam.pack
+"""
+
+if __name__ == '__main__':
+ from bam.blend import blendfile_pack
+ blendfile_pack.main()
diff --git a/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/utils/__init__.py b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/utils/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/utils/__init__.py
diff --git a/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/utils/system.py b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/utils/system.py
new file mode 100644
index 00000000..313173ee
--- /dev/null
+++ b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/bam/utils/system.py
@@ -0,0 +1,143 @@
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# ***** END GPL LICENCE BLOCK *****
+
+def colorize_dummy(msg, color=None):
+ return msg
+
+_USE_COLOR = True
+if _USE_COLOR:
+ color_codes = {
+ 'black': '\033[0;30m',
+ 'bright_gray': '\033[0;37m',
+ 'blue': '\033[0;34m',
+ 'white': '\033[1;37m',
+ 'green': '\033[0;32m',
+ 'bright_blue': '\033[1;34m',
+ 'cyan': '\033[0;36m',
+ 'bright_green': '\033[1;32m',
+ 'red': '\033[0;31m',
+ 'bright_cyan': '\033[1;36m',
+ 'purple': '\033[0;35m',
+ 'bright_red': '\033[1;31m',
+ 'yellow': '\033[0;33m',
+ 'bright_purple':'\033[1;35m',
+ 'dark_gray': '\033[1;30m',
+ 'bright_yellow':'\033[1;33m',
+ 'normal': '\033[0m',
+ }
+
+ def colorize(msg, color=None):
+ return (color_codes[color] + msg + color_codes['normal'])
+else:
+ colorize = colorize_dummy
+
+
+def uuid_from_file(fn, block_size=1 << 20):
+ """
+ Returns an arbitrary sized unique ASCII string based on the file contents.
+ (exact hashing method may change).
+ """
+ with open(fn, 'rb') as f:
+ # first get the size
+ import os
+ f.seek(0, os.SEEK_END)
+ size = f.tell()
+ f.seek(0, os.SEEK_SET)
+ del os
+ # done!
+
+ import hashlib
+        hasher = hashlib.new('sha512')
+        while True:
+            data = f.read(block_size)
+            if not data:
+                break
+            hasher.update(data)
+        # prefix with the file size, skipping the '0x' from hex()
+        return hex(size)[2:] + hasher.hexdigest()
+
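
A small usage sketch (the file paths are made up), mirroring how the checkout code above compares the identifier stored in ``.bam_paths_uuid.json`` against a cached copy:

    from bam.utils.system import uuid_from_file

    # identical contents produce identical identifiers, so the download can be skipped
    if uuid_from_file("cache/textures/wood.png") == uuid_from_file("textures/wood.png"):
        print("cached copy is up to date, skipping download")
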
+
+def write_json_to_zip(zip_handle, path, data=None):
+ import json
+ zip_handle.writestr(
+ path,
+ json.dumps(
+ data,
+ check_circular=False,
+ # optional (pretty)
+ sort_keys=True, indent=4, separators=(',', ': '),
+ ).encode('utf-8'))
+
+
+def write_json_to_file(path, data):
+ import json
+ with open(path, 'w') as file_handle:
+ json.dump(
+ data, file_handle, ensure_ascii=False,
+ check_circular=False,
+ # optional (pretty)
+ sort_keys=True, indent=4, separators=(',', ': '),
+ )
+
+
+def is_compressed_filetype(filepath):
+ """
+ Use to check if we should compress files in a zip.
+ """
+ # for now, only include files which Blender is likely to reference
+ import os
+ assert(isinstance(filepath, bytes))
+ return os.path.splitext(filepath)[1].lower() in {
+ # images
+ b'.exr',
+ b'.jpg', b'.jpeg',
+ b'.png',
+
+ # audio
+ b'.aif', b'.aiff',
+ b'.mp3',
+ b'.ogg', b'.ogv',
+ b'.wav',
+
+ # video
+ b'.avi',
+ b'.mkv',
+ b'.mov',
+ b'.mpg', b'.mpeg',
+
+ # archives
+ # '.bz2', '.tbz',
+ # '.gz', '.tgz',
+ # '.zip',
+ }
+
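
One plausible way to use this check when writing a zip, assuming the caller stores already-compressed media as-is (the caller below is illustrative, not taken from ``blendfile_pack``):

    import zipfile
    from bam.utils.system import is_compressed_filetype

    # example path; the function expects bytes
    name = b'textures/wood.jpg'
    method = zipfile.ZIP_STORED if is_compressed_filetype(name) else zipfile.ZIP_DEFLATED
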
+
+def is_subdir(path, directory):
+ """
+    Returns True if *path* is inside *directory* (or a subdirectory of it).
+ """
+ import os
+ from os.path import normpath, normcase, sep
+ path = normpath(normcase(path))
+ directory = normpath(normcase(directory))
+ if len(path) > len(directory):
+ sep = sep.encode('ascii') if isinstance(directory, bytes) else sep
+ if path.startswith(directory.rstrip(sep) + sep):
+ return True
+ return False
+
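
A few illustrative checks (the paths are made up); both arguments may be ``str`` or ``bytes`` as long as they match, which is why the separator is re-encoded above:

    from bam.utils.system import is_subdir

    assert is_subdir("/projects/gooseberry/textures", "/projects/gooseberry")
    assert not is_subdir("/projects/gooseberry2/textures", "/projects/gooseberry")
    assert is_subdir(b"/projects/gooseberry/textures", b"/projects/gooseberry")
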
diff --git a/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/blender_bam-1.1.7.dist-info/DESCRIPTION.rst b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/blender_bam-1.1.7.dist-info/DESCRIPTION.rst
new file mode 100644
index 00000000..f6839507
--- /dev/null
+++ b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/blender_bam-1.1.7.dist-info/DESCRIPTION.rst
@@ -0,0 +1,3 @@
+Bam Asset Manager is a tool to manage assets in Blender.
+
+
diff --git a/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/blender_bam-1.1.7.dist-info/METADATA b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/blender_bam-1.1.7.dist-info/METADATA
new file mode 100644
index 00000000..84bfb16d
--- /dev/null
+++ b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/blender_bam-1.1.7.dist-info/METADATA
@@ -0,0 +1,24 @@
+Metadata-Version: 2.0
+Name: blender-bam
+Version: 1.1.7
+Summary: Bam Asset Manager
+Home-page: http://developer.blender.org/project/view/55
+Author: Campbell Barton, Francesco Siddi
+Author-email: ideasman42@gmail.com
+License: GPLv2+
+Download-URL: https://pypi.python.org/pypi/blender-bam
+Platform: any
+Classifier: Development Status :: 3 - Alpha
+Classifier: Environment :: Console
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Topic :: Utilities
+Requires-Dist: requests (>=2.4)
+
+Bam Asset Manager is a tool to manage assets in Blender.
+
+
diff --git a/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/blender_bam-1.1.7.dist-info/RECORD b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/blender_bam-1.1.7.dist-info/RECORD
new file mode 100644
index 00000000..5fc033af
--- /dev/null
+++ b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/blender_bam-1.1.7.dist-info/RECORD
@@ -0,0 +1,20 @@
+bam/__init__.py,sha256=8dY6Im7e-qr6P60wCCOd2eipbQT3rfZ8De66BoEzmlA,113
+bam/__main__.py,sha256=wHUbMNeJKFAT6tvd6-EiuT6gVpbHrXOFo0SypFjo3hk,237
+bam/cli.py,sha256=jUZ30j4e2RHb6CKJjn3y6sj-2fTR70XliphQMAaETTc,71067
+bam/pack.py,sha256=tAmpW_o1-m5TLXeNY4_FbZCdtqnIcg_bE2Uv_twABlE,166
+bam/blend/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+bam/blend/blendfile.py,sha256=AoufnaY7u9S7TIYPOz4IShu-goVdyafzoq-s5Kzifwo,31207
+bam/blend/blendfile_copy.py,sha256=1Dm2xBZ6_49usDHI0xW6iR2tJ6c-UrfCuqO-mjCuhL4,3733
+bam/blend/blendfile_pack.py,sha256=vJb_3bQdJ4jkG7R44TJm_WNqmt9-o_oW-n2w8WUTBYo,26053
+bam/blend/blendfile_pack_restore.py,sha256=WRRbzq6iZj_nGPU1wqmiJl3rYNiXEEfF8F82m34iYDY,4531
+bam/blend/blendfile_path_remap.py,sha256=KtluKRf8rfjYv8Lgd1ZTjcTBgtMw_iJmE-fpjxBN8xI,8102
+bam/blend/blendfile_path_walker.py,sha256=tFDm4XQbir8lBipJmT-T_UF-Pct4sdx-3ir7CBt-m_Y,34344
+bam/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+bam/utils/system.py,sha256=FdKBEpO6it1lg9u5QK9j1VN-sK9MZVTzvGLA1GW1xEk,4107
+blender_bam-1.1.7.dist-info/DESCRIPTION.rst,sha256=gxRn9duvXLzZWddzDz36dsnzAkNwg1eMAbfz7KAMV9s,59
+blender_bam-1.1.7.dist-info/METADATA,sha256=X0NTEUrOeWvEz-vJmDvprtKuKiByuVXM3q0OpfSnZPE,813
+blender_bam-1.1.7.dist-info/RECORD,,
+blender_bam-1.1.7.dist-info/WHEEL,sha256=dXGL5yz26tu5uNsUy9EBoBYhrvMYqmFH9Vm82OQUT-8,95
+blender_bam-1.1.7.dist-info/entry_points.txt,sha256=yGjZcACWl4EQuQbVwuGgtURy1yYE2YvkH7c6Is6ADgQ,38
+blender_bam-1.1.7.dist-info/metadata.json,sha256=g6R9wHmwtz2QQntiWKsPTy_Uqn2z6A9txtSr03a-gHA,1069
+blender_bam-1.1.7.dist-info/top_level.txt,sha256=3Jh27QbVRbZ8nvfhcKPiJaOO3iyjKw8SSS5dabqINcw,4
diff --git a/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/blender_bam-1.1.7.dist-info/WHEEL b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/blender_bam-1.1.7.dist-info/WHEEL
new file mode 100644
index 00000000..a68f0882
--- /dev/null
+++ b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/blender_bam-1.1.7.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.30.0.a0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/blender_bam-1.1.7.dist-info/entry_points.txt b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/blender_bam-1.1.7.dist-info/entry_points.txt
new file mode 100644
index 00000000..cff8d5d5
--- /dev/null
+++ b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/blender_bam-1.1.7.dist-info/entry_points.txt
@@ -0,0 +1,3 @@
+[console_scripts]
+bam = bam.cli:main
+
diff --git a/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/blender_bam-1.1.7.dist-info/metadata.json b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/blender_bam-1.1.7.dist-info/metadata.json
new file mode 100644
index 00000000..2d2cfe9d
--- /dev/null
+++ b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/blender_bam-1.1.7.dist-info/metadata.json
@@ -0,0 +1 @@
+{"classifiers": ["Development Status :: 3 - Alpha", "Environment :: Console", "Environment :: Web Environment", "Intended Audience :: Developers", "License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Topic :: Utilities"], "download_url": "https://pypi.python.org/pypi/blender-bam", "extensions": {"python.commands": {"wrap_console": {"bam": "bam.cli:main"}}, "python.details": {"contacts": [{"email": "ideasman42@gmail.com", "name": "Campbell Barton, Francesco Siddi", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "http://developer.blender.org/project/view/55"}}, "python.exports": {"console_scripts": {"bam": "bam.cli:main"}}}, "extras": [], "generator": "bdist_wheel (0.30.0.a0)", "license": "GPLv2+", "metadata_version": "2.0", "name": "blender-bam", "platform": "any", "run_requires": [{"requires": ["requests (>=2.4)"]}], "summary": "Bam Asset Manager", "version": "1.1.7"} \ No newline at end of file
diff --git a/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/blender_bam-1.1.7.dist-info/top_level.txt b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/blender_bam-1.1.7.dist-info/top_level.txt
new file mode 100644
index 00000000..974e5b15
--- /dev/null
+++ b/io_blend_utils/blender_bam-1.1.7-py3-none-any.whl/blender_bam-1.1.7.dist-info/top_level.txt
@@ -0,0 +1 @@
+bam
diff --git a/io_blend_utils/install_whl.py b/io_blend_utils/install_whl.py
new file mode 100755
index 00000000..a2f031a5
--- /dev/null
+++ b/io_blend_utils/install_whl.py
@@ -0,0 +1,129 @@
+#!/usr/bin/env python3.5
+
+"""This Python script installs a new version of BAM here."""
+
+import pathlib
+
+my_dir = pathlib.Path(__file__).absolute().parent
+
+
+def main():
+ import argparse
+
+ parser = argparse.ArgumentParser(description="This script installs a new version of BAM here.")
+ parser.add_argument('wheelfile', type=pathlib.Path,
+ help='Location of the wheel file to install.')
+
+ args = parser.parse_args()
+ install(args.wheelfile.expanduser())
+
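
A typical invocation, using the wheel version bundled by this commit (the build path is an example; pass an absolute or ``~``-expanded path, since the script changes directory while extracting):

    python3 io_blend_utils/install_whl.py ~/src/bam/dist/blender_bam-1.1.7-py3-none-any.whl
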
+
+def install(wheelfile: pathlib.Path):
+ import json
+ import os
+ import re
+
+ assert_is_zipfile(wheelfile)
+ wipe_preexisting()
+
+ print('Installing %s' % wheelfile)
+ target = my_dir / wheelfile.name
+ print('Creating target directory %s' % target)
+ target.mkdir(parents=True)
+
+ extract(wheelfile, target)
+ copy_files(target)
+
+ version = find_version(target)
+ print('This is BAM version %s' % (version, ))
+ update_init_file(wheelfile, version)
+
+ print('Done installing %s' % wheelfile.name)
+
+
+def assert_is_zipfile(wheelfile: pathlib.Path):
+ import zipfile
+
+ # In Python 3.6 conversion to str is not necessary any more:
+ if not zipfile.is_zipfile(str(wheelfile)):
+ log.error('%s is not a valid ZIP file!' % wheelfile)
+ raise SystemExit()
+
+
+def wipe_preexisting():
+ import shutil
+
+ for existing in sorted(my_dir.glob('blender_bam-*.whl')):
+ if existing.is_dir():
+ print('Wiping pre-existing directory %s' % existing)
+ # In Python 3.6 conversion to str is not necessary any more:
+ shutil.rmtree(str(existing))
+ else:
+ print('Wiping pre-existing file %s' % existing)
+ existing.unlink()
+
+
+def extract(wheelfile: pathlib.Path, target: pathlib.Path):
+ import os
+ import zipfile
+
+ # In Python 3.6 conversion to str is not necessary any more:
+ os.chdir(str(target))
+
+ print('Extracting wheel')
+ # In Python 3.6 conversion to str is not necessary any more:
+ with zipfile.ZipFile(str(wheelfile)) as whlzip:
+ whlzip.extractall()
+
+ os.chdir(str(my_dir))
+
+
+def copy_files(target: pathlib.Path):
+ import shutil
+
+ print('Copying some files from wheel to other locations')
+ # In Python 3.6 conversion to str is not necessary any more:
+ shutil.copy(str(target / 'bam' / 'blend' / 'blendfile_path_walker.py'), './blend')
+ shutil.copy(str(target / 'bam' / 'blend' / 'blendfile.py'), './blend')
+ shutil.copy(str(target / 'bam' / 'utils' / 'system.py'), './utils')
+
+
+def find_version(target: pathlib.Path):
+ import json
+
+ print('Obtaining version number from wheel.')
+
+ distinfo = next(target.glob('*.dist-info'))
+ with (distinfo / 'metadata.json').open() as infofile:
+ metadata = json.load(infofile)
+
+ # "1.2.3" -> (1, 2, 3)
+ str_ver = metadata['version']
+ return tuple(int(x) for x in str_ver.split('.'))
+
+
+def update_init_file(wheelfile: pathlib.Path, version: tuple):
+ import os
+ import re
+
+ print('Updating __init__.py to point to this wheel.')
+
+ path_line_re = re.compile(r'^BAM_WHEEL_PATH\s*=')
+ version_line_re = re.compile(r'^\s+[\'"]version[\'"]: (\([0-9,]+\)),')
+
+ with open('__init__.py', 'r') as infile, \
+ open('__init__.py~whl~installer~', 'w') as outfile:
+
+ for line in infile:
+ if version_line_re.match(line):
+ outfile.write(" 'version': %s,%s" % (version, os.linesep))
+            elif path_line_re.match(line):
+ outfile.write("BAM_WHEEL_PATH = '%s'%s" % (wheelfile.name, os.linesep))
+ else:
+ outfile.write(line)
+
+ os.unlink('__init__.py')
+ os.rename('__init__.py~whl~installer~', '__init__.py')
+
+if __name__ == '__main__':
+ main()