From 05fd1e08cfb277b1ea13bfca47fbb56dca156032 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Vil=C3=A9m=20Duha?=
Date: Sat, 5 Dec 2020 18:26:05 +0100
Subject: BlenderKit: resolutions

This introduces resolutions into the addon.

This update should enhance the usability of the addon,
especially for people with weaker computers.
It downloads a reduced version of the assets - only images are scaled down by now.
Images are also converted in some cases from .png to .jpg to save space.
- there's a default resolution setting
- resolutions can be swapped by the user
- resolutions apply only to textured models and materials with textures larger than 1024px
- Resolutions aren't yet generated on the server, so will be visible after a few days.

Version of the addon was bumped up to 1.0.40.
---
 blenderkit/resolutions.py | 851 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 851 insertions(+)
 create mode 100644 blenderkit/resolutions.py

(limited to 'blenderkit/resolutions.py')

diff --git a/blenderkit/resolutions.py b/blenderkit/resolutions.py
new file mode 100644
index 00000000..11651e1b
--- /dev/null
+++ b/blenderkit/resolutions.py
@@ -0,0 +1,851 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
def can_erase_alpha(na):
    """Return True when every alpha value in the flat RGBA array *na* is 1.0.

    na -- flat float32 pixel array laid out RGBARGBA... (as produced by
    imagetonumpy).  A fully opaque image can be saved without its alpha
    channel, e.g. converted from PNG to JPEG.
    """
    alpha = na[3::4]
    # Sum in float64: a float32 accumulator loses integer precision above
    # 2**24, which would make the equality test unreliable for large images
    # (an 8K RGBA texture has ~67M samples).
    alpha_sum = alpha.sum(dtype=np.float64)
    opaque = alpha_sum == alpha.size
    if opaque:
        print('image can have alpha erased')
    return bool(opaque)


def is_image_black(na):
    """Return True when all RGB components of the flat RGBA array *na* are 0.

    A pure black texture carries no information and can be dropped from the
    asset entirely.
    """
    r = na[::4]
    g = na[1::4]
    b = na[2::4]

    # channels are non-negative, so the sum is zero iff every sample is zero
    rgbsum = r.sum(dtype=np.float64) + g.sum(dtype=np.float64) + b.sum(dtype=np.float64)

    if rgbsum == 0:
        print('image can have alpha channel dropped')
    return bool(rgbsum == 0)


def is_image_bw(na):
    """Return True when the image is greyscale (R == G == B everywhere),
    so it can be stored with channels reduced to a single 'BW' channel."""
    r = na[::4]
    g = na[1::4]
    b = na[2::4]

    rgbequal = (r == g).all() and (g == b).all()
    if rgbequal:
        print('image is black and white, can have channels reduced')
    return bool(rgbequal)
def imagetonumpy(i):
    """Copy the pixels of Blender image *i* into a flat numpy float32 array.

    The buffer keeps Blender's native flat, channel-interleaved layout; no
    reshaping is done, for speed and simplicity.
    """
    start = time.time()

    width, height = i.size[0], i.size[1]

    sample_count = width * height * i.channels
    buffer = np.empty(sample_count, np.float32)
    # foreach_get is the fast bulk accessor - much quicker than per-pixel access
    i.pixels.foreach_get(buffer)

    # print('\ntime of image to numpy ' + str(time.time() - start))
    return buffer
def extxchange_to_resolution(filepath):
    """Return *filepath* with a .png/.PNG extension exchanged for .jpg.

    Paths with any other extension are returned unchanged.

    NOTE(review): the original body computed the new extension but never
    used or returned it, making the function a no-op; it now returns the
    converted path.  Callers that ignored the previous implicit None return
    are unaffected.
    """
    base, ext = os.path.splitext(filepath)
    if ext in ('.png', '.PNG'):
        ext = '.jpg'
    return base + ext
+ currently implemented file type conversions: + PNG->JPG + ''' + colorspace = teximage.colorspace_settings.name + teximage.colorspace_settings.name = 'Non-Color' + + JPEG_QUALITY = 90 + # is_image_black(na) + # is_image_bw(na) + + rs = bpy.context.scene.render + ims = rs.image_settings + + orig_file_format = ims.file_format + orig_quality = ims.quality + orig_color_mode = ims.color_mode + orig_compression = ims.compression + + # if is_image_black(na): + # # just erase the image from the asset here, no need to store black images. + # pass; + + # fp = teximage.filepath + fp = input_filepath + if do_reductions: + na = imagetonumpy(teximage) + + if can_erase_alpha(na): + print(teximage.file_format) + if teximage.file_format == 'PNG': + print('changing type of image to JPG') + base, ext = os.path.splitext(fp) + teximage['original_extension'] = ext + + fp = fp.replace('.png', '.jpg') + fp = fp.replace('.PNG', '.jpg') + + teximage.name = teximage.name.replace('.png', '.jpg') + teximage.name = teximage.name.replace('.PNG', '.jpg') + + teximage.file_format = 'JPEG' + ims.quality = JPEG_QUALITY + ims.color_mode = 'RGB' + + if is_image_bw(na): + ims.color_mode = 'BW' + + ims.file_format = teximage.file_format + + # all pngs with max compression + if ims.file_format == 'PNG': + ims.compression = 100 + # all jpgs brought to reasonable quality + if ims.file_format == 'JPG': + ims.quality = JPEG_QUALITY + + if do_downscale: + downscale(teximage) + + # it's actually very important not to try to change the image filepath and packed file filepath before saving, + # blender tries to re-pack the image after writing to image.packed_image.filepath and reverts any changes. 
def downscale(i, minsize=128):
    """Halve the resolution of image *i* in place, with a floor.

    The image is only scaled when both halved dimensions stay strictly above
    *minsize* pixels, so repeated calls converge to a floor resolution
    instead of degrading tiny textures indefinitely.

    Parameters
    ----------
    i - Blender image (anything exposing .size and .scale(w, h))
    minsize - smallest allowed edge length after scaling (default 128,
              matching the previous hard-coded constant)
    """
    sx, sy = i.size[:]
    sx = round(sx / 2)
    sy = round(sy / 2)
    if sx > minsize and sy > minsize:
        i.scale(sx, sy)
def patch_asset_empty(asset_id, api_key):
    '''
    Patch the asset with an empty payload so the server re-indexes it.

    Should be removed once this is fixed on the server and
    the server is able to reindex after uploads of resolutions.

    Parameters
    ----------
    asset_id - id of the asset on the server
    api_key - BlenderKit API key used for authentication

    Returns
    -------
    {'FINISHED'} on success, {'CANCELLED'} on a request error
    (Blender-operator style return values).
    '''
    # an empty PATCH body is enough to touch the asset and trigger reindexing
    upload_data = {
    }
    url = paths.get_api_url() + 'assets/' + str(asset_id) + '/'
    headers = utils.get_headers(api_key)
    try:
        r = rerequests.patch(url, json=upload_data, headers=headers, verify=True)  # files = files,
    except requests.exceptions.RequestException as e:
        # best-effort: log and report cancellation instead of raising
        print(e)
        return {'CANCELLED'}
    return {'FINISHED'}
def generate_lower_resolutions(data):
    """Generate progressively lower-resolution variants of the opened asset file.

    Starting from the asset's current (rounded) resolution, textures are
    first cleaned up (channel/format reductions at full size) and then
    repeatedly halved; after every step the .blend is saved under a
    resolution-suffixed name.  Finally the asset is patched on the server so
    it gets re-indexed.

    data -- background-task dict; data['asset_data'] is the asset's search
    result dictionary.
    """
    asset_data = data['asset_data']
    actres = get_current_resolution()
    # procedural assets have no images, so actres stays 0 and they are skipped
    base_fpath = bpy.data.filepath

    print('current resolution of the asset ', actres)
    if actres > 0:
        p2res = paths.round_to_closest_resolution(actres)
        orig_res = p2res
        print(p2res)
        finished = False
        files = []
        # skip assets that already are at the lowest possible resolution.
        # bugfix: the original compared the resolution key to the list
        # literal [0], which is always True for a string - compare to the
        # smallest resolution key instead.
        if p2res != rkeys[0]:
            # total size of the original textures, for the final comparison
            original_textures_filesize = 0
            for i in bpy.data.images:
                abspath = bpy.path.abspath(i.filepath)
                if os.path.exists(abspath):
                    original_textures_filesize += os.path.getsize(abspath)

            while not finished:

                blend_file_name = os.path.basename(base_fpath)

                dirn = os.path.dirname(base_fpath)
                fn_strip, ext = os.path.splitext(blend_file_name)

                # target .blend name, e.g. asset_2k.blend
                fn = fn_strip + paths.resolution_suffix[p2res] + ext
                fpath = os.path.join(dirn, fn)

                tex_dir_path = paths.get_texture_directory(asset_data, resolution=p2res)

                tex_dir_abs = bpy.path.abspath(tex_dir_path)
                if not os.path.exists(tex_dir_abs):
                    os.mkdir(tex_dir_abs)

                reduced_textures_filessize = 0
                for i in bpy.data.images:
                    if i.name != 'Render Result':

                        print('scaling ', i.name, i.size[0], i.size[1])
                        fp = get_texture_filepath(tex_dir_path, i, resolution=p2res)

                        if p2res == orig_res:
                            # first round: remember the original image path and
                            # only reduce channels/format, keeping the resolution
                            i['blenderkit_original_path'] = i.filepath
                            make_possible_reductions_on_image(i, fp, do_reductions=True, do_downscale=False)
                        else:
                            # subsequent rounds only downscale
                            make_possible_reductions_on_image(i, fp, do_reductions=False, do_downscale=True)

                        abspath = bpy.path.abspath(i.filepath)
                        if os.path.exists(abspath):
                            reduced_textures_filessize += os.path.getsize(abspath)

                        i.pack()

                print(fpath)
                # save a copy of the file under the resolution-suffixed name
                bpy.ops.wm.save_as_mainfile(filepath=fpath, compress=True, copy=True)
                # compare file sizes; only offer the file when it actually shrank
                print(f'textures size was reduced from {original_textures_filesize} to {reduced_textures_filessize}')
                if reduced_textures_filessize < original_textures_filesize:
                    files.append({
                        "type": p2res,
                        "index": 0,
                        "file_path": fpath
                    })

                print('prepared resolution file: ', p2res)
                if rkeys.index(p2res) == 0:
                    finished = True
                else:
                    p2res = rkeys[rkeys.index(p2res) - 1]
            print('uploading resolution files')
            # upload_resolutions(files, data)  # NOTE(review): upload disabled, files list currently unused
            preferences = bpy.context.preferences.addons['blenderkit'].preferences
            patch_asset_empty(data['asset_data']['id'], preferences.api_key)
    return
+ # first let's skip procedural assets + base_fpath = bpy.data.filepath + blend_file_name = os.path.basename(base_fpath) + bpy.ops.mesh.primitive_cube_add() + aob = bpy.context.active_object + bpy.ops.object.material_slot_add() + aob.material_slots[0].material = bpy.data.materials[0] + props = aob.active_material.blenderkit + props.thumbnail_generator_type = 'BALL' + props.thumbnail_background = False + props.thumbnail_resolution = '256' + # layout.prop(props, 'thumbnail_generator_type') + # layout.prop(props, 'thumbnail_scale') + # layout.prop(props, 'thumbnail_background') + # if props.thumbnail_background: + # layout.prop(props, 'thumbnail_background_lightness') + # layout.prop(props, 'thumbnail_resolution') + # layout.prop(props, 'thumbnail_samples') + # layout.prop(props, 'thumbnail_denoising') + # layout.prop(props, 'adaptive_subdivision') + # preferences = bpy.context.preferences.addons['blenderkit'].preferences + # layout.prop(preferences, "thumbnail_use_gpu") + # TODO: here it should call start_material_thumbnailer , but with the wait property on, so it can upload afterwards. 
+ bpy.ops.object.blenderkit_material_thumbnail() + time.sleep(130) + # save + # this does the actual job + + return + + +def assets_db_path(): + dpath = os.path.dirname(bpy.data.filepath) + fpath = os.path.join(dpath, 'all_assets.json') + return fpath + + +def get_assets_search(): + bpy.app.debug_value = 2 + + results = [] + preferences = bpy.context.preferences.addons['blenderkit'].preferences + url = paths.get_api_url() + 'search/all' + i = 0 + while url is not None: + headers = utils.get_headers(preferences.api_key) + print('fetching assets from assets endpoint') + print(url) + retries = 0 + while retries < 3: + r = rerequests.get(url, headers=headers) + + try: + adata = r.json() + url = adata.get('next') + print(i) + i += 1 + except Exception as e: + print(e) + print('failed to get next') + if retries == 2: + url = None + if adata.get('results') != None: + results.extend(adata['results']) + retries = 3 + print(f'fetched page {i}') + retries += 1 + + fpath = assets_db_path() + with open(fpath, 'w') as s: + json.dump(results, s) + + +def get_assets_for_resolutions(page_size=100, max_results=100000000): + preferences = bpy.context.preferences.addons['blenderkit'].preferences + + dpath = os.path.dirname(bpy.data.filepath) + filepath = os.path.join(dpath, 'assets_for_resolutions.json') + params = { + 'order': '-created', + 'textureResolutionMax_gte': '100', + # 'last_resolution_upload_lt':'2020-9-01' + } + search.get_search_simple(params, filepath=filepath, page_size=page_size, max_results=max_results, + api_key=preferences.api_key) + return filepath + + +def get_materials_for_validation(page_size=100, max_results=100000000): + preferences = bpy.context.preferences.addons['blenderkit'].preferences + dpath = os.path.dirname(bpy.data.filepath) + filepath = os.path.join(dpath, 'materials_for_validation.json') + params = { + 'order': '-created', + 'asset_type': 'material', + 'verification_status': 'uploaded' + } + search.get_search_simple(params, filepath=filepath, 
def check_needs_resolutions(a):
    """Decide whether asset dict *a* still needs resolution files generated.

    True only for validated materials/models/scenes that do not yet have any
    'resolution*' entry among their files.
    """
    # the search itself now picks the right assets, so there's no need to
    # filter more than asset types here.
    validated = a['verificationStatus'] == 'validated'
    supported_type = a['assetType'] in ('material', 'model', 'scene')
    if not (validated and supported_type):
        return False
    return not any('resolution' in f['fileType'] for f in a['files'])
def generate_resolution_thread(asset_data, api_key):
    """Worker-thread body: download one asset, then generate its resolutions.

    Downloads the asset file (unpacking it in a background Blender) and, when
    the download succeeded, starts another background Blender instance that
    generates the lower resolutions.  send_to_bg blocks until that instance
    finishes, so the thread lives for the whole generation.
    """
    fpath = download_asset(asset_data, unpack=True, api_key=api_key)
    if not fpath:
        return
    print('send to bg ', fpath)
    send_to_bg(asset_data, fpath, command='generate_resolutions', wait=True)
def send_to_bg(asset_data, fpath, command='generate_resolutions', wait=True):
    '''
    Send various tasks to a new Blender instance that runs and closes after finishing the task.
    With wait=True this function blocks until the process finishes.
    The function passes the current bpy.app.debug_value on to the instance of Blender that is run.
    Parameters
    ----------
    asset_data - asset dictionary, serialized into the task's data file
    fpath - file that will be processed (opened by the background Blender)
    command - command which should be run in background.
    wait - block until the subprocess exits (True) or return the running Popen handle

    Returns
    -------
    the completed (wait=True) or running (wait=False) subprocess object
    '''
    data = {
        'fpath': fpath,
        'debug_value': bpy.app.debug_value,
        'asset_data': asset_data,
        'command': command,
    }
    binary_path = bpy.app.binary_path
    tempdir = tempfile.mkdtemp()
    # bugfix: the original used os.path.join(tempdir + 'resdata.json') - plain
    # string concatenation - which placed the file NEXT TO the fresh temporary
    # directory instead of inside it.
    datafile = os.path.join(tempdir, 'resdata.json')
    script_path = os.path.dirname(os.path.realpath(__file__))
    with open(datafile, 'w') as s:
        json.dump(data, s)

    print('opening Blender instance to do processing - ', command)

    if wait:
        proc = subprocess.run([
            binary_path,
            "--background",
            "-noaudio",
            fpath,
            "--python", os.path.join(script_path, "resolutions_bg.py"),
            "--", datafile
        ], bufsize=1, stdout=sys.stdout, stdin=subprocess.PIPE, creationflags=utils.get_process_flags())

    else:
        # TODO this should be fixed to allow multithreading.
        proc = subprocess.Popen([
            binary_path,
            "--background",
            "-noaudio",
            fpath,
            "--python", os.path.join(script_path, "resolutions_bg.py"),
            "--", datafile
        ], bufsize=1, stdout=subprocess.PIPE, stdin=subprocess.PIPE, creationflags=utils.get_process_flags())
    return proc


def write_data_back(asset_data):
    '''Ensure the data in the resolution file matches the database.  Not implemented yet.'''
    pass


def run_bg(datafile):
    """Entry point for the background Blender instance: load the task file
    written by send_to_bg and dispatch to the requested operation."""
    print('background file operation')
    with open(datafile, 'r') as f:
        data = json.load(f)
    # mirror the parent instance's debug verbosity
    bpy.app.debug_value = data['debug_value']
    write_data_back(data['asset_data'])
    if data['command'] == 'generate_resolutions':
        generate_lower_resolutions(data)
    elif data['command'] == 'unpack':
        unpack_asset(data)
    elif data['command'] == 'regen_thumbnail':
        regenerate_thumbnail_material(data)