# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

from blenderkit import paths, utils, categories, ui, colors, bkit_oauth, version_checker, tasks_queue, rerequests, \
    resolutions, image_utils, ratings_utils

import blenderkit

from bpy.app.handlers import persistent

from bpy.props import (  # TODO only keep the ones actually used when cleaning
    IntProperty,
    FloatProperty,
    FloatVectorProperty,
    StringProperty,
    EnumProperty,
    BoolProperty,
    PointerProperty,
)
from bpy.types import (
    Operator,
    Panel,
    AddonPreferences,
    PropertyGroup,
    UIList
)

import requests, os, random
import time
import threading
import platform
import bpy
import copy
import json
import math
import unicodedata
import urllib
import queue
import logging

bk_logger = logging.getLogger('blenderkit')

search_start_time = 0
prev_time = 0


def check_errors(rdata):
    if rdata.get('statusCode') and int(rdata.get('statusCode')) > 299:
        utils.p(rdata)
        if rdata.get('detail') == 'Invalid token.':
            user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
            if user_preferences.api_key != '':
                if user_preferences.enable_oauth:
                    bkit_oauth.refresh_token_thread()
                return False, rdata.get('detail')
            return False, 'Use the login panel to connect your profile.'
        else:
            return False, rdata.get('detail')
    return True, ''
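
# Usage sketch for check_errors() (the payload below is hypothetical, just the
# shape the server returns on failures; it is not executed here):
#
#   rdata = {'statusCode': 401, 'detail': 'Invalid token.'}
#   ok, error = check_errors(rdata)
#   if not ok:
#       props.report = error  # surfaced in the search panel, see search_timer()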

search_threads = []
thumb_sml_download_threads = {}
thumb_full_download_threads = {}
reports_queue = queue.Queue()

rtips = ['Click or drag a model or material into the scene to link/append it.',
         "Please rate responsibly and plentifully. This helps us distribute rewards to the authors.",
         "Click on brushes to link them into the scene.",
         "All materials are free.",
         "Storage for public assets is unlimited.",
         "Locked models are available if you subscribe to the Full plan.",
         "Log in to upload your own models, materials or brushes.",
         "Use the 'A' key over the asset bar to search for assets by the same author.",
         "Use the 'W' key over the asset bar to open the author's webpage.",
         ]


def refresh_token_timer():
    '''This timer gets run every time the token needs a refresh.
    It refreshes the token and also the categories.'''
    utils.p('refresh timer')
    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
    fetch_server_data()
    categories.load_categories()

    return max(3600, user_preferences.api_key_life - 3600)


def update_ad(ad):
    if not ad.get('assetBaseId'):
        try:
            ad['assetBaseId'] = ad['asset_base_id']  # this should stay ONLY for compatibility with older scenes
            ad['assetType'] = ad['asset_type']  # this should stay ONLY for compatibility with older scenes
            ad['verificationStatus'] = ad[
                'verification_status']  # this should stay ONLY for compatibility with older scenes
            ad['author'] = {}
            ad['author']['id'] = ad['author_id']  # this should stay ONLY for compatibility with older scenes
            ad['canDownload'] = ad['can_download']  # this should stay ONLY for compatibility with older scenes
        except Exception as e:
            bk_logger.error('BlenderKit failed to update older asset data')
    return ad


def update_assets_data():  # updates asset data on scene load.
    '''Update properties that were changed on scenes with older assets.
    The properties were mainly renamed from snake_case to camelCase to fit
    the data coming from the server.
    '''
    data = bpy.data

    datablocks = [
        bpy.data.objects,
        bpy.data.materials,
        bpy.data.brushes,
    ]
    for dtype in datablocks:
        for block in dtype:
            if block.get('asset_data') is not None:
                update_ad(block['asset_data'])

    dicts = [
        'assets used',
        # 'assets rated',  # assets rated stores only true/false, not asset data.
    ]
    for s in bpy.data.scenes:
        for bkdict in dicts:

            d = s.get(bkdict)
            if not d:
                continue

            for asset_id in d.keys():
                update_ad(d[asset_id])
                # bpy.context.scene['assets used'][ad] = ad
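
# Illustrative sketch of the snake_case -> camelCase migration performed by
# update_ad(); the dict values are made up:
#
#   old = {'asset_base_id': '54ff5c85-...', 'asset_type': 'model',
#          'verification_status': 'validated', 'author_id': 1, 'can_download': True}
#   new = update_ad(old)
#   # new['assetBaseId'] == '54ff5c85-...', new['author']['id'] == 1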

def purge_search_results():
    '''Clean up search results on save/load.'''
    s = bpy.context.scene

    sr_props = [
        'search results',
        'search results orig',
    ]
    asset_types = ['model', 'material', 'scene', 'hdr', 'brush']
    for at in asset_types:
        sr_props.append(f'bkit {at} search')
        sr_props.append(f'bkit {at} search orig')
    for sr_prop in sr_props:
        if s.get(sr_prop):
            del s[sr_prop]


@persistent
def scene_load(context):
    '''Load categories, check timer registration, and update scene asset data.
    Should (probably) also update asset data from the server (after user consent).
    '''
    wm = bpy.context.window_manager
    purge_search_results()
    fetch_server_data()
    categories.load_categories()
    if not bpy.app.timers.is_registered(refresh_token_timer):
        bpy.app.timers.register(refresh_token_timer, persistent=True, first_interval=36000)
    update_assets_data()


def fetch_server_data():
    '''Download categories and the user profile, and refresh the token if needed.'''
    if not bpy.app.background:
        user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
        api_key = user_preferences.api_key
        # Only refresh the new type of tokens (detected by length), and only one hour before the token times out.
        if user_preferences.enable_oauth and \
                0 < len(user_preferences.api_key) < 38 and \
                user_preferences.api_key_timeout < time.time() + 3600:
            bkit_oauth.refresh_token_thread()
        if api_key != '' and bpy.context.window_manager.get('bkit profile') is None:
            get_profile()
        if bpy.context.window_manager.get('bkit_categories') is None:
            categories.fetch_categories_thread(api_key, force=False)


first_time = True
last_clipboard = ''


def check_clipboard():
    '''Check the clipboard for an exact string containing an asset ID.
    The string is generated on www.blenderkit.com, for example:
    https://www.blenderkit.com/get-blenderkit/54ff5c85-2c73-49e9-ba80-aec18616a408/
    '''

    # clipboard monitoring to search assets from the web
    if platform.system() != 'Linux':
        global last_clipboard
        if bpy.context.window_manager.clipboard != last_clipboard:
            last_clipboard = bpy.context.window_manager.clipboard
            instr = 'asset_base_id:'
            # first check if the clipboard contains an asset ID, then the asset type
            if last_clipboard[:len(instr)] == instr:
                atstr = 'asset_type:'
                ati = last_clipboard.find(atstr)
                # this only checks that the asset_type keyword is there, and lets the keywords update function do the parsing.
                if ati > -1:
                    search_props = utils.get_search_props()
                    search_props.search_keywords = last_clipboard
                    # don't run search after this - assigning to keywords runs the search_update function.


def parse_result(r):
    '''Generate some extra data needed in the result (for now).

    Parameters
    ----------
    r - search result, also called asset_data
    '''
    scene = bpy.context.scene

    # TODO remove this fix when filesSize is fixed.
    # this is a temporary fix for too big numbers from the server.
    # try:
    #     r['filesSize'] = int(r['filesSize'] / 1024)
    # except:
    #     utils.p('asset with no files-size')

    asset_type = r['assetType']
    if len(r['files']) > 0:  # TODO remove this condition so all assets are parsed.

        get_author(r)

        r['available_resolutions'] = []
        allthumbs = []
        durl, tname, small_tname = '', '', ''
        if r['assetType'] == 'hdr':
            tname = paths.extract_filename_from_url(r['thumbnailLargeUrlNonsquared'])
        else:
            tname = paths.extract_filename_from_url(r['thumbnailMiddleUrl'])
        small_tname = paths.extract_filename_from_url(r['thumbnailSmallUrl'])
        allthumbs.append(tname)  # TODO just the first thumb is used now.

        for f in r['files']:
            # if f['fileType'] == 'thumbnail':
            #     tname = paths.extract_filename_from_url(f['fileThumbnailLarge'])
            #     small_tname = paths.extract_filename_from_url(f['fileThumbnail'])
            #     allthumbs.append(tname)  # TODO just the first thumb is used now.

            if f['fileType'] == 'blend':
                durl = f['downloadUrl'].split('?')[0]
                # fname = paths.extract_filename_from_url(f['filePath'])

            if f['fileType'].find('resolution') > -1:
                r['available_resolutions'].append(resolutions.resolutions[f['fileType']])

        # code for more thumbnails
        # tdict = {}
        # for i, t in enumerate(allthumbs):
        #     tdict['thumbnail_%i'] = t

        r['max_resolution'] = 0
        if r['available_resolutions']:  # should check only for non-empty sequences
            r['max_resolution'] = max(r['available_resolutions'])

        tooltip = generate_tooltip(r)
        # for some reason, the id was still int on some occasions. investigate this.
        r['author']['id'] = str(r['author']['id'])

        # some helper props; generally we shouldn't be renaming/duplicating original properties,
        # so Blender's data stays the same as on the server.
        asset_data = {'thumbnail': tname,
                      'thumbnail_small': small_tname,
                      # 'thumbnails': allthumbs,
                      # 'download_url': durl,  # made obsolete since files are stored in their original form.
                      # 'id': r['id'],
                      # 'asset_base_id': r['assetBaseId'],  # this should stay ONLY for compatibility with older scenes
                      # 'name': r['name'],
                      # 'asset_type': r['assetType'],  # this should stay ONLY for compatibility with older scenes
                      'tooltip': tooltip,
                      # 'tags': r['tags'],
                      # 'can_download': r.get('canDownload', True),  # this should stay ONLY for compatibility with older scenes
                      # 'verification_status': r['verificationStatus'],  # this should stay ONLY for compatibility with older scenes
                      # 'author_id': r['author']['id'],  # this should stay ONLY for compatibility with older scenes
                      # 'author': r['author']['firstName'] + ' ' + r['author']['lastName'],
                      # 'description': r['description'],
                      }
        asset_data['downloaded'] = 0

        # parse extra params needed for Blender here
        params = utils.params_to_dict(r['parameters'])

        if asset_type == 'model':
            if params.get('boundBoxMinX') is not None:
                bbox = {
                    'bbox_min': (
                        float(params['boundBoxMinX']),
                        float(params['boundBoxMinY']),
                        float(params['boundBoxMinZ'])),
                    'bbox_max': (
                        float(params['boundBoxMaxX']),
                        float(params['boundBoxMaxY']),
                        float(params['boundBoxMaxZ']))
                }
            else:
                bbox = {
                    'bbox_min': (-.5, -.5, 0),
                    'bbox_max': (.5, .5, 1)
                }
            asset_data.update(bbox)
        if asset_type == 'material':
            asset_data['texture_size_meters'] = params.get('textureSizeMeters', 1.0)

        # asset_data.update(tdict)

        au = scene.get('assets used', {})
        if au == {}:
            scene['assets used'] = au
        if r['assetBaseId'] in au.keys():
            asset_data['downloaded'] = 100
            # transcribe all urls already fetched from the server
            r_previous = au[r['assetBaseId']]
            if r_previous.get('files'):
                for f in r_previous['files']:
                    if f.get('url'):
                        for f1 in r['files']:
                            if f1['fileType'] == f['fileType']:
                                f1['url'] = f['url']

        # attempt to switch to using original data gradually, since the parsing itself should become obsolete.
        asset_data.update(r)
        return asset_data
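
# Sketch of the 'parameters' handling in parse_result(), assuming
# utils.params_to_dict() flattens the server's list of parameter entries into a
# plain dict (the values below are invented):
#
#   params = utils.params_to_dict(r['parameters'])
#   # e.g. {'boundBoxMinX': '-0.5', 'boundBoxMinY': '-0.5', ...}
#   # missing boundBox* keys fall back to the default (-.5, -.5, 0)..(.5, .5, 1) box above.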

# @bpy.app.handlers.persistent
def search_timer():
    # this makes a first search after opening Blender, showing the latest assets.
    # utils.p('timer search')
    # utils.p('start search timer')
    global first_time
    preferences = bpy.context.preferences.addons['blenderkit'].preferences
    if first_time and not bpy.app.background:  # first time

        first_time = False
        if preferences.show_on_start:
            # TODO here it should check if there are some results, and only open the asset bar if that is the case, not search.
            # if bpy.context.window_manager.get('search results') is None:
            search()
            # preferences.first_run = False
        if preferences.tips_on_start:
            utils.get_largest_area()
            ui.update_ui_size(ui.active_area_pointer, ui.active_region_pointer)
            ui.add_report(text='BlenderKit Tip: ' + random.choice(rtips), timeout=12, color=colors.GREEN)
        # utils.p('end search timer')

        return 3.0

    # if preferences.first_run:
    #     search()
    #     preferences.first_run = False

    # check_clipboard()

    global search_threads
    if len(search_threads) == 0:
        # utils.p('end search timer')
        return 1.0

    # don't do anything while dragging - this could switch the asset during a drag and make the results
    # list a different length, causing a lot of trouble.
    if bpy.context.scene.blenderkitUI.dragging:
        # utils.p('end search timer')
        return 0.5

    for thread in search_threads:
        # TODO this doesn't check all processes when one gets removed,
        # but most of the time only one is running anyway
        if not thread[0].is_alive():
            search_threads.remove(thread)
            # icons_dir = thread[1]
            scene = bpy.context.scene
            # these 2 lines should update the previews enum and set the first result as active.
            wm = bpy.context.window_manager
            asset_type = thread[2]
            if asset_type == 'model':
                props = scene.blenderkit_models
                # json_filepath = os.path.join(icons_dir, 'model_searchresult.json')
            if asset_type == 'scene':
                props = scene.blenderkit_scene
                # json_filepath = os.path.join(icons_dir, 'scene_searchresult.json')
            if asset_type == 'hdr':
                props = scene.blenderkit_HDR
                # json_filepath = os.path.join(icons_dir, 'scene_searchresult.json')
            if asset_type == 'material':
                props = scene.blenderkit_mat
                # json_filepath = os.path.join(icons_dir, 'material_searchresult.json')
            if asset_type == 'brush':
                props = scene.blenderkit_brush
                # json_filepath = os.path.join(icons_dir, 'brush_searchresult.json')
            search_name = f'bkit {asset_type} search'

            wm[search_name] = []

            global reports_queue
            while not reports_queue.empty():
                props.report = str(reports_queue.get())
                # utils.p('end search timer')
                return .2

            rdata = thread[0].result

            result_field = []
            ok, error = check_errors(rdata)
            if ok:
                ui_props = bpy.context.scene.blenderkitUI
                if not ui_props.assetbar_on:
                    bpy.ops.object.run_assetbar_fix_context()
                for r in rdata['results']:
                    asset_data = parse_result(r)
                    if asset_data is not None:
                        result_field.append(asset_data)

                # Get ratings from the BlenderKit server
                if utils.profile_is_validator():
                    user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
                    api_key = user_preferences.api_key
                    headers = utils.get_headers(api_key)
                    for r in rdata['results']:
                        if ratings_utils.get_rating_local(r['id']) is None:
                            rating_thread = threading.Thread(target=ratings_utils.get_rating,
                                                             args=([r['id'], headers]), daemon=True)
                            rating_thread.start()

                wm[search_name] = result_field
                wm['search results'] = result_field
                wm[search_name + ' orig'] = copy.deepcopy(rdata)
                wm['search results orig'] = wm[search_name + ' orig']

                load_previews()
                if len(result_field) < ui_props.scrolloffset or not (thread[0].params.get('get_next')):
                    # jump back
                    ui_props.scrolloffset = 0
                props.is_searching = False
                props.search_error = False
                props.report = 'Found %i results. ' % (wm['search results orig']['count'])
                if len(wm['search results']) == 0:
                    tasks_queue.add_task((ui.add_report, ('No matching results found.',)))
                # undo push
                # bpy.ops.wm.undo_push_context(message='Get BlenderKit search')

            else:
                bk_logger.error(error)
                props.report = error
                props.search_error = True

            # print('finished search thread')
            mt('preview loading finished')
    # utils.p('end search timer')
    return .3
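
# search_timer() is meant to be driven by bpy.app.timers: Blender re-runs the
# callback after the number of seconds it returns, so it polls at .2-.3 s while
# search threads are alive and backs off to 1.0 s when idle. A minimal
# registration sketch (the addon's register() presumably does the equivalent):
#
#   if not bpy.app.timers.is_registered(search_timer):
#       bpy.app.timers.register(search_timer)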

def load_previews():
    scene = bpy.context.scene
    # FIRST START SEARCH
    props = scene.blenderkitUI
    directory = paths.get_temp_dir('%s_search' % props.asset_type.lower())
    s = bpy.context.scene
    results = bpy.context.window_manager.get('search results')
    if results is None:
        return
    inames = []
    tpaths = []

    i = 0
    for r in results:
        tpath = os.path.join(directory, r['thumbnail_small'])
        if not r['thumbnail_small']:
            tpath = paths.get_addon_thumbnail_path('thumbnail_not_available.jpg')

        if not os.path.exists(tpath):
            continue

        iname = utils.previmg_name(i)

        # if os.path.exists(tpath):  # sometimes we are unlucky...
        img = bpy.data.images.get(iname)

        if img is None:
            img = bpy.data.images.load(tpath)
            img.name = iname
        elif img.filepath != tpath:
            # had to add this check for autopacked files...
            if img.packed_file is not None:
                img.unpack(method='USE_ORIGINAL')
            img.filepath = tpath
            img.reload()
        if r['assetType'] == 'hdr':
            # to display HDR thumbnails correctly, use Non-Color, otherwise they look shifted.
            image_utils.set_colorspace(img, 'Non-Color')
        else:
            image_utils.set_colorspace(img, 'sRGB')
        i += 1
    # print('previews loaded')
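
# Preview datablocks are keyed by slot index, not by asset, so a new search only
# re-points existing images instead of recreating them. Hypothetical sketch,
# assuming utils.previmg_name(0) returns a stable per-slot image name:
#
#   img = bpy.data.images.get(utils.previmg_name(0))
#   if img is not None:
#       print(img.filepath)  # thumbnail of the first visible result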

# line splitting for longer texts...
def split_subs(text, threshold=40):
    if text == '':
        return []
    # temporarily disable this, to be able to do this in drawing code
    text = text.rstrip()
    text = text.replace('\r\n', '\n')

    lines = []
    while len(text) > threshold:
        # first handle if there's a \n line ending
        i_rn = text.find('\n')
        if 1 < i_rn < threshold:
            i = i_rn
            text = text.replace('\n', '', 1)
        else:
            i = text.rfind(' ', 0, threshold)
            i1 = text.rfind(',', 0, threshold)
            i2 = text.rfind('.', 0, threshold)
            i = max(i, i1, i2)
            if i <= 0:
                i = threshold
        lines.append(text[:i])
        text = text[i:]
    lines.append(text)
    return lines


def list_to_str(input):
    output = ''
    for i, text in enumerate(input):
        output += text
        if i < len(input) - 1:
            output += ', '
    return output


def writeblock(t, input, width=40):  # for longer texts
    dlines = split_subs(input, threshold=width)
    for i, l in enumerate(dlines):
        t += '%s\n' % l
    return t


def writeblockm(tooltip, mdata, key='', pretext=None, width=40):  # for longer texts
    if mdata.get(key) is None:
        return tooltip
    else:
        intext = mdata[key]
        if type(intext) == list:
            intext = list_to_str(intext)
        if type(intext) == float:
            intext = round(intext, 3)
        intext = str(intext)
        if intext.rstrip() == '':
            return tooltip
        if pretext is None:
            pretext = key
        if pretext != '':
            pretext = pretext + ': '
        text = pretext + intext
        dlines = split_subs(text, threshold=width)
        for i, l in enumerate(dlines):
            tooltip += '%s\n' % l
    return tooltip


def has(mdata, prop):
    if mdata.get(prop) is not None and mdata[prop] is not None and mdata[prop] is not False:
        return True
    else:
        return False


def generate_tooltip(mdata):
    col_w = 40
    if type(mdata['parameters']) == list:
        mparams = utils.params_to_dict(mdata['parameters'])
    else:
        mparams = mdata['parameters']
    t = ''
    t = writeblock(t, mdata['displayName'], width=int(col_w * .6))
    # t += '\n'
    # t = writeblockm(t, mdata, key='description', pretext='', width=col_w)
    return t


def get_random_tip():
    t = ''
    tip = 'Tip: ' + random.choice(rtips)
    t = writeblock(t, tip)
    return t


def generate_author_textblock(adata):
    t = ''
    if adata not in (None, ''):
        col_w = 2000
        if len(adata['firstName'] + adata['lastName']) > 0:
            t = '%s %s\n' % (adata['firstName'], adata['lastName'])
            t += '\n'
            if adata.get('aboutMe') is not None:
                t = writeblockm(t, adata, key='aboutMe', pretext='', width=col_w)
    return t


class ThumbDownloader(threading.Thread):
    query = None

    def __init__(self, url, path):
        super(ThumbDownloader, self).__init__()
        self.url = url
        self.path = path
        self._stop_event = threading.Event()

    def stop(self):
        self._stop_event.set()

    def stopped(self):
        return self._stop_event.is_set()

    def run(self):
        # print('thumb downloader', self.url)
        # utils.p('start thumbdownloader thread')
        r = None
        try:
            r = requests.get(self.url, stream=False)
        except Exception as e:
            bk_logger.error('Thumbnail download failed')
            bk_logger.error(str(e))
        if r and r.status_code == 200:
            with open(self.path, 'wb') as f:
                f.write(r.content)
            # ORIGINALLY WE DOWNLOADED THUMBNAILS AS STREAM, BUT THIS WAS TOO SLOW.
            # with open(path, 'wb') as f:
            #     for chunk in r.iter_content(1048576 * 4):
            #         f.write(chunk)
        # utils.p('end thumbdownloader thread')
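
# Hypothetical usage of ThumbDownloader; the keying of the global
# thumb_sml_download_threads dict is an assumption here:
#
#   t = ThumbDownloader(url, tpath)
#   t.daemon = True
#   t.start()
#   thumb_sml_download_threads[tpath] = t  # kept so the thread can be stop()-ed later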

def write_gravatar(a_id, gravatar_path):
    '''Write down the gravatar path, as a result of a thread-based gravatar image download.
    This should happen on a timer in a queue.
    '''
    # print('write author', a_id, type(a_id))
    authors = bpy.context.window_manager['bkit authors']
    if authors.get(a_id) is not None:
        adata = authors.get(a_id)
        adata['gravatarImg'] = gravatar_path


def fetch_gravatar(adata):
    '''Get avatars from the BlenderKit server.

    Parameters
    ----------
    adata - author data from the Elasticsearch result
    '''
    # utils.p('fetch gravatar')

    # fetch new avatars if available already
    if adata.get('avatar128') is not None:
        avatar_path = paths.get_temp_dir(subdir='bkit_g/') + adata['id'] + '.jpg'
        if os.path.exists(avatar_path):
            tasks_queue.add_task((write_gravatar, (adata['id'], avatar_path)))
            return

        url = paths.get_bkit_url() + adata['avatar128']
        r = rerequests.get(url, stream=False)
        if r.status_code == 200:
            with open(avatar_path, 'wb') as f:
                f.write(r.content)
            tasks_queue.add_task((write_gravatar, (adata['id'], avatar_path)))
        elif r.status_code == 404:
            adata['avatar128'] = None
            utils.p('avatar for author not available.')
        return

    # older gravatar code
    if adata.get('gravatarHash') is not None:
        gravatar_path = paths.get_temp_dir(subdir='bkit_g/') + adata['gravatarHash'] + '.jpg'

        if os.path.exists(gravatar_path):
            tasks_queue.add_task((write_gravatar, (adata['id'], gravatar_path)))
            return

        url = "https://www.gravatar.com/avatar/" + adata['gravatarHash'] + '?d=404'
        r = rerequests.get(url, stream=False)
        if r.status_code == 200:
            with open(gravatar_path, 'wb') as f:
                f.write(r.content)
            tasks_queue.add_task((write_gravatar, (adata['id'], gravatar_path)))
        elif r.status_code == 404:
            adata['gravatarHash'] = None
            utils.p('gravatar for author not available.')


fetching_gravatars = {}


def get_author(r):
    '''Write author info (now from search results) and fetch the gravatar if needed.'''
    global fetching_gravatars

    a_id = str(r['author']['id'])
    preferences = bpy.context.preferences.addons['blenderkit'].preferences
    authors = bpy.context.window_manager.get('bkit authors', {})
    if authors == {}:
        bpy.context.window_manager['bkit authors'] = authors
    a = authors.get(a_id)
    if a is None:  # or a == '' or (a.get('gravatarHash') is not None and a.get('gravatarImg') is None):
        a = r['author']
        a['id'] = a_id
        a['tooltip'] = generate_author_textblock(a)

        authors[a_id] = a
        if fetching_gravatars.get(a['id']) is None:
            fetching_gravatars[a['id']] = True

            thread = threading.Thread(target=fetch_gravatar, args=(a.copy(),), daemon=True)
            thread.start()
    return a


def write_profile(adata):
    utils.p('writing profile information')
    user = adata['user']
    # convert to MiB here, since the raw byte counts are too big for Blender's integer properties
    if user.get('sumAssetFilesSize') is not None:
        user['sumAssetFilesSize'] /= (1024 * 1024)
    if user.get('sumPrivateAssetFilesSize') is not None:
        user['sumPrivateAssetFilesSize'] /= (1024 * 1024)
    if user.get('remainingPrivateQuota') is not None:
        user['remainingPrivateQuota'] /= (1024 * 1024)

    if adata.get('canEditAllAssets') is True:
        user['exmenu'] = True
    else:
        user['exmenu'] = False

    bpy.context.window_manager['bkit profile'] = adata


def request_profile(api_key):
    a_url = paths.get_api_url() + 'me/'
    headers = utils.get_headers(api_key)
    r = rerequests.get(a_url, headers=headers)
    adata = r.json()
    if adata.get('user') is None:
        utils.p(adata)
        utils.p('getting profile failed')
        return None
    return adata
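
# Sketch of the profile round-trip (the api_key value is a placeholder):
#
#   adata = request_profile('xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx')
#   if adata is not None:
#       write_profile(adata)  # normally deferred to the main thread via tasks_queue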

def fetch_profile(api_key):
    utils.p('fetch profile')
    try:
        adata = request_profile(api_key)
        if adata is not None:
            tasks_queue.add_task((write_profile, (adata,)))
    except Exception as e:
        bk_logger.error(e)


def get_profile():
    preferences = bpy.context.preferences.addons['blenderkit'].preferences
    a = bpy.context.window_manager.get('bkit profile')
    thread = threading.Thread(target=fetch_profile, args=(preferences.api_key,), daemon=True)
    thread.start()
    return a


def query_to_url(query={}, params={}):
    # build a new request
    url = paths.get_api_url() + 'search/'

    # build the request string manually
    # TODO use real queries
    requeststring = '?query='
    if query.get('query') not in ('', None):
        requeststring += query['query'].lower()
    for i, q in enumerate(query):
        if q != 'query':
            requeststring += '+'
            requeststring += q + ':' + str(query[q]).lower()

    # result ordering: _score - relevance, score - BlenderKit score
    order = []
    if params['free_first']:
        order = ['-is_free', ]
    if query.get('query') is None and query.get('category_subtree') is None:
        # assumes no keywords and no category, thus an empty search that is triggered on start.
        # orders by last core file upload
        if query.get('verification_status') == 'uploaded':
            # for validators, sort uploaded from oldest
            order.append('created')
        else:
            order.append('-last_upload')
    elif query.get('author_id') is not None and utils.profile_is_validator():
        order.append('-created')
    else:
        if query.get('category_subtree') is not None:
            order.append('-score,_score')
        else:
            order.append('_score')
    if requeststring.find('+order:') == -1:
        requeststring += '+order:' + ','.join(order)

    requeststring += '&addon_version=%s' % params['addon_version']
    if params.get('scene_uuid') is not None:
        requeststring += '&scene_uuid=%s' % params['scene_uuid']
    # print('params', params)
    urlquery = url + requeststring
    return urlquery


def parse_html_formated_error(text):
    report = text[text.find('