git.blender.org/blender-addons.git
author     Vilém Duha <vilda.novak@gmail.com>   2020-12-05 20:26:05 +0300
committer  Vilém Duha <vilda.novak@gmail.com>   2020-12-05 20:26:05 +0300
commit     05fd1e08cfb277b1ea13bfca47fbb56dca156032 (patch)
tree       2c8df464f7de39874e850473d0814076b7c9fc7d
parent     8c87cf0afccb2bb73a3575eaa58016cfec4e177a (diff)
BlenderKit: resolutions
This introduces resolutions into the addon. This update should enhance the usability of the addon, especially for people with weaker computers. It downloads reduced versions of the assets - for now only images are scaled down. In some cases images are also converted from .png to .jpg to save space.
- there's a default resolution setting
- resolutions can be swapped by the user
- resolutions apply only to textured models and materials with textures larger than 1024px
- resolutions aren't yet generated on the server, so they will become visible after a few days
The version of the addon was bumped to 1.0.40.
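For orientation before the diffstat: the patch maps the UI resolution choice to server-side file types and falls back to the closest resolution the asset actually offers (see resolutions.py and paths.get_res_file() below). The following condensed sketch is not part of the patch; the dictionaries mirror the ones introduced in resolutions.py and paths.py, while the helper name pick_file is illustrative only.

RESOLUTION_PROPS_TO_SERVER = {
    '512': 'resolution_0_5K',
    '1024': 'resolution_1K',
    '2048': 'resolution_2K',
    '4096': 'resolution_4K',
    '8192': 'resolution_8K',
    'ORIGINAL': 'blend',
}

RESOLUTIONS = {
    'resolution_0_5K': 512,
    'resolution_1K': 1024,
    'resolution_2K': 2048,
    'resolution_4K': 4096,
    'resolution_8K': 8192,
}

def pick_file(asset_files, ui_resolution):
    """Pick the asset file closest to the requested resolution; fall back to the original .blend."""
    wanted = RESOLUTION_PROPS_TO_SERVER[ui_resolution]
    # the asset's file list always contains the original .blend entry
    orig = next(f for f in asset_files if f['fileType'] == 'blend')
    if wanted == 'blend':
        return orig
    target = RESOLUTIONS[wanted]
    candidates = [f for f in asset_files if f['fileType'] in RESOLUTIONS]
    if not candidates:
        return orig  # no reduced resolutions generated yet -> original file
    return min(candidates, key=lambda f: abs(RESOLUTIONS[f['fileType']] - target))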
-rw-r--r--  blenderkit/__init__.py                                   |  47
-rw-r--r--  blenderkit/append_link.py                                |  49
-rw-r--r--  blenderkit/asset_pack_bg.py                              |   8
-rw-r--r--  blenderkit/autothumb.py                                  |   3
-rw-r--r--  blenderkit/bg_blender.py                                 |  13
-rw-r--r--  blenderkit/blendfiles/material_thumbnailer_cycles.blend  | bin 2974288 -> 2974541 bytes
-rw-r--r--  blenderkit/download.py                                   | 655
-rw-r--r--  blenderkit/overrides.py                                  |   2
-rw-r--r--  blenderkit/paths.py                                      | 183
-rw-r--r--  blenderkit/rerequests.py                                 |   3
-rw-r--r--  blenderkit/resolutions.py                                | 851
-rw-r--r--  blenderkit/resolutions_bg.py                             |   8
-rw-r--r--  blenderkit/search.py                                     | 135
-rw-r--r--  blenderkit/tasks_queue.py                                |   6
-rw-r--r--  blenderkit/ui.py                                         |  94
-rw-r--r--  blenderkit/ui_panels.py                                  | 195
-rw-r--r--  blenderkit/upload.py                                     |  51
-rw-r--r--  blenderkit/upload_bg.py                                  |  10
-rw-r--r--  blenderkit/utils.py                                      |  72
19 files changed, 2076 insertions(+), 309 deletions(-)
diff --git a/blenderkit/__init__.py b/blenderkit/__init__.py
index c6aa82d7..3726a3c2 100644
--- a/blenderkit/__init__.py
+++ b/blenderkit/__init__.py
@@ -19,8 +19,8 @@
bl_info = {
"name": "BlenderKit Online Asset Library",
"author": "Vilem Duha, Petr Dlouhy",
- "version": (1, 0, 32),
- "blender": (2, 83, 0),
+ "version": (1, 0, 40),
+ "blender": (2, 92, 0),
"location": "View3D > Properties > BlenderKit",
"description": "Online BlenderKit library (materials, models, brushes and more). Connects to the internet.",
"warning": "",
@@ -47,10 +47,11 @@ if "bpy" in locals():
categories = reload(categories)
bkit_oauth = reload(bkit_oauth)
tasks_queue = reload(tasks_queue)
+ resolutions = reload(resolutions)
else:
from blenderkit import asset_inspector, search, download, upload, ratings, autothumb, ui, icons, bg_blender, paths, \
utils, \
- overrides, ui_panels, categories, bkit_oauth, tasks_queue
+ overrides, ui_panels, categories, bkit_oauth, tasks_queue, resolutions
import os
import math
@@ -86,7 +87,8 @@ from bpy.types import (
@persistent
def scene_load(context):
- search.load_previews()
+ if not bpy.app.background:
+ search.load_previews()
ui_props = bpy.context.scene.blenderkitUI
ui_props.assetbar_on = False
ui_props.turn_off = False
@@ -488,6 +490,29 @@ class BlenderKitCommonSearchProps(object):
update=search.search_update,
)
+ #resolution download/import settings
+ resolution: EnumProperty(
+ name="Max resolution",
+ description="Cap texture sizes in the file to this resolution",
+ items=
+ (
+ # ('256', '256x256', ''),
+ ('512', '512x512', ''),
+ ('1024', '1024x1024', ''),
+ ('2048', '2048x2048', ''),
+ ('4096', '4096x4096', ''),
+ ('8192', '8192x8192', ''),
+ ('ORIGINAL', 'ORIGINAL FILE', ''),
+
+ ),
+ default='1024',
+ )
+
+ unpack_files: BoolProperty(name="Unpack Files",
+ description="Unpack files after download",
+ default=True
+ )
+
def name_update(self, context):
''' checks for name change, because it decides if whole asset has to be re-uploaded. Name is stored in the blend file
@@ -725,6 +750,16 @@ class BlenderKitMaterialSearchProps(PropertyGroup, BlenderKitCommonSearchProps):
default="",
update=search.search_update,
)
+ append_method: EnumProperty(
+ name="Import Method",
+ items=(
+ ('LINK', 'Link', "Link Material - will be in external file and can't be directly edited"),
+ ('APPEND', 'Append', 'Append if you need to edit the material'),
+ ),
+ description="Appended materials are editable in your scene. Linked assets are saved in original files, "
+ "aren't editable directly, but also don't increase your file size",
+ default="APPEND"
+ )
automap: BoolProperty(name="Auto-Map",
description="reset object texture space and also add automatically a cube mapped UV "
"to the object. \n this allows most materials to apply instantly to any mesh",
@@ -1195,9 +1230,9 @@ class BlenderKitSceneUploadProps(PropertyGroup, BlenderKitCommonUploadProps):
default=(.25, .25, .5),
)
- texture_resolution_min: IntProperty(name="Texture Eesolution Min",
+ texture_resolution_min: IntProperty(name="Texture Resolution Min",
description="texture resolution min, autofilled", default=0)
- texture_resolution_max: IntProperty(name="Texture Eesolution Max",
+ texture_resolution_max: IntProperty(name="Texture Resolution Max",
description="texture resolution max, autofilled", default=0)
pbr: BoolProperty(name="PBR Compatible", description="Is compatible with PBR standard", default=False)
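The two properties added to BlenderKitCommonSearchProps above are ordinary Blender RNA properties. For readers less familiar with the bpy property system, here is a self-contained sketch of how such a group is registered and read back; the class name DemoSearchProps and the scene pointer demo_search are illustrative, not the addon's names.

import bpy

class DemoSearchProps(bpy.types.PropertyGroup):
    resolution: bpy.props.EnumProperty(
        name="Max resolution",
        description="Cap texture sizes in the file to this resolution",
        items=(
            ('512', '512x512', ''),
            ('1024', '1024x1024', ''),
            ('2048', '2048x2048', ''),
            ('4096', '4096x4096', ''),
            ('8192', '8192x8192', ''),
            ('ORIGINAL', 'ORIGINAL FILE', ''),
        ),
        default='1024',
    )
    unpack_files: bpy.props.BoolProperty(
        name="Unpack Files",
        description="Unpack files after download",
        default=True,
    )

def register():
    bpy.utils.register_class(DemoSearchProps)
    bpy.types.Scene.demo_search = bpy.props.PointerProperty(type=DemoSearchProps)

# After register(), the download code can read bpy.context.scene.demo_search.resolution,
# which yields '1024' by default and is later mapped to 'resolution_1K' on the server.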
diff --git a/blenderkit/append_link.py b/blenderkit/append_link.py
index 56b2857d..66fa711f 100644
--- a/blenderkit/append_link.py
+++ b/blenderkit/append_link.py
@@ -48,22 +48,33 @@ def append_material(file_name, matname=None, link=False, fake_user=True):
# in previous step there's check if the imported material
# is already in the scene, so we know same name != same material
- mats_before = bpy.data.materials.keys()
-
- with bpy.data.libraries.load(file_name, link=link, relative=True) as (data_from, data_to):
- for m in data_from.materials:
- if m == matname or matname is None:
- data_to.materials = [m]
- # print(m, type(m))
- matname = m
- break;
-
+ mats_before = bpy.data.materials[:]
+ try:
+ with bpy.data.libraries.load(file_name, link=link, relative=True) as (data_from, data_to):
+ found = False
+ for m in data_from.materials:
+ if m == matname or matname is None:
+ data_to.materials = [m]
+ # print(m, type(m))
+ matname = m
+ found = True
+ break;
+
+ #not found yet? probably some name inconsistency then.
+ # if not found and len(data_from.materials)>0:
+ # data_to.materials = data_from.materials[0]
+ # matname = data_from.materials[0]
+ # print('had to assign')
+ # print('in the appended file the name is ', matname)
+
+ except Exception as e:
+ print(e)
+ print('failed to open the asset file')
# we have to find the new material :(
- for mname in bpy.data.materials.keys():
- if mname not in mats_before:
- mat = bpy.data.materials[mname]
+ for m in bpy.data.materials:
+ if m not in mats_before:
+ mat = m
break
-
if fake_user:
mat.use_fake_user = True
@@ -88,13 +99,10 @@ def append_scene(file_name, scenename=None, link=False, fake_user=False):
def link_collection(file_name, obnames=[], location=(0, 0, 0), link=False, parent = None, **kwargs):
'''link an instanced group - model type asset'''
sel = utils.selection_get()
- print('link collection')
- print(kwargs)
with bpy.data.libraries.load(file_name, link=link, relative=True) as (data_from, data_to):
scols = []
for col in data_from.collections:
- print('linking this ', col)
if col == kwargs['name']:
data_to.collections = [col]
@@ -106,7 +114,9 @@ def link_collection(file_name, obnames=[], location=(0, 0, 0), link=False, paren
main_object = bpy.context.view_layer.objects.active
main_object.instance_type = 'COLLECTION'
- main_object.parent = parent
+ if parent is not None:
+ main_object.parent = bpy.data.objects.get(parent)
+
main_object.matrix_world.translation = location
for col in bpy.data.collections:
@@ -201,7 +211,6 @@ def append_objects(file_name, obnames=[], location=(0, 0, 0), link=False, **kwar
fc = utils.get_fake_context(bpy.context, area_type='VIEW_3D')
bpy.ops.wm.append(fc, filename=object_name, directory=path)
-
return_obs = []
for ob in bpy.context.scene.objects:
if ob.select_get():
@@ -262,8 +271,6 @@ def append_objects(file_name, obnames=[], location=(0, 0, 0), link=False, **kwar
for ob in hidden_objects:
ob.hide_viewport = True
- print(return_obs)
- print(main_object)
if kwargs.get('rotation') is not None:
main_object.rotation_euler = kwargs['rotation']
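The rewritten append_material() above identifies the newly loaded datablock by comparing bpy.data.materials before and after the library load, instead of relying on names. A minimal standalone sketch of that pattern follows; the file path and function name are placeholders.

import bpy

def append_material_simple(file_path, mat_name=None, link=False):
    mats_before = bpy.data.materials[:]  # snapshot existing datablocks
    with bpy.data.libraries.load(file_path, link=link, relative=True) as (data_from, data_to):
        for name in data_from.materials:
            if mat_name is None or name == mat_name:
                data_to.materials = [name]  # request just this one material
                break
    # whatever wasn't in the snapshot is the newly appended material
    new_mats = [m for m in bpy.data.materials if m not in mats_before]
    return new_mats[0] if new_mats else None

# mat = append_material_simple('/tmp/asset.blend', 'MyMaterial')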
diff --git a/blenderkit/asset_pack_bg.py b/blenderkit/asset_pack_bg.py
new file mode 100644
index 00000000..adde9515
--- /dev/null
+++ b/blenderkit/asset_pack_bg.py
@@ -0,0 +1,8 @@
+import sys
+import json
+from blenderkit import resolutions
+
+BLENDERKIT_EXPORT_DATA = sys.argv[-1]
+
+if __name__ == "__main__":
+ resolutions.run_bg(sys.argv[-1]) \ No newline at end of file
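asset_pack_bg.py expects the path of a JSON task file as the last command-line argument. The actual launcher lives in resolutions.send_to_bg(), which is not part of this excerpt; the sketch below only illustrates the usual headless-Blender invocation pattern, and its function name, flags and paths are assumptions.

import json
import subprocess
import tempfile

def launch_bg(blender_binary, blend_file, script_path, data):
    # write the task description the child process will read via sys.argv[-1]
    with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
        json.dump(data, f)
        datafile = f.name
    return subprocess.Popen([
        blender_binary, '--background', blend_file,
        '--python', script_path,
        '--', datafile,  # arguments after '--' are passed through to the script
    ], stdout=subprocess.PIPE, stderr=subprocess.PIPE)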
diff --git a/blenderkit/autothumb.py b/blenderkit/autothumb.py
index cf8edb10..ec0028f0 100644
--- a/blenderkit/autothumb.py
+++ b/blenderkit/autothumb.py
@@ -166,7 +166,7 @@ def start_thumbnailer(self, context):
return {'FINISHED'}
-def start_material_thumbnailer(self, context):
+def start_material_thumbnailer(self, context, wait = False):
# Prepare to save the file
mat = bpy.context.active_object.active_material
mat.blenderkit.is_generating_thumbnail = True
@@ -290,6 +290,7 @@ class GenerateMaterialThumbnailOperator(bpy.types.Operator):
bl_label = "BlenderKit Material Thumbnail Generator"
bl_options = {'REGISTER', 'INTERNAL'}
+
@classmethod
def poll(cls, context):
return bpy.context.view_layer.objects.active is not None
diff --git a/blenderkit/bg_blender.py b/blenderkit/bg_blender.py
index c9ec43e4..a8597675 100644
--- a/blenderkit/bg_blender.py
+++ b/blenderkit/bg_blender.py
@@ -93,14 +93,21 @@ def progress(text, n=None):
else:
n = ' ' + ' ' + str(int(n * 1000) / 1000) + '% '
spaces = ' ' * (len(text) + 55)
- sys.stdout.write('progress{%s%s}\n' % (text, n))
- sys.stdout.flush()
+ try:
+ sys.stdout.write('progress{%s%s}\n' % (text, n))
+
+ sys.stdout.flush()
+ except Exception as e:
+ print('background progress reporting race condition')
+ print(e)
# @bpy.app.handlers.persistent
def bg_update():
'''monitoring of background process'''
text = ''
+ #utils.p('timer search')
+
s = bpy.context.scene
global bg_processes
@@ -195,8 +202,6 @@ class KillBgProcess(bpy.types.Operator):
# print(tcom.process_type, self.process_type)
if tcom.process_type == self.process_type:
source = eval(tcom.eval_path)
- print(source.bl_rna.name, self.process_source)
- print(source.name)
kill = False
if source.bl_rna.name == 'Object' and self.process_source == 'MODEL':
if source.name == bpy.context.active_object.name:
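progress() above reports to the parent add-on by printing lines of the form progress{<text> <percent>% } to stdout. The monitoring side is not shown in this hunk, so the parser below is only a hypothetical illustration of how such lines could be picked out of the background process output.

import re

PROGRESS_RE = re.compile(r'progress\{(.*)\}')

def parse_progress(line):
    """Return the progress text reported by a background Blender process, or None."""
    m = PROGRESS_RE.search(line)
    return m.group(1).strip() if m else None

# parse_progress('progress{Rendering thumbnail   42.0% }') -> 'Rendering thumbnail   42.0%'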
diff --git a/blenderkit/blendfiles/material_thumbnailer_cycles.blend b/blenderkit/blendfiles/material_thumbnailer_cycles.blend
index a4780345..584f0bd3 100644
--- a/blenderkit/blendfiles/material_thumbnailer_cycles.blend
+++ b/blenderkit/blendfiles/material_thumbnailer_cycles.blend
Binary files differ
diff --git a/blenderkit/download.py b/blenderkit/download.py
index e3a69ea8..8749b696 100644
--- a/blenderkit/download.py
+++ b/blenderkit/download.py
@@ -26,8 +26,9 @@ if "bpy" in locals():
colors = reload(colors)
tasks_queue = reload(tasks_queue)
rerequests = reload(rerequests)
+ resolutions = reload(resolutions)
else:
- from blenderkit import paths, append_link, utils, ui, colors, tasks_queue, rerequests
+ from blenderkit import paths, append_link, utils, ui, colors, tasks_queue, rerequests, resolutions
import threading
import time
@@ -68,7 +69,8 @@ def check_missing():
for l in missing:
asset_data = l['asset_data']
- downloaded = check_existing(asset_data)
+
+ downloaded = check_existing(asset_data, resolution=asset_data['resolution'])
if downloaded:
try:
l.reload()
@@ -80,7 +82,7 @@ def check_missing():
def check_unused():
'''find assets that have been deleted from scene but their library is still present.'''
- #this is obviously broken. Blender should take care of the extra data automaticlaly
+ # this is obviously broken. Blender should take care of the extra data automaticlaly
return;
used_libs = []
for ob in bpy.data.objects:
@@ -143,7 +145,7 @@ def scene_load(context):
# for asset_id in reset_asset_ids:
# asset_data = reset_asset_ids[asset_id]
# done = False
- # if check_existing(asset_data):
+ # if check_existing(asset_data, resolution = should be here):
# for obname in reset_obs[asset_id]:
# downloader = s.collection.objects[obname]
# done = try_finished_append(asset_data,
@@ -269,13 +271,41 @@ def report_usages():
thread = threading.Thread(target=utils.requests_post_thread, args=(url, usage_report, headers))
thread.start()
mt = time.time() - mt
- print('report generation: ', mt)
+ # print('report generation: ', mt)
+
+
+def udpate_asset_data_in_dicts(asset_data):
+ '''
+ updates asset data in all relevant dictionaries, after a threaded download task \
+ - where the urls were retrieved, and now they can be reused
+ Parameters
+ ----------
+ asset_data - data coming back from thread, thus containing also download urls
+ '''
+ scene = bpy.context.scene
+ scene['assets used'] = scene.get('assets used', {})
+ scene['assets used'][asset_data['assetBaseId']] = asset_data.copy()
+
+ scene['assets rated'] = scene.get('assets rated', {})
+ id = asset_data['assetBaseId']
+ scene['assets rated'][id] = scene['assets rated'].get(id, False)
+ sr = bpy.context.scene['search results']
+ for i, r in enumerate(sr):
+ if r['assetBaseId'] == asset_data['assetBaseId']:
+ for f in asset_data['files']:
+ if f.get('url'):
+ for f1 in r['files']:
+ if f1['fileType'] == f['fileType']:
+ f1['url'] = f['url']
def append_asset(asset_data, **kwargs): # downloaders=[], location=None,
- '''Link asset to the scene'''
+ '''Link asset to the scene.
- file_names = paths.get_download_filenames(asset_data)
+
+ '''
+
+ file_names = paths.get_download_filepaths(asset_data, kwargs['resolution'])
props = None
#####
# how to do particle drop:
@@ -291,12 +321,12 @@ def append_asset(asset_data, **kwargs): # downloaders=[], location=None,
if asset_data['assetType'] == 'scene':
scene = append_link.append_scene(file_names[0], link=False, fake_user=False)
props = scene.blenderkit
- parent = scene
+ asset_main = scene
+
+ s = bpy.context.scene
if asset_data['assetType'] == 'model':
- s = bpy.context.scene
downloaders = kwargs.get('downloaders')
- s = bpy.context.scene
sprops = s.blenderkit_models
# TODO this is here because combinations of linking objects or appending groups are rather not-usefull
if sprops.append_method == 'LINK_COLLECTION':
@@ -309,8 +339,10 @@ def append_asset(asset_data, **kwargs): # downloaders=[], location=None,
# copy for override
al = sprops.append_link
# set consistency for objects already in scene, otherwise this literally breaks blender :)
- ain = asset_in_scene(asset_data)
-
+ ain, resolution = asset_in_scene(asset_data)
+ # this is commented out since it already happens in start_download function.
+ # if resolution:
+ # kwargs['resolution'] = resolution
# override based on history
if ain is not False:
if ain == 'LINKED':
@@ -319,16 +351,20 @@ def append_asset(asset_data, **kwargs): # downloaders=[], location=None,
al = 'APPEND'
if asset_data['assetType'] == 'model':
source_parent = get_asset_in_scene(asset_data)
- parent, new_obs = duplicate_asset(source=source_parent, **kwargs)
- parent.location = kwargs['model_location']
- parent.rotation_euler = kwargs['model_rotation']
- # this is a case where asset is already in scene and should be duplicated instead.
- # there is a big chance that the duplication wouldn't work perfectly(hidden or unselectable objects)
- # so here we need to check and return if there was success
- # also, if it was successful, no other operations are needed , basically all asset data is already ready from the original asset
- if new_obs:
- bpy.ops.wm.undo_push_context(message='add %s to scene' % asset_data['name'])
- return
+ if source_parent:
+ asset_main, new_obs = duplicate_asset(source=source_parent, **kwargs)
+ asset_main.location = kwargs['model_location']
+ asset_main.rotation_euler = kwargs['model_rotation']
+ # this is a case where asset is already in scene and should be duplicated instead.
+ # there is a big chance that the duplication wouldn't work perfectly(hidden or unselectable objects)
+ # so here we need to check and return if there was success
+ # also, if it was successful, no other operations are needed , basically all asset data is already ready from the original asset
+ if new_obs:
+ # update here assets rated/used because there might be new download urls?
+ udpate_asset_data_in_dicts(asset_data)
+ bpy.ops.wm.undo_push_context(message='add %s to scene' % asset_data['name'])
+
+ return
# first get conditions for append link
link = al == 'LINK'
@@ -346,52 +382,52 @@ def append_asset(asset_data, **kwargs): # downloaders=[], location=None,
return
if link:
- parent, new_obs = append_link.link_collection(file_names[-1],
- location=downloader['location'],
- rotation=downloader['rotation'],
- link=link,
- name=asset_data['name'],
- parent=kwargs.get('parent'))
+ asset_main, new_obs = append_link.link_collection(file_names[-1],
+ location=downloader['location'],
+ rotation=downloader['rotation'],
+ link=link,
+ name=asset_data['name'],
+ parent=kwargs.get('parent'))
else:
- parent, new_obs = append_link.append_objects(file_names[-1],
- location=downloader['location'],
- rotation=downloader['rotation'],
- link=link,
- name=asset_data['name'],
- parent=kwargs.get('parent'))
- if parent.type == 'EMPTY' and link:
+ asset_main, new_obs = append_link.append_objects(file_names[-1],
+ location=downloader['location'],
+ rotation=downloader['rotation'],
+ link=link,
+ name=asset_data['name'],
+ parent=kwargs.get('parent'))
+ if asset_main.type == 'EMPTY' and link:
bmin = asset_data['bbox_min']
bmax = asset_data['bbox_max']
size_min = min(1.0, (bmax[0] - bmin[0] + bmax[1] - bmin[1] + bmax[2] - bmin[2]) / 3)
- parent.empty_display_size = size_min
+ asset_main.empty_display_size = size_min
elif kwargs.get('model_location') is not None:
if link:
- parent, new_obs = append_link.link_collection(file_names[-1],
- location=kwargs['model_location'],
- rotation=kwargs['model_rotation'],
- link=link,
- name=asset_data['name'],
- parent=kwargs.get('parent'))
+ asset_main, new_obs = append_link.link_collection(file_names[-1],
+ location=kwargs['model_location'],
+ rotation=kwargs['model_rotation'],
+ link=link,
+ name=asset_data['name'],
+ parent=kwargs.get('parent'))
else:
- parent, new_obs = append_link.append_objects(file_names[-1],
- location=kwargs['model_location'],
- rotation=kwargs['model_rotation'],
- link=link,
- name=asset_data['name'],
- parent=kwargs.get('parent'))
+ asset_main, new_obs = append_link.append_objects(file_names[-1],
+ location=kwargs['model_location'],
+ rotation=kwargs['model_rotation'],
+ link=link,
+ name=asset_data['name'],
+ parent=kwargs.get('parent'))
# scale Empty for assets, so they don't clutter the scene.
- if parent.type == 'EMPTY' and link:
+ if asset_main.type == 'EMPTY' and link:
bmin = asset_data['bbox_min']
bmax = asset_data['bbox_max']
size_min = min(1.0, (bmax[0] - bmin[0] + bmax[1] - bmin[1] + bmax[2] - bmin[2]) / 3)
- parent.empty_display_size = size_min
+ asset_main.empty_display_size = size_min
if link:
- group = parent.instance_collection
+ group = asset_main.instance_collection
lib = group.library
lib['asset_data'] = asset_data
@@ -425,17 +461,20 @@ def append_asset(asset_data, **kwargs): # downloaders=[], location=None,
# bpy.context.tool_settings.image_paint.brush = brush
props = brush.blenderkit
- parent = brush
+ asset_main = brush
elif asset_data['assetType'] == 'material':
inscene = False
+ sprops = s.blenderkit_mat
+
for m in bpy.data.materials:
if m.blenderkit.id == asset_data['id']:
inscene = True
material = m
break;
if not inscene:
- material = append_link.append_material(file_names[-1], link=False, fake_user=False)
+ link = sprops.append_method == 'LINK'
+ material = append_link.append_material(file_names[-1], link=link, fake_user=False)
target_object = bpy.data.objects[kwargs['target_object']]
if len(target_object.material_slots) == 0:
@@ -443,28 +482,93 @@ def append_asset(asset_data, **kwargs): # downloaders=[], location=None,
else:
target_object.material_slots[kwargs['material_target_slot']].material = material
- parent = material
-
- scene['assets used'] = scene.get('assets used', {})
- scene['assets used'][asset_data['assetBaseId']] = asset_data.copy()
+ asset_main = material
- scene['assets rated'] = scene.get('assets rated', {})
+ asset_data['resolution'] = kwargs['resolution']
+ udpate_asset_data_in_dicts(asset_data)
- id = asset_data['assetBaseId']
- scene['assets rated'][id] = scene['assets rated'].get(id, False)
-
- parent['asset_data'] = asset_data # TODO remove this??? should write to blenderkit Props?
+ asset_main['asset_data'] = asset_data # TODO remove this??? should write to blenderkit Props?
bpy.ops.wm.undo_push_context(message='add %s to scene' % asset_data['name'])
# moving reporting to on save.
# report_use_success(asset_data['id'])
+def replace_resolution_linked(file_paths, asset_data):
+ # replace one asset resolution for another.
+ # this is the much simpler case
+ # - find the library,
+ # - replace the path and name of the library, reload.
+ file_name = os.path.basename(file_paths[-1])
+
+ for l in bpy.data.libraries:
+ if not l.get('asset_data'):
+ continue;
+ if not l['asset_data']['assetBaseId'] == asset_data['assetBaseId']:
+ continue;
+
+ utils.p('try to re-link library')
+
+ if not os.path.isfile(file_paths[-1]):
+ utils.p('library file doesnt exist')
+ break;
+ l.filepath = os.path.join(os.path.dirname(l.filepath), file_name)
+ l.name = file_name
+ udpate_asset_data_in_dicts(asset_data)
+
+
+def replace_resolution_appended(file_paths, asset_data, resolution):
+ # In this case the texture paths need to be replaced.
+ # Find the file path pattern that is present in texture paths
+ # replace the pattern with the new one.
+ file_name = os.path.basename(file_paths[-1])
+
+ new_filename_pattern = os.path.splitext(file_name)[0]
+ all_patterns = []
+ for suff in paths.resolution_suffix.values():
+ pattern = f"{asset_data['id']}{os.sep}textures{suff}{os.sep}"
+ all_patterns.append(pattern)
+ new_pattern = f"{asset_data['id']}{os.sep}textures{paths.resolution_suffix[resolution]}{os.sep}"
+
+ # replace the pattern with the new one.
+ # print(existing_filename_patterns)
+ # print(new_filename_pattern)
+ # print('existing images:')
+ for i in bpy.data.images:
+
+ for old_pattern in all_patterns:
+ if i.filepath.find(old_pattern) > -1:
+ fp = i.filepath.replace(old_pattern, new_pattern)
+ fpabs = bpy.path.abspath(fp)
+ if not os.path.exists(fpabs):
+ # this currently handles .png's that have been swapped to .jpg's during resolution generation process.
+ # should probably also handle .exr's and similar others.
+ # utils.p('need to find a replacement')
+ base, ext = os.path.splitext(fp)
+ if resolution == 'blend' and i.get('original_extension'):
+ fp = base + i.get('original_extension')
+ elif ext in ('.png', '.PNG'):
+ fp = base + '.jpg'
+ i.filepath = fp
+ i.filepath_raw = fp # bpy.path.abspath(fp)
+ for pf in i.packed_files:
+ pf.filepath = fp
+ i.reload()
+ udpate_asset_data_in_dicts(asset_data)
+
+
# @bpy.app.handlers.persistent
-def timer_update(): # TODO might get moved to handle all blenderkit stuff, not to slow down.
- '''check for running and finished downloads and react. write progressbars too.'''
+def timer_update():
+ # TODO might get moved to handle all blenderkit stuff, not to slow down.
+ '''
+ check for running and finished downloads.
+ Running downloads get checked for progress which is passed to UI.
+ Finished downloads are processed and linked/appended to scene.
+ '''
global download_threads
+ # utils.p('timer download')
+
if len(download_threads) == 0:
- return 1.0
+ return 2.0
s = bpy.context.scene
for threaddata in download_threads:
t = threaddata[0]
@@ -487,20 +591,27 @@ def timer_update(): # TODO might get moved to handle all blenderkit stuff, not
sprops.report = tcom.report
download_threads.remove(threaddata)
return
- file_names = paths.get_download_filenames(asset_data)
+ file_paths = paths.get_download_filepaths(asset_data, tcom.passargs['resolution'])
+
+ if len(file_paths) == 0:
+ utils.p('library names not found in asset data after download')
+ download_threads.remove(threaddata)
+ break;
+
wm = bpy.context.window_manager
at = asset_data['assetType']
- if ((bpy.context.mode == 'OBJECT' and (at == 'model' \
- or at == 'material'))) \
+ if ((bpy.context.mode == 'OBJECT' and \
+ (at == 'model' or at == 'material'))) \
or ((at == 'brush') \
- and wm.get(
- 'appendable') == True) or at == 'scene': # don't do this stuff in editmode and other modes, just wait...
+ and wm.get('appendable') == True) or at == 'scene':
+ # don't do this stuff in editmode and other modes, just wait...
download_threads.remove(threaddata)
# duplicate file if the global and subdir are used in prefs
- if len(file_names) == 2: # todo this should try to check if both files exist and are ok.
- shutil.copyfile(file_names[0], file_names[1])
+ if len(file_paths) == 2: # todo this should try to check if both files exist and are ok.
+ utils.copy_asset(file_paths[0], file_paths[1])
+ # shutil.copyfile(file_paths[0], file_paths[1])
utils.p('appending asset')
# progress bars:
@@ -513,8 +624,23 @@ def timer_update(): # TODO might get moved to handle all blenderkit stuff, not
# handle lost libraries here:
for l in bpy.data.libraries:
if l.get('asset_data') is not None and l['asset_data']['id'] == asset_data['id']:
- l.filepath = file_names[-1]
+ l.filepath = file_paths[-1]
l.reload()
+
+ if tcom.passargs.get('replace_resolution'):
+ # try to relink first.
+
+ ain, resolution = asset_in_scene(asset_data)
+
+ if ain == 'LINKED':
+ replace_resolution_linked(file_paths, asset_data)
+
+
+ elif ain == 'APPENDED':
+ replace_resolution_appended(file_paths, asset_data, tcom.passargs['resolution'])
+
+
+
else:
done = try_finished_append(asset_data, **tcom.passargs)
if not done:
@@ -537,42 +663,87 @@ def timer_update(): # TODO might get moved to handle all blenderkit stuff, not
return .5
-def download_file(asset_data):
+def delete_unfinished_file(file_name):
+ '''
+ Deletes the download if it wasn't finished. If the folder containing it is empty, it also removes the directory
+ Parameters
+ ----------
+ file_name
+
+ Returns
+ -------
+ None
+ '''
+ try:
+ os.remove(file_name)
+ except Exception as e:
+ print(e)
+ asset_dir = os.path.dirname(file_name)
+ if len(os.listdir(asset_dir)) == 0:
+ os.rmdir(asset_dir)
+ return
+
+
+def download_file(asset_data, resolution='blend'):
# this is a simple non-threaded way to download files for background resolution genenration tool
- file_name = paths.get_download_filenames(asset_data)[0] # prefer global dir if possible.
+ file_name = paths.get_download_filepaths(asset_data, resolution)[0] # prefer global dir if possible.
- if check_existing(asset_data):
+ if check_existing(asset_data, resolution=resolution):
# this sends the thread for processing, where another check should occur, since the file might be corrupted.
utils.p('not downloading, already in db')
return file_name
preferences = bpy.context.preferences.addons['blenderkit'].preferences
api_key = preferences.api_key
+ download_canceled = False
+
with open(file_name, "wb") as f:
print("Downloading %s" % file_name)
headers = utils.get_headers(api_key)
-
- response = requests.get(asset_data['url'], stream=True)
+ res_file_info, resolution = paths.get_res_file(asset_data, resolution)
+ response = requests.get(res_file_info['url'], stream=True)
total_length = response.headers.get('Content-Length')
- if total_length is None: # no content length header
- f.write(response.content)
+ if total_length is None or int(total_length) < 1000: # no content length header
+ download_canceled = True
+ print(response.content)
else:
+ total_length = int(total_length)
dl = 0
- for data in response.iter_content(chunk_size=4096):
+ last_percent = 0
+ percent = 0
+ for data in response.iter_content(chunk_size=4096 * 10):
dl += len(data)
- print(dl)
+
+ # the exact output you're looking for:
+ fs_str = utils.files_size_to_text(total_length)
+
+ percent = int(dl * 100 / total_length)
+ if percent > last_percent:
+ last_percent = percent
+ # sys.stdout.write('\r')
+ # sys.stdout.write(f'Downloading {asset_data['name']} {fs_str} {percent}% ') # + int(dl * 50 / total_length) * 'x')
+ print(
+ f'Downloading {asset_data["name"]} {fs_str} {percent}% ') # + int(dl * 50 / total_length) * 'x')
+ # sys.stdout.flush()
+
+ # print(int(dl*50/total_length)*'x'+'\r')
f.write(data)
+ if download_canceled:
+ delete_unfinished_file(file_name)
+ return None
+
return file_name
class Downloader(threading.Thread):
- def __init__(self, asset_data, tcom, scene_id, api_key):
+ def __init__(self, asset_data, tcom, scene_id, api_key, resolution='blend'):
super(Downloader, self).__init__()
self.asset_data = asset_data
self.tcom = tcom
self.scene_id = scene_id
self.api_key = api_key
+ self.resolution = resolution
self._stop_event = threading.Event()
def stop(self):
@@ -588,9 +759,9 @@ class Downloader(threading.Thread):
tcom = self.tcom
scene_id = self.scene_id
api_key = self.api_key
-
+ tcom.report = 'Looking for asset'
# TODO get real link here...
- has_url = get_download_url(asset_data, scene_id, api_key, tcom=tcom)
+ has_url = get_download_url(asset_data, scene_id, api_key, resolution=self.resolution, tcom=tcom)
if not has_url:
tasks_queue.add_task(
@@ -600,41 +771,68 @@ class Downloader(threading.Thread):
return
# only now we can check if the file already exists. This should have 2 levels, for materials and for brushes
# different than for the non free content. delete is here when called after failed append tries.
- if check_existing(asset_data) and not tcom.passargs.get('delete'):
+
+ if check_existing(asset_data, resolution=self.resolution) and not tcom.passargs.get('delete'):
# this sends the thread for processing, where another check should occur, since the file might be corrupted.
tcom.downloaded = 100
utils.p('not downloading, trying to append again')
return;
- file_name = paths.get_download_filenames(asset_data)[0] # prefer global dir if possible.
+ file_name = paths.get_download_filepaths(asset_data, self.resolution)[0] # prefer global dir if possible.
# for k in asset_data:
# print(asset_data[k])
if self.stopped():
utils.p('stopping download: ' + asset_data['name'])
return;
+ download_canceled = False
with open(file_name, "wb") as f:
- print("Downloading %s" % file_name)
+ utils.p("Downloading %s" % file_name)
headers = utils.get_headers(api_key)
-
- response = requests.get(asset_data['url'], stream=True)
+ res_file_info, self.resolution = paths.get_res_file(asset_data, self.resolution)
+ response = requests.get(res_file_info['url'], stream=True)
total_length = response.headers.get('Content-Length')
-
if total_length is None: # no content length header
- f.write(response.content)
+ print('no content length')
+ print(response.content)
+ tcom.report = response.content
+ download_canceled = True
else:
+ # utils.p(total_length)
+ if int(total_length) < 1000: # means probably no file returned.
+ tasks_queue.add_task((ui.add_report, (response.content, 20, colors.RED)))
+
+ tcom.report = response.content
+
tcom.file_size = int(total_length)
+ fsmb = tcom.file_size // (1024 * 1024)
+ fskb = tcom.file_size % 1024
+ if fsmb == 0:
+ t = '%iKB' % fskb
+ else:
+ t = ' %iMB' % fsmb
+ tcom.report = f'Downloading {t} {self.resolution}'
+
dl = 0
totdata = []
- for data in response.iter_content(chunk_size=4096*32): #crashed here... why? investigate:
+ for data in response.iter_content(chunk_size=4096 * 32): # crashed here... why? investigate:
dl += len(data)
tcom.downloaded = dl
tcom.progress = int(100 * tcom.downloaded / tcom.file_size)
f.write(data)
if self.stopped():
utils.p('stopping download: ' + asset_data['name'])
- os.remove(file_name)
- return;
+ download_canceled = True
+ break
+
+ if download_canceled:
+ delete_unfinished_file(file_name)
+ return;
+ # unpack the file immediately after download
+
+ tcom.report = f'Unpacking files'
+ self.asset_data['resolution'] = self.resolution
+ resolutions.send_to_bg(self.asset_data, file_name, command='unpack')
class ThreadCom: # object passed to threads to read background process stdout info
@@ -672,7 +870,7 @@ def download(asset_data, **kwargs):
asset_data = copy.deepcopy(asset_data)
else:
asset_data = asset_data.to_dict()
- readthread = Downloader(asset_data, tcom, scene_id, api_key)
+ readthread = Downloader(asset_data, tcom, scene_id, api_key, resolution=kwargs['resolution'])
readthread.start()
global download_threads
@@ -699,22 +897,26 @@ def check_downloading(asset_data, **kwargs):
return downloading
-def check_existing(asset_data):
+def check_existing(asset_data, resolution='blend', can_return_others=False):
''' check if the object exists on the hard drive'''
fexists = False
- file_names = paths.get_download_filenames(asset_data)
+ if asset_data.get('files') == None:
+ # this is because of some very old files where asset data had no files structure.
+ return False
- utils.p('check if file already exists')
+ file_names = paths.get_download_filepaths(asset_data, resolution, can_return_others=can_return_others)
+
+ utils.p('check if file already exists', file_names)
if len(file_names) == 2:
# TODO this should check also for failed or running downloads.
# If download is running, assign just the running thread. if download isn't running but the file is wrong size,
# delete file and restart download (or continue downoad? if possible.)
- if os.path.isfile(file_names[0]) and not os.path.isfile(file_names[1]):
- shutil.copy(file_names[0], file_names[1])
+ if os.path.isfile(file_names[0]): # and not os.path.isfile(file_names[1])
+ utils.copy_asset(file_names[0], file_names[1])
elif not os.path.isfile(file_names[0]) and os.path.isfile(
file_names[1]): # only in case of changed settings or deleted/moved global dict.
- shutil.copy(file_names[1], file_names[0])
+ utils.copy_asset(file_names[1], file_names[0])
if len(file_names) > 0 and os.path.isfile(file_names[0]):
fexists = True
@@ -724,17 +926,23 @@ def check_existing(asset_data):
def try_finished_append(asset_data, **kwargs): # location=None, material_target=None):
''' try to append asset, if not successfully delete source files.
This means probably wrong download, so download should restart'''
- file_names = paths.get_download_filenames(asset_data)
+ file_names = paths.get_download_filepaths(asset_data, kwargs['resolution'])
done = False
utils.p('try to append already existing asset')
if len(file_names) > 0:
if os.path.isfile(file_names[-1]):
kwargs['name'] = asset_data['name']
+ append_asset(asset_data, **kwargs)
+ done = True
+ return done
try:
append_asset(asset_data, **kwargs)
done = True
except Exception as e:
+ # TODO: this should distinguish if the appending failed (wrong file)
+ #  or something else happened (shouldn't delete the files)
print(e)
+ done = False
for f in file_names:
try:
os.remove(f)
@@ -742,7 +950,8 @@ def try_finished_append(asset_data, **kwargs): # location=None, material_target
# e = sys.exc_info()[0]
print(e)
pass;
- done = False
+ return done
+
return done
@@ -777,8 +986,11 @@ def check_selectible(obs):
def duplicate_asset(source, **kwargs):
- '''Duplicate asset when it's already appended in the scene, so that blender's append doesn't create duplicated data.'''
-
+ '''
+ Duplicate asset when it's already appended in the scene,
+ so that blender's append doesn't create duplicated data.
+ '''
+ utils.p('duplicate asset instead')
# we need to save selection
sel = utils.selection_get()
bpy.ops.object.select_all(action='DESELECT')
@@ -794,16 +1006,22 @@ def duplicate_asset(source, **kwargs):
# duplicate the asset objects
bpy.ops.object.duplicate(linked=True)
-
nobs = bpy.context.selected_objects[:]
- #get parent
+ # get asset main object
for ob in nobs:
if ob.parent not in nobs:
- parent = ob
+ asset_main = ob
break
+
+ # in case of replacement, there might be a parent relationship that can be restored
+ if kwargs.get('parent'):
+ parent = bpy.data.objects[kwargs['parent']]
+ asset_main.parent = parent # even if parent is None this is ok without if condition
+ else:
+ asset_main.parent = None
# restore original selection
utils.selection_set(sel)
- return parent , nobs
+ return asset_main, nobs
def asset_in_scene(asset_data):
@@ -812,21 +1030,38 @@ def asset_in_scene(asset_data):
au = scene.get('assets used', {})
id = asset_data['assetBaseId']
+ print(id)
if id in au.keys():
ad = au[id]
- if ad.get('file_name') != None:
-
- asset_data['file_name'] = ad['file_name']
- asset_data['url'] = ad['url']
-
- # browse all collections since linked collections can have same name.
- for c in bpy.data.collections:
- if c.name == ad['name']:
- # there can also be more linked collections with same name, we need to check id.
- if c.library and c.library.get('asset_data') and c.library['asset_data']['assetBaseId'] == id:
- return 'LINKED'
- return 'APPENDED'
- return False
+ if ad.get('files'):
+ for fi in ad['files']:
+ if fi.get('file_name') != None:
+
+ for fi1 in asset_data['files']:
+ if fi['fileType'] == fi1['fileType']:
+ fi1['file_name'] = fi['file_name']
+ fi1['url'] = fi['url']
+
+ # browse all collections since linked collections can have same name.
+ if asset_data['assetType'] == 'MODEL':
+ for c in bpy.data.collections:
+ if c.name == ad['name']:
+ # there can also be more linked collections with same name, we need to check id.
+ if c.library and c.library.get('asset_data') and c.library['asset_data'][
+ 'assetBaseId'] == id:
+ print('asset linked')
+ return 'LINKED', ad.get('resolution')
+ elif asset_data['assetType'] == 'MATERIAL':
+ for m in bpy.data.materials:
+ if not m.get('asset_data'):
+ continue
+ if m['asset_data']['assetBaseId'] == asset_data[
+ 'assetBaseId'] and bpy.context.active_object.active_material.library:
+ return 'LINKED', ad.get('resolution')
+
+ print('asset appended')
+ return 'APPENDED', ad.get('resolution')
+ return False, None
def fprint(text):
@@ -837,9 +1072,10 @@ def fprint(text):
print('###################################################################################')
-def get_download_url(asset_data, scene_id, api_key, tcom=None):
+def get_download_url(asset_data, scene_id, api_key, tcom=None, resolution='blend'):
''''retrieves the download url. The server checks if user can download the item.'''
mt = time.time()
+ utils.pprint('getting download url')
headers = utils.get_headers(api_key)
@@ -848,8 +1084,10 @@ def get_download_url(asset_data, scene_id, api_key, tcom=None):
}
r = None
+ res_file_info, resolution = paths.get_res_file(asset_data, resolution)
+
try:
- r = rerequests.get(asset_data['download_url'], params=data, headers=headers)
+ r = rerequests.get(res_file_info['downloadUrl'], params=data, headers=headers)
except Exception as e:
print(e)
if tcom is not None:
@@ -860,14 +1098,20 @@ def get_download_url(asset_data, scene_id, api_key, tcom=None):
tcom.error = True
return 'Connection Error'
-
if r.status_code < 400:
data = r.json()
url = data['filePath']
- asset_data['url'] = url
- asset_data['file_name'] = paths.extract_filename_from_url(url)
+
+ res_file_info['url'] = url
+ res_file_info['file_name'] = paths.extract_filename_from_url(url)
+
+ # print(res_file_info, url)
+ print(url)
return True
+ # let's print it into UI
+ tasks_queue.add_task((ui.add_report, (str(r), 10, colors.RED)))
+
if r.status_code == 403:
r = 'You need Full plan to get this item.'
# r1 = 'All materials and brushes are available for free. Only users registered to Standard plan can use all models.'
@@ -876,8 +1120,15 @@ def get_download_url(asset_data, scene_id, api_key, tcom=None):
tcom.report = r
tcom.error = True
+ if r.status_code == 404:
+ r = 'Url not found - 404.'
+ # r1 = 'All materials and brushes are available for free. Only users registered to Standard plan can use all models.'
+ if tcom is not None:
+ tcom.report = r
+ tcom.error = True
+
elif r.status_code >= 500:
- utils.p(r.text)
+ # utils.p(r.text)
if tcom is not None:
tcom.report = 'Server error'
tcom.error = True
@@ -889,8 +1140,11 @@ def start_download(asset_data, **kwargs):
check if file isn't downloading or doesn't exist, then start new download
'''
# first check if the asset is already in scene. We can use that asset without checking with server
- quota_ok = asset_in_scene(asset_data) is not False
+ ain, resolution = asset_in_scene(asset_data)
+ # quota_ok = ain is not False
+ # if resolution:
+ # kwargs['resolution'] = resolution
# otherwise, check on server
s = bpy.context.scene
@@ -900,9 +1154,11 @@ def start_download(asset_data, **kwargs):
if not downloading:
# check if there are files already. This check happens 2x once here(for free assets),
# once in thread(for non-free)
- fexists = check_existing(asset_data)
-
- if fexists and quota_ok:
+ fexists = check_existing(asset_data, resolution=kwargs['resolution'])
+ utils.p('does file exist?', fexists)
+ utils.p('asset is in scene', ain)
+ if ain and not kwargs.get('replace_resolution'):
+ # this goes to appending asset - where it should duplicate the original asset already in scene.
done = try_finished_append(asset_data, **kwargs)
# else:
# props = utils.get_search_props()
@@ -917,7 +1173,7 @@ def start_download(asset_data, **kwargs):
elif asset_data['assetType'] == 'scene':
download(asset_data, **kwargs)
elif asset_data['assetType'] == 'brush' or asset_data['assetType'] == 'texture':
- download(asset_data)
+ download(asset_data, **kwargs)
asset_types = (
@@ -946,18 +1202,44 @@ class BlenderkitKillDownloadOperator(bpy.types.Operator):
return {'FINISHED'}
+def available_resolutions_callback(self, context):
+ '''
+ Returns
+ checks active asset for available resolutions and offers only those available
+ TODO: this currently returns always the same list of resolutions, make it actually work
+ '''
+ # print('callback called', self.asset_data)
+ pat_items = (
+ ('512', '512', '', 1),
+ ('1024', '1024', '', 2),
+ ('2048', '2048', '', 3),
+ ('4096', '4096', '', 4),
+ ('8192', '8192', '', 5),
+ )
+ items = []
+ for item in pat_items:
+ if int(self.max_resolution) >= int(item[0]):
+ items.append(item)
+ items.append(('ORIGINAL', 'Original', '', 6))
+ return items
+
+
+def show_enum_values(obj, prop_name):
+ print([item.identifier for item in obj.bl_rna.properties[prop_name].enum_items])
+
+
class BlenderkitDownloadOperator(bpy.types.Operator):
"""Download and link asset to scene. Only link if asset already available locally."""
bl_idname = "scene.blenderkit_download"
bl_label = "BlenderKit Asset Download"
bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
- asset_type: EnumProperty(
- name="Type",
- items=asset_types,
- description="Type of download",
- default="MODEL",
- )
+ # asset_type: EnumProperty(
+ # name="Type",
+ # items=asset_types,
+ # description="Type of download",
+ # default="MODEL",
+ # )
asset_index: IntProperty(name="Asset Index", description='asset index in search results', default=-1)
asset_base_id: StringProperty(
@@ -976,6 +1258,25 @@ class BlenderkitDownloadOperator(bpy.types.Operator):
replace: BoolProperty(name='Replace', description='replace selection with the asset', default=False)
+ replace_resolution: BoolProperty(name='Replace resolution', description='replace resolution of the active asset',
+ default=False)
+
+ invoke_resolution: BoolProperty(name='Replace resolution popup',
+ description='pop up to ask which resolution to download', default=False)
+
+ resolution: EnumProperty(
+ items=available_resolutions_callback,
+ default=0,
+ description='Replace resolution'
+ )
+
+ max_resolution: IntProperty(
+ name="Max resolution",
+ description="",
+ default=0)
+ # has_res_0_5k: BoolProperty(name='512',
+ # description='', default=False)
+
cast_parent: StringProperty(
name="Particles Target Object",
description="",
@@ -984,8 +1285,8 @@ class BlenderkitDownloadOperator(bpy.types.Operator):
# @classmethod
# def poll(cls, context):
# return bpy.context.window_manager.BlenderKitModelThumbnails is not ''
-
- def execute(self, context):
+ def get_asset_data(self, context):
+ # get asset data - it can come from scene, or from search results.
s = bpy.context.scene
if self.asset_index > -1:
@@ -1004,22 +1305,43 @@ class BlenderkitDownloadOperator(bpy.types.Operator):
if asset_base_id in s.get('assets used'):
# already used assets have already download link and especially file link.
asset_data = s['assets used'][asset_base_id].to_dict()
+ return asset_data
+
+ def execute(self, context):
+ sprops = utils.get_search_props()
- atype = asset_data['assetType']
+ self.asset_data = self.get_asset_data(context)
+
+ # print('after getting asset data')
+ # print(self.asset_base_id)
+
+ atype = self.asset_data['assetType']
if bpy.context.mode != 'OBJECT' and (
atype == 'model' or atype == 'material') and bpy.context.view_layer.objects.active is not None:
bpy.ops.object.mode_set(mode='OBJECT')
+ if self.resolution == 0 or self.resolution == '':
+ resolution = sprops.resolution
+ else:
+ resolution = self.resolution
+
+ resolution = resolutions.resolution_props_to_server[resolution]
if self.replace: # cleanup first, assign later.
obs = utils.get_selected_replace_adepts()
# print(obs)
for ob in obs:
- print('replace attempt ', ob.name)
+ # print('replace attempt ', ob.name)
if self.asset_base_id != '':
- # this is for a case when replace is called from a panel, this makes the first of the objects not replacable.
- if ob.get('asset_data') is not None and ob['asset_data']['assetBaseId'] == self.asset_base_id:
- print('skipping this oneli')
+ # this is for a case when replace is called from a panel,
+ # this uses active object as replacement source instead of target.
+ if ob.get('asset_data') is not None and \
+ (ob['asset_data']['assetBaseId'] == self.asset_base_id and ob['asset_data'][
+ 'resolution'] == resolution):
+ # print('skipping this one')
continue;
+ parent = ob.parent
+ if parent:
+ parent = ob.parent.name # after this, parent is either name or None.
kwargs = {
'cast_parent': self.cast_parent,
@@ -1027,24 +1349,53 @@ class BlenderkitDownloadOperator(bpy.types.Operator):
'material_target_slot': ob.active_material_index,
'model_location': tuple(ob.matrix_world.translation),
'model_rotation': tuple(ob.matrix_world.to_euler()),
- 'replace': False,
- 'parent': ob.parent
+ 'replace': True,
+ 'replace_resolution': False,
+ 'parent': parent,
+ 'resolution': resolution
}
+ # TODO - move this After download, not before, so that the replacement
utils.delete_hierarchy(ob)
- start_download(asset_data, **kwargs)
+ start_download(self.asset_data, **kwargs)
else:
+ # replace resolution needs to replace all instances of the resolution in the scene
+ # and deleting originals thus has to be done after the download
+
kwargs = {
'cast_parent': self.cast_parent,
'target_object': self.target_object,
'material_target_slot': self.material_target_slot,
'model_location': tuple(self.model_location),
'model_rotation': tuple(self.model_rotation),
- 'replace': False
+ 'replace': False,
+ 'replace_resolution': self.replace_resolution,
+ 'resolution': resolution
}
- start_download(asset_data, **kwargs)
+ start_download(self.asset_data, **kwargs)
return {'FINISHED'}
+ def draw(self, context):
+ layout = self.layout
+ layout.prop(self, 'resolution', expand=True, icon_only=False)
+
+ def invoke(self, context, event):
+ print(self.asset_base_id)
+ wm = context.window_manager
+ # only make a pop up in case of switching resolutions
+ if self.invoke_resolution:
+ # show_enum_values(self, 'resolution')
+ # print('ENUM VALUES')
+ self.asset_data = self.get_asset_data(context)
+ sprops = utils.get_search_props()
+ if int(sprops.resolution) <= int(self.max_resolution):
+ self.resolution = sprops.resolution
+ elif int(self.max_resolution) > 0:
+ self.resolution = self.max_resolution
+ else:
+ self.resolution = 'ORIGINAL'
+ return wm.invoke_props_dialog(self)
+
def register_download():
bpy.utils.register_class(BlenderkitDownloadOperator)
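Both download_file() and Downloader.run() above follow the same streaming pattern: check Content-Length (a missing or implausibly small value means the server returned an error body rather than a file), write the response in chunks while reporting percent progress, and remove the partial file when the download doesn't finish. A condensed, standalone sketch of that pattern; the URL, path and helper name are placeholders.

import os
import requests

def stream_download(url, file_path, report=print):
    response = requests.get(url, stream=True)
    total = response.headers.get('Content-Length')
    if total is None or int(total) < 1000:  # error body instead of a real file
        report('download failed: %s' % response.content[:200])
        return None
    total = int(total)
    done = 0
    last_percent = -1
    try:
        with open(file_path, 'wb') as f:
            for chunk in response.iter_content(chunk_size=4096 * 32):
                f.write(chunk)
                done += len(chunk)
                percent = int(done * 100 / total)
                if percent > last_percent:
                    last_percent = percent
                    report(f'Downloading {os.path.basename(file_path)} {percent}%')
    except Exception:
        # similar in spirit to delete_unfinished_file(): don't leave half-written assets behind
        if os.path.exists(file_path):
            os.remove(file_path)
        raise
    return file_path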
diff --git a/blenderkit/overrides.py b/blenderkit/overrides.py
index 606a19bb..56013f0c 100644
--- a/blenderkit/overrides.py
+++ b/blenderkit/overrides.py
@@ -143,7 +143,7 @@ def modelProxy():
bpy.ops.object.parent_set(type='OBJECT', keep_transform=True)
return True
else: # TODO report this to ui
- print('not sure what to proxify')
+ utils.p('not sure what to proxify')
return False
diff --git a/blenderkit/paths.py b/blenderkit/paths.py
index 399e7555..ef0dd19d 100644
--- a/blenderkit/paths.py
+++ b/blenderkit/paths.py
@@ -17,7 +17,7 @@
# ##### END GPL LICENSE BLOCK #####
import bpy, os, sys, tempfile, shutil
-from blenderkit import tasks_queue, ui
+from blenderkit import tasks_queue, ui, utils
_presets = os.path.join(bpy.utils.user_resource('SCRIPTS'), "presets")
BLENDERKIT_LOCAL = "http://localhost:8001"
@@ -36,6 +36,7 @@ BLENDERKIT_OAUTH_LANDING_URL = "/oauth-landing/"
BLENDERKIT_SIGNUP_URL = "https://www.blenderkit.com/accounts/register"
BLENDERKIT_SETTINGS_FILENAME = os.path.join(_presets, "bkit.json")
+
def cleanup_old_folders():
'''function to clean up any historical folders for BlenderKit. By now removes the temp folder.'''
orig_temp = os.path.join(os.path.expanduser('~'), 'blenderkit_data', 'temp')
@@ -46,6 +47,7 @@ def cleanup_old_folders():
print(e)
print("couldn't delete old temp directory")
+
def get_bkit_url():
# bpy.app.debug_value = 2
d = bpy.app.debug_value
@@ -75,6 +77,7 @@ def get_api_url():
def get_oauth_landing_url():
return get_bkit_url() + BLENDERKIT_OAUTH_LANDING_URL
+
def get_author_gallery_url(author_id):
return f'{get_bkit_url()}/asset-gallery?query=author_id:{author_id}'
@@ -114,14 +117,15 @@ def get_temp_dir(subdir=None):
tasks_queue.add_task((ui.add_report, (message,)))
return None
user_preferences.global_dir = p
- tempdir = get_temp_dir(subdir = subdir)
+ tempdir = get_temp_dir(subdir=subdir)
return tempdir
+
def get_download_dirs(asset_type):
''' get directories where assets will be downloaded'''
subdmapping = {'brush': 'brushes', 'texture': 'textures', 'model': 'models', 'scene': 'scenes',
- 'material': 'materials'}
+ 'material': 'materials', 'hdr':'hdrs'}
user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
dirs = []
@@ -132,13 +136,11 @@ def get_download_dirs(asset_type):
if not os.path.exists(ddir):
os.makedirs(ddir)
- subdirs = ['brushes', 'textures', 'models', 'scenes', 'materials']
- for subd in subdirs:
- subdir = os.path.join(ddir, subd)
- if not os.path.exists(subdir):
- os.makedirs(subdir)
- if subdmapping[asset_type] == subd:
- dirs.append(subdir)
+ subd = subdmapping[asset_type]
+ subdir = os.path.join(ddir, subd)
+ if not os.path.exists(subdir):
+ os.makedirs(subdir)
+ dirs.append(subdir)
if (
user_preferences.directory_behaviour == 'BOTH' or user_preferences.directory_behaviour == 'LOCAL') and bpy.data.is_saved: # it's important local get's solved as second, since for the linking process only last filename will be taken. For download process first name will be taken and if 2 filenames were returned, file will be copied to the 2nd path.
ddir = user_preferences.project_subdir
@@ -147,13 +149,12 @@ def get_download_dirs(asset_type):
if not os.path.exists(ddir):
os.makedirs(ddir)
- subdirs = ['textures', 'models', 'scenes', 'materials'] # brushes get stored only globally.
- for subd in subdirs:
- subdir = os.path.join(ddir, subd)
- if not os.path.exists(subdir):
- os.makedirs(subdir)
- if subdmapping[asset_type] == subd:
- dirs.append(subdir)
+ subd = subdmapping[asset_type]
+
+ subdir = os.path.join(ddir, subd)
+ if not os.path.exists(subdir):
+ os.makedirs(subdir)
+ dirs.append(subdir)
return dirs
@@ -165,19 +166,22 @@ def slugify(slug):
"""
import unicodedata, re
slug = slug.lower()
- slug = slug.replace('.', '_')
- slug = slug.replace('"', '')
- slug = slug.replace(' ', '_')
+
+ characters = '.," <>()'
+ for ch in characters:
+ slug = slug.replace(ch, '_')
# import re
# slug = unicodedata.normalize('NFKD', slug)
# slug = slug.encode('ascii', 'ignore').lower()
slug = re.sub(r'[^a-z0-9]+.- ', '-', slug).strip('-')
slug = re.sub(r'[-]+', '-', slug)
slug = re.sub(r'/', '_', slug)
+ slug = re.sub(r'\\\'\"', '_', slug)
return slug
def extract_filename_from_url(url):
+ # print(url)
if url is not None:
imgname = url.split('/')[-1]
imgname = imgname.split('?')[0]
@@ -185,37 +189,148 @@ def extract_filename_from_url(url):
return ''
-def get_download_filenames(asset_data):
+resolution_suffix = {
+ 'blend': '',
+ 'resolution_0_5K': '_05k',
+ 'resolution_1K': '_1k',
+ 'resolution_2K': '_2k',
+ 'resolution_4K': '_4k',
+ 'resolution_8K': '_8k',
+}
+resolutions = {
+ 'resolution_0_5K': 512,
+ 'resolution_1K': 1024,
+ 'resolution_2K': 2048,
+ 'resolution_4K': 4096,
+ 'resolution_8K': 8192,
+}
+
+
+def round_to_closest_resolution(res):
+ rdist = 1000000
+ # while res/2>1:
+ # p2res*=2
+ # res = res/2
+ # print(p2res, res)
+ for rkey in resolutions:
+ # print(resolutions[rkey], rdist)
+ d = abs(res - resolutions[rkey])
+ if d < rdist:
+ rdist = d
+ p2res = rkey
+
+ return p2res
+
+
+def get_res_file(asset_data, resolution, find_closest_with_url = False):
+ '''
+ Returns closest resolution that current asset can offer.
+ If there are no resolutions, return orig file.
+ If orig file is requested, return it.
+ params
+ asset_data
+ resolution - ideal resolution
+ find_closest_with_url:
+ returns only resolutions that already contain a url in the asset data, used in scenes where the asset is/was already present.
+ Returns:
+ resolution file
+ resolution, so that other processes can pass on correctly which resolution is downloaded.
+ '''
+ orig = None
+ res = None
+ closest = None
+ target_resolution = resolutions.get(resolution)
+ mindist = 100000000
+
+ for f in asset_data['files']:
+ if f['fileType'] == 'blend':
+ orig = f
+ if resolution == 'blend':
+ #orig file found, return.
+ return orig , 'blend'
+
+ if f['fileType'] == resolution:
+ #exact match found, return.
+ return f, resolution
+ # find closest resolution if the exact match won't be found.
+ rval = resolutions.get(f['fileType'])
+ if rval and target_resolution:
+ rdiff = abs(target_resolution - rval)
+ if rdiff < mindist:
+ closest = f
+ mindist = rdiff
+ # print('\n\n\n\n\n\n\n\n')
+ # print(closest)
+ # print('\n\n\n\n\n\n\n\n')
+ if not res and not closest:
+ # utils.pprint(f'will download blend instead of resolution {resolution}')
+ return orig , 'blend'
+ # utils.pprint(f'found closest resolution {closest["fileType"]} instead of the requested {resolution}')
+ return closest, closest['fileType']
+
+def server_2_local_filename(asset_data, filename):
+ '''
+ Convert file name on server to file name local.
+ This should get replaced
+ '''
+ # print(filename)
+ fn = filename.replace('blend_', '')
+ fn = fn.replace('resolution_', '')
+ # print('after replace ', fn)
+ n = slugify(asset_data['name']) + '_' + fn
+ return n
+
+def get_texture_directory(asset_data, resolution = 'blend'):
+ tex_dir_path = f"//textures{resolution_suffix[resolution]}{os.sep}"
+ return tex_dir_path
+
+def get_download_filepaths(asset_data, resolution='blend', can_return_others = False):
+ '''Get all possible paths of the asset and resolution. Usually global and local directory.'''
dirs = get_download_dirs(asset_data['assetType'])
+ res_file, resolution = get_res_file(asset_data, resolution, find_closest_with_url = can_return_others)
+
+ name_slug = slugify(asset_data['name'])
+ asset_folder_name = f"{name_slug}_{asset_data['id']}"
+
+ # utils.pprint('get download filenames ', dict(res_file))
file_names = []
+
# fn = asset_data['file_name'].replace('blend_', '')
- if asset_data.get('url') is not None:
- # this means asset is already in scene and we don't need to check
-
- fn = extract_filename_from_url(asset_data['url'])
- fn.replace('_blend', '')
- n = slugify(asset_data['name']) + '_' + fn
- # n = 'x.blend'
- # strs = (n, asset_data['name'], asset_data['file_name'])
+ if res_file.get('url') is not None:
+ #Tweak the names a bit:
+ # remove resolution and blend words in names
+ #
+ fn = extract_filename_from_url(res_file['url'])
+ n = server_2_local_filename(asset_data,fn)
for d in dirs:
- file_name = os.path.join(d, n)
+ asset_folder_path = os.path.join(d,asset_folder_name)
+ if not os.path.exists(asset_folder_path):
+ os.makedirs(asset_folder_path)
+
+ file_name = os.path.join(asset_folder_path, n)
file_names.append(file_name)
+
+ utils.p('file paths', file_names)
return file_names
def delete_asset_debug(asset_data):
+ '''TODO fix this for resolutions - should get ALL files from ALL resolutions.'''
from blenderkit import download
user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
api_key = user_preferences.api_key
download.get_download_url(asset_data, download.get_scene_id(), api_key)
- file_names = get_download_filenames(asset_data)
+ file_names = get_download_filepaths(asset_data)
for f in file_names:
- if os.path.isfile(f):
+ asset_dir = os.path.dirname(f)
+
+ if os.path.isdir(asset_dir):
+
try:
- print(f)
- os.remove(f)
+ print(asset_dir)
+ shutil.rmtree(asset_dir)
except:
e = sys.exc_info()[0]
print(e)
diff --git a/blenderkit/rerequests.py b/blenderkit/rerequests.py
index c655c8c5..25f693d7 100644
--- a/blenderkit/rerequests.py
+++ b/blenderkit/rerequests.py
@@ -76,6 +76,9 @@ def rerequest(method, url, **kwargs):
utils.p('reresult', response.status_code)
if response.status_code >= 400:
utils.p('reresult', response.text)
+ tasks_queue.add_task((ui.add_report, (
+ response.text, 10)))
+
else:
tasks_queue.add_task((ui.add_report, (
'Refreshing token failed.Please login manually.', 10)))
diff --git a/blenderkit/resolutions.py b/blenderkit/resolutions.py
new file mode 100644
index 00000000..11651e1b
--- /dev/null
+++ b/blenderkit/resolutions.py
@@ -0,0 +1,851 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+
+if "bpy" in locals():
+ from importlib import reload
+
+ paths = reload(paths)
+ append_link = reload(append_link)
+ bg_blender = reload(bg_blender)
+ utils = reload(utils)
+ download = reload(download)
+ search = reload(search)
+ rerequests = reload(rerequests)
+ upload_bg = reload(upload_bg)
+else:
+ from blenderkit import paths, append_link, bg_blender, utils, download, search, rerequests, upload_bg
+
+import sys, json, os, time
+import subprocess
+import tempfile
+import numpy as np
+import bpy
+import requests
+import math
+import threading
+
+resolutions = {
+ 'resolution_0_5K': 512,
+ 'resolution_1K': 1024,
+ 'resolution_2K': 2048,
+ 'resolution_4K': 4096,
+ 'resolution_8K': 8192,
+}
+rkeys = list(resolutions.keys())
+
+resolution_props_to_server = {
+
+ '512': 'resolution_0_5K',
+ '1024': 'resolution_1K',
+ '2048': 'resolution_2K',
+ '4096': 'resolution_4K',
+ '8192': 'resolution_8K',
+ 'ORIGINAL': 'blend',
+}
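+
+# Example: a resolution property value of '2048' maps to the server fileType
+# 'resolution_2K', which the `resolutions` table above resolves back to 2048 px;
+# 'ORIGINAL' maps to the plain 'blend' file with no downscaling.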
+
+
+def get_current_resolution():
+ actres = 0
+ for i in bpy.data.images:
+ if i.name != 'Render Result':
+ actres = max(actres, i.size[0], i.size[1])
+ return actres
+
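+# The helpers below operate on the flat float array produced by imagetonumpy():
+# pixels are laid out as [r, g, b, a, r, g, b, a, ...], so na[3::4] selects every
+# alpha value and na[::4] every red value (assuming 4-channel images).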
+
+def can_erase_alpha(na):
+ alpha = na[3::4]
+ alpha_sum = alpha.sum()
+ if alpha_sum == alpha.size:
+ print('image can have alpha erased')
+ # print(alpha_sum, alpha.size)
+ return alpha_sum == alpha.size
+
+
+def is_image_black(na):
+ r = na[::4]
+ g = na[1::4]
+ b = na[2::4]
+
+ rgbsum = r.sum() + g.sum() + b.sum()
+
+ # print('rgb sum', rgbsum, r.sum(), g.sum(), b.sum())
+ if rgbsum == 0:
+ print('image can have alpha channel dropped')
+ return rgbsum == 0
+
+
+def is_image_bw(na):
+ r = na[::4]
+ g = na[1::4]
+ b = na[2::4]
+
+ rg_equal = r == g
+ gb_equal = g == b
+ rgbequal = rg_equal.all() and gb_equal.all()
+ if rgbequal:
+ print('image is black and white, can have channels reduced')
+
+ return rgbequal
+
+
+def numpytoimage(a, iname, width=0, height=0, channels=3):
+ t = time.time()
+ foundimage = False
+
+ for image in bpy.data.images:
+
+ if image.name[:len(iname)] == iname and image.size[0] == a.shape[0] and image.size[1] == a.shape[1]:
+ i = image
+ foundimage = True
+ if not foundimage:
+ if channels == 4:
+ bpy.ops.image.new(name=iname, width=width, height=height, color=(0, 0, 0, 1), alpha=True,
+ generated_type='BLANK', float=True)
+ if channels == 3:
+ bpy.ops.image.new(name=iname, width=width, height=height, color=(0, 0, 0), alpha=False,
+ generated_type='BLANK', float=True)
+
+ for image in bpy.data.images:
+ # print(image.name[:len(iname)],iname, image.size[0],a.shape[0],image.size[1],a.shape[1])
+ if image.name[:len(iname)] == iname and image.size[0] == width and image.size[1] == height:
+ i = image
+
+ # dropping this re-shaping code - just doing flat array for speed and simplicity
+ # d = a.shape[0] * a.shape[1]
+ # a = a.swapaxes(0, 1)
+ # a = a.reshape(d)
+ # a = a.repeat(channels)
+ # a[3::4] = 1
+ i.pixels.foreach_set(a) # this gives big speedup!
+ print('\ntime ' + str(time.time() - t))
+ return i
+
+
+def imagetonumpy(i):
+ t = time.time()
+
+ width = i.size[0]
+ height = i.size[1]
+ # print(i.channels)
+
+ size = width * height * i.channels
+ na = np.empty(size, np.float32)
+ i.pixels.foreach_get(na)
+
+ # dropping this re-shaping code - just doing flat array for speed and simplicity
+ # na = na[::4]
+ # na = na.reshape(height, width, i.channels)
+ # na = na.swapaxnes(0, 1)
+
+ # print('\ntime of image to numpy ' + str(time.time() - t))
+ return na
+
+
+def save_image_safely(teximage, filepath):
+ '''
+ Blender makes it really hard to save images... this is to fix its crazy bad image saving.
+ Would be worth investigating PIL or similar instead.
+ Parameters
+ ----------
+ teximage - image datablock to save
+ filepath - target file path
+
+ Returns
+ -------
+
+ '''
+ JPEG_QUALITY = 98
+
+ rs = bpy.context.scene.render
+ ims = rs.image_settings
+
+ orig_file_format = ims.file_format
+ orig_quality = ims.quality
+ orig_color_mode = ims.color_mode
+ orig_compression = ims.compression
+
+ ims.file_format = teximage.file_format
+ if teximage.file_format == 'PNG':
+ ims.color_mode = 'RGBA'
+ elif teximage.channels == 3:
+ ims.color_mode = 'RGB'
+ else:
+ ims.color_mode = 'BW'
+
+ # all pngs with max compression
+ if ims.file_format == 'PNG':
+ ims.compression = 100
+ # all jpgs brought to reasonable quality
+ if ims.file_format == 'JPEG':
+ ims.quality = JPEG_QUALITY
+ # it's actually very important not to try to change the image filepath and packed file filepath before saving,
+ # blender tries to re-pack the image after writing to image.packed_image.filepath and reverts any changes.
+ teximage.save_render(filepath=bpy.path.abspath(filepath), scene=bpy.context.scene)
+
+ teximage.filepath = filepath
+ for packed_file in teximage.packed_files:
+ packed_file.filepath = filepath
+ teximage.filepath_raw = filepath
+ teximage.reload()
+
+ ims.file_format = orig_file_format
+ ims.quality = orig_quality
+ ims.color_mode = orig_color_mode
+ ims.compression = orig_compression
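+
+# Minimal usage sketch (image name and path are hypothetical):
+# img = bpy.data.images['wood_albedo.png']
+# save_image_safely(img, '//textures/wood_albedo.png')
+# The scene render image settings are only changed temporarily and are restored
+# at the end of the call.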
+
+
+def extxchange_to_resolution(filepath):
+ base, ext = os.path.splitext(filepath)
+ if ext in ('.png', '.PNG'):
+ ext = 'jpg'
+
+
+def make_possible_reductions_on_image(teximage, input_filepath, do_reductions=False, do_downscale=False):
+ '''Checks the image and saves it to disk with possibly reduced channels.
+ It can also remove the image from the asset if the image is pure black
+ (this part is currently disabled below) - it finds its usages and replaces
+ the inputs where the image is used with zero/black color.
+ Currently implemented file type conversions:
+ PNG -> JPG
+ '''
+ colorspace = teximage.colorspace_settings.name
+ teximage.colorspace_settings.name = 'Non-Color'
+
+ JPEG_QUALITY = 90
+ # is_image_black(na)
+ # is_image_bw(na)
+
+ rs = bpy.context.scene.render
+ ims = rs.image_settings
+
+ orig_file_format = ims.file_format
+ orig_quality = ims.quality
+ orig_color_mode = ims.color_mode
+ orig_compression = ims.compression
+
+ # if is_image_black(na):
+ # # just erase the image from the asset here, no need to store black images.
+ # pass;
+
+ # fp = teximage.filepath
+ fp = input_filepath
+ if do_reductions:
+ na = imagetonumpy(teximage)
+
+ if can_erase_alpha(na):
+ print(teximage.file_format)
+ if teximage.file_format == 'PNG':
+ print('changing type of image to JPG')
+ base, ext = os.path.splitext(fp)
+ teximage['original_extension'] = ext
+
+ fp = fp.replace('.png', '.jpg')
+ fp = fp.replace('.PNG', '.jpg')
+
+ teximage.name = teximage.name.replace('.png', '.jpg')
+ teximage.name = teximage.name.replace('.PNG', '.jpg')
+
+ teximage.file_format = 'JPEG'
+ ims.quality = JPEG_QUALITY
+ ims.color_mode = 'RGB'
+
+ if is_image_bw(na):
+ ims.color_mode = 'BW'
+
+ ims.file_format = teximage.file_format
+
+ # all pngs with max compression
+ if ims.file_format == 'PNG':
+ ims.compression = 100
+ # all jpgs brought to reasonable quality
+ if ims.file_format == 'JPEG':
+ ims.quality = JPEG_QUALITY
+
+ if do_downscale:
+ downscale(teximage)
+
+ # it's actually very important not to try to change the image filepath and packed file filepath before saving,
+ # blender tries to re-pack the image after writing to image.packed_image.filepath and reverts any changes.
+ teximage.save_render(filepath=bpy.path.abspath(fp), scene=bpy.context.scene)
+ if len(teximage.packed_files) > 0:
+ teximage.unpack(method='REMOVE')
+ teximage.filepath = fp
+ teximage.filepath_raw = fp
+ teximage.reload()
+
+ teximage.colorspace_settings.name = colorspace
+
+ ims.file_format = orig_file_format
+ ims.quality = orig_quality
+ ims.color_mode = orig_color_mode
+ ims.compression = orig_compression
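+
+# Usage sketch, mirroring how generate_lower_resolutions() calls this below:
+# make_possible_reductions_on_image(img, fp, do_reductions=True, do_downscale=False)
+# # first pass: convert PNG->JPG / drop channels, keep the original size
+# make_possible_reductions_on_image(img, fp, do_reductions=False, do_downscale=True)
+# # later passes: only halve the image resolution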
+
+
+def downscale(i):
+ minsize = 128
+
+ sx, sy = i.size[:]
+ sx = round(sx / 2)
+ sy = round(sy / 2)
+ if sx > minsize and sy > minsize:
+ i.scale(sx, sy)
+
+
+def upload_resolutions(files, data):
+ preferences = bpy.context.preferences.addons['blenderkit'].preferences
+
+ upload_data = {
+ "token": preferences.api_key,
+ "id": data['asset_data']['id']
+ }
+
+ uploaded = upload_bg.upload_files(upload_data, files)
+
+ if uploaded:
+ bg_blender.progress('upload finished successfully')
+ else:
+ bg_blender.progress('upload failed.')
+
+
+def unpack_asset(data):
+ utils.p('unpacking asset')
+ asset_data = data['asset_data']
+ # utils.pprint(asset_data)
+
+ blend_file_name = os.path.basename(bpy.data.filepath)
+ ext = os.path.splitext(blend_file_name)[1]
+
+ resolution = asset_data.get('resolution', 'blend')
+ # TODO - passing resolution inside asset data might not be the best solution
+ tex_dir_path = paths.get_texture_directory(asset_data, resolution=resolution)
+ tex_dir_abs = bpy.path.abspath(tex_dir_path)
+ if not os.path.exists(tex_dir_abs):
+ try:
+ os.mkdir(tex_dir_abs)
+ except Exception as e:
+ print(e)
+ bpy.data.use_autopack = False
+ for image in bpy.data.images:
+ if image.name != 'Render Result':
+ # suffix = paths.resolution_suffix(data['suffix'])
+ fp = get_texture_filepath(tex_dir_path, image, resolution=resolution)
+ utils.p('unpacking file', image.name)
+ utils.p(image.filepath, fp)
+
+ for pf in image.packed_files:
+ pf.filepath = fp # bpy.path.abspath(fp)
+ image.filepath = fp # bpy.path.abspath(fp)
+ image.filepath_raw = fp # bpy.path.abspath(fp)
+ image.save()
+ image.unpack(method='REMOVE')
+
+ bpy.ops.wm.save_mainfile(compress=False)
+ # now try to delete the .blend1 file
+ try:
+ os.remove(bpy.data.filepath + '1')
+ except Exception as e:
+ print(e)
+
+
+def patch_asset_empty(asset_id, api_key):
+ '''
+ This function patches the asset with empty data so that the server reindexes it.
+ Should be removed once this is fixed on the server and the server
+ is able to reindex automatically after uploads of resolutions.
+ Returns
+ -------
+ '''
+ upload_data = {
+ }
+ url = paths.get_api_url() + 'assets/' + str(asset_id) + '/'
+ headers = utils.get_headers(api_key)
+ try:
+ r = rerequests.patch(url, json=upload_data, headers=headers, verify=True) # files = files,
+ except requests.exceptions.RequestException as e:
+ print(e)
+ return {'CANCELLED'}
+ return {'FINISHED'}
+
+
+def reduce_all_images(target_scale=1024):
+ for img in bpy.data.images:
+ if img.name != 'Render Result':
+ print('scaling ', img.name, img.size[0], img.size[1])
+ # make_possible_reductions_on_image(i)
+ if max(img.size) > target_scale:
+ ratio = float(target_scale) / float(max(img.size))
+ print(ratio)
+ # i.save()
+ fp = '//tempimagestorage'
+ # print('generated filename',fp)
+ # for pf in img.packed_files:
+ # pf.filepath = fp # bpy.path.abspath(fp)
+
+ img.filepath = fp
+ img.filepath_raw = fp
+ print(int(img.size[0] * ratio), int(img.size[1] * ratio))
+ img.scale(int(img.size[0] * ratio), int(img.size[1] * ratio))
+ img.update()
+ # img.save()
+ # img.reload()
+ img.pack()
+
+
+def get_texture_filepath(tex_dir_path, image, resolution='blend'):
+ image_file_name = bpy.path.basename(image.filepath)
+ if image_file_name == '':
+ image_file_name = image.name.split('.')[0]
+
+ suffix = paths.resolution_suffix[resolution]
+
+ fp = os.path.join(tex_dir_path, image_file_name)
+ # check if there is already an image with the same name and thus also an assigned path
+ # (can happen easily with generated tex sets and more materials)
+ done = False
+ fpn = fp
+ i = 0
+ while not done:
+ is_solo = True
+ for image1 in bpy.data.images:
+ if image != image1 and image1.filepath == fpn:
+ is_solo = False
+ fpleft, fpext = os.path.splitext(fp)
+ fpn = fpleft + str(i).zfill(3) + fpext
+ i += 1
+ if is_solo:
+ done = True
+
+ return fpn
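+
+# Example sketch: if two different images would both resolve to
+# '//textures<resolution suffix>/wood.jpg', the later one gets a zero-padded
+# counter appended ('wood000.jpg', 'wood001.jpg', ...) so no path is assigned twice.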
+
+
+def generate_lower_resolutions(data):
+ asset_data = data['asset_data']
+ actres = get_current_resolution()
+ # first let's skip procedural assets
+ base_fpath = bpy.data.filepath
+
+ s = bpy.context.scene
+
+ print('current resolution of the asset ', actres)
+ if actres > 0:
+ p2res = paths.round_to_closest_resolution(actres)
+ orig_res = p2res
+ print(p2res)
+ finished = False
+ files = []
+ # now skip assets that have lowest possible resolution already
+ if p2res != rkeys[0]:
+ original_textures_filesize = 0
+ for i in bpy.data.images:
+ abspath = bpy.path.abspath(i.filepath)
+ if os.path.exists(abspath):
+ original_textures_filesize += os.path.getsize(abspath)
+
+ while not finished:
+
+ blend_file_name = os.path.basename(base_fpath)
+
+ dirn = os.path.dirname(base_fpath)
+ fn_strip, ext = os.path.splitext(blend_file_name)
+
+ fn = fn_strip + paths.resolution_suffix[p2res] + ext
+ fpath = os.path.join(dirn, fn)
+
+ tex_dir_path = paths.get_texture_directory(asset_data, resolution=p2res)
+
+ tex_dir_abs = bpy.path.abspath(tex_dir_path)
+ if not os.path.exists(tex_dir_abs):
+ os.mkdir(tex_dir_abs)
+
+ reduced_textures_filessize = 0
+ for i in bpy.data.images:
+ if i.name != 'Render Result':
+
+ print('scaling ', i.name, i.size[0], i.size[1])
+ fp = get_texture_filepath(tex_dir_path, i, resolution=p2res)
+
+ if p2res == orig_res:
+ # first, let's link the image back to the original one.
+ i['blenderkit_original_path'] = i.filepath
+ # first round also makes reductions on the image, while keeping resolution
+ make_possible_reductions_on_image(i, fp, do_reductions=True, do_downscale=False)
+
+ else:
+ # lower resolutions only downscale
+ make_possible_reductions_on_image(i, fp, do_reductions=False, do_downscale=True)
+
+ abspath = bpy.path.abspath(i.filepath)
+ if os.path.exists(abspath):
+ reduced_textures_filessize += os.path.getsize(abspath)
+
+ i.pack()
+ # save
+ print(fpath)
+ # save the file
+ bpy.ops.wm.save_as_mainfile(filepath=fpath, compress=True, copy=True)
+ # compare file sizes
+ print(f'textures size was reduced from {original_textures_filesize} to {reduced_textures_filessize}')
+ if reduced_textures_filessize < original_textures_filesize:
+ files.append({
+ "type": p2res,
+ "index": 0,
+ "file_path": fpath
+ })
+
+ print('prepared resolution file: ', p2res)
+ if rkeys.index(p2res) == 0:
+ finished = True
+ else:
+ p2res = rkeys[rkeys.index(p2res) - 1]
+ print('uploading resolution files')
+ # upload_resolutions(files, data)
+ preferences = bpy.context.preferences.addons['blenderkit'].preferences
+ patch_asset_empty(data['asset_data']['id'], preferences.api_key)
+ return
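+
+# Rough flow (sketch): if the largest texture is e.g. ~4000 px,
+# paths.round_to_closest_resolution() is assumed to return 'resolution_4K';
+# the loop above then saves one .blend per step while walking down `rkeys`:
+# resolution_4K -> resolution_2K -> resolution_1K -> resolution_0_5K
+# The first step only runs channel/format reductions at full size, every later
+# step halves the textures, and a file is added to `files` only when its
+# textures actually ended up smaller than the originals.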
+
+
+def regenerate_thumbnail_material(data):
+ # this should re-generate material thumbnail and re-upload it.
+ # first let's skip procedural assets
+ base_fpath = bpy.data.filepath
+ blend_file_name = os.path.basename(base_fpath)
+ bpy.ops.mesh.primitive_cube_add()
+ aob = bpy.context.active_object
+ bpy.ops.object.material_slot_add()
+ aob.material_slots[0].material = bpy.data.materials[0]
+ props = aob.active_material.blenderkit
+ props.thumbnail_generator_type = 'BALL'
+ props.thumbnail_background = False
+ props.thumbnail_resolution = '256'
+ # layout.prop(props, 'thumbnail_generator_type')
+ # layout.prop(props, 'thumbnail_scale')
+ # layout.prop(props, 'thumbnail_background')
+ # if props.thumbnail_background:
+ # layout.prop(props, 'thumbnail_background_lightness')
+ # layout.prop(props, 'thumbnail_resolution')
+ # layout.prop(props, 'thumbnail_samples')
+ # layout.prop(props, 'thumbnail_denoising')
+ # layout.prop(props, 'adaptive_subdivision')
+ # preferences = bpy.context.preferences.addons['blenderkit'].preferences
+ # layout.prop(preferences, "thumbnail_use_gpu")
+ # TODO: here it should call start_material_thumbnailer , but with the wait property on, so it can upload afterwards.
+ bpy.ops.object.blenderkit_material_thumbnail()
+ time.sleep(130)
+ # save
+ # this does the actual job
+
+ return
+
+
+def assets_db_path():
+ dpath = os.path.dirname(bpy.data.filepath)
+ fpath = os.path.join(dpath, 'all_assets.json')
+ return fpath
+
+
+def get_assets_search():
+ bpy.app.debug_value = 2
+
+ results = []
+ preferences = bpy.context.preferences.addons['blenderkit'].preferences
+ url = paths.get_api_url() + 'search/all'
+ i = 0
+ while url is not None:
+ headers = utils.get_headers(preferences.api_key)
+ print('fetching assets from assets endpoint')
+ print(url)
+ retries = 0
+ while retries < 3:
+ r = rerequests.get(url, headers=headers)
+
+ try:
+ adata = r.json()
+ url = adata.get('next')
+ print(i)
+ i += 1
+ except Exception as e:
+ print(e)
+ print('failed to get next')
+ if retries == 2:
+ url = None
+ if adata.get('results') != None:
+ results.extend(adata['results'])
+ retries = 3
+ print(f'fetched page {i}')
+ retries += 1
+
+ fpath = assets_db_path()
+ with open(fpath, 'w') as s:
+ json.dump(results, s)
+
+
+def get_assets_for_resolutions(page_size=100, max_results=100000000):
+ preferences = bpy.context.preferences.addons['blenderkit'].preferences
+
+ dpath = os.path.dirname(bpy.data.filepath)
+ filepath = os.path.join(dpath, 'assets_for_resolutions.json')
+ params = {
+ 'order': '-created',
+ 'textureResolutionMax_gte': '100',
+ # 'last_resolution_upload_lt':'2020-9-01'
+ }
+ search.get_search_simple(params, filepath=filepath, page_size=page_size, max_results=max_results,
+ api_key=preferences.api_key)
+ return filepath
+
+
+def get_materials_for_validation(page_size=100, max_results=100000000):
+ preferences = bpy.context.preferences.addons['blenderkit'].preferences
+ dpath = os.path.dirname(bpy.data.filepath)
+ filepath = os.path.join(dpath, 'materials_for_validation.json')
+ params = {
+ 'order': '-created',
+ 'asset_type': 'material',
+ 'verification_status': 'uploaded'
+ }
+ search.get_search_simple(params, filepath=filepath, page_size=page_size, max_results=max_results,
+ api_key=preferences.api_key)
+ return filepath
+
+
+# This gets all assets in the database through the/assets endpoint. Currently not used, since we use elastic for everything.
+# def get_assets_list():
+# bpy.app.debug_value = 2
+#
+# results = []
+# preferences = bpy.context.preferences.addons['blenderkit'].preferences
+# url = paths.get_api_url() + 'assets/all'
+# i = 0
+# while url is not None:
+# headers = utils.get_headers(preferences.api_key)
+# print('fetching assets from assets endpoint')
+# print(url)
+# retries = 0
+# while retries < 3:
+# r = rerequests.get(url, headers=headers)
+#
+# try:
+# adata = r.json()
+# url = adata.get('next')
+# print(i)
+# i += 1
+# except Exception as e:
+# print(e)
+# print('failed to get next')
+# if retries == 2:
+# url = None
+# if adata.get('results') != None:
+# results.extend(adata['results'])
+# retries = 3
+# print(f'fetched page {i}')
+# retries += 1
+#
+# fpath = assets_db_path()
+# with open(fpath, 'w') as s:
+# json.dump(results, s)
+
+
+def load_assets_list(filepath):
+ if os.path.exists(filepath):
+ with open(filepath, 'r') as s:
+ assets = json.load(s)
+ return assets
+
+
+def check_needs_resolutions(a):
+ if a['verificationStatus'] == 'validated' and a['assetType'] in ('material', 'model', 'scene'):
+ # the search itself now picks the right assets so there's no need to filter more than asset types.
+ for f in a['files']:
+ if f['fileType'].find('resolution') > -1:
+ return False
+
+ return True
+ return False
+
+
+def download_asset(asset_data, resolution='blend', unpack=False, api_key=''):
+ '''
+ Download an asset in a non-threaded way.
+ Parameters
+ ----------
+ asset_data - search result from elastic or assets endpoints from API
+ resolution - requested resolution, 'blend' meaning the original file
+ unpack - when True, unpack the textures of the downloaded file in a background instance
+ api_key - BlenderKit api key
+
+ Returns
+ -------
+ path to the resulting asset file or None if the asset isn't accessible
+ '''
+
+ has_url = download.get_download_url(asset_data, download.get_scene_id(), api_key, tcom=None,
+ resolution='blend')
+ if has_url:
+ fpath = download.download_file(asset_data)
+ if fpath and unpack:
+ send_to_bg(asset_data, fpath, command='unpack', wait=True)
+ return fpath
+
+ return None
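+
+# Usage sketch (used by generate_resolution_thread() below):
+# fpath = download_asset(asset_data, resolution='blend', unpack=True, api_key=api_key)
+# With unpack=True the downloaded file is re-opened in a background Blender
+# instance that unpacks its textures next to the .blend before generation starts.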
+
+
+def generate_resolution_thread(asset_data, api_key):
+ '''
+ A thread that downloads the file and only then starts an instance of Blender that generates the resolutions.
+ Parameters
+ ----------
+ asset_data
+
+ Returns
+ -------
+
+ '''
+
+ fpath = download_asset(asset_data, unpack=True, api_key=api_key)
+ if fpath:
+ print('send to bg ', fpath)
+ proc = send_to_bg(asset_data, fpath, command='generate_resolutions', wait=True);
+ # send_to_bg by now waits for end of the process.
+ # time.sleep((5))
+
+
+def iterate_for_resolutions(filepath, process_count=12, api_key=''):
+ ''' iterate through all assigned assets, check for those which need generation and send them to res gen'''
+ assets = load_assets_list(filepath)
+ print(len(assets))
+ threads = []
+ for asset_data in assets:
+ asset_data = search.parse_result(asset_data)
+ if asset_data is not None:
+
+ if check_needs_resolutions(asset_data):
+ print('downloading and generating resolution for %s' % asset_data['name'])
+ # this is just a quick hack for not using original dirs in blenderkit...
+ thread = threading.Thread(target=generate_resolution_thread, args=(asset_data, api_key))
+ thread.start()
+
+ threads.append(thread)
+ print('processes ', len(threads))
+ while len(threads) > process_count - 1:
+ for proc in threads:
+ if not proc.is_alive():
+ threads.remove(proc)
+ break;
+ else:
+ print(f'Failed to retrieve asset from server:{asset_data["name"]}')
+ else:
+ print('not generated resolutions:', asset_data['name'])
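+
+# End-to-end usage sketch for the background resolution generator (the api key
+# lookup and process count are illustrative):
+# api_key = bpy.context.preferences.addons['blenderkit'].preferences.api_key
+# fpath = get_assets_for_resolutions(page_size=100)
+# iterate_for_resolutions(fpath, process_count=4, api_key=api_key)
+# Each asset is downloaded with download_asset(..., unpack=True) and then
+# send_to_bg(..., command='generate_resolutions') starts a second headless Blender.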
+
+
+def send_to_bg(asset_data, fpath, command='generate_resolutions', wait=True):
+ '''
+ Send various tasks to a new Blender instance that runs and closes after finishing the task.
+ If wait is True, this function waits until the process finishes.
+ The function tries to set the same bpy.app.debug_value in the instance of Blender that is run.
+ Parameters
+ ----------
+ asset_data
+ fpath - file that will be processed
+ command - command which should be run in background.
+
+ Returns
+ -------
+ None
+ '''
+ data = {
+ 'fpath': fpath,
+ 'debug_value': bpy.app.debug_value,
+ 'asset_data': asset_data,
+ 'command': command,
+ }
+ binary_path = bpy.app.binary_path
+ tempdir = tempfile.mkdtemp()
+ datafile = os.path.join(tempdir, 'resdata.json')
+ script_path = os.path.dirname(os.path.realpath(__file__))
+ with open(datafile, 'w') as s:
+ json.dump(data, s)
+
+ print('opening Blender instance to do processing - ', command)
+
+ if wait:
+ proc = subprocess.run([
+ binary_path,
+ "--background",
+ "-noaudio",
+ fpath,
+ "--python", os.path.join(script_path, "resolutions_bg.py"),
+ "--", datafile
+ ], bufsize=1, stdout=sys.stdout, stdin=subprocess.PIPE, creationflags=utils.get_process_flags())
+
+ else:
+ # TODO this should be fixed to allow multithreading.
+ proc = subprocess.Popen([
+ binary_path,
+ "--background",
+ "-noaudio",
+ fpath,
+ "--python", os.path.join(script_path, "resolutions_bg.py"),
+ "--", datafile
+ ], bufsize=1, stdout=subprocess.PIPE, stdin=subprocess.PIPE, creationflags=utils.get_process_flags())
+ return proc
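+
+# The spawned process is roughly equivalent to running (sketch):
+# blender --background -noaudio <asset .blend> \
+#     --python blenderkit/resolutions_bg.py -- <tempdir>/resdata.json
+# where the json file carries fpath, debug_value, asset_data and the command.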
+
+
+def write_data_back(asset_data):
+ '''ensures that the data in the resolution file is the same as in the database.'''
+ pass;
+
+
+def run_bg(datafile):
+ print('background file operation')
+ with open(datafile, 'r') as f:
+ data = json.load(f)
+ bpy.app.debug_value = data['debug_value']
+ write_data_back(data['asset_data'])
+ if data['command'] == 'generate_resolutions':
+ generate_lower_resolutions(data)
+ elif data['command'] == 'unpack':
+ unpack_asset(data)
+ elif data['command'] == 'regen_thumbnail':
+ regenerate_thumbnail_material(data)
+
+# load_assets_list()
+# generate_lower_resolutions()
+# class TestOperator(bpy.types.Operator):
+# """Tooltip"""
+# bl_idname = "object.test_anything"
+# bl_label = "Test Operator"
+#
+# @classmethod
+# def poll(cls, context):
+# return True
+#
+# def execute(self, context):
+# iterate_for_resolutions()
+# return {'FINISHED'}
+#
+#
+# def register():
+# bpy.utils.register_class(TestOperator)
+#
+#
+# def unregister():
+# bpy.utils.unregister_class(TestOperator)
diff --git a/blenderkit/resolutions_bg.py b/blenderkit/resolutions_bg.py
new file mode 100644
index 00000000..adde9515
--- /dev/null
+++ b/blenderkit/resolutions_bg.py
@@ -0,0 +1,8 @@
+import sys
+import json
+from blenderkit import resolutions
+
+BLENDERKIT_EXPORT_DATA = sys.argv[-1]
+
+if __name__ == "__main__":
+ resolutions.run_bg(BLENDERKIT_EXPORT_DATA) \ No newline at end of file
diff --git a/blenderkit/search.py b/blenderkit/search.py
index b681134c..5e4c41d9 100644
--- a/blenderkit/search.py
+++ b/blenderkit/search.py
@@ -29,8 +29,10 @@ if "bpy" in locals():
version_checker = reload(version_checker)
tasks_queue = reload(tasks_queue)
rerequests = reload(rerequests)
+ resolutions = reload(resolutions)
else:
- from blenderkit import paths, utils, categories, ui, colors, bkit_oauth, version_checker, tasks_queue, rerequests
+ from blenderkit import paths, utils, categories, ui, colors, bkit_oauth, version_checker, tasks_queue, rerequests, \
+ resolutions
import blenderkit
from bpy.app.handlers import persistent
@@ -58,6 +60,8 @@ import threading
import platform
import bpy
import copy
+import json
+import math
search_start_time = 0
prev_time = 0
@@ -103,12 +107,14 @@ def refresh_token_timer():
return max(3600, user_preferences.api_key_life - 3600)
+
def update_ad(ad):
if not ad.get('assetBaseId'):
try:
ad['assetBaseId'] = ad['asset_base_id'] # this should stay ONLY for compatibility with older scenes
ad['assetType'] = ad['asset_type'] # this should stay ONLY for compatibility with older scenes
- ad['verificationStatus'] = ad['verification_status'] # this should stay ONLY for compatibility with older scenes
+ ad['verificationStatus'] = ad[
+ 'verification_status'] # this should stay ONLY for compatibility with older scenes
ad['author'] = {}
ad['author']['id'] = ad['author_id'] # this should stay ONLY for compatibility with older scenes
ad['canDownload'] = ad['can_download'] # this should stay ONLY for compatibility with older scenes
@@ -116,6 +122,7 @@ def update_ad(ad):
print('BLenderKit failed to update older asset data')
return ad
+
def update_assets_data(): # updates assets data on scene load.
'''updates some properties that were changed on scenes with older assets.
The properties were mainly changed from snake_case to CamelCase to fit the data that is coming from the server.
@@ -223,9 +230,9 @@ def parse_result(r):
# utils.p('asset with no files-size')
asset_type = r['assetType']
if len(r['files']) > 0:
-
+ r['available_resolutions'] = []
allthumbs = []
- durl, tname, small_tname = '','',''
+ durl, tname, small_tname = '', '', ''
for f in r['files']:
if f['fileType'] == 'thumbnail':
tname = paths.extract_filename_from_url(f['fileThumbnailLarge'])
@@ -239,6 +246,13 @@ def parse_result(r):
durl = f['downloadUrl'].split('?')[0]
# fname = paths.extract_filename_from_url(f['filePath'])
+ if f['fileType'].find('resolution') > -1:
+ r['available_resolutions'].append(resolutions.resolutions[f['fileType']])
+ r['max_resolution'] = 0
+ if r['available_resolutions']:#should check only for non-empty sequences
+ r['max_resolution'] = max(r['available_resolutions'])
+
+
tooltip = generate_tooltip(r)
# for some reason, the id was still int on some occurances. investigate this.
r['author']['id'] = str(r['author']['id'])
@@ -248,7 +262,7 @@ def parse_result(r):
asset_data = {'thumbnail': tname,
'thumbnail_small': small_tname,
# 'thumbnails':allthumbs,
- 'download_url': durl,
+ # 'download_url': durl, #made obsolete since files are stored in orig form.
# 'id': r['id'],
# 'asset_base_id': r['assetBaseId'],#this should stay ONLY for compatibility with older scenes
# 'name': r['name'],
@@ -289,8 +303,20 @@ def parse_result(r):
asset_data['texture_size_meters'] = params.get('textureSizeMeters', 1.0)
asset_data.update(tdict)
- if r['assetBaseId'] in scene.get('assets used', {}).keys():
+
+ au = scene.get('assets used', {})
+ if au == {}:
+ scene['assets used'] = au
+ if r['assetBaseId'] in au.keys():
asset_data['downloaded'] = 100
+ # transcribe all urls already fetched from the server
+ r_previous = au[r['assetBaseId']]
+ if r_previous.get('files'):
+ for f in r_previous['files']:
+ if f.get('url'):
+ for f1 in r['files']:
+ if f1['fileType'] == f['fileType']:
+ f1['url'] = f['url']
# attempt to switch to use original data gradually, since the parsing as itself should become obsolete.
asset_data.update(r)
@@ -300,9 +326,11 @@ def parse_result(r):
# @bpy.app.handlers.persistent
def timer_update():
# this makes a first search after opening blender. showing latest assets.
+ # utils.p('timer search')
+
global first_time
preferences = bpy.context.preferences.addons['blenderkit'].preferences
- if first_time: # first time
+ if first_time and not bpy.app.background: # first time
first_time = False
if preferences.show_on_start:
# TODO here it should check if there are some results, and only open assetbar if this is the case, not search.
@@ -385,6 +413,8 @@ def timer_update():
props.report = 'Found %i results. ' % (s['search results orig']['count'])
if len(s['search results']) == 0:
tasks_queue.add_task((ui.add_report, ('No matching results found.',)))
+ # undo push
+ bpy.ops.wm.undo_push_context(message='Get BlenderKit search')
else:
print('error', error)
@@ -393,11 +423,11 @@ def timer_update():
# print('finished search thread')
mt('preview loading finished')
+
return .3
def load_previews():
-
scene = bpy.context.scene
# FIRST START SEARCH
props = scene.blenderkitUI
@@ -566,13 +596,8 @@ def generate_tooltip(mdata):
# write files size - this doesn't reflect true file size, since files size is computed from all asset files, including resolutions.
if mdata.get('filesSize'):
- fs = mdata['filesSize']
- fsmb = fs // (1024 * 1024)
- fskb = fs % 1024
- if fsmb == 0:
- t += 'files size: %iKB\n' % fskb
- else:
- t += 'files size: %iMB %iKB\n' % (fsmb, fskb)
+ fs = utils.files_size_to_text(mdata['filesSize'])
+ t += f'files size: {fs}\n'
# t = writeblockm(t, mparams, key='meshPolyType', pretext='mesh type', width = col_w)
# t = writeblockm(t, mparams, key='objectCount', pretext='nubmber of objects', width = col_w)
@@ -597,6 +622,18 @@ def generate_tooltip(mdata):
# t += '\n'
t = writeblockm(t, mdata, key='license', width=col_w)
+ if utils.profile_is_validator():
+ fs = mdata.get('files')
+ if fs:
+ resolutions = 'resolutions:'
+ for f in fs:
+ if f['fileType'].find('resolution') > -1:
+ resolutions += f['fileType'][11:] + ' '
+ resolutions += '\n'
+ t += resolutions
+
+ t = writeblockm(t, mdata, key='isFree', width=col_w)
+
# generator is for both upload preview and search, this is only after search
# if mdata.get('versionNumber'):
# # t = writeblockm(t, mdata, key='versionNumber', pretext='version', width = col_w)
@@ -607,7 +644,7 @@ def generate_tooltip(mdata):
# t += generate_author_textblock(adata)
# t += '\n'
- if len(t.split('\n')) < 6:
+ if len(t.split('\n')) < 11:
t += '\n'
t += get_random_tip(mdata)
t += '\n'
@@ -710,7 +747,7 @@ def write_gravatar(a_id, gravatar_path):
def fetch_gravatar(adata):
- utils.p('fetch gravatar')
+ # utils.p('fetch gravatar')
if adata.get('gravatarHash') is not None:
gravatar_path = paths.get_temp_dir(subdir='bkit_g/') + adata['gravatarHash'] + '.jpg'
@@ -808,7 +845,7 @@ def get_profile():
class Searcher(threading.Thread):
query = None
- def __init__(self, query, params,orig_result):
+ def __init__(self, query, params, orig_result):
super(Searcher, self).__init__()
self.query = query
self.params = params
@@ -885,7 +922,6 @@ class Searcher(threading.Thread):
if not params['get_next']:
urlquery = self.query_to_url()
-
try:
utils.p(urlquery)
r = rerequests.get(urlquery, headers=headers) # , params = rparameters)
@@ -897,7 +933,7 @@ class Searcher(threading.Thread):
reports = e
# props.report = e
return
- mt('response is back ')
+ mt('search response is back ')
try:
rdata = r.json()
except Exception as inst:
@@ -1182,9 +1218,59 @@ def add_search_process(query, params, orig_result):
thread = Searcher(query, params, orig_result)
thread.start()
- search_threads.append([thread, tempdir, query['asset_type'],{}])# 4th field is for results
+ search_threads.append([thread, tempdir, query['asset_type'], {}]) # 4th field is for results
- mt('thread started')
+ mt('search thread started')
+
+
+def get_search_simple(parameters, filepath=None, page_size=100, max_results=100000000, api_key=''):
+ '''
+ Searches the BlenderKit server with the given parameters and returns the results.
+
+ Parameters
+ ----------
+ parameters - dict of blenderkit elastic parameters
+ filepath - a file to save the results. If None, results are returned
+ page_size - page size for retrieved results
+ max_results - max results of the search
+ api_key - BlenderKit api key
+
+ Returns
+ -------
+ Returns search results as a list, and optionally saves to filepath
+
+ '''
+ headers = utils.get_headers(api_key)
+ url = paths.get_api_url() + 'search/'
+ requeststring = url + '?query='
+ for p in parameters.keys():
+ requeststring += f'+{p}:{parameters[p]}'
+
+ requeststring += '&page_size=' + str(page_size)
+ response = rerequests.get(requeststring, headers=headers) # , params = rparameters)
+ # print(r.json())
+ search_results = response.json()
+
+ results = []
+ results.extend(search_results['results'])
+ page_index = 2
+ page_count = math.ceil(search_results['count'] / page_size)
+ while search_results.get('next') and len(results) < max_results:
+ print(f'getting page {page_index} , total pages {page_count}')
+ response = rerequests.get(search_results['next'], headers=headers) # , params = rparameters)
+ search_results = response.json()
+ # print(search_results)
+ results.extend(search_results['results'])
+ page_index += 1
+
+ if not filepath:
+ return results
+
+ with open(filepath, 'w') as s:
+ json.dump(results, s)
+ print(f'retrieved {len(results)} assets from elastic search')
+ return results
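+
+# Usage sketch (parameters similar to get_materials_for_validation() in resolutions.py):
+# results = get_search_simple({'asset_type': 'material', 'verification_status': 'uploaded'},
+#                             page_size=100, api_key=api_key)
+# When filepath is set, the same list is also dumped to disk as json.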
def search(category='', get_next=False, author_id=''):
@@ -1216,21 +1302,18 @@ def search(category='', get_next=False, author_id=''):
props = scene.blenderkit_mat
query = build_query_material()
-
if ui_props.asset_type == 'TEXTURE':
if not hasattr(scene, 'blenderkit_tex'):
return;
# props = scene.blenderkit_tex
# query = build_query_texture()
-
if ui_props.asset_type == 'BRUSH':
if not hasattr(scene, 'blenderkit_brush'):
return;
props = scene.blenderkit_brush
query = build_query_brush()
-
if props.is_searching and get_next == True:
return;
@@ -1260,7 +1343,7 @@ def search(category='', get_next=False, author_id=''):
# query['keywords'] += '+is_free:true'
orig_results = scene.get(f'bkit {ui_props.asset_type.lower()} search orig', {})
if orig_results != {}:
- #ensure it's a copy in dict for what we are passing to thread:
+ # ensure it's a copy in dict for what we are passing to thread:
orig_results = orig_results.to_dict()
add_search_process(query, params, orig_results)
tasks_queue.add_task((ui.add_report, ('BlenderKit searching....', 2)))
diff --git a/blenderkit/tasks_queue.py b/blenderkit/tasks_queue.py
index c619a7b0..21eba33b 100644
--- a/blenderkit/tasks_queue.py
+++ b/blenderkit/tasks_queue.py
@@ -60,6 +60,7 @@ def add_task(task, wait = 0, only_last = False, fake_context = False, fake_conte
def queue_worker():
+ #utils.p('timer queue worker')
time_step = 2.0
q = get_queue()
@@ -68,6 +69,8 @@ def queue_worker():
# first round we get all tasks that are supposed to be stashed and run only once (only_last option)
# stashing finds tasks with the property only_last and same command and executes only the last one.
while not q.empty():
+ # print('queue while 1')
+
task = q.get()
if task.only_last:
#this now makes the keys not only by task, but also first argument.
@@ -77,6 +80,8 @@ def queue_worker():
stashed[str(task.command)+str(task.arguments[0])] = task
else:
back_to_queue.append(task)
+ if len(stashed.keys())>1:
+ print(stashed)
#return tasks to que except for stashed
for task in back_to_queue:
q.put(task)
@@ -104,6 +109,7 @@ def queue_worker():
except Exception as e:
utils.p('task failed:')
print(e)
+ # print('queue while 2')
for task in back_to_queue:
q.put(task)
return 2.0
diff --git a/blenderkit/ui.py b/blenderkit/ui.py
index 9985466a..a4af023e 100644
--- a/blenderkit/ui.py
+++ b/blenderkit/ui.py
@@ -46,6 +46,7 @@ from bpy_extras import view3d_utils
import mathutils
from mathutils import Vector
import time
+import datetime
import os
handler_2d = None
@@ -234,7 +235,7 @@ def draw_ratings_bgl():
rating_possible, rated, asset, asset_data = is_rating_possible()
if rating_possible: # (not rated or ui_props.rating_menu_on):
- print('rating is pssible', asset_data['name'])
+ # print('rating is pssible', asset_data['name'])
bkit_ratings = asset.bkit_ratings
bgcol = bpy.context.preferences.themes[0].user_interface.wcol_tooltip.inner
textcol = (1, 1, 1, 1)
@@ -243,7 +244,7 @@ def draw_ratings_bgl():
font_size = int(ui.rating_ui_scale * 20)
if ui.rating_button_on:
- print('should draw button')
+ # print('should draw button')
img = utils.get_thumbnail('star_white.png')
ui_bgl.draw_image(ui.rating_x,
@@ -631,11 +632,25 @@ def draw_callback_2d(self, context):
draw_callback_2d_upload_preview(self, context)
-def draw_downloader(x, y, percent=0, img=None):
+def draw_downloader(x, y, percent=0, img=None, text= ''):
if img is not None:
ui_bgl.draw_image(x, y, 50, 50, img, .5)
+
ui_bgl.draw_rect(x, y, 50, int(0.5 * percent), (.2, 1, .2, .3))
ui_bgl.draw_rect(x - 3, y - 3, 6, 6, (1, 0, 0, .3))
+ # if asset_data is not None:
+ # ui_bgl.draw_text(asset_data['name'], x, y, colors.TEXT)
+ # ui_bgl.draw_text(asset_data['filesSize'])
+ if text:
+ ui_bgl.draw_text(text, x, y - 15,12, colors.TEXT)
+ #asset_data and asset_data.get('filesSize'):
+ # fs = asset_data['filesSize']
+ # fsmb = fs // (1024 * 1024)
+ # fskb = fs % 1024
+ # if fsmb == 0:
+ # t += 'files size: %iKB\n' % fskb
+ # else:
+ # t += 'files size: %iMB %iKB\n' % (fsmb, fskb)
def draw_progress(x, y, text='', percent=None, color=colors.GREEN):
@@ -681,9 +696,9 @@ def draw_callback_2d_progress(self, context):
if loc is not None:
if asset_data['assetType'] == 'model':
# models now draw with star trek mode, no need to draw percent for the image.
- draw_downloader(loc[0], loc[1], percent=tcom.progress, img=img)
+ draw_downloader(loc[0], loc[1], percent=tcom.progress, img=img, text=tcom.report)
else:
- draw_downloader(loc[0], loc[1], percent=tcom.progress, img=img)
+ draw_downloader(loc[0], loc[1], percent=tcom.progress, img=img, text=tcom.report)
else:
@@ -718,6 +733,21 @@ def draw_callback_2d_upload_preview(self, context):
draw_tooltip(ui_props.bar_x, ui_props.bar_y, text=ui_props.tooltip, img=img)
+def is_upload_old(asset_data):
+ '''
+ Estimates whether the asset has been stuck in the 'uploaded' state for too long.
+ Returns the number of days the validation is over the limit, or 0 if it is within the limit.
+ '''
+ date_time_str = asset_data["created"][:10]
+ # date_time_str = 'Jun 28 2018 7:40AM'
+ date_time_obj = datetime.datetime.strptime(date_time_str, '%Y-%m-%d')
+ today = date_time_obj.today()
+ age = today - date_time_obj
+ old = datetime.timedelta(days=7)
+ if age > old:
+ return (age.days - old.days)
+ return 0
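+
+# Example: an asset created on 2020-11-20 and checked on 2020-12-05 is 15 days
+# old, 8 days over the 7-day limit, so this returns 8; for validators the asset
+# bar below then tints its thumbnail red with alpha min(8 * .05, 0.5) = 0.4.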
+
def draw_callback_2d_search(self, context):
s = bpy.context.scene
@@ -746,7 +776,7 @@ def draw_callback_2d_search(self, context):
# 1,
# img,
# 1)
- if not ui_props.dragging and ui_props.hcount>0:
+ if not ui_props.dragging and ui_props.hcount > 0:
search_results = s.get('search results')
search_results_orig = s.get('search results orig')
if search_results == None:
@@ -804,7 +834,7 @@ def draw_callback_2d_search(self, context):
index = a + ui_props.scrolloffset + b * ui_props.wcount
iname = utils.previmg_name(index)
img = bpy.data.images.get(iname)
- if img is not None and img.size[0]>0 and img.size[1]>0:
+ if img is not None and img.size[0] > 0 and img.size[1] > 0:
w = int(ui_props.thumb_size * img.size[0] / max(img.size[0], img.size[1]))
h = int(ui_props.thumb_size * img.size[1] / max(img.size[0], img.size[1]))
crop = (0, 0, 1, 1)
@@ -825,9 +855,19 @@ def draw_callback_2d_search(self, context):
else:
ui_bgl.draw_rect(x, y, ui_props.thumb_size, ui_props.thumb_size, white)
+
result = search_results[index]
+ #code to inform validators that the validation is waiting too long and should be done asap
+ if result['verificationStatus'] == 'uploaded':
+ if utils.profile_is_validator():
+ over_limit = is_upload_old(result)
+ if over_limit:
+ redness = min(over_limit*.05,0.5)
+ red = (1, 0, 0, redness)
+ ui_bgl.draw_rect(x, y, ui_props.thumb_size, ui_props.thumb_size, red)
+
if result['downloaded'] > 0:
- ui_bgl.draw_rect(x, y - 2, int(w * result['downloaded'] / 100.0), 2, green)
+ ui_bgl.draw_rect(x, y , int(ui_props.thumb_size * result['downloaded'] / 100.0), 2, green)
# object type icons - just a test..., adds clutter/ not so userfull:
# icons = ('type_finished.png', 'type_template.png', 'type_particle_system.png')
@@ -867,7 +907,6 @@ def draw_callback_2d_search(self, context):
if not r['thumbnail']:
tpath = paths.get_addon_thumbnail_path('thumbnail_not_available.jpg')
-
img = bpy.data.images.get(iname)
if img == None or img.filepath != tpath:
# TODO replace it with a function
@@ -957,7 +996,6 @@ def mouse_raycast(context, mx, my):
snapped_rotation = snapped_normal.to_track_quat('Z', 'Y').to_euler()
-
if props.randomize_rotation and snapped_normal.angle(up) < math.radians(10.0):
randoffset = props.offset_rotation_amount + math.pi + (
random.random() - 0.5) * props.randomize_rotation_amount
@@ -1015,7 +1053,7 @@ def is_rating_possible():
ao = bpy.context.active_object
ui = bpy.context.scene.blenderkitUI
preferences = bpy.context.preferences.addons['blenderkit'].preferences
- #first test if user is logged in.
+ # first test if user is logged in.
if preferences.api_key == '':
return False, False, None, None
if bpy.context.scene.get('assets rated') is not None and ui.down_up == 'SEARCH':
@@ -1034,7 +1072,7 @@ def is_rating_possible():
ad = ao_check.get('asset_data')
if ad is not None and ad.get('assetBaseId') is not None:
- s['assets rated'] = s.get('assets rated',{})
+ s['assets rated'] = s.get('assets rated', {})
rated = s['assets rated'].get(ad['assetBaseId'])
# originally hidden for already rated assets
return True, rated, ao_check, ad
@@ -1127,7 +1165,7 @@ def interact_rating(r, mx, my, event):
bkit_ratings.rating_work_hours = wh
if event.type == 'LEFTMOUSE' and event.value == 'RELEASE':
- ui.last_rating_time = time.time() # this prop seems obsolete now?
+ ui.last_rating_time = time.time() # this prop seems obsolete now?
return True
else:
ui.rating_button_on = True
@@ -1169,6 +1207,8 @@ def mouse_in_region(r, mx, my):
def update_ui_size(area, region):
+ if bpy.app.background:
+ return
ui = bpy.context.scene.blenderkitUI
user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
ui_scale = bpy.context.preferences.view.ui_scale
@@ -1208,7 +1248,6 @@ def update_ui_size(area, region):
ui.rating_y = ui.bar_y - ui.bar_height
-
class AssetBarOperator(bpy.types.Operator):
'''runs search and displays the asset bar at the same time'''
bl_idname = "view3d.blenderkit_asset_bar"
@@ -1258,13 +1297,12 @@ class AssetBarOperator(bpy.types.Operator):
areas = []
-
- #timers testing - seems timers might be causing crashes. testing it this way now.
+ # timers testing - seems timers might be causing crashes. testing it this way now.
if not user_preferences.use_timers:
- search.timer_update()
- download.timer_update()
- tasks_queue.queue_worker()
- bg_blender.bg_update()
+ search.timer_update()
+ download.timer_update()
+ tasks_queue.queue_worker()
+ bg_blender.bg_update()
if bpy.context.scene != self.scene:
self.exit_modal()
@@ -1300,8 +1338,6 @@ class AssetBarOperator(bpy.types.Operator):
update_ui_size(self.area, self.region)
-
-
# this was here to check if sculpt stroke is running, but obviously that didn't help,
# since the RELEASE event is cought by operator and thus there is no way to detect a stroke has ended...
if bpy.context.mode in ('SCULPT', 'PAINT_TEXTURE'):
@@ -1645,7 +1681,7 @@ class AssetBarOperator(bpy.types.Operator):
utils.automap(target_object, target_slot=target_slot,
tex_size=asset_data.get('texture_size_meters', 1.0))
bpy.ops.scene.blenderkit_download(True,
- asset_type=ui_props.asset_type,
+ # asset_type=ui_props.asset_type,
asset_index=asset_search_index,
model_location=loc,
model_rotation=rotation,
@@ -1662,14 +1698,14 @@ class AssetBarOperator(bpy.types.Operator):
rotation = s.cursor.rotation_euler
bpy.ops.scene.blenderkit_download(True,
- asset_type=ui_props.asset_type,
+ # asset_type=ui_props.asset_type,
asset_index=asset_search_index,
model_location=loc,
model_rotation=rotation,
target_object=target_object)
else:
- bpy.ops.scene.blenderkit_download(asset_type=ui_props.asset_type,
+ bpy.ops.scene.blenderkit_download(#asset_type=ui_props.asset_type,
asset_index=asset_search_index)
ui_props.dragging = False
@@ -1812,13 +1848,13 @@ class UndoWithContext(bpy.types.Operator):
def execute(self, context):
# C_dict = utils.get_fake_context(context)
- #w, a, r = get_largest_area(area_type=area_type)
+ # w, a, r = get_largest_area(area_type=area_type)
# wm = bpy.context.window_manager#bpy.data.window_managers[0]
# w = wm.windows[0]
#
# C_dict = {'window': w, 'screen': w.screen}
# bpy.ops.ed.undo_push(C_dict, 'INVOKE_REGION_WIN', message=self.message)
- bpy.ops.ed.undo_push( 'INVOKE_REGION_WIN', message=self.message)
+ bpy.ops.ed.undo_push('INVOKE_REGION_WIN', message=self.message)
return {'FINISHED'}
@@ -1876,12 +1912,12 @@ def register_ui():
if not wm.keyconfigs.addon:
return
km = wm.keyconfigs.addon.keymaps.new(name="Window", space_type='EMPTY')
- #asset bar shortcut
+ # asset bar shortcut
kmi = km.keymap_items.new(AssetBarOperator.bl_idname, 'SEMI_COLON', 'PRESS', ctrl=False, shift=False)
kmi.properties.keep_running = False
kmi.properties.do_search = False
addon_keymapitems.append(kmi)
- #fast rating shortcut
+ # fast rating shortcut
wm = bpy.context.window_manager
km = wm.keyconfigs.addon.keymaps['Window']
kmi = km.keymap_items.new(ratings.FastRateMenu.bl_idname, 'F', 'PRESS', ctrl=False, shift=False)
diff --git a/blenderkit/ui_panels.py b/blenderkit/ui_panels.py
index eaf5d454..a5de1a64 100644
--- a/blenderkit/ui_panels.py
+++ b/blenderkit/ui_panels.py
@@ -25,9 +25,10 @@ if "bpy" in locals():
download = importlib.reload(download)
categories = importlib.reload(categories)
icons = importlib.reload(icons)
- icons = importlib.reload(search)
+ search = importlib.reload(search)
+ resolutions = importlib.reload(resolutions)
else:
- from blenderkit import paths, ratings, utils, download, categories, icons, search
+ from blenderkit import paths, ratings, utils, download, categories, icons, search, resolutions
from bpy.types import (
Panel
@@ -370,7 +371,51 @@ class VIEW3D_PT_blenderkit_model_properties(Panel):
draw_panel_model_rating(self, context)
layout.label(text='Asset tools:')
- draw_asset_context_menu(self, context, ad)
+ draw_asset_context_menu(self, context, ad, from_panel=True)
+ # if 'rig' in ad['tags']:
+ # # layout.label(text = 'can make proxy')
+ # layout.operator('object.blenderkit_make_proxy', text = 'Make Armature proxy')
+ # fast upload, blocked by now
+ # else:
+ # op = layout.operator("object.blenderkit_upload", text='Store as private', icon='EXPORT')
+ # op.asset_type = 'MODEL'
+ # op.fast = True
+ # fun override project, not finished
+ # layout.operator('object.blenderkit_color_corrector')
+
+
+class NODE_PT_blenderkit_material_properties(Panel):
+ bl_category = "BlenderKit"
+ bl_idname = "NODE_PT_blenderkit_material_properties"
+ bl_space_type = 'NODE_EDITOR'
+ bl_region_type = 'UI'
+ bl_label = "Selected Material"
+ bl_context = "objectmode"
+
+ @classmethod
+ def poll(cls, context):
+ p = bpy.context.view_layer.objects.active is not None and bpy.context.active_object.active_material is not None
+ return p
+
+ def draw(self, context):
+ # draw asset properties here
+ layout = self.layout
+
+ m = bpy.context.active_object.active_material
+ # o = bpy.context.active_object
+ if m.get('asset_data') is None and m.blenderkit.id == '':
+ utils.label_multiline(layout,
+ text='To upload this asset to BlenderKit, go to the Find and Upload Assets panel.')
+ layout.prop(m, 'name')
+
+ if m.get('asset_data') is not None:
+ ad = m['asset_data']
+ layout.label(text=str(ad['name']))
+ layout.label(text='Ratings:')
+ draw_panel_material_ratings(self, context)
+
+ layout.label(text='Asset tools:')
+ draw_asset_context_menu(self, context, ad, from_panel = True)
# if 'rig' in ad['tags']:
# # layout.label(text = 'can make proxy')
# layout.operator('object.blenderkit_make_proxy', text = 'Make Armature proxy')
@@ -814,19 +859,27 @@ class VIEW3D_PT_blenderkit_import_settings(Panel):
if ui_props.asset_type == 'MODEL':
# noinspection PyCallByClass
props = s.blenderkit_models
- layout.label(text='Import method:')
- row = layout.row()
- row.prop(props, 'append_method', expand=True, icon_only=False)
layout.prop(props, 'randomize_rotation')
if props.randomize_rotation:
layout.prop(props, 'randomize_rotation_amount')
layout.prop(props, 'perpendicular_snap')
- if props.perpendicular_snap:
- layout.prop(props,'perpendicular_snap_threshold')
+ # if props.perpendicular_snap:
+ # layout.prop(props,'perpendicular_snap_threshold')
+
+ layout.label(text='Import method:')
+ row = layout.row()
+ row.prop(props, 'append_method', expand=True, icon_only=False)
if ui_props.asset_type == 'MATERIAL':
props = s.blenderkit_mat
layout.prop(props, 'automap')
+ layout.label(text='Import method:')
+ row = layout.row()
+
+ row.prop(props, 'append_method', expand=True, icon_only=False)
+
+ layout.prop(props, 'resolution')
+ # layout.prop(props, 'unpack_files')
class VIEW3D_PT_blenderkit_unified(Panel):
@@ -1028,13 +1081,13 @@ class BlenderKitWelcomeOperator(bpy.types.Operator):
return wm.invoke_props_dialog(self)
-def draw_asset_context_menu(self, context, asset_data):
+def draw_asset_context_menu(self, context, asset_data, from_panel=False):
layout = self.layout
ui_props = context.scene.blenderkitUI
- author_id = str(asset_data['author']['id'])
+ author_id = str(asset_data['author'].get('id'))
wm = bpy.context.window_manager
- if wm.get('bkit authors') is not None:
+ if wm.get('bkit authors') is not None and author_id is not None:
a = bpy.context.window_manager['bkit authors'].get(author_id)
if a is not None:
# utils.p('author:', a)
@@ -1048,7 +1101,7 @@ def draw_asset_context_menu(self, context, asset_data):
op.author_id = author_id
op = layout.operator('view3d.blenderkit_search', text='Search Similar')
- #build search string from description and tags:
+ # build search string from description and tags:
op.keywords = asset_data['name']
if asset_data.get('description'):
op.keywords += ' ' + asset_data.get('description')
@@ -1063,19 +1116,76 @@ def draw_asset_context_menu(self, context, asset_data):
# this checks if the menu got called from right-click in assetbar(then index is 0 - x) or
# from a panel(then replacement happens from the active model)
- if ui_props.active_index == -3:
+ if from_panel:
# called from addon panel
- o = utils.get_active_model()
- op.asset_base_id = o['asset_data']['assetBaseId']
+ op.asset_base_id = asset_data['assetBaseId']
else:
op.asset_index = ui_props.active_index
- op.asset_type = ui_props.asset_type
+ # op.asset_type = ui_props.asset_type
op.model_location = aob.location
op.model_rotation = aob.rotation_euler
op.target_object = aob.name
op.material_target_slot = aob.active_material_index
op.replace = True
+ op.replace_resolution = False
+
+ # resolution replacement operator
+ # if asset_data['downloaded'] == 100: # only show for downloaded/used assets
+ # if ui_props.asset_type in ('MODEL', 'MATERIAL'):
+ # layout.menu(OBJECT_MT_blenderkit_resolution_menu.bl_idname)
+
+ if ui_props.asset_type in ('MODEL', 'MATERIAL') and \
+ utils.get_param(asset_data, 'textureResolutionMax') is not None and \
+ utils.get_param(asset_data, 'textureResolutionMax') > 512:
+
+ s = bpy.context.scene
+
+ col = layout.column()
+ col.operator_context = 'INVOKE_DEFAULT'
+
+ if from_panel:
+ # Called from addon panel
+
+
+ if asset_data.get('resolution'):
+ op = col.operator('scene.blenderkit_download', text='Replace asset resolution')
+ op.asset_base_id = asset_data['assetBaseId']
+ if asset_data['assetType'] == 'MODEL':
+ o = utils.get_active_model()
+ op.model_location = o.location
+ op.model_rotation = o.rotation_euler
+ op.target_object = o.name
+ op.material_target_slot = o.active_material_index
+ elif asset_data['assetType'] == 'MATERIAL':
+ aob = bpy.context.active_object
+ op.model_location = aob.location
+ op.model_rotation = aob.rotation_euler
+ op.target_object = aob.name
+ op.material_target_slot = aob.active_material_index
+ op.replace_resolution = True
+ op.invoke_resolution = True
+ op.max_resolution = asset_data.get('max_resolution',
+ 0) # str(utils.get_param(asset_data, 'textureResolutionMax'))
+
+ elif asset_data['assetBaseId'] in s['assets used'].keys():
+ # called from asset bar:
+ op = col.operator('scene.blenderkit_download', text='Replace asset resolution')
+
+ op.asset_index = ui_props.active_index
+ # op.asset_type = ui_props.asset_type
+ op.replace_resolution = True
+ op.invoke_resolution = True
+ o = utils.get_active_model()
+ if o and o.get('asset_data'):
+ if o['asset_data']['assetBaseId'] == bpy.context.scene['search results'][ui_props.active_index]['assetBaseId']:
+ op.model_location = o.location
+ op.model_rotation = o.rotation_euler
+ op.max_resolution = asset_data.get('max_resolution',
+ 0) # str(utils.get_param(asset_data, 'textureResolutionMax'))
+
+ # print('operator res ', resolution)
+ # op.resolution = resolution
wm = bpy.context.window_manager
profile = wm.get('bkit profile')
@@ -1102,8 +1212,6 @@ def draw_asset_context_menu(self, context, asset_data):
op.asset_id = asset_data['id']
op.state = 'rejected'
-
-
if author_id == str(profile['user']['id']):
layout.label(text='Management tools:')
row = layout.row()
@@ -1119,6 +1227,51 @@ def draw_asset_context_menu(self, context, asset_data):
op.asset_id = asset_data['id']
op.asset_type = asset_data['assetType']
+ layout.operator_context = 'INVOKE_DEFAULT'
+ op = layout.operator('object.blenderkit_print_asset_debug', text='Print asset debug')
+ op.asset_id = asset_data['id']
+
+
+# def draw_asset_resolution_replace(self, context, resolution):
+# layout = self.layout
+# ui_props = bpy.context.scene.blenderkitUI
+#
+# op = layout.operator('scene.blenderkit_download', text=resolution)
+# if ui_props.active_index == -3:
+# # This happens if the command is called from addon panel
+# o = utils.get_active_model()
+# op.asset_base_id = o['asset_data']['assetBaseId']
+#
+# else:
+# op.asset_index = ui_props.active_index
+#
+# op.asset_type = ui_props.asset_type
+# if len(bpy.context.selected_objects) > 0: # and ui_props.asset_type == 'MODEL':
+# aob = bpy.context.active_object
+# op.model_location = aob.location
+# op.model_rotation = aob.rotation_euler
+# op.target_object = aob.name
+# op.material_target_slot = aob.active_material_index
+# op.replace_resolution = True
+# print('operator res ', resolution)
+# op.resolution = resolution
+
+
+# class OBJECT_MT_blenderkit_resolution_menu(bpy.types.Menu):
+# bl_label = "Replace Asset Resolution"
+# bl_idname = "OBJECT_MT_blenderkit_resolution_menu"
+#
+# def draw(self, context):
+# ui_props = context.scene.blenderkitUI
+#
+# # sr = bpy.context.scene['search results']
+#
+# # sr = bpy.context.scene['search results']
+# # asset_data = sr[ui_props.active_index]
+#
+# for k in resolutions.resolution_props_to_server.keys():
+# draw_asset_resolution_replace(self, context, k)
+
class OBJECT_MT_blenderkit_asset_menu(bpy.types.Menu):
bl_label = "Asset options:"
@@ -1130,7 +1283,7 @@ class OBJECT_MT_blenderkit_asset_menu(bpy.types.Menu):
# sr = bpy.context.scene['search results']
sr = bpy.context.scene['search results']
asset_data = sr[ui_props.active_index]
- draw_asset_context_menu(self, context, asset_data)
+ draw_asset_context_menu(self, context, asset_data, from_panel=False)
class OBJECT_MT_blenderkit_login_menu(bpy.types.Menu):
@@ -1320,7 +1473,7 @@ class VIEW3D_PT_blenderkit_downloads(Panel):
def draw(self, context):
layout = self.layout
- for i,threaddata in enumerate(download.download_threads):
+ for i, threaddata in enumerate(download.download_threads):
tcom = threaddata[2]
asset_data = threaddata[1]
row = layout.row()
@@ -1376,8 +1529,10 @@ classess = (
VIEW3D_PT_blenderkit_categories,
VIEW3D_PT_blenderkit_import_settings,
VIEW3D_PT_blenderkit_model_properties,
+ NODE_PT_blenderkit_material_properties,
# VIEW3D_PT_blenderkit_ratings,
VIEW3D_PT_blenderkit_downloads,
+ # OBJECT_MT_blenderkit_resolution_menu,
OBJECT_MT_blenderkit_asset_menu,
OBJECT_MT_blenderkit_login_menu,
UrlPopupDialog,
diff --git a/blenderkit/upload.py b/blenderkit/upload.py
index cd72fda7..c789313d 100644
--- a/blenderkit/upload.py
+++ b/blenderkit/upload.py
@@ -608,11 +608,11 @@ def start_upload(self, context, asset_type, reupload, upload_set):
try:
if 'MAINFILE' in upload_set:
json_metadata["verificationStatus"] = "uploading"
- r = rerequests.put(url, json=json_metadata, headers=headers, verify=True, immediate=True) # files = files,
+ r = rerequests.patch(url, json=json_metadata, headers=headers, verify=True, immediate=True) # files = files,
ui.add_report('uploaded metadata')
# parse the request
# print('uploaded metadata')
- # print(r.text)
+ print(r.text)
except requests.exceptions.RequestException as e:
print(e)
props.upload_state = str(e)
@@ -786,6 +786,51 @@ class UploadOperator(Operator):
return self.execute(context)
+
+class AssetDebugPrint(Operator):
+ """Change verification status"""
+ bl_idname = "object.blenderkit_print_asset_debug"
+ bl_description = "BlenderKit print asset data for debug purposes"
+ bl_label = "BlenderKit print asset data"
+ bl_options = {'REGISTER', 'UNDO', 'INTERNAL'}
+
+ # id of the asset to print
+ asset_id: StringProperty(
+ name="asset id",
+ )
+
+ @classmethod
+ def poll(cls, context):
+ return True
+
+ def execute(self, context):
+ preferences = bpy.context.preferences.addons['blenderkit'].preferences
+
+ if not bpy.context.scene.get('search results'):
+ print('no search results found')
+ return {'CANCELLED'}
+ # look the asset up in the search results (and in the original server response)
+ sr = bpy.context.scene['search results']
+ sro = bpy.context.scene['search results orig']['results']
+
+ result = None
+ for r in sr:
+ if r['id'] == self.asset_id:
+ result = r.to_dict()
+ if not result:
+ for r in sro:
+ if r['id'] == self.asset_id:
+ result = r.to_dict()
+ if not result:
+ ao = bpy.context.active_object
+ ad = ao.get('asset_data') if ao else None
+ if ad:
+ result = ad.to_dict()
+ if result:
+ print(json.dumps(result, indent=4, sort_keys=True))
+ return {'FINISHED'}
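+ # A hypothetical console call, assuming a search has already produced results:
+ # bpy.ops.object.blenderkit_print_asset_debug(asset_id=bpy.context.scene['search results'][0]['id'])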
+
+
class AssetVerificationStatusChange(Operator):
"""Change verification status"""
bl_idname = "object.blenderkit_change_status"
@@ -845,9 +890,11 @@ class AssetVerificationStatusChange(Operator):
def register_upload():
bpy.utils.register_class(UploadOperator)
+ bpy.utils.register_class(AssetDebugPrint)
bpy.utils.register_class(AssetVerificationStatusChange)
def unregister_upload():
bpy.utils.unregister_class(UploadOperator)
+ bpy.utils.unregister_class(AssetDebugPrint)
bpy.utils.unregister_class(AssetVerificationStatusChange)
diff --git a/blenderkit/upload_bg.py b/blenderkit/upload_bg.py
index 236793c0..a0e95535 100644
--- a/blenderkit/upload_bg.py
+++ b/blenderkit/upload_bg.py
@@ -77,7 +77,7 @@ class upload_in_chunks(object):
def upload_file(upload_data, f):
headers = utils.get_headers(upload_data['token'])
version_id = upload_data['id']
- bg_blender.progress('uploading %s' % f['type'])
+ bg_blender.progress(f"uploading {f['type']} {os.path.basename(f['file_path'])}")
upload_info = {
'assetId': version_id,
'fileType': f['type'],
@@ -89,7 +89,7 @@ def upload_file(upload_data, f):
upload = upload.json()
#
chunk_size = 1024 * 1024 * 2
- utils.pprint(upload)
+ # utils.pprint(upload)
# file gets uploaded here:
uploaded = False
# s3 upload is now the only option
@@ -100,7 +100,7 @@ def upload_file(upload_data, f):
data=upload_in_chunks(f['file_path'], chunk_size, f['type']),
stream=True, verify=True)
- if upload_response.status_code == 200:
+ if 200 <= upload_response.status_code < 250:  # accept any success status code, not just 200
uploaded = True
else:
print(upload_response.text)
@@ -111,6 +111,7 @@ def upload_file(upload_data, f):
time.sleep(1)
# confirm single file upload to bkit server
+ print(upload)
upload_done_url = paths.get_api_url() + 'uploads_s3/' + upload['id'] + '/upload-file/'
upload_response = rerequests.post(upload_done_url, headers=headers, verify=True)
@@ -120,6 +121,7 @@ def upload_file(upload_data, f):
def upload_files(upload_data, files):
+ '''uploads several files in one run'''
uploaded_all = True
for f in files:
uploaded = upload_file(upload_data, f)
@@ -130,8 +132,6 @@ def upload_files(upload_data, files):
if __name__ == "__main__":
-
-
try:
bg_blender.progress('preparing scene - append data')
with open(BLENDERKIT_EXPORT_DATA, 'r') as s:
diff --git a/blenderkit/utils.py b/blenderkit/utils.py
index 35ae93e4..59111623 100644
--- a/blenderkit/utils.py
+++ b/blenderkit/utils.py
@@ -31,6 +31,7 @@ from mathutils import Vector
import json
import os
import sys
+import shutil
ABOVE_NORMAL_PRIORITY_CLASS = 0x00008000
BELOW_NORMAL_PRIORITY_CLASS = 0x00004000
@@ -130,6 +131,8 @@ def get_selected_replace_adepts():
# if no blenderkit - like objects were found, use the original selection.
if len(parents) == 0:
parents = obs
+ pprint('replace adepts')
+ pprint(str(parents))
return parents
@@ -323,6 +326,15 @@ def get_thumbnail(name):
return img
+def files_size_to_text(size):
+ fsmb = size // (1024 * 1024)
+ fskb = size % (1024 * 1024) // 1024  # kilobytes remaining below a full megabyte
+ if fsmb == 0:
+ return f'{fskb}KB'
+ else:
+ return f'{fsmb}MB {fskb}KB'
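+# Illustrative values (not from the codebase):
+# files_size_to_text(5 * 1024 * 1024 + 300 * 1024) -> '5MB 300KB'
+# files_size_to_text(300 * 1024) -> '300KB'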
+
+
def get_brush_props(context):
brush = get_active_brush()
if brush is not None:
@@ -332,11 +344,42 @@ def get_brush_props(context):
def p(text, text1='', text2='', text3='', text4='', text5=''):
'''debug printing depending on blender's debug value'''
+
if bpy.app.debug_value != 0:
+ print('\n')
+ # print('-----BKit debug-----\n')
+ # traceback.print_stack()
print(text, text1, text2, text3, text4, text5)
-
-
-def pprint(data):
+ # print('---------------------\n')
+
+
+def copy_asset(fp1, fp2):
+ '''synchronizes the asset between folders, including its texture subdirectories'''
+ try:
+ p('copy asset')
+ p(fp1, fp2)
+ if not os.path.exists(fp2):
+ shutil.copyfile(fp1, fp2)
+ p('copied')
+ source_dir = os.path.dirname(fp1)
+ target_dir = os.path.dirname(fp2)
+ # copy texture subfolders that are not present in the target folder yet
+ for subdir in os.scandir(source_dir):
+ if not subdir.is_dir():
+ continue
+ target_subdir = os.path.join(target_dir, subdir.name)
+ if os.path.exists(target_subdir):
+ continue
+ p(subdir, target_subdir)
+ shutil.copytree(subdir, target_subdir)
+ p('copied')
+
+ except Exception as e:
+ print('BlenderKit failed to copy asset')
+ print(fp1, fp2)
+ print(e)
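+# Hypothetical usage with illustrative paths:
+# copy_asset('/tmp/blenderkit_data/chair/chair.blend', '/tmp/blenderkit_data_2k/chair/chair.blend')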
+
+
+def pprint(data, data1=None, data2=None, data3=None, data4=None):
'''pretty print jsons'''
p(json.dumps(data, indent=4, sort_keys=True))
@@ -345,6 +388,8 @@ def get_hierarchy(ob):
'''get all objects in a tree'''
obs = []
doobs = [ob]
+ # pprint('get hierarchy')
+ pprint(ob.name)
while len(doobs) > 0:
o = doobs.pop()
doobs.extend(o.children)
@@ -497,14 +542,13 @@ def scale_uvs(ob, scale=1.0, pivot=Vector((.5, .5))):
# map uv cubic and switch of auto tex space and set it to 1,1,1
def automap(target_object=None, target_slot=None, tex_size=1, bg_exception=False, just_scale=False):
- from blenderkit import bg_blender as bg
s = bpy.context.scene
mat_props = s.blenderkit_mat
if mat_props.automap:
tob = bpy.data.objects[target_object]
# only automap mesh models
- if tob.type == 'MESH' and len(tob.data.polygons)>0:
- #check polycount for a rare case where no polys are in editmesh
+ if tob.type == 'MESH' and len(tob.data.polygons) > 0:
+ # check polycount for a rare case where no polys are in editmesh
actob = bpy.context.active_object
bpy.context.view_layer.objects.active = tob
@@ -573,6 +617,17 @@ def name_update():
asset.name = fname
+def get_param(asset_data, parameter_name):
+ '''returns the value of the named parameter from asset_data, or None if it is missing'''
+ if not asset_data.get('parameters'):
+ # parameters can be missing in files from older add-on versions.
+ return None
+
+ for p in asset_data['parameters']:
+ if p.get('parameterType') == parameter_name:
+ return p['value']
+ return None
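+# Usage sketch with an assumed parameter name from the server API:
+# get_param(asset_data, 'textureResolutionMax')  # -> e.g. 2048, or None when absent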
+
+
def params_to_dict(params):
params_dict = {}
for p in params:
@@ -657,15 +712,16 @@ def get_fake_context(context, area_type='VIEW_3D'):
try:
context = context.copy()
+ # print('bk context copied successfully')
except Exception as e:
print(e)
- print('BlenderKit: context.copy() failed. probably a colliding addon.')
+ print('BlenderKit: context.copy() failed. This may be caused by a conflicting add-on.')
context = {}
if context.get('area') is None or context.get('area').type != area_type:
w, a, r = get_largest_area(area_type=area_type)
if w:
- #sometimes there is no area of the requested type. Let's face it, some people use Blender without 3d view.
+ # sometimes there is no area of the requested type. Let's face it, some people use Blender without 3d view.
override = {'window': w, 'screen': w.screen, 'area': a, 'region': r}
C_dict.update(override)
# print(w,a,r)