Welcome to mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender-addons.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorVilém Duha <vilda.novak@gmail.com>2021-01-18 16:16:58 +0300
committerVilém Duha <vilda.novak@gmail.com>2021-01-18 16:16:58 +0300
commitaf50ac234088b0d7eeab54cd498c0318a1f95c76 (patch)
tree67ca7af94560f36f2a3a5a0d35e82ce5b4a07e6f
parentd59052f4d5ed74fdb49ddd4529215560af041715 (diff)
BlenderKit: fix T84766 - Specify utf-8 encoding for all json file write/reads
This is used in various places of the addon, and was introduced to fix (once more and better) T84766.
-rw-r--r--blenderkit/autothumb.py8
-rw-r--r--blenderkit/autothumb_material_bg.py2
-rw-r--r--blenderkit/autothumb_model_bg.py4
-rw-r--r--blenderkit/categories.py6
-rw-r--r--blenderkit/resolutions.py16
-rw-r--r--blenderkit/search.py8
-rw-r--r--blenderkit/upload.py4
-rw-r--r--blenderkit/upload_bg.py2
-rw-r--r--blenderkit/utils.py7
-rw-r--r--blenderkit/version_checker.py6
10 files changed, 32 insertions, 31 deletions
diff --git a/blenderkit/autothumb.py b/blenderkit/autothumb.py
index 0008d670..da6e6d29 100644
--- a/blenderkit/autothumb.py
+++ b/blenderkit/autothumb.py
@@ -120,7 +120,7 @@ def start_thumbnailer(self, context):
obnames = []
for ob in obs:
obnames.append(ob.name)
- with open(datafile, 'w') as s:
+ with open(datafile, 'w', encoding = 'utf-8') as s:
bkit = mainmodel.blenderkit
json.dump({
"type": "model",
@@ -131,7 +131,7 @@ def start_thumbnailer(self, context):
"thumbnail_resolution": bkit.thumbnail_resolution,
"thumbnail_samples": bkit.thumbnail_samples,
"thumbnail_denoising": bkit.thumbnail_denoising,
- }, s)
+ }, s, ensure_ascii=False, indent=4)
proc = subprocess.Popen([
binary_path,
@@ -190,7 +190,7 @@ def start_material_thumbnailer(self, context, wait=False):
# save a copy of actual scene but don't interfere with the users models
bpy.ops.wm.save_as_mainfile(filepath=filepath, compress=False, copy=True)
- with open(datafile, 'w') as s:
+ with open(datafile, 'w', encoding = 'utf-8') as s:
bkit = mat.blenderkit
json.dump({
"type": "material",
@@ -204,7 +204,7 @@ def start_material_thumbnailer(self, context, wait=False):
"thumbnail_denoising": bkit.thumbnail_denoising,
"adaptive_subdivision": bkit.adaptive_subdivision,
"texture_size_meters": bkit.texture_size_meters,
- }, s)
+ }, s, ensure_ascii=False, indent=4)
proc = subprocess.Popen([
binary_path,
diff --git a/blenderkit/autothumb_material_bg.py b/blenderkit/autothumb_material_bg.py
index 3e1c2f2b..d27d7b90 100644
--- a/blenderkit/autothumb_material_bg.py
+++ b/blenderkit/autothumb_material_bg.py
@@ -44,7 +44,7 @@ def unhide_collection(cname):
if __name__ == "__main__":
try:
bg_blender.progress('preparing thumbnail scene')
- with open(BLENDERKIT_EXPORT_DATA, 'r') as s:
+ with open(BLENDERKIT_EXPORT_DATA, 'r',encoding='utf-8') as s:
data = json.load(s)
# append_material(file_name, matname = None, link = False, fake_user = True)
mat = append_link.append_material(file_name=BLENDERKIT_EXPORT_FILE_INPUT, matname=data["material"], link=True,
diff --git a/blenderkit/autothumb_model_bg.py b/blenderkit/autothumb_model_bg.py
index ebb509ff..87acfa19 100644
--- a/blenderkit/autothumb_model_bg.py
+++ b/blenderkit/autothumb_model_bg.py
@@ -32,7 +32,7 @@ BLENDERKIT_EXPORT_DATA = sys.argv[-4]
def get_obnames():
- with open(BLENDERKIT_EXPORT_DATA, 'r') as s:
+ with open(BLENDERKIT_EXPORT_DATA, 'r',encoding='utf-8') as s:
data = json.load(s)
obnames = eval(data['models'])
return obnames
@@ -79,7 +79,7 @@ def render_thumbnails():
if __name__ == "__main__":
try:
- with open(BLENDERKIT_EXPORT_DATA, 'r') as s:
+ with open(BLENDERKIT_EXPORT_DATA, 'r',encoding='utf-8') as s:
data = json.load(s)
user_preferences = bpy.context.preferences.addons['blenderkit'].preferences
diff --git a/blenderkit/categories.py b/blenderkit/categories.py
index f452120b..71e14f64 100644
--- a/blenderkit/categories.py
+++ b/blenderkit/categories.py
@@ -168,7 +168,7 @@ def load_categories():
wm = bpy.context.window_manager
try:
- with open(categories_filepath, 'r') as catfile:
+ with open(categories_filepath, 'r', encoding='utf-8') as catfile:
wm['bkit_categories'] = json.load(catfile)
wm['active_category'] = {
@@ -207,8 +207,8 @@ def fetch_categories(API_key, force = False):
categories = rdata['results']
fix_category_counts(categories)
# filter_categories(categories) #TODO this should filter categories for search, but not for upload. by now off.
- with open(categories_filepath, 'w') as s:
- json.dump(categories, s, indent=4)
+ with open(categories_filepath, 'w', encoding = 'utf-8') as s:
+ json.dump(categories, s, ensure_ascii=False, indent=4)
tasks_queue.add_task((load_categories, ()))
except Exception as e:
bk_logger.debug('category fetching failed')
diff --git a/blenderkit/resolutions.py b/blenderkit/resolutions.py
index 03026c68..a5b5d723 100644
--- a/blenderkit/resolutions.py
+++ b/blenderkit/resolutions.py
@@ -632,8 +632,8 @@ def get_assets_search():
retries += 1
fpath = assets_db_path()
- with open(fpath, 'w') as s:
- json.dump(results, s)
+ with open(fpath, 'w', encoding = 'utf-8') as s:
+ json.dump(results, s, ensure_ascii=False, indent=4)
def get_assets_for_resolutions(page_size=100, max_results=100000000):
@@ -698,13 +698,13 @@ def get_materials_for_validation(page_size=100, max_results=100000000):
# retries += 1
#
# fpath = assets_db_path()
-# with open(fpath, 'w') as s:
-# json.dump(results, s)
+# with open(fpath, 'w', encoding = 'utf-8') as s:
+# json.dump(results, s, ensure_ascii=False, indent=4)
def load_assets_list(filepath):
if os.path.exists(filepath):
- with open(filepath, 'r') as s:
+ with open(filepath, 'r', encoding='utf-8') as s:
assets = json.load(s)
return assets
@@ -821,8 +821,8 @@ def send_to_bg(asset_data, fpath, command='generate_resolutions', wait=True):
tempdir = tempfile.mkdtemp()
datafile = os.path.join(tempdir + 'resdata.json')
script_path = os.path.dirname(os.path.realpath(__file__))
- with open(datafile, 'w') as s:
- json.dump(data, s)
+ with open(datafile, 'w', encoding = 'utf-8') as s:
+ json.dump(data, s, ensure_ascii=False, indent=4)
print('opening Blender instance to do processing - ', command)
@@ -856,7 +856,7 @@ def write_data_back(asset_data):
def run_bg(datafile):
print('background file operation')
- with open(datafile, 'r') as f:
+ with open(datafile, 'r',encoding='utf-8') as f:
data = json.load(f)
bpy.app.debug_value = data['debug_value']
write_data_back(data['asset_data'])
diff --git a/blenderkit/search.py b/blenderkit/search.py
index 2252d3f1..484830e0 100644
--- a/blenderkit/search.py
+++ b/blenderkit/search.py
@@ -1017,8 +1017,8 @@ class Searcher(threading.Thread):
if params['get_next']:
rdata['results'][0:0] = self.result['results']
self.result = rdata
- # with open(json_filepath, 'w') as outfile:
- # json.dump(rdata, outfile)
+ # with open(json_filepath, 'w', encoding = 'utf-8') as outfile:
+ # json.dump(rdata, outfile, ensure_ascii=False, indent=4)
killthreads_sml = []
for k in thumb_sml_download_threads.keys():
@@ -1311,8 +1311,8 @@ def get_search_simple(parameters, filepath=None, page_size=100, max_results=1000
if not filepath:
return results
- with open(filepath, 'w') as s:
- json.dump(results, s)
+ with open(filepath, 'w', encoding = 'utf-8') as s:
+ json.dump(results, s, ensure_ascii=False, indent=4)
bk_logger.info(f'retrieved {len(results)} assets from elastic search')
return results
diff --git a/blenderkit/upload.py b/blenderkit/upload.py
index e811eaa3..69150230 100644
--- a/blenderkit/upload.py
+++ b/blenderkit/upload.py
@@ -893,8 +893,8 @@ class Uploader(threading.Thread):
}
datafile = os.path.join(self.export_data['temp_dir'], BLENDERKIT_EXPORT_DATA_FILE)
- with open(datafile, 'w') as s:
- json.dump(data, s)
+ with open(datafile, 'w', encoding = 'utf-8') as s:
+ json.dump(data, s, ensure_ascii=False, indent=4)
# non waiting method - not useful here..
# proc = subprocess.Popen([
diff --git a/blenderkit/upload_bg.py b/blenderkit/upload_bg.py
index 2e27dbf1..685e280a 100644
--- a/blenderkit/upload_bg.py
+++ b/blenderkit/upload_bg.py
@@ -126,7 +126,7 @@ def upload_files(upload_data, files):
if __name__ == "__main__":
try:
# bg_blender.progress('preparing scene - append data')
- with open(BLENDERKIT_EXPORT_DATA, 'r') as s:
+ with open(BLENDERKIT_EXPORT_DATA, 'r',encoding='utf-8') as s:
data = json.load(s)
bpy.app.debug_value = data.get('debug_value', 0)
diff --git a/blenderkit/utils.py b/blenderkit/utils.py
index e0ced9b1..94f795c1 100644
--- a/blenderkit/utils.py
+++ b/blenderkit/utils.py
@@ -254,7 +254,7 @@ def load_prefs():
fpath = paths.BLENDERKIT_SETTINGS_FILENAME
if os.path.exists(fpath):
try:
- with open(fpath, 'r') as s:
+ with open(fpath, 'r', encoding = 'utf-8') as s:
prefs = json.load(s)
user_preferences.api_key = prefs.get('API_key', '')
user_preferences.global_dir = prefs.get('global_dir', paths.default_global_dict())
@@ -262,6 +262,7 @@ def load_prefs():
except Exception as e:
print('failed to read addon preferences.')
print(e)
+ os.remove(fpath)
def save_prefs(self, context):
@@ -285,8 +286,8 @@ def save_prefs(self, context):
fpath = paths.BLENDERKIT_SETTINGS_FILENAME
if not os.path.exists(paths._presets):
os.makedirs(paths._presets)
- with open(fpath, 'w') as s:
- json.dump(prefs, s)
+ with open(fpath, 'w', encoding = 'utf-8') as s:
+ json.dump(prefs, s, ensure_ascii=False, indent=4)
except Exception as e:
print(e)
diff --git a/blenderkit/version_checker.py b/blenderkit/version_checker.py
index 49423dbb..993ff238 100644
--- a/blenderkit/version_checker.py
+++ b/blenderkit/version_checker.py
@@ -43,8 +43,8 @@ def check_version(url, api_key, module):
tempdir = paths.get_temp_dir()
ver_filepath = os.path.join(tempdir, 'addon_version.json')
- with open(ver_filepath, 'w') as s:
- json.dump(ver_online, s, indent=4)
+ with open(ver_filepath, 'w', encoding = 'utf-8') as s:
+ json.dump(ver_online, s, ensure_ascii=False, indent=4)
except:
print("couldn't check online for version updates")
@@ -56,7 +56,7 @@ def compare_versions(module):
tempdir = paths.get_temp_dir()
ver_filepath = os.path.join(tempdir, 'addon_version.json')
- with open(ver_filepath, 'r') as s:
+ with open(ver_filepath, 'r',encoding='utf-8') as s:
data = json.load(s)
ver_online = data['addonVersion2.8'].split('.')