diff options
author | Vilém Duha <vilda.novak@gmail.com> | 2020-01-13 14:46:46 +0300 |
---|---|---|
committer | Vilém Duha <vilda.novak@gmail.com> | 2020-01-13 17:50:40 +0300 |
commit | 59cc0c74d8a09e8aa999cfde9344e8fae065f94c (patch) | |
tree | 764abda948514d84b4ea1e8d03e34d660d803e7c /blenderkit | |
parent | 2ffb35765e88fa190ad2c8ca7ff01ebc74ed5837 (diff) |
BlenderKit: fix too many requests from addon
- for author data - there is now almost no risk of duplicated data, but this can still be improved
- for categories - categories are now requested from the server only once per day.
Diffstat (limited to 'blenderkit')
-rw-r--r-- | blenderkit/categories.py | 33 | ||||
-rw-r--r-- | blenderkit/search.py | 14 |
2 files changed, 32 insertions, 15 deletions
diff --git a/blenderkit/categories.py b/blenderkit/categories.py index 1d411499..8983e3ad 100644 --- a/blenderkit/categories.py +++ b/blenderkit/categories.py @@ -30,6 +30,7 @@ import requests import json import os import bpy +import time import shutil import threading @@ -106,22 +107,34 @@ def load_categories(): except: print('categories failed to read') -def fetch_categories(API_key): +# +catfetch_counter = 0 + + +def fetch_categories(API_key, force = False): url = paths.get_api_url() + 'categories/' headers = utils.get_headers(API_key) tempdir = paths.get_temp_dir() categories_filepath = os.path.join(tempdir, 'categories.json') + catfile_age = time.time() - os.path.getmtime(categories_filepath) + # global catfetch_counter + # catfetch_counter += 1 + # utils.p('fetching categories: ', catfetch_counter) + # utils.p('age of cat file', catfile_age) try: - r = rerequests.get(url, headers=headers) - rdata = r.json() - categories = rdata['results'] - fix_category_counts(categories) - # filter_categories(categories) #TODO this should filter categories for search, but not for upload. by now off. - with open(categories_filepath, 'w') as s: - json.dump(categories, s, indent=4) + # read categories only once per day maximum, or when forced to do so. + if catfile_age > 86400 or force: + utils.p('requesting categories') + r = rerequests.get(url, headers=headers) + rdata = r.json() + categories = rdata['results'] + fix_category_counts(categories) + # filter_categories(categories) #TODO this should filter categories for search, but not for upload. by now off. 
+ with open(categories_filepath, 'w') as s: + json.dump(categories, s, indent=4) tasks_queue.add_task((load_categories, ())) except Exception as e: utils.p('category fetching failed') @@ -131,6 +144,6 @@ def fetch_categories(API_key): shutil.copy(source_path, categories_filepath) -def fetch_categories_thread(API_key): - cat_thread = threading.Thread(target=fetch_categories, args=([API_key]), daemon=True) +def fetch_categories_thread(API_key, force = False): + cat_thread = threading.Thread(target=fetch_categories, args=([API_key, force]), daemon=True) cat_thread.start() diff --git a/blenderkit/search.py b/blenderkit/search.py index cdde9523..87bf45da 100644 --- a/blenderkit/search.py +++ b/blenderkit/search.py @@ -110,9 +110,10 @@ def fetch_server_data(): len(user_preferences.api_key) < 38 and \ user_preferences.api_key_timeout < time.time() + 3600: bkit_oauth.refresh_token_thread() - if api_key != '': + if api_key != '' and bpy.context.window_manager.get('bkit profile') == None: get_profile() - categories.fetch_categories_thread(api_key) + if bpy.context.window_manager.get('bkit_categories') is None: + categories.fetch_categories_thread(api_key) first_time = True @@ -622,6 +623,7 @@ def fetch_author(a_id, api_key): utils.p(e) utils.p('finish fetch') +# profile_counter =0 def get_author(r): a_id = str(r['author']['id']) @@ -630,11 +632,13 @@ def get_author(r): if authors == {}: bpy.context.window_manager['bkit authors'] = authors a = authors.get(a_id) - if a is None or a is '' or \ - (a.get('gravatarHash') is not None and a.get('gravatarImg') is None): - authors[a_id] = None + if a is None:# or a is '' or (a.get('gravatarHash') is not None and a.get('gravatarImg') is None): + authors[a_id] = '' thread = threading.Thread(target=fetch_author, args=(a_id, preferences.api_key), daemon=True) thread.start() + # global profile_counter + # profile_counter+=1 + # print(profile_counter,'author:', a_id) return a |