# ##### BEGIN GPL LICENSE BLOCK #####
#
#  This program is free software; you can redistribute it and/or
#  modify it under the terms of the GNU General Public License
#  as published by the Free Software Foundation; either version 2
#  of the License, or (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program; if not, write to the Free Software Foundation,
#  Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####

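'''Generation and upload of lower-resolution versions of BlenderKit assets.

Downscales textures and HDR images, saves the reduced .blend/.exr files, uploads them
as resolution files through the BlenderKit API, and can run these tasks in a separate
background Blender instance (see send_to_bg and run_bg).
'''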

from blenderkit import paths, append_link, bg_blender, utils, download, search, rerequests, upload_bg, image_utils

import sys, json, os, time
import subprocess
import tempfile
import numpy as np
import bpy
import requests
import math
import threading

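# Pixel sizes for the server-side resolution file types.
# Insertion order (lowest to highest) matters: rkeys below is used to step through resolutions.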
resolutions = {
    'resolution_0_5K': 512,
    'resolution_1K': 1024,
    'resolution_2K': 2048,
    'resolution_4K': 4096,
    'resolution_8K': 8192,
}
rkeys = list(resolutions.keys())

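# Maps the add-on's resolution property values (pixel sizes as strings) to server-side file type names.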
resolution_props_to_server = {

    '512': 'resolution_0_5K',
    '1024': 'resolution_1K',
    '2048': 'resolution_2K',
    '4096': 'resolution_4K',
    '8192': 'resolution_8K',
    'ORIGINAL': 'blend',
}


def get_current_resolution():
    actres = 0
    for i in bpy.data.images:
        if i.name != 'Render Result':
            actres = max(actres, i.size[0], i.size[1])
    return actres


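# The image-analysis helpers below operate on flat pixel arrays as returned by
# imagetonumpy(), assuming an RGBA layout (a stride of 4 floats per pixel).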
def can_erase_alpha(na):
    alpha = na[3::4]
    alpha_sum = alpha.sum()
    if alpha_sum == alpha.size:
        print('image can have alpha erased')
    # print(alpha_sum, alpha.size)
    return alpha_sum == alpha.size


def is_image_black(na):
    r = na[::4]
    g = na[1::4]
    b = na[2::4]

    rgbsum = r.sum() + g.sum() + b.sum()

    # print('rgb sum', rgbsum, r.sum(), g.sum(), b.sum())
    if rgbsum == 0:
        print('image is pure black and can be dropped')
    return rgbsum == 0


def is_image_bw(na):
    r = na[::4]
    g = na[1::4]
    b = na[2::4]

    rg_equal = r == g
    gb_equal = g == b
    rgbequal = rg_equal.all() and gb_equal.all()
    if rgbequal:
        print('image is black and white, can have channels reduced')

    return rgbequal


def numpytoimage(a, iname, width=0, height=0, channels=3):
    t = time.time()
    foundimage = False

    for image in bpy.data.images:

        # reuse an existing image datablock with a matching name and size
        if image.name[:len(iname)] == iname and image.size[0] == width and image.size[1] == height:
            i = image
            foundimage = True
    if not foundimage:
        if channels == 4:
            bpy.ops.image.new(name=iname, width=width, height=height, color=(0, 0, 0, 1), alpha=True,
                              generated_type='BLANK', float=True)
        if channels == 3:
            bpy.ops.image.new(name=iname, width=width, height=height, color=(0, 0, 0), alpha=False,
                              generated_type='BLANK', float=True)

    for image in bpy.data.images:
        # print(image.name[:len(iname)],iname, image.size[0],a.shape[0],image.size[1],a.shape[1])
        if image.name[:len(iname)] == iname and image.size[0] == width and image.size[1] == height:
            i = image

    # dropping this re-shaping code -  just doing flat array for speed and simplicity
    #    d = a.shape[0] * a.shape[1]
    #    a = a.swapaxes(0, 1)
    #    a = a.reshape(d)
    #    a = a.repeat(channels)
    #    a[3::4] = 1
    i.pixels.foreach_set(a)  # this gives big speedup!
    print('\ntime ' + str(time.time() - t))
    return i


def imagetonumpy(i):
    t = time.time()

    width = i.size[0]
    height = i.size[1]
    # print(i.channels)

    size = width * height * i.channels
    na = np.empty(size, np.float32)
    i.pixels.foreach_get(na)

    # dropping this re-shaping code -  just doing flat array for speed and simplicity
    #    na = na[::4]
    #    na = na.reshape(height, width, i.channels)
    #    na = na.swapaxnes(0, 1)

    # print('\ntime of image to numpy ' + str(time.time() - t))
    return na


def save_image_safely(teximage, filepath):
    '''
    Blender makes it surprisingly hard to save images reliably; this works around its image saving quirks
    by temporarily adjusting the scene's render image settings. Using PIL or a similar library would be
    worth investigating instead.
    Parameters
    ----------
    teximage - image datablock to save
    filepath - target file path

    Returns
    -------
    None
    '''
    JPEG_QUALITY = 98

    rs = bpy.context.scene.render
    ims = rs.image_settings

    orig_file_format = ims.file_format
    orig_quality = ims.quality
    orig_color_mode = ims.color_mode
    orig_compression = ims.compression

    ims.file_format = teximage.file_format
    if teximage.file_format == 'PNG':
        ims.color_mode = 'RGBA'
    elif teximage.channels == 3:
        ims.color_mode = 'RGB'
    else:
        ims.color_mode = 'BW'

    # save all PNGs with maximum compression
    if ims.file_format == 'PNG':
        ims.compression = 100
    # save all JPEGs at a reasonable quality (the file_format enum value is 'JPEG', not 'JPG')
    if ims.file_format == 'JPEG':
        ims.quality = JPEG_QUALITY
    # It is important not to change the image filepath or the packed file filepath before saving:
    # Blender tries to re-pack the image after writing to image.packed_files[...].filepath and reverts any changes.
    teximage.save_render(filepath=bpy.path.abspath(filepath), scene=bpy.context.scene)

    teximage.filepath = filepath
    for packed_file in teximage.packed_files:
        packed_file.filepath = filepath
    teximage.filepath_raw = filepath
    teximage.reload()

    ims.file_format = orig_file_format
    ims.quality = orig_quality
    ims.color_mode = orig_color_mode
    ims.compression = orig_compression


def extxchange_to_resolution(filepath):
    # Note: computes a swapped extension but currently does not return or apply it.
    base, ext = os.path.splitext(filepath)
    if ext in ('.png', '.PNG'):
        ext = 'jpg'


def make_possible_reductions_on_image(teximage, input_filepath, do_reductions=False, do_downscale=False):
    '''Checks the image and saves it to disk with possibly reduced channels.
    Can also remove the image from the asset if the image is pure black:
    it finds its usages and replaces the inputs where the image is used
    with a zero/black color.
    Currently implemented file type conversions:
    PNG -> JPG
    '''
    colorspace = teximage.colorspace_settings.name
    teximage.colorspace_settings.name = 'Non-Color'

    JPEG_QUALITY = 90
    # is_image_black(na)
    # is_image_bw(na)

    rs = bpy.context.scene.render
    ims = rs.image_settings

    orig_file_format = ims.file_format
    orig_quality = ims.quality
    orig_color_mode = ims.color_mode
    orig_compression = ims.compression

    # if is_image_black(na):
    #     # just erase the image from the asset here, no need to store black images.
    #     pass;

    # fp = teximage.filepath
    fp = input_filepath
    if do_reductions:
        na = imagetonumpy(teximage)

        if can_erase_alpha(na):
            print(teximage.file_format)
            if teximage.file_format == 'PNG':
                print('changing type of image to JPG')
                base, ext = os.path.splitext(fp)
                teximage['original_extension'] = ext

                fp = fp.replace('.png', '.jpg')
                fp = fp.replace('.PNG', '.jpg')

                teximage.name = teximage.name.replace('.png', '.jpg')
                teximage.name = teximage.name.replace('.PNG', '.jpg')

                teximage.file_format = 'JPEG'
                ims.quality = JPEG_QUALITY
                ims.color_mode = 'RGB'

            if is_image_bw(na):
                ims.color_mode = 'BW'

    ims.file_format = teximage.file_format

    # save all PNGs with maximum compression
    if ims.file_format == 'PNG':
        ims.compression = 100
    # save all JPEGs at a reasonable quality (the file_format enum value is 'JPEG', not 'JPG')
    if ims.file_format == 'JPEG':
        ims.quality = JPEG_QUALITY

    if do_downscale:
        downscale(teximage)

    # It is important not to change the image filepath or the packed file filepath before saving:
    # Blender tries to re-pack the image after writing to image.packed_files[...].filepath and reverts any changes.
    teximage.save_render(filepath=bpy.path.abspath(fp), scene=bpy.context.scene)
    if len(teximage.packed_files) > 0:
        teximage.unpack(method='REMOVE')
    teximage.filepath = fp
    teximage.filepath_raw = fp
    teximage.reload()

    teximage.colorspace_settings.name = colorspace

    ims.file_format = orig_file_format
    ims.quality = orig_quality
    ims.color_mode = orig_color_mode
    ims.compression = orig_compression


def downscale(i):
    minsize = 128

    sx, sy = i.size[:]
    sx = round(sx / 2)
    sy = round(sy / 2)
    if sx > minsize and sy > minsize:
        i.scale(sx, sy)


def upload_resolutions(files, asset_data):
    preferences = bpy.context.preferences.addons['blenderkit'].preferences

    upload_data = {
        "name": asset_data['name'],
        "token": preferences.api_key,
        "id": asset_data['id']
    }

    uploaded = upload_bg.upload_files(upload_data, files)

    if uploaded:
        bg_blender.progress('upload finished successfully')
    else:
        bg_blender.progress('upload failed.')


def unpack_asset(data):
    utils.p('unpacking asset')
    asset_data = data['asset_data']
    # utils.pprint(asset_data)

    blend_file_name = os.path.basename(bpy.data.filepath)
    ext = os.path.splitext(blend_file_name)[1]

    resolution = asset_data.get('resolution', 'blend')
    # TODO - passing resolution inside asset data might not be the best solution
    tex_dir_path = paths.get_texture_directory(asset_data, resolution=resolution)
    tex_dir_abs = bpy.path.abspath(tex_dir_path)
    if not os.path.exists(tex_dir_abs):
        try:
            os.mkdir(tex_dir_abs)
        except Exception as e:
            print(e)
    bpy.data.use_autopack = False
    for image in bpy.data.images:
        if image.name != 'Render Result':
            # suffix = paths.resolution_suffix(data['suffix'])
            fp = get_texture_filepath(tex_dir_path, image, resolution=resolution)
            utils.p('unpacking file', image.name)
            utils.p(image.filepath, fp)

            for pf in image.packed_files:
                pf.filepath = fp  # bpy.path.abspath(fp)
            image.filepath = fp  # bpy.path.abspath(fp)
            image.filepath_raw = fp  # bpy.path.abspath(fp)
            image.save()
            if len(image.packed_files) > 0:
                image.unpack(method='REMOVE')

    bpy.ops.wm.save_mainfile(compress=False)
    # now try to delete the .blend1 file
    try:
        os.remove(bpy.data.filepath + '1')
    except Exception as e:
        print(e)


def patch_asset_empty(asset_id, api_key):
    '''
    Patches the asset with an empty payload so the server reindexes it.
    Should be removed once this is fixed on the server and the server
    is able to reindex after uploads of resolutions.
    Returns
    -------
    '''
    upload_data = {
    }
    url = paths.get_api_url() + 'assets/' + str(asset_id) + '/'
    headers = utils.get_headers(api_key)
    try:
        r = rerequests.patch(url, json=upload_data, headers=headers, verify=True)  # files = files,
    except requests.exceptions.RequestException as e:
        print(e)
        return {'CANCELLED'}
    return {'FINISHED'}


def reduce_all_images(target_scale=1024):
    for img in bpy.data.images:
        if img.name != 'Render Result':
            print('scaling ', img.name, img.size[0], img.size[1])
            # make_possible_reductions_on_image(i)
            if max(img.size) > target_scale:
                ratio = float(target_scale) / float(max(img.size))
                print(ratio)
                # i.save()
                fp = '//tempimagestorage'
                # print('generated filename',fp)
                # for pf in img.packed_files:
                #     pf.filepath = fp  # bpy.path.abspath(fp)

                img.filepath = fp
                img.filepath_raw = fp
                print(int(img.size[0] * ratio), int(img.size[1] * ratio))
                img.scale(int(img.size[0] * ratio), int(img.size[1] * ratio))
                img.update()
                # img.save()
                # img.reload()
                img.pack()


def get_texture_filepath(tex_dir_path, image, resolution='blend'):
    image_file_name = bpy.path.basename(image.filepath)
    if image_file_name == '':
        image_file_name = image.name.split('.')[0]

    suffix = paths.resolution_suffix[resolution]

    fp = os.path.join(tex_dir_path, image_file_name)
    # check if there is already an image with the same name and thus also an assigned path
    # (can happen easily with generated texture sets and multiple materials)
    done = False
    fpn = fp
    i = 0
    while not done:
        is_solo = True
        for image1 in bpy.data.images:
            if image != image1 and image1.filepath == fpn:
                is_solo = False
                fpleft, fpext = os.path.splitext(fp)
                fpn = fpleft + str(i).zfill(3) + fpext
                i += 1
        if is_solo:
            done = True

    return fpn


def generate_lower_resolutions_hdr(asset_data, fpath):
    '''generates lower resolutions for HDR images'''
    hdr = bpy.data.images.load(fpath)
    actres = max(hdr.size[0], hdr.size[1])
    p2res = paths.round_to_closest_resolution(actres)
    original_filesize = os.path.getsize(fpath) # for comparison on the original level
    i = 0
    finished = False
    files = []
    while not finished:
        fn_strip, ext = os.path.splitext(fpath)
        ext = '.exr'
        if i > 0:
            downscale(hdr)

        hdr_resolution_filepath = fn_strip + paths.resolution_suffix[p2res] + ext
        image_utils.img_save_as(hdr, filepath=hdr_resolution_filepath, file_format='OPEN_EXR', quality=20,
                                color_mode='RGB', compression=15, view_transform='Raw', exr_codec='DWAA')

        if os.path.exists(hdr_resolution_filepath):
            reduced_filesize = os.path.getsize(hdr_resolution_filepath)

            # compare file sizes
            print(f'HDR size was reduced from {original_filesize} to {reduced_filesize}')
            if reduced_filesize < original_filesize:
                # This avoids uploading files, especially at the same-as-original resolution, when there is no size advantage.
                # Usually, however, the advantage can be significant even for the original resolution.
                files.append({
                    "type": p2res,
                    "index": 0,
                    "file_path": hdr_resolution_filepath
                })

                print('prepared resolution file: ', p2res)

        if rkeys.index(p2res) == 0:
            finished = True
        else:
            p2res = rkeys[rkeys.index(p2res) - 1]
        i += 1

    print('uploading resolution files')
    upload_resolutions(files, asset_data)

    preferences = bpy.context.preferences.addons['blenderkit'].preferences
    patch_asset_empty(asset_data['id'], preferences.api_key)


def generate_lower_resolutions(data):
    asset_data = data['asset_data']
    actres = get_current_resolution()
    # first let's skip procedural assets
    base_fpath = bpy.data.filepath

    s = bpy.context.scene

    print('current resolution of the asset ', actres)
    if actres > 0:
        p2res = paths.round_to_closest_resolution(actres)
        orig_res = p2res
        print(p2res)
        finished = False
        files = []
        # now skip assets that already have the lowest possible resolution
        if p2res != rkeys[0]:
            original_textures_filesize = 0
            for i in bpy.data.images:
                abspath = bpy.path.abspath(i.filepath)
                if os.path.exists(abspath):
                    original_textures_filesize += os.path.getsize(abspath)

            while not finished:

                blend_file_name = os.path.basename(base_fpath)

                dirn = os.path.dirname(base_fpath)
                fn_strip, ext = os.path.splitext(blend_file_name)

                fn = fn_strip + paths.resolution_suffix[p2res] + ext
                fpath = os.path.join(dirn, fn)

                tex_dir_path = paths.get_texture_directory(asset_data, resolution=p2res)

                tex_dir_abs = bpy.path.abspath(tex_dir_path)
                if not os.path.exists(tex_dir_abs):
                    os.mkdir(tex_dir_abs)

                reduced_textures_filesize = 0
                for i in bpy.data.images:
                    if i.name != 'Render Result':

                        print('scaling ', i.name, i.size[0], i.size[1])
                        fp = get_texture_filepath(tex_dir_path, i, resolution=p2res)

                        if p2res == orig_res:
                            # first, let's link the image back to the original one.
                            i['blenderkit_original_path'] = i.filepath
                            # first round also makes reductions on the image, while keeping resolution
                            make_possible_reductions_on_image(i, fp, do_reductions=True, do_downscale=False)

                        else:
                            # lower resolutions only downscale
                            make_possible_reductions_on_image(i, fp, do_reductions=False, do_downscale=True)

                        abspath = bpy.path.abspath(i.filepath)
                        if os.path.exists(abspath):
                            reduced_textures_filesize += os.path.getsize(abspath)

                        i.pack()
                # save
                print(fpath)
                # save the file
                bpy.ops.wm.save_as_mainfile(filepath=fpath, compress=True, copy=True)
                # compare file sizes
                print(f'textures size was reduced from {original_textures_filesize} to {reduced_textures_filesize}')
                if reduced_textures_filesize < original_textures_filesize:
                    # This avoids uploading files, especially at the same-as-original resolution, when there is no size advantage.
                    # Usually, however, the advantage can be significant even for the original resolution.
                    files.append({
                        "type": p2res,
                        "index": 0,
                        "file_path": fpath
                    })

                print('prepared resolution file: ', p2res)
                if rkeys.index(p2res) == 0:
                    finished = True
                else:
                    p2res = rkeys[rkeys.index(p2res) - 1]
            print('uploading resolution files')
            upload_resolutions(files, data['asset_data'])
            preferences = bpy.context.preferences.addons['blenderkit'].preferences
            patch_asset_empty(data['asset_data']['id'], preferences.api_key)
        return


def regenerate_thumbnail_material(data):
    # this should re-generate material thumbnail and re-upload it.
    # first let's skip procedural assets
    base_fpath = bpy.data.filepath
    blend_file_name = os.path.basename(base_fpath)
    bpy.ops.mesh.primitive_cube_add()
    aob = bpy.context.active_object
    bpy.ops.object.material_slot_add()
    aob.material_slots[0].material = bpy.data.materials[0]
    props = aob.active_material.blenderkit
    props.thumbnail_generator_type = 'BALL'
    props.thumbnail_background = False
    props.thumbnail_resolution = '256'
    # layout.prop(props, 'thumbnail_generator_type')
    # layout.prop(props, 'thumbnail_scale')
    # layout.prop(props, 'thumbnail_background')
    # if props.thumbnail_background:
    #     layout.prop(props, 'thumbnail_background_lightness')
    # layout.prop(props, 'thumbnail_resolution')
    # layout.prop(props, 'thumbnail_samples')
    # layout.prop(props, 'thumbnail_denoising')
    # layout.prop(props, 'adaptive_subdivision')
    # preferences = bpy.context.preferences.addons['blenderkit'].preferences
    # layout.prop(preferences, "thumbnail_use_gpu")
    # TODO: here it should call start_material_thumbnailer , but with the wait property on, so it can upload afterwards.
    bpy.ops.object.blenderkit_material_thumbnail()
    time.sleep(130)
    # save
    # this does the actual job

    return


def assets_db_path():
    dpath = os.path.dirname(bpy.data.filepath)
    fpath = os.path.join(dpath, 'all_assets.json')
    return fpath


def get_assets_search():
    # bpy.app.debug_value = 2

    results = []
    preferences = bpy.context.preferences.addons['blenderkit'].preferences
    url = paths.get_api_url() + 'search/all'
    i = 0
    while url is not None:
        headers = utils.get_headers(preferences.api_key)
        print('fetching assets from assets endpoint')
        print(url)
        retries = 0
        while retries < 3:
            r = rerequests.get(url, headers=headers)

            try:
                adata = r.json()
            except Exception as e:
                print(e)
                print('failed to get next page')
                retries += 1
                if retries == 3:
                    url = None
                continue

            url = adata.get('next')
            i += 1
            if adata.get('results') is not None:
                results.extend(adata['results'])
            print(f'fetched page {i}')
            break

    fpath = assets_db_path()
    with open(fpath, 'w', encoding = 'utf-8') as s:
        json.dump(results, s, ensure_ascii=False, indent=4)


def get_assets_for_resolutions(page_size=100, max_results=100000000):
    preferences = bpy.context.preferences.addons['blenderkit'].preferences

    dpath = os.path.dirname(bpy.data.filepath)
    filepath = os.path.join(dpath, 'assets_for_resolutions.json')
    params = {
        'order': '-created',
        'textureResolutionMax_gte': '100',
        #    'last_resolution_upload_lt':'2020-9-01'
    }
    search.get_search_simple(params, filepath=filepath, page_size=page_size, max_results=max_results,
                             api_key=preferences.api_key)
    return filepath


def get_materials_for_validation(page_size=100, max_results=100000000):
    preferences = bpy.context.preferences.addons['blenderkit'].preferences
    dpath = os.path.dirname(bpy.data.filepath)
    filepath = os.path.join(dpath, 'materials_for_validation.json')
    params = {
        'order': '-created',
        'asset_type': 'material',
        'verification_status': 'uploaded'
    }
    search.get_search_simple(params, filepath=filepath, page_size=page_size, max_results=max_results,
                             api_key=preferences.api_key)
    return filepath


# This gets all assets in the database through the /assets endpoint. Currently not used, since we use elastic for everything.
# def get_assets_list():
#     bpy.app.debug_value = 2
#
#     results = []
#     preferences = bpy.context.preferences.addons['blenderkit'].preferences
#     url = paths.get_api_url() + 'assets/all'
#     i = 0
#     while url is not None:
#         headers = utils.get_headers(preferences.api_key)
#         print('fetching assets from assets endpoint')
#         print(url)
#         retries = 0
#         while retries < 3:
#             r = rerequests.get(url, headers=headers)
#
#             try:
#                 adata = r.json()
#                 url = adata.get('next')
#                 print(i)
#                 i += 1
#             except Exception as e:
#                 print(e)
#                 print('failed to get next')
#                 if retries == 2:
#                     url = None
#             if adata.get('results') != None:
#                 results.extend(adata['results'])
#                 retries = 3
#             print(f'fetched page {i}')
#             retries += 1
#
#     fpath = assets_db_path()
#     with open(fpath, 'w', encoding = 'utf-8') as s:
#         json.dump(results, s, ensure_ascii=False, indent=4)


def load_assets_list(filepath):
    assets = []
    if os.path.exists(filepath):
        with open(filepath, 'r', encoding='utf-8') as s:
            assets = json.load(s)
    return assets


def check_needs_resolutions(a):
    if a['verificationStatus'] == 'validated' and a['assetType'] in ('material', 'model', 'scene', 'hdr'):
        # the search itself now picks the right assets so there's no need to filter more than asset types.
        # TODO needs to check first if the upload date is older than resolution upload date, for that we need resolution upload date.
        for f in a['files']:
            if f['fileType'].find('resolution') > -1:
                return False

        return True
    return False


def download_asset(asset_data, resolution='blend', unpack=False, api_key=''):
    '''
    Download an asset in a non-threaded way.
    Parameters
    ----------
    asset_data - search result from elastic or assets endpoints from API

    Returns
    -------
    path to the resulting asset file or None if asset isn't accessible
    '''

    has_url = download.get_download_url(asset_data, download.get_scene_id(), api_key, tcom=None,
                                        resolution='blend')
    if has_url:
        fpath = download.download_file(asset_data)
        if fpath and unpack and asset_data['assetType'] != 'hdr':
            send_to_bg(asset_data, fpath, command='unpack', wait=True)
        return fpath

    return None


def generate_resolution_thread(asset_data, api_key):
    '''
    A thread task that downloads the file and only then starts a Blender instance that generates the resolutions.
    Parameters
    ----------
    asset_data

    Returns
    -------

    '''

    fpath = download_asset(asset_data, unpack=True, api_key=api_key)
    if fpath:
        if asset_data['assetType'] != 'hdr':
            print('send to bg ', fpath)
            send_to_bg(asset_data, fpath, command='generate_resolutions', wait=True)
        else:
            generate_lower_resolutions_hdr(asset_data, fpath)
        # send_to_bg by now waits for end of the process.
        # time.sleep((5))


def iterate_for_resolutions(filepath, process_count=12, api_key='', do_checks=True):
    '''Iterate through all assigned assets, check which ones need resolution generation and send them to the generator.'''
    assets = load_assets_list(filepath)
    print(len(assets))
    threads = []
    for asset_data in assets:
        asset_data = search.parse_result(asset_data)
        if asset_data is not None:

            if not do_checks or check_needs_resolutions(asset_data):
                print('downloading and generating resolution for %s' % asset_data['name'])
                # this is just a quick hack for not using original dirs in blenderkit...
                generate_resolution_thread(asset_data, api_key)
                # thread = threading.Thread(target=generate_resolution_thread, args=(asset_data, api_key))
                # thread.start()
                #
                # threads.append(thread)
                # print('processes ', len(threads))
                # while len(threads) > process_count - 1:
                #     for t in threads:
                #         if not t.is_alive():
                #             threads.remove(t)
                #         break;
                # else:
                #     print(f'Failed to generate resolution:{asset_data["name"]}')
            else:
                print('not generated resolutions:', asset_data['name'])


def send_to_bg(asset_data, fpath, command='generate_resolutions', wait=True):
    '''
    Send various tasks to a new Blender instance that runs and closes after finishing the task.
    This function waits until the process finishes.
    The function tries to set the same bpy.app.debug_value in the instance of Blender that is run.
    Parameters
    ----------
    asset_data
    fpath - file that will be processed
    command - command which should be run in background.

    Returns
    -------
    None
    '''
    data = {
        'fpath': fpath,
        'debug_value': bpy.app.debug_value,
        'asset_data': asset_data,
        'command': command,
    }
    binary_path = bpy.app.binary_path
    tempdir = tempfile.mkdtemp()
    datafile = os.path.join(tempdir, 'resdata.json')
    script_path = os.path.dirname(os.path.realpath(__file__))
    with open(datafile, 'w', encoding='utf-8') as s:
        json.dump(data, s, ensure_ascii=False, indent=4)

    print('opening Blender instance to do processing - ', command)

    if wait:
        proc = subprocess.run([
            binary_path,
            "--background",
            "-noaudio",
            fpath,
            "--python", os.path.join(script_path, "resolutions_bg.py"),
            "--", datafile
        ], bufsize=1, stdout=sys.stdout, stdin=subprocess.PIPE, creationflags=utils.get_process_flags())

    else:
        # TODO this should be fixed to allow multithreading.
        proc = subprocess.Popen([
            binary_path,
            "--background",
            "-noaudio",
            fpath,
            "--python", os.path.join(script_path, "resolutions_bg.py"),
            "--", datafile
        ], bufsize=1, stdout=subprocess.PIPE, stdin=subprocess.PIPE, creationflags=utils.get_process_flags())
        return proc


def write_data_back(asset_data):
    '''ensures that the data in the resolution file is the same as in the database.'''
    pass


def run_bg(datafile):
    print('background file operation')
    with open(datafile, 'r', encoding='utf-8') as f:
        data = json.load(f)
    bpy.app.debug_value = data['debug_value']
    write_data_back(data['asset_data'])
    if data['command'] == 'generate_resolutions':
        generate_lower_resolutions(data)
    elif data['command'] == 'unpack':
        unpack_asset(data)
    elif data['command'] == 'regen_thumbnail':
        regenerate_thumbnail_material(data)

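# Note: run_bg() is presumably invoked from resolutions_bg.py inside the background Blender
# instance launched by send_to_bg(), with the data file path passed after the '--' separator.
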
# load_assets_list()
# generate_lower_resolutions()
# class TestOperator(bpy.types.Operator):
#     """Tooltip"""
#     bl_idname = "object.test_anything"
#     bl_label = "Test Operator"
#
#     @classmethod
#     def poll(cls, context):
#         return True
#
#     def execute(self, context):
#         iterate_for_resolutions()
#         return {'FINISHED'}
#
#
# def register():
#     bpy.utils.register_class(TestOperator)
#
#
# def unregister():
#     bpy.utils.unregister_class(TestOperator)
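

# Example (commented out, for manual use): a minimal sketch of driving the whole pipeline,
# assuming a saved .blend file (its directory is used for the JSON cache) and a valid API key.
#
# def process_missing_resolutions():
#     preferences = bpy.context.preferences.addons['blenderkit'].preferences
#     filepath = get_assets_for_resolutions(page_size=100)
#     iterate_for_resolutions(filepath, api_key=preferences.api_key, do_checks=True)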