# mesh_tissue/utils.py (blender-addons.git, git.blender.org)
# SPDX-License-Identifier: GPL-2.0-or-later

import bpy
import threading
import numpy as np
import multiprocessing
from multiprocessing import Process, Pool

weight = []
n_threads = multiprocessing.cpu_count()

class ThreadVertexGroup(threading.Thread):
    def __init__(self, id, vertex_group, n_verts):
        self.id = id
        self.vertex_group = vertex_group
        self.n_verts = n_verts
        threading.Thread.__init__(self)

    def run(self):
        global weight
        global n_threads
        # Each thread reads every n_threads-th vertex, starting at its own id,
        # so together the threads cover the whole range [0, n_verts).
        verts = np.arange(self.id, self.n_verts, n_threads)
        for v in verts:
            try:
                weight[v] = self.vertex_group.weight(v)
            except RuntimeError:
                # vertex not assigned to this group: keep the existing value
                pass

def thread_read_weight(_weight, vertex_group):
    global weight
    global n_threads
    print(n_threads)
    weight = _weight
    n_verts = len(weight)
    threads = [ThreadVertexGroup(i, vertex_group, n_verts) for i in range(n_threads)]
    for t in threads: t.start()
    for t in threads: t.join()
    return weight
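
# Example usage (a sketch only; assumes an active mesh object with a vertex
# group called "Group", both hypothetical here):
#
#   ob = bpy.context.object
#   vg = ob.vertex_groups.get("Group")
#   if vg is not None:
#       weights = thread_read_weight(np.zeros(len(ob.data.vertices)), vg)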

def process_read_weight(id, vertex_group, n_verts):
    global weight
    global n_threads
    # Same striding as ThreadVertexGroup.run, but meant to be executed in a
    # separate process ('id' and 'n_verts' are the function arguments here).
    verts = np.arange(id, n_verts, n_threads)
    for v in verts:
        try:
            weight[v] = vertex_group.weight(v)
        except RuntimeError:
            pass


def read_weight(_weight, vertex_group):
    global weight
    global n_threads
    print(n_threads)
    weight = _weight
    n_verts = len(weight)
    n_cores = multiprocessing.cpu_count()
    # Note: the worker processes operate on their own copies of the global
    # 'weight' and their results are never collected, so the parent's buffer
    # is returned unchanged; thread_read_weight() is the variant that fills
    # the buffer in place.
    pool = Pool(processes=n_cores)
    multiple_results = [pool.apply_async(process_read_weight, (i, vertex_group, n_verts)) for i in range(n_cores)]
    pool.close()
    #processes = [Process(target=process_read_weight, args=(i, vertex_group, n_verts)) for i in range(n_threads)]
    #for t in processes: t.start()
    #for t in processes: t.join()
    return weight

# Recursively traverse layer_collection looking for a particular name;
# returns None when no matching collection is found.
def recurLayerCollection(layerColl, collName):
    found = None
    if (layerColl.name == collName):
        return layerColl
    for layer in layerColl.children:
        found = recurLayerCollection(layer, collName)
        if found:
            return found
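
# Example (a sketch only; "Cutters" is a hypothetical collection name):
#
#   root = bpy.context.view_layer.layer_collection
#   lc = recurLayerCollection(root, "Cutters")
#   if lc is not None:
#       bpy.context.view_layer.active_layer_collection = lc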

def auto_layer_collection():
    # automatically change active layer collection
    layer = bpy.context.view_layer.active_layer_collection
    layer_collection = bpy.context.view_layer.layer_collection
    if layer.hide_viewport or layer.collection.hide_viewport:
        collections = bpy.context.object.users_collection
        for c in collections:
            lc = recurLayerCollection(layer_collection, c.name)
            if lc and not c.hide_viewport and not lc.hide_viewport:
                bpy.context.view_layer.active_layer_collection = lc

def lerp(a, b, t):
    return a + (b - a) * t

def _lerp2(v1, v2, v3, v4, v):
    v12 = v1.lerp(v2,v.x) # + (v2 - v1) * v.x
    v34 = v3.lerp(v4,v.x) # + (v4 - v3) * v.x
    return v12.lerp(v34, v.y)# + (v34 - v12) * v.y

def lerp2(v1, v2, v3, v4, v):
    v12 = v1 + (v2 - v1) * v.x
    v34 = v3 + (v4 - v3) * v.x
    return v12 + (v34 - v12) * v.y

def lerp3(v1, v2, v3, v4, v):
    loc = lerp2(v1.co, v2.co, v3.co, v4.co, v)
    nor = lerp2(v1.normal, v2.normal, v3.normal, v4.normal, v)
    nor.normalize()
    return loc + nor * v.z
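
# Example (a sketch only, using mathutils.Vector): bilinear interpolation of
# four corner points, evaluated at the centre of the quad. lerp3() expects
# vertex-like inputs that expose .co and .normal (e.g. mesh vertices).
#
#   from mathutils import Vector
#   corners = (Vector((0, 0, 0)), Vector((1, 0, 0)),
#              Vector((0, 1, 0)), Vector((1, 1, 0)))
#   centre = lerp2(*corners, Vector((0.5, 0.5, 0)))  # Vector((0.5, 0.5, 0))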

def _convert_object_to_mesh(ob, apply_modifiers=True, preserve_status=True):
    if not apply_modifiers:
        mod_visibility = [m.show_viewport for m in ob.modifiers]
        for m in ob.modifiers:
            m.show_viewport = False
    if preserve_status:
        # store status
        mode = bpy.context.object.mode
        selected = bpy.context.selected_objects
        active = bpy.context.object
    # change status
    bpy.ops.object.mode_set(mode='OBJECT')
    bpy.ops.object.select_all(action='DESELECT')
    new_ob = ob.copy()
    new_ob.data = ob.data.copy()
    bpy.context.collection.objects.link(new_ob)
    bpy.context.view_layer.objects.active = new_ob
    new_ob.select_set(True)
    bpy.ops.object.convert(target='MESH')
    if preserve_status:
        # restore status
        bpy.ops.object.select_all(action='DESELECT')
        for o in selected: o.select_set(True)
        bpy.context.view_layer.objects.active = active
        bpy.ops.object.mode_set(mode=mode)
    if not apply_modifiers:
        for m,vis in zip(ob.modifiers,mod_visibility):
            m.show_viewport = vis
    return new_ob

def convert_object_to_mesh(ob, apply_modifiers=True, preserve_status=True):
    if not ob.name: return None
    if ob.type != 'MESH':
        if not apply_modifiers:
            mod_visibility = [m.show_viewport for m in ob.modifiers]
            for m in ob.modifiers: m.show_viewport = False
        #ob.modifiers.update()
        #dg = bpy.context.evaluated_depsgraph_get()
        #ob_eval = ob.evaluated_get(dg)
        #me = bpy.data.meshes.new_from_object(ob_eval, preserve_all_data_layers=True, depsgraph=dg)
        me = simple_to_mesh(ob)
        new_ob = bpy.data.objects.new(ob.data.name, me)
        new_ob.location, new_ob.matrix_world = ob.location, ob.matrix_world
        if not apply_modifiers:
            for m,vis in zip(ob.modifiers,mod_visibility): m.show_viewport = vis
    else:
        if apply_modifiers:
            new_ob = ob.copy()
            new_ob.data = simple_to_mesh(ob)
        else:
            new_ob = ob.copy()
            new_ob.data = ob.data.copy()
            new_ob.modifiers.clear()
    bpy.context.collection.objects.link(new_ob)
    if preserve_status:
        new_ob.select_set(False)
    else:
        for o in bpy.context.view_layer.objects: o.select_set(False)
        new_ob.select_set(True)
        bpy.context.view_layer.objects.active = new_ob
    return new_ob
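
# Example (a sketch only): make an evaluated mesh copy of the active object
# with its modifiers applied, without disturbing the current selection.
#
#   ob = bpy.context.object
#   mesh_ob = convert_object_to_mesh(ob, apply_modifiers=True, preserve_status=True)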

def simple_to_mesh(ob):
    dg = bpy.context.evaluated_depsgraph_get()
    ob_eval = ob.evaluated_get(dg)
    me = bpy.data.meshes.new_from_object(ob_eval, preserve_all_data_layers=True, depsgraph=dg)
    me.calc_normals()
    return me
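
# The mesh returned by simple_to_mesh() is a new datablock owned by the
# caller. A minimal sketch of using and freeing it:
#
#   me = simple_to_mesh(bpy.context.object)
#   print(len(me.vertices))
#   bpy.data.meshes.remove(me)  # free it once it is no longer needed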

# Prevent Blender crashes: install a render_init handler that turns off the
# animated Tissue and Reaction-Diffusion updates before rendering starts.
def set_animatable_fix_handler(self, context):
    old_handlers = []
    blender_handlers = bpy.app.handlers.render_init
    for h in blender_handlers:
        if "turn_off_animatable" in str(h):
            old_handlers.append(h)
    for h in old_handlers: blender_handlers.remove(h)
    blender_handlers.append(turn_off_animatable)
    return

def turn_off_animatable(scene):
    for o in bpy.data.objects:
        try:
            o.tissue_tessellate.bool_run = False
            o.reaction_diffusion_settings.run = False
        except AttributeError:
            pass
    return
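
# Example (a sketch only; the property name is hypothetical): this fix-up
# handler is meant to be used as a property update callback, so the
# render_init handler is (re)installed whenever the property changes.
#
#   bpy.types.Object.tissue_example_flag = bpy.props.BoolProperty(
#       name="Example Flag",
#       default=False,
#       update=set_animatable_fix_handler,
#   )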