Welcome to mirror list, hosted at ThFree Co, Russian Federation.

gltf2_blender_gather_animation_channels.py « exp « blender « io_scene_gltf2 - git.blender.org/blender-addons.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
blob: 5f81c995d715f6ad01a41ad135c3db2e423573e0 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
# SPDX-License-Identifier: Apache-2.0
# Copyright 2018-2021 The glTF-Blender-IO authors.

import bpy
import typing

from ..com.gltf2_blender_data_path import get_target_object_path, get_target_property_name, get_rotation_modes, get_delta_modes, is_location, is_rotation, is_scale
from io_scene_gltf2.io.com import gltf2_io
from io_scene_gltf2.io.com import gltf2_io_debug
from io_scene_gltf2.blender.exp.gltf2_blender_gather_cache import cached
from io_scene_gltf2.blender.exp import gltf2_blender_gather_animation_samplers
from io_scene_gltf2.blender.exp import gltf2_blender_gather_animation_channel_target
from io_scene_gltf2.blender.exp import gltf2_blender_gather_animation_sampler_keyframes
from io_scene_gltf2.blender.exp import gltf2_blender_get
from io_scene_gltf2.blender.exp import gltf2_blender_gather_skins
from io_scene_gltf2.blender.exp import gltf2_blender_gather_drivers
from io_scene_gltf2.io.exp.gltf2_io_user_extensions import export_user_extensions
from io_scene_gltf2.blender.exp.gltf2_blender_gather_tree import VExportNode
from . import gltf2_blender_export_keys


def gather_channels_baked(obj_uuid, frame_range, export_settings):
    """Bake TRS channels for an object that has no directly exportable action.

    Used when an object must be force-baked: either a non-animated object in a
    selection export, or an object parented to a bone (where inverse parent
    transforms require baking).

    :param obj_uuid: uuid of the object in export_settings['vtree']
    :param frame_range: (start, end) tuple coming from the armature, or None to
        use the union of all action ranges in the file
    :param export_settings: export settings dict
    :return: list of gltf2_io.AnimationChannel, or None if nothing was baked
    """
    # Nothing to bake if the file contains no animation at all
    if not bpy.data.actions:
        return None

    blender_obj = export_settings['vtree'].nodes[obj_uuid].blender_object

    if frame_range is None:
        # Use the union of every action range in the file
        all_ranges = [action.frame_range for action in bpy.data.actions]
        start_frame = min(r[0] for r in all_ranges)
        end_frame = max(r[1] for r in all_ranges)
    elif blender_obj.animation_data and blender_obj.animation_data.action:
        # Coming from object parented to bone, and object is also animated:
        # use the range of its own action
        action_range = blender_obj.animation_data.action.frame_range
        start_frame, end_frame = action_range[0], action_range[1]
    else:
        # Coming from object parented to bone, and object is not animated:
        # use the range provided by the armature
        start_frame, end_frame = frame_range

    # Cache key: when the object is animated (object-parented-to-bone case),
    # use the action name; otherwise fall back to the object uuid, as for any
    # non-animated object
    if blender_obj.animation_data and blender_obj.animation_data.action:
        key_action = blender_obj.animation_data.action.name
    else:
        key_action = obj_uuid

    baked_channels = []
    for target_path in ["location", "rotation_quaternion", "scale"]:
        chan = gather_animation_channel(
            obj_uuid,
            (),
            export_settings,
            None,            # bake_bone
            target_path,     # bake_channel
            start_frame,
            end_frame,
            False,           # force_range
            key_action,
            None,            # driver_obj_uuid
            False)           # object is not keyed: don't keep animation for this channel
        if chan is not None:
            baked_channels.append(chan)

    return baked_channels if baked_channels else None

@cached
def gather_animation_channels(obj_uuid: int,
                              blender_action: bpy.types.Action,
                              export_settings
                              ) -> typing.List[gltf2_io.AnimationChannel]:
    """Gather all glTF animation channels created by one action on one object.

    Two export paths are handled:
    - Armature with 'Force sampling': bake TRS of every exported bone, plus the
      armature object's own channels, shapekey driver channels, and baked TRS
      of objects parented to bones.
    - Any other object: export each fcurve group directly; when exporting a
      truncated selection, additionally bake the TRS channels that have no
      fcurves of their own.

    :return: list of gltf2_io.AnimationChannel (may be empty)
    """
    channels = []

    blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object

    # First calculate range of animation for baking
    # This is needed if user set 'Force sampling' and in case we need to bake
    bake_range_start = None
    bake_range_end = None
    force_range = False
    # If range is manually set on the action, use it. Else, calculate it from fcurves
    if blender_action.use_frame_range is True:
        bake_range_start = blender_action.frame_start
        bake_range_end = blender_action.frame_end
        force_range = True  # keyframe_points is read-only, we can't restrict here
    else:
        groups = __get_channel_groups(blender_action, blender_object, export_settings)
        # Note: channels has some None items only for SK if some SK are not animated
        for chans in groups:
            # Compute each fcurve's frame range once, then fold into the global range
            ranges = [channel.range() for channel in chans if channel is not None]
            if not ranges:
                continue
            group_start = min(r[0] for r in ranges)
            group_end = max(r[1] for r in ranges)
            bake_range_start = group_start if bake_range_start is None else min(bake_range_start, group_start)
            bake_range_end = group_end if bake_range_end is None else max(bake_range_end, group_end)


    if blender_object.type == "ARMATURE" and export_settings['gltf_force_sampling'] is True:
        # We have to store sampled animation data for every deformation bone

        # Check that there are some anim in this action
        if bake_range_start is None:
            return []

        # Then bake all bones
        bones_uuid = export_settings["vtree"].get_all_bones(obj_uuid)
        bones_to_be_animated = [blender_object.pose.bones[export_settings["vtree"].nodes[b].blender_bone.name] for b in bones_uuid]

        # (bone name, property) pairs that really have fcurves, used to flag
        # channels that are truly animated vs merely baked for completeness
        list_of_animated_bone_channels = []
        for channel_group in __get_channel_groups(blender_action, blender_object, export_settings):
            list_of_animated_bone_channels.extend(
                [(gltf2_blender_get.get_object_from_datapath(blender_object, get_target_object_path(i.data_path)).name,
                  get_target_property_name(i.data_path)) for i in channel_group])

        for bone in bones_to_be_animated:
            for p in ["location", "rotation_quaternion", "scale"]:
                channel = gather_animation_channel(
                    obj_uuid,
                    (),
                    export_settings,
                    bone.name,
                    p,
                    bake_range_start,
                    bake_range_end,
                    force_range,
                    blender_action.name,
                    None,
                    (bone.name, p) in list_of_animated_bone_channels)
                if channel is not None:
                    channels.append(channel)


        # Retrieve animation on armature object itself, if any
        fcurves_armature = __gather_armature_object_channel_groups(blender_action, blender_object, export_settings)
        for channel_group in fcurves_armature:
            # No need to sort on armature, that can't have SK
            if len(channel_group) == 0:
                # Only errors on channels, ignoring
                continue
            channel = gather_animation_channel(obj_uuid, channel_group, export_settings, None, None, bake_range_start, bake_range_end, force_range, blender_action.name, None, True)
            if channel is not None:
                channels.append(channel)


        # Retrieve channels for drivers, if needed
        drivers_to_manage = gltf2_blender_gather_drivers.get_sk_drivers(obj_uuid, export_settings)
        for obj_driver_uuid, fcurves in drivers_to_manage:
            channel = gather_animation_channel(
                obj_uuid,
                fcurves,
                export_settings,
                None,
                None,
                bake_range_start,
                bake_range_end,
                force_range,
                blender_action.name,
                obj_driver_uuid,
                True)
            if channel is not None:
                channels.append(channel)

        # When An Object is parented to bone, and rest pose is used (not current frame)
        # If parenting is not done with same TRS than rest pose, this can lead to inconsistencies
        # So we need to bake object animation too, to be sure that correct TRS animation are used
        # Here, we want add these channels to same action that the armature
        if export_settings['gltf_selected'] is False and export_settings['gltf_current_frame'] is False:

            children_obj_parent_to_bones = []
            for bone_uuid in bones_uuid:
                children_obj_parent_to_bones.extend([child for child in export_settings['vtree'].nodes[bone_uuid].children if export_settings['vtree'].nodes[child].blender_type not in [VExportNode.BONE, VExportNode.ARMATURE]])
            for child_uuid in children_obj_parent_to_bones:

                channels_baked = gather_channels_baked(child_uuid, (bake_range_start, bake_range_end), export_settings)
                if channels_baked is not None:
                    channels.extend(channels_baked)

    else:
        done_paths = []
        for channel_group in __get_channel_groups(blender_action, blender_object, export_settings):
            channel_group_sorted = __get_channel_group_sorted(channel_group, blender_object)
            if len(channel_group_sorted) == 0:
                # Only errors on channels, ignoring
                continue
            channel = gather_animation_channel(
                obj_uuid,
                channel_group_sorted,
                export_settings,
                None,
                None,
                bake_range_start,
                bake_range_end,
                force_range,
                blender_action.name,
                None,
                True
                )
            if channel is not None:
                channels.append(channel)

            # Remember which glTF path this group covers, to know what remains to be baked
            target = [c for c in channel_group_sorted if c is not None][0].data_path.split('.')[-1]
            path = {
                "delta_location": "location",
                "delta_rotation_euler": "rotation_quaternion",
                "location": "location",
                "rotation_axis_angle": "rotation_quaternion",
                "rotation_euler": "rotation_quaternion",
                "rotation_quaternion": "rotation_quaternion",
                "scale": "scale",
                "value": "weights"
            }.get(target)
            if path is not None:
                done_paths.append(path)
        done_paths = list(set(done_paths))

        if export_settings['gltf_selected'] is True and export_settings['vtree'].tree_troncated is True:
            # Exporting a truncated selection: bake the TRS channels not covered by fcurves
            start_frame = min([v[0] for v in [a.frame_range for a in bpy.data.actions]])
            end_frame = max([v[1] for v in [a.frame_range for a in bpy.data.actions]])
            to_be_done = ['location', 'rotation_quaternion', 'scale']
            to_be_done = [c for c in to_be_done if c not in done_paths]

            # In case of weight action, do nothing.
            # If there is only weight --> TRS is already managed at first
            if not (len(done_paths) == 1 and 'weights' in done_paths):
                for p in to_be_done:
                    channel = gather_animation_channel(
                        obj_uuid,
                        (),
                        export_settings,
                        None,
                        p,
                        start_frame,
                        end_frame,
                        force_range,
                        blender_action.name,
                        None,
                        False  # If Object is not animated, don't keep animation for this channel
                        )

                    if channel is not None:
                        channels.append(channel)



    # resetting driver caches
    gltf2_blender_gather_drivers.get_sk_driver_values.reset_cache()
    gltf2_blender_gather_drivers.get_sk_drivers.reset_cache()
    # resetting bone caches
    gltf2_blender_gather_animation_sampler_keyframes.get_bone_matrix.reset_cache()

    return channels

def __get_channel_group_sorted(channels: typing.Tuple[bpy.types.FCurve], blender_object: bpy.types.Object):
    """Sort shapekey channels into key-block order; other channels keep order.

    glTF morph target weights must be exported in the order of the (exported)
    shape keys, so shapekey fcurves are reordered accordingly, with None
    placeholders for shape keys that have no fcurve. Non-shapekey channel
    groups are returned unchanged, since their order doesn't matter.

    :param channels: tuple of fcurves belonging to one channel group
    :param blender_object: the object the action is assigned to
    :return: tuple of fcurves (possibly containing None), or the input unchanged
    """
    if not channels:
        return channels

    # Only mesh shapekey animation needs sorting
    if blender_object.type != "MESH":
        return channels

    object_path = get_target_object_path(channels[0].data_path)
    if not object_path:
        # Object-level TRS animation: nothing to sort
        return channels

    if not blender_object.data.shape_keys:
        # Something is wrong. Maybe the user assigned an armature action
        # to a mesh object. Returning without sorting
        return channels

    # Map shapekey name -> export index, skipping the basis key and muted keys
    # (they are not exported as morph targets)
    shapekeys_idx = {}
    cpt_sk = 0
    for sk in blender_object.data.shape_keys.key_blocks:
        if sk == sk.relative_key:
            continue
        if sk.mute is True:
            continue
        shapekeys_idx[sk.name] = cpt_sk
        cpt_sk += 1

    # Note: channels will have some None items only for SK if some SK are not animated
    idx_channel_mapping = []
    for sk_c in channels:
        try:
            sk_name = blender_object.data.shape_keys.path_resolve(get_target_object_path(sk_c.data_path)).name
            idx_channel_mapping.append((shapekeys_idx[sk_name], sk_c))
        except Exception:
            # Something is wrong. For example, an armature action linked to a mesh object
            continue

    existing_idx = dict(idx_channel_mapping)
    # None placeholder for every exported shapekey without a keyed fcurve
    all_sorted_channels = [existing_idx.get(i) for i in range(cpt_sk)]

    if all(c is None for c in all_sorted_channels):
        # All channels in error, and some non keyed SK: this happens when an
        # armature action is linked to a mesh object with non keyed SK
        return channels

    return tuple(all_sorted_channels)

# This function can be called directly from gather_animation in case of bake animation (non animated selected object)
def gather_animation_channel(obj_uuid: str,
                               channels: typing.Tuple[bpy.types.FCurve],
                               export_settings,
                               bake_bone: typing.Union[str, None],
                               bake_channel: typing.Union[str, None],
                               bake_range_start,
                               bake_range_end,
                               force_range: bool,
                               action_name: str,
                               driver_obj_uuid,
                               node_channel_is_animated: bool
                               ) -> typing.Union[gltf2_io.AnimationChannel, None]:
    """Build one glTF animation channel (target + sampler) for a single property.

    Returns None when the channel is filtered out, has no resolvable target
    path, or the sampler decides nothing needs to be animated.
    """
    blender_object = export_settings['vtree'].nodes[obj_uuid].blender_object

    # Hook point for filtering; currently always passes
    if not __filter_animation_channel(channels, blender_object, export_settings):
        return None

    target = __gather_target(obj_uuid, channels, export_settings, bake_bone, bake_channel, driver_obj_uuid)
    if target.path is None:
        return None

    sampler = __gather_sampler(channels, obj_uuid, export_settings, bake_bone, bake_channel,
                               bake_range_start, bake_range_end, force_range, action_name,
                               driver_obj_uuid, node_channel_is_animated)
    if sampler is None:
        # After check, no need to animate this node for this channel
        return None

    animation_channel = gltf2_io.AnimationChannel(
        extensions=__gather_extensions(channels, blender_object, export_settings, bake_bone),
        extras=__gather_extras(channels, blender_object, export_settings, bake_bone),
        sampler=sampler,
        target=target
    )

    # Let user extensions post-process the gathered channel
    export_user_extensions('gather_animation_channel_hook',
                           export_settings,
                           animation_channel,
                           channels,
                           blender_object,
                           bake_bone,
                           bake_channel,
                           bake_range_start,
                           bake_range_end,
                           action_name)

    return animation_channel


def __filter_animation_channel(channels: typing.Tuple[bpy.types.FCurve],
                               blender_object: bpy.types.Object,
                               export_settings
                               ) -> bool:
    """Decide whether this channel group should be exported.

    Currently no filtering is performed: every channel is kept.
    """
    return True


def __gather_extensions(channels: typing.Tuple[bpy.types.FCurve],
                        blender_object: bpy.types.Object,
                        export_settings,
                        bake_bone: typing.Union[str, None]
                        ) -> typing.Any:
    """Gather glTF extensions for an animation channel (none currently)."""
    return None


def __gather_extras(channels: typing.Tuple[bpy.types.FCurve],
                    blender_object: bpy.types.Object,
                    export_settings,
                    bake_bone: typing.Union[str, None]
                    ) -> typing.Any:
    """Gather glTF extras for an animation channel (none currently)."""
    return None


def __gather_sampler(channels: typing.Tuple[bpy.types.FCurve],
                     obj_uuid: str,
                     export_settings,
                     bake_bone: typing.Union[str, None],
                     bake_channel: typing.Union[str, None],
                     bake_range_start,
                     bake_range_end,
                     force_range: bool,
                     action_name,
                     driver_obj_uuid,
                     node_channel_is_animated: bool
                     ) -> gltf2_io.AnimationSampler:
    """Delegate sampler creation, flagging cameras/lights for rotation correction."""

    node = export_settings['vtree'].nodes[obj_uuid]
    # Exported cameras and lights need their rotation adjusted to glTF conventions
    is_exported_camera = export_settings[gltf2_blender_export_keys.CAMERAS] and node.blender_type == VExportNode.CAMERA
    is_exported_light = export_settings[gltf2_blender_export_keys.LIGHTS] and node.blender_type == VExportNode.LIGHT
    need_rotation_correction = is_exported_camera or is_exported_light

    return gltf2_blender_gather_animation_samplers.gather_animation_sampler(
        channels,
        obj_uuid,
        bake_bone,
        bake_channel,
        bake_range_start,
        bake_range_end,
        force_range,
        action_name,
        driver_obj_uuid,
        node_channel_is_animated,
        need_rotation_correction,
        export_settings
    )


def __gather_target(obj_uuid: str,
                    channels: typing.Tuple[bpy.types.FCurve],
                    export_settings,
                    bake_bone: typing.Union[str, None],
                    bake_channel: typing.Union[str, None],
                    driver_obj_uuid
                    ) -> gltf2_io.AnimationChannelTarget:
    """Delegate channel-target resolution to the channel-target gather module."""
    target = gltf2_blender_gather_animation_channel_target.gather_animation_channel_target(
        obj_uuid, channels, bake_bone, bake_channel, driver_obj_uuid, export_settings)
    return target


def __delta_conflict(detection: typing.List[bool], delta: bool) -> bool:
    """Track normal/delta usage of one transform kind; report conflicts.

    ``detection`` is a two-item list ``[normal_seen, delta_seen]`` mutated in
    place. Returns True when the incoming fcurve flavor (delta or not)
    conflicts with the other flavor already seen — normal and delta transforms
    of the same kind cannot both be exported, the first flavor seen wins.
    """
    current, other = (1, 0) if delta else (0, 1)
    if detection[other] is True:
        return True
    detection[current] = True
    return False


def __get_channel_groups(blender_action: bpy.types.Action, blender_object: bpy.types.Object, export_settings):
    """Group an action's fcurves by (target object, target property).

    Resolves each fcurve's target (the object itself, a bone, or the mesh
    shape keys), drops fcurves without keyframes, muted shape keys, fcurves
    for rotation modes other than the target's current one, and the
    normal/delta duplicates of an already-seen transform kind.

    :return: iterator of tuples of fcurves, one tuple per (target, property)
    """
    targets = {}
    multiple_rotation_mode_detected = False
    delta_rotation_detection = [False, False]  # Normal / Delta
    delta_location_detection = [False, False]  # Normal / Delta
    delta_scale_detection = [False, False]     # Normal / Delta
    for fcurve in blender_action.fcurves:
        # In some invalid files, channel hasn't any keyframes ... this channel need to be ignored
        if len(fcurve.keyframe_points) == 0:
            continue
        try:
            target_property = get_target_property_name(fcurve.data_path)
        except Exception:
            gltf2_io_debug.print_console("WARNING", "Invalid animation fcurve name on action {}".format(blender_action.name))
            continue
        object_path = get_target_object_path(fcurve.data_path)

        # find the object affected by this action
        if not object_path:
            target = blender_object
        else:
            try:
                target = gltf2_blender_get.get_object_from_datapath(blender_object, object_path)
                if blender_object.type == "MESH" and object_path.startswith("key_blocks"):
                    shape_key = blender_object.data.shape_keys.path_resolve(object_path)
                    if shape_key.mute is True:
                        continue
                    target = blender_object.data.shape_keys
            except ValueError:
                # if the object is a mesh and the action target path can not be resolved, we know that this is a morph
                # animation.
                if blender_object.type == "MESH":
                    try:
                        shape_key = blender_object.data.shape_keys.path_resolve(object_path)
                        if shape_key.mute is True:
                            continue
                        target = blender_object.data.shape_keys
                    except Exception:
                        # Something is wrong, for example a bone animation is linked to an object mesh...
                        gltf2_io_debug.print_console("WARNING", "Animation target {} not found".format(object_path))
                        continue
                else:
                    gltf2_io_debug.print_console("WARNING", "Animation target {} not found".format(object_path))
                    continue

        # Detect that object or bone are not multiple keyed for euler and quaternion
        # Keep only the current rotation mode used by object
        rotation, rotation_modes = get_rotation_modes(target_property)
        delta = get_delta_modes(target_property)

        # Delta rotation management
        if is_rotation(target_property):
            if __delta_conflict(delta_rotation_detection, delta):
                continue
            # Keep only fcurves matching the rotation mode currently used by the target
            if rotation and target.rotation_mode not in rotation_modes:
                multiple_rotation_mode_detected = True
                continue

        # Delta location management
        if is_location(target_property):
            if __delta_conflict(delta_location_detection, delta):
                continue

        # Delta scale management
        if is_scale(target_property):
            if __delta_conflict(delta_scale_detection, delta):
                continue

        # group channels by target object and affected property of the target
        targets.setdefault(target, {}).setdefault(target_property, []).append(fcurve)

    groups = []
    for target_properties in targets.values():
        groups += list(target_properties.values())

    if multiple_rotation_mode_detected is True:
        gltf2_io_debug.print_console("WARNING", "Multiple rotation mode detected for {}".format(blender_object.name))

    return map(tuple, groups)

def __gather_armature_object_channel_groups(blender_action: bpy.types.Action, blender_object: bpy.types.Object, export_settings):
    """Collect fcurve groups that animate the armature object itself.

    Only fcurves with an empty target object path (i.e. targeting the object,
    not a bone) are considered. Returns an iterator of fcurve tuples grouped
    by (target, property), or an empty tuple for non-armature objects.
    """
    if blender_object.type != "ARMATURE":
        return tuple()

    targets = {}
    delta_rotation_detection = [False, False]  # Normal / Delta

    for fcurve in blender_action.fcurves:
        # Keep only channels targeting the object itself (empty object path)
        object_path = get_target_object_path(fcurve.data_path)
        if object_path != "":
            continue

        # In some invalid files, channel hasn't any keyframes ... such a channel is ignored
        if len(fcurve.keyframe_points) == 0:
            continue

        try:
            target_property = get_target_property_name(fcurve.data_path)
        except:
            gltf2_io_debug.print_console("WARNING", "Invalid animation fcurve name on action {}".format(blender_action.name))
            continue
        target = gltf2_blender_get.get_object_from_datapath(blender_object, object_path)

        # Detect that armature is not multiple keyed for euler and quaternion
        # Keep only the current rotation mode used by bone
        rotation, rotation_modes = get_rotation_modes(target_property)
        delta = get_delta_modes(target_property)

        # Normal and delta rotation are mutually exclusive: the first flavor seen wins
        if delta is False:
            if delta_rotation_detection[1] is True:  # normal rotation coming, but delta is already present
                continue
            delta_rotation_detection[0] = True
        else:
            if delta_rotation_detection[0] is True:  # delta rotation coming, but normal is already present
                continue
            delta_rotation_detection[1] = True

        # Keep only fcurves matching the rotation mode the object actually uses
        if rotation and target.rotation_mode not in rotation_modes:
            continue

        # group channels by target object and affected property of the target
        targets.setdefault(target, {}).setdefault(target_property, []).append(fcurve)

    grouped = []
    for props in targets.values():
        grouped.extend(props.values())

    return map(tuple, grouped)