Welcome to mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorCampbell Barton <ideasman42@gmail.com>2021-03-31 07:03:19 +0300
committerCampbell Barton <ideasman42@gmail.com>2021-03-31 07:03:19 +0300
commit1beca76934b0557655ed86b5f7c0ead49e23130c (patch)
tree435748f4d757c4b838c1e04d3260c10de27f3e51
parentc59a7f44a17241ac764a612f67dbc64dc5178106 (diff)
PyAPI: add bl_rna_utils.decompose_data_path
Utility function for splitting an RNA path, to be used by `bpy.ops.wm.*`
-rw-r--r--release/scripts/modules/bl_rna_utils/data_path.py19
1 file changed, 15 insertions, 4 deletions
diff --git a/release/scripts/modules/bl_rna_utils/data_path.py b/release/scripts/modules/bl_rna_utils/data_path.py
index 330a3b7522d..42942b7a295 100644
--- a/release/scripts/modules/bl_rna_utils/data_path.py
+++ b/release/scripts/modules/bl_rna_utils/data_path.py
@@ -20,10 +20,15 @@
__all__ = (
"property_definition_from_data_path",
+ "decompose_data_path",
)
class _TokenizeDataPath:
- """Class to split up tokens of a data-path."""
+ """
+ Class to split up tokens of a data-path.
+
+ Note that almost all access generates new objects with additional paths,
+ with the exception of iteration which is the intended way to access the resulting data."""
__slots__ = (
"data_path",
)
@@ -49,6 +54,14 @@ class _TokenizeDataPath:
return iter(self.data_path)
+def decompose_data_path(data_path):
+ """
+ Return the components of a data path split into a list.
+ """
+ ns = {"base": _TokenizeDataPath(())}
+ return list(eval("base" + data_path, ns, ns))
+
+
def property_definition_from_data_path(base, data_path):
"""
Return an RNA property definition from an object and a data path.
@@ -56,9 +69,7 @@ def property_definition_from_data_path(base, data_path):
In Blender this is often used with ``context`` as the base and a
path that it references, for example ``.space_data.lock_camera``.
"""
- base_tokenize = _TokenizeDataPath(())
- data = list(eval("base_tokenize" + data_path))
- del base_tokenize
+ data = decompose_data_path(data_path)
while data and (not data[-1].startswith(".")):
data.pop()