git.blender.org/blender.git
Author:    Tamito Kajiyama <rd6t-kjym@asahi-net.or.jp>  2013-01-27 03:49:13 +0400
Committer: Tamito Kajiyama <rd6t-kjym@asahi-net.or.jp>  2013-01-27 03:49:13 +0400
commit 556912792ad3c37c294256a558c96b39f264e7b5 (patch)
tree   9b6ee8cf1ad92ee89c04f27a89be11599c5b40c0 /release/scripts
parent 9251d628db0abe599d927d79170025d8545c8ace (diff)
parent c84383301c5a2582e95259a7e4468a23a3566401 (diff)

Merged changes in the trunk up to revision 54110.

Conflicts resolved:
    source/blender/blenfont/SConscript
    source/blender/blenkernel/intern/subsurf_ccg.c
    source/blender/makesdna/intern/makesdna.c
    source/blender/makesrna/intern/rna_scene.c
Diffstat (limited to 'release/scripts')
-rw-r--r--  release/scripts/modules/addon_utils.py | 6
-rw-r--r--  release/scripts/modules/bl_i18n_utils/bl_process_msg.py | 310
-rwxr-xr-x  release/scripts/modules/bl_i18n_utils/check_po.py | 119
-rwxr-xr-x  release/scripts/modules/bl_i18n_utils/clean_po.py | 27
-rwxr-xr-x  release/scripts/modules/bl_i18n_utils/import_po_from_branches.py | 56
-rwxr-xr-x  release/scripts/modules/bl_i18n_utils/merge_po.py | 105
-rw-r--r--  release/scripts/modules/bl_i18n_utils/settings.py | 74
-rw-r--r--  release/scripts/modules/bl_i18n_utils/spell_check_utils.py | 3
-rwxr-xr-x  release/scripts/modules/bl_i18n_utils/update_branches.py | 39
-rwxr-xr-x  release/scripts/modules/bl_i18n_utils/update_languages_menu.py | 12
-rwxr-xr-x  release/scripts/modules/bl_i18n_utils/update_mo.py | 10
-rwxr-xr-x  release/scripts/modules/bl_i18n_utils/update_po.py | 110
-rwxr-xr-x  release/scripts/modules/bl_i18n_utils/update_pot.py | 79
-rwxr-xr-x  release/scripts/modules/bl_i18n_utils/update_trunk.py | 28
-rw-r--r--  release/scripts/modules/bl_i18n_utils/utils.py | 876
-rw-r--r--  release/scripts/modules/bpy/path.py | 1
-rw-r--r--  release/scripts/modules/bpy/utils.py | 3
-rw-r--r--  release/scripts/modules/bpy_extras/anim_utils.py | 13
-rw-r--r--  release/scripts/modules/bpy_extras/io_utils.py | 27
-rw-r--r--  release/scripts/modules/bpy_extras/mesh_utils.py | 6
-rw-r--r--  release/scripts/modules/bpy_extras/object_utils.py | 2
-rw-r--r--  release/scripts/modules/bpy_extras/view3d_utils.py | 4
-rw-r--r--  release/scripts/modules/bpy_restrict_state.py | 9
-rw-r--r--  release/scripts/modules/bpy_types.py | 9
-rw-r--r--  release/scripts/modules/rna_prop_ui.py | 3
-rw-r--r--  release/scripts/presets/keyconfig/maya.py | 32
-rw-r--r--  release/scripts/presets/operator/wm.collada_export/second_life_rigged.py | 1
-rw-r--r--  release/scripts/presets/operator/wm.collada_export/second_life_static.py | 1
-rw-r--r--  release/scripts/startup/bl_operators/__init__.py | 1
-rw-r--r--  release/scripts/startup/bl_operators/node.py | 4
-rw-r--r--  release/scripts/startup/bl_operators/object.py | 26
-rw-r--r--  release/scripts/startup/bl_operators/object_align.py | 12
-rw-r--r--  release/scripts/startup/bl_operators/object_quick_effects.py | 8
-rw-r--r--  release/scripts/startup/bl_operators/object_randomize_transform.py | 8
-rw-r--r--  release/scripts/startup/bl_operators/presets.py | 12
-rw-r--r--  release/scripts/startup/bl_operators/rigidbody.py | 251
-rw-r--r--  release/scripts/startup/bl_operators/uvcalc_follow_active.py | 15
-rw-r--r--  release/scripts/startup/bl_operators/uvcalc_lightmap.py | 18
-rw-r--r--  release/scripts/startup/bl_operators/wm.py | 23
-rw-r--r--  release/scripts/startup/bl_ui/__init__.py | 3
-rw-r--r--  release/scripts/startup/bl_ui/properties_data_mesh.py | 4
-rw-r--r--  release/scripts/startup/bl_ui/properties_data_modifier.py | 56
-rw-r--r--  release/scripts/startup/bl_ui/properties_game.py | 5
-rw-r--r--  release/scripts/startup/bl_ui/properties_mask_common.py | 6
-rw-r--r--  release/scripts/startup/bl_ui/properties_object.py | 1
-rw-r--r--  release/scripts/startup/bl_ui/properties_object_constraint.py | 1
-rw-r--r--  release/scripts/startup/bl_ui/properties_paint_common.py | 44
-rw-r--r--  release/scripts/startup/bl_ui/properties_physics_common.py | 35
-rw-r--r--  release/scripts/startup/bl_ui/properties_physics_dynamicpaint.py | 2
-rw-r--r--  release/scripts/startup/bl_ui/properties_physics_rigidbody.py | 133
-rw-r--r--  release/scripts/startup/bl_ui/properties_physics_rigidbody_constraint.py | 208
-rw-r--r--  release/scripts/startup/bl_ui/properties_physics_smoke.py | 24
-rw-r--r--  release/scripts/startup/bl_ui/properties_render.py | 3
-rw-r--r--  release/scripts/startup/bl_ui/properties_scene.py | 200
-rw-r--r--  release/scripts/startup/bl_ui/properties_texture.py | 39
-rw-r--r--  release/scripts/startup/bl_ui/space_clip.py | 18
-rw-r--r--  release/scripts/startup/bl_ui/space_dopesheet.py | 4
-rw-r--r--  release/scripts/startup/bl_ui/space_image.py | 5
-rw-r--r--  release/scripts/startup/bl_ui/space_info.py | 7
-rw-r--r--  release/scripts/startup/bl_ui/space_text.py | 2
-rw-r--r--  release/scripts/startup/bl_ui/space_time.py | 1
-rw-r--r--  release/scripts/startup/bl_ui/space_userpref.py | 38
-rw-r--r--  release/scripts/startup/bl_ui/space_view3d.py | 37
-rw-r--r--  release/scripts/startup/bl_ui/space_view3d_toolbar.py | 35
64 files changed, 2243 insertions, 1011 deletions
diff --git a/release/scripts/modules/addon_utils.py b/release/scripts/modules/addon_utils.py
index 65ea91cf8ce..02115054396 100644
--- a/release/scripts/modules/addon_utils.py
+++ b/release/scripts/modules/addon_utils.py
@@ -208,9 +208,9 @@ def check(module_name):
if loaded_state is Ellipsis:
print("Warning: addon-module %r found module "
- "but without __addon_enabled__ field, "
- "possible name collision from file: %r" %
- (module_name, getattr(mod, "__file__", "<unknown>")))
+ "but without __addon_enabled__ field, "
+ "possible name collision from file: %r" %
+ (module_name, getattr(mod, "__file__", "<unknown>")))
loaded_state = False
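
The hunk above only re-indents the continuation lines of a wrapped warning; because adjacent string literals are concatenated at compile time, the message itself is unchanged. A minimal standalone sketch of that idiom (the values are placeholders, not Blender API):

    # Adjacent string literals are joined into one format string at compile time,
    # so re-indenting the continuation lines does not change the output.
    module_name = "object_tools"   # hypothetical add-on module name
    mod_file = "<unknown>"         # stand-in for getattr(mod, "__file__", "<unknown>")

    print("Warning: addon-module %r found module "
          "but without __addon_enabled__ field, "
          "possible name collision from file: %r" %
          (module_name, mod_file))
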
diff --git a/release/scripts/modules/bl_i18n_utils/bl_process_msg.py b/release/scripts/modules/bl_i18n_utils/bl_process_msg.py
index 7e9266d0530..5d2f90f0da7 100644
--- a/release/scripts/modules/bl_i18n_utils/bl_process_msg.py
+++ b/release/scripts/modules/bl_i18n_utils/bl_process_msg.py
@@ -23,27 +23,41 @@
# You should not directly use this script, rather use update_msg.py!
import os
+import re
+import collections
+import copy
# XXX Relative import does not work here when used from Blender...
from bl_i18n_utils import settings
+import bpy
-#classes = set()
-
+print(dir(settings))
SOURCE_DIR = settings.SOURCE_DIR
CUSTOM_PY_UI_FILES = [os.path.abspath(os.path.join(SOURCE_DIR, p)) for p in settings.CUSTOM_PY_UI_FILES]
FILE_NAME_MESSAGES = settings.FILE_NAME_MESSAGES
-COMMENT_PREFIX = settings.COMMENT_PREFIX
-CONTEXT_PREFIX = settings.CONTEXT_PREFIX
+MSG_COMMENT_PREFIX = settings.MSG_COMMENT_PREFIX
+MSG_CONTEXT_PREFIX = settings.MSG_CONTEXT_PREFIX
CONTEXT_DEFAULT = settings.CONTEXT_DEFAULT
+#CONTEXT_DEFAULT = bpy.app.i18n.contexts.default # XXX Not yet! :)
UNDOC_OPS_STR = settings.UNDOC_OPS_STR
NC_ALLOWED = settings.WARN_MSGID_NOT_CAPITALIZED_ALLOWED
+##### Utils #####
+
+# check for strings like ": %d"
+ignore_reg = re.compile(r"^(?:[-*.()/\\+:%xWXYZ0-9]|%d|%f|%s|%r|\s)*$")
+filter_message = ignore_reg.match
+
+
def check(check_ctxt, messages, key, msgsrc):
+ """
+ Performs a set of checks over the given key (context, message)...
+ """
if check_ctxt is None:
return
multi_rnatip = check_ctxt.get("multi_rnatip")
@@ -73,7 +87,79 @@ def check(check_ctxt, messages, key, msgsrc):
undoc_ops.add(key)
+def print_warnings(check_ctxt, messages):
+ if check_ctxt is not None:
+ print("WARNINGS:")
+ keys = set()
+ for c in check_ctxt.values():
+ keys |= c
+ # XXX Temp, see below
+ keys -= check_ctxt["multi_rnatip"]
+ for key in keys:
+ if key in check_ctxt["undoc_ops"]:
+ print("\tThe following operators are undocumented:")
+ else:
+ print("\t“{}”|“{}”:".format(*key))
+ if key in check_ctxt["multi_lines"]:
+ print("\t\t-> newline in this message!")
+ if key in check_ctxt["not_capitalized"]:
+ print("\t\t-> message not capitalized!")
+ if key in check_ctxt["end_point"]:
+ print("\t\t-> message with endpoint!")
+ # XXX Hide this one for now, too much false positives.
+# if key in check_ctxt["multi_rnatip"]:
+# print("\t\t-> tip used in several RNA items")
+ if key in check_ctxt["py_in_rna"]:
+ print("\t\t-> RNA message also used in py UI code:")
+ print("\t\t{}".format("\n\t\t".join(messages[key])))
+
+
+def enable_addons(addons={}, support={}, disable=False):
+ """
+ Enable (or disable) addons based either on a set of names, or a set of 'support' types.
+ Returns the list of all affected addons (as fake modules)!
+ """
+ import addon_utils
+ import bpy
+
+ userpref = bpy.context.user_preferences
+ used_ext = {ext.module for ext in userpref.addons}
+
+ ret = [mod for mod in addon_utils.modules(addon_utils.addons_fake_modules)
+ if ((addons and mod.__name__ in addons) or
+ (not addons and addon_utils.module_bl_info(mod)["support"] in support))]
+
+ for mod in ret:
+ module_name = mod.__name__
+ if disable:
+ if module_name not in used_ext:
+ continue
+ print(" Disabling module ", module_name)
+ bpy.ops.wm.addon_disable(module=module_name)
+ else:
+ if module_name in used_ext:
+ continue
+ print(" Enabling module ", module_name)
+ bpy.ops.wm.addon_enable(module=module_name)
+
+ # XXX There are currently some problems with bpy/rna...
+ # *Very* tricky to solve!
+ # So this is a hack to make all newly added operator visible by
+ # bpy.types.OperatorProperties.__subclasses__()
+ for cat in dir(bpy.ops):
+ cat = getattr(bpy.ops, cat)
+ for op in dir(cat):
+ getattr(cat, op).get_rna()
+
+ return ret
+
+
+##### RNA #####
+
def dump_messages_rna(messages, check_ctxt):
+ """
+ Dump into messages dict all RNA-defined UI messages (labels and tooltips).
+ """
import bpy
def classBlackList():
@@ -257,20 +343,17 @@ def dump_messages_rna(messages, check_ctxt):
# Parse everything (recursively parsing from bpy_struct "class"...).
processed = process_cls_list(type(bpy.context).__base__.__subclasses__())
print("{} classes processed!".format(processed))
-# import pickle
-# global classes
-# classes = {str(c) for c in classes}
-# with open("/home/i7deb64/Bureau/tpck_2", "wb") as f:
-# pickle.dump(classes, f, protocol=0)
from bpy_extras.keyconfig_utils import KM_HIERARCHY
walk_keymap_hierarchy(KM_HIERARCHY, "KM_HIERARCHY")
-def dump_messages_pytext(messages, check_ctxt):
- """ dumps text inlined in the python user interface: eg.
+##### Python source code #####
+def dump_py_messages_from_files(messages, check_ctxt, files):
+ """
+ Dump text inlined in the python files given, e.g. 'My Name' in:
layout.prop("someprop", text="My Name")
"""
import ast
@@ -278,7 +361,6 @@ def dump_messages_pytext(messages, check_ctxt):
# -------------------------------------------------------------------------
# Gather function names
- import bpy
# key: func_id
# val: [(arg_kw, arg_pos), (arg_kw, arg_pos), ...]
func_translate_args = {}
@@ -290,15 +372,12 @@ def dump_messages_pytext(messages, check_ctxt):
# E.g. we don’t want to get strings inside subscripts (blah["foo"])!
stopper_nodes = {ast.Subscript, }
+ # For now only consider functions from UILayout...
for func_id, func in bpy.types.UILayout.bl_rna.functions.items():
- # check it has a 'text' argument
+ # check it has one or more arguments as defined in translate_kw
for (arg_pos, (arg_kw, arg)) in enumerate(func.parameters.items()):
- if ((arg_kw in translate_kw) and
- (arg.is_output is False) and
- (arg.type == 'STRING')):
-
- func_translate_args.setdefault(func_id, []).append((arg_kw,
- arg_pos))
+ if ((arg_kw in translate_kw) and (arg.is_output is False) and (arg.type == 'STRING')):
+ func_translate_args.setdefault(func_id, []).append((arg_kw, arg_pos))
# print(func_translate_args)
check_ctxt_py = None
@@ -308,19 +387,23 @@ def dump_messages_pytext(messages, check_ctxt):
"not_capitalized": check_ctxt["not_capitalized"],
"end_point": check_ctxt["end_point"]}
- # -------------------------------------------------------------------------
- # Function definitions
-
+ # Helper function
def extract_strings(fp_rel, node):
- """ Recursively get strings, needed in case we have "Blah" + "Blah",
- passed as an argument in that case it wont evaluate to a string.
- However, break on some kind of stopper nodes, like e.g. Subscript.
"""
-
+ Recursively get strings, needed in case we have "Blah" + "Blah", passed as an argument in that case it won't
+ evaluate to a string. However, break on some kind of stopper nodes, like e.g. Subscript.
+ """
if type(node) == ast.Str:
eval_str = ast.literal_eval(node)
if eval_str:
- key = (CONTEXT_DEFAULT, eval_str)
+ # Parse optional context included in string!
+ # XXX Not yet!
+ #if bpy.app.i18n.context_sep in eval_str:
+ #key = eval_str.split(bpy.app.i18n.context_sep, 1)
+ if 0:
+ pass
+ else:
+ key = (CONTEXT_DEFAULT, eval_str)
msgsrc = "{}:{}".format(fp_rel, node.lineno)
check(check_ctxt_py, messages, key, msgsrc)
messages.setdefault(key, []).append(msgsrc)
@@ -330,10 +413,9 @@ def dump_messages_pytext(messages, check_ctxt):
if type(nd) not in stopper_nodes:
extract_strings(fp_rel, nd)
- def extract_strings_from_file(fp):
- filedata = open(fp, 'r', encoding="utf8")
- root_node = ast.parse(filedata.read(), fp, 'exec')
- filedata.close()
+ for fp in files:
+ with open(fp, 'r', encoding="utf8") as filedata:
+ root_node = ast.parse(filedata.read(), fp, 'exec')
fp_rel = os.path.relpath(fp, SOURCE_DIR)
@@ -361,72 +443,90 @@ def dump_messages_pytext(messages, check_ctxt):
if kw.arg == arg_kw:
extract_strings(fp_rel, kw.value)
- # -------------------------------------------------------------------------
- # Dump Messages
- mod_dir = os.path.join(SOURCE_DIR,
- "release",
- "scripts",
- "startup",
- "bl_ui")
+def dump_py_messages(messages, check_ctxt, addons):
+ mod_dir = os.path.join(SOURCE_DIR, "release", "scripts", "startup", "bl_ui")
- files = [os.path.join(mod_dir, fn)
- for fn in sorted(os.listdir(mod_dir))
- if not fn.startswith("_")
- if fn.endswith("py")
- ]
+ files = [os.path.join(mod_dir, fn) for fn in sorted(os.listdir(mod_dir))
+ if not fn.startswith("_") if fn.endswith("py")]
# Dummy Cycles has its py addon in its own dir!
files += CUSTOM_PY_UI_FILES
- for fp in files:
- extract_strings_from_file(fp)
+ # Add all addons we support in main translation file!
+ for mod in addons:
+ fn = mod.__file__
+ if os.path.basename(fn) == "__init__.py":
+ mod_dir = os.path.dirname(fn)
+ files += [fn for fn in sorted(os.listdir(mod_dir))
+ if os.path.isfile(fn) and os.path.splitext(fn)[1] == ".py"]
+ else:
+ files.append(fn)
+
+ dump_py_messages_from_files(messages, check_ctxt, files)
+##### Main functions! #####
+
def dump_messages(do_messages, do_checks):
- import collections
- import re
+ messages = getattr(collections, 'OrderedDict', dict)()
- def enable_addons():
- """For now, enable all official addons, before extracting msgids."""
- import addon_utils
- import bpy
+ messages[(CONTEXT_DEFAULT, "")] = []
- userpref = bpy.context.user_preferences
- used_ext = {ext.module for ext in userpref.addons}
- support = {"OFFICIAL"}
- # collect the categories that can be filtered on
- addons = [(mod, addon_utils.module_bl_info(mod)) for mod in
- addon_utils.modules(addon_utils.addons_fake_modules)]
+ # Enable all wanted addons.
+ # For now, enable all official addons, before extracting msgids.
+ addons = enable_addons(support={"OFFICIAL"})
- for mod, info in addons:
- module_name = mod.__name__
- if module_name in used_ext or info["support"] not in support:
- continue
- print(" Enabling module ", module_name)
- bpy.ops.wm.addon_enable(module=module_name)
+ check_ctxt = None
+ if do_checks:
+ check_ctxt = {"multi_rnatip": set(),
+ "multi_lines": set(),
+ "py_in_rna": set(),
+ "not_capitalized": set(),
+ "end_point": set(),
+ "undoc_ops": set()}
+
+ # get strings from RNA
+ dump_messages_rna(messages, check_ctxt)
- # XXX There are currently some problems with bpy/rna...
- # *Very* tricky to solve!
- # So this is a hack to make all newly added operator visible by
- # bpy.types.OperatorProperties.__subclasses__()
- for cat in dir(bpy.ops):
- cat = getattr(bpy.ops, cat)
- for op in dir(cat):
- getattr(cat, op).get_rna()
+ # get strings from UI layout definitions text="..." args
+ dump_py_messages(messages, check_ctxt, addons)
+
+ del messages[(CONTEXT_DEFAULT, "")]
+
+ print_warnings(check_ctxt, messages)
+
+ if do_messages:
+ print("Writing messages…")
+ num_written = 0
+ num_filtered = 0
+ with open(FILE_NAME_MESSAGES, 'w', encoding="utf8") as message_file:
+ for (ctx, key), value in messages.items():
+ # filter out junk values
+ if filter_message(key):
+ num_filtered += 1
+ continue
+
+ # Remove newlines in key and values!
+ message_file.write("\n".join(MSG_COMMENT_PREFIX + msgsrc.replace("\n", "") for msgsrc in value))
+ message_file.write("\n")
+ if ctx:
+ message_file.write(MSG_CONTEXT_PREFIX + ctx.replace("\n", "") + "\n")
+ message_file.write(key.replace("\n", "") + "\n")
+ num_written += 1
+
+ print("Written {} messages to: {} ({} were filtered out)."
+ "".format(num_written, FILE_NAME_MESSAGES, num_filtered))
- # check for strings like ": %d"
- ignore_reg = re.compile(r"^(?:[-*.()/\\+:%xWXYZ0-9]|%d|%f|%s|%r|\s)*$")
- filter_message = ignore_reg.match
+def dump_addon_messages(module_name, messages_formats, do_checks):
messages = getattr(collections, 'OrderedDict', dict)()
messages[(CONTEXT_DEFAULT, "")] = []
-
- # Enable all wanted addons.
- enable_addons()
+ minus_messages = copy.deepcopy(messages)
check_ctxt = None
+ minus_check_ctxt = None
if do_checks:
check_ctxt = {"multi_rnatip": set(),
"multi_lines": set(),
@@ -434,39 +534,44 @@ def dump_messages(do_messages, do_checks):
"not_capitalized": set(),
"end_point": set(),
"undoc_ops": set()}
+ minus_check_ctxt = copy.deepcopy(check_ctxt)
- # get strings from RNA
+ # Get current addon state (loaded or not):
+ was_loaded = addon_utils.check(module_name)[1]
+
+ # Enable our addon and get strings from RNA.
+ enable_addons(addons={module_name})
dump_messages_rna(messages, check_ctxt)
+ # Now disable our addon, and rescan RNA.
+ enable_addons(addons={module_name}, disable=True)
+ dump_messages_rna(minus_messages, minus_check_ctxt)
+
+ # Restore previous state if needed!
+ if was_loaded:
+ enable_addons(addons={module_name})
+
+ # and make the diff!
+ for key in minus_messages:
+ if key == (CONTEXT_DEFAULT, ""):
+ continue
+ del messages[key]
+
+ if check_ctxt:
+ for key in check_ctxt:
+ for warning in minus_check_ctxt[key]:
+ check_ctxt[key].remove(warning)
+
+ # and we are done with those!
+ del minus_messages
+ del minus_check_ctxt
+
# get strings from UI layout definitions text="..." args
dump_messages_pytext(messages, check_ctxt)
del messages[(CONTEXT_DEFAULT, "")]
- if do_checks:
- print("WARNINGS:")
- keys = set()
- for c in check_ctxt.values():
- keys |= c
- # XXX Temp, see below
- keys -= check_ctxt["multi_rnatip"]
- for key in keys:
- if key in check_ctxt["undoc_ops"]:
- print("\tThe following operators are undocumented:")
- else:
- print("\t“{}”|“{}”:".format(*key))
- if key in check_ctxt["multi_lines"]:
- print("\t\t-> newline in this message!")
- if key in check_ctxt["not_capitalized"]:
- print("\t\t-> message not capitalized!")
- if key in check_ctxt["end_point"]:
- print("\t\t-> message with endpoint!")
- # XXX Hide this one for now, too much false positives.
-# if key in check_ctxt["multi_rnatip"]:
-# print("\t\t-> tip used in several RNA items")
- if key in check_ctxt["py_in_rna"]:
- print("\t\t-> RNA message also used in py UI code:")
- print("\t\t{}".format("\n\t\t".join(messages[key])))
+ print_warnings(check_ctxt, messages)
if do_messages:
print("Writing messages…")
@@ -491,6 +596,7 @@ def dump_messages(do_messages, do_checks):
"".format(num_written, FILE_NAME_MESSAGES, num_filtered))
+
def main():
try:
import bpy
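
The largest change in bl_process_msg.py is the reworked Python-side extraction: dump_py_messages_from_files() parses each UI script with the ast module and collects string literals passed to translatable arguments (the argument table is built from bpy.types.UILayout.bl_rna, and Subscript nodes act as "stoppers"). A standalone sketch of that idea follows; the source snippet and file name are invented for illustration, and it uses ast.Constant where the 2013 script used the older ast.Str:

    import ast

    # Hypothetical UI snippet to scan; the real tool reads bl_ui/*.py and addon files from disk.
    src = '''
    layout.prop(scene, "frame_start", text="Start Frame")
    layout.label(text="Rigid Body" + " World")
    data["not extracted"]
    '''

    translate_kw = {"text"}   # the real table is derived from bpy.types.UILayout.bl_rna.functions
    messages = {}             # (context, msgid) -> [source locations]

    def extract_strings(node, lineno):
        # Recurse so split literals like "A" + "B" are still collected,
        # but stop at subscripts ("stopper nodes" in the original script).
        if isinstance(node, ast.Constant) and isinstance(node.value, str):
            key = ("", node.value)   # "" stands in for CONTEXT_DEFAULT
            messages.setdefault(key, []).append("example.py:{}".format(lineno))
        elif not isinstance(node, ast.Subscript):
            for child in ast.iter_child_nodes(node):
                extract_strings(child, lineno)

    for node in ast.walk(ast.parse(src)):
        if isinstance(node, ast.Call):
            for kw in node.keywords:
                if kw.arg in translate_kw:
                    extract_strings(kw.value, node.lineno)

    print(messages)   # three keys: "Start Frame", "Rigid Body", " World"
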
diff --git a/release/scripts/modules/bl_i18n_utils/check_po.py b/release/scripts/modules/bl_i18n_utils/check_po.py
index 2e82047bb95..96f91ee0914 100755
--- a/release/scripts/modules/bl_i18n_utils/check_po.py
+++ b/release/scripts/modules/bl_i18n_utils/check_po.py
@@ -38,45 +38,47 @@ BRANCHES_DIR = settings.BRANCHES_DIR
FILE_NAME_POT = settings.FILE_NAME_POT
-def print_diff(ref_messages, messages, states):
+def print_diff(ref_msgs, msgs):
# Remove comments from messages list!
- messages = set(messages.keys()) - states["comm_msg"]
- unneeded = (messages - ref_messages)
- for msgid in unneeded:
- print('\tUnneeded message id "{}"'.format(msgid))
+ messages = set(msgs.msgs.keys()) - msgs.comm_msgs
+ unneeded = (messages - ref_msgs.msgs.keys())
+ for msgkey in unneeded:
+ print('\tUnneeded message context/id "{}"'.format(msgkey))
- missing = (ref_messages - messages)
- for msgid in missing:
- print('\tMissing message id "{}"'.format(msgid))
+ missing = (ref_msgs.msgs.keys() - messages)
+ for msgkey in missing:
+ print('\tMissing message context/id "{}"'.format(msgkey))
- for msgid in states["comm_msg"]:
- print('\tCommented message id "{}"'.format(msgid))
+ for msgkey in msgs.comm_msgs:
+ print('\tCommented message context/id "{}"'.format(msgkey))
- print("\t{} unneeded messages, {} missing messages, {} commented messages." \
- "".format(len(unneeded), len(missing), len(states["comm_msg"])))
+ print("\t{} unneeded messages, {} missing messages, {} commented messages."
+ "".format(len(unneeded), len(missing), len(msgs.comm_msgs)))
return 0
-def process_po(ref_messages, po, glob_stats, do_stats, do_messages):
+#def process_po(ref_messages, po, glob_stats, do_stats, do_messages):
+def process_po(ref_messages, po, do_stats, do_messages):
print("Checking {}...".format(po))
ret = 0
- messages, states, stats = utils.parse_messages(po)
+ messages = utils.I18nMessages(kind='PO', src=po)
if do_messages:
- t = print_diff(ref_messages, messages, states)
+ t = print_diff(ref_messages, messages)
if t:
ret = t
if do_stats:
print("\tStats:")
- t = utils.print_stats(stats, glob_stats, prefix=" ")
+ t = messages.print_stats(prefix=" ")
if t:
ret = t
- if states["is_broken"]:
+ if messages.parsing_errors:
print("\tERROR! This .po is broken!")
ret = 1
return ret
+# XXX Quick update for new I18Nfoo objects, need rework!
def main():
import argparse
parser = argparse.ArgumentParser(description="Check po’s in branches " \
@@ -97,22 +99,21 @@ def main():
if args.pot:
global FILE_NAME_POT
FILE_NAME_POT = args.pot
- glob_stats = {"nbr" : 0.0,
- "lvl" : 0.0,
- "lvl_ttips" : 0.0,
- "lvl_trans_ttips" : 0.0,
- "lvl_ttips_in_trans": 0.0,
- "lvl_comm" : 0.0,
- "nbr_signs" : 0,
- "nbr_trans_signs" : 0,
- "contexts" : set()}
+ #glob_stats = {"nbr" : 0.0,
+ #"lvl" : 0.0,
+ #"lvl_ttips" : 0.0,
+ #"lvl_trans_ttips" : 0.0,
+ #"lvl_ttips_in_trans": 0.0,
+ #"lvl_comm" : 0.0,
+ #"nbr_signs" : 0,
+ #"nbr_trans_signs" : 0,
+ #"contexts" : set()}
ret = 0
pot_messages = None
if args.messages:
- pot_messages, u1, pot_stats = utils.parse_messages(FILE_NAME_POT)
- pot_messages = set(pot_messages.keys())
- glob_stats["nbr_signs"] = pot_stats["nbr_signs"]
+ pot_messages = utils.I18nMessages(kind='PO', src=FILE_NAME_POT)
+ #glob_stats["nbr_signs"] = pot_stats["nbr_signs"]
if args.langs:
for lang in args.langs:
@@ -121,16 +122,16 @@ def main():
else:
po = os.path.join(BRANCHES_DIR, lang, ".".join((lang, "po")))
if os.path.exists(po):
- t = process_po(pot_messages, po, glob_stats,
- args.stats, args.messages)
+ #t = process_po(pot_messages, po, glob_stats, args.stats, args.messages)
+ t = process_po(pot_messages, po, args.stats, args.messages)
if t:
ret = t
elif args.trunk:
for po in os.listdir(TRUNK_PO_DIR):
if po.endswith(".po"):
po = os.path.join(TRUNK_PO_DIR, po)
- t = process_po(pot_messages, po, glob_stats,
- args.stats, args.messages)
+ #t = process_po(pot_messages, po, glob_stats, args.stats, args.messages)
+ t = process_po(pot_messages, po, args.stats, args.messages)
if t:
ret = t
else:
@@ -138,35 +139,35 @@ def main():
for po in os.listdir(os.path.join(BRANCHES_DIR, lang)):
if po.endswith(".po"):
po = os.path.join(BRANCHES_DIR, lang, po)
- t = process_po(pot_messages, po, glob_stats,
- args.stats, args.messages)
+ #t = process_po(pot_messages, po, glob_stats, args.stats, args.messages)
+ t = process_po(pot_messages, po, args.stats, args.messages)
if t:
ret = t
- if args.stats and glob_stats["nbr"] != 0.0:
- nbr_contexts = len(glob_stats["contexts"] - {""})
- if nbr_contexts != 1:
- if nbr_contexts == 0:
- nbr_contexts = "No"
- _ctx_txt = "s are"
- else:
- _ctx_txt = " is"
- print("\nAverage stats for all {:.0f} processed files:\n"
- " {:>6.1%} done!\n"
- " {:>6.1%} of messages are tooltips.\n"
- " {:>6.1%} of tooltips are translated.\n"
- " {:>6.1%} of translated messages are tooltips.\n"
- " {:>6.1%} of messages are commented.\n"
- " The org msgids are currently made of {} signs.\n"
- " All processed translations are currently made of {} signs.\n"
- " {} specific context{} present:\n {}\n"
- "".format(glob_stats["nbr"], glob_stats["lvl"] / glob_stats["nbr"],
- glob_stats["lvl_ttips"] / glob_stats["nbr"],
- glob_stats["lvl_trans_ttips"] / glob_stats["nbr"],
- glob_stats["lvl_ttips_in_trans"] / glob_stats["nbr"],
- glob_stats["lvl_comm"] / glob_stats["nbr"], glob_stats["nbr_signs"],
- glob_stats["nbr_trans_signs"], nbr_contexts, _ctx_txt,
- "\n ".join(glob_stats["contexts"]-{""})))
+ #if args.stats and glob_stats["nbr"] != 0.0:
+ #nbr_contexts = len(glob_stats["contexts"] - {""})
+ #if nbr_contexts != 1:
+ #if nbr_contexts == 0:
+ #nbr_contexts = "No"
+ #_ctx_txt = "s are"
+ #else:
+ #_ctx_txt = " is"
+ #print("\nAverage stats for all {:.0f} processed files:\n"
+ #" {:>6.1%} done!\n"
+ #" {:>6.1%} of messages are tooltips.\n"
+ #" {:>6.1%} of tooltips are translated.\n"
+ #" {:>6.1%} of translated messages are tooltips.\n"
+ #" {:>6.1%} of messages are commented.\n"
+ #" The org msgids are currently made of {} signs.\n"
+ #" All processed translations are currently made of {} signs.\n"
+ #" {} specific context{} present:\n {}\n"
+ #"".format(glob_stats["nbr"], glob_stats["lvl"] / glob_stats["nbr"],
+ #glob_stats["lvl_ttips"] / glob_stats["nbr"],
+ #glob_stats["lvl_trans_ttips"] / glob_stats["nbr"],
+ #glob_stats["lvl_ttips_in_trans"] / glob_stats["nbr"],
+ #glob_stats["lvl_comm"] / glob_stats["nbr"], glob_stats["nbr_signs"],
+ #glob_stats["nbr_trans_signs"], nbr_contexts, _ctx_txt,
+ #"\n ".join(glob_stats["contexts"]-{""})))
return ret
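
print_diff() above reduces to set arithmetic over (msgctxt, msgid) keys: active entries of the .po that are absent from the reference .pot are "unneeded", the reverse are "missing", and commented entries are excluded up front. A standalone sketch with plain dicts and sets (the real code keeps these on a utils.I18nMessages object):

    # Keys are (msgctxt, msgid) pairs; values would be the translations.
    ref_msgs = {("", "Start Frame"): "", ("", "End Frame"): "", ("Operator", "Add"): ""}
    po_msgs = {("", "Start Frame"): "Image de départ", ("", "Old Entry"): "..."}
    comm_msgs = {("", "Old Entry")}      # entries commented out in the .po

    active = set(po_msgs) - comm_msgs
    unneeded = active - set(ref_msgs)    # present in the po but not in the pot
    missing = set(ref_msgs) - active     # present in the pot but not (actively) in the po

    print("{} unneeded, {} missing, {} commented".format(len(unneeded), len(missing), len(comm_msgs)))
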
diff --git a/release/scripts/modules/bl_i18n_utils/clean_po.py b/release/scripts/modules/bl_i18n_utils/clean_po.py
index 2924ad9fb74..da8d25cb9f4 100755
--- a/release/scripts/modules/bl_i18n_utils/clean_po.py
+++ b/release/scripts/modules/bl_i18n_utils/clean_po.py
@@ -39,30 +39,27 @@ BRANCHES_DIR = settings.BRANCHES_DIR
def do_clean(po, strict):
print("Cleaning {}...".format(po))
- messages, states, u1 = utils.parse_messages(po)
+ msgs = utils.I18nMessages(kind='PO', src=po)
- if strict and states["is_broken"]:
+ if strict and msgs.parsing_errors:
print("ERROR! This .po file is broken!")
return 1
- for msgkey in states["comm_msg"]:
- del messages[msgkey]
- utils.write_messages(po, messages, states["comm_msg"], states["fuzzy_msg"])
- print("Removed {} commented messages.".format(len(states["comm_msg"])))
+ nbr_rem = len(msgs.comm_msgs)
+ for msgkey in msgs.comm_msgs:
+ del msgs.msgs[msgkey]
+ msgs.write(kind='PO', dest=po)
+ print("Removed {} commented messages.".format(nbr_rem))
return 0
def main():
import argparse
- parser = argparse.ArgumentParser(description="Clean po’s in branches " \
- "or trunk (i.e. remove " \
- "all commented messages).")
- parser.add_argument('-t', '--trunk', action="store_true",
- help="Clean po’s in trunk rather than branches.")
- parser.add_argument('-s', '--strict', action="store_true",
- help="Raise an error if a po is broken.")
- parser.add_argument('langs', metavar='ISO_code', nargs='*',
- help="Restrict processed languages to those.")
+ parser = argparse.ArgumentParser(description="Clean po’s in branches or trunk (i.e. remove all commented "
+ "messages).")
+ parser.add_argument('-t', '--trunk', action="store_true", help="Clean po’s in trunk rather than branches.")
+ parser.add_argument('-s', '--strict', action="store_true", help="Raise an error if a po is broken.")
+ parser.add_argument('langs', metavar='ISO_code', nargs='*', help="Restrict processed languages to those.")
args = parser.parse_args()
ret = 0
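
The rewritten do_clean() performs the same operation as before through the new I18nMessages object: drop every commented entry, then write the catalogue back. A standalone sketch of the logic with plain containers (I18nMessages itself only exists inside Blender's i18n tooling):

    msgs = {("", "Start Frame"): "Image de départ", ("", "Obsolete"): "vieux"}
    comm_msgs = {("", "Obsolete")}   # entries that are commented out in the .po

    nbr_rem = len(comm_msgs)
    for msgkey in comm_msgs:
        del msgs[msgkey]

    # The real script would now call msgs.write(kind='PO', dest=po).
    print("Removed {} commented messages.".format(nbr_rem))
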
diff --git a/release/scripts/modules/bl_i18n_utils/import_po_from_branches.py b/release/scripts/modules/bl_i18n_utils/import_po_from_branches.py
index 533dded3c57..956d2e96154 100755
--- a/release/scripts/modules/bl_i18n_utils/import_po_from_branches.py
+++ b/release/scripts/modules/bl_i18n_utils/import_po_from_branches.py
@@ -26,7 +26,6 @@ import os
import shutil
import sys
import subprocess
-from codecs import open
try:
import settings
@@ -48,14 +47,10 @@ PY3 = settings.PYTHON3_EXEC
def main():
import argparse
- parser = argparse.ArgumentParser(description="Import advanced enough po’s " \
- "from branches to trunk.")
- parser.add_argument('-t', '--threshold', type=int,
- help="Import threshold, as a percentage.")
- parser.add_argument('-s', '--strict', action="store_true",
- help="Raise an error if a po is broken.")
- parser.add_argument('langs', metavar='ISO_code', nargs='*',
- help="Restrict processed languages to those.")
+ parser = argparse.ArgumentParser(description="Import advanced enough po’s from branches to trunk.")
+ parser.add_argument('-t', '--threshold', type=float, help="Import threshold, as a percentage.")
+ parser.add_argument('-s', '--strict', action="store_true", help="Raise an error if a po is broken.")
+ parser.add_argument('langs', metavar='ISO_code', nargs='*', help="Restrict processed languages to those.")
args = parser.parse_args()
ret = 0
@@ -70,51 +65,40 @@ def main():
po = os.path.join(BRANCHES_DIR, lang, ".".join((lang, "po")))
if os.path.exists(po):
po_is_rtl = os.path.join(BRANCHES_DIR, lang, RTL_PREPROCESS_FILE)
- msgs, state, stats = utils.parse_messages(po)
- tot_msgs = stats["tot_msg"]
- trans_msgs = stats["trans_msg"]
+ msgs = utils.I18nMessages(iso=lang, kind='PO', src=po)
lvl = 0.0
- if tot_msgs:
- lvl = float(trans_msgs) / float(tot_msgs)
+ if msgs.nbr_msgs:
+ lvl = msgs.nbr_trans_msgs / msgs.nbr_msgs
if lvl > threshold:
- if state["is_broken"] and args.strict:
- print("{:<10}: {:>6.1%} done, but BROKEN, skipped." \
- "".format(lang, lvl))
+ if msgs.parsing_errors and args.strict:
+ print("{:<10}: {:>6.1%} done, but BROKEN, skipped.".format(lang, lvl))
ret = 1
else:
if os.path.exists(po_is_rtl):
- out_po = os.path.join(TRUNK_PO_DIR,
- ".".join((lang, "po")))
- out_raw_po = os.path.join(TRUNK_PO_DIR,
- "_".join((lang, "raw.po")))
+ out_po = os.path.join(TRUNK_PO_DIR, ".".join((lang, "po")))
+ out_raw_po = os.path.join(TRUNK_PO_DIR, "_".join((lang, "raw.po")))
keys = []
trans = []
- for k, m in msgs.items():
+ for k, m in msgs.msgs.items():
keys.append(k)
- trans.append("".join(m["msgstr_lines"]))
+ trans.append(m.msgstr)
trans = rtl_preprocess.log2vis(trans)
for k, t in zip(keys, trans):
- # Mono-line for now...
- msgs[k]["msgstr_lines"] = [t]
- utils.write_messages(out_po, msgs, state["comm_msg"],
- state["fuzzy_msg"])
+ msgs.msgs[k].msgstr = t
+ msgs.write(kind='PO', dest=out_po)
# Also copies org po!
shutil.copy(po, out_raw_po)
- print("{:<10}: {:>6.1%} done, enough translated " \
- "messages, processed and copied to trunk." \
+ print("{:<10}: {:>6.1%} done, enough translated messages, processed and copied to trunk."
"".format(lang, lvl))
else:
shutil.copy(po, TRUNK_PO_DIR)
- print("{:<10}: {:>6.1%} done, enough translated " \
- "messages, copied to trunk.".format(lang, lvl))
+ print("{:<10}: {:>6.1%} done, enough translated messages, copied to trunk.".format(lang, lvl))
else:
- if state["is_broken"] and args.strict:
- print("{:<10}: {:>6.1%} done, BROKEN and not enough " \
- "translated messages, skipped".format(lang, lvl))
+ if msgs.parsing_errors and args.strict:
+ print("{:<10}: {:>6.1%} done, BROKEN and not enough translated messages, skipped".format(lang, lvl))
ret = 1
else:
- print("{:<10}: {:>6.1%} done, not enough translated " \
- "messages, skipped.".format(lang, lvl))
+ print("{:<10}: {:>6.1%} done, not enough translated messages, skipped.".format(lang, lvl))
return ret
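
The promotion test above now reads the completion level straight off the parsed catalogue (lvl = nbr_trans_msgs / nbr_msgs) and compares it to the --threshold option, which is now parsed as a float. A minimal sketch of that gate, assuming the threshold has already been normalized to the same 0-1 scale as lvl:

    def should_promote(nbr_trans_msgs, nbr_msgs, threshold, broken=False, strict=False):
        # Completion ratio, guarding against an empty catalogue.
        lvl = (nbr_trans_msgs / nbr_msgs) if nbr_msgs else 0.0
        if lvl <= threshold:
            return False, "{:>6.1%} done, not enough translated messages, skipped.".format(lvl)
        if broken and strict:
            return False, "{:>6.1%} done, but BROKEN, skipped.".format(lvl)
        return True, "{:>6.1%} done, enough translated messages, copied to trunk.".format(lvl)

    print(should_promote(820, 1000, 0.60))
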
diff --git a/release/scripts/modules/bl_i18n_utils/merge_po.py b/release/scripts/modules/bl_i18n_utils/merge_po.py
index baf67de2e81..610be0f15bd 100755
--- a/release/scripts/modules/bl_i18n_utils/merge_po.py
+++ b/release/scripts/modules/bl_i18n_utils/merge_po.py
@@ -37,26 +37,22 @@ except:
from . import (settings, utils)
+# XXX This is a quick hack to make it work with new I18n... objects! To be reworked!
def main():
import argparse
- parser = argparse.ArgumentParser(description="" \
- "Merge one or more .po files into the first dest one.\n" \
- "If a msgkey (msgctxt, msgid) is present in more than " \
- "one merged po, the one in the first file wins, unless " \
- "it’s marked as fuzzy and one later is not.\n" \
- "The fuzzy flag is removed if necessary.\n" \
- "All other comments are never modified.\n" \
- "Commented messages in dst will always remain " \
- "commented, and commented messages are never merged " \
+ parser = argparse.ArgumentParser(description=""
+ "Merge one or more .po files into the first dest one.\n"
+ "If a msgkey (msgctxt, msgid) is present in more than one merged po, the one in the first file "
+ "wins, unless it’s marked as fuzzy and one later is not.\n"
+ "The fuzzy flag is removed if necessary.\n"
+ "All other comments are never modified.\n"
+ "Commented messages in dst will always remain commented, and commented messages are never merged "
"from sources.")
- parser.add_argument('-s', '--stats', action="store_true",
- help="Show statistics info.")
+ parser.add_argument('-s', '--stats', action="store_true", help="Show statistics info.")
parser.add_argument('-r', '--replace', action="store_true",
help="Replace existing messages of same \"level\" already in dest po.")
- parser.add_argument('dst', metavar='dst.po',
- help="The dest po into which merge the others.")
- parser.add_argument('src', metavar='src.po', nargs='+',
- help="The po's to merge into the dst.po one.")
+ parser.add_argument('dst', metavar='dst.po', help="The dest po into which merge the others.")
+ parser.add_argument('src', metavar='src.po', nargs='+', help="The po's to merge into the dst.po one.")
args = parser.parse_args()
ret = 0
@@ -67,89 +63,78 @@ def main():
nbr_added = 0
nbr_unfuzzied = 0
- dst_messages, dst_states, dst_stats = utils.parse_messages(args.dst)
- if dst_states["is_broken"]:
+ dst_msgs = utils.I18nMessages(kind='PO', src=args.dst)
+ if dst_msgs.parsing_errors:
print("Dest po is BROKEN, aborting.")
return 1
if args.stats:
print("Dest po, before merging:")
- utils.print_stats(dst_stats, prefix="\t")
- # If we don’t want to replace existing valid translations, pre-populate
- # done_msgkeys and done_fuzzy_msgkeys.
+ dst_msgs.print_stats(prefix="\t")
+ # If we don’t want to replace existing valid translations, pre-populate done_msgkeys and done_fuzzy_msgkeys.
if not args.replace:
- done_msgkeys = dst_states["trans_msg"].copy()
- done_fuzzy_msgkeys = dst_states["fuzzy_msg"].copy()
+ done_msgkeys = dst_msgs.trans_msgs.copy()
+ done_fuzzy_msgkeys = dst_msgs.fuzzy_msgs.copy()
for po in args.src:
- messages, states, stats = utils.parse_messages(po)
- if states["is_broken"]:
+ msgs = utils.I18nMessages(kind='PO', src=po)
+ if msgs.parsing_errors:
print("\tSrc po {} is BROKEN, skipping.".format(po))
ret = 1
continue
print("\tMerging {}...".format(po))
if args.stats:
print("\t\tMerged po stats:")
- utils.print_stats(stats, prefix="\t\t\t")
- for msgkey, val in messages.items():
+ msgs.print_stats(prefix="\t\t\t")
+ for msgkey, msg in msgs.msgs.items():
msgctxt, msgid = msgkey
# This msgkey has already been completely merged, or is a commented one,
# or the new message is commented, skip it.
- if msgkey in (done_msgkeys | dst_states["comm_msg"] | states["comm_msg"]):
+ if msgkey in (done_msgkeys | dst_msgs.comm_msgs | msgs.comm_msgs):
continue
- is_ttip = utils.is_tooltip(msgid)
+ is_ttip = msg.is_tooltip
# New messages does not yet exists in dest.
- if msgkey not in dst_messages:
- dst_messages[msgkey] = messages[msgkey]
- if msgkey in states["fuzzy_msg"]:
+ if msgkey not in dst_msgs.msgs:
+ dst_msgs[msgkey] = msgs.msgs[msgkey]
+ if msgkey in msgs.fuzzy_msgs:
done_fuzzy_msgkeys.add(msgkey)
- dst_states["fuzzy_msg"].add(msgkey)
- elif msgkey in states["trans_msg"]:
+ dst_msgs.fuzzy_msgs.add(msgkey)
+ elif msgkey in msgs.trans_msgs:
done_msgkeys.add(msgkey)
- dst_states["trans_msg"].add(msgkey)
- dst_stats["trans_msg"] += 1
- if is_ttip:
- dst_stats["trans_ttips"] += 1
+ dst_msgs.trans_msgs.add(msgkey)
nbr_added += 1
- dst_stats["tot_msg"] += 1
- if is_ttip:
- dst_stats["tot_ttips"] += 1
# From now on, the new messages is already in dst.
# New message is neither translated nor fuzzy, skip it.
- elif msgkey not in (states["trans_msg"] | states["fuzzy_msg"]):
+ elif msgkey not in (msgs.trans_msgs | msgs.fuzzy_msgs):
continue
# From now on, the new message is either translated or fuzzy!
# The new message is translated.
- elif msgkey in states["trans_msg"]:
- dst_messages[msgkey]["msgstr_lines"] = messages[msgkey]["msgstr_lines"]
+ elif msgkey in msgs.trans_msgs:
+ dst_msgs.msgs[msgkey].msgstr = msg.msgstr
done_msgkeys.add(msgkey)
done_fuzzy_msgkeys.discard(msgkey)
- if msgkey in dst_states["fuzzy_msg"]:
- dst_states["fuzzy_msg"].remove(msgkey)
+ if msgkey in dst_msgs.fuzzy_msgs:
+ dst_msgs.fuzzy_msgs.remove(msgkey)
nbr_unfuzzied += 1
- if msgkey not in dst_states["trans_msg"]:
- dst_states["trans_msg"].add(msgkey)
- dst_stats["trans_msg"] += 1
- if is_ttip:
- dst_stats["trans_ttips"] += 1
+ if msgkey not in dst_msgs.trans_msgs:
+ dst_msgs.trans_msgs.add(msgkey)
else:
nbr_replaced += 1
nbr_merged += 1
- # The new message is fuzzy, org one is fuzzy too,
- # and this msgkey has not yet been merged.
- elif msgkey not in (dst_states["trans_msg"] | done_fuzzy_msgkeys):
- dst_messages[msgkey]["msgstr_lines"] = messages[msgkey]["msgstr_lines"]
+ # The new message is fuzzy, org one is fuzzy too, and this msgkey has not yet been merged.
+ elif msgkey not in (dst_msgs.trans_msgs | done_fuzzy_msgkeys):
+ dst_msgs[msgkey].msgstr = msg.msgstr
done_fuzzy_msgkeys.add(msgkey)
- dst_states["fuzzy_msg"].add(msgkey)
+ dst_msgs.fuzzy_msgs.add(msgkey)
nbr_merged += 1
nbr_replaced += 1
- utils.write_messages(args.dst, dst_messages, dst_states["comm_msg"], dst_states["fuzzy_msg"])
+ dst_msgs.write(kind='PO', dest=args.dst)
- print("Merged completed. {} messages were merged (among which {} were replaced), " \
- "{} were added, {} were \"un-fuzzied\"." \
- "".format(nbr_merged, nbr_replaced, nbr_added, nbr_unfuzzied))
+ print("Merged completed. {} messages were merged (among which {} were replaced), {} were added, "
+ "{} were \"un-fuzzied\".".format(nbr_merged, nbr_replaced, nbr_added, nbr_unfuzzied))
if args.stats:
+ dst_msgs.update_info()
print("Final merged po stats:")
- utils.print_stats(dst_stats, prefix="\t")
+ dst_msgs.print_stats(prefix="\t")
return ret
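
The merge rules spelled out in the argparse description (the first file to provide a msgkey wins, unless its entry is fuzzy and a later one is not, in which case the fuzzy flag is dropped) can be sketched standalone with dicts plus "translated" and "fuzzy" key sets; the real script tracks the same state on I18nMessages objects:

    dst = {("", "Add"): "Ajouter"}
    dst_trans = {("", "Add")}
    dst_fuzzy = set()

    # Each source: (messages, translated keys, fuzzy keys); the data is hypothetical.
    sources = [
        ({("", "Add"): "Adicionar", ("", "Delete"): "Supprimer"}, {("", "Add"), ("", "Delete")}, set()),
        ({("", "Move"): "Déplacer?"}, set(), {("", "Move")}),
    ]

    done = set(dst_trans)               # without --replace, existing translations are kept
    for msgs, trans, fuzzy in sources:
        for key, msgstr in msgs.items():
            if key in done:
                continue                # the first source to provide this key already won
            if key in trans:
                dst[key] = msgstr
                dst_trans.add(key)
                dst_fuzzy.discard(key)  # "un-fuzzy" once a real translation arrives
                done.add(key)
            elif key in fuzzy and key not in dst_trans:
                dst[key] = msgstr       # fuzzy entries only fill gaps and stay overridable
                dst_fuzzy.add(key)

    print(dst, dst_fuzzy)
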
diff --git a/release/scripts/modules/bl_i18n_utils/settings.py b/release/scripts/modules/bl_i18n_utils/settings.py
index 0f09e8238ab..eb5fd4cd0fa 100644
--- a/release/scripts/modules/bl_i18n_utils/settings.py
+++ b/release/scripts/modules/bl_i18n_utils/settings.py
@@ -96,13 +96,62 @@ IMPORT_MIN_LEVEL = -1
IMPORT_LANGUAGES_SKIP = {'am', 'bg', 'fi', 'el', 'et', 'ko', 'ne', 'pl', 'ro', 'uz', 'uz@cyrillic'}
# The comment prefix used in generated messages.txt file.
-COMMENT_PREFIX = "#~ "
+MSG_COMMENT_PREFIX = "#~ "
+
+# The comment prefix used in generated messages.txt file.
+MSG_CONTEXT_PREFIX = "MSGCTXT:"
+
+# The default comment prefix used in po's.
+PO_COMMENT_PREFIX= "# "
# The comment prefix used to mark sources of msgids, in po's.
-COMMENT_PREFIX_SOURCE = "#: "
+PO_COMMENT_PREFIX_SOURCE = "#: "
-# The comment prefix used in generated messages.txt file.
-CONTEXT_PREFIX = "MSGCTXT:"
+# The comment prefix used to mark sources of msgids, in po's.
+PO_COMMENT_PREFIX_SOURCE_CUSTOM = "#. :src: "
+
+# The comment prefix used to comment entries in po's.
+PO_COMMENT_PREFIX_MSG= "#~ "
+
+# The comment prefix used to mark fuzzy msgids, in po's.
+PO_COMMENT_FUZZY = "#, fuzzy"
+
+# The prefix used to define context, in po's.
+PO_MSGCTXT = "msgctxt "
+
+# The prefix used to define msgid, in po's.
+PO_MSGID = "msgid "
+
+# The prefix used to define msgstr, in po's.
+PO_MSGSTR = "msgstr "
+
+# The 'header' key of po files.
+PO_HEADER_KEY = ("", "")
+
+PO_HEADER_MSGSTR = (
+ "Project-Id-Version: Blender {blender_ver} (r{blender_rev})\\n\n"
+ "Report-Msgid-Bugs-To: \\n\n"
+ "POT-Creation-Date: {time}\\n\n"
+ "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n\n"
+ "Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n\n"
+ "Language-Team: LANGUAGE <LL@li.org>\\n\n"
+ "Language: {iso}\\n\n"
+ "MIME-Version: 1.0\\n\n"
+ "Content-Type: text/plain; charset=UTF-8\\n\n"
+ "Content-Transfer-Encoding: 8bit\n"
+)
+PO_HEADER_COMMENT_COPYRIGHT = (
+ "# Blender's translation file (po format).\n"
+ "# Copyright (C) {year} The Blender Foundation.\n"
+ "# This file is distributed under the same license as the Blender package.\n"
+ "#\n"
+)
+PO_HEADER_COMMENT = (
+ "# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.\n"
+ "#"
+)
+
+TEMPLATE_ISO_ID = "__TEMPLATE__"
# Default context.
CONTEXT_DEFAULT = ""
@@ -121,8 +170,7 @@ PYGETTEXT_ALLOWED_EXTS = {".c", ".cpp", ".cxx", ".hpp", ".hxx", ".h"}
PYGETTEXT_MAX_MULTI_CTXT = 16
# Where to search contexts definitions, relative to SOURCE_DIR (defined below).
-PYGETTEXT_CONTEXTS_DEFSRC = os.path.join("source", "blender", "blenfont",
- "BLF_translation.h")
+PYGETTEXT_CONTEXTS_DEFSRC = os.path.join("source", "blender", "blenfont", "BLF_translation.h")
# Regex to extract contexts defined in BLF_translation.h
# XXX Not full-proof, but should be enough here!
@@ -146,11 +194,21 @@ _str_base = (
"(?P={_}2)" # And closing quote.
)
str_clean_re = _str_base.format(_="g", capt="P<clean>")
+_inbetween_str_re = (
+ # XXX Strings may have comments between their pieces too, not only spaces!
+ r"(?:\s*(?:"
+ # A C comment
+ r"/\*.*(?!\*/).\*/|"
+ # Or a C++ one!
+ r"//[^\n]*\n"
+ # And we are done!
+ r")?)*"
+)
# Here we have to consider two different cases (empty string and other).
_str_whole_re = (
_str_base.format(_="{_}1_", capt=":") +
# Optional loop start, this handles "split" strings...
- "(?:(?<=[\"'])\\s*(?=[\"'])(?:"
+ "(?:(?<=[\"'])" + _inbetween_str_re + "(?=[\"'])(?:"
+ _str_base.format(_="{_}2_", capt=":") +
# End of loop.
"))*"
@@ -231,6 +289,8 @@ WARN_MSGID_NOT_CAPITALIZED_ALLOWED = {
}
WARN_MSGID_NOT_CAPITALIZED_ALLOWED |= set(lng[2] for lng in LANGUAGES)
+PARSER_CACHE_HASH = 'sha1'
+
###############################################################################
# PATHS
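
The new PO_HEADER_* constants are plain format templates, so producing a header for a fresh catalogue is a single str.format() call. A standalone sketch using an abridged copy of the template added in this patch (the version, revision, timestamp and ISO code are placeholder values):

    PO_HEADER_MSGSTR = (
        "Project-Id-Version: Blender {blender_ver} (r{blender_rev})\\n\n"
        "Report-Msgid-Bugs-To: \\n\n"
        "POT-Creation-Date: {time}\\n\n"
        "Language: {iso}\\n\n"
        "MIME-Version: 1.0\\n\n"
        "Content-Type: text/plain; charset=UTF-8\\n\n"
        "Content-Transfer-Encoding: 8bit\n"
    )

    # Hypothetical values; the real scripts take the revision from svn and the time from time.gmtime().
    print(PO_HEADER_MSGSTR.format(blender_ver="2.65", blender_rev="54110",
                                  time="2013-01-26 23:49+0000", iso="fr"))
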
diff --git a/release/scripts/modules/bl_i18n_utils/spell_check_utils.py b/release/scripts/modules/bl_i18n_utils/spell_check_utils.py
index 3fd039c7436..1dcba6a62d1 100644
--- a/release/scripts/modules/bl_i18n_utils/spell_check_utils.py
+++ b/release/scripts/modules/bl_i18n_utils/spell_check_utils.py
@@ -37,7 +37,7 @@ dict_uimsgs = {
"aren", # aren't
"betweens", # yuck! in-betweens!
"boolean", "booleans",
- "couldn", #couldn't
+ "couldn", # couldn't
"decrement",
"derivate",
"doesn", # doesn't
@@ -488,6 +488,7 @@ dict_uimsgs = {
# Files types/formats
"avi",
"attrac",
+ "autocad",
"autodesk",
"bmp",
"btx",
diff --git a/release/scripts/modules/bl_i18n_utils/update_branches.py b/release/scripts/modules/bl_i18n_utils/update_branches.py
index 4c38a2f71fb..1a22a9835dd 100755
--- a/release/scripts/modules/bl_i18n_utils/update_branches.py
+++ b/release/scripts/modules/bl_i18n_utils/update_branches.py
@@ -43,42 +43,31 @@ FILE_NAME_POT = settings.FILE_NAME_POT
def main():
import argparse
- parser = argparse.ArgumentParser(description="" \
- "Update all branches:\n" \
- "* Generate a temp messages.txt file.\n" \
- "* Use it to generate a temp .pot file.\n" \
- "* Use it to update all .po’s in /branches.")
- parser.add_argument('--pproc-contexts', action="store_true",
- help="Pre-process po’s to avoid having plenty of "
- "fuzzy msgids just because a context was "
- "added/changed!")
- parser.add_argument('-c', '--no_checks', default=True,
- action="store_false",
- help="No checks over UI messages.")
+ parser = argparse.ArgumentParser(description="Update all branches:\n"
+ "* Generate a temp messages.txt file.\n"
+ "* Use it to generate a blender.pot file.\n"
+ "* Use it to update all .po’s in /branches.")
+ #parser.add_argument('--pproc-contexts', action="store_true",
+ #help="Pre-process po’s to avoid having plenty of fuzzy msgids just because a context was "
+ #"added/changed!")
+ parser.add_argument('-c', '--no_checks', default=True, action="store_false", help="No checks over UI messages.")
parser.add_argument('-a', '--add', action="store_true",
- help="Add missing po’s (useful only when one or " \
- "more languages are given!).")
- parser.add_argument('langs', metavar='ISO_code', nargs='*',
- help="Restrict processed languages to those.")
+ help="Add missing po’s (useful only when one or more languages are given!).")
+ parser.add_argument('langs', metavar='ISO_code', nargs='*', help="Restrict processed languages to those.")
args = parser.parse_args()
ret = 0
# Generate a temp messages file.
- dummy, msgfile = tempfile.mkstemp(suffix=".txt",
- prefix="blender_messages_")
+ dummy, msgfile = tempfile.mkstemp(suffix=".txt", prefix="blender_messages_")
os.close(dummy)
cmd = (PY3, "./update_msg.py", "-o", msgfile)
t = subprocess.call(cmd)
if t:
ret = t
- # Generate a temp pot file.
- # Back to having a pot file in trunk/po. It's quite useful for translators that want to start
+ # Generate blender.pot file in trunk/po. It's quite useful for translators that want to start
+ # a new translation and do not want to bother generating their own po from scratch!
-# dummy, potfile = tempfile.mkstemp(suffix=".pot",
-# prefix="blender_pot_")
-# os.close(dummy)
potfile = FILE_NAME_POT
cmd = [PY3, "./update_pot.py", "-i", msgfile, "-o", potfile]
if not args.no_checks:
@@ -93,8 +82,8 @@ def main():
if args.add:
cmd.append("-a")
cmd += args.langs
- if args.pproc_contexts:
- cmd.append("--pproc-contexts")
+ #if args.pproc_contexts:
+ #cmd.append("--pproc-contexts")
t = subprocess.call(cmd)
if t:
ret = t
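
update_branches.py remains a thin driver: create a temporary messages file, then chain the other scripts through subprocess.call() and propagate any non-zero return code. A standalone sketch of that pattern (the child command is a stand-in, not the real update_msg.py / update_pot.py / update_po.py invocations):

    import os
    import subprocess
    import sys
    import tempfile

    ret = 0

    # Temp messages file, as the real driver creates for "./update_msg.py -o <file>".
    fd, msgfile = tempfile.mkstemp(suffix=".txt", prefix="blender_messages_")
    os.close(fd)

    # Stand-in child step; the real script runs update_msg.py, then update_pot.py, then update_po.py.
    cmd = (sys.executable, "-c", "print('child step ran')")
    t = subprocess.call(cmd)
    if t:
        ret = t

    os.remove(msgfile)
    sys.exit(ret)
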
diff --git a/release/scripts/modules/bl_i18n_utils/update_languages_menu.py b/release/scripts/modules/bl_i18n_utils/update_languages_menu.py
index 9b4cb20fadf..6263f1c1e64 100755
--- a/release/scripts/modules/bl_i18n_utils/update_languages_menu.py
+++ b/release/scripts/modules/bl_i18n_utils/update_languages_menu.py
@@ -73,11 +73,11 @@ def find_matching_po(languages, stats, forbidden):
def main():
import argparse
- parser = argparse.ArgumentParser(description=""
- "Update 'languages' text file used by Blender at runtime to build translations menu.")
+ parser = argparse.ArgumentParser(description="Update 'languages' text file used by Blender at runtime to build "
+ "translations menu.")
parser.add_argument('-m', '--min_translation', type=int, default=-100,
- help="Minimum level of translation, as a percentage "
- "(translations below this are commented out in menu).")
+ help="Minimum level of translation, as a percentage (translations below this are commented out "
+ "in menu).")
parser.add_argument('langs', metavar='ISO_code', nargs='*',
help="Unconditionally exclude those languages from the menu.")
args = parser.parse_args()
@@ -92,8 +92,8 @@ def main():
for po in os.listdir(TRUNK_PO_DIR):
if po.endswith(".po") and not po.endswith("_raw.po"):
lang = os.path.basename(po)[:-3]
- u1, u2, _stats = utils.parse_messages(os.path.join(TRUNK_PO_DIR, po))
- stats[lang] = _stats["trans_msg"] / _stats["tot_msg"]
+ msgs = utils.I18nMessages(kind='PO', src=os.path.join(TRUNK_PO_DIR, po))
+ stats[lang] = msgs.nbr_trans_msgs / msgs.nbr_msgs
# Generate languages file used by Blender's i18n system.
# First, match all entries in LANGUAGES to a lang in stats, if possible!
diff --git a/release/scripts/modules/bl_i18n_utils/update_mo.py b/release/scripts/modules/bl_i18n_utils/update_mo.py
index 7f68736593c..ab6fedc87a7 100755
--- a/release/scripts/modules/bl_i18n_utils/update_mo.py
+++ b/release/scripts/modules/bl_i18n_utils/update_mo.py
@@ -51,11 +51,11 @@ def process_po(po, lang, mo=None):
# show stats
cmd = (GETTEXT_MSGFMT_EXECUTABLE,
- "--statistics",
- po,
- "-o",
- mo or os.path.join(mo_dir, ".".join((DOMAIN, "mo"))),
- )
+ "--statistics",
+ po,
+ "-o",
+ mo or os.path.join(mo_dir, ".".join((DOMAIN, "mo"))),
+ )
print("Running ", " ".join(cmd))
ret = subprocess.call(cmd)
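
The reformatted tuple above is just the msgfmt invocation: compiling a .po into a .mo with statistics is one external call. A minimal sketch, assuming GNU gettext's msgfmt is on PATH and that the placeholder paths exist:

    import subprocess

    po_path = "fr.po"        # hypothetical input catalogue
    mo_path = "blender.mo"   # hypothetical output

    cmd = ("msgfmt", "--statistics", po_path, "-o", mo_path)
    print("Running ", " ".join(cmd))
    ret = subprocess.call(cmd)   # non-zero means msgfmt failed (missing file, syntax error, ...)
    print("msgfmt returned", ret)
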
diff --git a/release/scripts/modules/bl_i18n_utils/update_po.py b/release/scripts/modules/bl_i18n_utils/update_po.py
index 4c6495c5cfe..6e03226f6d3 100755
--- a/release/scripts/modules/bl_i18n_utils/update_po.py
+++ b/release/scripts/modules/bl_i18n_utils/update_po.py
@@ -22,7 +22,7 @@
# Update po’s in the branches from blender.pot in /trunk/po dir.
-import subprocess
+import concurrent.futures
import os
import sys
from codecs import open
@@ -41,73 +41,25 @@ TRUNK_PO_DIR = settings.TRUNK_PO_DIR
FILE_NAME_POT = settings.FILE_NAME_POT
-def pproc_newcontext_po(po, pot_messages, pot_stats):
- print("Adding new contexts to {}...".format(po))
- messages, state, stats = utils.parse_messages(po)
- known_ctxt = stats["contexts"]
- print("Already known (present) context(s): {}".format(str(known_ctxt)))
-
- new_ctxt = set()
- added = 0
- # Only use valid already translated messages!
- allowed_keys = state["trans_msg"] - state["fuzzy_msg"] - state["comm_msg"]
- for key in pot_messages.keys():
- ctxt, msgid = key
- if ctxt in known_ctxt:
- continue
- new_ctxt.add(ctxt)
- for t_ctxt in known_ctxt:
- # XXX The first match will win, this might not be optimal...
- t_key = (t_ctxt, msgid)
- if t_key in allowed_keys:
- # Wrong comments (sources) will be removed by msgmerge...
- messages[key] = messages[t_key]
- messages[key]["msgctxt_lines"] = [ctxt]
- added += 1
-
- utils.write_messages(po, messages, state["comm_msg"], state["fuzzy_msg"])
- print("Finished!\n {} new context(s) was/were added {}, adding {} new "
- "messages.\n".format(len(new_ctxt), str(new_ctxt), added))
- return 0
-
-
-def process_po(po, lang):
+def process_po(data):
+ po, lang, pot_msgs = data
# update po file
- cmd = (GETTEXT_MSGMERGE_EXECUTABLE,
- "--update",
- "-w", "1", # XXX Ugly hack to prevent msgmerge merging
- # short source comments together!
- "--no-wrap",
- "--backup=none",
- "--lang={}".format(lang),
- po,
- FILE_NAME_POT,
- )
-
+ msg = utils.I18nMessages(iso=lang, kind='PO', src=po)
print("Updating {}...".format(po))
- print("Running ", " ".join(cmd))
- ret = subprocess.call(cmd)
- print("Finished!\n")
- return ret
+ msg.update(pot_msgs)
+ msg.write(kind='PO', dest=po)
+ print("Finished updating {}!\n".format(po))
+ return 0
def main():
import argparse
- parser = argparse.ArgumentParser(description="Write out messages.txt "
- "from Blender.")
- parser.add_argument('-t', '--trunk', action="store_true",
- help="Update po’s in /trunk/po rather than /branches.")
- parser.add_argument('-i', '--input', metavar="File",
- help="Input pot file path.")
- parser.add_argument('--pproc-contexts', action="store_true",
- help="Pre-process po’s to avoid having plenty of "
- "fuzzy msgids just because a context was "
- "added/changed!")
+ parser = argparse.ArgumentParser(description="Write out messages.txt from Blender.")
+ parser.add_argument('-t', '--trunk', action="store_true", help="Update po’s in /trunk/po rather than /branches.")
+ parser.add_argument('-i', '--input', metavar="File", help="Input pot file path.")
parser.add_argument('-a', '--add', action="store_true",
- help="Add missing po’s (useful only when one or "
- "more languages are given!).")
- parser.add_argument('langs', metavar='ISO_code', nargs='*',
- help="Restrict processed languages to those.")
+ help="Add missing po’s (useful only when one or more languages are given!).")
+ parser.add_argument('langs', metavar='ISO_code', nargs='*', help="Restrict processed languages to those.")
args = parser.parse_args()
if args.input:
@@ -115,12 +67,8 @@ def main():
FILE_NAME_POT = args.input
ret = 0
- if args.pproc_contexts:
- _ctxt_proc = pproc_newcontext_po
- pot_messages, _a, pot_stats = utils.parse_messages(FILE_NAME_POT)
- else:
- _ctxt_proc = lambda a, b, c: 0
- pot_messages, pot_stats = None, None
+ pot_msgs = utils.I18nMessages(kind='PO', src=FILE_NAME_POT)
+ pool_data = []
if args.langs:
for lang in args.langs:
@@ -136,37 +84,27 @@ def main():
if not os.path.exists(po):
shutil.copy(FILE_NAME_POT, po)
if args.add or os.path.exists(po):
- t = _ctxt_proc(po, pot_messages, pot_stats)
- if t:
- ret = t
- t = process_po(po, lang)
- if t:
- ret = t
+ pool_data.append((po, lang, pot_msgs))
elif args.trunk:
for po in os.listdir(TRUNK_PO_DIR):
if po.endswith(".po"):
lang = os.path.basename(po)[:-3]
po = os.path.join(TRUNK_PO_DIR, po)
- t = _ctxt_proc(po, pot_messages, pot_stats)
- if t:
- ret = t
- t = process_po(po, lang)
- if t:
- ret = t
+ pool_data.append((po, lang, pot_msgs))
else:
for lang in os.listdir(BRANCHES_DIR):
po = os.path.join(BRANCHES_DIR, lang, ".".join((lang, "po")))
if os.path.exists(po):
- t = _ctxt_proc(po, pot_messages, pot_stats)
- if t:
- ret = t
- t = process_po(po, lang)
- if t:
- ret = t
+ pool_data.append((po, lang, pot_msgs))
+
+ with concurrent.futures.ProcessPoolExecutor() as executor:
+ for r in executor.map(process_po, pool_data, timeout=600):
+ if r != 0:
+ ret = r
return ret
if __name__ == "__main__":
print("\n\n *** Running {} *** \n".format(__file__))
- sys.exit(main())
+ sys.exit(main()) \ No newline at end of file
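
The key change in update_po.py is replacing the per-file msgmerge subprocess with in-process merging fanned out over a ProcessPoolExecutor: each (po, lang, pot) tuple goes to one worker and any non-zero result is folded into the exit code. A standalone sketch of that dispatch pattern (the worker body is a trivial stand-in for the real I18nMessages update and write):

    import concurrent.futures

    def process_po(data):
        po, lang = data
        # The real worker loads the po, calls msg.update(pot_msgs) and writes it back.
        print("Updating {} ({})...".format(po, lang))
        return 0

    def main():
        pool_data = [("fr.po", "fr"), ("ja.po", "ja"), ("ru.po", "ru")]   # hypothetical catalogues
        ret = 0
        with concurrent.futures.ProcessPoolExecutor() as executor:
            for r in executor.map(process_po, pool_data, timeout=600):
                if r != 0:
                    ret = r
        return ret

    if __name__ == "__main__":   # guard required so spawned worker processes can re-import this module
        raise SystemExit(main())
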
diff --git a/release/scripts/modules/bl_i18n_utils/update_pot.py b/release/scripts/modules/bl_i18n_utils/update_pot.py
index ecb5d837a09..e05e60937a8 100755
--- a/release/scripts/modules/bl_i18n_utils/update_pot.py
+++ b/release/scripts/modules/bl_i18n_utils/update_pot.py
@@ -23,6 +23,7 @@
# Update blender.pot file from messages.txt
import subprocess
+import collections
import os
import sys
import re
@@ -41,9 +42,11 @@ except:
LANGUAGES_CATEGORIES = settings.LANGUAGES_CATEGORIES
LANGUAGES = settings.LANGUAGES
-COMMENT_PREFIX = settings.COMMENT_PREFIX
-COMMENT_PREFIX_SOURCE = settings.COMMENT_PREFIX_SOURCE
-CONTEXT_PREFIX = settings.CONTEXT_PREFIX
+PO_COMMENT_PREFIX = settings.PO_COMMENT_PREFIX
+PO_COMMENT_PREFIX_SOURCE = settings.PO_COMMENT_PREFIX_SOURCE
+PO_COMMENT_PREFIX_SOURCE_CUSTOM = settings.PO_COMMENT_PREFIX_SOURCE_CUSTOM
+MSG_COMMENT_PREFIX = settings.MSG_COMMENT_PREFIX
+MSG_CONTEXT_PREFIX = settings.MSG_CONTEXT_PREFIX
FILE_NAME_MESSAGES = settings.FILE_NAME_MESSAGES
FILE_NAME_POT = settings.FILE_NAME_POT
SOURCE_DIR = settings.SOURCE_DIR
@@ -79,6 +82,10 @@ _clean_str = re.compile(settings.str_clean_re).finditer
clean_str = lambda s: "".join(m.group("clean") for m in _clean_str(s))
+def _new_messages():
+ return getattr(collections, "OrderedDict", dict)()
+
+
def check_file(path, rel_path, messages):
def process_entry(ctxt, msg):
# Context.
@@ -207,20 +214,22 @@ def get_svnrev():
def gen_empty_pot():
+ blender_ver = ""
blender_rev = get_svnrev().decode()
utctime = time.gmtime()
time_str = time.strftime("%Y-%m-%d %H:%M+0000", utctime)
year_str = time.strftime("%Y", utctime)
- return utils.gen_empty_messages(blender_rev, time_str, year_str)
+ return utils.I18nMessages.gen_empty_messages("__POT__", blender_ver, blender_rev, time_str, year_str)
escape_re = tuple(re.compile(r[0]) for r in settings.ESCAPE_RE)
escape = lambda s, n: escape_re[n].sub(settings.ESCAPE_RE[n][1], s)
-def merge_messages(msgs, states, messages, do_checks, spell_cache):
- num_added = num_present = 0
+def merge_messages(msgs, messages, do_checks, spell_cache):
+ num_added = 0
+ num_present = msgs.nbr_msgs
for (context, msgid), srcs in messages.items():
if do_checks:
err = spell_check(msgid, spell_cache)
@@ -233,19 +242,15 @@ def merge_messages(msgs, states, messages, do_checks, spell_cache):
for n in range(len(escape_re)):
msgid = escape(msgid, n)
- srcs = [COMMENT_PREFIX_SOURCE + s for s in srcs]
-
key = (context, msgid)
- if key not in msgs:
- msgs[key] = {"msgid_lines": [msgid],
- "msgstr_lines": [""],
- "comment_lines": srcs,
- "msgctxt_lines": [context]}
+ if key not in msgs.msgs:
+ msg = utils.I18nMessage([context], [msgid], [""], [])
+ msg.sources = srcs
+ msgs.msgs[key] = msg
num_added += 1
else:
- # We need to merge comments!
- msgs[key]["comment_lines"].extend(srcs)
- num_present += 1
+ # We need to merge sources!
+ msgs.msgs[key].sources += srcs
return num_added, num_present
@@ -270,7 +275,7 @@ def main():
print("Running fake py gettext…")
# Not using any more xgettext, simpler to do it ourself!
- messages = utils.new_messages()
+ messages = _new_messages()
py_xgettext(messages)
print("Finished, found {} messages.".format(len(messages)))
@@ -281,55 +286,49 @@ def main():
spell_cache = set()
print("Generating POT file {}…".format(FILE_NAME_POT))
- msgs, states = gen_empty_pot()
- tot_messages, _a = merge_messages(msgs, states, messages,
- True, spell_cache)
+ msgs = gen_empty_pot()
+ tot_messages, _a = merge_messages(msgs, messages, True, spell_cache)
# add messages collected automatically from RNA
print("\tMerging RNA messages from {}…".format(FILE_NAME_MESSAGES))
- messages = utils.new_messages()
+ messages.clear()
with open(FILE_NAME_MESSAGES, encoding="utf-8") as f:
srcs = []
context = ""
for line in f:
line = utils.stripeol(line)
- if line.startswith(COMMENT_PREFIX):
- srcs.append(line[len(COMMENT_PREFIX):].strip())
- elif line.startswith(CONTEXT_PREFIX):
- context = line[len(CONTEXT_PREFIX):].strip()
+ if line.startswith(MSG_COMMENT_PREFIX):
+ srcs.append(line[len(MSG_COMMENT_PREFIX):].strip())
+ elif line.startswith(MSG_CONTEXT_PREFIX):
+ context = line[len(MSG_CONTEXT_PREFIX):].strip()
else:
key = (context, line)
messages[key] = srcs
srcs = []
context = ""
- num_added, num_present = merge_messages(msgs, states, messages,
- True, spell_cache)
+ num_added, num_present = merge_messages(msgs, messages, True, spell_cache)
tot_messages += num_added
- print("\tMerged {} messages ({} were already present)."
- "".format(num_added, num_present))
+ print("\tMerged {} messages ({} were already present).".format(num_added, num_present))
print("\tAdding languages labels...")
- messages = {(CONTEXT_DEFAULT, lng[1]):
- ("Languages’ labels from bl_i18n_utils/settings.py",)
- for lng in LANGUAGES}
- messages.update({(CONTEXT_DEFAULT, cat[1]):
- ("Language categories’ labels from bl_i18n_utils/settings.py",)
- for cat in LANGUAGES_CATEGORIES})
- num_added, num_present = merge_messages(msgs, states, messages,
- True, spell_cache)
+ messages.clear()
+ messages.update(((CONTEXT_DEFAULT, lng[1]), ("Languages’ labels from bl_i18n_utils/settings.py",))
+ for lng in LANGUAGES)
+ messages.update(((CONTEXT_DEFAULT, cat[1]), ("Language categories’ labels from bl_i18n_utils/settings.py",))
+ for cat in LANGUAGES_CATEGORIES)
+ num_added, num_present = merge_messages(msgs, messages, True, spell_cache)
tot_messages += num_added
print("\tAdded {} language messages.".format(num_added))
# Write back all messages into blender.pot.
- utils.write_messages(FILE_NAME_POT, msgs, states["comm_msg"],
- states["fuzzy_msg"])
+ msgs.write('PO', FILE_NAME_POT)
if SPELL_CACHE and spell_cache:
with open(SPELL_CACHE, 'wb') as f:
pickle.dump(spell_cache, f)
- print("Finished, total: {} messages!".format(tot_messages - 1))
+ print("Finished, total: {} messages!".format(tot_messages))
return 0
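
Editor's note: the reworked merge_messages() above keys every entry on its (context, msgid) pair: new keys get a fresh I18nMessage, existing ones only have their source comments appended. A minimal standalone sketch of that merge pattern, using a plain OrderedDict and a made-up MiniMessage class rather than the real Blender utilities:

import collections


class MiniMessage:
    """Hypothetical stand-in for utils.I18nMessage: just an id and its source comments."""
    def __init__(self, msgid, sources=None):
        self.msgid = msgid
        self.sources = list(sources or [])


def merge_extracted(msgs, extracted):
    """Merge {(context, msgid): [sources]} into msgs, returning (num_added, num_present)."""
    num_added = 0
    num_present = len(msgs)  # messages already there before this merge
    for (context, msgid), srcs in extracted.items():
        key = (context, msgid)
        if key not in msgs:
            msgs[key] = MiniMessage(msgid, srcs)
            num_added += 1
        else:
            # Same message seen again: only the source comments are merged.
            msgs[key].sources += srcs
    return num_added, num_present


msgs = collections.OrderedDict()
print(merge_extracted(msgs, {("*", "Render"): ["ui/render.py:10"]}))   # (1, 0)
print(merge_extracted(msgs, {("*", "Render"): ["ui/space.py:42"]}))    # (0, 1)
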
diff --git a/release/scripts/modules/bl_i18n_utils/update_trunk.py b/release/scripts/modules/bl_i18n_utils/update_trunk.py
index b84a227ae0a..d7d1a9741cb 100755
--- a/release/scripts/modules/bl_i18n_utils/update_trunk.py
+++ b/release/scripts/modules/bl_i18n_utils/update_trunk.py
@@ -25,8 +25,7 @@
# * Copy po’s from branches advanced enough.
# * Clean po’s in trunk.
# * Compile po’s in trunk in mo’s, keeping track of those failing.
-# * Remove po’s, mo’s (and their dir’s) that failed to compile or
-# are no more present in trunk.
+# * Remove po’s, mo’s (and their dir’s) that failed to compile or are no more present in trunk.
import subprocess
import os
@@ -50,25 +49,6 @@ LANGUAGES_FILE = settings.LANGUAGES_FILE
PY3 = settings.PYTHON3_EXEC
-def find_matching_po(languages, stats):
- """Match languages defined in LANGUAGES setting to relevant po, if possible!"""
- ret = []
- for uid, label, org_key in languages:
- key = org_key
- if key not in stats:
- # Try to simplify the key (eg from es_ES to es).
- if '_' in org_key:
- key = org_key[0:org_key.index('_')]
- if '@' in org_key:
- key = key + org_key[org_key.index('@'):]
- if key in stats:
- ret.append((stats[key], uid, label, org_key))
- else:
- # Mark invalid entries, so that we can put them in the languages file,
- # but commented!
- ret.append((0.0, -uid, label, org_key))
- return ret
-
def main():
import argparse
parser = argparse.ArgumentParser(description=""
@@ -99,7 +79,8 @@ def main():
os.remove(po)
# Copy po’s from branches.
- cmd = [PY3, "./import_po_from_branches.py", "-s"]
+ #cmd = [PY3, "./import_po_from_branches.py", "-s"]
+ cmd = [PY3, "./import_po_from_branches.py"]
if args.threshold is not None:
cmd += ["-t", str(args.threshold)]
if args.langs:
@@ -124,7 +105,8 @@ def main():
if args.langs and lang not in args.langs:
continue
- cmd = [PY3, "./clean_po.py", "-t", "-s", lang]
+ #cmd = [PY3, "./clean_po.py", "-t", "-s", lang]
+ cmd = [PY3, "./clean_po.py", "-t", lang]
t = subprocess.call(cmd)
if t:
ret = t
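
Editor's note: for context, the find_matching_po() helper removed above matched the language codes declared in settings against the po files actually present, falling back from a full code such as es_ES to plain es while preserving any @variant suffix. A self-contained sketch of that fallback, with an invented stats dict standing in for the real per-po statistics:

def simplify_lang_key(org_key, known_keys):
    """Return org_key if known, else try the es_ES -> es style fallback (keeping @variants)."""
    if org_key in known_keys:
        return org_key
    key = org_key
    if '_' in org_key:
        key = org_key[:org_key.index('_')]
    if '@' in org_key:
        key = key + org_key[org_key.index('@'):]
    return key if key in known_keys else None


stats = {"es": 0.83, "sr@latin": 0.45}  # hypothetical completion ratios per po file
print(simplify_lang_key("es_ES", stats))        # es
print(simplify_lang_key("sr_RS@latin", stats))  # sr@latin
print(simplify_lang_key("xx_XX", stats))        # None
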
diff --git a/release/scripts/modules/bl_i18n_utils/utils.py b/release/scripts/modules/bl_i18n_utils/utils.py
index 9481f750092..1c2e62b4c4e 100644
--- a/release/scripts/modules/bl_i18n_utils/utils.py
+++ b/release/scripts/modules/bl_i18n_utils/utils.py
@@ -20,357 +20,661 @@
# Some misc utilities...
+import collections
+import copy
import os
+import re
import sys
-import collections
from bl_i18n_utils import settings
-COMMENT_PREFIX = settings.COMMENT_PREFIX
+PO_COMMENT_PREFIX = settings.PO_COMMENT_PREFIX
+PO_COMMENT_PREFIX_MSG = settings.PO_COMMENT_PREFIX_MSG
+PO_COMMENT_PREFIX_SOURCE = settings.PO_COMMENT_PREFIX_SOURCE
+PO_COMMENT_PREFIX_SOURCE_CUSTOM = settings.PO_COMMENT_PREFIX_SOURCE_CUSTOM
+PO_COMMENT_FUZZY = settings.PO_COMMENT_FUZZY
+PO_MSGCTXT = settings.PO_MSGCTXT
+PO_MSGID = settings.PO_MSGID
+PO_MSGSTR = settings.PO_MSGSTR
+
+PO_HEADER_KEY = settings.PO_HEADER_KEY
+PO_HEADER_COMMENT = settings.PO_HEADER_COMMENT
+PO_HEADER_COMMENT_COPYRIGHT = settings.PO_HEADER_COMMENT_COPYRIGHT
+PO_HEADER_MSGSTR = settings.PO_HEADER_MSGSTR
+
+PARSER_CACHE_HASH = settings.PARSER_CACHE_HASH
+
WARN_NC = settings.WARN_MSGID_NOT_CAPITALIZED
NC_ALLOWED = settings.WARN_MSGID_NOT_CAPITALIZED_ALLOWED
+PARSER_CACHE_HASH = settings.PARSER_CACHE_HASH
+##### Misc Utils #####
+
def stripeol(s):
return s.rstrip("\n\r")
-# XXX For now, we assume that all messages > 30 chars are tooltips!
-def is_tooltip(msgid):
- return len(msgid) > 30
+_valid_po_path_re = re.compile(r"^\S+:[0-9]+$")
+def is_valid_po_path(path):
+ return bool(_valid_po_path_re.match(path))
+
+
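
Editor's note: is_valid_po_path() is what later lets the sources setter split incoming references into regular file:line comments and custom ones. A quick check of the regex against a few sample strings (the samples are made up):

import re

_valid_po_path_re = re.compile(r"^\S+:[0-9]+$")

for src in ("source/blender/editors/object/object_add.c:123",
            "bpy.types.Scene.frame_start",
            "scripts/startup/bl_ui/space_view3d.py:42"):
    print(src, "->", bool(_valid_po_path_re.match(src)))
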
+class I18nMessage:
+ """
+ Internal representation of a message.
+ """
+ __slots__ = ("msgctxt_lines", "msgid_lines", "msgstr_lines", "comment_lines", "is_fuzzy", "is_commented")
+
+ def __init__(self, msgctxt_lines=[], msgid_lines=[], msgstr_lines=[], comment_lines=[],
+ is_commented=False, is_fuzzy=False):
+ self.msgctxt_lines = msgctxt_lines
+ self.msgid_lines = msgid_lines
+ self.msgstr_lines = msgstr_lines
+ self.comment_lines = comment_lines
+ self.is_fuzzy = is_fuzzy
+ self.is_commented = is_commented
+
+ def _get_msgctxt(self):
+ return ("".join(self.msgctxt_lines)).replace("\\n", "\n")
+ def _set_msgctxt(self, ctxt):
+ self.msgctxt_lines = [ctxt]
+ msgctxt = property(_get_msgctxt, _set_msgctxt)
+
+ def _get_msgid(self):
+ return ("".join(self.msgid_lines)).replace("\\n", "\n")
+ def _set_msgid(self, msgid):
+ self.msgid_lines = [msgid]
+ msgid = property(_get_msgid, _set_msgid)
+
+ def _get_msgstr(self):
+ return ("".join(self.msgstr_lines)).replace("\\n", "\n")
+ def _set_msgstr(self, msgstr):
+ self.msgstr_lines = [msgstr]
+ msgstr = property(_get_msgstr, _set_msgstr)
+
+ def _get_sources(self):
+ lstrip1 = len(PO_COMMENT_PREFIX_SOURCE)
+ lstrip2 = len(PO_COMMENT_PREFIX_SOURCE_CUSTOM)
+ return ([l[lstrip1:] for l in self.comment_lines if l.startswith(PO_COMMENT_PREFIX_SOURCE)] +
+ [l[lstrip2:] for l in self.comment_lines if l.startswith(PO_COMMENT_PREFIX_SOURCE_CUSTOM)])
+ def _set_sources(self, sources):
+ # list.copy() is not available in py3.2 ...
+ cmmlines = []
+ cmmlines[:] = self.comment_lines
+ for l in cmmlines:
+ if l.startswith(PO_COMMENT_PREFIX_SOURCE) or l.startswith(PO_COMMENT_PREFIX_SOURCE_CUSTOM):
+ self.comment_lines.remove(l)
+ lines_src = []
+ lines_src_custom = []
+ for src in sources:
+ if is_valid_po_path(src):
+ lines_src.append(PO_COMMENT_PREFIX_SOURCE + src)
+ else:
+ lines_src_custom.append(PO_COMMENT_PREFIX_SOURCE_CUSTOM + src)
+ self.comment_lines += lines_src_custom + lines_src
+ sources = property(_get_sources, _set_sources)
+
+ def _get_is_tooltip(self):
+ # XXX For now, we assume that all messages > 30 chars are tooltips!
+ return len(self.msgid) > 30
+ is_tooltip = property(_get_is_tooltip)
+
+ def normalize(self, max_len=80):
+ """
+        Normalize this message; call this before exporting it...
+        Currently normalizes msgctxt, msgid and msgstr lines to the given max_len (if below 1, makes them single lines).
+ """
+ max_len -= 2 # The two quotes!
+ # We do not need the full power of textwrap... We just split first at escaped new lines, then into each line
+ # if needed... No word splitting, nor fancy spaces handling!
+ def _wrap(text, max_len, init_len):
+ if len(text) + init_len < max_len:
+ return [text]
+ lines = text.splitlines()
+ ret = []
+ for l in lines:
+ tmp = []
+ cur_len = 0
+ words = l.split(' ')
+ for w in words:
+ cur_len += len(w) + 1
+ if cur_len > (max_len - 1) and tmp:
+ ret.append(" ".join(tmp) + " ")
+ del tmp[:]
+ cur_len = len(w) + 1
+ tmp.append(w)
+ if tmp:
+ ret.append(" ".join(tmp))
+ return ret
+ if max_len < 1:
+ self.msgctxt_lines = self.msgctxt.replace("\n", "\\n\n").splitlines()
+ self.msgid_lines = self.msgid.replace("\n", "\\n\n").splitlines()
+ self.msgstr_lines = self.msgstr.replace("\n", "\\n\n").splitlines()
+ else:
+ init_len = len(PO_MSGCTXT) + 1
+ if self.is_commented:
+ init_len += len(PO_COMMENT_PREFIX_MSG)
+ self.msgctxt_lines = _wrap(self.msgctxt.replace("\n", "\\n\n"), max_len, init_len)
+ init_len = len(PO_MSGID) + 1
+ if self.is_commented:
+ init_len += len(PO_COMMENT_PREFIX_MSG)
+ self.msgid_lines = _wrap(self.msgid.replace("\n", "\\n\n"), max_len, init_len)
-def new_messages():
- return getattr(collections, 'OrderedDict', dict)()
+ init_len = len(PO_MSGSTR) + 1
+ if self.is_commented:
+ init_len += len(PO_COMMENT_PREFIX_MSG)
+ self.msgstr_lines = _wrap(self.msgstr.replace("\n", "\\n\n"), max_len, init_len)
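
Editor's note: the _wrap() helper inside normalize() is a deliberately simple splitter: it cuts at escaped newlines first, then packs whole words up to max_len, with none of textwrap's extra handling. The same logic lifted out of the class so it can be tried on its own:

def wrap_po_line(text, max_len, init_len=0):
    """Pack whole words into lines of at most roughly max_len characters, splitting at '\n' first."""
    if len(text) + init_len < max_len:
        return [text]
    ret = []
    for l in text.splitlines():
        tmp = []
        cur_len = 0
        for w in l.split(' '):
            cur_len += len(w) + 1
            if cur_len > (max_len - 1) and tmp:
                ret.append(" ".join(tmp) + " ")
                del tmp[:]
                cur_len = len(w) + 1
            tmp.append(w)
        if tmp:
            ret.append(" ".join(tmp))
    return ret


print(wrap_po_line("Limit the number of samples used for rendering the preview image", 30))
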
-def parse_messages(fname):
+class I18nMessages:
"""
- Returns a tupple (messages, states, stats).
- messages is an odereddict of dicts
- {(ctxt, msgid): {msgid_lines:, msgstr_lines:,
- comment_lines:, msgctxt_lines:}}.
- states is a dict of three sets of (msgid, ctxt), and a boolean flag
- indicating the .po is somewhat broken
- {trans_msg:, fuzzy_msg:, comm_msg:, is_broken:}.
- stats is a dict of values
- {tot_msg:, trans_msg:, tot_ttips:, trans_ttips:, comm_msg:,
- nbr_signs:, nbr_trans_signs:, contexts: set()}.
- Note: This function will silently "arrange" mis-formated entries, thus
- using afterward write_messages() should always produce a po-valid file,
- though not correct!
+ Internal representation of messages for one language (iso code), with additional stats info.
"""
- tot_messages = 0
- tot_tooltips = 0
- trans_messages = 0
- trans_tooltips = 0
- comm_messages = 0
- nbr_signs = 0
- nbr_trans_signs = 0
- contexts = set()
- reading_msgid = False
- reading_msgstr = False
- reading_msgctxt = False
- reading_comment = False
- is_translated = False
- is_fuzzy = False
- is_commented = False
- is_broken = False
- msgid_lines = []
- msgstr_lines = []
- msgctxt_lines = []
- comment_lines = []
-
- messages = new_messages()
- translated_messages = set()
- fuzzy_messages = set()
- commented_messages = set()
-
- def clean_vars():
- nonlocal reading_msgid, reading_msgstr, reading_msgctxt, \
- reading_comment, is_fuzzy, is_translated, is_commented, \
- msgid_lines, msgstr_lines, msgctxt_lines, comment_lines
- reading_msgid = reading_msgstr = reading_msgctxt = \
- reading_comment = False
- is_tooltip = is_fuzzy = is_translated = is_commented = False
+
+ # Avoid parsing again!
+ # Keys should be (pseudo) file-names, values are tuples (hash, I18nMessages)
+ # Note: only used by po parser currently!
+ _parser_cache = {}
+
+ def __init__(self, iso="__POT__", kind=None, key=None, src=None):
+ self.iso = iso
+ self.msgs = self._new_messages()
+ self.trans_msgs = set()
+ self.fuzzy_msgs = set()
+ self.comm_msgs = set()
+ self.ttip_msgs = set()
+ self.contexts = set()
+ self.nbr_msgs = 0
+ self.nbr_trans_msgs = 0
+ self.nbr_ttips = 0
+ self.nbr_trans_ttips = 0
+ self.nbr_comm_msgs = 0
+ self.nbr_signs = 0
+ self.nbr_trans_signs = 0
+ self.parsing_errors = []
+ if kind and src:
+ self.parse(kind, key, src)
+ self.update_info()
+
+ @staticmethod
+ def _new_messages():
+ return getattr(collections, 'OrderedDict', dict)()
+
+ @classmethod
+ def gen_empty_messages(cls, iso, blender_ver, blender_rev, time, year, default_copyright=True):
+ """Generate an empty I18nMessages object (only header is present!)."""
+ msgstr = PO_HEADER_MSGSTR.format(blender_ver=str(blender_ver), blender_rev=int(blender_rev),
+ time=str(time), iso=str(iso))
+ comment = ""
+ if default_copyright:
+ comment = PO_HEADER_COMMENT_COPYRIGHT.format(year=str(year))
+ comment = comment + PO_HEADER_COMMENT
+
+ msgs = cls(iso=iso)
+ msgs.msgs[PO_HEADER_KEY] = I18nMessage([], [""], [msgstr], [comment], False, True)
+ msgs.update_info()
+
+ return msgs
+
+ def normalize(self, max_len=80):
+ for msg in self.msgs.values():
+ msg.normalize(max_len)
+
+ def merge(self, replace=False, *args):
+ pass
+
+ def update(self, ref, use_similar=0.75, keep_old_commented=True):
+ """
+        Update this I18nMessages object from the ref one. Translations from ref are never used. Source comments from
+        ref completely replace current ones. If use_similar is not 0.0, it will try to match new messages in ref with
+        an existing one. Messages no longer found in ref are marked as commented if keep_old_commented is True,
+        or removed otherwise.
+ """
+ import difflib
+ similar_pool = {}
+ if use_similar > 0.0:
+ for key in self.msgs:
+ similar_pool.setdefault(key[1], set()).add(key)
+
+ msgs = self._new_messages()
+ for (key, msg) in ref.msgs.items():
+ if key in self.msgs:
+ msgs[key] = self.msgs[key]
+ msgs[key].sources = msg.sources
+ else:
+ skey = None
+ if use_similar > 0.0:
+ # try to find some close key in existing messages...
+ tmp = difflib.get_close_matches(key[1], similar_pool, n=1, cutoff=use_similar)
+ if tmp:
+ tmp = tmp[0]
+ # Try to get the same context, else just get one...
+ skey = (key[0], tmp)
+ if skey not in similar_pool[tmp]:
+ skey = tuple(similar_pool[tmp])[0]
+ msgs[key] = msg
+ if skey:
+ msgs[key].msgstr = self.msgs[skey].msgstr
+ msgs[key].is_fuzzy = True
+ # Add back all "old" and already commented messages as commented ones, if required.
+ if keep_old_commented:
+ for key, msg in self.msgs.items():
+ if key not in msgs:
+ msgs[key] = msg
+ msgs[key].is_commented = True
+ # And finalize the update!
+ self.msgs = msgs
+
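
Editor's note: update() above reuses an old translation for a renamed message by asking difflib.get_close_matches() for a close msgid and marking the reused entry fuzzy. The matching step in isolation, on a throwaway pool of invented msgids:

import difflib

old_msgids = {"Render Animation", "Render Image", "Bake Action"}
new_msgid = "Render Animations"

# cutoff plays the same role as update()'s use_similar threshold.
match = difflib.get_close_matches(new_msgid, old_msgids, n=1, cutoff=0.75)
print(match)  # ['Render Animation'] -> its msgstr would be copied over and the entry flagged fuzzy
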
+ def update_info(self):
+ self.trans_msgs.clear()
+ self.fuzzy_msgs.clear()
+ self.comm_msgs.clear()
+ self.ttip_msgs.clear()
+ self.contexts.clear()
+ self.nbr_signs = 0
+ self.nbr_trans_signs = 0
+ for key, msg in self.msgs.items():
+ if key == PO_HEADER_KEY:
+ continue
+ if msg.is_commented:
+ self.comm_msgs.add(key)
+ else:
+ if msg.msgstr:
+ self.trans_msgs.add(key)
+ if msg.is_fuzzy:
+ self.fuzzy_msgs.add(key)
+ if msg.is_tooltip:
+ self.ttip_msgs.add(key)
+ self.contexts.add(key[0])
+ self.nbr_signs += len(msg.msgid)
+ self.nbr_trans_signs += len(msg.msgstr)
+ self.nbr_msgs = len(self.msgs)
+ self.nbr_trans_msgs = len(self.trans_msgs)
+ self.nbr_ttips = len(self.ttip_msgs)
+ self.nbr_trans_ttips = len(self.ttip_msgs & self.trans_msgs)
+ self.nbr_comm_msgs = len(self.comm_msgs)
+
+ def print_stats(self, prefix=""):
+ """
+ Print out some stats about an I18nMessages object.
+ """
+ lvl = 0.0
+ lvl_ttips = 0.0
+ lvl_comm = 0.0
+ lvl_trans_ttips = 0.0
+ lvl_ttips_in_trans = 0.0
+ if self.nbr_msgs > 0:
+ lvl = float(self.nbr_trans_msgs) / float(self.nbr_msgs)
+ lvl_ttips = float(self.nbr_ttips) / float(self.nbr_msgs)
+ lvl_comm = float(self.nbr_comm_msgs) / float(self.nbr_msgs + self.nbr_comm_msgs)
+ if self.nbr_ttips > 0:
+ lvl_trans_ttips = float(self.nbr_trans_ttips) / float(self.nbr_ttips)
+ if self.nbr_trans_msgs > 0:
+ lvl_ttips_in_trans = float(self.nbr_trans_ttips) / float(self.nbr_trans_msgs)
+
+ lines = ("",
+ "{:>6.1%} done! ({} translated messages over {}).\n"
+ "".format(lvl, self.nbr_trans_msgs, self.nbr_msgs),
+ "{:>6.1%} of messages are tooltips ({} over {}).\n"
+ "".format(lvl_ttips, self.nbr_ttips, self.nbr_msgs),
+ "{:>6.1%} of tooltips are translated ({} over {}).\n"
+ "".format(lvl_trans_ttips, self.nbr_trans_ttips, self.nbr_ttips),
+ "{:>6.1%} of translated messages are tooltips ({} over {}).\n"
+ "".format(lvl_ttips_in_trans, self.nbr_trans_ttips, self.nbr_trans_msgs),
+ "{:>6.1%} of messages are commented ({} over {}).\n"
+ "".format(lvl_comm, self.nbr_comm_msgs, self.nbr_comm_msgs + self.nbr_msgs),
+ "This translation is currently made of {} signs.\n".format(self.nbr_trans_signs))
+ print(prefix.join(lines))
+
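
Editor's note: the stats lines in print_stats() rely on the "{:>6.1%}" format spec: the stored value is a plain 0-1 ratio and the formatter renders it as a right-aligned percentage with one decimal, e.g.:

print("{:>6.1%} done!".format(1234 / 5678))  # " 21.7% done!"
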
+ def parse(self, kind, key, src):
+ del self.parsing_errors[:]
+ self.parsers[kind](self, src, key)
+ if self.parsing_errors:
+ print("WARNING! Errors while parsing {}:".format(key))
+ for line, error in self.parsing_errors:
+ print(" Around line {}: {}".format(line, error))
+ print("The parser solved them as well as it could...")
+ self.update_info()
+
+ def parse_messages_from_po(self, src, key=None):
+ """
+ Parse a po file.
+        Note: This function will silently "arrange" mis-formatted entries, so calling write_messages() afterwards
+        should always produce a valid po file, though its content may not be correct!
+ """
+ reading_msgid = False
+ reading_msgstr = False
+ reading_msgctxt = False
+ reading_comment = False
+ is_commented = False
+ is_fuzzy = False
+ msgctxt_lines = []
msgid_lines = []
msgstr_lines = []
- msgctxt_lines = []
comment_lines = []
- def finalize_message():
- nonlocal reading_msgid, reading_msgstr, reading_msgctxt, \
- reading_comment, is_fuzzy, is_translated, is_commented, \
- msgid_lines, msgstr_lines, msgctxt_lines, comment_lines, \
- messages, translated_messages, fuzzy_messages, \
- commented_messages, \
- tot_messages, tot_tooltips, trans_messages, trans_tooltips, \
- comm_messages, nbr_signs, nbr_trans_signs, contexts
-
- msgid = "".join(msgid_lines)
- msgctxt = "".join(msgctxt_lines)
- msgkey = (msgctxt, msgid)
- is_ttip = is_tooltip(msgid)
-
- # Never allow overriding existing msgid/msgctxt pairs!
- if msgkey in messages:
- clean_vars()
- return
-
- nbr_signs += len(msgid)
- if is_commented:
- commented_messages.add(msgkey)
- elif is_fuzzy:
- fuzzy_messages.add(msgkey)
- elif is_translated:
- translated_messages.add(msgkey)
- nbr_trans_signs += len("".join(msgstr_lines))
- messages[msgkey] = {"msgid_lines" : msgid_lines,
- "msgstr_lines" : msgstr_lines,
- "comment_lines": comment_lines,
- "msgctxt_lines": msgctxt_lines}
-
- if is_commented:
- comm_messages += 1
- else:
- tot_messages += 1
- if is_ttip:
- tot_tooltips += 1
- if not is_fuzzy and is_translated:
- trans_messages += 1
- if is_ttip:
- trans_tooltips += 1
- if msgctxt not in contexts:
- contexts.add(msgctxt)
-
- clean_vars()
-
- with open(fname, 'r', encoding="utf-8") as f:
- for line_nr, line in enumerate(f):
- line = stripeol(line)
+ # Helper function
+ def finalize_message(self, line_nr):
+ nonlocal reading_msgid, reading_msgstr, reading_msgctxt, reading_comment
+ nonlocal is_commented, is_fuzzy, msgid_lines, msgstr_lines, msgctxt_lines, comment_lines
+
+ msgid = "".join(msgid_lines)
+ msgctxt = "".join(msgctxt_lines)
+ msgkey = (msgctxt, msgid)
+
+ # Never allow overriding existing msgid/msgctxt pairs!
+ if msgkey in self.msgs:
+ self.parsing_errors.append((line_nr, "{} context/msgid is already in current messages!".format(msgkey)))
+ return
+
+ self.msgs[msgkey] = I18nMessage(msgctxt_lines, msgid_lines, msgstr_lines, comment_lines,
+ is_commented, is_fuzzy)
+
+ # Let's clean up and get ready for next message!
+ reading_msgid = reading_msgstr = reading_msgctxt = reading_comment = False
+ is_commented = is_fuzzy = False
+ msgctxt_lines = []
+ msgid_lines = []
+ msgstr_lines = []
+ comment_lines = []
+
+ # try to use src as file name...
+ if os.path.exists(src):
+ if not key:
+ key = src
+ with open(src, 'r', encoding="utf-8") as f:
+ src = f.read()
+
+ # Try to use values from cache!
+ curr_hash = None
+ if key and key in self._parser_cache:
+ old_hash, msgs = self._parser_cache[key]
+ import hashlib
+ curr_hash = hashlib.new(PARSER_CACHE_HASH, src.encode()).digest()
+ if curr_hash == old_hash:
+ self.msgs = copy.deepcopy(msgs) # we might edit self.msgs!
+ return
+
+ _comm_msgctxt = PO_COMMENT_PREFIX_MSG + PO_MSGCTXT
+ _len_msgctxt = len(PO_MSGCTXT + '"')
+ _len_comm_msgctxt = len(_comm_msgctxt + '"')
+ _comm_msgid = PO_COMMENT_PREFIX_MSG + PO_MSGID
+ _len_msgid = len(PO_MSGID + '"')
+ _len_comm_msgid = len(_comm_msgid + '"')
+ _comm_msgstr = PO_COMMENT_PREFIX_MSG + PO_MSGSTR
+ _len_msgstr = len(PO_MSGSTR + '"')
+ _len_comm_msgstr = len(_comm_msgstr + '"')
+ _len_comm_str = len(PO_COMMENT_PREFIX_MSG + '"')
+
+ # Main loop over all lines in src...
+ for line_nr, line in enumerate(src.splitlines()):
if line == "":
- finalize_message()
+ if reading_msgstr:
+ finalize_message(self, line_nr)
+ continue
- elif line.startswith("msgctxt") or \
- line.startswith("".join((COMMENT_PREFIX, "msgctxt"))):
+ elif line.startswith(PO_MSGCTXT) or line.startswith(_comm_msgctxt):
reading_comment = False
reading_ctxt = True
- if line.startswith(COMMENT_PREFIX):
+ if line.startswith(PO_COMMENT_PREFIX_MSG):
is_commented = True
- line = line[9 + len(COMMENT_PREFIX):-1]
+ line = line[_len_comm_msgctxt:-1]
else:
- line = line[9:-1]
+ line = line[_len_msgctxt:-1]
msgctxt_lines.append(line)
- elif line.startswith("msgid") or \
- line.startswith("".join((COMMENT_PREFIX, "msgid"))):
+ elif line.startswith(PO_MSGID) or line.startswith(_comm_msgid):
reading_comment = False
reading_msgid = True
- if line.startswith(COMMENT_PREFIX):
+ if line.startswith(PO_COMMENT_PREFIX_MSG):
+ if not is_commented and reading_ctxt:
+ self.parsing_errors.append((line_nr, "commented msgid following regular msgctxt"))
is_commented = True
- line = line[7 + len(COMMENT_PREFIX):-1]
+ line = line[_len_comm_msgid:-1]
else:
- line = line[7:-1]
+ line = line[_len_msgid:-1]
+ reading_ctxt = False
msgid_lines.append(line)
- elif line.startswith("msgstr") or \
- line.startswith("".join((COMMENT_PREFIX, "msgstr"))):
+ elif line.startswith(PO_MSGSTR) or line.startswith(_comm_msgstr):
if not reading_msgid:
- is_broken = True
+ self.parsing_errors.append((line_nr, "msgstr without a prior msgid"))
else:
reading_msgid = False
reading_msgstr = True
- if line.startswith(COMMENT_PREFIX):
- line = line[8 + len(COMMENT_PREFIX):-1]
+ if line.startswith(PO_COMMENT_PREFIX_MSG):
+ line = line[_len_comm_msgstr:-1]
if not is_commented:
- is_broken = True
+ self.parsing_errors.append((line_nr, "commented msgstr following regular msgid"))
else:
- line = line[8:-1]
+ line = line[_len_msgstr:-1]
if is_commented:
- is_broken = True
+ self.parsing_errors.append((line_nr, "regular msgstr following commented msgid"))
msgstr_lines.append(line)
- if line:
- is_translated = True
- elif line.startswith("#"):
- if reading_msgid:
- if is_commented:
- msgid_lines.append(line[1 + len(COMMENT_PREFIX):-1])
- else:
- msgid_lines.append(line)
- is_broken = True
- elif reading_msgstr:
- if is_commented:
- msgstr_lines.append(line[1 + len(COMMENT_PREFIX):-1])
- else:
- msgstr_lines.append(line)
- is_broken = True
+ elif line.startswith(PO_COMMENT_PREFIX[0]):
+ if line.startswith(PO_COMMENT_PREFIX_MSG):
+ if reading_msgctxt:
+ if is_commented:
+ msgctxt_lines.append(line[_len_comm_str:-1])
+ else:
+ msgctxt_lines.append(line)
+ self.parsing_errors.append((line_nr, "commented string while reading regular msgctxt"))
+ elif reading_msgid:
+ if is_commented:
+ msgid_lines.append(line[_len_comm_str:-1])
+ else:
+ msgid_lines.append(line)
+ self.parsing_errors.append((line_nr, "commented string while reading regular msgid"))
+ elif reading_msgstr:
+ if is_commented:
+ msgstr_lines.append(line[_len_comm_str:-1])
+ else:
+ msgstr_lines.append(line)
+ self.parsing_errors.append((line_nr, "commented string while reading regular msgstr"))
else:
- if line.startswith("#, fuzzy"):
+ if reading_msgctxt or reading_msgid or reading_msgstr:
+ self.parsing_errors.append((line_nr,
+ "commented string within msgctxt, msgid or msgstr scope, ignored"))
+ elif line.startswith(PO_COMMENT_FUZZY):
is_fuzzy = True
else:
comment_lines.append(line)
reading_comment = True
else:
- if reading_msgid:
+ if reading_msgctxt:
+ msgctxt_lines.append(line[1:-1])
+ elif reading_msgid:
msgid_lines.append(line[1:-1])
elif reading_msgstr:
line = line[1:-1]
msgstr_lines.append(line)
- if not is_translated and line:
- is_translated = True
else:
- is_broken = True
+ self.parsing_errors.append((line_nr, "regular string outside msgctxt, msgid or msgstr scope"))
+ #self.parsing_errors += (str(comment_lines), str(msgctxt_lines), str(msgid_lines), str(msgstr_lines))
# If no final empty line, last message is not finalized!
if reading_msgstr:
- finalize_message()
-
- return (messages,
- {"trans_msg": translated_messages,
- "fuzzy_msg": fuzzy_messages,
- "comm_msg" : commented_messages,
- "is_broken": is_broken},
- {"tot_msg" : tot_messages,
- "trans_msg" : trans_messages,
- "tot_ttips" : tot_tooltips,
- "trans_ttips" : trans_tooltips,
- "comm_msg" : comm_messages,
- "nbr_signs" : nbr_signs,
- "nbr_trans_signs": nbr_trans_signs,
- "contexts" : contexts})
-
-
-def write_messages(fname, messages, commented, fuzzy):
- "Write in fname file the content of messages (similar to parse_messages " \
- "returned values). commented and fuzzy are two sets containing msgid. " \
- "Returns the number of written messages."
- num = 0
- with open(fname, 'w', encoding="utf-8") as f:
- for msgkey, val in messages.items():
- msgctxt, msgid = msgkey
- f.write("\n".join(val["comment_lines"]))
- # Only mark as fuzzy if msgstr is not empty!
- if msgkey in fuzzy and "".join(val["msgstr_lines"]):
- f.write("\n#, fuzzy")
- if msgkey in commented:
- if msgctxt:
- f.write("\n{}msgctxt \"".format(COMMENT_PREFIX))
- f.write("\"\n{}\"".format(COMMENT_PREFIX).join(
- val["msgctxt_lines"]))
- f.write("\"")
- f.write("\n{}msgid \"".format(COMMENT_PREFIX))
- f.write("\"\n{}\"".format(COMMENT_PREFIX).join(
- val["msgid_lines"]))
- f.write("\"\n{}msgstr \"".format(COMMENT_PREFIX))
- f.write("\"\n{}\"".format(COMMENT_PREFIX).join(
- val["msgstr_lines"]))
- f.write("\"\n\n")
- else:
- if msgctxt:
- f.write("\nmsgctxt \"")
- f.write("\"\n\"".join(val["msgctxt_lines"]))
- f.write("\"")
- f.write("\nmsgid \"")
- f.write("\"\n\"".join(val["msgid_lines"]))
- f.write("\"\nmsgstr \"")
- f.write("\"\n\"".join(val["msgstr_lines"]))
- f.write("\"\n\n")
- num += 1
- return num
-
-
-def gen_empty_messages(blender_rev, time_str, year_str):
- """Generate an empty messages & state data (only header if present!)."""
- header_key = ("", "")
-
- messages = new_messages()
- messages[header_key] = {
- "msgid_lines": [""],
- "msgctxt_lines": [],
- "msgstr_lines": [
- "Project-Id-Version: Blender r{}\\n"
- "".format(blender_rev),
- "Report-Msgid-Bugs-To: \\n",
- "POT-Creation-Date: {}\\n"
- "".format(time_str),
- "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n",
- "Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n",
- "Language-Team: LANGUAGE <LL@li.org>\\n",
- "Language: \\n",
- "MIME-Version: 1.0\\n",
- "Content-Type: text/plain; charset=UTF-8\\n",
- "Content-Transfer-Encoding: 8bit\\n"
- ],
- "comment_lines": [
- "# Blender's translation file (po format).",
- "# Copyright (C) {} The Blender Foundation."
- "".format(year_str),
- "# This file is distributed under the same "
- "# license as the Blender package.",
- "# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.",
- "#",
- ],
- }
+ finalize_message(self, line_nr)
+
+ if key:
+ if not curr_hash:
+ import hashlib
+ curr_hash = hashlib.new(PARSER_CACHE_HASH, src.encode()).digest()
+ self._parser_cache[key] = (curr_hash, self.msgs)
+
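
Editor's note: the caching above hashes the raw .po text with the algorithm named by settings.PARSER_CACHE_HASH and re-parses only when the digest changes, handing out a deep copy on a hit so later edits cannot corrupt the cached messages. A stripped-down version of the pattern, with "sha1" hard-coded and a trivial parse step, both assumptions made only for the sake of the example:

import copy
import hashlib

_cache = {}  # key -> (digest of the source text, parsed result)


def parse_cached(key, src, parse):
    digest = hashlib.new("sha1", src.encode()).digest()
    if key in _cache and _cache[key][0] == digest:
        # Cache hit: hand out a copy so callers may mutate their result freely.
        return copy.deepcopy(_cache[key][1])
    result = parse(src)
    _cache[key] = (digest, result)
    return result


lines = parse_cached("demo.po", 'msgid "Cancel"\nmsgstr ""\n', str.splitlines)
print(lines)
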
+ def write(self, kind, dest):
+ self.writers[kind](self, dest)
+
+ def write_messages_to_po(self, fname):
+ """
+ Write messages in fname po file.
+ """
+ self.normalize(max_len=0) # No wrapping for now...
+ with open(fname, 'w', encoding="utf-8") as f:
+ for msg in self.msgs.values():
+ f.write("\n".join(msg.comment_lines))
+ # Only mark as fuzzy if msgstr is not empty!
+ if msg.is_fuzzy and msg.msgstr:
+ f.write("\n" + PO_COMMENT_FUZZY)
+ _p = PO_COMMENT_PREFIX_MSG if msg.is_commented else ""
+ _pmsgctxt = _p + PO_MSGCTXT
+ _pmsgid = _p + PO_MSGID
+ _pmsgstr = _p + PO_MSGSTR
+ chunks = []
+ if msg.msgctxt:
+ if len(msg.msgctxt_lines) > 1:
+ chunks += [
+ "\n" + _pmsgctxt + "\"\"\n" + _p + "\"",
+ ("\"\n" + _p + "\"").join(msg.msgctxt_lines),
+ "\"",
+ ]
+ else:
+ chunks += ["\n" + _pmsgctxt + "\"" + msg.msgctxt + "\""]
+ if len(msg.msgid_lines) > 1:
+ chunks += [
+ "\n" + _pmsgid + "\"\"\n" + _p + "\"",
+ ("\"\n" + _p + "\"").join(msg.msgid_lines),
+ "\"",
+ ]
+ else:
+ chunks += ["\n" + _pmsgid + "\"" + msg.msgid + "\""]
+ if len(msg.msgstr_lines) > 1:
+ chunks += [
+ "\n" + _pmsgstr + "\"\"\n" + _p + "\"",
+ ("\"\n" + _p + "\"").join(msg.msgstr_lines),
+ "\"",
+ ]
+ else:
+ chunks += ["\n" + _pmsgstr + "\"" + msg.msgstr + "\""]
+ chunks += ["\n\n"]
+ f.write("".join(chunks))
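
Editor's note: write_messages_to_po() prefixes every line of a commented-out entry and falls back to the usual msgid "" plus continuation-line form whenever a string spans several lines. A minimal formatter for a single entry, using a literal "#~ " prefix and bare keywords where the real code reads its prefixes from settings:

def format_po_entry(msgctxt_lines, msgid_lines, msgstr_lines, commented=False):
    p = "#~ " if commented else ""  # assumed comment prefix; the real one comes from settings

    def block(keyword, lines):
        if len(lines) > 1:
            head = p + keyword + ' ""\n'
            return head + "\n".join(p + '"' + l + '"' for l in lines) + "\n"
        return p + keyword + ' "' + lines[0] + '"\n'

    out = ""
    if "".join(msgctxt_lines):
        out += block("msgctxt", msgctxt_lines)
    out += block("msgid", msgid_lines)
    out += block("msgstr", msgstr_lines)
    return out


print(format_po_entry([], ["Very long tooltip, ", "split over two lines"], [""]))
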
- states = {"trans_msg": set(),
- "fuzzy_msg": {header_key},
- "comm_msg": set(),
- "is_broken": False}
+ parsers = {
+ "PO": parse_messages_from_po,
+# "PYTUPLE": parse_messages_from_pytuple,
+ }
- return messages, states
+ writers = {
+ "PO": write_messages_to_po,
+ #"PYDICT": write_messages_to_pydict,
+ }
-def print_stats(stats, glob_stats=None, prefix=""):
+class I18n:
"""
- Print out some stats about a po file.
- glob_stats is for making global stats over several po's.
+ Internal representation of a whole translation set.
"""
- tot_msgs = stats["tot_msg"]
- trans_msgs = stats["trans_msg"]
- tot_ttips = stats["tot_ttips"]
- trans_ttips = stats["trans_ttips"]
- comm_msgs = stats["comm_msg"]
- nbr_signs = stats["nbr_signs"]
- nbr_trans_signs = stats["nbr_trans_signs"]
- contexts = stats["contexts"]
- lvl = lvl_ttips = lvl_trans_ttips = lvl_ttips_in_trans = lvl_comm = 0.0
-
- if tot_msgs > 0:
- lvl = float(trans_msgs) / float(tot_msgs)
- lvl_ttips = float(tot_ttips) / float(tot_msgs)
- lvl_comm = float(comm_msgs) / float(tot_msgs+comm_msgs)
- if tot_ttips > 0:
- lvl_trans_ttips = float(trans_ttips) / float(tot_ttips)
- if trans_msgs > 0:
- lvl_ttips_in_trans = float(trans_ttips) / float(trans_msgs)
-
- if glob_stats:
- glob_stats["nbr"] += 1.0
- glob_stats["lvl"] += lvl
- glob_stats["lvl_ttips"] += lvl_ttips
- glob_stats["lvl_trans_ttips"] += lvl_trans_ttips
- glob_stats["lvl_ttips_in_trans"] += lvl_ttips_in_trans
- glob_stats["lvl_comm"] += lvl_comm
- glob_stats["nbr_trans_signs"] += nbr_trans_signs
- if glob_stats["nbr_signs"] == 0:
- glob_stats["nbr_signs"] = nbr_signs
- glob_stats["contexts"] |= contexts
-
- lines = ("",
- "{:>6.1%} done! ({} translated messages over {}).\n"
- "".format(lvl, trans_msgs, tot_msgs),
- "{:>6.1%} of messages are tooltips ({} over {}).\n"
- "".format(lvl_ttips, tot_ttips, tot_msgs),
- "{:>6.1%} of tooltips are translated ({} over {}).\n"
- "".format(lvl_trans_ttips, trans_ttips, tot_ttips),
- "{:>6.1%} of translated messages are tooltips ({} over {}).\n"
- "".format(lvl_ttips_in_trans, trans_ttips, trans_msgs),
- "{:>6.1%} of messages are commented ({} over {}).\n"
- "".format(lvl_comm, comm_msgs, comm_msgs + tot_msgs),
- "This translation is currently made of {} signs.\n"
- "".format(nbr_trans_signs))
- print(prefix.join(lines))
- return 0
+
+ def __init__(self, src):
+ self.trans = {}
+ self.update_info()
+
+ def update_info(self):
+ self.nbr_trans = 0
+ self.lvl = 0.0
+ self.lvl_ttips = 0.0
+ self.lvl_trans_ttips = 0.0
+ self.lvl_ttips_in_trans = 0.0
+ self.lvl_comm = 0.0
+ self.nbr_signs = 0
+ self.nbr_trans_signs = 0
+ self.contexts = set()
+
+ if TEMPLATE_ISO_ID in self.trans:
+ self.nbr_trans = len(self.trans) - 1
+ self.nbr_signs = self.trans[TEMPLATE_ISO_ID].nbr_signs
+ else:
+ self.nbr_trans = len(self.trans)
+ for iso, msgs in self.trans.items():
+ msgs.update_info()
+ if msgs.nbr_msgs > 0:
+ self.lvl += float(msgs.nbr_trans_msgs) / float(msgs.nbr_msgs)
+ self.lvl_ttips += float(msgs.nbr_ttips) / float(msgs.nbr_msgs)
+ self.lvl_comm += float(msgs.nbr_comm_msgs) / float(msgs.nbr_msgs + msgs.nbr_comm_msgs)
+ if msgs.nbr_ttips > 0:
+ self.lvl_trans_ttips = float(msgs.nbr_trans_ttips) / float(msgs.nbr_ttips)
+ if msgs.nbr_trans_msgs > 0:
+ self.lvl_ttips_in_trans = float(msgs.nbr_trans_ttips) / float(msgs.nbr_trans_msgs)
+ if self.nbr_signs == 0:
+ self.nbr_signs = msgs.nbr_signs
+ self.nbr_trans_signs += msgs.nbr_trans_signs
+ self.contexts |= msgs.contexts
+
+ def print_stats(self, prefix="", print_msgs=True):
+ """
+ Print out some stats about an I18n object.
+ If print_msgs is True, it will also print all its translations' stats.
+ """
+ if print_msgs:
+ msgs_prefix = prefix + " "
+ for key, msgs in self.trans:
+ if key == TEMPLATE_ISO_ID:
+ continue
+ print(prefix + key + ":")
+ msgs.print_stats(prefix=msgs_prefix)
+ print(prefix)
+
+ nbr_contexts = len(self.contexts - {CONTEXT_DEFAULT})
+ if nbr_contexts != 1:
+ if nbr_contexts == 0:
+ nbr_contexts = "No"
+ _ctx_txt = "s are"
+ else:
+ _ctx_txt = " is"
+ lines = ("",
+ "Average stats for all {} translations:\n".format(self.nbr_trans),
+ " {:>6.1%} done!\n".format(self.lvl / self.nbr_trans),
+ " {:>6.1%} of messages are tooltips.\n".format(self.lvl_ttips / self.nbr_trans),
+ " {:>6.1%} of tooltips are translated.\n".format(self.lvl_trans_ttips / self.nbr_trans),
+ " {:>6.1%} of translated messages are tooltips.\n".format(self.lvl_ttips_in_trans / self.nbr_trans),
+ " {:>6.1%} of messages are commented.\n".format(self.lvl_comm / self.nbr_trans),
+ " The org msgids are currently made of {} signs.\n".format(self.nbr_signs),
+ " All processed translations are currently made of {} signs.\n".format(self.nbr_trans_signs),
+ " {} specific context{} present:\n {}\n"
+ "".format(self.nbr_contexts, _ctx_txt, "\n ".join(self.contexts - {CONTEXT_DEFAULT})),
+ "\n")
+ print(prefix.join(lines))
+
+
+##### Parsers #####
+
+#def parse_messages_from_pytuple(self, src, key=None):
+ #"""
+ #Returns a dict of tuples similar to the one returned by parse_messages_from_po (one per language, plus a 'pot'
+ #one keyed as '__POT__').
+ #"""
+ ## src may be either a string to be interpreted as py code, or a real tuple!
+ #if isinstance(src, str):
+ #src = eval(src)
+#
+ #curr_hash = None
+ #if key and key in _parser_cache:
+ #old_hash, ret = _parser_cache[key]
+ #import hashlib
+ #curr_hash = hashlib.new(PARSER_CACHE_HASH, str(src).encode()).digest()
+ #if curr_hash == old_hash:
+ #return ret
+#
+ #pot = new_messages()
+ #states = gen_states()
+ #stats = gen_stats()
+ #ret = {"__POT__": (pot, states, stats)}
+ #for msg in src:
+ #key = msg[0]
+ #messages[msgkey] = gen_message(msgid_lines, msgstr_lines, comment_lines, msgctxt_lines)
+ #pot[key] = gen_message(msgid_lines=[key[1]], msgstr_lines=[
+ #for lang, trans, (is_fuzzy, comments) in msg[2:]:
+ #if trans and not is_fuzzy:
+ #i18n_dict.setdefault(lang, dict())[key] = trans
+#
+ #if key:
+ #if not curr_hash:
+ #import hashlib
+ #curr_hash = hashlib.new(PARSER_CACHE_HASH, str(src).encode()).digest()
+ #_parser_cache[key] = (curr_hash, val)
+ #return ret \ No newline at end of file
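
Editor's note: the PYTUPLE parser at the end of utils.py is left commented out and unfinished in this patch. As a rough idea of the direction only, here is a sketch that turns a tuple-based source into per-language dicts; the record layout (msgid, context, then (lang, msgstr, is_fuzzy) triples) is assumed purely for illustration and is not necessarily what the final code will use:

def parse_pytuple(src):
    """Split tuple records into {lang: {(context, msgid): msgstr}} (hypothetical record layout)."""
    trans = {}
    for msgid, context, *per_lang in src:
        for lang, msgstr, is_fuzzy in per_lang:
            if msgstr and not is_fuzzy:
                trans.setdefault(lang, {})[(context, msgid)] = msgstr
    return trans


records = (
    ("Cancel", "*", ("fr", "Annuler", False), ("es", "Cancelar", False)),
    ("Render", "*", ("fr", "Rendu", True)),
)
print(parse_pytuple(records))
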
diff --git a/release/scripts/modules/bpy/path.py b/release/scripts/modules/bpy/path.py
index 69ed4314f6f..6c91568cbc1 100644
--- a/release/scripts/modules/bpy/path.py
+++ b/release/scripts/modules/bpy/path.py
@@ -47,6 +47,7 @@ from _bpy_path import (extensions_audio,
extensions_image,
)
+
def abspath(path, start=None, library=None):
"""
Returns the absolute path relative to the current blend file
diff --git a/release/scripts/modules/bpy/utils.py b/release/scripts/modules/bpy/utils.py
index 25fe6c1d4e5..7b5de231b4b 100644
--- a/release/scripts/modules/bpy/utils.py
+++ b/release/scripts/modules/bpy/utils.py
@@ -237,7 +237,8 @@ def load_scripts(reload_scripts=False, refresh_scripts=False):
_addon_utils.reset_all(reload_scripts)
# run the active integration preset
- filepath = preset_find(_user_preferences.inputs.active_keyconfig, "keyconfig")
+ filepath = preset_find(_user_preferences.inputs.active_keyconfig,
+ "keyconfig")
if filepath:
keyconfig_set(filepath)
diff --git a/release/scripts/modules/bpy_extras/anim_utils.py b/release/scripts/modules/bpy_extras/anim_utils.py
index 7a5d1692971..20a9a412f26 100644
--- a/release/scripts/modules/bpy_extras/anim_utils.py
+++ b/release/scripts/modules/bpy_extras/anim_utils.py
@@ -169,15 +169,16 @@ def bake_action(frame_start,
euler_prev = None
for (f, matrix) in zip(frame_range, obj_info):
- obj.matrix_basis = matrix[name]
+ name = "Action Bake" # XXX: placeholder
+ obj.matrix_basis = matrix
- obj.keyframe_insert("location", -1, f, options)
+ obj.keyframe_insert("location", -1, f, name, options)
rotation_mode = obj.rotation_mode
if rotation_mode == 'QUATERNION':
- obj.keyframe_insert("rotation_quaternion", -1, f, options)
+ obj.keyframe_insert("rotation_quaternion", -1, f, name, options)
elif rotation_mode == 'AXIS_ANGLE':
- obj.keyframe_insert("rotation_axis_angle", -1, f, options)
+ obj.keyframe_insert("rotation_axis_angle", -1, f, name, options)
else: # euler, XYZ, ZXY etc
if euler_prev is not None:
euler = obj.rotation_euler.copy()
@@ -187,9 +188,9 @@ def bake_action(frame_start,
del euler
else:
euler_prev = obj.rotation_euler.copy()
- obj.keyframe_insert("rotation_euler", -1, f, options)
+ obj.keyframe_insert("rotation_euler", -1, f, name, options)
- obj.keyframe_insert("scale", -1, f, options)
+ obj.keyframe_insert("scale", -1, f, name, options)
# -------------------------------------------------------------------------
# Clean
diff --git a/release/scripts/modules/bpy_extras/io_utils.py b/release/scripts/modules/bpy_extras/io_utils.py
index 4457ecb43e6..21bad5ec1e4 100644
--- a/release/scripts/modules/bpy_extras/io_utils.py
+++ b/release/scripts/modules/bpy_extras/io_utils.py
@@ -79,20 +79,23 @@ class ExportHelper:
return {'RUNNING_MODAL'}
def check(self, context):
+ import os
change_ext = False
change_axis = _check_axis_conversion(self)
check_extension = self.check_extension
if check_extension is not None:
- filepath = bpy.path.ensure_ext(self.filepath,
- self.filename_ext
- if check_extension
- else "")
+ filepath = self.filepath
+ if os.path.basename(filepath):
+ filepath = bpy.path.ensure_ext(filepath,
+ self.filename_ext
+ if check_extension
+ else "")
- if filepath != self.filepath:
- self.filepath = filepath
- change_ext = True
+ if filepath != self.filepath:
+ self.filepath = filepath
+ change_ext = True
return (change_ext or change_axis)
@@ -338,7 +341,7 @@ path_reference_mode = EnumProperty(
('COPY', "Copy", "Copy the file to the destination path "
"(or subdirectory)"),
),
- default='AUTO'
+ default='AUTO',
)
@@ -477,10 +480,10 @@ def unique_name(key, name, name_dict, name_max=-1, clean_func=None, sep="."):
while name_new in name_dict_values:
count_str = "%03d" % count
name_new = "%.*s%s%s" % (name_max - (len(count_str) + 1),
- name_new_orig,
- sep,
- count_str,
- )
+ name_new_orig,
+ sep,
+ count_str,
+ )
count += 1
name_dict[key] = name_new
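
Editor's note: the ExportHelper.check() change above appends the export extension only when the chosen path actually ends in a file name, so browsing into a directory no longer yields names like ".obj". The guard on its own, with a tiny stand-in for bpy.path.ensure_ext():

import os


def ensure_ext(filepath, ext):
    """Tiny stand-in for bpy.path.ensure_ext(): append ext unless it is already there."""
    return filepath if filepath.lower().endswith(ext.lower()) else filepath + ext


def checked_filepath(filepath, filename_ext=".obj"):
    if os.path.basename(filepath):   # only touch paths that end in a real file name
        return ensure_ext(filepath, filename_ext)
    return filepath                  # a bare directory is left alone


print(checked_filepath("/tmp/export/scene"))  # /tmp/export/scene.obj
print(checked_filepath("/tmp/export/"))       # /tmp/export/
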
diff --git a/release/scripts/modules/bpy_extras/mesh_utils.py b/release/scripts/modules/bpy_extras/mesh_utils.py
index 0166f954dc9..8b93b5922e9 100644
--- a/release/scripts/modules/bpy_extras/mesh_utils.py
+++ b/release/scripts/modules/bpy_extras/mesh_utils.py
@@ -247,7 +247,7 @@ def edge_loops_from_tessfaces(mesh, tessfaces=None, seams=()):
break
i = ed_adj.index(context_loop[-2])
- context_loop.append(ed_adj[not i])
+ context_loop.append(ed_adj[not i])
# Dont look at this again
del ed_adj[:]
@@ -530,12 +530,12 @@ def face_random_points(num_points, tessfaces):
tris.append((verts[fv[0]].co,
verts[fv[1]].co,
verts[fv[2]].co,
- ))
+ ))
if len(fv) == 4:
tris.append((verts[fv[0]].co,
verts[fv[3]].co,
verts[fv[2]].co,
- ))
+ ))
tri_faces.append(tris)
# For each face, generate the required number of random points
diff --git a/release/scripts/modules/bpy_extras/object_utils.py b/release/scripts/modules/bpy_extras/object_utils.py
index 4e1385cff80..5797b8b7bf3 100644
--- a/release/scripts/modules/bpy_extras/object_utils.py
+++ b/release/scripts/modules/bpy_extras/object_utils.py
@@ -123,7 +123,7 @@ def object_data_add(context, obdata, operator=None, use_active_layer=True):
base.layers[scene.active_layer] = True
else:
base.layers = [True if i == scene.active_layer
- else False for i in range(len(scene.layers))]
+ else False for i in range(len(scene.layers))]
if v3d:
base.layers_from_view(context.space_data)
diff --git a/release/scripts/modules/bpy_extras/view3d_utils.py b/release/scripts/modules/bpy_extras/view3d_utils.py
index b2f366d5d1e..7a075e93e1a 100644
--- a/release/scripts/modules/bpy_extras/view3d_utils.py
+++ b/release/scripts/modules/bpy_extras/view3d_utils.py
@@ -50,7 +50,7 @@ def region_2d_to_vector_3d(region, rv3d, coord):
out = Vector(((2.0 * coord[0] / region.width) - 1.0,
(2.0 * coord[1] / region.height) - 1.0,
-0.5
- ))
+ ))
w = out.dot(persinv[3].xyz) + persinv[3][3]
@@ -89,7 +89,7 @@ def region_2d_to_origin_3d(region, rv3d, coord):
persinv = persmat.inverted()
origin_start = ((persinv.col[0].xyz * dx) +
(persinv.col[1].xyz * dy) +
- viewinv.translation)
+ viewinv.translation)
return origin_start
diff --git a/release/scripts/modules/bpy_restrict_state.py b/release/scripts/modules/bpy_restrict_state.py
index 21c69212731..4aa3c5de573 100644
--- a/release/scripts/modules/bpy_restrict_state.py
+++ b/release/scripts/modules/bpy_restrict_state.py
@@ -28,13 +28,21 @@ __all__ = (
import bpy as _bpy
+
class _RestrictContext():
__slots__ = ()
_real_data = _bpy.data
+ # safe, the pointer never changes
+ _real_pref = _bpy.context.user_preferences
+
@property
def window_manager(self):
return self._real_data.window_managers[0]
+ @property
+ def user_preferences(self):
+ return self._real_pref
+
class _RestrictData():
__slots__ = ()
@@ -46,6 +54,7 @@ _data_restrict = _RestrictData()
class RestrictBlend():
__slots__ = ("context", "data")
+
def __enter__(self):
self.data = _bpy.data
self.context = _bpy.context
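
Editor's note: _RestrictContext now also exposes user_preferences, which is safe because that pointer never changes even while .blend data access is blocked during registration. The enclosing RestrictBlend class is an ordinary swap-and-restore context manager; a generic sketch of that shape, with placeholder objects instead of bpy:

import types


class _Restricted:
    """Placeholder standing in for the restricted context/data proxies."""
    def __getattr__(self, name):
        raise AttributeError("not available while scripts are being registered: " + name)


class RestrictAccess:
    """Swap module-level state for a restricted proxy inside a with-block, then restore it."""
    def __init__(self, module):
        self.module = module

    def __enter__(self):
        self.saved = self.module.state
        self.module.state = _Restricted()

    def __exit__(self, exc_type, exc_value, tb):
        self.module.state = self.saved


mod = types.SimpleNamespace(state="real data")
with RestrictAccess(mod):
    print(type(mod.state).__name__)  # _Restricted
print(mod.state)                     # real data
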
diff --git a/release/scripts/modules/bpy_types.py b/release/scripts/modules/bpy_types.py
index 1c861aa3be2..9161cc20d2c 100644
--- a/release/scripts/modules/bpy_types.py
+++ b/release/scripts/modules/bpy_types.py
@@ -484,13 +484,16 @@ class Text(bpy_types.ID):
if cont.type == 'PYTHON']
)
+
class NodeSocket(StructRNA): # , metaclass=RNAMeta
__slots__ = ()
@property
def links(self):
"""List of node links from or to this socket"""
- return tuple(link for link in self.id_data.links if link.from_socket == self or link.to_socket == self)
+ return tuple(link for link in self.id_data.links
+ if (link.from_socket == self or
+ link.to_socket == self))
# values are module: [(cls, path, line), ...]
@@ -612,6 +615,10 @@ class KeyingSetInfo(StructRNA, metaclass=RNAMeta):
__slots__ = ()
+class AddonPreferences(StructRNA, metaclass=RNAMeta):
+ __slots__ = ()
+
+
class _GenericUI:
__slots__ = ()
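
Editor's note: the reformatted NodeSocket.links property above is simply a filter over the node tree's links, keeping those whose from_socket or to_socket is this socket. The same selection written against two tiny stand-in classes instead of the real RNA types:

class Link:
    def __init__(self, from_socket, to_socket):
        self.from_socket = from_socket
        self.to_socket = to_socket


class Socket:
    def __init__(self, tree_links):
        self._tree_links = tree_links  # stands in for self.id_data.links

    @property
    def links(self):
        return tuple(link for link in self._tree_links
                     if (link.from_socket is self or link.to_socket is self))


a, b = object(), object()
links = []
s = Socket(links)
links.extend([Link(s, a), Link(b, s), Link(a, b)])
print(len(s.links))  # 2
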
diff --git a/release/scripts/modules/rna_prop_ui.py b/release/scripts/modules/rna_prop_ui.py
index 32c8ed11bc5..12438795539 100644
--- a/release/scripts/modules/rna_prop_ui.py
+++ b/release/scripts/modules/rna_prop_ui.py
@@ -92,6 +92,9 @@ def draw(layout, context, context_member, property_type, use_edit=True):
if not rna_item:
return
+ if rna_item.id_data.library is not None:
+ use_edit = False
+
assert(isinstance(rna_item, property_type))
items = rna_item.items()
diff --git a/release/scripts/presets/keyconfig/maya.py b/release/scripts/presets/keyconfig/maya.py
index ec8efc8d371..cea2f451305 100644
--- a/release/scripts/presets/keyconfig/maya.py
+++ b/release/scripts/presets/keyconfig/maya.py
@@ -133,42 +133,42 @@ kmi = km.keymap_items.new('wm.context_toggle_enum', 'Z', 'PRESS', alt=True)
kmi.properties.data_path = 'space_data.viewport_shade'
kmi.properties.value_1 = 'TEXTURED'
kmi.properties.value_2 = 'SOLID'
-kmi = km.keymap_items.new('view3d.select', 'SELECTMOUSE', 'RELEASE')
+kmi = km.keymap_items.new('view3d.select', 'SELECTMOUSE', 'CLICK')
kmi.properties.extend = False
kmi.properties.center = False
kmi.properties.object = False
kmi.properties.enumerate = False
-kmi = km.keymap_items.new('view3d.select', 'SELECTMOUSE', 'RELEASE', shift=True)
+kmi = km.keymap_items.new('view3d.select', 'SELECTMOUSE', 'CLICK', shift=True)
kmi.properties.extend = True
kmi.properties.center = False
kmi.properties.object = False
kmi.properties.enumerate = False
-kmi = km.keymap_items.new('view3d.select', 'SELECTMOUSE', 'RELEASE', ctrl=True)
+kmi = km.keymap_items.new('view3d.select', 'SELECTMOUSE', 'CLICK', ctrl=True)
kmi.properties.extend = False
kmi.properties.center = True
kmi.properties.object = False
kmi.properties.enumerate = False
-kmi = km.keymap_items.new('view3d.select', 'SELECTMOUSE', 'RELEASE', alt=True)
+kmi = km.keymap_items.new('view3d.select', 'SELECTMOUSE', 'CLICK', alt=True)
kmi.properties.extend = False
kmi.properties.center = False
kmi.properties.object = False
kmi.properties.enumerate = True
-kmi = km.keymap_items.new('view3d.select', 'SELECTMOUSE', 'RELEASE', shift=True, ctrl=True)
+kmi = km.keymap_items.new('view3d.select', 'SELECTMOUSE', 'CLICK', shift=True, ctrl=True)
kmi.properties.extend = True
kmi.properties.center = True
kmi.properties.object = False
kmi.properties.enumerate = False
-kmi = km.keymap_items.new('view3d.select', 'SELECTMOUSE', 'RELEASE', ctrl=True, alt=True)
+kmi = km.keymap_items.new('view3d.select', 'SELECTMOUSE', 'CLICK', ctrl=True, alt=True)
kmi.properties.extend = False
kmi.properties.center = True
kmi.properties.object = False
kmi.properties.enumerate = True
-kmi = km.keymap_items.new('view3d.select', 'SELECTMOUSE', 'RELEASE', shift=True, alt=True)
+kmi = km.keymap_items.new('view3d.select', 'SELECTMOUSE', 'CLICK', shift=True, alt=True)
kmi.properties.extend = True
kmi.properties.center = False
kmi.properties.object = False
kmi.properties.enumerate = True
-kmi = km.keymap_items.new('view3d.select', 'SELECTMOUSE', 'RELEASE', shift=True, ctrl=True, alt=True)
+kmi = km.keymap_items.new('view3d.select', 'SELECTMOUSE', 'CLICK', shift=True, ctrl=True, alt=True)
kmi.properties.extend = True
kmi.properties.center = True
kmi.properties.object = False
@@ -218,7 +218,9 @@ kmi.properties.use = True
kmi = km.keymap_items.new('transform.mirror', 'M', 'PRESS', ctrl=True)
kmi = km.keymap_items.new('wm.context_toggle', 'TAB', 'PRESS', shift=True)
kmi.properties.data_path = 'tool_settings.use_snap'
-kmi = km.keymap_items.new('transform.snap_type', 'TAB', 'PRESS', shift=True, ctrl=True)
+kmi = km.keymap_items.new('WM_OT_context_menu_enum', 'TAB', 'PRESS', shift=True, ctrl=True);
+kmi.properties.data_path = 'tool_settings.snap_element'
+
kmi = km.keymap_items.new('view3d.enable_manipulator', 'W', 'PRESS')
kmi.properties.translate = True
kmi = km.keymap_items.new('view3d.enable_manipulator', 'E', 'PRESS')
@@ -308,8 +310,10 @@ kmi = km.keymap_items.new('object.subdivision_set', 'FOUR', 'PRESS', ctrl=True)
kmi.properties.level = 4
kmi = km.keymap_items.new('object.subdivision_set', 'FIVE', 'PRESS', ctrl=True)
kmi.properties.level = 5
-kmi = km.keymap_items.new('object.select_all', 'SELECTMOUSE', 'CLICK')
-kmi.properties.action = 'DESELECT'
+# TODO: only used to deselect everything when clicking outside of an object,
+# which is not the best way to do this and behaves completely unpredictably
+#kmi = km.keymap_items.new('object.select_all', 'SELECTMOUSE', 'CLICK')
+#kmi.properties.action = 'DESELECT'
# Map Mesh
km = kc.keymaps.new('Mesh', space_type='EMPTY', region_type='WINDOW', modal=False)
@@ -395,8 +399,10 @@ kmi = km.keymap_items.new('wm.context_toggle_enum', 'O', 'PRESS', alt=True)
kmi.properties.data_path = 'tool_settings.proportional_edit'
kmi.properties.value_1 = 'DISABLED'
kmi.properties.value_2 = 'CONNECTED'
-kmi = km.keymap_items.new('mesh.select_all', 'SELECTMOUSE', 'CLICK')
-kmi.properties.action = 'DESELECT'
+# TODO: only used to deselect everything when clicking outside of an object,
+# which is not the best way to do this and behaves completely unpredictably
+#kmi = km.keymap_items.new('mesh.select_all', 'SELECTMOUSE', 'CLICK')
+#kmi.properties.action = 'DESELECT'
kmi = km.keymap_items.new('object.subdivision_set', 'ZERO', 'PRESS', ctrl=True)
kmi.properties.level = 0
kmi = km.keymap_items.new('object.subdivision_set', 'ONE', 'PRESS', ctrl=True)
diff --git a/release/scripts/presets/operator/wm.collada_export/second_life_rigged.py b/release/scripts/presets/operator/wm.collada_export/second_life_rigged.py
index 2c695a22ff9..81769a82728 100644
--- a/release/scripts/presets/operator/wm.collada_export/second_life_rigged.py
+++ b/release/scripts/presets/operator/wm.collada_export/second_life_rigged.py
@@ -7,6 +7,7 @@ op.export_mesh_type_selection = 'view'
op.selected = True
op.include_children = False
op.include_armatures = True
+op.include_shapekeys = False
op.deform_bones_only = True
op.active_uv_only = True
op.include_uv_textures = True
diff --git a/release/scripts/presets/operator/wm.collada_export/second_life_static.py b/release/scripts/presets/operator/wm.collada_export/second_life_static.py
index 081788b7e9d..ad06909a276 100644
--- a/release/scripts/presets/operator/wm.collada_export/second_life_static.py
+++ b/release/scripts/presets/operator/wm.collada_export/second_life_static.py
@@ -7,6 +7,7 @@ op.export_mesh_type_selection = 'view'
op.selected = True
op.include_children = False
op.include_armatures = False
+op.include_shapekeys = False
op.deform_bones_only = False
op.active_uv_only = True
op.include_uv_textures = True
diff --git a/release/scripts/startup/bl_operators/__init__.py b/release/scripts/startup/bl_operators/__init__.py
index c12b0b00f54..64851a3a4c1 100644
--- a/release/scripts/startup/bl_operators/__init__.py
+++ b/release/scripts/startup/bl_operators/__init__.py
@@ -35,6 +35,7 @@ _modules = [
"object_randomize_transform",
"object_quick_effects",
"presets",
+ "rigidbody",
"screen_play_rendered_anim",
"sequencer",
"uvcalc_follow_active",
diff --git a/release/scripts/startup/bl_operators/node.py b/release/scripts/startup/bl_operators/node.py
index 39e00f94953..bc0224db765 100644
--- a/release/scripts/startup/bl_operators/node.py
+++ b/release/scripts/startup/bl_operators/node.py
@@ -22,6 +22,7 @@ import bpy
from bpy.types import Operator
from bpy.props import BoolProperty, EnumProperty, StringProperty
+
# Base class for node 'Add' operators
class NodeAddOperator():
@staticmethod
@@ -78,8 +79,9 @@ class NODE_OT_add_node(NodeAddOperator, Operator):
use_transform = BoolProperty(
name="Use Transform",
description="Start transform operator after inserting the node",
- default = False,
+ default=False,
)
+
def execute(self, context):
node = self.create_node(context, self.type)
diff --git a/release/scripts/startup/bl_operators/object.py b/release/scripts/startup/bl_operators/object.py
index 9e449f325d6..567ea830409 100644
--- a/release/scripts/startup/bl_operators/object.py
+++ b/release/scripts/startup/bl_operators/object.py
@@ -110,6 +110,12 @@ class SelectCamera(Operator):
bl_label = "Select Camera"
bl_options = {'REGISTER', 'UNDO'}
+ extend = BoolProperty(
+ name="Extend",
+ description="Extend the selection",
+ default=False
+ )
+
def execute(self, context):
scene = context.scene
view = context.space_data
@@ -123,6 +129,8 @@ class SelectCamera(Operator):
elif camera.name not in scene.objects:
self.report({'WARNING'}, "Active camera is not in this scene")
else:
+ if not self.extend:
+ bpy.ops.object.select_all(action='DESELECT')
context.scene.objects.active = camera
camera.select = True
return {'FINISHED'}
@@ -297,7 +305,7 @@ class ShapeTransfer(Operator):
('RELATIVE_EDGE',
"Relative Edge",
"Calculate relative position (using edges)",
- ),
+ ),
),
name="Transformation Mode",
description="Relative shape positions to the new shape method",
@@ -674,7 +682,7 @@ class TransformsToDeltasAnim(Operator):
"scale" : "delta_scale"
}
DELTA_PATHS = STANDARD_TO_DELTA_PATHS.values()
-
+
# try to apply on each selected object
success = False
for obj in context.selected_editable_objects:
@@ -684,7 +692,7 @@ class TransformsToDeltasAnim(Operator):
"No animation data to convert on object: %r" %
obj.name)
continue
-
+
# first pass over F-Curves: ensure that we don't have conflicting
# transforms already (e.g. if this was applied already) [#29110]
existingFCurves = {}
@@ -700,7 +708,7 @@ class TransformsToDeltasAnim(Operator):
else:
# non-transform - ignore
continue
-
+
# a delta path like this for the same index shouldn't
# exist already, otherwise we've got a conflict
if dpath in existingFCurves:
@@ -708,8 +716,9 @@ class TransformsToDeltasAnim(Operator):
if fcu.array_index in existingFCurves[dpath]:
# conflict
self.report({'ERROR'},
- "Object '%r' already has '%r' F-Curve(s). Remove these before trying again" %
- (obj.name, dpath))
+ "Object '%r' already has '%r' F-Curve(s). "
+ "Remove these before trying again" %
+ (obj.name, dpath))
return {'CANCELLED'}
else:
# no conflict here
@@ -717,8 +726,7 @@ class TransformsToDeltasAnim(Operator):
else:
# no conflict yet
existingFCurves[dpath] = [fcu.array_index]
-
-
+
# if F-Curve uses standard transform path
# just append "delta_" to this path
for fcu in adt.action.fcurves:
@@ -758,7 +766,7 @@ class DupliOffsetFromCursor(Operator):
@classmethod
def poll(cls, context):
- return context.active_object is not None
+ return (context.active_object is not None)
def execute(self, context):
scene = context.scene
diff --git a/release/scripts/startup/bl_operators/object_align.py b/release/scripts/startup/bl_operators/object_align.py
index a32bb8c5353..dd647733850 100644
--- a/release/scripts/startup/bl_operators/object_align.py
+++ b/release/scripts/startup/bl_operators/object_align.py
@@ -114,14 +114,15 @@ def GlobalBB_HQ(obj):
return Vector((left, front, up)), Vector((right, back, down))
-def align_objects(align_x,
+def align_objects(context,
+ align_x,
align_y,
align_z,
align_mode,
relative_to,
bb_quality):
- cursor = bpy.context.scene.cursor_location
+ cursor = context.scene.cursor_location
Left_Front_Up_SEL = [0.0, 0.0, 0.0]
Right_Back_Down_SEL = [0.0, 0.0, 0.0]
@@ -130,7 +131,7 @@ def align_objects(align_x,
objs = []
- for obj in bpy.context.selected_objects:
+ for obj in context.selected_objects:
matrix_world = obj.matrix_world.copy()
bb_world = [matrix_world * Vector(v[:]) for v in obj.bound_box]
objs.append((obj, bb_world))
@@ -150,7 +151,7 @@ def align_objects(align_x,
# Active Center
- if obj == bpy.context.active_object:
+ if obj == context.active_object:
center_active_x = (Left_Front_Up[0] + Right_Back_Down[0]) / 2.0
center_active_y = (Left_Front_Up[1] + Right_Back_Down[1]) / 2.0
@@ -386,7 +387,8 @@ class AlignObjects(Operator):
def execute(self, context):
align_axis = self.align_axis
- ret = align_objects('X' in align_axis,
+ ret = align_objects(context,
+ 'X' in align_axis,
'Y' in align_axis,
'Z' in align_axis,
self.align_mode,
diff --git a/release/scripts/startup/bl_operators/object_quick_effects.py b/release/scripts/startup/bl_operators/object_quick_effects.py
index cd0b63a6b78..47012f0c459 100644
--- a/release/scripts/startup/bl_operators/object_quick_effects.py
+++ b/release/scripts/startup/bl_operators/object_quick_effects.py
@@ -72,7 +72,7 @@ class QuickFur(Operator):
)
def execute(self, context):
- fake_context = bpy.context.copy()
+ fake_context = context.copy()
mesh_objects = [obj for obj in context.selected_objects
if obj.type == 'MESH']
@@ -161,7 +161,7 @@ class QuickExplode(Operator):
)
def execute(self, context):
- fake_context = bpy.context.copy()
+ fake_context = context.copy()
obj_act = context.active_object
if obj_act is None or obj_act.type != 'MESH':
@@ -311,7 +311,7 @@ class QuickSmoke(Operator):
)
def execute(self, context):
- fake_context = bpy.context.copy()
+ fake_context = context.copy()
mesh_objects = [obj for obj in context.selected_objects
if obj.type == 'MESH']
min_co = Vector((100000.0, 100000.0, 100000.0))
@@ -432,7 +432,7 @@ class QuickFluid(Operator):
)
def execute(self, context):
- fake_context = bpy.context.copy()
+ fake_context = context.copy()
mesh_objects = [obj for obj in context.selected_objects
if (obj.type == 'MESH' and not 0.0 in obj.dimensions)]
min_co = Vector((100000, 100000, 100000))
diff --git a/release/scripts/startup/bl_operators/object_randomize_transform.py b/release/scripts/startup/bl_operators/object_randomize_transform.py
index a6efc9dfd85..38110328603 100644
--- a/release/scripts/startup/bl_operators/object_randomize_transform.py
+++ b/release/scripts/startup/bl_operators/object_randomize_transform.py
@@ -23,7 +23,8 @@ from bpy.types import Operator
from mathutils import Vector
-def randomize_selected(seed, delta, loc, rot, scale, scale_even, scale_min):
+def randomize_selected(context, seed, delta,
+ loc, rot, scale, scale_even, scale_min):
import random
from random import uniform
@@ -33,7 +34,7 @@ def randomize_selected(seed, delta, loc, rot, scale, scale_even, scale_min):
def rand_vec(vec_range):
return Vector(uniform(-val, val) for val in vec_range)
- for obj in bpy.context.selected_objects:
+ for obj in context.selected_objects:
if loc:
if delta:
@@ -180,6 +181,7 @@ class RandomizeLocRotSize(Operator):
#scale_min = self.scale_min
scale_min = 0
- randomize_selected(seed, delta, loc, rot, scale, scale_even, scale_min)
+ randomize_selected(context, seed, delta,
+ loc, rot, scale, scale_even, scale_min)
return {'FINISHED'}
diff --git a/release/scripts/startup/bl_operators/presets.py b/release/scripts/startup/bl_operators/presets.py
index ee9769d8b43..dac7adecaff 100644
--- a/release/scripts/startup/bl_operators/presets.py
+++ b/release/scripts/startup/bl_operators/presets.py
@@ -320,13 +320,13 @@ class AddPresetFluid(AddPresetBase, Operator):
preset_menu = "FLUID_MT_presets"
preset_defines = [
- "fluid = bpy.context.fluid"
- ]
+ "fluid = bpy.context.fluid"
+ ]
preset_values = [
- "fluid.settings.viscosity_base",
- "fluid.settings.viscosity_exponent",
- ]
+ "fluid.settings.viscosity_base",
+ "fluid.settings.viscosity_exponent",
+ ]
preset_subdir = "fluid"
@@ -477,7 +477,7 @@ class AddPresetNodeColor(AddPresetBase, Operator):
class AddPresetInterfaceTheme(AddPresetBase, Operator):
"""Add a theme preset"""
bl_idname = "wm.interface_theme_preset_add"
- bl_label = "Add Tracking Settings Preset"
+ bl_label = "Add Theme Preset"
preset_menu = "USERPREF_MT_interface_theme_presets"
preset_subdir = "interface_theme"
diff --git a/release/scripts/startup/bl_operators/rigidbody.py b/release/scripts/startup/bl_operators/rigidbody.py
new file mode 100644
index 00000000000..bad86163932
--- /dev/null
+++ b/release/scripts/startup/bl_operators/rigidbody.py
@@ -0,0 +1,251 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8-80 compliant>
+
+import bpy
+from bpy.types import Operator
+from bpy.props import IntProperty
+from bpy.props import EnumProperty
+
+
+class CopyRigidbodySettings(Operator):
+ '''Copy Rigid Body settings from active object to selected'''
+ bl_idname = "rigidbody.object_settings_copy"
+ bl_label = "Copy Rigidbody Settings"
+ bl_options = {'REGISTER', 'UNDO'}
+
+ @classmethod
+ def poll(cls, context):
+ obj = context.object
+ return (obj and obj.rigid_body)
+
+ def execute(self, context):
+ obj = context.object
+ scn = context.scene
+
+ # deselect all but mesh objects
+ for o in context.selected_objects:
+ if o.type != 'MESH':
+ o.select = False
+
+ sel = context.selected_objects
+ if sel:
+ # add the selected objects to the active object's groups and recalculate
+ bpy.ops.group.objects_add_active()
+ scn.frame_set(scn.frame_current)
+
+ # copy settings
+ for o in sel:
+ if o.rigid_body is None:
+ continue
+
+ o.rigid_body.type = obj.rigid_body.type
+ o.rigid_body.kinematic = obj.rigid_body.kinematic
+ o.rigid_body.mass = obj.rigid_body.mass
+ o.rigid_body.collision_shape = obj.rigid_body.collision_shape
+ o.rigid_body.use_margin = obj.rigid_body.use_margin
+ o.rigid_body.collision_margin = obj.rigid_body.collision_margin
+ o.rigid_body.friction = obj.rigid_body.friction
+ o.rigid_body.restitution = obj.rigid_body.restitution
+ o.rigid_body.use_deactivation = obj.rigid_body.use_deactivation
+ o.rigid_body.start_deactivated = obj.rigid_body.start_deactivated
+ o.rigid_body.deactivate_linear_velocity = obj.rigid_body.deactivate_linear_velocity
+ o.rigid_body.deactivate_angular_velocity = obj.rigid_body.deactivate_angular_velocity
+ o.rigid_body.linear_damping = obj.rigid_body.linear_damping
+ o.rigid_body.angular_damping = obj.rigid_body.angular_damping
+ o.rigid_body.collision_groups = obj.rigid_body.collision_groups
+
+ return {'FINISHED'}
+
+
+class BakeToKeyframes(Operator):
+ '''Bake rigid body transformations of selected objects to keyframes'''
+ bl_idname = "rigidbody.bake_to_keyframes"
+ bl_label = "Bake To Keyframes"
+ bl_options = {'REGISTER', 'UNDO'}
+
+ frame_start = IntProperty(
+ name="Start Frame",
+ description="Start frame for baking",
+ min=0, max=300000,
+ default=1,
+ )
+ frame_end = IntProperty(
+ name="End Frame",
+ description="End frame for baking",
+ min=1, max=300000,
+ default=250,
+ )
+ step = IntProperty(
+ name="Frame Step",
+ description="Frame Step",
+ min=1, max=120,
+ default=1,
+ )
+
+ @classmethod
+ def poll(cls, context):
+ obj = context.object
+ return (obj and obj.rigid_body)
+
+ def execute(self, context):
+ bake = []
+ objs = []
+ scene = context.scene
+ frame_orig = scene.frame_current
+ frames = list(range(self.frame_start, self.frame_end + 1, self.step))
+
+ # filter objects selection
+ for obj in context.selected_objects:
+ if not obj.rigid_body or obj.rigid_body.type != 'ACTIVE':
+ obj.select = False
+
+ objs = context.selected_objects
+
+ if objs:
+ # store transformation data
+ for f in list(range(self.frame_start, self.frame_end + 1)):
+ scene.frame_set(f)
+ if f in frames:
+ mat = {}
+ for i, obj in enumerate(objs):
+ mat[i] = obj.matrix_world.copy()
+ bake.append(mat)
+
+ # apply transformations as keyframes
+ for i, f in enumerate(frames):
+ scene.frame_set(f)
+ obj_prev = objs[0]
+ for j, obj in enumerate(objs):
+ mat = bake[i][j]
+
+ obj.location = mat.to_translation()
+
+ rot_mode = obj.rotation_mode
+ if rot_mode == 'QUATERNION':
+ obj.rotation_quaternion = mat.to_quaternion()
+ elif rot_mode == 'AXIS_ANGLE':
+ # this is a little roundabout but there's no better way right now
+ aa = mat.to_quaternion().to_axis_angle()
+ obj.rotation_axis_angle = (aa[1], ) + aa[0][:]
+ else: # euler
+ # make sure the euler rotation is compatible with the previous frame
+ obj.rotation_euler = mat.to_euler(rot_mode, obj_prev.rotation_euler)
+
+ obj_prev = obj
+
+ bpy.ops.anim.keyframe_insert(type='BUILTIN_KSI_LocRot', confirm_success=False)
+
+ # remove baked objects from simulation
+ bpy.ops.rigidbody.objects_remove()
+
+ # clean up keyframes
+ for obj in objs:
+ action = obj.animation_data.action
+ for fcu in action.fcurves:
+ keyframe_points = fcu.keyframe_points
+ i = 1
+ # remove unneeded keyframes
+ while i < len(keyframe_points) - 1:
+ val_prev = keyframe_points[i - 1].co[1]
+ val_next = keyframe_points[i + 1].co[1]
+ val = keyframe_points[i].co[1]
+
+ if abs(val - val_prev) + abs(val - val_next) < 0.0001:
+ keyframe_points.remove(keyframe_points[i])
+ else:
+ i += 1
+ # use linear interpolation for better visual results
+ for keyframe in keyframe_points:
+ keyframe.interpolation = 'LINEAR'
+
+ # return to the frame we started on
+ scene.frame_set(frame_orig)
+
+ return {'FINISHED'}
+
+ def invoke(self, context, event):
+ scene = context.scene
+ self.frame_start = scene.frame_start
+ self.frame_end = scene.frame_end
+
+ wm = context.window_manager
+ return wm.invoke_props_dialog(self)
+
+
+class ConnectRigidBodies(Operator):
+ '''Connect selected rigid bodies to active'''
+ bl_idname = "rigidbody.connect"
+ bl_label = "Connect Rigid Bodies"
+ bl_options = {'REGISTER', 'UNDO'}
+
+ con_type = EnumProperty(
+ name="Type",
+ description="Type of generated constraint",
+ items=(('FIXED', "Fixed", "Glues rigid bodies together"),
+ ('POINT', "Point", "Constrains rigid bodies to move around a common pivot point"),
+ ('HINGE', "Hinge", "Restricts rigid body rotation to one axis"),
+ ('SLIDER', "Slider", "Restricts rigid body translation to one axis"),
+ ('PISTON', "Piston", "Restricts rigid body translation and rotation to one axis"),
+ ('GENERIC', "Generic", "Restricts translation and rotation to specified axes"),
+ ('GENERIC_SPRING', "Generic Spring", "Restricts translation and rotation to specified axes with springs")),
+ default='FIXED',)
+
+ pivot_type = EnumProperty(
+ name="Location",
+ description="Constraint pivot location",
+ items=(('CENTER', "Center", "Pivot location is between the constrained rigid bodies"),
+ ('ACTIVE', "Active", "Pivot location is at the active object position"),
+ ('SELECTED', "Selected", "Pivot location is at the selected object position")),
+ default='CENTER',)
+
+ @classmethod
+ def poll(cls, context):
+ obj = context.object
+ objs = context.selected_objects
+ return (obj and obj.rigid_body and (len(objs) > 1))
+
+ def execute(self, context):
+
+ objs = context.selected_objects
+ obj_act = context.active_object
+
+ for obj in objs:
+ if obj == obj_act:
+ continue
+ if self.pivot_type == 'ACTIVE':
+ loc = obj_act.location
+ elif self.pivot_type == 'SELECTED':
+ loc = obj.location
+ else:
+ loc = (obj_act.location + obj.location) / 2.0
+ bpy.ops.object.add(type='EMPTY', view_align=False, enter_editmode=False, location=loc)
+ bpy.ops.rigidbody.constraint_add()
+ con = context.active_object.rigid_body_constraint
+ con.type = self.con_type
+ con.object1 = obj_act
+ con.object2 = obj
+
+ return {'FINISHED'}
+
+ def invoke(self, context, event):
+ wm = context.window_manager
+ return wm.invoke_props_dialog(self)
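The three operators above can also be driven from a script once a rigid body setup exists; a minimal usage sketch using the idnames and properties defined in this file (the concrete values are illustrative):

import bpy

# Copy the active object's rigid body settings to the other selected meshes.
bpy.ops.rigidbody.object_settings_copy()

# Bake the simulation of the selected active rigid bodies into keyframes.
bpy.ops.rigidbody.bake_to_keyframes(frame_start=1, frame_end=250, step=1)

# Connect each selected rigid body to the active one with a fixed constraint,
# pivoting halfway between the two objects.
bpy.ops.rigidbody.connect(con_type='FIXED', pivot_type='CENTER')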
diff --git a/release/scripts/startup/bl_operators/uvcalc_follow_active.py b/release/scripts/startup/bl_operators/uvcalc_follow_active.py
index 7b6013f3044..ee3ae2878dc 100644
--- a/release/scripts/startup/bl_operators/uvcalc_follow_active.py
+++ b/release/scripts/startup/bl_operators/uvcalc_follow_active.py
@@ -26,18 +26,18 @@ from bpy.types import Operator
def extend(obj, operator, EXTEND_MODE):
-
+
import bmesh
me = obj.data
# script will fail without UVs
if not me.uv_textures:
me.uv_textures.new()
-
+
bm = bmesh.from_edit_mesh(me)
-
+
f_act = bm.faces.active
uv_act = bm.loops.layers.uv.active
-
+
if f_act is None:
operator.report({'ERROR'}, "No active face")
return
@@ -57,7 +57,7 @@ def extend(obj, operator, EXTEND_MODE):
f.tag = False
# tag the active face True since we begin there
f_act.tag = True
-
+
def walk_face(f):
# all faces in this list must be tagged
f.tag = True
@@ -102,7 +102,6 @@ def extend(obj, operator, EXTEND_MODE):
else:
break
-
def extrapolate_uv(fac,
l_a_outer, l_a_inner,
l_b_outer, l_b_inner):
@@ -112,7 +111,7 @@ def extend(obj, operator, EXTEND_MODE):
def apply_uv(f_prev, l_prev, f_next):
l_a = [None, None, None, None]
l_b = [None, None, None, None]
-
+
l_a[0] = l_prev
l_a[1] = l_a[0].link_loop_next
l_a[2] = l_a[1].link_loop_next
@@ -133,7 +132,7 @@ def extend(obj, operator, EXTEND_MODE):
# +-----------+
# copy from this face to the one above.
- # get the other loops
+ # get the other loops
l_next = l_prev.link_loop_radial_next
if l_next.vert != l_prev.vert:
l_b[1] = l_next
diff --git a/release/scripts/startup/bl_operators/uvcalc_lightmap.py b/release/scripts/startup/bl_operators/uvcalc_lightmap.py
index 198b3660ff8..b24a71365b4 100644
--- a/release/scripts/startup/bl_operators/uvcalc_lightmap.py
+++ b/release/scripts/startup/bl_operators/uvcalc_lightmap.py
@@ -189,14 +189,14 @@ class prettyface(object):
def lightmap_uvpack(meshes,
- PREF_SEL_ONLY=True,
- PREF_NEW_UVLAYER=False,
- PREF_PACK_IN_ONE=False,
- PREF_APPLY_IMAGE=False,
- PREF_IMG_PX_SIZE=512,
- PREF_BOX_DIV=8,
- PREF_MARGIN_DIV=512
- ):
+ PREF_SEL_ONLY=True,
+ PREF_NEW_UVLAYER=False,
+ PREF_PACK_IN_ONE=False,
+ PREF_APPLY_IMAGE=False,
+ PREF_IMG_PX_SIZE=512,
+ PREF_BOX_DIV=8,
+ PREF_MARGIN_DIV=512
+ ):
"""
BOX_DIV if the maximum division of the UV map that
a box may be consolidated into.
@@ -516,7 +516,7 @@ def lightmap_uvpack(meshes,
def unwrap(operator, context, **kwargs):
- is_editmode = (bpy.context.object.mode == 'EDIT')
+ is_editmode = (context.object.mode == 'EDIT')
if is_editmode:
bpy.ops.object.mode_set(mode='OBJECT', toggle=False)
diff --git a/release/scripts/startup/bl_operators/wm.py b/release/scripts/startup/bl_operators/wm.py
index 00cc763c4e1..fa2cb5d5356 100644
--- a/release/scripts/startup/bl_operators/wm.py
+++ b/release/scripts/startup/bl_operators/wm.py
@@ -500,18 +500,16 @@ class WM_MT_context_menu_enum(Menu):
def draw(self, context):
data_path = self.data_path
- value = context_path_validate(bpy.context, data_path)
+ value = context_path_validate(context, data_path)
if value is Ellipsis:
return {'PASS_THROUGH'}
base_path, prop_string = data_path.rsplit(".", 1)
value_base = context_path_validate(context, base_path)
+ prop = value_base.bl_rna.properties[prop_string]
- values = [(i.name, i.identifier) for i in value_base.bl_rna.properties[prop_string].enum_items]
-
- for name, identifier in values:
- props = self.layout.operator("wm.context_set_enum", text=name)
- props.data_path = data_path
- props.value = identifier
+ layout = self.layout
+ layout.label(prop.name, icon=prop.icon)
+ layout.prop(value_base, prop_string, expand=True)
class WM_OT_context_menu_enum(Operator):
@@ -1255,13 +1253,6 @@ class WM_OT_copy_prev_settings(Operator):
else:
shutil.copytree(path_src, path_dst, symlinks=True)
- # in 2.57 and earlier windows installers, system scripts were copied
- # into the configuration directory, don't want to copy those
- system_script = os.path.join(path_dst, "scripts/modules/bpy_types.py")
- if os.path.isfile(system_script):
- shutil.rmtree(os.path.join(path_dst, "scripts"))
- shutil.rmtree(os.path.join(path_dst, "plugins"))
-
# don't lose the user's work if they open the splash later.
if bpy.data.is_saved is bpy.data.is_dirty is False:
bpy.ops.wm.read_homefile()
@@ -1596,7 +1587,7 @@ class WM_OT_addon_enable(Operator):
"version %d.%d.%d and might not "
"function (correctly), "
"though it is enabled") %
- info_ver)
+ info_ver)
return {'FINISHED'}
else:
return {'CANCELLED'}
@@ -1739,7 +1730,7 @@ class WM_OT_addon_install(Operator):
# don't use bpy.utils.script_paths("addons") because we may not be able to write to it.
path_addons = bpy.utils.user_resource('SCRIPTS', "addons", create=True)
else:
- path_addons = bpy.context.user_preferences.filepaths.script_directory
+ path_addons = context.user_preferences.filepaths.script_directory
if path_addons:
path_addons = os.path.join(path_addons, "addons")
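The WM_MT_context_menu_enum change above replaces the per-item wm.context_set_enum loop with a single expanded layout.prop call; a minimal sketch of the same pattern in a standalone menu (class and property names are illustrative, not part of this patch):

import bpy


class OBJECT_MT_rotation_mode_example(bpy.types.Menu):
    """Illustrative menu that expands an enum property in place"""
    bl_idname = "OBJECT_MT_rotation_mode_example"
    bl_label = "Rotation Mode (Example)"

    def draw(self, context):
        ob = context.object
        if ob is None:
            return
        prop = ob.bl_rna.properties["rotation_mode"]
        layout = self.layout
        layout.label(prop.name, icon=prop.icon)
        # one call draws every enum item, instead of one operator entry per item
        layout.prop(ob, "rotation_mode", expand=True)


if __name__ == "__main__":  # only for live edit.
    bpy.utils.register_class(OBJECT_MT_rotation_mode_example)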
diff --git a/release/scripts/startup/bl_ui/__init__.py b/release/scripts/startup/bl_ui/__init__.py
index 3a47b9d2d77..94f324d62d0 100644
--- a/release/scripts/startup/bl_ui/__init__.py
+++ b/release/scripts/startup/bl_ui/__init__.py
@@ -48,6 +48,8 @@ _modules = (
"properties_physics_dynamicpaint",
"properties_physics_field",
"properties_physics_fluid",
+ "properties_physics_rigidbody",
+ "properties_physics_rigidbody_constraint",
"properties_physics_smoke",
"properties_physics_softbody",
"properties_render",
@@ -134,6 +136,7 @@ def register():
def unregister():
bpy.utils.unregister_module(__name__)
+
# Define a default UIList, when a list does not need any custom drawing...
class UI_UL_list(bpy.types.UIList):
pass
diff --git a/release/scripts/startup/bl_ui/properties_data_mesh.py b/release/scripts/startup/bl_ui/properties_data_mesh.py
index 5fdb71b855f..dcef5aaa096 100644
--- a/release/scripts/startup/bl_ui/properties_data_mesh.py
+++ b/release/scripts/startup/bl_ui/properties_data_mesh.py
@@ -34,7 +34,8 @@ class MESH_MT_vertex_group_specials(Menu):
layout.operator("object.vertex_group_copy_to_linked", icon='LINK_AREA')
layout.operator("object.vertex_group_copy_to_selected", icon='LINK_AREA')
layout.operator("object.vertex_group_mirror", icon='ARROW_LEFTRIGHT')
- layout.operator("object.vertex_group_remove", icon='X', text="Delete All").all = True
+ layout.operator("object.vertex_group_remove", icon='X', text="Delete All Vertex Groups").all = True
+ layout.operator("object.vertex_group_remove_from", icon='X', text="Remove Selected from All Vertex Groups").all = True
layout.separator()
layout.operator("object.vertex_group_lock", icon='LOCKED', text="Lock All").action = 'LOCK'
layout.operator("object.vertex_group_lock", icon='UNLOCKED', text="UnLock All").action = 'UNLOCK'
@@ -193,7 +194,6 @@ class DATA_PT_vertex_groups(MeshButtonsPanel, Panel):
row = layout.row()
row.template_list("MESH_UL_vgroups", "", ob, "vertex_groups", ob.vertex_groups, "active_index", rows=rows)
-
col = row.column(align=True)
col.operator("object.vertex_group_add", icon='ZOOMIN', text="")
col.operator("object.vertex_group_remove", icon='ZOOMOUT', text="").all = False
diff --git a/release/scripts/startup/bl_ui/properties_data_modifier.py b/release/scripts/startup/bl_ui/properties_data_modifier.py
index df29f18853b..99f82b1e8a3 100644
--- a/release/scripts/startup/bl_ui/properties_data_modifier.py
+++ b/release/scripts/startup/bl_ui/properties_data_modifier.py
@@ -162,6 +162,44 @@ class DATA_PT_modifiers(ModifierButtonsPanel, Panel):
sub.active = md.use_random_order
sub.prop(md, "seed")
+ def MESH_CACHE(self, layout, ob, md):
+ layout.prop(md, "cache_format")
+ layout.prop(md, "filepath")
+
+ layout.label(text="Evaluation:")
+ layout.prop(md, "factor", slider=True)
+ layout.prop(md, "deform_mode")
+ layout.prop(md, "interpolation")
+
+ layout.label(text="Time Mapping:")
+
+ row = layout.row()
+ row.prop(md, "time_mode", expand=True)
+ row = layout.row()
+ row.prop(md, "play_mode", expand=True)
+ if md.play_mode == 'SCENE':
+ layout.prop(md, "frame_start")
+ layout.prop(md, "frame_scale")
+ else:
+ time_mode = md.time_mode
+ if time_mode == 'FRAME':
+ layout.prop(md, "eval_frame")
+ elif time_mode == 'TIME':
+ layout.prop(md, "eval_time")
+ elif time_mode == 'FACTOR':
+ layout.prop(md, "eval_factor")
+
+ layout.label(text="Axis Mapping:")
+ split = layout.split(percentage=0.5, align=True)
+ split.alert = (md.forward_axis[-1] == md.up_axis[-1])
+ split.label("Forward/Up Axis:")
+ split.prop(md, "forward_axis", text="")
+ split.prop(md, "up_axis", text="")
+ split = layout.split(percentage=0.5)
+ split.label(text="Flip Axis:")
+ row = split.row()
+ row.prop(md, "flip_axis")
+
def CAST(self, layout, ob, md):
split = layout.split(percentage=0.25)
@@ -333,23 +371,24 @@ class DATA_PT_modifiers(ModifierButtonsPanel, Panel):
def LAPLACIANSMOOTH(self, layout, ob, md):
layout.prop(md, "iterations")
-
+
split = layout.split(percentage=0.25)
-
+
col = split.column()
col.label(text="Axis:")
col.prop(md, "use_x")
col.prop(md, "use_y")
col.prop(md, "use_z")
-
+
col = split.column()
col.label(text="Lambda:")
col.prop(md, "lambda_factor", text="Factor")
col.prop(md, "lambda_border", text="Border")
-
+
col.separator()
col.prop(md, "use_volume_preserve")
-
+ col.prop(md, "use_normalized")
+
layout.label(text="Vertex Group:")
layout.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
@@ -558,7 +597,6 @@ class DATA_PT_modifiers(ModifierButtonsPanel, Panel):
col = split.column()
-
def PARTICLE_INSTANCE(self, layout, ob, md):
layout.prop(md, "object")
layout.prop(md, "particle_system_index", text="Particle System")
@@ -1045,12 +1083,12 @@ class DATA_PT_modifiers(ModifierButtonsPanel, Panel):
def UV_WARP(self, layout, ob, md):
split = layout.split()
col = split.column()
- col.prop(md, "center");
+ col.prop(md, "center")
col = split.column()
col.label(text="UV Axis:")
- col.prop(md, "axis_u", text="");
- col.prop(md, "axis_v", text="");
+ col.prop(md, "axis_u", text="")
+ col.prop(md, "axis_v", text="")
split = layout.split()
col = split.column()
diff --git a/release/scripts/startup/bl_ui/properties_game.py b/release/scripts/startup/bl_ui/properties_game.py
index 58b6aa6916c..a896855b217 100644
--- a/release/scripts/startup/bl_ui/properties_game.py
+++ b/release/scripts/startup/bl_ui/properties_game.py
@@ -199,6 +199,7 @@ class PHYSICS_PT_game_physics(PhysicsButtonsPanel, Panel):
col = split.column()
col.prop(game, "collision_mask")
+
class PHYSICS_PT_game_collision_bounds(PhysicsButtonsPanel, Panel):
bl_label = "Collision Bounds"
COMPAT_ENGINES = {'BLENDER_GAME'}
@@ -411,10 +412,10 @@ class RENDER_PT_game_system(RenderButtonsPanel, Panel):
col = row.column()
col.prop(gs, "use_display_lists")
col.active = gs.raster_storage != 'VERTEX_BUFFER_OBJECT'
-
+
row = layout.row()
row.prop(gs, "raster_storage")
-
+
row = layout.row()
row.label("Exit Key")
row.prop(gs, "exit_key", text="", event=True)
diff --git a/release/scripts/startup/bl_ui/properties_mask_common.py b/release/scripts/startup/bl_ui/properties_mask_common.py
index 9861db39f30..3bdb6f32076 100644
--- a/release/scripts/startup/bl_ui/properties_mask_common.py
+++ b/release/scripts/startup/bl_ui/properties_mask_common.py
@@ -26,7 +26,8 @@ from bpy.types import Menu, UIList
class MASK_UL_layers(UIList):
- def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
+ def draw_item(self, context, layout, data, item, icon,
+ active_data, active_propname, index):
# assert(isinstance(item, bpy.types.MaskLayer)
mask = item
if self.layout_type in {'DEFAULT', 'COMPACT'}:
@@ -86,7 +87,8 @@ class MASK_PT_layers:
rows = 5 if active_layer else 2
row = layout.row()
- row.template_list("MASK_UL_layers", "", mask, "layers", mask, "active_layer_index", rows=rows)
+ row.template_list("MASK_UL_layers", "", mask, "layers",
+ mask, "active_layer_index", rows=rows)
sub = row.column(align=True)
diff --git a/release/scripts/startup/bl_ui/properties_object.py b/release/scripts/startup/bl_ui/properties_object.py
index 951807488e7..b9dcdc19162 100644
--- a/release/scripts/startup/bl_ui/properties_object.py
+++ b/release/scripts/startup/bl_ui/properties_object.py
@@ -230,6 +230,7 @@ class OBJECT_PT_display(ObjectButtonsPanel, Panel):
col.prop(ob, "show_x_ray", text="X-Ray")
if ob.type == 'MESH':
col.prop(ob, "show_transparent", text="Transparency")
+ col.prop(ob, "show_all_edges")
class OBJECT_PT_duplication(ObjectButtonsPanel, Panel):
diff --git a/release/scripts/startup/bl_ui/properties_object_constraint.py b/release/scripts/startup/bl_ui/properties_object_constraint.py
index eb0929895f8..6478e49f464 100644
--- a/release/scripts/startup/bl_ui/properties_object_constraint.py
+++ b/release/scripts/startup/bl_ui/properties_object_constraint.py
@@ -172,7 +172,6 @@ class ConstraintButtonsPanel():
sub.active = con.use_rotation
sub.prop(con, "orient_weight", text="Rotation", slider=True)
-
def IK_COPY_POSE(self, context, layout, con):
self.target_template(layout, con)
self.ik_template(layout, con)
diff --git a/release/scripts/startup/bl_ui/properties_paint_common.py b/release/scripts/startup/bl_ui/properties_paint_common.py
index 53cf640beb9..022f2500940 100644
--- a/release/scripts/startup/bl_ui/properties_paint_common.py
+++ b/release/scripts/startup/bl_ui/properties_paint_common.py
@@ -70,34 +70,40 @@ class UnifiedPaintPanel():
# Used in both the View3D toolbar and texture properties
-def sculpt_brush_texture_settings(layout, brush):
+def brush_texture_settings(layout, brush, sculpt):
tex_slot = brush.texture_slot
layout.label(text="Brush Mapping:")
# map_mode
- layout.row().prop(tex_slot, "map_mode", text="")
- layout.separator()
-
- # angle and texture_angle_source
- col = layout.column()
- col.active = brush.sculpt_capabilities.has_texture_angle_source
- col.label(text="Angle:")
- if brush.sculpt_capabilities.has_random_texture_angle:
- col.prop(brush, "texture_angle_source_random", text="")
+ if sculpt:
+ layout.row().prop(tex_slot, "map_mode", text="")
+ layout.separator()
else:
- col.prop(brush, "texture_angle_source_no_random", text="")
-
+ layout.row().prop(tex_slot, "tex_paint_map_mode", text="")
+ layout.separator()
+
+ # angle and texture_angle_source
col = layout.column()
- col.active = brush.sculpt_capabilities.has_texture_angle
- col.prop(tex_slot, "angle", text="")
+ if sculpt:
+ col.active = brush.sculpt_capabilities.has_texture_angle_source
+ col.label(text="Angle:")
+ if brush.sculpt_capabilities.has_random_texture_angle:
+ col.prop(brush, "texture_angle_source_random", text="")
+ else:
+ col.prop(brush, "texture_angle_source_no_random", text="")
+
+ col = layout.column()
+ col.active = brush.sculpt_capabilities.has_texture_angle
+ col.prop(tex_slot, "angle", text="")
# scale and offset
split = layout.split()
split.prop(tex_slot, "offset")
split.prop(tex_slot, "scale")
-
- # texture_sample_bias
- col = layout.column(align=True)
- col.label(text="Sample Bias:")
- col.prop(brush, "texture_sample_bias", slider=True, text="")
+
+ if sculpt:
+ # texture_sample_bias
+ col = layout.column(align=True)
+ col.label(text="Sample Bias:")
+ col.prop(brush, "texture_sample_bias", slider=True, text="")
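The renamed brush_texture_settings helper now serves both sculpt and image-paint brushes, with the third argument selecting the sculpt variant; a minimal sketch of reusing it from a custom panel (the panel itself is hypothetical, only the helper call mirrors this patch):

import bpy
from bl_ui.properties_paint_common import brush_texture_settings


class VIEW3D_PT_brush_texture_example(bpy.types.Panel):
    """Illustrative panel reusing the shared brush texture settings UI"""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'UI'
    bl_label = "Brush Texture (Example)"

    def draw(self, context):
        brush = context.tool_settings.sculpt.brush
        if brush is not None:
            # True selects the sculpt layout; image paint callers pass False/0
            brush_texture_settings(self.layout, brush, True)


if __name__ == "__main__":  # only for live edit.
    bpy.utils.register_class(VIEW3D_PT_brush_texture_example)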
diff --git a/release/scripts/startup/bl_ui/properties_physics_common.py b/release/scripts/startup/bl_ui/properties_physics_common.py
index b70ff322765..17a44a523dc 100644
--- a/release/scripts/startup/bl_ui/properties_physics_common.py
+++ b/release/scripts/startup/bl_ui/properties_physics_common.py
@@ -44,6 +44,12 @@ def physics_add(self, layout, md, name, type, typeicon, toggles):
else:
sub.operator("object.modifier_add", text=name, icon=typeicon).type = type
+def physics_add_special(self, layout, data, name, addop, removeop, typeicon):
+ sub = layout.row(align=True)
+ if data:
+ sub.operator(removeop, text=name, icon='X')
+ else:
+ sub.operator(addop, text=name, icon=typeicon)
class PHYSICS_PT_add(PhysicButtonsPanel, Panel):
bl_label = ""
@@ -76,6 +82,18 @@ class PHYSICS_PT_add(PhysicButtonsPanel, Panel):
physics_add(self, col, context.fluid, "Fluid", 'FLUID_SIMULATION', 'MOD_FLUIDSIM', True)
physics_add(self, col, context.smoke, "Smoke", 'SMOKE', 'MOD_SMOKE', True)
+ if ob.type == 'MESH':
+ physics_add_special(self, col, ob.rigid_body, "Rigid Body",
+ "rigidbody.object_add",
+ "rigidbody.object_remove",
+ 'MESH_ICOSPHERE') # XXX: need dedicated icon
+
+ # all types of objects can have rigid body constraint
+ physics_add_special(self, col, ob.rigid_body_constraint, "Rigid Body Constraint",
+ "rigidbody.constraint_add",
+ "rigidbody.constraint_remove",
+ 'CONSTRAINT') # RB_TODO needs better icon
+
# cache-type can be 'PSYS' 'HAIR' 'SMOKE' etc
@@ -84,11 +102,12 @@ def point_cache_ui(self, context, cache, enabled, cachetype):
layout.context_pointer_set("point_cache", cache)
- row = layout.row()
- row.template_list("UI_UL_list", "", cache, "point_caches", cache.point_caches, "active_index", rows=2)
- col = row.column(align=True)
- col.operator("ptcache.add", icon='ZOOMIN', text="")
- col.operator("ptcache.remove", icon='ZOOMOUT', text="")
+ if not cachetype == 'RIGID_BODY':
+ row = layout.row()
+ row.template_list("UI_UL_list", "", cache, "point_caches", cache.point_caches, "active_index", rows=2)
+ col = row.column(align=True)
+ col.operator("ptcache.add", icon='ZOOMIN', text="")
+ col.operator("ptcache.remove", icon='ZOOMOUT', text="")
row = layout.row()
if cachetype in {'PSYS', 'HAIR', 'SMOKE'}:
@@ -131,13 +150,13 @@ def point_cache_ui(self, context, cache, enabled, cachetype):
row.enabled = enabled
row.prop(cache, "frame_start")
row.prop(cache, "frame_end")
- if cachetype not in {'SMOKE', 'CLOTH', 'DYNAMIC_PAINT'}:
+ if cachetype not in {'SMOKE', 'CLOTH', 'DYNAMIC_PAINT', 'RIGID_BODY'}:
row.prop(cache, "frame_step")
-
+
if cachetype != 'SMOKE':
layout.label(text=cache.info)
- if cachetype not in {'SMOKE', 'DYNAMIC_PAINT'}:
+ if cachetype not in {'SMOKE', 'DYNAMIC_PAINT', 'RIGID_BODY'}:
split = layout.split()
split.enabled = enabled and bpy.data.is_saved
diff --git a/release/scripts/startup/bl_ui/properties_physics_dynamicpaint.py b/release/scripts/startup/bl_ui/properties_physics_dynamicpaint.py
index 9393852b8a5..efc675ffe83 100644
--- a/release/scripts/startup/bl_ui/properties_physics_dynamicpaint.py
+++ b/release/scripts/startup/bl_ui/properties_physics_dynamicpaint.py
@@ -80,7 +80,7 @@ class PHYSICS_PT_dynamic_paint(PhysicButtonsPanel, Panel):
row = layout.row()
row.template_list("PHYSICS_UL_dynapaint_surfaces", "", canvas, "canvas_surfaces",
- canvas.canvas_surfaces, "active_index", rows=2)
+ canvas.canvas_surfaces, "active_index", rows=2)
col = row.column(align=True)
col.operator("dpaint.surface_slot_add", icon='ZOOMIN', text="")
diff --git a/release/scripts/startup/bl_ui/properties_physics_rigidbody.py b/release/scripts/startup/bl_ui/properties_physics_rigidbody.py
new file mode 100644
index 00000000000..f335c560f7b
--- /dev/null
+++ b/release/scripts/startup/bl_ui/properties_physics_rigidbody.py
@@ -0,0 +1,133 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+import bpy
+from bpy.types import Panel
+
+
+class PHYSICS_PT_rigidbody_panel():
+ bl_space_type = 'PROPERTIES'
+ bl_region_type = 'WINDOW'
+ bl_context = "physics"
+
+
+class PHYSICS_PT_rigid_body(PHYSICS_PT_rigidbody_panel, Panel):
+ bl_label = "Rigid Body"
+
+ @classmethod
+ def poll(cls, context):
+ ob = context.object
+ rd = context.scene.render
+ return (ob and ob.rigid_body and (not rd.use_game_engine))
+
+ def draw_header(self, context):
+ obj = context.object
+ rbo = obj.rigid_body
+ if rbo is not None:
+ self.layout.prop(rbo, "enabled", text="")
+
+ def draw(self, context):
+ layout = self.layout
+
+ ob = context.object
+ rbo = ob.rigid_body
+
+ if rbo is not None:
+ layout.prop(rbo, "type", text="Type")
+ layout.prop(rbo, "kinematic", text="Animated")
+
+ if rbo.type == 'ACTIVE':
+ layout.prop(rbo, "mass")
+
+
+class PHYSICS_PT_rigid_body_collisions(PHYSICS_PT_rigidbody_panel, Panel):
+ bl_label = "Rigid Body Collisions"
+
+ @classmethod
+ def poll(cls, context):
+ return (context.object and context.object.rigid_body and
+ (not context.scene.render.use_game_engine))
+
+ def draw(self, context):
+ layout = self.layout
+
+ ob = context.object
+ rbo = ob.rigid_body
+
+ layout.prop(rbo, "collision_shape", text="Shape")
+
+ split = layout.split()
+
+ col = split.column()
+ col.label(text="Surface Response:")
+ col.prop(rbo, "friction")
+ col.prop(rbo, "restitution", text="Bounciness")
+
+ col = split.column()
+ col.label(text="Sensitivity:")
+ if rbo.collision_shape in {'MESH', 'CONE'}:
+ col.prop(rbo, "collision_margin", text="Margin")
+ else:
+ col.prop(rbo, "use_margin")
+ sub = col.column()
+ sub.active = rbo.use_margin
+ sub.prop(rbo, "collision_margin", text="Margin")
+
+ layout.prop(rbo, "collision_groups")
+
+
+class PHYSICS_PT_rigid_body_dynamics(PHYSICS_PT_rigidbody_panel, Panel):
+ bl_label = "Rigid Body Dynamics"
+ bl_options = {'DEFAULT_CLOSED'}
+
+ @classmethod
+ def poll(cls, context):
+ return (context.object and context.object.rigid_body and
+ context.object.rigid_body.type == 'ACTIVE' and
+ (not context.scene.render.use_game_engine))
+
+ def draw(self, context):
+ layout = self.layout
+
+ ob = context.object
+ rbo = ob.rigid_body
+
+ #col = layout.column(align=1)
+ #col.label(text="Activation:")
+ # XXX: settings such as activate on collision/etc.
+
+ split = layout.split()
+
+ col = split.column()
+ col.label(text="Deactivation:")
+ col.prop(rbo, "use_deactivation")
+ sub = col.column()
+ sub.active = rbo.use_deactivation
+ sub.prop(rbo, "start_deactivated")
+ sub.prop(rbo, "deactivate_linear_velocity", text="Linear Vel")
+ sub.prop(rbo, "deactivate_angular_velocity", text="Angular Vel")
+ # TODO: other params such as time?
+
+ col = split.column()
+ col.label(text="Damping:")
+ col.prop(rbo, "linear_damping", text="Translation")
+ col.prop(rbo, "angular_damping", text="Rotation")
+
+if __name__ == "__main__": # only for live edit.
+ bpy.utils.register_module(__name__)
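The panels above only display settings that already exist on the object; a minimal sketch of creating and tweaking them from a script, using the operator and property names referenced in this patch (the concrete values, and the 'CONVEX_HULL' shape, are illustrative assumptions):

import bpy

obj = bpy.context.object
if obj is not None and obj.type == 'MESH' and obj.rigid_body is None:
    bpy.ops.rigidbody.object_add()  # same operator the physics Add panel calls

rbo = bpy.context.object.rigid_body
if rbo is not None:
    rbo.type = 'ACTIVE'
    rbo.kinematic = False                # shown as "Animated" in the panel
    rbo.mass = 2.0
    rbo.friction = 0.5
    rbo.restitution = 0.1                # shown as "Bounciness" in the panel
    rbo.collision_shape = 'CONVEX_HULL'  # assumed enum value, not taken from this patch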
diff --git a/release/scripts/startup/bl_ui/properties_physics_rigidbody_constraint.py b/release/scripts/startup/bl_ui/properties_physics_rigidbody_constraint.py
new file mode 100644
index 00000000000..49b760ef546
--- /dev/null
+++ b/release/scripts/startup/bl_ui/properties_physics_rigidbody_constraint.py
@@ -0,0 +1,208 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+import bpy
+from bpy.types import Panel
+
+
+class PHYSICS_PT_rigidbody_constraint_panel():
+ bl_space_type = 'PROPERTIES'
+ bl_region_type = 'WINDOW'
+ bl_context = "physics"
+
+
+class PHYSICS_PT_rigid_body_constraint(PHYSICS_PT_rigidbody_constraint_panel, Panel):
+ bl_label = "Rigid Body Constraint"
+
+ @classmethod
+ def poll(cls, context):
+ ob = context.object
+ rd = context.scene.render
+ return (ob and ob.rigid_body_constraint and (not rd.use_game_engine))
+
+ def draw(self, context):
+ layout = self.layout
+
+ ob = context.object
+ rbc = ob.rigid_body_constraint
+
+ layout.prop(rbc, "type")
+
+ row = layout.row()
+ row.prop(rbc, "enabled")
+ row.prop(rbc, "disable_collisions")
+
+ layout.prop(rbc, "object1")
+ layout.prop(rbc, "object2")
+
+ row = layout.row()
+ row.prop(rbc, "use_breaking")
+ sub = row.row()
+ sub.active = rbc.use_breaking
+ sub.prop(rbc, "breaking_threshold", text="Threshold")
+
+ row = layout.row()
+ row.prop(rbc, "override_solver_iterations", text="Override Iterations")
+ sub = row.row()
+ sub.active = rbc.override_solver_iterations
+ sub.prop(rbc, "num_solver_iterations", text="Iterations")
+
+ if rbc.type == 'HINGE':
+ col = layout.column(align=True)
+ col.label("Limits:")
+
+ row = col.row()
+ sub = row.row()
+ sub.scale_x = 0.5
+ sub.prop(rbc, "use_limit_ang_z", toggle=True)
+ sub = row.row()
+ sub.active = rbc.use_limit_ang_z
+ sub.prop(rbc, "limit_ang_z_lower", text="Lower")
+ sub.prop(rbc, "limit_ang_z_upper", text="Upper")
+
+ elif rbc.type == 'SLIDER':
+ col = layout.column(align=True)
+ col.label("Limits:")
+
+ row = col.row()
+ sub = row.row()
+ sub.scale_x = 0.5
+ sub.prop(rbc, "use_limit_lin_x", toggle=True)
+ sub = row.row()
+ sub.active = rbc.use_limit_lin_x
+ sub.prop(rbc, "limit_lin_x_lower", text="Lower")
+ sub.prop(rbc, "limit_lin_x_upper", text="Upper")
+
+ elif rbc.type == 'PISTON':
+ col = layout.column(align=True)
+ col.label("Limits:")
+
+ row = col.row()
+ sub = row.row()
+ sub.scale_x = 0.5
+ sub.prop(rbc, "use_limit_lin_x", toggle=True)
+ sub = row.row()
+ sub.active = rbc.use_limit_lin_x
+ sub.prop(rbc, "limit_lin_x_lower", text="Lower")
+ sub.prop(rbc, "limit_lin_x_upper", text="Upper")
+
+ col = layout.column(align=True)
+
+ row = col.row()
+ sub = row.row()
+ sub.scale_x = 0.5
+ sub.prop(rbc, "use_limit_ang_x", toggle=True)
+ sub = row.row()
+ sub.active = rbc.use_limit_ang_x
+ sub.prop(rbc, "limit_ang_x_lower", text="Lower")
+ sub.prop(rbc, "limit_ang_x_upper", text="Upper")
+
+ elif rbc.type in {'GENERIC', 'GENERIC_SPRING'}:
+ col = layout.column(align=True)
+ col.label("Limits:")
+
+ row = col.row()
+ sub = row.row()
+ sub.scale_x = 0.5
+ sub.prop(rbc, "use_limit_lin_x", toggle=True)
+ sub = row.row()
+ sub.active = rbc.use_limit_lin_x
+ sub.prop(rbc, "limit_lin_x_lower", text="Lower")
+ sub.prop(rbc, "limit_lin_x_upper", text="Upper")
+
+ row = col.row()
+ sub = row.row()
+ sub.scale_x = 0.5
+ sub.prop(rbc, "use_limit_lin_y", toggle=True)
+ sub = row.row()
+ sub.active = rbc.use_limit_lin_y
+ sub.prop(rbc, "limit_lin_y_lower", text="Lower")
+ sub.prop(rbc, "limit_lin_y_upper", text="Upper")
+
+ row = col.row()
+ sub = row.row()
+ sub.scale_x = 0.5
+ sub.prop(rbc, "use_limit_lin_z", toggle=True)
+ sub = row.row()
+ sub.active = rbc.use_limit_lin_z
+ sub.prop(rbc, "limit_lin_z_lower", text="Lower")
+ sub.prop(rbc, "limit_lin_z_upper", text="Upper")
+
+ col = layout.column(align=True)
+
+ row = col.row()
+ sub = row.row()
+ sub.scale_x = 0.5
+ sub.prop(rbc, "use_limit_ang_x", toggle=True)
+ sub = row.row()
+ sub.active = rbc.use_limit_ang_x
+ sub.prop(rbc, "limit_ang_x_lower", text="Lower")
+ sub.prop(rbc, "limit_ang_x_upper", text="Upper")
+
+ row = col.row()
+ sub = row.row()
+ sub.scale_x = 0.5
+ sub.prop(rbc, "use_limit_ang_y", toggle=True)
+ sub = row.row()
+ sub.active = rbc.use_limit_ang_y
+ sub.prop(rbc, "limit_ang_y_lower", text="Lower")
+ sub.prop(rbc, "limit_ang_y_upper", text="Upper")
+
+ row = col.row()
+ sub = row.row()
+ sub.scale_x = 0.5
+ sub.prop(rbc, "use_limit_ang_z", toggle=True)
+ sub = row.row()
+ sub.active = rbc.use_limit_ang_z
+ sub.prop(rbc, "limit_ang_z_lower", text="Lower")
+ sub.prop(rbc, "limit_ang_z_upper", text="Upper")
+
+ if rbc.type == 'GENERIC_SPRING':
+ col = layout.column(align=True)
+ col.label("Springs:")
+
+ row = col.row()
+ sub = row.row()
+ sub.scale_x = 0.1
+ sub.prop(rbc, "use_spring_x", toggle=True, text="X")
+ sub = row.row()
+ sub.active = rbc.use_spring_x
+ sub.prop(rbc, "spring_stiffness_x", text="Stiffness")
+ sub.prop(rbc, "spring_damping_x")
+
+ row = col.row()
+ sub = row.row()
+ sub.scale_x = 0.1
+ sub.prop(rbc, "use_spring_y", toggle=True, text="Y")
+ sub = row.row()
+ sub.active = rbc.use_spring_y
+ sub.prop(rbc, "spring_stiffness_y", text="Stiffness")
+ sub.prop(rbc, "spring_damping_y")
+
+ row = col.row()
+ sub = row.row()
+ sub.scale_x = 0.1
+ sub.prop(rbc, "use_spring_z", toggle=True, text="Z")
+ sub = row.row()
+ sub.active = rbc.use_spring_z
+ sub.prop(rbc, "spring_stiffness_z", text="Stiffness")
+ sub.prop(rbc, "spring_damping_z")
+
+if __name__ == "__main__": # only for live edit.
+ bpy.utils.register_module(__name__)
diff --git a/release/scripts/startup/bl_ui/properties_physics_smoke.py b/release/scripts/startup/bl_ui/properties_physics_smoke.py
index ce5053f0ecf..487f43aa973 100644
--- a/release/scripts/startup/bl_ui/properties_physics_smoke.py
+++ b/release/scripts/startup/bl_ui/properties_physics_smoke.py
@@ -76,7 +76,7 @@ class PHYSICS_PT_smoke(PhysicButtonsPanel, Panel):
elif md.smoke_type == 'FLOW':
flow = md.flow_settings
-
+
layout.prop(flow, "smoke_flow_type", expand=False)
if flow.smoke_flow_type != "OUTFLOW":
@@ -118,7 +118,8 @@ class PHYSICS_PT_smoke(PhysicButtonsPanel, Panel):
col = split.column()
col.prop(coll, "collision_type")
-
+
+
class PHYSICS_PT_smoke_flow_advanced(PhysicButtonsPanel, Panel):
bl_label = "Smoke Flow Advanced"
bl_options = {'DEFAULT_CLOSED'}
@@ -132,7 +133,7 @@ class PHYSICS_PT_smoke_flow_advanced(PhysicButtonsPanel, Panel):
layout = self.layout
ob = context.object
flow = context.smoke.flow_settings
-
+
split = layout.split()
col = split.column()
@@ -147,11 +148,12 @@ class PHYSICS_PT_smoke_flow_advanced(PhysicButtonsPanel, Panel):
if flow.texture_map_type == "AUTO":
sub.prop(flow, "texture_size")
sub.prop(flow, "texture_offset")
-
+
col = split.column()
col.label(text="Vertex Group:")
col.prop_search(flow, "density_vertex_group", ob, "vertex_groups", text="")
+
class PHYSICS_PT_smoke_fire(PhysicButtonsPanel, Panel):
bl_label = "Smoke Flames"
bl_options = {'DEFAULT_CLOSED'}
@@ -179,7 +181,8 @@ class PHYSICS_PT_smoke_fire(PhysicButtonsPanel, Panel):
col.prop(domain, "flame_ignition")
col.prop(domain, "flame_max_temp")
col.prop(domain, "flame_smoke_color")
-
+
+
class PHYSICS_PT_smoke_adaptive_domain(PhysicButtonsPanel, Panel):
bl_label = "Smoke Adaptive Domain"
bl_options = {'DEFAULT_CLOSED'}
@@ -199,10 +202,10 @@ class PHYSICS_PT_smoke_adaptive_domain(PhysicButtonsPanel, Panel):
domain = context.smoke.domain_settings
layout.active = domain.use_adaptive_domain
-
+
split = layout.split()
- split.enabled = not domain.point_cache.is_baked
-
+ split.enabled = (not domain.point_cache.is_baked)
+
col = split.column(align=True)
col.label(text="Resolution:")
col.prop(domain, "additional_res")
@@ -212,6 +215,7 @@ class PHYSICS_PT_smoke_adaptive_domain(PhysicButtonsPanel, Panel):
col.label(text="Advanced:")
col.prop(domain, "adapt_threshold")
+
class PHYSICS_PT_smoke_highres(PhysicButtonsPanel, Panel):
bl_label = "Smoke High Resolution"
bl_options = {'DEFAULT_CLOSED'}
@@ -249,6 +253,7 @@ class PHYSICS_PT_smoke_highres(PhysicButtonsPanel, Panel):
layout.prop(md, "show_high_resolution")
+
class PHYSICS_PT_smoke_groups(PhysicButtonsPanel, Panel):
bl_label = "Smoke Groups"
bl_options = {'DEFAULT_CLOSED'}
@@ -262,7 +267,7 @@ class PHYSICS_PT_smoke_groups(PhysicButtonsPanel, Panel):
def draw(self, context):
layout = self.layout
domain = context.smoke.domain_settings
-
+
split = layout.split()
col = split.column()
@@ -276,6 +281,7 @@ class PHYSICS_PT_smoke_groups(PhysicButtonsPanel, Panel):
col.label(text="Collision Group:")
col.prop(domain, "collision_group", text="")
+
class PHYSICS_PT_smoke_cache(PhysicButtonsPanel, Panel):
bl_label = "Smoke Cache"
bl_options = {'DEFAULT_CLOSED'}
diff --git a/release/scripts/startup/bl_ui/properties_render.py b/release/scripts/startup/bl_ui/properties_render.py
index 77bb2d3d50e..cb5a473dba1 100644
--- a/release/scripts/startup/bl_ui/properties_render.py
+++ b/release/scripts/startup/bl_ui/properties_render.py
@@ -43,6 +43,7 @@ class RENDER_MT_framerate_presets(Menu):
draw = Menu.draw_preset
+
class RenderButtonsPanel():
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
@@ -245,7 +246,7 @@ class RENDER_PT_performance(RenderButtonsPanel, Panel):
subsub = sub.column()
subsub.enabled = rd.threads_mode == 'FIXED'
subsub.prop(rd, "threads")
-
+
sub = col.column(align=True)
sub.label(text="Tile Size:")
sub.prop(rd, "tile_x", text="X")
diff --git a/release/scripts/startup/bl_ui/properties_scene.py b/release/scripts/startup/bl_ui/properties_scene.py
index 66a16daa22f..e0f4fd1f75b 100644
--- a/release/scripts/startup/bl_ui/properties_scene.py
+++ b/release/scripts/startup/bl_ui/properties_scene.py
@@ -21,6 +21,10 @@ import bpy
from bpy.types import Panel, UIList
from rna_prop_ui import PropertyPanel
+from bl_ui.properties_physics_common import (
+ point_cache_ui,
+ effector_weights_ui,
+ )
class SCENE_UL_keying_set_paths(UIList):
def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
@@ -59,36 +63,6 @@ class SCENE_PT_scene(SceneButtonsPanel, Panel):
layout.prop(scene, "active_clip", text="Active Clip")
-class SCENE_PT_audio(SceneButtonsPanel, Panel):
- bl_label = "Audio"
- COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
-
- def draw(self, context):
- layout = self.layout
-
- scene = context.scene
- rd = context.scene.render
- ffmpeg = rd.ffmpeg
-
- layout.prop(scene, "audio_volume")
- layout.operator("sound.bake_animation")
-
- split = layout.split()
-
- col = split.column()
- col.label("Listener:")
- col.prop(scene, "audio_distance_model", text="")
- col.prop(scene, "audio_doppler_speed", text="Speed")
- col.prop(scene, "audio_doppler_factor", text="Doppler")
-
- col = split.column()
- col.label("Format:")
- col.prop(ffmpeg, "audio_channels", text="")
- col.prop(ffmpeg, "audio_mixrate", text="Rate")
-
- layout.operator("sound.mixdown")
-
-
class SCENE_PT_unit(SceneButtonsPanel, Panel):
bl_label = "Units"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
@@ -198,6 +172,63 @@ class SCENE_PT_keying_set_paths(SceneButtonsPanel, Panel):
col.prop(ksp, "bl_options")
+class SCENE_PT_color_management(SceneButtonsPanel, Panel):
+ bl_label = "Color Management"
+ bl_options = {'DEFAULT_CLOSED'}
+ COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
+
+ def draw(self, context):
+ layout = self.layout
+
+ scene = context.scene
+ rd = scene.render
+
+ col = layout.column()
+ col.label(text="Display:")
+ col.prop(scene.display_settings, "display_device")
+
+ col = layout.column()
+ col.separator()
+ col.label(text="Render:")
+ col.template_colormanaged_view_settings(scene, "view_settings")
+
+ col = layout.column()
+ col.separator()
+ col.label(text="Sequencer:")
+ col.prop(scene.sequencer_colorspace_settings, "name")
+
+
+class SCENE_PT_audio(SceneButtonsPanel, Panel):
+ bl_label = "Audio"
+ bl_options = {'DEFAULT_CLOSED'}
+ COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
+
+ def draw(self, context):
+ layout = self.layout
+
+ scene = context.scene
+ rd = context.scene.render
+ ffmpeg = rd.ffmpeg
+
+ layout.prop(scene, "audio_volume")
+ layout.operator("sound.bake_animation")
+
+ split = layout.split()
+
+ col = split.column()
+ col.label("Listener:")
+ col.prop(scene, "audio_distance_model", text="")
+ col.prop(scene, "audio_doppler_speed", text="Speed")
+ col.prop(scene, "audio_doppler_factor", text="Doppler")
+
+ col = split.column()
+ col.label("Format:")
+ col.prop(ffmpeg, "audio_channels", text="")
+ col.prop(ffmpeg, "audio_mixrate", text="Rate")
+
+ layout.operator("sound.mixdown")
+
+
class SCENE_PT_physics(SceneButtonsPanel, Panel):
bl_label = "Gravity"
COMPAT_ENGINES = {'BLENDER_RENDER'}
@@ -215,6 +246,88 @@ class SCENE_PT_physics(SceneButtonsPanel, Panel):
layout.prop(scene, "gravity", text="")
+class SCENE_PT_rigid_body_world(SceneButtonsPanel, Panel):
+ bl_label = "Rigid Body World"
+ COMPAT_ENGINES = {'BLENDER_RENDER'}
+
+ @classmethod
+ def poll(cls, context):
+ scene = context.scene
+ rd = scene.render
+ return scene and (rd.engine in cls.COMPAT_ENGINES)
+
+ def draw_header(self, context):
+ scene = context.scene
+ rbw = scene.rigidbody_world
+ if rbw is not None:
+ self.layout.prop(rbw, "enabled", text="")
+
+ def draw(self, context):
+ layout = self.layout
+
+ scene = context.scene
+
+ rbw = scene.rigidbody_world
+
+ if rbw is None:
+ layout.operator("rigidbody.world_add")
+ else:
+ layout.operator("rigidbody.world_remove")
+
+ col = layout.column()
+ col.active = rbw.enabled
+
+ col = col.column()
+ col.prop(rbw, "group")
+ col.prop(rbw, "constraints")
+
+ split = col.split()
+
+ col = split.column()
+ col.prop(rbw, "time_scale", text="Speed")
+ col.prop(rbw, "use_split_impulse")
+
+ col = split.column()
+ col.prop(rbw, "steps_per_second", text="Steps Per Second")
+ col.prop(rbw, "num_solver_iterations", text="Solver Iterations")
+
+
+class SCENE_PT_rigid_body_cache(SceneButtonsPanel, Panel):
+ bl_label = "Rigid Body Cache"
+ bl_options = {'DEFAULT_CLOSED'}
+ COMPAT_ENGINES = {'BLENDER_RENDER'}
+
+ @classmethod
+ def poll(cls, context):
+ rd = context.scene.render
+ scene = context.scene
+ return scene and scene.rigidbody_world and (rd.engine in cls.COMPAT_ENGINES)
+
+ def draw(self, context):
+ scene = context.scene
+ rbw = scene.rigidbody_world
+
+ point_cache_ui(self, context, rbw.point_cache, rbw.point_cache.is_baked is False and rbw.enabled, 'RIGID_BODY')
+
+
+class SCENE_PT_rigid_body_field_weights(SceneButtonsPanel, Panel):
+ bl_label = "Rigid Body Field Weights"
+ bl_options = {'DEFAULT_CLOSED'}
+ COMPAT_ENGINES = {'BLENDER_RENDER'}
+
+ @classmethod
+ def poll(cls, context):
+ rd = context.scene.render
+ scene = context.scene
+ return scene and scene.rigidbody_world and (rd.engine in cls.COMPAT_ENGINES)
+
+ def draw(self, context):
+ scene = context.scene
+ rbw = scene.rigidbody_world
+
+ effector_weights_ui(self, context, rbw.effector_weights, 'RIGID_BODY')
+
+
class SCENE_PT_simplify(SceneButtonsPanel, Panel):
bl_label = "Simplify"
COMPAT_ENGINES = {'BLENDER_RENDER'}
@@ -243,33 +356,6 @@ class SCENE_PT_simplify(SceneButtonsPanel, Panel):
col.prop(rd, "simplify_ao_sss", text="AO and SSS")
-class SCENE_PT_color_management(Panel):
- bl_label = "Color Management"
- bl_space_type = 'PROPERTIES'
- bl_region_type = 'WINDOW'
- bl_context = "scene"
-
- def draw(self, context):
- layout = self.layout
-
- scene = context.scene
- rd = scene.render
-
- col = layout.column()
- col.label(text="Display:")
- col.prop(scene.display_settings, "display_device")
-
- col = layout.column()
- col.separator()
- col.label(text="Render:")
- col.template_colormanaged_view_settings(scene, "view_settings")
-
- col = layout.column()
- col.separator()
- col.label(text="Sequencer:")
- col.prop(scene.sequencer_colorspace_settings, "name")
-
-
class SCENE_PT_custom_props(SceneButtonsPanel, PropertyPanel, Panel):
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
_context_path = "scene"
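The new scene-level panels drive the rigid body world data; a minimal sketch of setting it up from a script with the operator and properties shown above (the concrete values are illustrative):

import bpy

scene = bpy.context.scene
if scene.rigidbody_world is None:
    bpy.ops.rigidbody.world_add()    # same operator the panel exposes

rbw = scene.rigidbody_world
rbw.time_scale = 1.0                 # shown as "Speed" in the panel
rbw.steps_per_second = 60
rbw.num_solver_iterations = 10
rbw.use_split_impulse = True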
diff --git a/release/scripts/startup/bl_ui/properties_texture.py b/release/scripts/startup/bl_ui/properties_texture.py
index 6842b324b0e..9e8fd0470cf 100644
--- a/release/scripts/startup/bl_ui/properties_texture.py
+++ b/release/scripts/startup/bl_ui/properties_texture.py
@@ -30,7 +30,7 @@ from bpy.types import (Brush,
from rna_prop_ui import PropertyPanel
-from bl_ui.properties_paint_common import sculpt_brush_texture_settings
+from bl_ui.properties_paint_common import brush_texture_settings
class TEXTURE_MT_specials(Menu):
@@ -432,6 +432,12 @@ class TEXTURE_PT_image_sampling(TextureTypePanel, Panel):
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
def draw(self, context):
+ if context.scene.render.engine == 'BLENDER_GAME':
+ self.draw_bge(context)
+ else:
+ self.draw_bi(context)
+
+ def draw_bi(self, context):
layout = self.layout
idblock = context_tex_datablock(context)
@@ -468,6 +474,33 @@ class TEXTURE_PT_image_sampling(TextureTypePanel, Panel):
texture_filter_common(tex, col)
+ def draw_bge(self, context):
+ layout = self.layout
+
+ idblock = context_tex_datablock(context)
+ tex = context.texture
+ slot = getattr(context, "texture_slot", None)
+
+ split = layout.split()
+
+ col = split.column()
+ col.label(text="Alpha:")
+ col.prop(tex, "use_calculate_alpha", text="Calculate")
+ col.prop(tex, "invert_alpha", text="Invert")
+
+ col = split.column()
+
+ # Only for Material-based textures, not for Lamp/World...
+ if slot and isinstance(idblock, Material):
+ col.prop(tex, "use_normal_map")
+ row = col.row()
+ row.active = tex.use_normal_map
+ row.prop(slot, "normal_map_space", text="")
+
+ row = col.row()
+ row.active = not tex.use_normal_map
+ row.prop(tex, "use_derivative_map")
+
class TEXTURE_PT_image_mapping(TextureTypePanel, Panel):
bl_label = "Image Mapping"
@@ -884,8 +917,8 @@ class TEXTURE_PT_mapping(TextureSlotPanel, Panel):
split.prop(tex, "object", text="")
if isinstance(idblock, Brush):
- if context.sculpt_object:
- sculpt_brush_texture_settings(layout, idblock)
+ if context.sculpt_object or context.image_paint_object:
+ brush_texture_settings(layout, idblock, context.sculpt_object)
else:
if isinstance(idblock, Material):
split = layout.split(percentage=0.3)
diff --git a/release/scripts/startup/bl_ui/space_clip.py b/release/scripts/startup/bl_ui/space_clip.py
index 5fc57133767..db71bbe2390 100644
--- a/release/scripts/startup/bl_ui/space_clip.py
+++ b/release/scripts/startup/bl_ui/space_clip.py
@@ -23,14 +23,17 @@ from bpy.types import Panel, Header, Menu, UIList
class CLIP_UL_tracking_objects(UIList):
- def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
+ def draw_item(self, context, layout, data, item, icon,
+ active_data, active_propname, index):
# assert(isinstance(item, bpy.types.MovieTrackingObject)
tobj = item
if self.layout_type in {'DEFAULT', 'COMPACT'}:
- layout.label(tobj.name, icon='CAMERA_DATA' if tobj.is_camera else 'OBJECT_DATA')
+ layout.label(tobj.name, icon='CAMERA_DATA'
+ if tobj.is_camera else 'OBJECT_DATA')
elif self.layout_type in {'GRID'}:
layout.alignment = 'CENTER'
- layout.label("", icon='CAMERA_DATA' if tobj.is_camera else 'OBJECT_DATA')
+ layout.label("", icon='CAMERA_DATA'
+ if tobj.is_camera else 'OBJECT_DATA')
class CLIP_HT_header(Header):
@@ -333,7 +336,8 @@ class CLIP_PT_tools_solve(CLIP_PT_tracking_panel, Panel):
col = layout.column(align=True)
col.active = not settings.use_tripod_solver
- col.prop(settings, "use_fallback_reconstruction", text="Allow Fallback")
+ col.prop(settings, "use_fallback_reconstruction",
+ text="Allow Fallback")
sub = col.column()
sub.active = settings.use_fallback_reconstruction
sub.prop(settings, "reconstruction_success_threshold")
@@ -482,7 +486,8 @@ class CLIP_PT_objects(CLIP_PT_clip_view_panel, Panel):
tracking = sc.clip.tracking
row = layout.row()
- row.template_list("CLIP_UL_tracking_objects", "", tracking, "objects", tracking, "active_object_index", rows=3)
+ row.template_list("CLIP_UL_tracking_objects", "", tracking, "objects",
+ tracking, "active_object_index", rows=3)
sub = row.column(align=True)
@@ -738,7 +743,8 @@ class CLIP_PT_stabilization(CLIP_PT_reconstruction_panel, Panel):
layout.active = stab.use_2d_stabilization
row = layout.row()
- row.template_list("UI_UL_list", "", stab, "tracks", stab, "active_track_index", rows=3)
+ row.template_list("UI_UL_list", "", stab, "tracks",
+ stab, "active_track_index", rows=3)
sub = row.column(align=True)
diff --git a/release/scripts/startup/bl_ui/space_dopesheet.py b/release/scripts/startup/bl_ui/space_dopesheet.py
index 5535070c1c4..094fa4a7c6d 100644
--- a/release/scripts/startup/bl_ui/space_dopesheet.py
+++ b/release/scripts/startup/bl_ui/space_dopesheet.py
@@ -275,8 +275,8 @@ class DOPESHEET_MT_key(Menu):
layout.operator("action.keyframe_insert")
layout.separator()
- layout.operator("action.frame_jump")
-
+ layout.operator("action.frame_jump")
+
layout.separator()
layout.operator("action.duplicate_move")
layout.operator("action.delete")
diff --git a/release/scripts/startup/bl_ui/space_image.py b/release/scripts/startup/bl_ui/space_image.py
index 1ea20d96386..cfedc5e1e00 100644
--- a/release/scripts/startup/bl_ui/space_image.py
+++ b/release/scripts/startup/bl_ui/space_image.py
@@ -20,7 +20,7 @@
import bpy
from bpy.types import Header, Menu, Panel
from bl_ui.properties_paint_common import UnifiedPaintPanel
-
+from bl_ui.properties_paint_common import brush_texture_settings
class ImagePaintPanel(UnifiedPaintPanel):
bl_space_type = 'IMAGE_EDITOR'
@@ -722,7 +722,8 @@ class IMAGE_PT_tools_brush_texture(BrushButtonsPanel, Panel):
col = layout.column()
col.template_ID_preview(brush, "texture", new="texture.new", rows=3, cols=8)
- col.prop(brush, "use_fixed_texture")
+
+ brush_texture_settings(col, brush, 0)
class IMAGE_PT_tools_brush_tool(BrushButtonsPanel, Panel):
diff --git a/release/scripts/startup/bl_ui/space_info.py b/release/scripts/startup/bl_ui/space_info.py
index 8df117e27a0..c0d1f725ab6 100644
--- a/release/scripts/startup/bl_ui/space_info.py
+++ b/release/scripts/startup/bl_ui/space_info.py
@@ -112,7 +112,7 @@ class INFO_MT_file(Menu):
layout.separator()
- layout.operator_context = 'EXEC_AREA' if context.blend_data.is_saved else 'INVOKE_AREA'
+ layout.operator_context = 'EXEC_AREA' if context.blend_data.is_saved else 'INVOKE_AREA'
layout.operator("wm.save_mainfile", text="Save", icon='FILE_TICK')
layout.operator_context = 'INVOKE_AREA'
@@ -124,8 +124,9 @@ class INFO_MT_file(Menu):
layout.operator("screen.userpref_show", text="User Preferences...", icon='PREFERENCES')
- layout.operator_context = 'EXEC_AREA'
+ layout.operator_context = 'INVOKE_AREA'
layout.operator("wm.save_homefile", icon='SAVE_PREFS')
+ layout.operator_context = 'EXEC_AREA'
layout.operator("wm.read_factory_settings", icon='LOAD_FACTORY')
layout.separator()
@@ -373,7 +374,7 @@ class INFO_MT_help(Menu):
layout = self.layout
layout.operator("wm.url_open", text="Manual", icon='HELP').url = "http://wiki.blender.org/index.php/Doc:2.6/Manual"
- layout.operator("wm.url_open", text="Release Log", icon='URL').url = "http://www.blender.org/development/release-logs/blender-265"
+ layout.operator("wm.url_open", text="Release Log", icon='URL').url = "http://www.blender.org/development/release-logs/blender-266"
layout.separator()
layout.operator("wm.url_open", text="Blender Website", icon='URL').url = "http://www.blender.org"
diff --git a/release/scripts/startup/bl_ui/space_text.py b/release/scripts/startup/bl_ui/space_text.py
index 960a945f1c6..65ec945c7da 100644
--- a/release/scripts/startup/bl_ui/space_text.py
+++ b/release/scripts/startup/bl_ui/space_text.py
@@ -152,7 +152,7 @@ class TEXT_MT_view(Menu):
layout = self.layout
layout.operator("text.properties", icon='MENU_PANEL')
-
+
layout.separator()
layout.operator("text.move",
diff --git a/release/scripts/startup/bl_ui/space_time.py b/release/scripts/startup/bl_ui/space_time.py
index cb9e2444793..6af9f377237 100644
--- a/release/scripts/startup/bl_ui/space_time.py
+++ b/release/scripts/startup/bl_ui/space_time.py
@@ -152,6 +152,7 @@ class TIME_MT_cache(Menu):
col.prop(st, "cache_cloth")
col.prop(st, "cache_smoke")
col.prop(st, "cache_dynamicpaint")
+ col.prop(st, "cache_rigidbody")
class TIME_MT_frame(Menu):
diff --git a/release/scripts/startup/bl_ui/space_userpref.py b/release/scripts/startup/bl_ui/space_userpref.py
index a9712b1557e..dad729077e0 100644
--- a/release/scripts/startup/bl_ui/space_userpref.py
+++ b/release/scripts/startup/bl_ui/space_userpref.py
@@ -24,25 +24,25 @@ import os
def ui_style_items(col, context):
""" UI Style settings """
-
+
split = col.split()
-
+
col = split.column()
col.label(text="Kerning Style:")
col.row().prop(context, "font_kerning_style", expand=True)
col.prop(context, "points")
-
+
col = split.column()
col.label(text="Shadow Offset:")
col.prop(context, "shadow_offset_x", text="X")
col.prop(context, "shadow_offset_y", text="Y")
-
+
col = split.column()
col.prop(context, "shadow")
col.prop(context, "shadowalpha")
col.prop(context, "shadowcolor")
-
+
def ui_items_general(col, context):
""" General UI Theme Settings (User Interface)
"""
@@ -200,7 +200,7 @@ class USERPREF_PT_interface(Panel):
col.prop(view, "show_playback_fps", text="Playback FPS")
col.prop(view, "use_global_scene")
col.prop(view, "object_origin_size")
-
+
col.separator()
col.separator()
col.separator()
@@ -763,13 +763,13 @@ class USERPREF_PT_theme(Panel):
colsub.row().prop(ui, "axis_x")
colsub.row().prop(ui, "axis_y")
colsub.row().prop(ui, "axis_z")
-
+
subsplit = row.split(percentage=0.85)
padding = subsplit.split(percentage=0.15)
colsub = padding.column()
colsub = padding.column()
-
+
layout.separator()
layout.separator()
elif theme.theme_area == 'BONE_COLOR_SETS':
@@ -797,16 +797,23 @@ class USERPREF_PT_theme(Panel):
colsub.row().prop(ui, "show_colored_constraints")
elif theme.theme_area == 'STYLE':
col = split.column()
-
+
style = context.user_preferences.ui_styles[0]
-
+
+ ui = style.panel_title
+ col.label(text="Panel Title:")
+ ui_style_items(col, ui)
+
+ col.separator()
+ col.separator()
+
ui = style.widget
col.label(text="Widget:")
ui_style_items(col, ui)
-
+
col.separator()
col.separator()
-
+
ui = style.widget_label
col.label(text="Widget Label:")
ui_style_items(col, ui)
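
The STYLE theme page now starts with the new style.panel_title entry and repeats the same ui_style_items() calls for widget and widget_label. A compact sketch of walking those three ThemeFontStyle slots with one loop; the helper name draw_style_fonts is made up, the property names are the ones used in this hunk:

import bpy


def draw_style_fonts(layout, style):
    """Hypothetical helper: iterate the font-style slots the Style page shows."""
    col = layout.column()
    for label, font_style in (("Panel Title:", style.panel_title),
                              ("Widget:", style.widget),
                              ("Widget Label:", style.widget_label)):
        col.label(text=label)
        col.row().prop(font_style, "font_kerning_style", expand=True)
        col.prop(font_style, "points")
        col.prop(font_style, "shadow")
        col.separator()

# Usage (inside a Panel.draw):
#     draw_style_fonts(self.layout, context.user_preferences.ui_styles[0])
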
@@ -1100,8 +1107,8 @@ class USERPREF_PT_addons(Panel):
used_ext = {ext.module for ext in userpref.addons}
userpref_addons_folder = os.path.join(userpref.filepaths.script_directory, "addons")
- scripts_addons_folder = bpy.utils.user_resource('SCRIPTS', "addons")
-
+ scripts_addons_folder = bpy.utils.user_resource('SCRIPTS', "addons")
+
# collect the categories that can be filtered on
addons = [(mod, addon_utils.module_bl_info(mod)) for mod in addon_utils.modules(addon_utils.addons_fake_modules)]
@@ -1152,7 +1159,7 @@ class USERPREF_PT_addons(Panel):
(filter == "Enabled" and is_enabled) or
(filter == "Disabled" and not is_enabled) or
(filter == "User" and (mod.__file__.startswith((scripts_addons_folder, userpref_addons_folder))))
- ):
+ ):
if search and search not in info["name"].lower():
if info["author"]:
@@ -1244,7 +1251,6 @@ class USERPREF_PT_addons(Panel):
box_prefs.label(text="Error (see console)", icon='ERROR')
del addon_preferences_class.layout
-
# Append missing scripts
# First collect scripts that are used but have no script file.
module_names = {mod.__name__ for mod, info in addons}
diff --git a/release/scripts/startup/bl_ui/space_view3d.py b/release/scripts/startup/bl_ui/space_view3d.py
index a35fb149aae..6c5cc93947b 100644
--- a/release/scripts/startup/bl_ui/space_view3d.py
+++ b/release/scripts/startup/bl_ui/space_view3d.py
@@ -1066,12 +1066,14 @@ class VIEW3D_MT_make_links(Menu):
def draw(self, context):
layout = self.layout
-
+ operator_context_default = layout.operator_context
if(len(bpy.data.scenes) > 10):
- layout.operator_context = 'INVOKE_DEFAULT'
+ layout.operator_context = 'INVOKE_REGION_WIN'
layout.operator("object.make_links_scene", text="Objects to Scene...", icon='OUTLINER_OB_EMPTY')
else:
+ layout.operator_context = 'EXEC_REGION_WIN'
layout.operator_menu_enum("object.make_links_scene", "scene", text="Objects to Scene...")
+ layout.operator_context = operator_context_default
layout.operator_enum("object.make_links_data", "type") # inline
@@ -1537,7 +1539,7 @@ class VIEW3D_MT_pose_group(Menu):
def draw(self, context):
layout = self.layout
-
+
pose = context.active_object.pose
layout.operator_context = 'EXEC_AREA'
@@ -1710,19 +1712,37 @@ class VIEW3D_MT_edit_mesh_specials(Menu):
layout.operator("mesh.subdivide", text="Subdivide").smoothness = 0.0
layout.operator("mesh.subdivide", text="Subdivide Smooth").smoothness = 1.0
+
+ layout.separator()
+
layout.operator("mesh.merge", text="Merge...")
layout.operator("mesh.remove_doubles")
+
+ layout.separator()
+
layout.operator("mesh.hide", text="Hide").unselected = False
layout.operator("mesh.reveal", text="Reveal")
layout.operator("mesh.select_all", text="Select Inverse").action = 'INVERT'
+
+ layout.separator()
+
layout.operator("mesh.flip_normals")
layout.operator("mesh.vertices_smooth", text="Smooth")
layout.operator("mesh.vertices_smooth_laplacian", text="Laplacian Smooth")
+
+ layout.separator()
+
layout.operator("mesh.inset")
layout.operator("mesh.bevel", text="Bevel")
layout.operator("mesh.bridge_edge_loops")
+
+ layout.separator()
+
layout.operator("mesh.faces_shade_smooth")
layout.operator("mesh.faces_shade_flat")
+
+ layout.separator()
+
layout.operator("mesh.blend_from_shape")
layout.operator("mesh.shape_propagate_to_all")
layout.operator("mesh.select_vertex_path")
@@ -1789,10 +1809,11 @@ class VIEW3D_MT_edit_mesh_vertices(Menu):
layout.operator("mesh.split")
layout.operator_menu_enum("mesh.separate", "type")
layout.operator("mesh.vert_connect")
- layout.operator("mesh.vert_slide")
+ layout.operator("transform.vert_slide")
layout.separator()
+ layout.operator("mesh.bevel").vertex_only = True
layout.operator("mesh.vertices_smooth")
layout.operator("mesh.remove_doubles")
layout.operator("mesh.sort_elements", text="Sort Vertices").elements = {'VERT'}
@@ -2394,9 +2415,6 @@ class VIEW3D_PT_view3d_display(Panel):
col.prop(view, "show_outline_selected")
col.prop(view, "show_all_objects_origin")
col.prop(view, "show_relationship_lines")
- if ob and ob.type == 'MESH':
- mesh = ob.data
- col.prop(mesh, "show_all_edges")
col = layout.column()
col.active = display_all
@@ -2421,7 +2439,10 @@ class VIEW3D_PT_view3d_display(Panel):
col.label(text="Shading:")
col.prop(gs, "material_mode", text="")
col.prop(view, "show_textured_solid")
-
+ if view.viewport_shade == 'SOLID':
+ col.prop(view, "use_matcap")
+ if view.use_matcap:
+ col.template_icon_view(view, "matcap_icon")
col.prop(view, "show_backface_culling")
layout.separator()
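
The display-panel hunk exposes matcap shading only while viewport_shade is 'SOLID', with a template_icon_view for picking the matcap once use_matcap is on. A small script-side sketch toggling the same property on every solid-shaded 3D View; the helper is hypothetical and leaves matcap_icon at its default:

import bpy


def enable_matcap_for_solid_views():
    """Hypothetical helper: enable matcap display in solid-shaded 3D Views
    of the current screen (property names taken from the hunk above)."""
    for area in bpy.context.screen.areas:
        if area.type != 'VIEW_3D':
            continue
        space = area.spaces.active
        if space.viewport_shade == 'SOLID':
            space.use_matcap = True  # the checkbox added to the Display panel


enable_matcap_for_solid_views()
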
diff --git a/release/scripts/startup/bl_ui/space_view3d_toolbar.py b/release/scripts/startup/bl_ui/space_view3d_toolbar.py
index 3c56ff82a77..ca79c6cb7fa 100644
--- a/release/scripts/startup/bl_ui/space_view3d_toolbar.py
+++ b/release/scripts/startup/bl_ui/space_view3d_toolbar.py
@@ -20,7 +20,7 @@
import bpy
from bpy.types import Menu, Panel
from bl_ui.properties_paint_common import UnifiedPaintPanel
-from bl_ui.properties_paint_common import sculpt_brush_texture_settings
+from bl_ui.properties_paint_common import brush_texture_settings
class View3DPanel():
@@ -108,6 +108,33 @@ class VIEW3D_PT_tools_objectmode(View3DPanel, Panel):
draw_repeat_tools(context, layout)
draw_gpencil_tools(context, layout)
+ col = layout.column(align=True)
+
+
+class VIEW3D_PT_tools_rigidbody(View3DPanel, Panel):
+ bl_context = "objectmode"
+ bl_label = "Rigidbody Tools"
+ bl_options = {'DEFAULT_CLOSED'}
+
+ def draw(self, context):
+ layout = self.layout
+
+ col = layout.column(align=True)
+ col.label(text="Add/Remove:")
+ row = col.row()
+ row.operator("rigidbody.objects_add", text="Add Active").type = 'ACTIVE'
+ row.operator("rigidbody.objects_add", text="Add Passive").type = 'PASSIVE'
+ row = col.row()
+ row.operator("rigidbody.objects_remove", text="Remove")
+
+ col = layout.column(align=True)
+ col.label(text="Object Tools:")
+ col.operator("rigidbody.shape_change", text="Change Shape")
+ col.operator("rigidbody.mass_calculate", text="Calculate Mass")
+ col.operator("rigidbody.object_settings_copy", text="Copy from Active")
+ col.operator("rigidbody.bake_to_keyframes", text="Bake To Keyframes")
+ col.label(text="Constraints:")
+ col.operator("rigidbody.connect", text="Connect")
# ********** default tools for editmode_mesh ****************
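
The new VIEW3D_PT_tools_rigidbody panel is a thin front end for the rigidbody.* operators added elsewhere in this commit. A sketch of driving two of them from a script instead of the toolbar; the wrapper function is hypothetical, the operator names and the 'ACTIVE'/'PASSIVE' enum values are the ones the panel uses:

import bpy


def make_selection_rigid_bodies(body_type='ACTIVE'):
    """Hypothetical wrapper around the operators the panel buttons call."""
    # Add all selected objects as rigid bodies of the given type
    bpy.ops.rigidbody.objects_add(type=body_type)
    # Then derive their masses (the panel's "Calculate Mass" button)
    bpy.ops.rigidbody.mass_calculate()

# Usage: select some mesh objects in the 3D View, then run
#     make_selection_rigid_bodies('ACTIVE')
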
@@ -720,12 +747,10 @@ class VIEW3D_PT_tools_brush_texture(Panel, View3DPaintPanel):
col = layout.column()
col.template_ID_preview(brush, "texture", new="texture.new", rows=3, cols=8)
- if brush.use_paint_image:
- col.prop(brush, "use_fixed_texture")
- if context.sculpt_object:
- sculpt_brush_texture_settings(col, brush)
+ brush_texture_settings(col, brush, context.sculpt_object)
+ if context.sculpt_object:
# use_texture_overlay and texture_overlay_alpha
col = layout.column(align=True)
col.active = brush.sculpt_capabilities.has_overlay