git.blender.org/blender.git
author    Campbell Barton <ideasman42@gmail.com>  2012-07-03 15:32:42 +0400
committer Campbell Barton <ideasman42@gmail.com>  2012-07-03 15:32:42 +0400
commit    f42d6067890fd534f39fb1059edbc4d88b2748b6 (patch)
tree      cae7b5a402e4daed3ee3eb7e294ccd89f0690346  /release/scripts/modules/bl_i18n_utils
parent    314a2758505aeba72e2c87f06c5a61c6ceb7773c (diff)
rename module to something less generic.
Diffstat (limited to 'release/scripts/modules/bl_i18n_utils')
-rw-r--r--  release/scripts/modules/bl_i18n_utils/__init__.py                 21
-rw-r--r--  release/scripts/modules/bl_i18n_utils/bl_process_msg.py          546
-rwxr-xr-x  release/scripts/modules/bl_i18n_utils/check_po.py                175
-rwxr-xr-x  release/scripts/modules/bl_i18n_utils/clean_po.py                 97
-rwxr-xr-x  release/scripts/modules/bl_i18n_utils/import_po_from_branches.py 119
-rwxr-xr-x  release/scripts/modules/bl_i18n_utils/merge_po.py                156
-rwxr-xr-x  release/scripts/modules/bl_i18n_utils/rtl_preprocess.py          231
-rw-r--r--  release/scripts/modules/bl_i18n_utils/settings.py                286
-rw-r--r--  release/scripts/modules/bl_i18n_utils/spell_check_utils.py       490
-rwxr-xr-x  release/scripts/modules/bl_i18n_utils/update_branches.py         104
-rwxr-xr-x  release/scripts/modules/bl_i18n_utils/update_mo.py                91
-rwxr-xr-x  release/scripts/modules/bl_i18n_utils/update_msg.py               69
-rwxr-xr-x  release/scripts/modules/bl_i18n_utils/update_po.py               166
-rwxr-xr-x  release/scripts/modules/bl_i18n_utils/update_pot.py              314
-rwxr-xr-x  release/scripts/modules/bl_i18n_utils/update_trunk.py            132
-rw-r--r--  release/scripts/modules/bl_i18n_utils/user_settings.py            23
-rw-r--r--  release/scripts/modules/bl_i18n_utils/utils.py                   377
17 files changed, 3397 insertions, 0 deletions
diff --git a/release/scripts/modules/bl_i18n_utils/__init__.py b/release/scripts/modules/bl_i18n_utils/__init__.py
new file mode 100644
index 00000000000..4072247c6d6
--- /dev/null
+++ b/release/scripts/modules/bl_i18n_utils/__init__.py
@@ -0,0 +1,21 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+"""Package for translation (i18n) tools."""
diff --git a/release/scripts/modules/bl_i18n_utils/bl_process_msg.py b/release/scripts/modules/bl_i18n_utils/bl_process_msg.py
new file mode 100644
index 00000000000..fcbac8a6795
--- /dev/null
+++ b/release/scripts/modules/bl_i18n_utils/bl_process_msg.py
@@ -0,0 +1,546 @@
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ***** END GPL LICENSE BLOCK *****
+
+# <pep8-80 compliant>
+
+# Write out messages.txt from Blender.
+# XXX: This script is meant to be used from inside Blender!
+# You should not use this script directly; rather, use update_msg.py!
+
+import os
+
+# Quite an ugly hack… But the simplest solution for now!
+#import sys
+#sys.path.append(os.path.abspath(os.path.dirname(__file__)))
+import bl_i18n_utils.settings as settings
+
+
+#classes = set()
+
+
+SOURCE_DIR = settings.SOURCE_DIR
+
+CUSTOM_PY_UI_FILES = [os.path.abspath(os.path.join(SOURCE_DIR, p))
+ for p in settings.CUSTOM_PY_UI_FILES]
+FILE_NAME_MESSAGES = settings.FILE_NAME_MESSAGES
+COMMENT_PREFIX = settings.COMMENT_PREFIX
+CONTEXT_PREFIX = settings.CONTEXT_PREFIX
+CONTEXT_DEFAULT = settings.CONTEXT_DEFAULT
+UNDOC_OPS_STR = settings.UNDOC_OPS_STR
+
+NC_ALLOWED = settings.WARN_MSGID_NOT_CAPITALIZED_ALLOWED
+
+def check(check_ctxt, messages, key, msgsrc):
+ if check_ctxt is None:
+ return
+ multi_rnatip = check_ctxt.get("multi_rnatip")
+ multi_lines = check_ctxt.get("multi_lines")
+ py_in_rna = check_ctxt.get("py_in_rna")
+ not_capitalized = check_ctxt.get("not_capitalized")
+ end_point = check_ctxt.get("end_point")
+ undoc_ops = check_ctxt.get("undoc_ops")
+
+ if multi_rnatip is not None:
+ if key in messages and key not in multi_rnatip:
+ multi_rnatip.add(key)
+ if multi_lines is not None:
+ if '\n' in key[1]:
+ multi_lines.add(key)
+ if py_in_rna is not None:
+ if key in py_in_rna[1]:
+ py_in_rna[0].add(key)
+ if not_capitalized is not None:
+        if (key[1] not in NC_ALLOWED and key[1][0].isalpha() and
+                not key[1][0].isupper()):
+ not_capitalized.add(key)
+ if end_point is not None:
+ if key[1].strip().endswith('.'):
+ end_point.add(key)
+ if undoc_ops is not None:
+ if key[1] == UNDOC_OPS_STR:
+ undoc_ops.add(key)
+
+
+def dump_messages_rna(messages, check_ctxt):
+ import bpy
+
+ def classBlackList():
+ blacklist_rna_class = [# core classes
+ "Context", "Event", "Function", "UILayout",
+ "BlendData",
+ # registerable classes
+ "Panel", "Menu", "Header", "RenderEngine",
+ "Operator", "OperatorMacro", "Macro",
+ "KeyingSetInfo", "UnknownType",
+ # window classes
+ "Window",
+ ]
+
+ # ---------------------------------------------------------------------
+ # Collect internal operators
+
+ # extend with all internal operators
+ # note that this uses internal api introspection functions
+ # all possible operator names
+ op_ids = set(cls.bl_rna.identifier for cls in
+ bpy.types.OperatorProperties.__subclasses__()) | \
+ set(cls.bl_rna.identifier for cls in
+ bpy.types.Operator.__subclasses__()) | \
+ set(cls.bl_rna.identifier for cls in
+ bpy.types.OperatorMacro.__subclasses__())
+
+ get_instance = __import__("_bpy").ops.get_instance
+ path_resolve = type(bpy.context).__base__.path_resolve
+ for idname in op_ids:
+ op = get_instance(idname)
+ if 'INTERNAL' in path_resolve(op, "bl_options"):
+ blacklist_rna_class.append(idname)
+
+ # ---------------------------------------------------------------------
+ # Collect builtin classes we don't need to doc
+ blacklist_rna_class.append("Property")
+ blacklist_rna_class.extend(
+ [cls.__name__ for cls in
+ bpy.types.Property.__subclasses__()])
+
+ # ---------------------------------------------------------------------
+ # Collect classes which are attached to collections, these are api
+ # access only.
+ collection_props = set()
+ for cls_id in dir(bpy.types):
+ cls = getattr(bpy.types, cls_id)
+ for prop in cls.bl_rna.properties:
+ if prop.type == 'COLLECTION':
+ prop_cls = prop.srna
+ if prop_cls is not None:
+ collection_props.add(prop_cls.identifier)
+ blacklist_rna_class.extend(sorted(collection_props))
+
+ return blacklist_rna_class
+
+ blacklist_rna_class = classBlackList()
+
+ def filterRNA(bl_rna):
+ rid = bl_rna.identifier
+ if rid in blacklist_rna_class:
+ print(" skipping", rid)
+ return True
+ return False
+
+ check_ctxt_rna = check_ctxt_rna_tip = None
+ if check_ctxt:
+ check_ctxt_rna = {"multi_lines": check_ctxt.get("multi_lines"),
+ "not_capitalized": check_ctxt.get("not_capitalized"),
+ "end_point": check_ctxt.get("end_point"),
+ "undoc_ops": check_ctxt.get("undoc_ops")}
+ check_ctxt_rna_tip = check_ctxt_rna
+ check_ctxt_rna_tip["multi_rnatip"] = check_ctxt.get("multi_rnatip")
+
+ # -------------------------------------------------------------------------
+ # Function definitions
+
+ def walkProperties(bl_rna):
+ import bpy
+
+ # Get our parents' properties, to not export them multiple times.
+ bl_rna_base = bl_rna.base
+ if bl_rna_base:
+ bl_rna_base_props = bl_rna_base.properties.values()
+ else:
+ bl_rna_base_props = ()
+
+ for prop in bl_rna.properties:
+ # Only write this property if our parent hasn't got it.
+ if prop in bl_rna_base_props:
+ continue
+ if prop.identifier == "rna_type":
+ continue
+
+ msgsrc = "bpy.types.{}.{}".format(bl_rna.identifier, prop.identifier)
+ context = getattr(prop, "translation_context", CONTEXT_DEFAULT)
+ if prop.name and (prop.name != prop.identifier or context):
+ key = (context, prop.name)
+ check(check_ctxt_rna, messages, key, msgsrc)
+ messages.setdefault(key, []).append(msgsrc)
+ if prop.description:
+ key = (CONTEXT_DEFAULT, prop.description)
+ check(check_ctxt_rna_tip, messages, key, msgsrc)
+ messages.setdefault(key, []).append(msgsrc)
+ if isinstance(prop, bpy.types.EnumProperty):
+ for item in prop.enum_items:
+ msgsrc = "bpy.types.{}.{}:'{}'".format(bl_rna.identifier,
+ prop.identifier,
+ item.identifier)
+ if item.name and item.name != item.identifier:
+ key = (CONTEXT_DEFAULT, item.name)
+ check(check_ctxt_rna, messages, key, msgsrc)
+ messages.setdefault(key, []).append(msgsrc)
+ if item.description:
+ key = (CONTEXT_DEFAULT, item.description)
+ check(check_ctxt_rna_tip, messages, key, msgsrc)
+ messages.setdefault(key, []).append(msgsrc)
+
+ def walkRNA(bl_rna):
+ if filterRNA(bl_rna):
+ return
+
+ msgsrc = ".".join(("bpy.types", bl_rna.identifier))
+ context = getattr(bl_rna, "translation_context", CONTEXT_DEFAULT)
+
+ if bl_rna.name and (bl_rna.name != bl_rna.identifier or context):
+ key = (context, bl_rna.name)
+ check(check_ctxt_rna, messages, key, msgsrc)
+ messages.setdefault(key, []).append(msgsrc)
+
+ if bl_rna.description:
+ key = (CONTEXT_DEFAULT, bl_rna.description)
+ check(check_ctxt_rna_tip, messages, key, msgsrc)
+ messages.setdefault(key, []).append(msgsrc)
+
+ if hasattr(bl_rna, 'bl_label') and bl_rna.bl_label:
+ key = (context, bl_rna.bl_label)
+ check(check_ctxt_rna, messages, key, msgsrc)
+ messages.setdefault(key, []).append(msgsrc)
+
+ walkProperties(bl_rna)
+
+ def walkClass(cls):
+ walkRNA(cls.bl_rna)
+
+ def walk_keymap_hierarchy(hier, msgsrc_prev):
+ for lvl in hier:
+ msgsrc = "{}.{}".format(msgsrc_prev, lvl[1])
+ messages.setdefault((CONTEXT_DEFAULT, lvl[0]), []).append(msgsrc)
+
+ if lvl[3]:
+ walk_keymap_hierarchy(lvl[3], msgsrc)
+
+ # -------------------------------------------------------------------------
+ # Dump Messages
+
+ def process_cls_list(cls_list):
+ if not cls_list:
+ return 0
+
+ def full_class_id(cls):
+ """ gives us 'ID.Lamp.AreaLamp' which is best for sorting.
+ """
+ cls_id = ""
+ bl_rna = cls.bl_rna
+ while bl_rna:
+ cls_id = "{}.{}".format(bl_rna.identifier, cls_id)
+ bl_rna = bl_rna.base
+ return cls_id
+
+ cls_list.sort(key=full_class_id)
+ processed = 0
+ for cls in cls_list:
+ walkClass(cls)
+# classes.add(cls)
+ # Recursively process subclasses.
+ processed += process_cls_list(cls.__subclasses__()) + 1
+ return processed
+
+ # Parse everything (recursively parsing from bpy_struct "class"...).
+ processed = process_cls_list(type(bpy.context).__base__.__subclasses__())
+ print("{} classes processed!".format(processed))
+# import pickle
+# global classes
+# classes = {str(c) for c in classes}
+# with open("/home/i7deb64/Bureau/tpck_2", "wb") as f:
+# pickle.dump(classes, f, protocol=0)
+
+ from bpy_extras.keyconfig_utils import KM_HIERARCHY
+
+ walk_keymap_hierarchy(KM_HIERARCHY, "KM_HIERARCHY")
+
+
+
+def dump_messages_pytext(messages, check_ctxt):
+ """ dumps text inlined in the python user interface: eg.
+
+ layout.prop("someprop", text="My Name")
+ """
+ import ast
+
+ # -------------------------------------------------------------------------
+ # Gather function names
+
+ import bpy
+ # key: func_id
+ # val: [(arg_kw, arg_pos), (arg_kw, arg_pos), ...]
+ func_translate_args = {}
+
+ # so far only 'text' keywords, but we may want others translated later
+ translate_kw = ("text", )
+
+    # Break the recursive node lookup on some kinds of nodes.
+ # E.g. we don’t want to get strings inside subscripts (blah["foo"])!
+ stopper_nodes = {ast.Subscript,}
+
+ for func_id, func in bpy.types.UILayout.bl_rna.functions.items():
+ # check it has a 'text' argument
+ for (arg_pos, (arg_kw, arg)) in enumerate(func.parameters.items()):
+ if ((arg_kw in translate_kw) and
+                (not arg.is_output) and
+ (arg.type == 'STRING')):
+
+ func_translate_args.setdefault(func_id, []).append((arg_kw,
+ arg_pos))
+ # print(func_translate_args)
+
+ check_ctxt_py = None
+ if check_ctxt:
+ check_ctxt_py = {"py_in_rna": (check_ctxt["py_in_rna"], messages.copy()),
+ "multi_lines": check_ctxt["multi_lines"],
+ "not_capitalized": check_ctxt["not_capitalized"],
+ "end_point": check_ctxt["end_point"]}
+
+ # -------------------------------------------------------------------------
+ # Function definitions
+
+ def extract_strings(fp_rel, node):
+ """ Recursively get strings, needed in case we have "Blah" + "Blah",
+ passed as an argument in that case it wont evaluate to a string.
+ However, break on some kind of stopper nodes, like e.g. Subscript.
+ """
+
+ if type(node) == ast.Str:
+ eval_str = ast.literal_eval(node)
+ if eval_str:
+ key = (CONTEXT_DEFAULT, eval_str)
+ msgsrc = "{}:{}".format(fp_rel, node.lineno)
+ check(check_ctxt_py, messages, key, msgsrc)
+ messages.setdefault(key, []).append(msgsrc)
+ return
+
+ for nd in ast.iter_child_nodes(node):
+ if type(nd) not in stopper_nodes:
+ extract_strings(fp_rel, nd)
+
+ def extract_strings_from_file(fp):
+        with open(fp, 'r', encoding="utf8") as filedata:
+            root_node = ast.parse(filedata.read(), fp, 'exec')
+
+ fp_rel = os.path.relpath(fp, SOURCE_DIR)
+
+ for node in ast.walk(root_node):
+ if type(node) == ast.Call:
+ # print("found function at")
+ # print("%s:%d" % (fp, node.lineno))
+
+                # lambdas
+ if type(node.func) == ast.Name:
+ continue
+
+ # getattr(self, con.type)(context, box, con)
+ if not hasattr(node.func, "attr"):
+ continue
+
+ translate_args = func_translate_args.get(node.func.attr, ())
+
+ # do nothing if not found
+ for arg_kw, arg_pos in translate_args:
+ if arg_pos < len(node.args):
+ extract_strings(fp_rel, node.args[arg_pos])
+ else:
+ for kw in node.keywords:
+ if kw.arg == arg_kw:
+ extract_strings(fp_rel, kw.value)
+
+ # -------------------------------------------------------------------------
+ # Dump Messages
+
+ mod_dir = os.path.join(SOURCE_DIR,
+ "release",
+ "scripts",
+ "startup",
+ "bl_ui")
+
+ files = [os.path.join(mod_dir, fn)
+ for fn in sorted(os.listdir(mod_dir))
+ if not fn.startswith("_")
+ if fn.endswith("py")
+ ]
+
+ # Dummy Cycles has its py addon in its own dir!
+ files += CUSTOM_PY_UI_FILES
+
+ for fp in files:
+ extract_strings_from_file(fp)
+
+
+def dump_messages(do_messages, do_checks):
+ import collections
+
+ def enable_addons():
+ """For now, enable all official addons, before extracting msgids."""
+ import addon_utils
+ import bpy
+
+ userpref = bpy.context.user_preferences
+ used_ext = {ext.module for ext in userpref.addons}
+ support = {"OFFICIAL"}
+ # collect the categories that can be filtered on
+ addons = [(mod, addon_utils.module_bl_info(mod)) for mod in
+ addon_utils.modules(addon_utils.addons_fake_modules)]
+
+ for mod, info in addons:
+ module_name = mod.__name__
+ if module_name in used_ext or info["support"] not in support:
+ continue
+ print(" Enabling module ", module_name)
+ bpy.ops.wm.addon_enable(module=module_name)
+
+ # XXX There are currently some problems with bpy/rna...
+ # *Very* tricky to solve!
+        # So this is a hack to make all newly added operators visible by
+ # bpy.types.OperatorProperties.__subclasses__()
+ for cat in dir(bpy.ops):
+ cat = getattr(bpy.ops, cat)
+ for op in dir(cat):
+ getattr(cat, op).get_rna()
+
+ # check for strings like ": %d"
+ ignore = ("%d", "%f", "%s", "%r", # string formatting
+ "*", ".", "(", ")", "-", "/", "\\", "+", ":", "#", "%"
+ "0", "1", "2", "3", "4", "5", "6", "7", "8", "9",
+ "x", # used on its own eg: 100x200
+ "X", "Y", "Z", "W", # used alone. no need to include
+ )
+
+ def filter_message(msg):
+ msg_tmp = msg
+ for ign in ignore:
+ msg_tmp = msg_tmp.replace(ign, "")
+ if not msg_tmp.strip():
+ return True
+ # we could filter out different strings here
+ return False
+
+ if hasattr(collections, 'OrderedDict'):
+ messages = collections.OrderedDict()
+ else:
+ messages = {}
+
+ messages[(CONTEXT_DEFAULT, "")] = []
+
+ # Enable all wanted addons.
+ enable_addons()
+
+ check_ctxt = None
+ if do_checks:
+ check_ctxt = {"multi_rnatip": set(),
+ "multi_lines": set(),
+ "py_in_rna": set(),
+ "not_capitalized": set(),
+ "end_point": set(),
+ "undoc_ops": set()}
+
+ # get strings from RNA
+ dump_messages_rna(messages, check_ctxt)
+
+ # get strings from UI layout definitions text="..." args
+ dump_messages_pytext(messages, check_ctxt)
+
+ del messages[(CONTEXT_DEFAULT, "")]
+
+ if do_checks:
+ print("WARNINGS:")
+ keys = set()
+ for c in check_ctxt.values():
+ keys |= c
+ # XXX Temp, see below
+ c -= check_ctxt["multi_rnatip"]
+ for key in keys:
+ if key in check_ctxt["undoc_ops"]:
+ print("\tThe following operators are undocumented:")
+ else:
+ print("\t“{}”|“{}”:".format(*key))
+ if key in check_ctxt["multi_lines"]:
+ print("\t\t-> newline in this message!")
+ if key in check_ctxt["not_capitalized"]:
+ print("\t\t-> message not capitalized!")
+ if key in check_ctxt["end_point"]:
+ print("\t\t-> message with endpoint!")
+ # XXX Hide this one for now, too much false positives.
+# if key in check_ctxt["multi_rnatip"]:
+# print("\t\t-> tip used in several RNA items")
+ if key in check_ctxt["py_in_rna"]:
+ print("\t\t-> RNA message also used in py UI code:")
+ print("\t\t{}".format("\n\t\t".join(messages[key])))
+
+ if do_messages:
+ print("Writing messages…")
+ num_written = 0
+ num_filtered = 0
+ with open(FILE_NAME_MESSAGES, 'w', encoding="utf8") as message_file:
+ for (ctx, key), value in messages.items():
+ # filter out junk values
+ if filter_message(key):
+ num_filtered += 1
+ continue
+
+ # Remove newlines in key and values!
+ message_file.write("\n".join(COMMENT_PREFIX + msgsrc.replace("\n", "") for msgsrc in value))
+ message_file.write("\n")
+ if ctx:
+ message_file.write(CONTEXT_PREFIX + ctx.replace("\n", "") + "\n")
+ message_file.write(key.replace("\n", "") + "\n")
+ num_written += 1
+
+ print("Written {} messages to: {} ({} were filtered out)." \
+ "".format(num_written, FILE_NAME_MESSAGES, num_filtered))
+
+
+def main():
+ try:
+ import bpy
+ except ImportError:
+ print("This script must run from inside blender")
+ return
+
+ import sys
+ back_argv = sys.argv
+ sys.argv = sys.argv[sys.argv.index("--") + 1:]
+
+ import argparse
+ parser = argparse.ArgumentParser(description="Process UI messages " \
+ "from inside Blender.")
+ parser.add_argument('-c', '--no_checks', default=True,
+ action="store_false",
+ help="No checks over UI messages.")
+ parser.add_argument('-m', '--no_messages', default=True,
+ action="store_false",
+ help="No export of UI messages.")
+ parser.add_argument('-o', '--output', help="Output messages file path.")
+ args = parser.parse_args()
+
+ if args.output:
+ global FILE_NAME_MESSAGES
+ FILE_NAME_MESSAGES = args.output
+
+ dump_messages(do_messages=args.no_messages, do_checks=args.no_checks)
+
+ sys.argv = back_argv
+
+
+if __name__ == "__main__":
+ print("\n\n *** Running {} *** \n".format(__file__))
+ main()
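
Note: the write loop at the end of dump_messages() emits, for every message, one "#~ "-prefixed line per source, then a "MSGCTXT:" line when the context is non-default, then the msgid itself, with all newlines stripped. A hypothetical messages.txt entry (the names below are illustrative, not taken from a real dump) would hence look like:

    #~ bpy.types.SomeType.some_prop
    #~ bpy.types.OtherType.some_prop
    MSGCTXT:SomeContext
    Some Property Label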
diff --git a/release/scripts/modules/bl_i18n_utils/check_po.py b/release/scripts/modules/bl_i18n_utils/check_po.py
new file mode 100755
index 00000000000..a688d38df88
--- /dev/null
+++ b/release/scripts/modules/bl_i18n_utils/check_po.py
@@ -0,0 +1,175 @@
+#!/usr/bin/python3
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ***** END GPL LICENSE BLOCK *****
+
+# <pep8 compliant>
+
+# Check po’s in branches (or in trunk) for missing/unneeded messages.
+
+import os
+import sys
+from codecs import open
+
+import settings
+import utils
+
+TRUNK_PO_DIR = settings.TRUNK_PO_DIR
+BRANCHES_DIR = settings.BRANCHES_DIR
+
+FILE_NAME_POT = settings.FILE_NAME_POT
+
+
+def print_diff(ref_messages, messages, states):
+ # Remove comments from messages list!
+ messages = set(messages.keys()) - states["comm_msg"]
+ unneeded = (messages - ref_messages)
+ for msgid in unneeded:
+ print('\tUnneeded message id "{}"'.format(msgid))
+
+ missing = (ref_messages - messages)
+ for msgid in missing:
+ print('\tMissing message id "{}"'.format(msgid))
+
+ for msgid in states["comm_msg"]:
+ print('\tCommented message id "{}"'.format(msgid))
+
+ print("\t{} unneeded messages, {} missing messages, {} commented messages." \
+ "".format(len(unneeded), len(missing), len(states["comm_msg"])))
+ return 0
+
+
+def process_po(ref_messages, po, glob_stats, do_stats, do_messages):
+ print("Checking {}...".format(po))
+ ret = 0
+
+ messages, states, stats = utils.parse_messages(po)
+ if do_messages:
+ t = print_diff(ref_messages, messages, states)
+ if t:
+ ret = t
+ if do_stats:
+ print("\tStats:")
+ t = utils.print_stats(stats, glob_stats, prefix=" ")
+ if t:
+ ret = t
+ if states["is_broken"]:
+ print("\tERROR! This .po is broken!")
+ ret = 1
+ return ret
+
+
+def main():
+ import argparse
+ parser = argparse.ArgumentParser(description="Check po’s in branches " \
+ "(or in trunk) for missing" \
+ "/unneeded messages.")
+ parser.add_argument('-s', '--stats', action="store_true",
+ help="Print po’s stats.")
+ parser.add_argument('-m', '--messages', action="store_true",
+ help="Print po’s missing/unneeded/commented messages.")
+ parser.add_argument('-t', '--trunk', action="store_true",
+ help="Check po’s in /trunk/po rather than /branches.")
+ parser.add_argument('-p', '--pot',
+ help="Specify the .pot file used as reference.")
+ parser.add_argument('langs', metavar='ISO_code', nargs='*',
+ help="Restrict processed languages to those.")
+ args = parser.parse_args()
+
+
+ if args.pot:
+ global FILE_NAME_POT
+ FILE_NAME_POT = args.pot
+ glob_stats = {"nbr" : 0.0,
+ "lvl" : 0.0,
+ "lvl_ttips" : 0.0,
+ "lvl_trans_ttips" : 0.0,
+ "lvl_ttips_in_trans": 0.0,
+ "lvl_comm" : 0.0,
+ "nbr_signs" : 0,
+ "nbr_trans_signs" : 0,
+ "contexts" : set()}
+ ret = 0
+
+ pot_messages = None
+ if args.messages:
+ pot_messages, u1, pot_stats = utils.parse_messages(FILE_NAME_POT)
+ pot_messages = set(pot_messages.keys())
+ glob_stats["nbr_signs"] = pot_stats["nbr_signs"]
+
+ if args.langs:
+ for lang in args.langs:
+ if args.trunk:
+ po = os.path.join(TRUNK_PO_DIR, ".".join((lang, "po")))
+ else:
+ po = os.path.join(BRANCHES_DIR, lang, ".".join((lang, "po")))
+ if os.path.exists(po):
+ t = process_po(pot_messages, po, glob_stats,
+ args.stats, args.messages)
+ if t:
+ ret = t
+ elif args.trunk:
+ for po in os.listdir(TRUNK_PO_DIR):
+ if po.endswith(".po"):
+ po = os.path.join(TRUNK_PO_DIR, po)
+ t = process_po(pot_messages, po, glob_stats,
+ args.stats, args.messages)
+ if t:
+ ret = t
+ else:
+ for lang in os.listdir(BRANCHES_DIR):
+ for po in os.listdir(os.path.join(BRANCHES_DIR, lang)):
+ if po.endswith(".po"):
+ po = os.path.join(BRANCHES_DIR, lang, po)
+ t = process_po(pot_messages, po, glob_stats,
+ args.stats, args.messages)
+ if t:
+ ret = t
+
+ if args.stats and glob_stats["nbr"] != 0.0:
+ nbr_contexts = len(glob_stats["contexts"]-{""})
+ if nbr_contexts != 1:
+ if nbr_contexts == 0:
+ nbr_contexts = "No"
+ _ctx_txt = "s are"
+ else:
+ _ctx_txt = " is"
+ print("\nAverage stats for all {:.0f} processed files:\n" \
+ " {:>6.1%} done!\n" \
+ " {:>6.1%} of messages are tooltips.\n" \
+ " {:>6.1%} of tooltips are translated.\n" \
+ " {:>6.1%} of translated messages are tooltips.\n" \
+ " {:>6.1%} of messages are commented.\n" \
+ " The org msgids are currently made of {} signs.\n" \
+ " All processed translations are currently made of {} signs.\n" \
+ " {} specific context{} present:\n {}\n" \
+ "".format(glob_stats["nbr"], glob_stats["lvl"]/glob_stats["nbr"],
+ glob_stats["lvl_ttips"]/glob_stats["nbr"],
+ glob_stats["lvl_trans_ttips"]/glob_stats["nbr"],
+ glob_stats["lvl_ttips_in_trans"]/glob_stats["nbr"],
+ glob_stats["lvl_comm"]/glob_stats["nbr"], glob_stats["nbr_signs"],
+ glob_stats["nbr_trans_signs"], nbr_contexts, _ctx_txt,
+ "\n ".join(glob_stats["contexts"]-{""})))
+
+ return ret
+
+
+if __name__ == "__main__":
+ print("\n\n *** Running {} *** \n".format(__file__))
+ print(" *** WARNING! Number of tooltips is only an estimation! ***\n")
+ sys.exit(main())
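
Usage sketch for check_po.py, per the argparse setup above (the language codes are examples): print stats and missing/unneeded messages for two branch po's, then for all of trunk:

    python3 check_po.py -s -m fr de
    python3 check_po.py -s -m -t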
diff --git a/release/scripts/modules/bl_i18n_utils/clean_po.py b/release/scripts/modules/bl_i18n_utils/clean_po.py
new file mode 100755
index 00000000000..7e91b41065c
--- /dev/null
+++ b/release/scripts/modules/bl_i18n_utils/clean_po.py
@@ -0,0 +1,97 @@
+#!/usr/bin/python3
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ***** END GPL LICENSE BLOCK *****
+
+# <pep8 compliant>
+
+# Clean (i.e. remove commented messages) po’s in branches or trunk.
+
+import os
+import sys
+import collections
+from codecs import open
+
+import settings
+import utils
+
+TRUNK_PO_DIR = settings.TRUNK_PO_DIR
+BRANCHES_DIR = settings.BRANCHES_DIR
+
+
+def do_clean(po, strict):
+ print("Cleaning {}...".format(po))
+ messages, states, u1 = utils.parse_messages(po)
+
+ if strict and states["is_broken"]:
+ print("ERROR! This .po file is broken!")
+ return 1
+
+ for msgkey in states["comm_msg"]:
+ del messages[msgkey]
+ utils.write_messages(po, messages, states["comm_msg"], states["fuzzy_msg"])
+ print("Removed {} commented messages.".format(len(states["comm_msg"])))
+ return 0
+
+
+def main():
+ import argparse
+ parser = argparse.ArgumentParser(description="Clean po’s in branches " \
+ "or trunk (i.e. remove " \
+ "all commented messages).")
+ parser.add_argument('-t', '--trunk', action="store_true",
+ help="Clean po’s in trunk rather than branches.")
+ parser.add_argument('-s', '--strict', action="store_true",
+ help="Raise an error if a po is broken.")
+ parser.add_argument('langs', metavar='ISO_code', nargs='*',
+ help="Restrict processed languages to those.")
+ args = parser.parse_args()
+
+
+ ret = 0
+
+ if args.langs:
+ for lang in args.langs:
+ if args.trunk:
+ po = os.path.join(TRUNK_PO_DIR, ".".join((lang, "po")))
+ else:
+ po = os.path.join(BRANCHES_DIR, lang, ".".join((lang, "po")))
+ if os.path.exists(po):
+ t = do_clean(po, args.strict)
+ if t:
+ ret = t
+ elif args.trunk:
+ for po in os.listdir(TRUNK_PO_DIR):
+ if po.endswith(".po"):
+ po = os.path.join(TRUNK_PO_DIR, po)
+ t = do_clean(po, args.strict)
+ if t:
+ ret = t
+ else:
+ for lang in os.listdir(BRANCHES_DIR):
+ for po in os.listdir(os.path.join(BRANCHES_DIR, lang)):
+ if po.endswith(".po"):
+ po = os.path.join(BRANCHES_DIR, lang, po)
+ t = do_clean(po, args.strict)
+ if t:
+ ret = t
+    return ret
+
+
+if __name__ == "__main__":
+ print("\n\n *** Running {} *** \n".format(__file__))
+ sys.exit(main())
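
Usage sketch for clean_po.py, per the options above (the language code is an example): strictly clean a single trunk po, erroring out if it is broken:

    python3 clean_po.py --trunk --strict fr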
diff --git a/release/scripts/modules/bl_i18n_utils/import_po_from_branches.py b/release/scripts/modules/bl_i18n_utils/import_po_from_branches.py
new file mode 100755
index 00000000000..4739a98920f
--- /dev/null
+++ b/release/scripts/modules/bl_i18n_utils/import_po_from_branches.py
@@ -0,0 +1,119 @@
+#!/usr/bin/python3
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ***** END GPL LICENSE BLOCK *****
+
+# <pep8 compliant>
+
+# Import into trunk/po all branch po files translated above the given threshold.
+
+import os
+import shutil
+import sys
+import subprocess
+from codecs import open
+
+import settings
+import utils
+import rtl_preprocess
+
+
+TRUNK_PO_DIR = settings.TRUNK_PO_DIR
+BRANCHES_DIR = settings.BRANCHES_DIR
+
+RTL_PREPROCESS_FILE = settings.RTL_PREPROCESS_FILE
+
+PY3 = settings.PYTHON3_EXEC
+
+
+def main():
+ import argparse
+ parser = argparse.ArgumentParser(description="Import advanced enough po’s " \
+ "from branches to trunk.")
+ parser.add_argument('-t', '--threshold', type=int,
+ help="Import threshold, as a percentage.")
+ parser.add_argument('-s', '--strict', action="store_true",
+ help="Raise an error if a po is broken.")
+ parser.add_argument('langs', metavar='ISO_code', nargs='*',
+ help="Restrict processed languages to those.")
+ args = parser.parse_args()
+
+
+ ret = 0
+
+ threshold = float(settings.IMPORT_MIN_LEVEL)/100.0
+ if args.threshold is not None:
+ threshold = float(args.threshold)/100.0
+
+ for lang in os.listdir(BRANCHES_DIR):
+ if args.langs and lang not in args.langs:
+ continue
+ po = os.path.join(BRANCHES_DIR, lang, ".".join((lang, "po")))
+ if os.path.exists(po):
+ po_is_rtl = os.path.join(BRANCHES_DIR, lang, RTL_PREPROCESS_FILE)
+ msgs, state, stats = utils.parse_messages(po)
+ tot_msgs = stats["tot_msg"]
+ trans_msgs = stats["trans_msg"]
+ lvl = 0.0
+ if tot_msgs:
+ lvl = float(trans_msgs)/float(tot_msgs)
+ if lvl > threshold:
+ if state["is_broken"] and args.strict:
+ print("{:<10}: {:>6.1%} done, but BROKEN, skipped." \
+ "".format(lang, lvl))
+ ret = 1
+ else:
+ if os.path.exists(po_is_rtl):
+ out_po = os.path.join(TRUNK_PO_DIR,
+ ".".join((lang, "po")))
+ out_raw_po = os.path.join(TRUNK_PO_DIR,
+ "_".join((lang, "raw.po")))
+ keys = []
+ trans = []
+ for k, m in msgs.items():
+ keys.append(k)
+ trans.append("".join(m["msgstr_lines"]))
+ trans = rtl_preprocess.log2vis(trans)
+ for k, t in zip(keys, trans):
+ # Mono-line for now...
+ msgs[k]["msgstr_lines"] = [t]
+ utils.write_messages(out_po, msgs, state["comm_msg"],
+ state["fuzzy_msg"])
+ # Also copies org po!
+ shutil.copy(po, out_raw_po)
+ print("{:<10}: {:>6.1%} done, enough translated " \
+ "messages, processed and copied to trunk." \
+ "".format(lang, lvl))
+ else:
+ shutil.copy(po, TRUNK_PO_DIR)
+ print("{:<10}: {:>6.1%} done, enough translated " \
+ "messages, copied to trunk.".format(lang, lvl))
+ else:
+ if state["is_broken"] and args.strict:
+ print("{:<10}: {:>6.1%} done, BROKEN and not enough " \
+ "translated messages, skipped".format(lang, lvl))
+ ret = 1
+ else:
+ print("{:<10}: {:>6.1%} done, not enough translated " \
+ "messages, skipped.".format(lang, lvl))
+ return ret
+
+
+if __name__ == "__main__":
+ print("\n\n *** Running {} *** \n".format(__file__))
+ sys.exit(main())
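
Usage sketch for import_po_from_branches.py (the threshold value is an example): import every branch po translated to at least 60%, skipping broken ones with an error:

    python3 import_po_from_branches.py -t 60 -s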
diff --git a/release/scripts/modules/bl_i18n_utils/merge_po.py b/release/scripts/modules/bl_i18n_utils/merge_po.py
new file mode 100755
index 00000000000..1a55cd670b0
--- /dev/null
+++ b/release/scripts/modules/bl_i18n_utils/merge_po.py
@@ -0,0 +1,156 @@
+#!/usr/bin/python3
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ***** END GPL LICENSE BLOCK *****
+
+# <pep8 compliant>
+
+# Merge one or more .po files into the first dest one.
+# If a msgkey is present in more than one merged po, the one in the first file wins, unless
+# it’s marked as fuzzy and one later is not.
+# The fuzzy flag is removed if necessary.
+# All other comments are never modified.
+# However, commented messages in dst will always remain commented, and commented messages are
+# never merged from sources.
+
+import sys
+from codecs import open
+
+import utils
+
+
+def main():
+ import argparse
+ parser = argparse.ArgumentParser(description="" \
+ "Merge one or more .po files into the first dest one.\n" \
+ "If a msgkey (msgid, msgctxt) is present in more than " \
+ "one merged po, the one in the first file wins, unless " \
+ "it’s marked as fuzzy and one later is not.\n" \
+ "The fuzzy flag is removed if necessary.\n" \
+ "All other comments are never modified.\n" \
+ "Commented messages in dst will always remain " \
+ "commented, and commented messages are never merged " \
+ "from sources.")
+ parser.add_argument('-s', '--stats', action="store_true",
+ help="Show statistics info.")
+ parser.add_argument('-r', '--replace', action="store_true",
+ help="Replace existing messages of same \"level\" already in dest po.")
+ parser.add_argument('dst', metavar='dst.po',
+ help="The dest po into which merge the others.")
+ parser.add_argument('src', metavar='src.po', nargs='+',
+ help="The po's to merge into the dst.po one.")
+ args = parser.parse_args()
+
+
+ ret = 0
+ done_msgkeys = set()
+ done_fuzzy_msgkeys = set()
+ nbr_merged = 0
+ nbr_replaced = 0
+ nbr_added = 0
+ nbr_unfuzzied = 0
+
+ dst_messages, dst_states, dst_stats = utils.parse_messages(args.dst)
+ if dst_states["is_broken"]:
+ print("Dest po is BROKEN, aborting.")
+ return 1
+ if args.stats:
+ print("Dest po, before merging:")
+ utils.print_stats(dst_stats, prefix="\t")
+ # If we don’t want to replace existing valid translations, pre-populate
+ # done_msgkeys and done_fuzzy_msgkeys.
+ if not args.replace:
+ done_msgkeys = dst_states["trans_msg"].copy()
+ done_fuzzy_msgkeys = dst_states["fuzzy_msg"].copy()
+ for po in args.src:
+ messages, states, stats = utils.parse_messages(po)
+ if states["is_broken"]:
+ print("\tSrc po {} is BROKEN, skipping.".format(po))
+ ret = 1
+ continue
+ print("\tMerging {}...".format(po))
+ if args.stats:
+ print("\t\tMerged po stats:")
+ utils.print_stats(stats, prefix="\t\t\t")
+ for msgkey, val in messages.items():
+ msgctxt, msgid = msgkey
+ # This msgkey has already been completely merged, or is a commented one,
+ # or the new message is commented, skip it.
+ if msgkey in (done_msgkeys | dst_states["comm_msg"] | states["comm_msg"]):
+ continue
+ is_ttip = utils.is_tooltip(msgid)
+            # New message does not yet exist in dest.
+ if msgkey not in dst_messages:
+ dst_messages[msgkey] = messages[msgkey]
+ if msgkey in states["fuzzy_msg"]:
+ done_fuzzy_msgkeys.add(msgkey)
+ dst_states["fuzzy_msg"].add(msgkey)
+ elif msgkey in states["trans_msg"]:
+ done_msgkeys.add(msgkey)
+ dst_states["trans_msg"].add(msgkey)
+ dst_stats["trans_msg"] += 1
+ if is_ttip:
+ dst_stats["trans_ttips"] += 1
+ nbr_added += 1
+ dst_stats["tot_msg"] += 1
+ if is_ttip:
+ dst_stats["tot_ttips"] += 1
+            # From now on, the new message is already in dst.
+ # New message is neither translated nor fuzzy, skip it.
+ elif msgkey not in (states["trans_msg"] | states["fuzzy_msg"]):
+ continue
+ # From now on, the new message is either translated or fuzzy!
+ # The new message is translated.
+ elif msgkey in states["trans_msg"]:
+ dst_messages[msgkey]["msgstr_lines"] = messages[msgkey]["msgstr_lines"]
+ done_msgkeys.add(msgkey)
+ done_fuzzy_msgkeys.discard(msgkey)
+ if msgkey in dst_states["fuzzy_msg"]:
+ dst_states["fuzzy_msg"].remove(msgkey)
+ nbr_unfuzzied += 1
+ if msgkey not in dst_states["trans_msg"]:
+ dst_states["trans_msg"].add(msgkey)
+ dst_stats["trans_msg"] += 1
+ if is_ttip:
+ dst_stats["trans_ttips"] += 1
+ else:
+ nbr_replaced += 1
+ nbr_merged += 1
+ # The new message is fuzzy, org one is fuzzy too,
+ # and this msgkey has not yet been merged.
+ elif msgkey not in (dst_states["trans_msg"] | done_fuzzy_msgkeys):
+ dst_messages[msgkey]["msgstr_lines"] = messages[msgkey]["msgstr_lines"]
+ done_fuzzy_msgkeys.add(msgkey)
+ dst_states["fuzzy_msg"].add(msgkey)
+ nbr_merged += 1
+ nbr_replaced += 1
+
+ utils.write_messages(args.dst, dst_messages, dst_states["comm_msg"], dst_states["fuzzy_msg"])
+
+ print("Merged completed. {} messages were merged (among which {} were replaced), " \
+ "{} were added, {} were \"un-fuzzied\"." \
+ "".format(nbr_merged, nbr_replaced, nbr_added, nbr_unfuzzied))
+ if args.stats:
+ print("Final merged po stats:")
+ utils.print_stats(dst_stats, prefix="\t")
+ return ret
+
+
+if __name__ == "__main__":
+ print("\n\n *** Running {} *** \n".format(__file__))
+ sys.exit(main())
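
Usage sketch for merge_po.py (the file names are hypothetical): merge two source po's into a destination, replacing already-translated messages of the same "level" and printing stats before and after:

    python3 merge_po.py -s -r fr.po fr_branch_a.po fr_branch_b.po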
diff --git a/release/scripts/modules/bl_i18n_utils/rtl_preprocess.py b/release/scripts/modules/bl_i18n_utils/rtl_preprocess.py
new file mode 100755
index 00000000000..c6fc5fc787e
--- /dev/null
+++ b/release/scripts/modules/bl_i18n_utils/rtl_preprocess.py
@@ -0,0 +1,231 @@
+#!/usr/bin/python3
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ***** END GPL LICENSE BLOCK *****
+
+# <pep8 compliant>
+
+# Preprocess right-to-left languages.
+# You can use it either standalone, or through import_po_from_branches or
+# update_trunk.
+#
+# Notes: This has been tested on Linux; not 100% sure it will work nicely on
+#        Windows or OS X.
+#        This uses ctypes, as there is no py3 binding for fribidi currently.
+#        This implies you only need the compiled C library to run it.
+#        Finally, note that it handles some formatting/escape codes (like
+#        \", %s, %x12, %.4f, etc.), protecting them from ugly (evil) fribidi,
+#        which seems completely unaware of such things (as unicode is...).
+
+import sys
+import ctypes
+
+import settings
+import utils
+
+FRIBIDI_LIB = settings.FRIBIDI_LIB
+
+###### Import C library and recreate "defines". #####
+fbd = ctypes.CDLL(FRIBIDI_LIB)
+
+
+#define FRIBIDI_MASK_NEUTRAL 0x00000040L /* Is neutral */
+FRIBIDI_PAR_ON = 0x00000040
+
+
+#define FRIBIDI_FLAG_SHAPE_MIRRORING 0x00000001
+#define FRIBIDI_FLAG_REORDER_NSM 0x00000002
+
+#define FRIBIDI_FLAG_SHAPE_ARAB_PRES 0x00000100
+#define FRIBIDI_FLAG_SHAPE_ARAB_LIGA 0x00000200
+#define FRIBIDI_FLAG_SHAPE_ARAB_CONSOLE 0x00000400
+
+#define FRIBIDI_FLAG_REMOVE_BIDI 0x00010000
+#define FRIBIDI_FLAG_REMOVE_JOINING 0x00020000
+#define FRIBIDI_FLAG_REMOVE_SPECIALS 0x00040000
+
+#define FRIBIDI_FLAGS_DEFAULT ( \
+# FRIBIDI_FLAG_SHAPE_MIRRORING | \
+# FRIBIDI_FLAG_REORDER_NSM | \
+# FRIBIDI_FLAG_REMOVE_SPECIALS )
+
+#define FRIBIDI_FLAGS_ARABIC ( \
+# FRIBIDI_FLAG_SHAPE_ARAB_PRES | \
+# FRIBIDI_FLAG_SHAPE_ARAB_LIGA )
+
+FRIBIDI_FLAG_SHAPE_MIRRORING = 0x00000001
+FRIBIDI_FLAG_REORDER_NSM = 0x00000002
+FRIBIDI_FLAG_REMOVE_SPECIALS = 0x00040000
+
+FRIBIDI_FLAG_SHAPE_ARAB_PRES = 0x00000100
+FRIBIDI_FLAG_SHAPE_ARAB_LIGA = 0x00000200
+
+FRIBIDI_FLAGS_DEFAULT = FRIBIDI_FLAG_SHAPE_MIRRORING | \
+ FRIBIDI_FLAG_REORDER_NSM | \
+ FRIBIDI_FLAG_REMOVE_SPECIALS
+
+FRIBIDI_FLAGS_ARABIC = FRIBIDI_FLAG_SHAPE_ARAB_PRES | \
+ FRIBIDI_FLAG_SHAPE_ARAB_LIGA
+
+##### Kernel processing funcs. #####
+def protect_format_seq(msg):
+ """
+    Find some specific escaping/formatting sequences (like \", %s, etc.)
+ and protect them from any modification!
+ """
+ LRE = "\u202A"
+ PDF = "\u202C"
+ # Most likely incomplete, but seems to cover current needs.
+ format_codes = set("tslfd")
+ digits = set(".0123456789")
+
+ idx = 0
+ ret = []
+ ln = len(msg)
+ while idx < ln:
+ dlt = 1
+ # \" or \'
+ if idx < (ln - 1) and msg[idx] == '\\' and msg[idx + 1] in "\"\'":
+ dlt = 2
+ # %x12
+ elif idx < (ln - 2) and msg[idx] == '%' and msg[idx + 1] in "x" and \
+ msg[idx + 2] in digits:
+ dlt = 2
+ while (idx + dlt + 1) < ln and msg[idx + dlt + 1] in digits:
+ dlt += 1
+ # %.4f
+ elif idx < (ln - 3) and msg[idx] == '%' and msg[idx + 1] in digits:
+ dlt = 2
+ while (idx + dlt + 1) < ln and msg[idx + dlt + 1] in digits:
+ dlt += 1
+ if (idx + dlt + 1) < ln and msg[idx + dlt + 1] in format_codes:
+ dlt += 1
+ else:
+ dlt = 1
+ # %s
+ elif idx < (ln - 1) and msg[idx] == '%' and \
+ msg[idx + 1] in format_codes:
+ dlt = 2
+
+ if dlt > 1:
+ ret.append(LRE)
+ ret += msg[idx:idx + dlt]
+ idx += dlt
+ if dlt > 1:
+ ret.append(PDF)
+
+ return "".join(ret)
+
+
+def log2vis(msgs):
+ """
+ Globally mimics deprecated fribidi_log2vis.
+ msgs should be an iterable of messages to rtl-process.
+ """
+ for msg in msgs:
+ msg = protect_format_seq(msg)
+
+ fbc_str = ctypes.create_unicode_buffer(msg)
+ ln = len(fbc_str) - 1
+# print(fbc_str.value, ln)
+ btypes = (ctypes.c_int * ln)()
+ embed_lvl = (ctypes.c_uint8 * ln)()
+ pbase_dir = ctypes.c_int(FRIBIDI_PAR_ON)
+ jtypes = (ctypes.c_uint8 * ln)()
+ flags = FRIBIDI_FLAGS_DEFAULT | FRIBIDI_FLAGS_ARABIC
+
+ # Find out direction of each char.
+ fbd.fribidi_get_bidi_types(fbc_str, ln, ctypes.byref(btypes))
+
+# print(*btypes)
+
+ fbd.fribidi_get_par_embedding_levels(btypes, ln,
+ ctypes.byref(pbase_dir),
+ embed_lvl)
+
+# print(*embed_lvl)
+
+ # Joinings for arabic chars.
+ fbd.fribidi_get_joining_types(fbc_str, ln, jtypes)
+# print(*jtypes)
+ fbd.fribidi_join_arabic(btypes, ln, embed_lvl, jtypes)
+# print(*jtypes)
+
+ # Final Shaping!
+ fbd.fribidi_shape(flags, embed_lvl, ln, jtypes, fbc_str)
+
+# print(fbc_str.value)
+# print(*(ord(c) for c in fbc_str))
+ # And now, the reordering.
+ # Note that here, we expect a single line, so no need to do
+ # fancy things...
+ fbd.fribidi_reorder_line(flags, btypes, ln, 0, pbase_dir, embed_lvl,
+ fbc_str, None)
+# print(fbc_str.value)
+# print(*(ord(c) for c in fbc_str))
+
+ yield fbc_str.value
+
+##### Command line stuff. #####
+def main():
+ import argparse
+ parser = argparse.ArgumentParser(description="" \
+ "Preprocesses right-to-left languages.\n" \
+ "You can use it either standalone, or through " \
+ "import_po_from_branches or update_trunk.\n\n" \
+ "Note: This has been tested on Linux, not 100% it will " \
+ "work nicely on Windows or OsX.\n" \
+ "Note: This uses ctypes, as there is no py3 binding for " \
+ "fribidi currently. This implies you only need the " \
+ "compiled C library to run it.\n" \
+ "Note: It handles some formating/escape codes (like " \
+ "\\\", %s, %x12, %.4f, etc.), protecting them from ugly " \
+ "(evil) fribidi, which seems completely unaware of such " \
+ "things (as unicode is...).")
+ parser.add_argument('dst', metavar='dst.po',
+ help="The dest po into which write the " \
+ "pre-processed messages.")
+ parser.add_argument('src', metavar='src.po',
+ help="The po's to pre-process messages.")
+ args = parser.parse_args()
+
+
+ msgs, state, u1 = utils.parse_messages(args.src)
+ if state["is_broken"]:
+ print("Source po is BROKEN, aborting.")
+ return 1
+
+ keys = []
+ trans = []
+ for key, val in msgs.items():
+ keys.append(key)
+ trans.append("".join(val["msgstr_lines"]))
+ trans = log2vis(trans)
+ for key, trn in zip(keys, trans):
+ # Mono-line for now...
+ msgs[key]["msgstr_lines"] = [trn]
+
+ utils.write_messages(args.dst, msgs, state["comm_msg"], state["fuzzy_msg"])
+
+ print("RTL pre-process completed.")
+ return 0
+
+
+if __name__ == "__main__":
+ print("\n\n *** Running {} *** \n".format(__file__))
+ sys.exit(main())
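
To make the protection step concrete, here is what protect_format_seq() yields for a message containing a %s placeholder (a sketch; LRE is U+202A, PDF is U+202C, and the sample string is made up):

    >>> protect_format_seq("Save as %s?")
    'Save as \u202a%s\u202c?'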
diff --git a/release/scripts/modules/bl_i18n_utils/settings.py b/release/scripts/modules/bl_i18n_utils/settings.py
new file mode 100644
index 00000000000..7ee81c1dc47
--- /dev/null
+++ b/release/scripts/modules/bl_i18n_utils/settings.py
@@ -0,0 +1,286 @@
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ***** END GPL LICENSE BLOCK *****
+
+# <pep8 compliant>
+
+# Global settings used by all scripts in this dir.
+# XXX Before any use of the tools in this dir, please make a copy of this file
+# named "setting.py"
+# XXX This is a template, most values should be OK, but some you’ll have to
+# edit (most probably, BLENDER_EXEC and SOURCE_DIR).
+
+import os.path
+
+
+###############################################################################
+# MISC
+###############################################################################
+
+# The min level of completeness for a po file to be imported from /branches
+# into /trunk, as a percentage. -1 means "import everything".
+IMPORT_MIN_LEVEL = -1
+
+# The comment prefix used in generated messages.txt file.
+COMMENT_PREFIX = "#~ "
+
+# The comment prefix used to mark sources of msgids, in po's.
+COMMENT_PREFIX_SOURCE = "#: "
+
+# The context prefix used in generated messages.txt file.
+CONTEXT_PREFIX = "MSGCTXT:"
+
+# Default context.
+CONTEXT_DEFAULT = ""
+
+# Undocumented operator placeholder string.
+UNDOC_OPS_STR = "(undocumented operator)"
+
+# The gettext domain.
+DOMAIN = "blender"
+
+# Our own "gettext" stuff.
+# File type (ext) to parse.
+PYGETTEXT_ALLOWED_EXTS = {".c", ".cpp", ".cxx", ".hpp", ".hxx", ".h"}
+
+# Where to search contexts definitions, relative to SOURCE_DIR (defined below).
+PYGETTEXT_CONTEXTS_DEFSRC = os.path.join("source", "blender", "blenfont",
+ "BLF_translation.h")
+
+# Regex to extract contexts defined in BLF_translation.h
+# XXX Not foolproof, but should be enough here!
+PYGETTEXT_CONTEXTS = "#define\\s+(BLF_I18NCONTEXT_[A-Z_0-9]+)\\s+\"([^\"]*)\""
+
+# Keywords' regex.
+# XXX Most unfortunately, we can't use named backreferences inside character sets,
+# which makes the regexes even more twisty... :/
+_str_base = (
+ # Match void string
+ "(?P<{_}1>[\"'])(?P={_}1)" # Get opening quote (' or "), and closing immediately.
+ "|"
+ # Or match non-void string
+ "(?P<{_}2>[\"'])" # Get opening quote (' or ").
+ "(?{capt}(?:"
+ # This one is for crazy things like "hi \\\\\" folks!"...
+ r"(?:(?!<\\)(?:\\\\)*\\(?=(?P={_}2)))|"
+ # The most common case.
+ ".(?!(?P={_}2))"
+ ")+.)" # Don't forget the last char!
+ "(?P={_}2)" # And closing quote.
+)
+str_clean_re = _str_base.format(_="g", capt="P<clean>")
+# Here we have to consider two different cases (empty string and other).
+_str_whole_re = (
+ _str_base.format(_="{_}1_", capt=":") +
+ # Optional loop start, this handles "split" strings...
+ "(?:(?<=[\"'])\\s*(?=[\"'])(?:"
+ + _str_base.format(_="{_}2_", capt=":") +
+ # End of loop.
+ "))*"
+)
+_ctxt_re = r"(?P<ctxt_raw>(?:" + _str_whole_re.format(_="_ctxt") + r")|(?:[A-Z_0-9]+))"
+_msg_re = r"(?P<msg_raw>" + _str_whole_re.format(_="_msg") + r")"
+PYGETTEXT_KEYWORDS = (() +
+ tuple((r"{}\(\s*" + _msg_re + r"\s*\)").format(it)
+ for it in ("IFACE_", "TIP_", "N_")) +
+ tuple((r"{}\(\s*" + _ctxt_re + r"\s*,\s*"+ _msg_re + r"\s*\)").format(it)
+ for it in ("CTX_IFACE_", "CTX_TIP_", "CTX_N_"))
+)
+#GETTEXT_KEYWORDS = ("IFACE_", "CTX_IFACE_:1c,2", "TIP_", "CTX_TIP_:1c,2",
+# "N_", "CTX_N_:1c,2")
+
+# Should po parser warn when finding a first letter not capitalized?
+WARN_MSGID_NOT_CAPITALIZED = True
+
+# Strings that should not raise above warning!
+WARN_MSGID_NOT_CAPITALIZED_ALLOWED = {
+ "", # Simplifies things... :p
+ "sin(x) / x",
+ "fBM",
+ "sqrt(x*x+y*y+z*z)",
+ "iTaSC",
+ "bItasc",
+ "px",
+ "mm",
+ "fStop",
+ "sRGB",
+ "iso-8859-15",
+ "utf-8",
+ "ascii",
+ "re",
+ "y",
+ "ac3",
+ "flac",
+ "mkv",
+ "mp2",
+ "mp3",
+ "ogg",
+ "wav",
+ "iTaSC parameters",
+ "vBVH",
+ "rv",
+ "en_US",
+ "fr_FR",
+ "it_IT",
+ "ru_RU",
+ "zh_CN",
+ "es",
+ "zh_TW",
+ "ar_EG",
+ "pt",
+ "bg_BG",
+ "ca_AD",
+ "hr_HR",
+ "cs_CZ",
+ "nl_NL",
+ "fi_FI",
+ "de_DE",
+ "el_GR",
+ "id_ID",
+ "ja_JP",
+ "ky_KG",
+ "ko_KR",
+ "ne_NP",
+ "fa_IR",
+ "pl_PL",
+ "ro_RO",
+ "sr_RS",
+ "sr_RS@latin",
+ "sv_SE",
+ "uk_UA",
+ "tr_TR",
+ "hu_HU",
+ "available with", # Is part of multi-line msg.
+ "virtual parents", # Is part of multi-line msg.
+ "description", # Addons' field. :/
+ "location", # Addons' field. :/
+ "author", # Addons' field. :/
+ "in memory to enable editing!", # Is part of multi-line msg.
+ "iScale",
+ "dx",
+ "p0",
+ "res",
+}
+
+
+###############################################################################
+# PATHS
+###############################################################################
+
+# The tools path, should be OK.
+TOOLS_DIR = os.path.join(os.path.dirname(__file__))
+
+# The Python3 executable. You’ll likely have to edit it in your user_settings.py
+# if you’re under Windows.
+PYTHON3_EXEC = "python3"
+
+# The Blender executable!
+# This is just an example, you’ll most likely have to edit it in your
+# user_settings.py!
+BLENDER_EXEC = os.path.abspath(os.path.join(TOOLS_DIR, "..", "..", "..", "..",
+ "blender"))
+
+# The xgettext tool. You’ll likely have to edit it in your user_settings.py
+# if you’re under Windows.
+GETTEXT_XGETTEXT_EXECUTABLE = "xgettext"
+
+# The gettext msgmerge tool. You’ll likely have to edit it in your
+# user_settings.py if you’re under Windows.
+GETTEXT_MSGMERGE_EXECUTABLE = "msgmerge"
+
+# The gettext msgfmt "compiler". You’ll likely have to edit it in your
+# user_settings.py if you’re under Windows.
+GETTEXT_MSGFMT_EXECUTABLE = "msgfmt"
+
+# The svn binary... You’ll likely have to edit it in your
+# user_settings.py if you’re under Windows.
+SVN_EXECUTABLE = "svn"
+
+# The FriBidi C compiled library (.so under Linux, .dll under Windows...).
+# You’ll likely have to edit it in your user_settings.py if you’re under
+# Windows, e.g. using the included one:
+# FRIBIDI_LIB = os.path.join(TOOLS_DIR, "libfribidi.dll")
+FRIBIDI_LIB = "libfribidi.so.0"
+
+# The name of the (currently empty) file that must be present in a po's
+# directory to enable rtl-preprocess.
+RTL_PREPROCESS_FILE = "is_rtl"
+
+# The Blender source root path.
+# This is just an example, you’ll most likely have to override it in your
+# user_settings.py!
+SOURCE_DIR = os.path.abspath(os.path.join(TOOLS_DIR, "..", "..", "..", "..",
+ "..", "..", "blender_msgs"))
+
+# The bf-translation repository (you'll likely have to override this in your
+# user_settings.py).
+I18N_DIR = os.path.abspath(os.path.join(TOOLS_DIR, "..", "..", "..", "..",
+ "..", "..", "i18n"))
+
+# The /branches path (overridden in bf-translation's i18n_override_settings.py).
+BRANCHES_DIR = os.path.join(I18N_DIR, "branches")
+
+# The /trunk path (overridden in bf-translation's i18n_override_settings.py).
+TRUNK_DIR = os.path.join(I18N_DIR, "trunk")
+
+# The /trunk/po path (overridden in bf-translation's i18n_override_settings.py).
+TRUNK_PO_DIR = os.path.join(TRUNK_DIR, "po")
+
+# The /trunk/mo path (overridden in bf-translation's i18n_override_settings.py).
+TRUNK_MO_DIR = os.path.join(TRUNK_DIR, "locale")
+
+# The file storing Blender-generated messages.
+FILE_NAME_MESSAGES = os.path.join(TRUNK_PO_DIR, "messages.txt")
+
+# The Blender source path to check for i18n macros.
+POTFILES_SOURCE_DIR = os.path.join(SOURCE_DIR, "source")
+
+# The "source" file storing which files should be processed by xgettext,
+# used to create FILE_NAME_POTFILES
+FILE_NAME_SRC_POTFILES = os.path.join(TRUNK_PO_DIR, "_POTFILES.in")
+
+# The final (generated) file storing which files
+# should be processed by xgettext.
+FILE_NAME_POTFILES = os.path.join(TRUNK_PO_DIR, "POTFILES.in")
+
+# The template messages file.
+FILE_NAME_POT = os.path.join(TRUNK_PO_DIR, ".".join((DOMAIN, "pot")))
+
+# Other py files that should be searched for ui strings, relative to SOURCE_DIR.
+# Needed for Cycles, currently...
+CUSTOM_PY_UI_FILES = [os.path.join("intern", "cycles", "blender",
+ "addon", "ui.py"),
+ ]
+
+
+# A cache storing validated msgids, to avoid re-spellchecking them.
+SPELL_CACHE = os.path.join("/tmp", ".spell_cache")
+
+
+# Custom override settings must be one dir above i18n tools itself!
+import sys
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
+try:
+ from i18n_override_settings import *
+except ImportError: # If no i18n_override_settings available, it’s no error!
+ pass
+
+# Override with custom user settings, if available.
+try:
+ from user_settings import *
+except ImportError: # If no user_settings available, it’s no error!
+ pass
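
Since both override hooks above simply star-import over this module's globals, a user_settings.py only needs to re-assign the few values that differ on a given machine. A minimal hypothetical example (all paths are placeholders):

    # user_settings.py - local overrides for bl_i18n_utils/settings.py
    import os.path

    BLENDER_EXEC = "/path/to/blender/blender"
    SOURCE_DIR = "/path/to/blender/source-tree"
    I18N_DIR = "/path/to/bf-translation/i18n"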
diff --git a/release/scripts/modules/bl_i18n_utils/spell_check_utils.py b/release/scripts/modules/bl_i18n_utils/spell_check_utils.py
new file mode 100644
index 00000000000..3999c01a896
--- /dev/null
+++ b/release/scripts/modules/bl_i18n_utils/spell_check_utils.py
@@ -0,0 +1,490 @@
+# ##### BEGIN GPL LICENSE BLOCK #####
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ##### END GPL LICENSE BLOCK #####
+
+# <pep8 compliant>
+
+import re
+
+
+_valid_before = "(?<=[\\s*'\"`])|(?<=[a-zA-Z][/-])|(?<=^)"
+_valid_after = "(?=[\\s'\"`.!?,;:])|(?=[/-]\\s*[a-zA-Z])|(?=$)"
+_valid_words = "(?:{})(?:(?:[A-Z]+[a-z]*)|[A-Z]*|[a-z]*)(?:{})".format(_valid_before, _valid_after)
+_reg = re.compile(_valid_words)
+
+
+def split_words(text):
+ return [w for w in _reg.findall(text) if w]
+
+
+# These must be all lower case for comparisons
+dict_uimsgs = {
+ # OK words
+ "aren", # aren't
+ "betweens", # yuck! in-betweens!
+ "boolean", "booleans",
+ "decrement",
+ "doesn", # doesn't
+ "fader",
+ "hoc", # ad-hoc
+ "indices",
+ "iridas",
+ "isn", # isn't
+ "iterable",
+ "kyrgyz",
+ "latin",
+ "merchantability",
+ "mplayer",
+ "vertices",
+
+ # Merged words
+ "addon", "addons",
+ "antialiasing",
+ "arcsine", "arccosine", "arctangent",
+ "autoclip",
+ "autocomplete",
+ "autoname",
+ "autosave",
+ "autoscale",
+ "autosmooth",
+ "autosplit",
+ "backface",
+ "backimage",
+ "backscattered",
+ "bandnoise",
+ "bindcode",
+ "bitrate",
+ "blendin",
+ "bonesize",
+ "boundbox",
+ "boxpack",
+ "buffersize",
+ "builtin", "builtins",
+ "chunksize",
+ "de",
+ "defocus",
+ "denoise",
+ "despill", "despilling",
+ "filebrowser",
+ "filelist",
+ "filename", "filenames",
+ "filepath", "filepaths",
+ "forcefield", "forcefields",
+ "fulldome", "fulldomes",
+ "fullscreen",
+ "gridline",
+ "hemi",
+ "inscatter",
+ "lightless",
+ "lookup", "lookups",
+ "mathutils",
+ "midlevel",
+ "midground",
+ "mixdown",
+ "multi",
+ "multifractal",
+ "multires", "multiresolution",
+ "multisampling",
+ "multitexture",
+ "namespace",
+ "keyconfig",
+ "playhead",
+ "polyline",
+ "popup", "popups",
+ "pre",
+ "precalculate",
+ "prefetch",
+ "premultiply", "premultiplied",
+ "prepass",
+ "prepend",
+ "preprocess", "preprocessing",
+ "preseek",
+ "readonly",
+ "realtime",
+ "rekey",
+ "remesh",
+ "reprojection",
+ "resize",
+ "restpose",
+ "retarget", "retargets", "retargeting", "retargeted",
+ "ringnoise",
+ "rolloff",
+ "screencast", "screenshot", "screenshots",
+ "selfcollision",
+ "singletexture",
+ "startup",
+ "stateful",
+ "starfield",
+ "subflare", "subflares",
+ "subframe", "subframes",
+ "subclass", "subclasses", "subclassing",
+ "subdirectory", "subdirectories", "subdir", "subdirs",
+ "submodule", "submodules",
+ "subpath",
+ "subsize",
+ "substep", "substeps",
+ "targetless",
+ "textbox", "textboxes",
+ "tilemode",
+ "timestamp", "timestamps",
+ "timestep", "timesteps",
+ "un",
+ "unbake",
+ "uncomment",
+ "undeformed",
+ "undistort",
+ "ungroup",
+ "unhide",
+ "unindent",
+ "unkeyed",
+ "unpremultiply",
+ "unprojected",
+ "unreacted",
+ "unregister",
+ "unselected",
+ "unsubdivided",
+ "unshadowed",
+ "unspill",
+ "unstitchable",
+ "vectorscope",
+ "worldspace",
+ "workflow",
+
+ # Neologisms, slangs
+ "automagic", "automagically",
+ "blobby",
+ "blockiness", "blocky",
+ "collider", "colliders",
+ "deformer", "deformers",
+ "editability",
+ "keyer",
+ "lacunarity",
+ "numerics",
+ "occluder",
+ "passepartout",
+ "perspectively",
+ "polygonization",
+ "selectability",
+ "slurph",
+ "trackability",
+ "transmissivity",
+ "rasterized", "rasterization",
+ "renderer", "renderable", "renderability",
+
+ # Abbreviations
+ "aero",
+ "amb",
+ "anim",
+ "bool",
+ "calc",
+ "config", "configs",
+ "const",
+ "coord", "coords",
+ "dof",
+ "dupli", "duplis",
+ "eg",
+ "esc",
+ "fac",
+ "grless",
+ "http",
+ "init",
+ "kbit",
+ "lensdist",
+ "loc", "rot", "pos",
+ "lorem",
+ "luma",
+ "multicam",
+ "num",
+ "ok",
+ "ortho",
+ "persp",
+ "pref", "prefs",
+ "prev",
+ "param",
+ "premul",
+ "quad", "quads",
+ "quat", "quats",
+ "recalc", "recalcs",
+ "refl",
+ "spec",
+ "struct", "structs",
+ "tex",
+ "tri", "tris",
+ "uv", "uvs", "uvw", "uw", "uvmap",
+ "vec",
+ "vert", "verts",
+ "vis",
+ "xyz", "xzy", "yxz", "yzx", "zxy", "zyx",
+ "xy", "xz", "yx", "yz", "zx", "zy",
+
+ # General computer/science terms
+ "boid", "boids",
+ "equisolid",
+ "euler", "eulers",
+ "hashable",
+ "intrinsics",
+ "isosurface",
+ "jitter", "jittering", "jittered",
+ "keymap", "keymaps",
+ "lambertian",
+ "laplacian",
+ "metadata",
+ "nand", "xnor",
+ "normals",
+ "numpad",
+ "octree",
+ "opengl",
+ "pulldown", "pulldowns",
+ "quantized",
+ "samplerate",
+ "scrollback",
+ "scrollbar",
+ "scroller",
+ "searchable",
+ "spacebar",
+ "tooltip", "tooltips",
+ "trackpad",
+ "unicode",
+ "viewport", "viewports",
+ "viscoelastic",
+ "wildcard", "wildcards",
+
+ # General computer graphics terms
+ "anaglyph",
+ "bezier", "beziers",
+ "bicubic",
+ "bilinear",
+ "blackpoint", "whitepoint",
+ "blinn",
+ "bokeh",
+ "catadioptric",
+ "centroid",
+ "chrominance",
+ "codec", "codecs",
+ "collada",
+ "compositing",
+ "crossfade",
+ "deinterlace",
+ "dropoff",
+ "eigenvectors",
+ "equirectangular",
+ "fisheye",
+ "framerate",
+ "gimbal",
+ "grayscale",
+ "icosphere",
+ "lightmap",
+ "lossless", "lossy",
+ "midtones",
+ "mipmap", "mipmaps", "mip",
+ "ngon", "ngons",
+ "nurb", "nurbs",
+ "perlin",
+ "phong",
+ "radiosity",
+ "raytrace", "raytracing", "raytraced",
+ "renderfarm",
+ "shader", "shaders",
+ "specular", "specularity",
+ "spillmap",
+ "sobel",
+ "tonemap",
+ "toon",
+ "timecode",
+ "voronoi",
+ "voxel", "voxels",
+ "wireframe",
+ "zmask",
+ "ztransp",
+
+ # Blender terms
+ "bbone",
+ "breakdowner",
+ "bspline",
+ "bweight",
+ "datablock", "datablocks",
+ "dopesheet",
+ "dupliface", "duplifaces",
+ "dupliframe", "dupliframes",
+ "dupliobject", "dupliob",
+ "dupligroup",
+ "duplivert",
+ "fcurve", "fcurves",
+ "fluidsim",
+ "frameserver",
+ "enum",
+ "keyframe", "keyframes", "keyframing", "keyframed",
+ "metaball", "metaballs",
+ "metaelement", "metaelements",
+ "metastrip", "metastrips",
+ "movieclip",
+ "nabla",
+ "navmesh",
+ "outliner",
+ "paintmap", "paintmaps",
+ "polygroup", "polygroups",
+ "poselib",
+ "pushpull",
+ "pyconstraint", "pyconstraints",
+ "shapekey", "shapekeys",
+ "shrinkfatten",
+ "shrinkwrap",
+ "softbody",
+ "stucci",
+ "sunsky",
+ "subsurf",
+ "texface",
+ "timeline", "timelines",
+ "tosphere",
+ "vcol", "vcols",
+ "vgroup", "vgroups",
+ "vinterlace",
+ "wetmap", "wetmaps",
+ "wpaint",
+
+ # Algorithm names
+ "beckmann",
+ "catmull",
+ "catrom",
+ "chebychev",
+ "kutta",
+ "lennard",
+ "minkowsky",
+ "minnaert",
+ "musgrave",
+ "nayar",
+ "netravali",
+ "oren",
+ "prewitt",
+ "runge",
+ "verlet",
+ "worley",
+
+ # Acronyms
+ "aa", "msaa",
+ "api",
+ "asc", "cdl",
+ "ascii",
+ "atrac",
+ "bw",
+ "ccd",
+ "cmd",
+ "cpus",
+ "ctrl",
+ "cw", "ccw",
+ "dev",
+ "djv",
+ "dpi",
+ "dvar",
+ "dx",
+ "fh",
+ "fov",
+ "fft",
+ "gfx",
+ "gl",
+ "glsl",
+ "gpl",
+ "gpu", "gpus",
+ "hc",
+ "hdr",
+ "hh", "mm", "ss", "ff", # hh:mm:ss:ff timecode
+ "hsv", "hsva",
+ "id",
+ "itu",
+ "lhs",
+ "lmb", "mmb", "rmb",
+ "mux",
+ "ndof",
+ "ppc",
+ "px",
+ "qmc",
+ "rgb", "rgba",
+ "rhs",
+ "rv",
+ "sdl",
+ "sl",
+ "smpte",
+ "svn",
+ "ui",
+ "unix",
+ "vbo", "vbos",
+ "ycc", "ycca",
+ "yuv", "yuva",
+
+ # Blender acronyms
+ "bge",
+ "bli",
+ "bvh",
+ "dbvt",
+ "dop", # BLI K-Dop BVH
+ "ik",
+ "nla",
+ "qbvh",
+ "rna",
+ "rvo",
+ "simd",
+ "sph",
+ "svbvh",
+
+ # CG acronyms
+ "ao",
+ "bsdf",
+ "ior",
+ "mocap",
+
+ # Files types/formats
+ "avi",
+ "attrac",
+ "autodesk",
+ "bmp",
+ "btx",
+ "cineon",
+ "dpx",
+ "dxf",
+ "eps",
+ "exr",
+ "fbx",
+ "ffmpeg",
+ "flac",
+ "gzip",
+ "ico",
+ "jpg", "jpeg",
+ "matroska",
+ "mdd",
+ "mkv",
+ "mpeg", "mjpeg",
+ "mtl",
+ "ogg",
+ "openjpeg",
+ "piz",
+ "png",
+ "po",
+ "quicktime",
+ "rle",
+ "sgi",
+ "stl",
+ "svg",
+ "targa", "tga",
+ "tiff",
+ "theora",
+ "vorbis",
+ "wav",
+ "xiph",
+ "xml",
+ "xna",
+ "xvid",
+}
diff --git a/release/scripts/modules/bl_i18n_utils/update_branches.py b/release/scripts/modules/bl_i18n_utils/update_branches.py
new file mode 100755
index 00000000000..1a856b14944
--- /dev/null
+++ b/release/scripts/modules/bl_i18n_utils/update_branches.py
@@ -0,0 +1,104 @@
+#!/usr/bin/python3
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ***** END GPL LICENSE BLOCK *****
+
+# <pep8 compliant>
+
+# Update all branches:
+# * Generate a temp messages.txt file.
+# * Use it to generate a temp .pot file.
+# * Use it to update all .po’s in /branches.
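+#
+# Typical invocations (language codes below are only examples):
+#   ./update_branches.py              # update every po found in /branches
+#   ./update_branches.py -a fr es     # update fr and es, adding them if missing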
+
+import subprocess
+import os
+import sys
+import tempfile
+
+import settings
+
+PY3 = settings.PYTHON3_EXEC
+
+def main():
+ import argparse
+ parser = argparse.ArgumentParser(description="" \
+ "Update all branches:\n" \
+ "* Generate a temp messages.txt file.\n" \
+ "* Use it to generate a temp .pot file.\n" \
+ "* Use it to update all .po’s in /branches.")
+ parser.add_argument('--pproc-contexts', action="store_true",
+ help="Pre-process po’s to avoid having plenty of "
+ "fuzzy msgids just because a context was "
+ "added/changed!")
+ parser.add_argument('-c', '--no_checks', default=True,
+ action="store_false",
+ help="No checks over UI messages.")
+ parser.add_argument('-a', '--add', action="store_true",
+ help="Add missing po’s (useful only when one or " \
+ "more languages are given!).")
+ parser.add_argument('langs', metavar='ISO_code', nargs='*',
+ help="Restrict processed languages to those.")
+ args = parser.parse_args()
+
+
+ ret = 0
+
+ # Generate a temp messages file.
+ dummy, msgfile = tempfile.mkstemp(suffix=".txt",
+ prefix="blender_messages_")
+ os.close(dummy)
+ cmd = (PY3, "./update_msg.py", "-o", msgfile)
+ t = subprocess.call(cmd)
+ if t:
+ ret = t
+
+ # Regenerate POTFILES.in.
+# cmd = (PY3, "./update_potinput.py")
+# t = subprocess.call(cmd)
+# if t:
+# ret = t
+
+ # Generate a temp pot file.
+ dummy, potfile = tempfile.mkstemp(suffix=".pot",
+ prefix="blender_pot_")
+ os.close(dummy)
+ cmd = [PY3, "./update_pot.py", "-i", msgfile, "-o", potfile]
+ if not args.no_checks:
+ cmd.append("-c")
+ t = subprocess.call(cmd)
+ if t:
+ ret = t
+
+ # Update branches’ po files.
+ cmd = [PY3, "./update_po.py", "-i", potfile]
+ if args.langs:
+ if args.add:
+ cmd.append("-a")
+ cmd += args.langs
+ if args.pproc_contexts:
+ cmd.append("--pproc-contexts")
+ t = subprocess.call(cmd)
+ if t:
+ ret = t
+
+ return ret
+
+
+if __name__ == "__main__":
+ print("\n\n *** Running {} *** \n".format(__file__))
+ sys.exit(main())
diff --git a/release/scripts/modules/bl_i18n_utils/update_mo.py b/release/scripts/modules/bl_i18n_utils/update_mo.py
new file mode 100755
index 00000000000..9804eb8ce34
--- /dev/null
+++ b/release/scripts/modules/bl_i18n_utils/update_mo.py
@@ -0,0 +1,91 @@
+#!/usr/bin/python3
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ***** END GPL LICENSE BLOCK *****
+
+# <pep8 compliant>
+
+# Create or update mo’s under /trunk/locale/…
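+#
+# E.g. (language code below is only an example):
+#   ./update_mo.py       # compile every (non "_raw") po found in /trunk/po
+#   ./update_mo.py fr    # only compile /trunk/po/fr.po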
+
+import subprocess
+import os
+import sys
+
+import settings
+import utils
+
+
+GETTEXT_MSGFMT_EXECUTABLE = settings.GETTEXT_MSGFMT_EXECUTABLE
+
+SOURCE_DIR = settings.SOURCE_DIR
+TRUNK_MO_DIR = settings.TRUNK_MO_DIR
+TRUNK_PO_DIR = settings.TRUNK_PO_DIR
+
+DOMAIN = settings.DOMAIN
+
+
+def process_po(po, lang):
+ mo_dir = os.path.join(TRUNK_MO_DIR, lang, "LC_MESSAGES")
+
+ # Create dirs if not existing!
+ os.makedirs(mo_dir, exist_ok=True)
+ # Compile the po file into the domain’s mo, printing translation statistics.
+ cmd = (GETTEXT_MSGFMT_EXECUTABLE,
+ "--statistics",
+ po,
+ "-o",
+ os.path.join(mo_dir, ".".join((DOMAIN, "mo"))),
+ )
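+ # The tuple above expands to something like (paths shortened, domain
+ # assumed to be “blender”, language code only an example):
+ #     msgfmt --statistics fr.po -o .../locale/fr/LC_MESSAGES/blender.mo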
+
+ print("Running ", " ".join(cmd))
+ ret = subprocess.call(cmd)
+ print("Finished.")
+ return ret
+
+
+def main():
+ import argparse
+ parser = argparse.ArgumentParser(description="Create or update mo’s " \
+ "under {}.".format(TRUNK_MO_DIR))
+ parser.add_argument('langs', metavar='ISO_code', nargs='*',
+ help="Restrict processed languages to those.")
+ args = parser.parse_args()
+
+ ret = 0
+
+ if args.langs:
+ for lang in args.langs:
+ po = os.path.join(TRUNK_PO_DIR, ".".join((lang, "po")))
+ if os.path.exists(po):
+ t = process_po(po, lang)
+ if t:
+ ret = t
+ else:
+ for po in os.listdir(TRUNK_PO_DIR):
+ if po.endswith(".po") and not po.endswith("_raw.po"):
+ lang = os.path.basename(po)[:-3]
+ po = os.path.join(TRUNK_PO_DIR, po)
+ t = process_po(po, lang)
+ if t:
+ ret = t
+ return ret
+
+
+if __name__ == "__main__":
+ print("\n\n *** Running {} *** \n".format(__file__))
+ sys.exit(main())
diff --git a/release/scripts/modules/bl_i18n_utils/update_msg.py b/release/scripts/modules/bl_i18n_utils/update_msg.py
new file mode 100755
index 00000000000..e5154632cfe
--- /dev/null
+++ b/release/scripts/modules/bl_i18n_utils/update_msg.py
@@ -0,0 +1,69 @@
+#!/usr/bin/python3
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ***** END GPL LICENSE BLOCK *****
+
+# <pep8-80 compliant>
+
+# Write out messages.txt from Blender.
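+#
+# E.g. (paths below are only examples):
+#   ./update_msg.py -b /path/to/blender -o /tmp/messages.txt
+# runs the given Blender binary in background mode and writes the extracted
+# messages to the given file.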
+
+import os
+import sys
+import subprocess
+
+import settings
+
+
+BLENDER_ARGS = [
+ settings.BLENDER_EXEC,
+ "--background",
+ "--factory-startup",
+ "--python",
+ os.path.join(settings.TOOLS_DIR, "bl_process_msg.py"),
+ "--",
+ "-m",
+]
+
+
+def main():
+ import argparse
+ parser = argparse.ArgumentParser(description="Write out messages.txt " \
+ "from Blender.")
+ parser.add_argument('-c', '--no_checks', default=True,
+ action="store_false",
+ help="No checks over UI messages.")
+ parser.add_argument('-b', '--blender', help="Blender executable path.")
+ parser.add_argument('-o', '--output', help="Output messages file path.")
+ args = parser.parse_args()
+ if args.blender:
+ BLENDER_ARGS[0] = args.blender
+ if not args.no_checks:
+ BLENDER_ARGS.append("-c")
+ if args.output:
+ BLENDER_ARGS.append("-o")
+ BLENDER_ARGS.append(args.output)
+ ret = subprocess.call(BLENDER_ARGS)
+
+ return ret
+
+
+if __name__ == "__main__":
+ print("\n\n *** Running {} *** \n".format(__file__))
+ ret = main()
+ if ret:
+ raise Exception(ret)
diff --git a/release/scripts/modules/bl_i18n_utils/update_po.py b/release/scripts/modules/bl_i18n_utils/update_po.py
new file mode 100755
index 00000000000..042b46c03f2
--- /dev/null
+++ b/release/scripts/modules/bl_i18n_utils/update_po.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python3
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ***** END GPL LICENSE BLOCK *****
+
+# <pep8 compliant>
+
+# Update po’s in the branches from blender.pot in /trunk/po dir.
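+#
+# E.g. (language codes below are only examples):
+#   ./update_po.py -t fr       # update /trunk/po/fr.po from blender.pot
+#   ./update_po.py -a fr nl    # update the fr and nl branches, adding them if missing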
+
+import subprocess
+import os
+import sys
+from codecs import open
+import shutil
+
+import settings
+import utils
+
+GETTEXT_MSGMERGE_EXECUTABLE = settings.GETTEXT_MSGMERGE_EXECUTABLE
+BRANCHES_DIR = settings.BRANCHES_DIR
+TRUNK_PO_DIR = settings.TRUNK_PO_DIR
+FILE_NAME_POT = settings.FILE_NAME_POT
+
+
+def pproc_newcontext_po(po, pot_messages, pot_stats):
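+ # E.g. (hypothetical data): if the pot introduces ("Operator", "New")
+ # while ("", "New") is already translated, the existing translation is
+ # copied to the new context rather than letting msgmerge create a fuzzy
+ # entry for it.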
+ print("Adding new contexts to {}...".format(po))
+ messages, state, stats = utils.parse_messages(po)
+ known_ctxt = stats["contexts"]
+ print("Already known (present) context(s): {}".format(str(known_ctxt)))
+
+ new_ctxt = set()
+ added = 0
+ # Only use valid already translated messages!
+ allowed_keys = state["trans_msg"] - state["fuzzy_msg"] - state["comm_msg"]
+ for key in pot_messages.keys():
+ ctxt, msgid = key
+ if ctxt in known_ctxt:
+ continue
+ new_ctxt.add(ctxt)
+ for t_ctxt in known_ctxt:
+ # XXX The first match will win, this might not be optimal...
+ t_key = (t_ctxt, msgid)
+ if t_key in allowed_keys:
+ # Wrong comments (sources) will be removed by msgmerge...
+ messages[key] = messages[t_key]
+ messages[key]["msgctxt_lines"] = [ctxt]
+ added += 1
+
+ utils.write_messages(po, messages, state["comm_msg"], state["fuzzy_msg"])
+ print("Finished!\n {} new context(s) was/were added {}, adding {} new "
+ "messages.\n".format(len(new_ctxt), str(new_ctxt), added))
+ return 0
+
+
+def process_po(po, lang):
+ # update po file
+ cmd = (GETTEXT_MSGMERGE_EXECUTABLE,
+ "--update",
+ "--no-wrap",
+ "--backup=none",
+ "--lang={}".format(lang),
+ po,
+ FILE_NAME_POT,
+ )
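+ # The tuple above expands to something like (language code is only an
+ # example):
+ #     msgmerge --update --no-wrap --backup=none --lang=fr fr.po blender.pot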
+
+ print("Updating {}...".format(po))
+ print("Running ", " ".join(cmd))
+ ret = subprocess.call(cmd)
+ print("Finished!\n")
+ return ret
+
+
+def main():
+ import argparse
+ parser = argparse.ArgumentParser(description="Write out messages.txt "
+ "from Blender.")
+ parser.add_argument('-t', '--trunk', action="store_true",
+ help="Update po’s in /trunk/po rather than /branches.")
+ parser.add_argument('-i', '--input', metavar="File",
+ help="Input pot file path.")
+ parser.add_argument('--pproc-contexts', action="store_true",
+ help="Pre-process po’s to avoid having plenty of "
+ "fuzzy msgids just because a context was "
+ "added/changed!")
+ parser.add_argument('-a', '--add', action="store_true",
+ help="Add missing po’s (useful only when one or "
+ "more languages are given!).")
+ parser.add_argument('langs', metavar='ISO_code', nargs='*',
+ help="Restrict processed languages to those.")
+ args = parser.parse_args()
+
+ if args.input:
+ global FILE_NAME_POT
+ FILE_NAME_POT = args.input
+ ret = 0
+
+ if args.pproc_contexts:
+ _ctxt_proc = pproc_newcontext_po
+ pot_messages, _a, pot_stats = utils.parse_messages(FILE_NAME_POT)
+ else:
+ _ctxt_proc = lambda a, b, c: 0
+ pot_messages, pot_stats = None, None
+
+ if args.langs:
+ for lang in args.langs:
+ if args.trunk:
+ dr = TRUNK_PO_DIR
+ po = os.path.join(dr, ".".join((lang, "po")))
+ else:
+ dr = os.path.join(BRANCHES_DIR, lang)
+ po = os.path.join(dr, ".".join((lang, "po")))
+ if args.add:
+ if not os.path.exists(dr):
+ os.makedirs(dr)
+ if not os.path.exists(po):
+ shutil.copy(FILE_NAME_POT, po)
+ if args.add or os.path.exists(po):
+ t = _ctxt_proc(po, pot_messages, pot_stats)
+ if t:
+ ret = t
+ t = process_po(po, lang)
+ if t:
+ ret = t
+ elif args.trunk:
+ for po in os.listdir(TRUNK_PO_DIR):
+ if po.endswith(".po"):
+ lang = os.path.basename(po)[:-3]
+ po = os.path.join(TRUNK_PO_DIR, po)
+ t = _ctxt_proc(po, pot_messages, pot_stats)
+ if t:
+ ret = t
+ t = process_po(po, lang)
+ if t:
+ ret = t
+ else:
+ for lang in os.listdir(BRANCHES_DIR):
+ po = os.path.join(BRANCHES_DIR, lang, ".".join((lang, "po")))
+ if os.path.exists(po):
+ t = _ctxt_proc(po, pot_messages, pot_stats)
+ if t:
+ ret = t
+ t = process_po(po, lang)
+ if t:
+ ret = t
+
+ return ret
+
+
+if __name__ == "__main__":
+ print("\n\n *** Running {} *** \n".format(__file__))
+ sys.exit(main())
diff --git a/release/scripts/modules/bl_i18n_utils/update_pot.py b/release/scripts/modules/bl_i18n_utils/update_pot.py
new file mode 100755
index 00000000000..ceef51aa072
--- /dev/null
+++ b/release/scripts/modules/bl_i18n_utils/update_pot.py
@@ -0,0 +1,314 @@
+#!/usr/bin/python3
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ***** END GPL LICENSE BLOCK *****
+
+# <pep8 compliant>
+
+# Update blender.pot file from messages.txt
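+#
+# E.g. (paths below are only examples):
+#   ./update_pot.py -i /tmp/messages.txt -o /tmp/blender.pot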
+
+import subprocess
+import os
+import sys
+import re
+#from codecs import open
+import tempfile
+import argparse
+import time
+import pickle
+
+import settings
+import utils
+
+
+COMMENT_PREFIX = settings.COMMENT_PREFIX
+COMMENT_PREFIX_SOURCE = settings.COMMENT_PREFIX_SOURCE
+CONTEXT_PREFIX = settings.CONTEXT_PREFIX
+FILE_NAME_MESSAGES = settings.FILE_NAME_MESSAGES
+#FILE_NAME_POTFILES = settings.FILE_NAME_POTFILES
+FILE_NAME_POT = settings.FILE_NAME_POT
+SOURCE_DIR = settings.SOURCE_DIR
+POTFILES_DIR = settings.POTFILES_SOURCE_DIR
+SRC_POTFILES = settings.FILE_NAME_SRC_POTFILES
+
+#GETTEXT_XGETTEXT_EXECUTABLE = settings.GETTEXT_XGETTEXT_EXECUTABLE
+#GETTEXT_KEYWORDS = settings.GETTEXT_KEYWORDS
+CONTEXT_DEFAULT = settings.CONTEXT_DEFAULT
+PYGETTEXT_ALLOWED_EXTS = settings.PYGETTEXT_ALLOWED_EXTS
+
+SVN_EXECUTABLE = settings.SVN_EXECUTABLE
+
+WARN_NC = settings.WARN_MSGID_NOT_CAPITALIZED
+NC_ALLOWED = settings.WARN_MSGID_NOT_CAPITALIZED_ALLOWED
+
+SPELL_CACHE = settings.SPELL_CACHE
+
+
+#def generate_valid_potfiles(final_potfiles):
+# "Generates a temp potfiles.in with aboslute paths."
+# with open(FILE_NAME_POTFILES, 'r', 'utf-8') as f, \
+# open(final_potfiles, 'w', 'utf-8') as w:
+# for line in f:
+# line = utils.stripeol(line)
+# if line:
+# w.write("".join((os.path.join(SOURCE_DIR,
+# os.path.normpath(line)), "\n")))
+
+# Do this only once!
+# Get contexts defined in blf.
+CONTEXTS = {}
+with open(os.path.join(SOURCE_DIR, settings.PYGETTEXT_CONTEXTS_DEFSRC)) as f:
+ reg = re.compile(settings.PYGETTEXT_CONTEXTS)
+ f = f.read()
+ # This regex is supposed to yield tuples
+ # (key=C_macro_name, value=C_string).
+ CONTEXTS = dict(m.groups() for m in reg.finditer(f))
+
+# Build regexes to extract messages (with optional contexts) from C source.
+pygettexts = tuple(re.compile(r).search
+ for r in settings.PYGETTEXT_KEYWORDS)
+_clean_str = re.compile(settings.str_clean_re).finditer
+clean_str = lambda s: "".join(m.group("clean") for m in _clean_str(s))
+
+def check_file(path, rel_path, messages):
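+ # Scan a single source file with every keyword regex, recording each
+ # (context, message) pair together with its "path:line" location.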
+ with open(path, encoding="utf-8") as f:
+ f = f.read()
+ for srch in pygettexts:
+ m = srch(f)
+ line = pos = 0
+ while m:
+ d = m.groupdict()
+ # Context.
+ ctxt = d.get("ctxt_raw")
+ if ctxt:
+ if ctxt in CONTEXTS:
+ ctxt = CONTEXTS[ctxt]
+ elif '"' in ctxt or "'" in ctxt:
+ ctxt = clean_str(ctxt)
+ else:
+ print("WARNING: raw context “{}” couldn’t be resolved!"
+ "".format(ctxt))
+ ctxt = CONTEXT_DEFAULT
+ else:
+ ctxt = CONTEXT_DEFAULT
+ # Message.
+ msg = d.get("msg_raw")
+ if msg:
+ if '"' in msg or "'" in msg:
+ msg = clean_str(msg)
+ else:
+ print("WARNING: raw message “{}” couldn’t be resolved!"
+ "".format(msg))
+ msg = ""
+ else:
+ msg = ""
+ # Line.
+ line += f[pos:m.start()].count('\n')
+ # And we are done for this item!
+ messages.setdefault((ctxt, msg), []).append(":".join((rel_path, str(line))))
+ pos = m.end()
+ line += f[m.start():pos].count('\n')
+ m = srch(f, pos)
+
+
+def py_xgettext(messages):
+ with open(SRC_POTFILES) as src:
+ forbidden = set()
+ forced = set()
+ for l in src:
+ if l[0] == '-':
+ forbidden.add(l[1:].rstrip('\n'))
+ elif l[0] != '#':
+ forced.add(l.rstrip('\n'))
+ for root, dirs, files in os.walk(POTFILES_DIR):
+ if "/.svn" in root:
+ continue
+ for fname in files:
+ if os.path.splitext(fname)[1] not in PYGETTEXT_ALLOWED_EXTS:
+ continue
+ path = os.path.join(root, fname)
+ rel_path = os.path.relpath(path, SOURCE_DIR)
+ if rel_path in forbidden | forced:
+ continue
+ check_file(path, rel_path, messages)
+ for path in forced:
+ if os.path.exists(os.path.join(SOURCE_DIR, path)):
+ check_file(os.path.join(SOURCE_DIR, path), path, messages)
+
+
+# Spell checking!
+import enchant
+dict_spelling = enchant.Dict("en_US")
+
+from spell_check_utils import (dict_uimsgs,
+ split_words,
+ )
+
+_spell_checked = set()
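+# E.g. (hypothetical msgid): spell_check("Automaticaly scale the objct", cache)
+# should return suggestion lines for "Automaticaly" and "objct", while words
+# listed in dict_uimsgs (and words already checked) are silently skipped.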
+def spell_check(txt, cache):
+ ret = []
+
+ if cache is not None and txt in cache:
+ return ret
+
+ for w in split_words(txt):
+ w_lower = w.lower()
+ if w_lower in dict_uimsgs | _spell_checked:
+ continue
+ if not dict_spelling.check(w):
+ ret.append("{}: suggestions are ({})"
+ .format(w, "'" + "', '".join(dict_spelling.suggest(w))
+ + "'"))
+ else:
+ _spell_checked.add(w_lower)
+
+ if not ret:
+ if cache is not None:
+ cache.add(txt)
+
+ return ret
+
+
+def get_svnrev():
+ cmd = [SVN_EXECUTABLE,
+ "info",
+ "--xml",
+ SOURCE_DIR,
+ ]
+ xml = subprocess.check_output(cmd)
+ # Decode, so the revision can be formatted into the pot header as text.
+ return re.search(rb'revision="(\d+)"', xml).group(1).decode()
+
+
+def gen_empty_pot():
+ blender_rev = get_svnrev()
+ utctime = time.gmtime()
+ time_str = time.strftime("%Y-%m-%d %H:%M+0000", utctime)
+ year_str = time.strftime("%Y", utctime)
+
+ return utils.gen_empty_messages(blender_rev, time_str, year_str)
+
+
+def merge_messages(msgs, states, messages, do_checks, spell_cache):
+ num_added = num_present = 0
+ for (context, msgid), srcs in messages.items():
+ if do_checks:
+ err = spell_check(msgid, spell_cache)
+ if err:
+ print("WARNING: spell check failed on “" + msgid + "”:")
+ print("\t\t" + "\n\t\t".join(err))
+ print("\tFrom:\n\t\t" + "\n\t\t".join(srcs))
+
+ # Escape some chars in msgid!
+ msgid = msgid.replace("\\", "\\\\")
+ msgid = msgid.replace("\"", "\\\"")
+ msgid = msgid.replace("\t", "\\t")
+
+ srcs = [COMMENT_PREFIX_SOURCE + s for s in srcs]
+
+ key = (context, msgid)
+ if key not in msgs:
+ msgs[key] = {"msgid_lines": [msgid],
+ "msgstr_lines": [""],
+ "comment_lines": srcs,
+ "msgctxt_lines": [context]}
+ num_added += 1
+ else:
+ # We need to merge comments!
+ msgs[key]["comment_lines"].extend(srcs)
+ num_present += 1
+
+ return num_added, num_present
+
+
+def main():
+ parser = argparse.ArgumentParser(description="Update blender.pot file " \
+ "from messages.txt")
+ parser.add_argument('-w', '--warning', action="store_true",
+ help="Show warnings.")
+ parser.add_argument('-i', '--input', metavar="File",
+ help="Input messages file path.")
+ parser.add_argument('-o', '--output', metavar="File",
+ help="Output pot file path.")
+
+ args = parser.parse_args()
+ if args.input:
+ global FILE_NAME_MESSAGES
+ FILE_NAME_MESSAGES = args.input
+ if args.output:
+ global FILE_NAME_POT
+ FILE_NAME_POT = args.output
+
+ print("Running fake py gettext…")
+ # We no longer use xgettext, it’s simpler to do it ourselves!
+ messages = {}
+ py_xgettext(messages)
+ print("Finished, found {} messages.".format(len(messages)))
+
+ if SPELL_CACHE and os.path.exists(SPELL_CACHE):
+ with open(SPELL_CACHE, 'rb') as f:
+ spell_cache = pickle.load(f)
+ else:
+ spell_cache = set()
+ print("Spell cache contains {} element(s).".format(len(spell_cache)))
+
+ print("Generating POT file {}…".format(FILE_NAME_POT))
+ msgs, states = gen_empty_pot()
+ tot_messages, _a = merge_messages(msgs, states, messages,
+ True, spell_cache)
+
+ # add messages collected automatically from RNA
+ print("\tMerging RNA messages from {}…".format(FILE_NAME_MESSAGES))
+ messages = {}
+ with open(FILE_NAME_MESSAGES, encoding="utf-8") as f:
+ srcs = []
+ context = ""
+ for line in f:
+ line = utils.stripeol(line)
+
+ if line.startswith(COMMENT_PREFIX):
+ srcs.append(line[len(COMMENT_PREFIX):].strip())
+ elif line.startswith(CONTEXT_PREFIX):
+ context = line[len(CONTEXT_PREFIX):].strip()
+ else:
+ key = (context, line)
+ messages[key] = srcs
+ srcs = []
+ context = ""
+ num_added, num_present = merge_messages(msgs, states, messages,
+ True, spell_cache)
+ tot_messages += num_added
+ print("\tMerged {} messages ({} were already present)."
+ "".format(num_added, num_present))
+
+ # Write back all messages into blender.pot.
+ utils.write_messages(FILE_NAME_POT, msgs, states["comm_msg"],
+ states["fuzzy_msg"])
+
+ print("Spell cache now contains {} element(s).".format(len(spell_cache)))
+ if SPELL_CACHE and spell_cache:
+ with open(SPELL_CACHE, 'wb') as f:
+ pickle.dump(spell_cache, f)
+
+ print("Finished, total: {} messages!".format(tot_messages - 1))
+
+ return 0
+
+
+if __name__ == "__main__":
+ print("\n\n *** Running {} *** \n".format(__file__))
+ sys.exit(main())
diff --git a/release/scripts/modules/bl_i18n_utils/update_trunk.py b/release/scripts/modules/bl_i18n_utils/update_trunk.py
new file mode 100755
index 00000000000..f4a2e0b3f8f
--- /dev/null
+++ b/release/scripts/modules/bl_i18n_utils/update_trunk.py
@@ -0,0 +1,132 @@
+#!/usr/bin/python3
+
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ***** END GPL LICENSE BLOCK *****
+
+# <pep8 compliant>
+
+# Update trunk from branches:
+# * Remove po’s in trunk.
+# * Copy po’s from branches advanced enough.
+# * Clean po’s in trunk.
+# * Compile po’s in trunk into mo’s, keeping track of those failing.
+# * Remove po’s, mo’s (and their dir’s) that failed to compile or
+# are no longer present in trunk.
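+#
+# E.g. (threshold and language codes below are only examples):
+#   ./update_trunk.py -t 50 fr es    # only import fr and es, with a 50% threshold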
+
+import subprocess
+import os
+import sys
+import shutil
+
+import settings
+
+TRUNK_PO_DIR = settings.TRUNK_PO_DIR
+TRUNK_MO_DIR = settings.TRUNK_MO_DIR
+
+PY3 = settings.PYTHON3_EXEC
+
+
+def main():
+ import argparse
+ parser = argparse.ArgumentParser(description="" \
+ "Update trunk from branches:\n" \
+ "* Remove po’s in trunk.\n" \
+ "* Copy po’s from branches advanced enough.\n" \
+ "* Clean po’s in trunk.\n" \
+ "* Compile po’s in trunk in mo’s, keeping " \
+ "track of those failing.\n" \
+ "* Remove po’s and mo’s (and their dir’s) that " \
+ "failed to compile or are no more present in trunk.")
+ parser.add_argument('-t', '--threshold', type=int,
+ help="Import threshold, as a percentage.")
+ parser.add_argument('-p', '--po', action="store_false",
+ help="Do not remove failing po’s.")
+ parser.add_argument('-m', '--mo', action="store_false",
+ help="Do not remove failing mo’s.")
+ parser.add_argument('langs', metavar='ISO_code', nargs='*',
+ help="Restrict processed languages to those.")
+ args = parser.parse_args()
+
+
+ ret = 0
+ failed = set()
+
+ # Remove po’s in trunk.
+ for po in os.listdir(TRUNK_PO_DIR):
+ if po.endswith(".po"):
+ lang = os.path.basename(po)[:-3]
+ if args.langs and lang not in args.langs:
+ continue
+ po = os.path.join(TRUNK_PO_DIR, po)
+ os.remove(po)
+
+ # Copy po’s from branches.
+ cmd = [PY3, "./import_po_from_branches.py", "-s"]
+ if args.threshold is not None:
+ cmd += ["-t", str(args.threshold)]
+ if args.langs:
+ cmd += args.langs
+ t = subprocess.call(cmd)
+ if t:
+ ret = t
+
+ # Add to failed all mo’s that no longer have a relevant po in trunk.
+ for lang in os.listdir(TRUNK_MO_DIR):
+ if lang == ".svn":
+ continue # Skip svn’s metadata dir!
+ if not os.path.exists(os.path.join(TRUNK_PO_DIR, ".".join((lang, "po")))):
+ failed.add(lang)
+
+ # Check and compile each po separately, to keep track of those failing.
+ # XXX There should not be any failing at this stage, import step is
+ # supposed to have already filtered them out!
+ for po in os.listdir(TRUNK_PO_DIR):
+ if po.endswith(".po") and not po.endswith("_raw.po"):
+ lang = os.path.basename(po)[:-3]
+ if args.langs and lang not in args.langs:
+ continue
+
+ cmd = [PY3, "./clean_po.py", "-t", "-s", lang]
+ t = subprocess.call(cmd)
+ if t:
+ ret = t
+ failed.add(lang)
+ continue
+
+ cmd = [PY3, "./update_mo.py", lang]
+ t = subprocess.call(cmd)
+ if t:
+ ret = t
+ failed.add(lang)
+
+ # Remove failing po’s, mo’s and related dir’s.
+ for lang in failed:
+ print("Lang “{}” failed, removing it...".format(lang))
+ if args.po:
+ po = os.path.join(TRUNK_PO_DIR, ".".join((lang, "po")))
+ if os.path.exists(po):
+ os.remove(po)
+ if args.mo:
+ mo = os.path.join(TRUNK_MO_DIR, lang)
+ if os.path.exists(mo):
+ shutil.rmtree(mo)
+
+ return ret
+
+
+if __name__ == "__main__":
+ print("\n\n *** Running {} *** \n".format(__file__))
+ sys.exit(main())
diff --git a/release/scripts/modules/bl_i18n_utils/user_settings.py b/release/scripts/modules/bl_i18n_utils/user_settings.py
new file mode 100644
index 00000000000..23d9783cd0f
--- /dev/null
+++ b/release/scripts/modules/bl_i18n_utils/user_settings.py
@@ -0,0 +1,23 @@
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ***** END GPL LICENSE BLOCK *****
+
+# <pep8 compliant>
+
+import os
+
+import settings
diff --git a/release/scripts/modules/bl_i18n_utils/utils.py b/release/scripts/modules/bl_i18n_utils/utils.py
new file mode 100644
index 00000000000..dfed2088878
--- /dev/null
+++ b/release/scripts/modules/bl_i18n_utils/utils.py
@@ -0,0 +1,377 @@
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# ***** END GPL LICENSE BLOCK *****
+
+# <pep8 compliant>
+
+# Some misc utilities...
+
+import os
+import sys
+import collections
+from codecs import open
+
+import settings
+
+
+COMMENT_PREFIX = settings.COMMENT_PREFIX
+WARN_NC = settings.WARN_MSGID_NOT_CAPITALIZED
+NC_ALLOWED = settings.WARN_MSGID_NOT_CAPITALIZED_ALLOWED
+
+
+def stripeol(s):
+ return s.rstrip("\n\r")
+
+
+# XXX For now, we assume that all messages > 30 chars are tooltips!
+def is_tooltip(msgid):
+ return len(msgid) > 30
+
+def parse_messages(fname):
+ """
+ Returns a tuple (messages, states, stats).
+ messages is an OrderedDict of dicts
+ {(ctxt, msgid): {msgid_lines:, msgstr_lines:,
+ comment_lines:, msgctxt_lines:}}.
+ states is a dict of three sets of (msgid, ctxt), and a boolean flag
+ indicating the .po is somewhat broken
+ {trans_msg:, fuzzy_msg:, comm_msg:, is_broken:}.
+ stats is a dict of values
+ {tot_msg:, trans_msg:, tot_ttips:, trans_ttips:, comm_msg:,
+ nbr_signs:, nbr_trans_signs:, contexts: set()}.
+ Note: This function will silently "arrange" mis-formatted entries, so
+ running write_messages() afterward should always produce a po-valid file,
+ though its content may not be correct!
+ """
+ tot_messages = 0
+ tot_tooltips = 0
+ trans_messages = 0
+ trans_tooltips = 0
+ comm_messages = 0
+ nbr_signs = 0
+ nbr_trans_signs = 0
+ contexts = set()
+ reading_msgid = False
+ reading_msgstr = False
+ reading_msgctxt = False
+ reading_comment = False
+ is_translated = False
+ is_fuzzy = False
+ is_commented = False
+ is_broken = False
+ msgid_lines = []
+ msgstr_lines = []
+ msgctxt_lines = []
+ comment_lines = []
+
+ messages = getattr(collections, 'OrderedDict', dict)()
+ translated_messages = set()
+ fuzzy_messages = set()
+ commented_messages = set()
+
+
+ def clean_vars():
+ nonlocal reading_msgid, reading_msgstr, reading_msgctxt, \
+ reading_comment, is_fuzzy, is_translated, is_commented, \
+ msgid_lines, msgstr_lines, msgctxt_lines, comment_lines
+ reading_msgid = reading_msgstr = reading_msgctxt = \
+ reading_comment = False
+ is_fuzzy = is_translated = is_commented = False
+ msgid_lines = []
+ msgstr_lines = []
+ msgctxt_lines = []
+ comment_lines = []
+
+
+ def finalize_message():
+ nonlocal reading_msgid, reading_msgstr, reading_msgctxt, \
+ reading_comment, is_fuzzy, is_translated, is_commented, \
+ msgid_lines, msgstr_lines, msgctxt_lines, comment_lines, \
+ messages, translated_messages, fuzzy_messages, \
+ commented_messages, \
+ tot_messages, tot_tooltips, trans_messages, trans_tooltips, \
+ comm_messages, nbr_signs, nbr_trans_signs, contexts
+
+ msgid = "".join(msgid_lines)
+ msgctxt = "".join(msgctxt_lines)
+ msgkey = (msgctxt, msgid)
+ is_ttip = is_tooltip(msgid)
+
+ # Never allow overriding existing msgid/msgctxt pairs!
+ if msgkey in messages:
+ clean_vars()
+ return
+
+ nbr_signs += len(msgid)
+ if is_commented:
+ commented_messages.add(msgkey)
+ elif is_fuzzy:
+ fuzzy_messages.add(msgkey)
+ elif is_translated:
+ translated_messages.add(msgkey)
+ nbr_trans_signs += len("".join(msgstr_lines))
+ messages[msgkey] = {"msgid_lines" : msgid_lines,
+ "msgstr_lines" : msgstr_lines,
+ "comment_lines": comment_lines,
+ "msgctxt_lines": msgctxt_lines}
+
+ if is_commented:
+ comm_messages += 1
+ else:
+ tot_messages += 1
+ if is_ttip:
+ tot_tooltips += 1
+ if not is_fuzzy and is_translated:
+ trans_messages += 1
+ if is_ttip:
+ trans_tooltips += 1
+ if msgctxt not in contexts:
+ contexts.add(msgctxt)
+
+ clean_vars()
+
+
+ with open(fname, 'r', "utf-8") as f:
+ for line_nr, line in enumerate(f):
+ line = stripeol(line)
+ if line == "":
+ finalize_message()
+
+ elif line.startswith("msgctxt") or \
+ line.startswith("".join((COMMENT_PREFIX, "msgctxt"))):
+ reading_comment = False
+ reading_msgctxt = True
+ if line.startswith(COMMENT_PREFIX):
+ is_commented = True
+ line = line[9+len(COMMENT_PREFIX):-1]
+ else:
+ line = line[9:-1]
+ msgctxt_lines.append(line)
+
+ elif line.startswith("msgid") or \
+ line.startswith("".join((COMMENT_PREFIX, "msgid"))):
+ reading_comment = False
+ reading_msgid = True
+ if line.startswith(COMMENT_PREFIX):
+ is_commented = True
+ line = line[7+len(COMMENT_PREFIX):-1]
+ else:
+ line = line[7:-1]
+ msgid_lines.append(line)
+
+ elif line.startswith("msgstr") or \
+ line.startswith("".join((COMMENT_PREFIX, "msgstr"))):
+ if not reading_msgid:
+ is_broken = True
+ else:
+ reading_msgid = False
+ reading_msgstr = True
+ if line.startswith(COMMENT_PREFIX):
+ line = line[8+len(COMMENT_PREFIX):-1]
+ if not is_commented:
+ is_broken = True
+ else:
+ line = line[8:-1]
+ if is_commented:
+ is_broken = True
+ msgstr_lines.append(line)
+ if line:
+ is_translated = True
+
+ elif line.startswith("#"):
+ if reading_msgid:
+ if is_commented:
+ msgid_lines.append(line[1+len(COMMENT_PREFIX):-1])
+ else:
+ msgid_lines.append(line)
+ is_broken = True
+ elif reading_msgstr:
+ if is_commented:
+ msgstr_lines.append(line[1+len(COMMENT_PREFIX):-1])
+ else:
+ msgstr_lines.append(line)
+ is_broken = True
+ else:
+ if line.startswith("#, fuzzy"):
+ is_fuzzy = True
+ else:
+ comment_lines.append(line)
+ reading_comment = True
+
+ else:
+ if reading_msgid:
+ msgid_lines.append(line[1:-1])
+ elif reading_msgstr:
+ line = line[1:-1]
+ msgstr_lines.append(line)
+ if not is_translated and line:
+ is_translated = True
+ elif reading_msgctxt:
+ msgctxt_lines.append(line[1:-1])
+ else:
+ is_broken = True
+
+ # If no final empty line, last message is not finalized!
+ if reading_msgstr:
+ finalize_message()
+
+
+ return (messages,
+ {"trans_msg": translated_messages,
+ "fuzzy_msg": fuzzy_messages,
+ "comm_msg" : commented_messages,
+ "is_broken": is_broken},
+ {"tot_msg" : tot_messages,
+ "trans_msg" : trans_messages,
+ "tot_ttips" : tot_tooltips,
+ "trans_ttips" : trans_tooltips,
+ "comm_msg" : comm_messages,
+ "nbr_signs" : nbr_signs,
+ "nbr_trans_signs": nbr_trans_signs,
+ "contexts" : contexts})
+
+
+def write_messages(fname, messages, commented, fuzzy):
+ "Write in fname file the content of messages (similar to parse_messages " \
+ "returned values). commented and fuzzy are two sets containing msgid. " \
+ "Returns the number of written messages."
+ num = 0
+ with open(fname, 'w', "utf-8") as f:
+ for msgkey, val in messages.items():
+ msgctxt, msgid = msgkey
+ f.write("\n".join(val["comment_lines"]))
+ # Only mark as fuzzy if msgstr is not empty!
+ if msgkey in fuzzy and "".join(val["msgstr_lines"]):
+ f.write("\n#, fuzzy")
+ if msgkey in commented:
+ if msgctxt:
+ f.write("\n{}msgctxt \"".format(COMMENT_PREFIX))
+ f.write("\"\n{}\"".format(COMMENT_PREFIX).join(
+ val["msgctxt_lines"]))
+ f.write("\"")
+ f.write("\n{}msgid \"".format(COMMENT_PREFIX))
+ f.write("\"\n{}\"".format(COMMENT_PREFIX).join(
+ val["msgid_lines"]))
+ f.write("\"\n{}msgstr \"".format(COMMENT_PREFIX))
+ f.write("\"\n{}\"".format(COMMENT_PREFIX).join(
+ val["msgstr_lines"]))
+ f.write("\"\n\n")
+ else:
+ if msgctxt:
+ f.write("\nmsgctxt \"")
+ f.write("\"\n\"".join(val["msgctxt_lines"]))
+ f.write("\"")
+ f.write("\nmsgid \"")
+ f.write("\"\n\"".join(val["msgid_lines"]))
+ f.write("\"\nmsgstr \"")
+ f.write("\"\n\"".join(val["msgstr_lines"]))
+ f.write("\"\n\n")
+ num += 1
+ return num
+
+
+def gen_empty_messages(blender_rev, time_str, year_str):
+ """Generate an empty messages & state data (only header if present!)."""
+ header_key = ("", "")
+
+ messages = getattr(collections, 'OrderedDict', dict)()
+ messages[header_key] = {
+ "msgid_lines": [""],
+ "msgctxt_lines": [],
+ "msgstr_lines": [
+ "Project-Id-Version: Blender r{}\\n"
+ "".format(blender_rev),
+ "Report-Msgid-Bugs-To: \\n",
+ "POT-Creation-Date: {}\\n"
+ "".format(time_str),
+ "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n",
+ "Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n",
+ "Language-Team: LANGUAGE <LL@li.org>\\n",
+ "Language: \\n",
+ "MIME-Version: 1.0\\n",
+ "Content-Type: text/plain; charset=UTF-8\\n",
+ "Content-Transfer-Encoding: 8bit\\n"
+ ],
+ "comment_lines": [
+ "# Blender's translation file (po format).",
+ "# Copyright (C) {} The Blender Foundation."
+ "".format(year_str),
+ "# This file is distributed under the same "
+ "# license as the Blender package.",
+ "# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.",
+ "#",
+ ],
+ }
+
+ states = {"trans_msg": set(),
+ "fuzzy_msg": {header_key},
+ "comm_msg": set(),
+ "is_broken": False}
+
+ return messages, states
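+
+# E.g. gen_empty_messages("48500", "2012-07-03 12:00+0000", "2012") (values
+# are only examples) returns a messages dict holding nothing but the po
+# header, plus the matching states, ready to be passed to write_messages().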
+
+
+def print_stats(stats, glob_stats=None, prefix=""):
+ """
+ Print out some stats about a po file.
+ glob_stats is for making global stats over several po's.
+ """
+ tot_msgs = stats["tot_msg"]
+ trans_msgs = stats["trans_msg"]
+ tot_ttips = stats["tot_ttips"]
+ trans_ttips = stats["trans_ttips"]
+ comm_msgs = stats["comm_msg"]
+ nbr_signs = stats["nbr_signs"]
+ nbr_trans_signs = stats["nbr_trans_signs"]
+ contexts = stats["contexts"]
+ lvl = lvl_ttips = lvl_trans_ttips = lvl_ttips_in_trans = lvl_comm = 0.0
+
+ if tot_msgs > 0:
+ lvl = float(trans_msgs)/float(tot_msgs)
+ lvl_ttips = float(tot_ttips)/float(tot_msgs)
+ lvl_comm = float(comm_msgs)/float(tot_msgs+comm_msgs)
+ if tot_ttips > 0:
+ lvl_trans_ttips = float(trans_ttips)/float(tot_ttips)
+ if trans_msgs > 0:
+ lvl_ttips_in_trans = float(trans_ttips)/float(trans_msgs)
+
+ if glob_stats:
+ glob_stats["nbr"] += 1.0
+ glob_stats["lvl"] += lvl
+ glob_stats["lvl_ttips"] += lvl_ttips
+ glob_stats["lvl_trans_ttips"] += lvl_trans_ttips
+ glob_stats["lvl_ttips_in_trans"] += lvl_ttips_in_trans
+ glob_stats["lvl_comm"] += lvl_comm
+ glob_stats["nbr_trans_signs"] += nbr_trans_signs
+ if glob_stats["nbr_signs"] == 0:
+ glob_stats["nbr_signs"] = nbr_signs
+ glob_stats["contexts"] |= contexts
+
+ lines = ("",
+ "{:>6.1%} done! ({} translated messages over {}).\n"
+ "".format(lvl, trans_msgs, tot_msgs),
+ "{:>6.1%} of messages are tooltips ({} over {}).\n"
+ "".format(lvl_ttips, tot_ttips, tot_msgs),
+ "{:>6.1%} of tooltips are translated ({} over {}).\n"
+ "".format(lvl_trans_ttips, trans_ttips, tot_ttips),
+ "{:>6.1%} of translated messages are tooltips ({} over {}).\n"
+ "".format(lvl_ttips_in_trans, trans_ttips, trans_msgs),
+ "{:>6.1%} of messages are commented ({} over {}).\n"
+ "".format(lvl_comm, comm_msgs, comm_msgs+tot_msgs),
+ "This translation is currently made of {} signs.\n"
+ "".format(nbr_trans_signs))
+ print(prefix.join(lines))
+ return 0
+