git.blender.org/blender.git
author     Ian Thompson <quornian@googlemail.com>   2008-07-15 11:34:46 +0400
committer  Ian Thompson <quornian@googlemail.com>   2008-07-15 11:34:46 +0400
commit     aeb4d0c631537d93de084301cac6a5dc981b6655 (patch)
tree       c82ff43f2732cac35447b657b8adc9306261b74f /release
parent     dbb61988fdaa6085912dee6ab8f5569e63ef88fb (diff)
Created a BPy module BPyTextPlugin to centralize functions used across the text plugin scripts. Also created two more scripts to handle imports and member suggestions.
Diffstat (limited to 'release')
-rw-r--r--  release/scripts/bpymodules/BPyTextPlugin.py    271
-rw-r--r--  release/scripts/textplugin_imports.py            71
-rw-r--r--  release/scripts/textplugin_membersuggest.py      67
-rw-r--r--  release/scripts/textplugin_suggest.py           327
4 files changed, 453 insertions, 283 deletions
diff --git a/release/scripts/bpymodules/BPyTextPlugin.py b/release/scripts/bpymodules/BPyTextPlugin.py
new file mode 100644
index 00000000000..38bdab82a2d
--- /dev/null
+++ b/release/scripts/bpymodules/BPyTextPlugin.py
@@ -0,0 +1,271 @@
+import bpy, sys
+import __builtin__, tokenize
+from tokenize import generate_tokens
+# TODO: Remove the dependency for a full Python installation. Currently only the
+# tokenize module is required
+
+# Context types
+NORMAL = 0
+SINGLE_QUOTE = 1
+DOUBLE_QUOTE = 2
+COMMENT = 3
+
+# Python keywords
+KEYWORDS = ['and', 'del', 'from', 'not', 'while', 'as', 'elif', 'global',
+            'or', 'with', 'assert', 'else', 'if', 'pass', 'yield',
+            'break', 'except', 'import', 'print', 'class', 'exec', 'in',
+            'raise', 'continue', 'finally', 'is', 'return', 'def', 'for',
+            'lambda', 'try' ]
+
+
+def suggest_cmp(x, y):
+    """Use this method when sorting a list for suggestions"""
+
+    return cmp(x[0], y[0])
+
+def get_module(name):
+    """Returns the module specified by its name. This module is imported and as
+    such will run any initialization code specified within the module."""
+
+    mod = __import__(name)
+    components = name.split('.')
+    for comp in components[1:]:
+        mod = getattr(mod, comp)
+    return mod
+
+def is_module(m):
+    """Taken from the inspect module of the standard Python installation"""
+
+    return isinstance(m, type(bpy))
+
+def type_char(v):
+    if is_module(v):
+        return 'm'
+    elif callable(v):
+        return 'f'
+    else:
+        return 'v'
+
+def get_context(line, cursor):
+    """Establishes the context of the cursor in the given line
+
+    Returns one of:
+      NORMAL - Cursor is in a normal context
+      SINGLE_QUOTE - Cursor is inside a single quoted string
+      DOUBLE_QUOTE - Cursor is inside a double quoted string
+      COMMENT - Cursor is inside a comment
+
+    """
+
+    # Detect context (in string or comment)
+    in_str = 0 # 1-single quotes, 2-double quotes
+    for i in range(cursor):
+        if not in_str:
+            if line[i] == "'": in_str = 1
+            elif line[i] == '"': in_str = 2
+            elif line[i] == '#': return 3 # In a comment so quit
+        else:
+            if in_str == 1:
+                if line[i] == "'":
+                    in_str = 0
+                    # In again if ' escaped, out again if \ escaped, and so on
+                    for a in range(1, i+1):
+                        if line[i-a] == '\\': in_str = 1-in_str
+                        else: break
+            elif in_str == 2:
+                if line[i] == '"':
+                    in_str = 0
+                    # In again if " escaped, out again if \ escaped, and so on
+                    for a in range(1, i+1):
+                        if line[i-a] == '\\': in_str = 2-in_str
+                        else: break
+    return in_str
+
+def current_line(txt):
+    """Extracts the Python script line at the cursor in the supplied Blender
+    Text object, along with the cursor's position within that line, as the
+    tuple pair (line, cursor)"""
+
+    (lineindex, cursor) = txt.getCursorPos()
+    lines = txt.asLines()
+    line = lines[lineindex]
+
+    # Join previous lines to this line if spanning
+    i = lineindex - 1
+    while i >= 0:
+        earlier = lines[i].rstrip()
+        if earlier.endswith('\\'):
+            line = earlier[:-1] + ' ' + line
+            cursor += len(earlier)
+        i -= 1
+
+    # Join later lines while there is an explicit joining character
+    i = lineindex
+    while i < len(lines)-1 and lines[i].rstrip().endswith('\\'):
+        later = lines[i+1].strip()
+        line = line.rstrip()[:-1] + ' ' + later
+        i += 1
+
+    return line, cursor
+
+def get_targets(line, cursor):
+    """Parses a period separated string of valid names preceding the cursor and
+    returns them as a list in the same order."""
+
+    targets = []
+    i = cursor - 1
+    while i >= 0 and (line[i].isalnum() or line[i] == '_' or line[i] == '.'):
+        i -= 1
+
+    pre = line[i+1:cursor]
+    return pre.split('.')
+
+def get_imports(txt):
+    """Returns a dictionary which maps symbol names in the source code to their
+    respective modules.
+
+    The line 'from Blender import Text as BText' for example, results in the
+    mapping 'BText' : <module 'Blender.Text' (built-in)>
+
+    Note that this method imports the modules to provide this mapping and, as
+    such, will execute any initialization code found within.
+    """
+
+    # Unfortunately, generate_tokens may fail if the script leaves brackets or
+    # strings open or there are other syntax errors. For now we return an empty
+    # dictionary until an alternative parse method is implemented.
+    try:
+        txt.reset()
+        tokens = generate_tokens(txt.readline)
+    except:
+        return dict()
+
+    imports = dict()
+    step = 0
+
+    for type, string, start, end, line in tokens:
+        store = False
+
+        # Default, look for 'from' or 'import' to start
+        if step == 0:
+            if string == 'from':
+                tmp = []
+                step = 1
+            elif string == 'import':
+                fromname = None
+                tmp = []
+                step = 2
+
+        # Found a 'from', create fromname in form '???.???...'
+        elif step == 1:
+            if string == 'import':
+                fromname = '.'.join(tmp)
+                tmp = []
+                step = 2
+            elif type == tokenize.NAME:
+                tmp.append(string)
+            elif string != '.':
+                step = 0 # Invalid syntax
+
+        # Found 'import', fromname is populated or None, create impname
+        elif step == 2:
+            if string == 'as':
+                impname = '.'.join(tmp)
+                step = 3
+            elif type == tokenize.NAME:
+                tmp.append(string)
+            elif string != '.':
+                impname = '.'.join(tmp)
+                symbol = impname
+                store = True
+
+        # Found 'as', change symbol to this value and go back to step 2
+        elif step == 3:
+            if type == tokenize.NAME:
+                symbol = string
+            else:
+                store = True
+
+        # Both impname and symbol have now been populated so we can import
+        if store:
+
+            # Handle special case of 'import *'
+            if impname == '*':
+                parent = get_module(fromname)
+                for symbol, attr in parent.__dict__.items():
+                    imports[symbol] = attr
+
+            else:
+                # Try importing the name as a module
+                try:
+                    if fromname:
+                        module = get_module(fromname +'.'+ impname)
+                    else:
+                        module = get_module(impname)
+                    imports[symbol] = module
+                except:
+                    # Try importing name as an attribute of the parent
+                    try:
+                        module = __import__(fromname, globals(), locals(), [impname])
+                        imports[symbol] = getattr(module, impname)
+                    except:
+                        pass
+
+            # More to import from the same module?
+            if string == ',':
+                tmp = []
+                step = 2
+            else:
+                step = 0
+
+    return imports
+
+
+def get_builtins():
+    """Returns a dictionary of built-in modules, functions and variables."""
+
+    return __builtin__.__dict__
+
+def get_defs(txt):
+    """Returns a dictionary which maps definition names in the source code to
+    a list of their parameter names.
+
+    The line 'def doit(one, two, three): print one' for example, results in the
+    mapping 'doit' : [ 'one', 'two', 'three' ]
+    """
+
+    # See above for problems with generate_tokens
+    try:
+        txt.reset()
+        tokens = generate_tokens(txt.readline)
+    except:
+        return dict()
+
+    defs = dict()
+    step = 0
+
+    for type, string, start, end, line in tokens:
+
+        # Look for 'def'
+        if step == 0:
+            if string == 'def':
+                name = None
+                step = 1
+
+        # Found 'def', look for name followed by '('
+        elif step == 1:
+            if type == tokenize.NAME:
+                name = string
+                params = []
+            elif name and string == '(':
+                step = 2
+
+        # Found 'def' name '(', now identify the parameters up to ')'
+        # TODO: Handle ellipsis '...'
+        elif step == 2:
+            if type == tokenize.NAME:
+                params.append(string)
+            elif string == ')':
+                defs[name] = params
+                step = 0
+
+    return defs
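
For illustration only (this snippet is not part of the patch): the helpers above are easiest to understand from their inputs and outputs. get_context() classifies the cursor position, get_targets() splits the dotted name in front of the cursor, and get_imports() maps imported symbols in the active text to live module objects. A minimal sketch, assuming it is run from a Blender 2.46 Text window so that bpy and BPyTextPlugin are importable:

import bpy
from BPyTextPlugin import *

line = "from Blender import Text as BText  # comment"
print get_context(line, 20)               # 0 (NORMAL): cursor sits just before 'Text'
print get_context(line, len(line))        # 3 (COMMENT): cursor is inside the trailing comment
print get_targets("v = BText.Get(", 13)   # ['BText', 'Get']

# get_imports() maps symbols to modules, e.g. 'BText' -> <module 'Blender.Text'>
txt = bpy.data.texts.active
for name, module in get_imports(txt).items():
    print name, '->', module
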
diff --git a/release/scripts/textplugin_imports.py b/release/scripts/textplugin_imports.py
new file mode 100644
index 00000000000..af335eb5418
--- /dev/null
+++ b/release/scripts/textplugin_imports.py
@@ -0,0 +1,71 @@
+#!BPY
+"""
+Name: 'Import Complete'
+Blender: 246
+Group: 'TextPlugin'
+Shortcut: 'Space'
+Tooltip: 'Lists modules when import or from is typed'
+"""
+
+# Only run if we have the required modules
+OK = False
+try:
+    import bpy, sys
+    from BPyTextPlugin import *
+    OK = True
+except:
+    pass
+
+def main():
+    txt = bpy.data.texts.active
+    line, c = current_line(txt)
+
+    # Check we are in a normal context
+    if get_context(line, c) != 0:
+        return
+
+    pos = line.rfind('from ', 0, c)
+
+    # No 'from' found
+    if pos == -1:
+        # Check instead for straight 'import'
+        pos2 = line.rfind('import ', 0, c)
+        if pos2 != -1 and pos2 == c-7:
+            items = [(m, 'm') for m in sys.builtin_module_names]
+            items.sort(cmp = suggest_cmp)
+            txt.suggest(items, '')
+
+    # Immediate 'from' before cursor
+    elif pos == c-5:
+        items = [(m, 'm') for m in sys.builtin_module_names]
+        items.sort(cmp = suggest_cmp)
+        txt.suggest(items, '')
+
+    # Found 'from' earlier
+    else:
+        pos2 = line.rfind('import ', pos+5, c)
+
+        # No 'import' found after 'from' so suggest it
+        if pos2 == -1:
+            txt.suggest([('import', 'k')], '')
+
+        # Immediate 'import' before cursor and after 'from...'
+        elif pos2 == c-7 or line[c-2] == ',':
+            between = line[pos+5:pos2-1].strip()
+            try:
+                mod = get_module(between)
+            except:
+                print 'Module not found:', between
+                return
+
+            items = [('*', 'k')]
+            for (k,v) in mod.__dict__.items():
+                if is_module(v): t = 'm'
+                elif callable(v): t = 'f'
+                else: t = 'v'
+                items.append((k, t))
+            items.sort(cmp = suggest_cmp)
+            txt.suggest(items, '')
+
+if OK:
+    main()
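
The offsets compared above follow from the literal lengths: 'import ' is seven characters and 'from ' is five, so an rfind() result of c-7 or c-5 means the keyword ends exactly at the cursor. A small illustration (not part of the patch):

line = "from Blender import "
c = len(line)                       # cursor just after the trailing space

pos = line.rfind('from ', 0, c)     # 0  -> a 'from' appears earlier in the line
pos2 = line.rfind('import ', 0, c)  # 13 -> 'import ' ends exactly at the cursor
assert pos2 == c - 7                # so the members of the Blender module are suggested
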
diff --git a/release/scripts/textplugin_membersuggest.py b/release/scripts/textplugin_membersuggest.py
new file mode 100644
index 00000000000..d1ab588ba86
--- /dev/null
+++ b/release/scripts/textplugin_membersuggest.py
@@ -0,0 +1,67 @@
+#!BPY
+"""
+Name: 'Member Suggest'
+Blender: 246
+Group: 'TextPlugin'
+Shortcut: 'Period'
+Tooltip: 'Lists members of the object preceding the cursor in the current text \
+space'
+"""
+
+# Only run if we have the required modules
+try:
+    import bpy
+    from BPyTextPlugin import *
+    OK = True
+except:
+    OK = False
+
+def main():
+    txt = bpy.data.texts.active
+    (line, c) = current_line(txt)
+
+    # Check we are in a normal context
+    if get_context(line, c) != NORMAL:
+        return
+
+    pre = get_targets(line, c)
+
+    if len(pre) <= 1:
+        return
+
+    list = []
+
+    imports = get_imports(txt)
+
+    # Identify the root (root.sub.sub.)
+    if imports.has_key(pre[0]):
+        obj = imports[pre[0]]
+    else:
+        return
+
+    # Step through sub-attributes
+    try:
+        for name in pre[1:-1]:
+            obj = getattr(obj, name)
+    except:
+        print "Attribute not found '%s' in '%s'" % (name, '.'.join(pre))
+        return
+
+    try:
+        attr = obj.__dict__.keys()
+    except:
+        attr = dir(obj)
+
+    for k in attr:
+        v = getattr(obj, k)
+        if is_module(v): t = 'm'
+        elif callable(v): t = 'f'
+        else: t = 'v'
+        list.append((k, t))
+
+    if list != []:
+        list.sort(cmp = suggest_cmp)
+        txt.suggest(list, pre[-1])
+
+if OK:
+    main()
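
All three plugins tag each suggestion with a single type character before handing the list to txt.suggest(): 'm' for modules, 'f' for callables and 'v' for plain values (the mapping implemented by type_char() in BPyTextPlugin), with 'k' added by hand for keywords. A sketch of how such a list is assembled, assuming the same in-Blender environment as the scripts above (not part of the patch):

import sys
from BPyTextPlugin import type_char, suggest_cmp

items = [('import', 'k')]                                 # keywords are tagged 'k' by hand
items += [(m, 'm') for m in sys.builtin_module_names]     # modules are tagged 'm'
items.append(('suggest_cmp', type_char(suggest_cmp)))     # callables resolve to 'f'
items.sort(cmp = suggest_cmp)                             # sort by name before txt.suggest()
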
diff --git a/release/scripts/textplugin_suggest.py b/release/scripts/textplugin_suggest.py
index c4fce9ad7a9..8e14dffca9c 100644
--- a/release/scripts/textplugin_suggest.py
+++ b/release/scripts/textplugin_suggest.py
@@ -1,301 +1,62 @@
#!BPY
"""
-Name: 'Suggest'
-Blender: 243
+Name: 'Suggest All'
+Blender: 246
Group: 'TextPlugin'
Shortcut: 'Ctrl+Space'
-Tooltip: 'Suggests completions for the word at the cursor in a python script'
+Tooltip: 'Performs suggestions based on the context of the cursor'
"""
-import bpy, __builtin__, token
-from Blender import Text
-from StringIO import StringIO
-from inspect import *
-from tokenize import generate_tokens
+# Only run if we have the required modules
+try:
+    import bpy
+    from BPyTextPlugin import *
+    OK = True
+except:
+    OK = False
-TK_TYPE = 0
-TK_TOKEN = 1
-TK_START = 2 #(srow, scol)
-TK_END = 3 #(erow, ecol)
-TK_LINE = 4
-TK_ROW = 0
-TK_COL = 1
-
-execs = [] # Used to establish the same import context across defs
-
-keywords = ['and', 'del', 'from', 'not', 'while', 'as', 'elif', 'global',
-            'or', 'with', 'assert', 'else', 'if', 'pass', 'yield',
-            'break', 'except', 'import', 'print', 'class', 'exec', 'in',
-            'raise', 'continue', 'finally', 'is', 'return', 'def', 'for',
-            'lambda', 'try' ]
-
-
-def getBuiltins():
-    builtins = []
-    bi = dir(__builtin__)
-    for k in bi:
-        v = getattr(__builtin__, k)
-        if ismodule(v): t='m'
-        elif callable(v): t='f'
-        else: t='v'
-        builtins.append((k, t))
-    return builtins
-
-
-def getKeywords():
-    global keywords
-    return [(k, 'k') for k in keywords]
-
-
-def getTokens(txt):
-    txt.reset()
-    g = generate_tokens(txt.readline)
-    tokens = []
-    for t in g: tokens.append(t)
-    return tokens
-
-
-def isNameChar(s):
-    return (s.isalnum() or s == '_')
-
-
-# Returns words preceding the cursor that are separated by periods as a list in
-# the same order
-def getCompletionSymbols(txt):
-    (l, c)= txt.getCursorPos()
-    lines = txt.asLines()
-    line = lines[l]
-    a=0
-    for a in range(1, c+1):
-        if not isNameChar(line[c-a]) and line[c-a]!='.':
-            a -= 1
-            break
-    return line[c-a:c].split('.')
-
-
-def getImports(txt):
-    imports = []
-
-    # Unfortunately, tokenize may fail if the script leaves brackets or strings
-    # open. For now we return an empty list until I have a better idea. Maybe
-    # parse manually.
-    try:
-        tokens = getTokens(txt)
-    except:
-        return []
-
-    for i in range(1, len(tokens)):
-
-        # Handle all import statements
-        if tokens[i-1][TK_TOKEN] == 'import':
-
-            # Find 'from' if it exists
-            fr = -1
-            for a in range(1, i):
-                if tokens[i-a][TK_TYPE] == token.NEWLINE: break
-                if tokens[i-a][TK_TOKEN] == 'from':
-                    fr = i-a
-                    break
-
-            # Handle: import ___[.___][,___[.___]]
-            if fr<0:
-                parent = ''
-
-            # Handle: from ___[.___] import ___[,___]
-            else: # fr>=0:
-                parent = ''.join([t[TK_TOKEN] for t in tokens[fr+1:i-1]])
-
-            module = ''
-            while i < len(tokens)-1:
-                if tokens[i][TK_TYPE] == token.NAME:
-
-                    # Get the module name
-                    module = module + tokens[i][TK_TOKEN]
-
-                    if tokens[i+1][TK_TOKEN] == '.':
-                        module += '.'
-                        i += 1
-                    else:
-                        # Add the module name and parent to the dict
-                        imports.append((module, parent))
-                        module = ''
-
-                elif tokens[i][TK_TOKEN]!=',':
-                    break
-
-                i += 1
-
-    # Process imports for: from ___ import *
-    for imp,frm in imports:
-        print imp, frm
-        if frm == '':
-            try: __import__(imp)
-            except: print '^ERR^'
-        else:
-            try: __import__(frm, globals(), locals(), [imp])
-            except: print '^ERR^'
-
-
-
-# Returns a list of tuples of symbol names and their types (name, type) where
-# type is one of:
-#   m (module/class) Has its own members (includes classes)
-#   v (variable) Has a type which may have its own members
-#   f (function) Callable and may have a return type (with its own members)
-# It also updates the global import context (via execs)
-def getGlobals(txt):
-    global execs
-
-    # Unfortunately, tokenize may fail if the script leaves brackets or strings
-    # open. For now we return an empty list, leaving builtins and keywords as
-    # the only globals. (on the TODO list)
-    try:
-        tokens = getTokens(txt)
-    except:
-        return []
-
-    globals = dict()
-    for i in range(len(tokens)):
-
-        # Handle all import statements
-        if i>=1 and tokens[i-1][TK_TOKEN]=='import':
-
-            # Find 'from' if it exists
-            fr= -1
-            for a in range(1, i):
-                if tokens[i-a][TK_TYPE]==token.NEWLINE: break
-                if tokens[i-a][TK_TOKEN]=='from':
-                    fr=i-a
-                    break
-
-            # Handle: import ___[,___]
-            if fr<0:
-
-                while True:
-                    if tokens[i][TK_TYPE]==token.NAME:
-                        # Add the import to the execs list
-                        x = tokens[i][TK_LINE].strip()
-                        k = tokens[i][TK_TOKEN]
-                        execs.append(x)
-                        exec 'try: '+x+'\nexcept: pass'
-
-                        # Add the symbol name to the return list
-                        globals[k] = 'm'
-                    elif tokens[i][TK_TOKEN]!=',':
-                        break
-                    i += 1
-
-            # Handle statement: from ___[.___] import ___[,___]
-            else: # fr>=0:
-
-                # Add the import to the execs list
-                x = tokens[i][TK_LINE].strip()
-                execs.append(x)
-                exec 'try: '+x+'\nexcept: pass'
-
-                # Import parent module so we can process it for sub modules
-                parent = ''.join([t[TK_TOKEN] for t in tokens[fr+1:i-1]])
-                exec 'try: import '+parent+'\nexcept: pass'
-
-                # All submodules, functions, etc.
-                if tokens[i][TK_TOKEN]=='*':
-
-                    # Add each symbol name to the return list
-                    d = eval(parent).__dict__.items()
-                    for k,v in d:
-                        if not globals.has_key(k) or not globals[k]:
-                            t='v'
-                            if ismodule(v): t='m'
-                            elif callable(v): t='f'
-                            globals[k] = t
-
-                # Specific function, submodule, etc.
-                else:
-                    while True:
-                        if tokens[i][TK_TYPE]==token.NAME:
-                            k = tokens[i][TK_TOKEN]
-                            if not globals.has_key(k) or not globals[k]:
-                                t='v'
-                                try:
-                                    v = eval(parent+'.'+k)
-                                    if ismodule(v): t='m'
-                                    elif callable(v): t='f'
-                                except: pass
-                                globals[k] = t
-                        elif tokens[i][TK_TOKEN]!=',':
-                            break
-                        i += 1
-
-        elif tokens[i][TK_TYPE]==token.NAME and tokens[i][TK_TOKEN] not in keywords and (i==0 or tokens[i-1][TK_TOKEN]!='.'):
-            k = tokens[i][TK_TOKEN]
-            if not globals.has_key(k) or not globals[k]:
-                t=None
-                if (i>0 and tokens[i-1][TK_TOKEN]=='def'):
-                    t='f'
-                else:
-                    t='v'
-                globals[k] = t
-
-    return globals.items()
-
-
-def globalSuggest(txt, cs):
-    globals = getGlobals(txt)
-    return globals
-
-
-# Only works for 'static' members (eg. Text.Get)
-def memberSuggest(txt, cs):
-    global execs
+def main():
+    txt = bpy.data.texts.active
+    (line, c) = current_line(txt)
-    # Populate the execs for imports
-    getGlobals(txt)
+    # Check we are in a normal context
+    if get_context(line, c) != NORMAL:
+        return
-    # Sometimes we have conditional includes which will fail if the module
-    # cannot be found. So we protect outselves in a try block
-    for x in execs:
-        exec 'try: '+x+'\nexcept: pass'
+    # Check what precedes the cursor and act accordingly:
+    #   Period (.)          - Run textplugin_membersuggest.py
+    #   'import' or 'from'  - Run textplugin_imports.py
+    #   Other               - Continue this script (global suggest)
+    pre = get_targets(line, c)
-    suggestions = dict()
-    (row, col) = txt.getCursorPos()
+    count = len(pre)
-    sub = cs[len(cs)-1]
+    if count > 1: # Period found
+        import textplugin_membersuggest
+        textplugin_membersuggest.main()
+        return
+    # Look for 'import' or 'from'
+    elif line.rfind('import ', 0, c) == c-7 or line.rfind('from ', 0, c) == c-5:
+        import textplugin_imports
+        textplugin_imports.main()
+        return
-    m=None
-    pre='.'.join(cs[:-1])
-    try:
-        m = eval(pre)
-    except:
-        print pre+ ' not found or not imported.'
+    list = []
-    if m!=None:
-        for k,v in m.__dict__.items():
-            if ismodule(v): t='m'
-            elif callable(v): t='f'
-            else: t='v'
-            suggestions[k] = t
+    for k in KEYWORDS:
+        list.append((k, 'k'))
-    return suggestions.items()
-
-
-def cmp0(x, y):
-    return cmp(x[0], y[0])
-
-
-def main():
-    txt = bpy.data.texts.active
-    if txt==None: return
+    for k, v in get_builtins().items():
+        list.append((k, type_char(v)))
-    cs = getCompletionSymbols(txt)
+    for k, v in get_imports(txt).items():
+        list.append((k, type_char(v)))
-    if len(cs)<=1:
-        l = globalSuggest(txt, cs)
-        l.extend(getBuiltins())
-        l.extend(getKeywords())
-    else:
-        l = memberSuggest(txt, cs)
+    for k, v in get_defs(txt).items():
+        list.append((k, 'f'))
-    l.sort(cmp=cmp0)
-    txt.suggest(l, cs[len(cs)-1])
+    list.sort(cmp = suggest_cmp)
+    txt.suggest(list, pre[-1])
-main()
+if OK:
+    main()
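
When neither a period nor an import statement precedes the cursor, the new 'Suggest All' falls through to a global suggestion list built from four sources: keywords, builtins, the script's imports and its own defs. A condensed restatement of that final branch, for reference only and under the same in-Blender assumptions as above:

import bpy
from BPyTextPlugin import *

txt = bpy.data.texts.active
line, c = current_line(txt)
pre = get_targets(line, c)

items = [(k, 'k') for k in KEYWORDS]                                # Python keywords
items += [(k, type_char(v)) for k, v in get_builtins().items()]     # __builtin__ members
items += [(k, type_char(v)) for k, v in get_imports(txt).items()]   # imported symbols
items += [(k, 'f') for k in get_defs(txt).keys()]                   # defs in this script
items.sort(cmp = suggest_cmp)
txt.suggest(items, pre[-1])                                          # pre[-1] is the partial word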