diff --git a/Makefile b/Makefile index 8ee4aa14f2b0b..dd30f065bd00f 100644 --- a/Makefile +++ b/Makefile @@ -31,7 +31,7 @@ inplace: test-code: in $(NOSETESTS) -s -v sklearn test-sphinxext: - $(NOSETESTS) -s -v doc/sphinxext/ + $(NOSETESTS) -s -v doc/sphinxext/ -e numpy_ext test-doc: ifeq ($(BITS),64) $(NOSETESTS) -s -v doc/*.rst doc/modules/ doc/datasets/ \ diff --git a/build_tools/travis/flake8_diff.sh b/build_tools/travis/flake8_diff.sh index b39d16a4c9af0..ae4396b263f92 100755 --- a/build_tools/travis/flake8_diff.sh +++ b/build_tools/travis/flake8_diff.sh @@ -121,7 +121,7 @@ echo '-------------------------------------------------------------------------- # We need the following command to exit with 0 hence the echo in case # there is no match MODIFIED_FILES=$(git diff --name-only $COMMIT_RANGE | grep -v 'sklearn/externals' | \ - grep -v 'doc/sphinxext/sphinx_gallery' || echo "no_match") + grep -v 'doc/sphinxext/sphinx_gallery' | grep -v 'doc/sphinxext' || echo "no_match") if [[ "$MODIFIED_FILES" == "no_match" ]]; then echo "No file outside sklearn/externals and doc/sphinxext/sphinx_gallery has been modified" diff --git a/doc/conf.py b/doc/conf.py index e60fc167f9d49..9dfe5f3a5cb15 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -38,6 +38,11 @@ 'sphinx_issues', ] +# this is needed for some reason... +# http://stackoverflow.com/questions/12206334/sphinx-autosummary-toctree-contains-reference-to-nonexisting-document-warnings +numpydoc_show_class_members = False + + # pngmath / imgmath compatibility layer for different sphinx versions import sphinx from distutils.version import LooseVersion diff --git a/doc/sphinxext/numpy_ext/__init__.py b/doc/sphinxext/numpy_ext/__init__.py index e69de29bb2d1d..0fce2cf747e23 100644 --- a/doc/sphinxext/numpy_ext/__init__.py +++ b/doc/sphinxext/numpy_ext/__init__.py @@ -0,0 +1,3 @@ +from __future__ import division, absolute_import, print_function + +from .numpydoc import setup diff --git a/doc/sphinxext/numpy_ext/comment_eater.py b/doc/sphinxext/numpy_ext/comment_eater.py new file mode 100644 index 0000000000000..8cddd3305f0bc --- /dev/null +++ b/doc/sphinxext/numpy_ext/comment_eater.py @@ -0,0 +1,169 @@ +from __future__ import division, absolute_import, print_function + +import sys +if sys.version_info[0] >= 3: + from io import StringIO +else: + from io import StringIO + +import compiler +import inspect +import textwrap +import tokenize + +from .compiler_unparse import unparse + + +class Comment(object): + """ A comment block. + """ + is_comment = True + def __init__(self, start_lineno, end_lineno, text): + # int : The first line number in the block. 1-indexed. + self.start_lineno = start_lineno + # int : The last line number. Inclusive! + self.end_lineno = end_lineno + # str : The text block including '#' character but not any leading spaces. + self.text = text + + def add(self, string, start, end, line): + """ Add a new comment line. + """ + self.start_lineno = min(self.start_lineno, start[0]) + self.end_lineno = max(self.end_lineno, end[0]) + self.text += string + + def __repr__(self): + return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno, + self.end_lineno, self.text) + + +class NonComment(object): + """ A non-comment block of code. + """ + is_comment = False + def __init__(self, start_lineno, end_lineno): + self.start_lineno = start_lineno + self.end_lineno = end_lineno + + def add(self, string, start, end, line): + """ Add lines to the block. + """ + if string.strip(): + # Only add if not entirely whitespace. 
+ self.start_lineno = min(self.start_lineno, start[0]) + self.end_lineno = max(self.end_lineno, end[0]) + + def __repr__(self): + return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno, + self.end_lineno) + + +class CommentBlocker(object): + """ Pull out contiguous comment blocks. + """ + def __init__(self): + # Start with a dummy. + self.current_block = NonComment(0, 0) + + # All of the blocks seen so far. + self.blocks = [] + + # The index mapping lines of code to their associated comment blocks. + self.index = {} + + def process_file(self, file): + """ Process a file object. + """ + if sys.version_info[0] >= 3: + nxt = file.__next__ + else: + nxt = file.next + for token in tokenize.generate_tokens(nxt): + self.process_token(*token) + self.make_index() + + def process_token(self, kind, string, start, end, line): + """ Process a single token. + """ + if self.current_block.is_comment: + if kind == tokenize.COMMENT: + self.current_block.add(string, start, end, line) + else: + self.new_noncomment(start[0], end[0]) + else: + if kind == tokenize.COMMENT: + self.new_comment(string, start, end, line) + else: + self.current_block.add(string, start, end, line) + + def new_noncomment(self, start_lineno, end_lineno): + """ We are transitioning from a noncomment to a comment. + """ + block = NonComment(start_lineno, end_lineno) + self.blocks.append(block) + self.current_block = block + + def new_comment(self, string, start, end, line): + """ Possibly add a new comment. + + Only adds a new comment if this comment is the only thing on the line. + Otherwise, it extends the noncomment block. + """ + prefix = line[:start[1]] + if prefix.strip(): + # Oops! Trailing comment, not a comment block. + self.current_block.add(string, start, end, line) + else: + # A comment block. + block = Comment(start[0], end[0], string) + self.blocks.append(block) + self.current_block = block + + def make_index(self): + """ Make the index mapping lines of actual code to their associated + prefix comments. + """ + for prev, block in zip(self.blocks[:-1], self.blocks[1:]): + if not block.is_comment: + self.index[block.start_lineno] = prev + + def search_for_comment(self, lineno, default=None): + """ Find the comment block just before the given line number. + + Returns None (or the specified default) if there is no such block. + """ + if not self.index: + self.make_index() + block = self.index.get(lineno, None) + text = getattr(block, 'text', default) + return text + + +def strip_comment_marker(text): + """ Strip # markers at the front of a block of comment text. + """ + lines = [] + for line in text.splitlines(): + lines.append(line.lstrip('#')) + text = textwrap.dedent('\n'.join(lines)) + return text + + +def get_class_traits(klass): + """ Yield all of the documentation for trait definitions on a class object. + """ + # FIXME: gracefully handle errors here or in the caller? + source = inspect.getsource(klass) + cb = CommentBlocker() + cb.process_file(StringIO(source)) + mod_ast = compiler.parse(source) + class_ast = mod_ast.node.nodes[0] + for node in class_ast.code.nodes: + # FIXME: handle other kinds of assignments? 
+ if isinstance(node, compiler.ast.Assign): + name = node.nodes[0].name + rhs = unparse(node.expr).strip() + doc = strip_comment_marker(cb.search_for_comment(node.lineno, default='')) + yield name, rhs, doc + diff --git a/doc/sphinxext/numpy_ext/compiler_unparse.py b/doc/sphinxext/numpy_ext/compiler_unparse.py new file mode 100644 index 0000000000000..8933a83db3f23 --- /dev/null +++ b/doc/sphinxext/numpy_ext/compiler_unparse.py @@ -0,0 +1,865 @@ +""" Turn compiler.ast structures back into executable python code. + + The unparse method takes a compiler.ast tree and transforms it back into + valid python code. It is incomplete and currently only works for + import statements, function calls, function definitions, assignments, and + basic expressions. + + Inspired by python-2.5-svn/Demo/parser/unparse.py + + fixme: We may want to move to using _ast trees because the compiler for + them is about 6 times faster than compiler.compile. +""" +from __future__ import division, absolute_import, print_function + +import sys +from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add + +if sys.version_info[0] >= 3: + from io import StringIO +else: + from StringIO import StringIO + +def unparse(ast, single_line_functions=False): + s = StringIO() + UnparseCompilerAst(ast, s, single_line_functions) + return s.getvalue().lstrip() + +op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2, + 'compiler.ast.Add':1, 'compiler.ast.Sub':1 } + +class UnparseCompilerAst: + """ Methods in this class recursively traverse an AST and + output source code for the abstract syntax; original formatting + is disregarged. + """ + + ######################################################################### + # object interface. + ######################################################################### + + def __init__(self, tree, file = sys.stdout, single_line_functions=False): + """ Unparser(tree, file=sys.stdout) -> None. + + Print the source for tree to file. + """ + self.f = file + self._single_func = single_line_functions + self._do_indent = True + self._indent = 0 + self._dispatch(tree) + self._write("\n") + self.f.flush() + + ######################################################################### + # Unparser private interface. + ######################################################################### + + ### format, output, and dispatch methods ################################ + + def _fill(self, text = ""): + "Indent a piece of text, according to the current indentation level" + if self._do_indent: + self._write("\n"+" "*self._indent + text) + else: + self._write(text) + + def _write(self, text): + "Append a piece of text to the current line." + self.f.write(text) + + def _enter(self): + "Print ':', and increase the indentation." + self._write(": ") + self._indent += 1 + + def _leave(self): + "Decrease the indentation level." + self._indent -= 1 + + def _dispatch(self, tree): + "_dispatcher function, _dispatching tree type T to method _T." + if isinstance(tree, list): + for t in tree: + self._dispatch(t) + return + meth = getattr(self, "_"+tree.__class__.__name__) + if tree.__class__.__name__ == 'NoneType' and not self._do_indent: + return + meth(tree) + + + ######################################################################### + # compiler.ast unparsing methods. + # + # There should be one method per concrete grammar type. They are + # organized in alphabetical order. 
+ ######################################################################### + + def _Add(self, t): + self.__binary_op(t, '+') + + def _And(self, t): + self._write(" (") + for i, node in enumerate(t.nodes): + self._dispatch(node) + if i != len(t.nodes)-1: + self._write(") and (") + self._write(")") + + def _AssAttr(self, t): + """ Handle assigning an attribute of an object + """ + self._dispatch(t.expr) + self._write('.'+t.attrname) + + def _Assign(self, t): + """ Expression Assignment such as "a = 1". + + This only handles assignment in expressions. Keyword assignment + is handled separately. + """ + self._fill() + for target in t.nodes: + self._dispatch(target) + self._write(" = ") + self._dispatch(t.expr) + if not self._do_indent: + self._write('; ') + + def _AssName(self, t): + """ Name on left hand side of expression. + + Treat just like a name on the right side of an expression. + """ + self._Name(t) + + def _AssTuple(self, t): + """ Tuple on left hand side of an expression. + """ + + # _write each elements, separated by a comma. + for element in t.nodes[:-1]: + self._dispatch(element) + self._write(", ") + + # Handle the last one without writing comma + last_element = t.nodes[-1] + self._dispatch(last_element) + + def _AugAssign(self, t): + """ +=,-=,*=,/=,**=, etc. operations + """ + + self._fill() + self._dispatch(t.node) + self._write(' '+t.op+' ') + self._dispatch(t.expr) + if not self._do_indent: + self._write(';') + + def _Bitand(self, t): + """ Bit and operation. + """ + + for i, node in enumerate(t.nodes): + self._write("(") + self._dispatch(node) + self._write(")") + if i != len(t.nodes)-1: + self._write(" & ") + + def _Bitor(self, t): + """ Bit or operation + """ + + for i, node in enumerate(t.nodes): + self._write("(") + self._dispatch(node) + self._write(")") + if i != len(t.nodes)-1: + self._write(" | ") + + def _CallFunc(self, t): + """ Function call. + """ + self._dispatch(t.node) + self._write("(") + comma = False + for e in t.args: + if comma: self._write(", ") + else: comma = True + self._dispatch(e) + if t.star_args: + if comma: self._write(", ") + else: comma = True + self._write("*") + self._dispatch(t.star_args) + if t.dstar_args: + if comma: self._write(", ") + else: comma = True + self._write("**") + self._dispatch(t.dstar_args) + self._write(")") + + def _Compare(self, t): + self._dispatch(t.expr) + for op, expr in t.ops: + self._write(" " + op + " ") + self._dispatch(expr) + + def _Const(self, t): + """ A constant value such as an integer value, 3, or a string, "hello". + """ + self._dispatch(t.value) + + def _Decorators(self, t): + """ Handle function decorators (eg. @has_units) + """ + for node in t.nodes: + self._dispatch(node) + + def _Dict(self, t): + self._write("{") + for i, (k, v) in enumerate(t.items): + self._dispatch(k) + self._write(": ") + self._dispatch(v) + if i < len(t.items)-1: + self._write(", ") + self._write("}") + + def _Discard(self, t): + """ Node for when return value is ignored such as in "foo(a)". + """ + self._fill() + self._dispatch(t.expr) + + def _Div(self, t): + self.__binary_op(t, '/') + + def _Ellipsis(self, t): + self._write("...") + + def _From(self, t): + """ Handle "from xyz import foo, bar as baz". + """ + # fixme: Are From and ImportFrom handled differently? 
+ self._fill("from ") + self._write(t.modname) + self._write(" import ") + for i, (name,asname) in enumerate(t.names): + if i != 0: + self._write(", ") + self._write(name) + if asname is not None: + self._write(" as "+asname) + + def _Function(self, t): + """ Handle function definitions + """ + if t.decorators is not None: + self._fill("@") + self._dispatch(t.decorators) + self._fill("def "+t.name + "(") + defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults) + for i, arg in enumerate(zip(t.argnames, defaults)): + self._write(arg[0]) + if arg[1] is not None: + self._write('=') + self._dispatch(arg[1]) + if i < len(t.argnames)-1: + self._write(', ') + self._write(")") + if self._single_func: + self._do_indent = False + self._enter() + self._dispatch(t.code) + self._leave() + self._do_indent = True + + def _Getattr(self, t): + """ Handle getting an attribute of an object + """ + if isinstance(t.expr, (Div, Mul, Sub, Add)): + self._write('(') + self._dispatch(t.expr) + self._write(')') + else: + self._dispatch(t.expr) + + self._write('.'+t.attrname) + + def _If(self, t): + self._fill() + + for i, (compare,code) in enumerate(t.tests): + if i == 0: + self._write("if ") + else: + self._write("elif ") + self._dispatch(compare) + self._enter() + self._fill() + self._dispatch(code) + self._leave() + self._write("\n") + + if t.else_ is not None: + self._write("else") + self._enter() + self._fill() + self._dispatch(t.else_) + self._leave() + self._write("\n") + + def _IfExp(self, t): + self._dispatch(t.then) + self._write(" if ") + self._dispatch(t.test) + + if t.else_ is not None: + self._write(" else (") + self._dispatch(t.else_) + self._write(")") + + def _Import(self, t): + """ Handle "import xyz.foo". + """ + self._fill("import ") + + for i, (name,asname) in enumerate(t.names): + if i != 0: + self._write(", ") + self._write(name) + if asname is not None: + self._write(" as "+asname) + + def _Keyword(self, t): + """ Keyword value assignment within function calls and definitions. 
+ """ + self._write(t.name) + self._write("=") + self._dispatch(t.expr) + + def _List(self, t): + self._write("[") + for i,node in enumerate(t.nodes): + self._dispatch(node) + if i < len(t.nodes)-1: + self._write(", ") + self._write("]") + + def _Module(self, t): + if t.doc is not None: + self._dispatch(t.doc) + self._dispatch(t.node) + + def _Mul(self, t): + self.__binary_op(t, '*') + + def _Name(self, t): + self._write(t.name) + + def _NoneType(self, t): + self._write("None") + + def _Not(self, t): + self._write('not (') + self._dispatch(t.expr) + self._write(')') + + def _Or(self, t): + self._write(" (") + for i, node in enumerate(t.nodes): + self._dispatch(node) + if i != len(t.nodes)-1: + self._write(") or (") + self._write(")") + + def _Pass(self, t): + self._write("pass\n") + + def _Printnl(self, t): + self._fill("print ") + if t.dest: + self._write(">> ") + self._dispatch(t.dest) + self._write(", ") + comma = False + for node in t.nodes: + if comma: self._write(', ') + else: comma = True + self._dispatch(node) + + def _Power(self, t): + self.__binary_op(t, '**') + + def _Return(self, t): + self._fill("return ") + if t.value: + if isinstance(t.value, Tuple): + text = ', '.join([ name.name for name in t.value.asList() ]) + self._write(text) + else: + self._dispatch(t.value) + if not self._do_indent: + self._write('; ') + + def _Slice(self, t): + self._dispatch(t.expr) + self._write("[") + if t.lower: + self._dispatch(t.lower) + self._write(":") + if t.upper: + self._dispatch(t.upper) + #if t.step: + # self._write(":") + # self._dispatch(t.step) + self._write("]") + + def _Sliceobj(self, t): + for i, node in enumerate(t.nodes): + if i != 0: + self._write(":") + if not (isinstance(node, Const) and node.value is None): + self._dispatch(node) + + def _Stmt(self, tree): + for node in tree.nodes: + self._dispatch(node) + + def _Sub(self, t): + self.__binary_op(t, '-') + + def _Subscript(self, t): + self._dispatch(t.expr) + self._write("[") + for i, value in enumerate(t.subs): + if i != 0: + self._write(",") + self._dispatch(value) + self._write("]") + + def _TryExcept(self, t): + self._fill("try") + self._enter() + self._dispatch(t.body) + self._leave() + + for handler in t.handlers: + self._fill('except ') + self._dispatch(handler[0]) + if handler[1] is not None: + self._write(', ') + self._dispatch(handler[1]) + self._enter() + self._dispatch(handler[2]) + self._leave() + + if t.else_: + self._fill("else") + self._enter() + self._dispatch(t.else_) + self._leave() + + def _Tuple(self, t): + + if not t.nodes: + # Empty tuple. + self._write("()") + else: + self._write("(") + + # _write each elements, separated by a comma. 
+ for element in t.nodes[:-1]: + self._dispatch(element) + self._write(", ") + + # Handle the last one without writing comma + last_element = t.nodes[-1] + self._dispatch(last_element) + + self._write(")") + + def _UnaryAdd(self, t): + self._write("+") + self._dispatch(t.expr) + + def _UnarySub(self, t): + self._write("-") + self._dispatch(t.expr) + + def _With(self, t): + self._fill('with ') + self._dispatch(t.expr) + if t.vars: + self._write(' as ') + self._dispatch(t.vars.name) + self._enter() + self._dispatch(t.body) + self._leave() + self._write('\n') + + def _int(self, t): + self._write(repr(t)) + + def __binary_op(self, t, symbol): + # Check if parenthesis are needed on left side and then dispatch + has_paren = False + left_class = str(t.left.__class__) + if (left_class in op_precedence.keys() and + op_precedence[left_class] < op_precedence[str(t.__class__)]): + has_paren = True + if has_paren: + self._write('(') + self._dispatch(t.left) + if has_paren: + self._write(')') + # Write the appropriate symbol for operator + self._write(symbol) + # Check if parenthesis are needed on the right side and then dispatch + has_paren = False + right_class = str(t.right.__class__) + if (right_class in op_precedence.keys() and + op_precedence[right_class] < op_precedence[str(t.__class__)]): + has_paren = True + if has_paren: + self._write('(') + self._dispatch(t.right) + if has_paren: + self._write(')') + + def _float(self, t): + # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001' + # We prefer str here. + self._write(str(t)) + + def _str(self, t): + self._write(repr(t)) + + def _tuple(self, t): + self._write(str(t)) + + ######################################################################### + # These are the methods from the _ast modules unparse. + # + # As our needs to handle more advanced code increase, we may want to + # modify some of the methods below so that they work for compiler.ast. + ######################################################################### + +# # stmt +# def _Expr(self, tree): +# self._fill() +# self._dispatch(tree.value) +# +# def _Import(self, t): +# self._fill("import ") +# first = True +# for a in t.names: +# if first: +# first = False +# else: +# self._write(", ") +# self._write(a.name) +# if a.asname: +# self._write(" as "+a.asname) +# +## def _ImportFrom(self, t): +## self._fill("from ") +## self._write(t.module) +## self._write(" import ") +## for i, a in enumerate(t.names): +## if i == 0: +## self._write(", ") +## self._write(a.name) +## if a.asname: +## self._write(" as "+a.asname) +## # XXX(jpe) what is level for? 
+## +# +# def _Break(self, t): +# self._fill("break") +# +# def _Continue(self, t): +# self._fill("continue") +# +# def _Delete(self, t): +# self._fill("del ") +# self._dispatch(t.targets) +# +# def _Assert(self, t): +# self._fill("assert ") +# self._dispatch(t.test) +# if t.msg: +# self._write(", ") +# self._dispatch(t.msg) +# +# def _Exec(self, t): +# self._fill("exec ") +# self._dispatch(t.body) +# if t.globals: +# self._write(" in ") +# self._dispatch(t.globals) +# if t.locals: +# self._write(", ") +# self._dispatch(t.locals) +# +# def _Print(self, t): +# self._fill("print ") +# do_comma = False +# if t.dest: +# self._write(">>") +# self._dispatch(t.dest) +# do_comma = True +# for e in t.values: +# if do_comma:self._write(", ") +# else:do_comma=True +# self._dispatch(e) +# if not t.nl: +# self._write(",") +# +# def _Global(self, t): +# self._fill("global") +# for i, n in enumerate(t.names): +# if i != 0: +# self._write(",") +# self._write(" " + n) +# +# def _Yield(self, t): +# self._fill("yield") +# if t.value: +# self._write(" (") +# self._dispatch(t.value) +# self._write(")") +# +# def _Raise(self, t): +# self._fill('raise ') +# if t.type: +# self._dispatch(t.type) +# if t.inst: +# self._write(", ") +# self._dispatch(t.inst) +# if t.tback: +# self._write(", ") +# self._dispatch(t.tback) +# +# +# def _TryFinally(self, t): +# self._fill("try") +# self._enter() +# self._dispatch(t.body) +# self._leave() +# +# self._fill("finally") +# self._enter() +# self._dispatch(t.finalbody) +# self._leave() +# +# def _excepthandler(self, t): +# self._fill("except ") +# if t.type: +# self._dispatch(t.type) +# if t.name: +# self._write(", ") +# self._dispatch(t.name) +# self._enter() +# self._dispatch(t.body) +# self._leave() +# +# def _ClassDef(self, t): +# self._write("\n") +# self._fill("class "+t.name) +# if t.bases: +# self._write("(") +# for a in t.bases: +# self._dispatch(a) +# self._write(", ") +# self._write(")") +# self._enter() +# self._dispatch(t.body) +# self._leave() +# +# def _FunctionDef(self, t): +# self._write("\n") +# for deco in t.decorators: +# self._fill("@") +# self._dispatch(deco) +# self._fill("def "+t.name + "(") +# self._dispatch(t.args) +# self._write(")") +# self._enter() +# self._dispatch(t.body) +# self._leave() +# +# def _For(self, t): +# self._fill("for ") +# self._dispatch(t.target) +# self._write(" in ") +# self._dispatch(t.iter) +# self._enter() +# self._dispatch(t.body) +# self._leave() +# if t.orelse: +# self._fill("else") +# self._enter() +# self._dispatch(t.orelse) +# self._leave +# +# def _While(self, t): +# self._fill("while ") +# self._dispatch(t.test) +# self._enter() +# self._dispatch(t.body) +# self._leave() +# if t.orelse: +# self._fill("else") +# self._enter() +# self._dispatch(t.orelse) +# self._leave +# +# # expr +# def _Str(self, tree): +# self._write(repr(tree.s)) +## +# def _Repr(self, t): +# self._write("`") +# self._dispatch(t.value) +# self._write("`") +# +# def _Num(self, t): +# self._write(repr(t.n)) +# +# def _ListComp(self, t): +# self._write("[") +# self._dispatch(t.elt) +# for gen in t.generators: +# self._dispatch(gen) +# self._write("]") +# +# def _GeneratorExp(self, t): +# self._write("(") +# self._dispatch(t.elt) +# for gen in t.generators: +# self._dispatch(gen) +# self._write(")") +# +# def _comprehension(self, t): +# self._write(" for ") +# self._dispatch(t.target) +# self._write(" in ") +# self._dispatch(t.iter) +# for if_clause in t.ifs: +# self._write(" if ") +# self._dispatch(if_clause) +# +# def _IfExp(self, t): +# 
self._dispatch(t.body) +# self._write(" if ") +# self._dispatch(t.test) +# if t.orelse: +# self._write(" else ") +# self._dispatch(t.orelse) +# +# unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"} +# def _UnaryOp(self, t): +# self._write(self.unop[t.op.__class__.__name__]) +# self._write("(") +# self._dispatch(t.operand) +# self._write(")") +# +# binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%", +# "LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&", +# "FloorDiv":"//", "Pow": "**"} +# def _BinOp(self, t): +# self._write("(") +# self._dispatch(t.left) +# self._write(")" + self.binop[t.op.__class__.__name__] + "(") +# self._dispatch(t.right) +# self._write(")") +# +# boolops = {_ast.And: 'and', _ast.Or: 'or'} +# def _BoolOp(self, t): +# self._write("(") +# self._dispatch(t.values[0]) +# for v in t.values[1:]: +# self._write(" %s " % self.boolops[t.op.__class__]) +# self._dispatch(v) +# self._write(")") +# +# def _Attribute(self,t): +# self._dispatch(t.value) +# self._write(".") +# self._write(t.attr) +# +## def _Call(self, t): +## self._dispatch(t.func) +## self._write("(") +## comma = False +## for e in t.args: +## if comma: self._write(", ") +## else: comma = True +## self._dispatch(e) +## for e in t.keywords: +## if comma: self._write(", ") +## else: comma = True +## self._dispatch(e) +## if t.starargs: +## if comma: self._write(", ") +## else: comma = True +## self._write("*") +## self._dispatch(t.starargs) +## if t.kwargs: +## if comma: self._write(", ") +## else: comma = True +## self._write("**") +## self._dispatch(t.kwargs) +## self._write(")") +# +# # slice +# def _Index(self, t): +# self._dispatch(t.value) +# +# def _ExtSlice(self, t): +# for i, d in enumerate(t.dims): +# if i != 0: +# self._write(': ') +# self._dispatch(d) +# +# # others +# def _arguments(self, t): +# first = True +# nonDef = len(t.args)-len(t.defaults) +# for a in t.args[0:nonDef]: +# if first:first = False +# else: self._write(", ") +# self._dispatch(a) +# for a,d in zip(t.args[nonDef:], t.defaults): +# if first:first = False +# else: self._write(", ") +# self._dispatch(a), +# self._write("=") +# self._dispatch(d) +# if t.vararg: +# if first:first = False +# else: self._write(", ") +# self._write("*"+t.vararg) +# if t.kwarg: +# if first:first = False +# else: self._write(", ") +# self._write("**"+t.kwarg) +# +## def _keyword(self, t): +## self._write(t.arg) +## self._write("=") +## self._dispatch(t.value) +# +# def _Lambda(self, t): +# self._write("lambda ") +# self._dispatch(t.args) +# self._write(": ") +# self._dispatch(t.body) + + + diff --git a/doc/sphinxext/numpy_ext/docscrape.py b/doc/sphinxext/numpy_ext/docscrape.py index 93097893fcaf0..ed1760b0cc482 100644 --- a/doc/sphinxext/numpy_ext/docscrape.py +++ b/doc/sphinxext/numpy_ext/docscrape.py @@ -1,17 +1,15 @@ """Extract reference documentation from the NumPy source tree. 
""" +from __future__ import division, absolute_import, print_function import inspect import textwrap import re import pydoc from warnings import warn -# Try Python 2 first, otherwise load from Python 3 -try: - from StringIO import StringIO -except: - from io import StringIO +import collections +import sys class Reader(object): @@ -64,7 +62,7 @@ def read_to_condition(self, condition_func): return self[start:self._l] self._l += 1 if self.eof(): - return self[start:self._l + 1] + return self[start:self._l+1] return [] def read_to_next_empty_line(self): @@ -72,6 +70,7 @@ def read_to_next_empty_line(self): def is_empty(line): return not line.strip() + return self.read_to_condition(is_empty) def read_to_next_unindented_line(self): @@ -89,8 +88,17 @@ def is_empty(self): return not ''.join(self._str).strip() -class NumpyDocString(object): +class ParseError(Exception): + def __str__(self): + message = self.message + if hasattr(self, 'docstring'): + message = "%s in %r" % (message, self.docstring) + return message + + +class NumpyDocString(collections.Mapping): def __init__(self, docstring, config={}): + orig_docstring = docstring docstring = textwrap.dedent(docstring).split('\n') self._doc = Reader(docstring) @@ -100,6 +108,7 @@ def __init__(self, docstring, config={}): 'Extended Summary': [], 'Parameters': [], 'Returns': [], + 'Yields': [], 'Raises': [], 'Warns': [], 'Other Parameters': [], @@ -113,7 +122,11 @@ def __init__(self, docstring, config={}): 'index': {} } - self._parse() + try: + self._parse() + except ParseError as e: + e.docstring = orig_docstring + raise def __getitem__(self, key): return self._parsed_data[key] @@ -124,6 +137,12 @@ def __setitem__(self, key, val): else: self._parsed_data[key] = val + def __iter__(self): + return iter(self._parsed_data) + + def __len__(self): + return len(self._parsed_data) + def _is_at_section(self): self._doc.seek_next_non_empty_line() @@ -135,8 +154,8 @@ def _is_at_section(self): if l1.startswith('.. 
index::'): return True - l2 = self._doc.peek(1).strip() # ---------- or ========== - return l2.startswith('-' * len(l1)) or l2.startswith('=' * len(l1)) + l2 = self._doc.peek(1).strip() # ---------- or ========== + return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1)) def _strip(self, doc): i = 0 @@ -149,7 +168,7 @@ def _strip(self, doc): if line.strip(): break - return doc[i:len(doc) - j] + return doc[i:len(doc)-j] def _read_to_next_section(self): section = self._doc.read_to_next_empty_line() @@ -213,7 +232,7 @@ def parse_item_name(text): return g[3], None else: return g[2], g[1] - raise ValueError("%s is not a item name" % text) + raise ParseError("%s is not a item name" % text) def push_item(name, rest): if not name: @@ -241,7 +260,8 @@ def push_item(name, rest): current_func = None if ',' in line: for func in line.split(','): - push_item(func, []) + if func.strip(): + push_item(func, []) elif line.strip(): current_func = line elif current_func is not None: @@ -273,13 +293,17 @@ def _parse_summary(self): if self._is_at_section(): return - summary = self._doc.read_to_next_empty_line() - summary_str = " ".join([s.strip() for s in summary]).strip() - if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str): - self['Signature'] = summary_str - if not self._is_at_section(): - self['Summary'] = self._doc.read_to_next_empty_line() - else: + # If several signatures present, take the last one + while True: + summary = self._doc.read_to_next_empty_line() + summary_str = " ".join([s.strip() for s in summary]).strip() + if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str): + self['Signature'] = summary_str + if not self._is_at_section(): + continue + break + + if summary is not None: self['Summary'] = summary if not self._is_at_section(): @@ -289,12 +313,23 @@ def _parse(self): self._doc.reset() self._parse_summary() - for (section, content) in self._read_sections(): + sections = list(self._read_sections()) + section_names = set([section for section, content in sections]) + + has_returns = 'Returns' in section_names + has_yields = 'Yields' in section_names + # We could do more tests, but we are not. Arbitrarily. + if has_returns and has_yields: + msg = 'Docstring contains both a Returns and Yields section.' + raise ValueError(msg) + + for (section, content) in sections: if not section.startswith('..'): - section = ' '.join([s.capitalize() - for s in section.split(' ')]) - if section in ('Parameters', 'Attributes', 'Methods', - 'Returns', 'Raises', 'Warns'): + section = (s.capitalize() for s in section.split(' ')) + section = ' '.join(section) + if section in ('Parameters', 'Returns', 'Yields', 'Raises', + 'Warns', 'Other Parameters', 'Attributes', + 'Methods'): self[section] = self._parse_param_list(content) elif section.startswith('.. 
index::'): self['index'] = self._parse_index(section, content) @@ -306,12 +341,12 @@ def _parse(self): # string conversion routines def _str_header(self, name, symbol='-'): - return [name, len(name) * symbol] + return [name, len(name)*symbol] def _str_indent(self, doc, indent=4): out = [] for line in doc: - out += [' ' * indent + line] + out += [' '*indent + line] return out def _str_signature(self): @@ -337,7 +372,10 @@ def _str_param_list(self, name): if self[name]: out += self._str_header(name) for param, param_type, desc in self[name]: - out += ['%s : %s' % (param, param_type)] + if param_type: + out += ['%s : %s' % (param, param_type)] + else: + out += [param] out += self._str_indent(desc) out += [''] return out @@ -380,7 +418,7 @@ def _str_index(self): idx = self['index'] out = [] out += ['.. index:: %s' % idx.get('default', '')] - for section, references in idx.iteritems(): + for section, references in idx.items(): if section == 'default': continue out += [' :%s: %s' % (section, ', '.join(references))] @@ -391,7 +429,8 @@ def __str__(self, func_role=''): out += self._str_signature() out += self._str_summary() out += self._str_extended_summary() - for param_list in ('Parameters', 'Returns', 'Raises'): + for param_list in ('Parameters', 'Returns', 'Yields', + 'Other Parameters', 'Raises', 'Warns'): out += self._str_param_list(param_list) out += self._str_section('Warnings') out += self._str_see_also(func_role) @@ -404,7 +443,7 @@ def __str__(self, func_role=''): def indent(str, indent=4): - indent_str = ' ' * indent + indent_str = ' '*indent if str is None: return indent_str lines = str.split('\n') @@ -417,7 +456,7 @@ def dedent_lines(lines): def header(text, style='-'): - return text + '\n' + style * len(text) + '\n' + return text + '\n' + style*len(text) + '\n' class FunctionDoc(NumpyDocString): @@ -434,12 +473,17 @@ def __init__(self, func, role='func', doc=None, config={}): if not self['Signature'] and func is not None: func, func_name = self.get_func() try: - # try to read signature - argspec = inspect.getargspec(func) - argspec = inspect.formatargspec(*argspec) - argspec = argspec.replace('*', '\*') - signature = '%s%s' % (func_name, argspec) - except TypeError as e: + try: + signature = str(inspect.signature(func)) + except (AttributeError, ValueError): + # try to read signature, backward compat for older Python + if sys.version_info[0] >= 3: + argspec = inspect.getfullargspec(func) + else: + argspec = inspect.getargspec(func) + signature = inspect.formatargspec(*argspec) + signature = '%s%s' % (func_name, signature.replace('*', '\*')) + except TypeError: signature = '%s()' % func_name self['Signature'] = signature @@ -471,12 +515,18 @@ def __str__(self): class ClassDoc(NumpyDocString): + + extra_public_methods = ['__call__'] + def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc, - config=None): + config={}): if not inspect.isclass(cls) and cls is not None: raise ValueError("Expected a class or None, but got %r" % cls) self._cls = cls + self.show_inherited_members = config.get( + 'show_inherited_class_members', True) + if modulename and not modulename.endswith('.'): modulename += '.' 
self._mod = modulename @@ -488,24 +538,48 @@ def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc, NumpyDocString.__init__(self, doc) - if config is not None and config.get('show_class_members', True): - if not self['Methods']: - self['Methods'] = [(name, '', '') - for name in sorted(self.methods)] - if not self['Attributes']: - self['Attributes'] = [(name, '', '') - for name in sorted(self.properties)] + if config.get('show_class_members', True): + def splitlines_x(s): + if not s: + return [] + else: + return s.splitlines() + + for field, items in [('Methods', self.methods), + ('Attributes', self.properties)]: + if not self[field]: + doc_list = [] + for name in sorted(items): + try: + doc_item = pydoc.getdoc(getattr(self._cls, name)) + doc_list.append((name, '', splitlines_x(doc_item))) + except AttributeError: + pass # method doesn't exist + self[field] = doc_list @property def methods(self): if self._cls is None: return [] return [name for name, func in inspect.getmembers(self._cls) - if not name.startswith('_') and callable(func)] + if ((not name.startswith('_') + or name in self.extra_public_methods) + and isinstance(func, collections.Callable) + and self._is_show_member(name))] @property def properties(self): if self._cls is None: return [] return [name for name, func in inspect.getmembers(self._cls) - if not name.startswith('_') and func is None] + if (not name.startswith('_') and + (func is None or isinstance(func, property) or + inspect.isgetsetdescriptor(func)) + and self._is_show_member(name))] + + def _is_show_member(self, name): + if self.show_inherited_members: + return True # show all class members + if name not in self._cls.__dict__: + return False # class member is inherited, we do not show it + return True diff --git a/doc/sphinxext/numpy_ext/docscrape_sphinx.py b/doc/sphinxext/numpy_ext/docscrape_sphinx.py index ca28300eab8a4..482978c3c026b 100644 --- a/doc/sphinxext/numpy_ext/docscrape_sphinx.py +++ b/doc/sphinxext/numpy_ext/docscrape_sphinx.py @@ -1,17 +1,29 @@ +from __future__ import division, absolute_import, print_function + +import sys import re import inspect import textwrap import pydoc -from .docscrape import NumpyDocString -from .docscrape import FunctionDoc -from .docscrape import ClassDoc +import sphinx +import collections + +from .docscrape import NumpyDocString, FunctionDoc, ClassDoc + +if sys.version_info[0] >= 3: + sixu = lambda s: s +else: + sixu = lambda s: unicode(s, 'unicode_escape') class SphinxDocString(NumpyDocString): - def __init__(self, docstring, config=None): - config = {} if config is None else config - self.use_plots = config.get('use_plots', False) + def __init__(self, docstring, config={}): NumpyDocString.__init__(self, docstring, config=config) + self.load_config(config) + + def load_config(self, config): + self.use_plots = config.get('use_plots', False) + self.class_members_toctree = config.get('class_members_toctree', True) # string conversion routines def _str_header(self, name, symbol='`'): @@ -23,7 +35,7 @@ def _str_field_list(self, name): def _str_indent(self, doc, indent=4): out = [] for line in doc: - out += [' ' * indent + line] + out += [' '*indent + line] return out def _str_signature(self): @@ -39,16 +51,37 @@ def _str_summary(self): def _str_extended_summary(self): return self['Extended Summary'] + [''] - def _str_param_list(self, name): + def _str_returns(self, name='Returns'): out = [] if self[name]: out += self._str_field_list(name) out += [''] for param, param_type, desc in self[name]: - out += 
self._str_indent(['**%s** : %s' % (param.strip(), - param_type)]) + if param_type: + out += self._str_indent(['**%s** : %s' % (param.strip(), + param_type)]) + else: + out += self._str_indent([param.strip()]) + if desc: + out += [''] + out += self._str_indent(desc, 8) out += [''] - out += self._str_indent(desc, 8) + return out + + def _str_param_list(self, name): + out = [] + if self[name]: + out += self._str_field_list(name) + out += [''] + for param, param_type, desc in self[name]: + if param_type: + out += self._str_indent(['**%s** : %s' % (param.strip(), + param_type)]) + else: + out += self._str_indent(['**%s**' % param.strip()]) + if desc: + out += [''] + out += self._str_indent(desc, 8) out += [''] return out @@ -78,28 +111,36 @@ def _str_member_list(self, name): others = [] for param, param_type, desc in self[name]: param = param.strip() - if not self._obj or hasattr(self._obj, param): + + # Check if the referenced member can have a docstring or not + param_obj = getattr(self._obj, param, None) + if not (callable(param_obj) + or isinstance(param_obj, property) + or inspect.isgetsetdescriptor(param_obj)): + param_obj = None + + if param_obj and (pydoc.getdoc(param_obj) or not desc): + # Referenced object has a docstring autosum += [" %s%s" % (prefix, param)] else: others.append((param, param_type, desc)) if autosum: - # GAEL: Toctree commented out below because it creates - # hundreds of sphinx warnings - # out += ['.. autosummary::', ' :toctree:', ''] - out += ['.. autosummary::', ''] - out += autosum + out += ['.. autosummary::'] + if self.class_members_toctree: + out += [' :toctree:'] + out += [''] + autosum if others: - maxlen_0 = max([len(x[0]) for x in others]) - maxlen_1 = max([len(x[1]) for x in others]) - hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10 - fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1) - n_indent = maxlen_0 + maxlen_1 + 4 - out += [hdr] + maxlen_0 = max(3, max([len(x[0]) + 4 for x in others])) + hdr = sixu("=") * maxlen_0 + sixu(" ") + sixu("=") * 10 + fmt = sixu('%%%ds %%s ') % (maxlen_0,) + out += ['', '', hdr] for param, param_type, desc in others: - out += [fmt % (param.strip(), param_type)] - out += self._str_indent(desc, n_indent) + desc = sixu(" ").join(x.strip() for x in desc).strip() + if param_type: + desc = "(%s) %s" % (param_type, desc) + out += [fmt % ("**" + param.strip() + "**", desc)] out += [hdr] out += [''] return out @@ -136,7 +177,7 @@ def _str_index(self): return out out += ['.. index:: %s' % idx.get('default', '')] - for section, references in idx.iteritems(): + for section, references in idx.items(): if section == 'default': continue elif section == 'refguide': @@ -155,7 +196,6 @@ def _str_references(self): out += [''] # Latex collects all references to a separate bibliography, # so we need to insert links to it - import sphinx # local import to avoid test dependency if sphinx.__version__ >= "0.6": out += ['.. 
only:: latex', ''] else: @@ -188,14 +228,17 @@ def __str__(self, indent=0, func_role="obj"): out += self._str_index() + [''] out += self._str_summary() out += self._str_extended_summary() - for param_list in ('Parameters', 'Returns', 'Raises', 'Attributes'): + out += self._str_param_list('Parameters') + out += self._str_returns('Returns') + out += self._str_returns('Yields') + for param_list in ('Other Parameters', 'Raises', 'Warns'): out += self._str_param_list(param_list) out += self._str_warnings() out += self._str_see_also(func_role) out += self._str_section('Notes') out += self._str_references() out += self._str_examples() - for param_list in ('Methods',): + for param_list in ('Attributes', 'Methods'): out += self._str_member_list(param_list) out = self._str_indent(out, indent) return '\n'.join(out) @@ -203,19 +246,20 @@ def __str__(self, indent=0, func_role="obj"): class SphinxFunctionDoc(SphinxDocString, FunctionDoc): def __init__(self, obj, doc=None, config={}): - self.use_plots = config.get('use_plots', False) + self.load_config(config) FunctionDoc.__init__(self, obj, doc=doc, config=config) class SphinxClassDoc(SphinxDocString, ClassDoc): def __init__(self, obj, doc=None, func_doc=None, config={}): - self.use_plots = config.get('use_plots', False) + self.load_config(config) ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config) class SphinxObjDoc(SphinxDocString): - def __init__(self, obj, doc=None, config=None): + def __init__(self, obj, doc=None, config={}): self._f = obj + self.load_config(config) SphinxDocString.__init__(self, doc, config=config) @@ -225,7 +269,7 @@ def get_doc_object(obj, what=None, doc=None, config={}): what = 'class' elif inspect.ismodule(obj): what = 'module' - elif callable(obj): + elif isinstance(obj, collections.Callable): what = 'function' else: what = 'object' diff --git a/doc/sphinxext/numpy_ext/linkcode.py b/doc/sphinxext/numpy_ext/linkcode.py new file mode 100644 index 0000000000000..1ad3ab82cb49c --- /dev/null +++ b/doc/sphinxext/numpy_ext/linkcode.py @@ -0,0 +1,83 @@ +# -*- coding: utf-8 -*- +""" + linkcode + ~~~~~~~~ + + Add external links to module code in Python object descriptions. + + :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. + :license: BSD, see LICENSE for details. + +""" +from __future__ import division, absolute_import, print_function + +import warnings +import collections + +warnings.warn("This extension has been accepted to Sphinx upstream. 
" + "Use the version from there (Sphinx >= 1.2) " + "https://bitbucket.org/birkenfeld/sphinx/pull-request/47/sphinxextlinkcode", + FutureWarning, stacklevel=1) + + +from docutils import nodes + +from sphinx import addnodes +from sphinx.locale import _ +from sphinx.errors import SphinxError + +class LinkcodeError(SphinxError): + category = "linkcode error" + +def doctree_read(app, doctree): + env = app.builder.env + + resolve_target = getattr(env.config, 'linkcode_resolve', None) + if not isinstance(env.config.linkcode_resolve, collections.Callable): + raise LinkcodeError( + "Function `linkcode_resolve` is not given in conf.py") + + domain_keys = dict( + py=['module', 'fullname'], + c=['names'], + cpp=['names'], + js=['object', 'fullname'], + ) + + for objnode in doctree.traverse(addnodes.desc): + domain = objnode.get('domain') + uris = set() + for signode in objnode: + if not isinstance(signode, addnodes.desc_signature): + continue + + # Convert signode to a specified format + info = {} + for key in domain_keys.get(domain, []): + value = signode.get(key) + if not value: + value = '' + info[key] = value + if not info: + continue + + # Call user code to resolve the link + uri = resolve_target(domain, info) + if not uri: + # no source + continue + + if uri in uris or not uri: + # only one link per name, please + continue + uris.add(uri) + + onlynode = addnodes.only(expr='html') + onlynode += nodes.reference('', '', internal=False, refuri=uri) + onlynode[0] += nodes.inline('', _('[source]'), + classes=['viewcode-link']) + signode += onlynode + +def setup(app): + app.connect('doctree-read', doctree_read) + app.add_config_value('linkcode_resolve', None, '') diff --git a/doc/sphinxext/numpy_ext/numpydoc.py b/doc/sphinxext/numpy_ext/numpydoc.py index 6ff03e0d7e593..322f57637b984 100644 --- a/doc/sphinxext/numpy_ext/numpydoc.py +++ b/doc/sphinxext/numpy_ext/numpydoc.py @@ -13,54 +13,67 @@ - Extract the signature from the docstring, if it can't be determined otherwise. -.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard +.. 
[1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt """ +from __future__ import division, absolute_import, print_function -from __future__ import unicode_literals - -import sys # Only needed to check Python version -import os +import sys import re import pydoc -from .docscrape_sphinx import get_doc_object -from .docscrape_sphinx import SphinxDocString +import sphinx import inspect +import collections + +if sphinx.__version__ < '1.0.1': + raise RuntimeError("Sphinx 1.0.1 or newer is required") + +from .docscrape_sphinx import get_doc_object, SphinxDocString +from sphinx.util.compat import Directive + +if sys.version_info[0] >= 3: + sixu = lambda s: s +else: + sixu = lambda s: unicode(s, 'unicode_escape') def mangle_docstrings(app, what, name, obj, options, lines, reference_offset=[0]): + cfg = {'use_plots': app.config.numpydoc_use_plots, + 'show_class_members': app.config.numpydoc_show_class_members, + 'show_inherited_class_members': + app.config.numpydoc_show_inherited_class_members, + 'class_members_toctree': app.config.numpydoc_class_members_toctree} - cfg = dict(use_plots=app.config.numpydoc_use_plots, - show_class_members=app.config.numpydoc_show_class_members) - + u_NL = sixu('\n') if what == 'module': # Strip top title - title_re = re.compile(r'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*', - re.I | re.S) - lines[:] = title_re.sub('', "\n".join(lines)).split("\n") + pattern = '^\\s*[#*=]{4,}\\n[a-z0-9 -]+\\n[#*=]{4,}\\s*' + title_re = re.compile(sixu(pattern), re.I | re.S) + lines[:] = title_re.sub(sixu(''), u_NL.join(lines)).split(u_NL) else: - doc = get_doc_object(obj, what, "\n".join(lines), config=cfg) - if sys.version_info[0] < 3: - lines[:] = unicode(doc).splitlines() + doc = get_doc_object(obj, what, u_NL.join(lines), config=cfg) + if sys.version_info[0] >= 3: + doc = str(doc) else: - lines[:] = str(doc).splitlines() + doc = unicode(doc) + lines[:] = doc.split(u_NL) - if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \ - obj.__name__: + if (app.config.numpydoc_edit_link and hasattr(obj, '__name__') and + obj.__name__): if hasattr(obj, '__module__'): - v = dict(full_name="%s.%s" % (obj.__module__, obj.__name__)) + v = dict(full_name=sixu("%s.%s") % (obj.__module__, obj.__name__)) else: v = dict(full_name=obj.__name__) - lines += [u'', u'.. htmlonly::', ''] - lines += [u' %s' % x for x in + lines += [sixu(''), sixu('.. htmlonly::'), sixu('')] + lines += [sixu(' %s') % x for x in (app.config.numpydoc_edit_link % v).split("\n")] # replace reference numbers so that there are no duplicates references = [] for line in lines: line = line.strip() - m = re.match(r'^.. \[([a-z0-9_.-])\]', line, re.I) + m = re.match(sixu('^.. \\[([a-z0-9_.-])\\]'), line, re.I) if m: references.append(m.group(1)) @@ -69,66 +82,67 @@ def mangle_docstrings(app, what, name, obj, options, lines, if references: for i, line in enumerate(lines): for r in references: - if re.match(r'^\d+$', r): - new_r = "R%d" % (reference_offset[0] + int(r)) + if re.match(sixu('^\\d+$'), r): + new_r = sixu("R%d") % (reference_offset[0] + int(r)) else: - new_r = u"%s%d" % (r, reference_offset[0]) - lines[i] = lines[i].replace(u'[%s]_' % r, - u'[%s]_' % new_r) - lines[i] = lines[i].replace(u'.. [%s]' % r, - u'.. [%s]' % new_r) + new_r = sixu("%s%d") % (r, reference_offset[0]) + lines[i] = lines[i].replace(sixu('[%s]_') % r, + sixu('[%s]_') % new_r) + lines[i] = lines[i].replace(sixu('.. [%s]') % r, + sixu('.. 
[%s]') % new_r) reference_offset[0] += len(references) -def mangle_signature(app, what, name, obj, - options, sig, retann): +def mangle_signature(app, what, name, obj, options, sig, retann): # Do not try to inspect classes that don't define `__init__` if (inspect.isclass(obj) and (not hasattr(obj, '__init__') or - 'initializes x; see ' in pydoc.getdoc(obj.__init__))): + 'initializes x; see ' in pydoc.getdoc(obj.__init__))): return '', '' - if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): + if not (isinstance(obj, collections.Callable) or + hasattr(obj, '__argspec_is_invalid_')): return + if not hasattr(obj, '__doc__'): return doc = SphinxDocString(pydoc.getdoc(obj)) if doc['Signature']: - sig = re.sub("^[^(]*", "", doc['Signature']) - return sig, '' + sig = re.sub(sixu("^[^(]*"), sixu(""), doc['Signature']) + return sig, sixu('') def setup(app, get_doc_object_=get_doc_object): + if not hasattr(app, 'add_config_value'): + return # probably called by nose, better bail out + global get_doc_object get_doc_object = get_doc_object_ - if sys.version_info[0] < 3: - app.connect(b'autodoc-process-docstring', mangle_docstrings) - app.connect(b'autodoc-process-signature', mangle_signature) - else: - app.connect('autodoc-process-docstring', mangle_docstrings) - app.connect('autodoc-process-signature', mangle_signature) + app.connect('autodoc-process-docstring', mangle_docstrings) + app.connect('autodoc-process-signature', mangle_signature) app.add_config_value('numpydoc_edit_link', None, False) app.add_config_value('numpydoc_use_plots', None, False) app.add_config_value('numpydoc_show_class_members', True, True) + app.add_config_value('numpydoc_show_inherited_class_members', True, True) + app.add_config_value('numpydoc_class_members_toctree', True, True) # Extra mangling domains app.add_domain(NumpyPythonDomain) app.add_domain(NumpyCDomain) + + metadata = {'parallel_read_safe': True} + return metadata -#----------------------------------------------------------------------------- +# ------------------------------------------------------------------------------ # Docstring-mangling domains -#----------------------------------------------------------------------------- +# ------------------------------------------------------------------------------ -try: - import sphinx # lazy to avoid test dependency -except ImportError: - CDomain = PythonDomain = object -else: - from sphinx.domains.c import CDomain - from sphinx.domains.python import PythonDomain +from docutils.statemachine import ViewList +from sphinx.domains.c import CDomain +from sphinx.domains.python import PythonDomain class ManglingDomainBase(object): @@ -139,7 +153,7 @@ def __init__(self, *a, **kw): self.wrap_mangling_directives() def wrap_mangling_directives(self): - for name, objtype in self.directive_mangling_map.items(): + for name, objtype in list(self.directive_mangling_map.items()): self.directives[name] = wrap_mangling_directive( self.directives[name], objtype) @@ -155,6 +169,7 @@ class NumpyPythonDomain(ManglingDomainBase, PythonDomain): 'staticmethod': 'function', 'attribute': 'attribute', } + indices = [] class NumpyCDomain(ManglingDomainBase, CDomain): @@ -168,6 +183,31 @@ class NumpyCDomain(ManglingDomainBase, CDomain): } +def match_items(lines, content_old): + """Create the right items for lines. + + This tries to recover where a line in ``lines`` + came from before mangling. + + It assumes that missing or new lines + are always empty. 
+ """ + items_new = [] + lines_old = content_old.data + items_old = content_old.items + j = 0 + for i, line in enumerate(lines): + # go to next non-empty line in old: + # line.strip("") checks whether the string is all whitespace + while j < len(lines_old) - 1 and not lines_old[j].strip(" "): + j += 1 + items_new.append(items_old[j]) + if line.strip(" ") and j < len(lines_old) - 1: + j += 1 + assert(len(items_new) == len(lines)) + return items_new + + def wrap_mangling_directive(base_directive, objtype): class directive(base_directive): def run(self): @@ -183,9 +223,10 @@ def run(self): lines = list(self.content) mangle_docstrings(env.app, objtype, name, None, None, lines) - # local import to avoid testing dependency - from docutils.statemachine import ViewList - self.content = ViewList(lines, self.content.parent) + if self.content: + items = match_items(lines, self.content) + self.content = ViewList(lines, items=items, + parent=self.content.parent) return base_directive.run(self) diff --git a/doc/sphinxext/numpy_ext/phantom_import.py b/doc/sphinxext/numpy_ext/phantom_import.py new file mode 100644 index 0000000000000..9a60b4a35b18f --- /dev/null +++ b/doc/sphinxext/numpy_ext/phantom_import.py @@ -0,0 +1,167 @@ +""" +============== +phantom_import +============== + +Sphinx extension to make directives from ``sphinx.ext.autodoc`` and similar +extensions to use docstrings loaded from an XML file. + +This extension loads an XML file in the Pydocweb format [1] and +creates a dummy module that contains the specified docstrings. This +can be used to get the current docstrings from a Pydocweb instance +without needing to rebuild the documented module. + +.. [1] http://code.google.com/p/pydocweb + +""" +from __future__ import division, absolute_import, print_function + +import imp, sys, compiler, types, os, inspect, re + +def setup(app): + app.connect('builder-inited', initialize) + app.add_config_value('phantom_import_file', None, True) + +def initialize(app): + fn = app.config.phantom_import_file + if (fn and os.path.isfile(fn)): + print("[numpydoc] Phantom importing modules from", fn, "...") + import_phantom_module(fn) + +#------------------------------------------------------------------------------ +# Creating 'phantom' modules from an XML description +#------------------------------------------------------------------------------ +def import_phantom_module(xml_file): + """ + Insert a fake Python module to sys.modules, based on a XML file. + + The XML file is expected to conform to Pydocweb DTD. The fake + module will contain dummy objects, which guarantee the following: + + - Docstrings are correct. + - Class inheritance relationships are correct (if present in XML). + - Function argspec is *NOT* correct (even if present in XML). + Instead, the function signature is prepended to the function docstring. + - Class attributes are *NOT* correct; instead, they are dummy objects. 
+ + Parameters + ---------- + xml_file : str + Name of an XML file to read + + """ + import lxml.etree as etree + + object_cache = {} + + tree = etree.parse(xml_file) + root = tree.getroot() + + # Sort items so that + # - Base classes come before classes inherited from them + # - Modules come before their contents + all_nodes = dict([(n.attrib['id'], n) for n in root]) + + def _get_bases(node, recurse=False): + bases = [x.attrib['ref'] for x in node.findall('base')] + if recurse: + j = 0 + while True: + try: + b = bases[j] + except IndexError: break + if b in all_nodes: + bases.extend(_get_bases(all_nodes[b])) + j += 1 + return bases + + type_index = ['module', 'class', 'callable', 'object'] + + def base_cmp(a, b): + x = cmp(type_index.index(a.tag), type_index.index(b.tag)) + if x != 0: return x + + if a.tag == 'class' and b.tag == 'class': + a_bases = _get_bases(a, recurse=True) + b_bases = _get_bases(b, recurse=True) + x = cmp(len(a_bases), len(b_bases)) + if x != 0: return x + if a.attrib['id'] in b_bases: return -1 + if b.attrib['id'] in a_bases: return 1 + + return cmp(a.attrib['id'].count('.'), b.attrib['id'].count('.')) + + nodes = root.getchildren() + nodes.sort(base_cmp) + + # Create phantom items + for node in nodes: + name = node.attrib['id'] + doc = (node.text or '').decode('string-escape') + "\n" + if doc == "\n": doc = "" + + # create parent, if missing + parent = name + while True: + parent = '.'.join(parent.split('.')[:-1]) + if not parent: break + if parent in object_cache: break + obj = imp.new_module(parent) + object_cache[parent] = obj + sys.modules[parent] = obj + + # create object + if node.tag == 'module': + obj = imp.new_module(name) + obj.__doc__ = doc + sys.modules[name] = obj + elif node.tag == 'class': + bases = [object_cache[b] for b in _get_bases(node) + if b in object_cache] + bases.append(object) + init = lambda self: None + init.__doc__ = doc + obj = type(name, tuple(bases), {'__doc__': doc, '__init__': init}) + obj.__name__ = name.split('.')[-1] + elif node.tag == 'callable': + funcname = node.attrib['id'].split('.')[-1] + argspec = node.attrib.get('argspec') + if argspec: + argspec = re.sub('^[^(]*', '', argspec) + doc = "%s%s\n\n%s" % (funcname, argspec, doc) + obj = lambda: 0 + obj.__argspec_is_invalid_ = True + if sys.version_info[0] >= 3: + obj.__name__ = funcname + else: + obj.func_name = funcname + obj.__name__ = name + obj.__doc__ = doc + if inspect.isclass(object_cache[parent]): + obj.__objclass__ = object_cache[parent] + else: + class Dummy(object): pass + obj = Dummy() + obj.__name__ = name + obj.__doc__ = doc + if inspect.isclass(object_cache[parent]): + obj.__get__ = lambda: None + object_cache[name] = obj + + if parent: + if inspect.ismodule(object_cache[parent]): + obj.__module__ = parent + setattr(object_cache[parent], name.split('.')[-1], obj) + + # Populate items + for node in root: + obj = object_cache.get(node.attrib['id']) + if obj is None: continue + for ref in node.findall('ref'): + if node.tag == 'class': + if ref.attrib['ref'].startswith(node.attrib['id'] + '.'): + setattr(obj, ref.attrib['name'], + object_cache.get(ref.attrib['ref'])) + else: + setattr(obj, ref.attrib['name'], + object_cache.get(ref.attrib['ref'])) diff --git a/doc/sphinxext/numpy_ext/plot_directive.py b/doc/sphinxext/numpy_ext/plot_directive.py new file mode 100644 index 0000000000000..2014f857076c1 --- /dev/null +++ b/doc/sphinxext/numpy_ext/plot_directive.py @@ -0,0 +1,642 @@ +""" +A special directive for generating a matplotlib plot. + +.. 
warning::
+
+   This is a hacked version of plot_directive.py from Matplotlib.
+   It's very much subject to change!
+
+
+Usage
+-----
+
+Can be used like this::
+
+    .. plot:: examples/example.py
+
+    .. plot::
+
+       import matplotlib.pyplot as plt
+       plt.plot([1,2,3], [4,5,6])
+
+    .. plot::
+
+       A plotting example:
+
+       >>> import matplotlib.pyplot as plt
+       >>> plt.plot([1,2,3], [4,5,6])
+
+The content is interpreted as doctest formatted if it has a line starting
+with ``>>>``.
+
+The ``plot`` directive supports the options
+
+    format : {'python', 'doctest'}
+        Specify the format of the input
+
+    include-source : bool
+        Whether to display the source code. Default can be changed in conf.py
+
+and the ``image`` directive options ``alt``, ``height``, ``width``,
+``scale``, ``align``, ``class``.
+
+Configuration options
+---------------------
+
+The plot directive has the following configuration options:
+
+    plot_include_source
+        Default value for the include-source option
+
+    plot_pre_code
+        Code that should be executed before each plot.
+
+    plot_basedir
+        Base directory to which plot:: file names are relative.
+        (If None or empty, file names are relative to the directory where
+        the file containing the directive is.)
+
+    plot_formats
+        File formats to generate. List of tuples or strings::
+
+            [(suffix, dpi), suffix, ...]
+
+        that determine the file format and the DPI. For entries whose
+        DPI was omitted, sensible defaults are chosen.
+
+    plot_html_show_formats
+        Whether to show links to the files in HTML.
+
+TODO
+----
+
+* Refactor LaTeX output; now it's plain images, but it would be nice
+  to make them appear side-by-side, or in floats.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import sys, os, glob, shutil, imp, warnings, re, textwrap, traceback
+import sphinx
+
+if sys.version_info[0] >= 3:
+    from io import StringIO
+else:
+    from StringIO import StringIO
+
+import warnings
+warnings.warn("A plot_directive module is also available under "
+              "matplotlib.sphinxext; expect this numpydoc.plot_directive "
+              "module to be deprecated after relevant features have been "
+              "integrated there.",
+              FutureWarning, stacklevel=2)
+
+
+#------------------------------------------------------------------------------
+# Registration hook
+#------------------------------------------------------------------------------
+
+def setup(app):
+    setup.app = app
+    setup.config = app.config
+    setup.confdir = app.confdir
+
+    app.add_config_value('plot_pre_code', '', True)
+    app.add_config_value('plot_include_source', False, True)
+    app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
+    app.add_config_value('plot_basedir', None, True)
+    app.add_config_value('plot_html_show_formats', True, True)
+
+    app.add_directive('plot', plot_directive, True, (0, 1, False),
+                      **plot_directive_options)
+
+#------------------------------------------------------------------------------
+# plot:: directive
+#------------------------------------------------------------------------------
+from docutils.parsers.rst import directives
+from docutils import nodes
+
+def plot_directive(name, arguments, options, content, lineno,
+                   content_offset, block_text, state, state_machine):
+    return run(arguments, content, options, state_machine, state, lineno)
+plot_directive.__doc__ = __doc__
+
+def _option_boolean(arg):
+    if not arg or not arg.strip():
+        # no argument given, assume used as a flag
+        return True
+    elif arg.strip().lower() in ('no', '0', 'false'):
+        return False
+    elif arg.strip().lower() in
('yes', '1', 'true'): + return True + else: + raise ValueError('"%s" unknown boolean' % arg) + +def _option_format(arg): + return directives.choice(arg, ('python', 'lisp')) + +def _option_align(arg): + return directives.choice(arg, ("top", "middle", "bottom", "left", "center", + "right")) + +plot_directive_options = {'alt': directives.unchanged, + 'height': directives.length_or_unitless, + 'width': directives.length_or_percentage_or_unitless, + 'scale': directives.nonnegative_int, + 'align': _option_align, + 'class': directives.class_option, + 'include-source': _option_boolean, + 'format': _option_format, + } + +#------------------------------------------------------------------------------ +# Generating output +#------------------------------------------------------------------------------ + +from docutils import nodes, utils + +try: + # Sphinx depends on either Jinja or Jinja2 + import jinja2 + def format_template(template, **kw): + return jinja2.Template(template).render(**kw) +except ImportError: + import jinja + def format_template(template, **kw): + return jinja.from_string(template, **kw) + +TEMPLATE = """ +{{ source_code }} + +{{ only_html }} + + {% if source_link or (html_show_formats and not multi_image) %} + ( + {%- if source_link -%} + `Source code <{{ source_link }}>`__ + {%- endif -%} + {%- if html_show_formats and not multi_image -%} + {%- for img in images -%} + {%- for fmt in img.formats -%} + {%- if source_link or not loop.first -%}, {% endif -%} + `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ + {%- endfor -%} + {%- endfor -%} + {%- endif -%} + ) + {% endif %} + + {% for img in images %} + .. figure:: {{ build_dir }}/{{ img.basename }}.png + {%- for option in options %} + {{ option }} + {% endfor %} + + {% if html_show_formats and multi_image -%} + ( + {%- for fmt in img.formats -%} + {%- if not loop.first -%}, {% endif -%} + `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ + {%- endfor -%} + ) + {%- endif -%} + {% endfor %} + +{{ only_latex }} + + {% for img in images %} + .. 
image:: {{ build_dir }}/{{ img.basename }}.pdf + {% endfor %} + +""" + +class ImageFile(object): + def __init__(self, basename, dirname): + self.basename = basename + self.dirname = dirname + self.formats = [] + + def filename(self, format): + return os.path.join(self.dirname, "%s.%s" % (self.basename, format)) + + def filenames(self): + return [self.filename(fmt) for fmt in self.formats] + +def run(arguments, content, options, state_machine, state, lineno): + if arguments and content: + raise RuntimeError("plot:: directive can't have both args and content") + + document = state_machine.document + config = document.settings.env.config + + options.setdefault('include-source', config.plot_include_source) + + # determine input + rst_file = document.attributes['source'] + rst_dir = os.path.dirname(rst_file) + + if arguments: + if not config.plot_basedir: + source_file_name = os.path.join(rst_dir, + directives.uri(arguments[0])) + else: + source_file_name = os.path.join(setup.confdir, config.plot_basedir, + directives.uri(arguments[0])) + code = open(source_file_name, 'r').read() + output_base = os.path.basename(source_file_name) + else: + source_file_name = rst_file + code = textwrap.dedent("\n".join(map(str, content))) + counter = document.attributes.get('_plot_counter', 0) + 1 + document.attributes['_plot_counter'] = counter + base, ext = os.path.splitext(os.path.basename(source_file_name)) + output_base = '%s-%d.py' % (base, counter) + + base, source_ext = os.path.splitext(output_base) + if source_ext in ('.py', '.rst', '.txt'): + output_base = base + else: + source_ext = '' + + # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames + output_base = output_base.replace('.', '-') + + # is it in doctest format? + is_doctest = contains_doctest(code) + if 'format' in options: + if options['format'] == 'python': + is_doctest = False + else: + is_doctest = True + + # determine output directory name fragment + source_rel_name = relpath(source_file_name, setup.confdir) + source_rel_dir = os.path.dirname(source_rel_name) + while source_rel_dir.startswith(os.path.sep): + source_rel_dir = source_rel_dir[1:] + + # build_dir: where to place output files (temporarily) + build_dir = os.path.join(os.path.dirname(setup.app.doctreedir), + 'plot_directive', + source_rel_dir) + if not os.path.exists(build_dir): + os.makedirs(build_dir) + + # output_dir: final location in the builder's directory + dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir, + source_rel_dir)) + + # how to link to files from the RST file + dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir), + source_rel_dir).replace(os.path.sep, '/') + build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/') + source_link = dest_dir_link + '/' + output_base + source_ext + + # make figures + try: + results = makefig(code, source_file_name, build_dir, output_base, + config) + errors = [] + except PlotError as err: + reporter = state.memo.reporter + sm = reporter.system_message( + 2, "Exception occurred in plotting %s: %s" % (output_base, err), + line=lineno) + results = [(code, [])] + errors = [sm] + + # generate output restructuredtext + total_lines = [] + for j, (code_piece, images) in enumerate(results): + if options['include-source']: + if is_doctest: + lines = [''] + lines += [row.rstrip() for row in code_piece.split('\n')] + else: + lines = ['.. 
code-block:: python', ''] + lines += [' %s' % row.rstrip() + for row in code_piece.split('\n')] + source_code = "\n".join(lines) + else: + source_code = "" + + opts = [':%s: %s' % (key, val) for key, val in list(options.items()) + if key in ('alt', 'height', 'width', 'scale', 'align', 'class')] + + only_html = ".. only:: html" + only_latex = ".. only:: latex" + + if j == 0: + src_link = source_link + else: + src_link = None + + result = format_template( + TEMPLATE, + dest_dir=dest_dir_link, + build_dir=build_dir_link, + source_link=src_link, + multi_image=len(images) > 1, + only_html=only_html, + only_latex=only_latex, + options=opts, + images=images, + source_code=source_code, + html_show_formats=config.plot_html_show_formats) + + total_lines.extend(result.split("\n")) + total_lines.extend("\n") + + if total_lines: + state_machine.insert_input(total_lines, source=source_file_name) + + # copy image files to builder's output directory + if not os.path.exists(dest_dir): + os.makedirs(dest_dir) + + for code_piece, images in results: + for img in images: + for fn in img.filenames(): + shutil.copyfile(fn, os.path.join(dest_dir, + os.path.basename(fn))) + + # copy script (if necessary) + if source_file_name == rst_file: + target_name = os.path.join(dest_dir, output_base + source_ext) + f = open(target_name, 'w') + f.write(unescape_doctest(code)) + f.close() + + return errors + + +#------------------------------------------------------------------------------ +# Run code and capture figures +#------------------------------------------------------------------------------ + +import matplotlib +matplotlib.use('Agg') +import matplotlib.pyplot as plt +import matplotlib.image as image +from matplotlib import _pylab_helpers + +import exceptions + +def contains_doctest(text): + try: + # check if it's valid Python as-is + compile(text, '', 'exec') + return False + except SyntaxError: + pass + r = re.compile(r'^\s*>>>', re.M) + m = r.search(text) + return bool(m) + +def unescape_doctest(text): + """ + Extract code from a piece of text, which contains either Python code + or doctests. + + """ + if not contains_doctest(text): + return text + + code = "" + for line in text.split("\n"): + m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line) + if m: + code += m.group(2) + "\n" + elif line.strip(): + code += "# " + line.strip() + "\n" + else: + code += "\n" + return code + +def split_code_at_show(text): + """ + Split code at plt.show() + + """ + + parts = [] + is_doctest = contains_doctest(text) + + part = [] + for line in text.split("\n"): + if (not is_doctest and line.strip() == 'plt.show()') or \ + (is_doctest and line.strip() == '>>> plt.show()'): + part.append(line) + parts.append("\n".join(part)) + part = [] + else: + part.append(line) + if "\n".join(part).strip(): + parts.append("\n".join(part)) + return parts + +class PlotError(RuntimeError): + pass + +def run_code(code, code_path, ns=None): + # Change the working directory to the directory of the example, so + # it can get at its data files, if any. 
+ pwd = os.getcwd() + old_sys_path = list(sys.path) + if code_path is not None: + dirname = os.path.abspath(os.path.dirname(code_path)) + os.chdir(dirname) + sys.path.insert(0, dirname) + + # Redirect stdout + stdout = sys.stdout + sys.stdout = StringIO() + + # Reset sys.argv + old_sys_argv = sys.argv + sys.argv = [code_path] + + try: + try: + code = unescape_doctest(code) + if ns is None: + ns = {} + if not ns: + exec(setup.config.plot_pre_code, ns) + exec(code, ns) + except (Exception, SystemExit) as err: + raise PlotError(traceback.format_exc()) + finally: + os.chdir(pwd) + sys.argv = old_sys_argv + sys.path[:] = old_sys_path + sys.stdout = stdout + return ns + + +#------------------------------------------------------------------------------ +# Generating figures +#------------------------------------------------------------------------------ + +def out_of_date(original, derived): + """ + Returns True if derivative is out-of-date wrt original, + both of which are full file paths. + """ + return (not os.path.exists(derived) + or os.stat(derived).st_mtime < os.stat(original).st_mtime) + + +def makefig(code, code_path, output_dir, output_base, config): + """ + Run a pyplot script *code* and save the images under *output_dir* + with file names derived from *output_base* + + """ + + # -- Parse format list + default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50} + formats = [] + for fmt in config.plot_formats: + if isinstance(fmt, str): + formats.append((fmt, default_dpi.get(fmt, 80))) + elif type(fmt) in (tuple, list) and len(fmt)==2: + formats.append((str(fmt[0]), int(fmt[1]))) + else: + raise PlotError('invalid image format "%r" in plot_formats' % fmt) + + # -- Try to determine if all images already exist + + code_pieces = split_code_at_show(code) + + # Look for single-figure output files first + all_exists = True + img = ImageFile(output_base, output_dir) + for format, dpi in formats: + if out_of_date(code_path, img.filename(format)): + all_exists = False + break + img.formats.append(format) + + if all_exists: + return [(code, [img])] + + # Then look for multi-figure output files + results = [] + all_exists = True + for i, code_piece in enumerate(code_pieces): + images = [] + for j in range(1000): + img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir) + for format, dpi in formats: + if out_of_date(code_path, img.filename(format)): + all_exists = False + break + img.formats.append(format) + + # assume that if we have one, we have them all + if not all_exists: + all_exists = (j > 0) + break + images.append(img) + if not all_exists: + break + results.append((code_piece, images)) + + if all_exists: + return results + + # -- We didn't find the files, so build them + + results = [] + ns = {} + + for i, code_piece in enumerate(code_pieces): + # Clear between runs + plt.close('all') + + # Run code + run_code(code_piece, code_path, ns) + + # Collect images + images = [] + fig_managers = _pylab_helpers.Gcf.get_all_fig_managers() + for j, figman in enumerate(fig_managers): + if len(fig_managers) == 1 and len(code_pieces) == 1: + img = ImageFile(output_base, output_dir) + else: + img = ImageFile("%s_%02d_%02d" % (output_base, i, j), + output_dir) + images.append(img) + for format, dpi in formats: + try: + figman.canvas.figure.savefig(img.filename(format), dpi=dpi) + except exceptions.BaseException as err: + raise PlotError(traceback.format_exc()) + img.formats.append(format) + + # Results + results.append((code_piece, images)) + + return results + + 
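As a quick illustration of the format-list handling at the top of makefig() above, a hypothetical conf.py value (an assumption for illustration only, not part of this patch) could mix bare suffixes and (suffix, dpi) pairs:

# Hypothetical conf.py setting (illustrative assumption, not in this patch):
# bare suffixes fall back to makefig()'s default DPI table, while tuples set
# the DPI explicitly.
plot_formats = ['png', ('hires.png', 200), ('pdf', 50)]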
+#------------------------------------------------------------------------------ +# Relative pathnames +#------------------------------------------------------------------------------ + +try: + from os.path import relpath +except ImportError: + # Copied from Python 2.7 + if 'posix' in sys.builtin_module_names: + def relpath(path, start=os.path.curdir): + """Return a relative version of a path""" + from os.path import sep, curdir, join, abspath, commonprefix, \ + pardir + + if not path: + raise ValueError("no path specified") + + start_list = abspath(start).split(sep) + path_list = abspath(path).split(sep) + + # Work out how much of the filepath is shared by start and path. + i = len(commonprefix([start_list, path_list])) + + rel_list = [pardir] * (len(start_list)-i) + path_list[i:] + if not rel_list: + return curdir + return join(*rel_list) + elif 'nt' in sys.builtin_module_names: + def relpath(path, start=os.path.curdir): + """Return a relative version of a path""" + from os.path import sep, curdir, join, abspath, commonprefix, \ + pardir, splitunc + + if not path: + raise ValueError("no path specified") + start_list = abspath(start).split(sep) + path_list = abspath(path).split(sep) + if start_list[0].lower() != path_list[0].lower(): + unc_path, rest = splitunc(path) + unc_start, rest = splitunc(start) + if bool(unc_path) ^ bool(unc_start): + raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)" + % (path, start)) + else: + raise ValueError("path is on drive %s, start on drive %s" + % (path_list[0], start_list[0])) + # Work out how much of the filepath is shared by start and path. + for i in range(min(len(start_list), len(path_list))): + if start_list[i].lower() != path_list[i].lower(): + break + else: + i += 1 + + rel_list = [pardir] * (len(start_list)-i) + path_list[i:] + if not rel_list: + return curdir + return join(*rel_list) + else: + raise RuntimeError("Unsupported platform (no relpath available!)") diff --git a/doc/sphinxext/numpy_ext/tests/test_docscrape.py b/doc/sphinxext/numpy_ext/tests/test_docscrape.py new file mode 100644 index 0000000000000..634bef444fa66 --- /dev/null +++ b/doc/sphinxext/numpy_ext/tests/test_docscrape.py @@ -0,0 +1,913 @@ +# -*- encoding:utf-8 -*- +from __future__ import division, absolute_import, print_function + +import sys, textwrap + +from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc +from numpydoc.docscrape_sphinx import SphinxDocString, SphinxClassDoc +from nose.tools import * + +if sys.version_info[0] >= 3: + sixu = lambda s: s +else: + sixu = lambda s: unicode(s, 'unicode_escape') + + +doc_txt = '''\ + numpy.multivariate_normal(mean, cov, shape=None, spam=None) + + Draw values from a multivariate normal distribution with specified + mean and covariance. + + The multivariate normal or Gaussian distribution is a generalisation + of the one-dimensional normal distribution to higher dimensions. + + Parameters + ---------- + mean : (N,) ndarray + Mean of the N-dimensional distribution. + + .. math:: + + (1+2+3)/3 + + cov : (N, N) ndarray + Covariance matrix of the distribution. + shape : tuple of ints + Given a shape of, for example, (m,n,k), m*n*k samples are + generated, and packed in an m-by-n-by-k arrangement. Because + each sample is N-dimensional, the output shape is (m,n,k,N). + + Returns + ------- + out : ndarray + The drawn samples, arranged according to `shape`. If the + shape given is (m,n,...), then the shape of `out` is is + (m,n,...,N). 
+ + In other words, each entry ``out[i,j,...,:]`` is an N-dimensional + value drawn from the distribution. + list of str + This is not a real return value. It exists to test + anonymous return values. + + Other Parameters + ---------------- + spam : parrot + A parrot off its mortal coil. + + Raises + ------ + RuntimeError + Some error + + Warns + ----- + RuntimeWarning + Some warning + + Warnings + -------- + Certain warnings apply. + + Notes + ----- + Instead of specifying the full covariance matrix, popular + approximations include: + + - Spherical covariance (`cov` is a multiple of the identity matrix) + - Diagonal covariance (`cov` has non-negative elements only on the diagonal) + + This geometrical property can be seen in two dimensions by plotting + generated data-points: + + >>> mean = [0,0] + >>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis + + >>> x,y = multivariate_normal(mean,cov,5000).T + >>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() + + Note that the covariance matrix must be symmetric and non-negative + definite. + + References + ---------- + .. [1] A. Papoulis, "Probability, Random Variables, and Stochastic + Processes," 3rd ed., McGraw-Hill Companies, 1991 + .. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," + 2nd ed., Wiley, 2001. + + See Also + -------- + some, other, funcs + otherfunc : relationship + + Examples + -------- + >>> mean = (1,2) + >>> cov = [[1,0],[1,0]] + >>> x = multivariate_normal(mean,cov,(3,3)) + >>> print x.shape + (3, 3, 2) + + The following is probably true, given that 0.6 is roughly twice the + standard deviation: + + >>> print list( (x[0,0,:] - mean) < 0.6 ) + [True, True] + + .. index:: random + :refguide: random;distributions, random;gauss + + ''' +doc = NumpyDocString(doc_txt) + +doc_yields_txt = """ +Test generator + +Yields +------ +a : int + The number of apples. +b : int + The number of bananas. +int + The number of unknowns. 
+""" +doc_yields = NumpyDocString(doc_yields_txt) + + +def test_signature(): + assert doc['Signature'].startswith('numpy.multivariate_normal(') + assert doc['Signature'].endswith('spam=None)') + +def test_summary(): + assert doc['Summary'][0].startswith('Draw values') + assert doc['Summary'][-1].endswith('covariance.') + +def test_extended_summary(): + assert doc['Extended Summary'][0].startswith('The multivariate normal') + +def test_parameters(): + assert_equal(len(doc['Parameters']), 3) + assert_equal([n for n,_,_ in doc['Parameters']], ['mean','cov','shape']) + + arg, arg_type, desc = doc['Parameters'][1] + assert_equal(arg_type, '(N, N) ndarray') + assert desc[0].startswith('Covariance matrix') + assert doc['Parameters'][0][-1][-2] == ' (1+2+3)/3' + +def test_other_parameters(): + assert_equal(len(doc['Other Parameters']), 1) + assert_equal([n for n,_,_ in doc['Other Parameters']], ['spam']) + arg, arg_type, desc = doc['Other Parameters'][0] + assert_equal(arg_type, 'parrot') + assert desc[0].startswith('A parrot off its mortal coil') + +def test_returns(): + assert_equal(len(doc['Returns']), 2) + arg, arg_type, desc = doc['Returns'][0] + assert_equal(arg, 'out') + assert_equal(arg_type, 'ndarray') + assert desc[0].startswith('The drawn samples') + assert desc[-1].endswith('distribution.') + + arg, arg_type, desc = doc['Returns'][1] + assert_equal(arg, 'list of str') + assert_equal(arg_type, '') + assert desc[0].startswith('This is not a real') + assert desc[-1].endswith('anonymous return values.') + +def test_yields(): + section = doc_yields['Yields'] + assert_equal(len(section), 3) + truth = [('a', 'int', 'apples.'), + ('b', 'int', 'bananas.'), + ('int', '', 'unknowns.')] + for (arg, arg_type, desc), (arg_, arg_type_, end) in zip(section, truth): + assert_equal(arg, arg_) + assert_equal(arg_type, arg_type_) + assert desc[0].startswith('The number of') + assert desc[0].endswith(end) + +def test_returnyield(): + doc_text = """ +Test having returns and yields. + +Returns +------- +int + The number of apples. + +Yields +------ +a : int + The number of apples. +b : int + The number of bananas. + +""" + assert_raises(ValueError, NumpyDocString, doc_text) + +def test_notes(): + assert doc['Notes'][0].startswith('Instead') + assert doc['Notes'][-1].endswith('definite.') + assert_equal(len(doc['Notes']), 17) + +def test_references(): + assert doc['References'][0].startswith('..') + assert doc['References'][-1].endswith('2001.') + +def test_examples(): + assert doc['Examples'][0].startswith('>>>') + assert doc['Examples'][-1].endswith('True]') + +def test_index(): + assert_equal(doc['index']['default'], 'random') + assert_equal(len(doc['index']), 2) + assert_equal(len(doc['index']['refguide']), 2) + +def non_blank_line_by_line_compare(a,b): + a = textwrap.dedent(a) + b = textwrap.dedent(b) + a = [l.rstrip() for l in a.split('\n') if l.strip()] + b = [l.rstrip() for l in b.split('\n') if l.strip()] + for n,line in enumerate(a): + if not line == b[n]: + raise AssertionError("Lines %s of a and b differ: " + "\n>>> %s\n<<< %s\n" % + (n,line,b[n])) +def test_str(): + # doc_txt has the order of Notes and See Also sections flipped. + # This should be handled automatically, and so, one thing this test does + # is to make sure that See Also precedes Notes in the output. + non_blank_line_by_line_compare(str(doc), +"""numpy.multivariate_normal(mean, cov, shape=None, spam=None) + +Draw values from a multivariate normal distribution with specified +mean and covariance. 
+ +The multivariate normal or Gaussian distribution is a generalisation +of the one-dimensional normal distribution to higher dimensions. + +Parameters +---------- +mean : (N,) ndarray + Mean of the N-dimensional distribution. + + .. math:: + + (1+2+3)/3 + +cov : (N, N) ndarray + Covariance matrix of the distribution. +shape : tuple of ints + Given a shape of, for example, (m,n,k), m*n*k samples are + generated, and packed in an m-by-n-by-k arrangement. Because + each sample is N-dimensional, the output shape is (m,n,k,N). + +Returns +------- +out : ndarray + The drawn samples, arranged according to `shape`. If the + shape given is (m,n,...), then the shape of `out` is is + (m,n,...,N). + + In other words, each entry ``out[i,j,...,:]`` is an N-dimensional + value drawn from the distribution. +list of str + This is not a real return value. It exists to test + anonymous return values. + +Other Parameters +---------------- +spam : parrot + A parrot off its mortal coil. + +Raises +------ +RuntimeError + Some error + +Warns +----- +RuntimeWarning + Some warning + +Warnings +-------- +Certain warnings apply. + +See Also +-------- +`some`_, `other`_, `funcs`_ + +`otherfunc`_ + relationship + +Notes +----- +Instead of specifying the full covariance matrix, popular +approximations include: + + - Spherical covariance (`cov` is a multiple of the identity matrix) + - Diagonal covariance (`cov` has non-negative elements only on the diagonal) + +This geometrical property can be seen in two dimensions by plotting +generated data-points: + +>>> mean = [0,0] +>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis + +>>> x,y = multivariate_normal(mean,cov,5000).T +>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() + +Note that the covariance matrix must be symmetric and non-negative +definite. + +References +---------- +.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic + Processes," 3rd ed., McGraw-Hill Companies, 1991 +.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," + 2nd ed., Wiley, 2001. + +Examples +-------- +>>> mean = (1,2) +>>> cov = [[1,0],[1,0]] +>>> x = multivariate_normal(mean,cov,(3,3)) +>>> print x.shape +(3, 3, 2) + +The following is probably true, given that 0.6 is roughly twice the +standard deviation: + +>>> print list( (x[0,0,:] - mean) < 0.6 ) +[True, True] + +.. index:: random + :refguide: random;distributions, random;gauss""") + + +def test_yield_str(): + non_blank_line_by_line_compare(str(doc_yields), +"""Test generator + +Yields +------ +a : int + The number of apples. +b : int + The number of bananas. +int + The number of unknowns. + +.. index:: """) + + +def test_sphinx_str(): + sphinx_doc = SphinxDocString(doc_txt) + non_blank_line_by_line_compare(str(sphinx_doc), +""" +.. index:: random + single: random;distributions, random;gauss + +Draw values from a multivariate normal distribution with specified +mean and covariance. + +The multivariate normal or Gaussian distribution is a generalisation +of the one-dimensional normal distribution to higher dimensions. + +:Parameters: + + **mean** : (N,) ndarray + + Mean of the N-dimensional distribution. + + .. math:: + + (1+2+3)/3 + + **cov** : (N, N) ndarray + + Covariance matrix of the distribution. + + **shape** : tuple of ints + + Given a shape of, for example, (m,n,k), m*n*k samples are + generated, and packed in an m-by-n-by-k arrangement. Because + each sample is N-dimensional, the output shape is (m,n,k,N). 
+ +:Returns: + + **out** : ndarray + + The drawn samples, arranged according to `shape`. If the + shape given is (m,n,...), then the shape of `out` is is + (m,n,...,N). + + In other words, each entry ``out[i,j,...,:]`` is an N-dimensional + value drawn from the distribution. + + list of str + + This is not a real return value. It exists to test + anonymous return values. + +:Other Parameters: + + **spam** : parrot + + A parrot off its mortal coil. + +:Raises: + + **RuntimeError** + + Some error + +:Warns: + + **RuntimeWarning** + + Some warning + +.. warning:: + + Certain warnings apply. + +.. seealso:: + + :obj:`some`, :obj:`other`, :obj:`funcs` + + :obj:`otherfunc` + relationship + +.. rubric:: Notes + +Instead of specifying the full covariance matrix, popular +approximations include: + + - Spherical covariance (`cov` is a multiple of the identity matrix) + - Diagonal covariance (`cov` has non-negative elements only on the diagonal) + +This geometrical property can be seen in two dimensions by plotting +generated data-points: + +>>> mean = [0,0] +>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis + +>>> x,y = multivariate_normal(mean,cov,5000).T +>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() + +Note that the covariance matrix must be symmetric and non-negative +definite. + +.. rubric:: References + +.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic + Processes," 3rd ed., McGraw-Hill Companies, 1991 +.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," + 2nd ed., Wiley, 2001. + +.. only:: latex + + [1]_, [2]_ + +.. rubric:: Examples + +>>> mean = (1,2) +>>> cov = [[1,0],[1,0]] +>>> x = multivariate_normal(mean,cov,(3,3)) +>>> print x.shape +(3, 3, 2) + +The following is probably true, given that 0.6 is roughly twice the +standard deviation: + +>>> print list( (x[0,0,:] - mean) < 0.6 ) +[True, True] +""") + + +def test_sphinx_yields_str(): + sphinx_doc = SphinxDocString(doc_yields_txt) + non_blank_line_by_line_compare(str(sphinx_doc), +"""Test generator + +:Yields: + + **a** : int + + The number of apples. + + **b** : int + + The number of bananas. + + int + + The number of unknowns. +""") + + +doc2 = NumpyDocString(""" + Returns array of indices of the maximum values of along the given axis. + + Parameters + ---------- + a : {array_like} + Array to look in. + axis : {None, integer} + If None, the index is into the flattened array, otherwise along + the specified axis""") + +def test_parameters_without_extended_description(): + assert_equal(len(doc2['Parameters']), 2) + +doc3 = NumpyDocString(""" + my_signature(*params, **kwds) + + Return this and that. + """) + +def test_escape_stars(): + signature = str(doc3).split('\n')[0] + assert_equal(signature, 'my_signature(\*params, \*\*kwds)') + + def my_func(a, b, **kwargs): + pass + + fdoc = FunctionDoc(func=my_func) + assert_equal(fdoc['Signature'], 'my_func(a, b, \*\*kwargs)') + +doc4 = NumpyDocString( + """a.conj() + + Return an array with all complex-valued elements conjugated.""") + +def test_empty_extended_summary(): + assert_equal(doc4['Extended Summary'], []) + +doc5 = NumpyDocString( + """ + a.something() + + Raises + ------ + LinAlgException + If array is singular. 
+ + Warns + ----- + SomeWarning + If needed + """) + +def test_raises(): + assert_equal(len(doc5['Raises']), 1) + name,_,desc = doc5['Raises'][0] + assert_equal(name,'LinAlgException') + assert_equal(desc,['If array is singular.']) + +def test_warns(): + assert_equal(len(doc5['Warns']), 1) + name,_,desc = doc5['Warns'][0] + assert_equal(name,'SomeWarning') + assert_equal(desc,['If needed']) + +def test_see_also(): + doc6 = NumpyDocString( + """ + z(x,theta) + + See Also + -------- + func_a, func_b, func_c + func_d : some equivalent func + foo.func_e : some other func over + multiple lines + func_f, func_g, :meth:`func_h`, func_j, + func_k + :obj:`baz.obj_q` + :class:`class_j`: fubar + foobar + """) + + assert len(doc6['See Also']) == 12 + for func, desc, role in doc6['See Also']: + if func in ('func_a', 'func_b', 'func_c', 'func_f', + 'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'): + assert(not desc) + else: + assert(desc) + + if func == 'func_h': + assert role == 'meth' + elif func == 'baz.obj_q': + assert role == 'obj' + elif func == 'class_j': + assert role == 'class' + else: + assert role is None + + if func == 'func_d': + assert desc == ['some equivalent func'] + elif func == 'foo.func_e': + assert desc == ['some other func over', 'multiple lines'] + elif func == 'class_j': + assert desc == ['fubar', 'foobar'] + +def test_see_also_print(): + class Dummy(object): + """ + See Also + -------- + func_a, func_b + func_c : some relationship + goes here + func_d + """ + pass + + obj = Dummy() + s = str(FunctionDoc(obj, role='func')) + assert(':func:`func_a`, :func:`func_b`' in s) + assert(' some relationship' in s) + assert(':func:`func_d`' in s) + +doc7 = NumpyDocString(""" + + Doc starts on second line. + + """) + +def test_empty_first_line(): + assert doc7['Summary'][0].startswith('Doc starts') + + +def test_no_summary(): + str(SphinxDocString(""" + Parameters + ----------""")) + + +def test_unicode(): + doc = SphinxDocString(""" + öäöäöäöäöåååå + + öäöäöäööäååå + + Parameters + ---------- + ååå : äää + ööö + + Returns + ------- + ååå : ööö + äää + + """) + assert isinstance(doc['Summary'][0], str) + assert doc['Summary'][0] == 'öäöäöäöäöåååå' + +def test_plot_examples(): + cfg = dict(use_plots=True) + + doc = SphinxDocString(""" + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> plt.plot([1,2,3],[4,5,6]) + >>> plt.show() + """, config=cfg) + assert 'plot::' in str(doc), str(doc) + + doc = SphinxDocString(""" + Examples + -------- + .. plot:: + + import matplotlib.pyplot as plt + plt.plot([1,2,3],[4,5,6]) + plt.show() + """, config=cfg) + assert str(doc).count('plot::') == 1, str(doc) + +def test_class_members(): + + class Dummy(object): + """ + Dummy class. 
+ + """ + def spam(self, a, b): + """Spam\n\nSpam spam.""" + pass + def ham(self, c, d): + """Cheese\n\nNo cheese.""" + pass + @property + def spammity(self): + """Spammity index""" + return 0.95 + + class Ignorable(object): + """local class, to be ignored""" + pass + + for cls in (ClassDoc, SphinxClassDoc): + doc = cls(Dummy, config=dict(show_class_members=False)) + assert 'Methods' not in str(doc), (cls, str(doc)) + assert 'spam' not in str(doc), (cls, str(doc)) + assert 'ham' not in str(doc), (cls, str(doc)) + assert 'spammity' not in str(doc), (cls, str(doc)) + assert 'Spammity index' not in str(doc), (cls, str(doc)) + + doc = cls(Dummy, config=dict(show_class_members=True)) + assert 'Methods' in str(doc), (cls, str(doc)) + assert 'spam' in str(doc), (cls, str(doc)) + assert 'ham' in str(doc), (cls, str(doc)) + assert 'spammity' in str(doc), (cls, str(doc)) + + if cls is SphinxClassDoc: + assert '.. autosummary::' in str(doc), str(doc) + else: + assert 'Spammity index' in str(doc), str(doc) + + class SubDummy(Dummy): + """ + Subclass of Dummy class. + + """ + def ham(self, c, d): + """Cheese\n\nNo cheese.\nOverloaded Dummy.ham""" + pass + + def bar(self, a, b): + """Bar\n\nNo bar""" + pass + + for cls in (ClassDoc, SphinxClassDoc): + doc = cls(SubDummy, config=dict(show_class_members=True, + show_inherited_class_members=False)) + assert 'Methods' in str(doc), (cls, str(doc)) + assert 'spam' not in str(doc), (cls, str(doc)) + assert 'ham' in str(doc), (cls, str(doc)) + assert 'bar' in str(doc), (cls, str(doc)) + assert 'spammity' not in str(doc), (cls, str(doc)) + + if cls is SphinxClassDoc: + assert '.. autosummary::' in str(doc), str(doc) + else: + assert 'Spammity index' not in str(doc), str(doc) + + doc = cls(SubDummy, config=dict(show_class_members=True, + show_inherited_class_members=True)) + assert 'Methods' in str(doc), (cls, str(doc)) + assert 'spam' in str(doc), (cls, str(doc)) + assert 'ham' in str(doc), (cls, str(doc)) + assert 'bar' in str(doc), (cls, str(doc)) + assert 'spammity' in str(doc), (cls, str(doc)) + + if cls is SphinxClassDoc: + assert '.. autosummary::' in str(doc), str(doc) + else: + assert 'Spammity index' in str(doc), str(doc) + +def test_duplicate_signature(): + # Duplicate function signatures occur e.g. in ufuncs, when the + # automatic mechanism adds one, and a more detailed comes from the + # docstring itself. + + doc = NumpyDocString( + """ + z(x1, x2) + + z(a, theta) + """) + + assert doc['Signature'].strip() == 'z(a, theta)' + + +class_doc_txt = """ + Foo + + Parameters + ---------- + f : callable ``f(t, y, *f_args)`` + Aaa. + jac : callable ``jac(t, y, *jac_args)`` + Bbb. + + Attributes + ---------- + t : float + Current time. + y : ndarray + Current variable values. + x : float + Some parameter + + Methods + ------- + a + b + c + + Examples + -------- + For usage examples, see `ode`. +""" + +def test_class_members_doc(): + doc = ClassDoc(None, class_doc_txt) + non_blank_line_by_line_compare(str(doc), + """ + Foo + + Parameters + ---------- + f : callable ``f(t, y, *f_args)`` + Aaa. + jac : callable ``jac(t, y, *jac_args)`` + Bbb. + + Examples + -------- + For usage examples, see `ode`. + + Attributes + ---------- + t : float + Current time. + y : ndarray + Current variable values. + x : float + Some parameter + + Methods + ------- + a + + b + + c + + .. 
index:: + + """) + +def test_class_members_doc_sphinx(): + class Foo: + @property + def x(self): + """Test attribute""" + return None + + doc = SphinxClassDoc(Foo, class_doc_txt) + non_blank_line_by_line_compare(str(doc), + """ + Foo + + :Parameters: + + **f** : callable ``f(t, y, *f_args)`` + + Aaa. + + **jac** : callable ``jac(t, y, *jac_args)`` + + Bbb. + + .. rubric:: Examples + + For usage examples, see `ode`. + + .. rubric:: Attributes + + .. autosummary:: + :toctree: + + x + + === ========== + t (float) Current time. + y (ndarray) Current variable values. + === ========== + + .. rubric:: Methods + + === ========== + a + b + c + === ========== + + """) + +if __name__ == "__main__": + import nose + nose.run() diff --git a/doc/sphinxext/numpy_ext/tests/test_linkcode.py b/doc/sphinxext/numpy_ext/tests/test_linkcode.py new file mode 100644 index 0000000000000..340166a485fcd --- /dev/null +++ b/doc/sphinxext/numpy_ext/tests/test_linkcode.py @@ -0,0 +1,5 @@ +from __future__ import division, absolute_import, print_function + +import numpydoc.linkcode + +# No tests at the moment... diff --git a/doc/sphinxext/numpy_ext/tests/test_phantom_import.py b/doc/sphinxext/numpy_ext/tests/test_phantom_import.py new file mode 100644 index 0000000000000..80fae08f4edf8 --- /dev/null +++ b/doc/sphinxext/numpy_ext/tests/test_phantom_import.py @@ -0,0 +1,12 @@ +from __future__ import division, absolute_import, print_function + +import sys +from nose import SkipTest + +def test_import(): + if sys.version_info[0] >= 3: + raise SkipTest("phantom_import not ported to Py3") + + import numpydoc.phantom_import + +# No tests at the moment... diff --git a/doc/sphinxext/numpy_ext/tests/test_plot_directive.py b/doc/sphinxext/numpy_ext/tests/test_plot_directive.py new file mode 100644 index 0000000000000..1ea10769472cb --- /dev/null +++ b/doc/sphinxext/numpy_ext/tests/test_plot_directive.py @@ -0,0 +1,11 @@ +from __future__ import division, absolute_import, print_function + +import sys +from nose import SkipTest + +def test_import(): + if sys.version_info[0] >= 3: + raise SkipTest("plot_directive not ported to Python 3 (use the one from Matplotlib instead)") + import numpydoc.plot_directive + +# No tests at the moment... diff --git a/doc/sphinxext/numpy_ext/tests/test_traitsdoc.py b/doc/sphinxext/numpy_ext/tests/test_traitsdoc.py new file mode 100644 index 0000000000000..fe5078c49605b --- /dev/null +++ b/doc/sphinxext/numpy_ext/tests/test_traitsdoc.py @@ -0,0 +1,11 @@ +from __future__ import division, absolute_import, print_function + +import sys +from nose import SkipTest + +def test_import(): + if sys.version_info[0] >= 3: + raise SkipTest("traitsdoc not ported to Python3") + import numpydoc.traitsdoc + +# No tests at the moment... diff --git a/doc/sphinxext/numpy_ext/traitsdoc.py b/doc/sphinxext/numpy_ext/traitsdoc.py new file mode 100644 index 0000000000000..2468565a6ca1e --- /dev/null +++ b/doc/sphinxext/numpy_ext/traitsdoc.py @@ -0,0 +1,143 @@ +""" +========= +traitsdoc +========= + +Sphinx extension that handles docstrings in the Numpy standard format, [1] +and support Traits [2]. + +This extension can be used as a replacement for ``numpydoc`` when support +for Traits is required. + +.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard +.. [2] http://code.enthought.com/projects/traits/ + +""" +from __future__ import division, absolute_import, print_function + +import inspect +import os +import pydoc +import collections + +from . import docscrape +from . 
import docscrape_sphinx +from .docscrape_sphinx import SphinxClassDoc, SphinxFunctionDoc, SphinxDocString + +from . import numpydoc + +from . import comment_eater + +class SphinxTraitsDoc(SphinxClassDoc): + def __init__(self, cls, modulename='', func_doc=SphinxFunctionDoc): + if not inspect.isclass(cls): + raise ValueError("Initialise using a class. Got %r" % cls) + self._cls = cls + + if modulename and not modulename.endswith('.'): + modulename += '.' + self._mod = modulename + self._name = cls.__name__ + self._func_doc = func_doc + + docstring = pydoc.getdoc(cls) + docstring = docstring.split('\n') + + # De-indent paragraph + try: + indent = min(len(s) - len(s.lstrip()) for s in docstring + if s.strip()) + except ValueError: + indent = 0 + + for n,line in enumerate(docstring): + docstring[n] = docstring[n][indent:] + + self._doc = docscrape.Reader(docstring) + self._parsed_data = { + 'Signature': '', + 'Summary': '', + 'Description': [], + 'Extended Summary': [], + 'Parameters': [], + 'Returns': [], + 'Yields': [], + 'Raises': [], + 'Warns': [], + 'Other Parameters': [], + 'Traits': [], + 'Methods': [], + 'See Also': [], + 'Notes': [], + 'References': '', + 'Example': '', + 'Examples': '', + 'index': {} + } + + self._parse() + + def _str_summary(self): + return self['Summary'] + [''] + + def _str_extended_summary(self): + return self['Description'] + self['Extended Summary'] + [''] + + def __str__(self, indent=0, func_role="func"): + out = [] + out += self._str_signature() + out += self._str_index() + [''] + out += self._str_summary() + out += self._str_extended_summary() + for param_list in ('Parameters', 'Traits', 'Methods', + 'Returns', 'Yields', 'Raises'): + out += self._str_param_list(param_list) + out += self._str_see_also("obj") + out += self._str_section('Notes') + out += self._str_references() + out += self._str_section('Example') + out += self._str_section('Examples') + out = self._str_indent(out,indent) + return '\n'.join(out) + +def looks_like_issubclass(obj, classname): + """ Return True if the object has a class or superclass with the given class + name. + + Ignores old-style classes. + """ + t = obj + if t.__name__ == classname: + return True + for klass in t.__mro__: + if klass.__name__ == classname: + return True + return False + +def get_doc_object(obj, what=None, config=None): + if what is None: + if inspect.isclass(obj): + what = 'class' + elif inspect.ismodule(obj): + what = 'module' + elif isinstance(obj, collections.Callable): + what = 'function' + else: + what = 'object' + if what == 'class': + doc = SphinxTraitsDoc(obj, '', func_doc=SphinxFunctionDoc, config=config) + if looks_like_issubclass(obj, 'HasTraits'): + for name, trait, comment in comment_eater.get_class_traits(obj): + # Exclude private traits. + if not name.startswith('_'): + doc['Traits'].append((name, trait, comment.splitlines())) + return doc + elif what in ('function', 'method'): + return SphinxFunctionDoc(obj, '', config=config) + else: + return SphinxDocString(pydoc.getdoc(obj), config=config) + +def setup(app): + # init numpydoc + numpydoc.setup(app, get_doc_object) +
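The modules added above are ordinary Sphinx extensions, so a project vendoring them could enable, for example, the Traits-aware variant (as a replacement for plain numpydoc), the phantom-import machinery, and the plot directive from its own conf.py. The snippet below is only a hedged sketch: the sys.path manipulation, module paths, file name, and option values are assumptions for illustration and are not part of this patch.

# Hypothetical conf.py sketch (paths and values are assumptions, not in this patch)
import os
import sys

sys.path.insert(0, os.path.abspath('sphinxext'))  # make the numpy_ext package importable

extensions = [
    'numpy_ext.traitsdoc',       # numpydoc-style docstrings plus Traits support
    'numpy_ext.phantom_import',  # docstrings loaded from a Pydocweb XML dump
    'numpy_ext.plot_directive',  # the plot:: directive defined above
]

phantom_import_file = 'dump.xml'   # assumed file name; consumed by phantom_import's setup()
plot_include_source = True         # default for the include-source option of plot::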