From 894990a6084289e1dcc0a4f902041a9fd8ed92bd Mon Sep 17 00:00:00 2001
From: Andre Anjos <andre.dos.anjos@gmail.com>
Date: Mon, 23 Apr 2018 15:51:07 +0200
Subject: [PATCH] Fix all doc warnings
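
Parameter and return types in docstrings are now written with explicit
Sphinx roles (such as :py:class:`dict`) so that nitpicky documentation
builds can resolve every cross-reference; targets that cannot be
resolved are collected in the new doc/nitpick-exceptions.txt.

As a minimal sketch of the convention adopted throughout (the function
below is hypothetical and not part of this patch):

    def load(prefix, dataformat_cache=None):
        """Loads an object from the given prefix (illustration only).

        Parameters:

          prefix (str): Establishes the prefix of your installation.

          dataformat_cache (:py:class:`dict`, Optional): A dictionary
            mapping dataformat names to loaded dataformats.
        """
        # The body is irrelevant here; only the docstring style matters.
        return dataformat_cache or {}

doc/conf.py presumably reads doc/nitpick-exceptions.txt and appends its
entries to Sphinx's nitpick_ignore (an assumption; the exact wiring is
in the conf.py hunk).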

---
 beat/core/algorithm.py            |  38 ++++-----
 beat/core/database.py             |  12 +--
 beat/core/dataformat.py           |  38 ++++-----
 beat/core/dock.py                 |  25 +++---
 beat/core/execution/base.py       |  77 +++++++++---------
 beat/core/execution/docker.py     | 125 +++++++++++++++---------------
 beat/core/execution/local.py      |  97 +++++++++++------------
 beat/core/execution/remote.py     |  57 +++++++-------
 beat/core/execution/subprocess.py |  77 +++++++++---------
 beat/core/experiment.py           |  78 ++++++++++---------
 beat/core/hash.py                 |   2 +-
 beat/core/library.py              |  26 +++----
 beat/core/plotter.py              |  48 ++++++------
 beat/core/stats.py                |   2 +-
 beat/core/toolchain.py            |  41 +++++-----
 doc/api.rst                       |  83 ++++++++++++++++++++
 doc/conf.py                       |  17 +++-
 doc/dataformats.rst               |   2 +-
 doc/nitpick-exceptions.txt        |   7 ++
 19 files changed, 483 insertions(+), 369 deletions(-)
 create mode 100644 doc/api.rst
 create mode 100644 doc/nitpick-exceptions.txt

diff --git a/beat/core/algorithm.py b/beat/core/algorithm.py
index ac4d0086..d15c3bb8 100755
--- a/beat/core/algorithm.py
+++ b/beat/core/algorithm.py
@@ -54,31 +54,31 @@ class Algorithm(BackendAlgorithm):
     and output declaration, grouping, synchronization details, parameters and
     splittability). The actual algorithm is not directly treated by this class -
     it can, however, provide you with a loader for actually running the
-    algorithmic code (see :py:meth:`Algorithm.runner`).
+    algorithmic code (see :py:meth:`.runner`).
 
 
     Parameters:
 
       prefix (str): Establishes the prefix of your installation.
 
-      data (object, optional): The piece of data representing the algorithm. It
-        must validate against the schema defined for algorithms. If a string is
-        passed, it is supposed to be a valid path to an algorithm in the
-        designated prefix area. If a tuple is passed (or a list), then we
-        consider that the first element represents the algorithm declaration,
-        while the second, the code for the algorithm (either in its source format
-        or as a binary blob). If ``None`` is passed, loads our default prototype
-        for algorithms (source code will be in Python).
-
-      dataformat_cache (dict, optional): A dictionary mapping dataformat names to
-        loaded dataformats. This parameter is optional and, if passed, may
-        greatly speed-up algorithm loading times as dataformats that are already
-        loaded may be re-used.
-
-      library_cache (dict, optional): A dictionary mapping library names to
-        loaded libraries. This parameter is optional and, if passed, may greatly
-        speed-up library loading times as libraries that are already loaded may
-        be re-used.
+      data (:py:class:`object`, Optional): The piece of data representing the
+        algorithm. It must validate against the schema defined for algorithms.
+        If a string is passed, it is supposed to be a valid path to an
+        algorithm in the designated prefix area. If a tuple is passed (or a
+        list), then we consider that the first element represents the algorithm
+        declaration, while the second, the code for the algorithm (either in
+        its source format or as a binary blob). If ``None`` is passed, loads
+        our default prototype for algorithms (source code will be in Python).
+
+      dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping
+        dataformat names to loaded dataformats. This parameter is optional and,
+        if passed, may greatly speed-up algorithm loading times as dataformats
+        that are already loaded may be re-used.
+
+      library_cache (:py:class:`dict`, Optional): A dictionary mapping library
+        names to loaded libraries. This parameter is optional and, if passed,
+        may greatly speed-up library loading times as libraries that are
+        already loaded may be re-used.
 
 
     Attributes:
diff --git a/beat/core/database.py b/beat/core/database.py
index 396c4897..ce0a6db8 100755
--- a/beat/core/database.py
+++ b/beat/core/database.py
@@ -62,12 +62,12 @@ class Database(BackendDatabase):
         it is supposed to be a valid path to an database in the designated prefix
         area.
 
-      dataformat_cache (dict, optional): A dictionary mapping dataformat names
-        to loaded dataformats. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as dataformats that are already
-        loaded may be re-used. If you use this parameter, you must guarantee
-        that the cache is refreshed as appropriate in case the underlying
-        dataformats change.
+      dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping
+        dataformat names to loaded dataformats. This parameter is optional and,
+        if passed, may greatly speed-up database loading times as dataformats
+        that are already loaded may be re-used. If you use this parameter, you
+        must guarantee that the cache is refreshed as appropriate in case the
+        underlying dataformats change.
 
 
     Attributes:
diff --git a/beat/core/dataformat.py b/beat/core/dataformat.py
index 28ffc59d..96102b10 100755
--- a/beat/core/dataformat.py
+++ b/beat/core/dataformat.py
@@ -51,25 +51,25 @@ class DataFormat(BackendDataFormat):
 
       prefix (str): Establishes the prefix of your installation.
 
-      data (object, optional): The piece of data representing the data format. It
-        must validate against the schema defined for data formats. If a string is
-        passed, it is supposed to be a valid path to an data format in the
-        designated prefix area. If ``None`` is passed, loads our default
-        prototype for data formats.
-
-      parent (tuple, optional): The parent DataFormat for this format. If set to
-        ``None``, this means this dataformat is the first one on the hierarchy
-        tree. If set to a tuple, the contents are ``(format-instance,
-        field-name)``, which indicates the originating object that is this
-        object's parent and the name of the field on that object that points to
-        this one.
-
-      dataformat_cache (dict, optional): A dictionary mapping dataformat names to
-        loaded dataformats. This parameter is optional and, if passed, may
-        greatly speed-up data format loading times as dataformats that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying dataformats
-        change.
+      data (:py:class:`object`, Optional): The piece of data representing the
+        data format. It must validate against the schema defined for data
+        formats. If a string is passed, it is supposed to be a valid path to a
+        data format in the designated prefix area. If ``None`` is passed, loads
+        our default prototype for data formats.
+
+      parent (:py:class:`tuple`, Optional): The parent DataFormat for this
+        format. If set to ``None``, this means this dataformat is the first one
+        on the hierarchy tree. If set to a tuple, the contents are
+        ``(format-instance, field-name)``, which indicates the originating
+        object that is this object's parent and the name of the field on that
+        object that points to this one.
+
+      dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping
+        dataformat names to loaded dataformats. This parameter is optional and,
+        if passed, may greatly speed-up data format loading times as
+        dataformats that are already loaded may be re-used. If you use this
+        parameter, you must guarantee that the cache is refreshed as
+        appropriate in case the underlying dataformats change.
 
     Attributes:
 
diff --git a/beat/core/dock.py b/beat/core/dock.py
index 62556f87..71fc480f 100755
--- a/beat/core/dock.py
+++ b/beat/core/dock.py
@@ -135,9 +135,9 @@ class Host(object):
 
         Parameters:
 
-          raise_on_errors (bool, Optional): If we should raise an exception
-            (``RuntimeError``) in case installed environments override each other
-            and we can't know which to use.
+          raise_on_errors (:py:class:`bool`, Optional): If we should raise an
+            exception (:py:exc:`RuntimeError`) in case installed environments
+            override each other and we can't know which to use.
 
 
         Raises:
@@ -317,14 +317,14 @@ class Host(object):
 
           container (:py:class:`Container`): The container.
 
-          virtual_memory_in_megabytes (int, Optional): The maximum amount of memory
-            the user process can consume on the host. If not specified, a memory
-            limit is not set.
+          virtual_memory_in_megabytes (:py:class:`int`, Optional): The maximum
+            amount of memory the user process can consume on the host. If not
+            specified, a memory limit is not set.
 
-          max_cpu_percent (float, Optional): The maximum amount of CPU the user
-            process may consume on the host. The value ``100`` equals to using 100%
-            of a single core. If not specified, then a CPU limitation is not put in
-            place.
+          max_cpu_percent (:py:class:`float`, Optional): The maximum amount of
+            CPU the user process may consume on the host. The value ``100``
+            equals using 100% of a single core. If not specified, then a CPU
+            limitation is not put in place.
 
         """
 
@@ -420,8 +420,9 @@ class Host(object):
 
         Parameters:
 
-          timeout (float, Optional): A timeout in seconds to wait for the user
-            process to finish. If a timeout value is not given, waits forever.
+          timeout (:py:class:`float`, Optional): A timeout in seconds to wait
+            for the user process to finish. If a timeout value is not given,
+            waits forever.
         '''
         (status, stdout, stderr) = self._exec(['docker', 'wait', container.id],
                                               timeout=timeout)
diff --git a/beat/core/execution/base.py b/beat/core/execution/base.py
index 6b4e70e8..200ac5c3 100755
--- a/beat/core/execution/base.py
+++ b/beat/core/execution/base.py
@@ -59,36 +59,37 @@ class BaseExecutor(object):
         string is passed, it is supposed to be a fully qualified absolute path to
         a JSON file containing the block execution information.
 
-      cache (str, optional): If your cache is not located under
+      cache (:py:class:`str`, Optional): If your cache is not located under
         ``<prefix>/cache``, then specify a full path here. It will be used
         instead.
 
-      dataformat_cache (dict, optional): A dictionary mapping dataformat names to
-        loaded dataformats. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as dataformats that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying dataformats
-        change.
-
-      database_cache (dict, optional): A dictionary mapping database names to
-        loaded databases. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as databases that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying databases
-        change.
-
-      algorithm_cache (dict, optional): A dictionary mapping algorithm names to
-        loaded algorithms. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as algorithms that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying algorithms
-        change.
-
-      library_cache (dict, optional): A dictionary mapping library names to
-        loaded libraries. This parameter is optional and, if passed, may greatly
-        speed-up library loading times as libraries that are already loaded may
-        be re-used. If you use this parameter, you must guarantee that the cache
-        is refreshed as appropriate in case the underlying libraries change.
+      dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping
+        dataformat names to loaded dataformats. This parameter is optional and,
+        if passed, may greatly speed-up database loading times as dataformats
+        that are already loaded may be re-used. If you use this parameter, you
+        must guarantee that the cache is refreshed as appropriate in case the
+        underlying dataformats change.
+
+      database_cache (:py:class:`dict`, Optional): A dictionary mapping
+        database names to loaded databases. This parameter is optional and, if
+        passed, may greatly speed-up database loading times as databases that
+        are already loaded may be re-used. If you use this parameter, you must
+        guarantee that the cache is refreshed as appropriate in case the
+        underlying databases change.
+
+      algorithm_cache (:py:class:`dict`, Optional): A dictionary mapping
+        algorithm names to loaded algorithms. This parameter is optional and,
+        if passed, may greatly speed-up database loading times as algorithms
+        that are already loaded may be re-used. If you use this parameter, you
+        must guarantee that the cache is refreshed as appropriate in case the
+        underlying algorithms change.
+
+      library_cache (:py:class:`dict`, Optional): A dictionary mapping library
+        names to loaded libraries. This parameter is optional and, if passed,
+        may greatly speed-up library loading times as libraries that are
+        already loaded may be re-used. If you use this parameter, you must
+        guarantee that the cache is refreshed as appropriate in case the
+        underlying libraries change.
 
 
     Attributes:
@@ -105,7 +106,7 @@ class BaseExecutor(object):
         algorithm to be run.
 
       databases (dict): A dictionary in which keys are strings with database
-        names and values are :py:class:`database.Database`, representing the
+        names and values are :py:class:`.database.Database`, representing the
         databases required for running this block. The dictionary may be empty
         in case all inputs are taken from the file cache.
 
@@ -277,20 +278,20 @@ class BaseExecutor(object):
 
         Parameters:
 
-          virtual_memory_in_megabytes (int, Optional): The amount of virtual memory
-            (in Megabytes) available for the job. If set to zero, no limit will be
-            applied.
+          virtual_memory_in_megabytes (:py:class:`int`, Optional): The amount
+            of virtual memory (in Megabytes) available for the job. If set to
+            zero, no limit will be applied.
 
-          max_cpu_percent (int, Optional): The maximum amount of CPU usage allowed
-            in a system. This number must be an integer number between 0 and
-            ``100*number_of_cores`` in your system. For instance, if your system
-            has 2 cores, this number can go between 0 and 200. If it is <= 0, then
-            we don't track CPU usage.
+          max_cpu_percent (:py:class:`int`, Optional): The maximum amount of
+            CPU usage allowed in a system. This number must be an integer
+            number between 0 and ``100*number_of_cores`` in your system. For
+            instance, if your system has 2 cores, this number can go between 0
+            and 200. If it is <= 0, then we don't track CPU usage.
 
           timeout_in_minutes (int): The number of minutes to wait for the user
             process to execute. After this amount of time, the user process is
-            killed with :py:attr:`signal.SIGKILL`. If set to zero, no timeout will
-            be applied.
+            killed with ``signal.SIGKILL``. If set to zero, no timeout will be
+            applied.
 
         Returns:
 
diff --git a/beat/core/execution/docker.py b/beat/core/execution/docker.py
index 15af38f3..e1d8dcc6 100755
--- a/beat/core/execution/docker.py
+++ b/beat/core/execution/docker.py
@@ -50,9 +50,9 @@ class DockerExecutor(RemoteExecutor):
 
     Parameters:
 
-      host (:py:class:Host): A configured docker host that will execute the
-        user process. If the host does not have access to the required
-        environment, an exception will be raised.
+      host (:py:class:`.dock.Host`): A configured docker host that will
+        execute the user process. If the host does not have access to the
+        required environment, an exception will be raised.
 
       prefix (str): Establishes the prefix of your installation.
 
@@ -61,36 +61,37 @@ class DockerExecutor(RemoteExecutor):
         string is passed, it is supposed to be a fully qualified absolute path to
         a JSON file containing the block execution information.
 
-      cache (str, optional): If your cache is not located under
+      cache (:py:class:`str`, Optional): If your cache is not located under
         ``<prefix>/cache``, then specify a full path here. It will be used
         instead.
 
-      dataformat_cache (dict, optional): A dictionary mapping dataformat names to
-        loaded dataformats. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as dataformats that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying dataformats
-        change.
-
-      database_cache (dict, optional): A dictionary mapping database names to
-        loaded databases. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as databases that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying databases
-        change.
-
-      algorithm_cache (dict, optional): A dictionary mapping algorithm names to
-        loaded algorithms. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as algorithms that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying algorithms
-        change.
-
-      library_cache (dict, optional): A dictionary mapping library names to
-        loaded libraries. This parameter is optional and, if passed, may greatly
-        speed-up library loading times as libraries that are already loaded may
-        be re-used. If you use this parameter, you must guarantee that the cache
-        is refreshed as appropriate in case the underlying libraries change.
+      dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping
+        dataformat names to loaded dataformats. This parameter is optional and,
+        if passed, may greatly speed-up database loading times as dataformats
+        that are already loaded may be re-used. If you use this parameter, you
+        must guarantee that the cache is refreshed as appropriate in case the
+        underlying dataformats change.
+
+      database_cache (:py:class:`dict`, Optional): A dictionary mapping
+        database names to loaded databases. This parameter is optional and, if
+        passed, may greatly speed-up database loading times as databases that
+        are already loaded may be re-used. If you use this parameter, you must
+        guarantee that the cache is refreshed as appropriate in case the
+        underlying databases change.
+
+      algorithm_cache (:py:class:`dict`, Optional): A dictionary mapping
+        algorithm names to loaded algorithms. This parameter is optional and,
+        if passed, may greatly speed-up database loading times as algorithms
+        that are already loaded may be re-used. If you use this parameter, you
+        must guarantee that the cache is refreshed as appropriate in case the
+        underlying algorithms change.
+
+      library_cache (:py:class:`dict`, Optional): A dictionary mapping library
+        names to loaded libraries. This parameter is optional and, if passed,
+        may greatly speed-up library loading times as libraries that are
+        already loaded may be re-used. If you use this parameter, you must
+        guarantee that the cache is refreshed as appropriate in case the
+        underlying libraries change.
 
 
     Attributes:
@@ -103,11 +104,11 @@ class DockerExecutor(RemoteExecutor):
       data (dict): The original data for this executor, as loaded by our JSON
         decoder.
 
-      algorithm (beat.core.algorithm.Algorithm): An object representing the
-        algorithm to be run.
+      algorithm (.algorithm.Algorithm): An object representing the algorithm to
+        be run.
 
       databases (dict): A dictionary in which keys are strings with database
-        names and values are :py:class:`database.Database`, representing the
+        names and values are :py:class:`.database.Database`, representing the
         databases required for running this block. The dictionary may be empty
         in case all inputs are taken from the file cache.
 
@@ -116,11 +117,11 @@ class DockerExecutor(RemoteExecutor):
         for that particular combination of details. The dictionary may be empty
         in case all inputs are taken from the file cache.
 
-      input_list (beat.core.inputs.InputList): A list of inputs that will be
-        served to the algorithm.
+      input_list (beat.backend.python.inputs.InputList): A list of inputs that
+        will be served to the algorithm.
 
-      output_list (beat.core.outputs.OutputList): A list of outputs that the
-        algorithm will produce.
+      output_list (beat.backend.python.outputs.OutputList): A list of outputs
+        that the algorithm will produce.
 
       data_sources (list): A list with all data-sources created by our execution
         loader.
@@ -152,40 +153,42 @@ class DockerExecutor(RemoteExecutor):
         The execution interface follows the backend API as described in our
         documentation.
 
-        We use green subprocesses this implementation. Each co-process is linked
-        to us via 2 uni-directional pipes which work as datain and dataout
-        end-points. The parent process (i.e. the current one) establishes the
-        connection to the child and then can pass/receive commands, data and logs.
+        We use green subprocesses in this implementation. Each co-process is
+        linked to us via 2 uni-directional pipes which work as datain and
+        dataout end-points. The parent process (i.e. the current one)
+        establishes the connection to the child and then can pass/receive
+        commands, data and logs.
 
-        Usage of the data pipes (datain, dataout) is **synchronous** - you send a
-        command and block for an answer. The co-process is normally controlled by
-        the current process, except for data requests, which are user-code driven.
-        The nature of our problem does not require an *asynchronous* implementation
-        which, in turn, would require a much more complex set of dependencies (on
-        asyncio or Twisted for example).
+        Usage of the data pipes (datain, dataout) is **synchronous** - you send
+        a command and block for an answer. The co-process is normally
+        controlled by the current process, except for data requests, which are
+        user-code driven.  The nature of our problem does not require an
+        *asynchronous* implementation which, in turn, would require a much more
+        complex set of dependencies (on asyncio or Twisted for example).
 
 
         Parameters:
 
-          virtual_memory_in_megabytes (int, Optional): The amount of virtual memory
-            (in Megabytes) available for the job. If set to zero, no limit will be
-            applied.
+          virtual_memory_in_megabytes (:py:class:`int`, Optional): The amount
+            of virtual memory (in Megabytes) available for the job. If set to
+            zero, no limit will be applied.
 
-          max_cpu_percent (int, Optional): The maximum amount of CPU usage allowed
-            in a system. This number must be an integer number between 0 and
-            ``100*number_of_cores`` in your system. For instance, if your system
-            has 2 cores, this number can go between 0 and 200. If it is <= 0, then
-            we don't track CPU usage.
+          max_cpu_percent (:py:class:`int`, Optional): The maximum amount of
+            CPU usage allowed in a system. This number must be an integer
+            number between 0 and ``100*number_of_cores`` in your system. For
+            instance, if your system has 2 cores, this number can go between 0
+            and 200. If it is <= 0, then we don't track CPU usage.
+
+          timeout_in_minutes (:py:class:`int`, Optional): The number of minutes
+            to wait for the user process to execute. After this amount of time,
+            the user process is killed with ``signal.SIGKILL``. If set to zero,
+            no timeout will be applied.
 
-          timeout_in_minutes (int): The number of minutes to wait for the user
-            process to execute. After this amount of time, the user process is
-            killed with :py:attr:`signal.SIGKILL`. If set to zero, no timeout will
-            be applied.
 
         Returns:
 
-          dict: A dictionary which is JSON formattable containing the summary of
-            this block execution.
+          dict: A JSON-formattable dictionary containing the summary of this
+          block execution.
 
         """
 
diff --git a/beat/core/execution/local.py b/beat/core/execution/local.py
index 6bc35648..3fe1435b 100755
--- a/beat/core/execution/local.py
+++ b/beat/core/execution/local.py
@@ -70,41 +70,42 @@ class LocalExecutor(BaseExecutor):
         string is passed, it is supposed to be a fully qualified absolute path to
         a JSON file containing the block execution information.
 
-      cache (str, optional): If your cache is not located under
+      cache (:py:class:`str`, Optional): If your cache is not located under
         ``<prefix>/cache``, then specify a full path here. It will be used
         instead.
 
-      dataformat_cache (dict, optional): A dictionary mapping dataformat names to
-        loaded dataformats. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as dataformats that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying dataformats
-        change.
-
-      database_cache (dict, optional): A dictionary mapping database names to
-        loaded databases. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as databases that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying databases
-        change.
-
-      algorithm_cache (dict, optional): A dictionary mapping algorithm names to
-        loaded algorithms. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as algorithms that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying algorithms
-        change.
-
-      library_cache (dict, optional): A dictionary mapping library names to
-        loaded libraries. This parameter is optional and, if passed, may greatly
-        speed-up library loading times as libraries that are already loaded may
-        be re-used. If you use this parameter, you must guarantee that the cache
-        is refreshed as appropriate in case the underlying libraries change.
-
-      custom_root_folders (dict, optional): A dictionary where the keys are database
-        identifiers (`<db_name>/<version>`) and the values are paths to the
-        given database's files. These values will override the value found
-        in the database's metadata.
+      dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping
+        dataformat names to loaded dataformats. This parameter is optional and,
+        if passed, may greatly speed-up database loading times as dataformats
+        that are already loaded may be re-used. If you use this parameter, you
+        must guarantee that the cache is refreshed as appropriate in case the
+        underlying dataformats change.
+
+      database_cache (:py:class:`dict`, Optional): A dictionary mapping
+        database names to loaded databases. This parameter is optional and, if
+        passed, may greatly speed-up database loading times as databases that
+        are already loaded may be re-used. If you use this parameter, you must
+        guarantee that the cache is refreshed as appropriate in case the
+        underlying databases change.
+
+      algorithm_cache (:py:class:`dict`, Optional): A dictionary mapping
+        algorithm names to loaded algorithms. This parameter is optional and,
+        if passed, may greatly speed-up database loading times as algorithms
+        that are already loaded may be re-used. If you use this parameter, you
+        must guarantee that the cache is refreshed as appropriate in case the
+        underlying algorithms change.
+
+      library_cache (:py:class:`dict`, Optional): A dictionary mapping library
+        names to loaded libraries. This parameter is optional and, if passed,
+        may greatly speed-up library loading times as libraries that are
+        already loaded may be re-used. If you use this parameter, you must
+        guarantee that the cache is refreshed as appropriate in case the
+        underlying libraries change.
+
+      custom_root_folders (:py:class:`dict`, Optional): A dictionary where the
+        keys are database identifiers (``<db_name>/<version>``) and the values
+        are paths to the given database's files. These values will override the
+        value found in the database's metadata.
 
 
     Attributes:
@@ -117,11 +118,11 @@ class LocalExecutor(BaseExecutor):
       data (dict): The original data for this executor, as loaded by our JSON
         decoder.
 
-      algorithm (beat.core.algorithm.Algorithm): An object representing the
+      algorithm (.algorithm.Algorithm): An object representing the
         algorithm to be run.
 
       databases (dict): A dictionary in which keys are strings with database
-        names and values are :py:class:`database.Database`, representing the
+        names and values are :py:class:`.database.Database`, representing the
         databases required for running this block. The dictionary may be empty
         in case all inputs are taken from the file cache.
 
@@ -130,11 +131,11 @@ class LocalExecutor(BaseExecutor):
         for that particular combination of details. The dictionary may be empty
         in case all inputs are taken from the file cache.
 
-      input_list (beat.core.inputs.InputList): A list of inputs that will be
-        served to the algorithm.
+      input_list (beat.backend.python.inputs.InputList): A list of inputs that
+        will be served to the algorithm.
 
-      output_list (beat.core.outputs.OutputList): A list of outputs that the
-        algorithm will produce.
+      output_list (beat.backend.python.outputs.OutputList): A list of outputs
+        that the algorithm will produce.
 
       data_sources (list): A list with all data-sources created by our execution
         loader.
@@ -203,20 +204,20 @@ class LocalExecutor(BaseExecutor):
 
         Parameters:
 
-          virtual_memory_in_megabytes (int, Optional): The amount of virtual memory
-            (in Megabytes) available for the job. If set to zero, no limit will be
-            applied.
+          virtual_memory_in_megabytes (:py:class:`int`, Optional): The amount
+            of virtual memory (in Megabytes) available for the job. If set to
+            zero, no limit will be applied.
 
-          max_cpu_percent (int, Optional): The maximum amount of CPU usage allowed
-            in a system. This number must be an integer number between 0 and
-            ``100*number_of_cores`` in your system. For instance, if your system
-            has 2 cores, this number can go between 0 and 200. If it is <= 0, then
-            we don't track CPU usage.
+          max_cpu_percent (:py:class:`int`, Optional): The maximum amount of
+            CPU usage allowed in a system. This number must be an integer
+            number between 0 and ``100*number_of_cores`` in your system. For
+            instance, if your system has 2 cores, this number can go between 0
+            and 200. If it is <= 0, then we don't track CPU usage.
 
           timeout_in_minutes (int): The number of minutes to wait for the user
             process to execute. After this amount of time, the user process is
-            killed with :py:attr:`signal.SIGKILL`. If set to zero, no timeout will
-            be applied.
+            killed with ``signal.SIGKILL``. If set to zero, no timeout will be
+            applied.
 
         Returns:
 
diff --git a/beat/core/execution/remote.py b/beat/core/execution/remote.py
index 9276a85a..39839c6f 100755
--- a/beat/core/execution/remote.py
+++ b/beat/core/execution/remote.py
@@ -53,36 +53,37 @@ class RemoteExecutor(BaseExecutor):
         string is passed, it is supposed to be a fully qualified absolute path to
         a JSON file containing the block execution information.
 
-      cache (str, optional): If your cache is not located under
+      cache (:py:class:`str`, Optional): If your cache is not located under
         ``<prefix>/cache``, then specify a full path here. It will be used
         instead.
 
-      dataformat_cache (dict, optional): A dictionary mapping dataformat names to
-        loaded dataformats. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as dataformats that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying dataformats
-        change.
-
-      database_cache (dict, optional): A dictionary mapping database names to
-        loaded databases. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as databases that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying databases
-        change.
-
-      algorithm_cache (dict, optional): A dictionary mapping algorithm names to
-        loaded algorithms. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as algorithms that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying algorithms
-        change.
-
-      library_cache (dict, optional): A dictionary mapping library names to
-        loaded libraries. This parameter is optional and, if passed, may greatly
-        speed-up library loading times as libraries that are already loaded may
-        be re-used. If you use this parameter, you must guarantee that the cache
-        is refreshed as appropriate in case the underlying libraries change.
+      dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping
+        dataformat names to loaded dataformats. This parameter is optional and,
+        if passed, may greatly speed-up database loading times as dataformats
+        that are already loaded may be re-used. If you use this parameter, you
+        must guarantee that the cache is refreshed as appropriate in case the
+        underlying dataformats change.
+
+      database_cache (:py:class:`dict`, Optional): A dictionary mapping
+        database names to loaded databases. This parameter is optional and, if
+        passed, may greatly speed-up database loading times as databases that
+        are already loaded may be re-used. If you use this parameter, you must
+        guarantee that the cache is refreshed as appropriate in case the
+        underlying databases change.
+
+      algorithm_cache (:py:class:`dict`, Optional): A dictionary mapping
+        algorithm names to loaded algorithms. This parameter is optional and,
+        if passed, may greatly speed-up database loading times as algorithms
+        that are already loaded may be re-used. If you use this parameter, you
+        must guarantee that the cache is refreshed as appropriate in case the
+        underlying algorithms change.
+
+      library_cache (:py:class:`dict`, Optional): A dictionary mapping library
+        names to loaded libraries. This parameter is optional and, if passed,
+        may greatly speed-up library loading times as libraries that are
+        already loaded may be re-used. If you use this parameter, you must
+        guarantee that the cache is refreshed as appropriate in case the
+        underlying libraries change.
 
 
     Attributes:
@@ -99,7 +100,7 @@ class RemoteExecutor(BaseExecutor):
         algorithm to be run.
 
       databases (dict): A dictionary in which keys are strings with database
-        names and values are :py:class:`database.Database`, representing the
+        names and values are :py:class:`.database.Database`, representing the
         databases required for running this block. The dictionary may be empty
         in case all inputs are taken from the file cache.
 
diff --git a/beat/core/execution/subprocess.py b/beat/core/execution/subprocess.py
index 96b4bdbb..10692c9e 100755
--- a/beat/core/execution/subprocess.py
+++ b/beat/core/execution/subprocess.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+
 # vim: set fileencoding=utf-8 :
 
 ###############################################################################
@@ -77,36 +77,37 @@ class SubprocessExecutor(RemoteExecutor):
         string is passed, it is supposed to be a fully qualified absolute path to
         a JSON file containing the block execution information.
 
-      cache (str, optional): If your cache is not located under
+      cache (:py:class:`str`, Optional): If your cache is not located under
         ``<prefix>/cache``, then specify a full path here. It will be used
         instead.
 
-      dataformat_cache (dict, optional): A dictionary mapping dataformat names to
-        loaded dataformats. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as dataformats that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying dataformats
-        change.
-
-      database_cache (dict, optional): A dictionary mapping database names to
-        loaded databases. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as databases that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying databases
-        change.
-
-      algorithm_cache (dict, optional): A dictionary mapping algorithm names to
-        loaded algorithms. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as algorithms that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying algorithms
-        change.
-
-      library_cache (dict, optional): A dictionary mapping library names to
-        loaded libraries. This parameter is optional and, if passed, may greatly
-        speed-up library loading times as libraries that are already loaded may
-        be re-used. If you use this parameter, you must guarantee that the cache
-        is refreshed as appropriate in case the underlying libraries change.
+      dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping
+        dataformat names to loaded dataformats. This parameter is optional and,
+        if passed, may greatly speed-up database loading times as dataformats
+        that are already loaded may be re-used. If you use this parameter, you
+        must guarantee that the cache is refreshed as appropriate in case the
+        underlying dataformats change.
+
+      database_cache (:py:class:`dict`, Optional): A dictionary mapping
+        database names to loaded databases. This parameter is optional and, if
+        passed, may greatly speed-up database loading times as databases that
+        are already loaded may be re-used. If you use this parameter, you must
+        guarantee that the cache is refreshed as appropriate in case the
+        underlying databases change.
+
+      algorithm_cache (:py:class:`dict`, Optional): A dictionary mapping
+        algorithm names to loaded algorithms. This parameter is optional and,
+        if passed, may greatly speed-up database loading times as algorithms
+        that are already loaded may be re-used. If you use this parameter, you
+        must guarantee that the cache is refreshed as appropriate in case the
+        underlying algorithms change.
+
+      library_cache (:py:class:`dict`, Optional): A dictionary mapping library
+        names to loaded libraries. This parameter is optional and, if passed,
+        may greatly speed-up library loading times as libraries that are
+        already loaded may be re-used. If you use this parameter, you must
+        guarantee that the cache is refreshed as appropriate in case the
+        underlying libraries change.
 
 
     Attributes:
@@ -123,7 +124,7 @@ class SubprocessExecutor(RemoteExecutor):
         algorithm to be run.
 
       databases (dict): A dictionary in which keys are strings with database
-        names and values are :py:class:`database.Database`, representing the
+        names and values are :py:class:`.database.Database`, representing the
         databases required for running this block. The dictionary may be empty
         in case all inputs are taken from the file cache.
 
@@ -180,19 +181,19 @@ class SubprocessExecutor(RemoteExecutor):
 
         Parameters:
 
-          virtual_memory_in_megabytes (int, Optional): The amount of virtual memory
-            (in Megabytes) available for the job. If set to zero, no limit will be
-            applied.
+          virtual_memory_in_megabytes (:py:class:`int`, Optional): The amount
+            of virtual memory (in Megabytes) available for the job. If set to
+            zero, no limit will be applied.
 
-          max_cpu_percent (int, Optional): The maximum amount of CPU usage allowed
-            in a system. This number must be an integer number between 0 and
-            ``100*number_of_cores`` in your system. For instance, if your system
-            has 2 cores, this number can go between 0 and 200. If it is <= 0, then
-            we don't track CPU usage.
+          max_cpu_percent (:py:class:`int`, Optional): The maximum amount of
+            CPU usage allowed in a system. This number must be an integer
+            number between 0 and ``100*number_of_cores`` in your system. For
+            instance, if your system has 2 cores, this number can go between 0
+            and 200. If it is <= 0, then we don't track CPU usage.
 
           timeout_in_minutes (int): The number of minutes to wait for the user
             process to execute. After this amount of time, the user process is
-            killed with :py:attr:`signal.SIGKILL`. If set to zero, no timeout will
+            killed with ``signal.SIGKILL``. If set to zero, no timeout will
             be applied.
 
         Returns:
diff --git a/beat/core/experiment.py b/beat/core/experiment.py
index 8b45a0cc..9896697f 100755
--- a/beat/core/experiment.py
+++ b/beat/core/experiment.py
@@ -86,42 +86,43 @@ class Experiment(object):
 
       prefix (str): Establishes the prefix of your installation.
 
-      data (object, optional): The piece of data representing the experiment. It
-        must validate against the schema defined for toolchains. If a string is
-        passed, it is supposed to be a valid path to an experiment in the
-        designated prefix area. If ``None`` is passed, loads our default
-        prototype for toolchains. If a tuple is passed (or a list), then we
-        consider that the first element represents the experiment, while the
-        second, the toolchain definition. The toolchain bit can be defined as a
-        dictionary or as a string (pointing to a valid path in the designated
-        prefix area).
-
-      dataformat_cache (dict, optional): A dictionary mapping dataformat names to
-        loaded dataformats. This parameter is optional and, if passed, may
-        greatly speed-up experiment loading times as dataformats that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying dataformats
-        change.
-
-      database_cache (dict, optional): A dictionary mapping database names to
-        loaded databases. This parameter is optional and, if passed, may
-        greatly speed-up experiment loading times as databases that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying databases
-        change.
-
-      algorithm_cache (dict, optional): A dictionary mapping algorithm names to
-        loaded algorithms. This parameter is optional and, if passed, may
-        greatly speed-up experiment loading times as algorithms that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying algorithms
-        change.
-
-      library_cache (dict, optional): A dictionary mapping library names to
-        loaded libraries. This parameter is optional and, if passed, may greatly
-        speed-up library loading times as libraries that are already loaded may
-        be re-used. If you use this parameter, you must guarantee that the cache
-        is refreshed as appropriate in case the underlying libraries change.
+      data (:py:class:`object`, Optional): The piece of data representing the
+        experiment. It must validate against the schema defined for toolchains.
+        If a string is passed, it is supposed to be a valid path to an
+        experiment in the designated prefix area. If ``None`` is passed, loads
+        our default prototype for toolchains. If a tuple is passed (or a list),
+        then we consider that the first element represents the experiment,
+        while the second, the toolchain definition. The toolchain bit can be
+        defined as a dictionary or as a string (pointing to a valid path in the
+        designated prefix area).
+
+      dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping
+        dataformat names to loaded dataformats. This parameter is optional and,
+        if passed, may greatly speed-up experiment loading times as dataformats
+        that are already loaded may be re-used. If you use this parameter, you
+        must guarantee that the cache is refreshed as appropriate in case the
+        underlying dataformats change.
+
+      database_cache (:py:class:`dict`, Optional): A dictionary mapping
+        database names to loaded databases. This parameter is optional and, if
+        passed, may greatly speed-up experiment loading times as databases that
+        are already loaded may be re-used. If you use this parameter, you must
+        guarantee that the cache is refreshed as appropriate in case the
+        underlying databases change.
+
+      algorithm_cache (:py:class:`dict`, Optional): A dictionary mapping
+        algorithm names to loaded algorithms. This parameter is optional and,
+        if passed, may greatly speed-up experiment loading times as algorithms
+        that are already loaded may be re-used. If you use this parameter, you
+        must guarantee that the cache is refreshed as appropriate in case the
+        underlying algorithms change.
+
+      library_cache (:py:class:`dict`, Optional): A dictionary mapping library
+        names to loaded libraries. This parameter is optional and, if passed,
+        may greatly speed-up library loading times as libraries that are
+        already loaded may be re-used. If you use this parameter, you must
+        guarantee that the cache is refreshed as appropriate in case the
+        underlying libraries change.
 
 
     Attributes:
@@ -1051,8 +1052,9 @@ class Experiment(object):
 
         Parameters:
 
-          storage (Storage, optional): If you pass a new storage, then this object
-            will be written to that storage point rather than its default.
+          storage (:py:class:`.Storage`, Optional): If you pass a new storage,
+            then this object will be written to that storage point rather than
+            its default.
 
         """
 
diff --git a/beat/core/hash.py b/beat/core/hash.py
index 65b49632..07d4f674 100755
--- a/beat/core/hash.py
+++ b/beat/core/hash.py
@@ -83,7 +83,7 @@ def hashAnalyzer(analyzer_name, algorithm_name, algorithm_hash,
 
 
 def hashJSONStr(contents, description):
-    """Hashes the JSON string contents using :py:func:`hashlib.sha256`
+    """Hashes the JSON string contents using ``hashlib.sha256``
 
     Excludes description changes
     """
diff --git a/beat/core/library.py b/beat/core/library.py
index 24591be2..1e9f2e59 100755
--- a/beat/core/library.py
+++ b/beat/core/library.py
@@ -56,19 +56,19 @@ class Library(BackendLibrary):
 
       prefix (str): Establishes the prefix of your installation.
 
-      data (object, optional): The piece of data representing the library. It
-        must validate against the schema defined for libraries. If a string is
-        passed, it is supposed to be a valid path to an library in the designated
-        prefix area. If a tuple is passed (or a list), then we consider that the
-        first element represents the library declaration, while the second, the
-        code for the library (either in its source format or as a binary blob).
-        If ``None`` is passed, loads our default prototype for libraries (source
-        code will be in Python).
-
-      library_cache (dict, optional): A dictionary mapping library names to
-        loaded libraries. This parameter is optional and, if passed, may greatly
-        speed-up library loading times as libraries that are already loaded may
-        be re-used.
+      data (:py:class:`object`, Optional): The piece of data representing the
+        library. It must validate against the schema defined for libraries. If
+        a string is passed, it is supposed to be a valid path to a library in
+        the designated prefix area. If a tuple is passed (or a list), then we
+        consider that the first element represents the library declaration,
+        while the second, the code for the library (either in its source format
+        or as a binary blob).  If ``None`` is passed, loads our default
+        prototype for libraries (source code will be in Python).
+
+      library_cache (:py:class:`dict`, Optional): A dictionary mapping library
+        names to loaded libraries. This parameter is optional and, if passed,
+        may greatly speed-up library loading times as libraries that are
+        already loaded may be re-used.
 
 
     Attributes:
diff --git a/beat/core/plotter.py b/beat/core/plotter.py
index d3efa5bc..134bd364 100755
--- a/beat/core/plotter.py
+++ b/beat/core/plotter.py
@@ -112,24 +112,24 @@ class Plotter(object):
 
       prefix (str): Establishes the prefix of your installation.
 
-      data (object, optional): The piece of data representing the plotter. It
-        must validate against the schema defined for plotters. If a string is
-        passed, it is supposed to be a valid path to a plotter in the
+      data (:py:class:`object`, Optional): The piece of data representing the
+        plotter. It must validate against the schema defined for plotters. If a
+        string is passed, it is supposed to be a valid path to a plotter in the
         designated prefix area. If a tuple is passed (or a list), then we
         consider that the first element represents the plotter declaration,
         while the second, the code for the plotter (either in its source format
-        or as a binary blob). If ``None`` is passed, loads our default prototype
-        for plotters (source code will be in Python).
+        or as a binary blob). If ``None`` is passed, loads our default
+        prototype for plotters (source code will be in Python).
 
-      dataformat_cache (dict, optional): A dictionary mapping dataformat names to
-        loaded dataformats. This parameter is optional and, if passed, may
-        greatly speed-up algorithm loading times as dataformats that are already
-        loaded may be re-used.
+      dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping
+        dataformat names to loaded dataformats. This parameter is optional and,
+        if passed, may greatly speed-up algorithm loading times as dataformats
+        that are already loaded may be re-used.
 
-      library_cache (dict, optional): A dictionary mapping library names to
-        loaded libraries. This parameter is optional and, if passed, may greatly
-        speed-up library loading times as libraries that are already loaded may
-        be re-used.
+      library_cache (:py:class:`dict`, Optional): A dictionary mapping library
+        names to loaded libraries. This parameter is optional and, if passed,
+        may greatly speed-up library loading times as libraries that are
+        already loaded may be re-used.
 
 
     Attributes:
@@ -144,9 +144,8 @@ class Plotter(object):
       storage (object): A simple object that provides information about file
         paths for this algorithm
 
-      dataformat (obj): An object of type
-        :py:class:`beat.core.dataformat.DataFormat` that represents the
-        dataformat to which this plotter is applicable.
+      dataformat (obj): An object of type :py:class:`.dataformat.DataFormat`
+        that represents the dataformat to which this plotter is applicable.
 
       libraries (dict): A mapping object defining other libraries this plotter
         needs to load so it can work properly.
@@ -366,14 +365,16 @@ class Plotter(object):
 
           klass (str): The name of the class to load the runnable algorithm from
 
-          exc (class): If passed, must be a valid exception class that will be
-            used to report errors in the read-out of this plotter's code.
+          exc (:std:term:`class`): If passed, must be a valid exception class
+            that will be used to report errors in the read-out of this
+            plotter's code.
 
         Returns:
 
-          :py:class:`beat.core.algorithm.Runner`: An instance of the algorithm,
-            which will be constructed, but not setup.  You **must** set it up
-            before using the ``process`` method.
+          :py:class:`beat.backend.python.algorithm.Runner`: An instance of the
+            algorithm, which will be constructed, but not set up.  You **must**
+            set it up before using the ``process`` method.
+
         """
 
         if not self._name:
@@ -450,8 +451,9 @@ class Plotter(object):
 
         Parameters:
 
-          storage (Storage, optional): If you pass a new storage, then this object
-            will be written to that storage point rather than its default.
+          storage (:py:class:`.Storage`, Optional): If you pass a new storage,
+            then this object will be written to that storage point rather than
+            its default.
 
         """
 
diff --git a/beat/core/stats.py b/beat/core/stats.py
index 1f4af9a1..0ae24a34 100755
--- a/beat/core/stats.py
+++ b/beat/core/stats.py
@@ -47,7 +47,7 @@ class Statistics(object):
 
     Parameters:
 
-      data (object, optional): The piece of data representing the
+      data (:py:class:`object`, Optional): The piece of data representing the
         statistics the be read, it must validate against our pre-defined
         execution schema. If the input is ``None`` or empty, then start a new
         statistics from scratch.
diff --git a/beat/core/toolchain.py b/beat/core/toolchain.py
index 87553192..c322931d 100755
--- a/beat/core/toolchain.py
+++ b/beat/core/toolchain.py
@@ -71,11 +71,11 @@ class Toolchain(object):
 
       prefix (str): Establishes the prefix of your installation.
 
-      data (object, optional): The piece of data representing the toolchain.
-        It must validate against the schema defined for toolchains. If a string
-        is passed, it is supposed to be a valid path to an toolchain in the
-        designated prefix area. If ``None`` is passed, loads our default
-        prototype for toolchains.
+      data (:py:class:`object`, Optional): The piece of data representing the
+        toolchain.  It must validate against the schema defined for toolchains.
+        If a string is passed, it is supposed to be a valid path to a
+        toolchain in the designated prefix area. If ``None`` is passed, loads
+        our default prototype for toolchains.
 
 
     Attributes:
@@ -362,20 +362,22 @@ class Toolchain(object):
           title (str): A title for the generated drawing. If ``None`` is given,
             then prints out the toolchain name.
 
-          label_callback (function): A python function that is called back each
-            time a label needs to be inserted into a block. The prototype of this
-            function is ``label_callback(type, name)``. ``type`` may be one of
-            ``dataset``, ``block`` or ``analyzer``. This callback is used by the
-            experiment class to complement diagram information before plotting.
+          label_callback (:std:term:`function`): A Python function that is
+            called back each time a label needs to be inserted into a block.
+            The prototype of this function is ``label_callback(type, name)``.
+            ``type`` may be one of ``dataset``, ``block`` or ``analyzer``. This
+            callback is used by the experiment class to complement diagram
+            information before plotting.
 
-          edge_callback (function): A python function that is called back each
-            time an edge needs to be inserted into the graph. The prototype of this
-            function is ``edge_callback(start)``. ``start`` is the name of the
-            starting point for the connection, it should determine the dataformat
-            for the connection.
+          edge_callback (:std:term:`function`): A Python function that is
+            called back each time an edge needs to be inserted into the graph.
+            The prototype of this function is ``edge_callback(start)``.
+            ``start`` is the name of the starting point for the connection; it
+            should determine the dataformat for the connection.
 
-          result_callback (function): A function to draw ports on analyzer blocks.
-            The prototype of this function is ``result_callback(name, color)``.
+          result_callback (:std:term:`function`): A function to draw ports on
+            analyzer blocks.  The prototype of this function is
+            ``result_callback(name, color)``; a sketch of these callbacks
+            follows.
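+
+          A minimal sketch of compatible callbacks (the bodies are purely
+          illustrative; only the prototypes above are part of the interface):
+
+          .. code-block:: python
+
+             def label_callback(type, name):
+                 # ``type`` is one of 'dataset', 'block' or 'analyzer'
+                 return name
+
+             def edge_callback(start):
+                 # ``start`` names the origin of the connection
+                 return start
+
+             def result_callback(name, color):
+                 # label a result port ``name`` drawn with ``color``
+                 return name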
 
 
         Returns
@@ -546,8 +548,9 @@ class Toolchain(object):
 
         Parameters:
 
-          storage (Storage, optional): If you pass a new storage, then this object
-            will be written to that storage point rather than its default.
+          storage (:py:class:`.Storage`, Optional): If you pass a new storage,
+            then this object will be written to that storage point rather than
+            its default.
 
         """
 
diff --git a/doc/api.rst b/doc/api.rst
new file mode 100644
index 00000000..c6fa7013
--- /dev/null
+++ b/doc/api.rst
@@ -0,0 +1,83 @@
+.. vim: set fileencoding=utf-8 :
+
+.. Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/          ..
+.. Contact: beat.support@idiap.ch                                             ..
+..                                                                            ..
+.. This file is part of the beat.core module of the BEAT platform.            ..
+..                                                                            ..
+.. Commercial License Usage                                                   ..
+.. Licensees holding valid commercial BEAT licenses may use this file in      ..
+.. accordance with the terms contained in a written agreement between you     ..
+.. and Idiap. For further information contact tto@idiap.ch                    ..
+..                                                                            ..
+.. Alternatively, this file may be used under the terms of the GNU Affero     ..
+.. Public License version 3 as published by the Free Software and appearing   ..
+.. in the file LICENSE.AGPL included in the packaging of this file.           ..
+.. The BEAT platform is distributed in the hope that it will be useful, but   ..
+.. WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ..
+.. or FITNESS FOR A PARTICULAR PURPOSE.                                       ..
+..                                                                            ..
+.. You should have received a copy of the GNU Affero Public License along     ..
+.. with the BEAT platform. If not, see http://www.gnu.org/licenses/.          ..
+
+
+=====
+ API
+=====
+
+This section includes information for using the Python API of ``beat.core``.
+
+.. notice order is important!
+
+.. automodule:: beat.core.algorithm
+
+.. automodule:: beat.core.baseformat
+
+.. automodule:: beat.core.data
+
+.. automodule:: beat.core.data_loaders
+
+.. automodule:: beat.core.database
+
+.. automodule:: beat.core.dataformat
+
+.. automodule:: beat.core.dock
+
+.. automodule:: beat.core.drawing
+
+.. automodule:: beat.core.environments
+
+.. automodule:: beat.core.execution.base
+
+.. automodule:: beat.core.execution.docker
+
+.. automodule:: beat.core.execution.local
+
+.. automodule:: beat.core.execution.remote
+
+.. automodule:: beat.core.execution.subprocess
+
+.. automodule:: beat.core.experiment
+
+.. automodule:: beat.core.hash
+
+.. automodule:: beat.core.inputs
+
+.. automodule:: beat.core.library
+
+.. automodule:: beat.core.loader
+
+.. automodule:: beat.core.message_handler
+
+.. automodule:: beat.core.outputs
+
+.. automodule:: beat.core.plotter
+
+.. automodule:: beat.core.stats
+
+.. automodule:: beat.core.toolchain
+
+.. automodule:: beat.core.utils
+
+.. automodule:: beat.core.worker
+
diff --git a/doc/conf.py b/doc/conf.py
index f9538296..c8c2cd20 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -254,20 +254,29 @@ autodoc_default_flags = [
   ]
 
 # For inter-documentation mapping:
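+# candidate servers where the BEAT/Bob manuals are published; these are joined
+# below into a single '|'-separated string and handed to link_documentation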
+doc_server = [
+    'http://www.idiap.ch/software/beat/docs/beat/%(name)s/%(version)s/',
+    'http://www.idiap.ch/software/beat/docs/beat/%(name)s/master/',
+    'http://www.idiap.ch/software/beat/docs/beat/%(name)s/1.5.x/',
+    'http://www.idiap.ch/software/bob/docs/beat/%(name)s/%(version)s/',
+    'http://www.idiap.ch/software/bob/docs/beat/%(name)s/master/',
+    'http://www.idiap.ch/software/bob/docs/beat/%(name)s/1.5.x/',
+    ]
+doc_server = '|'.join(doc_server)
 from bob.extension.utils import link_documentation, load_requirements
 sphinx_requirements = "extra-intersphinx.txt"
 if os.path.exists(sphinx_requirements):
   intersphinx_mapping = link_documentation(
       additional_packages=['python','numpy'] + \
-          load_requirements(sphinx_requirements)
-          )
+          load_requirements(sphinx_requirements),
+      server=doc_server,
+      )
 else:
-  intersphinx_mapping = link_documentation()
+  intersphinx_mapping = link_documentation(server=doc_server)
 
 # Adds simplejson, pyzmq links
 intersphinx_mapping['http://simplejson.readthedocs.io/en/stable/'] = None
 intersphinx_mapping['http://pyzmq.readthedocs.io/en/stable/'] = None
-intersphinx_mapping['http://six.readthedocs.io'] = None
 intersphinx_mapping['http://python-jsonschema.readthedocs.io/en/stable/'] = None
 intersphinx_mapping['https://docker-py.readthedocs.io/en/stable/'] = None
 
diff --git a/doc/dataformats.rst b/doc/dataformats.rst
index d89ebc00..8a959589 100644
--- a/doc/dataformats.rst
+++ b/doc/dataformats.rst
@@ -127,7 +127,7 @@ The following primitive data types are available in the BEAT platform:
 
 .. note::
 
-   All primitive types are implemented using their :py:mod:`NumPy`
+   All primitive types are implemented using their :py:mod:`numpy`
    counterparts.
 
 When determining if a block of data corresponds to a data format, the platform
diff --git a/doc/nitpick-exceptions.txt b/doc/nitpick-exceptions.txt
new file mode 100644
index 00000000..bbf66497
--- /dev/null
+++ b/doc/nitpick-exceptions.txt
@@ -0,0 +1,7 @@
+# Not found in the Python 2.7 intersphinx inventory, but ok in Python 3.x
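+# (each line is a "<reference type> <target>" pair)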
+py:exc TypeError
+py:exc RuntimeError
+py:exc ValueError
+py:exc KeyError
+py:class tuple
+py:class list
-- 
GitLab