diff --git a/beat/core/algorithm.py b/beat/core/algorithm.py
index ac4d00868992a1d413d7564a970d84ff574ff6cf..d15c3bb880d5f9645e0d75e7c1941966f52ee343 100755
--- a/beat/core/algorithm.py
+++ b/beat/core/algorithm.py
@@ -54,31 +54,31 @@ class Algorithm(BackendAlgorithm):
     and output declaration, grouping, synchronization details, parameters and
     splittability). The actual algorithm is not directly treated by this class -
     it can, however, provide you with a loader for actually running the
-    algorithmic code (see :py:meth:`Algorithm.runner`).
+    algorithmic code (see :py:meth:`.runner`).
 
 
     Parameters:
 
       prefix (str): Establishes the prefix of your installation.
 
-      data (object, optional): The piece of data representing the algorithm. It
-        must validate against the schema defined for algorithms. If a string is
-        passed, it is supposed to be a valid path to an algorithm in the
-        designated prefix area. If a tuple is passed (or a list), then we
-        consider that the first element represents the algorithm declaration,
-        while the second, the code for the algorithm (either in its source format
-        or as a binary blob). If ``None`` is passed, loads our default prototype
-        for algorithms (source code will be in Python).
-
-      dataformat_cache (dict, optional): A dictionary mapping dataformat names to
-        loaded dataformats. This parameter is optional and, if passed, may
-        greatly speed-up algorithm loading times as dataformats that are already
-        loaded may be re-used.
-
-      library_cache (dict, optional): A dictionary mapping library names to
-        loaded libraries. This parameter is optional and, if passed, may greatly
-        speed-up library loading times as libraries that are already loaded may
-        be re-used.
+      data (:py:class:`object`, Optional): The piece of data representing the
+        algorithm. It must validate against the schema defined for algorithms.
+        If a string is passed, it is supposed to be a valid path to an
+        algorithm in the designated prefix area. If a tuple is passed (or a
+        list), then we consider that the first element represents the algorithm
+        declaration, while the second, the code for the algorithm (either in
+        its source format or as a binary blob). If ``None`` is passed, loads
+        our default prototype for algorithms (source code will be in Python).
+
+      dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping
+        dataformat names to loaded dataformats. This parameter is optional and,
+        if passed, may greatly speed-up algorithm loading times as dataformats
+        that are already loaded may be re-used.
+
+      library_cache (:py:class:`dict`, Optional): A dictionary mapping library
+        names to loaded libraries. This parameter is optional and, if passed,
+        may greatly speed-up library loading times as libraries that are
+        already loaded may be re-used.
 
 
     Attributes:
diff --git a/beat/core/database.py b/beat/core/database.py
index 396c489794175a3e0b49d6ebe47fc949a2f72221..ce0a6db80c22119f73d179f91ea559a4844dd99f 100755
--- a/beat/core/database.py
+++ b/beat/core/database.py
@@ -62,12 +62,12 @@ class Database(BackendDatabase):
         it is supposed to be a valid path to an database in the designated prefix
         area.
 
-      dataformat_cache (dict, optional): A dictionary mapping dataformat names
-        to loaded dataformats. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as dataformats that are already
-        loaded may be re-used. If you use this parameter, you must guarantee
-        that the cache is refreshed as appropriate in case the underlying
-        dataformats change.
+      dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping
+        dataformat names to loaded dataformats. This parameter is optional and,
+        if passed, may greatly speed-up database loading times as dataformats
+        that are already loaded may be re-used. If you use this parameter, you
+        must guarantee that the cache is refreshed as appropriate in case the
+        underlying dataformats change.
 
 
     Attributes:
diff --git a/beat/core/dataformat.py b/beat/core/dataformat.py
index 28ffc59d771b6b2d937b08a81d11ece52c9dfbe8..96102b103ae5f744d7ad2984e3055635978a990e 100755
--- a/beat/core/dataformat.py
+++ b/beat/core/dataformat.py
@@ -51,25 +51,25 @@ class DataFormat(BackendDataFormat):
 
       prefix (str): Establishes the prefix of your installation.
 
-      data (object, optional): The piece of data representing the data format. It
-        must validate against the schema defined for data formats. If a string is
-        passed, it is supposed to be a valid path to an data format in the
-        designated prefix area. If ``None`` is passed, loads our default
-        prototype for data formats.
-
-      parent (tuple, optional): The parent DataFormat for this format. If set to
-        ``None``, this means this dataformat is the first one on the hierarchy
-        tree. If set to a tuple, the contents are ``(format-instance,
-        field-name)``, which indicates the originating object that is this
-        object's parent and the name of the field on that object that points to
-        this one.
-
-      dataformat_cache (dict, optional): A dictionary mapping dataformat names to
-        loaded dataformats. This parameter is optional and, if passed, may
-        greatly speed-up data format loading times as dataformats that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying dataformats
-        change.
+      data (:py:class:`object`, Optional): The piece of data representing the
+        data format. It must validate against the schema defined for data
+        formats. If a string is passed, it is supposed to be a valid path to a
+        data format in the designated prefix area. If ``None`` is passed, loads
+        our default prototype for data formats.
+
+      parent (:py:class:`tuple`, Optional): The parent DataFormat for this
+        format. If set to ``None``, this means this dataformat is the first one
+        on the hierarchy tree. If set to a tuple, the contents are
+        ``(format-instance, field-name)``, which indicates the originating
+        object that is this object's parent and the name of the field on that
+        object that points to this one.
+
+      dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping
+        dataformat names to loaded dataformats. This parameter is optional and,
+        if passed, may greatly speed-up data format loading times as
+        dataformats that are already loaded may be re-used. If you use this
+        parameter, you must guarantee that the cache is refreshed as
+        appropriate in case the underlying dataformats change.
 
     Attributes:
 
diff --git a/beat/core/dock.py b/beat/core/dock.py
index 62556f873db37cac9c7f03466de30c2f04e0b19c..71fc480fd3bb86afd4c35f64424b27809f252c73 100755
--- a/beat/core/dock.py
+++ b/beat/core/dock.py
@@ -135,9 +135,9 @@ class Host(object):
 
         Parameters:
 
-          raise_on_errors (bool, Optional): If we should raise an exception
-            (``RuntimeError``) in case installed environments override each other
-            and we can't know which to use.
+          raise_on_errors (:py:class:`bool`, Optional): If we should raise an
+            exception (:py:exc:`RuntimeError`) in case installed environments
+            override each other and we can't know which to use.
 
 
         Raises:
@@ -317,14 +317,14 @@ class Host(object):
 
           container (:py:class:`Container`): The container.
 
-          virtual_memory_in_megabytes (int, Optional): The maximum amount of memory
-            the user process can consume on the host. If not specified, a memory
-            limit is not set.
+          virtual_memory_in_megabytes (:py:class:`int`, Optional): The maximum
+            amount of memory the user process can consume on the host. If not
+            specified, a memory limit is not set.
 
-          max_cpu_percent (float, Optional): The maximum amount of CPU the user
-            process may consume on the host. The value ``100`` equals to using 100%
-            of a single core. If not specified, then a CPU limitation is not put in
-            place.
+          max_cpu_percent (:py:class:`float`, Optional): The maximum amount of
+            CPU the user process may consume on the host. The value ``100``
+            equals to using 100% of a single core. If not specified, then a CPU
+            limitation is not put in place.
 
         """
 
@@ -420,8 +420,9 @@ class Host(object):
 
         Parameters:
 
-          timeout (float, Optional): A timeout in seconds to wait for the user
-            process to finish. If a timeout value is not given, waits forever.
+          timeout (:py:class:`float`, Optional): A timeout in seconds to wait
+            for the user process to finish. If a timeout value is not given,
+            waits forever.
         '''
         (status, stdout, stderr) = self._exec(['docker', 'wait', container.id],
                                               timeout=timeout)
diff --git a/beat/core/execution/base.py b/beat/core/execution/base.py
index 6b4e70e8b40a598a5f82c8065cdbd27e09e9383c..200ac5c3ad9e61530abd43c2e119eb58af9b313b 100755
--- a/beat/core/execution/base.py
+++ b/beat/core/execution/base.py
@@ -59,36 +59,37 @@ class BaseExecutor(object):
         string is passed, it is supposed to be a fully qualified absolute path to
         a JSON file containing the block execution information.
 
-      cache (str, optional): If your cache is not located under
+      cache (:py:class:`str`, Optional): If your cache is not located under
         ``<prefix>/cache``, then specify a full path here. It will be used
         instead.
 
-      dataformat_cache (dict, optional): A dictionary mapping dataformat names to
-        loaded dataformats. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as dataformats that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying dataformats
-        change.
-
-      database_cache (dict, optional): A dictionary mapping database names to
-        loaded databases. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as databases that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying databases
-        change.
-
-      algorithm_cache (dict, optional): A dictionary mapping algorithm names to
-        loaded algorithms. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as algorithms that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying algorithms
-        change.
-
-      library_cache (dict, optional): A dictionary mapping library names to
-        loaded libraries. This parameter is optional and, if passed, may greatly
-        speed-up library loading times as libraries that are already loaded may
-        be re-used. If you use this parameter, you must guarantee that the cache
-        is refreshed as appropriate in case the underlying libraries change.
+      dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping
+        dataformat names to loaded dataformats. This parameter is optional and,
+        if passed, may greatly speed-up database loading times as dataformats
+        that are already loaded may be re-used. If you use this parameter, you
+        must guarantee that the cache is refreshed as appropriate in case the
+        underlying dataformats change.
+
+      database_cache (:py:class:`dict`, Optional): A dictionary mapping
+        database names to loaded databases. This parameter is optional and, if
+        passed, may greatly speed-up database loading times as databases that
+        are already loaded may be re-used. If you use this parameter, you must
+        guarantee that the cache is refreshed as appropriate in case the
+        underlying databases change.
+
+      algorithm_cache (:py:class:`dict`, Optional): A dictionary mapping
+        algorithm names to loaded algorithms. This parameter is optional and,
+        if passed, may greatly speed-up database loading times as algorithms
+        that are already loaded may be re-used. If you use this parameter, you
+        must guarantee that the cache is refreshed as appropriate in case the
+        underlying algorithms change.
+
+      library_cache (:py:class:`dict`, Optional): A dictionary mapping library
+        names to loaded libraries. This parameter is optional and, if passed,
+        may greatly speed-up library loading times as libraries that are
+        already loaded may be re-used. If you use this parameter, you must
+        guarantee that the cache is refreshed as appropriate in case the
+        underlying libraries change.
 
 
     Attributes:
@@ -105,7 +106,7 @@ class BaseExecutor(object):
         algorithm to be run.
 
       databases (dict): A dictionary in which keys are strings with database
-        names and values are :py:class:`database.Database`, representing the
+        names and values are :py:class:`.database.Database`, representing the
         databases required for running this block. The dictionary may be empty
         in case all inputs are taken from the file cache.
 
@@ -277,20 +278,20 @@ class BaseExecutor(object):
 
         Parameters:
 
-          virtual_memory_in_megabytes (int, Optional): The amount of virtual memory
-            (in Megabytes) available for the job. If set to zero, no limit will be
-            applied.
+          virtual_memory_in_megabytes (:py:class:`int`, Optional): The amount
+            of virtual memory (in Megabytes) available for the job. If set to
+            zero, no limit will be applied.
 
-          max_cpu_percent (int, Optional): The maximum amount of CPU usage allowed
-            in a system. This number must be an integer number between 0 and
-            ``100*number_of_cores`` in your system. For instance, if your system
-            has 2 cores, this number can go between 0 and 200. If it is <= 0, then
-            we don't track CPU usage.
+          max_cpu_percent (:py:class:`int`, Optional): The maximum amount of
+            CPU usage allowed in a system. This number must be an integer
+            number between 0 and ``100*number_of_cores`` in your system. For
+            instance, if your system has 2 cores, this number can go between 0
+            and 200. If it is <= 0, then we don't track CPU usage.
 
           timeout_in_minutes (int): The number of minutes to wait for the user
             process to execute. After this amount of time, the user process is
-            killed with :py:attr:`signal.SIGKILL`. If set to zero, no timeout will
-            be applied.
+            killed with ``signal.SIGKILL``. If set to zero, no timeout will be
+            applied.
 
         Returns:
 
diff --git a/beat/core/execution/docker.py b/beat/core/execution/docker.py
index 15af38f30c880a97943edf1b1fbaceb6ac0624bd..e1d8dcc6e99153dafde91063cbc4cf2bc842f0b2 100755
--- a/beat/core/execution/docker.py
+++ b/beat/core/execution/docker.py
@@ -50,9 +50,9 @@ class DockerExecutor(RemoteExecutor):
 
     Parameters:
 
-      host (:py:class:Host): A configured docker host that will execute the
-        user process. If the host does not have access to the required
-        environment, an exception will be raised.
+      host (:py:class:`.dock.Host`): A configured docker host that will
+        execute the user process. If the host does not have access to the
+        required environment, an exception will be raised.
 
       prefix (str): Establishes the prefix of your installation.
 
@@ -61,36 +61,37 @@ class DockerExecutor(RemoteExecutor):
         string is passed, it is supposed to be a fully qualified absolute path to
         a JSON file containing the block execution information.
 
-      cache (str, optional): If your cache is not located under
+      cache (:py:class:`str`, Optional): If your cache is not located under
         ``<prefix>/cache``, then specify a full path here. It will be used
         instead.
 
-      dataformat_cache (dict, optional): A dictionary mapping dataformat names to
-        loaded dataformats. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as dataformats that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying dataformats
-        change.
-
-      database_cache (dict, optional): A dictionary mapping database names to
-        loaded databases. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as databases that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying databases
-        change.
-
-      algorithm_cache (dict, optional): A dictionary mapping algorithm names to
-        loaded algorithms. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as algorithms that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying algorithms
-        change.
-
-      library_cache (dict, optional): A dictionary mapping library names to
-        loaded libraries. This parameter is optional and, if passed, may greatly
-        speed-up library loading times as libraries that are already loaded may
-        be re-used. If you use this parameter, you must guarantee that the cache
-        is refreshed as appropriate in case the underlying libraries change.
+      dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping
+        dataformat names to loaded dataformats. This parameter is optional and,
+        if passed, may greatly speed-up database loading times as dataformats
+        that are already loaded may be re-used. If you use this parameter, you
+        must guarantee that the cache is refreshed as appropriate in case the
+        underlying dataformats change.
+
+      database_cache (:py:class:`dict`, Optional): A dictionary mapping
+        database names to loaded databases. This parameter is optional and, if
+        passed, may greatly speed-up database loading times as databases that
+        are already loaded may be re-used. If you use this parameter, you must
+        guarantee that the cache is refreshed as appropriate in case the
+        underlying databases change.
+
+      algorithm_cache (:py:class:`dict`, Optional): A dictionary mapping
+        algorithm names to loaded algorithms. This parameter is optional and,
+        if passed, may greatly speed-up database loading times as algorithms
+        that are already loaded may be re-used. If you use this parameter, you
+        must guarantee that the cache is refreshed as appropriate in case the
+        underlying algorithms change.
+
+      library_cache (:py:class:`dict`, Optional): A dictionary mapping library
+        names to loaded libraries. This parameter is optional and, if passed,
+        may greatly speed-up library loading times as libraries that are
+        already loaded may be re-used. If you use this parameter, you must
+        guarantee that the cache is refreshed as appropriate in case the
+        underlying libraries change.
 
 
     Attributes:
@@ -103,11 +104,11 @@ class DockerExecutor(RemoteExecutor):
       data (dict): The original data for this executor, as loaded by our JSON
         decoder.
 
-      algorithm (beat.core.algorithm.Algorithm): An object representing the
-        algorithm to be run.
+      algorithm (:py:class:`.algorithm.Algorithm`): An object representing
+        the algorithm to be run.
 
       databases (dict): A dictionary in which keys are strings with database
-        names and values are :py:class:`database.Database`, representing the
+        names and values are :py:class:`.database.Database`, representing the
         databases required for running this block. The dictionary may be empty
         in case all inputs are taken from the file cache.
 
@@ -116,11 +117,11 @@ class DockerExecutor(RemoteExecutor):
         for that particular combination of details. The dictionary may be empty
         in case all inputs are taken from the file cache.
 
-      input_list (beat.core.inputs.InputList): A list of inputs that will be
-        served to the algorithm.
+      input_list (beat.backend.python.inputs.InputList): A list of inputs that
+        will be served to the algorithm.
 
-      output_list (beat.core.outputs.OutputList): A list of outputs that the
-        algorithm will produce.
+      output_list (beat.backend.python.outputs.OutputList): A list of outputs
+        that the algorithm will produce.
 
       data_sources (list): A list with all data-sources created by our execution
         loader.
@@ -152,40 +153,42 @@ class DockerExecutor(RemoteExecutor):
         The execution interface follows the backend API as described in our
         documentation.
 
-        We use green subprocesses this implementation. Each co-process is linked
-        to us via 2 uni-directional pipes which work as datain and dataout
-        end-points. The parent process (i.e. the current one) establishes the
-        connection to the child and then can pass/receive commands, data and logs.
+        We use green subprocesses in this implementation. Each co-process is
+        linked to us via 2 uni-directional pipes which work as datain and
+        dataout end-points. The parent process (i.e. the current one)
+        establishes the connection to the child and then can pass/receive
+        commands, data and logs.
 
-        Usage of the data pipes (datain, dataout) is **synchronous** - you send a
-        command and block for an answer. The co-process is normally controlled by
-        the current process, except for data requests, which are user-code driven.
-        The nature of our problem does not require an *asynchronous* implementation
-        which, in turn, would require a much more complex set of dependencies (on
-        asyncio or Twisted for example).
+        Usage of the data pipes (datain, dataout) is **synchronous** - you send
+        a command and block for an answer. The co-process is normally
+        controlled by the current process, except for data requests, which are
+        user-code driven. The nature of our problem does not require an
+        *asynchronous* implementation which, in turn, would require a much more
+        complex set of dependencies (on asyncio or Twisted for example).
 
 
         Parameters:
 
-          virtual_memory_in_megabytes (int, Optional): The amount of virtual memory
-            (in Megabytes) available for the job. If set to zero, no limit will be
-            applied.
+          virtual_memory_in_megabytes (:py:class:`int`, Optional): The amount
+            of virtual memory (in Megabytes) available for the job. If set to
+            zero, no limit will be applied.
 
-          max_cpu_percent (int, Optional): The maximum amount of CPU usage allowed
-            in a system. This number must be an integer number between 0 and
-            ``100*number_of_cores`` in your system. For instance, if your system
-            has 2 cores, this number can go between 0 and 200. If it is <= 0, then
-            we don't track CPU usage.
+          max_cpu_percent (:py:class:`int`, Optional): The maximum amount of
+            CPU usage allowed in a system. This number must be an integer
+            number between 0 and ``100*number_of_cores`` in your system. For
+            instance, if your system has 2 cores, this number can go between 0
+            and 200. If it is <= 0, then we don't track CPU usage.
+
+          timeout_in_minutes (:py:class:`int`, Optional): The number of minutes
+            to wait for the user process to execute. After this amount of time,
+            the user process is killed with ``signal.SIGKILL``. If set to zero,
+            no timeout will be applied.
 
-          timeout_in_minutes (int): The number of minutes to wait for the user
-            process to execute. After this amount of time, the user process is
-            killed with :py:attr:`signal.SIGKILL`. If set to zero, no timeout will
-            be applied.
 
         Returns:
 
-          dict: A dictionary which is JSON formattable containing the summary of
-            this block execution.
+          dict: A dictionary which is JSON formattable containing the summary
+          of this block execution.
 
         """
 
diff --git a/beat/core/execution/local.py b/beat/core/execution/local.py
index 6bc35648b54df86eafc853887490a792b820d620..3fe1435b1be82b41db937764a1a271d6777aa3c8 100755
--- a/beat/core/execution/local.py
+++ b/beat/core/execution/local.py
@@ -70,41 +70,42 @@ class LocalExecutor(BaseExecutor):
         string is passed, it is supposed to be a fully qualified absolute path to
         a JSON file containing the block execution information.
 
-      cache (str, optional): If your cache is not located under
+      cache (:py:class:`str`, Optional): If your cache is not located under
         ``<prefix>/cache``, then specify a full path here. It will be used
         instead.
 
-      dataformat_cache (dict, optional): A dictionary mapping dataformat names to
-        loaded dataformats. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as dataformats that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying dataformats
-        change.
-
-      database_cache (dict, optional): A dictionary mapping database names to
-        loaded databases. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as databases that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying databases
-        change.
-
-      algorithm_cache (dict, optional): A dictionary mapping algorithm names to
-        loaded algorithms. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as algorithms that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying algorithms
-        change.
-
-      library_cache (dict, optional): A dictionary mapping library names to
-        loaded libraries. This parameter is optional and, if passed, may greatly
-        speed-up library loading times as libraries that are already loaded may
-        be re-used. If you use this parameter, you must guarantee that the cache
-        is refreshed as appropriate in case the underlying libraries change.
-
-      custom_root_folders (dict, optional): A dictionary where the keys are database
-        identifiers (`<db_name>/<version>`) and the values are paths to the
-        given database's files. These values will override the value found
-        in the database's metadata.
+      dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping
+        dataformat names to loaded dataformats. This parameter is optional and,
+        if passed, may greatly speed-up database loading times as dataformats
+        that are already loaded may be re-used. If you use this parameter, you
+        must guarantee that the cache is refreshed as appropriate in case the
+        underlying dataformats change.
+
+      database_cache (:py:class:`dict`, Optional): A dictionary mapping
+        database names to loaded databases. This parameter is optional and, if
+        passed, may greatly speed-up database loading times as databases that
+        are already loaded may be re-used. If you use this parameter, you must
+        guarantee that the cache is refreshed as appropriate in case the
+        underlying databases change.
+
+      algorithm_cache (:py:class:`dict`, Optional): A dictionary mapping
+        algorithm names to loaded algorithms. This parameter is optional and,
+        if passed, may greatly speed-up database loading times as algorithms
+        that are already loaded may be re-used. If you use this parameter, you
+        must guarantee that the cache is refreshed as appropriate in case the
+        underlying algorithms change.
+
+      library_cache (:py:class:`dict`, Optional): A dictionary mapping library
+        names to loaded libraries. This parameter is optional and, if passed,
+        may greatly speed-up library loading times as libraries that are
+        already loaded may be re-used. If you use this parameter, you must
+        guarantee that the cache is refreshed as appropriate in case the
+        underlying libraries change.
+
+      custom_root_folders (:py:class:`dict`, Optional): A dictionary where the
+        keys are database identifiers (``<db_name>/<version>``) and the values
+        are paths to the given database's files. These values will override the
+        value found in the database's metadata.
 
 
     Attributes:
@@ -117,11 +118,11 @@ class LocalExecutor(BaseExecutor):
       data (dict): The original data for this executor, as loaded by our JSON
         decoder.
 
-      algorithm (beat.core.algorithm.Algorithm): An object representing the
+      algorithm (:py:class:`.algorithm.Algorithm`): An object representing the
         algorithm to be run.
 
       databases (dict): A dictionary in which keys are strings with database
-        names and values are :py:class:`database.Database`, representing the
+        names and values are :py:class:`.database.Database`, representing the
         databases required for running this block. The dictionary may be empty
         in case all inputs are taken from the file cache.
 
@@ -130,11 +131,11 @@ class LocalExecutor(BaseExecutor):
         for that particular combination of details. The dictionary may be empty
         in case all inputs are taken from the file cache.
 
-      input_list (beat.core.inputs.InputList): A list of inputs that will be
-        served to the algorithm.
+      input_list (beat.backend.python.inputs.InputList): A list of inputs that
+        will be served to the algorithm.
 
-      output_list (beat.core.outputs.OutputList): A list of outputs that the
-        algorithm will produce.
+      output_list (beat.backend.python.outputs.OutputList): A list of outputs
+        that the algorithm will produce.
 
       data_sources (list): A list with all data-sources created by our execution
         loader.
@@ -203,20 +204,20 @@ class LocalExecutor(BaseExecutor):
 
         Parameters:
 
-          virtual_memory_in_megabytes (int, Optional): The amount of virtual memory
-            (in Megabytes) available for the job. If set to zero, no limit will be
-            applied.
+          virtual_memory_in_megabytes (:py:class:`int`, Optional): The amount
+            of virtual memory (in Megabytes) available for the job. If set to
+            zero, no limit will be applied.
 
-          max_cpu_percent (int, Optional): The maximum amount of CPU usage allowed
-            in a system. This number must be an integer number between 0 and
-            ``100*number_of_cores`` in your system. For instance, if your system
-            has 2 cores, this number can go between 0 and 200. If it is <= 0, then
-            we don't track CPU usage.
+          max_cpu_percent (:py:class:`int`, Optional): The maximum amount of
+            CPU usage allowed in a system. This number must be an integer
+            number between 0 and ``100*number_of_cores`` in your system. For
+            instance, if your system has 2 cores, this number can go between 0
+            and 200. If it is <= 0, then we don't track CPU usage.
 
           timeout_in_minutes (int): The number of minutes to wait for the user
             process to execute. After this amount of time, the user process is
-            killed with :py:attr:`signal.SIGKILL`. If set to zero, no timeout will
-            be applied.
+            killed with ``signal.SIGKILL``. If set to zero, no timeout will be
+            applied.
 
         Returns:
 
diff --git a/beat/core/execution/remote.py b/beat/core/execution/remote.py
index 9276a85aa4757eb2182712a0c69b2a887fe8fcb1..39839c6f4c04dba977c7bf30db25451713dd2e7b 100755
--- a/beat/core/execution/remote.py
+++ b/beat/core/execution/remote.py
@@ -53,36 +53,37 @@ class RemoteExecutor(BaseExecutor):
         string is passed, it is supposed to be a fully qualified absolute path to
         a JSON file containing the block execution information.
 
-      cache (str, optional): If your cache is not located under
+      cache (:py:class:`str`, Optional): If your cache is not located under
         ``<prefix>/cache``, then specify a full path here. It will be used
         instead.
 
-      dataformat_cache (dict, optional): A dictionary mapping dataformat names to
-        loaded dataformats. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as dataformats that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying dataformats
-        change.
-
-      database_cache (dict, optional): A dictionary mapping database names to
-        loaded databases. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as databases that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying databases
-        change.
-
-      algorithm_cache (dict, optional): A dictionary mapping algorithm names to
-        loaded algorithms. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as algorithms that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying algorithms
-        change.
-
-      library_cache (dict, optional): A dictionary mapping library names to
-        loaded libraries. This parameter is optional and, if passed, may greatly
-        speed-up library loading times as libraries that are already loaded may
-        be re-used. If you use this parameter, you must guarantee that the cache
-        is refreshed as appropriate in case the underlying libraries change.
+      dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping
+        dataformat names to loaded dataformats. This parameter is optional and,
+        if passed, may greatly speed-up database loading times as dataformats
+        that are already loaded may be re-used. If you use this parameter, you
+        must guarantee that the cache is refreshed as appropriate in case the
+        underlying dataformats change.
+
+      database_cache (:py:class:`dict`, Optional): A dictionary mapping
+        database names to loaded databases. This parameter is optional and, if
+        passed, may greatly speed-up database loading times as databases that
+        are already loaded may be re-used. If you use this parameter, you must
+        guarantee that the cache is refreshed as appropriate in case the
+        underlying databases change.
+
+      algorithm_cache (:py:class:`dict`, Optional): A dictionary mapping
+        algorithm names to loaded algorithms. This parameter is optional and,
+        if passed, may greatly speed-up database loading times as algorithms
+        that are already loaded may be re-used. If you use this parameter, you
+        must guarantee that the cache is refreshed as appropriate in case the
+        underlying algorithms change.
+
+      library_cache (:py:class:`dict`, Optional): A dictionary mapping library
+        names to loaded libraries. This parameter is optional and, if passed,
+        may greatly speed-up library loading times as libraries that are
+        already loaded may be re-used. If you use this parameter, you must
+        guarantee that the cache is refreshed as appropriate in case the
+        underlying libraries change.
 
 
     Attributes:
@@ -99,7 +100,7 @@ class RemoteExecutor(BaseExecutor):
         algorithm to be run.
 
       databases (dict): A dictionary in which keys are strings with database
-        names and values are :py:class:`database.Database`, representing the
+        names and values are :py:class:`.database.Database`, representing the
         databases required for running this block. The dictionary may be empty
         in case all inputs are taken from the file cache.
 
diff --git a/beat/core/execution/subprocess.py b/beat/core/execution/subprocess.py
index 96b4bdbb5bd13439071b91f4ec75af9193ef9468..10692c9e5f68656810fc88ecf476536f84337729 100755
--- a/beat/core/execution/subprocess.py
+++ b/beat/core/execution/subprocess.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+
 # vim: set fileencoding=utf-8 :
 
 ###############################################################################
@@ -77,36 +77,37 @@ class SubprocessExecutor(RemoteExecutor):
         string is passed, it is supposed to be a fully qualified absolute path to
         a JSON file containing the block execution information.
 
-      cache (str, optional): If your cache is not located under
+      cache (:py:class:`str`, Optional): If your cache is not located under
         ``<prefix>/cache``, then specify a full path here. It will be used
         instead.
 
-      dataformat_cache (dict, optional): A dictionary mapping dataformat names to
-        loaded dataformats. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as dataformats that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying dataformats
-        change.
-
-      database_cache (dict, optional): A dictionary mapping database names to
-        loaded databases. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as databases that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying databases
-        change.
-
-      algorithm_cache (dict, optional): A dictionary mapping algorithm names to
-        loaded algorithms. This parameter is optional and, if passed, may
-        greatly speed-up database loading times as algorithms that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying algorithms
-        change.
-
-      library_cache (dict, optional): A dictionary mapping library names to
-        loaded libraries. This parameter is optional and, if passed, may greatly
-        speed-up library loading times as libraries that are already loaded may
-        be re-used. If you use this parameter, you must guarantee that the cache
-        is refreshed as appropriate in case the underlying libraries change.
+      dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping
+        dataformat names to loaded dataformats. This parameter is optional and,
+        if passed, may greatly speed-up database loading times as dataformats
+        that are already loaded may be re-used. If you use this parameter, you
+        must guarantee that the cache is refreshed as appropriate in case the
+        underlying dataformats change.
+
+      database_cache (:py:class:`dict`, Optional): A dictionary mapping
+        database names to loaded databases. This parameter is optional and, if
+        passed, may greatly speed-up database loading times as databases that
+        are already loaded may be re-used. If you use this parameter, you must
+        guarantee that the cache is refreshed as appropriate in case the
+        underlying databases change.
+
+      algorithm_cache (:py:class:`dict`, Optional): A dictionary mapping
+        algorithm names to loaded algorithms. This parameter is optional and,
+        if passed, may greatly speed-up database loading times as algorithms
+        that are already loaded may be re-used. If you use this parameter, you
+        must guarantee that the cache is refreshed as appropriate in case the
+        underlying algorithms change.
+
+      library_cache (:py:class:`dict`, Optional): A dictionary mapping library
+        names to loaded libraries. This parameter is optional and, if passed,
+        may greatly speed-up library loading times as libraries that are
+        already loaded may be re-used. If you use this parameter, you must
+        guarantee that the cache is refreshed as appropriate in case the
+        underlying libraries change.
 
 
     Attributes:
@@ -123,7 +124,7 @@ class SubprocessExecutor(RemoteExecutor):
         algorithm to be run.
 
       databases (dict): A dictionary in which keys are strings with database
-        names and values are :py:class:`database.Database`, representing the
+        names and values are :py:class:`.database.Database`, representing the
         databases required for running this block. The dictionary may be empty
         in case all inputs are taken from the file cache.
 
@@ -180,19 +181,19 @@ class SubprocessExecutor(RemoteExecutor):
 
         Parameters:
 
-          virtual_memory_in_megabytes (int, Optional): The amount of virtual memory
-            (in Megabytes) available for the job. If set to zero, no limit will be
-            applied.
+          virtual_memory_in_megabytes (:py:class:`int`, Optional): The amount
+            of virtual memory (in Megabytes) available for the job. If set to
+            zero, no limit will be applied.
 
-          max_cpu_percent (int, Optional): The maximum amount of CPU usage allowed
-            in a system. This number must be an integer number between 0 and
-            ``100*number_of_cores`` in your system. For instance, if your system
-            has 2 cores, this number can go between 0 and 200. If it is <= 0, then
-            we don't track CPU usage.
+          max_cpu_percent (:py:class:`int`, Optional): The maximum amount of
+            CPU usage allowed in a system. This number must be an integer
+            number between 0 and ``100*number_of_cores`` in your system. For
+            instance, if your system has 2 cores, this number can go between 0
+            and 200. If it is <= 0, then we don't track CPU usage.
 
           timeout_in_minutes (int): The number of minutes to wait for the user
             process to execute. After this amount of time, the user process is
-            killed with :py:attr:`signal.SIGKILL`. If set to zero, no timeout will
+            killed with ``signal.SIGKILL``. If set to zero, no timeout will
             be applied.
 
         Returns:
diff --git a/beat/core/experiment.py b/beat/core/experiment.py
index 8b45a0cc618763793151953e06f7c9d28f1a8ce7..9896697f41afd20e406044da8f02b0666786e93f 100755
--- a/beat/core/experiment.py
+++ b/beat/core/experiment.py
@@ -86,42 +86,43 @@ class Experiment(object):
 
       prefix (str): Establishes the prefix of your installation.
 
-      data (object, optional): The piece of data representing the experiment. It
-        must validate against the schema defined for toolchains. If a string is
-        passed, it is supposed to be a valid path to an experiment in the
-        designated prefix area. If ``None`` is passed, loads our default
-        prototype for toolchains. If a tuple is passed (or a list), then we
-        consider that the first element represents the experiment, while the
-        second, the toolchain definition. The toolchain bit can be defined as a
-        dictionary or as a string (pointing to a valid path in the designated
-        prefix area).
-
-      dataformat_cache (dict, optional): A dictionary mapping dataformat names to
-        loaded dataformats. This parameter is optional and, if passed, may
-        greatly speed-up experiment loading times as dataformats that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying dataformats
-        change.
-
-      database_cache (dict, optional): A dictionary mapping database names to
-        loaded databases. This parameter is optional and, if passed, may
-        greatly speed-up experiment loading times as databases that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying databases
-        change.
-
-      algorithm_cache (dict, optional): A dictionary mapping algorithm names to
-        loaded algorithms. This parameter is optional and, if passed, may
-        greatly speed-up experiment loading times as algorithms that are already
-        loaded may be re-used. If you use this parameter, you must guarantee that
-        the cache is refreshed as appropriate in case the underlying algorithms
-        change.
-
-      library_cache (dict, optional): A dictionary mapping library names to
-        loaded libraries. This parameter is optional and, if passed, may greatly
-        speed-up library loading times as libraries that are already loaded may
-        be re-used. If you use this parameter, you must guarantee that the cache
-        is refreshed as appropriate in case the underlying libraries change.
+      data (:py:class:`object`, Optional): The piece of data representing the
+        experiment. It must validate against the schema defined for toolchains.
+        If a string is passed, it is supposed to be a valid path to an
+        experiment in the designated prefix area. If ``None`` is passed, loads
+        our default prototype for toolchains. If a tuple is passed (or a list),
+        then we consider that the first element represents the experiment,
+        while the second, the toolchain definition. The toolchain bit can be
+        defined as a dictionary or as a string (pointing to a valid path in the
+        designated prefix area).
+
+      dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping
+        dataformat names to loaded dataformats. This parameter is optional and,
+        if passed, may greatly speed-up experiment loading times as dataformats
+        that are already loaded may be re-used. If you use this parameter, you
+        must guarantee that the cache is refreshed as appropriate in case the
+        underlying dataformats change.
+
+      database_cache (:py:class:`dict`, Optional): A dictionary mapping
+        database names to loaded databases. This parameter is optional and, if
+        passed, may greatly speed-up experiment loading times as databases that
+        are already loaded may be re-used. If you use this parameter, you must
+        guarantee that the cache is refreshed as appropriate in case the
+        underlying databases change.
+
+      algorithm_cache (:py:class:`dict`, Optional): A dictionary mapping
+        algorithm names to loaded algorithms. This parameter is optional and,
+        if passed, may greatly speed-up experiment loading times as algorithms
+        that are already loaded may be re-used. If you use this parameter, you
+        must guarantee that the cache is refreshed as appropriate in case the
+        underlying algorithms change.
+
+      library_cache (:py:class:`dict`, Optional): A dictionary mapping library
+        names to loaded libraries. This parameter is optional and, if passed,
+        may greatly speed-up library loading times as libraries that are
+        already loaded may be re-used. If you use this parameter, you must
+        guarantee that the cache is refreshed as appropriate in case the
+        underlying libraries change.
 
 
     Attributes:
@@ -1051,8 +1052,9 @@ class Experiment(object):
 
         Parameters:
 
-          storage (Storage, optional): If you pass a new storage, then this object
-            will be written to that storage point rather than its default.
+          storage (:py:class:`.Storage`, Optional): If you pass a new storage,
+            then this object will be written to that storage point rather than
+            its default.
 
         """
 
diff --git a/beat/core/hash.py b/beat/core/hash.py
index 65b49632a2730a2dc857c905f1894f19f1b94706..07d4f674a2c48fe193b39f8229c5a5c233bc45b1 100755
--- a/beat/core/hash.py
+++ b/beat/core/hash.py
@@ -83,7 +83,7 @@ def hashAnalyzer(analyzer_name, algorithm_name, algorithm_hash,
 
 
 def hashJSONStr(contents, description):
-    """Hashes the JSON string contents using :py:func:`hashlib.sha256`
+    """Hashes the JSON string contents using ``hashlib.sha256``
 
     Excludes description changes
     """
diff --git a/beat/core/library.py b/beat/core/library.py
index 24591be2a51f540c3565d2a7c05b81f80facb5e9..1e9f2e595a237f22361dc7f320b84793cc547176 100755
--- a/beat/core/library.py
+++ b/beat/core/library.py
@@ -56,19 +56,19 @@ class Library(BackendLibrary):
 
       prefix (str): Establishes the prefix of your installation.
 
-      data (object, optional): The piece of data representing the library. It
-        must validate against the schema defined for libraries. If a string is
-        passed, it is supposed to be a valid path to an library in the designated
-        prefix area. If a tuple is passed (or a list), then we consider that the
-        first element represents the library declaration, while the second, the
-        code for the library (either in its source format or as a binary blob).
-        If ``None`` is passed, loads our default prototype for libraries (source
-        code will be in Python).
-
-      library_cache (dict, optional): A dictionary mapping library names to
-        loaded libraries. This parameter is optional and, if passed, may greatly
-        speed-up library loading times as libraries that are already loaded may
-        be re-used.
+      data (:py:class:`object`, Optional): The piece of data representing the
+        library. It must validate against the schema defined for libraries. If
+        a string is passed, it is supposed to be a valid path to a library in
+        the designated prefix area. If a tuple is passed (or a list), then we
+        consider that the first element represents the library declaration,
+        while the second, the code for the library (either in its source format
+        or as a binary blob).  If ``None`` is passed, loads our default
+        prototype for libraries (source code will be in Python).
+
+      library_cache (:py:class:`dict`, Optional): A dictionary mapping library
+        names to loaded libraries. This parameter is optional and, if passed,
+        may greatly speed-up library loading times as libraries that are
+        already loaded may be re-used.
 
 
     Attributes:
diff --git a/beat/core/plotter.py b/beat/core/plotter.py
index d3efa5bc554704d02fc1a492a470527fc5befdf3..134bd364b54ab4eed5b1a54aa10faa440f03bd0f 100755
--- a/beat/core/plotter.py
+++ b/beat/core/plotter.py
@@ -112,24 +112,24 @@ class Plotter(object):
 
       prefix (str): Establishes the prefix of your installation.
 
-      data (object, optional): The piece of data representing the plotter. It
-        must validate against the schema defined for plotters. If a string is
-        passed, it is supposed to be a valid path to a plotter in the
+      data (:py:class:`object`, Optional): The piece of data representing the
+        plotter. It must validate against the schema defined for plotters. If a
+        string is passed, it is supposed to be a valid path to a plotter in the
         designated prefix area. If a tuple is passed (or a list), then we
         consider that the first element represents the plotter declaration,
         while the second, the code for the plotter (either in its source format
-        or as a binary blob). If ``None`` is passed, loads our default prototype
-        for plotters (source code will be in Python).
+        or as a binary blob). If ``None`` is passed, loads our default
+        prototype for plotters (source code will be in Python).
 
-      dataformat_cache (dict, optional): A dictionary mapping dataformat names to
-        loaded dataformats. This parameter is optional and, if passed, may
-        greatly speed-up algorithm loading times as dataformats that are already
-        loaded may be re-used.
+      dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping
+        dataformat names to loaded dataformats. This parameter is optional and,
+        if passed, may greatly speed-up algorithm loading times as dataformats
+        that are already loaded may be re-used.
 
-      library_cache (dict, optional): A dictionary mapping library names to
-        loaded libraries. This parameter is optional and, if passed, may greatly
-        speed-up library loading times as libraries that are already loaded may
-        be re-used.
+      library_cache (:py:class:`dict`, Optional): A dictionary mapping library
+        names to loaded libraries. This parameter is optional and, if passed,
+        may greatly speed-up library loading times as libraries that are
+        already loaded may be re-used.
 
 
     Attributes:
@@ -144,9 +144,8 @@ class Plotter(object):
       storage (object): A simple object that provides information about file
         paths for this algorithm
 
-      dataformat (obj): An object of type
-        :py:class:`beat.core.dataformat.DataFormat` that represents the
-        dataformat to which this plotter is applicable.
+      dataformat (obj): An object of type :py:class:`.dataformat.DataFormat`
+        that represents the dataformat to which this plotter is applicable.
 
       libraries (dict): A mapping object defining other libraries this plotter
         needs to load so it can work properly.
@@ -366,14 +365,16 @@ class Plotter(object):
 
           klass (str): The name of the class to load the runnable algorithm from
 
-          exc (class): If passed, must be a valid exception class that will be
-            used to report errors in the read-out of this plotter's code.
+          exc (:std:term:`class`): If passed, must be a valid exception class
+            that will be used to report errors in the read-out of this
+            plotter's code.
 
         Returns:
 
-          :py:class:`beat.core.algorithm.Runner`: An instance of the algorithm,
-            which will be constructed, but not setup.  You **must** set it up
-            before using the ``process`` method.
+          :py:class:`beat.backend.python.algorithm.Runner`: An instance of the
+            algorithm, which will be constructed, but not set up. You **must**
+            set it up before using the ``process`` method.
+
         """
 
         if not self._name:
@@ -450,8 +451,9 @@ class Plotter(object):
 
         Parameters:
 
-          storage (Storage, optional): If you pass a new storage, then this object
-            will be written to that storage point rather than its default.
+          storage (:py:class:`.Storage`, Optional): If you pass a new storage,
+            then this object will be written to that storage point rather than
+            its default.
 
         """
 
diff --git a/beat/core/stats.py b/beat/core/stats.py
index 1f4af9a19646a720157af1986b41af2e10a74ed9..0ae24a34936625537d542ae4e93c90ee5c2e6763 100755
--- a/beat/core/stats.py
+++ b/beat/core/stats.py
@@ -47,7 +47,7 @@ class Statistics(object):
 
     Parameters:
 
-      data (object, optional): The piece of data representing the
+      data (:py:class:`object`, Optional): The piece of data representing the
         statistics the be read, it must validate against our pre-defined
         execution schema. If the input is ``None`` or empty, then start a new
         statistics from scratch.
diff --git a/beat/core/toolchain.py b/beat/core/toolchain.py
index 87553192f05e3cd620886f7627cbcde12d47e05d..c322931d1668a36954d0049ba040e4578fa71066 100755
--- a/beat/core/toolchain.py
+++ b/beat/core/toolchain.py
@@ -71,11 +71,11 @@ class Toolchain(object):
 
       prefix (str): Establishes the prefix of your installation.
 
-      data (object, optional): The piece of data representing the toolchain.
-        It must validate against the schema defined for toolchains. If a string
-        is passed, it is supposed to be a valid path to an toolchain in the
-        designated prefix area. If ``None`` is passed, loads our default
-        prototype for toolchains.
+      data (:py:class:`object`, Optional): The piece of data representing the
+        toolchain.  It must validate against the schema defined for toolchains.
+        If a string is passed, it is supposed to be a valid path to a
+        toolchain in the designated prefix area. If ``None`` is passed, loads
+        our default prototype for toolchains.
 
 
     Attributes:
@@ -362,20 +362,22 @@ class Toolchain(object):
           title (str): A title for the generated drawing. If ``None`` is given,
             then prints out the toolchain name.
 
-          label_callback (function): A python function that is called back each
-            time a label needs to be inserted into a block. The prototype of this
-            function is ``label_callback(type, name)``. ``type`` may be one of
-            ``dataset``, ``block`` or ``analyzer``. This callback is used by the
-            experiment class to complement diagram information before plotting.
+          label_callback (:std:term:`function`): A python function that is
+            called back each time a label needs to be inserted into a block.
+            The prototype of this function is ``label_callback(type, name)``.
+            ``type`` may be one of ``dataset``, ``block`` or ``analyzer``. This
+            callback is used by the experiment class to complement diagram
+            information before plotting.
 
-          edge_callback (function): A python function that is called back each
-            time an edge needs to be inserted into the graph. The prototype of this
-            function is ``edge_callback(start)``. ``start`` is the name of the
-            starting point for the connection, it should determine the dataformat
-            for the connection.
+          edge_callback (:std:term:`function`): A python function that is
+            called back each time an edge needs to be inserted into the graph.
+            The prototype of this function is ``edge_callback(start)``.
+            ``start`` is the name of the starting point for the connection, it
+            should determine the dataformat for the connection.
 
-          result_callback (function): A function to draw ports on analyzer blocks.
-            The prototype of this function is ``result_callback(name, color)``.
+          result_callback (:std:term:`function`): A function to draw ports on
+            analyzer blocks.  The prototype of this function is
+            ``result_callback(name, color)``.
 
 
         Returns
@@ -546,8 +548,9 @@ class Toolchain(object):
 
         Parameters:
 
-          storage (Storage, optional): If you pass a new storage, then this object
-            will be written to that storage point rather than its default.
+          storage (:py:class:`.Storage`, Optional): If you pass a new storage,
+            then this object will be written to that storage point rather than
+            its default.
 
         """
 
diff --git a/doc/api.rst b/doc/api.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c6fa7013ae74a7c172d0960b9deb0f1f60503b46
--- /dev/null
+++ b/doc/api.rst
@@ -0,0 +1,83 @@
+.. vim: set fileencoding=utf-8 :
+
+.. Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/          ..
+.. Contact: beat.support@idiap.ch                                             ..
+..                                                                            ..
+.. This file is part of the beat.core module of the BEAT platform.            ..
+..                                                                            ..
+.. Commercial License Usage                                                   ..
+.. Licensees holding valid commercial BEAT licenses may use this file in      ..
+.. accordance with the terms contained in a written agreement between you     ..
+.. and Idiap. For further information contact tto@idiap.ch                    ..
+..                                                                            ..
+.. Alternatively, this file may be used under the terms of the GNU Affero     ..
+.. Public License version 3 as published by the Free Software and appearing   ..
+.. in the file LICENSE.AGPL included in the packaging of this file.           ..
+.. The BEAT platform is distributed in the hope that it will be useful, but   ..
+.. WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ..
+.. or FITNESS FOR A PARTICULAR PURPOSE.                                       ..
+..                                                                            ..
+.. You should have received a copy of the GNU Affero Public License along     ..
+.. with the BEAT platform. If not, see http://www.gnu.org/licenses/           ..
+
+
+=====
+ API
+=====
+
+This section includes information for using the Python API of ``beat.core``.
+
+.. notice order is important!
+
+.. automodule:: beat.core.algorithm
+
+.. automodule:: beat.core.baseformat
+
+.. automodule:: beat.core.data
+
+.. automodule:: beat.core.data_loaders
+
+.. automodule:: beat.core.database
+
+.. automodule:: beat.core.dataformat
+
+.. automodule:: beat.core.dock
+
+.. automodule:: beat.core.drawing
+
+.. automodule:: beat.core.environments
+
+.. automodule:: beat.core.execution.base
+
+.. automodule:: beat.core.execution.docker
+
+.. automodule:: beat.core.execution.local
+
+.. automodule:: beat.core.execution.remote
+
+.. automodule:: beat.core.execution.subprocess
+
+.. automodule:: beat.core.experiment
+
+.. automodule:: beat.core.hash
+
+.. automodule:: beat.core.inputs
+
+.. automodule:: beat.core.library
+
+.. automodule:: beat.core.loader
+
+.. automodule:: beat.core.message_handler
+
+.. automodule:: beat.core.outputs
+
+.. automodule:: beat.core.plotter
+
+.. automodule:: beat.core.stats
+
+.. automodule:: beat.core.toolchain
+
+.. automodule:: beat.core.utils
+
+.. automodule:: beat.core.worker
+
diff --git a/doc/conf.py b/doc/conf.py
index f9538296dbaf59b4868fa8391f21dc0ae74cdac8..c8c2cd20c5a7c5d997bcd0fd0939f5aa1d56280a 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -254,20 +254,29 @@ autodoc_default_flags = [
   ]
 
 # For inter-documentation mapping:
+doc_server = [
+    'http://www.idiap.ch/software/beat/docs/beat/%(name)s/%(version)s/',
+    'http://www.idiap.ch/software/beat/docs/beat/%(name)s/master/',
+    'http://www.idiap.ch/software/beat/docs/beat/%(name)s/1.5.x/',
+    'http://www.idiap.ch/software/bob/docs/beat/%(name)s/%(version)s/',
+    'http://www.idiap.ch/software/bob/docs/beat/%(name)s/master/',
+    'http://www.idiap.ch/software/bob/docs/beat/%(name)s/1.5.x/',
+    ]
+doc_server = '|'.join(doc_server)
 from bob.extension.utils import link_documentation, load_requirements
 sphinx_requirements = "extra-intersphinx.txt"
 if os.path.exists(sphinx_requirements):
   intersphinx_mapping = link_documentation(
       additional_packages=['python','numpy'] + \
-          load_requirements(sphinx_requirements)
-          )
+          load_requirements(sphinx_requirements),
+      server=doc_server,
+      )
 else:
-  intersphinx_mapping = link_documentation()
+  intersphinx_mapping = link_documentation(server=doc_server)
 
 # Adds simplejson, pyzmq links
 intersphinx_mapping['http://simplejson.readthedocs.io/en/stable/'] = None
 intersphinx_mapping['http://pyzmq.readthedocs.io/en/stable/'] = None
-intersphinx_mapping['http://six.readthedocs.io'] = None
 intersphinx_mapping['http://python-jsonschema.readthedocs.io/en/stable/'] = None
 intersphinx_mapping['https://docker-py.readthedocs.io/en/stable/'] = None
 
diff --git a/doc/dataformats.rst b/doc/dataformats.rst
index d89ebc004252050f681131fc81e8d958b741277b..8a95958959fc2209c047cef64bad703858f80d3b 100644
--- a/doc/dataformats.rst
+++ b/doc/dataformats.rst
@@ -127,7 +127,7 @@ The following primitive data types are available in the BEAT platform:
 
 .. note::
 
-   All primitive types are implemented using their :py:mod:`NumPy`
+   All primitive types are implemented using their :py:mod:`numpy`
    counterparts.
 
 When determining if a block of data corresponds to a data format, the platform
diff --git a/doc/nitpick-exceptions.txt b/doc/nitpick-exceptions.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bbf66497aae37c58e3ba358616ebeca6e00afcad
--- /dev/null
+++ b/doc/nitpick-exceptions.txt
@@ -0,0 +1,7 @@
+# Not resolvable against the Python 2.7 intersphinx inventory (ok in Python 3.x)
+py:exc TypeError
+py:exc RuntimeError
+py:exc ValueError
+py:exc KeyError
+py:class tuple
+py:class list