Commit 59d4f847 authored by André Anjos's avatar André Anjos 💬 Committed by Samuel GAIST

Remove all warnings from documentation

parent d373224f
......@@ -78,8 +78,8 @@ class Runner(object):
Parameters:
module (module): The preloaded module containing the algorithm as
returned by :py:func:`beat.core.loader.load_module`.
module (:std:term:`module`): The preloaded module containing the
algorithm as returned by :py:func:`.loader.load_module`.
obj_name (str): The name of the object within the module you're interested
in
......@@ -87,9 +87,9 @@ class Runner(object):
algorithm (object): The algorithm instance that is used for parameter
checking.
exc (class): The class to use as base exception when translating the
exception from the user code. Read the documention of :py:func:`run`
for more details.
exc (:std:term:`class`): The class to use as base exception when
translating the exception from the user code. Read the documentation of
:py:func:`.loader.run` for more details.
'''
......@@ -266,15 +266,15 @@ class Algorithm(object):
name (str): The fully qualified algorithm name (e.g. ``user/algo/1``)
dataformat_cache (dict, optional): A dictionary mapping dataformat names to
loaded dataformats. This parameter is optional and, if passed, may
greatly speed-up algorithm loading times as dataformats that are already
loaded may be re-used.
dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping
dataformat names to loaded dataformats. This parameter is optional and,
if passed, may greatly speed-up algorithm loading times as dataformats
that are already loaded may be re-used.
library_cache (dict, optional): A dictionary mapping library names to
loaded libraries. This parameter is optional and, if passed, may greatly
speed-up library loading times as libraries that are already loaded may
be re-used.
library_cache (:py:class:`dict`, Optional): A dictionary mapping library
names to loaded libraries. This parameter is optional and, if passed,
may greatly speed-up library loading times as libraries that are
already loaded may be re-used.
Attributes:
......@@ -283,7 +283,7 @@ class Algorithm(object):
dataformats (dict): A dictionary containing all pre-loaded dataformats used
by this algorithm. Data format objects will be of type
:py:class:`beat.core.dataformat.DataFormat`.
:py:class:`.dataformat.DataFormat`.
libraries (dict): A mapping object defining other libraries this algorithm
needs to load so it can work properly.
......@@ -741,12 +741,12 @@ class Algorithm(object):
klass (str): The name of the class to load the runnable algorithm from
exc (class): If passed, must be a valid exception class that will be
used to report errors in the read-out of this algorithm's code.
exc (:std:term:`class`): If passed, must be a valid exception class
that will be used to report errors in the read-out of this algorithm's code.
Returns:
:py:class:`beat.core.algorithm.Runner`: An instance of the algorithm,
:py:class:`Runner`: An instance of the algorithm,
which will be constructed, but not setup. You **must** set it up
before using the ``process`` method.
"""
......@@ -807,8 +807,9 @@ class Algorithm(object):
Parameters:
storage (Storage, optional): If you pass a new storage, then this object
will be written to that storage point rather than its default.
storage (:py:class:`.Storage`, Optional): If you pass a new storage,
then this object will be written to that storage point rather than
its default.
"""
......
......@@ -66,15 +66,16 @@ def setup_scalar(formatname, attrname, dtype, value, casting, add_defaults):
dtype (numpy.dtype): The datatype of every element on the array
value (object, optional): A representation of the value. This object will
be cast into a scalar with the dtype defined by the ``dtype`` parameter.
value (:std:term:`object`, Optional): A representation of the value. This
object will be cast into a scalar with the dtype defined by the
``dtype`` parameter.
casting (str): See :py:func:`numpy.can_cast` for a description of possible
values for this field.
add_defaults (bool): If we should use defaults for missing attributes. In
case this value is set to ``True``, missing attributes are set with
defaults, otherwise, a :py:class:`TypeError` is raise if a missing
defaults, otherwise, a :py:exc:`TypeError` is raised if a missing
attribute is found.
Returns:
......@@ -140,20 +141,20 @@ def setup_array(formatname, attrname, shape, dtype, value, casting,
attrname (str): The name of this attribute (e.g. ``value``). This value is
only used for informational purposes
shape (tuple): The shape of the array
shape (:py:class:`tuple`): The shape of the array
dtype (numpy.dtype): The datatype of every element on the array
value (object, optional): A representation of the value. This object will
be cast into a numpy array with the dtype defined by the ``dtype``
parameter.
value (:std:term:`object`, Optional): A representation of the value. This
object will be cast into a numpy array with the dtype defined by the
``dtype`` parameter.
casting (str): See :py:func:`numpy.can_cast` for a description of possible
values for this field.
add_defaults (bool): If we should use defaults for missing attributes. In
case this value is set to ``True``, missing attributes are set with
defaults, otherwise, a :py:class:`TypeError` is raise if a missing
defaults, otherwise, a :py:exc:`TypeError` is raised if a missing
attribute is found.
......@@ -230,10 +231,10 @@ def pack_array(dtype, value, fd):
dtype (numpy.dtype): The datatype of the array (taken from the format
descriptor)
value (object, optional): The :py:class:`numpy.ndarray` representing the
value to be encoded
value (:std:term:`object`, Optional): The :py:class:`numpy.ndarray`
representing the value to be encoded
fd (fd): The file where to encode the input
fd (:std:term:`file`): The file where to encode the input
"""
......@@ -265,9 +266,10 @@ def pack_scalar(dtype, value, fd):
dtype (numpy.dtype): The datatype of the scalar (taken from the format
descriptor)
value (object, optional): An object representing the value to be encoded
value (:std:term:`object`, Optional): An object representing the value to
be encoded
fd (fd): The file where to encode the input
fd (:std:term:`file`): The file where to encode the input
"""
......@@ -306,11 +308,11 @@ def unpack_array(shape, dtype, fd):
Parameters:
shape (tuple): The shape of the array
shape (:py:class:`tuple`): The shape of the array
dtype (numpy.dtype): The datatype of every element on the array
fd (fd): The file where to encode the input
fd (:std:term:`file`): The file where to encode the input
Returns:
......@@ -351,11 +353,11 @@ def unpack_scalar(dtype, fd):
dtype (numpy.dtype): The datatype of every element on the array
fd (fd): The file where to encode the input
fd (:std:term:`file`): The file where to encode the input
Returns:
scalar: which among other options, can be a numpy scalar (``int8``,
object: which, among other options, can be a numpy scalar (``int8``,
``float32``, ``bool_``, etc) or a string (``str``). Advances readout of
``fd``.
......@@ -407,10 +409,10 @@ class baseformat(object):
Parameters:
data (dict, optional): A dictionary representing the data input, matching
the keywords defined at the resolved format. A value of ``None``, if
passed, effectively results in the same as passing an empty dictionary
``{}``.
data (:py:class:`dict`, Optional): A dictionary representing the data
input, matching the keywords defined at the resolved format. A
value of ``None``, if passed, effectively results in the same as
passing an empty dictionary ``{}``.
casting (str): See :py:func:`numpy.can_cast` for a description of
possible values for this field. By default, it is set to ``'safe'``.
......@@ -418,8 +420,8 @@ class baseformat(object):
add_defaults (bool): If we should use defaults for missing attributes. In
case this value is set to `True`, missing attributes are set with
defaults, otherwise, a ``TypeError`` is raise if a missing attribute is
found.
defaults, otherwise, a :py:exc:`TypeError` is raised if a missing
attribute is found.
"""
......@@ -486,7 +488,7 @@ class baseformat(object):
This method will make the object pickle itself on the file descriptor
``fd``. If you'd like to write the contents of this file into a string, use
the :py:mod:`six.BytesIO`.
the :py:data:`six.BytesIO`.
"""
for key in sorted(self._format.keys()):
......@@ -530,7 +532,7 @@ class baseformat(object):
"""Loads a binary representation of this object from a string
Effectively, this method just calls :py:meth:`baseformat.unpack_from` with
a :py:class:`six.BytesIO` wrapped around the input string.
a :py:data:`six.BytesIO` wrapped around the input string.
"""
return self.unpack_from(six.BytesIO(s))
......
......@@ -765,7 +765,7 @@ class DataSink(object):
Parameters:
data (beat.core.baseformat.baseformat): The block of data to write
data (baseformat.baseformat): The block of data to write
start_data_index (int): Start index of the written data
......@@ -814,7 +814,7 @@ class StdoutDataSink(DataSink):
Parameters:
data (beat.core.baseformat.baseformat) The block of data to write
data (baseformat.baseformat) The block of data to write
start_data_index (int): Start index of the written data
......@@ -864,7 +864,7 @@ class CachedDataSink(DataSink):
filename (str): Name of the file to generate
dataformat (beat.core.dataformat.DataFormat): The dataformat to be used
dataformat (dataformat.DataFormat): The dataformat to be used
inside this file. All objects stored inside this file will respect that
format.
......@@ -955,7 +955,7 @@ class CachedDataSink(DataSink):
Parameters:
data (beat.core.baseformat.baseformat): The block of data to write
data (baseformat.baseformat): The block of data to write
start_data_index (int): Start index of the written data
......@@ -1025,8 +1025,7 @@ def load_data_index(cache_root, hash_path):
cache_root (str): The path to the root of the cache directory
hash_path (str): The hashed path of the input you wish to load the indexes
for, as it is returned by the utility function
:py:func:`beat.core.hash.toPath`.
for, as it is returned by the utility function :py:func:`.hash.toPath`.
Returns:
......
......@@ -45,10 +45,9 @@ class DataView(object):
"""Provides access to a subset of data from a group of inputs synchronized
together
Data views are created from a data loader
(see :py:class:`beat.backend.python.data_loaders.DataLoader`), which are
Data views are created from a data loader (see :py:class:`DataLoader`), which is
provided to the algorithms of types 'sequential' and 'autonomous'
(see :py:class:`beat.backend.python.data_loaders.DataLoaderList`).
(see :py:class:`DataLoaderList`).
Example:
......@@ -62,10 +61,11 @@ class DataView(object):
Parameters:
data_loader (:py:class:`beat.backend.python.data_loaders.DataLoader`):
Name of the data channel of the group of inputs
data_loader (:py:class:`DataLoader`): Name of the data channel of the
group of inputs
data_indices (list of tuples): Data indices to consider
data_indices (:py:class:`list`): Data indices to consider as a list of
tuples
Attributes:
......@@ -146,7 +146,7 @@ class DataLoader(object):
"""Provides access to data from a group of inputs synchronized together
Data loaders are provided to the algorithms of types 'sequential' and
'autonomous' (see :py:class:`beat.backend.python.data_loaders.DataLoaderList`).
'autonomous' (see :py:class:`DataLoaderList`).
Example:
......@@ -270,7 +270,7 @@ class DataLoaderList(object):
One group of inputs is always considered as the **main** one, and is used to
drive the algorithm. The usage of the other groups is left to the algorithm.
See :py:class:`beat.backend.python.data_loaders.DataLoader`
See :py:class:`DataLoader`
Example:
......@@ -297,7 +297,7 @@ class DataLoaderList(object):
Attributes:
main_loader (beat.backend.python.data_loaders.DataLoader): Main data loader
main_loader (DataLoader): Main data loader
"""
......@@ -309,7 +309,7 @@ class DataLoaderList(object):
def add(self, data_loader):
"""Add a data loader to the list
:param beat.backend.python.data_loaders.DataLoader data_loader: The data
:param DataLoader data_loader: The data
loader to add
"""
if self.main_loader is None:
......
......@@ -83,17 +83,17 @@ class Runner(object):
db_name (str): The full name of the database object for this view
module (module): The preloaded module containing the database views as
returned by :py:func:`beat.core.loader.load_module`.
module (:std:term:`module`): The preloaded module containing the database
views as returned by :py:func:`.loader.load_module`.
prefix (str): Establishes the prefix of your installation.
root_folder (str, path): The path pointing to the root folder of this
database
exc (class): The class to use as base exception when translating the
exception from the user code. Read the documention of :py:func:`run`
for more details.
exc (:std:term:`class`): The class to use as base exception when
translating the exception from the user code. Read the documentation of
:py:func:`.loader.run` for more details.
*args: Constructor parameters for the database view. Normally, none.
......@@ -197,12 +197,12 @@ class Database(object):
name (str): The fully qualified database name (e.g. ``db/1``)
dataformat_cache (dict, optional): A dictionary mapping dataformat names
to loaded dataformats. This parameter is optional and, if passed, may
greatly speed-up database loading times as dataformats that are already
loaded may be re-used. If you use this parameter, you must guarantee
that the cache is refreshed as appropriate in case the underlying
dataformats change.
dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping
dataformat names to loaded dataformats. This parameter is optional and,
if passed, may greatly speed-up database loading times as dataformats
that are already loaded may be re-used. If you use this parameter, you
must guarantee that the cache is refreshed as appropriate in case the
underlying dataformats change.
Attributes:
......@@ -386,8 +386,8 @@ class Database(object):
name (str): The name of the set in the protocol where to retrieve the
view from
exc (class): If passed, must be a valid exception class that will be
used to report errors in the read-out of this database's view.
exc (:std:term:`class`): If passed, must be a valid exception class
that will be used to report errors in the read-out of this database's view.
Returns:
......@@ -455,8 +455,9 @@ class Database(object):
Parameters:
storage (Storage, optional): If you pass a new storage, then this object
will be written to that storage point rather than its default.
storage (:py:class:`.Storage`, Optional): If you pass a new storage,
then this object will be written to that storage point rather than
its default.
"""
......@@ -520,30 +521,37 @@ class View(object):
For instance, assuming a view providing that kind of data:
----------- ----------- ----------- ----------- ----------- -----------
| image | | image | | image | | image | | image | | image |
----------- ----------- ----------- ----------- ----------- -----------
----------- ----------- ----------- ----------- ----------- -----------
| file_id | | file_id | | file_id | | file_id | | file_id | | file_id |
----------- ----------- ----------- ----------- ----------- -----------
----------------------------------- -----------------------------------
| client_id | | client_id |
----------------------------------- -----------------------------------
.. code-block:: text
----------- ----------- ----------- ----------- ----------- -----------
| image | | image | | image | | image | | image | | image |
----------- ----------- ----------- ----------- ----------- -----------
----------- ----------- ----------- ----------- ----------- -----------
| file_id | | file_id | | file_id | | file_id | | file_id | | file_id |
----------- ----------- ----------- ----------- ----------- -----------
----------------------------------- -----------------------------------
| client_id | | client_id |
----------------------------------- -----------------------------------
a list like the following should be generated:
[
(client_id=1, file_id=1, image=filename1),
(client_id=1, file_id=2, image=filename2),
(client_id=1, file_id=3, image=filename3),
(client_id=2, file_id=4, image=filename4),
(client_id=2, file_id=5, image=filename5),
(client_id=2, file_id=6, image=filename6),
...
]
DO NOT store images, sound files or data loadable from a file in the list!
Store the path of the file to load instead.
.. code-block:: python
[
(client_id=1, file_id=1, image=filename1),
(client_id=1, file_id=2, image=filename2),
(client_id=1, file_id=3, image=filename3),
(client_id=2, file_id=4, image=filename4),
(client_id=2, file_id=5, image=filename5),
(client_id=2, file_id=6, image=filename6),
...
]
.. warning::
DO NOT store images, sound files or data loadable from a file in the
list! Store the path of the file to load instead.
"""
raise NotImplementedError
......
......@@ -88,19 +88,19 @@ class DataFormat(object):
data (str, dict): The fully qualified algorithm name (e.g. ``user/algo/1``)
or a dictionary representing the data format (for analyzer results).
parent (tuple, optional): The parent DataFormat for this format. If set to
``None``, this means this dataformat is the first one on the hierarchy
tree. If set to a tuple, the contents are ``(format-instance,
field-name)``, which indicates the originating object that is this
object's parent and the name of the field on that object that points to
this one.
dataformat_cache (dict, optional): A dictionary mapping dataformat names to
loaded dataformats. This parameter is optional and, if passed, may
greatly speed-up data format loading times as dataformats that are already
loaded may be re-used. If you use this parameter, you must guarantee that
the cache is refreshed as appropriate in case the underlying dataformats
change.
parent (:py:class:`tuple`, Optional): The parent DataFormat for this
format. If set to ``None``, this means this dataformat is the first one
on the hierarchy tree. If set to a tuple, the contents are
``(format-instance, field-name)``, which indicates the originating
object that is this object's parent and the name of the field on that
object that points to this one.
dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping
dataformat names to loaded dataformats. This parameter is optional and,
if passed, may greatly speed-up data format loading times as
dataformats that are already loaded may be re-used. If you use this
parameter, you must guarantee that the cache is refreshed as
appropriate in case the underlying dataformats change.
Attributes:
......@@ -125,7 +125,7 @@ class DataFormat(object):
referenced (dict): A dictionary pointing to all loaded dataformats.
parent (beat.core.dataformat.DataFormat): The pointer to the
parent (dataformat.DataFormat): The pointer to the
dataformat to which the current format is part of. It is useful for
internal error reporting.
......@@ -395,11 +395,11 @@ class DataFormat(object):
Parameters:
data (dict, str, fd): This parameter represents the data to be validated.
It may be a dictionary with the JSON representation of a data blob or,
else, a binary blob (represented by either a string or a file
descriptor object) from which the data will be read. If problems occur,
an exception is raised.
data (dict, str, :std:term:`file`): This parameter represents the data to be
validated. It may be a dictionary with the JSON representation of
a data blob or, else, a binary blob (represented by either a string
or a file descriptor object) from which the data will be read. If
problems occur, an exception is raised.
Returns:
......@@ -466,8 +466,9 @@ class DataFormat(object):
Parameters:
storage (Storage, optional): If you pass a new storage, then this object
will be written to that storage point rather than its default.
storage (:py:class:`.Storage`, Optional): If you pass a new storage,
then this object will be written to that storage point rather than
its default.
"""
......
......@@ -53,19 +53,19 @@ class DBExecutor(object):
string is passed, it is supposed to be a fully qualified absolute path to
a JSON file containing the block execution information.
dataformat_cache (dict, optional): A dictionary mapping dataformat names to
loaded dataformats. This parameter is optional and, if passed, may
greatly speed-up database loading times as dataformats that are already
loaded may be re-used. If you use this parameter, you must guarantee that
the cache is refreshed as appropriate in case the underlying dataformats
change.
dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping
dataformat names to loaded dataformats. This parameter is optional and,
if passed, may greatly speed-up database loading times as dataformats
that are already loaded may be re-used. If you use this parameter, you
must guarantee that the cache is refreshed as appropriate in case the
underlying dataformats change.
database_cache (dict, optional): A dictionary mapping database names to
loaded databases. This parameter is optional and, if passed, may
greatly speed-up database loading times as databases that are already
loaded may be re-used. If you use this parameter, you must guarantee that
the cache is refreshed as appropriate in case the underlying databases
change.
database_cache (:py:class:`dict`, Optional): A dictionary mapping
database names to loaded databases. This parameter is optional and, if
passed, may greatly speed-up database loading times as databases that
are already loaded may be re-used. If you use this parameter, you must
guarantee that the cache is refreshed as appropriate in case the
underlying databases change.
Attributes:
......@@ -77,7 +77,7 @@ class DBExecutor(object):
decoder.
databases (dict): A dictionary in which keys are strings with database
names and values are :py:class:`database.Database`, representing the
names and values are :py:class:`.database.Database`, representing the
databases required for running this block. The dictionary may be empty
in case all inputs are taken from the file cache.
......@@ -86,8 +86,8 @@ class DBExecutor(object):
for that particular combination of details. The dictionary may be empty
in case all inputs are taken from the file cache.
input_list (beat.core.inputs.InputList): A list of inputs that will be
served to the algorithm.
input_list (inputs.InputList): A list of inputs that will be served to
the algorithm.
data_sources (list): A list with all data-sources created by our execution
loader.
......
......@@ -55,26 +55,26 @@ class Executor(object):
directory (str): The path to a directory containing all the information
required to run the user experiment.
dataformat_cache (dict, optional): A dictionary mapping dataformat names to
loaded dataformats. This parameter is optional and, if passed, may
greatly speed-up database loading times as dataformats that are already
loaded may be re-used. If you use this parameter, you must guarantee that
the cache is refreshed as appropriate in case the underlying dataformats
change.
database_cache (dict, optional): A dictionary mapping database names to
loaded databases. This parameter is optional and, if passed, may
greatly speed-up database loading times as databases that are already
loaded may be re-used. If you use this parameter, you must guarantee that
the cache is refreshed as appropriate in case the underlying databases
change.
library_cache (dict, optional): A dictionary mapping library names to
loaded libraries. This parameter is optional and, if passed, may greatly
speed-up library loading times as libraries that are already loaded may
be re-used. If you use this parameter, you must guarantee that the cache
is refreshed as appropriate in case the underlying libraries change.
"""
dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping
dataformat names to loaded dataformats. This parameter is optional and,
if passed, may greatly speed-up database loading times as dataformats
that are already loaded may be re-used. If you use this parameter, you
must guarantee that the cache is refreshed as appropriate in case the
underlying dataformats change.
database_cache (:py:class:`dict`, Optional): A dictionary mapping
database names to loaded databases. This parameter is optional and, if
passed, may greatly speed-up database loading times as databases that
are already loaded may be re-used. If you use this parameter, you must
guarantee that the cache is refreshed as appropriate in case the
underlying databases change.
library_cache (:py:class:`dict`, Optional): A dictionary mapping library
names to loaded libraries. This parameter is optional and, if passed,
may greatly speed-up library loading times as libraries that are
already loaded may be re-used. If you use this parameter, you must
guarantee that the cache is refreshed as appropriate in case the
underlying libraries change. """
def __init__(self, socket, directory, dataformat_cache=None,
database_cache=None, library_cache=None, cache_root='/cache',
......
......@@ -41,7 +41,9 @@ import os
def _sha256(s):
"""A python2/3 replacement for :py:func:`haslib.sha256`"""
"""A python2/3 shortcut for :py:func:`haslib.sha256.hexdigest` to will
ensure that the given string is unicode before going further.
"""
try:
if isinstance(s, six.string_types):
s = six.u(s)
......@@ -104,7 +106,7 @@ def hash(dictionary_or_string):
def hashJSON(contents, description):
"""Hashes the pre-loaded JSON object using :py:func:`hashlib.sha256`
"""Hashes the pre-loaded JSON object using :py:meth:`hashlib.hash.hexdigest`
Excludes description changes
"""
......@@ -120,7 +122,7 @@ def hashJSON(contents, description):
def hashJSONFile(path, description):
"""Hashes the JSON file contents using :py:func:`hashlib.sha256`
"""Hashes the JSON file contents using :py:meth:`hashlib.hash.hexdigest`
Excludes description changes
"""
......@@ -138,7 +140,7 @@ def hashJSONFile(path, description):
def hashFileContents(path):
"""Hashes the file contents using :py:func:`hashlib.sha256`."""
"""Hashes the file contents using :py:meth:`hashlib.hash.hexdigest`."""
with open(path, 'rb') as f:
sha256 = hashlib.sha256()
......
......@@ -47,7 +47,7 @@ class Input(object):
(legacy) data source
A list of those inputs must be provided to the algorithms (see
:py:class:`beat.backend.python.inputs.InputList`)
:py:class:`InputList`)
Parameters:
......@@ -59,11 +59,11 @@ class Input(object):
Attributes:
group (beat.core.inputs.InputGroup): Group containing this input
group (InputGroup): Group containing this input
name (str): Name of the input (algorithm-specific)
data (beat.core.baseformat.baseformat): The last block of data received on
data (baseformat.baseformat): The last block of data received on
the input
data_index (int): Index of the last block of data received on the input
......@@ -144,9 +144,9 @@ class InputGroup:
"""Represents a group of inputs synchronized together
A group implementing this interface is provided to the algorithms (see
:py:class:`beat.backend.python.inputs.InputList`).
:py:class:`InputList`).
See :py:class:`beat.core.inputs.Input`
See :py:class:`Input`
Example:
......@@ -170,7 +170,7 @@ class InputGroup:
channel (str): Name of the data channel of the group
synchronization_listener (beat.core.outputs.SynchronizationListener):
synchronization_listener (outputs.SynchronizationListener):
Synchronization listener to use
restricted_access (bool): Indicates if the algorithm can freely use the
......@@ -187,7 +187,7 @@ class InputGroup:
channel (str): Name of the data channel of the group
synchronization_listener (beat.core.outputs.SynchronizationListener):
synchronization_listener (outputs.SynchronizationListener):
Synchronization listener used
"""
......@@ -231,8 +231,7 @@ class InputGroup:
Parameters:
input (beat.backend.python.inputs.Input or beat.backend.python.inputs.RemoteInput):
The input to add
input (Input): The input to add
"""
......@@ -299,8 +298,8 @@ class InputList:
One group of inputs is always considered as the **main** one, and is used to
drive the algorithm. The usage of the other groups is left to the algorithm.
See :py:class:`beat.core.inputs.Input`
See :py:class:`beat.core.inputs.InputGroup`
See :py:class:`Input`
See :py:class:`InputGroup`
Example:
......@@ -335,7 +334,7 @@ class InputList:
Attributes:
main_group (beat.core.inputs.InputGroup): Main group (for data-driven
main_group (InputGroup): Main group (for data-driven
algorithms)
"""
......@@ -348,7 +347,7 @@ class InputList:
def add(self, group):
"""Add a group to the list
:param beat.core.platform.inputs.InputGroup group: The group to add
:param InputGroup group: The group to add
"""
if group.restricted_access and (self.main_group is None):
self.main_group = group
......
......@@ -83,10 +83,10 @@ class Library(object):