diff --git a/beat/backend/python/algorithm.py b/beat/backend/python/algorithm.py index b12b6de9ac34b6d9c82e051b0095b7fdc23e82e1..3eaa746aaa8dd1b121578781f75775ee50427435 100644 --- a/beat/backend/python/algorithm.py +++ b/beat/backend/python/algorithm.py @@ -78,8 +78,8 @@ class Runner(object): Parameters: - module (module): The preloaded module containing the algorithm as - returned by :py:func:`beat.core.loader.load_module`. + module (:std:term:`module`): The preloaded module containing the + algorithm as returned by :py:func:`.loader.load_module`. obj_name (str): The name of the object within the module you're interested on @@ -87,9 +87,9 @@ class Runner(object): algorithm (object): The algorithm instance that is used for parameter checking. - exc (class): The class to use as base exception when translating the - exception from the user code. Read the documention of :py:func:`run` - for more details. + exc (:std:term:`class`): The class to use as base exception when + translating the exception from the user code. Read the documentation of + :py:func:`.loader.run` for more details. ''' @@ -266,15 +266,15 @@ class Algorithm(object): name (str): The fully qualified algorithm name (e.g. ``user/algo/1``) - dataformat_cache (dict, optional): A dictionary mapping dataformat names to - loaded dataformats. This parameter is optional and, if passed, may - greatly speed-up algorithm loading times as dataformats that are already - loaded may be re-used. + dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping + dataformat names to loaded dataformats. This parameter is optional and, + if passed, may greatly speed-up algorithm loading times as dataformats + that are already loaded may be re-used. - library_cache (dict, optional): A dictionary mapping library names to - loaded libraries. This parameter is optional and, if passed, may greatly - speed-up library loading times as libraries that are already loaded may - be re-used. + library_cache (:py:class:`dict`, Optional): A dictionary mapping library + names to loaded libraries. This parameter is optional and, if passed, + may greatly speed-up library loading times as libraries that are + already loaded may be re-used. Attributes: @@ -283,7 +283,7 @@ class Algorithm(object): dataformats (dict): A dictionary containing all pre-loaded dataformats used by this algorithm. Data format objects will be of type - :py:class:`beat.core.dataformat.DataFormat`. + :py:class:`.dataformat.DataFormat`. libraries (dict): A mapping object defining other libraries this algorithm needs to load so it can work properly. @@ -741,12 +741,12 @@ class Algorithm(object): klass (str): The name of the class to load the runnable algorithm from - exc (class): If passed, must be a valid exception class that will be - used to report errors in the read-out of this algorithm's code. + exc (:std:term:`class`): If passed, must be a valid exception class + that will be used to report errors in the read-out of this algorithm's code. Returns: - :py:class:`beat.core.algorithm.Runner`: An instance of the algorithm, + :py:class:`Runner`: An instance of the algorithm, which will be constructed, but not setup. You **must** set it up before using the ``process`` method. """ @@ -807,8 +807,9 @@ class Algorithm(object): Parameters: - storage (Storage, optional): If you pass a new storage, then this object - will be written to that storage point rather than its default.
+ storage (:py:class:`.Storage`, Optional): If you pass a new storage, + then this object will be written to that storage point rather than + its default. """ diff --git a/beat/backend/python/baseformat.py b/beat/backend/python/baseformat.py index 90f16edd9f2386b42f825c7ce3add4b1ad4a0c0f..20dbf9dc7c0ce203362d3d5eff60f29675e1d025 100644 --- a/beat/backend/python/baseformat.py +++ b/beat/backend/python/baseformat.py @@ -66,15 +66,16 @@ def setup_scalar(formatname, attrname, dtype, value, casting, add_defaults): dtype (numpy.dtype): The datatype of every element on the array - value (object, optional): A representation of the value. This object will - be cast into a scalar with the dtype defined by the ``dtype`` parameter. + value (:std:term:`object`, Optional): A representation of the value. This + object will be cast into a scalar with the dtype defined by the + ``dtype`` parameter. casting (str): See :py:func:`numpy.can_cast` for a description of possible values for this field. add_defaults (bool): If we should use defaults for missing attributes. In case this value is set to ``True``, missing attributes are set with - defaults, otherwise, a :py:class:`TypeError` is raise if a missing + defaults, otherwise, a :py:exc:`TypeError` is raised if a missing attribute is found. Returns: @@ -140,20 +141,20 @@ def setup_array(formatname, attrname, shape, dtype, value, casting, attrname (str): The name of this attribute (e.g. ``value``). This value is only used for informational purposes - shape (tuple): The shape of the array + shape (:py:class:`tuple`): The shape of the array dtype (numpy.dtype): The datatype of every element on the array - value (object, optional): A representation of the value. This object will - be cast into a numpy array with the dtype defined by the ``dtype`` - parameter. + value (:std:term:`object`, Optional): A representation of the value. This + object will be cast into a numpy array with the dtype defined by the + ``dtype`` parameter. casting (str): See :py:func:`numpy.can_cast` for a description of possible values for this field. add_defaults (bool): If we should use defaults for missing attributes. In case this value is set to ``True``, missing attributes are set with - defaults, otherwise, a :py:class:`TypeError` is raise if a missing + defaults, otherwise, a :py:exc:`TypeError` is raised if a missing attribute is found.
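The ``casting`` argument documented in these setup helpers follows the :py:func:`numpy.can_cast` vocabulary, with ``'safe'`` as the usual default. A minimal sketch of what the ``'safe'`` rule permits, using plain numpy outside of this package (variable names are illustrative only):

.. code-block:: python

   import numpy

   # 'safe' only allows value-preserving conversions
   assert numpy.can_cast(numpy.int32, numpy.int64, casting='safe')        # widening: allowed
   assert not numpy.can_cast(numpy.float64, numpy.int32, casting='safe')  # lossy: rejected

   # casting a value under the 'safe' rule, as a setup helper might do
   value = numpy.float32(0.5)
   widened = value.astype(numpy.float64, casting='safe')  # fine

   # a conversion that violates the rule raises TypeError
   try:
       value.astype(numpy.int16, casting='safe')
   except TypeError:
       pass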
@@ -230,10 +231,10 @@ def pack_array(dtype, value, fd): dtype (numpy.dtype): The datatype of the array (taken from the format descriptor) - value (object, optional): The :py:class:`numpy.ndarray` representing the - value to be encoded + value (:std:term:`object`, Optional): The :py:class:`numpy.ndarray` + representing the value to be encoded - fd (fd): The file where to encode the input + fd (:std:term:`file`): The file where to encode the input """ @@ -265,9 +266,10 @@ def pack_scalar(dtype, value, fd): dtype (numpy.dtype): The datatype of the scalar (taken from the format descriptor) - value (object, optional): An object representing the value to be encoded + value (:std:term:`object`, Optional): An object representing the value to + be encoded - fd (fd): The file where to encode the input + fd (:std:term:`file`): The file where to encode the input """ @@ -306,11 +308,11 @@ def unpack_array(shape, dtype, fd): Parameters: - shape (tuple): The shape of the array + shape (:py:class:`tuple`): The shape of the array dtype (numpy.dtype): The datatype of every element on the array - fd (fd): The file where to encode the input + fd (:std:term:`file`): The file where to read the data from Returns: @@ -351,11 +353,11 @@ def unpack_scalar(dtype, fd): dtype (numpy.dtype): The datatype of every element on the array - fd (fd): The file where to encode the input + fd (:std:term:`file`): The file where to read the data from Returns: - scalar: which among other options, can be a numpy scalar (``int8``, + object: which among other options, can be a numpy scalar (``int8``, ``float32``, ``bool_``, etc) or a string (``str``). Advances readout of ``fd``. @@ -407,10 +409,10 @@ class baseformat(object): Parameters: - data (dict, optional): A dictionary representing the data input, matching - the keywords defined at the resolved format. A value of ``None``, if - passed, effectively results in the same as passing an empty dictionary - ``{}``. + data (:py:class:`dict`, Optional): A dictionary representing the data + input, matching the keywords defined at the resolved format. A + value of ``None``, if passed, effectively results in the same as + passing an empty dictionary ``{}``. casting (str): See :py:func:`numpy.can_cast` for a description of possible values for this field. By default, it is set to ``'safe'``. @@ -418,8 +420,8 @@ class baseformat(object): add_defaults (bool): If we should use defaults for missing attributes. In case this value is set to `True`, missing attributes are set with - defaults, otherwise, a ``TypeError`` is raise if a missing attribute is - found. + defaults, otherwise, a :py:exc:`TypeError` is raised if a missing + attribute is found. """ @@ -486,7 +488,7 @@ class baseformat(object): This method will make the object pickle itself on the file descritor ``fd``. If you'd like to write the contents of this file into a string, use - the :py:mod:`six.BytesIO`. + the :py:data:`six.BytesIO`. """ for key in sorted(self._format.keys()): @@ -530,7 +532,7 @@ class baseformat(object): """Loads a binary representation of this object from a string Effectively, this method just calls :py:meth:`baseformat.unpack_from` with - a :py:class:`six.BytesIO` wrapped around the input string. + a :py:data:`six.BytesIO` wrapped around the input string.
""" return self.unpack_from(six.BytesIO(s)) diff --git a/beat/backend/python/data.py b/beat/backend/python/data.py index a9f699a7f1526c93f5d3f445f207a78da11054d1..1d4035f00b6bfd0aa72cbad5fc05a521f46a4f05 100644 --- a/beat/backend/python/data.py +++ b/beat/backend/python/data.py @@ -765,7 +765,7 @@ class DataSink(object): Parameters: - data (beat.core.baseformat.baseformat): The block of data to write + data (baseformat.baseformat): The block of data to write start_data_index (int): Start index of the written data @@ -814,7 +814,7 @@ class StdoutDataSink(DataSink): Parameters: - data (beat.core.baseformat.baseformat) The block of data to write + data (baseformat.baseformat) The block of data to write start_data_index (int): Start index of the written data @@ -864,7 +864,7 @@ class CachedDataSink(DataSink): filename (str): Name of the file to generate - dataformat (beat.core.dataformat.DataFormat): The dataformat to be used + dataformat (dataformat.DataFormat): The dataformat to be used inside this file. All objects stored inside this file will respect that format. @@ -955,7 +955,7 @@ class CachedDataSink(DataSink): Parameters: - data (beat.core.baseformat.baseformat): The block of data to write + data (baseformat.baseformat): The block of data to write start_data_index (int): Start index of the written data @@ -1025,8 +1025,7 @@ def load_data_index(cache_root, hash_path): cache_root (str): The path to the root of the cache directory hash_path (str): The hashed path of the input you wish to load the indexes - for, as it is returned by the utility function - :py:func:`beat.core.hash.toPath`. + for, as it is returned by the utility function :py:func:`.hash.toPath`. Returns: diff --git a/beat/backend/python/data_loaders.py b/beat/backend/python/data_loaders.py index 63fa5c528f95c209d7a484b02652aef11cb86b80..c3b26199fd7dd736e7bc022036448f7c146d9a6f 100644 --- a/beat/backend/python/data_loaders.py +++ b/beat/backend/python/data_loaders.py @@ -45,10 +45,9 @@ class DataView(object): """Provides access to a subset of data from a group of inputs synchronized together - Data views are created from a data loader - (see :py:class:`beat.backend.python.data_loaders.DataLoader`), which are + Data views are created from a data loader (see :py:class:`DataLoader`), which are provided to the algorithms of types 'sequential' and 'autonomous' - (see :py:class:`beat.backend.python.data_loaders.DataLoaderList`). + (see :py:class:`DataLoaderList`). Example: @@ -62,10 +61,11 @@ class DataView(object): Parameters: - data_loader (:py:class:`beat.backend.python.data_loaders.DataLoader`): - Name of the data channel of the group of inputs + data_loader (:py:class:`DataLoader`): Name of the data channel of the + group of inputs - data_indices (list of tuples): Data indices to consider + data_indices (:py:class:`list`): Data indices to consider as a list of + tuples Attributes: @@ -146,7 +146,7 @@ class DataLoader(object): """Provides access to data from a group of inputs synchronized together Data loaders are provided to the algorithms of types 'sequential' and - 'autonomous' (see :py:class:`beat.backend.python.data_loaders.DataLoaderList`). + 'autonomous' (see :py:class:`DataLoaderList`). Example: @@ -270,7 +270,7 @@ class DataLoaderList(object): One group of inputs is always considered as the **main** one, and is used to drive the algorithm. The usage of the other groups is left to the algorithm. 
- See :py:class:`beat.backend.python.data_loaders.DataLoader` + See :py:class:`DataLoader` Example: @@ -297,7 +297,7 @@ class DataLoaderList(object): Attributes: - main_loader (beat.backend.python.data_loaders.DataLoader): Main data loader + main_loader (DataLoader): Main data loader """ @@ -309,7 +309,7 @@ class DataLoaderList(object): def add(self, data_loader): """Add a data loader to the list - :param beat.backend.python.data_loaders.DataLoader data_loader: The data + :param DataLoader data_loader: The data loader to add """ if self.main_loader is None: diff --git a/beat/backend/python/database.py b/beat/backend/python/database.py index 30c385d24c6e3b0d5327b90fc6be066488024355..45d4fe3b2ca0f936e3cd38569a1e636fef8622e5 100644 --- a/beat/backend/python/database.py +++ b/beat/backend/python/database.py @@ -83,17 +83,17 @@ class Runner(object): db_name (str): The full name of the database object for this view - module (module): The preloaded module containing the database views as - returned by :py:func:`beat.core.loader.load_module`. + module (:std:term:`module`): The preloaded module containing the database + views as returned by :py:func:`.loader.load_module`. prefix (str): Establishes the prefix of your installation. root_folder (str, path): The path pointing to the root folder of this database - exc (class): The class to use as base exception when translating the - exception from the user code. Read the documention of :py:func:`run` - for more details. + exc (:std:term:`class`): The class to use as base exception when + translating the exception from the user code. Read the documentation of + :py:func:`.loader.run` for more details. *args: Constructor parameters for the database view. Normally, none. @@ -197,12 +197,12 @@ class Database(object): name (str): The fully qualified database name (e.g. ``db/1``) - dataformat_cache (dict, optional): A dictionary mapping dataformat names - to loaded dataformats. This parameter is optional and, if passed, may - greatly speed-up database loading times as dataformats that are already - loaded may be re-used. If you use this parameter, you must guarantee - that the cache is refreshed as appropriate in case the underlying - dataformats change. + dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping + dataformat names to loaded dataformats. This parameter is optional and, + if passed, may greatly speed-up database loading times as dataformats + that are already loaded may be re-used. If you use this parameter, you + must guarantee that the cache is refreshed as appropriate in case the + underlying dataformats change. Attributes: @@ -386,8 +386,8 @@ class Database(object): name (str): The name of the set in the protocol where to retrieve the view from - exc (class): If passed, must be a valid exception class that will be - used to report errors in the read-out of this database's view. + exc (:std:term:`class`): If passed, must be a valid exception class + that will be used to report errors in the read-out of this database's view. Returns: @@ -455,8 +455,9 @@ class Database(object): Parameters: - storage (Storage, optional): If you pass a new storage, then this object - will be written to that storage point rather than its default. + storage (:py:class:`.Storage`, Optional): If you pass a new storage, + then this object will be written to that storage point rather than + its default.
""" @@ -520,30 +521,37 @@ class View(object): For instance, assuming a view providing that kind of data: - ----------- ----------- ----------- ----------- ----------- ----------- - | image | | image | | image | | image | | image | | image | - ----------- ----------- ----------- ----------- ----------- ----------- - ----------- ----------- ----------- ----------- ----------- ----------- - | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | - ----------- ----------- ----------- ----------- ----------- ----------- - ----------------------------------- ----------------------------------- - | client_id | | client_id | - ----------------------------------- ----------------------------------- + .. code-block:: text + + ----------- ----------- ----------- ----------- ----------- ----------- + | image | | image | | image | | image | | image | | image | + ----------- ----------- ----------- ----------- ----------- ----------- + ----------- ----------- ----------- ----------- ----------- ----------- + | file_id | | file_id | | file_id | | file_id | | file_id | | file_id | + ----------- ----------- ----------- ----------- ----------- ----------- + ----------------------------------- ----------------------------------- + | client_id | | client_id | + ----------------------------------- ----------------------------------- a list like the following should be generated: - [ - (client_id=1, file_id=1, image=filename1), - (client_id=1, file_id=2, image=filename2), - (client_id=1, file_id=3, image=filename3), - (client_id=2, file_id=4, image=filename4), - (client_id=2, file_id=5, image=filename5), - (client_id=2, file_id=6, image=filename6), - ... - ] - - DO NOT store images, sound files or data loadable from a file in the list! - Store the path of the file to load instead. + .. code-block:: python + + [ + (client_id=1, file_id=1, image=filename1), + (client_id=1, file_id=2, image=filename2), + (client_id=1, file_id=3, image=filename3), + (client_id=2, file_id=4, image=filename4), + (client_id=2, file_id=5, image=filename5), + (client_id=2, file_id=6, image=filename6), + ... + ] + + .. warning:: + + DO NOT store images, sound files or data loadable from a file in the + list! Store the path of the file to load instead. + """ raise NotImplementedError diff --git a/beat/backend/python/dataformat.py b/beat/backend/python/dataformat.py index 2897760c60d8d4848dbcbc531c4d0f69ae3a3343..36914ae370ffc6bd30b255fad71f588470b3c9e8 100644 --- a/beat/backend/python/dataformat.py +++ b/beat/backend/python/dataformat.py @@ -88,19 +88,19 @@ class DataFormat(object): data (str, dict): The fully qualified algorithm name (e.g. ``user/algo/1``) or a dictionary representing the data format (for analyzer results). - parent (tuple, optional): The parent DataFormat for this format. If set to - ``None``, this means this dataformat is the first one on the hierarchy - tree. If set to a tuple, the contents are ``(format-instance, - field-name)``, which indicates the originating object that is this - object's parent and the name of the field on that object that points to - this one. - - dataformat_cache (dict, optional): A dictionary mapping dataformat names to - loaded dataformats. This parameter is optional and, if passed, may - greatly speed-up data format loading times as dataformats that are already - loaded may be re-used. If you use this parameter, you must guarantee that - the cache is refreshed as appropriate in case the underlying dataformats - change. 
+ parent (:py:class:`tuple`, Optional): The parent DataFormat for this + format. If set to ``None``, this means this dataformat is the first one + on the hierarchy tree. If set to a tuple, the contents are + ``(format-instance, field-name)``, which indicates the originating + object that is this object's parent and the name of the field on that + object that points to this one. + + dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping + dataformat names to loaded dataformats. This parameter is optional and, + if passed, may greatly speed-up data format loading times as + dataformats that are already loaded may be re-used. If you use this + parameter, you must guarantee that the cache is refreshed as + appropriate in case the underlying dataformats change. Attributes: @@ -125,7 +125,7 @@ class DataFormat(object): referenced (dict): A dictionary pointing to all loaded dataformats. - parent (beat.core.dataformat.DataFormat): The pointer to the + parent (dataformat.DataFormat): The pointer to the dataformat to which the current format is part of. It is useful for internal error reporting. @@ -395,11 +395,11 @@ class DataFormat(object): Parameters: - data (dict, str, fd): This parameter represents the data to be validated. - It may be a dictionary with the JSON representation of a data blob or, - else, a binary blob (represented by either a string or a file - descriptor object) from which the data will be read. If problems occur, - an exception is raised. + data (dict, str, :std:term:`file`): This parameter represents the data to be + validated. It may be a dictionary with the JSON representation of + a data blob or, else, a binary blob (represented by either a string + or a file descriptor object) from which the data will be read. If + problems occur, an exception is raised. Returns: @@ -466,8 +466,9 @@ class DataFormat(object): Parameters: - storage (Storage, optional): If you pass a new storage, then this object - will be written to that storage point rather than its default. + storage (:py:class:`.Storage`, Optional): If you pass a new storage, + then this object will be written to that storage point rather than + its default. """ diff --git a/beat/backend/python/dbexecution.py b/beat/backend/python/dbexecution.py index 0bb54c6d6c80d596b2821d4476d2d38ef9a15a11..658a83ee99c6b97a14df733a44c4f3bbd3af5b36 100644 --- a/beat/backend/python/dbexecution.py +++ b/beat/backend/python/dbexecution.py @@ -53,19 +53,19 @@ class DBExecutor(object): string is passed, it is supposed to be a fully qualified absolute path to a JSON file containing the block execution information. - dataformat_cache (dict, optional): A dictionary mapping dataformat names to - loaded dataformats. This parameter is optional and, if passed, may - greatly speed-up database loading times as dataformats that are already - loaded may be re-used. If you use this parameter, you must guarantee that - the cache is refreshed as appropriate in case the underlying dataformats - change. + dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping + dataformat names to loaded dataformats. This parameter is optional and, + if passed, may greatly speed-up database loading times as dataformats + that are already loaded may be re-used. If you use this parameter, you + must guarantee that the cache is refreshed as appropriate in case the + underlying dataformats change. - database_cache (dict, optional): A dictionary mapping database names to - loaded databases. 
This parameter is optional and, if passed, may - greatly speed-up database loading times as databases that are already - loaded may be re-used. If you use this parameter, you must guarantee that - the cache is refreshed as appropriate in case the underlying databases - change. + database_cache (:py:class:`dict`, Optional): A dictionary mapping + database names to loaded databases. This parameter is optional and, if + passed, may greatly speed-up database loading times as databases that + are already loaded may be re-used. If you use this parameter, you must + guarantee that the cache is refreshed as appropriate in case the + underlying databases change. Attributes: @@ -77,7 +77,7 @@ class DBExecutor(object): decoder. databases (dict): A dictionary in which keys are strings with database - names and values are :py:class:`database.Database`, representing the + names and values are :py:class:`.database.Database`, representing the databases required for running this block. The dictionary may be empty in case all inputs are taken from the file cache. @@ -86,8 +86,8 @@ class DBExecutor(object): for that particular combination of details. The dictionary may be empty in case all inputs are taken from the file cache. - input_list (beat.core.inputs.InputList): A list of inputs that will be - served to the algorithm. + input_list (inputs.InputList): A list of inputs that will be served to + the algorithm. data_sources (list): A list with all data-sources created by our execution loader. diff --git a/beat/backend/python/executor.py b/beat/backend/python/executor.py index 7284cfc92bedcde957c50b2e69fa78598c53ffe4..25dff590bd2ac6548e120a2272f86cd5e877ca48 100644 --- a/beat/backend/python/executor.py +++ b/beat/backend/python/executor.py @@ -55,26 +55,26 @@ class Executor(object): directory (str): The path to a directory containing all the information required to run the user experiment. - dataformat_cache (dict, optional): A dictionary mapping dataformat names to - loaded dataformats. This parameter is optional and, if passed, may - greatly speed-up database loading times as dataformats that are already - loaded may be re-used. If you use this parameter, you must guarantee that - the cache is refreshed as appropriate in case the underlying dataformats - change. - - database_cache (dict, optional): A dictionary mapping database names to - loaded databases. This parameter is optional and, if passed, may - greatly speed-up database loading times as databases that are already - loaded may be re-used. If you use this parameter, you must guarantee that - the cache is refreshed as appropriate in case the underlying databases - change. - - library_cache (dict, optional): A dictionary mapping library names to - loaded libraries. This parameter is optional and, if passed, may greatly - speed-up library loading times as libraries that are already loaded may - be re-used. If you use this parameter, you must guarantee that the cache - is refreshed as appropriate in case the underlying libraries change. - """ + dataformat_cache (:py:class:`dict`, Optional): A dictionary mapping + dataformat names to loaded dataformats. This parameter is optional and, + if passed, may greatly speed-up database loading times as dataformats + that are already loaded may be re-used. If you use this parameter, you + must guarantee that the cache is refreshed as appropriate in case the + underlying dataformats change. + + database_cache (:py:class:`dict`, Optional): A dictionary mapping + database names to loaded databases. 
This parameter is optional and, if + passed, may greatly speed-up database loading times as databases that + are already loaded may be re-used. If you use this parameter, you must + guarantee that the cache is refreshed as appropriate in case the + underlying databases change. + + library_cache (:py:class:`dict`, Optional): A dictionary mapping library + names to loaded libraries. This parameter is optional and, if passed, + may greatly speed-up library loading times as libraries that are + already loaded may be re-used. If you use this parameter, you must + guarantee that the cache is refreshed as appropriate in case the + underlying libraries change. """ def __init__(self, socket, directory, dataformat_cache=None, database_cache=None, library_cache=None, cache_root='/cache', diff --git a/beat/backend/python/hash.py b/beat/backend/python/hash.py index b4aeb5e6a7a9047fffaf6340d6eb9bff524a81c8..e906c24def48ee9b60bdcbf32a1f645f954019df 100644 --- a/beat/backend/python/hash.py +++ b/beat/backend/python/hash.py @@ -41,7 +41,9 @@ import os def _sha256(s): - """A python2/3 replacement for :py:func:`haslib.sha256`""" + """A python2/3 shortcut for :py:func:`hashlib.sha256.hexdigest` that will + ensure that the given string is unicode before going further. + """ try: if isinstance(s, six.string_types): s = six.u(s) @@ -104,7 +106,7 @@ def hash(dictionary_or_string): def hashJSON(contents, description): - """Hashes the pre-loaded JSON object using :py:func:`hashlib.sha256` + """Hashes the pre-loaded JSON object using :py:meth:`hashlib.hash.hexdigest` Excludes description changes """ @@ -120,7 +122,7 @@ def hashJSON(contents, description): def hashJSONFile(path, description): - """Hashes the JSON file contents using :py:func:`hashlib.sha256` + """Hashes the JSON file contents using :py:meth:`hashlib.hash.hexdigest` Excludes description changes """ @@ -138,7 +140,7 @@ def hashJSONFile(path, description): def hashFileContents(path): - """Hashes the file contents using :py:func:`hashlib.sha256`.""" + """Hashes the file contents using :py:meth:`hashlib.hash.hexdigest`.""" with open(path, 'rb') as f: sha256 = hashlib.sha256() diff --git a/beat/backend/python/inputs.py b/beat/backend/python/inputs.py index cda45f0fce42c9e8f694e2b2a424cd8ccdf477d0..23aba7045660ef3336f98dfb4927418a27b7612a 100644 --- a/beat/backend/python/inputs.py +++ b/beat/backend/python/inputs.py @@ -47,7 +47,7 @@ class Input(object): (legacy) data source A list of those inputs must be provided to the algorithms (see - :py:class:`beat.backend.python.inputs.InputList`) + :py:class:`InputList`) Parameters: name (str): Name of the input @@ -59,11 +59,11 @@ class Input(object): Attributes: - group (beat.core.inputs.InputGroup): Group containing this input + group (InputGroup): Group containing this input name (str): Name of the input (algorithm-specific) - data (beat.core.baseformat.baseformat): The last block of data received on + data (baseformat.baseformat): The last block of data received on the input data_index (int): Index of the last block of data received on the input @@ -144,9 +144,9 @@ class InputGroup: """Represents a group of inputs synchronized together A group implementing this interface is provided to the algorithms (see - :py:class:`beat.backend.python.inputs.InputList`). + :py:class:`InputList`).
- See :py:class:`beat.core.inputs.Input` + See :py:class:`Input` Example: @@ -170,7 +170,7 @@ class InputGroup: channel (str): Name of the data channel of the group - synchronization_listener (beat.core.outputs.SynchronizationListener): + synchronization_listener (outputs.SynchronizationListener): Synchronization listener to use restricted_access (bool): Indicates if the algorithm can freely use the @@ -187,7 +187,7 @@ class InputGroup: channel (str): Name of the data channel of the group - synchronization_listener (beat.core.outputs.SynchronizationListener): + synchronization_listener (outputs.SynchronizationListener): Synchronization listener used """ @@ -231,8 +231,7 @@ class InputGroup: Parameters: - input (beat.backend.python.inputs.Input or beat.backend.python.inputs.RemoteInput): - The input to add + input (Input): The input to add """ @@ -299,8 +298,8 @@ class InputList: One group of inputs is always considered as the **main** one, and is used to drive the algorithm. The usage of the other groups is left to the algorithm. - See :py:class:`beat.core.inputs.Input` - See :py:class:`beat.core.inputs.InputGroup` + See :py:class:`Input` + See :py:class:`InputGroup` Example: @@ -335,7 +334,7 @@ class InputList: Attributes: - main_group (beat.core.inputs.InputGroup): Main group (for data-driven + main_group (InputGroup): Main group (for data-driven algorithms) """ @@ -348,7 +347,7 @@ class InputList: def add(self, group): """Add a group to the list - :param beat.core.platform.inputs.InputGroup group: The group to add + :param InputGroup group: The group to add """ if group.restricted_access and (self.main_group is None): self.main_group = group diff --git a/beat/backend/python/library.py b/beat/backend/python/library.py index b6f03daf7a905951b038730a6c0b9fd127f1d49c..8eac3ea7f8830cbc0e30a9685ae7afdb4826b605 100644 --- a/beat/backend/python/library.py +++ b/beat/backend/python/library.py @@ -83,10 +83,10 @@ class Library(object): name (str): The fully qualified algorithm name (e.g. ``user/algo/1``) - library_cache (dict, optional): A dictionary mapping library names to - loaded libraries. This parameter is optional and, if passed, may greatly - speed-up library loading times as libraries that are already loaded may - be re-used. + library_cache (:py:class:`dict`, Optional): A dictionary mapping library + names to loaded libraries. This parameter is optional and, if passed, + may greatly speed-up library loading times as libraries that are + already loaded may be re-used. Attributes: @@ -328,8 +328,9 @@ class Library(object): Parameters: - storage (Storage, optional): If you pass a new storage, then this object - will be written to that storage point rather than its default. + storage (:py:class:`.Storage`, Optional): If you pass a new + storage, then this object will be written to that storage point + rather than its default. """ diff --git a/beat/backend/python/loader.py b/beat/backend/python/loader.py index 205e2626f5d1647f29e1da1f6b3cd38cb1bef0ff..fcbc6849c63ca7fcf46b1a08954eba6d2ec7fdaf 100644 --- a/beat/backend/python/loader.py +++ b/beat/backend/python/loader.py @@ -56,7 +56,8 @@ def load_module(name, path, uses): Returns: - module: A valid Python module you can use in an Algorithm or Library. + :std:term:`module`: A valid Python module you can use in an Algorithm or + Library. 
''' @@ -87,9 +88,9 @@ def run(obj, method, exc=None, *args, **kwargs): method (str): The method name to execute on the object - exc (class, optional): The class to use as base exception when translating - the exception from the user code. If you set it to ``None``, then just - re-throws the user raised exception. + exc (:std:term:`class`, Optional): The class to use as base exception when + translating the exception from the user code. If you set it to + ``None``, then just re-throws the user raised exception. *args: Arguments to the object method, passed unchanged diff --git a/beat/backend/python/outputs.py b/beat/backend/python/outputs.py index 10deaca92a019a1464b48b59ee97a9d85acbce98..041732c0638ca6092f9651b63173f6b097f25e5a 100644 --- a/beat/backend/python/outputs.py +++ b/beat/backend/python/outputs.py @@ -56,14 +56,14 @@ class Output(object): """Represents one output of a processing block A list of outputs implementing this interface is provided to the algorithms - (see :py:class:`beat.core.outputs.OutputList`). + (see :py:class:`OutputList`). Parameters: name (str): Name of the output - data_sink (beat.core.data.DataSink): Sink of data to be used by the output, + data_sink (data.DataSink): Sink of data to be used by the output, pre-configured with the correct data format. @@ -71,7 +71,7 @@ class Output(object): name (str): Name of the output (algorithm-specific) - data_sink (beat.core.data.DataSink): Sink of data used by the output + data_sink (data.DataSink): Sink of data used by the output last_written_data_index (int): Index of the last block of data written by the output @@ -111,7 +111,7 @@ class Output(object): Parameters: - data (beat.core.baseformat.baseformat): The block of data to write, or + data (baseformat.baseformat): The block of data to write, or None (if the algorithm doesn't want to write any data) end_data_index (int): Last index of the written data (see the section @@ -172,7 +172,7 @@ class OutputList: A list implementing this interface is provided to the algorithms - See :py:class:`beat.core.outputs.Output`. + See :py:class:`Output`. Example: @@ -224,7 +224,7 @@ class OutputList: Parameters: - input (beat.core.outputs.Output): The output to add + input (Output): The output to add """ diff --git a/doc/api.rst b/doc/api.rst index 4da05b63646a2fb41c9b4ff28a02c301b16b288f..13dc64adb6d9d3e7e0f28a9b1179dd4c0c5cdcef 100644 --- a/doc/api.rst +++ b/doc/api.rst @@ -6,32 +6,34 @@ This section includes information for using the Python API of ``beat.backend.python``. -.. automodule:: beat.backend.python.algorithm +.. notice order is important! + +.. automodule:: beat.backend.python.loader + +.. automodule:: beat.backend.python.hash .. automodule:: beat.backend.python.baseformat -.. automodule:: beat.backend.python.data +.. automodule:: beat.backend.python.dataformat -.. automodule:: beat.backend.python.data_loaders +.. automodule:: beat.backend.python.algorithm .. automodule:: beat.backend.python.database -.. automodule:: beat.backend.python.dataformat +.. automodule:: beat.backend.python.data + +.. automodule:: beat.backend.python.data_loaders .. automodule:: beat.backend.python.dbexecution .. automodule:: beat.backend.python.executor -.. automodule:: beat.backend.python.hash - .. automodule:: beat.backend.python.helpers .. automodule:: beat.backend.python.inputs .. automodule:: beat.backend.python.library -.. automodule:: beat.backend.python.loader - .. automodule:: beat.backend.python.message_handler .. 
automodule:: beat.backend.python.outputs diff --git a/doc/conf.py b/doc/conf.py index 1c4964543214657d21c712bcf45bd9836e4869e1..6d70015947a13529579e2c776d0c58a652ee2f11 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -240,6 +240,10 @@ if os.path.exists(sphinx_requirements): else: intersphinx_mapping = link_documentation() +# Adds simplejson, pyzmq and six links +intersphinx_mapping['http://simplejson.readthedocs.io/en/stable/'] = None +intersphinx_mapping['http://pyzmq.readthedocs.io/en/stable/'] = None +intersphinx_mapping['http://six.readthedocs.io'] = None # We want to remove all private (i.e. _. or __.__) members # that are not in the list of accepted functions
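Taken together, the docstring changes in this patch converge on a single cross-referencing convention: concrete Python types are marked with ``:py:class:`` (``dict``, ``tuple``), exceptions with ``:py:exc:`` (``TypeError``), glossary entries such as ``module``, ``class``, ``object`` and ``file`` with ``:std:term:``, objects of this package with relative targets (``.Storage``, ``.dataformat.DataFormat``), while references to external projects such as ``six`` resolve through the intersphinx mappings registered above. A minimal sketch of a docstring written in that convention follows; the function and its parameters are hypothetical and only illustrate the markup:

.. code-block:: python

   def save(obj, storage=None):
       """Saves the given object to a storage point

       Parameters:

         obj (:std:term:`object`): The object to serialize

         storage (:py:class:`.Storage`, Optional): If you pass a new storage,
           then the object will be written to that storage point rather than
           to its default location


       Raises:

         :py:exc:`TypeError`: if the object cannot be serialized

       """

       pass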