Commit 911e551b authored by André Anjos

Fix all documentation warnings

parent f1227c20
Pipeline #4806 passed with stages in 23 minutes and 44 seconds
@@ -238,7 +238,7 @@ static auto forward_doc = bob::extension::FunctionDoc(
"Computes the BIC or IEC score for the given input vector, which results of a comparison vector of two (facial) images",
"The resulting value is returned as a single float value. "
"The score itself is the log-likelihood score of the given input vector belonging to the intrapersonal class.\n\n"
".. note:: the :py:meth:`__call__` function is an alias for this function",
".. note:: the ``__call__`` method is an alias for this one",
true
)
.add_prototype("input", "score")
......
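As an illustrative sketch (not part of the diff), the scoring call documented above can be exercised from Python roughly as follows; the ``BICTrainer(5, 5)`` construction and the data shapes are assumptions:

.. code-block:: python

   import numpy
   import bob.learn.linear

   # illustrative difference vectors from image pairs (assumed shapes)
   intra = numpy.random.randn(50, 10)  # same-identity pairs
   extra = numpy.random.randn(50, 10)  # different-identity pairs
   machine = bob.learn.linear.BICTrainer(5, 5).train(intra, extra)

   v = numpy.random.randn(10)
   assert machine(v) == machine.forward(v)  # __call__ aliases forward()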
@@ -20,7 +20,7 @@
static auto LDA_doc = bob::extension::ClassDoc(
BOB_EXT_MODULE_PREFIX ".FisherLDATrainer",
"Trains a :py:class:`Machine` to perform Fisher's Linear Discriminant Analysis (LDA).",
"Trains a :py:class:`bob.learn.linear.Machine` to perform Fisher's Linear Discriminant Analysis (LDA).",
"LDA finds the projection matrix W that allows us to linearly project the data matrix X to another (sub) space in which the between-class and within-class variances are jointly optimized: the between-class variance is maximized while the with-class is minimized. "
"The (inverse) cost function for this criteria can be posed as the following:\n\n"
".. math::\n\n"
......
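An illustrative sketch of the trainer documented above, assuming ``train()`` accepts one 2D float64 array per class and returns the machine together with the eigenvalues:

.. code-block:: python

   import numpy
   import bob.learn.linear

   # two classes, 20 samples each, 4 features (illustrative data)
   X = [numpy.random.randn(20, 4), numpy.random.randn(20, 4) + 2.0]
   trainer = bob.learn.linear.FisherLDATrainer()
   machine, eigen_values = trainer.train(X)
   projected = machine(X[0])  # project samples onto the LDA subspace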
@@ -39,7 +39,7 @@ static auto CGLogReg_doc = bob::extension::ClassDoc(
.add_parameter("convergence_threshold", "float", "[Default: ``1e-5``] The convergence threshold for the conjugate gradient algorithm")
.add_parameter("max_iterations", "int", "[Default: ``10000``] The maximum number of iterations for the conjugate gradient algorithm")
.add_parameter("reg", "float", "[Default: ``0.``] The regularization factor lambda. If you set this to the value of ``0.``, then the algorithm will apply **no** regularization whatsoever")\
.add_parameter("mean_std_norm", "bool", "[Default: ``False``] Performs mean and standard-deviation normalization (whitening) of the input data before training the (resulting) :py:class:`Machine`. Setting this to ``True`` is recommended for large data sets with significant amplitude variations between dimensions")
.add_parameter("mean_std_norm", "bool", "[Default: ``False``] Performs mean and standard-deviation normalization (whitening) of the input data before training the (resulting) :py:class:`bob.learn.linear.Machine`. Setting this to ``True`` is recommended for large data sets with significant amplitude variations between dimensions")
.add_parameter("other", ":py:class:`CGLogRegTrainer`", "If you decide to copy construct from another object of the same type, pass it using this parameter")
);
static int PyBobLearnLinearCGLogRegTrainer_init_parameters
......
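The parameters above can be exercised as in the sketch below; the ``train(negatives, positives)`` call and the keyword form of the constructor are assumptions about the trainer's interface:

.. code-block:: python

   import numpy
   import bob.learn.linear

   negatives = numpy.random.randn(100, 5)        # illustrative impostor samples
   positives = numpy.random.randn(100, 5) + 1.0  # illustrative genuine samples
   trainer = bob.learn.linear.CGLogRegTrainer(
       convergence_threshold=1e-5,  # defaults documented above
       max_iterations=10000,
       reg=0.,                      # 0. applies no regularization
       mean_std_norm=True,          # whiten inputs before training
   )
   machine = trainer.train(negatives, positives)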
@@ -38,7 +38,7 @@ static auto Machine_doc = bob::extension::ClassDoc(
"The machine is remains **uninitialized**. "
"With the second form, the user passes a 2D array with 64-bit floats containing weight matrix to be used as the :py:attr:`weights` matrix by the new machine. "
"In the third form the user passes a :py:class:`bob.io.base.HDF5File` opened for reading, which points to the machine information to be loaded in memory. "
"Finally, in the last form (copy constructor), the user passes another :py:class:`Machine` that will be deep copied."
"Finally, in the last form (copy constructor), the user passes another :py:class:`bob.learn.linear.Machine` that will be deep copied."
)
.add_prototype("[input_size], [output_size])", "")
.add_prototype("weights", "")
@@ -48,7 +48,7 @@ static auto Machine_doc = bob::extension::ClassDoc(
.add_parameter("output_size", "int", "[Default: 0] The dimensionality of the output data")
.add_parameter("weights", "array_like(2D, float)", "A weight matrix to initialize the :py:attr:`weights`")
.add_parameter("config", ":py:class:`bob.io.base.HDF5File`", "The HDF5 file open for reading")
.add_parameter("other", ":py:class:`Machine`", "The machine to copy construct")
.add_parameter("other", ":py:class:`bob.learn.linear.Machine`", "The machine to copy construct")
);
static int PyBobLearnLinearMachine_init_sizes
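A sketch of the four constructor forms documented above (the HDF5 file name is hypothetical):

.. code-block:: python

   import numpy
   import bob.io.base
   import bob.learn.linear

   m1 = bob.learn.linear.Machine(2, 3)         # sizes only: stays uninitialized
   w = numpy.array([[0.3, 0.7], [-0.1, 0.2]])  # 64-bit float weight matrix
   m2 = bob.learn.linear.Machine(w)            # from a weight matrix
   m3 = bob.learn.linear.Machine(m2)           # deep copy
   # from an HDF5 file written earlier, e.g. with m2.save(...):
   # m4 = bob.learn.linear.Machine(bob.io.base.HDF5File('machine.hdf5'))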
@@ -575,7 +575,7 @@ static auto forward = bob::extension::FunctionDoc(
"If one provides a 1D array, the ``output`` array, if provided, should also be 1D, matching the output size of this machine. "
"If one provides a 2D array, it is considered a set of vertically stacked 1D arrays (one input per row) and a 2D array is produced or expected in ``output``. "
"The ``output`` array in this case shall have the same number of rows as the ``input`` array and as many columns as the output size for this machine.\n\n"
".. note:: The :py:meth:`__call__` function is an alias for this method.",
".. note:: The ``__call__`` method is an alias for this method.",
true
)
.add_prototype("input, [output]", "output")
@@ -730,7 +730,7 @@ static auto is_similar_to = bob::extension::FunctionDoc(
true
)
.add_prototype("other, [r_epsilon], [a_epsilon]", "similar")
.add_parameter("other", ":py:class:`Machine`", "The other machine to compare with")
.add_parameter("other", ":py:class:`bob.learn.linear.Machine`", "The other machine to compare with")
.add_parameter("r_epsilon", "float", "[Default: ``1e-5``] The relative precision")
.add_parameter("a_epsilon", "float", "[Default: ``1e-8``] The absolute precision")
.add_return("similar", "bool", "``True`` if the ``other`` machine is similar to this one, otherwise ``False``")
......
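A sketch of the comparison semantics, assuming ``numpy.allclose``-style tolerances:

.. code-block:: python

   import numpy
   import bob.learn.linear

   m1 = bob.learn.linear.Machine(numpy.ones((2, 2)))
   m2 = bob.learn.linear.Machine(numpy.ones((2, 2)) + 1e-9)  # tiny perturbation
   assert m1.is_similar_to(m2)  # within the default tolerances
   assert not m1.is_similar_to(m2, r_epsilon=1e-12, a_epsilon=1e-12)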
@@ -46,7 +46,7 @@ static auto PCA_doc = bob::extension::ClassDoc(
" XX^T &= U S^2 U^*\n\n"
"If :math:`X` has zero mean, we can conclude by inspection that the :math:`U` matrix obtained by SVD contains the eigen-vectors of the covariance matrix of :math:`X` (:math:`XX^T`) and :math:`S^2/(m-1)` corresponds to its eigen values.\n\n"
".. note:: Our implementation uses LAPACK's ``dgesdd`` to compute the solution to this linear equation.\n\n"
"The corresponding :py:class:`Machine` and returned eigen-values of :math:`\\Sigma`, are pre-sorted in descending order (the first eigen-vector - or column - of the weight matrix in the :py:class:`Machine` corresponds to the highest eigen-value obtained).\n\n"
"The corresponding :py:class:`bob.learn.linear.Machine` and returned eigen-values of :math:`\\Sigma`, are pre-sorted in descending order (the first eigen-vector - or column - of the weight matrix in the :py:class:`bob.learn.linear.Machine` corresponds to the highest eigen-value obtained).\n\n"
".. note::\n\n"
" One question you should pose yourself is which of the methods to choose.\n"
" Here is some advice: you should prefer the covariance method over SVD when the number of samples (rows of :math:`X`) is greater than the number of features (columns of :math:`X`).\n"
@@ -170,7 +170,7 @@ static auto train = bob::extension::FunctionDoc(
"Trains a linear machine to perform the PCA (aka. KLT)",
"The resulting machine will have the same number of inputs as columns in ``X`` and :math:`K` eigen-vectors, where :math:`K=\\min{(S-1,F)}`, with :math:`S` being the number of rows in ``X`` (samples) and :math:`F` the number of columns (or features). "
"The vectors are arranged by decreasing eigen-value automatically -- there is no need to sort the results.\n\n"
"The user may provide or not an object of type :py:class:`Machine` that will be set by this method. "
"The user may provide or not an object of type :py:class:`bob.learn.linear.Machine` that will be set by this method. "
"If provided, machine should have the correct number of inputs and outputs matching, respectively, the number of columns in the input data array ``X`` and the output of the method :py:meth:`output_size`.\n\n"
"The input data matrix ``X`` should correspond to a 64-bit floating point array organized in such a way that every row corresponds to a new observation of the phenomena (i.e., a new sample) and every column corresponds to a different feature.\n\n"
"This method returns a tuple consisting of the trained machine and a 1D 64-bit floating point array containing the eigen-values calculated while computing the KLT. "
......
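A sketch of the training call documented above; with 100 samples and 4 features, :math:`K=\min{(S-1,F)}=4` components are kept:

.. code-block:: python

   import numpy
   import bob.learn.linear

   X = numpy.random.randn(100, 4)  # rows: samples, columns: features (float64)
   trainer = bob.learn.linear.PCATrainer()
   machine, eigen_values = trainer.train(X)  # eigenvalues in decreasing order
   projected = machine(X)                    # project onto the PCA basis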
@@ -21,7 +21,7 @@
static auto Whitening_doc = bob::extension::ClassDoc(
BOB_EXT_MODULE_PREFIX ".WhiteningTrainer",
"Trains a linear :py:class:`Machine` to perform Cholesky whitening.",
"Trains a linear :py:class:`bob.learn.linear.Machine` to perform Cholesky whitening.",
"The whitening transformation is a decorrelation method that converts the covariance matrix of a set of samples into the identity matrix :math:`I`. "
"This effectively linearly transforms random variables such that the resulting variables are uncorrelated and have the same variances as the original random variables. "
"This transformation is invertible. "
@@ -124,7 +124,7 @@ static PyObject* PyBobLearnLinearWhiteningTrainer_RichCompare
static auto train = bob::extension::FunctionDoc(
"train",
"Trains a linear machine to perform Cholesky whitening",
"The user may provide or not an object of type :py:class:`Machine` that will be set by this method. "
"The user may provide or not an object of type :py:class:`bob.learn.linear.Machine` that will be set by this method. "
"In such a case, the machine should have a shape that matches ``(X.shape[1], X.shape[1])``. "
"If the user does not provide a machine to be set, then a new one will be allocated internally. "
"In both cases, the resulting machine is always returned by this method.\n\n"
......
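A sketch of the whitening workflow, letting ``train()`` allocate the machine internally as described above:

.. code-block:: python

   import numpy
   import bob.learn.linear

   X = numpy.random.randn(100, 3)
   machine = bob.learn.linear.WhiteningTrainer().train(X)  # shape (3, 3)
   white = machine(X)
   print(numpy.cov(white, rowvar=False))  # approximately numpy.eye(3)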
@@ -6,10 +6,8 @@
C++ API
=========
-.. todo:: Correct the C++ API (it seems to be a copy of bob.learn.activation)
The C++ API of ``bob.learn.linear`` allows users to leverage from automatic
-converters for classes in :py:class:`bob.learn.linear`. To use the C API,
+converters for classes in :py:mod:`bob.learn.linear`. To use the C API,
clients should first, include the header file ``<bob.learn.linear/api.h>`` on
their compilation units and then, make sure to call once
``import_bob_learn_linear()`` at their module instantiation, as explained at
@@ -29,67 +27,38 @@ the import function:
   if (!m) return 0;

-  if (import_bob_blitz() < 0) return 0;
-  if (import_bob_io() < 0) return 0;
-  if (import_bob_learn_activation() < 0) return 0;
+  // imports dependencies
+  if (import_bob_blitz() < 0) {
+    PyErr_Print();
+    PyErr_SetString(PyExc_ImportError, "cannot import module");
+    return 0;
+  }
+
+  if (import_bob_io_base() < 0) {
+    PyErr_Print();
+    PyErr_SetString(PyExc_ImportError, "cannot import module");
+    return 0;
+  }
+
+  if (import_bob_learn_activation() < 0) {
+    PyErr_Print();
+    PyErr_SetString(PyExc_ImportError, "cannot import module");
+    return 0;
+  }
+
+  if (import_bob_learn_linear() < 0) {
+    PyErr_Print();
+    PyErr_SetString(PyExc_ImportError, "cannot import module");
+    return 0;
+  }

   return m;

 }
-Activation Functors
--------------------
-
-.. cpp:type:: PyBobMachineActivationObject
-
-   The pythonic object representation for a ``bob::machine::Activation``
-   object. It is the base class of all activation functors available in
-   |project|. In C/C++ code, we recommend you only manipulate objects like this
-   to keep your code agnostic to the activation type being used.
-
-   .. code-block:: cpp
-
-      typedef struct {
-        PyObject_HEAD
-        bob::machine::Activation* base;
-      } PyBobMachineActivationObject;
-
-   .. cpp:member:: bob::machine::Activation* base
-
-      A pointer to the activation functor virtual implementation.
-
-.. cpp:function:: int PyBobMachineActivation_Check(PyObject* o)
-
-   Checks if the input object ``o`` is a ``PyBobMachineActivationObject``.
-   Returns ``1`` if it is, and ``0`` otherwise.
-
-.. note::
-
-   Other object definitions exist for each of the specializations for
-   activation functors found in |project|. They are exported through the module
-   C-API, but we don't recommend using them since you'd loose generality. In
-   case you do absolutely need to use any of these derivations, they have all
-   the same object configuration:
-
-   .. code-block:: c++
-
-      typedef struct {
-        PyBobMachineActivationObject parent;
-        bob::machine::<Subtype>Activation* base;
-      } PyBobMachine<Subtype>ActivationObject;
-
-   Presently, ``<Subtype>`` can be one of:
-
-   * Identity
-   * Linear
-   * Logistic
-   * HyperbolicTangent
-   * MultipliedHyperbolicTangent
-
-Type objects are also named consistently like
-``PyBobMachine<Subtype>_Type``.
+Linear Machines
+---------------
+
+.. todo:: Write this section
.. include:: links.rst
.. vim: set fileencoding=utf-8 :
-.. Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
-.. Wed Mar 14 12:31:35 2012 +0100
-.. modified by Elie Khoury <elie.khoury@idiap.ch>
-.. Mon May 06 15:50:20 2013 +0100
-.. consolidated by Andre Anjos <andre.anjos@idiap.ch>
-.. Wed 15 Jan 2014 12:20:47 CET
..
.. Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+.. Thu 13 Oct 2016 16:01:27 CEST
.. testsetup:: *
@@ -58,7 +51,8 @@ in double-precision. Here is how to use a
array([ 0.55, 0.55])
As was shown in the above example, the way to pass data through a machine is to
-call its :py:meth:`bob.learn.linear.Machine.forward` function, for which the :py:meth:`bob.learn.linear.Machine.__call__` funciton is an alias.
+call its :py:meth:`bob.learn.linear.Machine.forward` method, for which the
+``__call__`` method is an alias.
The first thing to notice about machines is that they can be stored and
retrieved in :py:class:`bob.io.base.HDF5File`. To save the before
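To make the storage workflow concrete, a sketch of the save/restore round-trip (the file name is hypothetical):

.. code-block:: python

   import numpy
   import bob.io.base
   import bob.learn.linear

   machine = bob.learn.linear.Machine(numpy.ones((2, 2)))
   machine.save(bob.io.base.HDF5File('machine.hdf5', 'w'))  # write
   restored = bob.learn.linear.Machine(bob.io.base.HDF5File('machine.hdf5'))
   assert machine.is_similar_to(restored)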
@@ -93,7 +87,7 @@ produces, in a tuple format like ``(input_size, output_size)``:
>>> machine.shape
(2, 2)
-A :py:class:`bob.learn.linear.Machine`` also supports pre-setting
+A :py:class:`bob.learn.linear.Machine` also supports pre-setting
normalization vectors that are applied to every input :math:`x`. You can set a
subtraction factor and a division factor, so that the actual input :math:`x'`
that is fed to the matrix :math:`W` is :math:`x' = (x - s) ./ d`. The variables
......
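A sketch of the normalization described above, assuming ``input_subtract`` and ``input_divide`` are writable attributes:

.. code-block:: python

   import numpy
   import bob.learn.linear

   machine = bob.learn.linear.Machine(numpy.ones((2, 2)))
   machine.input_subtract = numpy.array([0.5, 0.5])  # s in x' = (x - s) ./ d
   machine.input_divide = numpy.array([2.0, 2.0])    # d in x' = (x - s) ./ d
   y = machine(numpy.array([1.5, 1.5]))  # normalization applied before W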
+# This is a c type confused as c++ type by Sphinx (not our fault)
+cpp:type PyObject
@@ -9,9 +9,13 @@
This section includes information for using the pure Python API of ``bob.learn.linear``.
-Classes
+Summary
-------
+
+Classes
+=======
.. autosummary::
bob.learn.linear.Machine
bob.learn.linear.PCATrainer
......@@ -19,12 +23,11 @@ Classes
bob.learn.linear.WCCNTrainer
bob.learn.linear.WhiteningTrainer
bob.learn.linear.CGLogRegTrainer
bob.learn.linear.BICMachine
bob.learn.linear.BICTrainer
Functions
----------
+=========
.. autosummary::
bob.learn.linear.get_config
@@ -32,4 +35,8 @@ Functions
bob.learn.linear.bic_intra_extra_pairs_between_factors
+Reference
+---------
+
+.. automodule:: bob.learn.linear