From 93d9b59e29ec482cabeb6a2c543f0c2598aa1c3e Mon Sep 17 00:00:00 2001
From: Manuel Gunther <siebenkopf@googlemail.com>
Date: Tue, 17 Nov 2015 18:22:56 -0700
Subject: [PATCH] Cosmetic corrections of documentation

---
 bob/learn/linear/lda.cpp     | 6 ++++--
 bob/learn/linear/logreg.cpp  | 3 ++-
 bob/learn/linear/machine.cpp | 9 ++++++---
 bob/learn/linear/pca.cpp     | 8 +++++---
 bob/learn/linear/wccn.cpp    | 3 ++-
 5 files changed, 19 insertions(+), 10 deletions(-)

diff --git a/bob/learn/linear/lda.cpp b/bob/learn/linear/lda.cpp
index 79a0b73..ca6d4ee 100644
--- a/bob/learn/linear/lda.cpp
+++ b/bob/learn/linear/lda.cpp
@@ -188,7 +188,8 @@ static auto train = bob::extension::FunctionDoc(
   "To accomplish this, either prepare a list with all your class observations organized in 2D arrays or pass a 3D array in which the first dimension (depth) contains as many elements as classes you want to discriminate.\n\n"
   ".. note::\n\n"
   " We set at most :py:meth:`output_size` eigen-values and vectors on the passed machine.\n"
-  " You can compress the machine output further using :py:meth:`Machine.resize` if necessary."
+  " You can compress the machine output further using :py:meth:`Machine.resize` if necessary.",
+  true
 )
 .add_prototype("X, [machine]", "machine, eigen_values")
 .add_parameter("X", "[array_like(2D, floats)] or array_like(3D, floats)", "The input data, separated to contain the training data per class in the first dimension")
@@ -282,7 +283,8 @@ static auto output_size = bob::extension::FunctionDoc(
   "This method should be used to setup linear machines and input vectors prior to feeding them into this trainer.\n\n"
   "The value of ``X`` should be a sequence over as many 2D 64-bit floating point number arrays as classes in the problem. "
   "All arrays will be checked for conformance (identical number of columns). "
-  "To accomplish this, either prepare a list with all your class observations organized in 2D arrays or pass a 3D array in which the first dimension (depth) contains as many elements as classes you want to discriminate."
+  "To accomplish this, either prepare a list with all your class observations organized in 2D arrays or pass a 3D array in which the first dimension (depth) contains as many elements as classes you want to discriminate.",
+  true
 )
 .add_prototype("X","size")
 .add_parameter("X", "[array_like(2D, floats)] or array_like(3D, floats)", "The input data, separated to contain the training data per class in the first dimension")
diff --git a/bob/learn/linear/logreg.cpp b/bob/learn/linear/logreg.cpp
index 27b3986..3199564 100644
--- a/bob/learn/linear/logreg.cpp
+++ b/bob/learn/linear/logreg.cpp
@@ -143,7 +143,8 @@ static auto train = bob::extension::FunctionDoc(
   "train",
   "Trains a linear machine to perform linear logistic regression",
   "The resulting machine will have the same number of inputs as columns in ``negatives`` and ``positives`` and a single output. "
-  "This method always returns a machine, which will be identical to the one provided (if the user passed one) or a new one allocated internally."
+  "This method always returns a machine, which will be identical to the one provided (if the user passed one) or a new one allocated internally.",
+  true
 )
 .add_prototype("negatives, positives, [machine]", "machine")
 .add_parameter("negatives, positives", "array_like(2D, float)", "``negatives`` and ``positives`` should be arrays organized in such a way that every row corresponds to a new observation of the phenomena (i.e., a new sample) and every column corresponds to a different feature")
diff --git a/bob/learn/linear/machine.cpp b/bob/learn/linear/machine.cpp
index e3aa415..8d425ed 100644
--- a/bob/learn/linear/machine.cpp
+++ b/bob/learn/linear/machine.cpp
@@ -575,7 +575,8 @@ static auto forward = bob::extension::FunctionDoc(
   "If one provides a 1D array, the ``output`` array, if provided, should also be 1D, matching the output size of this machine. "
   "If one provides a 2D array, it is considered a set of vertically stacked 1D arrays (one input per row) and a 2D array is produced or expected in ``output``. "
   "The ``output`` array in this case shall have the same number of rows as the ``input`` array and as many columns as the output size for this machine.\n\n"
-  ".. note:: The :py:meth:`__call__` function is an alias for this method."
+  ".. note:: The :py:meth:`__call__` function is an alias for this method.",
+  true
 )
 .add_prototype("input, [output]", "output")
 .add_parameter("input", "array_like(1D or 2D, float)", "The array that should be projected; must be compatible with :py:attr:`shape` [0]")
@@ -725,7 +726,8 @@ BOB_CATCH_MEMBER("save", 0)
 static auto is_similar_to = bob::extension::FunctionDoc(
   "is_similar_to",
   "Compares this LinearMachine with the ``other`` one to be approximately the same",
-  "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the relative and absolute precision for the :py:attr:`weights`, :py:attr:`biases` and any other values internal to this machine."
+  "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the relative and absolute precision for the :py:attr:`weights`, :py:attr:`biases` and any other values internal to this machine.",
+  true
 )
 .add_prototype("other, [r_epsilon], [a_epsilon]", "similar")
 .add_parameter("other", ":py:class:`Machine`", "The other machine to compare with")
@@ -764,7 +766,8 @@ static auto resize = bob::extension::FunctionDoc(
   ".. note::\n\n"
   " Use this method to force data compression.\n"
   " All will work out given most relevant factors to be preserved are organized on the top of the weight matrix.\n"
-  " In this way, reducing the system size will suppress less relevant projections."
+  " In this way, reducing the system size will suppress less relevant projections.",
+  true
 )
 .add_prototype("input, output")
 .add_parameter("input", "int", "The input dimension to be set")
diff --git a/bob/learn/linear/pca.cpp b/bob/learn/linear/pca.cpp
index bd78af1..765bcc7 100644
--- a/bob/learn/linear/pca.cpp
+++ b/bob/learn/linear/pca.cpp
@@ -167,14 +167,15 @@ static PyObject* PyBobLearnLinearPCATrainer_RichCompare

 static auto train = bob::extension::FunctionDoc(
   "train",
-  "Trains a linear machine to perform the PCA (aka. KLT)"
+  "Trains a linear machine to perform the PCA (aka. KLT)",
   "The resulting machine will have the same number of inputs as columns in ``X`` and :math:`K` eigen-vectors, where :math:`K=\\min{(S-1,F)}`, with :math:`S` being the number of rows in ``X`` (samples) and :math:`F` the number of columns (or features). "
   "The vectors are arranged by decreasing eigen-value automatically -- there is no need to sort the results.\n\n"
   "The user may provide or not an object of type :py:class:`Machine` that will be set by this method. "
   "If provided, machine should have the correct number of inputs and outputs matching, respectively, the number of columns in the input data array ``X`` and the output of the method :py:meth:`output_size`.\n\n"
   "The input data matrix ``X`` should correspond to a 64-bit floating point array organized in such a way that every row corresponds to a new observation of the phenomena (i.e., a new sample) and every column corresponds to a different feature.\n\n"
   "This method returns a tuple consisting of the trained machine and a 1D 64-bit floating point array containing the eigen-values calculated while computing the KLT. "
-  "The eigen-value ordering matches that of eigen-vectors set in the machine."
+  "The eigen-value ordering matches that of eigen-vectors set in the machine.",
+  true
 )
 .add_prototype("X, [machine]", "machine, eigen_values")
 .add_parameter("X", "array_like(2D, floats)", "The input data to train on")
@@ -232,7 +233,8 @@ static auto output_size = bob::extension::FunctionDoc(
   "Calculates the maximum possible rank for the covariance matrix of the given ``X``",
   "Returns the maximum number of non-zero eigen values that can be generated by this trainer, given ``X``. "
   "This number (K) depends on the size of X and is calculated as follows :math:`K=\\min{(S-1,F)}`, with :math:`S` being the number of rows in ``data`` (samples) and :math:`F` the number of columns (or features).\n\n"
-  "This method should be used to setup linear machines and input vectors prior to feeding them into the :py:meth:`train` function."
+  "This method should be used to setup linear machines and input vectors prior to feeding them into the :py:meth:`train` function.",
+  true
 )
 .add_prototype("X","size")
 .add_parameter("X", "array_like(2D, floats)", "The input data that should be trained on")
diff --git a/bob/learn/linear/wccn.cpp b/bob/learn/linear/wccn.cpp
index d6d4b9d..1a88fe4 100644
--- a/bob/learn/linear/wccn.cpp
+++ b/bob/learn/linear/wccn.cpp
@@ -122,7 +122,8 @@ static auto train = bob::extension::FunctionDoc(
   "The user may provide or not an object of type :py:class:`bob.learn.linear.Machine` that will be set by this method. "
   "In such a case, the machine should have a shape that matches ``(X.shape[1], X.shape[1])``. "
   "If the user does not provide a machine to be set, then a new one will be allocated internally. "
-  "In both cases, the resulting machine is always returned."
+  "In both cases, the resulting machine is always returned.",
+  true
 )
 .add_prototype("X, [machine]", "machine")
 .add_parameter("X", "[array_like(2D,float)] or array_like(3D, float)", "The training data arranged by class")
--
GitLab