[sphinx] Fixed warnings

parent 82a4e253
Pipeline #4890 passed with stages in 9 minutes and 53 seconds
bob/learn/boosting/ExponentialLoss.py

-from .LossFunction import LossFunction
+from . import LossFunction

import numpy


class ExponentialLoss(LossFunction):
  """ The class implements the exponential loss function for the boosting framework."""

  def loss(self, targets, scores):
    """The function computes the exponential loss values using prediction scores and targets.
    It can be used in classification tasks, e.g., in combination with the StumpTrainer.

    Keyword parameters:

    targets (float <#samples, #outputs>): The target values that should be reached.

    scores (float <#samples, #outputs>): The scores provided by the classifier.

    Returns
    (float <#samples, #outputs>): The loss values for the samples, always >= 0
    """
    return numpy.exp(-(targets * scores))

  def loss_gradient(self, targets, scores):
    """The function computes the gradient of the exponential loss function using prediction scores and targets.

    Keyword parameters:

    targets (float <#samples, #outputs>): The target values that should be reached.

    scores (float <#samples, #outputs>): The scores provided by the classifier.

    Returns
    loss (float <#samples, #outputs>): The gradient of the loss based on the given scores and targets.
    """
    loss = numpy.exp(-(targets * scores))
    return -targets * loss
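For orientation, a small toy example (not part of the commit) that exercises only the two methods documented above; it assumes the package is importable as bob.learn.boosting:

import numpy
import bob.learn.boosting

exponential_loss = bob.learn.boosting.ExponentialLoss()

# targets are +1/-1 class labels, scores are real-valued classifier outputs
targets = numpy.array([[+1.], [-1.], [+1.]])
scores  = numpy.array([[0.5], [0.5], [-2.0]])

print(exponential_loss.loss(targets, scores))           # exp(-t*s); greater than 1 for misclassified samples
print(exponential_loss.loss_gradient(targets, scores))  # -t * exp(-t*s)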
bob/learn/boosting/LogitLoss.py

-from .LossFunction import LossFunction
+from . import LossFunction

import numpy


class LogitLoss(LossFunction):
  """ The class to implement the logit loss function for the boosting framework."""

  def loss(self, targets, scores):
    """The function computes the logit loss values using prediction scores and targets.

    Keyword parameters:

    targets (float <#samples, #outputs>): The target values that should be reached.

    scores (float <#samples, #outputs>): The scores provided by the classifier.

    Returns
    (float <#samples, #outputs>): The loss values for the samples, which is always >= 0
    """
    e = numpy.exp(-(targets * scores))
    return numpy.log(1. + e)

  def loss_gradient(self, targets, scores):
    """The function computes the gradient of the logit loss function using prediction scores and targets.

    Keyword parameters:

    targets (float <#samples, #outputs>): The target values that should be reached.

    scores (float <#samples, #outputs>): The scores provided by the classifier.

    Returns
    loss (float <#samples, #outputs>): The gradient of the loss based on the given scores and targets.
    """
    e = numpy.exp(-(targets * scores))
    denom = 1. / (1. + e)
    return -targets * e * denom
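A short cross-check, added here for illustration (pure numpy, not part of the commit): log(1 + exp(-t*s)) is exactly the negative log-likelihood of the logistic model, so the value computed by loss() can be verified against a sigmoid-based formulation:

import numpy

def sigmoid(x):
  return 1. / (1. + numpy.exp(-x))

targets = numpy.array([[+1.], [-1.], [+1.]])
scores  = numpy.array([[0.5], [0.5], [-2.0]])

logit_loss = numpy.log(1. + numpy.exp(-(targets * scores)))  # as computed by LogitLoss.loss
neg_log_likelihood = -numpy.log(sigmoid(targets * scores))   # logistic-regression view of the same quantity

assert numpy.allclose(logit_loss, neg_log_likelihood)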
bob/learn/boosting/LossFunction.py

import numpy


-class LossFunction:
+class LossFunction(object):
  """This is a base class for all loss functions implemented in pure python.
  It is simply a python re-implementation of the :py:class:`bob.learn.boosting.LossFunction` class.

  This class provides the interface for the L-BFGS optimizer.
  Please overwrite the loss() and loss_gradient() function (see below) in derived loss classes.
  """

+  def __init__(self):
+    pass

  def loss(self, targets, scores):
    """This function is to compute the loss for the given targets and scores.

    Keyword parameters:

    targets (float <#samples, #outputs>): The target values that should be reached.

    scores (float <#samples, #outputs>): The scores provided by the classifier.

    Returns
    (float <#samples, #outputs>) or (float <#samples, 1>): The loss based on the given scores and targets.
    Depending on the intended task, one of the two output variants should be chosen.
    For classification tasks, please use the former way (#samples, #outputs), while for regression tasks, use the latter (#samples, 1).
    """
    raise NotImplementedError("This is a pure abstract function. Please implement that in your derived class.")

  def loss_gradient(self, targets, scores):
    """This function is to compute the gradient of the loss for the given targets and scores.

    Keyword parameters:

    targets (float <#samples, #outputs>): The target values that should be reached.

    scores (float <#samples, #outputs>): The scores provided by the classifier.

    Returns
    loss (float <#samples, #outputs>): The gradient of the loss based on the given scores and targets.
    """
    raise NotImplementedError("This is a pure abstract function. Please implement that in your derived class.")

  def loss_sum(self, alpha, targets, previous_scores, current_scores):
    """The function computes the sum of the loss which is used to find the optimized values of alpha (x).

    The function computes the sum of the loss values, which is required during the line search step for the optimization of the alpha.
    This function is given as the input for the L-BFGS optimization function.

    Keyword parameters:

    alpha (float): The current value of the alpha.

    targets (float <#samples, #outputs>): The targets for the samples

    previous_scores (float <#samples, #outputs>): The cumulative prediction scores of the samples until the previous round of the boosting.

    current_scores (float <#samples, #outputs>): The prediction scores of the samples for the current round of the boosting.

    Returns
    (float <#outputs>) The sum of the loss values for the current value of the alpha
    """
    # compute the scores and loss for the current alpha
    scores = previous_scores + alpha * current_scores
    losses = self.loss(targets, scores)

    # compute the sum of the loss
    return numpy.sum(losses, 0)

  def loss_gradient_sum(self, alpha, targets, previous_scores, current_scores):
    """The function computes the gradient as the sum of the derivatives per sample which is used to find the optimized values of alpha.

    The function computes the sum of the loss gradient values, which is required during the line search step for the optimization of the alpha.
    This function is given as the input for the L-BFGS optimization function.

    Keyword parameters:

    alpha (float): The current value of the alpha.

    targets (float <#samples, #outputs>): The targets for the samples

    previous_scores (float <#samples, #outputs>): The cumulative prediction scores of the samples until the previous round of the boosting.

    current_scores (float <#samples, #outputs>): The prediction scores of the samples for the current round of the boosting.

    Returns
    (float <#outputs>) The sum of the loss gradient for the current value of the alpha.
    """
    # compute the loss gradient for the updated score
    scores = previous_scores + alpha * current_scores
    loss_gradients = self.loss_gradient(targets, scores)

    # take the sum of the loss gradient values
    return numpy.sum(loss_gradients * current_scores, 0)
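The docstrings above state that loss_sum and loss_gradient_sum are handed to an L-BFGS optimizer to find the step size alpha. A minimal sketch of that idea (illustration only, for the uni-variate case; the actual boosting trainer is not part of this diff) could call scipy.optimize.fmin_l_bfgs_b directly:

import numpy
import scipy.optimize
import bob.learn.boosting

loss = bob.learn.boosting.LogitLoss()  # any concrete loss function works here

targets         = numpy.array([[+1.], [-1.], [+1.], [-1.]])
previous_scores = numpy.zeros((4, 1))                     # cumulative scores of earlier boosting rounds
current_scores  = numpy.array([[.8], [.3], [.5], [-.9]])  # scores of the newly trained weak machine

# minimize the summed loss over the scalar step size alpha (uni-variate case, so index [0])
alpha, minimum, info = scipy.optimize.fmin_l_bfgs_b(
  func=lambda a: loss.loss_sum(a, targets, previous_scores, current_scores)[0],
  x0=numpy.zeros(1),
  fprime=lambda a: loss.loss_gradient_sum(a, targets, previous_scores, current_scores),
)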
bob/learn/boosting/TangentialLoss.py

-from .LossFunction import LossFunction
+from . import LossFunction

import numpy


class TangentialLoss(LossFunction):
  """Tangent loss function, as described in http://www.svcl.ucsd.edu/projects/LossDesign/TangentBoost.html."""

  def loss(self, targets, scores):
    """The function computes the tangent loss values using prediction scores and targets.

    Keyword parameters:

    targets (float <#samples, #outputs>): The target values that should be reached.

    scores (float <#samples, #outputs>): The scores provided by the classifier.

    Returns
    (float <#samples, #outputs>): The loss values for the samples, always >= 0
    """
    return (2. * numpy.arctan(targets * scores) - 1.) ** 2

  def loss_gradient(self, targets, scores):
    """The function computes the gradient of the tangential loss function using prediction scores and targets.

    Keyword parameters:

    targets (float <#samples, #outputs>): The target values that should be reached.

    scores (float <#samples, #outputs>): The scores provided by the classifier.

    Returns
    loss (float <#samples, #outputs>): The gradient of the loss based on the given scores and targets.
    """
    m = targets * scores
    numer = 4. * (2. * numpy.arctan(m) - 1.)
    denom = 1. + m ** 2
    return numer / denom
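As a quick sanity check (illustration only, not part of the commit), loss_gradient can be compared against a central finite difference of loss for a positive target:

import numpy
import bob.learn.boosting

tangential_loss = bob.learn.boosting.TangentialLoss()

targets = numpy.array([[1.]])
scores  = numpy.array([[0.3]])
eps = 1e-6

analytic = tangential_loss.loss_gradient(targets, scores)
numeric  = (tangential_loss.loss(targets, scores + eps)
            - tangential_loss.loss(targets, scores - eps)) / (2. * eps)

assert numpy.allclose(analytic, numeric, atol=1e-5)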
bob/learn/boosting/__init__.py

@@ -11,7 +11,7 @@ from bob.learn.boosting.version import module as __version__
 from bob.learn.boosting.version import api as __api_version__

 # include loss functions
-from bob.learn.boosting import LossFunction  # Just to get the documentation for it
+from bob.learn.boosting.LossFunction import LossFunction  # Just to get the documentation for it
 from bob.learn.boosting.ExponentialLoss import ExponentialLoss
 from bob.learn.boosting.LogitLoss import LogitLoss
 from bob.learn.boosting.TangentialLoss import TangentialLoss

@@ -34,5 +34,19 @@ def get_config():
   return bob.extension.get_config(__name__, version.externals, version.api)

+# gets sphinx autodoc done right - don't remove it
+def __appropriate__(*args):
+  """Says object was actually declared here, and not in the import module.
+
+  Parameters:
+
+    *args: An iterable of objects to modify
+
+  Resolves `Sphinx referencing issues
+  <https://github.com/sphinx-doc/sphinx/issues/3048>`
+  """
+  for obj in args: obj.__module__ = __name__
+
+__appropriate__(
+  LossFunction,
+)
+
 # gets sphinx autodoc done right - don't remove it
 __all__ = [_ for _ in dir() if not _.startswith('_')]
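The effect of the new __appropriate__ helper can be illustrated with a one-line check (not part of the commit): after __module__ is re-assigned, the class reports the package as its home module, which is what lets Sphinx resolve :py:class:`bob.learn.boosting.LossFunction` without warnings.

import bob.learn.boosting

print(bob.learn.boosting.LossFunction.__module__)  # expected: 'bob.learn.boosting' once __appropriate__ has run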
@@ -245,7 +245,7 @@ static PyObject* boostedMachine_add(
 static auto boostedMachine_forward_doc = bob::extension::FunctionDoc(
   "forward",
   "Returns the prediction for the given feature vector(s)",
-  ".. note:: The :py:func:`__call__` function is an alias for this function.\n\n"
+  ".. note:: The ``__call__`` function is an alias for this function.\n\n"
   "This function can be called in six different ways:\n\n"
   "1. ``(uint16 <#inputs>)`` will compute and return the uni-variate prediction for a single feature vector.\n"
   "2. ``(uint16 <#samples,#inputs>, float <#samples>)`` will compute the uni-variate prediction for several feature vectors.\n"
   ...
@@ -157,7 +157,7 @@ static PyObject* lutMachine_lut(
 static auto lutMachine_forward_doc = bob::extension::FunctionDoc(
   "forward",
   "Returns the prediction for the given feature vector(s)",
-  ".. note:: The :py:func:`__call__` function is an alias for this function.\n\n"
+  ".. note:: The ``__call__`` function is an alias for this function.\n\n"
   "This function can be called in four different ways:\n\n"
   "1. ``(uint16 <#inputs>)`` will compute and return the uni-variate prediction for a single feature vector.\n"
   "2. ``(uint16 <#samples,#inputs>, float <#samples>)`` will compute the uni-variate prediction for several feature vectors.\n"
   ...
@@ -129,7 +129,7 @@ static PyObject* stumpMachine_polarity(
 static auto stumpMachine_forward_doc = bob::extension::FunctionDoc(
   "forward",
   "Returns the prediction for the given feature vector(s)",
-  ".. note:: The :py:func:`__call__` function is an alias for this function.\n\n"
+  ".. note:: The ``__call__`` function is an alias for this function.\n\n"
   ".. todo:: write more detailed documentation",
   true
 )
 ...
@@ -26,7 +26,7 @@ Available trainers in :py:mod:`bob.learn.boosting` are:
 * :py:class:`bob.learn.boosting.Boosting` : Trains a strong machine of type :py:class:`bob.learn.boosting.BoostedMachine`.
 * :py:class:`bob.learn.boosting.LUTTrainer` : Trains a weak machine of type :py:class:`bob.learn.boosting.LUTMachine`.
-* :py:class:`bob.learn.boosting.StrumTrainer` : Trains a weak machine of type :py:class:`bob.learn.boosting.StumpMachine`.
+* :py:class:`bob.learn.boosting.StumpTrainer` : Trains a weak machine of type :py:class:`bob.learn.boosting.StumpMachine`.

 Loss functions

@@ -40,9 +40,9 @@ A base class loss function :py:class:`bob.learn.boosting.LossFunction` is called
 Not all combinations of loss functions and weak trainers make sense.
 Here is a list of useful combinations:

-1. :py:class:`bob.learn.boosting.ExponentialLoss` with :py:class:`bob.learn.boosting.StrumTrainer` (uni-variate classification only).
+1. :py:class:`bob.learn.boosting.ExponentialLoss` with :py:class:`bob.learn.boosting.StumpTrainer` (uni-variate classification only).
-2. :py:class:`bob.learn.boosting.LogitLoss` with :py:class:`bob.learn.boosting.StrumTrainer` or :py:class:`bob.learn.boosting.LUTTrainer` (uni-variate or multi-variate classification).
+2. :py:class:`bob.learn.boosting.LogitLoss` with :py:class:`bob.learn.boosting.StumpTrainer` or :py:class:`bob.learn.boosting.LUTTrainer` (uni-variate or multi-variate classification).
-3. :py:class:`bob.learn.boosting.TangentialLoss` with :py:class:`bob.learn.boosting.StrumTrainer` or :py:class:`bob.learn.boosting.LUTTrainer` (uni-variate or multi-variate classification).
+3. :py:class:`bob.learn.boosting.TangentialLoss` with :py:class:`bob.learn.boosting.StumpTrainer` or :py:class:`bob.learn.boosting.LUTTrainer` (uni-variate or multi-variate classification).
 4. :py:class:`bob.learn.boosting.JesorskyLoss` with :py:class:`bob.learn.boosting.LUTTrainer` (multi-variate regression only).

 Details
 ...