Commit f535c004 authored by Tiago de Freitas Pereira

[sphinx] Fixed warnings

parent 82a4e253
Pipeline #4890 passed with stages in 9 minutes and 53 seconds
bob/learn/boosting/ExponentialLoss.py:

-from .LossFunction import LossFunction
 import numpy
+from . import LossFunction

-class ExponentialLoss (LossFunction):
+class ExponentialLoss(LossFunction):
   """ The class implements the exponential loss function for the boosting framework."""

   def loss(self, targets, scores):
     """The function computes the exponential loss values using prediction scores and targets.
@@ -21,7 +20,6 @@ class ExponentialLoss (LossFunction):
     """
     return numpy.exp(-(targets * scores))

   def loss_gradient(self, targets, scores):
     """The function computes the gradient of the exponential loss function using prediction scores and targets.
@@ -36,4 +34,3 @@ class ExponentialLoss (LossFunction):
     """
     loss = numpy.exp(-(targets * scores))
     return -targets * loss
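For reference, a minimal usage sketch of the exponential loss as it stands after this change; targets are expected as +1/-1 labels, scores as real-valued predictions (the array shapes below are illustrative):

import numpy
from bob.learn.boosting import ExponentialLoss

loss = ExponentialLoss()
targets = numpy.array([[+1.], [-1.], [+1.]])  # <#samples, #outputs>
scores = numpy.array([[0.8], [0.3], [-0.5]])

# element-wise loss: exp(-targets * scores)
print(loss.loss(targets, scores))
# gradient w.r.t. the scores: -targets * exp(-targets * scores)
print(loss.loss_gradient(targets, scores))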
bob/learn/boosting/LogitLoss.py:

-from .LossFunction import LossFunction
+from . import LossFunction
 import numpy

 class LogitLoss(LossFunction):
   """ The class to implement the logit loss function for the boosting framework."""
@@ -20,7 +21,6 @@ class LogitLoss(LossFunction):
     e = numpy.exp(-(targets * scores))
     return numpy.log(1. + e)

   def loss_gradient(self, targets, scores):
     """The function computes the gradient of the logit loss function using prediction scores and targets.
@@ -34,5 +34,5 @@ class LogitLoss(LossFunction):
       loss (float <#samples, #outputs>): The gradient of the loss based on the given scores and targets.
     """
     e = numpy.exp(-(targets * scores))
-    denom = 1./(1. + e)
+    denom = 1. / (1. + e)
     return -targets * e * denom
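One caveat worth noting: numpy.exp(-(targets * scores)) overflows for strongly mis-classified samples (targets * scores << 0). A numerically stable equivalent of the loss value, shown here only as an illustration and not as what the library implements, is numpy.logaddexp:

import numpy

targets = numpy.array([[+1.]])
scores = numpy.array([[-1000.]])
m = targets * scores

# log(1 + exp(-m)) == logaddexp(0, -m), without the intermediate overflow
stable = numpy.logaddexp(0., -m)       # -> 1000., finite
naive = numpy.log(1. + numpy.exp(-m))  # -> inf, with an overflow warning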
bob/learn/boosting/LossFunction.py:

 import numpy

-class LossFunction:
+class LossFunction(object):
   """This is a base class for all loss functions implemented in pure python.
   It is simply a python re-implementation of the :py:class:`bob.learn.boosting.LossFunction` class.
@@ -8,6 +9,9 @@ class LossFunction:
   Please overwrite the loss() and loss_gradient() function (see below) in derived loss classes.
   """

+  def __init__(self):
+    pass
+
   def loss(self, targets, scores):
     """This function is to compute the loss for the given targets and scores.
@@ -24,7 +28,6 @@ class LossFunction:
     """
     raise NotImplementedError("This is a pure abstract function. Please implement that in your derived class.")

   def loss_gradient(self, targets, scores):
     """This function is to compute the gradient of the loss for the given targets and scores.
@@ -39,7 +42,6 @@ class LossFunction:
     """
     raise NotImplementedError("This is a pure abstract function. Please implement that in your derived class.")

   def loss_sum(self, alpha, targets, previous_scores, current_scores):
     """The function computes the sum of the loss which is used to find the optimized values of alpha (x).
@@ -68,7 +70,6 @@ class LossFunction:
     # compute the sum of the loss
     return numpy.sum(losses, 0)

   def loss_gradient_sum(self, alpha, targets, previous_scores, current_scores):
     """The function computes the gradient as the sum of the derivatives per sample which is used to find the optimized values of alpha.
...
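As the base-class docstring says, a derived loss only needs to overwrite loss() and loss_gradient(); loss_sum() and loss_gradient_sum() are inherited. A hypothetical example, not part of the library, just to illustrate that contract:

import numpy
from bob.learn.boosting import LossFunction

class SquareLoss(LossFunction):
  """Illustrative squared-error loss: (targets - scores)**2, element-wise."""

  def loss(self, targets, scores):
    return (targets - scores) ** 2

  def loss_gradient(self, targets, scores):
    # derivative of the loss w.r.t. the scores
    return -2. * (targets - scores)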
bob/learn/boosting/TangentialLoss.py:

-from .LossFunction import LossFunction
+from . import LossFunction
 import numpy

-class TangentialLoss (LossFunction):
+class TangentialLoss(LossFunction):
   """Tangent loss function, as described in http://www.svcl.ucsd.edu/projects/LossDesign/TangentBoost.html."""

   def loss(self, targets, scores):
@@ -17,7 +18,7 @@ class TangentialLoss (LossFunction):
     Returns
       (float <#samples, #outputs>): The loss values for the samples, always >= 0
     """
-    return (2. * numpy.arctan(targets * scores) - 1.)**2
+    return (2. * numpy.arctan(targets * scores) - 1.) ** 2

   def loss_gradient(self, targets, scores):
     """The function computes the gradient of the tangential loss function using prediction scores and targets.
@@ -33,6 +34,5 @@ class TangentialLoss (LossFunction):
     """
     m = targets * scores
     numer = 4. * (2. * numpy.arctan(m) - 1.)
-    denom = 1. + m**2
-    return numer/denom
+    denom = 1. + m ** 2
+    return numer / denom
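A quick sanity-check sketch that works for any of these losses: compare loss_gradient() against a central finite difference of loss() over the scores (positive targets here, for simplicity):

import numpy
from bob.learn.boosting import TangentialLoss

loss = TangentialLoss()
targets = numpy.ones((5, 1))
scores = numpy.random.randn(5, 1)
eps = 1e-6

# central finite difference of the loss w.r.t. the scores
numeric = (loss.loss(targets, scores + eps) - loss.loss(targets, scores - eps)) / (2. * eps)
analytic = loss.loss_gradient(targets, scores)
assert numpy.allclose(numeric, analytic, atol=1e-5)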
bob/learn/boosting/__init__.py:

@@ -11,7 +11,7 @@ from bob.learn.boosting.version import module as __version__
 from bob.learn.boosting.version import api as __api_version__

 # include loss functions
-from bob.learn.boosting import LossFunction # Just to get the documentation for it
+from bob.learn.boosting.LossFunction import LossFunction # Just to get the documentation for it
 from bob.learn.boosting.ExponentialLoss import ExponentialLoss
 from bob.learn.boosting.LogitLoss import LogitLoss
 from bob.learn.boosting.TangentialLoss import TangentialLoss
@@ -34,5 +34,19 @@ def get_config():
   return bob.extension.get_config(__name__, version.externals, version.api)

+# gets sphinx autodoc done right - don't remove it
+def __appropriate__(*args):
+  """Says the object was actually declared here, and not in the imported module.
+
+  Parameters:
+
+    *args: An iterable of objects to modify
+
+  Resolves `Sphinx referencing issues
+  <https://github.com/sphinx-doc/sphinx/issues/3048>`_
+  """
+  for obj in args: obj.__module__ = __name__
+
+__appropriate__(
+  LossFunction,
+)
+
 # gets sphinx autodoc done right - don't remove it
 __all__ = [_ for _ in dir() if not _.startswith('_')]
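The effect of the __appropriate__() trick above can be checked directly: after importing the package, the re-exported class reports the package as its module, which is what Sphinx needs to resolve :py:class:`bob.learn.boosting.LossFunction` references:

import bob.learn.boosting
assert bob.learn.boosting.LossFunction.__module__ == 'bob.learn.boosting'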
C++ bindings of the BoostedMachine:

@@ -245,7 +245,7 @@ static PyObject* boostedMachine_add(
 static auto boostedMachine_forward_doc = bob::extension::FunctionDoc(
   "forward",
   "Returns the prediction for the given feature vector(s)",
-  ".. note:: The :py:func:`__call__` function is an alias for this function.\n\n"
+  ".. note:: The ``__call__`` function is an alias for this function.\n\n"
   "This function can be called in six different ways:\n\n"
   "1. ``(uint16 <#inputs>)`` will compute and return the uni-variate prediction for a single feature vector.\n"
   "2. ``(uint16 <#samples,#inputs>, float <#samples>)`` will compute the uni-variate prediction for several feature vectors.\n"
...
C++ bindings of the LUTMachine:

@@ -157,7 +157,7 @@ static PyObject* lutMachine_lut(
 static auto lutMachine_forward_doc = bob::extension::FunctionDoc(
   "forward",
   "Returns the prediction for the given feature vector(s)",
-  ".. note:: The :py:func:`__call__` function is an alias for this function.\n\n"
+  ".. note:: The ``__call__`` function is an alias for this function.\n\n"
   "This function can be called in four different ways:\n\n"
   "1. ``(uint16 <#inputs>)`` will compute and return the uni-variate prediction for a single feature vector.\n"
   "2. ``(uint16 <#samples,#inputs>, float <#samples>)`` will compute the uni-variate prediction for several feature vectors.\n"
...
C++ bindings of the StumpMachine:

@@ -129,7 +129,7 @@ static PyObject* stumpMachine_polarity(
 static auto stumpMachine_forward_doc = bob::extension::FunctionDoc(
   "forward",
   "Returns the prediction for the given feature vector(s)",
-  ".. note:: The :py:func:`__call__` function is an alias for this function.\n\n"
+  ".. note:: The ``__call__`` function is an alias for this function.\n\n"
   ".. todo:: write more detailed documentation",
   true
 )
...
Documentation:

@@ -26,7 +26,7 @@ Available trainers in :py:mod:`bob.learn.boosting` are:

 * :py:class:`bob.learn.boosting.Boosting` : Trains a strong machine of type :py:class:`bob.learn.boosting.BoostedMachine`.
 * :py:class:`bob.learn.boosting.LUTTrainer` : Trains a weak machine of type :py:class:`bob.learn.boosting.LUTMachine`.
-* :py:class:`bob.learn.boosting.StrumTrainer` : Trains a weak machine of type :py:class:`bob.learn.boosting.StumpMachine`.
+* :py:class:`bob.learn.boosting.StumpTrainer` : Trains a weak machine of type :py:class:`bob.learn.boosting.StumpMachine`.

 Loss functions
@@ -40,9 +40,9 @@ A base class loss function :py:class:`bob.learn.boosting.LossFunction` is called
 Not all combinations of loss functions and weak trainers make sense.
 Here is a list of useful combinations:

-1. :py:class:`bob.learn.boosting.ExponentialLoss` with :py:class:`bob.learn.boosting.StrumTrainer` (uni-variate classification only).
-2. :py:class:`bob.learn.boosting.LogitLoss` with :py:class:`bob.learn.boosting.StrumTrainer` or :py:class:`bob.learn.boosting.LUTTrainer` (uni-variate or multi-variate classification).
-3. :py:class:`bob.learn.boosting.TangentialLoss` with :py:class:`bob.learn.boosting.StrumTrainer` or :py:class:`bob.learn.boosting.LUTTrainer` (uni-variate or multi-variate classification).
+1. :py:class:`bob.learn.boosting.ExponentialLoss` with :py:class:`bob.learn.boosting.StumpTrainer` (uni-variate classification only).
+2. :py:class:`bob.learn.boosting.LogitLoss` with :py:class:`bob.learn.boosting.StumpTrainer` or :py:class:`bob.learn.boosting.LUTTrainer` (uni-variate or multi-variate classification).
+3. :py:class:`bob.learn.boosting.TangentialLoss` with :py:class:`bob.learn.boosting.StumpTrainer` or :py:class:`bob.learn.boosting.LUTTrainer` (uni-variate or multi-variate classification).
 4. :py:class:`bob.learn.boosting.JesorskyLoss` with :py:class:`bob.learn.boosting.LUTTrainer` (multi-variate regression only).
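A minimal end-to-end sketch of combination 1 (ExponentialLoss with StumpTrainer, uni-variate classification). The exact signatures of Boosting() and train() are assumptions here, so treat this as an outline and check the package's API documentation:

import numpy
import bob.learn.boosting

# toy two-class data: integral features, targets in {+1, -1}
features = numpy.random.randint(0, 256, (100, 5)).astype(numpy.uint16)
targets = numpy.sign(features[:, 0].astype(numpy.float64) - 127.5)

weak_trainer = bob.learn.boosting.StumpTrainer()
strong_trainer = bob.learn.boosting.Boosting(weak_trainer, bob.learn.boosting.ExponentialLoss())
machine = strong_trainer.train(features, targets)  # assumed to return a BoostedMachine

# __call__ is an alias for forward(), see the docstrings patched above
print(machine(features[0]))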
 Details
...