# LossFunction.py
import numpy


class LossFunction(object):
    """Base class for all loss functions implemented in pure python.

    It is simply a python re-implementation of the
    :py:class:`bob.learn.boosting.LossFunction` class.

    This class provides the interface for the L-BFGS optimizer:
    :py:meth:`loss_sum` and :py:meth:`loss_gradient_sum` are the objective
    and gradient callbacks used during the line search over ``alpha``.
    Please overwrite the loss() and loss_gradient() function (see below)
    in derived loss classes.
    """

    def __init__(self):
        # The base class keeps no state; derived classes may add their own.
        pass

    def loss(self, targets, scores):
        """Computes the loss for the given targets and scores.

        Keyword parameters:

          targets (float <#samples, #outputs>): The target values that should be reached.

          scores (float <#samples, #outputs>): The scores provided by the classifier.

        Returns
          (float <#samples, #outputs>) or (float <#samples, 1>): The loss based on the given scores and targets.
          Depending on the intended task, one of the two output variants should be chosen.
          For classification tasks, please use the former way (#samples, #outputs), while for regression tasks, use the latter (#samples, 1).
        """
        raise NotImplementedError("This is a pure abstract function. Please implement that in your derived class.")

    def loss_gradient(self, targets, scores):
        """Computes the gradient of the loss for the given targets and scores.

        Keyword parameters:

          targets (float <#samples, #outputs>): The target values that should be reached.

          scores (float <#samples, #outputs>): The scores provided by the classifier.

        Returns
          loss (float <#samples, #outputs>): The gradient of the loss based on the given scores and targets.
        """
        raise NotImplementedError("This is a pure abstract function. Please implement that in your derived class.")

    def loss_sum(self, alpha, targets, previous_scores, current_scores):
        """Computes the sum of the loss, used to find the optimized value of alpha.

        The function computes the sum of loss values, which is required during
        the line search step for the optimization of alpha.  It is given as the
        objective callback to the L-BFGS optimization function.

        Keyword parameters:

          alpha (float): The current value of the alpha.

          targets (float <#samples, #outputs>): The targets for the samples

          previous_scores (float <#samples, #outputs>): The cumulative prediction scores of the samples until the previous round of the boosting.

          current_scores (float <#samples, #outputs>): The prediction scores of the samples for the current round of the boosting.

        Returns

          (float <#outputs>) The sum of the loss values for the current value of the alpha
        """
        # Candidate cumulative scores for this alpha, and their per-sample loss.
        scores = previous_scores + alpha * current_scores
        losses = self.loss(targets, scores)

        # Reduce over the sample axis, keeping one value per output.
        return numpy.sum(losses, 0)

    def loss_gradient_sum(self, alpha, targets, previous_scores, current_scores):
        """Computes the gradient as the sum of the derivatives per sample, used to find the optimized value of alpha.

        The function computes the sum of loss-gradient values, which is
        required during the line search step for the optimization of alpha.
        It is given as the gradient callback to the L-BFGS optimization
        function.

        Keyword parameters:

          alpha (float): The current value of the alpha.

          targets (float <#samples, #outputs>): The targets for the samples

          previous_scores (float <#samples, #outputs>): The cumulative prediction scores of the samples until the previous round of the boosting.

          current_scores (float <#samples, #outputs>): The prediction scores of the samples for the current round of the boosting.

        Returns
          (float <#outputs>) The sum of the loss gradient for the current value of the alpha.
        """
        # Candidate cumulative scores for this alpha, and their per-sample gradient.
        scores = previous_scores + alpha * current_scores
        loss_gradients = self.loss_gradient(targets, scores)

        # Chain rule: d(loss)/d(alpha) = d(loss)/d(score) * current_scores,
        # summed over the sample axis (one value per output).
        return numpy.sum(loss_gradients * current_scores, 0)