import torch
from torch.nn import MSELoss
from torch.nn import L1Loss


class LogMSELoss(MSELoss):
    r"""Creates a criterion that measures the logarithmic mean squared error between
    `n` elements in the input `x` and target `y`:

    :math:`\text{loss}(x, y) = \log\left( \frac{1}{n} \sum_i |x_i - y_i|^2 + \epsilon \right)`

    `x` and `y` can be tensors of arbitrary shapes with a total of `n` elements each.

    The sum operation still operates over all the elements, and divides by `n`.

    The division by `n` can be avoided by setting the constructor argument
    `size_average` to ``False``.

    To get a loss per batch element, set `reduce` to ``False``. These
    losses are neither averaged nor summed, and `size_average` is ignored.

    `epsilon` is a small positive float added to the error before taking the
    log, keeping the loss and its gradient finite when the error is exactly zero.

    Args:
        size_average (bool, optional): By default, the losses are averaged
           over observations for each minibatch. However, if the field
           size_average is set to ``False``, the losses are instead summed for
           each minibatch. Only applies when reduce is ``True``. Default: ``True``
        reduce (bool, optional): By default, the losses are averaged
           over observations for each minibatch, or summed, depending on
           size_average. When reduce is ``False``, returns a loss per batch
           element instead and ignores size_average. Default: ``True``
        epsilon (float, optional): small positive term added to the MSE before
            taking the log, keeping the loss finite when the MSE is zero.
            Default: ``0.05``

    Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
        - Target: :math:`(N, *)`, same shape as the input
        - Output: scalar. If reduce is ``False``, then
          :math:`(N, *)`, same shape as the input

    Examples::

        >>> loss = neural_filters.LogMSELoss()
        >>> input = torch.randn(3, 5, requires_grad=True)
        >>> target = torch.randn(3, 5)
        >>> output = loss(input, target)
        >>> output.backward()
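        >>> # Sanity check (a sketch, assuming the default epsilon=0.05): the
        >>> # criterion is the log of the mean squared error plus epsilon.
        >>> manual = torch.log(((input - target) ** 2).mean() + 0.05)
        >>> torch.allclose(output, manual)
        True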
    """

    def __init__(self, size_average=True, reduce=True, epsilon=0.05):
        super().__init__(size_average, reduce)
        self.epsilon = epsilon

    def forward(self, input, target):
        # Compute the (averaged, summed or per-element) MSE, then take the
        # log; epsilon keeps the result finite when the error is zero.
        loss = super().forward(input, target)
        return torch.log(loss + self.epsilon)


class LogL1Loss(L1Loss):
    r"""Creates a criterion that measures the logarithm of the mean absolute value of the
    element-wise difference between input `x` and target `y`:

    :math:`\text{loss}(x, y) = \log\left( \frac{1}{n} \sum_i |x_i - y_i| + \epsilon \right)`

    `x` and `y` can be tensors of arbitrary shapes with a total of `n` elements each.

    The sum operation still operates over all the elements, and divides by `n`.

    The division by `n` can be avoided by setting the constructor argument
    `size_average` to ``False``.

    `epsilon` is a small positive float added to the error before taking the
    log, keeping the loss and its gradient finite when the error is exactly zero.

    Args:
        size_average (bool, optional): By default, the losses are averaged
           over observations for each minibatch. However, if the field
           size_average is set to ``False``, the losses are instead summed for
           each minibatch. Ignored when reduce is ``False``. Default: ``True``
        reduce (bool, optional): By default, the losses are averaged or summed
           for each minibatch. When reduce is ``False``, the loss function returns
           a loss per batch element instead and ignores size_average.
           Default: ``True``
        epsilon (float, optional): small positive term added to the mean
            absolute error before taking the log, keeping the loss finite when
            the error is zero. Default: ``0.05``

    Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
        - Target: :math:`(N, *)`, same shape as the input
        - Output: scalar. If reduce is ``False``, then
          :math:`(N, *)`, same shape as the input

    Examples::

        >>> loss = neural_filters.LogL1Loss()
        >>> input = torch.randn(3, 5, requires_grad=True)
        >>> target = torch.randn(3, 5)
        >>> output = loss(input, target)
        >>> output.backward()
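        >>> # Sanity check (a sketch, assuming the default epsilon=0.05): the
        >>> # criterion is the log of the mean absolute error plus epsilon.
        >>> manual = torch.log((input - target).abs().mean() + 0.05)
        >>> torch.allclose(output, manual)
        True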
    """

    def __init__(self, size_average=True, reduce=True, epsilon=0.05):
        super().__init__(size_average, reduce)
        self.epsilon = epsilon

    def forward(self, input, target):
        # Compute the (averaged, summed or per-element) L1 loss, then take
        # the log; epsilon keeps the result finite when the error is zero.
        loss = super().forward(input, target)
        return torch.log(loss + self.epsilon)
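

if __name__ == "__main__":
    # Quick smoke test (a convenience sketch, not part of the original
    # module): both criteria should return a finite scalar on random data
    # and support backpropagation.
    x = torch.randn(3, 5, requires_grad=True)
    y = torch.randn(3, 5)
    for criterion in (LogMSELoss(), LogL1Loss()):
        out = criterion(x, y)
        out.backward()
        print(type(criterion).__name__, out.item())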