Commit 6322cf95 authored by Francois Marelli

LogL1Loss

parent 352aed53
import torch
from torch.nn import MSELoss
from torch.nn import L1Loss


class LogMSELoss(MSELoss):
    r"""Creates a criterion that measures the logarithmic mean squared error between
    ...
@@ -45,10 +47,62 @@ class LogMSELoss(MSELoss):
        >>> output = loss(input, target)
        >>> output.backward()
    """
    def __init__(self, size_average=True, reduce=True, epsilon=0.05):
        super().__init__(size_average, reduce)
        self.epsilon = epsilon

    def forward(self, input, target):
        loss = super().forward(input, target)
        return torch.log(loss + self.epsilon)


class LogL1Loss(L1Loss):
    r"""Creates a criterion that measures the logarithm of the mean absolute value
    of the element-wise difference between input `x` and target `y`:

    :math:`\operatorname{loss}(x, y) = \log\left( \frac{1}{n} \sum_i |x_i - y_i| + \epsilon \right)`

    `x` and `y` can have arbitrary shapes with a total of `n` elements each.

    The sum operation still operates over all the elements, and divides by `n`.
    The division by `n` can be avoided if one sets the constructor argument
    `size_average=False`.

    The epsilon is a positive float used to avoid log(0) leading to NaN.

    Args:
        size_average (bool, optional): By default, the losses are averaged
            over observations for each minibatch. However, if the field
            size_average is set to ``False``, the losses are instead summed for
            each minibatch. Ignored when reduce is ``False``. Default: ``True``
        reduce (bool, optional): By default, the losses are averaged or summed
            for each minibatch. When reduce is ``False``, the loss function returns
            a loss per batch element instead and ignores size_average.
            Default: ``True``
        epsilon (float, optional): add a small positive term to the mean absolute
            error before taking the log to avoid NaN with log(0). Default: ``0.05``

    Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
        - Target: :math:`(N, *)`, same shape as the input
        - Output: scalar. If reduce is ``False``, then
          :math:`(N, *)`, same shape as the input

    Examples::

        >>> loss = neural_filters.LogL1Loss()
        >>> input = autograd.Variable(torch.randn(3, 5), requires_grad=True)
        >>> target = autograd.Variable(torch.randn(3, 5))
        >>> output = loss(input, target)
        >>> output.backward()
    """
    def __init__(self, size_average=True, reduce=True, epsilon=0.05):
        super().__init__(size_average, reduce)
        self.epsilon = epsilon

    def forward(self, input, target):
        loss = super().forward(input, target)
        return torch.log(loss + self.epsilon)
\ No newline at end of file
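
For reference, a minimal usage sketch of the new loss, assuming the module is importable as `neural_filters` as in the docstring example. Modern PyTorch no longer needs the `autograd.Variable` wrapper shown there; setting `requires_grad=True` on the tensor is enough:

import torch
from neural_filters import LogL1Loss  # assumed import path

loss_fn = LogL1Loss(epsilon=0.05)

input = torch.randn(3, 5, requires_grad=True)
target = torch.randn(3, 5)

# By the formula above: log(mean(|input - target|) + epsilon)
output = loss_fn(input, target)
output.backward()

# The epsilon keeps the result finite: with input == target, the loss is
# log(0.05) ~ -3.0 rather than log(0) = -inf.

Note that recent PyTorch versions deprecate the `size_average` and `reduce` constructor arguments in favour of a single `reduction` parameter, so constructing these losses there may emit a deprecation warning.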