diff --git a/neural_filters/LogMSELoss.py b/neural_filters/LogMSELoss.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f9b230c9f24e32e6b3e7f4d1066b48d6477ebed
--- /dev/null
+++ b/neural_filters/LogMSELoss.py
@@ -0,0 +1,54 @@
+import torch
+from torch.nn import MSELoss
+
+class LogMSELoss(MSELoss):
+    r"""Creates a criterion that measures the logarithmic mean squared error between
+    `n` elements in the input `x` and target `y`:
+
+    :math:`\text{loss}(x, y) = \log\left(\frac{1}{n} \sum_i |x_i - y_i|^2 + \epsilon\right)`
+
+    `x` and `y` can have arbitrary shapes, with a total of `n` elements each.
+
+    The sum operation still operates over all the elements and divides by `n`.
+
+    The division by `n` can be avoided if one sets the constructor argument
+    `size_average` to ``False``.
+
+    To get a batch of losses, i.e. one loss per batch element, set `reduce` to
+    ``False``. These losses are not averaged and are not affected by
+    `size_average`.
+
+    `epsilon` is a small positive float added to the MSE to avoid taking ``log(0)``.
+
+    Args:
+        size_average (bool, optional): By default, the losses are averaged
+            over observations for each minibatch. However, if the field
+            size_average is set to ``False``, the losses are instead summed for
+            each minibatch. Only applies when reduce is ``True``. Default: ``True``
+        reduce (bool, optional): By default, the losses are averaged
+            over observations for each minibatch, or summed, depending on
+            size_average. When reduce is ``False``, returns a loss per batch
+            element instead and ignores size_average. Default: ``True``
+        epsilon (float, optional): small positive term added to the MSE before
+            taking the log, to avoid ``log(0)``. Default: ``0.05``
+
+    Shape:
+        - Input: :math:`(N, *)` where `*` means any number of additional
+          dimensions
+        - Target: :math:`(N, *)`, same shape as the input
+
+    Examples::
+
+        >>> loss = neural_filters.LogMSELoss()
+        >>> input = autograd.Variable(torch.randn(3, 5), requires_grad=True)
+        >>> target = autograd.Variable(torch.randn(3, 5))
+        >>> output = loss(input, target)
+        >>> output.backward()
+    """
+    def __init__(self, size_average=True, reduce=True, epsilon=0.05):
+        super(LogMSELoss, self).__init__(size_average, reduce)
+        self.epsilon = epsilon
+
+    def forward(self, input, target):
+        loss = super(LogMSELoss, self).forward(input, target)
+        return torch.log(loss + self.epsilon)
\ No newline at end of file
diff --git a/neural_filters/__init__.py b/neural_filters/__init__.py
index ee336dc23478f6af12e4a7ee63ddb0497803bd08..2007602f0440a8f7ef4efef1dbe00d48ab1c8e51 100644
--- a/neural_filters/__init__.py
+++ b/neural_filters/__init__.py
@@ -2,4 +2,5 @@ from .NeuralFilterCell import *
 from .NeuralFilter1P import *
 from .NeuralFilter2R import *
 from .NeuralFilter2CC import *
-from .NeuralFilter2CD import *
\ No newline at end of file
+from .NeuralFilter2CD import *
+from .LogMSELoss import *
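
As a sanity check for the new criterion, a minimal usage sketch might look like the following. This is a hypothetical example, not part of the patch: it assumes the package is importable as `neural_filters` and uses plain tensors with `requires_grad=True` rather than the deprecated `autograd.Variable` shown in the docstring.

```python
import torch

from neural_filters import LogMSELoss  # assumes the package layout from this patch

# Hypothetical smoke test: the criterion should equal log(MSE + epsilon).
loss_fn = LogMSELoss(epsilon=0.05)

input = torch.randn(3, 5, requires_grad=True)
target = torch.randn(3, 5)

output = loss_fn(input, target)

# Cross-check against a manual computation of log(mean((x - y)^2) + eps).
expected = torch.log(((input - target) ** 2).mean() + 0.05)
assert torch.allclose(output, expected)

output.backward()  # gradients flow through the log like any other op
```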