From 96a6087f6a4bf8452f4c4d399e001c9c43d73188 Mon Sep 17 00:00:00 2001
From: Francois Marelli
Date: Thu, 22 Mar 2018 11:42:37 +0100
Subject: [PATCH] LogMSELoss
---
neural_filters/LogMSELoss.py | 54 ++++++++++++++++++++++++++++++++++++
neural_filters/__init__.py | 3 +-
2 files changed, 56 insertions(+), 1 deletion(-)
create mode 100644 neural_filters/LogMSELoss.py
diff --git a/neural_filters/LogMSELoss.py b/neural_filters/LogMSELoss.py
new file mode 100644
index 0000000..7f9b230
--- /dev/null
+++ b/neural_filters/LogMSELoss.py
@@ -0,0 +1,54 @@
+import torch
+from torch.nn import MSELoss
+
class LogMSELoss(MSELoss):
    r"""Logarithmic mean squared error criterion.

    Computes :math:`{loss}(x, y) = \log( 1/n \sum |x_i - y_i|^2 + epsilon)`
    over the `n` elements of the input `x` and target `y`, which may have any
    (matching) shape.

    The MSE itself is computed by the parent :class:`torch.nn.MSELoss`, so the
    `size_average` and `reduce` flags behave exactly as they do there:
    averaging can be disabled by setting `size_average` to ``False``, and
    setting `reduce` to ``False`` yields one (un-averaged) loss per batch
    element, ignoring `size_average`. The logarithm is applied elementwise, so
    both reduced and per-element modes are supported.

    A small positive `epsilon` is added before taking the logarithm so that a
    perfect fit (MSE of zero) does not produce ``log(0) = -inf`` / NaN
    gradients.

    Args:
        size_average (bool, optional): average losses over the minibatch when
            ``True``, sum them when ``False``. Only used when `reduce` is
            ``True``. Default: ``True``
        reduce (bool, optional): reduce the loss over the minibatch when
            ``True``; return a per-batch-element loss when ``False``.
            Default: ``True``
        epsilon (float, optional): positive offset added to the MSE before the
            log to avoid ``log(0)``. Default: ``0.05``

    Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
        - Target: :math:`(N, *)`, same shape as the input

    Examples::

        >>> loss = neural_filters.LogMSELoss()
        >>> input = autograd.Variable(torch.randn(3, 5), requires_grad=True)
        >>> target = autograd.Variable(torch.randn(3, 5))
        >>> output = loss(input, target)
        >>> output.backward()
    """

    def __init__(self, size_average=True, reduce=True, epsilon=0.05):
        # Parent handles the MSE reduction flags; we only keep the log offset.
        super(LogMSELoss, self).__init__(size_average, reduce)
        self.epsilon = epsilon

    def forward(self, input, target):
        # Plain MSE from the parent, then the stabilized logarithm.
        mse = super(LogMSELoss, self).forward(input, target)
        return torch.log(mse + self.epsilon)
\ No newline at end of file
diff --git a/neural_filters/__init__.py b/neural_filters/__init__.py
index ee336dc..2007602 100644
--- a/neural_filters/__init__.py
+++ b/neural_filters/__init__.py
@@ -2,4 +2,5 @@ from .NeuralFilterCell import *
from .NeuralFilter1P import *
from .NeuralFilter2R import *
from .NeuralFilter2CC import *
-from .NeuralFilter2CD import *
\ No newline at end of file
+from .NeuralFilter2CD import *
+from .LogMSELoss import *
--
2.21.0