diff --git a/neural_filters/LogMSELoss.py b/neural_filters/LogMSELoss.py
deleted file mode 100644
index 7f9b230c9f24e32e6b3e7f4d1066b48d6477ebed..0000000000000000000000000000000000000000
--- a/neural_filters/LogMSELoss.py
+++ /dev/null
@@ -1,54 +0,0 @@
-import torch
-from torch.nn import MSELoss
-
-class LogMSELoss(MSELoss):
-    r"""Creates a criterion that measures the logarithmic mean squared error between
-    `n` elements in the input `x` and target `y`:
-
-    :math:`{loss}(x, y) = \log( 1/n \sum |x_i - y_i|^2 + \epsilon)`
-
-    `x` and `y` arbitrary shapes with a total of `n` elements each.
-
-    The sum operation still operates over all the elements, and divides by `n`.
-
-    The division by `n` can be avoided if one sets the internal variable
-    `size_average` to ``False``.
-
-    To get a batch of losses, a loss per batch element, set `reduce` to
-    ``False``. These losses are not averaged and are not affected by
-    `size_average`.
-
-    The epsilon is a positive float used to avoid log(0) leading to NaN.
-
-    Args:
-        size_average (bool, optional): By default, the losses are averaged
-            over observations for each minibatch. However, if the field
-            size_average is set to ``False``, the losses are instead summed for
-            each minibatch. Only applies when reduce is ``True``. Default: ``True``
-        reduce (bool, optional): By default, the losses are averaged
-            over observations for each minibatch, or summed, depending on
-            size_average. When reduce is ``False``, returns a loss per batch
-            element instead and ignores size_average. Default: ``True``
-        epsilon (float, optional): add a small positive term to the MSE before
-            taking the log to avoid NaN with log(0). Default: ``0.05``
-
-    Shape:
-        - Input: :math:`(N, *)` where `*` means any number of additional
-          dimensions
-        - Target: :math:`(N, *)`, same shape as the input
-
-    Examples::
-
-        >>> loss = neural_filters.LogMSELoss()
-        >>> input = autograd.Variable(torch.randn(3, 5), requires_grad=True)
-        >>> target = autograd.Variable(torch.randn(3, 5))
-        >>> output = loss(input, target)
-        >>> output.backward()
-    """
-    def __init__(self, size_average=True, reduce=True, epsilon=0.05):
-        super(LogMSELoss, self).__init__(size_average, reduce)
-        self.epsilon = epsilon
-
-    def forward(self, input, target):
-        loss = super(LogMSELoss, self).forward(input, target)
-        return torch.log(loss + self.epsilon)
\ No newline at end of file
diff --git a/neural_filters/log_loss.py b/neural_filters/log_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..c892b8f393be6be71fb52b3970e2a3061cd912d0
--- /dev/null
+++ b/neural_filters/log_loss.py
@@ -0,0 +1,108 @@
+import torch
+from torch.nn import MSELoss
+from torch.nn import L1Loss
+
+
+class LogMSELoss(MSELoss):
+    r"""Creates a criterion that measures the logarithmic mean squared error between
+    `n` elements in the input `x` and target `y`:
+
+    :math:`{loss}(x, y) = \log( 1/n \sum |x_i - y_i|^2 + \epsilon)`
+
+    `x` and `y` arbitrary shapes with a total of `n` elements each.
+
+    The sum operation still operates over all the elements, and divides by `n`.
+
+    The division by `n` can be avoided if one sets the internal variable
+    `size_average` to ``False``.
+
+    To get a batch of losses, a loss per batch element, set `reduce` to
+    ``False``. These losses are not averaged and are not affected by
+    `size_average`.
+
+    The epsilon is a positive float used to avoid log(0) leading to NaN.
+
+    Args:
+        size_average (bool, optional): By default, the losses are averaged
+            over observations for each minibatch. However, if the field
+            size_average is set to ``False``, the losses are instead summed for
+            each minibatch. Only applies when reduce is ``True``. Default: ``True``
+        reduce (bool, optional): By default, the losses are averaged
+            over observations for each minibatch, or summed, depending on
+            size_average. When reduce is ``False``, returns a loss per batch
+            element instead and ignores size_average. Default: ``True``
+        epsilon (float, optional): add a small positive term to the MSE before
+            taking the log to avoid NaN with log(0). Default: ``0.05``
+
+    Shape:
+        - Input: :math:`(N, *)` where `*` means any number of additional
+          dimensions
+        - Target: :math:`(N, *)`, same shape as the input
+
+    Examples::
+
+        >>> loss = neural_filters.LogMSELoss()
+        >>> input = autograd.Variable(torch.randn(3, 5), requires_grad=True)
+        >>> target = autograd.Variable(torch.randn(3, 5))
+        >>> output = loss(input, target)
+        >>> output.backward()
+    """
+
+    def __init__(self, size_average=True, reduce=True, epsilon=0.05):
+        super().__init__(size_average, reduce)
+        self.epsilon = epsilon
+
+    def forward(self, input, target):
+        loss = super().forward(input, target)
+        return torch.log(loss + self.epsilon)
+
+class LogL1Loss(L1Loss):
+    r"""Creates a criterion that measures the logarithm of the mean absolute value of the
+    elementwise difference between input `x` and target `y`:
+
+    :math:`{loss}(x, y) = \log( 1/n \sum |x_i - y_i| + \epsilon )`
+
+    `x` and `y` arbitrary shapes with a total of `n` elements each.
+
+    The sum operation still operates over all the elements, and divides by `n`.
+
+    The division by `n` can be avoided if one sets the constructor argument
+    `size_average=False`.
+
+    The epsilon is a positive float used to avoid log(0) leading to NaN.
+
+    Args:
+        size_average (bool, optional): By default, the losses are averaged
+            over observations for each minibatch. However, if the field
+            size_average is set to ``False``, the losses are instead summed for
+            each minibatch. Ignored when reduce is ``False``. Default: ``True``
+        reduce (bool, optional): By default, the losses are averaged or summed
+            for each minibatch. When reduce is ``False``, the loss function returns
+            a loss per batch element instead and ignores size_average.
+            Default: ``True``
+        epsilon (float, optional): add a small positive term to the mean absolute
+            error before taking the log to avoid NaN with log(0). Default: ``0.05``
+
+    Shape:
+        - Input: :math:`(N, *)` where `*` means any number of additional
+          dimensions
+        - Target: :math:`(N, *)`, same shape as the input
+        - Output: scalar. If reduce is ``False``, then
+          :math:`(N, *)`, same shape as the input
+
+    Examples::
+
+        >>> loss = neural_filters.LogL1Loss()
+        >>> input = autograd.Variable(torch.randn(3, 5), requires_grad=True)
+        >>> target = autograd.Variable(torch.randn(3, 5))
+        >>> output = loss(input, target)
+        >>> output.backward()
+    """
+
+    def __init__(self, size_average=True, reduce=True, epsilon=0.05):
+        super().__init__(size_average, reduce)
+        self.epsilon = epsilon
+
+    def forward(self, input, target):
+        loss = super().forward(input, target)
+        return torch.log(loss + self.epsilon)
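
For reference, a minimal usage sketch of the new module, kept outside the patch. It assumes the package is importable as `neural_filters.log_loss`; the tensor shapes, epsilon value, and the final sanity check are illustrative only, not part of this change.

    import torch
    from neural_filters.log_loss import LogMSELoss, LogL1Loss

    # Illustrative data: a batch of 3 predictions with 5 features each.
    input = torch.randn(3, 5, requires_grad=True)
    target = torch.randn(3, 5)

    mse_criterion = LogMSELoss(epsilon=0.05)  # log(MSE + epsilon)
    l1_criterion = LogL1Loss(epsilon=0.05)    # log(MAE + epsilon)

    loss = mse_criterion(input, target)
    loss.backward()                           # gradients flow through the log

    # The epsilon term keeps the loss finite when input == target:
    # log(0 + 0.05) is roughly -3.0 instead of -inf.
    zero_err = mse_criterion(target, target)

The log wrapper only changes the scale of the objective, not its minimizer, so either class can be dropped in wherever `MSELoss` or `L1Loss` was used before.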