From 38b26d132fe12172ccae97410c3b6ba06e782fb7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fran=C3=A7ois=20MARELLI?=
Date: Thu, 7 Jun 2018 12:51:42 +0200
Subject: [PATCH] Delete log_loss.py

neural_filters/log_loss.py  108 
 1 file changed, 108 deletions(-)
delete mode 100644 neural_filters/log_loss.py
diff --git a/neural_filters/log_loss.py b/neural_filters/log_loss.py
deleted file mode 100644
index c892b8f..0000000
--- a/neural_filters/log_loss.py
+++ /dev/null
@@ -1,108 +0,0 @@
import torch
from torch.nn import MSELoss
from torch.nn import L1Loss


class LogMSELoss(MSELoss):
    r"""Logarithmic mean-squared-error criterion.

    Computes the MSE between input `x` and target `y`, then takes its log
    after adding a small positive constant:

    :math:`{loss}(x, y) = \log\left( \frac{1}{n} \sum (x_i - y_i)^2 + \epsilon \right)`

    `x` and `y` may have arbitrary (matching) shapes with `n` elements in
    total.  The reduction behaviour (mean vs. sum vs. per-element) is
    inherited from :class:`torch.nn.MSELoss` via the ``size_average`` and
    ``reduce`` flags.  The ``epsilon`` term keeps the argument of the log
    strictly positive so the loss never evaluates ``log(0)`` (NaN).

    Args:
        size_average (bool, optional): average the loss over the minibatch
            when ``True``, sum it when ``False``.  Only used when ``reduce``
            is ``True``.  Default: ``True``
        reduce (bool, optional): when ``False``, return one loss per batch
            element and ignore ``size_average``.  Default: ``True``
        epsilon (float, optional): positive offset added to the MSE before
            the log to avoid NaN at zero error.  Default: ``0.05``

    Shape:
        - Input: :math:`(N, *)` where `*` is any number of extra dimensions
        - Target: :math:`(N, *)`, same shape as the input

    Examples::

        >>> loss = neural_filters.LogMSELoss()
        >>> input = autograd.Variable(torch.randn(3, 5), requires_grad=True)
        >>> target = autograd.Variable(torch.randn(3, 5))
        >>> output = loss(input, target)
        >>> output.backward()
    """

    def __init__(self, size_average=True, reduce=True, epsilon=0.05):
        # Reduction flags are handled entirely by the parent MSELoss.
        super().__init__(size_average, reduce)
        self.epsilon = epsilon

    def forward(self, input, target):
        # Parent computes the (reduced) MSE; we shift and take the log.
        mse = super().forward(input, target)
        return torch.log(mse + self.epsilon)

class LogL1Loss(L1Loss):
    r"""Logarithmic mean-absolute-error criterion.

    Computes the mean absolute element-wise difference between input `x`
    and target `y`, then takes its log after adding a small positive
    constant:

    :math:`{loss}(x, y) = \log\left( \frac{1}{n} \sum |x_i - y_i| + \epsilon \right)`

    `x` and `y` may have arbitrary (matching) shapes with `n` elements in
    total.  The reduction behaviour (mean vs. sum vs. per-element) is
    inherited from :class:`torch.nn.L1Loss` via the ``size_average`` and
    ``reduce`` flags.  The ``epsilon`` term keeps the argument of the log
    strictly positive so the loss never evaluates ``log(0)`` (NaN).

    Args:
        size_average (bool, optional): average the loss over the minibatch
            when ``True``, sum it when ``False``.  Ignored when ``reduce``
            is ``False``.  Default: ``True``
        reduce (bool, optional): when ``False``, return one loss per batch
            element and ignore ``size_average``.  Default: ``True``
        epsilon (float, optional): positive offset added to the mean
            absolute error before the log to avoid NaN at zero error.
            Default: ``0.05``

    Shape:
        - Input: :math:`(N, *)` where `*` is any number of extra dimensions
        - Target: :math:`(N, *)`, same shape as the input
        - Output: scalar; if ``reduce`` is ``False``, :math:`(N, *)`,
          same shape as the input

    Examples::

        >>> loss = neural_filters.LogL1Loss()
        >>> input = autograd.Variable(torch.randn(3, 5), requires_grad=True)
        >>> target = autograd.Variable(torch.randn(3, 5))
        >>> output = loss(input, target)
        >>> output.backward()
    """

    def __init__(self, size_average=True, reduce=True, epsilon=0.05):
        # Reduction flags are handled entirely by the parent L1Loss.
        super().__init__(size_average, reduce)
        self.epsilon = epsilon

    def forward(self, input, target):
        # Parent computes the (reduced) L1 error; we shift and take the log.
        l1 = super().forward(input, target)
        return torch.log(l1 + self.epsilon)

2.21.0