Commit 38b26d13 authored by François MARELLI

Delete log_loss.py

parent 33d0db50
import torch
from torch.nn import MSELoss
from torch.nn import L1Loss


class LogMSELoss(MSELoss):
    r"""Creates a criterion that measures the logarithmic mean squared error between
    `n` elements in the input `x` and target `y`:

    :math:`{loss}(x, y) = \log\left( \frac{1}{n} \sum_i |x_i - y_i|^2 + \epsilon \right)`

    `x` and `y` can have arbitrary shapes with a total of `n` elements each.
    The sum operation still operates over all the elements and divides by `n`.

    The division by `n` can be avoided if one sets the internal variable
    `size_average` to ``False``.

    To get a batch of losses, i.e. a loss per batch element, set `reduce` to
    ``False``. These losses are not averaged and are not affected by
    `size_average`.

    The epsilon is a small positive float used to avoid log(0), which would lead to NaN.

    Args:
        size_average (bool, optional): By default, the losses are averaged
            over observations for each minibatch. However, if the field
            size_average is set to ``False``, the losses are instead summed for
            each minibatch. Only applies when reduce is ``True``. Default: ``True``
        reduce (bool, optional): By default, the losses are averaged
            over observations for each minibatch, or summed, depending on
            size_average. When reduce is ``False``, returns a loss per batch
            element instead and ignores size_average. Default: ``True``
        epsilon (float, optional): a small positive term added to the MSE before
            taking the log, to avoid NaN from log(0). Default: ``0.05``

    Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
        - Target: :math:`(N, *)`, same shape as the input

    Examples::

        >>> loss = neural_filters.LogMSELoss()
        >>> input = autograd.Variable(torch.randn(3, 5), requires_grad=True)
        >>> target = autograd.Variable(torch.randn(3, 5))
        >>> output = loss(input, target)
        >>> output.backward()
    """

    def __init__(self, size_average=True, reduce=True, epsilon=0.05):
        super().__init__(size_average, reduce)
        self.epsilon = epsilon

    def forward(self, input, target):
        # Shift the MSE by epsilon before taking the log so that a perfect fit
        # does not produce log(0) = -inf.
        loss = super().forward(input, target)
        return torch.log(loss + self.epsilon)
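
# A hedged sketch, not part of the original file: a doctest-style check that
# LogMSELoss matches the formula in its docstring, log(mean((x - y)^2) + epsilon).
# It assumes a PyTorch version that still accepts the legacy size_average/reduce
# constructor arguments used above.
#
#   >>> x = torch.randn(3, 5, requires_grad=True)
#   >>> y = torch.randn(3, 5)
#   >>> criterion = LogMSELoss(epsilon=0.05)
#   >>> out = criterion(x, y)
#   >>> torch.allclose(out, torch.log(((x - y) ** 2).mean() + 0.05))
#   True
#   >>> out.backward()  # gradients stay finite because epsilon > 0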


class LogL1Loss(L1Loss):
    r"""Creates a criterion that measures the logarithm of the mean absolute value of the
    element-wise difference between input `x` and target `y`:

    :math:`{loss}(x, y) = \log\left( \frac{1}{n} \sum_i |x_i - y_i| + \epsilon \right)`

    `x` and `y` can have arbitrary shapes with a total of `n` elements each.
    The sum operation still operates over all the elements and divides by `n`.

    The division by `n` can be avoided if one sets the constructor argument
    `size_average=False`.

    The epsilon is a small positive float used to avoid log(0), which would lead to NaN.

    Args:
        size_average (bool, optional): By default, the losses are averaged
            over observations for each minibatch. However, if the field
            size_average is set to ``False``, the losses are instead summed for
            each minibatch. Ignored when reduce is ``False``. Default: ``True``
        reduce (bool, optional): By default, the losses are averaged or summed
            for each minibatch. When reduce is ``False``, the loss function returns
            a loss per batch element instead and ignores size_average.
            Default: ``True``
        epsilon (float, optional): a small positive term added to the mean absolute
            error before taking the log, to avoid NaN from log(0). Default: ``0.05``

    Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
        - Target: :math:`(N, *)`, same shape as the input
        - Output: scalar. If reduce is ``False``, then
          :math:`(N, *)`, same shape as the input

    Examples::

        >>> loss = neural_filters.LogL1Loss()
        >>> input = autograd.Variable(torch.randn(3, 5), requires_grad=True)
        >>> target = autograd.Variable(torch.randn(3, 5))
        >>> output = loss(input, target)
        >>> output.backward()
    """

    def __init__(self, size_average=True, reduce=True, epsilon=0.05):
        super().__init__(size_average, reduce)
        self.epsilon = epsilon

    def forward(self, input, target):
        # Shift the mean absolute error by epsilon before taking the log so that
        # a perfect fit does not produce log(0) = -inf.
        loss = super().forward(input, target)
        return torch.log(loss + self.epsilon)
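

if __name__ == "__main__":
    # A hedged sketch, not part of the original file: a small numerical sanity
    # check of LogL1Loss against its docstring formula, log(mean(|x - y|) + epsilon).
    # It assumes a PyTorch version that still accepts the legacy size_average/reduce
    # constructor arguments used by these classes.
    torch.manual_seed(0)
    x = torch.randn(3, 5, requires_grad=True)
    y = torch.randn(3, 5)

    criterion = LogL1Loss(epsilon=0.05)
    out = criterion(x, y)

    expected = torch.log((x - y).abs().mean() + 0.05)
    print(torch.allclose(out, expected))  # expected to print: True

    out.backward()        # epsilon keeps the log (and its gradient) finite
    print(x.grad.shape)   # torch.Size([3, 5])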