"""
NeuralFilterCell
****************

This module implements a basic trainable first-order all-pole filter using PyTorch.


Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/

Written by Francois Marelli <Francois.Marelli@idiap.ch>

This file is part of neural_filters.

neural_filters is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License version 3 as
published by the Free Software Foundation.

neural_filters is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with neural_filters. If not, see <http://www.gnu.org/licenses/>.

"""

import torch
from torch.nn import Parameter
from torch.nn import functional as F


class NeuralFilterCell(torch.nn.Module):
    """
    A trainable first-order all-pole filter :math:`\\frac{1}{1 - P z^{-1}}`

    * **hidden_size** (int) - the size of the data vector
    """

    def __init__(self, hidden_size):
        super(NeuralFilterCell, self).__init__()

        self.hidden_size = hidden_size

        self.bias_forget = Parameter(torch.Tensor(hidden_size))

        self.reset_parameters()

    def reset_parameters(self):
        # initialize the whole bias vector, not only its first element,
        # so that no entry is left with uninitialized memory
        self.bias_forget.data.zero_()

    def __repr__(self):
        s = '{name}({hidden_size})'
        return s.format(name=self.__class__.__name__, **self.__dict__)

    def check_forward_input(self, input):
        if input.size(-1) != self.hidden_size:
            raise RuntimeError(
                "input has inconsistent size(-1): got {}, expected {}".format(
                    input.size(-1), self.hidden_size))

    def check_forward_hidden(self, input, hx):
        if input.size(1) != hx.size(0):
            raise RuntimeError(
                "Input batch size {} doesn't match hidden batch size {}".format(
                    input.size(1), hx.size(0)))

        if hx.size(1) != self.hidden_size:
            raise RuntimeError(
                "hidden has inconsistent hidden_size: got {}, expected {}".format(
                     hx.size(1), self.hidden_size))

    def step(self, input, hidden):
        """Apply one time step of the recurrence y_n = sigmoid(b) * y_{n-1} + x_n."""
        forgetgate = F.sigmoid(self.bias_forget)
        next_hidden = (forgetgate * hidden) + input
        return next_hidden

    def forward(self, input, hx=None):
        # input is expected with shape (seq_len, batch, hidden_size)
        if hx is None:
            # default to a zero initial hidden state matching the batch size
            hx = torch.autograd.Variable(input.data.new(input.size(1),
                                                        self.hidden_size
                                                        ).zero_(), requires_grad=False)

        self.check_forward_input(input)
        self.check_forward_hidden(input, hx)

        hidden = hx

        output = []
        steps = range(input.size(0))
        for i in steps:
            hidden = self.step(input[i], hidden)
            output.append(hidden)

        # stack the per-step hidden states into a (seq_len, batch, hidden_size) tensor
        output = torch.cat(output, 0).view(input.size(0), *output[0].size())

        return output, hidden
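

# Minimal usage sketch, assuming a reasonably recent PyTorch build: it runs
# random data through the cell to illustrate the expected
# (seq_len, batch, hidden_size) input layout and the shapes returned by forward.
# The sizes below are arbitrary illustration values.
if __name__ == '__main__':
    seq_len, batch, hidden_size = 10, 4, 3

    cell = NeuralFilterCell(hidden_size)

    # random input sequence with shape (seq_len, batch, hidden_size)
    x = torch.randn(seq_len, batch, hidden_size)

    output, last_hidden = cell(x)

    # output stacks the hidden state at every time step
    print(output.shape)       # torch.Size([10, 4, 3])
    print(last_hidden.shape)  # torch.Size([4, 3])

    # the learned pole of the filter, constrained to (0, 1) by the sigmoid
    print(F.sigmoid(cell.bias_forget))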