Commit 321f76a2 authored by Francois Marelli

First order all-pole

parent 304edcb9
*~
docs/html
docs/sphinx/_build
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
......
"""
NeuralFilter1P
**************
This module implements a trainable all-pole first order filter using pyTorch
Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/
Written by Francois Marelli <Francois.Marelli@idiap.ch>
This file is part of neural_filters.
neural_filters is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License version 3 as
published by the Free Software Foundation.
neural_filters is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with neural_filters. If not, see <http://www.gnu.org/licenses/>.
"""
import torch
from torch.nn import Parameter
from torch.nn import functional as F
import math
class NeuralFilter1P(torch.nn.Module):
    """
    A trainable first-order all-pole filter :math:`\\frac{K}{1 - P z^{-1}}`

    * **input_size** (int) - the size of the input vector
    * **hidden_size** (int) - the size of the output vector
    """
    def __init__(self, input_size, hidden_size):
        super(NeuralFilter1P, self).__init__()

        self.input_size = input_size
        self.hidden_size = hidden_size

        # one projection for the forget (pole) gate and one for the cell gate,
        # so both the weight and the bias hold 2 * hidden_size rows
        self.weight_ih = Parameter(torch.Tensor(2 * hidden_size, input_size))
        self.bias_ih = Parameter(torch.Tensor(2 * hidden_size))

        self.reset_parameters()
    def reset_parameters(self):
        stdv = 1.0 / math.sqrt(self.hidden_size)
        for weight in self.parameters():
            weight.data.uniform_(-stdv, stdv)
    def forward(self, input, hx=None):
        if hx is None:
            # initialize the recurrent state to zeros; hidden and cell states
            # share the same initial tensor
            vhx = torch.autograd.Variable(input.data.new(input.size(1),
                                                         self.hidden_size
                                                         ).zero_(), requires_grad=False)
            hx = (vhx, vhx)

        self.check_forward_input(input)
        self.check_forward_hidden(input, hx[0], '[0]')
        self.check_forward_hidden(input, hx[1], '[1]')

        hidden = hx

        # iterate over the time dimension (dim 0) and collect the outputs
        output = []
        steps = range(input.size(0))
        for i in steps:
            hidden = self.step(input[i], hidden)
            output.append(hidden[0])

        output = torch.cat(output, 0).view(input.size(0), *output[0].size())

        return output, hidden
    def __repr__(self):
        s = '{name}({input_size}, {hidden_size})'
        return s.format(name=self.__class__.__name__, **self.__dict__)
    def check_forward_input(self, input):
        if input.size(-1) != self.input_size:
            raise RuntimeError(
                "input has inconsistent input_size(-1): got {}, expected {}".format(
                    input.size(-1), self.input_size))
    def check_forward_hidden(self, input, hx, hidden_label=''):
        if input.size(1) != hx.size(0):
            raise RuntimeError(
                "Input batch size {} doesn't match hidden{} batch size {}".format(
                    input.size(1), hidden_label, hx.size(0)))

        if hx.size(1) != self.hidden_size:
            raise RuntimeError(
                "hidden{} has inconsistent hidden_size: got {}, expected {}".format(
                    hidden_label, hx.size(1), self.hidden_size))
    def step(self, input, hidden):
        hx, cx = hidden

        # project the input into the two gates, then split them
        gates = F.linear(input, self.weight_ih, self.bias_ih)
        forgetgate, cellgate = gates.chunk(2, 1)

        # the forget gate plays the role of the pole P, squashed into (0, 1)
        forgetgate = F.sigmoid(forgetgate)

        # first-order all-pole recurrence: c[t] = P * c[t-1] + cellgate
        cy = (forgetgate * cx) + cellgate
        hy = cy

        return hy, cy
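
# A minimal usage sketch, assuming an input sequence of shape
# (seq_len, batch, input_size); the sizes below are illustrative only.
#
#   seq = torch.autograd.Variable(torch.randn(100, 4, 2))
#   filt = NeuralFilter1P(2, 2)
#   output, (hy, cy) = filt(seq)
#   # output has shape (100, 4, 2); (hy, cy) is the final filter state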
test = NeuralFilter1P(2, 2)
from .NeuralFilter1P import *
\ No newline at end of file