class NeuralFilter1P(NeuralFilterCell):
    """
    A trainable first-order all-pole filter :math:`\\frac{K}{1 - P z^{-1}}`
    with a linear transform (weight + bias) applied to the input before the
    recurrence inherited from :class:`NeuralFilterCell`.

    * **input_size** (int) - the size of the input vector
    * **hidden_size** (int) - the size of the output vector
    """

    def __init__(self, input_size, hidden_size):
        # NOTE(review): the parent __init__ calls reset_parameters() before
        # weight_in / bias_in exist; at that point self.parameters() only
        # yields bias_forget, so the override below is safe to run early.
        super(NeuralFilter1P, self).__init__(hidden_size)

        self.input_size = input_size

        self.weight_in = Parameter(torch.Tensor(hidden_size, input_size))
        self.bias_in = Parameter(torch.Tensor(hidden_size))

        self.reset_parameters()

    def reset_parameters(self):
        """Initialise all parameters uniformly in ±1/sqrt(hidden_size), then
        let the base cell re-initialise the forget bias (it must come last,
        since the loop above also touches bias_forget)."""
        stdv = 1.0 / math.sqrt(self.hidden_size)
        for weight in self.parameters():
            weight.data.uniform_(-stdv, stdv)
        super(NeuralFilter1P, self).reset_parameters()

    def check_forward_input(self, input):
        """Raise RuntimeError if the last input dimension != input_size."""
        if input.size(-1) != self.input_size:
            # BUG FIX: the message previously printed input.size(1) although
            # the condition tests input.size(-1).
            raise RuntimeError(
                "input has inconsistent input_size(-1): got {}, expected {}".format(
                    input.size(-1), self.input_size))

    def step(self, input, hidden):
        """One recurrence step: affine input transform, then the parent's
        first-order filtering (avoids shadowing the builtin ``next``)."""
        in_gate = F.linear(input, self.weight_in, self.bias_in)
        return super(NeuralFilter1P, self).step(in_gate, hidden)
class NeuralFilter2R(torch.nn.Module):
    """
    A trainable second-order all-(real)pole filter
    :math:`\\frac{1}{1 - P_{1} z^{-1}} \\frac{1}{1 - P_{2} z^{-1}}`,
    implemented as a cascade of two first-order cells (one real pole each).

    * **hidden_size** (int) - the size of the data vector
    """

    def __init__(self, hidden_size):
        super(NeuralFilter2R, self).__init__()

        self.hidden_size = hidden_size

        # Two cascaded first-order sections; each learns one real pole.
        self.first_cell = NeuralFilterCell(self.hidden_size)
        self.second_cell = NeuralFilterCell(self.hidden_size)

    def __repr__(self):
        # BUG FIX: the original template referenced {input_size}, which this
        # class never defines (KeyError when printed), and was missing the
        # closing parenthesis.
        return '{name}({hidden_size})'.format(
            name=self.__class__.__name__, hidden_size=self.hidden_size)

    def forward(self, input, hx=None):
        """Filter a (seq_len, batch, hidden_size) sequence through both
        sections and return (output, last_hidden) of the second section.

        BUG FIX: ``hx`` was previously constructed and then ignored; it now
        seeds the first section's hidden state (the second section still
        starts from zero, as before). Behaviour is unchanged for hx=None.
        """
        interm, _ = self.first_cell(input, hx)
        output, hidden = self.second_cell(interm)

        return output, hidden
+ +""" + +import torch +from torch.nn import Parameter +from torch.nn import functional as F + + +class NeuralFilterCell(torch.nn.Module): + """ + A trainable first-order all-pole filter :math:`\\frac{1}{1 - P z^{-1}}` + + * **hidden_size** (int) - the size of the data vector + """ + + def __init__(self, hidden_size): + super(NeuralFilterCell, self).__init__() + + self.hidden_size = hidden_size + + self.bias_forget = Parameter(torch.Tensor(hidden_size)) + + self.reset_parameters() + + def reset_parameters(self): + self.bias_forget.data[0]=0 + + def __repr__(self): + s = '{name}({input_size}, {hidden_size}' + return s.format(name=self.__class__.__name__, **self.__dict__) + + def check_forward_input(self, input): + if input.size(-1) != self.hidden_size: + raise RuntimeError( + "input has inconsistent input_size(-1): got {}, expected {}".format( + input.size(1), self.hidden_size)) + + def check_forward_hidden(self, input, hx): + if input.size(1) != hx.size(0): + raise RuntimeError( + "Input batch size {} doesn't match hidden batch size {}".format( + input.size(1), hx.size(0))) + + if hx.size(1) != self.hidden_size: + raise RuntimeError( + "hidden has inconsistent hidden_size: got {}, expected {}".format( + hx.size(1), self.hidden_size)) + + def step(self, input, hidden): + forgetgate = F.sigmoid(self.bias_forget) + next = (forgetgate * hidden) + input + return next + + def forward(self, input, hx=None): + if hx is None: + hx = torch.autograd.Variable(input.data.new(input.size(1), + self.hidden_size + ).zero_(), requires_grad=False) + + self.check_forward_input(input) + self.check_forward_hidden(input, hx) + + hidden = hx + + output = [] + steps = range(input.size(0)) + for i in steps: + hidden = self.step(input[i], hidden) + output.append(hidden) + + output = torch.cat(output, 0).view(input.size(0), *output[0].size()) + + return output, hidden diff --git a/neural_filters/__init__.py b/neural_filters/__init__.py index a7fa73e..516060e 100644 --- 
from setuptools import setup

setup(
    name='neural_filters',
    version='0.1',
    description='Linear filters for neural networks in pyTorch',
    author='Idiap research institute - Francois Marelli',
    author_email='francois.marelli@idiap.ch',
    # BUG FIX: without `packages`, the built distribution ships no code at
    # all - `pip install` would succeed but `import neural_filters` fail.
    packages=['neural_filters'],
    classifiers=[
        # 3 - Alpha
        # 4 - Beta
        # 5 - Production/Stable
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        # BUG FIX: 'License :: OSI Approved :: GNU GPL v3' is not a valid
        # trove classifier; use the official GPLv3 classifier string.
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Programming Language :: Python :: 3.6',
    ],
    install_requires=['torch'],
)