Commit 4ccf91e9 authored by Francois Marelli

Basic cell and second order

parent 94c76b95
neural_filters/NeuralFilter1P.py

@@ -2,7 +2,7 @@
NeuralFilter1P
**************

This module implements a trainable all-pole first-order filter with a linear combination input, using pyTorch

Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/
@@ -29,27 +29,24 @@
import torch
from torch.nn import Parameter
from torch.nn import functional as F
import math

from .NeuralFilterCell import NeuralFilterCell


class NeuralFilter1P(NeuralFilterCell):
""" """
A trainable first-order all-pole filter :math:`\\frac{K}{1 - P z^{-1}}` A trainable first-order all-pole filter :math:`\\frac{K}{1 - P z^{-1}}` with bias on the input
* **input_size** (int) - the size of the input vector * **input_size** (int) - the size of the input vector
* **hidden_size** (int) - the size of the output vector * **hidden_size** (int) - the size of the output vector
""" """
    def __init__(self, input_size, hidden_size):
        super(NeuralFilter1P, self).__init__(hidden_size)
        self.input_size = input_size

        self.weight_in = Parameter(torch.Tensor(hidden_size, input_size))
        self.bias_in = Parameter(torch.Tensor(hidden_size))

        self.reset_parameters()
    def reset_parameters(self):
@@ -57,9 +54,7 @@
        for weight in self.parameters():
            weight.data.uniform_(-stdv, stdv)
        super(NeuralFilter1P, self).reset_parameters()
@@ -67,41 +62,7 @@
    def check_forward_input(self, input):
        if input.size(-1) != self.input_size:
            raise RuntimeError(
                "input has inconsistent input_size(-1): got {}, expected {}".format(
                    input.size(-1), self.input_size))
    def step(self, input, hidden):
        in_gate = F.linear(input, self.weight_in, self.bias_in)
        next_state = super(NeuralFilter1P, self).step(in_gate, hidden)
        return next_state
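For context, a minimal usage sketch of the refactored class (an illustration, not part of the commit); the inherited forward() expects input shaped (seq_len, batch, input_size):

# Hypothetical usage of NeuralFilter1P -- illustration only.
import torch
from neural_filters import NeuralFilter1P

filt = NeuralFilter1P(input_size=4, hidden_size=8)
x = torch.autograd.Variable(torch.randn(100, 2, 4))  # 100 time steps, batch of 2
output, hidden = filt(x)  # forward() comes from NeuralFilterCell
print(output.size())  # (100, 2, 8): one filtered vector per time step
print(hidden.size())  # (2, 8): final filter state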
"""
NeuralFilter2R
**************
This module implements a trainable all-pole second order filter with real poles using pyTorch
Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/
Written by Francois Marelli <Francois.Marelli@idiap.ch>
This file is part of neural_filters.
neural_filters is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License version 3 as
published by the Free Software Foundation.
neural_filters is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with neural_filters. If not, see <http://www.gnu.org/licenses/>.
"""
import torch

from .NeuralFilterCell import NeuralFilterCell


class NeuralFilter2R(torch.nn.Module):
"""
A trainable second-order all-(real)pole filter :math:`\\frac{1}{1 - P_{1} z^{-1}} \\frac{1}{1 - P_{2} z^{-1}}`
* **hidden_size** (int) - the size of data vector
"""
    def __init__(self, hidden_size):
        super(NeuralFilter2R, self).__init__()
        self.hidden_size = hidden_size

        self.first_cell = NeuralFilterCell(self.hidden_size)
        self.second_cell = NeuralFilterCell(self.hidden_size)
    def __repr__(self):
        s = '{name}({hidden_size})'
        return s.format(name=self.__class__.__name__, **self.__dict__)
    def forward(self, input, hx=None):
        if hx is None:
            hx = torch.autograd.Variable(input.data.new(input.size(1),
                                                        self.hidden_size
                                                        ).zero_(), requires_grad=False)
        interm, interm_hidden = self.first_cell(input, hx)
        output, hidden = self.second_cell(interm)
        return output, hidden
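A quick sanity check, again only a sketch (not in the commit): the cascade of two first-order cells should match the direct second-order recursion y[n] = (P1 + P2) y[n-1] - P1 P2 y[n-2] + x[n] implied by 1 / ((1 - P1 z^-1)(1 - P2 z^-1)):

# Compare the cascade's impulse response with the direct recursion.
import torch
from neural_filters import NeuralFilter2R

filt = NeuralFilter2R(hidden_size=1)
p1 = torch.sigmoid(filt.first_cell.bias_forget).item()   # pole of cell 1
p2 = torch.sigmoid(filt.second_cell.bias_forget).item()  # pole of cell 2

x = torch.zeros(6, 1, 1)  # impulse input, (seq_len, batch, hidden_size)
x[0, 0, 0] = 1.0
y, _ = filt(torch.autograd.Variable(x))

ref, y1, y2 = [], 0.0, 0.0
for n in range(6):
    yn = (p1 + p2) * y1 - p1 * p2 * y2 + float(x[n, 0, 0])
    y2, y1 = y1, yn
    ref.append(yn)

print([float(v) for v in y[:, 0, 0]])  # matches ref up to float precision
print(ref)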
"""
NeuralFilterCell
**************
This module implements a basic trainable all-pole first order filter using pyTorch
Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/
Written by Francois Marelli <Francois.Marelli@idiap.ch>
This file is part of neural_filters.
neural_filters is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License version 3 as
published by the Free Software Foundation.
neural_filters is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with neural_filters. If not, see <http://www.gnu.org/licenses/>.
"""
import torch
from torch.nn import Parameter
from torch.nn import functional as F
class NeuralFilterCell(torch.nn.Module):
    """
    A trainable first-order all-pole filter :math:`\\frac{1}{1 - P z^{-1}}`

    * **hidden_size** (int) - the size of the data vector
    """
    def __init__(self, hidden_size):
        super(NeuralFilterCell, self).__init__()
        self.hidden_size = hidden_size
        self.bias_forget = Parameter(torch.Tensor(hidden_size))

        self.reset_parameters()
    def reset_parameters(self):
        # start every pole at sigmoid(0) = 0.5, not just the first channel
        self.bias_forget.data.fill_(0)
    def __repr__(self):
        s = '{name}({hidden_size})'
        return s.format(name=self.__class__.__name__, **self.__dict__)
    def check_forward_input(self, input):
        if input.size(-1) != self.hidden_size:
            raise RuntimeError(
                "input has inconsistent input_size(-1): got {}, expected {}".format(
                    input.size(-1), self.hidden_size))
    def check_forward_hidden(self, input, hx):
        if input.size(1) != hx.size(0):
            raise RuntimeError(
                "Input batch size {} doesn't match hidden batch size {}".format(
                    input.size(1), hx.size(0)))

        if hx.size(1) != self.hidden_size:
            raise RuntimeError(
                "hidden has inconsistent hidden_size: got {}, expected {}".format(
                    hx.size(1), self.hidden_size))
    def step(self, input, hidden):
        forgetgate = F.sigmoid(self.bias_forget)
        next_state = (forgetgate * hidden) + input
        return next_state
    def forward(self, input, hx=None):
        if hx is None:
            hx = torch.autograd.Variable(input.data.new(input.size(1),
                                                        self.hidden_size
                                                        ).zero_(), requires_grad=False)

        self.check_forward_input(input)
        self.check_forward_hidden(input, hx)

        hidden = hx
        output = []
        steps = range(input.size(0))
        for i in steps:
            hidden = self.step(input[i], hidden)
            output.append(hidden)

        output = torch.cat(output, 0).view(input.size(0), *output[0].size())

        return output, hidden
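Illustration (an assumption layered on the file above, not part of it): since the pole is parameterized as P = sigmoid(bias_forget), it always lies in (0, 1), so the recursion h[n] = P * h[n-1] + x[n] is unconditionally stable and each channel acts as a leaky integrator:

# Reading the poles and reproducing one step of the recursion by hand.
import torch
from torch.nn import functional as F
from neural_filters import NeuralFilterCell

cell = NeuralFilterCell(hidden_size=3)
print(F.sigmoid(cell.bias_forget))  # one pole per channel, each in (0, 1)

x = torch.autograd.Variable(torch.randn(50, 4, 3))  # (seq_len, batch, size)
output, hidden = cell(x)

# One explicit step; equals output[0] because the initial state is zero.
h0 = torch.autograd.Variable(torch.zeros(4, 3))
h1 = F.sigmoid(cell.bias_forget) * h0 + x[0]
print(torch.equal(h1.data, output[0].data))  # True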
neural_filters/__init__.py

from .NeuralFilterCell import *
from .NeuralFilter1P import *
from .NeuralFilter2R import *
setup.py

from setuptools import setup

setup(
    name='neural_filters',
    version='0.1',
    description='Linear filters for neural networks in pyTorch',
    author='Idiap Research Institute - Francois Marelli',
    author_email='francois.marelli@idiap.ch',
    classifiers=[
        # 3 - Alpha
        # 4 - Beta
        # 5 - Production/Stable
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Programming Language :: Python :: 3.6',
    ],
    install_requires=['torch'],
)
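A note on usage (assumption, not part of the commit): once the package is installed, the star imports in __init__.py re-export all three classes at the top level:

# Everything is importable directly from the package root.
from neural_filters import NeuralFilterCell, NeuralFilter1P, NeuralFilter2R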