Commit dc3243af authored by Francois Marelli

New filters, denoms, gradients

parent 4ccf91e9
@@ -49,6 +49,10 @@ class NeuralFilter1P(NeuralFilterCell):
        self.reset_parameters()

    def __repr__(self):
        s = '{name}({input_size},{hidden_size})'
        return s.format(name=self.__class__.__name__, **self.__dict__)

    def reset_parameters(self):
        stdv = 1.0 / math.sqrt(self.hidden_size)
        for weight in self.parameters():
"""
NeuralFilter2CC
***************
This module implements a trainable all-pole second order filter with complex conjugate poles using pyTorch
Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/
Written by Francois Marelli <Francois.Marelli@idiap.ch>
This file is part of neural_filters.
neural_filters is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License version 3 as
published by the Free Software Foundation.
neural_filters is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with neural_filters. If not, see <http://www.gnu.org/licenses/>.
"""
import torch
from torch.nn import Parameter
from torch.nn import functional as F
import numpy as np
class NeuralFilter2CC(torch.nn.Module):
    """
    A trainable second-order all-pole filter :math:`\\frac{1}{1 - 2 P \\cos(\\theta) z^{-1} + P^{2} z^{-2}}`

    * **hidden_size** (int) - the size of the data vector
    """

    def __init__(self, hidden_size):
        super(NeuralFilter2CC, self).__init__()

        self.hidden_size = hidden_size

        self.bias_theta = Parameter(torch.Tensor(hidden_size))
        self.bias_modulus = Parameter(torch.Tensor(hidden_size))

        self.reset_parameters()

    def reset_parameters(self):
        self.bias_modulus.data.zero_()
        self.bias_theta.data.zero_()

    def __repr__(self):
        s = '{name}({hidden_size})'
        return s.format(name=self.__class__.__name__, **self.__dict__)
    def check_forward_input(self, input):
        if input.size(-1) != self.hidden_size:
            raise RuntimeError(
                "input has inconsistent input_size(-1): got {}, expected {}".format(
                    input.size(-1), self.hidden_size))
    def check_forward_hidden(self, input, hx):
        if input.size(1) != hx.size(0):
            raise RuntimeError(
                "Input batch size {} doesn't match hidden batch size {}".format(
                    input.size(1), hx.size(0)))

        if hx.size(1) != self.hidden_size:
            raise RuntimeError(
                "hidden has inconsistent hidden_size: got {}, expected {}".format(
                    hx.size(1), self.hidden_size))
    def step(self, input, delayed, delayed2):
        modulus = F.sigmoid(self.bias_modulus)
        cosangle = F.tanh(self.bias_theta)
        # second-order recurrence: y[n] = x[n] + 2 P cos(theta) y[n-1] - P^2 y[n-2]
        next = input + 2 * cosangle * modulus * delayed - modulus.pow(2) * delayed2
        return next
    def forward(self, input, delayed=None, delayed2=None):
        if delayed is None:
            delayed = torch.autograd.Variable(input.data.new(input.size(1),
                                                             self.hidden_size
                                                             ).zero_(), requires_grad=False)

        if delayed2 is None:
            delayed2 = torch.autograd.Variable(input.data.new(input.size(1),
                                                              self.hidden_size
                                                              ).zero_(), requires_grad=False)

        self.check_forward_input(input)
        self.check_forward_hidden(input, delayed)
        self.check_forward_hidden(input, delayed2)

        d1 = delayed
        d2 = delayed2
        output = []
        steps = range(input.size(0))
        for i in steps:
            next = self.step(input[i], d1, d2)
            output.append(next)
            d2, d1 = d1, next

        output = torch.cat(output, 0).view(input.size(0), *output[0].size())

        return output, d1, d2
    def print_param(self):
        modulus = F.sigmoid(self.bias_modulus)
        cosangle = F.tanh(self.bias_theta)
        p1 = -2 * cosangle * modulus
        p2 = modulus.pow(2)
        print('{}\t{}'.format(p1.data[0], p2.data[0]))
    @property
    def denominator(self):
        # per-channel denominator coefficients [1, -2 P cos(theta), P^2]
        modulus = F.sigmoid(self.bias_modulus)
        cosangle = F.tanh(self.bias_theta)
        p1 = -2 * cosangle * modulus
        p2 = modulus.pow(2)
        p1 = p1.data.numpy()
        p2 = p2.data.numpy()
        p1 = p1.reshape(p1.size, 1)
        p2 = p2.reshape(p2.size, 1)
        one = np.ones(p1.shape)
        denom = np.concatenate((one, p1, p2), axis=1)
        return denom
    @property
    def gradients(self):
        mod_grad = self.bias_modulus.grad
        if mod_grad is not None:
            mod_grad = mod_grad.data.numpy()
            mod_grad = mod_grad.reshape(mod_grad.size, 1)
            cos_grad = self.bias_theta.grad.data.numpy()
            cos_grad = cos_grad.reshape(cos_grad.size, 1)
            return np.concatenate((mod_grad, cos_grad), axis=1)
        else:
            return np.zeros((self.hidden_size, 2))
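
For context (not part of the commit), a minimal usage sketch of the new NeuralFilter2CC module, written against the same old-style PyTorch API used above (torch.autograd.Variable, input shaped (seq_len, batch, hidden_size)); the shapes below are illustrative only:

import torch
from neural_filters import NeuralFilter2CC

filt = NeuralFilter2CC(hidden_size=4)

# illustrative input: sequence length 10, batch of 3, feature size 4
x = torch.autograd.Variable(torch.randn(10, 3, 4))

# returns the filtered sequence plus the last two output samples (the filter state)
y, d1, d2 = filt(x)

# one row [1, -2*P*cos(theta), P**2] per channel; right after init P = 0.5 and cos(theta) = 0
print(filt.denominator)  # shape (4, 3), roughly [1, 0, 0.25] per row
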
"""
NeuralFilter2CD
***************
This module implements a trainable critically damped all-pole second order filter with real poles using pyTorch
Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/
Written by Francois Marelli <Francois.Marelli@idiap.ch>
This file is part of neural_filters.
neural_filters is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License version 3 as
published by the Free Software Foundation.
neural_filters is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with neural_filters. If not, see <http://www.gnu.org/licenses/>.
"""
from . import NeuralFilterCell
import torch
import numpy as np
class NeuralFilter2CD (torch.nn.Module):
    """
    A trainable second-order critically damped all-pole filter :math:`\\frac{1}{(1 - P z^{-1})^{2}}`

    * **hidden_size** (int) - the size of the data vector
    """
    def __init__(self, hidden_size):
        super(NeuralFilter2CD, self).__init__()

        self.hidden_size = hidden_size

        self.cell = NeuralFilterCell(self.hidden_size)

    def __repr__(self):
        s = '{name}({hidden_size})'
        return s.format(name=self.__class__.__name__, **self.__dict__)

    def forward(self, input, hx=None):
        if hx is None:
            hx = torch.autograd.Variable(input.data.new(input.size(1),
                                                        self.hidden_size
                                                        ).zero_(), requires_grad=False)

        # apply the same first-order cell twice: both poles are identical (critically damped)
        interm, interm_hidden = self.cell(input, hx)
        output, hidden = self.cell(interm)

        return output, hidden
    @property
    def denominator(self):
        first = self.cell.denominator
        denom = np.zeros((first.shape[0], 3))
        for i in range(self.hidden_size):
            denom[i] = np.polymul(first[i], first[i])
        return denom

    @property
    def gradients(self):
        return self.cell.gradients
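
Since NeuralFilter2CD runs the same NeuralFilterCell twice, its denominator property simply squares the cell's first-order polynomial: (1 - P z^{-1})^2 = 1 - 2P z^{-1} + P^2 z^{-2}. A quick numeric check of the np.polymul step, with an illustrative pole value:

import numpy as np

P = 0.5                          # illustrative pole; sigmoid(bias_forget) = 0.5 right after reset
first = np.array([1.0, -P])      # first-order denominator: 1 - P z^-1
print(np.polymul(first, first))  # [ 1.   -1.    0.25] == 1 - 2P z^-1 + P^2 z^-2
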
@@ -28,6 +28,7 @@ along with neural_filters. If not, see <http://www.gnu.org/licenses/>.

from . import NeuralFilterCell

import torch
+import numpy as np


class NeuralFilter2R (torch.nn.Module):
    """
@@ -45,7 +46,7 @@ class NeuralFilter2R (torch.nn.Module):
        self.second_cell = NeuralFilterCell(self.hidden_size)

    def __repr__(self):
-        s = '{name}({input_size}, {hidden_size}'
+        s = '{name}({hidden_size})'
        return s.format(name=self.__class__.__name__, **self.__dict__)

    def forward(self, input, hx=None):
@@ -54,7 +55,22 @@ class NeuralFilter2R (torch.nn.Module):
                                                   self.hidden_size
                                                   ).zero_(), requires_grad=False)

-        interm, interm_hidden = self.first_cell(input)
+        interm, interm_hidden = self.first_cell(input, hx)
        output, hidden = self.second_cell(interm)

        return output, hidden

+    @property
+    def denominator(self):
+        first = self.first_cell.denominator
+        second = self.second_cell.denominator
+        denom = np.zeros((first.shape[0], 3))
+        for i in range(self.hidden_size):
+            denom[i] = np.polymul(first[i], second[i])
+        return denom
+
+    @property
+    def gradients(self):
+        first = self.first_cell.gradients
+        second = self.second_cell.gradients
+        return np.concatenate((first, second), axis=1)
\ No newline at end of file
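
For the general two-real-pole cascade above, the new denominator property multiplies the two first-order polynomials per channel, giving 1 - (a + b) z^{-1} + a b z^{-2}, and gradients stacks the bias_forget gradient of each cell. A minimal sketch of the added properties, assuming NeuralFilter2R is constructed from the hidden size alone (as the updated __repr__ suggests); shapes are illustrative:

import torch
from neural_filters import NeuralFilter2R

filt = NeuralFilter2R(4)   # assumed signature: hidden size only
x = torch.autograd.Variable(torch.randn(10, 3, 4))
y, h = filt(x)
y.sum().backward()

print(filt.denominator.shape)  # (4, 3): [1, -(a+b), a*b] per channel
print(filt.gradients.shape)    # (4, 2): one bias_forget gradient column per cascaded cell
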
@@ -28,6 +28,7 @@ along with neural_filters. If not, see <http://www.gnu.org/licenses/>.

import torch
from torch.nn import Parameter
from torch.nn import functional as F
+import numpy as np


class NeuralFilterCell(torch.nn.Module):
@@ -47,10 +48,10 @@ class NeuralFilterCell(torch.nn.Module):
        self.reset_parameters()

    def reset_parameters(self):
-        self.bias_forget.data[0]=0
+        self.bias_forget.data.zero_()

    def __repr__(self):
-        s = '{name}({input_size}, {hidden_size}'
+        s = '{name}({hidden_size})'
        return s.format(name=self.__class__.__name__, **self.__dict__)

    def check_forward_input(self, input):
@@ -68,7 +69,7 @@ class NeuralFilterCell(torch.nn.Module):
        if hx.size(1) != self.hidden_size:
            raise RuntimeError(
                "hidden has inconsistent hidden_size: got {}, expected {}".format(
-                hx.size(1), self.hidden_size))
+                    hx.size(1), self.hidden_size))

    def step(self, input, hidden):
        forgetgate = F.sigmoid(self.bias_forget)
@@ -78,8 +79,8 @@
    def forward(self, input, hx=None):
        if hx is None:
            hx = torch.autograd.Variable(input.data.new(input.size(1),
-                                        self.hidden_size
-                                        ).zero_(), requires_grad=False)
+                                                        self.hidden_size
+                                                        ).zero_(), requires_grad=False)

        self.check_forward_input(input)
        self.check_forward_hidden(input, hx)
@@ -95,3 +96,20 @@ class NeuralFilterCell(torch.nn.Module):
        output = torch.cat(output, 0).view(input.size(0), *output[0].size())

        return output, hidden

+    @property
+    def gradients(self):
+        grad = self.bias_forget.grad
+        if grad is not None:
+            gradient = grad.data.numpy()
+            return gradient.reshape((gradient.size, 1))
+        else:
+            return np.zeros((self.hidden_size, 1))
+
+    @property
+    def denominator(self):
+        forgetgate = F.sigmoid(self.bias_forget).data.numpy()
+        forgetgate = forgetgate.reshape((forgetgate.size, 1))
+        one = np.ones(forgetgate.shape)
+        denom = np.concatenate((one, -forgetgate), axis=1)
+        return denom
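
The same two properties are added to NeuralFilterCell itself: denominator reports the per-channel first-order polynomial [1, -sigmoid(bias_forget)], and gradients returns zeros until a backward pass has populated bias_forget.grad. A minimal check (not part of the commit), assuming the package is importable as neural_filters:

from neural_filters import NeuralFilterCell

cell = NeuralFilterCell(4)

# right after reset_parameters() the pole is sigmoid(0) = 0.5 in every channel
print(cell.denominator)  # 4 rows of [ 1.  -0.5]

# bias_forget.grad is still None, so this falls back to zeros
print(cell.gradients)    # shape (4, 1)
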
__init__.py

from .NeuralFilterCell import *
from .NeuralFilter1P import *
-from .NeuralFilter2R import *
\ No newline at end of file
+from .NeuralFilter2R import *
+from .NeuralFilter2CC import *
+from .NeuralFilter2CD import *
\ No newline at end of file