Commit 82217ab9 authored by M. François

Replace deprecated torch functions

v 1.2
parent b00df262
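For context: `torch.nn.functional.sigmoid` and `torch.nn.functional.tanh` were deprecated around PyTorch 0.4.1 in favour of `torch.sigmoid` and `torch.tanh`, which is what this commit migrates to. A minimal sketch of the mapping (illustrative only):

```python
import torch

x = torch.linspace(-2.0, 2.0, 5)

# Deprecated (emit a deprecation warning on recent PyTorch):
#   torch.nn.functional.sigmoid(x)
#   torch.nn.functional.tanh(x)

# Current equivalents used throughout this commit:
y = torch.sigmoid(x)
z = torch.tanh(x)
```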
@@ -28,7 +28,6 @@ along with neural_filters. If not, see <http://www.gnu.org/licenses/>.
 import numpy as np
 import torch
 from torch.nn import Parameter
-from torch.nn import functional as F
 from torch.nn._functions.rnn import Recurrent, VariableRecurrent
 from torch.nn.utils.rnn import PackedSequence
@@ -97,7 +96,7 @@ class NeuralFilter(torch.nn.Module):
     def step(self, input_var, hidden, a=None):
         if a is None:
-            a = F.sigmoid(self.bias_forget)
+            a = torch.sigmoid(self.bias_forget)
         next_state = (a * hidden) + input_var
         return next_state
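Aside: `step` implements a first-order (one-pole) IIR filter, h[t] = a·h[t−1] + x[t], where the pole a = sigmoid(bias_forget) lies in (0, 1) and is therefore stable. A standalone numpy sketch of the same recurrence (hypothetical helper name, not part of the package):

```python
import numpy as np

def one_pole(x, a):
    """h[t] = a * h[t-1] + x[t] -- the recurrence computed by step()."""
    h, out = 0.0, []
    for xt in x:
        h = a * h + xt
        out.append(h)
    return np.array(out)

one_pole(np.ones(5), a=0.5)  # -> [1., 1.5, 1.75, 1.875, 1.9375]
```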
@@ -117,7 +116,7 @@ class NeuralFilter(torch.nn.Module):
         self.check_forward_args(input_var, hidden, batch_sizes)
         # compute this once for all steps for efficiency
-        a = F.sigmoid(self.bias_forget)
+        a = torch.sigmoid(self.bias_forget)
         func = Recurrent(self.step) if batch_sizes is None else VariableRecurrent(self.step)
         nexth, output = func(input_var, hidden, (a,), batch_sizes)
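Note that `Recurrent` and `VariableRecurrent` come from the private `torch.nn._functions.rnn` module, which was removed in later PyTorch releases. For the dense, non-packed case, a rough equivalent of what `Recurrent(self.step)` does is an explicit loop over time, sketched below (hypothetical stand-in, not the library code):

```python
import torch

def recurrent_apply(step, input_var, hidden, a):
    # input_var: (seq_len, batch, features); applies step() at every
    # time step, threading the hidden state through the sequence.
    outputs = []
    for t in range(input_var.size(0)):
        hidden = step(input_var[t], hidden, a)
        outputs.append(hidden)
    return hidden, torch.stack(outputs)
```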
@@ -138,7 +137,7 @@ class NeuralFilter(torch.nn.Module):
     @property
     def denominator(self):
-        forgetgate = F.sigmoid(self.bias_forget).detach().cpu().numpy()
+        forgetgate = torch.sigmoid(self.bias_forget).detach().cpu().numpy()
         forgetgate = forgetgate.reshape((forgetgate.size, 1))
         one = np.ones(forgetgate.shape)
         denom = np.concatenate((one, -forgetgate), axis=1)
...
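The `denominator` property exposes one row `[1, -a]` per unit, i.e. the denominator of the transfer function H(z) = 1 / (1 − a·z⁻¹). As a sanity check (illustrative values, using scipy), filtering with those coefficients reproduces the `step` recurrence:

```python
import numpy as np
from scipy.signal import lfilter

a = 0.5                              # stands in for sigmoid(bias_forget)
y = lfilter([1.0], [1.0, -a], np.ones(5))
print(y)                             # [1. 1.5 1.75 1.875 1.9375], matching step()
```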
@@ -114,7 +114,7 @@ class NeuralFilter2CC(torch.nn.Module):
     def step(self, input_var, hidden, a=None, b=None):
         if a is None or b is None:
-            modulus = F.sigmoid(self.bias_modulus)
-            cosangle = F.tanh(self.bias_theta)
+            modulus = torch.sigmoid(self.bias_modulus)
+            cosangle = torch.tanh(self.bias_theta)
             a = 2 * cosangle * modulus
             b = - modulus.pow(2)
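In `NeuralFilter2CC`, the coefficients a = 2·cosθ·m and b = −m² parameterise a second-order section with complex-conjugate poles m·e^(±iθ): the characteristic polynomial of h[t] = a·h[t−1] + b·h[t−2] + x[t] is z² − a·z − b = z² − 2m·cosθ·z + m², and m = sigmoid(bias_modulus) < 1 keeps it stable. A quick numerical check (illustrative values):

```python
import numpy as np

m, costheta = 0.9, 0.5            # stand-ins for sigmoid(bias_modulus), tanh(bias_theta)
a = 2 * costheta * m              # 0.9
b = -m ** 2                       # -0.81
poles = np.roots([1.0, -a, -b])   # roots of z**2 - a*z - b
print(np.abs(poles))              # [0.9 0.9] -> conjugate pair of modulus m
```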
@@ -140,7 +140,7 @@ class NeuralFilter2CC(torch.nn.Module):
         self.check_forward_args(input_var, hidden, batch_sizes)
         # do not recompute this at each step to gain efficiency
-        modulus = F.sigmoid(self.bias_modulus)
-        cosangle = F.tanh(self.bias_theta)
+        modulus = torch.sigmoid(self.bias_modulus)
+        cosangle = torch.tanh(self.bias_theta)
         a = 2 * cosangle * modulus
         b = - modulus.pow(2)
@@ -154,7 +154,7 @@ class NeuralFilter2CC(torch.nn.Module):
         return output, nexth, modulus

     def print_param(self):
-        modulus = F.sigmoid(self.bias_modulus)
-        cosangle = F.tanh(self.bias_theta)
+        modulus = torch.sigmoid(self.bias_modulus)
+        cosangle = torch.tanh(self.bias_theta)
         p1 = -2 * cosangle * modulus
         p2 = modulus.pow(2)
@@ -162,7 +162,7 @@ class NeuralFilter2CC(torch.nn.Module):
     @property
     def denominator(self):
-        modulus = F.sigmoid(self.bias_modulus)
-        cosangle = F.tanh(self.bias_theta)
+        modulus = torch.sigmoid(self.bias_modulus)
+        cosangle = torch.tanh(self.bias_theta)
         p1 = -2 * cosangle * modulus
         p2 = modulus.pow(2)
...
@@ -25,10 +25,10 @@ along with neural_filters. If not, see <http://www.gnu.org/licenses/>.
 """

-from . import NeuralFilter
-import torch
 import numpy as np
+import torch
+from . import NeuralFilter


 class NeuralFilter2R(torch.nn.Module):
...
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
 setup(
     name='neural-filters',
-    version='1.1',
+    version='1.2',
     description='Linear filters for neural networks in pyTorch',
     author='Idiap research institute - Francois Marelli',
     author_email='francois.marelli@idiap.ch',
...