software / neural_filters · Commits · 86e3aa0c

Commit 86e3aa0c authored May 08, 2018 by M. François
Parent b0ec956f

PackedSequence compliance

Showing 4 changed files with 90 additions and 134 deletions (+90 −134):
neural_filters/__init__.py           +0  −1
neural_filters/neural_filter.py      +45 −32
neural_filters/neural_filter_1L.py   +0  −68
neural_filters/neural_filter_2CC.py  +45 −33
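For context, torch.nn.utils.rnn.PackedSequence is the (data, batch_sizes) pair that PyTorch's recurrent layers use to batch variable-length sequences without padding overhead; the diffs below teach both filter classes to accept and return it. A small illustration of the format (the values are an assumption for the example, not from the commit):

    import torch
    from torch.nn.utils.rnn import pack_padded_sequence

    # A batch of two sequences (lengths 3 and 2, feature size 1), zero-padded
    # into a time-major (seq_len, batch, feature) tensor.
    padded = torch.tensor([[[1.], [4.]],
                           [[2.], [5.]],
                           [[3.], [0.]]])
    packed = pack_padded_sequence(padded, lengths=[3, 2])

    # packed.data        -> [[1.], [4.], [2.], [5.], [3.]]  (no padding stored)
    # packed.batch_sizes -> [2, 2, 1]  (sequences still active at each time step)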
neural_filters/__init__.py

@@ -27,7 +27,6 @@ def atanh(x):

 from .log_loss import *
 from .neural_filter import *
-from .neural_filter_1L import *
 from .neural_filter_2CC import *
 from .neural_filter_2CD import *
 from .neural_filter_2R import *
neural_filters/neural_filter.py

@@ -25,12 +25,15 @@ along with neural_filters. If not, see <http://www.gnu.org/licenses/>.
 """

+import numpy as np
 import torch
 from torch.nn import Parameter
 from torch.nn import functional as F
-import numpy as np
+from torch.nn._functions.rnn import Recurrent, VariableRecurrent
+from torch.nn.utils.rnn import PackedSequence

-from . import asig
+from . import INIT_MODULUS, asig


 class NeuralFilter(torch.nn.Module):
     """
@@ -66,53 +69,63 @@ class NeuralFilter(torch.nn.Module):
         s = '{name}({hidden_size})'
         return s.format(name=self.__class__.__name__, **self.__dict__)

-    def check_forward_input(self, input_state):
-        if input_state.size(-1) != self.hidden_size:
+    def check_forward_args(self, input_var, hidden, batch_sizes):
+        is_input_packed = batch_sizes is not None
+        expected_input_dim = 2 if is_input_packed else 3
+        if input_var.dim() != expected_input_dim:
             raise RuntimeError(
-                "input has inconsistent input_size(-1): got {}, expected {}".format(
-                    input_state.size(1), self.hidden_size))
-
-    def check_forward_hidden(self, input_state, hx):
-        if input_state.size(1) != hx.size(0):
+                'input must have {} dimensions, got {}'.format(
+                    expected_input_dim, input_var.dim()))
+        if self.hidden_size != input_var.size(-1):
             raise RuntimeError(
-                "Input batch size {} doesn't match hidden batch size {}".format(
-                    input_state.size(1), hx.size(0)))
-        if hx.size(1) != self.hidden_size:
-            raise RuntimeError(
-                "hidden has inconsistent hidden_size: got {}, expected {}".format(
-                    hx.size(1), self.hidden_size))
+                'input.size(-1) must be equal to hidden_size. Expected {}, got {}'.format(
+                    self.input_size, input_var.size(-1)))
+
+        if is_input_packed:
+            mini_batch = int(batch_sizes[0])
+        else:
+            mini_batch = input_var.size(1)
+        expected_hidden_size = (mini_batch, self.hidden_size)
+
+        def check_hidden_size(hx, expected_hidden_size, msg='Expected hidden size {}, got {}'):
+            if tuple(hx.size()) != expected_hidden_size:
+                raise RuntimeError(msg.format(expected_hidden_size, tuple(hx.size())))

-    def step(self, input_state, hidden, a=None):
+        check_hidden_size(hidden, expected_hidden_size, 'Expected hidden[0] size {}, got {}')
+
+    def step(self, input_var, hidden, a=None):
         if a is None:
             a = F.sigmoid(self.bias_forget)
-        next_state = (a * hidden) + input_state
+        next_state = (a * hidden) + input_var
         return next_state

-    def forward(self, input_state, hx=None):
-        if hx is None:
-            hx = torch.autograd.Variable(input_state.data.new(input_state.size(1),
-                                                              self.hidden_size).zero_(),
-                                         requires_grad=False)
-
-        self.check_forward_input(input_state)
-        self.check_forward_hidden(input_state, hx)
-        hidden = hx
+    def forward(self, input_var, hidden=None):
+        is_packed = isinstance(input_var, PackedSequence)
+        if is_packed:
+            input_var, batch_sizes = input_var
+            max_batch_size = int(batch_sizes[0])
+        else:
+            batch_sizes = None
+            max_batch_size = input_var.size(1)
+
+        if hidden is None:
+            hidden = input_var.data.new_zeros(max_batch_size, self.hidden_size,
+                                              requires_grad=False)
+
+        self.check_forward_args(input_var, hidden, batch_sizes)

         # compute this once for all steps for efficiency
         a = F.sigmoid(self.bias_forget)

-        output = []
-        steps = range(input_state.size(0))
-        for i in steps:
-            hidden = self.step(input_state[i], hidden, a=a)
-            output.append(hidden)
-
-        output = torch.cat(output, 0).view(input_state.size(0), *output[0].size())
+        func = Recurrent(self.step) if batch_sizes is None else VariableRecurrent(self.step)
+        nexth, output = func(input_var, hidden, (a,), batch_sizes)

-        return output, hidden
+        if is_packed:
+            output = PackedSequence(output, batch_sizes)
+
+        return output, nexth

     @property
     def gradients(self):
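After this rewrite, NeuralFilter follows the calling convention of torch.nn.RNN in this generation of PyTorch: it accepts either a padded (seq_len, batch, hidden_size) tensor or a PackedSequence and returns its output in the same form. A minimal usage sketch, assuming (as the class's __repr__ and checks suggest) a constructor taking only hidden_size:

    import torch
    from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
    from neural_filters import NeuralFilter

    filt = NeuralFilter(4)

    # Two sequences of lengths 5 and 3, zero-padded to (seq_len=5, batch=2, size=4).
    padded = torch.randn(5, 2, 4)
    packed = pack_padded_sequence(padded, lengths=[5, 3])

    output, h_n = filt(packed)        # output comes back as a PackedSequence
    padded_out, lengths = pad_packed_sequence(output)

    output, h_n = filt(padded)        # padded input takes the plain Recurrent path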
neural_filters/neural_filter_1L.py deleted (100644 → 0)
"""
NeuralFilter1P
**************
This module implements a trainable all-pole first order with linear combination input filter using pyTorch
Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/
Written by Francois Marelli <Francois.Marelli@idiap.ch>
This file is part of neural_filters.
neural_filters is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License version 3 as
published by the Free Software Foundation.
neural_filters is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with neural_filters. If not, see <http://www.gnu.org/licenses/>.
"""
import
torch
from
torch.nn
import
Parameter
from
torch.nn
import
functional
as
F
from
.
import
NeuralFilter
class
NeuralFilter1L
(
NeuralFilter
):
"""
A trainable first-order all-pole filter :math:`
\
\
frac{K}{1 - P z^{-1}}` with bias on the input
* **input_size** (int) - the size of the input vector
* **hidden_size** (int) - the size of the output vector
"""
def
__init__
(
self
,
input_size
,
hidden_size
):
super
(
NeuralFilter1L
,
self
).
__init__
(
hidden_size
)
self
.
input_size
=
input_size
self
.
weight_in
=
Parameter
(
torch
.
Tensor
(
hidden_size
,
input_size
))
self
.
bias_in
=
Parameter
(
torch
.
Tensor
(
hidden_size
))
self
.
reset_parameters
()
def
__repr__
(
self
):
s
=
'{name}({input_size},{hidden_size})'
return
s
.
format
(
name
=
self
.
__class__
.
__name__
,
**
self
.
__dict__
)
def
reset_parameters
(
self
,
init
=
None
):
super
(
NeuralFilter1L
,
self
).
reset_parameters
(
init
)
def
check_forward_input
(
self
,
input_state
):
if
input_state
.
size
(
-
1
)
!=
self
.
input_size
:
raise
RuntimeError
(
"input has inconsistent input_size(-1): got {}, expected {}"
.
format
(
input_state
.
size
(
1
),
self
.
input_size
))
def
step
(
self
,
input_state
,
hidden
):
in_gate
=
F
.
linear
(
input_state
,
self
.
weight_in
,
self
.
bias_in
)
next_state
=
super
(
NeuralFilter1L
,
self
).
step
(
in_gate
,
hidden
)
return
next_state
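For reference, the deleted class chained a learned input projection into the parent class's first-order recursion: with P = sigmoid(bias_forget), each step computes h[t] = P * h[t-1] + (W x[t] + b), i.e. one pole at P per channel, matching the docstring's K / (1 - P z^{-1}). A standalone paraphrase of that step (the function and argument names are mine, not the repository's):

    import torch
    import torch.nn.functional as F

    def filter_1l_step(x, h_prev, weight_in, bias_in, bias_forget):
        # Input projection: W x + b (the "linear combination input").
        in_gate = F.linear(x, weight_in, bias_in)
        # First-order all-pole recursion h[t] = P * h[t-1] + in_gate,
        # with the pole P kept inside the unit circle by the sigmoid.
        p = torch.sigmoid(bias_forget)
        return p * h_prev + in_gate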
neural_filters/neural_filter_2CC.py

@@ -29,6 +29,8 @@ import numpy as np
 import torch
 from torch.nn import Parameter
 from torch.nn import functional as F
+from torch.nn._functions.rnn import Recurrent, VariableRecurrent
+from torch.nn.utils.rnn import PackedSequence

 from . import MIN_ANGLE, MAX_ANGLE, INIT_MODULUS, asig, atanh
@@ -82,22 +84,33 @@ class NeuralFilter2CC(torch.nn.Module):
         s = '{name}({hidden_size})'
         return s.format(name=self.__class__.__name__, **self.__dict__)

-    def check_forward_input(self, input_var):
-        if input_var.size(-1) != self.hidden_size:
+    def check_forward_args(self, input_var, hidden, batch_sizes):
+        is_input_packed = batch_sizes is not None
+        expected_input_dim = 2 if is_input_packed else 3
+        if input_var.dim() != expected_input_dim:
             raise RuntimeError(
-                "input has inconsistent input_size(-1): got {}, expected {}".format(
-                    input_var.size(1), self.hidden_size))
-
-    def check_forward_hidden(self, input_var, hx):
-        if input_var.size(1) != hx.size(0):
+                'input must have {} dimensions, got {}'.format(
+                    expected_input_dim, input_var.dim()))
+        if self.hidden_size != input_var.size(-1):
             raise RuntimeError(
-                "Input batch size {} doesn't match hidden batch size {}".format(
-                    input_var.size(1), hx.size(0)))
-        if hx.size(1) != self.hidden_size:
-            raise RuntimeError(
-                "hidden has inconsistent hidden_size: got {}, expected {}".format(
-                    hx.size(1), self.hidden_size))
+                'input.size(-1) must be equal to hidden_size. Expected {}, got {}'.format(
+                    self.input_size, input_var.size(-1)))
+
+        if is_input_packed:
+            mini_batch = int(batch_sizes[0])
+        else:
+            mini_batch = input_var.size(1)
+        expected_hidden_size = (mini_batch, self.hidden_size)
+
+        def check_hidden_size(hx, expected_hidden_size, msg='Expected hidden size {}, got {}'):
+            if tuple(hx.size()) != expected_hidden_size:
+                raise RuntimeError(msg.format(expected_hidden_size, tuple(hx.size())))
+
+        check_hidden_size(hidden[0], expected_hidden_size, 'Expected hidden[0] size {}, got {}')
+        check_hidden_size(hidden[1], expected_hidden_size, 'Expected hidden[1] size {}, got {}')

     def step(self, input_var, hidden, a=None, b=None):
         if a is None or b is None:
@@ -108,21 +121,23 @@ class NeuralFilter2CC(torch.nn.Module):
         next_state = input_var + a * hidden[0] + b * hidden[1]
-        return next_state
+        return next_state, hidden[0]

-    def forward(self, input_var, hidden=(None, None)):
-        h0, h1 = hidden
-        if h0 is None:
-            h0 = torch.autograd.Variable(input_var.data.new(input_var.size(1),
-                                                            self.hidden_size).zero_(),
-                                         requires_grad=False)
-        if h1 is None:
-            h1 = torch.autograd.Variable(input_var.data.new(input_var.size(1),
-                                                            self.hidden_size).zero_(),
-                                         requires_grad=False)
+    def forward(self, input_var, hidden=None):
+        is_packed = isinstance(input_var, PackedSequence)
+        if is_packed:
+            input_var, batch_sizes = input_var
+            max_batch_size = int(batch_sizes[0])
+        else:
+            batch_sizes = None
+            max_batch_size = input_var.size(1)
+
+        if hidden is None:
+            h = input_var.new_zeros(max_batch_size, self.hidden_size, requires_grad=False)
+            hidden = (h, h)

-        self.check_forward_input(input_var)
-        self.check_forward_hidden(input_var, h0)
-        self.check_forward_hidden(input_var, h1)
+        self.check_forward_args(input_var, hidden, batch_sizes)

         # do not recompute this at each step to gain efficiency
         modulus = F.sigmoid(self.bias_modulus)
@@ -130,16 +145,13 @@ class NeuralFilter2CC(torch.nn.Module):
         a = 2 * cosangle * modulus
         b = -modulus.pow(2)

-        output = []
-        steps = range(input_var.size(0))
-        for i in steps:
-            next_state = self.step(input_var[i], (h0, h1), a=a, b=b)
-            output.append(next_state)
-            h1, h0 = h0, next_state
-
-        output = torch.cat(output, 0).view(input_var.size(0), *output[0].size())
+        func = Recurrent(self.step) if batch_sizes is None else VariableRecurrent(self.step)
+        nexth, output = func(input_var, hidden, (a, b), batch_sizes)

-        return output, (h0, h1)
+        if is_packed:
+            output = PackedSequence(output, batch_sizes)
+
+        return output, nexth

     def print_param(self):
         modulus = F.sigmoid(self.bias_modulus)
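The coefficients computed in forward() place a complex-conjugate pole pair at r e^{±jθ}: with r = sigmoid(bias_modulus) and cosθ derived from the angle parameter, the recursion y[t] = x[t] + 2 r cosθ · y[t-1] - r^2 · y[t-2] realizes H(z) = 1 / (1 - 2 r cosθ z^{-1} + r^2 z^{-2}). A standalone paraphrase of one step (names are mine; the tuple return is what lets Recurrent/VariableRecurrent thread the two-sample state along):

    import torch

    def filter_2cc_step(x, hidden, r, cos_theta):
        # Denominator 1 - 2*r*cos(theta)*z^-1 + r^2*z^-2: conjugate poles at
        # r * exp(+/- j*theta), stable for r in (0, 1).
        a = 2 * cos_theta * r
        b = -r ** 2
        h0, h1 = hidden                     # (y[t-1], y[t-2])
        next_state = x + a * h0 + b * h1    # y[t]
        return next_state, h0               # next hidden pair: (y[t], y[t-1])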