neural_filters, commit 321f76a2
authored Feb 16, 2018 by Francois Marelli

First order all-pole

parent 304edcb9

Showing 3 changed files with 120 additions and 0 deletions:

  .gitignore                          +4    -0
  neural_filters/NeuralFilter1P.py    +115  -0
  neural_filters/__init__.py          +1    -0
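For orientation: the filter implemented by the new module, :math:`\\frac{K}{1 - P z^{-1}}` in its docstring, is in the time domain the first-order recurrence

    y[n] = P * y[n-1] + K * x[n]

and step() below computes this form: the sigmoid-activated forget gate supplies the pole P (constrained to (0, 1)) and the linear input projection plays the role of the K * x[n] term.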
.gitignore  (view file @ 321f76a2)

*~
docs/html
docs/sphinx/_build

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
...
neural_filters/NeuralFilter1P.py  (new file @ 321f76a2, mode 100644)
"""
NeuralFilter1P
**************
This module implements a trainable all-pole first order filter using pyTorch
Copyright (c) 2018 Idiap Research Institute, http://www.idiap.ch/
Written by Francois Marelli <Francois.Marelli@idiap.ch>
This file is part of neural_filters.
neural_filters is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License version 3 as
published by the Free Software Foundation.
neural_filters is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with neural_filters. If not, see <http://www.gnu.org/licenses/>.
"""
import
torch
from
torch.nn
import
Parameter
from
torch.nn
import
functional
as
F
import
math
class
NeuralFilter1P
(
torch
.
nn
.
Module
):
"""
A trainable first-order all-pole filter :math:`
\
\
frac{K}{1 - P z^{-1}}`
* **input_size** (int) - the size of the input vector
* **hidden_size** (int) - the size of the output vector
"""
def
__init__
(
self
,
input_size
,
hidden_size
):
super
(
NeuralFilter1P
,
self
).
__init__
()
self
.
input_size
=
input_size
self
.
hidden_size
=
hidden_size
self
.
weight_ih
=
Parameter
(
torch
.
Tensor
(
hidden_size
,
input_size
))
self
.
bias_ih
=
Parameter
(
torch
.
Tensor
(
2
*
hidden_size
))
self
.
reset_parameters
()
def
reset_parameters
(
self
):
stdv
=
1.0
/
math
.
sqrt
(
self
.
hidden_size
)
for
weight
in
self
.
parameters
():
weight
.
data
.
uniform_
(
-
stdv
,
stdv
)
def
forward
(
self
,
input
,
hx
=
None
):
if
hx
is
None
:
vhx
=
torch
.
autograd
.
Variable
(
input
.
data
.
new
(
input
.
size
(
1
),
self
.
hidden_size
).
zero_
(),
requires_grad
=
False
)
hx
=
(
vhx
,
vhx
)
self
.
check_forward_input
(
input
)
self
.
check_forward_hidden
(
input
,
hx
[
0
],
'[0]'
)
self
.
check_forward_hidden
(
input
,
hx
[
1
],
'[1]'
)
hidden
=
hx
output
=
[]
steps
=
range
(
input
.
size
(
0
))
for
i
in
steps
:
hidden
=
self
.
step
(
input
[
i
],
hidden
)
output
.
append
(
hidden
[
0
])
output
=
torch
.
cat
(
output
,
0
).
view
(
input
.
size
(
0
),
*
output
[
0
].
size
())
return
output
,
hidden
def
__repr__
(
self
):
s
=
'{name}({input_size}, {hidden_size}'
return
s
.
format
(
name
=
self
.
__class__
.
__name__
,
**
self
.
__dict__
)
def
check_forward_input
(
self
,
input
):
if
input
.
size
(
-
1
)
!=
self
.
input_size
:
raise
RuntimeError
(
"input has inconsistent input_size(-1): got {}, expected {}"
.
format
(
input
.
size
(
1
),
self
.
input_size
))
def
check_forward_hidden
(
self
,
input
,
hx
,
hidden_label
=
''
):
if
input
.
size
(
1
)
!=
hx
.
size
(
0
):
raise
RuntimeError
(
"Input batch size {} doesn't match hidden{} batch size {}"
.
format
(
input
.
size
(
1
),
hidden_label
,
hx
.
size
(
0
)))
if
hx
.
size
(
1
)
!=
self
.
hidden_size
:
raise
RuntimeError
(
"hidden{} has inconsistent hidden_size: got {}, expected {}"
.
format
(
hidden_label
,
hx
.
size
(
1
),
self
.
hidden_size
))
def
step
(
self
,
input
,
hidden
):
hx
,
cx
=
hidden
gates
=
F
.
linear
(
input
,
self
.
weight_ih
,
self
.
bias_ih
)
forgetgate
,
cellgate
=
gates
.
chunk
(
2
,
1
)
forgetgate
=
F
.
sigmoid
(
forgetgate
)
cy
=
(
forgetgate
*
cx
)
+
cellgate
hy
=
cy
return
hy
,
cy
test
=
NeuralFilter1P
(
2
,
2
)
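A minimal usage sketch (not part of the commit, and written against the PyTorch 0.3-era API the module itself uses, hence autograd.Variable). forward() expects input of shape (seq_len, batch, input_size), since it unrolls over input.size(0) and treats input.size(1) as the batch dimension:

import torch
from torch.autograd import Variable

from neural_filters.NeuralFilter1P import NeuralFilter1P

filt = NeuralFilter1P(input_size=2, hidden_size=3)
x = Variable(torch.randn(10, 4, 2))  # 10 time steps, batch of 4, 2 features

output, (hy, cy) = filt(x)           # output: (10, 4, 3); final state: two (4, 3) tensors
print(filt)                          # NeuralFilter1P(2, 3)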
neural_filters/__init__.py  (new file @ 321f76a2, mode 100644)

from .NeuralFilter1P import *
\ No newline at end of file
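With this wildcard re-export, the class is importable from the package root, e.g.:

from neural_filters import NeuralFilter1P

Note that since NeuralFilter1P.py defines no __all__, the wildcard also re-exports every other public name in the module, including its imports (torch, Parameter, F, math) and the module-level test instance.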