software / neural_filters
Commit 20aa0846 authored Feb 16, 2018 by Francois Marelli
Dimension bug fix
parent 8ccef496
Showing 2 changed files with 30 additions and 38 deletions
__init__.py (+1, -0)
neural_filters/NeuralFilter1P.py (+29, -38)
__init__.py (new file, mode 100644)
+from .neural_filters import *
\ No newline at end of file
neural_filters/NeuralFilter1P.py
@@ -44,8 +44,10 @@ class NeuralFilter1P(torch.nn.Module):
         self.input_size = input_size
         self.hidden_size = hidden_size
-        self.weight_ih = Parameter(torch.Tensor(hidden_size, input_size))
-        self.bias_ih = Parameter(torch.Tensor(2 * hidden_size))
+        self.weight_in = Parameter(torch.Tensor(hidden_size, input_size))
+        self.bias_in = Parameter(torch.Tensor(hidden_size))
+        self.bias_forget = Parameter(torch.Tensor(hidden_size))
         self.reset_parameters()
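Note for orientation (not part of the commit): a hedged reading of the parameter change above is that the LSTM-style pair weight_ih / bias_ih, where bias_ih was sized 2 * hidden_size so it could be chunked into two gates, is replaced by a single input weight plus two per-unit bias vectors, so each bias now matches hidden_size directly. A minimal shape sketch with purely illustrative sizes:

import torch
from torch.nn import Parameter

hidden_size, input_size = 4, 3  # hypothetical sizes, for illustration only

weight_in = Parameter(torch.Tensor(hidden_size, input_size))  # input projection, shape (hidden_size, input_size)
bias_in = Parameter(torch.Tensor(hidden_size))                # input bias, shape (hidden_size,)
bias_forget = Parameter(torch.Tensor(hidden_size))            # per-unit forget bias, shape (hidden_size,)

print(weight_in.shape, bias_in.shape, bias_forget.shape)
# torch.Size([4, 3]) torch.Size([4]) torch.Size([4])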
@@ -54,29 +56,6 @@ class NeuralFilter1P(torch.nn.Module):
         for weight in self.parameters():
             weight.data.uniform_(-stdv, stdv)
-    def forward(self, input, hx=None):
-        if hx is None:
-            vhx = torch.autograd.Variable(input.data.new(input.size(1), self.hidden_size).zero_(), requires_grad=False)
-            hx = (vhx, vhx)
-        self.check_forward_input(input)
-        self.check_forward_hidden(input, hx[0], '[0]')
-        self.check_forward_hidden(input, hx[1], '[1]')
-        hidden = hx
-        output = []
-        steps = range(input.size(0))
-        for i in steps:
-            hidden = self.step(input[i], hidden)
-            output.append(hidden[0])
-        output = torch.cat(output, 0).view(input.size(0), *output[0].size())
-        return output, hidden
     def __repr__(self):
         s = '{name}({input_size}, {hidden_size}'
         return s.format(name=self.__class__.__name__, **self.__dict__)
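As context for the reset_parameters loop shown above, the usual convention for this kind of recurrent cell is a uniform initialization with stdv = 1 / sqrt(hidden_size); the stdv definition itself is not visible in this hunk, so the value below is an assumption used only to illustrate the in-place update:

import math
import torch
from torch.nn import Parameter

hidden_size, input_size = 2, 2        # illustrative sizes
stdv = 1.0 / math.sqrt(hidden_size)   # assumed definition; not shown in this hunk

weight = Parameter(torch.Tensor(hidden_size, input_size))
weight.data.uniform_(-stdv, stdv)     # the same in-place uniform init the loop applies to every parameter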
@@ -87,29 +66,41 @@ class NeuralFilter1P(torch.nn.Module):
                 "input has inconsistent input_size(-1): got {}, expected {}".format(
                     input.size(1), self.input_size))
-    def check_forward_hidden(self, input, hx, hidden_label=''):
+    def check_forward_hidden(self, input, hx):
         if input.size(1) != hx.size(0):
             raise RuntimeError(
-                "Input batch size {} doesn't match hidden{} batch size {}".format(
-                    input.size(1), hidden_label, hx.size(0)))
+                "Input batch size {} doesn't match hidden batch size {}".format(
+                    input.size(1), hx.size(0)))
         if hx.size(1) != self.hidden_size:
             raise RuntimeError(
-                "hidden{} has inconsistent hidden_size: got {}, expected {}".format(
-                    hidden_label, hx.size(1), self.hidden_size))
+                "hidden has inconsistent hidden_size: got {}, expected {}".format(
+                    hx.size(1), self.hidden_size))
     def step(self, input, hidden):
-        hx, cx = hidden
-        gates = F.linear(input, self.weight_ih, self.bias_ih)
-        forgetgate, cellgate = gates.chunk(2, 1)
-        forgetgate = F.sigmoid(forgetgate)
-        cy = (forgetgate * cx) + cellgate
-        hy = cy
-        return hy, cy
-test = NeuralFilter1P(2, 2)
+        in_gate = F.linear(input, self.weight_in, self.bias_in)
+        forgetgate = F.sigmoid(self.bias_forget)
+        next = (forgetgate * hidden) + in_gate
+        return next
+    def forward(self, input, hx=None):
+        if hx is None:
+            hx = torch.autograd.Variable(input.data.new(input.size(1), self.hidden_size).zero_(), requires_grad=False)
+        self.check_forward_input(input)
+        self.check_forward_hidden(input, hx)
+        hidden = hx
+        output = []
+        steps = range(input.size(0))
+        for i in steps:
+            hidden = self.step(input[i], hidden)
+            output.append(hidden)
+        output = torch.cat(output, 0).view(input.size(0), *output[0].size())
+        return output, hidden
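To make the rewritten recurrence concrete, here is a minimal usage sketch. It assumes the constructor signature implied above (input_size, hidden_size), the PyTorch 0.3-era Variable API used in the diff, and an import path matching the file layout; all of these are assumptions, not part of the commit. Each step computes sigmoid(bias_forget) * hidden + F.linear(input, weight_in, bias_in), i.e. a learnable one-pole filter per hidden unit.

import torch
from torch.autograd import Variable
from neural_filters.NeuralFilter1P import NeuralFilter1P  # import path assumed from the file layout

cell = NeuralFilter1P(2, 2)          # mirrors the test line removed at the end of the old file

x = Variable(torch.randn(5, 2, 2))   # (seq_len, batch, input_size); hx defaults to zeros
output, hidden = cell(x)

print(output.size())                 # expected (5, 2, 2): one filtered value per time step
print(hidden.size())                 # expected (2, 2): final hidden state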