software / neural_filters · Commits

Commit b0ec956f
authored May 07, 2018 by M. François

    tuple hidden

parent b315607e

Showing 3 changed files with 21 additions and 33 deletions (+21 -33):

    neural_filters/neural_filter_2CC.py  +13 -15
    neural_filters/neural_filter_2CD.py   +4  -9
    neural_filters/neural_filter_2R.py    +4  -9
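The change is a pure interface refactor: instead of threading two separate
state tensors (delayed, delayed2) through step() and forward(), the
second-order filters now pack both lags into a single hidden tuple,
mirroring the (h, c) convention of torch.nn.LSTM. A minimal sketch of the
new call site, assuming the constructor takes hidden_size (suggested by
the '{name}({hidden_size})' repr strings below) and input of shape
(seq_len, batch, hidden_size), as the size(0)/size(1) uses in forward()
imply:

    import torch
    from neural_filters.neural_filter_2CC import NeuralFilter2CC

    filt = NeuralFilter2CC(3)                          # hidden_size; constructor assumed
    x = torch.autograd.Variable(torch.randn(5, 2, 3))  # (seq_len, batch, hidden_size)

    output, (h0, h1) = filt(x)                   # hidden defaults to (None, None)
    output, (h0, h1) = filt(x, hidden=(h0, h1))  # feed the state back, LSTM-style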
neural_filters/neural_filter_2CC.py

@@ -99,32 +99,30 @@ class NeuralFilter2CC(torch.nn.Module):
                 "hidden has inconsistent hidden_size: got {}, expected {}".format(
                     hx.size(1), self.hidden_size))
 
-    def step(self, input_var, delayed, delayed2, a=None, b=None):
+    def step(self, input_var, hidden, a=None, b=None):
         if a is None or b is None:
             modulus = F.sigmoid(self.bias_modulus)
             cosangle = F.tanh(self.bias_theta)
             a = 2 * cosangle * modulus
             b = -modulus.pow(2)
-        next_state = input_var + a * delayed + b * delayed2
+        next_state = input_var + a * hidden[0] + b * hidden[1]
         return next_state
 
-    def forward(self, input_var, delayed=None, delayed2=None):
-        if delayed is None:
-            delayed = torch.autograd.Variable(input_var.data.new(input_var.size(1), self.hidden_size).zero_(),
-                                              requires_grad=False)
-        if delayed2 is None:
-            delayed2 = torch.autograd.Variable(input_var.data.new(input_var.size(1), self.hidden_size).zero_(),
-                                               requires_grad=False)
+    def forward(self, input_var, hidden=(None, None)):
+        h0, h1 = hidden
+        if h0 is None:
+            h0 = torch.autograd.Variable(input_var.data.new(input_var.size(1), self.hidden_size).zero_(),
+                                         requires_grad=False)
+        if h1 is None:
+            h1 = torch.autograd.Variable(input_var.data.new(input_var.size(1), self.hidden_size).zero_(),
+                                         requires_grad=False)
 
         self.check_forward_input(input_var)
-        self.check_forward_hidden(input_var, delayed)
-        self.check_forward_hidden(input_var, delayed2)
-        d1 = delayed
-        d2 = delayed2
+        self.check_forward_hidden(input_var, h0)
+        self.check_forward_hidden(input_var, h1)
 
         # do not recompute this at each step to gain efficiency
         modulus = F.sigmoid(self.bias_modulus)
@@ -135,13 +133,13 @@ class NeuralFilter2CC(torch.nn.Module):
         output = []
 
         steps = range(input_var.size(0))
         for i in steps:
-            next_state = self.step(input_var[i], d1, d2, a=a, b=b)
+            next_state = self.step(input_var[i], (h0, h1), a=a, b=b)
             output.append(next_state)
-            d2, d1 = d1, next_state
+            h1, h0 = h0, next_state
 
         output = torch.cat(output, 0).view(input_var.size(0), *output[0].size())
 
-        return output, d1, d2
+        return output, (h0, h1)
 
     def print_param(self):
         modulus = F.sigmoid(self.bias_modulus)
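The coefficients computed in step() when a and b are not given, namely
a = 2 * cosangle * modulus and b = -modulus.pow(2), with
modulus = sigmoid(bias_modulus) in (0, 1) and cosangle = tanh(bias_theta)
in (-1, 1), are the recurrence coefficients of a stable second-order
all-pole filter whose complex-conjugate poles are r*exp(±i*theta):
expanding (1 - r e^{i theta} z^{-1})(1 - r e^{-i theta} z^{-1}) gives
1 - 2 r cos(theta) z^{-1} + r^2 z^{-2}. A quick pure-Python sanity check
of that reading (illustrative, not part of the repo):

    import cmath, math

    r, theta = 0.9, 0.7                     # any 0 < r < 1, 0 < theta < pi
    a, b = 2 * r * math.cos(theta), -r**2   # step()'s coefficients with modulus=r, cosangle=cos(theta)
    p = r * cmath.exp(1j * theta)           # one of the two conjugate poles

    y0, y1 = 0.0, 0.0
    for t in range(50):
        x = 1.0 if t == 0 else 0.0          # unit impulse
        y = x + a * y0 + b * y1             # the update in NeuralFilter2CC.step
        exact = (p**(t + 1) - p.conjugate()**(t + 1)) / (p - p.conjugate())
        assert abs(y - exact.real) < 1e-9   # matches the closed-form impulse response
        y0, y1 = y, y0                      # mirrors 'h1, h0 = h0, next_state'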
neural_filters/neural_filter_2CD.py

@@ -52,16 +52,11 @@ class NeuralFilter2CD(torch.nn.Module):
         s = '{name}({hidden_size})'
         return s.format(name=self.__class__.__name__, **self.__dict__)
 
-    def forward(self, input_var, hx=None):
-        if hx is None:
-            hx = torch.autograd.Variable(input_var.data.new(input_var.size(1), self.hidden_size).zero_(),
-                                         requires_grad=False)
-
-        inter, inter_hidden = self.cell(input_var, hx)
-        output, hidden = self.cell(inter)
-
-        return output, hidden
+    def forward(self, input_var, hx=(None, None)):
+        inter, inter_hidden = self.cell(input_var, hx[0])
+        output, hidden = self.cell(inter, hx[1])
+        return output, (inter_hidden, hidden)
 
     @property
     def denominator(self):
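NeuralFilter2CD cascades the same cell twice. Note that the refactor also
removes this forward()'s own zero-state initialization: with
hx=(None, None), each None is handed down to the cell, which presumably
builds its default state itself using the same Variable(...).zero_()
pattern the deleted lines used. Since the returned (inter_hidden, hidden)
tuple lines up positionally with the hx argument, the state can be fed
straight back in; a hypothetical streaming loop (names and shapes
illustrative):

    import torch
    from neural_filters.neural_filter_2CD import NeuralFilter2CD

    filt = NeuralFilter2CD(4)   # constructor signature assumed from the repr string
    hx = (None, None)           # both stages start from the cell's default state
    for _ in range(3):
        chunk = torch.autograd.Variable(torch.randn(10, 2, 4))
        out, hx = filt(chunk, hx)   # out feeds downstream; hx carries both stages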
neural_filters/neural_filter_2R.py

@@ -64,16 +64,11 @@ class NeuralFilter2R(torch.nn.Module):
         s = '{name}({hidden_size})'
         return s.format(name=self.__class__.__name__, **self.__dict__)
 
-    def forward(self, input_var, hx=None):
-        if hx is None:
-            hx = torch.autograd.Variable(input_var.data.new(input_var.size(1), self.hidden_size).zero_(),
-                                         requires_grad=False)
-
-        interm, interm_hidden = self.first_cell(input_var, hx)
-        output, hidden = self.second_cell(interm)
-
-        return output, hidden
+    def forward(self, input_var, hx=(None, None)):
+        interm, interm_hidden = self.first_cell(input_var, hx[0])
+        output, hidden = self.second_cell(interm, hx[1])
+        return output, (interm_hidden, hidden)
 
     @property
     def denominator(self):
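The same reshaping applies to NeuralFilter2R, whose two stages are
distinct cells (first_cell, second_cell). Returning (interm_hidden,
hidden) rather than only the second stage's state is what makes chunked
processing exact: assuming each cell returns precisely the state it needs
to resume, splitting a sequence and carrying the tuple should reproduce
the single-pass output. A hypothetical check under those assumptions:

    import torch
    from neural_filters.neural_filter_2R import NeuralFilter2R

    filt = NeuralFilter2R(4)    # constructor signature assumed
    x = torch.autograd.Variable(torch.randn(20, 2, 4))

    full, _ = filt(x)           # one pass over the whole sequence
    head, hx = filt(x[:10])     # first half from the default zero state
    tail, _ = filt(x[10:], hx)  # second half resumes from the carried tuple
    assert (full - torch.cat([head, tail], 0)).abs().max().data[0] < 1e-6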