bob / bob.learn.mlp · Commits

Commit a68b2cd8
Authored Jun 06, 2017 by Manuel Günther

    use checked functions from bob::math

Parent: 4241c715
Pipeline #10433 passed with stages in 9 minutes and 20 seconds
Changes: 2 files
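Context for the change: in Bob's C++ core the trailing-underscore variants such as bob::math::prod_ skip input validation, while the plain variants first check that the array shapes are compatible; this commit switches the MLP forward and backward passes to the checked variants. A minimal sketch of that checked/unchecked pattern, for illustration only (Bob's actual implementation differs; the error message here is made up):

    #include <blitz/array.h>
    #include <stdexcept>

    // Unchecked kernel: assumes A.cols == B.rows and C has shape (A.rows, B.cols).
    void prod_(const blitz::Array<double,2>& A, const blitz::Array<double,2>& B,
               blitz::Array<double,2>& C) {
      for (int i = 0; i < C.extent(0); ++i)
        for (int j = 0; j < C.extent(1); ++j) {
          double sum = 0.;
          for (int k = 0; k < A.extent(1); ++k) sum += A(i,k) * B(k,j);
          C(i,j) = sum;
        }
    }

    // Checked front-end: validates the shapes, then calls the unchecked kernel.
    void prod(const blitz::Array<double,2>& A, const blitz::Array<double,2>& B,
              blitz::Array<double,2>& C) {
      if (A.extent(1) != B.extent(0) ||
          C.extent(0) != A.extent(0) || C.extent(1) != B.extent(1))
        throw std::runtime_error("prod: incompatible array shapes");
      prod_(A, B, C);
    }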
bob/learn/mlp/cxx/machine.cpp
...
...
@@ -217,7 +217,7 @@ void bob::learn::mlp::Machine::forward_ (const blitz::Array<double,1>& input,
   //input -> hidden[0]; hidden[0] -> hidden[1], ..., hidden[N-2] -> hidden[N-1]
   for (size_t j = 1; j < m_weight.size(); ++j) {
-    bob::math::prod_(m_buffer[j-1], m_weight[j-1], m_buffer[j]);
+    bob::math::prod(m_buffer[j-1], m_weight[j-1], m_buffer[j]);
     m_buffer[j] += m_bias[j-1];
     for (int i = 0; i < m_buffer[j].extent(0); ++i) {
       m_buffer[j](i) = m_hidden_activation->f(m_buffer[j](i));
...
...
@@ -225,7 +225,7 @@ void bob::learn::mlp::Machine::forward_ (const blitz::Array<double,1>& input,
   }
   //hidden[N-1] -> output
-  bob::math::prod_(m_buffer.back(), m_weight.back(), output);
+  bob::math::prod(m_buffer.back(), m_weight.back(), output);
   output += m_bias.back();
   for (int i = 0; i < output.extent(0); ++i) {
     output(i) = m_output_activation->f(output(i));
...
...
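For reference, the loop changed above computes one dense layer per iteration: m_buffer[j] = f(m_buffer[j-1] * m_weight[j-1] + m_bias[j-1]). A standalone per-sample sketch with plain loops, assuming weights stored as (inputs x outputs) as the vector-matrix prod call suggests, and a tanh hidden activation picked purely for illustration:

    #include <blitz/array.h>
    #include <cmath>

    // One MLP layer: next = tanh(prev * W + b), with W of shape (in, out).
    blitz::Array<double,1> layer_forward(const blitz::Array<double,1>& prev,
                                         const blitz::Array<double,2>& W,
                                         const blitz::Array<double,1>& b) {
      blitz::Array<double,1> next(W.extent(1));
      for (int o = 0; o < W.extent(1); ++o) {
        double sum = b(o);                 // bias, as in m_buffer[j] += m_bias[j-1]
        for (int i = 0; i < W.extent(0); ++i)
          sum += prev(i) * W(i, o);        // the vector-matrix product prod() computes
        next(o) = std::tanh(sum);          // elementwise activation, m_hidden_activation->f
      }
      return next;
    }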
bob/learn/mlp/cxx/trainer.cpp
...
...
@@ -135,8 +135,8 @@ void bob::learn::mlp::Trainer::forward_step(const bob::learn::mlp::Machine& mach
   boost::shared_ptr<bob::learn::activation::Activation> output_actfun = machine.getOutputActivation();
   for (size_t k = 0; k < machine_weight.size(); ++k) { //for all layers
-    if (k == 0) bob::math::prod_(input, machine_weight[k], m_output[k]);
-    else bob::math::prod_(m_output[k-1], machine_weight[k], m_output[k]);
+    if (k == 0) bob::math::prod(input, machine_weight[k], m_output[k]);
+    else bob::math::prod(m_output[k-1], machine_weight[k], m_output[k]);
     boost::shared_ptr<bob::learn::activation::Activation> cur_actfun =
       (k == (machine_weight.size()-1) ? output_actfun : hidden_actfun);
     for (int i = 0; i < (int)m_batch_size; ++i) { //for every example
...
...
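In the trainer the same products run on 2-D arrays with one row per example, which is where the checked variant earns its keep: an output buffer sized for a different batch can silently yield garbage (or read out of bounds) with an unchecked product, but raises immediately with a checked one. A usage fragment continuing the hypothetical prod() sketch from above:

    // Shapes: a batch of 8 examples through a 4 -> 3 layer.
    blitz::Array<double,2> input(8, 4), W(4, 3);
    blitz::Array<double,2> out_ok(8, 3), out_bad(7, 3);
    prod(input, W, out_ok);     // shapes agree: runs
    // prod(input, W, out_bad); // 7 != 8: would throw std::runtime_error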
@@ -164,7 +164,7 @@ void bob::learn::mlp::Trainer::backward_step
   //all other layers
   boost::shared_ptr<bob::learn::activation::Activation> hidden_actfun = machine.getHiddenActivation();
   for (size_t k = m_H; k > 0; --k) {
-    bob::math::prod_(m_error[k], machine_weight[k].transpose(1,0), m_error[k-1]);
+    bob::math::prod(m_error[k], machine_weight[k].transpose(1,0), m_error[k-1]);
     for (int i = 0; i < (int)m_batch_size; ++i) { //for every example
       for (int j = 0; j < m_error[k-1].extent(1); ++j) { //for all variables
         m_error[k-1](i,j) *= hidden_actfun->f_prime_from_f(m_output[k-1](i,j));
...
...
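The hunk above is the standard backpropagation recurrence delta_{k-1} = (delta_k * W_k^T) .* f'(a_{k-1}), where f_prime_from_f evaluates the derivative from the stored activation value. A per-example sketch with plain loops, again assuming tanh (for which f'(y) = 1 - y^2 given y = f(x)) purely for illustration:

    #include <blitz/array.h>

    // Backprop through one layer: err_prev = (err * W^T) .* f'(out_prev).
    void backprop_layer(const blitz::Array<double,1>& err,      // delta_k
                        const blitz::Array<double,2>& W,        // layer-k weights, shape (in, out)
                        const blitz::Array<double,1>& out_prev, // stored activations of layer k-1
                        blitz::Array<double,1>& err_prev) {     // delta_{k-1}, shape (in)
      for (int i = 0; i < W.extent(0); ++i) {
        double sum = 0.;
        for (int o = 0; o < W.extent(1); ++o)
          sum += err(o) * W(i, o);                            // err * W^T, as in transpose(1,0)
        err_prev(i) = sum * (1. - out_prev(i) * out_prev(i)); // f_prime_from_f for tanh
      }
    }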
@@ -175,8 +175,8 @@ void bob::learn::mlp::Trainer::backward_step
   //calculate the derivatives of the cost w.r.t. the weights and biases
   for (size_t k = 0; k < machine_weight.size(); ++k) { //for all layers
     // For the weights
-    if (k == 0) bob::math::prod_(input.transpose(1,0), m_error[k], m_deriv[k]);
-    else bob::math::prod_(m_output[k-1].transpose(1,0), m_error[k], m_deriv[k]);
+    if (k == 0) bob::math::prod(input.transpose(1,0), m_error[k], m_deriv[k]);
+    else bob::math::prod(m_output[k-1].transpose(1,0), m_error[k], m_deriv[k]);
     m_deriv[k] /= m_batch_size;
     // For the biases
     blitz::secondIndex bj;
...
...
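The final hunk averages the per-example gradients: m_deriv[k] = X^T * m_error[k] / m_batch_size, and the blitz::secondIndex that follows (truncated above) suggests the bias derivative is reduced over the batch dimension as well. A plain-loop sketch of both derivatives; the function name and the assumption that biases are averaged identically are mine, not the commit's:

    #include <blitz/array.h>

    // Weight gradient: dW(i,o) = (1/N) * sum_n X(n,i) * err(n,o), i.e. X^T * err / N.
    // Bias gradient:   db(o)   = (1/N) * sum_n err(n,o)  (assumed averaged like dW).
    void cost_derivatives(const blitz::Array<double,2>& X,    // layer input, shape (N, in)
                          const blitz::Array<double,2>& err,  // layer error, shape (N, out)
                          blitz::Array<double,2>& dW,         // shape (in, out)
                          blitz::Array<double,1>& db) {       // shape (out)
      const int N = X.extent(0);
      dW = 0.; db = 0.;
      for (int n = 0; n < N; ++n)
        for (int o = 0; o < err.extent(1); ++o) {
          db(o) += err(n, o);
          for (int i = 0; i < X.extent(1); ++i)
            dW(i, o) += X(n, i) * err(n, o);
        }
      dW /= double(N); db /= double(N);
    }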