Commit a68b2cd8 authored by Manuel Günther

use checked functions from bob::math

parent 4241c715
Pipeline #10433 passed with stages in 9 minutes and 20 seconds
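
The change swaps every unchecked bob::math::prod_ call in the forward and backward passes for its checked counterpart bob::math::prod: the trailing-underscore variants skip shape validation for speed, while the plain-named versions verify that the operands have compatible dimensions before computing. Below is a minimal stand-alone sketch of that checked/unchecked split; it deliberately avoids the bob and Blitz++ APIs, so the container types, shape tests and the exception used here are illustrative stand-ins rather than the library's actual interface.

```cpp
#include <cstddef>
#include <stdexcept>
#include <vector>

using Matrix = std::vector<std::vector<double>>;
using Vector = std::vector<double>;

// Unchecked core, analogous in spirit to bob::math::prod_:
// computes y = x * M (row vector times matrix) and assumes the caller
// has already made sure the shapes are consistent.
void prod_(const Vector& x, const Matrix& M, Vector& y) {
  for (std::size_t j = 0; j < y.size(); ++j) {
    double sum = 0.0;
    for (std::size_t i = 0; i < x.size(); ++i) sum += x[i] * M[i][j];
    y[j] = sum;
  }
}

// Checked wrapper, analogous in spirit to bob::math::prod:
// validates the operand shapes first, then delegates to the unchecked core.
void prod(const Vector& x, const Matrix& M, Vector& y) {
  if (M.empty() || M.size() != x.size() || M.front().size() != y.size())
    throw std::runtime_error("prod: incompatible operand shapes");
  prod_(x, M, y);
}

int main() {
  Vector x{1.0, 2.0};                          // 1x2 row vector
  Matrix W{{1.0, 0.0, 2.0}, {0.0, 1.0, 3.0}};  // 2x3 weight matrix
  Vector y(3);                                 // 1x3 result
  prod(x, W, y);  // checked call: throws instead of reading out of bounds
  return 0;
}
```
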
@@ -217,7 +217,7 @@ void bob::learn::mlp::Machine::forward_ (const blitz::Array<double,1>& input,
//input -> hidden[0]; hidden[0] -> hidden[1], ..., hidden[N-2] -> hidden[N-1]
for (size_t j=1; j<m_weight.size(); ++j) {
- bob::math::prod_(m_buffer[j-1], m_weight[j-1], m_buffer[j]);
+ bob::math::prod(m_buffer[j-1], m_weight[j-1], m_buffer[j]);
m_buffer[j] += m_bias[j-1];
for (int i=0; i<m_buffer[j].extent(0); ++i) {
m_buffer[j](i) = m_hidden_activation->f(m_buffer[j](i));
@@ -225,7 +225,7 @@ void bob::learn::mlp::Machine::forward_ (const blitz::Array<double,1>& input,
}
//hidden[N-1] -> output
- bob::math::prod_(m_buffer.back(), m_weight.back(), output);
+ bob::math::prod(m_buffer.back(), m_weight.back(), output);
output += m_bias.back();
for (int i=0; i<output.extent(0); ++i) {
output(i) = m_output_activation->f(output(i));
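
For reference, the Machine::forward_ hunks above implement the per-layer forward pass; writing b_0 for the input, b_j for m_buffer[j], W_j and β_j for m_weight[j] and m_bias[j], g and h for the hidden and output activations, and N for the number of weight matrices, the touched lines compute (the symbols are just shorthand for the members that appear in the diff):

```math
b_j = g\bigl(b_{j-1} W_{j-1} + \beta_{j-1}\bigr), \quad j = 1,\dots,N-1,
\qquad
o = h\bigl(b_{N-1} W_{N-1} + \beta_{N-1}\bigr)
```
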
@@ -135,8 +135,8 @@ void bob::learn::mlp::Trainer::forward_step(const bob::learn::mlp::Machine& mach
boost::shared_ptr<bob::learn::activation::Activation> output_actfun = machine.getOutputActivation();
for (size_t k=0; k<machine_weight.size(); ++k) { //for all layers
- if (k == 0) bob::math::prod_(input, machine_weight[k], m_output[k]);
- else bob::math::prod_(m_output[k-1], machine_weight[k], m_output[k]);
+ if (k == 0) bob::math::prod(input, machine_weight[k], m_output[k]);
+ else bob::math::prod(m_output[k-1], machine_weight[k], m_output[k]);
boost::shared_ptr<bob::learn::activation::Activation> cur_actfun =
(k == (machine_weight.size()-1) ? output_actfun : hidden_actfun );
for (int i=0; i<(int)m_batch_size; ++i) { //for every example
@@ -164,7 +164,7 @@ void bob::learn::mlp::Trainer::backward_step
//all other layers
boost::shared_ptr<bob::learn::activation::Activation> hidden_actfun = machine.getHiddenActivation();
for (size_t k=m_H; k>0; --k) {
- bob::math::prod_(m_error[k], machine_weight[k].transpose(1,0), m_error[k-1]);
+ bob::math::prod(m_error[k], machine_weight[k].transpose(1,0), m_error[k-1]);
for (int i=0; i<(int)m_batch_size; ++i) { //for every example
for (int j=0; j<m_error[k-1].extent(1); ++j) { //for all variables
m_error[k-1](i,j) *= hidden_actfun->f_prime_from_f(m_output[k-1](i,j));
@@ -175,8 +175,8 @@ void bob::learn::mlp::Trainer::backward_step
//calculate the derivatives of the cost w.r.t. the weights and biases
for (size_t k=0; k<machine_weight.size(); ++k) { //for all layers
// For the weights
- if (k == 0) bob::math::prod_(input.transpose(1,0), m_error[k], m_deriv[k]);
- else bob::math::prod_(m_output[k-1].transpose(1,0), m_error[k], m_deriv[k]);
+ if (k == 0) bob::math::prod(input.transpose(1,0), m_error[k], m_deriv[k]);
+ else bob::math::prod(m_output[k-1].transpose(1,0), m_error[k], m_deriv[k]);
m_deriv[k] /= m_batch_size;
// For the biases
blitz::secondIndex bj;
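
Similarly, the Trainer::backward_step hunks back-propagate the per-example errors and accumulate the cost derivatives. Writing e_k for m_error[k], o_k for m_output[k] (with o_{-1} standing for the input batch), B for m_batch_size, and g' for the hidden activation's derivative expressed in terms of the activation value (f_prime_from_f), the touched lines correspond to:

```math
e_{k-1} = \bigl(e_k W_k^{\top}\bigr) \odot g'(o_{k-1}),
\qquad
\frac{\partial J}{\partial W_k} = \frac{1}{B}\, o_{k-1}^{\top} e_k
```
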