Commit 3d9b1423 authored by Manuel Günther's avatar Manuel Günther
Browse files

Updated the C++ (and Python) API to provide write-access to internal memory of machine (see #8)

parent 2001774d
......@@ -85,9 +85,9 @@ void bob::learn::mlp::BackProp::backprop_weight_update(bob::learn::mlp::Machine&
const blitz::Array<double,2>& input)
{
std::vector<blitz::Array<double,2> >& machine_weight =
machine.updateWeights();
machine.getWeights();
std::vector<blitz::Array<double,1> >& machine_bias =
machine.updateBiases();
machine.getBiases();
const std::vector<blitz::Array<double,2> >& deriv = getDerivatives();
for (size_t k=0; k<machine_weight.size(); ++k) { //for all layers
machine_weight[k] -= (((1-m_momentum)*m_learning_rate*deriv[k]) +
......
......@@ -59,8 +59,8 @@ void bob::learn::mlp::unroll(const std::vector<blitz::Array<double,2> >& w,
void bob::learn::mlp::roll(bob::learn::mlp::Machine& machine,
const blitz::Array<double,1>& vec)
{
std::vector<blitz::Array<double,1> >& b = machine.updateBiases();
std::vector<blitz::Array<double,2> >& w = machine.updateWeights();
std::vector<blitz::Array<double,1> >& b = machine.getBiases();
std::vector<blitz::Array<double,2> >& w = machine.getWeights();
roll(w, b, vec);
}
......
......@@ -128,8 +128,8 @@ static int8_t sign (double x) {
void bob::learn::mlp::RProp::rprop_weight_update(bob::learn::mlp::Machine& machine,
const blitz::Array<double,2>& input)
{
std::vector<blitz::Array<double,2> >& machine_weight = machine.updateWeights();
std::vector<blitz::Array<double,1> >& machine_bias = machine.updateBiases();
std::vector<blitz::Array<double,2> >& machine_weight = machine.getWeights();
std::vector<blitz::Array<double,1> >& machine_bias = machine.getBiases();
const std::vector<blitz::Array<double,2> >& deriv = getDerivatives();
for (size_t k=0; k<machine_weight.size(); ++k) { //for all layers
......
......@@ -9,7 +9,7 @@
#define BOB_LEARN_MLP_CONFIG_H
/* Macros that define versions and important names */
#define BOB_LEARN_MLP_API_VERSION 0x0200
#define BOB_LEARN_MLP_API_VERSION 0x0201
#ifdef BOB_IMPORT_VERSION
......
......@@ -223,11 +223,17 @@ namespace bob { namespace learn { namespace mlp {
size_t outputSize () const { return m_weight.back().extent(1); }
/**
* Returns the input subtraction factor
* Returns the input subtraction factor (read-only)
*/
const blitz::Array<double, 1>& getInputSubtraction() const
{ return m_input_sub; }
/**
* Returns the input subtraction factor (read-write)
*/
blitz::Array<double, 1>& getInputSubtraction()
{ return m_input_sub; }
/**
* Sets the current input subtraction factor. We will check that the
* number of inputs (first dimension of weights) matches the number of
......@@ -242,11 +248,17 @@ namespace bob { namespace learn { namespace mlp {
void setInputSubtraction(double v) { m_input_sub = v; }
/**
* Returns the input division factor
* Returns the input division factor (read-only)
*/
const blitz::Array<double, 1>& getInputDivision() const
{ return m_input_div; }
/**
* Returns the input division factor (read-write)
*/
blitz::Array<double, 1>& getInputDivision()
{ return m_input_div; }
/**
* Sets the current input division factor. We will check that the number
* of inputs (first dimension of weights) matches the number of values
......@@ -260,16 +272,15 @@ namespace bob { namespace learn { namespace mlp {
void setInputDivision(double v) { m_input_div = v; }
/**
* Returns the weights of all layers.
* Returns the weights of all layers (read-only)
*/
const std::vector<blitz::Array<double, 2> >& getWeights() const
{ return m_weight; }
/**
* @brief Returns the weights of all layers in order to be updated.
* This method should only be used by trainers.
* @brief Returns the weights of all layers (read-write)
*/
std::vector<blitz::Array<double, 2> >& updateWeights()
std::vector<blitz::Array<double, 2> >& getWeights()
{ return m_weight; }
/**
......@@ -287,17 +298,16 @@ namespace bob { namespace learn { namespace mlp {
/**
* Returns the biases of this classifier, for every hidden layer and
* output layer we have.
* output layer we have (read-only)
*/
const std::vector<blitz::Array<double, 1> >& getBiases() const
{ return m_bias; }
/**
* @brief Returns the biases of this classifier, for every hidden layer
* and output layer we have, in order to be updated.
* This method should only be used by trainers.
* and output layer we have (read-write)
*/
std::vector<blitz::Array<double, 1> >& updateBiases()
std::vector<blitz::Array<double, 1> >& getBiases()
{ return m_bias; }
/**
......
......@@ -257,14 +257,14 @@ being output.\n\
static PyObject* PyBobLearnMLPMachine_getWeights
(PyBobLearnMLPMachineObject* self, void* /*closure*/) {
const std::vector<blitz::Array<double, 2>>& weights = self->cxx->getWeights();
std::vector<blitz::Array<double, 2>>& weights = self->cxx->getWeights();
PyObject* retval = PyTuple_New(weights.size());
if (!retval) return 0;
auto retval_ = make_safe(retval);
int k=0;
for (auto i=weights.begin(); i!=weights.end(); ++i, ++k) {
PyObject* tmp = PyBlitzArray_NUMPY_WRAP(PyBlitzArrayCxx_NewFromConstArray(*i));
PyObject* tmp = PyBlitzArray_NUMPY_WRAP(PyBlitzArrayCxx_NewFromArray(*i));
if (!tmp) return 0;
PyTuple_SET_ITEM(retval, k, tmp);
}
......@@ -343,14 +343,14 @@ to the output before activation.\n\
static PyObject* PyBobLearnMLPMachine_getBiases
(PyBobLearnMLPMachineObject* self, void* /*closure*/) {
const std::vector<blitz::Array<double, 1>>& biases = self->cxx->getBiases();
std::vector<blitz::Array<double, 1>>& biases = self->cxx->getBiases();
PyObject* retval = PyTuple_New(biases.size());
if (!retval) return 0;
auto retval_ = make_safe(retval);
int k=0;
for (auto i=biases.begin(); i!=biases.end(); ++i, ++k) {
PyObject* tmp = PyBlitzArray_NUMPY_WRAP(PyBlitzArrayCxx_NewFromConstArray(*i));
PyObject* tmp = PyBlitzArray_NUMPY_WRAP(PyBlitzArrayCxx_NewFromArray(*i));
if (!tmp) return 0;
PyTuple_SET_ITEM(retval, k, tmp);
}
......@@ -430,7 +430,7 @@ operation in the processing chain - by default, it is set to\n\
static PyObject* PyBobLearnMLPMachine_getInputSubtraction
(PyBobLearnMLPMachineObject* self, void* /*closure*/) {
return PyBlitzArray_NUMPY_WRAP(PyBlitzArrayCxx_NewFromConstArray(self->cxx->getInputSubtraction()));
return PyBlitzArray_NUMPY_WRAP(PyBlitzArrayCxx_NewFromArray(self->cxx->getInputSubtraction()));
}
static int PyBobLearnMLPMachine_setInputSubtraction
......@@ -477,7 +477,7 @@ subtraction - by default, it is set to 1.0.\n\
static PyObject* PyBobLearnMLPMachine_getInputDivision
(PyBobLearnMLPMachineObject* self, void* /*closure*/) {
return PyBlitzArray_NUMPY_WRAP(PyBlitzArrayCxx_NewFromConstArray(self->cxx->getInputDivision()));
return PyBlitzArray_NUMPY_WRAP(PyBlitzArrayCxx_NewFromArray(self->cxx->getInputDivision()));
}
static int PyBobLearnMLPMachine_setInputDivision (PyBobLearnMLPMachineObject* self,
......
......@@ -52,7 +52,7 @@ class PythonRProp(Trainer):
weight_updates = [i * j for (i,j) in zip(self.previous_derivatives, self.derivatives)]
# Iterate over each weight and bias and see what to do:
new_weights = [numpy.array(w) for w in machine.weights]
new_weights = machine.weights
for k, up in enumerate(weight_updates):
for i in range(up.shape[0]):
for j in range(up.shape[1]):
......@@ -71,7 +71,7 @@ class PythonRProp(Trainer):
if self.train_biases:
bias_updates = [i * j for (i,j) in zip(self.previous_bias_derivatives, self.bias_derivatives)]
new_biases = [numpy.array(b) for b in machine.biases]
new_biases = machine.biases
for k, up in enumerate(bias_updates):
for i in range(up.shape[0]):
if up[i] > 0:
......
2.0.12b0
\ No newline at end of file
2.1.0b0
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment