diff --git a/setup.py b/setup.py
index 2caab539931285064815bee69241105e8bd441d2..106d48bc8de3db523331320ee4b99823463f526d 100644
--- a/setup.py
+++ b/setup.py
@@ -63,6 +63,7 @@ setup(
         ),
       Extension("xbob.learn.mlp._library",
         [
+          "xbob/learn/mlp/backprop.cpp",
           "xbob/learn/mlp/trainer.cpp",
           "xbob/learn/mlp/cxx/machine.cpp",
           "xbob/learn/mlp/cxx/cross_entropy.cpp",
diff --git a/xbob/learn/mlp/backprop.cpp b/xbob/learn/mlp/backprop.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f71ee7ce2e1c05b9cc0108e28fb894fa2d286d91
--- /dev/null
+++ b/xbob/learn/mlp/backprop.cpp
@@ -0,0 +1,620 @@
+/**
+ * @author Andre Anjos <andre.anjos@idiap.ch>
+ * @date Tue  6 May 12:32:39 2014 CEST
+ *
+ * @brief Bindings for an MLP
+ *
+ * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+ */
+
+#define XBOB_LEARN_MLP_MODULE
+#include <xbob.blitz/cppapi.h>
+#include <xbob.blitz/cleanup.h>
+#include <xbob.learn.mlp/api.h>
+#include <structmember.h>
+
+#include "utils.h"
+
+/**************************************
+ * Implementation of BackProp trainer *
+ **************************************/
+
+PyDoc_STRVAR(s_trainer_str, XBOB_EXT_MODULE_PREFIX ".BackProp");
+
+PyDoc_STRVAR(s_trainer_doc,
+"BackProp(batch_size, cost, [trainer, [train_biases]]) -> new BackProp\n\
+\n\
+BackProp(other) -> new BackProp\n\
+\n\
+Sets an MLP to perform discrimination based on vanilla error\n\
+back-propagation as defined in \"Pattern Recognition and Machine\n\
+Learning\" by C.M. Bishop, chapter 5 or else, \"Pattern\n\
+Classification\" by Duda, Hart and Stork, chapter 6.\n\
+\n\
+To create a new trainer, either pass the batch-size, cost functor,\n\
+machine and a biases-training flag or another trainer you'd like\n\
+the parameters copied from.\n\
+\n\
+Keyword parameters:\n\
+\n\
+batch_size, int\n\
+   The size of each batch used for the forward and backward steps.\n\
+   If you set this to ``1``, then you are implementing stochastic\n\
+   training.\n\
+   \n\
+   .. note::\n\
+   \n\
+      This setting affects the convergence.\n\
+\n\
+cost, :py:class:`xbob.learn.mlp.Cost`\n\
+   An object that can calculate the cost at every iteration.\n\
+\n\
+machine, :py:class:`xbob.learn.mlp.Machine`\n\
+   The machine that will be used as a basis for this trainer's\n\
+   internal properties (cache sizes, for instance).\n\
+\n\
+train_biases, bool\n\
+   A boolean indicating if we should train the biases weights (set\n\
+   it to ``True``) or not (set it to ``False``).\n\
+\n\
+other, :py:class:`xbob.learn.mlp.Trainer`\n\
+   Another trainer from which this new copy will get its properties\n\
+   from. If you use this constructor then a new (deep) copy of the\n\
+   trainer is created.\n\
+\n\
+");
+
+static int PyBobLearnMLPBackProp_init_discrete
+(PyBobLearnMLPBackPropObject* self, PyObject* args, PyObject* kwds) {
+
+  /* Parses input arguments in a single shot */
+  static const char* const_kwlist[] = {
+    "batch_size",
+    "cost",
+    "machine",
+    "train_biases",
+    0
+  };
+  static char** kwlist = const_cast<char**>(const_kwlist);
+
+  Py_ssize_t batch_size = 0;
+  PyBobLearnCostObject* cost = 0;
+  PyBobLearnMLPMachineObject* machine = 0;
+  PyObject* train_biases = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwds, "nO!|O!O", kwlist,
+        &batch_size,
+        &PyBobLearnCost_Type, &cost,
+        &PyBobLearnMLPMachine_Type, &machine,
+        &train_biases)) return -1;
+
+  try {
+    if (machine && train_biases) {
+      self->cxx = new bob::learn::mlp::BackProp(batch_size, cost->cxx,
+          *machine->cxx, PyObject_IsTrue(train_biases));
+    }
+    else if (machine) {
+      self->cxx = new bob::learn::mlp::BackProp(batch_size, cost->cxx,
+          *machine->cxx);
+    }
+    else if (train_biases) {
+      PyErr_Format(PyExc_RuntimeError, "cannot provide a flag for `train_biases' and do not provide a `machine' upon initialisation of type `%s'", Py_TYPE(self)->tp_name);
+      return -1;
+    }
+    else {
+      self->cxx = new bob::learn::mlp::BackProp(batch_size, cost->cxx);
+    }
+  }
+  catch (std::exception& ex) {
+    PyErr_SetString(PyExc_RuntimeError, ex.what());
+    return -1;
+  }
+  catch (...) {
+    PyErr_Format(PyExc_RuntimeError, "cannot create new object of type `%s' - unknown exception thrown", Py_TYPE(self)->tp_name);
+    return -1;
+  }
+
+  self->parent.cxx = self->cxx;
+
+  return 0;
+
+}
+
+static int PyBobLearnMLPBackProp_init_copy
+(PyBobLearnMLPBackPropObject* self, PyObject* args, PyObject* kwds) {
+
+  /* Parses input arguments in a single shot */
+  static const char* const_kwlist[] = {"other", 0};
+  static char** kwlist = const_cast<char**>(const_kwlist);
+
+  PyObject* other = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!", kwlist,
+        &PyBobLearnMLPBackProp_Type, &other)) return -1;
+
+  auto copy = reinterpret_cast<PyBobLearnMLPBackPropObject*>(other);
+
+  try {
+    self->cxx = new bob::learn::mlp::BackProp(*(copy->cxx));
+  }
+  catch (std::exception& ex) {
+    PyErr_SetString(PyExc_RuntimeError, ex.what());
+    return -1;
+  }
+  catch (...) {
+    PyErr_Format(PyExc_RuntimeError, "cannot create new object of type `%s' - unknown exception thrown", Py_TYPE(self)->tp_name);
+    return -1;
+  }
+
+  self->parent.cxx = self->cxx;
+
+  return 0;
+
+}
+
+static int PyBobLearnMLPBackProp_init(PyBobLearnMLPBackPropObject* self,
+    PyObject* args, PyObject* kwds) {
+
+  Py_ssize_t nargs = (args?PyTuple_Size(args):0) + (kwds?PyDict_Size(kwds):0);
+
+  switch (nargs) {
+
+    case 1:
+
+      return PyBobLearnMLPBackProp_init_copy(self, args, kwds);
+
+    default:
+
+      return PyBobLearnMLPBackProp_init_discrete(self, args, kwds);
+  }
+
+  return -1;
+
+}
+
+static void PyBobLearnMLPBackProp_delete
+(PyBobLearnMLPBackPropObject* self) {
+
+  self->parent.cxx = 0;
+  delete self->cxx;
+  Py_TYPE(self)->tp_free((PyObject*)self);
+
+}
+
+int PyBobLearnMLPBackProp_Check(PyObject* o) {
+  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnMLPBackProp_Type));
+}
+
+static PyObject* PyBobLearnMLPBackProp_new
+(PyTypeObject* type, PyObject*, PyObject*) {
+
+  /* Allocates the python object itself */
+  PyBobLearnMLPBackPropObject* self = (PyBobLearnMLPBackPropObject*)type->tp_alloc(type, 0);
+
+  self->cxx = 0;
+  self->parent.cxx = 0;
+
+  return reinterpret_cast<PyObject*>(self);
+
+}
+
+PyDoc_STRVAR(s_learning_rate_str, "learning_rate");
+PyDoc_STRVAR(s_learning_rate_doc,
+"The learning rate (:math:`\\alpha`) to be used for the\n\
+back-propagation (defaults to ``0.1``)."
+);
+
+static PyObject* PyBobLearnMLPBackProp_getLearningRate
+(PyBobLearnMLPBackPropObject* self, void* /*closure*/) {
+  return Py_BuildValue("d", self->cxx->getLearningRate());
+}
+
+static int PyBobLearnMLPBackProp_setLearningRate
+(PyBobLearnMLPBackPropObject* self, PyObject* o, void* /*closure*/) {
+
+  double value = PyFloat_AsDouble(o);
+  if (PyErr_Occurred()) return -1;
+  self->cxx->setLearningRate(value);
+  return 0;
+
+}
+
+PyDoc_STRVAR(s_momentum_str, "momentum");
+PyDoc_STRVAR(s_momentum_doc,
+"The momentum (:math:`\\mu`) to be used for the back-propagation.\n\
+This value allows for some *memory* on previous weight updates to\n\
+be used for the next update (defaults to ``0.0``).");
+
+static PyObject* PyBobLearnMLPBackProp_getMomentum
+(PyBobLearnMLPBackPropObject* self, void* /*closure*/) {
+  return Py_BuildValue("d", self->cxx->getMomentum());
+}
+
+static int PyBobLearnMLPBackProp_setMomentum
+(PyBobLearnMLPBackPropObject* self, PyObject* o, void* /*closure*/) {
+
+  double value = PyFloat_AsDouble(o);
+  if (PyErr_Occurred()) return -1;
+  self->cxx->setMomentum(value);
+  return 0;
+
+}
+
+PyDoc_STRVAR(s_previous_derivatives_str, "previous_derivatives");
+PyDoc_STRVAR(s_previous_derivatives_doc,
+"The derivatives of the cost w.r.t. to the specific\n\
+**weights** of the network, from the previous training step.\n\
+The derivatives are arranged to match the organization\n\
+of weights of the machine being trained.");
+
+static PyObject* PyBobLearnMLPBackProp_getPreviousDerivatives
+(PyBobLearnMLPBackPropObject* self, void* /*closure*/) {
+  return convert_vector<2>(self->cxx->getPreviousDerivatives());
+}
+
+static int PyBobLearnMLPBackProp_setPreviousDerivatives
+(PyBobLearnMLPBackPropObject* self, PyObject* o, void* /*closure*/) {
+
+  std::vector<blitz::Array<double,2>> bzvec;
+  int retval = convert_tuple<2>((PyObject*)self, s_previous_derivatives_str,
+      o, bzvec);
+  if (retval < 0) return retval;
+
+  try {
+    self->cxx->setPreviousDerivatives(bzvec);
+  }
+  catch (std::exception& ex) {
+    PyErr_SetString(PyExc_RuntimeError, ex.what());
+    return -1;
+  }
+  catch (...) {
+    PyErr_Format(PyExc_RuntimeError, "cannot reset `%s' of %s: unknown exception caught", Py_TYPE(self)->tp_name, s_previous_derivatives_str);
+    return -1;
+  }
+
+  return 0;
+
+}
+
+PyDoc_STRVAR(s_previous_bias_derivatives_str, "previous_bias_derivatives");
+PyDoc_STRVAR(s_previous_bias_derivatives_doc,
+"The derivatives of the cost w.r.t. to the specific\n\
+**biases** of the network, from the previous training step.\n\
+The derivatives are arranged to match the organization\n\
+of weights of the machine being trained.");
+
+static PyObject* PyBobLearnMLPBackProp_getPreviousBiasDerivatives
+(PyBobLearnMLPBackPropObject* self, void* /*closure*/) {
+  return convert_vector<1>(self->cxx->getPreviousBiasDerivatives());
+}
+
+static int PyBobLearnMLPBackProp_setPreviousBiasDerivatives
+(PyBobLearnMLPBackPropObject* self, PyObject* o, void* /*closure*/) {
+
+  std::vector<blitz::Array<double,1>> bzvec;
+  int retval = convert_tuple<1>((PyObject*)self,
+      s_previous_bias_derivatives_str, o, bzvec);
+  if (retval < 0) return retval;
+
+  try {
+    self->cxx->setPreviousBiasDerivatives(bzvec);
+  }
+  catch (std::exception& ex) {
+    PyErr_SetString(PyExc_RuntimeError, ex.what());
+    return -1;
+  }
+  catch (...) {
+    PyErr_Format(PyExc_RuntimeError, "cannot reset `%s' of %s: unknown exception caught", Py_TYPE(self)->tp_name, s_previous_bias_derivatives_str);
+    return -1;
+  }
+
+  return 0;
+
+}
+
+static PyGetSetDef PyBobLearnMLPBackProp_getseters[] = {
+    {
+      s_learning_rate_str,
+      (getter)PyBobLearnMLPBackProp_getLearningRate,
+      (setter)PyBobLearnMLPBackProp_setLearningRate,
+      s_learning_rate_doc,
+      0
+    },
+    {
+      s_momentum_str,
+      (getter)PyBobLearnMLPBackProp_getMomentum,
+      (setter)PyBobLearnMLPBackProp_setMomentum,
+      s_momentum_doc,
+      0
+    },
+    {
+      s_previous_derivatives_str,
+      (getter)PyBobLearnMLPBackProp_getPreviousDerivatives,
+      (setter)PyBobLearnMLPBackProp_setPreviousDerivatives,
+      s_previous_derivatives_doc,
+      0
+    },
+    {
+      s_previous_bias_derivatives_str,
+      (getter)PyBobLearnMLPBackProp_getPreviousBiasDerivatives,
+      (setter)PyBobLearnMLPBackProp_setPreviousBiasDerivatives,
+      s_previous_bias_derivatives_doc,
+      0
+    },
+    {0}  /* Sentinel */
+};
+
+PyDoc_STRVAR(s_reset_str, "reset");
+PyDoc_STRVAR(s_reset_doc,
+"Re-initializes the whole training apparatus to start training\n\
+a new machine. This will effectively reset previous derivatives\n\
+to zero.");
+
+static PyObject* PyBobLearnMLPBackProp_reset (PyBobLearnMLPBackPropObject* self) {
+
+  self->cxx->reset();
+  Py_RETURN_NONE;
+
+}
+
+PyDoc_STRVAR(s_train_str, "train");
+PyDoc_STRVAR(s_train_doc,
+"o.train(machine, input, target) -> None\n\
+\n\
+Trains the MLP to perform discrimination using error back-propagation\n\
+\n\
+Call this method to train the MLP to perform discrimination using\n\
+back-propagation with (optional) momentum. Concretely, this executes\n\
+the following update rule for the weights (and biases, optionally):\n\
+\n\
+.. math::\n\
+   :nowrap:\n\
+   \n\
+   \\begin{align}\n\
+     \\theta_j(t+1) & = & \\theta_j - [ (1-\\mu)\\Delta\\theta_j(t) + \\mu\\Delta\\theta_j(t-1) ] \\\\\n\
+     \\Delta\\theta_j(t) & = & \\alpha\\frac{1}{N}\\sum_{i=1}^{N}\\frac{\\partial J(x_i; \\theta)}{\\partial \\theta_j}\n\
+    \\end{align}\n\
+\n\
+The training is executed outside the machine context, but uses all\n\
+the current machine layout. The given machine is updated with new\n\
+weights and biases at the end of the training that is performed a single\n\
+time.\n\
+\n\
+You must iterate (in Python) as much as you want to refine the training.\n\
+\n\
+The machine given as input is checked for compatibility with the current\n\
+initialized settings. If the two are not compatible, an exception is\n\
+thrown.\n\
+\n\
+.. note::\n\
+   \n\
+   In BackProp, training is done in batches. You should set the batch\n\
+   size properly at class initialization or use setBatchSize().\n\
+\n\
+.. note::\n\
+   \n\
+   The machine is **not** initialized randomly at each call to this\n\
+   method. It is your task to call\n\
+   :py:meth:`xbob.learn.mlp.Machine.randomize` once at the machine\n\
+   you want to train and then call this method as many times as you\n\
+   think is necessary. This design allows for a *stopping criteria*\n\
+   to be encoded outside the scope of this trainer and for this method\n\
+   to only focus on applying the training when requested to.\n\
+   Stochastic training can be executed by setting the ``batch_size``\n\
+   to 1.\n\
+\n\
+Keyword arguments:\n\
+\n\
+machine, :py:class:`xbob.learn.mlp.Machine`\n\
+   The machine that will be trained. You must have called\n\
+   :py:meth:`xbob.learn.mlp.Trainer.initialize` with a similarly\n\
+   configured machine before being able to call this method, or an\n\
+   exception may be thrown.\n\
+\n\
+input, array-like, 2D with ``float64`` as data type\n\
+   A 2D :py:class:`numpy.ndarray` with 64-bit floats containing the\n\
+   input data for the MLP to which this training step will be based\n\
+   on. The matrix should be organized so each input (example) lies on\n\
+   a single row of ``input``.\n\
+\n\
+target, array-like, 2D with ``float64`` as data type\n\
+   A 2D :py:class:`numpy.ndarray` with 64-bit floats containing the\n\
+   target data for the MLP to which this training step will be based\n\
+   on. The matrix should be organized so each target lies on a single\n\
+   row of ``target``, matching each input example in ``input``.\n\
+\n\
+");
+
+static PyObject* PyBobLearnMLPBackProp_train
+(PyBobLearnMLPBackPropObject* self, PyObject* args, PyObject* kwds) {
+
+  /* Parses input arguments in a single shot */
+  static const char* const_kwlist[] = {"machine", "input", "target", 0};
+  static char** kwlist = const_cast<char**>(const_kwlist);
+
+  PyBobLearnMLPMachineObject* machine = 0;
+  PyBlitzArrayObject* input = 0;
+  PyBlitzArrayObject* target = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!O&O&", kwlist,
+        &PyBobLearnMLPMachine_Type, &machine,
+        &PyBlitzArray_Converter, &input,
+        &PyBlitzArray_Converter, &target)) return 0;
+
+  if (input->type_num != NPY_FLOAT64 || input->ndim != 2) {
+    PyErr_Format(PyExc_TypeError, "`%s' only supports 2D 64-bit float arrays for input array `input'", Py_TYPE(self)->tp_name);
+    return 0;
+  }
+
+  if (target->type_num != NPY_FLOAT64 || target->ndim != 2) {
+    PyErr_Format(PyExc_TypeError, "`%s' only supports 2D 64-bit float arrays for input array `target'", Py_TYPE(self)->tp_name);
+    return 0;
+  }
+
+  try {
+    self->cxx->train(*machine->cxx,
+        *PyBlitzArrayCxx_AsBlitz<double,2>(input),
+        *PyBlitzArrayCxx_AsBlitz<double,2>(target)
+        );
+  }
+  catch (std::exception& ex) {
+    PyErr_SetString(PyExc_RuntimeError, ex.what());
+    return 0;
+  }
+  catch (...) {
+    PyErr_Format(PyExc_RuntimeError, "cannot perform training-step for `%s': unknown exception caught", Py_TYPE(self)->tp_name);
+    return 0;
+  }
+
+  Py_RETURN_NONE;
+
+}
+
+/**
+PyDoc_STRVAR(s_set_derivative_str, "set_derivative");
+PyDoc_STRVAR(s_set_derivative_doc,
+    "Sets the cost derivative w.r.t. the **weights** for a given layer.");
+
+static PyObject* PyBobLearnMLPBackProp_setDerivativeOnLayer
+(PyBobLearnMLPBackPropObject* self, PyObject* args, PyObject* kwds) {
+
+  /* Parses input arguments in a single shot *\/
+  static const char* const_kwlist[] = {"array", "layer", 0};
+  static char** kwlist = const_cast<char**>(const_kwlist);
+
+  PyBlitzArrayObject* array = 0;
+  Py_ssize_t layer = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&n", kwlist,
+        &PyBlitzArray_Converter, &array, &layer)) return 0;
+
+  if (array->type_num != NPY_FLOAT64 || array->ndim != 2) {
+    PyErr_Format(PyExc_TypeError, "`%s.%s' only supports 2D 64-bit float arrays for argument `array' (or any other object coercible to that), but you provided an object with %" PY_FORMAT_SIZE_T "d dimensions and with type `%s' which is not compatible - check your input", Py_TYPE(self)->tp_name, s_set_derivative_str, array->ndim, PyBlitzArray_TypenumAsString(array->type_num));
+    return 0;
+  }
+
+  try {
+    self->cxx->setDerivative(*PyBlitzArrayCxx_AsBlitz<double,2>(array), layer);
+  }
+  catch (std::exception& ex) {
+    PyErr_SetString(PyExc_RuntimeError, ex.what());
+    return 0;
+  }
+  catch (...) {
+    PyErr_Format(PyExc_RuntimeError, "cannot set derivative at layer %" PY_FORMAT_SIZE_T "d for `%s': unknown exception caught", layer, Py_TYPE(self)->tp_name);
+    return 0;
+  }
+
+  Py_RETURN_NONE;
+
+}
+
+PyDoc_STRVAR(s_set_bias_derivative_str, "set_bias_derivative");
+PyDoc_STRVAR(s_set_bias_derivative_doc,
+    "Sets the cost derivative w.r.t. the **biases** for a given layer.");
+
+static PyObject* PyBobLearnMLPBackProp_setBiasDerivativeOnLayer
+(PyBobLearnMLPBackPropObject* self, PyObject* args, PyObject* kwds) {
+
+  /* Parses input arguments in a single shot *\/
+  static const char* const_kwlist[] = {"array", "layer", 0};
+  static char** kwlist = const_cast<char**>(const_kwlist);
+
+  PyBlitzArrayObject* array = 0;
+  Py_ssize_t layer = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&n", kwlist,
+        &PyBlitzArray_Converter, &array, &layer)) return 0;
+
+  if (array->type_num != NPY_FLOAT64 || array->ndim != 1) {
+    PyErr_Format(PyExc_TypeError, "`%s.%s' only supports 1D 64-bit float arrays for argument `array' (or any other object coercible to that), but you provided an object with %" PY_FORMAT_SIZE_T "d dimensions and with type `%s' which is not compatible - check your input", Py_TYPE(self)->tp_name, s_set_bias_derivative_str, array->ndim, PyBlitzArray_TypenumAsString(array->type_num));
+    return 0;
+  }
+
+  try {
+    self->cxx->setBiasDerivative(*PyBlitzArrayCxx_AsBlitz<double,1>(array), layer);
+  }
+  catch (std::exception& ex) {
+    PyErr_SetString(PyExc_RuntimeError, ex.what());
+    return 0;
+  }
+  catch (...) {
+    PyErr_Format(PyExc_RuntimeError, "cannot set bias derivative at layer %" PY_FORMAT_SIZE_T "d for `%s': unknown exception caught", layer, Py_TYPE(self)->tp_name);
+    return 0;
+  }
+
+  Py_RETURN_NONE;
+
+}
+**/
+
+static PyMethodDef PyBobLearnMLPBackProp_methods[] = {
+  {
+    s_reset_str,
+    (PyCFunction)PyBobLearnMLPBackProp_reset,
+    METH_NOARGS,
+    s_reset_doc,
+  },
+  {
+    s_train_str,
+    (PyCFunction)PyBobLearnMLPBackProp_train,
+    METH_VARARGS|METH_KEYWORDS,
+    s_train_doc,
+  },
+  /**
+  {
+    s_set_derivative_str,
+    (PyCFunction)PyBobLearnMLPBackProp_setDerivativeOnLayer,
+    METH_VARARGS|METH_KEYWORDS,
+    s_set_derivative_doc,
+  },
+  {
+    s_set_bias_derivative_str,
+    (PyCFunction)PyBobLearnMLPBackProp_setBiasDerivativeOnLayer,
+    METH_VARARGS|METH_KEYWORDS,
+    s_set_bias_derivative_doc,
+  },
+  **/
+  {0} /* Sentinel */
+};
+
+PyTypeObject PyBobLearnMLPBackProp_Type = {
+    PyVarObject_HEAD_INIT(0, 0)
+    s_trainer_str,                                 /* tp_name */
+    sizeof(PyBobLearnMLPBackPropObject),           /* tp_basicsize */
+    0,                                             /* tp_itemsize */
+    (destructor)PyBobLearnMLPBackProp_delete,      /* tp_dealloc */
+    0,                                             /* tp_print */
+    0,                                             /* tp_getattr */
+    0,                                             /* tp_setattr */
+    0,                                             /* tp_compare */
+    0, //(reprfunc)PyBobLearnMLPBackProp_Repr,     /* tp_repr */
+    0,                                             /* tp_as_number */
+    0,                                             /* tp_as_sequence */
+    0,                                             /* tp_as_mapping */
+    0,                                             /* tp_hash */
+    0,                                             /* tp_call */
+    0, //(reprfunc)PyBobLearnMLPBackProp_Repr,     /* tp_str */
+    0,                                             /* tp_getattro */
+    0,                                             /* tp_setattro */
+    0,                                             /* tp_as_buffer */
+    Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,      /* tp_flags */
+    s_trainer_doc,                                 /* tp_doc */
+    0,                                             /* tp_traverse */
+    0,                                             /* tp_clear */
+    0,                                             /* tp_richcompare */
+    0,                                             /* tp_weaklistoffset */
+    0,                                             /* tp_iter */
+    0,                                             /* tp_iternext */
+    PyBobLearnMLPBackProp_methods,                 /* tp_methods */
+    0,                                             /* tp_members */
+    PyBobLearnMLPBackProp_getseters,               /* tp_getset */
+    0,                                             /* tp_base */
+    0,                                             /* tp_dict */
+    0,                                             /* tp_descr_get */
+    0,                                             /* tp_descr_set */
+    0,                                             /* tp_dictoffset */
+    (initproc)PyBobLearnMLPBackProp_init,          /* tp_init */
+    0,                                             /* tp_alloc */
+    PyBobLearnMLPBackProp_new,                     /* tp_new */
+};
diff --git a/xbob/learn/mlp/include/xbob.learn.mlp/api.h b/xbob/learn/mlp/include/xbob.learn.mlp/api.h
index 41aa51816ef594bd387ba83479a1561263b62c48..b6358d0c9a4bcce99b7954dfeb7886a8722e12d2 100644
--- a/xbob/learn/mlp/include/xbob.learn.mlp/api.h
+++ b/xbob/learn/mlp/include/xbob.learn.mlp/api.h
@@ -16,6 +16,7 @@
 #include "cross_entropy.h"
 #include "shuffler.h"
 #include "trainer.h"
+#include "backprop.h"
 
 #define XBOB_LEARN_MLP_MODULE_PREFIX xbob.learn.mlp
 #define XBOB_LEARN_MLP_MODULE_NAME _library
@@ -42,6 +43,8 @@ enum _PyBobLearnMLP_ENUM {
   // Bindings for xbob.learn.mlp.Trainer
   PyBobLearnMLPTrainer_Type_NUM,
   PyBobLearnMLPTrainer_Check_NUM,
+  PyBobLearnMLPBackProp_Type_NUM,
+  PyBobLearnMLPBackProp_Check_NUM,
   // Total number of C API pointers
   PyXbobLearnMLP_API_pointers
 };
@@ -117,6 +120,16 @@ typedef struct {
 #define PyBobLearnMLPTrainer_Check_RET int
 #define PyBobLearnMLPTrainer_Check_PROTO (PyObject* o)
 
+typedef struct {
+  PyBobLearnMLPTrainerObject parent;
+  bob::learn::mlp::BackProp* cxx;
+} PyBobLearnMLPBackPropObject;
+
+#define PyBobLearnMLPBackProp_Type_TYPE PyTypeObject
+
+#define PyBobLearnMLPBackProp_Check_RET int
+#define PyBobLearnMLPBackProp_Check_PROTO (PyObject* o)
+
 #ifdef XBOB_LEARN_MLP_MODULE
 
   /* This section is used when compiling `xbob.learn.mlp' itself */
@@ -165,6 +178,10 @@ typedef struct {
 
   PyBobLearnMLPTrainer_Check_RET PyBobLearnMLPTrainer_Check PyBobLearnMLPTrainer_Check_PROTO;
 
+  extern PyBobLearnMLPBackProp_Type_TYPE PyBobLearnMLPBackProp_Type;
+
+  PyBobLearnMLPBackProp_Check_RET PyBobLearnMLPBackProp_Check PyBobLearnMLPBackProp_Check_PROTO;
+
 #else
 
   /* This section is used in modules that use `xbob.learn.mlp's' C-API */
@@ -235,6 +252,10 @@ typedef struct {
 
 # define PyBobLearnMLPTrainer_Check (*(PyBobLearnMLPTrainer_Check_RET (*)PyBobLearnMLPTrainer_Check_PROTO) PyXbobLearnMLP_API[PyBobLearnMLPTrainer_Check_NUM])
 
+# define PyBobLearnMLPBackProp_Type (*(PyBobLearnMLPBackProp_Type_TYPE *)PyXbobLearnMLP_API[PyBobLearnMLPBackProp_Type_NUM])
+
+# define PyBobLearnMLPBackProp_Check (*(PyBobLearnMLPBackProp_Check_RET (*)PyBobLearnMLPBackProp_Check_PROTO) PyXbobLearnMLP_API[PyBobLearnMLPBackProp_Check_NUM])
+
 # if !defined(NO_IMPORT_ARRAY)
 
   /**
diff --git a/xbob/learn/mlp/machine.cpp b/xbob/learn/mlp/machine.cpp
index d4ab00d44c277b5a5a223a5b665cf192ed37fa45..9dccfedaa896d947624c6d91669dbb815972ca78 100644
--- a/xbob/learn/mlp/machine.cpp
+++ b/xbob/learn/mlp/machine.cpp
@@ -276,8 +276,15 @@ static PyObject* PyBobLearnMLPMachine_getWeights
 static int PyBobLearnMLPMachine_setWeights (PyBobLearnMLPMachineObject* self,
     PyObject* weights, void* /*closure*/) {
 
+  if (PyNumber_Check(weights)) {
+    double v = PyFloat_AsDouble(weights);
+    if (PyErr_Occurred()) return -1;
+    self->cxx->setWeights(v);
+    return 0;
+  }
+
   if (!PyIter_Check(weights) && !PySequence_Check(weights)) {
-    PyErr_Format(PyExc_TypeError, "setting attribute `weights' of `%s' requires an iterable, but you passed `%s' which does not implement the iterator protocol", Py_TYPE(self)->tp_name, Py_TYPE(weights)->tp_name);
+    PyErr_Format(PyExc_TypeError, "setting attribute `weights' of `%s' requires either a float or an iterable, but you passed `%s' which does not implement the iterator protocol", Py_TYPE(self)->tp_name, Py_TYPE(weights)->tp_name);
     return -1;
   }
 
@@ -356,8 +363,15 @@ static PyObject* PyBobLearnMLPMachine_getBiases
 static int PyBobLearnMLPMachine_setBiases (PyBobLearnMLPMachineObject* self,
     PyObject* biases, void* /*closure*/) {
 
+  if (PyNumber_Check(biases)) {
+    double v = PyFloat_AsDouble(biases);
+    if (PyErr_Occurred()) return -1;
+    self->cxx->setBiases(v);
+    return 0;
+  }
+
   if (!PyIter_Check(biases) && !PySequence_Check(biases)) {
-    PyErr_Format(PyExc_TypeError, "setting attribute `biases' of `%s' requires an iterable, but you passed `%s' which does not implement the iterator protocol", Py_TYPE(self)->tp_name, Py_TYPE(biases)->tp_name);
+    PyErr_Format(PyExc_TypeError, "setting attribute `biases' of `%s' requires either a float or an iterable, but you passed `%s' which does not implement the iterator protocol", Py_TYPE(self)->tp_name, Py_TYPE(biases)->tp_name);
     return -1;
   }
 
@@ -423,12 +437,19 @@ static PyObject* PyBobLearnMLPMachine_getInputSubtraction
 static int PyBobLearnMLPMachine_setInputSubtraction
 (PyBobLearnMLPMachineObject* self, PyObject* o, void* /*closure*/) {
 
+  if (PyNumber_Check(o)) {
+    double v = PyFloat_AsDouble(o);
+    if (PyErr_Occurred()) return -1;
+    self->cxx->setInputSubtraction(v);
+    return 0;
+  }
+
   PyBlitzArrayObject* input_subtract = 0;
   if (!PyBlitzArray_Converter(o, &input_subtract)) return -1;
   auto input_subtract_ = make_safe(input_subtract);
 
   if (input_subtract->type_num != NPY_FLOAT64 || input_subtract->ndim != 1) {
-    PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit floats 1D arrays for property array `input_subtract'", Py_TYPE(self)->tp_name);
+    PyErr_Format(PyExc_TypeError, "`%s' only supports either a single float or 64-bit floats 1D arrays for property array `input_subtract'", Py_TYPE(self)->tp_name);
     return -1;
   }
 
@@ -463,12 +484,19 @@ static PyObject* PyBobLearnMLPMachine_getInputDivision
 static int PyBobLearnMLPMachine_setInputDivision (PyBobLearnMLPMachineObject* self,
     PyObject* o, void* /*closure*/) {
 
+  if (PyNumber_Check(o)) {
+    double v = PyFloat_AsDouble(o);
+    if (PyErr_Occurred()) return -1;
+    self->cxx->setInputDivision(v);
+    return 0;
+  }
+
   PyBlitzArrayObject* input_divide = 0;
   if (!PyBlitzArray_Converter(o, &input_divide)) return -1;
   auto input_divide_ = make_safe(input_divide);
 
   if (input_divide->type_num != NPY_FLOAT64 || input_divide->ndim != 1) {
-    PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit floats 1D arrays for property array `input_divide'", Py_TYPE(self)->tp_name);
+    PyErr_Format(PyExc_TypeError, "`%s' only supports either a single float or 64-bit floats 1D arrays for property array `input_divide'", Py_TYPE(self)->tp_name);
     return -1;
   }
 
diff --git a/xbob/learn/mlp/main.cpp b/xbob/learn/mlp/main.cpp
index bca1be86c30b987ff4ac21f0b32e8a46f53155ab..37c1461e50718ffd442f95a7ac071426cb8109f2 100644
--- a/xbob/learn/mlp/main.cpp
+++ b/xbob/learn/mlp/main.cpp
@@ -56,6 +56,9 @@ static PyObject* create_module (void) {
   PyBobLearnMLPTrainer_Type.tp_new = PyType_GenericNew;
   if (PyType_Ready(&PyBobLearnMLPTrainer_Type) < 0) return 0;
 
+  PyBobLearnMLPBackProp_Type.tp_base = &PyBobLearnMLPTrainer_Type;
+  if (PyType_Ready(&PyBobLearnMLPBackProp_Type) < 0) return 0;
+
 # if PY_VERSION_HEX >= 0x03000000
   PyObject* m = PyModule_Create(&module_definition);
 # else
@@ -87,6 +90,9 @@ static PyObject* create_module (void) {
   Py_INCREF(&PyBobLearnMLPTrainer_Type);
   if (PyModule_AddObject(m, "Trainer", (PyObject *)&PyBobLearnMLPTrainer_Type) < 0) return 0;
 
+  Py_INCREF(&PyBobLearnMLPBackProp_Type);
+  if (PyModule_AddObject(m, "BackProp", (PyObject *)&PyBobLearnMLPBackProp_Type) < 0) return 0;
+
   static void* PyXbobLearnMLP_API[PyXbobLearnMLP_API_pointers];
 
   /* exhaustive list of C APIs */
diff --git a/xbob/learn/mlp/test_backprop.py b/xbob/learn/mlp/test_backprop.py
index d8010360da8eb1554fa2f40b168b97abe849a6c1..80d9dbe9bf538bcc096062fda12522c6be1aa1c5 100644
--- a/xbob/learn/mlp/test_backprop.py
+++ b/xbob/learn/mlp/test_backprop.py
@@ -59,7 +59,7 @@ def check_training(machine, cost, bias_training, batch_size, learning_rate,
 
   python_trainer = PythonBackProp(batch_size, cost, machine, bias_training,
       learning_rate, momentum)
-  cxx_trainer = MLPBackPropTrainer(batch_size, cost, machine, bias_training)
+  cxx_trainer = BackProp(batch_size, cost, machine, bias_training)
   cxx_trainer.learning_rate = learning_rate
   cxx_trainer.momentum = momentum
 
diff --git a/xbob/learn/mlp/trainer.cpp b/xbob/learn/mlp/trainer.cpp
index 9a2ebbc4f618e7b6ea7f12c9e43403c598bdb62c..83956cb40c0a4b39c6fbfa836314e3c80c929357 100644
--- a/xbob/learn/mlp/trainer.cpp
+++ b/xbob/learn/mlp/trainer.cpp
@@ -13,6 +13,8 @@
 #include <xbob.learn.mlp/api.h>
 #include <structmember.h>
 
+#include "utils.h"
+
 /****************************************
  * Implementation of base Trainer class *
  ****************************************/
@@ -21,6 +23,7 @@ PyDoc_STRVAR(s_trainer_str, XBOB_EXT_MODULE_PREFIX ".Trainer");
 
 PyDoc_STRVAR(s_trainer_doc,
 "Trainer(batch_size, cost, [trainer, [train_biases]]) -> new Trainer\n\
+\n\
 Trainer(other) -> new Trainer\n\
 \n\
 The base python class for MLP trainers based on cost derivatives.\n\
@@ -221,12 +224,12 @@ PyDoc_STRVAR(s_cost_object_doc,
 the cost (a.k.a. *loss*) and the derivatives given the input, the\n\
 target and the MLP structure.");
 
-static PyObject* PyBobLearnMLPTrainer_getCost
+static PyObject* PyBobLearnMLPTrainer_getCostObject
 (PyBobLearnMLPTrainerObject* self, void* /*closure*/) {
   return PyBobLearnCost_NewFromCost(self->cxx->getCost());
 }
 
-static int PyBobLearnMLPTrainer_setCost
+static int PyBobLearnMLPTrainer_setCostObject
 (PyBobLearnMLPTrainerObject* self, PyObject* o, void* /*closure*/) {
 
   if (!PyBobLearnCost_Check(o)) {
@@ -257,61 +260,6 @@ static int PyBobLearnMLPTrainer_setTrainBiases
   return -1;
 }
 
-template <int N>
-PyObject* convert_vector(const std::vector<blitz::Array<double,N>>& v) {
-  PyObject* retval = PyTuple_New(v.size());
-  auto retval_ = make_safe(retval);
-  if (!retval) return 0;
-  for (int k=0; k<v.size(); ++k) {
-    auto arr = PyBlitzArrayCxx_NewFromConstArray(v[k]);
-    if (!arr) return 0;
-    PyTuple_SET_ITEM(retval, k, arr);
-  }
-  Py_INCREF(retval);
-  return retval;
-}
-
-template <int N>
-int convert_tuple(PyBobLearnMLPTrainerObject* self, const char* attr,
-    PyObject* o, std::vector<blitz::Array<double,N>>& seq) {
-
-  if (!PyIter_Check(o) && !PySequence_Check(o)) {
-    PyErr_Format(PyExc_TypeError, "setting attribute `%s' of `%s' requires an iterable, but you passed `%s' which does not implement the iterator protocol", Py_TYPE(self)->tp_name, attr, Py_TYPE(o)->tp_name);
-    return -1;
-  }
-
-  /* Checks and converts all entries */
-  std::vector<boost::shared_ptr<PyBlitzArrayObject>> seq_;
-
-  PyObject* iterator = PyObject_GetIter(o);
-  if (!iterator) return -1;
-  auto iterator_ = make_safe(iterator);
-
-  while (PyObject* item = PyIter_Next(iterator)) {
-    auto item_ = make_safe(item);
-
-    PyBlitzArrayObject* bz = 0;
-
-    if (!PyBlitzArray_Converter(item, &bz)) {
-      PyErr_Format(PyExc_TypeError, "`%s' (while setting `%s') could not convert object of type `%s' at position %" PY_FORMAT_SIZE_T "d of input sequence into an array - check your input", Py_TYPE(self)->tp_name, attr, Py_TYPE(item)->tp_name, seq.size());
-      return -1;
-    }
-
-    if (bz->ndim != N || bz->type_num != NPY_FLOAT64) {
-      PyErr_Format(PyExc_TypeError, "`%s' only supports 2D 64-bit float arrays for attribute `%s' (or any other object coercible to that), but at position %" PY_FORMAT_SIZE_T "d I have found an object with %" PY_FORMAT_SIZE_T "d dimensions and with type `%s' which is not compatible - check your input", Py_TYPE(self)->tp_name, attr, seq.size(), bz->ndim, PyBlitzArray_TypenumAsString(bz->type_num));
-      Py_DECREF(bz);
-      return -1;
-    }
-
-    seq_.push_back(make_safe(bz)); ///< prevents data deletion
-    seq.push_back(*PyBlitzArrayCxx_AsBlitz<double,N>(bz)); ///< only a view!
-  }
-
-  if (PyErr_Occurred()) return -1;
-
-  return 0;
-}
-
 PyDoc_STRVAR(s_error_str, "error");
 PyDoc_STRVAR(s_error_doc,
 "The error (a.k.a. :math:`\\delta`'s) back-propagated through the\n\
@@ -326,7 +274,7 @@ static int PyBobLearnMLPTrainer_setError
 (PyBobLearnMLPTrainerObject* self, PyObject* o, void* /*closure*/) {
 
   std::vector<blitz::Array<double,2>> bzvec;
-  int retval = convert_tuple<2>(self, s_error_str, o, bzvec);
+  int retval = convert_tuple<2>((PyObject*)self, s_error_str, o, bzvec);
   if (retval < 0) return retval;
 
   try {
@@ -358,7 +306,7 @@ static int PyBobLearnMLPTrainer_setOutput
 (PyBobLearnMLPTrainerObject* self, PyObject* o, void* /*closure*/) {
 
   std::vector<blitz::Array<double,2>> bzvec;
-  int retval = convert_tuple<2>(self, s_output_str, o, bzvec);
+  int retval = convert_tuple<2>((PyObject*)self, s_output_str, o, bzvec);
   if (retval < 0) return retval;
 
   try {
@@ -392,7 +340,7 @@ static int PyBobLearnMLPTrainer_setDerivatives
 (PyBobLearnMLPTrainerObject* self, PyObject* o, void* /*closure*/) {
 
   std::vector<blitz::Array<double,2>> bzvec;
-  int retval = convert_tuple<2>(self, s_derivatives_str, o, bzvec);
+  int retval = convert_tuple<2>((PyObject*)self, s_derivatives_str, o, bzvec);
   if (retval < 0) return retval;
 
   try {
@@ -426,7 +374,7 @@ static int PyBobLearnMLPTrainer_setBiasDerivatives
 (PyBobLearnMLPTrainerObject* self, PyObject* o, void* /*closure*/) {
 
   std::vector<blitz::Array<double,1>> bzvec;
-  int retval = convert_tuple<1>(self, s_bias_derivatives_str, o, bzvec);
+  int retval = convert_tuple<1>((PyObject*)self, s_bias_derivatives_str, o, bzvec);
   if (retval < 0) return retval;
 
   try {
@@ -455,8 +403,8 @@ static PyGetSetDef PyBobLearnMLPTrainer_getseters[] = {
     },
     {
       s_cost_object_str,
-      (getter)PyBobLearnMLPTrainer_getCost,
-      (setter)PyBobLearnMLPTrainer_setCost,
+      (getter)PyBobLearnMLPTrainer_getCostObject,
+      (setter)PyBobLearnMLPTrainer_setCostObject,
       s_cost_object_doc,
       0
     },
@@ -556,7 +504,7 @@ static PyObject* PyBobLearnMLPTrainer_forwardStep
 (PyBobLearnMLPTrainerObject* self, PyObject* args, PyObject* kwds) {
 
   /* Parses input arguments in a single shot */
-  static const char* const_kwlist[] = {"machine", "input"};
+  static const char* const_kwlist[] = {"machine", "input", 0};
   static char** kwlist = const_cast<char**>(const_kwlist);
 
   PyBobLearnMLPMachineObject* machine = 0;
@@ -596,7 +544,7 @@ static PyObject* PyBobLearnMLPTrainer_backwardStep
 (PyBobLearnMLPTrainerObject* self, PyObject* args, PyObject* kwds) {
 
   /* Parses input arguments in a single shot */
-  static const char* const_kwlist[] = {"machine", "input", "target"};
+  static const char* const_kwlist[] = {"machine", "input", "target", 0};
   static char** kwlist = const_cast<char**>(const_kwlist);
 
   PyBobLearnMLPMachineObject* machine = 0;
@@ -639,7 +587,9 @@ static PyObject* PyBobLearnMLPTrainer_backwardStep
 
 PyDoc_STRVAR(s_cost_str, "cost");
 PyDoc_STRVAR(s_cost_doc,
-"o.cost(target, [machine, input]) -> float\n\
+"o.cost(target) -> float\n\
+\n\
+o.cost(machine, input, target) -> float\n\
 \n\
 Calculates the cost for a given target.\n\
 \n\
@@ -661,18 +611,28 @@ target.\n\
 static PyObject* PyBobLearnMLPTrainer_cost
 (PyBobLearnMLPTrainerObject* self, PyObject* args, PyObject* kwds) {
 
-  /* Parses input arguments in a single shot */
-  static const char* const_kwlist[] = {"target", "machine", "input"};
-  static char** kwlist = const_cast<char**>(const_kwlist);
+  Py_ssize_t nargs = (args?PyTuple_Size(args):0) + (kwds?PyDict_Size(kwds):0);
 
-  PyBlitzArrayObject* target = 0;
   PyBobLearnMLPMachineObject* machine = 0;
   PyBlitzArrayObject* input = 0;
+  PyBlitzArrayObject* target = 0;
 
-  if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O!O&", kwlist,
-        &PyBlitzArray_Converter, &target,
-        &PyBobLearnMLPMachine_Type, &machine,
-        &PyBlitzArray_Converter, &input)) return 0;
+  if (nargs == 1) {
+    static const char* const_kwlist[] = {"target", 0};
+    static char** kwlist = const_cast<char**>(const_kwlist);
+    if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&", kwlist,
+          &PyBlitzArray_Converter, &target)) return 0;
+  }
+  else { //there must be three
+    static const char* const_kwlist[] = {"machine", "input", "target", 0};
+    static char** kwlist = const_cast<char**>(const_kwlist);
+    if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!O&O&", kwlist,
+          &PyBobLearnMLPMachine_Type, &machine,
+          &PyBlitzArray_Converter, &input,
+          &PyBlitzArray_Converter, &target)) return 0;
+  }
+
+  /* Parses input arguments in a single shot */
 
   if ((machine && !input) || (input && !machine)) {
     PyErr_Format(PyExc_RuntimeError, "`%s.%s' expects that you either provide only the target (after a call to `forward_step') with a given machine and input or target, machine *and* input. You cannot provide a machine and not an input or vice-versa", Py_TYPE(self)->tp_name, s_cost_str);
@@ -730,7 +690,7 @@ static PyObject* PyBobLearnMLPTrainer_setErrorOnLayer
 (PyBobLearnMLPTrainerObject* self, PyObject* args, PyObject* kwds) {
 
   /* Parses input arguments in a single shot */
-  static const char* const_kwlist[] = {"array", "layer"};
+  static const char* const_kwlist[] = {"array", "layer", 0};
   static char** kwlist = const_cast<char**>(const_kwlist);
 
   PyBlitzArrayObject* array = 0;
@@ -767,7 +727,7 @@ static PyObject* PyBobLearnMLPTrainer_setOutputOnLayer
 (PyBobLearnMLPTrainerObject* self, PyObject* args, PyObject* kwds) {
 
   /* Parses input arguments in a single shot */
-  static const char* const_kwlist[] = {"array", "layer"};
+  static const char* const_kwlist[] = {"array", "layer", 0};
   static char** kwlist = const_cast<char**>(const_kwlist);
 
   PyBlitzArrayObject* array = 0;
@@ -805,7 +765,7 @@ static PyObject* PyBobLearnMLPTrainer_setDerivativeOnLayer
 (PyBobLearnMLPTrainerObject* self, PyObject* args, PyObject* kwds) {
 
   /* Parses input arguments in a single shot */
-  static const char* const_kwlist[] = {"array", "layer"};
+  static const char* const_kwlist[] = {"array", "layer", 0};
   static char** kwlist = const_cast<char**>(const_kwlist);
 
   PyBlitzArrayObject* array = 0;
@@ -843,7 +803,7 @@ static PyObject* PyBobLearnMLPTrainer_setBiasDerivativeOnLayer
 (PyBobLearnMLPTrainerObject* self, PyObject* args, PyObject* kwds) {
 
   /* Parses input arguments in a single shot */
-  static const char* const_kwlist[] = {"array", "layer"};
+  static const char* const_kwlist[] = {"array", "layer", 0};
   static char** kwlist = const_cast<char**>(const_kwlist);
 
   PyBlitzArrayObject* array = 0;
diff --git a/xbob/learn/mlp/utils.h b/xbob/learn/mlp/utils.h
new file mode 100644
index 0000000000000000000000000000000000000000..d368592f77f8463ef9d0cf566ab5870fe0be590f
--- /dev/null
+++ b/xbob/learn/mlp/utils.h
@@ -0,0 +1,73 @@
+/**
+ * @author Andre Anjos <andre.anjos@idiap.ch>
+ * @date Tue  6 May 12:32:39 2014 CEST
+ *
+ * @brief Shared utilities
+ *
+ * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+ */
+
+#ifndef BOB_LEARN_MLP_UTILS_H
+#define BOB_LEARN_MLP_UTILS_H
+
+#define XBOB_LEARN_MLP_MODULE
+#include <xbob.blitz/cppapi.h>
+#include <xbob.blitz/cleanup.h>
+#include <xbob.learn.mlp/api.h>
+
+template <int N>
+PyObject* convert_vector(const std::vector<blitz::Array<double,N>>& v) {
+  PyObject* retval = PyTuple_New(v.size());
+  if (!retval) return 0;
+  auto retval_ = make_safe(retval);
+  for (int k=0; k<v.size(); ++k) {
+    auto arr = PyBlitzArrayCxx_NewFromConstArray(v[k]);
+    if (!arr) return 0;
+    PyTuple_SET_ITEM(retval, k, PyBlitzArray_NUMPY_WRAP(arr));
+  }
+  Py_INCREF(retval);
+  return retval;
+}
+
+template <int N>
+int convert_tuple(PyObject* self, const char* attr,
+    PyObject* o, std::vector<blitz::Array<double,N>>& seq) {
+
+  if (!PyIter_Check(o) && !PySequence_Check(o)) {
+    PyErr_Format(PyExc_TypeError, "setting attribute `%s' of `%s' requires an iterable, but you passed `%s' which does not implement the iterator protocol", attr, Py_TYPE(self)->tp_name, Py_TYPE(o)->tp_name);
+    return -1;
+  }
+
+  /* Checks and converts all entries */
+  std::vector<boost::shared_ptr<PyBlitzArrayObject>> seq_;
+
+  PyObject* iterator = PyObject_GetIter(o);
+  if (!iterator) return -1;
+  auto iterator_ = make_safe(iterator);
+
+  while (PyObject* item = PyIter_Next(iterator)) {
+    auto item_ = make_safe(item);
+
+    PyBlitzArrayObject* bz = 0;
+
+    if (!PyBlitzArray_Converter(item, &bz)) {
+      PyErr_Format(PyExc_TypeError, "`%s' (while setting `%s') could not convert object of type `%s' at position %" PY_FORMAT_SIZE_T "d of input sequence into an array - check your input", Py_TYPE(self)->tp_name, attr, Py_TYPE(item)->tp_name, seq.size());
+      return -1;
+    }
+
+    if (bz->ndim != N || bz->type_num != NPY_FLOAT64) {
+      PyErr_Format(PyExc_TypeError, "`%s' only supports %dD 64-bit float arrays for attribute `%s' (or any other object coercible to that), but at position %" PY_FORMAT_SIZE_T "d I have found an object with %" PY_FORMAT_SIZE_T "d dimensions and with type `%s' which is not compatible - check your input", Py_TYPE(self)->tp_name, N, attr, seq.size(), bz->ndim, PyBlitzArray_TypenumAsString(bz->type_num));
+      Py_DECREF(bz);
+      return -1;
+    }
+
+    seq_.push_back(make_safe(bz)); ///< prevents data deletion
+    seq.push_back(*PyBlitzArrayCxx_AsBlitz<double,N>(bz)); ///< only a view!
+  }
+
+  if (PyErr_Occurred()) return -1;
+
+  return 0;
+}
+
+#endif /* BOB_LEARN_MLP_UTILS_H */