diff --git a/xbob/learn/mlp/machine.cpp b/xbob/learn/mlp/machine.cpp
index 03ad07ff56f14d94cbe77860e31df274a30daad0..839d6fe57314758f45c17c8dbd756ebb3166ae66 100644
--- a/xbob/learn/mlp/machine.cpp
+++ b/xbob/learn/mlp/machine.cpp
@@ -35,7 +35,7 @@ a global activation function. References to fully-connected\n\
 feed-forward networks:\n\
 \n\
   Bishop's Pattern Recognition and Machine Learning, Chapter 5.\n\
-  Figure 5.1 shows what we mean.\n\n
+  Figure 5.1 shows what we mean.\n\
 \n\
 MLPs normally are multi-layered systems, with 1 or more hidden\n\
 layers. As a special case, this implementation also supports\n\
@@ -78,7 +78,7 @@ static int PyBobLearnMLPMachine_init_sizes
 
   while (PyObject* item = PyIter_Next(iterator)) {
     auto item_ = make_safe(item);
-    Py_ssize_t value = PyNumber_AsSsize_t(item);
+    Py_ssize_t value = PyNumber_AsSsize_t(item, PyExc_OverflowError);
     if (PyErr_Occurred()) return -1;
     cxx_shape.push_back(value);
   }
@@ -114,7 +114,7 @@ static int PyBobLearnMLPMachine_init_hdf5(PyBobLearnMLPMachineObject* self,
   auto h5f = reinterpret_cast<PyBobIoHDF5FileObject*>(config);
 
   try {
-    self->cxx = new bob::machine::LinearMachine(*(h5f->f));
+    self->cxx = new bob::machine::MLP(*(h5f->f));
   }
   catch (std::exception& ex) {
     PyErr_SetString(PyExc_RuntimeError, ex.what());
@@ -144,7 +144,7 @@ static int PyBobLearnMLPMachine_init_copy
   auto copy = reinterpret_cast<PyBobLearnMLPMachineObject*>(other);
 
   try {
-    self->cxx = new bob::machine::LinearMachine(*(copy->cxx));
+    self->cxx = new bob::machine::MLP(*(copy->cxx));
   }
   catch (std::exception& ex) {
     PyErr_SetString(PyExc_RuntimeError, ex.what());
@@ -186,7 +186,7 @@ static int PyBobLearnMLPMachine_init(PyBobLearnMLPMachineObject* self,
           return PyBobLearnMLPMachine_init_copy(self, args, kwds);
         }
 
-        if (PyIter_Check(arg)) {
+        if (PyIter_Check(arg) || PySequence_Check(arg)) {
           return PyBobLearnMLPMachine_init_sizes(self, args, kwds);
         }
 
@@ -260,8 +260,8 @@ static PyObject* PyBobLearnMLPMachine_getWeights
   if (!retval) return 0;
   auto retval_ = make_safe(retval);
 
-  int k;
-  for (auto i=weights.begin(), k=0; i!=weights.end(); ++i, ++k) {
+  int k=0;
+  for (auto i=weights.begin(); i!=weights.end(); ++i, ++k) {
     PyObject* tmp = PyBlitzArray_NUMPY_WRAP(PyBlitzArrayCxx_NewFromConstArray(*i));
     if (!tmp) return 0;
     PyTuple_SET_ITEM(retval, k, tmp);
@@ -275,7 +275,7 @@ static PyObject* PyBobLearnMLPMachine_getWeights
 static int PyBobLearnMLPMachine_setWeights (PyBobLearnMLPMachineObject* self,
     PyObject* weights, void* /*closure*/) {
 
-  if (!PyIter_Check(weights)) {
+  if (!PyIter_Check(weights) && !PySequence_Check(weights)) {
     PyErr_Format(PyExc_TypeError, "setting attribute `weights' of `%s' requires an iterable, but you passed `%s' which does not implement the iterator protocol", Py_TYPE(self)->tp_name, Py_TYPE(weights)->tp_name);
     return -1;
   }
@@ -340,8 +340,8 @@ static PyObject* PyBobLearnMLPMachine_getBiases
   if (!retval) return 0;
   auto retval_ = make_safe(retval);
 
-  int k;
-  for (auto i=biases.begin(), k=0; i!=biases.end(); ++i, ++k) {
+  int k=0;
+  for (auto i=biases.begin(); i!=biases.end(); ++i, ++k) {
     PyObject* tmp = PyBlitzArray_NUMPY_WRAP(PyBlitzArrayCxx_NewFromConstArray(*i));
     if (!tmp) return 0;
     PyTuple_SET_ITEM(retval, k, tmp);
@@ -355,13 +355,13 @@ static PyObject* PyBobLearnMLPMachine_getBiases
 static int PyBobLearnMLPMachine_setBiases (PyBobLearnMLPMachineObject* self,
     PyObject* biases, void* /*closure*/) {
 
-  if (!PyIter_Check(biases)) {
+  if (!PyIter_Check(biases) && !PySequence_Check(biases)) {
     PyErr_Format(PyExc_TypeError, "setting attribute `biases' of `%s' requires an iterable, but you passed `%s' which does not implement the iterator protocol", Py_TYPE(self)->tp_name, Py_TYPE(biases)->tp_name);
     return -1;
   }
 
   /* Checks and converts all entries */
-  std::vector<blitz::Array<double,2> > biases_seq;
+  std::vector<blitz::Array<double,1> > biases_seq;
   std::vector<boost::shared_ptr<PyBlitzArrayObject>> biases_seq_;
 
   PyObject* iterator = PyObject_GetIter(biases);
@@ -385,13 +385,13 @@ static int PyBobLearnMLPMachine_setBiases (PyBobLearnMLPMachineObject* self,
     }
 
     biases_seq_.push_back(make_safe(bz)); ///< prevents data deletion
-    biases_seq.push_back(*PyBlitzArrayCxx_AsBlitz<double,2>(bz)); ///< only a view!
+    biases_seq.push_back(*PyBlitzArrayCxx_AsBlitz<double,1>(bz)); ///< only a view!
   }
 
   if (PyErr_Occurred()) return -1;
 
   try {
-    self->cxx->setBiases(*PyBlitzArrayCxx_AsBlitz<double,1>(biases_seq));
+    self->cxx->setBiases(biases_seq);
   }
   catch (std::exception& ex) {
     PyErr_SetString(PyExc_RuntimeError, ex.what());
@@ -501,37 +501,44 @@ static PyObject* PyBobLearnMLPMachine_getShape
   if (!retval) return 0;
   auto retval_ = make_safe(retval);
 
-  PyTuple_SET_ITEM(retval, 0, Py_BuildValue("n", self->cxx->inputSize()));
+  //fills in all the layers
+  Py_ssize_t k = 0;
 
-  //STOPPED HERE!
+  PyTuple_SET_ITEM(retval, k++, Py_BuildValue("n", self->cxx->inputSize()));
 
-  return Py_BuildValue("(nn)", self->cxx->inputSize(),
-      self->cxx->outputSize());
+  auto biases = self->cxx->getBiases();
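+  //each bias vector holds one entry per neuron of its layer, so its
+  //extent gives the size of the corresponding shape position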
+  for (auto i=biases.begin(); i!=biases.end(); ++i, ++k) {
+    PyTuple_SET_ITEM(retval, k, Py_BuildValue("n", i->extent(0)));
+  }
+
+  Py_INCREF(retval);
+  return retval;
 }
 
 static int PyBobLearnMLPMachine_setShape
 (PyBobLearnMLPMachineObject* self, PyObject* o, void* /*closure*/) {
 
   if (!PySequence_Check(o)) {
-    PyErr_Format(PyExc_TypeError, "`%s' shape can only be set using tuples (or sequences), not `%s'", Py_TYPE(self)->tp_name, Py_TYPE(o)->tp_name);
+    PyErr_Format(PyExc_TypeError, "`%s' shape can only be set using sequences, not `%s'", Py_TYPE(self)->tp_name, Py_TYPE(o)->tp_name);
     return -1;
   }
 
-  PyObject* shape = PySequence_Tuple(o);
-  auto shape_ = make_safe(shape);
+  /* Iterate and extracts the shape */
+  std::vector<size_t> cxx_shape;
 
-  if (PyTuple_GET_SIZE(shape) != 2) {
-    PyErr_Format(PyExc_RuntimeError, "`%s' shape can only be set using  2-position tuples (or sequences), not an %" PY_FORMAT_SIZE_T "d-position sequence", Py_TYPE(self)->tp_name, PyTuple_GET_SIZE(shape));
-    return -1;
-  }
+  PyObject* iterator = PyObject_GetIter(o);
+  if (!iterator) return -1;
+  auto iterator_ = make_safe(iterator);
 
-  Py_ssize_t in = PyNumber_AsSsize_t(PyTuple_GET_ITEM(shape, 0), PyExc_OverflowError);
-  if (PyErr_Occurred()) return -1;
-  Py_ssize_t out = PyNumber_AsSsize_t(PyTuple_GET_ITEM(shape, 1), PyExc_OverflowError);
-  if (PyErr_Occurred()) return -1;
+  while (PyObject* item = PyIter_Next(iterator)) {
+    auto item_ = make_safe(item);
+    Py_ssize_t value = PyNumber_AsSsize_t(item, PyExc_OverflowError);
+    if (PyErr_Occurred()) return -1;
+    cxx_shape.push_back(value);
+  }
 
   try {
-    self->cxx->resize(in, out);
+    self->cxx->resize(cxx_shape);
   }
   catch (std::exception& ex) {
     PyErr_SetString(PyExc_RuntimeError, ex.what());
@@ -546,28 +553,54 @@ static int PyBobLearnMLPMachine_setShape
 
 }
 
-PyDoc_STRVAR(s_activation_str, "activation");
-PyDoc_STRVAR(s_activation_doc,
-"The activation function - by default, the identity function.\n\
-The output provided by the activation function is passed,\n\
-unchanged, to the user.\n\
+PyDoc_STRVAR(s_hidden_activation_str, "hidden_activation");
+PyDoc_STRVAR(s_hidden_activation_doc,
+"The hidden neurons activation function - by default, the\n\
+hyperbolic tangent function. The current implementation only\n\
+allows setting one global value for all hidden layers.\n\
 ");
 
-static PyObject* PyBobLearnMLPMachine_getActivation
+static PyObject* PyBobLearnMLPMachine_getHiddenActivation
 (PyBobLearnMLPMachineObject* self, void* /*closure*/) {
-  return PyBobLearnActivation_NewFromActivation(self->cxx->getActivation());
+  return PyBobLearnActivation_NewFromActivation(self->cxx->getHiddenActivation());
 }
 
-static int PyBobLearnMLPMachine_setActivation
+static int PyBobLearnMLPMachine_setHiddenActivation
 (PyBobLearnMLPMachineObject* self, PyObject* o, void* /*closure*/) {
 
   if (!PyBobLearnActivation_Check(o)) {
-    PyErr_Format(PyExc_TypeError, "%s activation requires an object of type `Activation' (or an inherited type), not `%s'", Py_TYPE(self)->tp_name, Py_TYPE(o)->tp_name);
+    PyErr_Format(PyExc_TypeError, "%s hidden activation requires an object of type `Activation' (or an inherited type), not `%s'", Py_TYPE(self)->tp_name, Py_TYPE(o)->tp_name);
     return -1;
   }
 
   auto py = reinterpret_cast<PyBobLearnActivationObject*>(o);
-  self->cxx->setActivation(py->cxx);
+  self->cxx->setHiddenActivation(py->cxx);
+  return 0;
+
+}
+
+PyDoc_STRVAR(s_output_activation_str, "output_activation");
+PyDoc_STRVAR(s_output_activation_doc,
+"The output activation function - by default, the hyperbolic\n\
+tangent function. The output provided by the output activation\n\
+function is passed, unchanged, to the user.\n\
+");
+
+static PyObject* PyBobLearnMLPMachine_getOutputActivation
+(PyBobLearnMLPMachineObject* self, void* /*closure*/) {
+  return PyBobLearnActivation_NewFromActivation(self->cxx->getOutputActivation());
+}
+
+static int PyBobLearnMLPMachine_setOutputActivation
+(PyBobLearnMLPMachineObject* self, PyObject* o, void* /*closure*/) {
+
+  if (!PyBobLearnActivation_Check(o)) {
+    PyErr_Format(PyExc_TypeError, "%s output activation requires an object of type `Activation' (or an inherited type), not `%s'", Py_TYPE(self)->tp_name, Py_TYPE(o)->tp_name);
+    return -1;
+  }
+
+  auto py = reinterpret_cast<PyBobLearnActivationObject*>(o);
+  self->cxx->setOutputActivation(py->cxx);
   return 0;
 
 }
@@ -609,10 +642,17 @@ static PyGetSetDef PyBobLearnMLPMachine_getseters[] = {
       0
     },
     {
-      s_activation_str,
-      (getter)PyBobLearnMLPMachine_getActivation,
-      (setter)PyBobLearnMLPMachine_setActivation,
-      s_activation_doc,
+      s_hidden_activation_str,
+      (getter)PyBobLearnMLPMachine_getHiddenActivation,
+      (setter)PyBobLearnMLPMachine_setHiddenActivation,
+      s_hidden_activation_doc,
+      0
+    },
+    {
+      s_output_activation_str,
+      (getter)PyBobLearnMLPMachine_getOutputActivation,
+      (setter)PyBobLearnMLPMachine_setOutputActivation,
+      s_output_activation_doc,
       0
     },
     {0}  /* Sentinel */
@@ -629,13 +669,9 @@ PyObject* PyBobLearnMLPMachine_Repr(PyBobLearnMLPMachineObject* self) {
   /**
    * Expected output:
    *
-   * <xbob.learn.linear.Machine float64@(3, 2) [act: f(z) = tanh(z)]>
+   * <xbob.learn.linear.MLP float64@(3, 5, 2) [hidden: f(z) = tanh(z), out: f(z) = * tanh(z)]>
    */
 
-  using bob::machine::IdentityActivation;
-
-  static const std::string identity_str = IdentityActivation().str();
-
   auto weights = make_safe(PyBobLearnMLPMachine_getWeights(self, 0));
   if (!weights) return 0;
   auto dtype = make_safe(PyObject_GetAttrString(weights.get(), "dtype"));
@@ -645,89 +681,19 @@ PyObject* PyBobLearnMLPMachine_Repr(PyBobLearnMLPMachineObject* self) {
 
   PyObject* retval = 0;
 
-  if (self->cxx->getActivation()->str() == identity_str) {
-    retval = PyUnicode_FromFormat("<%s %U@%U>",
-        Py_TYPE(self)->tp_name, dtype_str.get(), shape_str.get());
-  }
+  auto hidden = self->cxx->getHiddenActivation()->str();
+  auto output = self->cxx->getOutputActivation()->str();
 
-  else {
+  if (hidden == output) {
     retval = PyUnicode_FromFormat("<%s %s@%s [act: %s]>",
         Py_TYPE(self)->tp_name, dtype_str.get(), shape_str.get(),
-        self->cxx->getActivation()->str().c_str());
-  }
-
-#if PYTHON_VERSION_HEX < 0x03000000
-  if (!retval) return 0;
-  PyObject* tmp = PyObject_Str(retval);
-  Py_DECREF(retval);
-  retval = tmp;
-#endif
-
-  return retval;
-
-}
-
-PyObject* PyBobLearnMLPMachine_Str(PyBobLearnMLPMachineObject* self) {
-
-  /**
-   * Expected output:
-   *
-   * xbob.learn.linear.Machine (float64) 3 inputs, 2 outputs [act: f(z) = C*z]
-   *  subtract: [ 0.   0.5  0.5]
-   *  divide: [ 0.5  1.   1. ]
-   *  bias: [ 0.3 -3. ]
-   *  [[ 0.4  0.1]
-   *  [ 0.4  0.2]
-   *  [ 0.2  0.7]]
-   */
-
-  using bob::machine::IdentityActivation;
-
-  static const std::string identity_str = IdentityActivation().str();
-
-  boost::shared_ptr<PyObject> act;
-  if (self->cxx->getActivation()->str() != identity_str) {
-    act = make_safe(PyUnicode_FromFormat(" [act: %s]",
-          self->cxx->getActivation()->str().c_str()));
-  }
-  else act = make_safe(PyUnicode_FromString(""));
-
-  boost::shared_ptr<PyObject> sub;
-  if (blitz::any(self->cxx->getInputSubtraction())) {
-    auto t = make_safe(PyBobLearnMLPMachine_getInputSubtraction(self, 0));
-    auto t_str = make_safe(PYOBJECT_STR(t.get()));
-    sub = make_safe(PyUnicode_FromFormat("\n subtract: %U", t_str.get()));
-  }
-  else sub = make_safe(PyUnicode_FromString(""));
-
-  boost::shared_ptr<PyObject> div;
-  if (blitz::any(self->cxx->getInputDivision())) {
-    auto t = make_safe(PyBobLearnMLPMachine_getInputDivision(self, 0));
-    auto t_str = make_safe(PYOBJECT_STR(t.get()));
-    div = make_safe(PyUnicode_FromFormat("\n divide: %U", t_str.get()));
+        hidden.c_str());
   }
-  else div = make_safe(PyUnicode_FromString(""));
-
-  boost::shared_ptr<PyObject> bias;
-  if (blitz::any(self->cxx->getBiases())) {
-    auto t = make_safe(PyBobLearnMLPMachine_getBiases(self, 0));
-    auto t_str = make_safe(PYOBJECT_STR(t.get()));
-    bias = make_safe(PyUnicode_FromFormat("\n bias: %U", t_str.get()));
+  else {
+    retval = PyUnicode_FromFormat("<%s %s@%s [hidden: %s, out: %s]>",
+        Py_TYPE(self)->tp_name, dtype_str.get(), shape_str.get(),
+        hidden.c_str(), output.c_str());
   }
-  else bias = make_safe(PyUnicode_FromString(""));
-
-  auto weights = make_safe(PyBobLearnMLPMachine_getWeights(self, 0));
-  if (!weights) return 0;
-  auto weights_str = make_safe(PYOBJECT_STR(weights.get()));
-  auto dtype = make_safe(PyObject_GetAttrString(weights.get(), "dtype"));
-  auto dtype_str = make_safe(PYOBJECT_STR(dtype.get()));
-  auto shape = make_safe(PyObject_GetAttrString(weights.get(), "shape"));
-
-  PyObject* retval = PyUnicode_FromFormat("%s (%U) %" PY_FORMAT_SIZE_T "d inputs, %" PY_FORMAT_SIZE_T "d outputs%U%U%U%U\n %U",
-    Py_TYPE(self)->tp_name, dtype_str.get(),
-    PyNumber_AsSsize_t(PyTuple_GET_ITEM(shape.get(), 0), PyExc_OverflowError),
-    PyNumber_AsSsize_t(PyTuple_GET_ITEM(shape.get(), 1), PyExc_OverflowError),
-    act.get(), sub.get(), div.get(), bias.get(), weights_str.get());
 
 #if PYTHON_VERSION_HEX < 0x03000000
   if (!retval) return 0;
@@ -944,8 +910,7 @@ PyDoc_STRVAR(s_is_similar_to_str, "is_similar_to");
 PyDoc_STRVAR(s_is_similar_to_doc,
 "o.is_similar_to(other [, r_epsilon=1e-5 [, a_epsilon=1e-8]]) -> bool\n\
 \n\
-Compares this LinearMachine with the ``other`` one to be\n\
-approximately the same.\n\
+Compares this MLP with the ``other`` one to be approximately the same.\n\
 \n\
 The optional values ``r_epsilon`` and ``a_epsilon`` refer to the\n\
 relative and absolute precision for the ``weights``, ``biases``\n\
@@ -977,67 +942,6 @@ static PyObject* PyBobLearnMLPMachine_IsSimilarTo
 
 }
 
-PyDoc_STRVAR(s_resize_str, "resize");
-PyDoc_STRVAR(s_resize_doc,
-"o.resize(input, output) -> None\n\
-\n\
-Resizes the machine. If either the input or output increases\n\
-in size, the weights and other factors should be considered\n\
-uninitialized. If the size is preserved or reduced, already\n\
-initialized values will not be changed.\n\
-\n\
-.. note::\n\
-\n\
-   Use this method to force data compression. All will work\n\
-   out given most relevant factors to be preserved are\n\
-   organized on the top of the weight matrix. In this way,\n\
-   reducing the system size will supress less relevant\n\
-   projections.\n\
-\n\
-");
-
-static PyObject* PyBobLearnMLPMachine_Resize
-(PyBobLearnMLPMachineObject* self, PyObject* args, PyObject* kwds) {
-
-  /* Parses input arguments in a single shot */
-  static const char* const_kwlist[] = {"shape", 0};
-  static char** kwlist = const_cast<char**>(const_kwlist);
-
-  PyObject* shape = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwds, "O", kwlist,
-        &shape)) return -1;
-
-  /* Iterate and extracts the shape */
-  std::vector<size_t> cxx_shape;
-
-  PyObject* iterator = PyObject_GetIter(shape);
-  if (!iterator) return -1;
-  auto iterator_ = make_safe(iterator);
-
-  while (PyObject* item = PyIter_Next(iterator)) {
-    auto item_ = make_safe(item);
-    Py_ssize_t value = PyNumber_AsSsize_t(item);
-    if (PyErr_Occurred()) return -1;
-    cxx_shape.push_back(value);
-  }
-
-  try {
-    self->cxx->resize(cxx_shape);
-  }
-  catch (std::exception& ex) {
-    PyErr_SetString(PyExc_RuntimeError, ex.what());
-    return 0;
-  }
-  catch (...) {
-    PyErr_Format(PyExc_RuntimeError, "cannot resize object of type `%s' - unknown exception thrown", Py_TYPE(self)->tp_name);
-    return 0;
-  }
-
-  Py_RETURN_NONE;
-
-}
-
 static PyMethodDef PyBobLearnMLPMachine_methods[] = {
   {
     s_forward_str,
@@ -1063,12 +967,6 @@ static PyMethodDef PyBobLearnMLPMachine_methods[] = {
     METH_VARARGS|METH_KEYWORDS,
     s_is_similar_to_doc
   },
-  {
-    s_resize_str,
-    (PyCFunction)PyBobLearnMLPMachine_Resize,
-    METH_VARARGS|METH_KEYWORDS,
-    s_resize_doc
-  },
   {0} /* Sentinel */
 };
 
@@ -1090,7 +988,7 @@ PyObject* PyBobLearnMLPMachine_NewFromSize
 
   PyBobLearnMLPMachineObject* retval = (PyBobLearnMLPMachineObject*)PyBobLearnMLPMachine_new(&PyBobLearnMLPMachine_Type, 0, 0);
 
-  retval->cxx = new bob::machine::LinearMachine(input, output);
+  retval->cxx = new bob::machine::MLP(input, output);
 
   return reinterpret_cast<PyObject*>(retval);
 
@@ -1112,7 +1010,7 @@ PyTypeObject PyBobLearnMLPMachine_Type = {
     0,                                             /* tp_as_mapping */
     0,                                             /* tp_hash */
     (ternaryfunc)PyBobLearnMLPMachine_forward,     /* tp_call */
-    (reprfunc)PyBobLearnMLPMachine_Str,            /* tp_str */
+    (reprfunc)PyBobLearnMLPMachine_Repr,           /* tp_str */
     0,                                             /* tp_getattro */
     0,                                             /* tp_setattro */
     0,                                             /* tp_as_buffer */
diff --git a/xbob/learn/mlp/main.cpp b/xbob/learn/mlp/main.cpp
index 9e6e171b5aa1bc3b0a8f8554072b16460dd8e6a1..c9ba4d061348375f26de93591295ce93638e11b9 100644
--- a/xbob/learn/mlp/main.cpp
+++ b/xbob/learn/mlp/main.cpp
@@ -20,7 +20,7 @@ static PyMethodDef module_methods[] = {
     {0}  /* Sentinel */
 };
 
-PyDoc_STRVAR(module_docstr, "bob::machine's multi-layer perceptron machine and trainers");
+PyDoc_STRVAR(module_docstr, "bob's multi-layer perceptron machine and trainers");
 
 int PyXbobLearnMLP_APIVersion = XBOB_LEARN_MLP_API_VERSION;
 
@@ -40,12 +40,6 @@ static PyObject* create_module (void) {
   PyBobLearnMLPMachine_Type.tp_new = PyType_GenericNew;
   if (PyType_Ready(&PyBobLearnMLPMachine_Type) < 0) return 0;
 
-  PyBobLearnMLPPCATrainer_Type.tp_new = PyType_GenericNew;
-  if (PyType_Ready(&PyBobLearnMLPPCATrainer_Type) < 0) return 0;
-
-  PyBobLearnMLPFisherLDATrainer_Type.tp_new = PyType_GenericNew;
-  if (PyType_Ready(&PyBobLearnMLPFisherLDATrainer_Type) < 0) return 0;
-
 # if PY_VERSION_HEX >= 0x03000000
   PyObject* m = PyModule_Create(&module_definition);
 # else
@@ -82,14 +76,6 @@ static PyObject* create_module (void) {
 
   PyXbobLearnMLP_API[PyBobLearnMLPMachine_NewFromSize_NUM] = (void *)&PyBobLearnMLPMachine_NewFromSize;
 
-  /************************************************
-   * Bindings for xbob.learn.mlp.FisherLDATrainer *
-   ************************************************/
-
-  PyXbobLearnMLP_API[PyBobLearnMLPFisherLDATrainer_Type_NUM] = (void *)&PyBobLearnMLPFisherLDATrainer_Type;
-
-  PyXbobLearnMLP_API[PyBobLearnMLPFisherLDATrainer_Check_NUM] = (void *)&PyBobLearnMLPFisherLDATrainer_Check;
-
 #if PY_VERSION_HEX >= 0x02070000
 
   /* defines the PyCapsule */
diff --git a/xbob/learn/mlp/test_machine.py b/xbob/learn/mlp/test_machine.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8a91f4340632fb8619ce2c61fa33db5e23c139f
--- /dev/null
+++ b/xbob/learn/mlp/test_machine.py
@@ -0,0 +1,282 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Andre Anjos <andre.anjos@idiap.ch>
+# Mon 28 Apr 2014 13:32:55 CEST
+#
+# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+
+"""Tests on the Machine infrastructure.
+"""
+
+import time
+import numpy
+import nose.tools
+
+from . import Machine
+from .test_utils import Machine as PythonMachine
+
+import xbob.io
+from xbob.io.test_utils import temporary_filename
+from xbob.learn.activation import Logistic, HyperbolicTangent
+
+def test_2in_1out():
+
+  m = Machine((2,1))
+  nose.tools.eq_(m.shape, (2,1))
+  nose.tools.eq_(m.input_divide.shape, (2,))
+  nose.tools.eq_(m.input_subtract.shape, (2,))
+  nose.tools.eq_(len(m.weights), 1)
+  nose.tools.eq_(m.weights[0].shape, (2,1))
+  assert numpy.allclose(m.weights[0], 0., rtol=1e-10, atol=1e-15)
+  nose.tools.eq_(len(m.biases), 1)
+  nose.tools.eq_(m.biases[0].shape, (1,))
+  nose.tools.eq_(m.biases[0], 0.)
+  nose.tools.eq_(m.hidden_activation, HyperbolicTangent())
+  nose.tools.eq_(m.output_activation, HyperbolicTangent())
+
+  # calculate and match
+  weights = [numpy.random.rand(2,1)]
+  biases = [numpy.random.rand(1)]
+
+  m.weights = weights
+  m.biases = biases
+
+  pymac = PythonMachine(biases, weights, m.hidden_activation, m.output_activation)
+
+  X = numpy.random.rand(10,2)
+  assert numpy.allclose(m(X), pymac.forward(X), rtol=1e-10, atol=1e-15)
+
+def test_2in_3_1out():
+
+  m = Machine((2,3,1))
+  nose.tools.eq_(m.shape, (2,3,1))
+  nose.tools.eq_(m.input_divide.shape, (2,))
+  nose.tools.eq_(m.input_subtract.shape, (2,))
+  nose.tools.eq_(len(m.weights), 2)
+  nose.tools.eq_(m.weights[0].shape, (2,3))
+  assert numpy.allclose(m.weights[0], 0., rtol=1e-10, atol=1e-15)
+  nose.tools.eq_(m.weights[1].shape, (3,1))
+  assert numpy.allclose(m.weights[1], 0., rtol=1e-10, atol=1e-15)
+  nose.tools.eq_(len(m.biases), 2)
+  nose.tools.eq_(m.biases[0].shape, (3,))
+  assert numpy.allclose(m.biases[0], 0., rtol=1e-10, atol=1e-15)
+  nose.tools.eq_(m.biases[1].shape, (1,))
+  assert numpy.allclose(m.biases[1], 0., rtol=1e-10, atol=1e-15)
+  nose.tools.eq_(m.hidden_activation, HyperbolicTangent())
+  nose.tools.eq_(m.output_activation, HyperbolicTangent())
+
+  # calculate and match
+  weights = [numpy.random.rand(2,3), numpy.random.rand(3,1)]
+  biases = [numpy.random.rand(3), numpy.random.rand(1)]
+
+  m.weights = weights
+  m.biases = biases
+
+  pymac = PythonMachine(biases, weights, m.hidden_activation, m.output_activation)
+
+  X = numpy.random.rand(10,2)
+  assert numpy.allclose(m(X), pymac.forward(X), rtol=1e-10, atol=1e-15)
+
+def test_2in_3_5_1out():
+
+  m = Machine((2,3,5,1))
+  nose.tools.eq_(m.shape, (2,3,5,1))
+  nose.tools.eq_(m.input_divide.shape, (2,))
+  nose.tools.eq_(m.input_subtract.shape, (2,))
+  nose.tools.eq_(len(m.weights), 3)
+  nose.tools.eq_(m.weights[0].shape, (2,3))
+  assert numpy.allclose(m.weights[0], 0., rtol=1e-10, atol=1e-15)
+  nose.tools.eq_(m.weights[1].shape, (3,5))
+  assert numpy.allclose(m.weights[1], 0., rtol=1e-10, atol=1e-15)
+  nose.tools.eq_(m.weights[2].shape, (5,1))
+  assert numpy.allclose(m.weights[2], 0., rtol=1e-10, atol=1e-15)
+  nose.tools.eq_(len(m.biases), 3)
+  nose.tools.eq_(m.biases[0].shape, (3,))
+  assert numpy.allclose(m.biases[0], 0., rtol=1e-10, atol=1e-15)
+  nose.tools.eq_(m.biases[1].shape, (5,))
+  assert numpy.allclose(m.biases[1], 0., rtol=1e-10, atol=1e-15)
+  nose.tools.eq_(m.biases[2].shape, (1,))
+  assert numpy.allclose(m.biases[2], 0., rtol=1e-10, atol=1e-15)
+  nose.tools.eq_(m.hidden_activation, HyperbolicTangent())
+  nose.tools.eq_(m.output_activation, HyperbolicTangent())
+
+  # calculate and match
+  weights = [
+      numpy.random.rand(2,3),
+      numpy.random.rand(3,5),
+      numpy.random.rand(5,1)
+      ]
+  biases = [
+      numpy.random.rand(3),
+      numpy.random.rand(5),
+      numpy.random.rand(1),
+      ]
+
+  m.weights = weights
+  m.biases = biases
+
+  pymac = PythonMachine(biases, weights, m.hidden_activation, m.output_activation)
+
+  X = numpy.random.rand(10,2)
+  assert numpy.allclose(m(X), pymac.forward(X), rtol=1e-10, atol=1e-15)
+
+def test_100in_100_10_4out():
+
+  m = Machine((100,100,10,4))
+
+  # calculate and match
+  weights = [
+      numpy.random.rand(100,100),
+      numpy.random.rand(100,10),
+      numpy.random.rand(10,4)
+      ]
+  biases = [
+      numpy.random.rand(100),
+      numpy.random.rand(10),
+      numpy.random.rand(4),
+      ]
+
+  m.weights = weights
+  m.biases = biases
+
+  pymac = PythonMachine(biases, weights, m.hidden_activation, m.output_activation)
+
+  X = numpy.random.rand(20,100)
+  assert numpy.allclose(m(X), pymac.forward(X), rtol=1e-10, atol=1e-15)
+
+def test_resize():
+
+  m = Machine((2,3,5,1))
+  m.shape = (2,1)
+  m.hidden_activation = Logistic()
+  m.output_activation = Logistic()
+
+  nose.tools.eq_(m.shape, (2,1))
+  nose.tools.eq_(m.input_divide.shape, (2,))
+  nose.tools.eq_(m.input_subtract.shape, (2,))
+  nose.tools.eq_(len(m.weights), 1)
+  nose.tools.eq_(m.weights[0].shape, (2,1))
+  assert numpy.allclose(m.weights[0], 0., rtol=1e-10, atol=1e-15)
+  nose.tools.eq_(len(m.biases), 1)
+  nose.tools.eq_(m.biases[0].shape, (1,))
+  nose.tools.eq_(m.biases[0], 0.)
+  nose.tools.eq_(m.hidden_activation, Logistic())
+  nose.tools.eq_(m.output_activation, Logistic())
+
+  # calculate and match
+  weights = [numpy.random.rand(2,1)]
+  biases = [numpy.random.rand(1)]
+
+  m.weights = weights
+  m.biases = biases
+
+  pymac = PythonMachine(biases, weights, m.hidden_activation, m.output_activation)
+
+  X = numpy.random.rand(10,2)
+  assert numpy.allclose(m(X), pymac.forward(X), rtol=1e-10, atol=1e-15)
+
+def test_checks():
+
+  # tests if Machines check wrong settings
+  m = Machine((2,1))
+
+  # the Machine shape cannot have a single entry
+  nose.tools.assert_raises(RuntimeError, setattr, m, 'shape', (5,))
+
+  # you cannot set the weights vector with the wrong size
+  nose.tools.assert_raises(RuntimeError,
+      setattr, m, 'weights', [numpy.zeros((3,1), 'float64')])
+
+  # the same for the bias
+  nose.tools.assert_raises(RuntimeError,
+      setattr, m, 'biases', [numpy.zeros((5,), 'float64')])
+
+  # it works though if the sizes are correct
+  new_weights = [numpy.zeros((2,1), 'float64')]
+  new_weights[0].fill(3.14)
+  m.weights = new_weights
+
+  nose.tools.eq_(len(m.weights), 1)
+
+  assert (m.weights[0] == new_weights[0]).all()
+
+  new_biases = [numpy.zeros((1,), 'float64')]
+  new_biases[0].fill(5.71)
+  m.biases = new_biases
+
+  nose.tools.eq_(len(m.biases), 1)
+
+  assert (m.biases[0] == new_biases[0]).all()
+
+def test_persistence():
+
+  # make sure we can save and load a Machine
+  weights = []
+  weights.append(numpy.array([[.2, -.1, .2], [.2, .3, .9]]))
+  weights.append(numpy.array([[.1, .5], [-.1, .2], [-.1, 1.1]]))
+  biases = []
+  biases.append(numpy.array([-.1, .3, .1]))
+  biases.append(numpy.array([.2, -.1]))
+
+  m = Machine((2,3,2))
+  m.weights = weights
+  m.biases = biases
+
+  # saves the machine to a temporary file and re-loads it
+  machine_file = temporary_filename()
+  m.save(xbob.io.HDF5File(machine_file, 'w'))
+  m2 = Machine(xbob.io.HDF5File(machine_file))
+
+  assert m.is_similar_to(m2)
+  nose.tools.eq_(m, m2)
+  nose.tools.eq_(m.shape, m2.shape)
+  assert (m.input_subtract == m2.input_subtract).all()
+  assert (m.input_divide == m2.input_divide).all()
+
+  for i in range(len(m.weights)):
+    assert (m.weights[i] == m2.weights[i]).all()
+    assert (m.biases[i] == m2.biases[i]).all()
+
+def test_randomization():
+
+  m = Machine((2,3,2))
+  m.randomize()
+
+  for k in m.weights:
+    assert (abs(k) <= 0.1).all()
+    assert (k != 0).any()
+
+  for k in m.biases:
+    assert (abs(k) <= 0.1).all()
+    assert (k != 0).any()
+
+def test_randomization_margins():
+
+  # we can also reset the margins for randomization
+  for k in range(10):
+
+    m = Machine((2,3,2))
+    m.randomize(-0.001, 0.001)
+
+    for k in m.weights:
+      assert (abs(k) <= 0.001).all()
+      assert (k != 0).any()
+
+    for k in m.biases:
+      assert (abs(k) <= 0.001).all()
+      assert (k != 0).any()
+
+def test_randomness():
+
+  m1 = Machine((2,3,2))
+  m1.randomize()
+
+  for k in range(10):
+    time.sleep(0.1)
+    m2 = Machine((2,3,2))
+    m2.randomize()
+
+    for w1, w2 in zip(m1.weights, m2.weights):
+      nose.tools.eq_((w1 == w2).all(), False)
+
+    for b1, b2 in zip(m1.biases, m2.biases):
+      nose.tools.eq_((b1 == b2).all(), False)
diff --git a/xbob/learn/mlp/test_utils.py b/xbob/learn/mlp/test_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c7b56a1fd7f0ba8a3aef3004d56ba9c7b6af212
--- /dev/null
+++ b/xbob/learn/mlp/test_utils.py
@@ -0,0 +1,98 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Andre Anjos <andre.anjos@idiap.ch>
+# Thu 13 Jun 2013 15:54:19 CEST 
+
+"""Pythonic implementations of Multi-Layer Perceptrons for code testing
+"""
+
+import numpy
+
+class Machine:
+  """Represents a Multi-Layer Perceptron Machine with a single hidden layer"""
+
+  def __init__(self, bias, weights, hidden_activation, output_activation):
+    """Initializes the MLP with a number of inputs and outputs. Weights are
+    initialized randomly with the specified seed.
+    
+    Keyword parameters:
+
+    bias
+      A list of 1D numpy.ndarray's with 64-bit floating-point numbers
+      representing the biases for each layer of the MLP. Each ndarray must have
+      as many entries as neurons in that particular layer. If set to `None`,
+      disables the use of biases.
+
+    weights
+      A list of 2D numpy.ndarray's with 64-bit floating-point numbers
+      representing the weights for the MLP. The more entries, the more layers
+      the MLP has. Each matrix is organized with one column per neuron of the
+      following layer. Bias terms are passed separately (see ``bias``) and
+      are stacked internally as the first row of each weight matrix.
+
+    hidden_activation
+      The activation function to use for the hidden neurons of the network.
+      Should be an instance of a class derived from
+      :py:class:`xbob.learn.activation.Activation`.
+
+    output_activation
+      The activation function to use for the output neurons of the network.
+      Should be an instance of a class derived from
+      :py:class:`xbob.learn.activation.Activation`.
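+
+    A minimal, illustrative construction (``numpy.tanh`` stands in here for
+    an ``xbob.learn.activation`` object; any callable works for this pythonic
+    test helper):
+
+      >>> w = [numpy.zeros((2, 3)), numpy.zeros((3, 1))]
+      >>> b = [numpy.zeros((3,)), numpy.zeros((1,))]
+      >>> mac = Machine(b, w, numpy.tanh, numpy.tanh)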
+    """
+
+    if bias is None:
+      self.weights = weights
+      self.has_bias = False
+    else:
+      self.weights = [numpy.vstack([bias[k], weights[k]]) for k in range(len(bias))]
+      self.has_bias = True
+
+    self.hidden_activation = hidden_activation
+    self.output_activation = output_activation
+
+  def forward(self, X):
+    """Executes the forward step of the N-layer neural network.
+
+    Remember that:
+
+    1. z = X . w
+
+    and
+
+    2. Output: a = g(z), with g being the activation function
+
+    Keyword parameters:
+
+    X
+      The input vector containing examples organized in rows. The input
+      matrix does **not** contain the bias term.
+
+    Returns the outputs of the network for each row in X. Accumulates hidden
+    layer outputs and activations (for backward step). At the end of this
+    procedure:
+    
+    self.a
+      Input, including the bias term for all layers. 1 example per row. Bias =
+      first column.
+
+    self.z
+      Activations for every input X on given layer. z1 = a0 * w1
+    """
+    if self.has_bias:
+      self.a = [numpy.hstack([numpy.ones((len(X),1)), X])]
+    else:
+      self.a = [X]
+
+    self.z = []
+
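+    # hidden layers: linear response z = a . w followed by the hidden
+    # activation; with biases enabled, a constant 1 column is re-appended so
+    # the first row of the next weight matrix acts as that layer's bias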
+    for w in self.weights[:-1]:
+      self.z.append(numpy.dot(self.a[-1], w))
+      self.a.append(self.hidden_activation(self.z[-1]))
+      if self.has_bias:
+        self.a[-1] = numpy.hstack([numpy.ones((len(self.a[-1]),1)), self.a[-1]])
+
+    self.z.append(numpy.dot(self.a[-1], self.weights[-1]))
+    self.a.append(self.output_activation(self.z[-1]))
+
+    return self.a[-1]
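+
+# A short usage sketch (not part of the test suite; names below are
+# illustrative). ``numpy.tanh`` stands in for the ``xbob.learn.activation``
+# objects the real tests pass in.
+if __name__ == '__main__':
+  weights = [numpy.random.rand(2, 3), numpy.random.rand(3, 1)]
+  bias = [numpy.random.rand(3), numpy.random.rand(1)]
+  mac = Machine(bias, weights, numpy.tanh, numpy.tanh)
+  X = numpy.random.rand(5, 2)
+  assert mac.forward(X).shape == (5, 1)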